package org.apache.hadoop.hbase.regionserver.wal;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.OutputStream;
import java.io.UnsupportedEncodingException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.net.URLEncoder;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.NavigableSet;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.TreeSet;
import java.util.UUID;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.Syncable;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ClassSize;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.HasThread;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.util.StringUtils;

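/**
 * HLog is the write-ahead log (WAL) used by a region server.
 *
 * Each edit appended to the log carries an {@link HLogKey} (region, table and
 * sequence id) and a {@link WALEdit}. A single HLog is shared by every region
 * hosted on the server, so edits for different regions are interleaved in the
 * same files. Files are rolled when they approach the configured roll size
 * (a multiple of the HDFS block size), and files whose edits have all been
 * flushed to HFiles are archived to the old-log directory.
 */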
public class HLog implements Syncable {
  static final Log LOG = LogFactory.getLog(HLog.class);
  public static final byte [] METAFAMILY = Bytes.toBytes("METAFAMILY");
  static final byte [] METAROW = Bytes.toBytes("METAROW");

  public static final String SPLITTING_EXT = "-splitting";
  public static final boolean SPLIT_SKIP_ERRORS_DEFAULT = false;

  public static final String RECOVERED_EDITS_DIR = "recovered.edits";
  private static final Pattern EDITFILES_NAME_PATTERN =
    Pattern.compile("-?[0-9]+");
  public static final String RECOVERED_LOG_TMPFILE_SUFFIX = ".temp";

  private final FileSystem fs;
  private final Path dir;
  private final Configuration conf;

  private List<WALActionsListener> listeners =
    new CopyOnWriteArrayList<WALActionsListener>();
  private final long optionalFlushInterval;
  private final long blocksize;
  private final String prefix;
  private final AtomicLong unflushedEntries = new AtomicLong(0);
  private volatile long syncedTillHere = 0;
  private long lastDeferredTxid;
  private final Path oldLogDir;
  private volatile boolean logRollRunning;

  private static Class<? extends Writer> logWriterClass;
  private static Class<? extends Reader> logReaderClass;

  private WALCoprocessorHost coprocessorHost;

  static void resetLogReaderClass() {
    HLog.logReaderClass = null;
  }

  private FSDataOutputStream hdfs_out;

  private int minTolerableReplication;
  private Method getNumCurrentReplicas;
  final static Object [] NO_ARGS = new Object []{};

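  /**
   * Interface for iterating over the entries of a single WAL file.
   * Implementations are chosen via "hbase.regionserver.hlog.reader.impl".
   */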
  public interface Reader {
    void init(FileSystem fs, Path path, Configuration c) throws IOException;
    void close() throws IOException;
    Entry next() throws IOException;
    Entry next(Entry reuse) throws IOException;
    void seek(long pos) throws IOException;
    long getPosition() throws IOException;
    void reset() throws IOException;
  }

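  /**
   * Interface for appending entries to a WAL file and syncing them to the
   * filesystem. Implementations are chosen via
   * "hbase.regionserver.hlog.writer.impl".
   */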
  public interface Writer {
    void init(FileSystem fs, Path path, Configuration c) throws IOException;
    void close() throws IOException;
    void sync() throws IOException;
    void append(Entry entry) throws IOException;
    long getLength() throws IOException;
  }

  Writer writer;

  final SortedMap<Long, Path> outputfiles =
    Collections.synchronizedSortedMap(new TreeMap<Long, Path>());

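  /**
   * Map of encoded region name to the sequence id of the oldest edit appended
   * since that region's last flush. During a flush the entry is moved to a
   * "snp"-prefixed snapshot key; the map is used to decide which old log
   * files can safely be archived.
   */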
  private final ConcurrentSkipListMap<byte [], Long> lastSeqWritten =
    new ConcurrentSkipListMap<byte [], Long>(Bytes.BYTES_COMPARATOR);

  private volatile boolean closed = false;

  private final AtomicLong logSeqNum = new AtomicLong(0);

  private volatile long filenum = -1;

  private final AtomicInteger numEntries = new AtomicInteger(0);

  private volatile int consecutiveLogRolls = 0;
  private final int lowReplicationRollLimit;

  private volatile boolean lowReplicationRollEnabled = true;

  private final long logrollsize;

  private final Lock cacheFlushLock = new ReentrantLock();

  private final Object updateLock = new Object();
  private final Object flushLock = new Object();

  private final boolean enabled;

  private final int maxLogs;

  private final LogSyncer logSyncer;

  private final int closeErrorsTolerated;

  private final AtomicInteger closeErrorCount = new AtomicInteger();

  private static final Pattern pattern = Pattern.compile(".*\\.\\d*");

  static byte [] COMPLETE_CACHE_FLUSH;
  static {
    try {
      COMPLETE_CACHE_FLUSH =
        "HBASE::CACHEFLUSH".getBytes(HConstants.UTF8_ENCODING);
    } catch (UnsupportedEncodingException e) {
      assert(false);
    }
  }

  public static class Metric {
    public long min = Long.MAX_VALUE;
    public long max = 0;
    public long total = 0;
    public int count = 0;

    synchronized void inc(final long val) {
      min = Math.min(min, val);
      max = Math.max(max, val);
      total += val;
      ++count;
    }

    synchronized Metric get() {
      Metric copy = new Metric();
      copy.min = min;
      copy.max = max;
      copy.total = total;
      copy.count = count;
      this.min = Long.MAX_VALUE;
      this.max = 0;
      this.total = 0;
      this.count = 0;
      return copy;
    }
  }

  private static Metric writeTime = new Metric();
  private static Metric writeSize = new Metric();

  private static Metric syncTime = new Metric();

  private static AtomicLong slowHLogAppendCount = new AtomicLong();
  private static Metric slowHLogAppendTime = new Metric();

  public static Metric getWriteTime() {
    return writeTime.get();
  }

  public static Metric getWriteSize() {
    return writeSize.get();
  }

  public static Metric getSyncTime() {
    return syncTime.get();
  }

  public static long getSlowAppendCount() {
    return slowHLogAppendCount.get();
  }

  public static Metric getSlowAppendTime() {
    return slowHLogAppendTime.get();
  }

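  /**
   * Constructor that uses default listeners and prefix and fails if the log
   * directory already exists.
   *
   * @param fs filesystem handle
   * @param dir storage directory for new log files
   * @param oldLogDir directory where rolled-off logs are archived
   * @param conf configuration to use
   * @throws IOException if the directories cannot be created
   */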
  public HLog(final FileSystem fs, final Path dir, final Path oldLogDir,
      final Configuration conf)
  throws IOException {
    this(fs, dir, oldLogDir, conf, null, true, null);
  }

  public HLog(final FileSystem fs, final Path dir, final Path oldLogDir,
      final Configuration conf, final List<WALActionsListener> listeners,
      final String prefix) throws IOException {
    this(fs, dir, oldLogDir, conf, listeners, true, prefix);
  }

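  /**
   * Create an edit log at the given <code>dir</code> location, using files
   * named with the given prefix. Rolls the initial log file as part of
   * construction and starts the background {@link LogSyncer} thread when the
   * optional flush interval is positive.
   *
   * @param failIfLogDirExists if true, throw if <code>dir</code> already exists
   * @param prefix prefix for log file names; defaults to "hlog" when null or empty
   */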
  public HLog(final FileSystem fs, final Path dir, final Path oldLogDir,
      final Configuration conf, final List<WALActionsListener> listeners,
      final boolean failIfLogDirExists, final String prefix)
  throws IOException {
    super();
    this.fs = fs;
    this.dir = dir;
    this.conf = conf;
    if (listeners != null) {
      for (WALActionsListener i: listeners) {
        registerWALActionsListener(i);
      }
    }
    this.blocksize = conf.getLong("hbase.regionserver.hlog.blocksize",
        getDefaultBlockSize());

    float multi = conf.getFloat("hbase.regionserver.logroll.multiplier", 0.95f);
    this.logrollsize = (long)(this.blocksize * multi);
    this.optionalFlushInterval =
      conf.getLong("hbase.regionserver.optionallogflushinterval", 1 * 1000);
    if (failIfLogDirExists && fs.exists(dir)) {
      throw new IOException("Target HLog directory already exists: " + dir);
    }
    if (!fs.mkdirs(dir)) {
      throw new IOException("Unable to mkdir " + dir);
    }
    this.oldLogDir = oldLogDir;
    if (!fs.exists(oldLogDir)) {
      if (!fs.mkdirs(this.oldLogDir)) {
        throw new IOException("Unable to mkdir " + this.oldLogDir);
      }
    }
    this.maxLogs = conf.getInt("hbase.regionserver.maxlogs", 32);
    this.minTolerableReplication = conf.getInt(
        "hbase.regionserver.hlog.tolerable.lowreplication",
        this.fs.getDefaultReplication());
    this.lowReplicationRollLimit = conf.getInt(
        "hbase.regionserver.hlog.lowreplication.rolllimit", 5);
    this.enabled = conf.getBoolean("hbase.regionserver.hlog.enabled", true);
    this.closeErrorsTolerated = conf.getInt(
        "hbase.regionserver.logroll.errors.tolerated", 0);

    LOG.info("HLog configuration: blocksize=" +
      StringUtils.byteDesc(this.blocksize) +
      ", rollsize=" + StringUtils.byteDesc(this.logrollsize) +
      ", enabled=" + this.enabled +
      ", optionallogflushinterval=" + this.optionalFlushInterval + "ms");

    this.prefix = prefix == null || prefix.isEmpty() ?
        "hlog" : URLEncoder.encode(prefix, "UTF8");

    rollWriter();

    this.getNumCurrentReplicas = getGetNumCurrentReplicas(this.hdfs_out);

    logSyncer = new LogSyncer(this.optionalFlushInterval);

    if (this.optionalFlushInterval > 0) {
      Threads.setDaemonThreadRunning(logSyncer.getThread(), Thread.currentThread().getName()
          + ".logSyncer");
    } else {
      LOG.info("hbase.regionserver.optionallogflushinterval is set as "
          + this.optionalFlushInterval + ". Deferred log syncing won't work. "
          + "Any Mutation, marked to be deferred synced, will be flushed immediately.");
    }
    coprocessorHost = new WALCoprocessorHost(this, conf);
  }

  private long getDefaultBlockSize() throws IOException {
    Method m = null;
    Class<? extends FileSystem> cls = this.fs.getClass();
    try {
      m = cls.getMethod("getDefaultBlockSize",
          new Class<?>[] { Path.class });
    } catch (NoSuchMethodException e) {
      LOG.info("FileSystem doesn't support getDefaultBlockSize");
    } catch (SecurityException e) {
      LOG.info("Doesn't have access to getDefaultBlockSize on "
          + "FileSystems", e);
      m = null;
    }
    if (null == m) {
      return this.fs.getDefaultBlockSize();
    } else {
      try {
        Object ret = m.invoke(this.fs, this.dir);
        return ((Long)ret).longValue();
      } catch (Exception e) {
        throw new IOException(e);
      }
    }
  }

  private Method getGetNumCurrentReplicas(final FSDataOutputStream os) {
    Method m = null;
    if (os != null) {
      Class<? extends OutputStream> wrappedStreamClass = os.getWrappedStream()
          .getClass();
      try {
        m = wrappedStreamClass.getDeclaredMethod("getNumCurrentReplicas",
            new Class<?>[] {});
        m.setAccessible(true);
      } catch (NoSuchMethodException e) {
        LOG.info("FileSystem's output stream doesn't support"
            + " getNumCurrentReplicas; --HDFS-826 not available; fsOut="
            + wrappedStreamClass.getName());
      } catch (SecurityException e) {
        LOG.info("Doesn't have access to getNumCurrentReplicas on "
            + "FileSystems's output stream --HDFS-826 not available; fsOut="
            + wrappedStreamClass.getName(), e);
        m = null;
      }
    }
    if (m != null) {
      LOG.info("Using getNumCurrentReplicas--HDFS-826");
    }
    return m;
  }

  public void registerWALActionsListener(final WALActionsListener listener) {
    this.listeners.add(listener);
  }

  public boolean unregisterWALActionsListener(final WALActionsListener listener) {
    return this.listeners.remove(listener);
  }

  public long getFilenum() {
    return this.filenum;
  }

  public void setSequenceNumber(final long newvalue) {
    for (long id = this.logSeqNum.get(); id < newvalue &&
        !this.logSeqNum.compareAndSet(id, newvalue); id = this.logSeqNum.get()) {
      LOG.debug("Changed sequenceid from " + logSeqNum + " to " + newvalue);
    }
  }

  public long getSequenceNumber() {
    return logSeqNum.get();
  }

  OutputStream getOutputStream() {
    return this.hdfs_out.getWrappedStream();
  }

  public byte [][] rollWriter() throws FailedLogCloseException, IOException {
    return rollWriter(false);
  }

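  /**
   * Roll the log writer: close the current file and open a new one.
   * Takes the cache-flush lock so no flush can start while the roll is in
   * progress. Returns the encoded names of regions whose memstores should be
   * flushed so that older log files can be archived, or null if none.
   *
   * @param force roll even if no entries have been written to the current file
   */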
  public byte [][] rollWriter(boolean force)
      throws FailedLogCloseException, IOException {
    if (!force && this.writer != null && this.numEntries.get() <= 0) {
      return null;
    }
    byte [][] regionsToFlush = null;
    this.cacheFlushLock.lock();
    this.logRollRunning = true;
    try {
      if (closed) {
        LOG.debug("HLog closed. Skipping rolling of writer");
        return regionsToFlush;
      }

      long currentFilenum = this.filenum;
      Path oldPath = null;
      if (currentFilenum > 0) {
        oldPath = computeFilename(currentFilenum);
      }
      this.filenum = System.currentTimeMillis();
      Path newPath = computeFilename();

      if (!this.listeners.isEmpty()) {
        for (WALActionsListener i : this.listeners) {
          i.preLogRoll(oldPath, newPath);
        }
      }
      HLog.Writer nextWriter = this.createWriterInstance(fs, newPath, conf);

      FSDataOutputStream nextHdfsOut = null;
      if (nextWriter instanceof SequenceFileLogWriter) {
        nextHdfsOut = ((SequenceFileLogWriter)nextWriter).getWriterFSDataOutputStream();
      }

      synchronized (updateLock) {
        Path oldFile = cleanupCurrentWriter(currentFilenum);
        this.writer = nextWriter;
        this.hdfs_out = nextHdfsOut;

        LOG.info((oldFile != null?
            "Roll " + FSUtils.getPath(oldFile) + ", entries=" +
            this.numEntries.get() +
            ", filesize=" +
            this.fs.getFileStatus(oldFile).getLen() + ". ": "") +
            " for " + FSUtils.getPath(newPath));
        this.numEntries.set(0);
      }

      if (!this.listeners.isEmpty()) {
        for (WALActionsListener i : this.listeners) {
          i.postLogRoll(oldPath, newPath);
        }
      }

      if (this.outputfiles.size() > 0) {
        if (this.lastSeqWritten.isEmpty()) {
          LOG.debug("Last sequenceid written is empty. Deleting all old hlogs");
          for (Map.Entry<Long, Path> e : this.outputfiles.entrySet()) {
            archiveLogFile(e.getValue(), e.getKey());
          }
          this.outputfiles.clear();
        } else {
          regionsToFlush = cleanOldLogs();
        }
      }
    } finally {
      this.logRollRunning = false;
      this.cacheFlushLock.unlock();
    }
    return regionsToFlush;
  }

  protected Writer createWriterInstance(final FileSystem fs, final Path path,
      final Configuration conf) throws IOException {
    return createWriter(fs, path, conf);
  }

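  /**
   * Create a reader for the given WAL file. The reader class is taken from
   * "hbase.regionserver.hlog.reader.impl", defaulting to
   * {@link SequenceFileLogReader}.
   */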
  public static Reader getReader(final FileSystem fs, final Path path,
      Configuration conf)
  throws IOException {
    try {
      if (logReaderClass == null) {
        logReaderClass = conf.getClass("hbase.regionserver.hlog.reader.impl",
            SequenceFileLogReader.class, Reader.class);
      }

      HLog.Reader reader = logReaderClass.newInstance();
      reader.init(fs, path, conf);
      return reader;
    } catch (IOException e) {
      throw e;
    } catch (Exception e) {
      throw new IOException("Cannot get log reader", e);
    }
  }

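  /**
   * Create a writer for a new WAL file. The writer class is taken from
   * "hbase.regionserver.hlog.writer.impl", defaulting to
   * {@link SequenceFileLogWriter}.
   */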
  public static Writer createWriter(final FileSystem fs,
      final Path path, Configuration conf)
  throws IOException {
    try {
      if (logWriterClass == null) {
        logWriterClass = conf.getClass("hbase.regionserver.hlog.writer.impl",
            SequenceFileLogWriter.class, Writer.class);
      }
      HLog.Writer writer = (HLog.Writer) logWriterClass.newInstance();
      writer.init(fs, path, conf);
      return writer;
    } catch (Exception e) {
      throw new IOException("cannot get log writer", e);
    }
  }

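  /**
   * Archive log files whose edits have all been flushed, and report regions
   * that should be flushed when more than "hbase.regionserver.maxlogs" files
   * are still outstanding.
   */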
  private byte [][] cleanOldLogs() throws IOException {
    Long oldestOutstandingSeqNum = getOldestOutstandingSeqNum();

    TreeSet<Long> sequenceNumbers =
      new TreeSet<Long>(this.outputfiles.headMap(
        (Long.valueOf(oldestOutstandingSeqNum.longValue()))).keySet());

    int logsToRemove = sequenceNumbers.size();
    if (logsToRemove > 0) {
      if (LOG.isDebugEnabled()) {
        byte [] oldestRegion = getOldestRegion(oldestOutstandingSeqNum);
        LOG.debug("Found " + logsToRemove + " hlogs to remove" +
          " out of total " + this.outputfiles.size() + ";" +
          " oldest outstanding sequenceid is " + oldestOutstandingSeqNum +
          " from region " + Bytes.toStringBinary(oldestRegion));
      }
      for (Long seq : sequenceNumbers) {
        archiveLogFile(this.outputfiles.remove(seq), seq);
      }
    }

    byte [][] regions = null;
    int logCount = this.outputfiles == null? 0: this.outputfiles.size();
    if (logCount > this.maxLogs && logCount > 0) {
      regions = findMemstoresWithEditsEqualOrOlderThan(this.outputfiles.firstKey(),
        this.lastSeqWritten);
      if (regions != null) {
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < regions.length; i++) {
          if (i > 0) sb.append(", ");
          sb.append(Bytes.toStringBinary(regions[i]));
        }
        LOG.info("Too many hlogs: logs=" + logCount + ", maxlogs=" +
          this.maxLogs + "; forcing flush of " + regions.length + " region(s): " +
          sb.toString());
      }
    }
    return regions;
  }

  static byte [][] findMemstoresWithEditsEqualOrOlderThan(final long oldestWALseqid,
      final Map<byte [], Long> regionsToSeqids) {
    List<byte []> regions = null;
    for (Map.Entry<byte [], Long> e: regionsToSeqids.entrySet()) {
      if (e.getValue().longValue() <= oldestWALseqid) {
        if (regions == null) regions = new ArrayList<byte []>();
        regions.add(e.getKey());
      }
    }
    return regions == null?
      null: regions.toArray(new byte [][] {HConstants.EMPTY_BYTE_ARRAY});
  }

  private Long getOldestOutstandingSeqNum() {
    return Collections.min(this.lastSeqWritten.values());
  }

  private byte [] getOldestRegion(final Long oldestOutstandingSeqNum) {
    byte [] oldestRegion = null;
    for (Map.Entry<byte [], Long> e: this.lastSeqWritten.entrySet()) {
      if (e.getValue().longValue() == oldestOutstandingSeqNum.longValue()) {
        oldestRegion = e.getKey();
        break;
      }
    }
    return oldestRegion;
  }

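  /**
   * Sync any outstanding transactions and close the current writer, recording
   * the finished file in {@link #outputfiles}. Close failures are tolerated up
   * to "hbase.regionserver.logroll.errors.tolerated" times unless there are
   * unflushed deferred edits, in which case a FailedLogCloseException is thrown.
   */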
  Path cleanupCurrentWriter(final long currentfilenum) throws IOException {
    Path oldFile = null;
    if (this.writer != null) {
      try {
        if (this.unflushedEntries.get() != this.syncedTillHere) {
          LOG.debug("cleanupCurrentWriter " +
              " waiting for transactions to get synced " +
              " total " + this.unflushedEntries.get() +
              " synced till here " + syncedTillHere);
          sync();
        }
        this.writer.close();
        this.writer = null;
        closeErrorCount.set(0);
      } catch (IOException e) {
        LOG.error("Failed close of HLog writer", e);
        int errors = closeErrorCount.incrementAndGet();
        if (errors <= closeErrorsTolerated && !hasDeferredEntries()) {
          LOG.warn("Riding over HLog close failure! error count=" + errors);
        } else {
          if (hasDeferredEntries()) {
            LOG.error("Aborting due to unflushed edits in HLog");
          }
          FailedLogCloseException flce =
            new FailedLogCloseException("#" + currentfilenum);
          flce.initCause(e);
          throw flce;
        }
      }
      if (currentfilenum >= 0) {
        oldFile = computeFilename(currentfilenum);
        this.outputfiles.put(Long.valueOf(this.logSeqNum.get()), oldFile);
      }
    }
    return oldFile;
  }

  private void archiveLogFile(final Path p, final Long seqno) throws IOException {
    Path newPath = getHLogArchivePath(this.oldLogDir, p);
    LOG.info("moving old hlog file " + FSUtils.getPath(p) +
        " whose highest sequenceid is " + seqno + " to " +
        FSUtils.getPath(newPath));

    if (!this.listeners.isEmpty()) {
      for (WALActionsListener i : this.listeners) {
        i.preLogArchive(p, newPath);
      }
    }
    if (!this.fs.rename(p, newPath)) {
      throw new IOException("Unable to rename " + p + " to " + newPath);
    }

    if (!this.listeners.isEmpty()) {
      for (WALActionsListener i : this.listeners) {
        i.postLogArchive(p, newPath);
      }
    }
  }

  protected Path computeFilename() {
    return computeFilename(this.filenum);
  }

  protected Path computeFilename(long filenum) {
    if (filenum < 0) {
      throw new RuntimeException("hlog file number can't be < 0");
    }
    return new Path(dir, prefix + "." + filenum);
  }

  public void closeAndDelete() throws IOException {
    close();
    if (!fs.exists(this.dir)) return;
    FileStatus[] files = fs.listStatus(this.dir);
    for (FileStatus file : files) {
      Path p = getHLogArchivePath(this.oldLogDir, file.getPath());

      if (!this.listeners.isEmpty()) {
        for (WALActionsListener i : this.listeners) {
          i.preLogArchive(file.getPath(), p);
        }
      }

      if (!fs.rename(file.getPath(), p)) {
        throw new IOException("Unable to rename " + file.getPath() + " to " + p);
      }

      if (!this.listeners.isEmpty()) {
        for (WALActionsListener i : this.listeners) {
          i.postLogArchive(file.getPath(), p);
        }
      }
    }
    LOG.debug("Moved " + files.length + " log files to " +
        FSUtils.getPath(this.oldLogDir));
    if (!fs.delete(dir, true)) {
      LOG.info("Unable to delete " + dir);
    }
  }

  public void close() throws IOException {
    if (this.optionalFlushInterval > 0) {
      try {
        logSyncer.close();
        logSyncer.join(this.optionalFlushInterval * 2);
      } catch (InterruptedException e) {
        LOG.error("Exception while waiting for syncer thread to die", e);
      }
    }

    cacheFlushLock.lock();
    try {
      if (!this.listeners.isEmpty()) {
        for (WALActionsListener i : this.listeners) {
          i.logCloseRequested();
        }
      }
      synchronized (updateLock) {
        this.closed = true;
        if (LOG.isDebugEnabled()) {
          LOG.debug("closing hlog writer in " + this.dir.toString());
        }
        if (this.writer != null) {
          this.writer.close();
        }
      }
    } finally {
      cacheFlushLock.unlock();
    }
  }

  protected HLogKey makeKey(byte[] regionName, byte[] tableName, long seqnum,
      long now, UUID clusterId) {
    return new HLogKey(regionName, tableName, seqnum, now, clusterId);
  }

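  /**
   * Append an edit using a caller-supplied key. Assigns the next sequence id,
   * records the region's oldest unflushed sequence id, hands the entry to the
   * syncer's pending list and, if <code>doSync</code> is set and the table is
   * not deferred-flush (or is the meta region), blocks until the edit is
   * synced. Returns the transaction id of the appended edit.
   */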
  public long append(HRegionInfo regionInfo, HLogKey logKey, WALEdit logEdit,
      HTableDescriptor htd, boolean doSync)
  throws IOException {
    if (this.closed) {
      throw new IOException("Cannot append; log is closed");
    }
    long txid = 0;
    synchronized (updateLock) {
      long seqNum = obtainSeqNum();
      logKey.setLogSeqNum(seqNum);

      this.lastSeqWritten.putIfAbsent(regionInfo.getEncodedNameAsBytes(),
          Long.valueOf(seqNum));
      doWrite(regionInfo, logKey, logEdit, htd);
      txid = this.unflushedEntries.incrementAndGet();
      this.numEntries.incrementAndGet();
      if (htd.isDeferredLogFlush()) {
        lastDeferredTxid = txid;
      }
    }

    if (doSync &&
        (regionInfo.isMetaRegion() ||
        !htd.isDeferredLogFlush())) {
      this.sync(txid);
    }
    return txid;
  }

  public void append(HRegionInfo info, byte [] tableName, WALEdit edits,
      final long now, HTableDescriptor htd)
  throws IOException {
    append(info, tableName, edits, HConstants.DEFAULT_CLUSTER_ID, now, htd);
  }

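  /**
   * Core append path shared by the public append/appendNoSync methods. Records
   * the region's oldest unflushed sequence id, queues the entry with the
   * syncer, and syncs up to the returned transaction id when
   * <code>doSync</code> is requested and the table is not deferred-flush.
   */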
  private long append(HRegionInfo info, byte [] tableName, WALEdit edits, UUID clusterId,
      final long now, HTableDescriptor htd, boolean doSync)
  throws IOException {
    if (edits.isEmpty()) return this.unflushedEntries.get();
    if (this.closed) {
      throw new IOException("Cannot append; log is closed");
    }
    long txid = 0;
    synchronized (this.updateLock) {
      long seqNum = obtainSeqNum();

      byte [] encodedRegionName = info.getEncodedNameAsBytes();
      this.lastSeqWritten.putIfAbsent(encodedRegionName, seqNum);
      HLogKey logKey = makeKey(encodedRegionName, tableName, seqNum, now, clusterId);
      doWrite(info, logKey, edits, htd);
      this.numEntries.incrementAndGet();
      txid = this.unflushedEntries.incrementAndGet();
      if (htd.isDeferredLogFlush()) {
        lastDeferredTxid = txid;
      }
    }

    if (doSync &&
        (info.isMetaRegion() ||
        !htd.isDeferredLogFlush())) {
      this.sync(txid);
    }
    return txid;
  }

  public long appendNoSync(HRegionInfo info, byte [] tableName, WALEdit edits,
      UUID clusterId, final long now, HTableDescriptor htd)
  throws IOException {
    return append(info, tableName, edits, clusterId, now, htd, false);
  }

  public long append(HRegionInfo info, byte [] tableName, WALEdit edits,
      UUID clusterId, final long now, HTableDescriptor htd)
  throws IOException {
    return append(info, tableName, edits, clusterId, now, htd, true);
  }

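  /**
   * Background thread that periodically syncs the log when there are unsynced
   * entries, waking up every optionalFlushInterval milliseconds (used for
   * deferred-log-flush tables). Appends are buffered in {@link #pendingWrites}
   * until {@link #hlogFlush(Writer, List)} writes them to the current writer.
   */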
  class LogSyncer extends HasThread {

    private final long optionalFlushInterval;

    private AtomicBoolean closeLogSyncer = new AtomicBoolean(false);

    private List<Entry> pendingWrites = new LinkedList<Entry>();

    LogSyncer(long optionalFlushInterval) {
      this.optionalFlushInterval = optionalFlushInterval;
    }

    @Override
    public void run() {
      try {
        while (!this.isInterrupted() && !closeLogSyncer.get()) {
          try {
            if (unflushedEntries.get() <= syncedTillHere) {
              synchronized (closeLogSyncer) {
                closeLogSyncer.wait(this.optionalFlushInterval);
              }
            }
            sync();
          } catch (IOException e) {
            LOG.error("Error while syncing, requesting close of hlog ", e);
            requestLogRoll();
          }
        }
      } catch (InterruptedException e) {
        LOG.debug(getName() + " interrupted while waiting for sync requests");
      } finally {
        LOG.info(getName() + " exiting");
      }
    }

    synchronized void append(Entry e) throws IOException {
      pendingWrites.add(e);
    }

    synchronized List<Entry> getPendingWrites() {
      List<Entry> save = this.pendingWrites;
      this.pendingWrites = new LinkedList<Entry>();
      return save;
    }

    void hlogFlush(Writer writer, List<Entry> pending) throws IOException {
      if (pending == null) return;

      for (Entry e : pending) {
        writer.append(e);
      }
    }

    void close() {
      synchronized (closeLogSyncer) {
        closeLogSyncer.set(true);
        closeLogSyncer.notifyAll();
      }
    }
  }

  private void syncer() throws IOException {
    syncer(this.unflushedEntries.get());
  }

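  /**
   * Flush pending writes and sync the underlying writer up to at least the
   * given transaction id, retrying once against a freshly rolled writer if the
   * first attempt fails. Also requests a log roll when the file has grown past
   * the roll size.
   */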
  private void syncer(long txid) throws IOException {
    Writer tempWriter;
    synchronized (this.updateLock) {
      if (this.closed) return;
      tempWriter = this.writer;
    }

    if (txid <= this.syncedTillHere) {
      return;
    }
    try {
      long doneUpto;
      long now = System.currentTimeMillis();

      IOException ioe = null;
      List<Entry> pending = null;
      synchronized (flushLock) {
        if (txid <= this.syncedTillHere) {
          return;
        }
        doneUpto = this.unflushedEntries.get();
        pending = logSyncer.getPendingWrites();
        try {
          logSyncer.hlogFlush(tempWriter, pending);
        } catch (IOException io) {
          ioe = io;
          LOG.error("syncer encountered error, will retry. txid=" + txid, ioe);
        }
      }
      if (ioe != null && pending != null) {
        synchronized (this.updateLock) {
          synchronized (flushLock) {
            tempWriter = this.writer;
            logSyncer.hlogFlush(tempWriter, pending);
          }
        }
      }

      if (txid <= this.syncedTillHere) {
        return;
      }
      try {
        tempWriter.sync();
      } catch (IOException io) {
        synchronized (this.updateLock) {
          tempWriter = this.writer;
          tempWriter.sync();
        }
      }
      this.syncedTillHere = Math.max(this.syncedTillHere, doneUpto);

      syncTime.inc(System.currentTimeMillis() - now);
      if (!this.logRollRunning) {
        checkLowReplication();
        try {
          if (tempWriter.getLength() > this.logrollsize) {
            requestLogRoll();
          }
        } catch (IOException x) {
          LOG.debug("Log roll failed and will be retried. (This is not an error)");
        }
      }
    } catch (IOException e) {
      LOG.fatal("Could not sync. Requesting close of hlog", e);
      requestLogRoll();
      throw e;
    }
  }

  private void checkLowReplication() {
    try {
      int numCurrentReplicas = getLogReplication();
      if (numCurrentReplicas != 0
          && numCurrentReplicas < this.minTolerableReplication) {
        if (this.lowReplicationRollEnabled) {
          if (this.consecutiveLogRolls < this.lowReplicationRollLimit) {
            LOG.warn("HDFS pipeline error detected. " + "Found "
                + numCurrentReplicas + " replicas but expecting no less than "
                + this.minTolerableReplication + " replicas. "
                + " Requesting close of hlog.");
            requestLogRoll();

            this.consecutiveLogRolls++;
          } else {
            LOG.warn("Too many consecutive RollWriter requests, it's a sign of "
                + "the total number of live datanodes is lower than the tolerable replicas.");
            this.consecutiveLogRolls = 0;
            this.lowReplicationRollEnabled = false;
          }
        }
      } else if (numCurrentReplicas >= this.minTolerableReplication) {
        if (!this.lowReplicationRollEnabled) {
          if (this.numEntries.get() <= 1) {
            return;
          }

          this.lowReplicationRollEnabled = true;
          LOG.info("LowReplication-Roller was enabled.");
        }
      }
    } catch (Exception e) {
      LOG.warn("Unable to invoke DFSOutputStream.getNumCurrentReplicas: " + e +
          "; still proceeding ahead...");
    }
  }

  int getLogReplication()
  throws IllegalArgumentException, IllegalAccessException, InvocationTargetException {
    if (this.getNumCurrentReplicas != null && this.hdfs_out != null) {
      Object repl = this.getNumCurrentReplicas.invoke(getOutputStream(), NO_ARGS);
      if (repl instanceof Integer) {
        return ((Integer)repl).intValue();
      }
    }
    return 0;
  }

  boolean canGetCurReplicas() {
    return this.getNumCurrentReplicas != null;
  }

  public void hsync() throws IOException {
    syncer();
  }

  public void hflush() throws IOException {
    syncer();
  }

  public void sync() throws IOException {
    syncer();
  }

  public void sync(long txid) throws IOException {
    syncer(txid);
  }

  private void requestLogRoll() {
    if (!this.listeners.isEmpty()) {
      for (WALActionsListener i: this.listeners) {
        i.logRollRequested();
      }
    }
  }

  protected void doWrite(HRegionInfo info, HLogKey logKey, WALEdit logEdit,
      HTableDescriptor htd)
  throws IOException {
    if (!this.enabled) {
      return;
    }
    if (!this.listeners.isEmpty()) {
      for (WALActionsListener i: this.listeners) {
        i.visitLogEntryBeforeWrite(htd, logKey, logEdit);
      }
    }
    try {
      long now = System.currentTimeMillis();

      if (!coprocessorHost.preWALWrite(info, logKey, logEdit)) {
        logSyncer.append(new HLog.Entry(logKey, logEdit));
      }
      long took = System.currentTimeMillis() - now;
      coprocessorHost.postWALWrite(info, logKey, logEdit);
      writeTime.inc(took);
      long len = 0;
      for (KeyValue kv : logEdit.getKeyValues()) {
        len += kv.getLength();
      }
      writeSize.inc(len);
      if (took > 1000) {
        LOG.warn(String.format(
            "%s took %d ms appending an edit to hlog; editcount=%d, len~=%s",
            Thread.currentThread().getName(), took, this.numEntries.get(),
            StringUtils.humanReadableInt(len)));
        slowHLogAppendCount.incrementAndGet();
        slowHLogAppendTime.inc(took);
      }
    } catch (IOException e) {
      LOG.fatal("Could not append. Requesting close of hlog", e);
      requestLogRoll();
      throw e;
    }
  }

  int getNumEntries() {
    return numEntries.get();
  }

  private long obtainSeqNum() {
    return this.logSeqNum.incrementAndGet();
  }

  int getNumLogFiles() {
    return outputfiles.size();
  }

  private byte[] getSnapshotName(byte[] encodedRegionName) {
    byte[] snp = new byte[encodedRegionName.length + 3];
    snp[0] = 's'; snp[1] = 'n'; snp[2] = 'p';
    for (int i = 0; i < encodedRegionName.length; i++) {
      snp[i+3] = encodedRegionName[i];
    }
    return snp;
  }

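  /**
   * Called by a region before it flushes its memstore. Takes the cache-flush
   * lock and moves the region's oldest-unflushed sequence id to a snapshot
   * entry so that a concurrent log roll does not archive files the flush may
   * still need. Must be paired with completeCacheFlush or abortCacheFlush.
   *
   * @return a sequence id to associate with the flush
   */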
  public long startCacheFlush(final byte[] encodedRegionName) {
    this.cacheFlushLock.lock();
    Long seq = this.lastSeqWritten.remove(encodedRegionName);
    if (seq != null) {
      Long oldseq =
        lastSeqWritten.put(getSnapshotName(encodedRegionName), seq);
      if (oldseq != null) {
        LOG.error("Logic Error Snapshot seq id from earlier flush still" +
            " present! for region " + Bytes.toString(encodedRegionName) +
            " overwritten oldseq=" + oldseq + " with new seq=" + seq);
        Runtime.getRuntime().halt(1);
      }
    }
    return obtainSeqNum();
  }

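  /**
   * Called by a region after its memstore flush succeeds. Writes a
   * COMPLETE_CACHE_FLUSH marker entry, clears the snapshot entry created by
   * {@link #startCacheFlush(byte[])} and releases the cache-flush lock.
   */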
  public void completeCacheFlush(final byte [] encodedRegionName,
      final byte [] tableName, final long logSeqId, final boolean isMetaRegion)
  throws IOException {
    try {
      if (this.closed) {
        return;
      }
      long txid = 0;
      synchronized (updateLock) {
        long now = System.currentTimeMillis();
        WALEdit edit = completeCacheFlushLogEdit();
        HLogKey key = makeKey(encodedRegionName, tableName, logSeqId,
            System.currentTimeMillis(), HConstants.DEFAULT_CLUSTER_ID);
        logSyncer.append(new Entry(key, edit));
        txid = this.unflushedEntries.incrementAndGet();
        writeTime.inc(System.currentTimeMillis() - now);
        long len = 0;
        for (KeyValue kv : edit.getKeyValues()) {
          len += kv.getLength();
        }
        writeSize.inc(len);
        this.numEntries.incrementAndGet();
      }

      this.sync(txid);

    } finally {
      this.lastSeqWritten.remove(getSnapshotName(encodedRegionName));
      this.cacheFlushLock.unlock();
    }
  }

  private WALEdit completeCacheFlushLogEdit() {
    KeyValue kv = new KeyValue(METAROW, METAFAMILY, null,
        System.currentTimeMillis(), COMPLETE_CACHE_FLUSH);
    WALEdit e = new WALEdit();
    e.add(kv);
    return e;
  }

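  /**
   * Called when a memstore flush fails. Restores the snapshot sequence id
   * taken in {@link #startCacheFlush(byte[])} so the region's edits are still
   * protected from log archiving, then releases the cache-flush lock.
   */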
  public void abortCacheFlush(byte[] encodedRegionName) {
    Long snapshot_seq =
      this.lastSeqWritten.remove(getSnapshotName(encodedRegionName));
    if (snapshot_seq != null) {
      Long current_memstore_earliest_seq =
        this.lastSeqWritten.put(encodedRegionName, snapshot_seq);
      if (current_memstore_earliest_seq != null &&
          (current_memstore_earliest_seq.longValue() <=
          snapshot_seq.longValue())) {
        LOG.error("Logic Error region " + Bytes.toString(encodedRegionName) +
            " acquired edits out of order; current memstore seq=" +
            current_memstore_earliest_seq + " snapshot seq=" + snapshot_seq);
        Runtime.getRuntime().halt(1);
      }
    }
    this.cacheFlushLock.unlock();
  }

  public static boolean isMetaFamily(byte [] family) {
    return Bytes.equals(METAFAMILY, family);
  }

  public boolean isLowReplicationRollEnabled() {
    return lowReplicationRollEnabled;
  }

  @SuppressWarnings("unchecked")
  public static Class<? extends HLogKey> getKeyClass(Configuration conf) {
    return (Class<? extends HLogKey>)
      conf.getClass("hbase.regionserver.hlog.keyclass", HLogKey.class);
  }

  public static HLogKey newKey(Configuration conf) throws IOException {
    Class<? extends HLogKey> keyClass = getKeyClass(conf);
    try {
      return keyClass.newInstance();
    } catch (InstantiationException e) {
      throw new IOException("cannot create hlog key", e);
    } catch (IllegalAccessException e) {
      throw new IOException("cannot create hlog key", e);
    }
  }

  public static class Entry implements Writable {
    private WALEdit edit;
    private HLogKey key;

    public Entry() {
      edit = new WALEdit();
      key = new HLogKey();
    }

    public Entry(HLogKey key, WALEdit edit) {
      super();
      this.key = key;
      this.edit = edit;
    }

    public WALEdit getEdit() {
      return edit;
    }

    public HLogKey getKey() {
      return key;
    }

    public void setCompressionContext(CompressionContext compressionContext) {
      edit.setCompressionContext(compressionContext);
      key.setCompressionContext(compressionContext);
    }

    @Override
    public String toString() {
      return this.key + "=" + this.edit;
    }

    @Override
    public void write(DataOutput dataOutput) throws IOException {
      this.key.write(dataOutput);
      this.edit.write(dataOutput);
    }

    @Override
    public void readFields(DataInput dataInput) throws IOException {
      this.key.readFields(dataInput);
      this.edit.readFields(dataInput);
    }
  }

  public static String getHLogDirectoryName(final String serverName) {
    StringBuilder dirName = new StringBuilder(HConstants.HREGION_LOGDIR_NAME);
    dirName.append("/");
    dirName.append(serverName);
    return dirName.toString();
  }

  protected Path getDir() {
    return dir;
  }

  public static boolean validateHLogFilename(String filename) {
    return pattern.matcher(filename).matches();
  }

  static Path getHLogArchivePath(Path oldLogDir, Path p) {
    return new Path(oldLogDir, p.getName());
  }

  static String formatRecoveredEditsFileName(final long seqid) {
    return String.format("%019d", seqid);
  }

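  /**
   * Return the recovered.edits files under the given region directory, sorted
   * by the sequence id encoded in their names. Temporary files (those ending
   * in ".temp") are skipped.
   */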
  public static NavigableSet<Path> getSplitEditFilesSorted(final FileSystem fs,
      final Path regiondir)
  throws IOException {
    NavigableSet<Path> filesSorted = new TreeSet<Path>();
    Path editsdir = getRegionDirRecoveredEditsDir(regiondir);
    if (!fs.exists(editsdir)) return filesSorted;
    FileStatus[] files = FSUtils.listStatus(fs, editsdir, new PathFilter() {
      @Override
      public boolean accept(Path p) {
        boolean result = false;
        try {
          Matcher m = EDITFILES_NAME_PATTERN.matcher(p.getName());
          result = fs.isFile(p) && m.matches();

          if (p.getName().endsWith(RECOVERED_LOG_TMPFILE_SUFFIX)) {
            result = false;
          }
        } catch (IOException e) {
          LOG.warn("Failed isFile check on " + p);
        }
        return result;
      }
    });
    if (files == null) return filesSorted;
    for (FileStatus status: files) {
      filesSorted.add(status.getPath());
    }
    return filesSorted;
  }

  public static Path moveAsideBadEditsFile(final FileSystem fs,
      final Path edits)
  throws IOException {
    Path moveAsideName = new Path(edits.getParent(), edits.getName() + "." +
        System.currentTimeMillis());
    if (!fs.rename(edits, moveAsideName)) {
      LOG.warn("Rename failed from " + edits + " to " + moveAsideName);
    }
    return moveAsideName;
  }

  public static Path getRegionDirRecoveredEditsDir(final Path regiondir) {
    return new Path(regiondir, RECOVERED_EDITS_DIR);
  }

  public static final long FIXED_OVERHEAD = ClassSize.align(
      ClassSize.OBJECT + (5 * ClassSize.REFERENCE) +
      ClassSize.ATOMIC_INTEGER + Bytes.SIZEOF_INT + (3 * Bytes.SIZEOF_LONG));

  private static void usage() {
    System.err.println("Usage: HLog <ARGS>");
    System.err.println("Arguments:");
    System.err.println(" --dump  Dump textual representation of passed one or more files");
    System.err.println("         For example: HLog --dump hdfs://example.com:9000/hbase/.logs/MACHINE/LOGFILE");
    System.err.println(" --split Split the passed directory of WAL logs");
    System.err.println("         For example: HLog --split hdfs://example.com:9000/hbase/.logs/DIR");
  }

  private static void split(final Configuration conf, final Path p)
  throws IOException {
    FileSystem fs = FileSystem.get(conf);
    if (!fs.exists(p)) {
      throw new FileNotFoundException(p.toString());
    }
    final Path baseDir = new Path(conf.get(HConstants.HBASE_DIR));
    final Path oldLogDir = new Path(baseDir, HConstants.HREGION_OLDLOGDIR_NAME);
    if (!fs.getFileStatus(p).isDir()) {
      throw new IOException(p + " is not a directory");
    }

    HLogSplitter logSplitter = HLogSplitter.createLogSplitter(
        conf, baseDir, p, oldLogDir, fs);
    logSplitter.splitLog();
  }

  public WALCoprocessorHost getCoprocessorHost() {
    return coprocessorHost;
  }

  boolean hasDeferredEntries() {
    return lastDeferredTxid > syncedTillHere;
  }

  public static void main(String[] args) throws IOException {
    if (args.length < 2) {
      usage();
      System.exit(-1);
    }

    if (args[0].compareTo("--dump") == 0) {
      HLogPrettyPrinter.run(Arrays.copyOfRange(args, 1, args.length));
    } else if (args[0].compareTo("--split") == 0) {
      Configuration conf = HBaseConfiguration.create();
      for (int i = 1; i < args.length; i++) {
        try {
          conf.set("fs.default.name", args[i]);
          conf.set("fs.defaultFS", args[i]);
          Path logPath = new Path(args[i]);
          split(conf, logPath);
        } catch (Throwable t) {
          t.printStackTrace(System.err);
          System.exit(-1);
        }
      }
    } else {
      usage();
      System.exit(-1);
    }
  }
}