/**
 * Copyright 2007 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver.wal;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.lang.reflect.Method;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.regionserver.wal.HLog.Reader;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.hadoop.io.SequenceFile;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;

/** JUnit test case for HLog */
public class TestHLog {
  private static final Log LOG = LogFactory.getLog(TestHLog.class);
  {
    // Crank HDFS and HLog logging up to ALL so failures are easier to diagnose.
    ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger)HLog.LOG).getLogger().setLevel(Level.ALL);
  }

  // Shared fixtures, initialized once in setUpBeforeClass().
  private static Configuration conf;
  private static FileSystem fs;
  private static Path dir;
  private static MiniDFSCluster cluster;
  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  private static Path hbaseDir;
  private static Path oldLogDir;

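  // setUp wipes the mini-DFS root so each test starts from an empty filesystem.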
  @Before
  public void setUp() throws Exception {
    FileStatus[] entries = fs.listStatus(new Path("/"));
    for (FileStatus dir : entries) {
      fs.delete(dir.getPath(), true);
    }
  }

  @After
  public void tearDown() throws Exception {
  }

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    // Make block sizes small.
    TEST_UTIL.getConfiguration().setInt("dfs.blocksize", 1024 * 1024);
    TEST_UTIL.getConfiguration().setInt(
        "hbase.regionserver.flushlogentries", 1);
    // needed for testAppendClose()
    TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true);
    // quicker heartbeat interval for faster DN death notification
    TEST_UTIL.getConfiguration().setInt("heartbeat.recheck.interval", 5000);
    TEST_UTIL.getConfiguration().setInt("dfs.heartbeat.interval", 1);
    TEST_UTIL.getConfiguration().setInt("dfs.socket.timeout", 5000);
    // faster failover with the cluster.shutdown(); fs.close() idiom
    TEST_UTIL.getConfiguration()
        .setInt("ipc.client.connect.max.retries", 1);
    TEST_UTIL.getConfiguration().setInt(
        "dfs.client.block.recovery.retries", 1);
    TEST_UTIL.startMiniCluster(3);

    conf = TEST_UTIL.getConfiguration();
    cluster = TEST_UTIL.getDFSCluster();
    fs = cluster.getFileSystem();

    hbaseDir = new Path(TEST_UTIL.getConfiguration().get("hbase.rootdir"));
    oldLogDir = new Path(hbaseDir, ".oldlogs");
    dir = new Path(hbaseDir, getName());
  }

  private static String getName() {
    return "TestHLog";
  }


  /**
   * Just write multiple logs then split.  Before fix for HADOOP-2283, this
   * would fail.
   * @throws IOException
   */
  @Test
  public void testSplit() throws IOException {
    final byte [] tableName = Bytes.toBytes(getName());
    final byte [] rowName = tableName;
    Path logdir = new Path(hbaseDir, HConstants.HREGION_LOGDIR_NAME);
    HLog log = new HLog(fs, logdir, oldLogDir, conf);
    final int howmany = 3;
    HRegionInfo[] infos = new HRegionInfo[3];
    Path tabledir = new Path(hbaseDir, getName());
    fs.mkdirs(tabledir);
    for (int i = 0; i < howmany; i++) {
      infos[i] = new HRegionInfo(new HTableDescriptor(tableName),
          Bytes.toBytes("" + i), Bytes.toBytes("" + (i + 1)), false);
      fs.mkdirs(new Path(tabledir, infos[i].getEncodedName()));
      LOG.info("Created region dir " +
          new Path(tabledir, infos[i].getEncodedName()).toString());
    }
    // Add edits for three regions.
    try {
      for (int ii = 0; ii < howmany; ii++) {
        for (int i = 0; i < howmany; i++) {
          for (int j = 0; j < howmany; j++) {
            WALEdit edit = new WALEdit();
            byte [] family = Bytes.toBytes("column");
            byte [] qualifier = Bytes.toBytes(Integer.toString(j));
            byte [] column = Bytes.toBytes("column:" + Integer.toString(j));
            edit.add(new KeyValue(rowName, family, qualifier,
                System.currentTimeMillis(), column));
            LOG.info("Region " + i + ": " + edit);
            log.append(infos[i], tableName, edit,
              System.currentTimeMillis());
          }
        }
        log.rollWriter();
      }
      log.close();
      HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
          hbaseDir, logdir, oldLogDir, fs);
      List<Path> splits = logSplitter.splitLog();
      verifySplits(splits, howmany);
      log = null;
    } finally {
      if (log != null) {
        log.closeAndDelete();
      }
    }
  }

  /**
   * Test new HDFS-265 sync.
   * @throws Exception
   */
  @Test
  public void Broken_testSync() throws Exception {
    byte [] bytes = Bytes.toBytes(getName());
    // First verify that using streams all works.
    Path p = new Path(dir, getName() + ".fsdos");
    FSDataOutputStream out = fs.create(p);
    out.write(bytes);
    out.sync();
    FSDataInputStream in = fs.open(p);
    assertTrue(in.available() > 0);
    byte [] buffer = new byte [1024];
    int read = in.read(buffer);
    assertEquals(bytes.length, read);
    out.close();
    in.close();
    Path subdir = new Path(dir, "hlogdir");
    HLog wal = new HLog(fs, subdir, oldLogDir, conf);
    final int total = 20;

    HRegionInfo info = new HRegionInfo(new HTableDescriptor(bytes),
        null, null, false);

    for (int i = 0; i < total; i++) {
      WALEdit kvs = new WALEdit();
      kvs.add(new KeyValue(Bytes.toBytes(i), bytes, bytes));
      wal.append(info, bytes, kvs, System.currentTimeMillis());
    }
    // Now call sync and try reading.  Opening a Reader before the sync just
    // gives an EOFException.
    wal.sync();
    // Open a Reader.
    Path walPath = wal.computeFilename();
    HLog.Reader reader = HLog.getReader(fs, walPath, conf);
    int count = 0;
    HLog.Entry entry = new HLog.Entry();
    while ((entry = reader.next(entry)) != null) count++;
    assertEquals(total, count);
    reader.close();
    // Check that opening a Reader works on a file that has had a sync done
    // on it but has not yet been closed.
    for (int i = 0; i < total; i++) {
      WALEdit kvs = new WALEdit();
      kvs.add(new KeyValue(Bytes.toBytes(i), bytes, bytes));
      wal.append(info, bytes, kvs, System.currentTimeMillis());
    }
    reader = HLog.getReader(fs, walPath, conf);
    count = 0;
    while ((entry = reader.next(entry)) != null) count++;
    assertTrue(count >= total);
    reader.close();
    // If I sync, should see double the edits.
    wal.sync();
    reader = HLog.getReader(fs, walPath, conf);
    count = 0;
    while ((entry = reader.next(entry)) != null) count++;
    assertEquals(total * 2, count);
    // Now make sure things still work when we cross a block boundary,
    // in particular that the file reports a good length.
    final byte [] value = new byte[1025 * 1024];  // A value just over 1MB.
    for (int i = 0; i < total; i++) {
      WALEdit kvs = new WALEdit();
      kvs.add(new KeyValue(Bytes.toBytes(i), bytes, value));
      wal.append(info, bytes, kvs, System.currentTimeMillis());
    }
    // Now I should have written out lots of blocks.  Sync then read.
    wal.sync();
    reader = HLog.getReader(fs, walPath, conf);
    count = 0;
    while ((entry = reader.next(entry)) != null) count++;
    assertEquals(total * 3, count);
    reader.close();
    // Close the log and ensure a Reader on the closed file also gets the
    // right length.
    wal.close();
    reader = HLog.getReader(fs, walPath, conf);
    count = 0;
    while ((entry = reader.next(entry)) != null) count++;
    assertEquals(total * 3, count);
    reader.close();
  }

  /**
   * Test the findMemstoresWithEditsEqualOrOlderThan method.
   * @throws IOException
   */
  @Test
  public void testFindMemstoresWithEditsEqualOrOlderThan() throws IOException {
    Map<byte [], Long> regionsToSeqids = new HashMap<byte [], Long>();
    for (int i = 0; i < 10; i++) {
      Long l = Long.valueOf(i);
      regionsToSeqids.put(l.toString().getBytes(), l);
    }
    byte [][] regions =
      HLog.findMemstoresWithEditsEqualOrOlderThan(1, regionsToSeqids);
    assertEquals(2, regions.length);
    assertTrue(Bytes.equals(regions[0], "0".getBytes()) ||
        Bytes.equals(regions[0], "1".getBytes()));
    regions = HLog.findMemstoresWithEditsEqualOrOlderThan(3, regionsToSeqids);
    int count = 4;
    assertEquals(count, regions.length);
    // Regions returned are not ordered.
    for (int i = 0; i < count; i++) {
      assertTrue(Bytes.equals(regions[i], "0".getBytes()) ||
        Bytes.equals(regions[i], "1".getBytes()) ||
        Bytes.equals(regions[i], "2".getBytes()) ||
        Bytes.equals(regions[i], "3".getBytes()));
    }
  }

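  // Checks the result of a log split: one output file per region, each holding
  // howmany * howmany edits for a single region in increasing sequence-id order.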
  private void verifySplits(List<Path> splits, final int howmany)
  throws IOException {
    assertEquals(howmany, splits.size());
    for (int i = 0; i < splits.size(); i++) {
      LOG.info("Verifying=" + splits.get(i));
      HLog.Reader reader = HLog.getReader(fs, splits.get(i), conf);
      try {
        int count = 0;
        String previousRegion = null;
        long seqno = -1;
        HLog.Entry entry = new HLog.Entry();
        while ((entry = reader.next(entry)) != null) {
          HLogKey key = entry.getKey();
          String region = Bytes.toString(key.getEncodedRegionName());
          // Assert that all edits are for same region.
          if (previousRegion != null) {
            assertEquals(previousRegion, region);
          }
          LOG.info("oldseqno=" + seqno + ", newseqno=" + key.getLogSeqNum());
          assertTrue(seqno < key.getLogSeqNum());
          seqno = key.getLogSeqNum();
          previousRegion = region;
          count++;
        }
        assertEquals(howmany * howmany, count);
      } finally {
        reader.close();
      }
    }
  }

  // For this test to pass, requires:
  // 1. HDFS-200 (append support)
  // 2. HDFS-988 (SafeMode should freeze file operations
  //              [FSNamesystem.nextGenerationStampForBlock])
  // 3. HDFS-142 (on restart, maintain pendingCreates)
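  // The test appends edits to a WAL, takes the mini cluster down without
  // closing the log cleanly, restarts HDFS on the same namenode port, then
  // recovers the file lease the way the master does before splitting, and
  // finally verifies every edit can be read back.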
  @Test
  public void testAppendClose() throws Exception {
    byte [] tableName = Bytes.toBytes(getName());
    HRegionInfo regioninfo = new HRegionInfo(new HTableDescriptor(tableName),
        HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, false);
    Path subdir = new Path(dir, "hlogdir");
    Path archdir = new Path(dir, "hlogdir_archive");
    HLog wal = new HLog(fs, subdir, archdir, conf);
    final int total = 20;

    for (int i = 0; i < total; i++) {
      WALEdit kvs = new WALEdit();
      kvs.add(new KeyValue(Bytes.toBytes(i), tableName, tableName));
      wal.append(regioninfo, tableName, kvs, System.currentTimeMillis());
    }
    // Now call sync to send the data to HDFS datanodes
    wal.sync();
    int namenodePort = cluster.getNameNodePort();
    final Path walPath = wal.computeFilename();

    // Stop the cluster.  (ensure restart since we're sharing MiniDFSCluster)
    try {
      cluster.getNameNode().setSafeMode(SafeModeAction.SAFEMODE_ENTER);
      cluster.shutdown();
      try {
        // wal.writer.close() will throw an exception,
        // but still call this since it closes the LogSyncer thread first
        wal.close();
      } catch (IOException e) {
        LOG.info(e);
      }
      fs.close(); // closing FS last so DFSOutputStream can't call close
      LOG.info("STOPPED first instance of the cluster");
    } finally {
      // Restart the cluster
      while (cluster.isClusterUp()) {
        LOG.error("Waiting for cluster to go down");
        Thread.sleep(1000);
      }
      cluster = new MiniDFSCluster(namenodePort, conf, 5, false, true, true,
          null, null, null, null);
      cluster.waitActive();
      fs = cluster.getFileSystem();
      LOG.info("START second instance.");
    }

    // set the lease period to be 1 second so that the
    // namenode triggers lease recovery upon append request
    Method setLeasePeriod = cluster.getClass()
      .getDeclaredMethod("setLeasePeriod", new Class[]{Long.TYPE, Long.TYPE});
    setLeasePeriod.setAccessible(true);
    setLeasePeriod.invoke(cluster,
                          new Object[]{new Long(1000), new Long(1000)});
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      LOG.info(e);
    }

    // Now try recovering the log, like the HMaster would do
    final FileSystem recoveredFs = fs;
    final Configuration rlConf = conf;

    class RecoverLogThread extends Thread {
      public Exception exception = null;
      public void run() {
        try {
          FSUtils.recoverFileLease(recoveredFs, walPath, rlConf);
        } catch (IOException e) {
          exception = e;
        }
      }
    }

    RecoverLogThread t = new RecoverLogThread();
    t.start();
    // Timeout after 60 sec. Without correct patches, this would be an
    // infinite loop.
    t.join(60 * 1000);
    if (t.isAlive()) {
      t.interrupt();
      throw new Exception("Timed out waiting for HLog.recoverLog()");
    }

    if (t.exception != null) {
      throw t.exception;
    }

    // Make sure you can read all the content
    SequenceFile.Reader reader
      = new SequenceFile.Reader(fs, walPath, conf);
    int count = 0;
    HLogKey key = HLog.newKey(conf);
    WALEdit val = new WALEdit();
    while (reader.next(key, val)) {
      count++;
      assertTrue("Should be one KeyValue per WALEdit",
                 val.getKeyValues().size() == 1);
    }
    assertEquals(total, count);
    reader.close();
  }

  /**
   * Tests that we can write out an edit, close, and then read it back in again.
   * @throws IOException
   */
  @Test
  public void testEditAdd() throws IOException {
    final int COL_COUNT = 10;
    final byte [] tableName = Bytes.toBytes("tablename");
    final byte [] row = Bytes.toBytes("row");
    HLog.Reader reader = null;
    HLog log = new HLog(fs, dir, oldLogDir, conf);
    try {
      // Write columns named 0, 1, 2, etc., each with a single-byte value of
      // '0', '1', '2', ...
      long timestamp = System.currentTimeMillis();
      WALEdit cols = new WALEdit();
      for (int i = 0; i < COL_COUNT; i++) {
        cols.add(new KeyValue(row, Bytes.toBytes("column"),
            Bytes.toBytes(Integer.toString(i)),
            timestamp, new byte[] { (byte)(i + '0') }));
      }
      HRegionInfo info = new HRegionInfo(new HTableDescriptor(tableName),
        row, Bytes.toBytes(Bytes.toString(row) + "1"), false);
      log.append(info, tableName, cols, System.currentTimeMillis());
      long logSeqId = log.startCacheFlush();
      log.completeCacheFlush(info.getEncodedNameAsBytes(), tableName, logSeqId,
          info.isMetaRegion());
      log.close();
      Path filename = log.computeFilename();
      log = null;
      // Now open a reader on the log and assert append worked.
      reader = HLog.getReader(fs, filename, conf);
      // Above we added all columns on a single row so we only read one
      // entry in the below... that's why we have '1'.
      for (int i = 0; i < 1; i++) {
        HLog.Entry entry = reader.next(null);
        if (entry == null) break;
        HLogKey key = entry.getKey();
        WALEdit val = entry.getEdit();
        assertTrue(Bytes.equals(info.getEncodedNameAsBytes(),
            key.getEncodedRegionName()));
        assertTrue(Bytes.equals(tableName, key.getTablename()));
        KeyValue kv = val.getKeyValues().get(0);
        assertTrue(Bytes.equals(row, kv.getRow()));
        assertEquals((byte)(i + '0'), kv.getValue()[0]);
        System.out.println(key + " " + val);
      }
      HLog.Entry entry = null;
      while ((entry = reader.next(null)) != null) {
        HLogKey key = entry.getKey();
        WALEdit val = entry.getEdit();
        // Assert only one more row... the meta flushed row.
        assertTrue(Bytes.equals(info.getEncodedNameAsBytes(),
            key.getEncodedRegionName()));
        assertTrue(Bytes.equals(tableName, key.getTablename()));
        KeyValue kv = val.getKeyValues().get(0);
        assertTrue(Bytes.equals(HLog.METAROW, kv.getRow()));
        assertTrue(Bytes.equals(HLog.METAFAMILY, kv.getFamily()));
        assertEquals(0, Bytes.compareTo(HLog.COMPLETE_CACHE_FLUSH,
          val.getKeyValues().get(0).getValue()));
        System.out.println(key + " " + val);
      }
    } finally {
      if (log != null) {
        log.closeAndDelete();
      }
      if (reader != null) {
        reader.close();
      }
    }
  }

  /**
   * Tests that a multi-column append is read back as a single WAL entry,
   * followed by the complete-cache-flush marker entry.
   * @throws IOException
   */
  @Test
  public void testAppend() throws IOException {
    final int COL_COUNT = 10;
    final byte [] tableName = Bytes.toBytes("tablename");
    final byte [] row = Bytes.toBytes("row");
    Reader reader = null;
    HLog log = new HLog(fs, dir, oldLogDir, conf);
    try {
      // Write columns named 0, 1, 2, etc., each with a single-byte value of
      // '0', '1', '2', ...
      long timestamp = System.currentTimeMillis();
      WALEdit cols = new WALEdit();
      for (int i = 0; i < COL_COUNT; i++) {
        cols.add(new KeyValue(row, Bytes.toBytes("column"),
          Bytes.toBytes(Integer.toString(i)),
          timestamp, new byte[] { (byte)(i + '0') }));
      }
      HRegionInfo hri = new HRegionInfo(new HTableDescriptor(tableName),
          HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
      log.append(hri, tableName, cols, System.currentTimeMillis());
      long logSeqId = log.startCacheFlush();
      log.completeCacheFlush(hri.getEncodedNameAsBytes(), tableName, logSeqId, false);
      log.close();
      Path filename = log.computeFilename();
      log = null;
      // Now open a reader on the log and assert append worked.
      reader = HLog.getReader(fs, filename, conf);
      HLog.Entry entry = reader.next();
      assertEquals(COL_COUNT, entry.getEdit().size());
      int idx = 0;
      for (KeyValue val : entry.getEdit().getKeyValues()) {
        assertTrue(Bytes.equals(hri.getEncodedNameAsBytes(),
          entry.getKey().getEncodedRegionName()));
        assertTrue(Bytes.equals(tableName, entry.getKey().getTablename()));
        assertTrue(Bytes.equals(row, val.getRow()));
        assertEquals((byte)(idx + '0'), val.getValue()[0]);
        System.out.println(entry.getKey() + " " + val);
        idx++;
      }

      // Get next row... the meta flushed row.
      entry = reader.next();
      assertEquals(1, entry.getEdit().size());
      for (KeyValue val : entry.getEdit().getKeyValues()) {
        assertTrue(Bytes.equals(hri.getEncodedNameAsBytes(),
          entry.getKey().getEncodedRegionName()));
        assertTrue(Bytes.equals(tableName, entry.getKey().getTablename()));
        assertTrue(Bytes.equals(HLog.METAROW, val.getRow()));
        assertTrue(Bytes.equals(HLog.METAFAMILY, val.getFamily()));
        assertEquals(0, Bytes.compareTo(HLog.COMPLETE_CACHE_FLUSH,
          val.getValue()));
        System.out.println(entry.getKey() + " " + val);
      }
    } finally {
      if (log != null) {
        log.closeAndDelete();
      }
      if (reader != null) {
        reader.close();
      }
    }
  }

  /**
   * Test that registered observers see each entry before it is appended,
   * and stop being notified once they are unregistered.
   * @throws Exception
   */
  @Test
  public void testVisitors() throws Exception {
    final int COL_COUNT = 10;
    final byte [] tableName = Bytes.toBytes("tablename");
    final byte [] row = Bytes.toBytes("row");
    HLog log = new HLog(fs, dir, oldLogDir, conf);
    DumbWALObserver visitor = new DumbWALObserver();
    log.registerWALActionsListener(visitor);
    long timestamp = System.currentTimeMillis();
    HRegionInfo hri = new HRegionInfo(new HTableDescriptor(tableName),
        HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
    for (int i = 0; i < COL_COUNT; i++) {
      WALEdit cols = new WALEdit();
      cols.add(new KeyValue(row, Bytes.toBytes("column"),
          Bytes.toBytes(Integer.toString(i)),
          timestamp, new byte[]{(byte) (i + '0')}));
      log.append(hri, tableName, cols, System.currentTimeMillis());
    }
    assertEquals(COL_COUNT, visitor.increments);
    // After unregistering, further appends should not bump the count.
    log.unregisterWALActionsListener(visitor);
    WALEdit cols = new WALEdit();
    cols.add(new KeyValue(row, Bytes.toBytes("column"),
        Bytes.toBytes(Integer.toString(11)),
        timestamp, new byte[]{(byte) (11 + '0')}));
    log.append(hri, tableName, cols, System.currentTimeMillis());
    assertEquals(COL_COUNT, visitor.increments);
  }

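  /**
   * Tests that rolling archives only the log files whose edits have all been
   * flushed, and that files still holding unflushed edits are kept.
   * @throws Exception
   */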
  @Test
  public void testLogCleaning() throws Exception {
    LOG.info("testLogCleaning");
    final byte [] tableName = Bytes.toBytes("testLogCleaning");
    final byte [] tableName2 = Bytes.toBytes("testLogCleaning2");

    HLog log = new HLog(fs, dir, oldLogDir, conf);
    HRegionInfo hri = new HRegionInfo(new HTableDescriptor(tableName),
        HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
    HRegionInfo hri2 = new HRegionInfo(new HTableDescriptor(tableName2),
        HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);

    // Add a single edit and make sure that rolling won't remove the file
    // Before HBASE-3198 it used to delete it
    addEdits(log, hri, tableName, 1);
    log.rollWriter();
    assertEquals(1, log.getNumLogFiles());

    // See if there's anything wrong with more than 1 edit
    addEdits(log, hri, tableName, 2);
    log.rollWriter();
    assertEquals(2, log.getNumLogFiles());

    // Now mix edits from 2 regions, still no flushing
    addEdits(log, hri, tableName, 1);
    addEdits(log, hri2, tableName2, 1);
    addEdits(log, hri, tableName, 1);
    addEdits(log, hri2, tableName2, 1);
    log.rollWriter();
    assertEquals(3, log.getNumLogFiles());

    // Flush the first region, we expect to see the first two files getting
    // archived
    long seqId = log.startCacheFlush();
    log.completeCacheFlush(hri.getEncodedNameAsBytes(), tableName, seqId, false);
    log.rollWriter();
    assertEquals(2, log.getNumLogFiles());

    // Flush the second region, which removes all the remaining output files
    // since the oldest was completely flushed and the two others only contain
    // flush information
    seqId = log.startCacheFlush();
    log.completeCacheFlush(hri2.getEncodedNameAsBytes(), tableName2, seqId, false);
    log.rollWriter();
    assertEquals(0, log.getNumLogFiles());
  }

  private void addEdits(HLog log, HRegionInfo hri, byte [] tableName,
                        int times) throws IOException {
    final byte [] row = Bytes.toBytes("row");
    for (int i = 0; i < times; i++) {
      long timestamp = System.currentTimeMillis();
      WALEdit cols = new WALEdit();
      cols.add(new KeyValue(row, row, row, timestamp, row));
      log.append(hri, tableName, cols, timestamp);
    }
  }

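  /**
   * Minimal observer used by testVisitors: counts visitLogEntryBeforeWrite
   * callbacks and ignores roll and close notifications.
   */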
  static class DumbWALObserver implements WALObserver {
    int increments = 0;

    @Override
    public void visitLogEntryBeforeWrite(HRegionInfo info, HLogKey logKey,
                                         WALEdit logEdit) {
      increments++;
    }

    @Override
    public void logRolled(Path newFile) {
      // not interested
    }

    @Override
    public void logRollRequested() {
      // not interested
    }

    @Override
    public void logCloseRequested() {
      // not interested
    }
  }
}