/**
 * Copyright 2007 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.TreeSet;
import java.util.regex.Pattern;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestCase;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.SmallTests;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.io.HalfStoreFileReader;
import org.apache.hadoop.hbase.io.Reference;
import org.apache.hadoop.hbase.io.Reference.Range;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.CacheStats;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;
import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder;
import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType;
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
import org.apache.hadoop.hbase.util.BloomFilterFactory;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ChecksumType;
import org.apache.hadoop.hbase.util.FSUtils;
import org.junit.experimental.categories.Category;
import org.mockito.Mockito;

import com.google.common.base.Joiner;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;

/**
 * Test HStoreFile
 */
@Category(SmallTests.class)
public class TestStoreFile extends HBaseTestCase {
  static final Log LOG = LogFactory.getLog(TestStoreFile.class);
  private CacheConfig cacheConf = new CacheConfig(conf);
  private String ROOT_DIR;
  private Map<String, Long> startingMetrics;

  private static final ChecksumType CKTYPE = ChecksumType.CRC32;
  private static final int CKBYTES = 512;
  @Override
  public void setUp() throws Exception {
    super.setUp();
    startingMetrics = SchemaMetrics.getMetricsSnapshot();
    ROOT_DIR = new Path(this.testDir, "TestStoreFile").toString();
  }

  @Override
  public void tearDown() throws Exception {
    super.tearDown();
    SchemaMetrics.validateMetricChanges(startingMetrics);
  }
  /**
   * Write a file and then assert that we can read from top and bottom halves
   * using two HalfMapFiles.
   * @throws Exception
   */
  public void testBasicHalfMapFile() throws Exception {
    // Make up a directory hierarchy that has a regiondir ("7e0102") and familyname.
    Path outputDir = new Path(new Path(this.testDir, "7e0102"),
        "familyname");
    StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf,
        this.fs, 2 * 1024)
            .withOutputDir(outputDir)
            .build();
    writeStoreFile(writer);
    checkHalfHFile(new StoreFile(this.fs, writer.getPath(), conf, cacheConf,
        StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE));
  }

  private void writeStoreFile(final StoreFile.Writer writer) throws IOException {
    writeStoreFile(writer, Bytes.toBytes(getName()), Bytes.toBytes(getName()));
  }

  // pick a split point (roughly halfway)
  byte[] SPLITKEY = new byte[] { (LAST_CHAR + FIRST_CHAR)/2, FIRST_CHAR };
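  // A worked check of the arithmetic above, assuming FIRST_CHAR is 'a' (0x61)
  // and LAST_CHAR is 'z' (0x7A) as defined in HBaseTestCase:
  // ('a' + 'z') / 2 == 109 == 'm', so SPLITKEY is the row "ma", which lands
  // roughly mid-range of the rows "aa".."zz" produced by writeStoreFile.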

  /*
   * Writes KeyValue data to the passed writer and then closes it.
   * @param writer
   * @throws IOException
   */
  public static void writeStoreFile(final StoreFile.Writer writer, byte[] fam, byte[] qualifier)
  throws IOException {
    long now = System.currentTimeMillis();
    try {
      for (char d = FIRST_CHAR; d <= LAST_CHAR; d++) {
        for (char e = FIRST_CHAR; e <= LAST_CHAR; e++) {
          byte[] b = new byte[] { (byte) d, (byte) e };
          writer.append(new KeyValue(b, fam, qualifier, now, b));
        }
      }
    } finally {
      writer.close();
    }
  }

  /**
   * Test that our mechanism of writing store files in one region to reference
   * store files in other regions works.
   * @throws IOException
   */
  public void testReference()
  throws IOException {
    // Make up a directory hierarchy that has a regiondir ("7e0102") and familyname.
    Path storedir = new Path(new Path(this.testDir, "7e0102"), "familyname");
    // Make a store file and write data to it.
    StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf,
        this.fs, 8 * 1024)
            .withOutputDir(storedir)
            .build();
    writeStoreFile(writer);
    StoreFile hsf = new StoreFile(this.fs, writer.getPath(), conf, cacheConf,
        StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
    StoreFile.Reader reader = hsf.createReader();
    // Split on a row, not in middle of row.  Midkey returned by reader
    // may be in middle of row.  Create new one with empty column and
    // timestamp.
    KeyValue kv = KeyValue.createKeyValueFromKey(reader.midkey());
    byte [] midRow = kv.getRow();
    kv = KeyValue.createKeyValueFromKey(reader.getLastKey());
    byte [] finalRow = kv.getRow();
    // Make a reference
    Path refPath = StoreFile.split(fs, storedir, hsf, midRow, Range.top);
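    // A note on the mechanics: split() copies no data. It writes a small
    // Reference file whose name points back to the parent hfile and whose
    // contents record the split key plus which half it covers; Range.top
    // resolves to the keys at or above the split row, Range.bottom to the
    // keys below it.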
    StoreFile refHsf = new StoreFile(this.fs, refPath, conf, cacheConf,
        StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
    // Now confirm that I can read from the reference and that it only gets
    // keys from top half of the file.
    HFileScanner s = refHsf.createReader().getScanner(false, false);
    for (boolean first = true; (!s.isSeeked() && s.seekTo()) || s.next();) {
      ByteBuffer bb = s.getKey();
      kv = KeyValue.createKeyValueFromKey(bb);
      if (first) {
        assertTrue(Bytes.equals(kv.getRow(), midRow));
        first = false;
      }
    }
    assertTrue(Bytes.equals(kv.getRow(), finalRow));
  }

  public void testHFileLink() throws IOException {
    final String columnFamily = "f";

    Configuration testConf = new Configuration(this.conf);
    FSUtils.setRootDir(testConf, this.testDir);

    HRegionInfo hri = new HRegionInfo(Bytes.toBytes("table-link"));
    Path storedir = new Path(new Path(this.testDir,
      new Path(hri.getTableNameAsString(), hri.getEncodedName())), columnFamily);

    // Make a store file and write data to it.
    StoreFile.Writer writer = new StoreFile.WriterBuilder(testConf, cacheConf,
        this.fs, 8 * 1024)
            .withOutputDir(storedir)
            .build();
    Path storeFilePath = writer.getPath();
    writeStoreFile(writer);
    writer.close();

    Path dstPath = new Path(this.testDir, new Path("test-region", columnFamily));
    HFileLink.create(testConf, this.fs, dstPath, hri, storeFilePath.getName());
    Path linkFilePath = new Path(dstPath,
                  HFileLink.createHFileLinkName(hri, storeFilePath.getName()));

    // Try to open store file from link
    StoreFile hsf = new StoreFile(this.fs, linkFilePath, testConf, cacheConf,
        StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
    assertTrue(hsf.isLink());

    // Now confirm that I can read from the link
    int count = 1;
    HFileScanner s = hsf.createReader().getScanner(false, false);
    s.seekTo();
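    // seekTo() leaves the scanner positioned on the first KeyValue, which is
    // why count starts at 1 and the loop below only counts the remaining
    // cells; writeStoreFile wrote one row per two-letter combination.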
    while (s.next()) {
      count++;
    }
    assertEquals((LAST_CHAR - FIRST_CHAR + 1) * (LAST_CHAR - FIRST_CHAR + 1), count);
  }

  /**
   * Validate that we can handle valid tables with '.', '_', and '-' chars.
   */
  public void testStoreFileNames() {
    String[] legalHFileLink = { "MyTable_02=abc012-def345", "MyTable_02.300=abc012-def345",
      "MyTable_02-400=abc012-def345", "MyTable_02-400.200=abc012-def345",
      "MyTable_02=abc012-def345_SeqId_1_", "MyTable_02=abc012-def345_SeqId_20_" };
    for (String name: legalHFileLink) {
      assertTrue("should be a valid link: " + name, HFileLink.isHFileLink(name));
      assertTrue("should be a valid StoreFile: " + name, StoreFile.validateStoreFileName(name));
      assertFalse("should not be a valid reference: " + name, StoreFile.isReference(name));

      String refName = name + ".6789";
      assertTrue("should be a valid link reference: " + refName, StoreFile.isReference(refName));
      assertTrue("should be a valid StoreFile: " + refName, StoreFile.validateStoreFileName(refName));
    }

    String[] illegalHFileLink = { ".MyTable_02=abc012-def345", "-MyTable_02.300=abc012-def345",
      "MyTable_02-400=abc0_12-def345", "MyTable_02-400.200=abc012-def345...." };
    for (String name: illegalHFileLink) {
      assertFalse("should not be a valid link: " + name, HFileLink.isHFileLink(name));
    }
  }

  /**
   * This test creates an hfile and then the dir structures and files to verify that references
   * to hfilelinks (created by snapshot clones) can be properly interpreted.
   */
  public void testReferenceToHFileLink() throws IOException {
    final String columnFamily = "f";

    Path rootDir = FSUtils.getRootDir(conf);

    String tablename = "_original-evil-name"; // adding legal table name chars to verify regex handles it.
    HRegionInfo hri = new HRegionInfo(Bytes.toBytes(tablename));
    // store dir = <root>/<tablename>/<rgn>/<cf>
    Path storedir = new Path(new Path(rootDir,
      new Path(hri.getTableNameAsString(), hri.getEncodedName())), columnFamily);

    // Make a store file and write data to it. <root>/<tablename>/<rgn>/<cf>/<file>
    StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf,
        this.fs, 8 * 1024)
            .withOutputDir(storedir)
            .build();
    Path storeFilePath = writer.getPath();
    writeStoreFile(writer);
    writer.close();

    // create link to store file. <root>/clone/region/<cf>/<hfile>-<region>-<table>
    String target = "clone";
    Path dstPath = new Path(rootDir, new Path(new Path(target, "7e0102"), columnFamily));
    HFileLink.create(conf, this.fs, dstPath, hri, storeFilePath.getName());
    Path linkFilePath = new Path(dstPath,
                  HFileLink.createHFileLinkName(hri, storeFilePath.getName()));

    // create splits of the link.
    // <root>/clone/splitA/<cf>/<reftohfilelink>,
    // <root>/clone/splitB/<cf>/<reftohfilelink>
    Path splitDirA = new Path(new Path(rootDir,
        new Path(target, "571A")), columnFamily);
    Path splitDirB = new Path(new Path(rootDir,
        new Path(target, "571B")), columnFamily);
    StoreFile f = new StoreFile(fs, linkFilePath, conf, cacheConf, BloomType.NONE,
        NoOpDataBlockEncoder.INSTANCE);
    byte[] splitRow = SPLITKEY;
    Path pathA = StoreFile.split(fs, splitDirA, f, splitRow, Range.top); // top
    Path pathB = StoreFile.split(fs, splitDirB, f, splitRow, Range.bottom); // bottom

    // OK test the thing
    FSUtils.logFileSystemState(fs, rootDir, LOG);

    // There is a case where a file with the hfilelink pattern is actually a
    // daughter reference to an hfile link.  StoreFile has code that handles
    // this case.

    // Try to open store file from link
    StoreFile hsfA = new StoreFile(this.fs, pathA, conf, cacheConf,
        StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);

    // Now confirm that I can read from the ref to link
    int count = 1;
    HFileScanner s = hsfA.createReader().getScanner(false, false);
    s.seekTo();
    while (s.next()) {
      count++;
    }
    assertTrue(count > 0); // read some rows here
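    // pathA (Range.top) should hold the rows at or above SPLITKEY ("ma") and
    // pathB (Range.bottom) the rows below it; together the two daughter
    // references must yield every row the writer produced.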

    // Try to open store file from link
    StoreFile hsfB = new StoreFile(this.fs, pathB, conf, cacheConf,
        StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);

    // Now confirm that I can read from the ref to link
    HFileScanner sB = hsfB.createReader().getScanner(false, false);
    sB.seekTo();
    // count++ as seekTo() advances the scanner to the first KeyValue, which
    // the loop below would otherwise skip.
    count++;
    while (sB.next()) {
      count++;
    }

    // read the rest of the rows
    assertEquals((LAST_CHAR - FIRST_CHAR + 1) * (LAST_CHAR - FIRST_CHAR + 1), count);
  }

  private void checkHalfHFile(final StoreFile f)
  throws IOException {
    byte [] midkey = f.createReader().midkey();
    KeyValue midKV = KeyValue.createKeyValueFromKey(midkey);
    byte [] midRow = midKV.getRow();
    // Create top split.
    Path topDir = Store.getStoreHomedir(this.testDir, "1",
      Bytes.toBytes(f.getPath().getParent().getName()));
    if (this.fs.exists(topDir)) {
      this.fs.delete(topDir, true);
    }
    Path topPath = StoreFile.split(this.fs, topDir, f, midRow, Range.top);
    // Create bottom split.
    Path bottomDir = Store.getStoreHomedir(this.testDir, "2",
      Bytes.toBytes(f.getPath().getParent().getName()));
    if (this.fs.exists(bottomDir)) {
      this.fs.delete(bottomDir, true);
    }
    Path bottomPath = StoreFile.split(this.fs, bottomDir,
      f, midRow, Range.bottom);
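    // The half-file contract checked below: the top half serves only keys at
    // or above the midkey, the bottom half only keys strictly below it.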
    // Make readers on top and bottom.
    StoreFile.Reader top =
        new StoreFile(this.fs, topPath, conf, cacheConf, BloomType.NONE,
            NoOpDataBlockEncoder.INSTANCE).createReader();
    StoreFile.Reader bottom = new StoreFile(this.fs, bottomPath,
        conf, cacheConf, BloomType.NONE,
        NoOpDataBlockEncoder.INSTANCE).createReader();
    ByteBuffer previous = null;
    LOG.info("Midkey: " + midKV.toString());
    ByteBuffer bbMidkeyBytes = ByteBuffer.wrap(midkey);
    try {
      // Now make two HalfMapFiles and assert they can read the full backing
      // file, one from the top and the other from the bottom.
      // Test the top half first, then the bottom half.
      boolean first = true;
      ByteBuffer key = null;
      HFileScanner topScanner = top.getScanner(false, false);
      while ((!topScanner.isSeeked() && topScanner.seekTo()) ||
          (topScanner.isSeeked() && topScanner.next())) {
        key = topScanner.getKey();

        if (topScanner.getReader().getComparator().compare(key.array(),
          key.arrayOffset(), key.limit(), midkey, 0, midkey.length) < 0) {
          fail("key=" + Bytes.toStringBinary(key) + " < midkey=" +
              Bytes.toStringBinary(midkey));
        }
        if (first) {
          first = false;
          LOG.info("First in top: " + Bytes.toString(Bytes.toBytes(key)));
        }
      }
      LOG.info("Last in top: " + Bytes.toString(Bytes.toBytes(key)));

      first = true;
      HFileScanner bottomScanner = bottom.getScanner(false, false);
      while ((!bottomScanner.isSeeked() && bottomScanner.seekTo()) ||
          bottomScanner.next()) {
        previous = bottomScanner.getKey();
        key = bottomScanner.getKey();
        if (first) {
          first = false;
          LOG.info("First in bottom: " +
            Bytes.toString(Bytes.toBytes(previous)));
        }
        assertTrue(key.compareTo(bbMidkeyBytes) < 0);
      }
      if (previous != null) {
        LOG.info("Last in bottom: " + Bytes.toString(Bytes.toBytes(previous)));
      }
      // Remove references.
      this.fs.delete(topPath, false);
      this.fs.delete(bottomPath, false);

      // Next test using a midkey that does not exist in the file.
      // First, do a key that is less than the first key. Ensure splits behave
      // properly.
      byte [] badmidkey = Bytes.toBytes("  .");
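      // "  ." sorts before the first row "aa" (' ' is 0x20, '.' is 0x2E, both
      // below 'a' at 0x61), so every key in the file belongs to the top half.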
      topPath = StoreFile.split(this.fs, topDir, f, badmidkey, Range.top);
      bottomPath = StoreFile.split(this.fs, bottomDir, f, badmidkey,
        Range.bottom);
      top = new StoreFile(this.fs, topPath, conf, cacheConf,
          StoreFile.BloomType.NONE,
          NoOpDataBlockEncoder.INSTANCE).createReader();
      bottom = new StoreFile(this.fs, bottomPath, conf, cacheConf,
          StoreFile.BloomType.NONE,
          NoOpDataBlockEncoder.INSTANCE).createReader();
      bottomScanner = bottom.getScanner(false, false);
      int count = 0;
      while ((!bottomScanner.isSeeked() && bottomScanner.seekTo()) ||
          bottomScanner.next()) {
        count++;
      }
      // When badmidkey is less than the first key, the bottom half should
      // return no values.
      assertTrue(count == 0);
      // Now read from the top.
      first = true;
      topScanner = top.getScanner(false, false);
      while ((!topScanner.isSeeked() && topScanner.seekTo()) ||
          topScanner.next()) {
        key = topScanner.getKey();
        assertTrue(topScanner.getReader().getComparator().compare(key.array(),
          key.arrayOffset(), key.limit(), badmidkey, 0, badmidkey.length) >= 0);
        if (first) {
          first = false;
          KeyValue keyKV = KeyValue.createKeyValueFromKey(key);
          LOG.info("First top when key < bottom: " + keyKV);
          String tmp = Bytes.toString(keyKV.getRow());
          for (int i = 0; i < tmp.length(); i++) {
            assertTrue(tmp.charAt(i) == 'a');
          }
        }
      }
      KeyValue keyKV = KeyValue.createKeyValueFromKey(key);
      LOG.info("Last top when key < bottom: " + keyKV);
      String tmp = Bytes.toString(keyKV.getRow());
      for (int i = 0; i < tmp.length(); i++) {
        assertTrue(tmp.charAt(i) == 'z');
      }
      // Remove references.
      this.fs.delete(topPath, false);
      this.fs.delete(bottomPath, false);

      // Test when badmidkey is greater than the last key in the file
      // ("|||" sorts after "zz").
      badmidkey = Bytes.toBytes("|||");
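      // '|' is 0x7C, which sorts after 'z' (0x7A), so "|||" is above the last
      // row "zz" and every key in the file belongs to the bottom half.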
      topPath = StoreFile.split(this.fs, topDir, f, badmidkey, Range.top);
      bottomPath = StoreFile.split(this.fs, bottomDir, f, badmidkey,
        Range.bottom);
      top = new StoreFile(this.fs, topPath, conf, cacheConf,
          StoreFile.BloomType.NONE,
          NoOpDataBlockEncoder.INSTANCE).createReader();
      bottom = new StoreFile(this.fs, bottomPath, conf, cacheConf,
          StoreFile.BloomType.NONE,
          NoOpDataBlockEncoder.INSTANCE).createReader();
      first = true;
      bottomScanner = bottom.getScanner(false, false);
      while ((!bottomScanner.isSeeked() && bottomScanner.seekTo()) ||
          bottomScanner.next()) {
        key = bottomScanner.getKey();
        if (first) {
          first = false;
          keyKV = KeyValue.createKeyValueFromKey(key);
          LOG.info("First bottom when key > top: " + keyKV);
          tmp = Bytes.toString(keyKV.getRow());
          for (int i = 0; i < tmp.length(); i++) {
            assertTrue(tmp.charAt(i) == 'a');
          }
        }
      }
      keyKV = KeyValue.createKeyValueFromKey(key);
      LOG.info("Last bottom when key > top: " + keyKV);
      tmp = Bytes.toString(keyKV.getRow());
      for (int i = 0; i < tmp.length(); i++) {
        assertTrue(tmp.charAt(i) == 'z');
      }
      count = 0;
      topScanner = top.getScanner(false, false);
      while ((!topScanner.isSeeked() && topScanner.seekTo()) ||
          (topScanner.isSeeked() && topScanner.next())) {
        count++;
      }
      // When badmidkey is greater than the last key, the top half should
      // return no values.
      assertTrue(count == 0);
    } finally {
      if (top != null) {
        top.close(true); // evict since we are about to delete the file
      }
      if (bottom != null) {
        bottom.close(true); // evict since we are about to delete the file
      }
      fs.delete(f.getPath(), true);
    }
  }

  private static final String localFormatter = "%010d";

  private void bloomWriteRead(StoreFile.Writer writer, FileSystem fs)
  throws Exception {
    float err = conf.getFloat(
        BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE, 0);
    Path f = writer.getPath();
    long now = System.currentTimeMillis();
    for (int i = 0; i < 2000; i += 2) {
      String row = String.format(localFormatter, i);
      KeyValue kv = new KeyValue(row.getBytes(), "family".getBytes(),
        "col".getBytes(), now, "value".getBytes());
      writer.append(kv);
    }
    writer.close();
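    // Only the even-numbered rows (1000 of the 0..1999 range) were written,
    // so the probe loop below exercises 1000 guaranteed hits and 1000
    // guaranteed misses; the misses measure the bloom false-positive rate.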

    StoreFile.Reader reader = new StoreFile.Reader(fs, f, cacheConf,
        DataBlockEncoding.NONE);
    reader.loadFileInfo();
    reader.loadBloomfilter();
    StoreFileScanner scanner = reader.getStoreFileScanner(false, false);

    // check false positives rate
    int falsePos = 0;
    int falseNeg = 0;
    for (int i = 0; i < 2000; i++) {
      String row = String.format(localFormatter, i);
      TreeSet<byte[]> columns = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
      columns.add("family:col".getBytes());

      Scan scan = new Scan(row.getBytes(), row.getBytes());
      scan.addColumn("family".getBytes(), "family:col".getBytes());
      boolean exists = scanner.shouldUseScanner(scan, columns, Long.MIN_VALUE);
      if (i % 2 == 0) {
        if (!exists) falseNeg++;
      } else {
        if (exists) falsePos++;
      }
    }
    reader.close(true); // evict because we are about to delete the file
    fs.delete(f, true);
    assertEquals("False negatives: " + falseNeg, 0, falseNeg);
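    // Worked numbers: 1000 probes of absent rows at error rate err should see
    // about 1000 * err false positives (10 for err = 0.01); the bound below,
    // 2 * 2000 * err = 40, leaves extra slack so the test is not flaky.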
    int maxFalsePos = (int) (2 * 2000 * err);
    assertTrue("Too many false positives: " + falsePos + " (err=" + err
        + ", expected no more than " + maxFalsePos + ")",
        falsePos <= maxFalsePos);
  }

  public void testBloomFilter() throws Exception {
    FileSystem fs = FileSystem.getLocal(conf);
    conf.setFloat(BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE,
        (float) 0.01);
    conf.setBoolean(BloomFilterFactory.IO_STOREFILE_BLOOM_ENABLED, true);

    // write the file
    Path f = new Path(ROOT_DIR, getName());
    StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs,
        StoreFile.DEFAULT_BLOCKSIZE_SMALL)
            .withFilePath(f)
            .withBloomType(StoreFile.BloomType.ROW)
            .withMaxKeyCount(2000)
            .withChecksumType(CKTYPE)
            .withBytesPerChecksum(CKBYTES)
            .build();
    bloomWriteRead(writer, fs);
  }

  public void testDeleteFamilyBloomFilter() throws Exception {
    FileSystem fs = FileSystem.getLocal(conf);
    conf.setFloat(BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE,
        (float) 0.01);
    conf.setBoolean(BloomFilterFactory.IO_STOREFILE_BLOOM_ENABLED, true);
    float err = conf.getFloat(BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE,
        0);

    // write the file
    Path f = new Path(ROOT_DIR, getName());

    StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf,
        fs, StoreFile.DEFAULT_BLOCKSIZE_SMALL)
            .withFilePath(f)
            .withMaxKeyCount(2000)
            .withChecksumType(CKTYPE)
            .withBytesPerChecksum(CKBYTES)
            .build();

    // add delete family
    long now = System.currentTimeMillis();
    for (int i = 0; i < 2000; i += 2) {
      String row = String.format(localFormatter, i);
      KeyValue kv = new KeyValue(row.getBytes(), "family".getBytes(),
          "col".getBytes(), now, KeyValue.Type.DeleteFamily, "value".getBytes());
      writer.append(kv);
    }
    writer.close();

    StoreFile.Reader reader = new StoreFile.Reader(fs, f, cacheConf,
        DataBlockEncoding.NONE);
    reader.loadFileInfo();
    reader.loadBloomfilter();

    // check false positives rate
    int falsePos = 0;
    int falseNeg = 0;
    for (int i = 0; i < 2000; i++) {
      String row = String.format(localFormatter, i);
      byte[] rowKey = Bytes.toBytes(row);
      boolean exists = reader.passesDeleteFamilyBloomFilter(rowKey, 0,
          rowKey.length);
      if (i % 2 == 0) {
        if (!exists)
          falseNeg++;
      } else {
        if (exists)
          falsePos++;
      }
    }
    assertEquals(1000, reader.getDeleteFamilyCnt());
    reader.close(true); // evict because we are about to delete the file
    fs.delete(f, true);
    assertEquals("False negatives: " + falseNeg, 0, falseNeg);
    int maxFalsePos = (int) (2 * 2000 * err);
    assertTrue("Too many false positives: " + falsePos + " (err=" + err
        + ", expected no more than " + maxFalsePos + ")", falsePos <= maxFalsePos);
  }

  public void testBloomTypes() throws Exception {
    float err = (float) 0.01;
    FileSystem fs = FileSystem.getLocal(conf);
    conf.setFloat(BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE, err);
    conf.setBoolean(BloomFilterFactory.IO_STOREFILE_BLOOM_ENABLED, true);

    int rowCount = 50;
    int colCount = 10;
    int versions = 2;

    // run once using columns and once using rows
    StoreFile.BloomType[] bt =
      {StoreFile.BloomType.ROWCOL, StoreFile.BloomType.ROW};
    int[] expKeys = {rowCount*colCount, rowCount};
    // The next line deserves commentary: it is the expected number of bloom
    // false positives for each type.
    //  ROWCOL: the probe loop below makes 2*rowCount * 2*colCount lookups,
    //          so allow roughly (2*rowCount*colCount) * err.
    //  ROW:    only rowCount distinct rows are inserted, but each row-level
    //          false positive is counted once per probed column (2*colCount),
    //          magnifying it to roughly (2*rowCount*2*colCount) * err.
    float[] expErr = {2*rowCount*colCount*err, 2*rowCount*2*colCount*err};
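    // For err = 0.01 these evaluate to expErr = {10, 20}; the assertion at
    // the end of the loop allows up to 2*expErr = {20, 40} false positives
    // across the 100 * 20 = 2000 probes.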

    for (int x : new int[]{0,1}) {
      // write the file
      Path f = new Path(ROOT_DIR, getName() + x);
      StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf,
          fs, StoreFile.DEFAULT_BLOCKSIZE_SMALL)
              .withFilePath(f)
              .withBloomType(bt[x])
              .withMaxKeyCount(expKeys[x])
              .withChecksumType(CKTYPE)
              .withBytesPerChecksum(CKBYTES)
              .build();

      long now = System.currentTimeMillis();
      for (int i = 0; i < rowCount*2; i += 2) { // rows
        for (int j = 0; j < colCount*2; j += 2) {   // column qualifiers
          String row = String.format(localFormatter, i);
          String col = String.format(localFormatter, j);
          for (int k = 0; k < versions; ++k) { // versions
            KeyValue kv = new KeyValue(row.getBytes(),
              "family".getBytes(), ("col" + col).getBytes(),
                now-k, Bytes.toBytes((long)-1));
            writer.append(kv);
          }
        }
      }
      writer.close();

      StoreFile.Reader reader = new StoreFile.Reader(fs, f, cacheConf,
          DataBlockEncoding.NONE);
      reader.loadFileInfo();
      reader.loadBloomfilter();
      StoreFileScanner scanner = reader.getStoreFileScanner(false, false);
      assertEquals(expKeys[x], reader.generalBloomFilter.getKeyCount());

      // check false positives rate
      int falsePos = 0;
      int falseNeg = 0;
      for (int i = 0; i < rowCount*2; ++i) { // rows
        for (int j = 0; j < colCount*2; ++j) {   // column qualifiers
          String row = String.format(localFormatter, i);
          String col = String.format(localFormatter, j);
          TreeSet<byte[]> columns = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
          columns.add(("col" + col).getBytes());

          Scan scan = new Scan(row.getBytes(), row.getBytes());
          scan.addColumn("family".getBytes(), ("col"+col).getBytes());
          boolean exists =
              scanner.shouldUseScanner(scan, columns, Long.MIN_VALUE);
          boolean shouldRowExist = i % 2 == 0;
          boolean shouldColExist = j % 2 == 0;
          shouldColExist = shouldColExist || bt[x] == StoreFile.BloomType.ROW;
          if (shouldRowExist && shouldColExist) {
            if (!exists) falseNeg++;
          } else {
            if (exists) falsePos++;
          }
        }
      }
      reader.close(true); // evict because we are about to delete the file
      fs.delete(f, true);
      System.out.println(bt[x].toString());
      System.out.println("  False negatives: " + falseNeg);
      System.out.println("  False positives: " + falsePos);
      assertEquals(0, falseNeg);
      assertTrue(falsePos < 2*expErr[x]);
    }
  }

  public void testBloomEdgeCases() throws Exception {
    float err = (float) 0.005;
    FileSystem fs = FileSystem.getLocal(conf);
    Path f = new Path(ROOT_DIR, getName());
    conf.setFloat(BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE, err);
    conf.setBoolean(BloomFilterFactory.IO_STOREFILE_BLOOM_ENABLED, true);
    conf.setInt(BloomFilterFactory.IO_STOREFILE_BLOOM_MAX_KEYS, 1000);

    // This test only runs for HFile format version 1.
    conf.setInt(HFile.FORMAT_VERSION_KEY, 1);

    // this should not create a bloom because the max keys is too small
    StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs,
        StoreFile.DEFAULT_BLOCKSIZE_SMALL)
            .withFilePath(f)
            .withBloomType(StoreFile.BloomType.ROW)
            .withMaxKeyCount(2000)
            .withChecksumType(CKTYPE)
            .withBytesPerChecksum(CKBYTES)
            .build();
    assertFalse(writer.hasGeneralBloom());
    writer.close();
    fs.delete(f, true);

    conf.setInt(BloomFilterFactory.IO_STOREFILE_BLOOM_MAX_KEYS,
        Integer.MAX_VALUE);

    // TODO: commented out because we run out of java heap space on trunk
    // the below config caused IllegalArgumentException in our production cluster
    // however, the resulting byteSize is < MAX_INT, so this should work properly
    writer = new StoreFile.WriterBuilder(conf, cacheConf, fs,
        StoreFile.DEFAULT_BLOCKSIZE_SMALL)
            .withFilePath(f)
            .withBloomType(StoreFile.BloomType.ROW)
            .withMaxKeyCount(27244696)
            .build();
    assertTrue(writer.hasGeneralBloom());
    bloomWriteRead(writer, fs);

    // this, however, is too large and should not create a bloom
    // because Java can't create a contiguous array > MAX_INT
    writer = new StoreFile.WriterBuilder(conf, cacheConf, fs,
        StoreFile.DEFAULT_BLOCKSIZE_SMALL)
            .withFilePath(f)
            .withBloomType(StoreFile.BloomType.ROW)
            .withMaxKeyCount(Integer.MAX_VALUE)
            .withChecksumType(CKTYPE)
            .withBytesPerChecksum(CKBYTES)
            .build();
    assertFalse(writer.hasGeneralBloom());
    writer.close();
    fs.delete(f, true);
  }

  public void testFlushTimeComparator() {
    assertOrdering(StoreFile.Comparators.FLUSH_TIME,
        mockStoreFile(true, 1000, -1, "/foo/123"),
        mockStoreFile(true, 1000, -1, "/foo/126"),
        mockStoreFile(true, 2000, -1, "/foo/126"),
        mockStoreFile(false, -1, 1, "/foo/1"),
        mockStoreFile(false, -1, 3, "/foo/2"),
        mockStoreFile(false, -1, 5, "/foo/2"),
        mockStoreFile(false, -1, 5, "/foo/3"));
  }

  /**
   * Assert that the given comparator orders the given storefiles in the
   * same way that they're passed.
   */
  private void assertOrdering(Comparator<StoreFile> comparator, StoreFile ... sfs) {
    ArrayList<StoreFile> sorted = Lists.newArrayList(sfs);
    Collections.shuffle(sorted);
    Collections.sort(sorted, comparator);
    LOG.debug("sfs: " + Joiner.on(",").join(sfs));
    LOG.debug("sorted: " + Joiner.on(",").join(sorted));
    assertTrue(Iterables.elementsEqual(Arrays.asList(sfs), sorted));
  }

  /**
   * Create a mock StoreFile with the given attributes.
   */
  private StoreFile mockStoreFile(boolean bulkLoad, long bulkTimestamp,
      long seqId, String path) {
    StoreFile mock = Mockito.mock(StoreFile.class);
    Mockito.doReturn(bulkLoad).when(mock).isBulkLoadResult();
    Mockito.doReturn(bulkTimestamp).when(mock).getBulkLoadTimestamp();
    if (bulkLoad) {
      // Bulk load files will throw if you ask for their sequence ID
      Mockito.doThrow(new IllegalAccessError("bulk load"))
        .when(mock).getMaxSequenceId();
    } else {
      Mockito.doReturn(seqId).when(mock).getMaxSequenceId();
    }
    Mockito.doReturn(new Path(path)).when(mock).getPath();
    String name = "mock storefile, bulkLoad=" + bulkLoad +
      " bulkTimestamp=" + bulkTimestamp +
      " seqId=" + seqId +
      " path=" + path;
    Mockito.doReturn(name).when(mock).toString();
    return mock;
  }

  /**
   * Generate a list of KeyValues for testing based on given parameters
   * @param timestamps the timestamps to use for each row
   * @param numRows number of rows to generate
   * @param qualifier column qualifier to use
   * @param family column family to use
   * @return the list of generated KeyValues
   */
  List<KeyValue> getKeyValueSet(long[] timestamps, int numRows,
      byte[] qualifier, byte[] family) {
    List<KeyValue> kvList = new ArrayList<KeyValue>();
    for (int i = 1; i <= numRows; i++) {
      byte[] b = Bytes.toBytes(i);
      LOG.info(Bytes.toString(b));
      for (long timestamp : timestamps) {
        kvList.add(new KeyValue(b, family, qualifier, timestamp, b));
      }
    }
    return kvList;
  }

  /**
   * Test to ensure correctness when using StoreFile with multiple timestamps
   * @throws IOException
   */
  public void testMultipleTimestamps() throws IOException {
    byte[] family = Bytes.toBytes("familyname");
    byte[] qualifier = Bytes.toBytes("qualifier");
    int numRows = 10;
    long[] timestamps = new long[] {20, 10, 5, 1};
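    // The file's timestamps will span [1, 20]; scan time ranges overlapping
    // that interval must keep the scanner, while a range entirely above 20
    // can be skipped. The assertions below check both cases.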
    Scan scan = new Scan();

    // Make up a directory hierarchy that has a regiondir ("7e0102") and familyname.
    Path storedir = new Path(new Path(this.testDir, "7e0102"), "familyname");
    Path dir = new Path(storedir, "1234567890");
    StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf,
        this.fs, 8 * 1024)
            .withOutputDir(dir)
            .build();

    List<KeyValue> kvList = getKeyValueSet(timestamps, numRows,
        qualifier, family);

    for (KeyValue kv : kvList) {
      writer.append(kv);
    }
    writer.appendMetadata(0, false);
    writer.close();

    StoreFile hsf = new StoreFile(this.fs, writer.getPath(), conf, cacheConf,
        StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
    StoreFile.Reader reader = hsf.createReader();
    StoreFileScanner scanner = reader.getStoreFileScanner(false, false);
    TreeSet<byte[]> columns = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
    columns.add(qualifier);

    scan.setTimeRange(20, 100);
    assertTrue(scanner.shouldUseScanner(scan, columns, Long.MIN_VALUE));

    scan.setTimeRange(1, 2);
    assertTrue(scanner.shouldUseScanner(scan, columns, Long.MIN_VALUE));

    scan.setTimeRange(8, 10);
    assertTrue(scanner.shouldUseScanner(scan, columns, Long.MIN_VALUE));

    scan.setTimeRange(7, 50);
    assertTrue(scanner.shouldUseScanner(scan, columns, Long.MIN_VALUE));

    // This test relies on the timestamp range optimization
    scan.setTimeRange(27, 50);
    assertTrue(!scanner.shouldUseScanner(scan, columns, Long.MIN_VALUE));
  }

  public void testCacheOnWriteEvictOnClose() throws Exception {
    Configuration conf = this.conf;

    // Find a home for our files (regiondir ("7e0102") and familyname).
    Path baseDir = new Path(new Path(this.testDir, "7e0102"), "twoCOWEOC");

    // Grab the block cache and get the initial hit/miss counts
    BlockCache bc = new CacheConfig(conf).getBlockCache();
    assertNotNull(bc);
    CacheStats cs = bc.getStats();
    long startHit = cs.getHitCount();
    long startMiss = cs.getMissCount();
    long startEvicted = cs.getEvictedCount();

    // Let's write a StoreFile with three blocks, with cache on write off
    conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, false);
    CacheConfig cacheConf = new CacheConfig(conf);
    Path pathCowOff = new Path(baseDir, "123456789");
    StoreFile.Writer writer = writeStoreFile(conf, cacheConf, pathCowOff, 3);
    StoreFile hsf = new StoreFile(this.fs, writer.getPath(), conf, cacheConf,
        StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
    LOG.debug(hsf.getPath().toString());

    // Read this file, we should see 3 misses
    StoreFile.Reader reader = hsf.createReader();
    reader.loadFileInfo();
    StoreFileScanner scanner = reader.getStoreFileScanner(true, true);
    scanner.seek(KeyValue.LOWESTKEY);
    while (scanner.next() != null);
    assertEquals(startHit, cs.getHitCount());
    assertEquals(startMiss + 3, cs.getMissCount());
    assertEquals(startEvicted, cs.getEvictedCount());
    startMiss += 3;
    scanner.close();
    reader.close(cacheConf.shouldEvictOnClose());

    // Now write a StoreFile with three blocks, with cache on write on
    conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, true);
    cacheConf = new CacheConfig(conf);
    Path pathCowOn = new Path(baseDir, "123456788");
    writer = writeStoreFile(conf, cacheConf, pathCowOn, 3);
    hsf = new StoreFile(this.fs, writer.getPath(), conf, cacheConf,
        StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);

    // Read this file, we should see 3 hits
    reader = hsf.createReader();
    scanner = reader.getStoreFileScanner(true, true);
    scanner.seek(KeyValue.LOWESTKEY);
    while (scanner.next() != null);
    assertEquals(startHit + 3, cs.getHitCount());
    assertEquals(startMiss, cs.getMissCount());
    assertEquals(startEvicted, cs.getEvictedCount());
    startHit += 3;
    scanner.close();
    reader.close(cacheConf.shouldEvictOnClose());

    // Let's read back the two files to ensure the blocks exactly match
    hsf = new StoreFile(this.fs, pathCowOff, conf, cacheConf,
        StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
    StoreFile.Reader readerOne = hsf.createReader();
    readerOne.loadFileInfo();
    StoreFileScanner scannerOne = readerOne.getStoreFileScanner(true, true);
    scannerOne.seek(KeyValue.LOWESTKEY);
    hsf = new StoreFile(this.fs, pathCowOn, conf, cacheConf,
        StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
    StoreFile.Reader readerTwo = hsf.createReader();
    readerTwo.loadFileInfo();
    StoreFileScanner scannerTwo = readerTwo.getStoreFileScanner(true, true);
    scannerTwo.seek(KeyValue.LOWESTKEY);
    KeyValue kv1 = null;
    KeyValue kv2 = null;
    while ((kv1 = scannerOne.next()) != null) {
      kv2 = scannerTwo.next();
      assertTrue(kv1.equals(kv2));
      assertTrue(Bytes.compareTo(
          kv1.getBuffer(), kv1.getKeyOffset(), kv1.getKeyLength(),
          kv2.getBuffer(), kv2.getKeyOffset(), kv2.getKeyLength()) == 0);
      assertTrue(Bytes.compareTo(
          kv1.getBuffer(), kv1.getValueOffset(), kv1.getValueLength(),
          kv2.getBuffer(), kv2.getValueOffset(), kv2.getValueLength()) == 0);
    }
    assertNull(scannerTwo.next());
    assertEquals(startHit + 6, cs.getHitCount());
    assertEquals(startMiss, cs.getMissCount());
    assertEquals(startEvicted, cs.getEvictedCount());
    startHit += 6;
    scannerOne.close();
    readerOne.close(cacheConf.shouldEvictOnClose());
    scannerTwo.close();
    readerTwo.close(cacheConf.shouldEvictOnClose());

    // Let's close the first file with evict on close turned on
    conf.setBoolean("hbase.rs.evictblocksonclose", true);
    cacheConf = new CacheConfig(conf);
    hsf = new StoreFile(this.fs, pathCowOff, conf, cacheConf,
        StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
    reader = hsf.createReader();
    reader.close(cacheConf.shouldEvictOnClose());

    // We should have 3 new evictions
    assertEquals(startHit, cs.getHitCount());
    assertEquals(startMiss, cs.getMissCount());
    assertEquals(startEvicted + 3, cs.getEvictedCount());
    startEvicted += 3;

    // Let's close the second file with evict on close turned off
    conf.setBoolean("hbase.rs.evictblocksonclose", false);
    cacheConf = new CacheConfig(conf);
    hsf = new StoreFile(this.fs, pathCowOn, conf, cacheConf,
        StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
    reader = hsf.createReader();
    reader.close(cacheConf.shouldEvictOnClose());

    // We expect no changes
    assertEquals(startHit, cs.getHitCount());
    assertEquals(startMiss, cs.getMissCount());
    assertEquals(startEvicted, cs.getEvictedCount());
  }

  private StoreFile.Writer writeStoreFile(Configuration conf,
      CacheConfig cacheConf, Path path, int numBlocks)
  throws IOException {
    // Let's put ~5 small KVs in each block, so let's make 5*numBlocks KVs
    int numKVs = 5 * numBlocks;
    List<KeyValue> kvs = new ArrayList<KeyValue>(numKVs);
    byte [] b = Bytes.toBytes("x");
    int totalSize = 0;
    for (int i = numKVs; i > 0; i--) {
      KeyValue kv = new KeyValue(b, b, b, i, b);
      kvs.add(kv);
      // kv has memstoreTS 0, which takes 1 byte to store.
      totalSize += kv.getLength() + 1;
    }
    int blockSize = totalSize / numBlocks;
    StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs,
        blockSize)
            .withFilePath(path)
            .withMaxKeyCount(2000)
            .withChecksumType(CKTYPE)
            .withBytesPerChecksum(CKBYTES)
            .build();
    // We'll write N-1 KVs to ensure we don't write an extra block
    kvs.remove(kvs.size()-1);
    for (KeyValue kv : kvs) {
      writer.append(kv);
    }
    writer.appendMetadata(0, false);
    writer.close();
    return writer;
  }

  /**
   * Check if data block encoding information is saved correctly in HFile's
   * file info.
   */
  public void testDataBlockEncodingMetaData() throws IOException {
    // Make up a directory hierarchy that has a regiondir ("7e0102") and familyname.
    Path dir = new Path(new Path(this.testDir, "7e0102"), "familyname");
    Path path = new Path(dir, "1234567890");

    DataBlockEncoding dataBlockEncoderAlgo =
        DataBlockEncoding.FAST_DIFF;
    HFileDataBlockEncoder dataBlockEncoder =
        new HFileDataBlockEncoderImpl(
            dataBlockEncoderAlgo,
            dataBlockEncoderAlgo);
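    // HFileDataBlockEncoderImpl takes an on-disk encoding and an in-cache
    // encoding; passing FAST_DIFF for both encodes blocks the same way
    // everywhere, and the writer records the choice in the file info under
    // the DATA_BLOCK_ENCODING key checked below.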
    cacheConf = new CacheConfig(conf);
    StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs,
        HFile.DEFAULT_BLOCKSIZE)
            .withFilePath(path)
            .withDataBlockEncoder(dataBlockEncoder)
            .withMaxKeyCount(2000)
            .withChecksumType(CKTYPE)
            .withBytesPerChecksum(CKBYTES)
            .build();
    writer.close();

    StoreFile storeFile = new StoreFile(fs, writer.getPath(), conf,
        cacheConf, BloomType.NONE, dataBlockEncoder);
    StoreFile.Reader reader = storeFile.createReader();

    Map<byte[], byte[]> fileInfo = reader.loadFileInfo();
    byte[] value = fileInfo.get(HFileDataBlockEncoder.DATA_BLOCK_ENCODING);

    // Compare contents, not references: assertEquals on two byte[] falls back
    // to Object.equals, which is identity for arrays.
    assertTrue(Arrays.equals(dataBlockEncoderAlgo.getNameInBytes(), value));
  }

  @org.junit.Rule
  public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu =
    new org.apache.hadoop.hbase.ResourceCheckerJUnitRule();
}