/*
 * Copyright 2011 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.io.hfile;

import static org.apache.hadoop.hbase.io.hfile.Compression.Algorithm.*;
import static org.junit.Assert.*;

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.concurrent.Callable;
import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.DoubleOutputStream;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ChecksumType;
import org.apache.hadoop.hbase.util.ClassSize;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.io.compress.Compressor;
import org.junit.Before;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;

@Category(MediumTests.class)
@RunWith(Parameterized.class)
public class TestHFileBlock {

  private static final boolean detailedLogging = false;
  private static final boolean[] BOOLEAN_VALUES = new boolean[] { false, true };

  private static final Log LOG = LogFactory.getLog(TestHFileBlock.class);

  static final Compression.Algorithm[] COMPRESSION_ALGORITHMS = {
      NONE, GZ };

  private static final int NUM_TEST_BLOCKS = 1000;
  private static final int NUM_READER_THREADS = 26;

  // Parameters of the test key-value generator.
  private static final int NUM_KEYVALUES = 50;
  private static final int FIELD_LENGTH = 10;
  private static final float CHANCE_TO_REPEAT = 0.6f;

  private static final HBaseTestingUtility TEST_UTIL =
      new HBaseTestingUtility();
  private FileSystem fs;
  private int uncompressedSizeV1;

  private final boolean includesMemstoreTS;

  public TestHFileBlock(boolean includesMemstoreTS) {
    this.includesMemstoreTS = includesMemstoreTS;
  }
  @Parameters
  public static Collection<Object[]> parameters() {
    return HBaseTestingUtility.BOOLEAN_PARAMETERIZED;
  }

  @Before
  public void setUp() throws IOException {
    fs = HFileSystem.get(TEST_UTIL.getConfiguration());
  }

  static void writeTestBlockContents(DataOutputStream dos) throws IOException {
    // This compresses really well.
    for (int i = 0; i < 1000; ++i)
      dos.writeInt(i / 100);
  }

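  /**
   * Writes {@link #NUM_KEYVALUES} pseudo-random key-values to the given
   * stream. With probability {@link #CHANCE_TO_REPEAT}, fields are re-used
   * from earlier key-values so that the data compresses and encodes well.
   * @return the total number of bytes written, including memstore
   *         timestamps if requested
   */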
  static int writeTestKeyValues(OutputStream dos, int seed,
      boolean includesMemstoreTS) throws IOException {
    List<KeyValue> keyValues = new ArrayList<KeyValue>();
    Random randomizer = new Random(42L + seed); // just any fixed number

    for (int i = 0; i < NUM_KEYVALUES; ++i) {
      byte[] row;
      long timestamp;
      byte[] family;
      byte[] qualifier;
      byte[] value;

      // Either re-use a previous row or generate a fresh random one.
      if (0 < i && randomizer.nextFloat() < CHANCE_TO_REPEAT) {
        row = keyValues.get(randomizer.nextInt(keyValues.size())).getRow();
      } else {
        row = new byte[FIELD_LENGTH];
        randomizer.nextBytes(row);
      }
      if (0 == i) {
        family = new byte[FIELD_LENGTH];
        randomizer.nextBytes(family);
      } else {
        // All key-values share the same family.
        family = keyValues.get(0).getFamily();
      }
      if (0 < i && randomizer.nextFloat() < CHANCE_TO_REPEAT) {
        qualifier = keyValues.get(
            randomizer.nextInt(keyValues.size())).getQualifier();
      } else {
        qualifier = new byte[FIELD_LENGTH];
        randomizer.nextBytes(qualifier);
      }
      if (0 < i && randomizer.nextFloat() < CHANCE_TO_REPEAT) {
        value = keyValues.get(randomizer.nextInt(keyValues.size())).getValue();
      } else {
        value = new byte[FIELD_LENGTH];
        randomizer.nextBytes(value);
      }
      if (0 < i && randomizer.nextFloat() < CHANCE_TO_REPEAT) {
        timestamp = keyValues.get(
            randomizer.nextInt(keyValues.size())).getTimestamp();
      } else {
        timestamp = randomizer.nextLong();
      }

      keyValues.add(new KeyValue(row, family, qualifier, timestamp, value));
    }

    // Sort the key-values and write them to the stream.
    int totalSize = 0;
    Collections.sort(keyValues, KeyValue.COMPARATOR);
    DataOutputStream dataOutputStream = new DataOutputStream(dos);
    for (KeyValue kv : keyValues) {
      totalSize += kv.getLength();
      dataOutputStream.write(kv.getBuffer(), kv.getOffset(), kv.getLength());
      if (includesMemstoreTS) {
        long memstoreTS = randomizer.nextLong();
        WritableUtils.writeVLong(dataOutputStream, memstoreTS);
        totalSize += WritableUtils.getVIntSize(memstoreTS);
      }
    }

    return totalSize;
  }

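  /**
   * Creates the raw compressed byte stream of a version 1 metadata block,
   * as written by HFiles predating version 2.
   */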
  public byte[] createTestV1Block(Compression.Algorithm algo)
      throws IOException {
    Compressor compressor = algo.getCompressor();
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    OutputStream os = algo.createCompressionStream(baos, compressor, 0);
    DataOutputStream dos = new DataOutputStream(os);
    BlockType.META.write(dos); // Let's make this a meta block.
    writeTestBlockContents(dos);
    uncompressedSizeV1 = dos.size();
    dos.flush();
    algo.returnCompressor(compressor);
    return baos.toByteArray();
  }

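  /**
   * Writes a single 4000-byte data block (1000 ints) through a version 2
   * writer and returns the writer, which still holds the finished block.
   */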
  static HFileBlock.Writer createTestV2Block(Compression.Algorithm algo,
      boolean includesMemstoreTS) throws IOException {
    final BlockType blockType = BlockType.DATA;
    HFileBlock.Writer hbw = new HFileBlock.Writer(algo, null,
        includesMemstoreTS, HFileReaderV2.MAX_MINOR_VERSION,
        HFile.DEFAULT_CHECKSUM_TYPE,
        HFile.DEFAULT_BYTES_PER_CHECKSUM);
    DataOutputStream dos = hbw.startWriting(blockType);
    writeTestBlockContents(dos);
    dos.flush();
    byte[] headerAndData = hbw.getHeaderAndDataForTest();
    assertEquals(1000 * 4, hbw.getUncompressedSizeWithoutHeader());
    hbw.releaseCompressor();
    return hbw;
  }

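  /**
   * Returns a printable representation of the test block. If the block has
   * the expected on-disk length, the gzip "OS" header byte is normalized
   * first so that the result is platform-independent.
   */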
  public String createTestBlockStr(Compression.Algorithm algo,
      int correctLength) throws IOException {
    HFileBlock.Writer hbw = createTestV2Block(algo, includesMemstoreTS);
    byte[] testV2Block = hbw.getHeaderAndDataForTest();
    int osOffset = HFileBlock.HEADER_SIZE_WITH_CHECKSUMS + 9;
    if (testV2Block.length == correctLength) {
      // Force-set the "OS" field of the gzip header to 3 (Unix) so that
      // the binary string comparison in testGzipCompression is consistent
      // across platforms. The OS byte is the 10th byte of the gzip stream,
      // which starts right after the block header, hence the offset of
      // HEADER_SIZE_WITH_CHECKSUMS + 9. See RFC 1952 for the gzip format.
      testV2Block[osOffset] = 3;
    }
    return Bytes.toStringBinary(testV2Block);
  }

  @Test
  public void testNoCompression() throws IOException {
    assertEquals(4000, createTestV2Block(NONE, includesMemstoreTS).
        getBlockForCaching().getUncompressedSizeWithoutHeader());
  }

  @Test
  public void testGzipCompression() throws IOException {
    final String correctTestBlockStr =
        "DATABLK*\\x00\\x00\\x00>\\x00\\x00\\x0F\\xA0\\xFF\\xFF\\xFF\\xFF"
            + "\\xFF\\xFF\\xFF\\xFF"
            + "\\x01\\x00\\x00@\\x00\\x00\\x00\\x00["
            // gzip-compressed block: http://www.gzip.org/zlib/rfc-gzip.html
            + "\\x1F\\x8B"  // gzip magic signature
            + "\\x08"  // Compression method: 8 = "deflate"
            + "\\x00"  // Flags
            + "\\x00\\x00\\x00\\x00"  // mtime
            + "\\x00"  // XFL (extra flags)
            // OS (0 = FAT filesystems, 3 = Unix). However, this field
            // sometimes gets set to 0 on Linux and Mac, so we reset it to 3
            // in createTestBlockStr.
            + "\\x03"
            + "\\xED\\xC3\\xC1\\x11\\x00 \\x08\\xC00DD\\xDD\\x7Fa"
            + "\\xD6\\xE8\\xA3\\xB9K\\x84`\\x96Q\\xD3\\xA8\\xDB\\xA8e\\xD4c"
            + "\\xD46\\xEA5\\xEA3\\xEA7\\xE7\\x00LI\\x5Cs\\xA0\\x0F\\x00\\x00"
            + "\\xAB\\x85g\\x91";
    final int correctGzipBlockLength = 95;
    assertEquals(correctTestBlockStr, createTestBlockStr(GZ,
        correctGzipBlockLength));
  }

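  /**
   * Writes the same version 1 block fifty times to a file and checks that
   * the version 1 reader can read every copy back, with and without pread.
   */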
  @Test
  public void testReaderV1() throws IOException {
    for (Compression.Algorithm algo : COMPRESSION_ALGORITHMS) {
      for (boolean pread : new boolean[] { false, true }) {
        byte[] block = createTestV1Block(algo);
        Path path = new Path(TEST_UTIL.getDataTestDir(),
            "blocks_v1_" + algo);
        LOG.info("Creating temporary file at " + path);
        FSDataOutputStream os = fs.create(path);
        int totalSize = 0;
        int numBlocks = 50;
        for (int i = 0; i < numBlocks; ++i) {
          os.write(block);
          totalSize += block.length;
        }
        os.close();

        FSDataInputStream is = fs.open(path);
        HFileBlock.FSReader hbr = new HFileBlock.FSReaderV1(is, algo,
            totalSize);
        HFileBlock b;
        int numBlocksRead = 0;
        long pos = 0;
        while (pos < totalSize) {
          b = hbr.readBlockData(pos, block.length, uncompressedSizeV1, pread);
          b.sanityCheck();
          pos += block.length;
          numBlocksRead++;
        }
        assertEquals(numBlocks, numBlocksRead);
        is.close();
      }
    }
  }

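  /**
   * Writes two version 2 data blocks and reads them back, verifying
   * uncompressed and on-disk sizes, and that providing a wrong on-disk
   * size to the reader fails with a descriptive exception.
   */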
  @Test
  public void testReaderV2() throws IOException {
    for (Compression.Algorithm algo : COMPRESSION_ALGORITHMS) {
      for (boolean pread : new boolean[] { false, true }) {
        LOG.info("testReaderV2: Compression algorithm: " + algo +
            ", pread=" + pread);
        Path path = new Path(TEST_UTIL.getDataTestDir(), "blocks_v2_"
            + algo);
        FSDataOutputStream os = fs.create(path);
        HFileBlock.Writer hbw = new HFileBlock.Writer(algo, null,
            includesMemstoreTS,
            HFileReaderV2.MAX_MINOR_VERSION,
            HFile.DEFAULT_CHECKSUM_TYPE,
            HFile.DEFAULT_BYTES_PER_CHECKSUM);
        long totalSize = 0;
        for (int blockId = 0; blockId < 2; ++blockId) {
          DataOutputStream dos = hbw.startWriting(BlockType.DATA);
          for (int i = 0; i < 1234; ++i)
            dos.writeInt(i);
          hbw.writeHeaderAndData(os);
          totalSize += hbw.getOnDiskSizeWithHeader();
        }
        os.close();

        FSDataInputStream is = fs.open(path);
        HFileBlock.FSReader hbr = new HFileBlock.FSReaderV2(is, algo,
            totalSize);
        HFileBlock b = hbr.readBlockData(0, -1, -1, pread);
        is.close();
        assertEquals(0, HFile.getChecksumFailuresCount());

        b.sanityCheck();
        assertEquals(4936, b.getUncompressedSizeWithoutHeader());
        assertEquals(algo == GZ ? 2173 : 4936,
            b.getOnDiskSizeWithoutHeader() - b.totalChecksumBytes());
        String blockStr = b.toString();

        if (algo == GZ) {
          is = fs.open(path);
          hbr = new HFileBlock.FSReaderV2(is, algo, totalSize);
          b = hbr.readBlockData(0, 2173 + HFileBlock.HEADER_SIZE_WITH_CHECKSUMS +
              b.totalChecksumBytes(), -1, pread);
          assertEquals(blockStr, b.toString());
          int wrongCompressedSize = 2172;
          try {
            b = hbr.readBlockData(0, wrongCompressedSize
                + HFileBlock.HEADER_SIZE_WITH_CHECKSUMS, -1, pread);
            fail("Exception expected");
          } catch (IOException ex) {
            String expectedPrefix = "On-disk size without header provided is "
                + wrongCompressedSize + ", but block header contains "
                + b.getOnDiskSizeWithoutHeader() + ".";
            assertTrue("Invalid exception message: '" + ex.getMessage()
                + "'.\nMessage is expected to start with: '" + expectedPrefix
                + "'", ex.getMessage().startsWith(expectedPrefix));
          }
          is.close();
        }
      }
    }
  }

  /**
   * Test encoding/decoding data blocks.
   * @throws IOException a bug or a problem with temporary files.
   */
  @Test
  public void testDataBlockEncoding() throws IOException {
    final int numBlocks = 5;
    for (Compression.Algorithm algo : COMPRESSION_ALGORITHMS) {
      for (boolean pread : new boolean[] { false, true }) {
        for (DataBlockEncoding encoding : DataBlockEncoding.values()) {
          Path path = new Path(TEST_UTIL.getDataTestDir(), "blocks_v2_"
              + algo + "_" + encoding.toString());
          FSDataOutputStream os = fs.create(path);
          HFileDataBlockEncoder dataBlockEncoder =
              new HFileDataBlockEncoderImpl(encoding);
          HFileBlock.Writer hbw = new HFileBlock.Writer(algo, dataBlockEncoder,
              includesMemstoreTS,
              HFileReaderV2.MAX_MINOR_VERSION,
              HFile.DEFAULT_CHECKSUM_TYPE,
              HFile.DEFAULT_BYTES_PER_CHECKSUM);
          long totalSize = 0;
          final List<Integer> encodedSizes = new ArrayList<Integer>();
          final List<ByteBuffer> encodedBlocks = new ArrayList<ByteBuffer>();
          for (int blockId = 0; blockId < numBlocks; ++blockId) {
            DataOutputStream dos = hbw.startWriting(BlockType.DATA);
            writeEncodedBlock(encoding, dos, encodedSizes, encodedBlocks,
                blockId, includesMemstoreTS);

            hbw.writeHeaderAndData(os);
            totalSize += hbw.getOnDiskSizeWithHeader();
          }
          os.close();

          FSDataInputStream is = fs.open(path);
          HFileBlock.FSReaderV2 hbr = new HFileBlock.FSReaderV2(is, algo,
              totalSize);
          hbr.setDataBlockEncoder(dataBlockEncoder);
          hbr.setIncludesMemstoreTS(includesMemstoreTS);

          HFileBlock b;
          int pos = 0;
          for (int blockId = 0; blockId < numBlocks; ++blockId) {
            b = hbr.readBlockData(pos, -1, -1, pread);
            assertEquals(0, HFile.getChecksumFailuresCount());
            b.sanityCheck();
            pos += b.getOnDiskSizeWithHeader();

            assertEquals((int) encodedSizes.get(blockId),
                b.getUncompressedSizeWithoutHeader());
            ByteBuffer actualBuffer = b.getBufferWithoutHeader();
            if (encoding != DataBlockEncoding.NONE) {
              // We expect a two-byte big-endian encoding id.
              assertEquals(0, actualBuffer.get(0));
              assertEquals(encoding.getId(), actualBuffer.get(1));
              actualBuffer.position(2);
              actualBuffer = actualBuffer.slice();
            }

            ByteBuffer expectedBuffer = encodedBlocks.get(blockId);
            expectedBuffer.rewind();

            // Test if content matches, produce nice message.
            assertBuffersEqual(expectedBuffer, actualBuffer, algo, encoding,
                pread);
          }
          is.close();
        }
      }
    }
  }

  static void writeEncodedBlock(DataBlockEncoding encoding,
      DataOutputStream dos, final List<Integer> encodedSizes,
      final List<ByteBuffer> encodedBlocks, int blockId,
      boolean includesMemstoreTS) throws IOException {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DoubleOutputStream doubleOutputStream =
        new DoubleOutputStream(dos, baos);

    final int rawBlockSize = writeTestKeyValues(doubleOutputStream,
        blockId, includesMemstoreTS);

    ByteBuffer rawBuf = ByteBuffer.wrap(baos.toByteArray());
    rawBuf.rewind();

    final int encodedSize;
    final ByteBuffer encodedBuf;
    if (encoding == DataBlockEncoding.NONE) {
      encodedSize = rawBlockSize;
      encodedBuf = rawBuf;
    } else {
      ByteArrayOutputStream encodedOut = new ByteArrayOutputStream();
      encoding.getEncoder().compressKeyValues(
          new DataOutputStream(encodedOut),
          rawBuf.duplicate(), includesMemstoreTS);
      // The uncompressed size reported by the block includes the two-byte
      // encoding id, so account for it in the expected size.
      encodedSize = encodedOut.size() + DataBlockEncoding.ID_SIZE;
      encodedBuf = ByteBuffer.wrap(encodedOut.toByteArray());
    }
    encodedSizes.add(encodedSize);
    encodedBlocks.add(encodedBuf);
  }

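  /**
   * Asserts that two buffers are byte-for-byte equal. On a mismatch, fails
   * with the length of the common prefix and a dump of the bytes that
   * follow it in each buffer.
   */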
  static void assertBuffersEqual(ByteBuffer expectedBuffer,
      ByteBuffer actualBuffer, Compression.Algorithm compression,
      DataBlockEncoding encoding, boolean pread) {
    if (!actualBuffer.equals(expectedBuffer)) {
      int prefix = 0;
      int minLimit = Math.min(expectedBuffer.limit(), actualBuffer.limit());
      while (prefix < minLimit &&
          expectedBuffer.get(prefix) == actualBuffer.get(prefix)) {
        prefix++;
      }

      fail(String.format(
          "Content mismatch for compression %s, encoding %s, " +
          "pread %s, commonPrefix %d, expected %s, got %s",
          compression, encoding, pread, prefix,
          nextBytesToStr(expectedBuffer, prefix),
          nextBytesToStr(actualBuffer, prefix)));
    }
  }

  /**
   * Convert a few next bytes in the given buffer at the given position to
   * string. Used for error messages.
   */
  private static String nextBytesToStr(ByteBuffer buf, int pos) {
    int maxBytes = buf.limit() - pos;
    int numBytes = Math.min(16, maxBytes);
    return Bytes.toStringBinary(buf.array(), buf.arrayOffset() + pos,
        numBytes) + (numBytes < maxBytes ? "..." : "");
  }

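  /**
   * Writes {@link #NUM_TEST_BLOCKS} blocks of random types and verifies,
   * for every block, its offset, its previous-block offset, and (in
   * cache-on-write mode) its uncompressed contents on re-read.
   */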
  @Test
  public void testPreviousOffset() throws IOException {
    for (Compression.Algorithm algo : COMPRESSION_ALGORITHMS) {
      for (boolean pread : BOOLEAN_VALUES) {
        for (boolean cacheOnWrite : BOOLEAN_VALUES) {
          Random rand = defaultRandom();
          LOG.info("testPreviousOffset: Compression algorithm: " + algo +
              ", pread=" + pread +
              ", cacheOnWrite=" + cacheOnWrite);
          Path path = new Path(TEST_UTIL.getDataTestDir(), "prev_offset");
          List<Long> expectedOffsets = new ArrayList<Long>();
          List<Long> expectedPrevOffsets = new ArrayList<Long>();
          List<BlockType> expectedTypes = new ArrayList<BlockType>();
          List<ByteBuffer> expectedContents = cacheOnWrite
              ? new ArrayList<ByteBuffer>() : null;
          long totalSize = writeBlocks(rand, algo, path, expectedOffsets,
              expectedPrevOffsets, expectedTypes, expectedContents);

          FSDataInputStream is = fs.open(path);
          HFileBlock.FSReader hbr = new HFileBlock.FSReaderV2(is, algo,
              totalSize);
          long curOffset = 0;
          for (int i = 0; i < NUM_TEST_BLOCKS; ++i) {
            if (!pread) {
              assertEquals(is.getPos(), curOffset + (i == 0 ? 0 :
                  HFileBlock.HEADER_SIZE_WITH_CHECKSUMS));
            }

            assertEquals(expectedOffsets.get(i).longValue(), curOffset);
            if (detailedLogging) {
              LOG.info("Reading block #" + i + " at offset " + curOffset);
            }
            HFileBlock b = hbr.readBlockData(curOffset, -1, -1, pread);
            if (detailedLogging) {
              LOG.info("Block #" + i + ": " + b);
            }
            assertEquals("Invalid block #" + i + "'s type:",
                expectedTypes.get(i), b.getBlockType());
            assertEquals("Invalid previous block offset for block " + i
                + " of " + "type " + b.getBlockType() + ":",
                (long) expectedPrevOffsets.get(i), b.getPrevBlockOffset());
            b.sanityCheck();
            assertEquals(curOffset, b.getOffset());

            // Now re-load this block knowing the on-disk size. This tests
            // the read path used when the size of the block is known in
            // advance, e.g. from the block index.
            HFileBlock b2 = hbr.readBlockData(curOffset,
                b.getOnDiskSizeWithHeader(), -1, pread);
            b2.sanityCheck();

            assertEquals(b.getBlockType(), b2.getBlockType());
            assertEquals(b.getOnDiskSizeWithoutHeader(),
                b2.getOnDiskSizeWithoutHeader());
            assertEquals(b.getOnDiskSizeWithHeader(),
                b2.getOnDiskSizeWithHeader());
            assertEquals(b.getUncompressedSizeWithoutHeader(),
                b2.getUncompressedSizeWithoutHeader());
            assertEquals(b.getPrevBlockOffset(), b2.getPrevBlockOffset());
            assertEquals(curOffset, b2.getOffset());
            assertEquals(b.getBytesPerChecksum(), b2.getBytesPerChecksum());
            assertEquals(b.getOnDiskDataSizeWithHeader(),
                b2.getOnDiskDataSizeWithHeader());
            assertEquals(0, HFile.getChecksumFailuresCount());

            curOffset += b.getOnDiskSizeWithHeader();

            if (cacheOnWrite) {
              // In the cache-on-write mode we store uncompressed bytes, so
              // we can compare them to what was read. The buffer returned
              // by the reader additionally contains the checksum bytes at
              // the end, which the expected buffer does not.
              ByteBuffer bufRead = b.getBufferWithHeader();
              ByteBuffer bufExpected = expectedContents.get(i);
              boolean bytesAreCorrect = Bytes.compareTo(bufRead.array(),
                  bufRead.arrayOffset(),
                  bufRead.limit() - b.totalChecksumBytes(),
                  bufExpected.array(), bufExpected.arrayOffset(),
                  bufExpected.limit()) == 0;
              String wrongBytesMsg = "";

              if (!bytesAreCorrect) {
                // Optimization: only construct an error message in case we
                // will need it.
                wrongBytesMsg = "Expected bytes in block #" + i + " (algo="
                    + algo + ", pread=" + pread
                    + ", cacheOnWrite=" + cacheOnWrite + "):\n";
                wrongBytesMsg += Bytes.toStringBinary(bufExpected.array(),
                    bufExpected.arrayOffset(), Math.min(32,
                        bufExpected.limit()))
                    + ", actual:\n"
                    + Bytes.toStringBinary(bufRead.array(),
                        bufRead.arrayOffset(), Math.min(32, bufRead.limit()));
                if (detailedLogging) {
                  LOG.warn("expected header" +
                      HFileBlock.toStringHeader(bufExpected) +
                      "\nfound header" +
                      HFileBlock.toStringHeader(bufRead));
                  LOG.warn("bufread offset " + bufRead.arrayOffset() +
                      " limit " + bufRead.limit() +
                      " expected offset " + bufExpected.arrayOffset() +
                      " limit " + bufExpected.limit());
                  LOG.warn(wrongBytesMsg);
                }
              }
              assertTrue(wrongBytesMsg, bytesAreCorrect);
            }
          }

          assertEquals(curOffset, fs.getFileStatus(path).getLen());
          is.close();
        }
      }
    }
  }

  private Random defaultRandom() {
    return new Random(189237);
  }

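  /**
   * A reader client that, for ten seconds, reads randomly chosen blocks,
   * randomly alternating between seek+read and positional read, and
   * between passing and omitting the on-disk size hint.
   */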
  private class BlockReaderThread implements Callable<Boolean> {
    private final String clientId;
    private final HFileBlock.FSReader hbr;
    private final List<Long> offsets;
    private final List<BlockType> types;
    private final long fileSize;

    public BlockReaderThread(String clientId,
        HFileBlock.FSReader hbr, List<Long> offsets, List<BlockType> types,
        long fileSize) {
      this.clientId = clientId;
      this.offsets = offsets;
      this.hbr = hbr;
      this.types = types;
      this.fileSize = fileSize;
    }

    @Override
    public Boolean call() throws Exception {
      Random rand = new Random(clientId.hashCode());
      long endTime = System.currentTimeMillis() + 10000;
      int numBlocksRead = 0;
      int numPositionalRead = 0;
      int numWithOnDiskSize = 0;
      while (System.currentTimeMillis() < endTime) {
        int blockId = rand.nextInt(NUM_TEST_BLOCKS);
        long offset = offsets.get(blockId);
        boolean pread = rand.nextBoolean();
        boolean withOnDiskSize = rand.nextBoolean();
        long expectedSize =
            (blockId == NUM_TEST_BLOCKS - 1 ? fileSize
                : offsets.get(blockId + 1)) - offset;

        HFileBlock b;
        try {
          long onDiskSizeArg = withOnDiskSize ? expectedSize : -1;
          b = hbr.readBlockData(offset, onDiskSizeArg, -1, pread);
        } catch (IOException ex) {
          LOG.error("Error in client " + clientId + " trying to read block at "
              + offset + ", pread=" + pread + ", withOnDiskSize=" +
              withOnDiskSize, ex);
          return false;
        }

        assertEquals(types.get(blockId), b.getBlockType());
        assertEquals(expectedSize, b.getOnDiskSizeWithHeader());
        assertEquals(offset, b.getOffset());

        ++numBlocksRead;
        if (pread)
          ++numPositionalRead;
        if (withOnDiskSize)
          ++numWithOnDiskSize;
      }
      LOG.info("Client " + clientId + " successfully read " + numBlocksRead +
          " blocks (with pread: " + numPositionalRead + ", with onDiskSize " +
          "specified: " + numWithOnDiskSize + ")");

      return true;
    }
  }

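  /**
   * Runs {@link #NUM_READER_THREADS} concurrent {@link BlockReaderThread}
   * clients against a single file for each compression algorithm.
   */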
  @Test
  public void testConcurrentReading() throws Exception {
    for (Compression.Algorithm compressAlgo : COMPRESSION_ALGORITHMS) {
      Path path =
          new Path(TEST_UTIL.getDataTestDir(), "concurrent_reading");
      Random rand = defaultRandom();
      List<Long> offsets = new ArrayList<Long>();
      List<BlockType> types = new ArrayList<BlockType>();
      writeBlocks(rand, compressAlgo, path, offsets, null, types, null);
      FSDataInputStream is = fs.open(path);
      long fileSize = fs.getFileStatus(path).getLen();
      HFileBlock.FSReader hbr = new HFileBlock.FSReaderV2(is, compressAlgo,
          fileSize);

      Executor exec = Executors.newFixedThreadPool(NUM_READER_THREADS);
      ExecutorCompletionService<Boolean> ecs =
          new ExecutorCompletionService<Boolean>(exec);

      for (int i = 0; i < NUM_READER_THREADS; ++i) {
        ecs.submit(new BlockReaderThread("reader_" + (char) ('A' + i), hbr,
            offsets, types, fileSize));
      }

      for (int i = 0; i < NUM_READER_THREADS; ++i) {
        Future<Boolean> result = ecs.take();
        assertTrue(result.get());
        if (detailedLogging) {
          LOG.info(String.valueOf(i + 1)
              + " reader threads finished successfully (algo=" + compressAlgo
              + ")");
        }
      }

      is.close();
    }
  }

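  /**
   * Writes {@link #NUM_TEST_BLOCKS} blocks of random types and sizes to the
   * given path, recording each block's offset and type, the previous offset
   * of the same block type, and (if requested) its uncompressed contents.
   * @return the total number of bytes written
   */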
  private long writeBlocks(Random rand, Compression.Algorithm compressAlgo,
      Path path, List<Long> expectedOffsets, List<Long> expectedPrevOffsets,
      List<BlockType> expectedTypes, List<ByteBuffer> expectedContents
      ) throws IOException {
    boolean cacheOnWrite = expectedContents != null;
    FSDataOutputStream os = fs.create(path);
    HFileBlock.Writer hbw = new HFileBlock.Writer(compressAlgo, null,
        includesMemstoreTS,
        HFileReaderV2.MAX_MINOR_VERSION,
        HFile.DEFAULT_CHECKSUM_TYPE,
        HFile.DEFAULT_BYTES_PER_CHECKSUM);
    Map<BlockType, Long> prevOffsetByType = new HashMap<BlockType, Long>();
    long totalSize = 0;
    for (int i = 0; i < NUM_TEST_BLOCKS; ++i) {
      long pos = os.getPos();
      int blockTypeOrdinal = rand.nextInt(BlockType.values().length);
      if (blockTypeOrdinal == BlockType.ENCODED_DATA.ordinal()) {
        blockTypeOrdinal = BlockType.DATA.ordinal();
      }
      BlockType bt = BlockType.values()[blockTypeOrdinal];
      DataOutputStream dos = hbw.startWriting(bt);
      int size = rand.nextInt(500);
      for (int j = 0; j < size; ++j) {
        // This might compress well.
        dos.writeShort(i + 1);
        dos.writeInt(j + 1);
      }

      if (expectedOffsets != null)
        expectedOffsets.add(os.getPos());

      if (expectedPrevOffsets != null) {
        Long prevOffset = prevOffsetByType.get(bt);
        expectedPrevOffsets.add(prevOffset != null ? prevOffset : -1);
        prevOffsetByType.put(bt, os.getPos());
      }

      expectedTypes.add(bt);

      hbw.writeHeaderAndData(os);
      totalSize += hbw.getOnDiskSizeWithHeader();

      if (cacheOnWrite)
        expectedContents.add(hbw.getUncompressedBufferWithHeader());

      if (detailedLogging) {
        LOG.info("Written block #" + i + " of type " + bt
            + ", uncompressed size " + hbw.getUncompressedSizeWithoutHeader()
            + " at offset " + pos);
      }
    }
    os.close();
    LOG.info("Created a temporary file at " + path + ", "
        + fs.getFileStatus(path).getLen() + " bytes, compression=" +
        compressAlgo);
    return totalSize;
  }

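  /**
   * Checks that {@link HFileBlock#heapSize()} agrees with the sizes
   * estimated via {@link ClassSize} for a range of block sizes.
   */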
  @Test
  public void testBlockHeapSize() {
    if (ClassSize.is32BitJVM()) {
      assertTrue(HFileBlock.BYTE_BUFFER_HEAP_SIZE == 64);
    } else {
      assertTrue(HFileBlock.BYTE_BUFFER_HEAP_SIZE == 80);
    }

    for (int size : new int[] { 100, 256, 12345 }) {
      byte[] byteArr = new byte[HFileBlock.HEADER_SIZE_WITH_CHECKSUMS + size];
      ByteBuffer buf = ByteBuffer.wrap(byteArr, 0, size);
      HFileBlock block = new HFileBlock(BlockType.DATA, size, size, -1, buf,
          HFileBlock.FILL_HEADER, -1, includesMemstoreTS,
          HFileBlock.MINOR_VERSION_NO_CHECKSUM, 0, ChecksumType.NULL.getCode(),
          0);
      long byteBufferExpectedSize =
          ClassSize.align(ClassSize.estimateBase(buf.getClass(), true)
              + HFileBlock.HEADER_SIZE_WITH_CHECKSUMS + size);
      long hfileBlockExpectedSize =
          ClassSize.align(ClassSize.estimateBase(HFileBlock.class, true));
      long expected = hfileBlockExpectedSize + byteBufferExpectedSize;
      assertEquals("Block data size: " + size + ", byte buffer expected " +
          "size: " + byteBufferExpectedSize + ", HFileBlock class expected " +
          "size: " + hfileBlockExpectedSize + ";", expected,
          block.heapSize());
    }
  }

  @org.junit.Rule
  public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu =
      new org.apache.hadoop.hbase.ResourceCheckerJUnitRule();
}