/**
 * Copyright 2011 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.io.hfile.slab;

import static org.junit.Assert.*;

import java.nio.ByteBuffer;

import org.apache.hadoop.hbase.SmallTests;
import org.junit.*;
import org.junit.experimental.categories.Category;

/** Test cases for Slab.java. */
@Category(SmallTests.class)
public class TestSlab {
  static final int BLOCKSIZE = 1000;
  static final int NUMBLOCKS = 100;
  Slab testSlab;
  ByteBuffer[] buffers = new ByteBuffer[NUMBLOCKS];

  @Before
  public void setUp() {
    testSlab = new Slab(BLOCKSIZE, NUMBLOCKS);
  }

  @After
  public void tearDown() {
    testSlab.shutdown();
  }

  @Test
  public void testBasicFunctionality() throws InterruptedException {
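    // allocate every block in the slab and check that each buffer spans a full block.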
    for (int i = 0; i < NUMBLOCKS; i++) {
      buffers[i] = testSlab.alloc(BLOCKSIZE);
      assertEquals(BLOCKSIZE, buffers[i].limit());
    }

    // write a unique integer at the start of each allocated buffer
    // (absolute put, so the buffer position is left untouched).
    for (int i = 0; i < NUMBLOCKS; i++) {
      buffers[i].putInt(0, i);
    }

    // make sure the ByteBuffers remain unique (the slab allocator hasn't
    // handed out the same chunk of memory twice): each buffer must still
    // hold the integer that was written into it.
    for (int i = 0; i < NUMBLOCKS; i++) {
      assertEquals(i, buffers[i].getInt(0));
    }

    for (int i = 0; i < NUMBLOCKS; i++) {
      testSlab.free(buffers[i]); // free all the buffers.
    }

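    // after freeing, every block should be available for allocation again.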
    for (int i = 0; i < NUMBLOCKS; i++) {
      buffers[i] = testSlab.alloc(BLOCKSIZE);
      assertEquals(BLOCKSIZE, buffers[i].limit());
    }
  }
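
  /*
   * Supplemental sketch, not part of the original test: cycles the slab
   * through a few complete alloc/free rounds to check that freed blocks can
   * be handed out again. It uses only the Slab calls exercised above
   * (alloc, free); the method name and the round count are illustrative.
   */
  @Test
  public void testRepeatedAllocFreeCycles() throws InterruptedException {
    for (int round = 0; round < 3; round++) {
      // drain the slab completely...
      for (int i = 0; i < NUMBLOCKS; i++) {
        buffers[i] = testSlab.alloc(BLOCKSIZE);
        assertEquals(BLOCKSIZE, buffers[i].limit());
      }
      // ...then return every block so the next round can reuse it.
      for (int i = 0; i < NUMBLOCKS; i++) {
        testSlab.free(buffers[i]);
      }
    }
  }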

  @org.junit.Rule
  public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu =
    new org.apache.hadoop.hbase.ResourceCheckerJUnitRule();
}