/*
 * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.client;

import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.IOException;
import java.lang.reflect.Field;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.client.HConnectionManager.HConnectionImplementation;
import org.apache.hadoop.hbase.client.HConnectionManager.HConnectionKey;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;

/**
 * Tests HConnectionManager (HCM) features such as connection caching, sharing and cleanup.
 */
@Category(MediumTests.class)
public class TestHCM {
  private static final Log LOG = LogFactory.getLog(TestHCM.class);
  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  private static final byte[] TABLE_NAME = Bytes.toBytes("test");
  private static final byte[] TABLE_NAME1 = Bytes.toBytes("test1");
  private static final byte[] TABLE_NAME2 = Bytes.toBytes("test2");
  private static final byte[] FAM_NAM = Bytes.toBytes("f");
  private static final byte[] ROW = Bytes.toBytes("bbb");

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    TEST_UTIL.startMiniCluster(1);
  }

  @AfterClass public static void tearDownAfterClass() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }

  /**
   * @throws InterruptedException
   * @throws IllegalAccessException
   * @throws NoSuchFieldException
   * @throws ZooKeeperConnectionException
   * @throws IllegalArgumentException
   * @throws SecurityException
   * @see https://issues.apache.org/jira/browse/HBASE-2925
   */
  // Disabling.  Of course this test will OOME using new Configuration each time
  // St.Ack 20110428
  // @Test
  public void testManyNewConnectionsDoesnotOOME()
  throws SecurityException, IllegalArgumentException,
  ZooKeeperConnectionException, NoSuchFieldException, IllegalAccessException,
  InterruptedException {
    createNewConfigurations();
  }

  private static Random _randy = new Random();

  public static void createNewConfigurations() throws SecurityException,
  IllegalArgumentException, NoSuchFieldException,
  IllegalAccessException, InterruptedException, ZooKeeperConnectionException {
    HConnection last = null;
    for (int i = 0; i <= (HConnectionManager.MAX_CACHED_HBASE_INSTANCES * 2); i++) {
      // set random key to differentiate the connection from previous ones
      Configuration configuration = HBaseConfiguration.create();
      configuration.set("somekey", String.valueOf(_randy.nextInt()));
      System.out.println("Hash Code: " + configuration.hashCode());
      HConnection connection = HConnectionManager.getConnection(configuration);
      if (last != null) {
        if (last == connection) {
          System.out.println("!! Got same connection for once !!");
        }
      }
      // change the configuration once, and the cached connection is lost forever:
      //   the hashtable holding the cache won't be able to find its own keys
      //   to remove them, so the LRU strategy does not work.
      configuration.set("someotherkey", String.valueOf(_randy.nextInt()));
      last = connection;
      LOG.info("Cache Size: " + getHConnectionManagerCacheSize());
      Thread.sleep(100);
    }
    Assert.assertEquals(1, getHConnectionManagerCacheSize());
  }

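  /** @return the number of connections currently cached by HConnectionManager. */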
  private static int getHConnectionManagerCacheSize() {
    return HConnectionTestingUtility.getConnectionCount();
  }

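  /**
   * Verify how thread pools are used by connections and tables: a pool passed to
   * createConnection or getTable is used directly and is not shut down on close, while a
   * pool created internally by the connection is shared by its tables and is shut down
   * when the connection is closed.
   */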
  @Test
  public void testClusterConnection() throws IOException {
    ThreadPoolExecutor otherPool = new ThreadPoolExecutor(1, 1, 5, TimeUnit.SECONDS,
        new SynchronousQueue<Runnable>(), Threads.newDaemonThreadFactory("test-hcm"));

    HConnection con1 = HConnectionManager.createConnection(TEST_UTIL.getConfiguration());
    HConnection con2 = HConnectionManager.createConnection(TEST_UTIL.getConfiguration(), otherPool);
    // make sure the connection uses the ExecutorService that was passed in,
    // not an internally created one
    assertTrue(otherPool == ((HConnectionImplementation) con2).getCurrentBatchPool());

    String tableName = "testClusterConnection";
    TEST_UTIL.createTable(tableName.getBytes(), FAM_NAM).close();
    HTable t = (HTable) con1.getTable(tableName, otherPool);
    // make sure passing a pool to getTable does not trigger creation of an
    // internal pool
    assertNull("Internal Thread pool should be null",
        ((HConnectionImplementation) con1).getCurrentBatchPool());
    // table should use the pool passed
    assertTrue(otherPool == t.getPool());
    t.close();

    t = (HTable) con2.getTable(tableName);
    // table should use the connection's pool (the one passed when it was created)
    assertTrue(otherPool == t.getPool());
    t.close();

    t = (HTable) con2.getTable(Bytes.toBytes(tableName));
    // try the other API too
    assertTrue(otherPool == t.getPool());
    t.close();

    t = (HTable) con1.getTable(tableName);
    ExecutorService pool = ((HConnectionImplementation) con1).getCurrentBatchPool();
    // make sure an internal pool was created
    assertNotNull("An internal Thread pool should have been created", pool);
    // and that the table is using it
    assertTrue(t.getPool() == pool);
    t.close();

    t = (HTable) con1.getTable(tableName);
    // still using the *same* internal pool
    assertTrue(t.getPool() == pool);
    t.close();

    con1.close();
    // if the pool was created on demand it should be closed upon connection
    // close
    assertTrue(pool.isShutdown());

    con2.close();
    // if the pool was passed in, it is not closed when the connection closes
    assertFalse(otherPool.isShutdown());
    otherPool.shutdownNow();
  }

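  /**
   * Test that an aborted connection removes itself from the HConnectionManager cache,
   * so that a subsequent getConnection() call returns a fresh connection.
   */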
  @Test
  public void abortingHConnectionRemovesItselfFromHCM() throws Exception {
    // Save off current HConnections
    Map<HConnectionKey, HConnectionImplementation> oldHBaseInstances =
        new HashMap<HConnectionKey, HConnectionImplementation>();
    oldHBaseInstances.putAll(HConnectionManager.HBASE_INSTANCES);

    HConnectionManager.HBASE_INSTANCES.clear();

    try {
      HConnection connection = HConnectionManager.getConnection(TEST_UTIL.getConfiguration());
      connection.abort("test abortingHConnectionRemovesItselfFromHCM", new Exception(
          "test abortingHConnectionRemovesItselfFromHCM"));
      Assert.assertNotSame(connection,
        HConnectionManager.getConnection(TEST_UTIL.getConfiguration()));
    } finally {
      // Put original HConnections back
      HConnectionManager.HBASE_INSTANCES.clear();
      HConnectionManager.HBASE_INSTANCES.putAll(oldHBaseInstances);
    }
  }

  /**
   * Test that when we delete a cached region location using the first row of a region,
   * it is really removed from the cache.
   * @throws Exception
   */
  @Test
  public void testRegionCaching() throws Exception {
    HTable table = TEST_UTIL.createTable(TABLE_NAME, FAM_NAM);
    TEST_UTIL.createMultiRegions(table, FAM_NAM);
    Put put = new Put(ROW);
    put.add(FAM_NAM, ROW, ROW);
    table.put(put);
    HConnectionManager.HConnectionImplementation conn =
        (HConnectionManager.HConnectionImplementation)table.getConnection();
    assertNotNull(conn.getCachedLocation(TABLE_NAME, ROW));
    conn.deleteCachedLocation(TABLE_NAME, ROW);
    HRegionLocation rl = conn.getCachedLocation(TABLE_NAME, ROW);
    assertNull("What is this location?? " + rl, rl);
    table.close();
  }

  /**
   * Test that an externally managed connection and its thread pool are not closed when a
   * table obtained from it is closed; the pool is only shut down when the connection
   * itself is closed.
   * @throws Exception
   */
  @Test
  public void testConnectionManagement() throws Exception {
    TEST_UTIL.createTable(TABLE_NAME1, FAM_NAM);
    HConnection conn = HConnectionManager.createConnection(TEST_UTIL.getConfiguration());

    HTableInterface table = conn.getTable(TABLE_NAME1);
    table.close();
    assertFalse(conn.isClosed());
    assertFalse(((HTable)table).getPool().isShutdown());
    table = conn.getTable(TABLE_NAME1);
    table.close();
    assertFalse(((HTable)table).getPool().isShutdown());
    conn.close();
    assertTrue(((HTable)table).getPool().isShutdown());
  }

  /**
   * Make sure that {@link Configuration} instances that are essentially the
   * same map to the same {@link HConnection} instance.
   */
  @Test
  public void testConnectionSameness() throws Exception {
    HConnection previousConnection = null;
    for (int i = 0; i < 2; i++) {
      // set random key to differentiate the connection from previous ones
      Configuration configuration = TEST_UTIL.getConfiguration();
      configuration.set("some_key", String.valueOf(_randy.nextInt()));
      LOG.info("The hash code of the current configuration is: "
          + configuration.hashCode());
      HConnection currentConnection = HConnectionManager
          .getConnection(configuration);
      if (previousConnection != null) {
        assertTrue(
            "Did not get the same connection even though its key didn't change",
            previousConnection == currentConnection);
      }
      previousConnection = currentConnection;
      // change the configuration, so that it is no longer reachable from the
      // client's perspective. However, since it's part of the LRU doubly linked
      // list, it will eventually get thrown out, at which time it should also
      // close the corresponding {@link HConnection}.
      configuration.set("other_key", String.valueOf(_randy.nextInt()));
    }
  }

  /**
   * Makes sure that there is no leaking of
   * {@link HConnectionManager.TableServers} in the {@link HConnectionManager}
   * class.
   */
  @Test
  public void testConnectionUniqueness() throws Exception {
    int zkmaxconnections = TEST_UTIL.getConfiguration().
      getInt(HConstants.ZOOKEEPER_MAX_CLIENT_CNXNS,
        HConstants.DEFAULT_ZOOKEPER_MAX_CLIENT_CNXNS);
    // Test up to a max that is < the maximum number of zk connections.  If we
    // go above zk connections, we just fall into a cycle where we are failing
    // to set up a session and the test runs for a long time.
    int maxConnections = Math.min(zkmaxconnections - 1, 20);
    List<HConnection> connections = new ArrayList<HConnection>(maxConnections);
    HConnection previousConnection = null;
    try {
      for (int i = 0; i < maxConnections; i++) {
        // set random key to differentiate the connection from previous ones
        Configuration configuration = new Configuration(TEST_UTIL.getConfiguration());
        configuration.set("some_key", String.valueOf(_randy.nextInt()));
        configuration.set(HConstants.HBASE_CLIENT_INSTANCE_ID,
            String.valueOf(_randy.nextInt()));
        LOG.info("The hash code of the current configuration is: "
            + configuration.hashCode());
        HConnection currentConnection =
          HConnectionManager.getConnection(configuration);
        if (previousConnection != null) {
          assertTrue("Got the same connection even though its key changed!",
              previousConnection != currentConnection);
        }
        // change the configuration, so that it is no longer reachable from the
        // client's perspective. However, since it's part of the LRU doubly linked
        // list, it will eventually get thrown out, at which time it should also
        // close the corresponding {@link HConnection}.
        configuration.set("other_key", String.valueOf(_randy.nextInt()));

        previousConnection = currentConnection;
        LOG.info("The current HConnectionManager#HBASE_INSTANCES cache size is: "
            + getHConnectionManagerCacheSize());
        Thread.sleep(50);
        connections.add(currentConnection);
      }
    } finally {
      for (HConnection c: connections) {
        // Clean up connections made so we don't interfere w/ subsequent tests.
        HConnectionManager.deleteConnection(c.getConfiguration());
      }
    }
  }

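  /**
   * Test the reference counting done by the HConnectionManager cache: connections from
   * createConnection() are independent and close immediately, while connections from
   * getConnection() are shared and only really close once they have been closed as many
   * times as they were retrieved, at which point they drop out of the cache.
   */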
  @Test
  public void testClosing() throws Exception {
    Configuration configuration =
      new Configuration(TEST_UTIL.getConfiguration());
    configuration.set(HConstants.HBASE_CLIENT_INSTANCE_ID,
        String.valueOf(_randy.nextInt()));

    HConnection c1 = HConnectionManager.createConnection(configuration);
    // We create two connections with the same key.
    HConnection c2 = HConnectionManager.createConnection(configuration);

    HConnection c3 = HConnectionManager.getConnection(configuration);
    HConnection c4 = HConnectionManager.getConnection(configuration);
    assertTrue(c3 == c4);

    c1.close();
    assertTrue(c1.isClosed());
    assertFalse(c2.isClosed());
    assertFalse(c3.isClosed());

    c3.close();
    // still a reference left
    assertFalse(c3.isClosed());
    c3.close();
    assertTrue(c3.isClosed());
    // c3 was removed from the cache
    HConnection c5 = HConnectionManager.getConnection(configuration);
    assertTrue(c5 != c3);

    assertFalse(c2.isClosed());
    c2.close();
    assertTrue(c2.isClosed());
    c5.close();
    assertTrue(c5.isClosed());
  }

  /**
   * Trivial test to verify that nobody messes with
   * {@link HConnectionManager#createConnection(Configuration)}
   */
  @Test
  public void testCreateConnection() throws Exception {
    Configuration configuration = TEST_UTIL.getConfiguration();
    HConnection c1 = HConnectionManager.createConnection(configuration);
    HConnection c2 = HConnectionManager.createConnection(configuration);
    // created from the same configuration, yet they are different
    assertTrue(c1 != c2);
    assertTrue(c1.getConfiguration() == c2.getConfiguration());
    // make sure these were not cached
    HConnection c3 = HConnectionManager.getConnection(configuration);
    assertTrue(c1 != c3);
    assertTrue(c2 != c3);
  }

  /**
   * Tests that a destroyed connection does not have a live zookeeper.
   * This test is timing based.  We put up a connection to a table and then close it while
   * a background thread keeps forcing the connection closed, trying to provoke a close
   * catastrophe; we are hoping for a car crash so we can see whether we are leaking
   * zk connections.
   * @throws Exception
   */
  @Test
  public void testDeleteForZKConnLeak() throws Exception {
    TEST_UTIL.createTable(TABLE_NAME2, FAM_NAM);
    final Configuration config = HBaseConfiguration.create(TEST_UTIL.getConfiguration());
    config.setInt("zookeeper.recovery.retry", 1);
    config.setInt("zookeeper.recovery.retry.intervalmill", 1000);
    config.setInt("hbase.rpc.timeout", 2000);
    config.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);

    ThreadPoolExecutor pool = new ThreadPoolExecutor(1, 10,
      5, TimeUnit.SECONDS,
      new SynchronousQueue<Runnable>(),
      Threads.newDaemonThreadFactory("test-hcm-delete"));

    pool.submit(new Runnable() {
      @Override
      public void run() {
        while (!Thread.interrupted()) {
          try {
            HConnection conn = HConnectionManager.getConnection(config);
            LOG.info("Connection " + conn);
            HConnectionManager.deleteStaleConnection(conn);
            LOG.info("Connection closed " + conn);
            // TODO: This sleep time should be less than the time that it takes to open and close
            // a table.  Ideally we would do a few runs first to measure.  For now this is
            // timing based; hopefully we hit the bad condition.
            Threads.sleep(10);
          } catch (Exception e) {
          }
        }
      }
    });

    // Use the connection multiple times.
    for (int i = 0; i < 30; i++) {
      HConnection c1 = null;
      try {
        c1 = HConnectionManager.getConnection(config);
        LOG.info("HTable connection " + i + " " + c1);
        HTable table = new HTable(TABLE_NAME2, c1, pool);
        table.close();
        LOG.info("HTable connection " + i + " closed " + c1);
      } catch (Exception e) {
        LOG.info("We actually want this to happen!!!!  So we can see if we are leaking zk", e);
      } finally {
        if (c1 != null) {
          if (c1.isClosed()) {
            // cannot use getZooKeeper as that method instantiates a watcher if it is null
            Field zkwField = c1.getClass().getDeclaredField("zooKeeper");
            zkwField.setAccessible(true);
            Object watcher = zkwField.get(c1);

            if (watcher != null) {
              if (((ZooKeeperWatcher)watcher).getRecoverableZooKeeper().getState().isAlive()) {
                // non-synchronized access to the watcher; sleep and check again in case the zk
                // connection hasn't been cleaned up yet.
                Thread.sleep(1000);
                if (((ZooKeeperWatcher) watcher).getRecoverableZooKeeper().getState().isAlive()) {
                  pool.shutdownNow();
                  fail("Live zookeeper in closed connection");
                }
              }
            }
          }
          c1.close();
        }
      }
    }
    pool.shutdownNow();
  }

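  // JUnit rule that checks for leaked resources (e.g. threads) before and after each test.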
  @org.junit.Rule
  public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu =
    new org.apache.hadoop.hbase.ResourceCheckerJUnitRule();
}