1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20 package org.apache.hadoop.hbase;
21
22 import static org.junit.Assert.assertTrue;
23
24 import java.io.File;
25 import java.io.IOException;
26 import java.io.OutputStream;
27 import java.lang.reflect.Field;
28 import java.security.MessageDigest;
29 import java.util.ArrayList;
30 import java.util.Arrays;
31 import java.util.List;
32 import java.util.Map;
33 import java.util.NavigableSet;
34 import java.util.UUID;
35
36 import org.apache.commons.logging.Log;
37 import org.apache.commons.logging.LogFactory;
38 import org.apache.commons.logging.impl.Jdk14Logger;
39 import org.apache.commons.logging.impl.Log4JLogger;
40 import org.apache.hadoop.conf.Configuration;
41 import org.apache.hadoop.fs.FileSystem;
42 import org.apache.hadoop.fs.Path;
43 import org.apache.hadoop.hbase.client.Delete;
44 import org.apache.hadoop.hbase.client.Get;
45 import org.apache.hadoop.hbase.client.HBaseAdmin;
46 import org.apache.hadoop.hbase.client.HConnection;
47 import org.apache.hadoop.hbase.client.HTable;
48 import org.apache.hadoop.hbase.client.Put;
49 import org.apache.hadoop.hbase.client.Result;
50 import org.apache.hadoop.hbase.client.ResultScanner;
51 import org.apache.hadoop.hbase.client.Scan;
52 import org.apache.hadoop.hbase.master.HMaster;
53 import org.apache.hadoop.hbase.regionserver.HRegion;
54 import org.apache.hadoop.hbase.regionserver.HRegionServer;
55 import org.apache.hadoop.hbase.regionserver.InternalScanner;
56 import org.apache.hadoop.hbase.regionserver.ReadWriteConsistencyControl;
57 import org.apache.hadoop.hbase.regionserver.Store;
58 import org.apache.hadoop.hbase.security.User;
59 import org.apache.hadoop.hbase.util.Bytes;
60 import org.apache.hadoop.hbase.util.FSUtils;
61 import org.apache.hadoop.hbase.util.Threads;
62 import org.apache.hadoop.hbase.util.Writables;
63 import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
64 import org.apache.hadoop.hbase.zookeeper.ZKConfig;
65 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
66 import org.apache.hadoop.hdfs.DFSClient;
67 import org.apache.hadoop.hdfs.DistributedFileSystem;
68 import org.apache.hadoop.hdfs.MiniDFSCluster;
69 import org.apache.hadoop.hdfs.server.namenode.NameNode;
70 import org.apache.hadoop.mapred.MiniMRCluster;
71 import org.apache.zookeeper.ZooKeeper;
72
73
74
75
76
77
78
79
80
81
/**
 * Facility for tests: spins up and tears down mini clusters (DFS, ZooKeeper,
 * HBase, MapReduce) and provides helpers for creating, loading and scanning
 * tables against them.
 */
public class HBaseTestingUtility {
  private final static Log LOG = LogFactory.getLog(HBaseTestingUtility.class);
  // Configuration shared with (and mutated by) the clusters this utility runs.
  private Configuration conf;
  private MiniZooKeeperCluster zkCluster = null;
  // True when the zk cluster was handed in via setZkCluster(); such a cluster
  // is not shut down by shutdownMiniCluster().
  private boolean passedZkCluster = false;
  private MiniDFSCluster dfsCluster = null;
  private MiniHBaseCluster hbaseCluster = null;
  private MiniMRCluster mrCluster = null;

  // Directory (under the test data dir) the current cluster instance writes to.
  private File clusterTestBuildDir = null;

  /** System property key naming the directory test data is written under. */
  public static final String TEST_DIRECTORY_KEY = "test.build.data";

  /** Default test data location used when the system property is unset. */
  public static final String DEFAULT_TEST_DIRECTORY = "target/test-data";

  /** Creates a utility backed by a fresh default HBase configuration. */
  public HBaseTestingUtility() {
    this(HBaseConfiguration.create());
  }

  /**
   * Creates a utility backed by the passed configuration.
   * @param conf configuration used (and mutated) by the mini clusters
   */
  public HBaseTestingUtility(Configuration conf) {
    this.conf = conf;
  }
115
116
117
118
119
120
121
122
123
124
125
126
127 public Configuration getConfiguration() {
128 return this.conf;
129 }
130
131
132
133
134
135
136
137
138 public static Path getTestDir() {
139 return new Path(System.getProperty(TEST_DIRECTORY_KEY,
140 DEFAULT_TEST_DIRECTORY));
141 }
142
143
144
145
146
147
148
149
150
  /**
   * @param subdirName name of a subdirectory under the test dir
   * @return path of {@code subdirName} under {@link #getTestDir()}
   */
  public static Path getTestDir(final String subdirName) {
    return new Path(getTestDir(), subdirName);
  }
154
155
156
157
158
159
160
161
162
163
164
165
166
167 public File setupClusterTestBuildDir() {
168 String randomStr = UUID.randomUUID().toString();
169 String dirStr = getTestDir(randomStr).toString();
170 File dir = new File(dirStr).getAbsoluteFile();
171
172 dir.deleteOnExit();
173 return dir;
174 }
175
176
177
178
179 void isRunningCluster(String passedBuildPath) throws IOException {
180 if (this.clusterTestBuildDir == null || passedBuildPath != null) return;
181 throw new IOException("Cluster already running at " +
182 this.clusterTestBuildDir);
183 }
184
185
186
187
188
189
190
191
  /**
   * Starts a mini DFS cluster in a freshly chosen build directory.
   * @param servers number of datanodes to start
   * @return the running MiniDFSCluster
   * @throws Exception on startup failure
   */
  public MiniDFSCluster startMiniDFSCluster(int servers) throws Exception {
    return startMiniDFSCluster(servers, null);
  }
195
196
197
198
199
200
201
202
203
204
  /**
   * Starts a mini DFS cluster writing under {@code dir} (or a fresh random
   * directory when {@code dir} is null), and points this utility's
   * configuration at the new filesystem.
   * @param servers number of datanodes to start
   * @param dir directory for cluster data, or null to pick one
   * @return the running MiniDFSCluster
   * @throws Exception on startup failure
   */
  public MiniDFSCluster startMiniDFSCluster(int servers, final File dir)
  throws Exception {
    if (dir == null) {
      this.clusterTestBuildDir = setupClusterTestBuildDir();
    } else {
      this.clusterTestBuildDir = dir;
    }
    // MiniDFSCluster reads these system properties to place its data.
    System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestBuildDir.toString());
    System.setProperty("test.cache.data", this.clusterTestBuildDir.toString());
    this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
      true, null, null, null, null);
    // Point both the new-style and deprecated default-FS keys at the minidfs.
    FileSystem fs = this.dfsCluster.getFileSystem();
    this.conf.set("fs.defaultFS", fs.getUri().toString());
    this.conf.set("fs.default.name", fs.getUri().toString());
    return this.dfsCluster;
  }
227
228
229
230
231
232
  /**
   * Shuts down the mini DFS cluster if one is running.
   * NOTE(review): {@code dfsCluster} is not nulled here, so a subsequent
   * getDFSCluster() still returns the stopped instance — confirm callers
   * expect that.
   * @throws Exception on shutdown failure
   */
  public void shutdownMiniDFSCluster() throws Exception {
    if (this.dfsCluster != null) {
      this.dfsCluster.shutdown();
    }
  }
239
240
241
242
243
244
245
246
  /**
   * Starts a mini ZooKeeper cluster in a freshly chosen build directory.
   * @return the running MiniZooKeeperCluster
   * @throws Exception on startup failure
   */
  public MiniZooKeeperCluster startMiniZKCluster() throws Exception {
    return startMiniZKCluster(setupClusterTestBuildDir());
  }
251
  /**
   * Starts a mini ZooKeeper cluster under {@code dir} and records its client
   * port in this utility's configuration.
   * NOTE(review): passedZkCluster is cleared before the already-running check,
   * and the error message reports the new dir rather than the old one.
   * @param dir directory for zk data
   * @return the running MiniZooKeeperCluster
   * @throws Exception if a zk cluster is already running or startup fails
   */
  private MiniZooKeeperCluster startMiniZKCluster(final File dir)
  throws Exception {
    this.passedZkCluster = false;
    if (this.zkCluster != null) {
      throw new IOException("Cluster already running at " + dir);
    }
    this.zkCluster = new MiniZooKeeperCluster();
    int clientPort = this.zkCluster.startup(dir);
    // Clients find zk through this property.
    this.conf.set("hbase.zookeeper.property.clientPort",
      Integer.toString(clientPort));
    return this.zkCluster;
  }
264
265
266
267
268
269
270
271 public void shutdownMiniZKCluster() throws IOException {
272 if (this.zkCluster != null) {
273 this.zkCluster.shutdown();
274 this.zkCluster = null;
275 }
276 }
277
278
279
280
281
282
283
  /**
   * Starts a full mini cluster with one master and one regionserver/datanode.
   * @return the running MiniHBaseCluster
   * @throws Exception on startup failure
   */
  public MiniHBaseCluster startMiniCluster() throws Exception {
    return startMiniCluster(1, 1);
  }
287
288
289
290
291
292
293
294
295
296
297
298
299
300
  /**
   * Starts a full mini cluster with one master and {@code numSlaves}
   * regionservers/datanodes.
   * @param numSlaves number of regionservers (and datanodes) to start
   * @return the running MiniHBaseCluster
   * @throws Exception on startup failure
   */
  public MiniHBaseCluster startMiniCluster(final int numSlaves)
  throws Exception {
    return startMiniCluster(1, numSlaves);
  }
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
  /**
   * Starts DFS, ZooKeeper (if not already passed in) and HBase mini clusters.
   * Reuses a build directory named via the {@code test.build.data} system
   * property when one is set; otherwise picks a fresh random directory.
   * @param numMasters number of masters to start
   * @param numSlaves number of regionservers (and datanodes) to start
   * @return the running MiniHBaseCluster
   * @throws Exception if a cluster is already up or startup fails
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
    final int numSlaves)
  throws Exception {
    LOG.info("Starting up minicluster with " + numMasters + " master(s) and " +
        numSlaves + " regionserver(s) and datanode(s)");
    // Fail fast if a cluster is already up (unless caller passed a path).
    String testBuildPath = conf.get(TEST_DIRECTORY_KEY, null);
    isRunningCluster(testBuildPath);
    if (testBuildPath != null) {
      LOG.info("Using passed path: " + testBuildPath);
    }
    this.clusterTestBuildDir = testBuildPath == null?
      setupClusterTestBuildDir() : new File(testBuildPath);
    System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestBuildDir.getPath());
    // Bring up dfs first and wait until it is serving.
    startMiniDFSCluster(numSlaves, this.clusterTestBuildDir);
    this.dfsCluster.waitClusterUp();
    // Only start zk if one was not handed to us via setZkCluster().
    if (this.zkCluster == null) {
      startMiniZKCluster(this.clusterTestBuildDir);
    }
    return startMiniHBaseCluster(numMasters, numSlaves);
  }
349
350
351
352
353
354
355
356
357
358
359
360
361 public MiniHBaseCluster startMiniHBaseCluster(final int numMasters,
362 final int numSlaves)
363 throws IOException, InterruptedException {
364
365 createRootDir();
366 Configuration c = new Configuration(this.conf);
367 this.hbaseCluster = new MiniHBaseCluster(c, numMasters, numSlaves);
368
369 HTable t = new HTable(c, HConstants.META_TABLE_NAME);
370 ResultScanner s = t.getScanner(new Scan());
371 while (s.next() != null) {
372 continue;
373 }
374 LOG.info("Minicluster is up");
375 return this.hbaseCluster;
376 }
377
378
379
380
381
382
383
384 public void restartHBaseCluster(int servers) throws IOException, InterruptedException {
385 this.hbaseCluster = new MiniHBaseCluster(this.conf, servers);
386
387 HTable t = new HTable(new Configuration(this.conf), HConstants.META_TABLE_NAME);
388 ResultScanner s = t.getScanner(new Scan());
389 while (s.next() != null) {
390 continue;
391 }
392 LOG.info("HBase has been restarted");
393 }
394
395
396
397
398
399
400 public MiniHBaseCluster getMiniHBaseCluster() {
401 return this.hbaseCluster;
402 }
403
404
405
406
407
408
  /**
   * Shuts down HBase, ZooKeeper (unless it was passed in) and DFS mini
   * clusters, then removes the cluster build directory from local disk.
   * @throws IOException on shutdown failure
   */
  public void shutdownMiniCluster() throws IOException {
    LOG.info("Shutting down minicluster");
    shutdownMiniHBaseCluster();
    // A zk cluster handed to us via setZkCluster() is the caller's to stop.
    if (!this.passedZkCluster) shutdownMiniZKCluster();
    if (this.dfsCluster != null) {
      this.dfsCluster.shutdown();
    }
    // Clean up our on-disk working directory.
    if (this.clusterTestBuildDir != null && this.clusterTestBuildDir.exists()) {
      if (!FSUtils.deleteDirectory(FileSystem.getLocal(this.conf),
          new Path(this.clusterTestBuildDir.toString()))) {
        LOG.warn("Failed delete of " + this.clusterTestBuildDir.toString());
      }
      this.clusterTestBuildDir = null;
    }
    LOG.info("Minicluster is down");
  }
428
429
430
431
432
433 public void shutdownMiniHBaseCluster() throws IOException {
434 if (this.hbaseCluster != null) {
435 this.hbaseCluster.shutdown();
436
437 this.hbaseCluster.join();
438 }
439 this.hbaseCluster = null;
440 }
441
442
443
444
445
446
447
448
449
  /**
   * Creates the HBase root directory on the current filesystem (under the
   * user's home dir), stamps the hbase version file into it and points
   * {@code hbase.rootdir} at it.
   * @return the qualified root dir path
   * @throws IOException on filesystem failure
   */
  public Path createRootDir() throws IOException {
    FileSystem fs = FileSystem.get(this.conf);
    Path hbaseRootdir = fs.makeQualified(fs.getHomeDirectory());
    this.conf.set(HConstants.HBASE_DIR, hbaseRootdir.toString());
    fs.mkdirs(hbaseRootdir);
    // Write the version file hbase expects to find at its root.
    FSUtils.setVersion(fs, hbaseRootdir);
    return hbaseRootdir;
  }
458
459
460
461
462
  /**
   * Flushes all region caches in the mini cluster.
   * @throws IOException on flush failure
   */
  public void flush() throws IOException {
    this.hbaseCluster.flushcache();
  }

  /**
   * Flushes the region caches of the named table.
   * @param tableName table whose regions to flush
   * @throws IOException on flush failure
   */
  public void flush(byte [] tableName) throws IOException {
    this.hbaseCluster.flushcache(tableName);
  }
474
475
476
477
478
479
480
481
482
  /**
   * Creates a table with a single column family.
   * @param tableName table name
   * @param family the one column family
   * @return an HTable handle on the new table
   * @throws IOException on create failure
   */
  public HTable createTable(byte[] tableName, byte[] family)
  throws IOException{
    return createTable(tableName, new byte[][]{family});
  }

  /**
   * Creates a table with the given column families.
   * @param tableName table name
   * @param families column families
   * @return an HTable handle on the new table
   * @throws IOException on create failure
   */
  public HTable createTable(byte[] tableName, byte[][] families)
  throws IOException {
    return createTable(tableName, families,
      new Configuration(getConfiguration()));
  }
500
501
502
503
504
505
506
507
508
  /**
   * Creates a table with the given column families, returning a handle
   * backed by the supplied configuration.
   * @param tableName table name
   * @param families column families
   * @param c configuration for the returned HTable
   * @return an HTable handle on the new table
   * @throws IOException on create failure
   */
  public HTable createTable(byte[] tableName, byte[][] families,
      final Configuration c)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for(byte[] family : families) {
      desc.addFamily(new HColumnDescriptor(family));
    }
    getHBaseAdmin().createTable(desc);
    return new HTable(c, tableName);
  }
519
520
521
522
523
524
525
526
527
  /**
   * Creates a single-family table keeping up to {@code numVersions} versions.
   * @param tableName table name
   * @param family the one column family
   * @param numVersions max versions to keep
   * @return an HTable handle on the new table
   * @throws IOException on create failure
   */
  public HTable createTable(byte[] tableName, byte[] family, int numVersions)
  throws IOException {
    return createTable(tableName, new byte[][]{family}, numVersions);
  }
532
533
534
535
536
537
538
539
540
  /**
   * Creates a table where every family keeps up to {@code numVersions}
   * versions; other family attributes take HColumnDescriptor defaults
   * (blocksize is forced to Integer.MAX_VALUE).
   * @param tableName table name
   * @param families column families
   * @param numVersions max versions kept by every family
   * @return an HTable handle on the new table
   * @throws IOException on create failure
   */
  public HTable createTable(byte[] tableName, byte[][] families,
      int numVersions)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family, numVersions,
          HColumnDescriptor.DEFAULT_COMPRESSION,
          HColumnDescriptor.DEFAULT_IN_MEMORY,
          HColumnDescriptor.DEFAULT_BLOCKCACHE,
          Integer.MAX_VALUE, HColumnDescriptor.DEFAULT_TTL,
          HColumnDescriptor.DEFAULT_BLOOMFILTER,
          HColumnDescriptor.DEFAULT_REPLICATION_SCOPE);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    return new HTable(new Configuration(getConfiguration()), tableName);
  }
558
559
560
561
562
563
564
565
566
567 public HTable createTable(byte[] tableName, byte[][] families,
568 int[] numVersions)
569 throws IOException {
570 HTableDescriptor desc = new HTableDescriptor(tableName);
571 int i = 0;
572 for (byte[] family : families) {
573 HColumnDescriptor hcd = new HColumnDescriptor(family, numVersions[i],
574 HColumnDescriptor.DEFAULT_COMPRESSION,
575 HColumnDescriptor.DEFAULT_IN_MEMORY,
576 HColumnDescriptor.DEFAULT_BLOCKCACHE,
577 Integer.MAX_VALUE, HColumnDescriptor.DEFAULT_TTL,
578 HColumnDescriptor.DEFAULT_BLOOMFILTER,
579 HColumnDescriptor.DEFAULT_REPLICATION_SCOPE);
580 desc.addFamily(hcd);
581 i++;
582 }
583 getHBaseAdmin().createTable(desc);
584 return new HTable(new Configuration(getConfiguration()), tableName);
585 }
586
587
588
589
590
  /**
   * Disables and then deletes the named table.
   * @param tableName table to drop
   * @throws IOException on disable/delete failure
   */
  public void deleteTable(byte[] tableName) throws IOException {
    HBaseAdmin admin = new HBaseAdmin(getConfiguration());
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }
596
597
598
599
600
601
602
603 public HTable truncateTable(byte [] tableName) throws IOException {
604 HTable table = new HTable(getConfiguration(), tableName);
605 Scan scan = new Scan();
606 ResultScanner resScan = table.getScanner(scan);
607 for(Result res : resScan) {
608 Delete del = new Delete(res.getRow());
609 table.delete(del);
610 }
611 resScan = table.getScanner(scan);
612 return table;
613 }
614
615
616
617
618
619
620
621
  /**
   * Loads the table with all 3-letter lowercase row keys ('aaa'..'zzz'),
   * writing the row key as the value under family {@code f}.
   * NOTE(review): autoFlush is switched off here and never restored on the
   * passed table — callers relying on per-put flushing should reset it.
   * @param t table to load
   * @param f family to write into
   * @return number of rows written (26^3 = 17576)
   * @throws IOException on write failure
   */
  public int loadTable(final HTable t, final byte[] f) throws IOException {
    t.setAutoFlush(false);
    byte[] k = new byte[3];
    int rowCount = 0;
    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
          k[0] = b1;
          k[1] = b2;
          k[2] = b3;
          Put put = new Put(k);
          put.add(f, null, k);
          t.put(put);
          rowCount++;
        }
      }
    }
    // Push out everything buffered while autoFlush was off.
    t.flushCommits();
    return rowCount;
  }
642
643
644
645
646
647
648
  /**
   * Loads a region directly with all 3-letter lowercase row keys
   * ('aaa'..'zzz'), writing the row key as the value under family {@code f}.
   * Skips the WAL when the region has no log attached.
   * @param r region to load
   * @param f family to write into
   * @return number of rows written (26^3 = 17576)
   * @throws IOException on write failure
   */
  public int loadRegion(final HRegion r, final byte[] f)
  throws IOException {
    byte[] k = new byte[3];
    int rowCount = 0;
    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
          k[0] = b1;
          k[1] = b2;
          k[2] = b3;
          Put put = new Put(k);
          put.add(f, null, k);
          // No WAL to write to: mark the put so the region does not try.
          if (r.getLog() == null) put.setWriteToWAL(false);
          r.put(put);
          rowCount++;
        }
      }
    }
    return rowCount;
  }
669
670
671
672
673 public int countRows(final HTable table) throws IOException {
674 Scan scan = new Scan();
675 ResultScanner results = table.getScanner(scan);
676 int count = 0;
677 for (@SuppressWarnings("unused") Result res : results) {
678 count++;
679 }
680 results.close();
681 return count;
682 }
683
684
685
686
687 public String checksumRows(final HTable table) throws Exception {
688 Scan scan = new Scan();
689 ResultScanner results = table.getScanner(scan);
690 MessageDigest digest = MessageDigest.getInstance("MD5");
691 for (Result res : results) {
692 digest.update(res.getRow());
693 }
694 results.close();
695 return digest.toString();
696 }
697
698
699
700
701
702
703
704
705
  /**
   * Splits the table into multiple regions using the default {@link #KEYS}
   * split points.
   * @param table table to split
   * @param columnFamily family to ensure exists on the table
   * @return number of regions created
   * @throws IOException on failure
   */
  public int createMultiRegions(HTable table, byte[] columnFamily)
  throws IOException {
    return createMultiRegions(getConfiguration(), table, columnFamily);
  }
710
  /**
   * Default region start keys used by {@link #createMultiRegions}: the empty
   * key followed by "bbb".."yyy" (25 regions).
   */
  public static final byte[][] KEYS = {
    HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("bbb"),
    Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
    Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
    Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
    Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
    Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
    Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
    Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
    Bytes.toBytes("xxx"), Bytes.toBytes("yyy")
  };
722
723
724
725
726
727
728
729
730
  /**
   * Splits the table into multiple regions using the default {@link #KEYS}
   * split points, against the passed configuration.
   * @param c configuration to use
   * @param table table to split
   * @param columnFamily family to ensure exists on the table
   * @return number of regions created
   * @throws IOException on failure
   */
  public int createMultiRegions(final Configuration c, final HTable table,
      final byte[] columnFamily)
  throws IOException {
    return createMultiRegions(c, table, columnFamily, KEYS);
  }
736
737
738
739
740
741
742
743
744
745
  /**
   * Splits the table into {@code numRegions} regions with evenly spaced
   * split keys between "aaaaa" and "zzzzz" plus a leading empty-key region.
   * @param c configuration to use
   * @param table table to split
   * @param family family to ensure exists on the table
   * @param numRegions desired region count; must be at least 3
   * @return number of regions created
   * @throws IOException if numRegions &lt; 3 or on failure
   */
  public int createMultiRegions(final Configuration c, final HTable table,
      final byte [] family, int numRegions)
  throws IOException {
    if (numRegions < 3) throw new IOException("Must create at least 3 regions");
    byte [] startKey = Bytes.toBytes("aaaaa");
    byte [] endKey = Bytes.toBytes("zzzzz");
    // numRegions - 3 interior split points + the two endpoints from split(),
    // plus the empty start key below, gives numRegions region starts.
    byte [][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
    byte [][] regionStartKeys = new byte[splitKeys.length+1][];
    for (int i=0;i<splitKeys.length;i++) {
      regionStartKeys[i+1] = splitKeys[i];
    }
    regionStartKeys[0] = HConstants.EMPTY_BYTE_ARRAY;
    return createMultiRegions(c, table, family, regionStartKeys);
  }
760
  /**
   * Rewrites {@code .META.} so the table is made of one region per start key
   * (each region spanning to the next key, wrapping to the first), deletes
   * the table's previous meta rows, clears cached locations and asks the
   * master to assign the new regions if the table is enabled.
   * @param c configuration to use for meta access
   * @param table table to re-region
   * @param columnFamily family added to the table descriptor if missing
   * @param startKeys sorted-in-place array of region start keys
   * @return number of regions inserted into meta
   * @throws IOException on meta access failure
   */
  public int createMultiRegions(final Configuration c, final HTable table,
      final byte[] columnFamily, byte [][] startKeys)
  throws IOException {
    Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
    HTable meta = new HTable(c, HConstants.META_TABLE_NAME);
    HTableDescriptor htd = table.getTableDescriptor();
    if(!htd.hasFamily(columnFamily)) {
      HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
      htd.addFamily(hcd);
    }
    // Snapshot the table's existing meta rows before inserting the new
    // regions, so only the old entries get deleted below.
    List<byte[]> rows = getMetaTableRows(htd.getName());
    List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);
    int count = 0;
    for (int i = 0; i < startKeys.length; i++) {
      // Region [startKeys[i], startKeys[(i+1) % n]): the last region wraps
      // to the first key (the empty key), closing the key space.
      int j = (i + 1) % startKeys.length;
      HRegionInfo hri = new HRegionInfo(table.getTableDescriptor(),
        startKeys[i], startKeys[j]);
      Put put = new Put(hri.getRegionName());
      put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
        Writables.getBytes(hri));
      meta.put(put);
      LOG.info("createMultiRegions: inserted " + hri.toString());
      newRegions.add(hri);
      count++;
    }
    // Remove the table's pre-existing regions from meta.
    for (byte[] row : rows) {
      LOG.info("createMultiRegions: deleting meta row -> " +
        Bytes.toStringBinary(row));
      meta.delete(new Delete(row));
    }
    // Drop stale cached region locations for this connection.
    HConnection conn = table.getConnection();
    conn.clearRegionCache();
    // Assign the new regions if the table is up.
    if (getHBaseAdmin().isTableEnabled(table.getTableName())) {
      for(HRegionInfo hri : newRegions) {
        hbaseCluster.getMaster().assignRegion(hri);
      }
    }
    return count;
  }
808
809
810
811
812
813
814
815
816
817
818
  /**
   * Inserts one meta row per start key for the described table (each region
   * spanning to the next key, wrapping to the first) without deleting old
   * rows or assigning regions.
   * @param conf configuration to use for meta access
   * @param htd descriptor of the table the regions belong to
   * @param startKeys sorted-in-place array of region start keys
   * @return the HRegionInfos inserted, in insertion order
   * @throws IOException on meta access failure
   */
  public List<HRegionInfo> createMultiRegionsInMeta(final Configuration conf,
      final HTableDescriptor htd, byte [][] startKeys)
  throws IOException {
    HTable meta = new HTable(conf, HConstants.META_TABLE_NAME);
    Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
    List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);
    int count = 0;
    for (int i = 0; i < startKeys.length; i++) {
      // Last region wraps around to the first start key.
      int j = (i + 1) % startKeys.length;
      HRegionInfo hri = new HRegionInfo(htd, startKeys[i], startKeys[j]);
      Put put = new Put(hri.getRegionName());
      put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
        Writables.getBytes(hri));
      meta.put(put);
      LOG.info("createMultiRegionsInMeta: inserted " + hri.toString());
      newRegions.add(hri);
      count++;
    }
    return newRegions;
  }
840
841
842
843
844
845
846 public List<byte[]> getMetaTableRows() throws IOException {
847
848 HTable t = new HTable(new Configuration(this.conf), HConstants.META_TABLE_NAME);
849 List<byte[]> rows = new ArrayList<byte[]>();
850 ResultScanner s = t.getScanner(new Scan());
851 for (Result result : s) {
852 LOG.info("getMetaTableRows: row -> " +
853 Bytes.toStringBinary(result.getRow()));
854 rows.add(result.getRow());
855 }
856 s.close();
857 return rows;
858 }
859
860
861
862
863
864
  /**
   * Returns the {@code .META.} row keys belonging to the named table, found
   * by decoding each row's HRegionInfo and comparing its table name.
   * @param tableName table whose meta rows to collect
   * @return list of matching meta row keys
   * @throws IOException on scan or decode failure
   */
  public List<byte[]> getMetaTableRows(byte[] tableName) throws IOException {
    HTable t = new HTable(new Configuration(this.conf), HConstants.META_TABLE_NAME);
    List<byte[]> rows = new ArrayList<byte[]>();
    ResultScanner s = t.getScanner(new Scan());
    for (Result result : s) {
      HRegionInfo info = Writables.getHRegionInfo(
          result.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER));
      HTableDescriptor desc = info.getTableDesc();
      // Keep only rows whose region belongs to the requested table.
      if (Bytes.compareTo(desc.getName(), tableName) == 0) {
        LOG.info("getMetaTableRows: row -> " +
            Bytes.toStringBinary(result.getRow()));
        rows.add(result.getRow());
      }
    }
    s.close();
    return rows;
  }
883
884
885
886
887
888
889
890
891
892
893
894 public HRegionServer getRSForFirstRegionInTable(byte[] tableName)
895 throws IOException {
896 List<byte[]> metaRows = getMetaTableRows(tableName);
897 if (metaRows == null || metaRows.size() == 0) {
898 return null;
899 }
900 int index = hbaseCluster.getServerWith(metaRows.get(0));
901 return hbaseCluster.getRegionServerThreads().get(index).getRegionServer();
902 }
903
904
905
906
907
908
909
  /**
   * Starts a mini MapReduce cluster with two servers.
   * @throws IOException on startup failure
   */
  public void startMiniMapReduceCluster() throws IOException {
    startMiniMapReduceCluster(2);
  }
913
914
915
916
917
918
919
  /**
   * Starts a mini MapReduce cluster on the current filesystem and points the
   * configuration's job tracker at it.
   * @param servers number of task trackers to start
   * @throws IOException on startup failure
   */
  public void startMiniMapReduceCluster(final int servers) throws IOException {
    LOG.info("Starting mini mapreduce cluster...");
    Configuration c = getConfiguration();
    // MiniMRCluster reads these to place logs and job output.
    System.setProperty("hadoop.log.dir", c.get("hadoop.log.dir"));
    c.set("mapred.output.dir", c.get("hadoop.tmp.dir"));
    mrCluster = new MiniMRCluster(servers,
      FileSystem.get(c).getUri().toString(), 1);
    LOG.info("Mini mapreduce cluster started");
    // Expose the cluster's job tracker address to jobs built off this conf.
    c.set("mapred.job.tracker",
        mrCluster.createJobConf().get("mapred.job.tracker"));
  }
932
933
934
935
  /**
   * Stops the mini MapReduce cluster, if one is running, and resets the
   * configured job tracker to the local runner.
   */
  public void shutdownMiniMapReduceCluster() {
    LOG.info("Stopping mini mapreduce cluster...");
    if (mrCluster != null) {
      mrCluster.shutdown();
    }
    // Restore the local in-process job runner.
    conf.set("mapred.job.tracker", "local");
    LOG.info("Mini mapreduce cluster stopped");
  }
945
946
947
948
949
950
951 public void enableDebug(Class<?> clazz) {
952 Log l = LogFactory.getLog(clazz);
953 if (l instanceof Log4JLogger) {
954 ((Log4JLogger) l).getLogger().setLevel(org.apache.log4j.Level.DEBUG);
955 } else if (l instanceof Jdk14Logger) {
956 ((Jdk14Logger) l).getLogger().setLevel(java.util.logging.Level.ALL);
957 }
958 }
959
960
961
962
963
  /**
   * Expires the active master's ZooKeeper session.
   * @throws Exception on failure
   */
  public void expireMasterSession() throws Exception {
    HMaster master = hbaseCluster.getMaster();
    expireSession(master.getZooKeeper(), master);
  }

  /**
   * Expires the ZooKeeper session of the regionserver at {@code index}.
   * @param index regionserver index in the mini cluster
   * @throws Exception on failure
   */
  public void expireRegionServerSession(int index) throws Exception {
    HRegionServer rs = hbaseCluster.getRegionServer(index);
    expireSession(rs.getZooKeeper(), rs);
  }
978
  /**
   * Forces expiry of the passed watcher's ZooKeeper session by opening a
   * second connection with the same session id/password and closing it, then
   * sleeps long enough for the expiry to propagate and touches meta so the
   * cluster reacts.
   * @param nodeZK watcher whose session to expire
   * @param server owning server (unused here; kept for signature parity)
   * @throws Exception on failure
   */
  public void expireSession(ZooKeeperWatcher nodeZK, Server server)
    throws Exception {
    Configuration c = new Configuration(this.conf);
    String quorumServers = ZKConfig.getZKQuorumServersString(c);
    int sessionTimeout = 5 * 1000;
    ZooKeeper zk = nodeZK.getZooKeeper();
    byte[] password = zk.getSessionPasswd();
    long sessionID = zk.getSessionId();
    // Connecting with the same session credentials and closing invalidates
    // the original session server-side.
    ZooKeeper newZK = new ZooKeeper(quorumServers,
      sessionTimeout, EmptyWatcher.instance, sessionID, password);
    newZK.close();
    final long sleep = sessionTimeout * 5L;
    LOG.info("ZK Closed Session 0x" + Long.toHexString(sessionID) +
      "; sleeping=" + sleep);
    Thread.sleep(sleep);
    // Touch meta to make the cluster notice the expired session.
    new HTable(new Configuration(conf), HConstants.META_TABLE_NAME);
  }
999
1000
1001
1002
1003
1004
1005 public MiniHBaseCluster getHBaseCluster() {
1006 return hbaseCluster;
1007 }
1008
1009
1010
1011
1012
1013
1014
  /**
   * Creates a new HBaseAdmin over a copy of this utility's configuration.
   * @return a fresh HBaseAdmin instance
   * @throws IOException on connection failure
   */
  public HBaseAdmin getHBaseAdmin()
  throws IOException {
    return new HBaseAdmin(new Configuration(getConfiguration()));
  }
1019
1020
1021
1022
1023
1024
1025
  /**
   * Closes the named region (unassigned variant: no destination server).
   * @param regionName region name as a string
   * @throws IOException on close failure
   */
  public void closeRegion(String regionName) throws IOException {
    closeRegion(Bytes.toBytes(regionName));
  }

  /**
   * Closes the named region (unassigned variant: no destination server).
   * @param regionName region name bytes
   * @throws IOException on close failure
   */
  public void closeRegion(byte[] regionName) throws IOException {
    HBaseAdmin admin = getHBaseAdmin();
    admin.closeRegion(regionName, null);
  }

  /**
   * Closes the region of {@code table} containing the given row.
   * @param row a row within the region to close
   * @param table table the row belongs to
   * @throws IOException on lookup or close failure
   */
  public void closeRegionByRow(String row, HTable table) throws IOException {
    closeRegionByRow(Bytes.toBytes(row), table);
  }

  /**
   * Closes the region of {@code table} containing the given row.
   * @param row a row within the region to close
   * @param table table the row belongs to
   * @throws IOException on lookup or close failure
   */
  public void closeRegionByRow(byte[] row, HTable table) throws IOException {
    HRegionLocation hrl = table.getRegionLocation(row);
    closeRegion(hrl.getRegionInfo().getRegionName());
  }
1063
  /** @return the mini ZooKeeper cluster, or null if none is set/running */
  public MiniZooKeeperCluster getZkCluster() {
    return zkCluster;
  }

  /**
   * Hands this utility an externally managed zk cluster; it will not be shut
   * down by {@link #shutdownMiniCluster()}.
   * @param zkCluster externally started zk cluster
   */
  public void setZkCluster(MiniZooKeeperCluster zkCluster) {
    this.passedZkCluster = true;
    this.zkCluster = zkCluster;
  }

  /** @return the mini DFS cluster, or null if none is running */
  public MiniDFSCluster getDFSCluster() {
    return dfsCluster;
  }

  /**
   * @return the filesystem named by this utility's configuration
   * @throws IOException on filesystem access failure
   */
  public FileSystem getTestFileSystem() throws IOException {
    return FileSystem.get(conf);
  }
1080
1081
1082
1083
1084
  /**
   * Deletes the whole test data directory.
   * @return true if the directory existed and was removed
   * @throws IOException on filesystem failure
   */
  public boolean cleanupTestDir() throws IOException {
    return deleteDir(getTestDir());
  }

  /**
   * Deletes the named subdirectory of the test data directory.
   * @param subdir subdirectory name
   * @return true if it existed and was removed
   * @throws IOException on filesystem failure
   */
  public boolean cleanupTestDir(final String subdir) throws IOException {
    return deleteDir(getTestDir(subdir));
  }
1097
1098
1099
1100
1101
1102
1103 public boolean deleteDir(final Path dir) throws IOException {
1104 FileSystem fs = getTestFileSystem();
1105 if (fs.exists(dir)) {
1106 return fs.delete(getTestDir(), true);
1107 }
1108 return false;
1109 }
1110
  /**
   * Polls until the named table is reported available, failing the calling
   * test (via assert) once the timeout elapses.
   * @param table table to wait for
   * @param timeoutMillis maximum time to wait
   * @throws InterruptedException if interrupted while sleeping
   * @throws IOException on admin failure
   */
  public void waitTableAvailable(byte[] table, long timeoutMillis)
  throws InterruptedException, IOException {
    HBaseAdmin admin = getHBaseAdmin();
    long startWait = System.currentTimeMillis();
    while (!admin.isTableAvailable(table)) {
      assertTrue("Timed out waiting for table " + Bytes.toStringBinary(table),
          System.currentTimeMillis() - startWait < timeoutMillis);
      Thread.sleep(500);
    }
  }
1121
1122
1123
1124
1125
1126
1127
1128
  /**
   * Starts one extra regionserver if fewer than {@code num} are live.
   * NOTE(review): at most one server is started per call, so the cluster may
   * still be below {@code num} afterwards — TODO confirm that is intended.
   * @param num desired minimum number of live regionservers
   * @return true if a server was started, false if enough were already live
   * @throws IOException on startup failure
   */
  public boolean ensureSomeRegionServersAvailable(final int num)
      throws IOException {
    if (this.getHBaseCluster().getLiveRegionServerThreads().size() < num) {
      LOG.info("Started new server=" +
        this.getHBaseCluster().startRegionServer());
      return true;
    }
    return false;
  }
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
  /**
   * Returns a test user distinct from the current one (current user name plus
   * the differentiating suffix, in the "supergroup" group). When the
   * filesystem is not HDFS, just returns the current user.
   * @param c configuration to create the user against
   * @param differentiatingSuffix appended to the current user's name
   * @return a different User, or the current User on non-DFS filesystems
   * @throws IOException on user lookup/creation failure
   */
  public static User getDifferentUser(final Configuration c,
    final String differentiatingSuffix)
  throws IOException {
    FileSystem currentfs = FileSystem.get(c);
    // Distinct users only matter against HDFS permissions.
    if (!(currentfs instanceof DistributedFileSystem)) {
      return User.getCurrent();
    }
    String username = User.getCurrent().getName() +
      differentiatingSuffix;
    User user = User.createUserForTesting(c, username,
        new String[]{"supergroup"});
    return user;
  }
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175 public void setNameNodeNameSystemLeasePeriod(final int soft, final int hard)
1176 throws SecurityException, NoSuchFieldException, IllegalArgumentException, IllegalAccessException {
1177
1178
1179
1180
1181 Field field = this.dfsCluster.getClass().getDeclaredField("nameNode");
1182 field.setAccessible(true);
1183 NameNode nn = (NameNode)field.get(this.dfsCluster);
1184 nn.namesystem.leaseManager.setLeasePeriod(100, 50000);
1185 }
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
  /**
   * Sets the package-private {@code maxRecoveryErrorCount} field on a
   * DFSOutputStream via reflection, so tests can make recovery give up fast.
   * Failure to set the field is logged, not thrown.
   * @param stream stream expected to be a DFSClient.DFSOutputStream
   * @param max value to set
   */
  public static void setMaxRecoveryErrorCount(final OutputStream stream,
      final int max) {
    try {
      // DFSOutputStream is a non-public nested class of DFSClient; find it.
      Class<?> [] clazzes = DFSClient.class.getDeclaredClasses();
      for (Class<?> clazz: clazzes) {
        String className = clazz.getSimpleName();
        if (className.equals("DFSOutputStream")) {
          if (clazz.isInstance(stream)) {
            Field maxRecoveryErrorCountField =
              stream.getClass().getDeclaredField("maxRecoveryErrorCount");
            maxRecoveryErrorCountField.setAccessible(true);
            maxRecoveryErrorCountField.setInt(stream, max);
            break;
          }
        }
      }
    } catch (Exception e) {
      // Best-effort: log and continue if hadoop internals changed.
      LOG.info("Could not set max recovery field", e);
    }
  }
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
  /**
   * Polls {@code .META.} until exactly {@code countOfRegions} rows carry a
   * server column, i.e. that many regions are assigned.
   * NOTE(review): the inner loop breaks on the first row with an empty
   * server column, so later assigned rows are not counted that round —
   * confirm that is intended before relying on partial counts.
   * @param countOfRegions number of assigned regions to wait for
   * @throws IOException on meta scan failure
   */
  public void waitUntilAllRegionsAssigned(final int countOfRegions)
  throws IOException {
    HTable meta = new HTable(getConfiguration(), HConstants.META_TABLE_NAME);
    while (true) {
      int rows = 0;
      Scan scan = new Scan();
      scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
      ResultScanner s = meta.getScanner(scan);
      for (Result r = null; (r = s.next()) != null;) {
        byte [] b =
          r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
        if (b == null || b.length <= 0) {
          break;
        }
        rows++;
      }
      s.close();
      // All regions assigned?
      if (rows == countOfRegions) {
        break;
      }
      LOG.info("Found=" + rows);
      Threads.sleep(1000);
    }
  }
1255
1256
1257
1258
1259
  /**
   * Reads the KeyValues matching the Get directly from a Store, bypassing the
   * region. Results from a neighboring row (the scanner may run past the
   * requested row) are discarded.
   * @param store store to read from
   * @param get get specifying row and columns
   * @return matching KeyValues; empty if the row is absent
   * @throws IOException on read failure
   */
  public static List<KeyValue> getFromStoreFile(Store store,
                                                Get get) throws IOException {
    // Store scanners read at the thread's RWCC read point; reset it first.
    ReadWriteConsistencyControl.resetThreadReadPoint();
    Scan scan = new Scan(get);
    InternalScanner scanner = (InternalScanner) store.getScanner(scan,
        scan.getFamilyMap().get(store.getFamily().getName()));
    List<KeyValue> result = new ArrayList<KeyValue>();
    scanner.next(result);
    if (!result.isEmpty()) {
      // Verify the result is for the requested row; drop spill-over rows.
      KeyValue kv = result.get(0);
      if (!Bytes.equals(kv.getRow(), get.getRow())) {
        result.clear();
      }
    }
    return result;
  }
1278
1279
1280
1281
1282
  /**
   * Reads the KeyValues for the given row/columns directly from a Store,
   * bypassing the region.
   * @param store store to read from
   * @param row row to fetch
   * @param columns columns to fetch within the store's family
   * @return matching KeyValues; empty if the row is absent
   * @throws IOException on read failure
   */
  public static List<KeyValue> getFromStoreFile(Store store,
                                                byte [] row,
                                                NavigableSet<byte[]> columns
                                                ) throws IOException {
    Get get = new Get(row);
    // Register the requested columns under this store's family.
    Map<byte[], NavigableSet<byte[]>> s = get.getFamilyMap();
    s.put(store.getFamily().getName(), columns);
    return getFromStoreFile(store,get);
  }
1293 }