/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.avro;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Collection;
import java.util.List;

import org.apache.avro.Schema;
import org.apache.avro.generic.GenericArray;
import org.apache.avro.generic.GenericData;
import org.apache.avro.util.Utf8;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HServerAddress;
import org.apache.hadoop.hbase.HServerLoad;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.avro.generated.AClusterStatus;
import org.apache.hadoop.hbase.avro.generated.AColumn;
import org.apache.hadoop.hbase.avro.generated.AColumnValue;
import org.apache.hadoop.hbase.avro.generated.ACompressionAlgorithm;
import org.apache.hadoop.hbase.avro.generated.ADelete;
import org.apache.hadoop.hbase.avro.generated.AFamilyDescriptor;
import org.apache.hadoop.hbase.avro.generated.AGet;
import org.apache.hadoop.hbase.avro.generated.AIllegalArgument;
import org.apache.hadoop.hbase.avro.generated.APut;
import org.apache.hadoop.hbase.avro.generated.ARegionLoad;
import org.apache.hadoop.hbase.avro.generated.AResult;
import org.apache.hadoop.hbase.avro.generated.AResultEntry;
import org.apache.hadoop.hbase.avro.generated.AScan;
import org.apache.hadoop.hbase.avro.generated.AServerAddress;
import org.apache.hadoop.hbase.avro.generated.AServerInfo;
import org.apache.hadoop.hbase.avro.generated.AServerLoad;
import org.apache.hadoop.hbase.avro.generated.ATableDescriptor;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.hfile.Compression;
import org.apache.hadoop.hbase.util.Bytes;

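/**
 * Static helpers for converting between HBase client/metadata types and their
 * Avro-generated counterparts used by HBase's Avro gateway.
 */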
public class AvroUtil {
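
  //
  // Cluster and server metadata
  //

  /** Converts an {@link HServerAddress} to its Avro {@link AServerAddress} counterpart. */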
  static public AServerAddress hsaToASA(HServerAddress hsa) throws IOException {
    AServerAddress asa = new AServerAddress();
    asa.hostname = new Utf8(hsa.getHostname());
    asa.inetSocketAddress = new Utf8(hsa.getInetSocketAddress().toString());
    asa.port = hsa.getPort();
    return asa;
  }

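  /** Converts one region's {@link HServerLoad.RegionLoad} to an Avro {@link ARegionLoad}. */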
  static public ARegionLoad hrlToARL(HServerLoad.RegionLoad rl) throws IOException {
    ARegionLoad arl = new ARegionLoad();
    arl.memStoreSizeMB = rl.getMemStoreSizeMB();
    arl.name = ByteBuffer.wrap(rl.getName());
    arl.storefileIndexSizeMB = rl.getStorefileIndexSizeMB();
    arl.storefiles = rl.getStorefiles();
    arl.storefileSizeMB = rl.getStorefileSizeMB();
    arl.stores = rl.getStores();
    return arl;
  }

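  /** Converts an {@link HServerLoad}, including its per-region loads, to an Avro {@link AServerLoad}. */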
  static public AServerLoad hslToASL(HServerLoad hsl) throws IOException {
    AServerLoad asl = new AServerLoad();
    asl.load = hsl.getLoad();
    asl.maxHeapMB = hsl.getMaxHeapMB();
    asl.memStoreSizeInMB = hsl.getMemStoreSizeInMB();
    asl.numberOfRegions = hsl.getNumberOfRegions();
    asl.numberOfRequests = hsl.getNumberOfRequests();

    Collection<HServerLoad.RegionLoad> regionLoads = hsl.getRegionsLoad().values();
    Schema s = Schema.createArray(ARegionLoad.SCHEMA$);
    GenericData.Array<ARegionLoad> aregionLoads = null;
    if (regionLoads != null) {
      aregionLoads = new GenericData.Array<ARegionLoad>(regionLoads.size(), s);
      for (HServerLoad.RegionLoad rl : regionLoads) {
        aregionLoads.add(hrlToARL(rl));
      }
    } else {
      aregionLoads = new GenericData.Array<ARegionLoad>(0, s);
    }
    asl.regionsLoad = aregionLoads;

    asl.storefileIndexSizeInMB = hsl.getStorefileIndexSizeInMB();
    asl.storefiles = hsl.getStorefiles();
    asl.storefileSizeInMB = hsl.getStorefileSizeInMB();
    asl.usedHeapMB = hsl.getUsedHeapMB();
    return asl;
  }

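  /** Builds an Avro {@link AServerInfo} from a {@link ServerName} and its {@link HServerLoad}. */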
  static public AServerInfo hsiToASI(ServerName sn, HServerLoad hsl) throws IOException {
    AServerInfo asi = new AServerInfo();
    asi.infoPort = -1;
    asi.load = hslToASL(hsl);
    asi.serverAddress = hsaToASA(new HServerAddress(sn.getHostname(), sn.getPort()));
    asi.serverName = new Utf8(sn.toString());
    asi.startCode = sn.getStartcode();
    return asi;
  }

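  /** Converts a {@link ClusterStatus} to an Avro {@link AClusterStatus}. */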
  static public AClusterStatus csToACS(ClusterStatus cs) throws IOException {
    AClusterStatus acs = new AClusterStatus();
    acs.averageLoad = cs.getAverageLoad();
    Collection<ServerName> deadServerNames = cs.getDeadServerNames();
    Schema stringArraySchema = Schema.createArray(Schema.create(Schema.Type.STRING));
    GenericData.Array<CharSequence> adeadServerNames = null;
    if (deadServerNames != null) {
      adeadServerNames =
        new GenericData.Array<CharSequence>(deadServerNames.size(), stringArraySchema);
      for (ServerName deadServerName : deadServerNames) {
        adeadServerNames.add(new Utf8(deadServerName.toString()));
      }
    } else {
      adeadServerNames = new GenericData.Array<CharSequence>(0, stringArraySchema);
    }
    acs.deadServerNames = adeadServerNames;
    acs.deadServers = cs.getDeadServers();
    acs.hbaseVersion = new Utf8(cs.getHBaseVersion());
    acs.regionsCount = cs.getRegionsCount();
    acs.requestsCount = cs.getRequestsCount();
    Collection<ServerName> hserverInfos = cs.getServers();
    Schema s = Schema.createArray(AServerInfo.SCHEMA$);
    GenericData.Array<AServerInfo> aserverInfos = null;
    if (hserverInfos != null) {
      aserverInfos = new GenericData.Array<AServerInfo>(hserverInfos.size(), s);
      for (ServerName hsi : hserverInfos) {
        aserverInfos.add(hsiToASI(hsi, cs.getLoad(hsi)));
      }
    } else {
      aserverInfos = new GenericData.Array<AServerInfo>(0, s);
    }
    acs.serverInfos = aserverInfos;
    acs.servers = cs.getServers().size();
    return acs;
  }
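
  //
  // Table metadata
  //

  /** Converts an {@link HTableDescriptor} to an Avro {@link ATableDescriptor}. */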
  static public ATableDescriptor htdToATD(HTableDescriptor table) throws IOException {
    ATableDescriptor atd = new ATableDescriptor();
    atd.name = ByteBuffer.wrap(table.getName());
    Collection<HColumnDescriptor> families = table.getFamilies();
    Schema afdSchema = Schema.createArray(AFamilyDescriptor.SCHEMA$);
    GenericData.Array<AFamilyDescriptor> afamilies = null;
    if (families.size() > 0) {
      afamilies = new GenericData.Array<AFamilyDescriptor>(families.size(), afdSchema);
      for (HColumnDescriptor hcd : families) {
        AFamilyDescriptor afamily = hcdToAFD(hcd);
        afamilies.add(afamily);
      }
    } else {
      afamilies = new GenericData.Array<AFamilyDescriptor>(0, afdSchema);
    }
    atd.families = afamilies;
    atd.maxFileSize = table.getMaxFileSize();
    atd.memStoreFlushSize = table.getMemStoreFlushSize();
    atd.rootRegion = table.isRootRegion();
    atd.metaRegion = table.isMetaRegion();
    atd.metaTable = table.isMetaTable();
    atd.readOnly = table.isReadOnly();
    atd.deferredLogFlush = table.isDeferredLogFlush();
    return atd;
  }

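  /**
   * Converts an Avro {@link ATableDescriptor} to an {@link HTableDescriptor}, applying only
   * the fields that are set; rejects any attempt to set the root/meta flags.
   */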
  static public HTableDescriptor atdToHTD(ATableDescriptor atd) throws IOException, AIllegalArgument {
    HTableDescriptor htd = new HTableDescriptor(Bytes.toBytes(atd.name));
    if (atd.families != null && atd.families.size() > 0) {
      for (AFamilyDescriptor afd : atd.families) {
        htd.addFamily(afdToHCD(afd));
      }
    }
    if (atd.maxFileSize != null) {
      htd.setMaxFileSize(atd.maxFileSize);
    }
    if (atd.memStoreFlushSize != null) {
      htd.setMemStoreFlushSize(atd.memStoreFlushSize);
    }
    if (atd.readOnly != null) {
      htd.setReadOnly(atd.readOnly);
    }
    if (atd.deferredLogFlush != null) {
      htd.setDeferredLogFlush(atd.deferredLogFlush);
    }
    if (atd.rootRegion != null || atd.metaRegion != null || atd.metaTable != null) {
      AIllegalArgument aie = new AIllegalArgument();
      aie.message = new Utf8("Can't set root or meta flag on create table.");
      throw aie;
    }
    return htd;
  }
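
  //
  // Column family metadata
  //

  /** Converts an {@link HColumnDescriptor} to an Avro {@link AFamilyDescriptor}. */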
  static public AFamilyDescriptor hcdToAFD(HColumnDescriptor hcd) throws IOException {
    AFamilyDescriptor afamily = new AFamilyDescriptor();
    afamily.name = ByteBuffer.wrap(hcd.getName());
    // Use equalsIgnoreCase(): reference comparison (==) on these Strings never matches,
    // and the algorithm name may not use the same case as the constants below.
    String compressionAlgorithm = hcd.getCompressionType().getName();
    if ("LZO".equalsIgnoreCase(compressionAlgorithm)) {
      afamily.compression = ACompressionAlgorithm.LZO;
    } else if ("GZ".equalsIgnoreCase(compressionAlgorithm)) {
      afamily.compression = ACompressionAlgorithm.GZ;
    } else {
      afamily.compression = ACompressionAlgorithm.NONE;
    }
    afamily.maxVersions = hcd.getMaxVersions();
    afamily.blocksize = hcd.getBlocksize();
    afamily.inMemory = hcd.isInMemory();
    afamily.timeToLive = hcd.getTimeToLive();
    afamily.blockCacheEnabled = hcd.isBlockCacheEnabled();
    return afamily;
  }

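  /** Converts an Avro {@link AFamilyDescriptor} to an {@link HColumnDescriptor}, applying only the fields that are set. */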
  static public HColumnDescriptor afdToHCD(AFamilyDescriptor afd) throws IOException {
    HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes(afd.name));

    ACompressionAlgorithm compressionAlgorithm = afd.compression;
    if (compressionAlgorithm == ACompressionAlgorithm.LZO) {
      hcd.setCompressionType(Compression.Algorithm.LZO);
    } else if (compressionAlgorithm == ACompressionAlgorithm.GZ) {
      hcd.setCompressionType(Compression.Algorithm.GZ);
    } else {
      hcd.setCompressionType(Compression.Algorithm.NONE);
    }

    if (afd.maxVersions != null) {
      hcd.setMaxVersions(afd.maxVersions);
    }

    if (afd.blocksize != null) {
      hcd.setBlocksize(afd.blocksize);
    }

    if (afd.inMemory != null) {
      hcd.setInMemory(afd.inMemory);
    }

    if (afd.timeToLive != null) {
      hcd.setTimeToLive(afd.timeToLive);
    }

    if (afd.blockCacheEnabled != null) {
      hcd.setBlockCacheEnabled(afd.blockCacheEnabled);
    }
    return hcd;
  }
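
  //
  // Single-row DML: Gets
  //

  /** Converts an Avro {@link AGet} to a client {@link Get}, copying columns, timestamp/timerange, and max versions. */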
  static public Get agetToGet(AGet aget) throws IOException {
    Get get = new Get(Bytes.toBytes(aget.row));
    if (aget.columns != null) {
      for (AColumn acolumn : aget.columns) {
        if (acolumn.qualifier != null) {
          get.addColumn(Bytes.toBytes(acolumn.family), Bytes.toBytes(acolumn.qualifier));
        } else {
          get.addFamily(Bytes.toBytes(acolumn.family));
        }
      }
    }
    if (aget.timestamp != null) {
      get.setTimeStamp(aget.timestamp);
    }
    if (aget.timerange != null) {
      get.setTimeRange(aget.timerange.minStamp, aget.timerange.maxStamp);
    }
    if (aget.maxVersions != null) {
      get.setMaxVersions(aget.maxVersions);
    }
    return get;
  }
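
  /** Converts a client {@link Result} to an Avro {@link AResult}, one {@link AResultEntry} per KeyValue. */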
  static public AResult resultToAResult(Result result) {
    AResult aresult = new AResult();
    byte[] row = result.getRow();
    // Substitute a single zero byte when the Result carries no row, so the field is never null.
    aresult.row = ByteBuffer.wrap(row != null ? row : new byte[1]);
    Schema s = Schema.createArray(AResultEntry.SCHEMA$);
    GenericData.Array<AResultEntry> entries = null;
    List<KeyValue> resultKeyValues = result.list();
    if (resultKeyValues != null && resultKeyValues.size() > 0) {
      entries = new GenericData.Array<AResultEntry>(resultKeyValues.size(), s);
      for (KeyValue resultKeyValue : resultKeyValues) {
        AResultEntry entry = new AResultEntry();
        entry.family = ByteBuffer.wrap(resultKeyValue.getFamily());
        entry.qualifier = ByteBuffer.wrap(resultKeyValue.getQualifier());
        entry.value = ByteBuffer.wrap(resultKeyValue.getValue());
        entry.timestamp = resultKeyValue.getTimestamp();
        entries.add(entry);
      }
    } else {
      entries = new GenericData.Array<AResultEntry>(0, s);
    }
    aresult.entries = entries;
    return aresult;
  }
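
  //
  // Puts
  //

  /** Converts an Avro {@link APut} to a client {@link Put}, honoring per-column timestamps when present. */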
  static public Put aputToPut(APut aput) throws IOException {
    Put put = new Put(Bytes.toBytes(aput.row));
    for (AColumnValue acv : aput.columnValues) {
      if (acv.timestamp != null) {
        put.add(Bytes.toBytes(acv.family),
                Bytes.toBytes(acv.qualifier),
                acv.timestamp,
                Bytes.toBytes(acv.value));
      } else {
        put.add(Bytes.toBytes(acv.family),
                Bytes.toBytes(acv.qualifier),
                Bytes.toBytes(acv.value));
      }
    }
    return put;
  }
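
  //
  // Deletes
  //

  /** Converts an Avro {@link ADelete} to a client {@link Delete}; a column with no qualifier deletes the whole family. */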
  static public Delete adeleteToDelete(ADelete adelete) throws IOException {
    Delete delete = new Delete(Bytes.toBytes(adelete.row));
    if (adelete.columns != null) {
      for (AColumn acolumn : adelete.columns) {
        if (acolumn.qualifier != null) {
          delete.deleteColumns(Bytes.toBytes(acolumn.family), Bytes.toBytes(acolumn.qualifier));
        } else {
          delete.deleteFamily(Bytes.toBytes(acolumn.family));
        }
      }
    }
    return delete;
  }
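
  //
  // Scans
  //

  /** Converts an Avro {@link AScan} to a client {@link Scan}, copying row bounds, columns, time constraints, and max versions. */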
  static public Scan ascanToScan(AScan ascan) throws IOException {
    Scan scan = new Scan();
    if (ascan.startRow != null) {
      scan.setStartRow(Bytes.toBytes(ascan.startRow));
    }
    if (ascan.stopRow != null) {
      scan.setStopRow(Bytes.toBytes(ascan.stopRow));
    }
    if (ascan.columns != null) {
      for (AColumn acolumn : ascan.columns) {
        if (acolumn.qualifier != null) {
          scan.addColumn(Bytes.toBytes(acolumn.family), Bytes.toBytes(acolumn.qualifier));
        } else {
          scan.addFamily(Bytes.toBytes(acolumn.family));
        }
      }
    }
    if (ascan.timestamp != null) {
      scan.setTimeStamp(ascan.timestamp);
    }
    if (ascan.timerange != null) {
      scan.setTimeRange(ascan.timerange.minStamp, ascan.timerange.maxStamp);
    }
    if (ascan.maxVersions != null) {
      scan.setMaxVersions(ascan.maxVersions);
    }
    return scan;
  }
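
  /** Converts an array of client {@link Result}s to an Avro array of {@link AResult}s. */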
  static public GenericArray<AResult> resultsToAResults(Result[] results) {
    Schema s = Schema.createArray(AResult.SCHEMA$);
    GenericData.Array<AResult> aresults = null;
    if (results != null && results.length > 0) {
      aresults = new GenericData.Array<AResult>(results.length, s);
      for (Result result : results) {
        aresults.add(resultToAResult(result));
      }
    } else {
      aresults = new GenericData.Array<AResult>(0, s);
    }
    return aresults;
  }
}