/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.mapred;

import java.io.IOException;

import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.mapred.RecordReader;

/**
 * Iterate over an HBase table's data, returning (ImmutableBytesWritable, Result) pairs.
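 * <p>
 * A minimal sketch of how this reader is driven, using only the methods
 * declared below; the {@code table}, {@code inputColumns}, and split
 * boundaries are assumed to come from the surrounding job setup:
 * <pre>
 * TableRecordReader reader = new TableRecordReader();
 * reader.setHTable(table);              // assumed: an open HTable
 * reader.setInputColumns(inputColumns); // columns to include in each Result
 * reader.setStartRow(startRow);         // assumed: split boundaries
 * reader.setEndRow(endRow);
 * reader.init();                        // builds the scanner
 * ImmutableBytesWritable key = reader.createKey();
 * Result value = reader.createValue();
 * while (reader.next(key, value)) {
 *   // process the current row
 * }
 * reader.close();
 * </pre>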
 */
public class TableRecordReader
implements RecordReader<ImmutableBytesWritable, Result> {

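  // Delegate that owns the scanner and all reader state; every method in
  // this class forwards to it.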
  private TableRecordReaderImpl recordReaderImpl = new TableRecordReaderImpl();

  /**
   * Restart from survivable exceptions by creating a new scanner.
   *
   * @param firstRow the row the new scanner should start from
   * @throws IOException if a new scanner cannot be opened
   */
  public void restart(byte[] firstRow) throws IOException {
    this.recordReaderImpl.restart(firstRow);
  }

  /**
   * Build the scanner. Not done in constructor to allow for extension.
   *
   * @throws IOException if the scanner cannot be built
   */
  public void init() throws IOException {
    this.recordReaderImpl.restart(this.recordReaderImpl.getStartRow());
  }

  /**
   * @param htable the {@link HTable} to scan.
   */
  public void setHTable(HTable htable) {
    this.recordReaderImpl.setHTable(htable);
  }

  /**
   * @param inputColumns the columns to be placed in {@link Result}.
   */
  public void setInputColumns(final byte [][] inputColumns) {
    this.recordReaderImpl.setInputColumns(inputColumns);
  }

  /**
   * @param startRow the first row in the split
   */
  public void setStartRow(final byte [] startRow) {
    this.recordReaderImpl.setStartRow(startRow);
  }

  /**
   * @param endRow the last row in the split
   */
  public void setEndRow(final byte [] endRow) {
    this.recordReaderImpl.setEndRow(endRow);
  }

  /**
   * @param rowFilter the {@link Filter} to be used.
   */
  public void setRowFilter(Filter rowFilter) {
    this.recordReaderImpl.setRowFilter(rowFilter);
  }

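  /**
   * Close this reader, releasing the resources held by the wrapped
   * implementation.
   *
   * @see org.apache.hadoop.mapred.RecordReader#close()
   */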
  public void close() {
    this.recordReaderImpl.close();
  }

  /**
   * @return an {@link ImmutableBytesWritable} to hold the row key
   *
   * @see org.apache.hadoop.mapred.RecordReader#createKey()
   */
  public ImmutableBytesWritable createKey() {
    return this.recordReaderImpl.createKey();
  }

  /**
   * @return a {@link Result} to hold the row contents
   *
   * @see org.apache.hadoop.mapred.RecordReader#createValue()
   */
  public Result createValue() {
    return this.recordReaderImpl.createValue();
  }

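  /**
   * @return the position reported by the wrapped implementation
   *
   * @see org.apache.hadoop.mapred.RecordReader#getPos()
   */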
  public long getPos() {
    // This should be the ordinal tuple in the range;
    // not clear how to calculate...
    return this.recordReaderImpl.getPos();
  }

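  /**
   * @return progress through the split, as far as it can be estimated here
   *
   * @see org.apache.hadoop.mapred.RecordReader#getProgress()
   */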
  public float getProgress() {
    // A true progress fraction would need the total number of rows in the
    // split, which is not known here, so this simply forwards the position.
    return this.recordReaderImpl.getPos();
  }

  /**
   * @param key {@link ImmutableBytesWritable} to be filled with the row key
   * @param value {@link Result} to be filled with the row contents
   * @return true if there was more data
   * @throws IOException if fetching the next row fails
   */
  public boolean next(ImmutableBytesWritable key, Result value)
  throws IOException {
    return this.recordReaderImpl.next(key, value);
  }
}