/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.mapred.join;

import java.io.IOException;
import java.util.ArrayList;
import java.util.PriorityQueue;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.mapred.JobConf;

/**
 * Prefer the "rightmost" data source for this key.
 * For example, <tt>override(S1,S2,S3)</tt> will prefer values
 * from S3 over S2, and values from S2 over S1 for all keys
 * emitted from all sources.
 * @deprecated Use
 * {@link org.apache.hadoop.mapreduce.lib.join.OverrideRecordReader} instead
 */
@Deprecated
@InterfaceAudience.Public
@InterfaceStability.Stable
public class OverrideRecordReader<K extends WritableComparable,
                                  V extends Writable>
    extends MultiFilterRecordReader<K,V> {

  OverrideRecordReader(int id, JobConf conf, int capacity,
      Class<? extends WritableComparator> cmpcl) throws IOException {
    super(id, conf, capacity, cmpcl);
  }

  /**
   * Emit the value with the highest position in the tuple.
   */
  @SuppressWarnings("unchecked") // No static typeinfo on Tuples
  protected V emit(TupleWritable dst) {
    return (V) dst.iterator().next();
  }

  /**
   * Instead of filling the JoinCollector with iterators from all
   * data sources, fill only the rightmost for this key.
   * This not only saves space by discarding the other sources, but
   * it also emits the number of key-value pairs in the preferred
   * RecordReader instead of repeating that stream n times, where
   * n is the cardinality of the cross product of the discarded
   * streams for the given key.
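   * <p>
   * For example, if the discarded sources hold 3 and 4 records for the
   * current key and the preferred (rightmost) source holds 2, only those
   * 2 values are emitted once, rather than that stream being repeated
   * 3 * 4 = 12 times as a full join over all three sources would do.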
   */
  protected void fillJoinCollector(K iterkey) throws IOException {
    final PriorityQueue<ComposableRecordReader<K,?>> q = getRecordReaderQueue();
    if (!q.isEmpty()) {
      int highpos = -1;
      ArrayList<ComposableRecordReader<K,?>> list =
        new ArrayList<ComposableRecordReader<K,?>>(kids.length);
      q.peek().key(iterkey);
      final WritableComparator cmp = getComparator();
      // Pull every reader whose head record matches iterkey, remembering the
      // position in list of the reader with the greatest id, i.e. the
      // rightmost source.
      while (0 == cmp.compare(q.peek().key(), iterkey)) {
        ComposableRecordReader<K,?> t = q.poll();
        if (-1 == highpos || list.get(highpos).id() < t.id()) {
          highpos = list.size();
        }
        list.add(t);
        if (q.isEmpty())
          break;
      }
      // Only the rightmost reader feeds the JoinCollector; the others skip
      // past this key without contributing values.
      ComposableRecordReader<K,?> t = list.remove(highpos);
      t.accept(jc, iterkey);
      for (ComposableRecordReader<K,?> rr : list) {
        rr.skip(iterkey);
      }
      list.add(t);
      // Return every reader that still has records to the queue.
      for (ComposableRecordReader<K,?> rr : list) {
        if (rr.hasNext()) {
          q.add(rr);
        }
      }
    }
  }

}
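
// A minimal, hypothetical usage sketch, not part of the original class: it
// shows how a job would typically reach OverrideRecordReader, by composing an
// "override" join expression for the deprecated mapred CompositeInputFormat.
// The input paths and the choice of KeyValueTextInputFormat are illustrative
// assumptions, not values taken from this file.
class OverrideJoinExample {
  static JobConf configureOverrideJoin(String basePath, String patchPath) {
    JobConf job = new JobConf();
    job.setInputFormat(CompositeInputFormat.class);
    // "mapred.join.expr" is the join-expression key read by the deprecated
    // mapred CompositeInputFormat; for every key, values read from patchPath
    // override values read from basePath.
    job.set("mapred.join.expr", CompositeInputFormat.compose(
        "override", org.apache.hadoop.mapred.KeyValueTextInputFormat.class,
        basePath, patchPath));
    return job;
  }
}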