/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.mapreduce;

import java.io.IOException;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.mapred.RawKeyValueIterator;

/**
 * Reduces a set of intermediate values which share a key to a smaller set of
 * values.
 *
 * <p><code>Reducer</code> implementations
 * can access the {@link Configuration} for the job via the
 * {@link JobContext#getConfiguration()} method.</p>
 *
 * <p><code>Reducer</code> has 3 primary phases:</p>
 * <ol>
 *   <li>
 *
 *   <h4 id="Shuffle">Shuffle</h4>
 *
 *   <p>The <code>Reducer</code> copies the sorted output from each
 *   {@link Mapper} using HTTP across the network.</p>
 *   </li>
 *
 *   <li>
 *   <h4 id="Sort">Sort</h4>
 *
 *   <p>The framework merge-sorts <code>Reducer</code> inputs by
 *   <code>key</code>s
 *   (since different <code>Mapper</code>s may have output the same key).</p>
 *
 *   <p>The shuffle and sort phases occur simultaneously; i.e., while outputs
 *   are being fetched they are merged.</p>
 *
 *   <h5 id="SecondarySort">Secondary Sort</h5>
 *
 *   <p>To achieve a secondary sort on the values returned by the value
 *   iterator, the application should extend the key with the secondary
 *   key and define a grouping comparator. The keys will be sorted using the
 *   entire key, but will be grouped using the grouping comparator to decide
 *   which keys and values are sent in the same call to reduce. The grouping
 *   comparator is specified via
 *   {@link Job#setGroupingComparatorClass(Class)}. The sort order is
 *   controlled by
 *   {@link Job#setSortComparatorClass(Class)}.</p>
 *
 *   <p>For example, say that you want to find duplicate web pages and tag
 *   them all with the url of the "best" known example. You would set up the
 *   job like:</p>
 *   <ul>
 *     <li>Map Input Key: url</li>
 *     <li>Map Input Value: document</li>
 *     <li>Map Output Key: document checksum, url pagerank</li>
 *     <li>Map Output Value: url</li>
 *     <li>Partitioner: by checksum</li>
 *     <li>OutputKeyComparator: by checksum and then decreasing pagerank</li>
 *     <li>OutputValueGroupingComparator: by checksum</li>
 *   </ul>
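 *
 *   <p>A sketch of the corresponding driver configuration follows. The
 *   composite key, partitioner and comparator classes named here are
 *   illustrative placeholders that the application would supply; they are
 *   not classes shipped with the framework:</p>
 *   <p><blockquote><pre>
 *   Job job = Job.getInstance(conf, "tag duplicate pages");
 *   // composite key: document checksum + url pagerank (hypothetical class)
 *   job.setMapOutputKeyClass(ChecksumRankPair.class);
 *   job.setMapOutputValueClass(Text.class);              // the url
 *   job.setPartitionerClass(ChecksumPartitioner.class);  // by checksum only
 *   // full sort order: checksum, then decreasing pagerank
 *   job.setSortComparatorClass(ChecksumThenRankComparator.class);
 *   // group values for a single reduce() call by checksum alone
 *   job.setGroupingComparatorClass(ChecksumGroupingComparator.class);
 *   </pre></blockquote></p>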
 *   </li>
 *
 *   <li>
 *   <h4 id="Reduce">Reduce</h4>
 *
 *   <p>In this phase the
 *   {@link #reduce(Object, Iterable, Context)}
 *   method is called for each <code>&lt;key, (collection of values)&gt;</code>
 *   in the sorted inputs.</p>
 *   <p>The output of the reduce task is typically written to a
 *   {@link RecordWriter} via
 *   {@link Context#write(Object, Object)}.</p>
 *   </li>
 * </ol>
 *
 * <p>The output of the <code>Reducer</code> is <b>not re-sorted</b>.</p>
 *
 * <p>Example:</p>
 * <p><blockquote><pre>
 * public class IntSumReducer&lt;Key&gt; extends Reducer&lt;Key,IntWritable,
 *                                                 Key,IntWritable&gt; {
 *   private IntWritable result = new IntWritable();
 *
 *   public void reduce(Key key, Iterable&lt;IntWritable&gt; values,
 *                      Context context) throws IOException, InterruptedException {
 *     int sum = 0;
 *     for (IntWritable val : values) {
 *       sum += val.get();
 *     }
 *     result.set(sum);
 *     context.write(key, result);
 *   }
 * }
 * </pre></blockquote></p>
 *
 * @see Mapper
 * @see Partitioner
 */
@InterfaceAudience.Public
@InterfaceStability.Stable
public class Reducer<KEYIN,VALUEIN,KEYOUT,VALUEOUT> {

  /**
   * The <code>Context</code> passed on to the {@link Reducer} implementations.
   */
  public abstract class Context
      implements ReduceContext<KEYIN,VALUEIN,KEYOUT,VALUEOUT> {
  }

  /**
   * Called once at the start of the task.
   */
  protected void setup(Context context)
      throws IOException, InterruptedException {
    // NOTHING
  }

  /**
   * This method is called once for each key. Most applications will define
   * their reduce class by overriding this method. The default implementation
   * is an identity function.
   */
  @SuppressWarnings("unchecked")
  protected void reduce(KEYIN key, Iterable<VALUEIN> values, Context context)
      throws IOException, InterruptedException {
    for (VALUEIN value : values) {
      context.write((KEYOUT) key, (VALUEOUT) value);
    }
  }

  /**
   * Called once at the end of the task.
   */
  protected void cleanup(Context context)
      throws IOException, InterruptedException {
    // NOTHING
  }

  /**
   * Advanced application writers can use the
   * {@link #run(org.apache.hadoop.mapreduce.Reducer.Context)} method to
   * control how the reduce task works.
   */
  @SuppressWarnings("unchecked")
  public void run(Context context) throws IOException, InterruptedException {
    setup(context);
    try {
      while (context.nextKey()) {
        reduce(context.getCurrentKey(), context.getValues(), context);
        // If a backup store is used, reset it so the values iterator is
        // positioned correctly for the next key
        ((ReduceContext.ValueIterator)
            (context.getValues().iterator())).resetBackupStore();
      }
    } finally {
      // run cleanup even if reduce throws, so the task can release resources
      cleanup(context);
    }
  }
}
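
/**
 * A minimal sketch, not part of the original file, of how an advanced
 * application can override {@link Reducer#run} as the Javadoc above
 * suggests. This variant stops after a fixed number of keys; the class name
 * and the {@code MAX_KEYS} limit are illustrative only.
 */
class KeyLimitReducer<KEYIN,VALUEIN,KEYOUT,VALUEOUT>
    extends Reducer<KEYIN,VALUEIN,KEYOUT,VALUEOUT> {

  // illustrative cap on the number of keys processed per reduce task
  private static final int MAX_KEYS = 1000;

  @Override
  @SuppressWarnings("unchecked")
  public void run(Context context) throws IOException, InterruptedException {
    setup(context);
    try {
      int keysSeen = 0;
      while (keysSeen < MAX_KEYS && context.nextKey()) {
        reduce(context.getCurrentKey(), context.getValues(), context);
        keysSeen++;
        // reset the backup store, mirroring the default run() above
        ((ReduceContext.ValueIterator)
            (context.getValues().iterator())).resetBackupStore();
      }
    } finally {
      cleanup(context);
    }
  }
}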