/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.mapred.lib;

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.mapred.InputFormat;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

/**
 * An abstract {@link org.apache.hadoop.mapred.InputFormat} that returns {@link CombineFileSplit}s
 * from its {@link org.apache.hadoop.mapred.InputFormat#getSplits(JobConf, int)} method.
 * Splits are constructed from the files under the input paths.
 * A split cannot have files from different pools.
 * Each split returned may contain blocks from different files.
 * If maxSplitSize is specified, then blocks on the same node are
 * combined to form a single split. Blocks that are left over are
 * then combined with other blocks in the same rack.
 * If maxSplitSize is not specified, then blocks from the same rack
 * are combined in a single split; no attempt is made to create
 * node-local splits.
 * If maxSplitSize is equal to the block size, then this class
 * is similar to the default splitting behaviour in Hadoop: each
 * block is a locally processed split.
 * Subclasses implement {@link org.apache.hadoop.mapred.InputFormat#getRecordReader(InputSplit, JobConf, Reporter)}
 * to construct <code>RecordReader</code>s for <code>CombineFileSplit</code>s.
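 *
 * <p>As an illustration only (the names {@code MyCombineInputFormat} and
 * {@code MyChunkReader} and the 128 MB limit are hypothetical, not part of
 * this API), a concrete subclass might set a maximum split size and delegate
 * record reading to {@link CombineFileRecordReader}:
 * <pre>{@code
 * public class MyCombineInputFormat
 *     extends CombineFileInputFormat<LongWritable, Text> {
 *   public MyCombineInputFormat() {
 *     setMaxSplitSize(134217728);  // combine blocks into splits of up to 128 MB
 *   }
 *   public RecordReader<LongWritable, Text> getRecordReader(
 *       InputSplit split, JobConf job, Reporter reporter) throws IOException {
 *     // MyChunkReader is a hypothetical RecordReader for one file chunk;
 *     // generics warnings from the class literal are elided in this sketch.
 *     return new CombineFileRecordReader<LongWritable, Text>(
 *         job, (CombineFileSplit) split, reporter, (Class) MyChunkReader.class);
 *   }
 * }
 * }</pre>
 *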
 * @see CombineFileSplit
 * @deprecated Use
 * {@link org.apache.hadoop.mapreduce.lib.input.CombineFileInputFormat}
 */
@Deprecated
@InterfaceAudience.Public
@InterfaceStability.Stable
public abstract class CombineFileInputFormat<K, V>
  extends org.apache.hadoop.mapreduce.lib.input.CombineFileInputFormat<K, V>
  implements InputFormat<K, V> {

  /**
   * Default constructor.
   */
  public CombineFileInputFormat() {
  }

  /**
   * Computes the splits by delegating to the new-API superclass and then
   * converting each new-style split into an old-API {@link CombineFileSplit}.
   */
  @Override
  public InputSplit[] getSplits(JobConf job, int numSplits)
    throws IOException {
    // Delegate the block-combining logic to the new-API implementation.
    List<org.apache.hadoop.mapreduce.InputSplit> newStyleSplits =
      super.getSplits(new Job(job));
    // Wrap each new-style split in an old-API CombineFileSplit.
    InputSplit[] ret = new InputSplit[newStyleSplits.size()];
    for (int pos = 0; pos < newStyleSplits.size(); ++pos) {
      org.apache.hadoop.mapreduce.lib.input.CombineFileSplit newStyleSplit =
        (org.apache.hadoop.mapreduce.lib.input.CombineFileSplit) newStyleSplits.get(pos);
      ret[pos] = new CombineFileSplit(job, newStyleSplit.getPaths(),
        newStyleSplit.getStartOffsets(), newStyleSplit.getLengths(),
        newStyleSplit.getLocations());
    }
    return ret;
  }

  /**
   * Create a new pool and add the filters to it.
   * A split cannot have files from different pools.
   * @deprecated Use {@link #createPool(List)}.
   */
  @Deprecated
  protected void createPool(JobConf conf, List<PathFilter> filters) {
    createPool(filters);
  }

  /**
   * Create a new pool and add the filters to it.
   * A pathname can satisfy any one of the specified filters.
   * A split cannot have files from different pools.
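   *
   * <p>A minimal sketch of pool creation (the {@code .log} filename
   * convention is hypothetical, not part of this API); a subclass would
   * typically call the non-deprecated {@link #createPool(PathFilter...)},
   * for example from its constructor:
   * <pre>{@code
   * createPool(new PathFilter() {
   *   public boolean accept(Path path) {
   *     // Only files accepted by this filter are combined with each other.
   *     return path.getName().endsWith(".log");
   *   }
   * });
   * }</pre>
   *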
   * @deprecated Use {@link #createPool(PathFilter...)}.
   */
  @Deprecated
  protected void createPool(JobConf conf, PathFilter... filters) {
    createPool(filters);
  }

  /**
   * Not implemented in this class; subclasses must construct a
   * {@link RecordReader} for the {@link CombineFileSplit} passed in.
   */
  public abstract RecordReader<K, V> getRecordReader(InputSplit split,
                                      JobConf job, Reporter reporter)
    throws IOException;

  /**
   * Implements the abstract method inherited from the new-API superclass.
   * This old-API wrapper does not use it, so it simply returns null.
   */
  @Override
  public org.apache.hadoop.mapreduce.RecordReader<K, V> createRecordReader(
      org.apache.hadoop.mapreduce.InputSplit split,
      TaskAttemptContext context) throws IOException {
    return null;
  }

}