org.apache.hadoop.hive.ql.plan
Class mapredWork
java.lang.Object
org.apache.hadoop.hive.ql.plan.mapredWork
- All Implemented Interfaces:
- Serializable
public class mapredWork
- extends Object
- implements Serializable
- See Also:
- Serialized Form
Constructor Summary
mapredWork()
mapredWork(String command,
           LinkedHashMap<String,ArrayList<String>> pathToAliases,
           LinkedHashMap<String,partitionDesc> pathToPartitionInfo,
           LinkedHashMap<String,Operator<? extends Serializable>> aliasToWork,
           tableDesc keyDesc,
           List<tableDesc> tagToValueDesc,
           Operator<?> reducer,
           Integer numReduceTasks)
Method Summary
void addMapWork(String path, String alias, Operator<?> work, partitionDesc pd)
LinkedHashMap<String,Operator<? extends Serializable>> getAliasToWork()
String getCommand()
tableDesc getKeyDesc()
boolean getNeedsTagging()
Integer getNumReduceTasks()
    If the number of reducers is -1, the runtime will automatically determine it based on the input data size.
LinkedHashMap<String,ArrayList<String>> getPathToAliases()
LinkedHashMap<String,partitionDesc> getPathToPartitionInfo()
Operator<?> getReducer()
List<tableDesc> getTagToValueDesc()
void initialize()
String isInvalid()
void setAliasToWork(LinkedHashMap<String,Operator<? extends Serializable>> aliasToWork)
void setCommand(String command)
void setKeyDesc(tableDesc keyDesc)
void setNeedsTagging(boolean needsTagging)
void setNumReduceTasks(Integer numReduceTasks)
void setPathToAliases(LinkedHashMap<String,ArrayList<String>> pathToAliases)
void setPathToPartitionInfo(LinkedHashMap<String,partitionDesc> pathToPartitionInfo)
void setReducer(Operator<?> reducer)
void setTagToValueDesc(List<tableDesc> tagToValueDesc)
String toXML()

Methods inherited from class java.lang.Object
clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait

Constructor Detail
mapredWork
public mapredWork()
mapredWork
public mapredWork(String command,
LinkedHashMap<String,ArrayList<String>> pathToAliases,
LinkedHashMap<String,partitionDesc> pathToPartitionInfo,
LinkedHashMap<String,Operator<? extends Serializable>> aliasToWork,
tableDesc keyDesc,
List<tableDesc> tagToValueDesc,
Operator<?> reducer,
Integer numReduceTasks)
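For orientation, here is a minimal sketch of assembling a plan through the no-arg constructor and the setters documented on this page. The warehouse path and the "src" alias are purely illustrative, and the import assumes the Hive 0.x package layout.

import java.util.ArrayList;
import java.util.LinkedHashMap;

import org.apache.hadoop.hive.ql.plan.mapredWork;

public class MapredWorkSketch {
  public static void main(String[] args) {
    // Build an empty plan, then fill it in with the setters on this page.
    mapredWork work = new mapredWork();

    // Map side: one input path, processed under the alias "src".
    LinkedHashMap<String, ArrayList<String>> pathToAliases =
        new LinkedHashMap<String, ArrayList<String>>();
    ArrayList<String> aliases = new ArrayList<String>();
    aliases.add("src");
    pathToAliases.put("/user/hive/warehouse/src", aliases);
    work.setPathToAliases(pathToAliases);

    // -1 lets the runtime derive the reducer count from the input size.
    work.setNumReduceTasks(Integer.valueOf(-1));

    // Dump the plan for inspection.
    System.out.println(work.toXML());
  }
}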
Method Detail
getCommand
public String getCommand()
setCommand
public void setCommand(String command)
getPathToAliases
public LinkedHashMap<String,ArrayList<String>> getPathToAliases()
setPathToAliases
public void setPathToAliases(LinkedHashMap<String,ArrayList<String>> pathToAliases)
getPathToPartitionInfo
public LinkedHashMap<String,partitionDesc> getPathToPartitionInfo()
setPathToPartitionInfo
public void setPathToPartitionInfo(LinkedHashMap<String,partitionDesc> pathToPartitionInfo)
getAliasToWork
public LinkedHashMap<String,Operator<? extends Serializable>> getAliasToWork()
setAliasToWork
public void setAliasToWork(LinkedHashMap<String,Operator<? extends Serializable>> aliasToWork)
getKeyDesc
public tableDesc getKeyDesc()
setKeyDesc
public void setKeyDesc(tableDesc keyDesc)
getTagToValueDesc
public List<tableDesc> getTagToValueDesc()
setTagToValueDesc
public void setTagToValueDesc(List<tableDesc> tagToValueDesc)
getReducer
public Operator<?> getReducer()
setReducer
public void setReducer(Operator<?> reducer)
getNumReduceTasks
public Integer getNumReduceTasks()
If the number of reducers is -1, the runtime will automatically determine it based on the input data size.
The number of reducers will be a positive number only when the target table is bucketed into N buckets (through CREATE TABLE). That feature is not supported yet, so the number of reducers is always -1 for now.
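As a hedged illustration of that contract, the sketch below pushes only an explicit positive count into a Hadoop JobConf and leaves the -1 sentinel alone. The helper class and method name are hypothetical; JobConf.setNumReduceTasks(int) is the standard org.apache.hadoop.mapred API.

import org.apache.hadoop.hive.ql.plan.mapredWork;
import org.apache.hadoop.mapred.JobConf;

public class ReducerCountSketch {
  // Hypothetical helper: honor the -1 sentinel described above by only
  // forwarding an explicit positive reducer count to the job configuration.
  public static void applyReducerCount(mapredWork work, JobConf job) {
    Integer n = work.getNumReduceTasks();
    if (n != null && n.intValue() > 0) {
      job.setNumReduceTasks(n.intValue());
    }
    // For -1 (or unset), leave the JobConf alone so the runtime can size
    // the reduce phase from the input data.
  }
}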
setNumReduceTasks
public void setNumReduceTasks(Integer numReduceTasks)
addMapWork
public void addMapWork(String path,
String alias,
Operator<?> work,
partitionDesc pd)
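A sketch of how a caller might use addMapWork to wire one input path into the plan. The Operator tree and partitionDesc arrive pre-built here because their construction is not covered by this page, and the org.apache.hadoop.hive.ql.exec.Operator import reflects the assumed Hive 0.x layout.

import java.io.Serializable;

import org.apache.hadoop.hive.ql.exec.Operator;
import org.apache.hadoop.hive.ql.plan.mapredWork;
import org.apache.hadoop.hive.ql.plan.partitionDesc;

public class MapWorkWiringSketch {
  // Registers one input path under an alias, together with the operator
  // tree that processes its rows and the partition metadata for the path.
  public static void wireInput(mapredWork work, String path, String alias,
                               Operator<? extends Serializable> root,
                               partitionDesc pd) {
    work.addMapWork(path, alias, root, pd);
  }
}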
isInvalid
public String isInvalid()
toXML
public String toXML()
initialize
public void initialize()
getNeedsTagging
public boolean getNeedsTagging()
setNeedsTagging
public void setNeedsTagging(boolean needsTagging)
Copyright © 2009 The Apache Software Foundation