Uses of Class
org.apache.hadoop.hive.ql.metadata.HiveException

Packages that use HiveException
org.apache.hadoop.hive.ql.exec   
org.apache.hadoop.hive.ql.metadata   
org.apache.hadoop.hive.ql.parse   
org.apache.hadoop.hive.ql.session   
 

Uses of HiveException in org.apache.hadoop.hive.ql.exec
 

Methods in org.apache.hadoop.hive.ql.exec that throw HiveException
 void FileSinkOperator.close(boolean abort)
           
 void ScriptOperator.close(boolean abort)
           
 void JoinOperator.close(boolean abort)
          Called when all rows have been processed.
 void GroupByOperator.close(boolean abort)
          Forwards all remaining aggregations to the child operators.
 void Operator.close(boolean abort)
           
 void JoinOperator.endGroup()
          Forwards a record of join results.
 void Operator.endGroup()
           
 void ExprNodeColumnEvaluator.evaluate(Object row, ObjectInspector rowInspector, InspectableObject result)
           
 void ExprNodeNullEvaluator.evaluate(Object row, ObjectInspector rowInspector, InspectableObject result)
           
 void ExprNodeIndexEvaluator.evaluate(Object row, ObjectInspector rowInspector, InspectableObject result)
           
 void ExprNodeFuncEvaluator.evaluate(Object row, ObjectInspector rowInspector, InspectableObject result)
           
 void ExprNodeFieldEvaluator.evaluate(Object row, ObjectInspector rowInspector, InspectableObject result)
           
 void ExprNodeConstantEvaluator.evaluate(Object row, ObjectInspector rowInspector, InspectableObject result)
           
abstract  void ExprNodeEvaluator.evaluate(Object row, ObjectInspector rowInspector, InspectableObject result)
          Evaluates the expression given the row and rowInspector.
 ObjectInspector ExprNodeColumnEvaluator.evaluateInspector(ObjectInspector rowInspector)
           
 ObjectInspector ExprNodeNullEvaluator.evaluateInspector(ObjectInspector rowInspector)
           
 ObjectInspector ExprNodeIndexEvaluator.evaluateInspector(ObjectInspector rowInspector)
           
 ObjectInspector ExprNodeFuncEvaluator.evaluateInspector(ObjectInspector rowInspector)
           
 ObjectInspector ExprNodeFieldEvaluator.evaluateInspector(ObjectInspector rowInspector)
           
 ObjectInspector ExprNodeConstantEvaluator.evaluateInspector(ObjectInspector rowInspector)
           
abstract  ObjectInspector ExprNodeEvaluator.evaluateInspector(ObjectInspector rowInspector)
          Metadata evaluation; returns the ObjectInspector describing the result of evaluate.
protected  void GroupByOperator.forward(ArrayList<Object> keys, UDAFEvaluator[] aggs)
          Forwards a record of keys and aggregation results.
protected  void Operator.forward(Object row, ObjectInspector rowInspector)
           
 void FileSinkOperator.initialize(org.apache.hadoop.conf.Configuration hconf, org.apache.hadoop.mapred.Reporter reporter)
           
 void CollectOperator.initialize(org.apache.hadoop.conf.Configuration hconf, org.apache.hadoop.mapred.Reporter reporter)
           
 void TableScanOperator.initialize(org.apache.hadoop.conf.Configuration hconf, org.apache.hadoop.mapred.Reporter reporter)
           
 void SelectOperator.initialize(org.apache.hadoop.conf.Configuration hconf, org.apache.hadoop.mapred.Reporter reporter)
           
 void ScriptOperator.initialize(org.apache.hadoop.conf.Configuration hconf, org.apache.hadoop.mapred.Reporter reporter)
           
 void ReduceSinkOperator.initialize(org.apache.hadoop.conf.Configuration hconf, org.apache.hadoop.mapred.Reporter reporter)
           
 void LimitOperator.initialize(org.apache.hadoop.conf.Configuration hconf, org.apache.hadoop.mapred.Reporter reporter)
           
 void JoinOperator.initialize(org.apache.hadoop.conf.Configuration hconf, org.apache.hadoop.mapred.Reporter reporter)
           
 void GroupByOperator.initialize(org.apache.hadoop.conf.Configuration hconf, org.apache.hadoop.mapred.Reporter reporter)
           
 void ForwardOperator.initialize(org.apache.hadoop.conf.Configuration hconf, org.apache.hadoop.mapred.Reporter reporter)
           
 void FilterOperator.initialize(org.apache.hadoop.conf.Configuration hconf, org.apache.hadoop.mapred.Reporter reporter)
           
 void ExtractOperator.initialize(org.apache.hadoop.conf.Configuration hconf, org.apache.hadoop.mapred.Reporter reporter)
           
 void MapOperator.initialize(org.apache.hadoop.conf.Configuration hconf, org.apache.hadoop.mapred.Reporter reporter)
           
 void Operator.initialize(org.apache.hadoop.conf.Configuration hconf, org.apache.hadoop.mapred.Reporter reporter)
           
static Object FunctionRegistry.invoke(Method m, Object thisObject, Object[] arguments)
           
 void FileSinkOperator.jobClose(org.apache.hadoop.conf.Configuration hconf, boolean success)
           
 void Operator.jobClose(org.apache.hadoop.conf.Configuration conf, boolean success)
          Unlike other operator interfaces, which are called from a map or reduce task, jobClose is called from the job client side once the job has completed.
static void ExecDriver.main(String[] args)
           
protected  UDAFEvaluator[] GroupByOperator.newAggregations()
           
 void FileSinkOperator.process(Object row, ObjectInspector rowInspector)
           
 void CollectOperator.process(Object row, ObjectInspector rowInspector)
           
 void UnionOperator.process(Object row, ObjectInspector rowInspector)
           
 void TableScanOperator.process(Object row, ObjectInspector rowInspector)
          Currently, the table scan operator does nothing special beyond forwarding the row.
 void SelectOperator.process(Object row, ObjectInspector rowInspector)
           
 void ScriptOperator.process(Object row, ObjectInspector rowInspector)
           
 void ReduceSinkOperator.process(Object row, ObjectInspector rowInspector)
           
 void LimitOperator.process(Object row, ObjectInspector rowInspector)
           
 void JoinOperator.process(Object row, ObjectInspector rowInspector)
           
 void GroupByOperator.process(Object row, ObjectInspector rowInspector)
           
 void ForwardOperator.process(Object row, ObjectInspector rowInspector)
           
 void FilterOperator.process(Object row, ObjectInspector rowInspector)
           
 void ExtractOperator.process(Object row, ObjectInspector rowInspector)
           
 void MapOperator.process(Object row, ObjectInspector rowInspector)
           
abstract  void Operator.process(Object row, ObjectInspector rowInspector)
           
 void MapOperator.process(org.apache.hadoop.io.Writable value)
           
 void JoinOperator.startGroup()
           
 void Operator.startGroup()
           
protected  void GroupByOperator.updateAggregations(UDAFEvaluator[] aggs, Object row, ObjectInspector rowInspector, boolean hashAggr, boolean newEntry, Object[][] lastInvoke)
           
 
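Taken together, these methods form the operator lifecycle: initialize once, process each row, then close. The following minimal sketch drives a single operator through that lifecycle, using only the signatures listed above; the operator, configuration, reporter, rows, and row inspector are assumed to be supplied by the caller, and the surrounding class and method names are hypothetical.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.ql.exec.Operator;
    import org.apache.hadoop.hive.ql.metadata.HiveException;
    import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
    import org.apache.hadoop.mapred.Reporter;

    public class OperatorLifecycleSketch {
      // Drives one operator through initialize/process/close; every call
      // below can throw HiveException, as the table above lists.
      public static void drive(Operator op, Configuration hconf, Reporter reporter,
          Iterable<Object> rows, ObjectInspector rowInspector) throws HiveException {
        boolean abort = false;
        try {
          op.initialize(hconf, reporter);
          for (Object row : rows) {
            op.process(row, rowInspector);
          }
        } catch (HiveException e) {
          abort = true;      // tell close() the run failed
          throw e;
        } finally {
          op.close(abort);   // close() may throw HiveException as well
        }
      }
    }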

Constructors in org.apache.hadoop.hive.ql.exec that throw HiveException
ExecDriver(mapredWork plan, org.apache.hadoop.mapred.JobConf job, boolean isSilent)
          Constructor/initialization for invocation as an independent utility.
 
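A brief sketch of using this constructor as a standalone utility entry point; the plan and job objects are assumed to have been built elsewhere, mapredWork is assumed to live in org.apache.hadoop.hive.ql.plan, and the wrapper class is hypothetical.

    import org.apache.hadoop.hive.ql.exec.ExecDriver;
    import org.apache.hadoop.hive.ql.metadata.HiveException;
    import org.apache.hadoop.hive.ql.plan.mapredWork;
    import org.apache.hadoop.mapred.JobConf;

    public class ExecDriverSketch {
      public static void run(mapredWork plan, JobConf job) {
        try {
          // The constructor itself can throw HiveException during initialization.
          ExecDriver driver = new ExecDriver(plan, job, /* isSilent */ true);
          // ... submit the plan for execution ...
        } catch (HiveException e) {
          System.err.println("driver initialization failed: " + e.getMessage());
        }
      }
    }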

Uses of HiveException in org.apache.hadoop.hive.ql.metadata
 

Subclasses of HiveException in org.apache.hadoop.hive.ql.metadata
 class InvalidTableException
          Thrown when the table specified in a metadata operation is invalid or does not exist.
 
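Because InvalidTableException is a subclass, callers can catch it ahead of the general HiveException to treat a missing table as a recoverable case. A hedged sketch using Hive.getTable from the table below; the database and table names are placeholders and the wrapper class is hypothetical.

    import org.apache.hadoop.hive.ql.metadata.Hive;
    import org.apache.hadoop.hive.ql.metadata.HiveException;
    import org.apache.hadoop.hive.ql.metadata.InvalidTableException;
    import org.apache.hadoop.hive.ql.metadata.Table;

    public class CatchOrderSketch {
      public static Table lookup(Hive db) {
        try {
          return db.getTable("default", "my_table");   // placeholder names
        } catch (InvalidTableException e) {
          return null;   // table missing or invalid: recoverable here
        } catch (HiveException e) {
          throw new RuntimeException("metastore failure", e);
        }
      }
    }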

Methods in org.apache.hadoop.hive.ql.metadata that throw HiveException
 void Hive.alterTable(String tblName, Table newTbl)
          Updates the existing table metadata with the new metadata.
 void HiveMetaStoreChecker.checkMetastore(String dbName, String tableName, List<Map<String,String>> partitions, CheckResult result)
          Checks the metastore for inconsistencies: data missing in either the metastore or on the DFS.
 void Table.checkValidity()
           
protected  void Partition.copyFiles(org.apache.hadoop.fs.Path srcf)
          Inserts the specified files into the partition.
protected  void Table.copyFiles(org.apache.hadoop.fs.Path srcf)
          Inserts the specified files into the table.
protected  void Hive.copyFiles(org.apache.hadoop.fs.Path srcf, org.apache.hadoop.fs.Path destf, org.apache.hadoop.fs.FileSystem fs)
           
 Partition Hive.createPartition(Table tbl, Map<String,String> partSpec)
          Creates a partition.
 Partition Hive.createPartition(Table tbl, Map<String,String> partSpec, org.apache.hadoop.fs.Path location)
          Creates a partition at the given location.
 void Hive.createTable(String tableName, List<String> columns, List<String> partCols, Class<? extends org.apache.hadoop.mapred.InputFormat> fileInputFormat, Class<? extends org.apache.hadoop.mapred.OutputFormat> fileOutputFormat)
          Creates the table metadata and the directory for the table data.
 void Hive.createTable(String tableName, List<String> columns, List<String> partCols, Class<? extends org.apache.hadoop.mapred.InputFormat> fileInputFormat, Class<? extends org.apache.hadoop.mapred.OutputFormat> fileOutputFormat, int bucketCount, List<String> bucketCols)
          Creates the table metadata and the directory for the table data.
 void Hive.createTable(Table tbl)
          Creates the table from the given Table object.
 void Hive.createTable(Table tbl, boolean ifNotExists)
          Creates the table from the given Table object; if ifNotExists is true, no error is raised when the table already exists.
 boolean Hive.dropPartition(String db_name, String tbl_name, List<String> part_vals, boolean deleteData)
           
 void Hive.dropTable(String tableName)
          Deprecated. Use Hive.dropTable(String, String) instead
 void Hive.dropTable(String tableName, boolean deleteData, boolean ignoreUnknownTab)
          Deprecated. Use Hive.dropTable(String, String, boolean, boolean) instead
 void Hive.dropTable(String dbName, String tableName)
          Drops the table along with its data.
 void Hive.dropTable(String dbName, String tableName, boolean deleteData, boolean ignoreUnknownTab)
          Drops the table.
static Hive Hive.get()
           
static Hive Hive.get(HiveConf c)
          Gets the Hive object for the current thread.
static Hive Hive.get(HiveConf c, boolean needsRefresh)
          Gets a connection to the metastore, refreshing it first if needsRefresh is true.
 List<String> Hive.getAllTables()
           
static List<FieldSchema> Hive.getFieldsFromDeserializer(String name, Deserializer serde)
           
 Partition Hive.getPartition(Table tbl, Map<String,String> partSpec, boolean forceCreate)
          Returns the partition metadata.
 List<String> Hive.getPartitionNames(String dbName, String tblName, short max)
           
 List<Partition> Hive.getPartitions(Table tbl)
          Gets all the partitions of the table.
 org.apache.hadoop.fs.Path[] Partition.getPath(Sample s)
           
 Table Hive.getTable(String tableName)
          Deprecated. Use Hive.getTable(String, String) instead
 Table Hive.getTable(String tableName, boolean throwException)
          Deprecated. Use Hive.getTable(String, String, boolean) instead
 Table Hive.getTable(String dbName, String tableName)
          Returns metadata of the table.
 Table Hive.getTable(String dbName, String tableName, boolean throwException)
          Returns metadata of the table.
 List<String> Hive.getTablesByPattern(String tablePattern)
          Returns all existing tables that match the given pattern.
protected  List<String> Hive.getTablesForDb(String database, String tablePattern)
           
protected  void Table.initSerDe()
           
 boolean Table.isValidSpec(Map<String,String> spec)
           
 void Hive.loadPartition(org.apache.hadoop.fs.Path loadPath, String tableName, AbstractMap<String,String> partSpec, boolean replace, org.apache.hadoop.fs.Path tmpDirPath)
          Loads a directory into a Hive table partition, altering the existing content of the partition with the contents of loadPath.
 void Hive.loadTable(org.apache.hadoop.fs.Path loadPath, String tableName, boolean replace, org.apache.hadoop.fs.Path tmpDirPath)
          Loads a directory into a Hive table.
 void Table.reinitSerDe()
           
protected  void Partition.replaceFiles(org.apache.hadoop.fs.Path srcf, org.apache.hadoop.fs.Path tmpd)
          Replaces files in the partition with the new data set specified by srcf.
protected  void Table.replaceFiles(org.apache.hadoop.fs.Path srcf, org.apache.hadoop.fs.Path tmpd)
          Replaces files in the table with the new data set specified by srcf.
protected  void Hive.replaceFiles(org.apache.hadoop.fs.Path srcf, org.apache.hadoop.fs.Path destf, org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path tmppath)
          Replaces files in the partition with the new data set specified by srcf.
 void Table.setBucketCols(List<String> bucketCols)
           
 void Table.setInputFormatClass(String name)
           
 void Table.setOutputFormatClass(String name)
           
 void Table.setSortCols(List<Order> sortOrder)
           
 
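A minimal end-to-end sketch of the metadata API listed above: obtain the thread-local Hive handle, create a table, and drop it again. It assumes a reachable metastore; the table name, columns, and I/O format classes are placeholders (production Hive tables typically use Hive-specific output formats), and the surrounding class is hypothetical.

    import java.util.Arrays;
    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.ql.metadata.Hive;
    import org.apache.hadoop.hive.ql.metadata.HiveException;
    import org.apache.hadoop.mapred.TextInputFormat;
    import org.apache.hadoop.mapred.TextOutputFormat;

    public class MetadataSketch {
      public static void main(String[] args) {
        try {
          Hive db = Hive.get(new HiveConf(MetadataSketch.class));
          db.createTable("example_tbl",          // placeholder table name
              Arrays.asList("key", "value"),     // columns
              Arrays.asList("ds"),               // partition columns
              TextInputFormat.class,
              TextOutputFormat.class);
          // ... load data, list partitions, etc. ...
          db.dropTable("default", "example_tbl");
        } catch (HiveException e) {
          // every metadata call above can fail with HiveException
          e.printStackTrace();
        }
      }
    }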

Constructors in org.apache.hadoop.hive.ql.metadata that throw HiveException
Partition(Table tbl, Map<String,String> partSpec, org.apache.hadoop.fs.Path location)
          Creates a partition object with the given info.
Partition(Table tbl, Partition tp)
           
Sample(int num, int fraction, Dimension d)
           
Table()
          Default constructor; only used internally.
Table(String name, Properties schema, Deserializer deserializer, Class<? extends org.apache.hadoop.mapred.InputFormat<?,?>> inputFormatClass, Class<? extends org.apache.hadoop.mapred.OutputFormat<?,?>> outputFormatClass, URI dataLocation, Hive hive)
          Creates a TableMetaInfo object, presumably with the intent of saving it to the metastore.
 
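A short sketch of the Partition(Table, Map, Path) constructor; tbl is assumed to be a partitioned table already fetched from the metastore, the partition spec is a placeholder, and passing null for the location is assumed to select the default path under the table's data location.

    import java.util.LinkedHashMap;
    import java.util.Map;
    import org.apache.hadoop.hive.ql.metadata.HiveException;
    import org.apache.hadoop.hive.ql.metadata.Partition;
    import org.apache.hadoop.hive.ql.metadata.Table;

    public class PartitionSketch {
      public static Partition make(Table tbl) {
        Map<String, String> partSpec = new LinkedHashMap<String, String>();
        partSpec.put("ds", "2009-01-01");   // placeholder partition spec
        try {
          return new Partition(tbl, partSpec, null);   // null: assumed default location
        } catch (HiveException e) {
          return null;   // e.g. spec does not match the table's partition columns
        }
      }
    }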

Uses of HiveException in org.apache.hadoop.hive.ql.parse
 

Methods in org.apache.hadoop.hive.ql.parse that throw HiveException
 PartitionPruner.PrunedPartitionList PartitionPruner.prune()
          Prunes the partitions based on the table metadata and returns the pruned partition list.
 
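A sketch of invoking the pruner during compilation; the pruner is assumed to have been populated with the query's partition predicates by semantic analysis, and the wrapper class is hypothetical.

    import org.apache.hadoop.hive.ql.metadata.HiveException;
    import org.apache.hadoop.hive.ql.parse.PartitionPruner;

    public class PrunerSketch {
      public static PartitionPruner.PrunedPartitionList prune(PartitionPruner pruner) {
        try {
          return pruner.prune();   // may read partition metadata; can throw
        } catch (HiveException e) {
          throw new RuntimeException("partition pruning failed", e);
        }
      }
    }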

Uses of HiveException in org.apache.hadoop.hive.ql.session
 

Methods in org.apache.hadoop.hive.ql.session that throw HiveException
 Hive SessionState.getDb()
           
 
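A sketch combining this method with the metadata API above: fetch the session's Hive handle and list its tables. The SessionState instance is assumed to have been initialized for the current session, and the wrapper class is hypothetical.

    import java.util.List;
    import org.apache.hadoop.hive.ql.metadata.Hive;
    import org.apache.hadoop.hive.ql.metadata.HiveException;
    import org.apache.hadoop.hive.ql.session.SessionState;

    public class SessionSketch {
      public static void listTables(SessionState ss) {
        try {
          Hive db = ss.getDb();                      // can throw HiveException
          List<String> tables = db.getAllTables();   // metadata API shown above
          for (String name : tables) {
            System.out.println(name);
          }
        } catch (HiveException e) {
          System.err.println("metastore unavailable: " + e.getMessage());
        }
      }
    }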



Copyright © 2009 The Apache Software Foundation