package com.cloudera.impala.service;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;

import org.apache.hadoop.hive.common.StatsSetupConst;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData;
import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
import org.apache.hadoop.hive.metastore.api.DecimalColumnStatsData;
import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.LongColumnStatsData;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.SerDeInfo;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.StringColumnStatsData;
import org.apache.hadoop.hive.metastore.MetaStoreUtils;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.log4j.Logger;
import org.apache.thrift.TException;

import com.cloudera.impala.common.Pair;
import com.cloudera.impala.thrift.ImpalaInternalServiceConstants;
import com.cloudera.impala.thrift.JniCatalogConstants;
import com.cloudera.impala.thrift.TAlterTableAddPartitionParams;
import com.cloudera.impala.thrift.TAlterTableAddReplaceColsParams;
import com.cloudera.impala.thrift.TAlterTableChangeColParams;
import com.cloudera.impala.thrift.TAlterTableDropColParams;
import com.cloudera.impala.thrift.TAlterTableDropPartitionParams;
import com.cloudera.impala.thrift.TAlterTableOrViewRenameParams;
import com.cloudera.impala.thrift.TAlterTableParams;
import com.cloudera.impala.thrift.TAlterTableSetCachedParams;
import com.cloudera.impala.thrift.TAlterTableSetFileFormatParams;
import com.cloudera.impala.thrift.TAlterTableSetLocationParams;
import com.cloudera.impala.thrift.TAlterTableSetTblPropertiesParams;
import com.cloudera.impala.thrift.TAlterTableUpdateStatsParams;
import com.cloudera.impala.thrift.TCatalogObject;
import com.cloudera.impala.thrift.TCatalogObjectType;
import com.cloudera.impala.thrift.TCatalogUpdateResult;
import com.cloudera.impala.thrift.TColumn;
import com.cloudera.impala.thrift.TColumnStats;
import com.cloudera.impala.thrift.TColumnType;
import com.cloudera.impala.thrift.TColumnValue;
import com.cloudera.impala.thrift.TCreateDataSourceParams;
import com.cloudera.impala.thrift.TCreateDbParams;
import com.cloudera.impala.thrift.TCreateDropRoleParams;
import com.cloudera.impala.thrift.TCreateFunctionParams;
import com.cloudera.impala.thrift.TCreateOrAlterViewParams;
import com.cloudera.impala.thrift.TCreateTableLikeParams;
import com.cloudera.impala.thrift.TCreateTableParams;
import com.cloudera.impala.thrift.TDatabase;
import com.cloudera.impala.thrift.TDdlExecRequest;
import com.cloudera.impala.thrift.TDdlExecResponse;
import com.cloudera.impala.thrift.TDropDataSourceParams;
import com.cloudera.impala.thrift.TDropDbParams;
import com.cloudera.impala.thrift.TDropFunctionParams;
import com.cloudera.impala.thrift.TDropStatsParams;
import com.cloudera.impala.thrift.TDropTableOrViewParams;
import com.cloudera.impala.thrift.TErrorCode;
import com.cloudera.impala.thrift.TGrantRevokePrivParams;
import com.cloudera.impala.thrift.TGrantRevokeRoleParams;
import com.cloudera.impala.thrift.THdfsCachingOp;
import com.cloudera.impala.thrift.THdfsFileFormat;
import com.cloudera.impala.thrift.TPartitionKeyValue;
import com.cloudera.impala.thrift.TPartitionStats;
import com.cloudera.impala.thrift.TPrivilege;
import com.cloudera.impala.thrift.TResetMetadataRequest;
import com.cloudera.impala.thrift.TResetMetadataResponse;
import com.cloudera.impala.thrift.TResultRow;
import com.cloudera.impala.thrift.TResultSet;
import com.cloudera.impala.thrift.TResultSetMetadata;
import com.cloudera.impala.thrift.TStatus;
import com.cloudera.impala.thrift.TTable;
import com.cloudera.impala.thrift.TTableName;
import com.cloudera.impala.thrift.TTableStats;
import com.cloudera.impala.thrift.TUpdateCatalogRequest;
import com.cloudera.impala.thrift.TUpdateCatalogResponse;

import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.google.common.util.concurrent.SettableFuture;
 
      "Error making '%s' RPC to Hive Metastore: ";

  private static final Logger LOG = Logger.getLogger(CatalogOpExecutor.class);
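  // Excerpt of the DDL dispatch logic: each incoming TDdlExecRequest type is routed to
  // the matching handler below, and the response status is set to OK once the request
  // completes.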
 
    TDdlExecResponse response = new TDdlExecResponse();
    response.setResult(new TCatalogUpdateResult());
    User requestingUser = null;
    if (ddlRequest.isSetHeader()) {
      requestingUser = new User(ddlRequest.getHeader().getRequesting_user());

    switch (ddlRequest.ddl_type) {
        alterTable(ddlRequest.getAlter_table_params(), response);
        alterView(ddlRequest.getAlter_view_params(), response);
      case CREATE_DATABASE:
      case CREATE_TABLE_AS_SELECT:
        response.setNew_table_created(
            createTable(ddlRequest.getCreate_table_params(), response));
        createTable(ddlRequest.getCreate_table_params(), response);
      case CREATE_TABLE_LIKE:
        createView(ddlRequest.getCreate_view_params(), response);
      case CREATE_FUNCTION:
      case CREATE_DATA_SOURCE:
        Preconditions.checkState(false, "Compute stats should trigger an ALTER TABLE.");
        dropStats(ddlRequest.getDrop_stats_params(), response);
      case DROP_DATA_SOURCE:
        dropDataSource(ddlRequest.getDrop_data_source_params(), response);
        createDropRole(requestingUser, ddlRequest.getCreate_drop_role_params(),
      case GRANT_PRIVILEGE:
      case REVOKE_PRIVILEGE:
            ddlRequest.getGrant_revoke_priv_params(), response);
      default: throw new IllegalStateException("Unexpected DDL exec request type: " +
          ddlRequest.ddl_type);

    response.getResult().setStatus(new TStatus(TErrorCode.OK, new ArrayList<String>()));
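  // alterTable() excerpt: dispatches on TAlterTableParams.alter_type, applies the
  // requested change, then reloads the table and records its new catalog version in the
  // response.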
 
  private void alterTable(TAlterTableParams params, TDdlExecResponse response)
    switch (params.getAlter_type()) {
      case ADD_REPLACE_COLUMNS:
        TAlterTableAddReplaceColsParams addReplaceColParams =
            params.getAdd_replace_cols_params();
            addReplaceColParams.getColumns(),
            addReplaceColParams.isReplace_existing_cols());
        TAlterTableAddPartitionParams addPartParams = params.getAdd_partition_params();
            params.getTable_name()), addPartParams.getPartition_spec(),
            addPartParams.isIf_not_exists(), addPartParams.getLocation(),
            addPartParams.getCache_op());
        response.result.setVersion(
            response.result.getUpdated_catalog_object().getCatalog_version());
        TAlterTableDropColParams dropColParams = params.getDrop_col_params();
            dropColParams.getCol_name());
        TAlterTableChangeColParams changeColParams = params.getChange_col_params();
            changeColParams.getCol_name(), changeColParams.getNew_col_def());
        TAlterTableDropPartitionParams dropPartParams = params.getDrop_partition_params();
            params.getTable_name()), dropPartParams.getPartition_spec(),
            dropPartParams.isIf_exists());
        response.result.setVersion(
            response.result.getUpdated_catalog_object().getCatalog_version());
        TAlterTableOrViewRenameParams renameParams = params.getRename_params();
      case SET_FILE_FORMAT:
        TAlterTableSetFileFormatParams fileFormatParams =
            params.getSet_file_format_params();
        List<TPartitionKeyValue> fileFormatPartitionSpec = null;
        if (fileFormatParams.isSetPartition_spec()) {
          fileFormatPartitionSpec = fileFormatParams.getPartition_spec();
            fileFormatPartitionSpec, fileFormatParams.getFile_format());
        TAlterTableSetLocationParams setLocationParams = params.getSet_location_params();
        List<TPartitionKeyValue> partitionSpec = null;
        if (setLocationParams.isSetPartition_spec()) {
          partitionSpec = setLocationParams.getPartition_spec();
            partitionSpec, setLocationParams.getLocation());
      case SET_TBL_PROPERTIES:
            params.getSet_tbl_properties_params());
        Preconditions.checkState(params.isSetUpdate_stats_params());
        Preconditions.checkState(params.isSetSet_cached_params());
        if (params.getSet_cached_params().getPartition_spec() == null) {
              params.getSet_cached_params());
              params.getSet_cached_params());
        throw new UnsupportedOperationException(
            "Unknown ALTER TABLE operation type: " + params.getAlter_type());

    Table refreshedTable = catalog_.reloadTable(params.getTable_name());
    response.result.setVersion(
        response.result.getUpdated_catalog_object().getCatalog_version());

    Preconditions.checkNotNull(partition);
      throw new CatalogException("Table " + tbl.getFullName() + " is not an HDFS table");
    HdfsTable hdfsTable = (HdfsTable) tbl;
    HdfsPartition hdfsPartition = hdfsTable.createPartition(partition.getSd(), partition);
    return catalog_.addPartition(hdfsPartition);
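  // alterView() excerpt: validates the fully qualified view name and non-empty column
  // list, rejects ALTER VIEW on non-view tables, then reloads the view and records its
  // new catalog version.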
 
  private void alterView(TCreateOrAlterViewParams params, TDdlExecResponse resp)
    TableName tableName = TableName.fromThrift(params.getView_name());
    Preconditions.checkState(tableName != null && tableName.isFullyQualified());
    Preconditions.checkState(params.getColumns() != null &&
        params.getColumns().size() > 0,
          "Null or empty column list given as argument to DdlExecutor.alterView");
      if (!msTbl.getTableType().equalsIgnoreCase((TableType.VIRTUAL_VIEW.toString()))) {
            String.format("ALTER VIEW not allowed on a table: %s",
      LOG.debug(String.format("Altering view %s", tableName));
    Table refreshedTbl = catalog_.reloadTable(tableName.toThrift());
    resp.result.setVersion(resp.result.getUpdated_catalog_object().getCatalog_version());
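  // Excerpts of the table and column statistics update path (COMPUTE STATS): row counts
  // and per-partition stats are written to metastore parameters, column stats are sent
  // via updateTableColumnStatistics(), and a one-row summary is returned to the client.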
 
    Preconditions.checkState(params.isSetPartition_stats() && params.isSetTable_stats());
    TableName tableName = TableName.fromThrift(params.getTable_name());
    Preconditions.checkState(tableName != null && tableName.isFullyQualified());
    LOG.info(String.format("Updating table stats for: %s", tableName));
    org.apache.hadoop.hive.metastore.api.Table msTbl =
        table.getMetaStoreTable().deepCopy();
    List<HdfsPartition> partitions = Lists.newArrayList();
      HdfsTable hdfsTable = (HdfsTable) table;
        if (!p.isDefaultPartition()) partitions.add(p);
    int numTargetedPartitions;
    int numUpdatedColumns = 0;
      List<HdfsPartition> modifiedParts = Lists.newArrayList();
      ColumnStatistics colStats = null;
      if (params.isSetColumn_stats()) {
        numUpdatedColumns = colStats.getStatsObjSize();
        if (numUpdatedColumns > 0) {
          Preconditions.checkNotNull(colStats);
            msClient.getHiveClient().updateTableColumnStatistics(colStats);
          } catch (Exception e) {
                    "updateTableColumnStatistics"), e);
    TResultSet resultSet = new TResultSet();
    resultSet.setSchema(new TResultSetMetadata(Lists.newArrayList(
    TColumnValue resultColVal = new TColumnValue();
    resultColVal.setString_val("Updated " + numTargetedPartitions + " partition(s) and " +
        numUpdatedColumns + " column(s).");
    TResultRow resultRow = new TResultRow();
    resultRow.setColVals(Lists.newArrayList(resultColVal));
    resultSet.setRows(Lists.newArrayList(resultRow));
    resp.setResult_set(resultSet);

      org.apache.hadoop.hive.metastore.api.Table msTbl,
      List<HdfsPartition> partitions, List<HdfsPartition> modifiedParts)
    Preconditions.checkState(params.isSetPartition_stats());
    Preconditions.checkState(params.isSetTable_stats());
    int numTargetedPartitions = 0;
      List<String> partitionValues = partition.getPartitionValuesAsStrings(false);
      TPartitionStats partitionStats = params.partition_stats.get(partitionValues);
      TPartitionStats existingPartStats =
          PartitionStatsUtil.partStatsFromParameters(partition.getParameters());
      if (partitionStats == null) {
        if (params.expect_all_partitions == false) continue;
        partitionStats = new TPartitionStats();
        if (params.is_incremental) {
          partitionStats.intermediate_col_stats = Maps.newHashMap();
        partitionStats.stats = new TTableStats();
        partitionStats.stats.setNum_rows(0L);
      long numRows = partitionStats.stats.num_rows;
      LOG.debug(String.format("Updating stats for partition %s: numRows=%s",
          partition.getValuesAsString(), numRows));
      boolean updatedPartition = false;
      if (existingPartStats == null || !existingPartStats.equals(partitionStats)) {
        PartitionStatsUtil.partStatsToParameters(partitionStats, partition);
        updatedPartition = true;
      String existingRowCount =
          partition.getParameters().get(StatsSetupConst.ROW_COUNT);
      String newRowCount = String.valueOf(numRows);
      if (existingRowCount == null || !existingRowCount.equals(newRowCount)) {
        partition.putToParameters(StatsSetupConst.ROW_COUNT, newRowCount);
        partition.putToParameters(StatsSetupConst.STATS_GENERATED_VIA_STATS_TASK,
            StatsSetupConst.TRUE);
        updatedPartition = true;
      if (updatedPartition) {
        ++numTargetedPartitions;
        modifiedParts.add(partition);
    if (table.getNumClusteringCols() == 0 || table instanceof HBaseTable) {
      numTargetedPartitions = 1;
    msTbl.putToParameters(StatsSetupConst.ROW_COUNT,
        String.valueOf(params.getTable_stats().num_rows));
    msTbl.putToParameters(StatsSetupConst.STATS_GENERATED_VIA_STATS_TASK,
        StatsSetupConst.TRUE);
    return numTargetedPartitions;

      Map<String, TColumnStats> columnStats, Table table) {
    ColumnStatistics colStats = new ColumnStatistics();
    colStats.setStatsDesc(
        new ColumnStatisticsDesc(true, table.getDb().getName(), table.getName()));
    for (Map.Entry<String, TColumnStats> entry: columnStats.entrySet()) {
      String colName = entry.getKey();
      Column tableCol = table.getColumn(entry.getKey());
      if (tableCol == null) continue;
      ColumnStatisticsData colStatsData =
      if (colStatsData == null) continue;
      LOG.debug(String.format("Updating column stats for %s: numDVs=%s numNulls=%s " +
          "maxSize=%s avgSize=%s", colName, entry.getValue().getNum_distinct_values(),
          entry.getValue().getNum_nulls(), entry.getValue().getMax_size(),
          entry.getValue().getAvg_size()));
      ColumnStatisticsObj colStatsObj = new ColumnStatisticsObj(colName,
          tableCol.getType().toString(), colStatsData);
      colStats.addToStatsObj(colStatsObj);

    ColumnStatisticsData colStatsData = new ColumnStatisticsData();
    long ndvs = colStats.getNum_distinct_values();
    long numNulls = colStats.getNum_nulls();
        colStatsData.setBooleanStats(new BooleanColumnStatsData(1, -1, numNulls));
        colStatsData.setLongStats(new LongColumnStatsData(numNulls, ndvs));
        colStatsData.setDoubleStats(new DoubleColumnStatsData(numNulls, ndvs));
        long maxStrLen = colStats.getMax_size();
        double avgStrLen = colStats.getAvg_size();
        colStatsData.setStringStats(
            new StringColumnStatsData(maxStrLen, avgStrLen, numNulls, ndvs));
        colStatsData.setDecimalStats(
            new DecimalColumnStatsData(numNulls, ndvs));
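  // Excerpt of database creation: skips work when IF NOT EXISTS applies, builds the
  // metastore Database object, and publishes the new Db and its catalog version in the
  // response.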
 
    Preconditions.checkNotNull(params);
    String dbName = params.getDb();
    Preconditions.checkState(dbName != null && !dbName.isEmpty(),
        "Null or empty database name passed as argument to Catalog.createDatabase");
    if (params.if_not_exists && catalog_.getDb(dbName) != null) {
      LOG.debug("Skipping database creation because " + dbName + " already exists and " +
          "IF NOT EXISTS was specified.");
    org.apache.hadoop.hive.metastore.api.Database db =
        new org.apache.hadoop.hive.metastore.api.Database();
    if (params.getComment() != null) {
      db.setDescription(params.getComment());
    if (params.getLocation() != null) {
      db.setLocationUri(params.getLocation());
    LOG.debug("Creating database " + dbName);
        newDb = catalog_.addDb(dbName);
      } catch (AlreadyExistsException e) {
        if (!params.if_not_exists) {
        LOG.debug(String.format("Ignoring '%s' when creating database %s because " +
            "IF NOT EXISTS was specified.", e, dbName));
        newDb = catalog_.getDb(dbName);
      } catch (TException e) {
      Preconditions.checkNotNull(newDb);
      TCatalogObject thriftDb = new TCatalogObject(TCatalogObjectType.DATABASE,
      thriftDb.setDb(newDb.toThrift());
      thriftDb.setCatalog_version(newDb.getCatalogVersion());
      resp.result.setUpdated_catalog_object(thriftDb);
    resp.result.setVersion(resp.result.getUpdated_catalog_object().getCatalog_version());

    Function fn = Function.fromThrift(params.getFn());
    LOG.debug(String.format("Adding %s: %s",
        fn.getClass().getSimpleName(), fn.signatureString()));
        catalog_.getFunction(fn, Function.CompareMode.IS_INDISTINGUISHABLE);
    if (existingFn != null && !params.if_not_exists) {
    catalog_.addFunction(fn);
    TCatalogObject addedObject = new TCatalogObject();
    addedObject.setType(TCatalogObjectType.FUNCTION);
    addedObject.setFn(fn.toThrift());
    addedObject.setCatalog_version(fn.getCatalogVersion());
    resp.result.setUpdated_catalog_object(addedObject);
    resp.result.setVersion(fn.getCatalogVersion());

    if (LOG.isDebugEnabled()) { LOG.debug("Adding DATA SOURCE: " + params.toString()); }
    DataSource dataSource = DataSource.fromThrift(params.getData_source());
      if (!params.if_not_exists) {
      resp.result.setVersion(catalog_.getCatalogVersion());
    catalog_.addDataSource(dataSource);
    TCatalogObject addedObject = new TCatalogObject();
    addedObject.setType(TCatalogObjectType.DATA_SOURCE);
    addedObject.setData_source(dataSource.toThrift());
    addedObject.setCatalog_version(dataSource.getCatalogVersion());
    resp.result.setUpdated_catalog_object(addedObject);
    resp.result.setVersion(dataSource.getCatalogVersion());
 
    if (LOG.isDebugEnabled()) { LOG.debug("Drop DATA SOURCE: " + params.toString()); }
    DataSource dataSource = catalog_.getDataSource(params.getData_source());
    if (dataSource == null) {
      if (!params.if_exists) {
            " does not exist.");
      resp.result.setVersion(catalog_.getCatalogVersion());
    catalog_.removeDataSource(params.getData_source());
    TCatalogObject removedObject = new TCatalogObject();
    removedObject.setType(TCatalogObjectType.DATA_SOURCE);
    removedObject.setData_source(dataSource.toThrift());
    removedObject.setCatalog_version(dataSource.getCatalogVersion());
    resp.result.setRemoved_catalog_object(removedObject);
    resp.result.setVersion(dataSource.getCatalogVersion());
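  // dropStats() excerpt: deletes per-partition stats (or all of them when no partition
  // spec is given) and reloads the table so the response carries the updated catalog
  // object.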
 
  private void dropStats(TDropStatsParams params, TDdlExecResponse resp)
        params.getTable_name().getTable_name());
    Preconditions.checkNotNull(table);
    if (params.getPartition_spec() == null) {
          ((HdfsTable)table).getPartitionFromThriftPartitionSpec(
              params.getPartition_spec());
      if (partition == null) {
        List<String> partitionDescription = Lists.newArrayList();
        for (TPartitionKeyValue v: params.getPartition_spec()) {
          partitionDescription.add(v.name + " = " + v.value);
            Joiner.on("/").join(partitionDescription));
          PartitionStatsUtil.deletePartStats(partition);
            partition.markDirty();
    Table refreshedTable = catalog_.reloadTable(params.getTable_name());
    resp.result.setVersion(
        resp.result.getUpdated_catalog_object().getCatalog_version());

    int numColsUpdated = 0;
      msClient = catalog_.getMetaStoreClient();
      for (Column col: table.getColumns()) {
        if (!col.getStats().hasStats()) continue;
            msClient.getHiveClient().deleteTableColumnStatistics(
                table.getDb().getName(), table.getName(), col.getName());
        } catch (NoSuchObjectException e) {
        } catch (TException e) {
                  "delete_table_column_statistics"), e);
      if (msClient != null) msClient.release();
    return numColsUpdated;

    org.apache.hadoop.hive.metastore.api.Table msTbl = table.getMetaStoreTable();
    int numTargetedPartitions = 0;
    if (msTbl.getParameters().remove(StatsSetupConst.ROW_COUNT) != null) {
      ++numTargetedPartitions;
    if (!(table instanceof HdfsTable) || table.getNumClusteringCols() == 0) {
      return numTargetedPartitions;
    HdfsTable hdfsTable = (HdfsTable) table;
    Preconditions.checkNotNull(hdfsTable);
    List<HdfsPartition> modifiedParts = Lists.newArrayList();
      boolean isModified = false;
      if (part.getId() == ImpalaInternalServiceConstants.DEFAULT_PARTITION_ID) {
      if (part.getPartitionStats() != null) {
        PartitionStatsUtil.deletePartStats(part);
      if (part.getParameters().remove(StatsSetupConst.ROW_COUNT) != null) {
      if (isModified) modifiedParts.add(part);
    return modifiedParts.size();

    Preconditions.checkNotNull(params);
    Preconditions.checkState(params.getDb() != null && !params.getDb().isEmpty(),
        "Null or empty database name passed as argument to Catalog.dropDatabase");
    LOG.debug("Dropping database " + params.getDb());
    Db db = catalog_.getDb(params.db);
    TCatalogObject removedObject = new TCatalogObject();
        msClient.getHiveClient().dropDatabase(params.getDb(), true, params.if_exists);
      } catch (TException e) {
      Db removedDb = catalog_.removeDb(params.getDb());
      if (removedDb == null) {
        removedObject.setCatalog_version(catalog_.getCatalogVersion());
        removedObject.setCatalog_version(removedDb.getCatalogVersion());
    removedObject.setType(TCatalogObjectType.DATABASE);
    removedObject.setDb(new TDatabase());
    removedObject.getDb().setDb_name(params.getDb());
    resp.result.setVersion(removedObject.getCatalog_version());
    resp.result.setRemoved_catalog_object(removedObject);
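  // Excerpt of DROP TABLE/VIEW handling: drops the object in the metastore, removes it
  // from the catalog cache, and removes any HDFS cache directives on the table or its
  // partitions.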
 
    TableName tableName = TableName.fromThrift(params.getTable_name());
    Preconditions.checkState(tableName != null && tableName.isFullyQualified());
    LOG.debug(String.format("Dropping table/view %s", tableName));
    TCatalogObject removedObject = new TCatalogObject();
        msClient.getHiveClient().dropTable(
            tableName.getDb(), tableName.getTbl(), true, params.if_exists);
      } catch (TException e) {
      Table table = catalog_.removeTable(params.getTable_name().db_name,
          params.getTable_name().table_name);
        resp.result.setVersion(table.getCatalogVersion());
          HdfsTable hdfsTable = (HdfsTable) table;
              HdfsCachingUtil.uncacheTbl(table.getMetaStoreTable());
            } catch (Exception e) {
              LOG.error("Unable to uncache table: " + table.getFullName(), e);
              if (partition.isMarkedCached()) {
                  HdfsCachingUtil.uncachePartition(partition);
                } catch (Exception e) {
                  LOG.error("Unable to uncache partition: " +
                      partition.getPartitionName(), e);
        resp.result.setVersion(catalog_.getCatalogVersion());
    removedObject.setType(TCatalogObjectType.TABLE);
    removedObject.setTable(new TTable());
    removedObject.getTable().setTbl_name(tableName.getTbl());
    removedObject.getTable().setDb_name(tableName.getDb());
    removedObject.setCatalog_version(resp.result.getVersion());
    resp.result.setRemoved_catalog_object(removedObject);

  private void dropFunction(TDropFunctionParams params, TDdlExecResponse resp)
    ArrayList<Type> argTypes = Lists.newArrayList();
    for (TColumnType t: params.arg_types) {
      argTypes.add(Type.fromThrift(t));
    LOG.debug(String.format("Dropping Function %s", desc.signatureString()));
    Function fn = catalog_.removeFunction(desc);
      if (!params.if_exists) {
      resp.result.setVersion(catalog_.getCatalogVersion());
      TCatalogObject removedObject = new TCatalogObject();
      removedObject.setType(TCatalogObjectType.FUNCTION);
      removedObject.setFn(fn.toThrift());
      removedObject.setCatalog_version(fn.getCatalogVersion());
      resp.result.setRemoved_catalog_object(removedObject);
      resp.result.setVersion(fn.getCatalogVersion());

  private boolean createTable(TCreateTableParams params, TDdlExecResponse response)
    Preconditions.checkNotNull(params);
    TableName tableName = TableName.fromThrift(params.getTable_name());
    Preconditions.checkState(tableName != null && tableName.isFullyQualified());
    Preconditions.checkState(params.getColumns() != null &&
        params.getColumns().size() > 0,
        "Null or empty column list given as argument to Catalog.createTable");
    if (params.if_not_exists &&
      LOG.debug(String.format("Skipping table creation because %s already exists and " +
          "IF NOT EXISTS was specified.", tableName));
    org.apache.hadoop.hive.metastore.api.Table tbl =
    LOG.debug(String.format("Creating table %s", tableName));
    return createTable(tbl, params.if_not_exists, params.getCache_op(), response);

  private void createView(TCreateOrAlterViewParams params, TDdlExecResponse response)
    TableName tableName = TableName.fromThrift(params.getView_name());
    Preconditions.checkState(tableName != null && tableName.isFullyQualified());
    Preconditions.checkState(params.getColumns() != null &&
        params.getColumns().size() > 0,
          "Null or empty column list given as argument to DdlExecutor.createView");
    if (params.if_not_exists &&
      LOG.debug(String.format("Skipping view creation because %s already exists and " +
          "ifNotExists is true.", tableName));
    org.apache.hadoop.hive.metastore.api.Table view =
        new org.apache.hadoop.hive.metastore.api.Table();
    LOG.debug(String.format("Creating view %s", tableName));
    createTable(view, params.if_not_exists, null, response);
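  // Excerpts of CREATE TABLE LIKE and the shared createTable() path, including optional
  // HDFS caching of the newly created table.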
 
    Preconditions.checkNotNull(params);
    THdfsFileFormat fileFormat =
        params.isSetFile_format() ? params.getFile_format() : null;
    String comment = params.isSetComment() ? params.getComment() : null;
    TableName tblName = TableName.fromThrift(params.getTable_name());
    TableName srcTblName = TableName.fromThrift(params.getSrc_table_name());
    Preconditions.checkState(tblName != null && tblName.isFullyQualified());
    Preconditions.checkState(srcTblName != null && srcTblName.isFullyQualified());
    if (params.if_not_exists &&
      LOG.debug(String.format("Skipping table creation because %s already exists and " +
          "IF NOT EXISTS was specified.", tblName));
    org.apache.hadoop.hive.metastore.api.Table tbl =
        srcTable.getMetaStoreTable().deepCopy();
    tbl.setDbName(tblName.getDb());
    tbl.setTableName(tblName.getTbl());
    tbl.setOwner(params.getOwner());
    if (tbl.getParameters() == null) {
      tbl.setParameters(new HashMap<String, String>());
    if (comment != null) {
      tbl.getParameters().put("comment", comment);
    if (params.is_external) {
      tbl.setTableType(TableType.EXTERNAL_TABLE.toString());
      tbl.putToParameters("EXTERNAL", "TRUE");
      tbl.setTableType(TableType.MANAGED_TABLE.toString());
      if (tbl.getParameters().containsKey("EXTERNAL")) {
        tbl.getParameters().remove("EXTERNAL");
    tbl.getSd().setLocation(params.getLocation());
    if (fileFormat != null) {
    } else if (fileFormat == null && srcTable instanceof View) {
    tbl.putToParameters(StatsSetupConst.ROW_COUNT, "-1");
    LOG.debug(String.format("Creating table %s LIKE %s", tblName, srcTblName));
    createTable(tbl, params.if_not_exists, null, response);

  private boolean createTable(org.apache.hadoop.hive.metastore.api.Table newTable,
      boolean ifNotExists, THdfsCachingOp cacheOp, TDdlExecResponse response)
        if (cacheOp != null && cacheOp.isSet_cached() &&
            newTable.getSd().getLocation() == null) {
          newTable = msClient.getHiveClient().getTable(newTable.getDbName(),
              newTable.getTableName());
      } catch (AlreadyExistsException e) {
        LOG.debug(String.format("Ignoring '%s' when creating table %s.%s because " +
            "IF NOT EXISTS was specified.", e,
            newTable.getDbName(), newTable.getTableName()));
      } catch (TException e) {
    if (cacheOp != null && cacheOp.isSet_cached()) {
      short replication = cacheOp.isSetReplication() ? cacheOp.getReplication() :
          JniCatalogConstants.HDFS_DEFAULT_CACHE_REPLICATION_FACTOR;
      long id = HdfsCachingUtil.submitCacheTblDirective(newTable,
          cacheOp.getCache_pool_name(), replication);
      catalog_.watchCacheDirs(Lists.<Long>newArrayList(id),
          new TTableName(newTable.getDbName(), newTable.getTableName()));
    Table newTbl = catalog_.addTable(newTable.getDbName(), newTable.getTableName());
    response.result.setVersion(
        response.result.getUpdated_catalog_object().getCatalog_version());

      org.apache.hadoop.hive.metastore.api.Table view) {
    view.setTableType(TableType.VIRTUAL_VIEW.toString());
    view.setViewOriginalText(params.getOriginal_view_def());
    view.setViewExpandedText(params.getExpanded_view_def());
    view.setDbName(params.getView_name().getDb_name());
    view.setTableName(params.getView_name().getTable_name());
    view.setOwner(params.getOwner());
    if (view.getParameters() == null) view.setParameters(new HashMap<String, String>());
    if (params.isSetComment() && params.getComment() != null) {
      view.getParameters().put("comment", params.getComment());
    StorageDescriptor sd = new StorageDescriptor();
    sd.setSerdeInfo(new SerDeInfo());

    org.apache.hadoop.hive.metastore.api.Table msTbl = getMetaStoreTable(tableName);
    if (replaceExistingCols) {
      msTbl.getSd().setCols(newColumns);
        msTbl.getSd().addToCols(fs);

      org.apache.hadoop.hive.metastore.api.Table msTbl = getMetaStoreTable(tableName);
      Iterator<FieldSchema> iterator = msTbl.getSd().getColsIterator();
      while (iterator.hasNext()) {
        FieldSchema fs = iterator.next();
        if (fs.getName().toLowerCase().equals(colName.toLowerCase())) {
          fs.setName(newCol.getColumnName());
          Type type = Type.fromThrift(newCol.getColumnType());
          fs.setType(type.toString().toLowerCase());
          if (newCol.getComment() != null) {
            fs.setComment(newCol.getComment());
        if (!iterator.hasNext()) {
              "Column name %s not found in table %s.", colName, tableName));
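  // Excerpts of partition and column DDL: adding a partition (optionally inheriting or
  // creating an HDFS cache directive), dropping a partition, dropping a column, and
  // renaming a table or view (with the HIVE-9720 column-stats workaround).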
 
      List<TPartitionKeyValue> partitionSpec, boolean ifNotExists, String location,
    org.apache.hadoop.hive.metastore.api.Partition partition =
        new org.apache.hadoop.hive.metastore.api.Partition();
        tableName.getTbl(), partitionSpec)) {
      LOG.debug(String.format("Skipping partition creation because (%s) already exists" +
          " and ifNotExists is true.", Joiner.on(", ").join(partitionSpec)));
    Table result = null;
    List<Long> cacheIds = null;
      org.apache.hadoop.hive.metastore.api.Table msTbl = getMetaStoreTable(tableName);
      partition.setDbName(tableName.getDb());
      partition.setTableName(tableName.getTbl());
      Long parentTblCacheDirId =
          HdfsCachingUtil.getCacheDirectiveId(msTbl.getParameters());
      List<String> values = Lists.newArrayList();
      for (FieldSchema fs: msTbl.getPartitionKeys()) {
        for (TPartitionKeyValue kv: partitionSpec) {
          if (fs.getName().toLowerCase().equals(kv.getName().toLowerCase())) {
            values.add(kv.getValue());
      partition.setValues(values);
      StorageDescriptor sd = msTbl.getSd().deepCopy();
      sd.setLocation(location);
      partition.setSd(sd);
        partition = msClient.getHiveClient().add_partition(partition);
        String cachePoolName = null;
        Short replication = null;
        if (cacheOp == null && parentTblCacheDirId != null) {
          cachePoolName = HdfsCachingUtil.getCachePool(parentTblCacheDirId);
          Preconditions.checkNotNull(cachePoolName);
          replication = HdfsCachingUtil.getCacheReplication(parentTblCacheDirId);
          Preconditions.checkNotNull(replication);
        } else if (cacheOp != null && cacheOp.isSet_cached()) {
          cachePoolName = cacheOp.getCache_pool_name();
          if (!cacheOp.isSetReplication() && parentTblCacheDirId != null) {
            replication = HdfsCachingUtil.getCacheReplication(parentTblCacheDirId);
            replication = HdfsCachingUtil.getReplicationOrDefault(cacheOp);
        if (cachePoolName != null) {
          long id = HdfsCachingUtil.submitCachePartitionDirective(partition,
              cachePoolName, replication);
          cacheIds = Lists.<Long>newArrayList(id);
          msClient.getHiveClient().alter_partition(partition.getDbName(),
              partition.getTableName(), partition);
      } catch (AlreadyExistsException e) {
        LOG.debug(String.format("Ignoring '%s' when adding partition to %s because" +
            " ifNotExists is true.", e, tableName));
      } catch (TException e) {
    if (cacheIds != null) catalog_.watchCacheDirs(cacheIds, tableName.toThrift());

      List<TPartitionKeyValue> partitionSpec, boolean ifExists)
      LOG.debug(String.format("Skipping partition drop because (%s) does not exist " +
          "and ifExists is true.", Joiner.on(", ").join(partitionSpec)));
    HdfsPartition part = catalog_.getHdfsPartition(tableName.getDb(),
        tableName.getTbl(), partitionSpec);
      org.apache.hadoop.hive.metastore.api.Table msTbl = getMetaStoreTable(tableName);
      List<String> values = Lists.newArrayList();
      for (FieldSchema fs: msTbl.getPartitionKeys()) {
        for (TPartitionKeyValue kv: partitionSpec) {
          if (fs.getName().toLowerCase().equals(kv.getName().toLowerCase())) {
            values.add(kv.getValue());
        msClient.getHiveClient().dropPartition(tableName.getDb(),
            tableName.getTbl(), values);
          HdfsCachingUtil.uncachePartition(part);
      } catch (NoSuchObjectException e) {
        LOG.debug(String.format("Ignoring '%s' when dropping partition from %s because" +
            " ifExists is true.", e, tableName));
      } catch (TException e) {
    return catalog_.dropPartition(tableName, partitionSpec);

      org.apache.hadoop.hive.metastore.api.Table msTbl = getMetaStoreTable(tableName);
      Iterator<FieldSchema> iterator = msTbl.getSd().getColsIterator();
      while (iterator.hasNext()) {
        FieldSchema fs = iterator.next();
        if (fs.getName().toLowerCase().equals(colName.toLowerCase())) {
        if (!iterator.hasNext()) {
              "Column name %s not found in table %s.", colName, tableName));

      TDdlExecResponse response)
      org.apache.hadoop.hive.metastore.api.Table msTbl = getMetaStoreTable(tableName);
      msTbl.setDbName(newTableName.getDb());
      msTbl.setTableName(newTableName.getTbl());
        ColumnStatistics hmsColStats = null;
        if (!msTbl.getTableType().equalsIgnoreCase(TableType.VIRTUAL_VIEW.toString()) &&
            !tableName.getDb().equalsIgnoreCase(newTableName.getDb())) {
          Map<String, TColumnStats> colStats = Maps.newHashMap();
            colStats.put(c.getName(), c.getStats().toThrift());
          hmsColStats.setStatsDesc(
              new ColumnStatisticsDesc(true, newTableName.getDb(), newTableName.getTbl()));
          LOG.trace(String.format("Dropping column stats for table %s being " +
              "renamed to %s to workaround HIVE-9720.",
              tableName.toString(), newTableName.toString()));
          msClient.getHiveClient().deleteTableColumnStatistics(
              tableName.getDb(), tableName.getTbl(), null);
        msClient.getHiveClient().alter_table(
            tableName.getDb(), tableName.getTbl(), msTbl);
        if (hmsColStats != null) {
          LOG.trace(String.format("Restoring column stats for table %s being " +
              "renamed to %s to workaround HIVE-9720.",
              tableName.toString(), newTableName.toString()));
          msClient.getHiveClient().updateTableColumnStatistics(hmsColStats);
      } catch (TException e) {
    TCatalogObject removedObject = new TCatalogObject();
    removedObject.setType(TCatalogObjectType.TABLE);
    removedObject.setTable(new TTable());
    removedObject.getTable().setTbl_name(tableName.getTbl());
    removedObject.getTable().setDb_name(tableName.getDb());
    removedObject.setCatalog_version(newTable.getCatalog_version());
    response.result.setRemoved_catalog_object(removedObject);
    response.result.setUpdated_catalog_object(newTable);
    response.result.setVersion(newTable.getCatalog_version());
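  // Excerpts of the ALTER TABLE SET handlers: file format, location, table and serde
  // properties, and HDFS caching for the table and its partitions.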
 
      List<TPartitionKeyValue> partitionSpec, THdfsFileFormat fileFormat)
    Preconditions.checkState(partitionSpec == null || !partitionSpec.isEmpty());
    if (partitionSpec == null) {
        org.apache.hadoop.hive.metastore.api.Table msTbl = getMetaStoreTable(tableName);
            tableName.getDb(), tableName.getTbl(), partitionSpec);
        Preconditions.checkNotNull(partition);
        partition.setFileFormat(HdfsFileFormat.fromThrift(fileFormat));
          partition.markDirty();

      THdfsFileFormat fileFormat) {
    StorageDescriptor tempSd =
    sd.setInputFormat(tempSd.getInputFormat());
    sd.setOutputFormat(tempSd.getOutputFormat());
    sd.getSerdeInfo().setSerializationLib(tempSd.getSerdeInfo().getSerializationLib());

      List<TPartitionKeyValue> partitionSpec, String location) throws ImpalaException {
    Preconditions.checkState(partitionSpec == null || !partitionSpec.isEmpty());
    if (partitionSpec == null) {
        org.apache.hadoop.hive.metastore.api.Table msTbl = getMetaStoreTable(tableName);
        msTbl.getSd().setLocation(location);
            tableName.getDb(), tableName.getTbl(), partitionSpec);
        partition.setLocation(location);
          partition.markDirty();

    Map<String, String> properties = params.getProperties();
    Preconditions.checkNotNull(properties);
      if (params.isSetPartition_spec()) {
            tableName.getDb(), tableName.getTbl(), params.getPartition_spec());
        switch (params.getTarget()) {
            partition.getParameters().putAll(properties);
          case SERDE_PROPERTY:
            partition.getSerdeInfo().getParameters().putAll(properties);
            throw new UnsupportedOperationException(
                "Unknown target TTablePropertyType: " + params.getTarget());
          partition.markDirty();
        org.apache.hadoop.hive.metastore.api.Table msTbl = getMetaStoreTable(tableName);
        switch (params.getTarget()) {
            msTbl.getParameters().putAll(properties);
          case SERDE_PROPERTY:
            msTbl.getSd().getSerdeInfo().getParameters().putAll(properties);
            throw new UnsupportedOperationException(
                "Unknown target TTablePropertyType: " + params.getTarget());

    THdfsCachingOp cacheOp = params.getCache_op();
    Preconditions.checkNotNull(cacheOp);
    HdfsTable hdfsTable = (HdfsTable) table;
    org.apache.hadoop.hive.metastore.api.Table msTbl = table.getMetaStoreTable();
    Long cacheDirId = HdfsCachingUtil.getCacheDirectiveId(msTbl.getParameters());
    if (cacheOp.isSet_cached()) {
      List<Long> cacheDirIds = Lists.newArrayList();
      short cacheReplication = HdfsCachingUtil.getReplicationOrDefault(cacheOp);
      if (cacheDirId == null) {
        cacheDirIds.add(HdfsCachingUtil.submitCacheTblDirective(msTbl,
            cacheOp.getCache_pool_name(), cacheReplication));
          HdfsCachingUtil.validateCachePool(cacheOp, cacheDirId, tableName);
          cacheDirIds.add(HdfsCachingUtil.modifyCacheDirective(cacheDirId, msTbl,
              cacheOp.getCache_pool_name(), cacheReplication));
      if (table.getNumClusteringCols() > 0) {
          if (partition.getId() == ImpalaInternalServiceConstants.DEFAULT_PARTITION_ID) {
          if (!partition.isMarkedCached() ||
              HdfsCachingUtil.isUpdateOp(cacheOp, partition.getParameters())) {
              if (!partition.isMarkedCached()) {
                cacheDirIds.add(HdfsCachingUtil.submitCachePartitionDirective(
                    partition, cacheOp.getCache_pool_name(), cacheReplication));
                    HdfsCachingUtil.getCacheDirectiveId(partition.getParameters());
                cacheDirIds.add(HdfsCachingUtil.modifyCacheDirective(directiveId,
                    partition, cacheOp.getCache_pool_name(), cacheReplication));
              if (partition.isMarkedCached()) {
                LOG.error("Unable to modify cache partition: " +
                    partition.getPartitionName(), e);
                LOG.error("Unable to cache partition: " +
                    partition.getPartitionName(), e);
              partition.markDirty();
      if (cacheDirIds.isEmpty()) return;
      catalog_.watchCacheDirs(cacheDirIds, tableName.toThrift());
      if (cacheDirId != null) HdfsCachingUtil.uncacheTbl(msTbl);
      if (table.getNumClusteringCols() > 0) {
        for (HdfsPartition partition: ((HdfsTable) table).getPartitions()) {
          if (partition.getId() == ImpalaInternalServiceConstants.DEFAULT_PARTITION_ID) {
          if (partition.isMarkedCached()) {
            HdfsCachingUtil.uncachePartition(partition);
              partition.markDirty();
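  // Excerpt of per-partition cache changes, followed by the helpers that apply an ALTER
  // to the metastore and update transient_lastDdlTime.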
 
    THdfsCachingOp cacheOp = params.getCache_op();
    Preconditions.checkNotNull(cacheOp);
    Preconditions.checkNotNull(params.getPartition_spec());
          tableName.getDb(), tableName.getTbl(), params.getPartition_spec());
      if (cacheOp.isSet_cached()) {
        Long directiveId = HdfsCachingUtil.getCacheDirectiveId(
            partition.getParameters());
        short replication = HdfsCachingUtil.getReplicationOrDefault(cacheOp);
        List<Long> cacheDirs = Lists.newArrayList();
        if (directiveId == null) {
          cacheDirs.add(HdfsCachingUtil.submitCachePartitionDirective(partition,
              cacheOp.getCache_pool_name(), replication));
            HdfsCachingUtil.validateCachePool(cacheOp, directiveId, tableName, partition);
            cacheDirs.add(HdfsCachingUtil.modifyCacheDirective(directiveId, partition,
                cacheOp.getCache_pool_name(), replication));
        if (!cacheDirs.isEmpty()) {
          catalog_.watchCacheDirs(cacheDirs, tableName.toThrift());
        HdfsCachingUtil.uncachePartition(partition);
        partition.markDirty();

    long lastDdlTime = -1;
      msTbl.putToParameters("transient_lastDdlTime", Long.toString(lastDdlTime));
      msClient.getHiveClient().alter_table(
          msTbl.getDbName(), msTbl.getTableName(), msTbl);
    } catch (TException e) {
      throw new ImpalaRuntimeException(
      catalog_.updateLastDdlTime(
          new TTableName(msTbl.getDbName(), msTbl.getTableName()), lastDdlTime);

      msClient.getHiveClient().alter_partition(
          tableName.getDb(), tableName.getTbl(), partition.toHmsPartition());
      org.apache.hadoop.hive.metastore.api.Table msTbl = getMetaStoreTable(tableName);
    } catch (TException e) {
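  // Excerpts of the Sentry-backed authorization DDL: CREATE/DROP ROLE, GRANT/REVOKE
  // ROLE, and GRANT/REVOKE privileges, each publishing the affected catalog object and
  // its version.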
 
      TCreateDropRoleParams createDropRoleParams, TDdlExecResponse resp)
    Preconditions.checkNotNull(requestingUser);
    if (createDropRoleParams.isIs_drop()) {
      role = catalog_.getSentryProxy().dropRole(requestingUser,
          createDropRoleParams.getRole_name());
        role = new Role(createDropRoleParams.getRole_name(), Sets.<String>newHashSet());
        role.setCatalogVersion(catalog_.getCatalogVersion());
      role = catalog_.getSentryProxy().createRole(requestingUser,
          createDropRoleParams.getRole_name());
    Preconditions.checkNotNull(role);
    TCatalogObject catalogObject = new TCatalogObject();
    catalogObject.setType(role.getCatalogObjectType());
    catalogObject.setRole(role.toThrift());
    catalogObject.setCatalog_version(role.getCatalogVersion());
    if (createDropRoleParams.isIs_drop()) {
      resp.result.setRemoved_catalog_object(catalogObject);
      resp.result.setUpdated_catalog_object(catalogObject);
    resp.result.setVersion(role.getCatalogVersion());

      TGrantRevokeRoleParams grantRevokeRoleParams, TDdlExecResponse resp)
    Preconditions.checkNotNull(requestingUser);
    String roleName = grantRevokeRoleParams.getRole_names().get(0);
    String groupName = grantRevokeRoleParams.getGroup_names().get(0);
    if (grantRevokeRoleParams.isIs_grant()) {
      role = catalog_.getSentryProxy().grantRoleGroup(requestingUser, roleName,
      role = catalog_.getSentryProxy().revokeRoleGroup(requestingUser, roleName,
    Preconditions.checkNotNull(role);
    TCatalogObject catalogObject = new TCatalogObject();
    catalogObject.setType(role.getCatalogObjectType());
    catalogObject.setRole(role.toThrift());
    catalogObject.setCatalog_version(role.getCatalogVersion());
    resp.result.setUpdated_catalog_object(catalogObject);
    resp.result.setVersion(role.getCatalogVersion());

      TGrantRevokePrivParams grantRevokePrivParams, TDdlExecResponse resp)
    Preconditions.checkNotNull(requestingUser);
    String roleName = grantRevokePrivParams.getRole_name();
    List<TCatalogObject> updatedPrivs = Lists.newArrayList();
    for (TPrivilege privilege: grantRevokePrivParams.getPrivileges()) {
      if (grantRevokePrivParams.isIs_grant()) {
          rolePriv = catalog_.getSentryProxy().grantRolePrivilege(requestingUser,
              roleName, privilege);
        rolePriv = catalog_.getSentryProxy().revokeRolePrivilege(requestingUser,
            roleName, privilege);
        if (rolePriv == null) {
          rolePriv = RolePrivilege.fromThrift(privilege);
          rolePriv.setCatalogVersion(catalog_.getCatalogVersion());
      Preconditions.checkNotNull(rolePriv);
      TCatalogObject catalogObject = new TCatalogObject();
      catalogObject.setType(rolePriv.getCatalogObjectType());
      catalogObject.setPrivilege(rolePriv.toThrift());
      catalogObject.setCatalog_version(rolePriv.getCatalogVersion());
      updatedPrivs.add(catalogObject);
    if (updatedPrivs.size() == 1) {
      if (grantRevokePrivParams.isIs_grant() ||
          grantRevokePrivParams.getPrivileges().get(0).isHas_grant_opt()) {
        resp.result.setUpdated_catalog_object(updatedPrivs.get(0));
        resp.result.setRemoved_catalog_object(updatedPrivs.get(0));
      resp.result.setVersion(updatedPrivs.get(0).getCatalog_version());
    } else if (updatedPrivs.size() > 1) {
      resp.result.setVersion(
          updatedPrivs.get(updatedPrivs.size() - 1).getCatalog_version());

      throw new CatalogException("Sentry Service is not enabled on the " +
 
    List<org.apache.hadoop.hive.metastore.api.Partition> hmsPartitions =
        Lists.newArrayList();
      org.apache.hadoop.hive.metastore.api.Partition msPart = p.toHmsPartition();
      if (msPart != null) hmsPartitions.add(msPart);
    if (hmsPartitions.size() == 0) return;
      msClient = catalog_.getMetaStoreClient();
        int numPartitionsToUpdate =
            msClient.getHiveClient().alter_partitions(dbName, tableName,
                hmsPartitions.subList(i, numPartitionsToUpdate));
          } catch (TException e) {
      if (msClient != null) msClient.release();

    Preconditions.checkState(tableName != null && tableName.isFullyQualified());
        .getMetaStoreTable().deepCopy();

    List<FieldSchema> fsList = Lists.newArrayList();
    for (TColumn col: columns) {
      Type type = Type.fromThrift(col.getColumnType());
      String typeSql = type.toSql().toLowerCase();
      FieldSchema fs = new FieldSchema(col.getColumnName(), typeSql, col.getComment());

    if (table != null) return table.toTCatalogObject();
    return new TCatalogObject(TCatalogObjectType.TABLE,

      MetaStoreClient msClient) throws MetaException, NoSuchObjectException, TException {
    Preconditions.checkNotNull(msTbl);
    LOG.debug("Updating lastDdlTime for table: " + msTbl.getTableName());
    Map<String, String> params = msTbl.getParameters();
    params.put("transient_lastDdlTime", Long.toString(lastDdlTime));
    msTbl.setParameters(params);
    if (msClient != null) {
      msClient.getHiveClient().alter_table(
          msTbl.getDbName(), msTbl.getTableName(), msTbl);
    catalog_.updateLastDdlTime(
        new TTableName(msTbl.getDbName(), msTbl.getTableName()), lastDdlTime);

      org.apache.hadoop.hive.metastore.api.Table msTbl) {
    long existingLastDdlTime = CatalogServiceCatalog.getLastDdlTime(msTbl);
    long currentTime = System.currentTimeMillis() / 1000;
    if (existingLastDdlTime == currentTime) ++currentTime;
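  // Excerpt of building a metastore Table from TCreateTableParams: table type,
  // properties, storage descriptor, serde properties, location, and partition keys.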
 
  public static org.apache.hadoop.hive.metastore.api.Table
      createMetaStoreTable(TCreateTableParams params) {
    Preconditions.checkNotNull(params);
    TableName tableName = TableName.fromThrift(params.getTable_name());
    org.apache.hadoop.hive.metastore.api.Table tbl =
        new org.apache.hadoop.hive.metastore.api.Table();
    tbl.setDbName(tableName.getDb());
    tbl.setTableName(tableName.getTbl());
    tbl.setOwner(params.getOwner());
    if (params.isSetTable_properties()) {
      tbl.setParameters(params.getTable_properties());
    } else {
      tbl.setParameters(new HashMap<String, String>());
    }
    if (params.getComment() != null) {
      tbl.getParameters().put("comment", params.getComment());
    }
    if (params.is_external) {
      tbl.setTableType(TableType.EXTERNAL_TABLE.toString());
      tbl.putToParameters("EXTERNAL", "TRUE");
    } else {
      tbl.setTableType(TableType.MANAGED_TABLE.toString());
    }
    // ...
    StorageDescriptor sd = HiveStorageDescriptorFactory.createSd(/* ... */);
    // ...
    if (params.isSetSerde_properties()) {
      if (sd.getSerdeInfo().getParameters() == null) {
        sd.getSerdeInfo().setParameters(params.getSerde_properties());
      } else {
        sd.getSerdeInfo().getParameters().putAll(params.getSerde_properties());
      }
    }
    // ...
    if (params.getLocation() != null) {
      sd.setLocation(params.getLocation());
    }
    // ...
    if (params.getPartition_columns() != null) {
      // ...
    } else {
      tbl.setPartitionKeys(new ArrayList<FieldSchema>());
    }
    // ...
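
As a minimal illustration of the external/managed branch above, this sketch builds a bare HMS Table and marks it external the same way; the class name and the database/table names in main() are placeholders.

import java.util.HashMap;

import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.api.Table;

public class ExternalTableSketch {
  // Builds a skeletal HMS table object. An external table keeps its data when
  // dropped, which is signalled via the table type plus the EXTERNAL parameter.
  static Table newSkeleton(String db, String name, boolean isExternal) {
    Table tbl = new Table();
    tbl.setDbName(db);
    tbl.setTableName(name);
    tbl.setParameters(new HashMap<String, String>());
    if (isExternal) {
      tbl.setTableType(TableType.EXTERNAL_TABLE.toString());
      tbl.putToParameters("EXTERNAL", "TRUE");
    } else {
      tbl.setTableType(TableType.MANAGED_TABLE.toString());
    }
    return tbl;
  }

  public static void main(String[] args) {
    Table t = newSkeleton("demo_db", "demo_tbl", true);
    System.out.println(t.getTableType() + " " + t.getParameters());
  }
}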
 
    // execResetMetadata(): handle REFRESH <table> / INVALIDATE METADATA requests.
    TResetMetadataResponse resp = new TResetMetadataResponse();
    resp.setResult(new TCatalogUpdateResult());
    // ...
    if (req.isSetTable_name()) {
      // ...
      Pair<Db, Table> modifiedObjects = new Pair<Db, Table>(null, null);
      // ...
      boolean wasRemoved = false;
      if (req.isIs_refresh()) {
        modifiedObjects.second = catalog_.reloadTable(req.getTable_name());
      } else {
        wasRemoved = catalog_.invalidateTable(req.getTable_name(), modifiedObjects);
      }
      // ...
      if (modifiedObjects.first == null) {
        // ...
        if (modifiedObjects.second != null) {
          // ...
          if (wasRemoved) {
            resp.getResult().setRemoved_catalog_object(thriftTable);
          } else {
            resp.getResult().setUpdated_catalog_object(thriftTable);
          }
        } else {
          // ...
              req.getTable_name().getDb_name() + "."
              + req.getTable_name().getTable_name());
        }
        resp.getResult().setVersion(thriftTable.getCatalog_version());
      } else {
        Preconditions.checkState(!req.isIs_refresh());
        Preconditions.checkNotNull(modifiedObjects.first);
        Preconditions.checkNotNull(modifiedObjects.second);
        // ...
        Preconditions.checkState(modifiedObjects.first.getCatalogVersion() <
            modifiedObjects.second.getCatalogVersion());
        // ...
        resp.getResult().setVersion(modifiedObjects.second.getCatalogVersion());
      }
    } else {
      Preconditions.checkArgument(!req.isIs_refresh());
      // ...
      resp.result.setVersion(catalog_.getCatalogVersion());
    }
    resp.getResult().setStatus(
        new TStatus(TErrorCode.OK, new ArrayList<String>()));
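
For context, a hypothetical client-side sketch of the two request shapes this handler distinguishes. The setters are inferred from the generated Thrift accessors used above (isIs_refresh(), getTable_name(), getDb_name()), so treat them as assumptions rather than confirmed API; the TTableName two-argument constructor matches its use earlier in this file.

import com.cloudera.impala.thrift.TResetMetadataRequest;
import com.cloudera.impala.thrift.TTableName;

public class ResetMetadataRequestSketch {
  // REFRESH of a single table: table_name is set and is_refresh is true, so the
  // executor takes the catalog_.reloadTable() path above.
  static TResetMetadataRequest refreshTable(String db, String tbl) {
    TResetMetadataRequest req = new TResetMetadataRequest();
    req.setIs_refresh(true);
    req.setTable_name(new TTableName(db, tbl));
    return req;
  }

  // Global INVALIDATE METADATA: no table name, is_refresh must be false; the
  // executor reports the resulting catalog version in the response.
  static TResetMetadataRequest invalidateAll() {
    TResetMetadataRequest req = new TResetMetadataRequest();
    req.setIs_refresh(false);
    return req;
  }
}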
 
    // updateCatalog(): apply the results of an INSERT, creating any new partitions
    // and submitting HDFS cache directives where the table or partition is cached.
    TUpdateCatalogResponse response = new TUpdateCatalogResponse();
    // ...
        update.getTarget_table());
    // ...
    // Collect the cache directive IDs of any cached table/partitions touched by
    // this insert.
    List<Long> cacheDirIds = Lists.<Long>newArrayList();
    // ...
    String cachePoolName = null;
    Short cacheReplication = 0;
    Long cacheDirId = HdfsCachingUtil.getCacheDirectiveId(
        table.getMetaStoreTable().getParameters());
    if (cacheDirId != null) {
      // ...
        cachePoolName = HdfsCachingUtil.getCachePool(cacheDirId);
        cacheReplication = HdfsCachingUtil.getCacheReplication(cacheDirId);
        Preconditions.checkNotNull(cacheReplication);
      // ...
            String.format("Cache directive %d was not found, uncache the table " +
                "%s.%s to remove this message.", cacheDirId, update.getDb_name(),
                update.getTarget_table()));
      // ...
    }
    // ...
    List<String> errorMessages = Lists.newArrayList();
    // ...
      Set<String> partsToCreate = Sets.newHashSet(update.getCreated_partitions());
      for (HdfsPartition partition: ((HdfsTable) table).getPartitions()) {
        // Skip the default partition.
        if (partition.getId() == ImpalaInternalServiceConstants.DEFAULT_PARTITION_ID) {
          // ...
        }
        String partName = partition.getPartitionName() + "/";
        // ...
        if (partsToCreate.remove(partName) && partition.isMarkedCached()) {
          // ...
          cacheDirIds.add(HdfsCachingUtil.getCacheDirectiveId(
              partition.getParameters()));
        }
        if (partsToCreate.size() == 0) break;
      }
      // ...
      if (!partsToCreate.isEmpty()) {
        // ...
          List<org.apache.hadoop.hive.metastore.api.Partition> hmsParts =
              Lists.newArrayList();
          HiveConf hiveConf = new HiveConf(this.getClass());
          Warehouse warehouse = new Warehouse(hiveConf);
          for (String partName: partsToCreate) {
            org.apache.hadoop.hive.metastore.api.Partition partition =
                new org.apache.hadoop.hive.metastore.api.Partition();
            hmsParts.add(partition);
            // ...
            partition.setDbName(tblName.getDb());
            partition.setTableName(tblName.getTbl());
            // ...
            partition.setParameters(new HashMap<String, String>());
            partition.setSd(msTbl.getSd().deepCopy());
            partition.getSd().setSerdeInfo(msTbl.getSd().getSerdeInfo().deepCopy());
            partition.getSd().setLocation(msTbl.getSd().getLocation() + "/" +
                partName.substring(0, partName.length() - 1));
            MetaStoreUtils.updatePartitionStatsFast(partition, warehouse);
          }
          // ...
          List<org.apache.hadoop.hive.metastore.api.Partition> addedHmsParts =
              msClient.getHiveClient().add_partitions(hmsParts, true, true);
          // ...
          if (addedHmsParts.size() > 0) {
            if (cachePoolName != null) {
              List<org.apache.hadoop.hive.metastore.api.Partition> cachedHmsParts =
                  Lists.newArrayList();
              // ...
              for (org.apache.hadoop.hive.metastore.api.Partition part: addedHmsParts) {
                // ...
                  cacheDirIds.add(HdfsCachingUtil.submitCachePartitionDirective(
                      part, cachePoolName, cacheReplication));
                  cachedHmsParts.add(part);
                // ...
                  String msg = String.format("Partition %s.%s(%s): State: Not cached." +
                      " Action: Cache manually via 'ALTER TABLE'.",
                      part.getDbName(), part.getTableName(), part.getValues());
                  // ...
                  errorMessages.add(msg);
                // ...
              }
              // ...
                msClient.getHiveClient().alter_partitions(tblName.getDb(),
                    tblName.getTbl(), cachedHmsParts);
              } catch (Exception e) {
                LOG.error("Failed in alter_partitions: ", e);
                // Undo the caching of the partitions that were already submitted so
                // the directives are not leaked.
                for (org.apache.hadoop.hive.metastore.api.Partition part:
                    cachedHmsParts) {
                  // ...
                    HdfsCachingUtil.uncachePartition(part);
                  // ...
                    String msg = String.format(
                        "Partition %s.%s(%s): State: Leaked caching directive. " +
                        "Action: Manually uncache directory %s via hdfs cacheAdmin.",
                        part.getDbName(), part.getTableName(), part.getValues(),
                        part.getSd().getLocation());
                    // ...
                    errorMessages.add(msg);
                  // ...
                }
              }
            }
          }
        } catch (AlreadyExistsException e) {
          // ...
              "AlreadyExistsException thrown although ifNotExists given", e);
        } catch (Exception e) {
          // ...
        }
      }
    // ...
    response.setResult(new TCatalogUpdateResult());
    // ...
    if (errorMessages.size() > 0) {
      errorMessages.add("Please refer to the catalogd error log for details " +
          "regarding the failed un/caching operations.");
      response.getResult().setStatus(
          new TStatus(TErrorCode.INTERNAL_ERROR, errorMessages));
    } else {
      response.getResult().setStatus(
          new TStatus(TErrorCode.OK, new ArrayList<String>()));
    }
    // ...
    Table refreshedTbl = catalog_.reloadTable(tblName.toThrift());
    // ...
    response.getResult().setVersion(
        response.getResult().getUpdated_catalog_object().getCatalog_version());
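
One detail worth calling out from the partition-creation loop: partition names are tracked with a trailing '/', which is stripped before the name is appended to the table's base location. Below is a small standalone sketch of that path construction; the class name and the values in main() are hypothetical.

public class PartitionLocationSketch {
  // Mirrors the getSd().setLocation(...) call above: the trailing slash on the
  // tracked partition name is dropped before concatenation.
  static String partitionLocation(String tableLocation, String partNameWithSlash) {
    String partName = partNameWithSlash.substring(0, partNameWithSlash.length() - 1);
    return tableLocation + "/" + partName;
  }

  public static void main(String[] args) {
    String loc = partitionLocation("hdfs://nn:8020/warehouse/t", "year=2014/month=5/");
    System.out.println(loc);  // hdfs://nn:8020/warehouse/t/year=2014/month=5
  }
}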
 
    // getPartValsFromName(): map a partition name such as "p1=v1/p2=v2" back to a
    // list of values ordered by the table's partition keys.
    Preconditions.checkNotNull(msTbl);
    LinkedHashMap<String, String> hm =
        org.apache.hadoop.hive.metastore.Warehouse.makeSpecFromName(partName);
    List<String> partVals = Lists.newArrayList();
    for (FieldSchema field: msTbl.getPartitionKeys()) {
      String key = field.getName();
      String val = hm.get(key);
      // ...
      partVals.add(val);
    }
    // ...
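
The fragment above relies on Warehouse.makeSpecFromName() returning a map keyed by partition-column name. A sketch of the same lookup with the partition-key order passed in explicitly; the class name and the key names in main() are made up for illustration.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;

import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.MetaException;

public class PartValsSketch {
  // The partition-key order comes from the table schema in the real code
  // (msTbl.getPartitionKeys()); here it is supplied directly.
  static List<String> partValsFromName(String partName, List<String> partitionKeys)
      throws MetaException {
    LinkedHashMap<String, String> spec = Warehouse.makeSpecFromName(partName);
    List<String> partVals = new ArrayList<String>();
    for (String key : partitionKeys) {
      partVals.add(spec.get(key));
    }
    return partVals;
  }

  public static void main(String[] args) throws MetaException {
    // "year=2014/month=5" -> [2014, 5] given keys (year, month).
    System.out.println(partValsFromName("year=2014/month=5",
        Arrays.asList("year", "month")));
  }
}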
 
    // getExistingTable(): load the table if necessary and verify it exists and is
    // fully loaded before returning it.
    Table tbl = catalog_.getOrLoadTable(dbName, tblName);
    // ...
      throw new CatalogException(String.format("Table '%s.%s' was modified while " +
          "operation was in progress, aborting execution.", dbName, tblName));
    // ...
        throw (TableLoadingException) e;
      // ...
      throw new TableLoadingException(e.getMessage(), e);
    // ...
    Preconditions.checkNotNull(tbl);
    Preconditions.checkState(tbl.isLoaded());
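
The "modified while operation was in progress" error suggests a capture-version-then-verify guard around long-running catalog operations. A generic sketch of that guard pattern follows; the interface and names are hypothetical and not part of this class.

public class VersionCheckSketch {
  // Hypothetical stand-in for a catalog object that exposes a version counter.
  interface Versioned {
    long getCatalogVersion();
  }

  // Remember the version before a long-running step and abort if the object
  // changed underneath the operation.
  static void checkUnmodified(Versioned obj, long versionBefore, String name) {
    if (obj.getCatalogVersion() != versionBefore) {
      throw new IllegalStateException(String.format(
          "Table '%s' was modified while operation was in progress, aborting.", name));
    }
  }
}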
 
TPartitionStats getPartitionStats()
static ColumnStatistics createHiveColStats(Map<String, TColumnStats> columnStats, Table table)
int updateTableStats(Table table, TAlterTableUpdateStatsParams params, org.apache.hadoop.hive.metastore.api.Table msTbl, List<HdfsPartition> partitions, List<HdfsPartition> modifiedParts)
void dropFunction(TDropFunctionParams params, TDdlExecResponse resp)
void createTableLike(TCreateTableLikeParams params, TDdlExecResponse response)
static final String HMS_RPC_ERROR_FORMAT_STR
void alterTableSetLocation(TableName tableName, List<TPartitionKeyValue> partitionSpec, String location)
void alterTable(TAlterTableParams params, TDdlExecResponse response)
void alterTableSetTblProperties(TableName tableName, TAlterTableSetTblPropertiesParams params)
void alterTableOrViewRename(TableName tableName, TableName newTableName, TDdlExecResponse response)
static TableName fromThrift(TTableName tableName)
long updateLastDdlTime(org.apache.hadoop.hive.metastore.api.Table msTbl, MetaStoreClient msClient)
boolean createTable(org.apache.hadoop.hive.metastore.api.Table newTable, boolean ifNotExists, THdfsCachingOp cacheOp, TDdlExecResponse response)
TUpdateCatalogResponse updateCatalog(TUpdateCatalogRequest update)
void createView(TCreateOrAlterViewParams params, TDdlExecResponse response)
static final ScalarType STRING
Map<String, String> getParameters()
PrimitiveType getPrimitiveType()
void dropTableOrView(TDropTableOrViewParams params, TDdlExecResponse resp)
static void setStorageDescriptorFileFormat(StorageDescriptor sd, THdfsFileFormat fileFormat)
CatalogOpExecutor(CatalogServiceCatalog catalog)
Table renameTable(TTableName oldTableName, TTableName newTableName)
void alterTableDropCol(TableName tableName, String colName)
List<HdfsPartition> getPartitions()
static final long INITIAL_CATALOG_VERSION
static List<FieldSchema> buildFieldSchemaList(List<TColumn> columns)
static TUniqueId getServiceId()
ArrayList<Column> getColumns()
void alterTableSetFileFormat(TableName tableName, List<TPartitionKeyValue> partitionSpec, THdfsFileFormat fileFormat)
void alterTableSetCached(TableName tableName, TAlterTableSetCachedParams params)
static boolean isUpdateOp(THdfsCachingOp op, Map<String, String> params)
void alterTableChangeCol(TableName tableName, String colName, TColumn newCol)
DataSource getDataSource(String dataSourceName)
Table alterTableDropPartition(TableName tableName, List<TPartitionKeyValue> partitionSpec, boolean ifExists)
void grantRevokeRolePrivilege(User requestingUser, TGrantRevokePrivParams grantRevokePrivParams, TDdlExecResponse resp)
void applyAlterTable(org.apache.hadoop.hive.metastore.api.Table msTbl)
void alterTableAddReplaceCols(TableName tableName, List<TColumn> columns, boolean replaceExistingCols)
static long calculateDdlTime(org.apache.hadoop.hive.metastore.api.Table msTbl)
Table getExistingTable(String dbName, String tblName)
int dropColumnStats(Table table)
Table alterTableAddPartition(TableName tableName, List<TPartitionKeyValue> partitionSpec, boolean ifNotExists, String location, THdfsCachingOp cacheOp)
org.apache.hadoop.hive.metastore.api.Table getMetaStoreTable(TableName tableName)
void alterPartitionSetCached(TableName tableName, TAlterTableSetCachedParams params)
void alterView(TCreateOrAlterViewParams params, TDdlExecResponse resp)
void applyAlterPartition(TableName tableName, HdfsPartition partition)
void dropDataSource(TDropDataSourceParams params, TDdlExecResponse resp)
static FunctionName fromThrift(TFunctionName fnName)
void dropDatabase(TDropDbParams params, TDdlExecResponse resp)
TDdlExecResponse execDdlRequest(TDdlExecRequest ddlRequest)
final CatalogServiceCatalog catalog_
void alterTableUpdateStats(TAlterTableUpdateStatsParams params, TDdlExecResponse resp)
static final short MAX_PARTITION_UPDATES_PER_RPC
void createFunction(TCreateFunctionParams params, TDdlExecResponse resp)
void grantRevokeRoleGroup(User requestingUser, TGrantRevokeRoleParams grantRevokeRoleParams, TDdlExecResponse resp)
boolean createTable(TCreateTableParams params, TDdlExecResponse response)
boolean containsHdfsPartition(String dbName, String tableName, List<TPartitionKeyValue> partitionSpec)
Table addHdfsPartition(TableName tableName, Partition partition)
void toThrift(TColumnType container)
List<String> getPartValsFromName(org.apache.hadoop.hive.metastore.api.Table msTbl, String partName)
int dropTableStats(Table table)
final Object metastoreDdlLock_
Table(TableId id, org.apache.hadoop.hive.metastore.api.Table msTable, Db db, String name, String owner)
void createDatabase(TCreateDbParams params, TDdlExecResponse resp)
void bulkAlterPartitions(String dbName, String tableName, List<HdfsPartition> modifiedParts)
TResetMetadataResponse execResetMetadata(TResetMetadataRequest req)
static TCatalogObject TableToTCatalogObject(Table table)
void createDataSource(TCreateDataSourceParams params, TDdlExecResponse resp)
void verifySentryServiceEnabled()
void setViewAttributes(TCreateOrAlterViewParams params, org.apache.hadoop.hive.metastore.api.Table view)
boolean containsTable(String dbName, String tableName)
int getNumClusteringCols()
void createDropRole(User requestingUser, TCreateDropRoleParams createDropRoleParams, TDdlExecResponse resp)
static final ScalarType INVALID
void dropStats(TDropStatsParams params, TDdlExecResponse resp)
void watchCacheDirs(List<Long> dirIds, TTableName tblName)
SentryProxy getSentryProxy()
static ColumnStatisticsData createHiveColStatsData(TColumnStats colStats, Type colType)
static org.apache.hadoop.hive.metastore.api.Table createMetaStoreTable(TCreateTableParams params)