1 package com.cloudera.impala.analysis;
6 import org.apache.hadoop.hive.metastore.api.FieldSchema;
14 import com.cloudera.impala.thrift.TPartitionKeyValue;
15 import com.google.common.base.Joiner;
16 import com.google.common.base.Preconditions;
17 import com.google.common.collect.ImmutableList;
18 import com.google.common.collect.Lists;
19 import com.google.common.collect.Sets;
// Constructor body fragment (header not visible in this view): snapshots the
// caller-supplied key/value list into an immutable copy so the spec cannot be
// mutated after construction.
39 this.partitionSpec_ = ImmutableList.copyOf(partitionSpec);
46 public String
getTbl() {
return tableName_.getTbl(); }
// NOTE(review): fragment of analyze(Analyzer) — the method signature and a
// number of original lines (error-message prefixes, closing braces, loop
// headers) are missing from this view, so the code below is not contiguous.
// Comments describe only what the visible lines establish.

// Fully-qualified "db.table" name, used in every error message below.
82 String tableName = table.getDb().getName() +
"." +
getTbl();
// Reject partition specs against unpartitioned tables (body of this branch
// is not visible here).
85 if (table.getMetaStoreTable().getPartitionKeysSize() == 0) {
// Collect the table's partition-column names, lower-cased for
// case-insensitive matching against the user-supplied spec.
95 Set<String> targetPartitionKeys = Sets.newHashSet();
96 for (FieldSchema fs: table.getMetaStoreTable().getPartitionKeys()) {
97 targetPartitionKeys.add(fs.getName().toLowerCase());
// The spec must name exactly as many columns as the table has partition
// columns — no partial (dynamic) specs are accepted here.
101 if (targetPartitionKeys.size() != partitionSpec_.size()) {
102 throw new AnalysisException(String.format(
"Items in partition spec must exactly " +
103 "match the partition columns in the table definition: %s (%d vs %d)",
104 tableName, partitionSpec_.size(), targetPartitionKeys.size()));
// Tracks spec columns seen so far; Set.add() returning false flags a
// duplicate column in the spec (the thrown error's prefix lines are not
// visible in this view).
107 Set<String> keyNames = Sets.newHashSet();
112 if (!keyNames.add(pk.getColName().toLowerCase())) {
// The named column must exist in the table at all ...
116 Column c = table.getColumn(pk.getColName());
119 "Partition column '%s' not found in table: %s", pk.getColName(), tableName));
120 }
// ... and must be one of the partition columns, not a data column.
else if (!targetPartitionKeys.contains(pk.getColName().toLowerCase())) {
122 "Column '%s' is not a partition column in table: %s",
123 pk.getColName(), tableName));
// Type check: the literal value must be assignment-compatible with the
// partition column's declared type.
129 Type colType = c.getType();
130 Type literalType = pk.getValue().getType();
131 Type compatibleType =
132 Type.getAssignmentCompatibleType(colType, literalType);
133 if (!compatibleType.
isValid()) {
135 +
"has incompatible type: '%s'. Expected type: '%s'.",
136 pk.getColName(), literalType, colType));
// Compatible but wider than the column type means the value would have to
// be down-cast — rejected to avoid silent precision loss.
139 if (!compatibleType.equals(colType)) {
141 String.format(
"Partition key value may result in loss of precision.\n" +
142 "Would need to cast '%s' to '%s' for partition column: %s",
143 pk.getValue().toSql(), colType.toString(), pk.getColName()));
// Partition specs are only meaningful for HDFS-backed tables; anything else
// is an internal error, hence checkState rather than AnalysisException.
147 Preconditions.checkState(table instanceof
HdfsTable);
// NOTE(review): fragment of toThrift() — the enclosing loop header and part
// of the getPartitionKeyValueString(...) call are missing from this view.
// Converts each analyzed PartitionKeyValue into its Thrift wire form.
167 List<TPartitionKeyValue> thriftPartitionSpec = Lists.newArrayList();
// String rendering of the partition value (presumably substituting the
// table's null-partition sentinel for NULL literals — TODO confirm, the
// call's arguments are cut off here).
169 String value = PartitionKeyValue.getPartitionKeyValueString(
171 thriftPartitionSpec.add(
new TPartitionKeyValue(kv.getColName(), value));
173 return thriftPartitionSpec;
// NOTE(review): fragment of the SQL-rendering method — the loop header over
// partitionSpec_ is missing from this view.
// Builds one "col=value" string per partition key ...
178 List<String> partitionSpecStr = Lists.newArrayList();
180 partitionSpecStr.add(kv.getColName() +
"=" + kv.getValue().toSql());
// ... and joins them into the canonical "PARTITION (k1=v1, k2=v2)" clause.
182 return String.format(
"PARTITION (%s)", Joiner.on(
", ").join(partitionSpecStr));
List< TPartitionKeyValue > toThrift()
void setPartitionShouldExist()
List< PartitionKeyValue > getPartitionSpecKeyValues()
boolean partitionExists()
void setPartitionShouldNotExist()
String getNullPartitionKeyValue()
void setTableName(TableName tableName)
Privilege privilegeRequirement_
String nullPartitionKeyValue_
final ImmutableList< PartitionKeyValue > partitionSpec_
PartitionSpec(List< PartitionKeyValue > partitionSpec)
void analyze(Analyzer analyzer)
void setPrivilegeRequirement(Privilege privilege)
Boolean partitionShouldExist_