Impala
Impala is the open source, native analytic database for Apache Hadoop.
AlterTableAddPartitionStmt.java
// Copyright 2012 Cloudera Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package com.cloudera.impala.analysis;

import com.cloudera.impala.authorization.Privilege;
import com.cloudera.impala.common.AnalysisException;
import com.cloudera.impala.thrift.TAlterTableAddPartitionParams;
import com.cloudera.impala.thrift.TAlterTableParams;
import com.cloudera.impala.thrift.TAlterTableType;
import com.google.common.base.Preconditions;
import org.apache.hadoop.fs.permission.FsAction;

/**
 * Represents an ALTER TABLE ADD PARTITION statement.
 */
public class AlterTableAddPartitionStmt extends AlterTableStmt {
  private final HdfsUri location_;
  private final boolean ifNotExists_;
  private final PartitionSpec partitionSpec_;

  // Caching directive to apply to the new partition, or null if none was given.
  private final HdfsCachingOp cacheOp_;

  public AlterTableAddPartitionStmt(TableName tableName,
      PartitionSpec partitionSpec, HdfsUri location, boolean ifNotExists,
      HdfsCachingOp cacheOp) {
    super(tableName);
    Preconditions.checkState(partitionSpec != null);
    location_ = location;
    ifNotExists_ = ifNotExists;
    partitionSpec_ = partitionSpec;
    partitionSpec_.setTableName(tableName);
    cacheOp_ = cacheOp;
  }

  public boolean getIfNotExists() { return ifNotExists_; }
  public HdfsUri getLocation() { return location_; }

  @Override
  public String toSql() {
    StringBuilder sb = new StringBuilder("ALTER TABLE " + getTbl());
    sb.append(" ADD ");
    if (ifNotExists_) {
      sb.append("IF NOT EXISTS ");
    }
    sb.append(" " + partitionSpec_.toSql());
    if (location_ != null) {
      sb.append(String.format(" LOCATION '%s'", location_));
    }
    if (cacheOp_ != null) sb.append(cacheOp_.toSql());
    return sb.toString();
  }

  // Builds the Thrift request parameters describing this ADD PARTITION operation.
  @Override
  public TAlterTableParams toThrift() {
    TAlterTableParams params = super.toThrift();
    params.setAlter_type(TAlterTableType.ADD_PARTITION);
    TAlterTableAddPartitionParams addPartParams = new TAlterTableAddPartitionParams();
    addPartParams.setPartition_spec(partitionSpec_.toThrift());
    addPartParams.setLocation(location_ == null ? null : location_.toString());
    addPartParams.setIf_not_exists(ifNotExists_);
    if (cacheOp_ != null) addPartParams.setCache_op(cacheOp_.toThrift());
    params.setAdd_partition_params(addPartParams);
    return params;
  }

  @Override
  public void analyze(Analyzer analyzer) throws AnalysisException {
    super.analyze(analyzer);
    // Unless IF NOT EXISTS was specified, analysis requires that the partition
    // does not already exist.
    if (!ifNotExists_) partitionSpec_.setPartitionShouldNotExist();
    partitionSpec_.setPrivilegeRequirement(Privilege.ALTER);
    partitionSpec_.analyze(analyzer);

    if (location_ != null) {
      // A custom partition location requires ALL privileges on the URI and
      // read/write filesystem access.
      location_.analyze(analyzer, Privilege.ALL, FsAction.READ_WRITE);
    }
    if (cacheOp_ != null) cacheOp_.analyze(analyzer);
  }
}
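For orientation, a minimal usage sketch follows. It is hypothetical and not part of this file: in practice the SQL parser constructs AlterTableAddPartitionStmt, and the TableName, PartitionKeyValue, PartitionSpec, and StringLiteral constructors (as well as the example table and partition column names) are assumptions about neighboring frontend classes rather than APIs confirmed by this listing.

// Hypothetical sketch: hand-building the statement for
//   ALTER TABLE alltypes ADD IF NOT EXISTS PARTITION (ds='2012-01-01')
// All helper-class constructors below are assumed, not taken from this file.
package com.cloudera.impala.analysis;

import java.util.List;
import com.google.common.collect.Lists;

public class AddPartitionExample {
  public static void main(String[] args) {
    TableName tableName = new TableName("functional", "alltypes");
    // One key/value pair per partition column in the PARTITION (...) clause.
    List<PartitionKeyValue> keyValues = Lists.newArrayList(
        new PartitionKeyValue("ds", new StringLiteral("2012-01-01")));
    AlterTableAddPartitionStmt stmt = new AlterTableAddPartitionStmt(
        tableName, new PartitionSpec(keyValues), /*location*/ null,
        /*ifNotExists*/ true, /*cacheOp*/ null);
    // Round-trips the statement back to SQL text via toSql().
    System.out.println(stmt.toSql());
  }
}

Calling analyze() before toThrift() would additionally enforce the privilege checks and the partition-must-not-exist rule implemented in analyze() above.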