Impala
Impala is the open source, native analytic database for Apache Hadoop.
CatalogObjectToFromThriftTest.java
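Frontend unit tests that round-trip catalog objects through their Thrift representations: a partitioned HDFS table across several file formats, an Avro table whose declared columns do not match its Avro schema, HBase tables with and without binary-encoded columns, a malformed partition value, and a view. Most tests load a Table from the test catalog, convert it to a TTable with toThrift(), rebuild it with Table.fromThrift(), and assert that the metadata survives the round trip.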
// Copyright 2013 Cloudera Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package com.cloudera.impala.catalog;

import static org.junit.Assert.fail;

import java.util.Map;

import junit.framework.Assert;

import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;

import com.cloudera.impala.analysis.LiteralExpr;
import com.cloudera.impala.common.AnalysisException;
import com.cloudera.impala.testutil.CatalogServiceTestCatalog;
import com.cloudera.impala.thrift.ImpalaInternalServiceConstants;
import com.cloudera.impala.thrift.TAccessLevel;
import com.cloudera.impala.thrift.THBaseTable;
import com.cloudera.impala.thrift.THdfsPartition;
import com.cloudera.impala.thrift.THdfsTable;
import com.cloudera.impala.thrift.TTable;
import com.cloudera.impala.thrift.TTableType;
import com.google.common.collect.Lists;

/**
 * Verifies that catalog Table objects can be converted to Thrift structs and rebuilt
 * from those structs without losing metadata.
 */
public class CatalogObjectToFromThriftTest {
  private static CatalogServiceCatalog catalog_;

  @BeforeClass
  public static void setUp() throws Exception {
    catalog_ = CatalogServiceTestCatalog.create();
  }

  @AfterClass
  public static void cleanUp() { catalog_.close(); }

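  /**
   * Round-trips the partitioned "alltypes" table through Thrift for several file
   * formats and verifies its partitions, partition key exprs, and access levels.
   */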
  @Test
  public void TestPartitionedTable() throws CatalogException {
    String[] dbNames = {"functional", "functional_avro", "functional_parquet",
        "functional_seq"};
    for (String dbName: dbNames) {
      Table table = catalog_.getOrLoadTable(dbName, "alltypes");
      TTable thriftTable = table.toThrift();
      Assert.assertEquals(thriftTable.tbl_name, "alltypes");
      Assert.assertEquals(thriftTable.db_name, dbName);
      Assert.assertTrue(thriftTable.isSetTable_type());
      Assert.assertEquals(thriftTable.getClustering_columns().size(), 2);
      Assert.assertEquals(thriftTable.getTable_type(), TTableType.HDFS_TABLE);
      THdfsTable hdfsTable = thriftTable.getHdfs_table();
      Assert.assertTrue(hdfsTable.hdfsBaseDir != null);

      // The table has 24 partitions + the default partition.
      Assert.assertEquals(hdfsTable.getPartitions().size(), 25);
      Assert.assertTrue(hdfsTable.getPartitions().containsKey(
          new Long(ImpalaInternalServiceConstants.DEFAULT_PARTITION_ID)));

      for (Map.Entry<Long, THdfsPartition> kv: hdfsTable.getPartitions().entrySet()) {
        if (kv.getKey() == ImpalaInternalServiceConstants.DEFAULT_PARTITION_ID) {
          Assert.assertEquals(kv.getValue().getPartitionKeyExprs().size(), 0);
        } else {
          Assert.assertEquals(kv.getValue().getPartitionKeyExprs().size(), 2);
        }
      }

      // Now try to load the thrift struct.
      Table newTable = Table.fromThrift(catalog_.getDb(dbName), thriftTable);
      Assert.assertTrue(newTable instanceof HdfsTable);
      Assert.assertEquals(newTable.name_, thriftTable.tbl_name);
      Assert.assertEquals(newTable.numClusteringCols_, 2);
      // Currently only have table stats on "functional.alltypes".
      if (dbName.equals("functional")) Assert.assertEquals(7300, newTable.numRows_);

      HdfsTable newHdfsTable = (HdfsTable) newTable;
      Assert.assertEquals(newHdfsTable.getPartitions().size(), 25);
      boolean foundDefaultPartition = false;
      for (HdfsPartition hdfsPart: newHdfsTable.getPartitions()) {
        if (hdfsPart.getId() == ImpalaInternalServiceConstants.DEFAULT_PARTITION_ID) {
          Assert.assertEquals(foundDefaultPartition, false);
          foundDefaultPartition = true;
        } else {
          Assert.assertEquals(hdfsPart.getFileDescriptors().size(), 1);
          Assert.assertTrue(
              hdfsPart.getFileDescriptors().get(0).getFileBlocks().size() > 0);

          // Verify the partition access level is getting set properly. The alltypes_seq
          // table has two partitions that are read_only.
          if (dbName.equals("functional_seq") && (
              hdfsPart.getPartitionName().equals("year=2009/month=1") ||
              hdfsPart.getPartitionName().equals("year=2009/month=3"))) {
            Assert.assertEquals(TAccessLevel.READ_ONLY, hdfsPart.getAccessLevel());
          } else {
            Assert.assertEquals(TAccessLevel.READ_WRITE, hdfsPart.getAccessLevel());
          }
        }
      }
      Assert.assertEquals(foundDefaultPartition, true);
    }
  }

  /**
   * Loads an Avro table whose column definitions do not match its Avro schema,
   * converts it to Thrift, and rebuilds it from the Thrift struct.
   */
  @Test
  public void TestMismatchedAvroAndTableSchemas() throws CatalogException {
    Table table = catalog_.getOrLoadTable("functional_avro_snap",
        "schema_resolution_test");
    TTable thriftTable = table.toThrift();
    Assert.assertEquals(thriftTable.tbl_name, "schema_resolution_test");
    Assert.assertTrue(thriftTable.isSetTable_type());
    Assert.assertEquals(thriftTable.getColumns().size(), 8);
    Assert.assertEquals(thriftTable.getClustering_columns().size(), 0);
    Assert.assertEquals(thriftTable.getTable_type(), TTableType.HDFS_TABLE);

    // Now try to load the thrift struct.
    Table newTable = Table.fromThrift(catalog_.getDb("functional_avro_snap"),
        thriftTable);
    Assert.assertEquals(newTable.getColumns().size(), 8);

    // The table schema does not match the Avro schema - it has only 2 columns.
    Assert.assertEquals(newTable.getMetaStoreTable().getSd().getCols().size(), 2);
  }

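  /**
   * Round-trips the HBase-backed "alltypes" table through Thrift and verifies its
   * column families, qualifiers, and (all false) binary encoding flags.
   */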
  @Test
  public void TestHBaseTables() throws CatalogException {
    String dbName = "functional_hbase";
    Table table = catalog_.getOrLoadTable(dbName, "alltypes");
    TTable thriftTable = table.toThrift();
    Assert.assertEquals(thriftTable.tbl_name, "alltypes");
    Assert.assertEquals(thriftTable.db_name, dbName);
    Assert.assertTrue(thriftTable.isSetTable_type());
    Assert.assertEquals(thriftTable.getClustering_columns().size(), 1);
    Assert.assertEquals(thriftTable.getTable_type(), TTableType.HBASE_TABLE);
    THBaseTable hbaseTable = thriftTable.getHbase_table();
    Assert.assertEquals(hbaseTable.getFamilies().size(), 13);
    Assert.assertEquals(hbaseTable.getQualifiers().size(), 13);
    Assert.assertEquals(hbaseTable.getBinary_encoded().size(), 13);
    for (boolean isBinaryEncoded: hbaseTable.getBinary_encoded()) {
      // None of the columns should be binary encoded.
      Assert.assertTrue(!isBinaryEncoded);
    }

    Table newTable = Table.fromThrift(catalog_.getDb(dbName), thriftTable);
    Assert.assertTrue(newTable instanceof HBaseTable);
    HBaseTable newHBaseTable = (HBaseTable) newTable;
    Assert.assertEquals(newHBaseTable.getColumns().size(), 13);
    Assert.assertEquals(newHBaseTable.getColumn("double_col").getType(),
        Type.DOUBLE);
    Assert.assertEquals(newHBaseTable.getNumClusteringCols(), 1);
  }

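  /**
   * Same round trip for the "alltypessmallbinary" HBase table, which has ten binary
   * encoded columns.
   */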
  @Test
  public void TestHBaseTableWithBinaryEncodedCols()
      throws CatalogException {
    String dbName = "functional_hbase";
    Table table = catalog_.getOrLoadTable(dbName, "alltypessmallbinary");
    TTable thriftTable = table.toThrift();
    Assert.assertEquals(thriftTable.tbl_name, "alltypessmallbinary");
    Assert.assertEquals(thriftTable.db_name, dbName);
    Assert.assertTrue(thriftTable.isSetTable_type());
    Assert.assertEquals(thriftTable.getClustering_columns().size(), 1);
    Assert.assertEquals(thriftTable.getTable_type(), TTableType.HBASE_TABLE);
    THBaseTable hbaseTable = thriftTable.getHbase_table();
    Assert.assertEquals(hbaseTable.getFamilies().size(), 13);
    Assert.assertEquals(hbaseTable.getQualifiers().size(), 13);
    Assert.assertEquals(hbaseTable.getBinary_encoded().size(), 13);

    // Count the number of columns that are binary encoded.
    int numBinaryEncodedCols = 0;
    for (boolean isBinaryEncoded: hbaseTable.getBinary_encoded()) {
      if (isBinaryEncoded) ++numBinaryEncodedCols;
    }
    Assert.assertEquals(numBinaryEncodedCols, 10);

    // Verify that creating a table from this thrift struct results in a valid
    // Table.
    Table newTable = Table.fromThrift(catalog_.getDb(dbName), thriftTable);
    Assert.assertTrue(newTable instanceof HBaseTable);
    HBaseTable newHBaseTable = (HBaseTable) newTable;
    Assert.assertEquals(newHBaseTable.getColumns().size(), 13);
    Assert.assertEquals(newHBaseTable.getColumn("double_col").getType(),
        Type.DOUBLE);
    Assert.assertEquals(newHBaseTable.getNumClusteringCols(), 1);
  }

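  /**
   * Converts "hive_index_tbl" to Thrift, then verifies that constructing an
   * HdfsPartition with an invalid DECIMAL(1,0) partition value fails with an
   * AnalysisException.
   */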
  @Test
  public void TestTableLoadingErrors() throws CatalogException {
    Table table = catalog_.getOrLoadTable("functional", "hive_index_tbl");
    TTable thriftTable = table.toThrift();
    Assert.assertEquals(thriftTable.tbl_name, "hive_index_tbl");
    Assert.assertEquals(thriftTable.db_name, "functional");

    table = catalog_.getOrLoadTable("functional", "alltypes");
    HdfsTable hdfsTable = (HdfsTable) table;
    // Get a partition from the table.
    HdfsPartition part =
        hdfsTable.getPartitions().get(hdfsTable.getPartitions().size() - 1);

    // Create a dummy partition with an invalid decimal type.
    try {
      HdfsPartition dummyPart = new HdfsPartition(hdfsTable, part.toHmsPartition(),
          Lists.newArrayList(LiteralExpr.create("1.1", ScalarType.createDecimalType(1, 0)),
              LiteralExpr.create("1.1", ScalarType.createDecimalType(1, 0))),
          null, Lists.<HdfsPartition.FileDescriptor>newArrayList(),
          TAccessLevel.READ_WRITE);
      fail("Expected metadata to be malformed.");
    } catch (AnalysisException e) {
      Assert.assertTrue(e.getMessage().contains("invalid DECIMAL(1,0) value: 1.1"));
    }
  }

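  /**
   * Converts a view to Thrift and verifies that neither the HDFS nor the HBase table
   * struct is set, only the metastore table.
   */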
  @Test
  public void TestView() throws CatalogException {
    Table table = catalog_.getOrLoadTable("functional", "view_view");
    TTable thriftTable = table.toThrift();
    Assert.assertEquals(thriftTable.tbl_name, "view_view");
    Assert.assertEquals(thriftTable.db_name, "functional");
    Assert.assertFalse(thriftTable.isSetHdfs_table());
    Assert.assertFalse(thriftTable.isSetHbase_table());
    Assert.assertTrue(thriftTable.isSetMetastore_table());
  }
}
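
The tests above all follow the same serialize/deserialize pattern. The fragment below is a minimal sketch of that pattern for reference only, not part of the test file; it assumes the same CatalogServiceTestCatalog setup used in setUp(), the CatalogServiceCatalog field type assumed above, and a surrounding method that declares throws CatalogException, and it omits the assertions.

// Minimal sketch of the round trip exercised by the tests above (reference only).
CatalogServiceCatalog catalog = CatalogServiceTestCatalog.create();
Table table = catalog.getOrLoadTable("functional", "alltypes");  // load catalog metadata
TTable thriftTable = table.toThrift();                           // Java object -> Thrift struct
Table rebuilt = Table.fromThrift(catalog.getDb("functional"), thriftTable);  // Thrift -> Java object
catalog.close();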