package com.cloudera.impala.catalog;

import static org.junit.Assert.fail;

import java.util.Map;

import junit.framework.Assert;

import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;

import com.cloudera.impala.analysis.LiteralExpr;
import com.cloudera.impala.testutil.CatalogServiceTestCatalog;
import com.cloudera.impala.thrift.ImpalaInternalServiceConstants;
import com.cloudera.impala.thrift.TAccessLevel;
import com.cloudera.impala.thrift.THBaseTable;
import com.cloudera.impala.thrift.THdfsPartition;
import com.cloudera.impala.thrift.THdfsTable;
import com.cloudera.impala.thrift.TTable;
import com.cloudera.impala.thrift.TTableType;
import com.google.common.collect.Lists;

/**
 * Tests conversion of Catalog objects to and from their Thrift representation.
 */
public class CatalogObjectToFromThriftTest {
  private static CatalogServiceCatalog catalog_;

  @BeforeClass
  public static void setUp() throws Exception {
    catalog_ = CatalogServiceTestCatalog.create();
  }

  @AfterClass
  public static void cleanUp() { catalog_.close(); }
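
  /**
   * Round-trips the partitioned 'alltypes' table through its Thrift representation for
   * several file-format variants of the functional database and validates the result.
   */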
  @Test
  public void TestPartitionedTable() throws CatalogException {
    String[] dbNames = {"functional", "functional_avro", "functional_parquet",
        "functional_seq"};
    for (String dbName: dbNames) {
      Table table = catalog_.getOrLoadTable(dbName, "alltypes");
      TTable thriftTable = table.toThrift();
      Assert.assertEquals(thriftTable.tbl_name, "alltypes");
      Assert.assertEquals(thriftTable.db_name, dbName);
      Assert.assertTrue(thriftTable.isSetTable_type());
      Assert.assertEquals(thriftTable.getClustering_columns().size(), 2);
      Assert.assertEquals(thriftTable.getTable_type(), TTableType.HDFS_TABLE);
      THdfsTable hdfsTable = thriftTable.getHdfs_table();
      Assert.assertTrue(hdfsTable.hdfsBaseDir != null);
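
      // The table should have 24 data partitions plus the default partition.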
      Assert.assertEquals(hdfsTable.getPartitions().size(), 25);
      Assert.assertTrue(hdfsTable.getPartitions().containsKey(
          new Long(ImpalaInternalServiceConstants.DEFAULT_PARTITION_ID)));

      // Only the default partition has no partition-key exprs.
      for (Map.Entry<Long, THdfsPartition> kv: hdfsTable.getPartitions().entrySet()) {
        if (kv.getKey() == ImpalaInternalServiceConstants.DEFAULT_PARTITION_ID) {
          Assert.assertEquals(kv.getValue().getPartitionKeyExprs().size(), 0);
        } else {
          Assert.assertEquals(kv.getValue().getPartitionKeyExprs().size(), 2);
        }
      }
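
      // Load a new Table from the Thrift struct and check that it matches the original.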
      Table newTable = Table.fromThrift(catalog_.getDb(dbName), thriftTable);
      Assert.assertTrue(newTable instanceof HdfsTable);
      Assert.assertEquals(newTable.name_, thriftTable.tbl_name);
      Assert.assertEquals(newTable.numClusteringCols_, 2);
      // Row-count stats are only checked for the 'functional' copy of the table.
      if (dbName.equals("functional")) Assert.assertEquals(7300, newTable.numRows_);

      HdfsTable newHdfsTable = (HdfsTable) newTable;
      Assert.assertEquals(newHdfsTable.getPartitions().size(), 25);
      boolean foundDefaultPartition = false;
      for (HdfsPartition hdfsPart: newHdfsTable.getPartitions()) {
        if (hdfsPart.getId() == ImpalaInternalServiceConstants.DEFAULT_PARTITION_ID) {
          Assert.assertEquals(foundDefaultPartition, false);
          foundDefaultPartition = true;
        } else {
          Assert.assertEquals(hdfsPart.getFileDescriptors().size(), 1);
          Assert.assertTrue(
              hdfsPart.getFileDescriptors().get(0).getFileBlocks().size() > 0);

          // Two partitions of the 'functional_seq' table are expected to be
          // read-only; all other partitions should be read-write.
          if (dbName.equals("functional_seq") && (
              hdfsPart.getPartitionName().equals("year=2009/month=1") ||
              hdfsPart.getPartitionName().equals("year=2009/month=3"))) {
            Assert.assertEquals(TAccessLevel.READ_ONLY, hdfsPart.getAccessLevel());
          } else {
            Assert.assertEquals(TAccessLevel.READ_WRITE, hdfsPart.getAccessLevel());
          }
        }
      }
      Assert.assertEquals(foundDefaultPartition, true);
    }
  }
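
  /**
   * Round-trips a table whose Avro schema (8 columns) does not match the 2-column
   * schema stored for it in the Hive Metastore.
   */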
  @Test
  public void TestMismatchedAvroAndTableSchemas() throws CatalogException {
    Table table = catalog_.getOrLoadTable("functional_avro_snap",
        "schema_resolution_test");
    TTable thriftTable = table.toThrift();
    Assert.assertEquals(thriftTable.tbl_name, "schema_resolution_test");
    Assert.assertTrue(thriftTable.isSetTable_type());
    Assert.assertEquals(thriftTable.getColumns().size(), 8);
    Assert.assertEquals(thriftTable.getClustering_columns().size(), 0);
    Assert.assertEquals(thriftTable.getTable_type(), TTableType.HDFS_TABLE);

    // The reconstructed table keeps all 8 columns even though the Metastore schema
    // only defines 2 columns.
    Table newTable = Table.fromThrift(catalog_.getDb("functional_avro_snap"),
        thriftTable);
    Assert.assertEquals(newTable.getColumns().size(), 8);
    Assert.assertEquals(newTable.getMetaStoreTable().getSd().getCols().size(), 2);
  }

  @Test
  public void TestHBaseTables() throws CatalogException {
    String dbName = "functional_hbase";
    Table table = catalog_.getOrLoadTable(dbName, "alltypes");
    TTable thriftTable = table.toThrift();
    Assert.assertEquals(thriftTable.tbl_name, "alltypes");
    Assert.assertEquals(thriftTable.db_name, dbName);
    Assert.assertTrue(thriftTable.isSetTable_type());
    Assert.assertEquals(thriftTable.getClustering_columns().size(), 1);
    Assert.assertEquals(thriftTable.getTable_type(), TTableType.HBASE_TABLE);
    THBaseTable hbaseTable = thriftTable.getHbase_table();
    Assert.assertEquals(hbaseTable.getFamilies().size(), 13);
    Assert.assertEquals(hbaseTable.getQualifiers().size(), 13);
    Assert.assertEquals(hbaseTable.getBinary_encoded().size(), 13);
    for (boolean isBinaryEncoded: hbaseTable.getBinary_encoded()) {
      // None of the columns in this table are binary encoded.
      Assert.assertTrue(!isBinaryEncoded);
    }

    // Load a new Table from the Thrift struct and verify the columns survived the
    // round trip.
    Table newTable = Table.fromThrift(catalog_.getDb(dbName), thriftTable);
    Assert.assertTrue(newTable instanceof HBaseTable);
    HBaseTable newHBaseTable = (HBaseTable) newTable;
    Assert.assertEquals(newHBaseTable.getColumns().size(), 13);
    Assert.assertEquals(newHBaseTable.getColumn("double_col").getType(), Type.DOUBLE);
    Assert.assertEquals(newHBaseTable.getNumClusteringCols(), 1);
  }

  @Test
  public void TestHBaseTableWithBinaryEncodedCols() throws CatalogException {
    String dbName = "functional_hbase";
    Table table = catalog_.getOrLoadTable(dbName, "alltypessmallbinary");
    TTable thriftTable = table.toThrift();
    Assert.assertEquals(thriftTable.tbl_name, "alltypessmallbinary");
    Assert.assertEquals(thriftTable.db_name, dbName);
    Assert.assertTrue(thriftTable.isSetTable_type());
    Assert.assertEquals(thriftTable.getClustering_columns().size(), 1);
    Assert.assertEquals(thriftTable.getTable_type(), TTableType.HBASE_TABLE);
    THBaseTable hbaseTable = thriftTable.getHbase_table();
    Assert.assertEquals(hbaseTable.getFamilies().size(), 13);
    Assert.assertEquals(hbaseTable.getQualifiers().size(), 13);
    Assert.assertEquals(hbaseTable.getBinary_encoded().size(), 13);
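
    // Count the columns that are marked as binary encoded.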
    int numBinaryEncodedCols = 0;
    for (boolean isBinaryEncoded: hbaseTable.getBinary_encoded()) {
      if (isBinaryEncoded) ++numBinaryEncodedCols;
    }
    Assert.assertEquals(numBinaryEncodedCols, 10);
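
    // Load a new Table from the Thrift struct and verify it is a valid HBase table.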
    Table newTable = Table.fromThrift(catalog_.getDb(dbName), thriftTable);
    Assert.assertTrue(newTable instanceof HBaseTable);
    HBaseTable newHBaseTable = (HBaseTable) newTable;
    Assert.assertEquals(newHBaseTable.getColumns().size(), 13);
    Assert.assertEquals(newHBaseTable.getColumn("double_col").getType(), Type.DOUBLE);
    Assert.assertEquals(newHBaseTable.getNumClusteringCols(), 1);
  }

  @Test
  public void TestTableLoadingErrors() throws CatalogException {
    Table table = catalog_.getOrLoadTable("functional", "hive_index_tbl");
    TTable thriftTable = table.toThrift();
    Assert.assertEquals(thriftTable.tbl_name, "hive_index_tbl");
    Assert.assertEquals(thriftTable.db_name, "functional");

    table = catalog_.getOrLoadTable("functional", "alltypes");
    HdfsTable hdfsTable = (HdfsTable) table;
    HdfsPartition part =
        hdfsTable.getPartitions().get(hdfsTable.getPartitions().size() - 1);
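
    // Try to create a partition whose DECIMAL(1,0) partition-key values are invalid;
    // this is expected to be rejected as malformed metadata.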
    try {
      new HdfsPartition(hdfsTable, part.toHmsPartition(),
          Lists.newArrayList(LiteralExpr.create("1.1", ScalarType.createDecimalType(1, 0)),
              LiteralExpr.create("1.1", ScalarType.createDecimalType(1, 0))),
          // (other HdfsPartition constructor arguments were not preserved here)
          TAccessLevel.READ_WRITE);
      fail("Expected metadata to be malformed.");
    } catch (Exception e) {
      Assert.assertTrue(e.getMessage().contains("invalid DECIMAL(1,0) value: 1.1"));
    }
  }

  @Test
  public void TestView() throws CatalogException {
    Table table = catalog_.getOrLoadTable("functional", "view_view");
    TTable thriftTable = table.toThrift();
    Assert.assertEquals(thriftTable.tbl_name, "view_view");
    Assert.assertEquals(thriftTable.db_name, "functional");
    // Views are carried by the metastore table only; there is no Hdfs/HBase payload.
    Assert.assertFalse(thriftTable.isSetHdfs_table());
    Assert.assertFalse(thriftTable.isSetHbase_table());
    Assert.assertTrue(thriftTable.isSetMetastore_table());
  }
}