package com.cloudera.impala.catalog;

import java.util.Collections;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicReference;

import org.apache.log4j.Logger;

import com.cloudera.impala.thrift.TCatalogObject;
import com.cloudera.impala.thrift.TFunction;
import com.cloudera.impala.thrift.TPartitionKeyValue;
import com.cloudera.impala.thrift.TTableName;

import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
  private static final Logger LOG = Logger.getLogger(Catalog.class);

  // Cache of database metadata, keyed by lower-cased database name.
  protected AtomicReference<ConcurrentHashMap<String, Db>> dbCache_ =
      new AtomicReference<ConcurrentHashMap<String, Db>>(
          new ConcurrentHashMap<String, Db>());

  protected final CatalogObjectCache<HdfsCachePool> hdfsCachePools_ =
      new CatalogObjectCache<HdfsCachePool>(false);
  public Catalog(boolean initMetastoreClientPool) {
    if (initMetastoreClientPool) {
      // ... (metastore client pool initialization elided in this fragment)
    }
  }
  public Db addDb(Db db) {
    return dbCache_.get().put(db.getName().toLowerCase(), db);
  }

  public Db getDb(String dbName) {
    Preconditions.checkState(dbName != null && !dbName.isEmpty(),
        "Null or empty database name given as argument to Catalog.getDb");
    return dbCache_.get().get(dbName.toLowerCase());
  }

  public Db removeDb(String dbName) {
    return dbCache_.get().remove(dbName.toLowerCase());
  }
  // From getTable(String dbName, String tableName):
  return db.getTable(tableName);

  public Table removeTable(TTableName tableName) {
    Db db = getDb(tableName.getDb_name());
    if (db == null) return null;
    return db.removeTable(tableName.getTable_name());
  }

  // From getTableNames(String dbName, String tablePattern):
  Preconditions.checkNotNull(dbName);

  // From containsTable(String dbName, String tableName):
  return (db == null) ? false : db.containsTable(tableName);
  public boolean addDataSource(DataSource dataSource) { return dataSources_.add(dataSource); }

  public DataSource removeDataSource(String dataSourceName) {
    Preconditions.checkNotNull(dataSourceName);
    return dataSources_.remove(dataSourceName.toLowerCase());
  }

  public DataSource getDataSource(String dataSourceName) {
    Preconditions.checkNotNull(dataSourceName);
    return dataSources_.get(dataSourceName.toLowerCase());
  }

  public List<DataSource> getDataSources() { return dataSources_.getValues(); }
  // From getDataSources(String pattern); 'names' holds the data source names matching
  // 'pattern' (its computation is elided in this fragment):
  List<DataSource> dataSources = Lists.newArrayListWithCapacity(names.size());
  for (String name: names) {
    dataSources.add(dataSources_.get(name));
  }
  return dataSources;
  // From addFunction(Function fn), after the function's database has been resolved to 'db':
  if (db == null) return false;
  return db.addFunction(fn);

  // From getFunction(Function desc, Function.CompareMode mode):
  if (db == null) return null;
  return db.getFunction(desc, mode);

  // From getBuiltin(Function desc, Function.CompareMode mode):
  return builtinsDb_.getFunction(desc, mode);

  // From removeFunction(Function desc):
  if (db == null) return null;
  return db.removeFunction(desc);

  // From containsFunction(FunctionName name):
  if (db == null) return false;
  return db.containsFunction(name.getFunction());
  public boolean addHdfsCachePool(HdfsCachePool cachePool) { return hdfsCachePools_.add(cachePool); }

  public HdfsCachePool getHdfsCachePool(String poolName) { return hdfsCachePools_.get(poolName); }

  public void close() { metaStoreClientPool_.close(); }
  private List<String> filterStringsByPattern(Iterable<String> candidates,
      String matchPattern) {
    List<String> filtered = Lists.newArrayList();
    if (matchPattern == null) {
      filtered = Lists.newArrayList(candidates);
    } else {
      PatternMatcher matcher = PatternMatcher.createHivePatternMatcher(matchPattern);
      for (String candidate: candidates) {
        if (matcher.matches(candidate)) filtered.add(candidate);
      }
    }
    Collections.sort(filtered, String.CASE_INSENSITIVE_ORDER);
    return filtered;
  }
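  // Illustration (an assumption, not from the original source): Hive-style patterns treat
  // '*' as "match any sequence of characters" and '|' as a separator between alternative
  // patterns, so a call such as
  //   filterStringsByPattern(Lists.newArrayList("tpch", "tpcds", "functional"), "tpc*")
  // would be expected to return ["tpcds", "tpch"]: both names match "tpc*", and the
  // result is sorted case-insensitively by the logic above.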
  // From getHdfsPartition(String dbName, String tableName,
  //     List<TPartitionKeyValue> partitionSpec):
  String partitionNotFoundMsg =
      "Partition not found: " + Joiner.on(", ").join(partitionSpec);
  // ... (table lookup elided) ...
  ((HdfsTable) table).getPartitionFromThriftPartitionSpec(partitionSpec);
  public TCatalogObject getTCatalogObject(TCatalogObject objectDesc)
      throws CatalogException {
    TCatalogObject result = new TCatalogObject();
    switch (objectDesc.getType()) {
      case DATABASE: {
        Db db = getDb(objectDesc.getDb().getDb_name());
        if (db == null) {
          throw new CatalogException(
              "Database not found: " + objectDesc.getDb().getDb_name());
        }
        result.setType(db.getCatalogObjectType());
        result.setCatalog_version(db.getCatalogVersion());
        result.setDb(db.toThrift());
        break;
      }
      case TABLE:
      case VIEW: {
        Db db = getDb(objectDesc.getTable().getDb_name());
        // ... (missing-database error elided)
        Table table = db.getTable(objectDesc.getTable().getTbl_name());
        if (table == null) {
          // ... (table-not-found error naming objectDesc.getTable().getTbl_name(); elided)
        }
        result.setType(table.getCatalogObjectType());
        result.setCatalog_version(table.getCatalogVersion());
        result.setTable(table.toThrift());
        break;
      }
      case FUNCTION: {
        TFunction tfn = objectDesc.getFn();
        Function desc = Function.fromThrift(tfn);
        // ... (lookup of 'desc' into the catalog Function 'fn' elided)
        result.setType(fn.getCatalogObjectType());
        result.setCatalog_version(fn.getCatalogVersion());
        result.setFn(fn.toThrift());
        break;
      }
      case DATA_SOURCE: {
        String dataSrcName = objectDesc.getData_source().getName();
        DataSource dataSrc = getDataSource(dataSrcName);
        if (dataSrc == null) {
          // ... (data-source-not-found error elided)
        }
        result.setType(dataSrc.getCatalogObjectType());
        result.setCatalog_version(dataSrc.getCatalogVersion());
        result.setData_source(dataSrc.toThrift());
        break;
      }
      case HDFS_CACHE_POOL: {
        HdfsCachePool pool = getHdfsCachePool(objectDesc.getCache_pool().getPool_name());
        if (pool == null) {
          throw new CatalogException(
              "Hdfs cache pool not found: " + objectDesc.getCache_pool().getPool_name());
        }
        result.setType(pool.getCatalogObjectType());
        result.setCatalog_version(pool.getCatalogVersion());
        result.setCache_pool(pool.toThrift());
        break;
      }
      case ROLE: {
        Role role = authPolicy_.getRole(objectDesc.getRole().getRole_name());
        if (role == null) {
          // ... (role-not-found error naming objectDesc.getRole().getRole_name(); elided)
        }
        result.setType(role.getCatalogObjectType());
        result.setCatalog_version(role.getCatalogVersion());
        result.setRole(role.toThrift());
        break;
      }
      case PRIVILEGE: {
        Role tmpRole = authPolicy_.getRole(objectDesc.getPrivilege().getRole_id());
        if (tmpRole == null) {
          // ... (no role with objectDesc.getPrivilege().getRole_id(); error elided)
        }
        for (RolePrivilege p: tmpRole.getPrivileges()) {
          if (p.getName().equalsIgnoreCase(
              objectDesc.getPrivilege().getPrivilege_name())) {
            result.setType(p.getCatalogObjectType());
            result.setCatalog_version(p.getCatalogVersion());
            result.setPrivilege(p.toThrift());
            return result;
          }
        }
        // Otherwise throws; the message formats "privilege: '%s'" from tmpRole.getName()
        // and objectDesc.getPrivilege().getPrivilege_name().
      }
      default:
        throw new IllegalStateException(
            "Unexpected TCatalogObject type: " + objectDesc.getType());
    }
    return result;
  }
static final String BUILTINS_DB
static final String DEFAULT_DB
static final long INITIAL_CATALOG_VERSION
static final int META_STORE_CLIENT_POOL_SIZE
AtomicReference<ConcurrentHashMap<String, Db>> dbCache_
final CatalogObjectCache<DataSource> dataSources_
final CatalogObjectCache<HdfsCachePool> hdfsCachePools_
final MetaStoreClientPool metaStoreClientPool_
AuthorizationPolicy authPolicy_

Catalog(boolean initMetastoreClientPool)

boolean addDataSource(DataSource dataSource)
boolean addFunction(Function fn)
boolean addHdfsCachePool(HdfsCachePool cachePool)
boolean containsFunction(FunctionName name)
boolean containsHdfsPartition(String dbName, String tableName, List<TPartitionKeyValue> partitionSpec)
boolean containsTable(String dbName, String tableName)
DataSource getDataSource(String dataSourceName)
List<DataSource> getDataSources()
List<DataSource> getDataSources(String pattern)
List<String> getDataSourceNames(String pattern)
List<String> getDbNames(String dbPattern)
List<String> getTableNames(String dbName, String tablePattern)
List<String> filterStringsByPattern(Iterable<String> candidates, String matchPattern)
Db removeDb(String dbName)
DataSource removeDataSource(String dataSourceName)
Function getFunction(Function desc, Function.CompareMode mode)
static Function getBuiltin(Function desc, Function.CompareMode mode)
Function removeFunction(Function desc)
HdfsCachePool getHdfsCachePool(String poolName)
HdfsPartition getHdfsPartition(String dbName, String tableName, List<TPartitionKeyValue> partitionSpec)
MetaStoreClient getMetaStoreClient()
Table getTable(String dbName, String tableName)
Table removeTable(TTableName tableName)
TCatalogObject getTCatalogObject(TCatalogObject objectDesc)
List<RolePrivilege> getPrivileges()
boolean matches(String candidate)
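For orientation, here is a minimal usage sketch. It is hypothetical and not part of Catalog.java: the helper class, its method, and the way a Catalog instance is obtained are assumptions for illustration, and it relies only on the lookup conventions visible in the listing above (getDb returning null for an unknown database, containsTable returning a boolean).

// Hypothetical illustration only; CatalogLookupExample does not exist in the Impala sources.
class CatalogLookupExample {
  // Produces a short description of whether dbName.tableName is present in the catalog,
  // using the null/boolean conventions of Catalog.getDb() and Catalog.containsTable().
  static String describeTable(Catalog catalog, String dbName, String tableName) {
    Db db = catalog.getDb(dbName);  // null when the database is not in the cache
    if (db == null) return "unknown database: " + dbName;
    if (!catalog.containsTable(dbName, tableName)) {
      return "database " + dbName + " has no table named " + tableName;
    }
    return dbName + "." + tableName + " is present in the catalog";
  }
}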