package com.cloudera.impala.testutil;

import java.io.File;
import java.io.FileWriter;
import java.util.List;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

import com.cloudera.impala.catalog.Catalog;
import com.cloudera.impala.catalog.Db;
import com.cloudera.impala.catalog.HdfsPartition;
import com.cloudera.impala.catalog.HdfsPartition.FileDescriptor;
import com.cloudera.impala.catalog.HdfsTable;
import com.cloudera.impala.catalog.Table;
import com.cloudera.impala.thrift.ImpalaInternalServiceConstants;

/**
 * Utility that writes, for every HDFS table in the catalog, the block ids of all
 * of the table's files to an output file.
 */
public class BlockIdGenerator {

  @SuppressWarnings("deprecation")
  public static void main(String[] args) throws Exception {
    if (args.length != 1) {
      throw new Exception("Invalid args: BlockIdGenerator <output_file>");
    }

    HdfsConfiguration hdfsConfig = new HdfsConfiguration();
    File output = new File(args[0]);
    FileWriter writer = null;
    try {
      writer = new FileWriter(output);

      // Load all tables in the catalog.
      Catalog catalog = CatalogServiceTestCatalog.create();
      for (String dbName: catalog.getDbNames(null)) {
        Db database = catalog.getDb(dbName);
        for (String tableName: database.getAllTableNames()) {
          Table table = database.getTable(tableName);
          // Only consider HDFS tables.
          if (table == null || !(table instanceof HdfsTable)) {
            continue;
          }
          HdfsTable hdfsTable = (HdfsTable) table;

          // Write the output as <tablename>: <blockid1> <blockid2> ...
          writer.write(tableName + ":");
          for (HdfsPartition partition: hdfsTable.getPartitions()) {
            // Ignore the default partition.
            if (partition.getId() ==
                ImpalaInternalServiceConstants.DEFAULT_PARTITION_ID) {
              continue;
            }
            List<FileDescriptor> fileDescriptors = partition.getFileDescriptors();
            for (FileDescriptor fd : fileDescriptors) {
              Path p = new Path(partition.getLocation(), fd.getFileName());

              // Use a (deprecated) namenode API to look up the blocks of this file.
              DistributedFileSystem dfs =
                  (DistributedFileSystem) p.getFileSystem(hdfsConfig);
              LocatedBlocks locations = dfs.getClient().getNamenode().getBlockLocations(
                  p.toUri().getPath(), 0, fd.getFileLength());

              for (LocatedBlock lb : locations.getLocatedBlocks()) {
                long id = lb.getBlock().getBlockId();
                writer.write(" " + id);
              }
            }
          }
          writer.write("\n");
        }
      }
    } finally {
      if (writer != null) writer.close();
    }
  }
}