Impala
Impala is the open source, native analytic database for Apache Hadoop.
HdfsPartition.java
1 // Copyright 2012 Cloudera Inc.
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 // http://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14 
15 package com.cloudera.impala.catalog;
16 
17 import java.util.ArrayList;
18 import java.util.Arrays;
19 import java.util.List;
20 import java.util.Map;
21 import java.util.concurrent.atomic.AtomicLong;
22 
23 import org.apache.commons.lang.ArrayUtils;
24 import org.slf4j.Logger;
25 import org.slf4j.LoggerFactory;
26 
27 import com.cloudera.impala.analysis.Expr;
28 import com.cloudera.impala.analysis.LiteralExpr;
29 import com.cloudera.impala.analysis.NullLiteral;
30 import com.cloudera.impala.analysis.PartitionKeyValue;
31 import com.cloudera.impala.analysis.ToSqlUtils;
32 import com.cloudera.impala.common.ImpalaException;
33 import com.cloudera.impala.thrift.ImpalaInternalServiceConstants;
34 import com.cloudera.impala.thrift.TAccessLevel;
35 import com.cloudera.impala.thrift.TExpr;
36 import com.cloudera.impala.thrift.TExprNode;
37 import com.cloudera.impala.thrift.THdfsCompression;
38 import com.cloudera.impala.thrift.THdfsFileBlock;
39 import com.cloudera.impala.thrift.THdfsFileDesc;
40 import com.cloudera.impala.thrift.THdfsPartition;
41 import com.cloudera.impala.thrift.TNetworkAddress;
42 import com.cloudera.impala.thrift.TPartitionStats;
43 import com.cloudera.impala.thrift.TTableStats;
45 import com.google.common.base.Joiner;
46 import com.google.common.base.Objects;
47 import com.google.common.base.Preconditions;
48 import com.google.common.collect.ImmutableList;
49 import com.google.common.collect.ImmutableMap;
50 import com.google.common.collect.Lists;
51 import com.google.common.collect.Maps;
52 import com.google.common.annotations.VisibleForTesting;
53 
60 public class HdfsPartition implements Comparable<HdfsPartition> {
65  static public class FileDescriptor {
66  private final THdfsFileDesc fileDescriptor_;
67 
68  public String getFileName() { return fileDescriptor_.getFile_name(); }
69  public long getFileLength() { return fileDescriptor_.getLength(); }
70  public THdfsCompression getFileCompression() {
71  return fileDescriptor_.getCompression();
72  }
73  public long getModificationTime() {
74  return fileDescriptor_.getLast_modification_time();
75  }
76  public List<THdfsFileBlock> getFileBlocks() {
77  return fileDescriptor_.getFile_blocks();
78  }
79 
80  public THdfsFileDesc toThrift() { return fileDescriptor_; }
81 
82  public FileDescriptor(String fileName, long fileLength, long modificationTime) {
83  Preconditions.checkNotNull(fileName);
84  Preconditions.checkArgument(fileLength >= 0);
85  fileDescriptor_ = new THdfsFileDesc();
86  fileDescriptor_.setFile_name(fileName);
87  fileDescriptor_.setLength(fileLength);
88  fileDescriptor_.setLast_modification_time(modificationTime);
89  fileDescriptor_.setCompression(
90  HdfsCompression.fromFileName(fileName).toThrift());
91  List<THdfsFileBlock> emptyFileBlockList = Lists.newArrayList();
92  fileDescriptor_.setFile_blocks(emptyFileBlockList);
93  }
94 
95  private FileDescriptor(THdfsFileDesc fileDesc) {
96  this(fileDesc.getFile_name(), fileDesc.length, fileDesc.last_modification_time);
97  for (THdfsFileBlock block: fileDesc.getFile_blocks()) {
98  fileDescriptor_.addToFile_blocks(block);
99  }
100  }
101 
102  public void addFileBlock(FileBlock blockMd) {
103  fileDescriptor_.addToFile_blocks(blockMd.toThrift());
104  }
105 
106  public static FileDescriptor fromThrift(THdfsFileDesc desc) {
107  return new FileDescriptor(desc);
108  }
109 
110  @Override
111  public String toString() {
112  return Objects.toStringHelper(this)
113  .add("FileName", getFileName())
114  .add("Length", getFileLength()).toString();
115  }
116  }
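For illustration, a minimal sketch of how catalog loading code might build a descriptor and attach block metadata (the file name, timestamp, and host indexes below are hypothetical):

  FileDescriptor fd = new FileDescriptor("000000_0.snappy", 4096L, 1400000000000L);
  fd.addFileBlock(new FileBlock(0L, 4096L,
      Lists.newArrayList(new BlockReplica(0, true), new BlockReplica(1, false))));
  THdfsFileDesc thriftFd = fd.toThrift();

The compression type is inferred from the file name suffix by HdfsCompression.fromFileName(), so no explicit codec needs to be passed.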
117 
121  public static class BlockReplica {
122  private final boolean isCached_;
123  private final int hostIdx_;
124 
130  public BlockReplica(int hostIdx, boolean isCached) {
131  hostIdx_ = hostIdx;
132  isCached_ = isCached;
133  }
134 
139  public static TNetworkAddress parseLocation(String location) {
140  Preconditions.checkNotNull(location);
141  String[] ip_port = location.split(":");
142  if (ip_port.length != 2) return null;
143  try {
144  return new TNetworkAddress(ip_port[0], Integer.parseInt(ip_port[1]));
145  } catch (NumberFormatException e) {
146  return null;
147  }
148  }
149 
150  public boolean isCached() { return isCached_; }
151  public int getHostIdx() { return hostIdx_; }
152  }
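parseLocation() expects a single host:port pair and returns null for anything else; a quick sketch (the datanode address is hypothetical):

  TNetworkAddress addr = BlockReplica.parseLocation("datanode-1:50010"); // host "datanode-1", port 50010
  TNetworkAddress bad  = BlockReplica.parseLocation("datanode-1");       // null: no port separator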
153 
157  public static class FileBlock {
158  private final THdfsFileBlock fileBlock_;
159  private boolean isCached_; // Set to true if there is at least one cached replica.
160 
161  private FileBlock(THdfsFileBlock fileBlock) {
162  fileBlock_ = fileBlock;
163  isCached_ = false;
164  for (boolean isCached: fileBlock.getIs_replica_cached()) {
165  isCached_ |= isCached;
166  }
167  }
168 
174  public FileBlock(long offset, long blockLength,
175  List<BlockReplica> replicaHostIdxs) {
176  Preconditions.checkNotNull(replicaHostIdxs);
177  fileBlock_ = new THdfsFileBlock();
178  fileBlock_.setOffset(offset);
179  fileBlock_.setLength(blockLength);
180 
181  fileBlock_.setReplica_host_idxs(new ArrayList<Integer>(replicaHostIdxs.size()));
182  fileBlock_.setIs_replica_cached(new ArrayList<Boolean>(replicaHostIdxs.size()));
183  isCached_ = false;
184  for (BlockReplica replica: replicaHostIdxs) {
185  fileBlock_.addToReplica_host_idxs(replica.getHostIdx());
186  fileBlock_.addToIs_replica_cached(replica.isCached());
187  isCached_ |= replica.isCached();
188  }
189  }
190 
191  public long getOffset() { return fileBlock_.getOffset(); }
192  public long getLength() { return fileBlock_.getLength(); }
 193  // Returns true if there is at least one cached replica.
194  public boolean isCached() { return isCached_; }
195  public List<Integer> getReplicaHostIdxs() {
196  return fileBlock_.getReplica_host_idxs();
197  }
198 
204  public static void setDiskIds(int[] diskIds, THdfsFileBlock fileBlock) {
205  Preconditions.checkArgument(
206  diskIds.length == fileBlock.getReplica_host_idxs().size());
207  fileBlock.setDisk_ids(Arrays.asList(ArrayUtils.toObject(diskIds)));
208  }
209 
214  public int getDiskId(int hostIndex) {
215  if (fileBlock_.disk_ids == null) return -1;
216  return fileBlock_.getDisk_ids().get(hostIndex);
217  }
218 
219  public boolean isCached(int hostIndex) {
220  return fileBlock_.getIs_replica_cached().get(hostIndex);
221  }
222 
223  public THdfsFileBlock toThrift() { return fileBlock_; }
224 
225  public static FileBlock fromThrift(THdfsFileBlock thriftFileBlock) {
226  return new FileBlock(thriftFileBlock);
227  }
228 
229  @Override
230  public String toString() {
231  return Objects.toStringHelper(this)
232  .add("offset", fileBlock_.offset)
233  .add("length", fileBlock_.length)
234  .add("#disks", fileBlock_.getDisk_idsSize())
235  .toString();
236  }
237  }
238 
239  private final HdfsTable table_;
240  private final List<LiteralExpr> partitionKeyValues_;
241  // estimated number of rows in partition; -1: unknown
242  private long numRows_ = -1;
243  private static AtomicLong partitionIdCounter_ = new AtomicLong();
244 
245  // A unique ID for each partition, used to identify a partition in the thrift
246  // representation of a table.
247  private final long id_;
248 
249  /*
250  * Note: Although you can write multiple formats to a single partition (by changing
251  * the format before each write), Hive won't let you read that data and neither should
252  * we. We should therefore treat mixing formats inside one partition as user error.
253  * It's easy to add per-file metadata to FileDescriptor if this changes.
254  */
255  private final HdfsStorageDescriptor fileFormatDescriptor_;
256 
257  private final List<FileDescriptor> fileDescriptors_;
258  private String location_;
259  private final static Logger LOG = LoggerFactory.getLogger(HdfsPartition.class);
260  private boolean isDirty_ = false;
261  // True if this partition is marked as cached. Does not necessarily mean the data is
262  // cached.
263  private boolean isMarkedCached_ = false;
264  private final TAccessLevel accessLevel_;
265 
266  // (k,v) pairs of parameters for this partition, stored in the HMS. Used by Impala to
267  // store intermediate state for statistics computations.
268  private Map<String, String> hmsParameters_;
269 
270  public HdfsStorageDescriptor getInputFormatDescriptor() {
271  return fileFormatDescriptor_;
272  }
273 
274  public boolean isDefaultPartition() {
275  return id_ == ImpalaInternalServiceConstants.DEFAULT_PARTITION_ID;
276  }
277 
287  public String getPartitionName() {
288  List<String> partitionCols = Lists.newArrayList();
289  List<String> partitionValues = Lists.newArrayList();
290  for (int i = 0; i < getTable().getNumClusteringCols(); ++i) {
291  partitionCols.add(getTable().getColumns().get(i).getName());
292  }
293 
294  return org.apache.hadoop.hive.common.FileUtils.makePartName(
295  partitionCols, getPartitionValuesAsStrings(true));
296  }
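As a sketch of the output, for a table clustered on (year, month) with values (2014, NULL) this returns something like:

  String name = partition.getPartitionName();
  // e.g. "year=2014/month=__HIVE_DEFAULT_PARTITION__", assuming the table's
  // null partition key value is Hive's default; FileUtils.makePartName() also
  // escapes characters that are not legal in a path component.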
297 
303  public List<String> getPartitionValuesAsStrings(boolean mapNullsToHiveKey) {
304  List<String> ret = Lists.newArrayList();
305  for (LiteralExpr partValue: getPartitionValues()) {
306  if (mapNullsToHiveKey) {
307  ret.add(PartitionKeyValue.getPartitionKeyValueString(
308  partValue, getTable().getNullPartitionKeyValue()));
309  } else {
310  ret.add(partValue.getStringValue());
311  }
312  }
313  return ret;
314  }
315 
322  public String getConjunctSql() {
323  List<String> partitionCols = Lists.newArrayList();
324  List<String> partitionValues = Lists.newArrayList();
325  for (int i = 0; i < getTable().getNumClusteringCols(); ++i) {
326  partitionCols.add(ToSqlUtils.getIdentSql(getTable().getColumns().get(i).getName()));
327  }
328 
329  List<String> conjuncts = Lists.newArrayList();
330  for (int i = 0; i < partitionCols.size(); ++i) {
331  LiteralExpr expr = getPartitionValues().get(i);
332  String sql = expr.toSql();
333  if (expr instanceof NullLiteral || sql.isEmpty()) {
334  conjuncts.add(ToSqlUtils.getIdentSql(partitionCols.get(i))
335  + " IS NULL");
336  } else {
337  conjuncts.add(ToSqlUtils.getIdentSql(partitionCols.get(i))
338  + "=" + sql);
339  }
340  }
341  return "(" + Joiner.on(" AND " ).join(conjuncts) + ")";
342  }
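The returned conjunct can be appended to a WHERE clause to select exactly this partition. A hypothetical example for the same (year, month) partition:

  String conjunct = partition.getConjunctSql();
  // e.g. "(year=2014 AND month IS NULL)"; ToSqlUtils.getIdentSql() may quote
  // the column identifiers if they require it.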
343 
347  public String getValuesAsString() {
348  StringBuilder partDescription = new StringBuilder();
349  for (int i = 0; i < getTable().getNumClusteringCols(); ++i) {
350  String columnName = getTable().getColumns().get(i).getName();
351  String value = PartitionKeyValue.getPartitionKeyValueString(
352  getPartitionValues().get(i),
353  getTable().getNullPartitionKeyValue());
354  partDescription.append(columnName + "=" + value);
355  if (i != getTable().getNumClusteringCols() - 1) partDescription.append("/");
356  }
357  return partDescription.toString();
358  }
359 
364  public String getLocation() { return location_; }
365  public long getId() { return id_; }
366  public HdfsTable getTable() { return table_; }
367  public void setNumRows(long numRows) { numRows_ = numRows; }
368  public long getNumRows() { return numRows_; }
369  public boolean isMarkedCached() { return isMarkedCached_; }
370  void markCached() { isMarkedCached_ = true; }
371 
372  public void setFileFormat(HdfsFileFormat fileFormat) {
373  fileFormatDescriptor_.setFileFormat(fileFormat);
374  }
375 
376  public void setLocation(String location) { location_ = location; }
377 
378  public org.apache.hadoop.hive.metastore.api.SerDeInfo getSerdeInfo() {
379  return cachedMsPartitionDescriptor_.sdSerdeInfo;
380  }
381 
382  // May return null if no per-partition stats were recorded, or if the per-partition
383  // stats could not be deserialised from the parameter map.
384  public TPartitionStats getPartitionStats() {
385  try {
386  return PartitionStatsUtil.partStatsFromParameters(hmsParameters_);
387  } catch (ImpalaException e) {
388  LOG.warn("Could not deserialise incremental stats state for " + getPartitionName() +
389  ", consider DROP INCREMENTAL STATS ... PARTITION ... and recomputing " +
390  "incremental stats for this table.");
391  return null;
392  }
393  }
394 
395  public boolean hasIncrementalStats() {
396  TPartitionStats partStats = getPartitionStats();
397  return partStats != null && partStats.intermediate_col_stats != null;
398  }
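A sketch of how a caller might combine these accessors when deciding whether to recompute statistics (recomputeIncrementalStats is a hypothetical helper, not part of this class):

  if (!partition.hasIncrementalStats()) {
    // Stats are missing or could not be deserialised from hmsParameters_.
    recomputeIncrementalStats(partition);
  }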
399 
404  public TAccessLevel getAccessLevel() { return accessLevel_; }
405 
409  public String getParameter(String key) {
410  return hmsParameters_.get(key);
411  }
412 
413  public Map<String, String> getParameters() { return hmsParameters_; }
414 
415  public void putToParameters(String k, String v) { hmsParameters_.put(k, v); }
416 
422  public void markDirty() { isDirty_ = true; }
423  public boolean isDirty() { return isDirty_; }
424 
428  public List<LiteralExpr> getPartitionValues() { return partitionKeyValues_; }
429  public List<HdfsPartition.FileDescriptor> getFileDescriptors() {
430  return fileDescriptors_;
431  }
432 
433  public boolean hasFileDescriptors() { return !fileDescriptors_.isEmpty(); }
434 
435  // Struct-style class for caching all the information we need to reconstruct an
436  // HMS-compatible Partition object, for use in RPCs to the metastore. We do this rather
437  // than cache the Thrift partition object itself as the latter can be large - thanks
438  // mostly to the inclusion of the full FieldSchema list. This class is read-only - if
439  // any field can be mutated by Impala it should belong to HdfsPartition itself (see
440  // HdfsPartition.location_ for an example).
441  //
442  // TODO: Cache this descriptor in HdfsTable so that identical descriptors are shared
443  // between HdfsPartition instances.
444  private static class CachedHmsPartitionDescriptor {
445  public final String sdOutputFormat;
446  public final boolean sdCompressed;
447  public final int sdNumBuckets;
448  public final org.apache.hadoop.hive.metastore.api.SerDeInfo sdSerdeInfo;
449  public final List<String> sdBucketCols;
450  public final List<org.apache.hadoop.hive.metastore.api.Order> sdSortCols;
451  public final Map<String, String> sdParameters;
452  public final int msCreateTime;
453  public final int msLastAccessTime;
454 
455  public CachedHmsPartitionDescriptor(
456  org.apache.hadoop.hive.metastore.api.Partition msPartition) {
457  org.apache.hadoop.hive.metastore.api.StorageDescriptor sd = null;
458  if (msPartition != null) {
459  sd = msPartition.getSd();
460  msCreateTime = msPartition.getCreateTime();
461  msLastAccessTime = msPartition.getLastAccessTime();
462  } else {
463  msCreateTime = msLastAccessTime = 0;
464  }
465  if (sd != null) {
466  sdOutputFormat = sd.getOutputFormat();
467  sdCompressed = sd.isCompressed();
468  sdNumBuckets = sd.getNumBuckets();
469  sdSerdeInfo = sd.getSerdeInfo();
470  sdBucketCols = ImmutableList.copyOf(sd.getBucketCols());
471  sdSortCols = ImmutableList.copyOf(sd.getSortCols());
472  sdParameters = ImmutableMap.copyOf(sd.getParameters());
473  } else {
474  sdOutputFormat = "";
475  sdCompressed = false;
476  sdNumBuckets = 0;
477  sdSerdeInfo = null;
478  sdBucketCols = ImmutableList.of();
479  sdSortCols = ImmutableList.of();
480  sdParameters = ImmutableMap.of();
481  }
482  }
483  }
484 
485  private final CachedHmsPartitionDescriptor cachedMsPartitionDescriptor_;
486 
491  public org.apache.hadoop.hive.metastore.api.Partition toHmsPartition() {
492  if (cachedMsPartitionDescriptor_ == null) return null;
493  Preconditions.checkNotNull(table_.getFieldSchemas());
494  org.apache.hadoop.hive.metastore.api.StorageDescriptor storageDescriptor =
495  new org.apache.hadoop.hive.metastore.api.StorageDescriptor(
496  table_.getNonPartitionFieldSchemas(), location_,
497  fileFormatDescriptor_.getFileFormat().toJavaClassName(),
498  cachedMsPartitionDescriptor_.sdOutputFormat,
499  cachedMsPartitionDescriptor_.sdCompressed,
500  cachedMsPartitionDescriptor_.sdNumBuckets,
501  cachedMsPartitionDescriptor_.sdSerdeInfo,
502  cachedMsPartitionDescriptor_.sdBucketCols,
503  cachedMsPartitionDescriptor_.sdSortCols,
504  cachedMsPartitionDescriptor_.sdParameters);
505  org.apache.hadoop.hive.metastore.api.Partition partition =
506  new org.apache.hadoop.hive.metastore.api.Partition(
507  getPartitionValuesAsStrings(true), getTable().getDb().getName(),
508  getTable().getName(), cachedMsPartitionDescriptor_.msCreateTime,
509  cachedMsPartitionDescriptor_.msLastAccessTime, storageDescriptor,
510  getParameters());
511  return partition;
512  }
513 
514  private HdfsPartition(HdfsTable table,
515  org.apache.hadoop.hive.metastore.api.Partition msPartition,
516  List<LiteralExpr> partitionKeyValues,
517  HdfsStorageDescriptor fileFormatDescriptor,
518  List<HdfsPartition.FileDescriptor> fileDescriptors, long id,
519  String location, TAccessLevel accessLevel) {
520  table_ = table;
521  if (msPartition == null) {
522  cachedMsPartitionDescriptor_ = null;
523  } else {
524  cachedMsPartitionDescriptor_ = new CachedHmsPartitionDescriptor(msPartition);
525  }
526  location_ = location;
527  partitionKeyValues_ = ImmutableList.copyOf(partitionKeyValues);
528  fileDescriptors_ = ImmutableList.copyOf(fileDescriptors);
529  fileFormatDescriptor_ = fileFormatDescriptor;
530  id_ = id;
531  accessLevel_ = accessLevel;
532  if (msPartition != null && msPartition.getParameters() != null) {
533  isMarkedCached_ = HdfsCachingUtil.getCacheDirectiveId(
534  msPartition.getParameters()) != null;
535  hmsParameters_ = msPartition.getParameters();
536  } else {
537  hmsParameters_ = Maps.newHashMap();
538  }
539 
540  // TODO: instead of raising an exception, we should consider marking this partition
541  // invalid and moving on, so that table loading won't fail and user can query other
542  // partitions.
543  for (FileDescriptor fileDescriptor: fileDescriptors_) {
544  StringBuilder errorMsg = new StringBuilder();
545  if (!getInputFormatDescriptor().getFileFormat().isFileCompressionTypeSupported(
546  fileDescriptor.getFileName(), errorMsg)) {
547  throw new RuntimeException(errorMsg.toString());
548  }
549  }
550  }
551 
552  public HdfsPartition(HdfsTable table,
553  org.apache.hadoop.hive.metastore.api.Partition msPartition,
554  List<LiteralExpr> partitionKeyValues,
555  HdfsStorageDescriptor fileFormatDescriptor,
556  List<HdfsPartition.FileDescriptor> fileDescriptors, TAccessLevel accessLevel) {
557  this(table, msPartition, partitionKeyValues, fileFormatDescriptor, fileDescriptors,
558  partitionIdCounter_.getAndIncrement(), msPartition != null ?
559  msPartition.getSd().getLocation() : table.getLocation(), accessLevel);
560  }
561 
562  public static HdfsPartition defaultPartition(
563  HdfsTable table, HdfsStorageDescriptor storageDescriptor) {
564  List<LiteralExpr> emptyExprList = Lists.newArrayList();
565  List<FileDescriptor> emptyFileDescriptorList = Lists.newArrayList();
566  return new HdfsPartition(table, null, emptyExprList,
567  storageDescriptor, emptyFileDescriptorList,
568  ImpalaInternalServiceConstants.DEFAULT_PARTITION_ID, null,
569  TAccessLevel.READ_WRITE);
570  }
571 
575  public long getSize() {
576  long result = 0;
577  for (HdfsPartition.FileDescriptor fileDescriptor: fileDescriptors_) {
578  result += fileDescriptor.getFileLength();
579  }
580  return result;
581  }
582 
583  @Override
584  public String toString() {
585  return Objects.toStringHelper(this)
586  .add("fileDescriptors", fileDescriptors_)
587  .toString();
588  }
589 
590  public static HdfsPartition fromThrift(HdfsTable table,
591  long id, THdfsPartition thriftPartition) {
592  HdfsStorageDescriptor storageDesc = new HdfsStorageDescriptor(table.getName(),
593  HdfsFileFormat.fromThrift(thriftPartition.getFileFormat()),
594  thriftPartition.lineDelim,
595  thriftPartition.fieldDelim,
596  thriftPartition.collectionDelim,
597  thriftPartition.mapKeyDelim,
598  thriftPartition.escapeChar,
599  (byte) '"', // TODO: We should probably add quoteChar to THdfsPartition.
600  thriftPartition.blockSize);
601 
602  List<LiteralExpr> literalExpr = Lists.newArrayList();
603  if (id != ImpalaInternalServiceConstants.DEFAULT_PARTITION_ID) {
604  List<Column> clusterCols = Lists.newArrayList();
605  for (int i = 0; i < table.getNumClusteringCols(); ++i) {
606  clusterCols.add(table.getColumns().get(i));
607  }
608 
609  List<TExprNode> exprNodes = Lists.newArrayList();
610  for (TExpr expr: thriftPartition.getPartitionKeyExprs()) {
611  for (TExprNode node: expr.getNodes()) {
612  exprNodes.add(node);
613  }
614  }
615  Preconditions.checkState(clusterCols.size() == exprNodes.size(),
616  String.format("Number of partition columns (%d) does not match number " +
617  "of partition key expressions (%d)",
618  clusterCols.size(), exprNodes.size()));
619 
620  for (int i = 0; i < exprNodes.size(); ++i) {
621  literalExpr.add(LiteralExpr.fromThrift(
622  exprNodes.get(i), clusterCols.get(i).getType()));
623  }
624  }
625 
626  List<HdfsPartition.FileDescriptor> fileDescriptors = Lists.newArrayList();
627  if (thriftPartition.isSetFile_desc()) {
628  for (THdfsFileDesc desc: thriftPartition.getFile_desc()) {
629  fileDescriptors.add(HdfsPartition.FileDescriptor.fromThrift(desc));
630  }
631  }
632 
633  TAccessLevel accessLevel = thriftPartition.isSetAccess_level() ?
634  thriftPartition.getAccess_level() : TAccessLevel.READ_WRITE;
635  HdfsPartition partition = new HdfsPartition(table, null, literalExpr, storageDesc,
636  fileDescriptors, id, thriftPartition.getLocation(), accessLevel);
637  if (thriftPartition.isSetStats()) {
638  partition.setNumRows(thriftPartition.getStats().getNum_rows());
639  }
640  if (thriftPartition.isSetIs_marked_cached()) {
641  partition.isMarkedCached_ = thriftPartition.isIs_marked_cached();
642  }
643 
644  if (thriftPartition.isSetHms_parameters()) {
645  partition.hmsParameters_ = thriftPartition.getHms_parameters();
646  } else {
647  partition.hmsParameters_ = Maps.newHashMap();
648  }
649 
650  return partition;
651  }
652 
658  public void checkWellFormed() throws CatalogException {
659  try {
660  // Validate all the partition key/values to ensure you can convert them toThrift()
661  Expr.treesToThrift(getPartitionValues());
662  } catch (Exception e) {
663  throw new CatalogException("Partition (" + getPartitionName() +
664  ") has invalid partition column values: ", e);
665  }
666  }
667 
668  public THdfsPartition toThrift(boolean includeFileDesc) {
669  List<TExpr> thriftExprs = Expr.treesToThrift(getPartitionValues());
670 
 671  THdfsPartition thriftHdfsPart = new THdfsPartition(
 672  fileFormatDescriptor_.getLineDelim(),
 673  fileFormatDescriptor_.getFieldDelim(),
 674  fileFormatDescriptor_.getCollectionDelim(),
 675  fileFormatDescriptor_.getMapKeyDelim(),
 676  fileFormatDescriptor_.getEscapeChar(),
 677  fileFormatDescriptor_.getFileFormat().toThrift(), thriftExprs,
 678  fileFormatDescriptor_.getBlockSize());
679  thriftHdfsPart.setLocation(location_);
680  thriftHdfsPart.setStats(new TTableStats(numRows_));
681  thriftHdfsPart.setAccess_level(accessLevel_);
682  thriftHdfsPart.setIs_marked_cached(isMarkedCached_);
683  thriftHdfsPart.setId(getId());
684  thriftHdfsPart.setHms_parameters(hmsParameters_);
685  if (includeFileDesc) {
686  // Add block location information
687  for (FileDescriptor fd: fileDescriptors_) {
688  thriftHdfsPart.addToFile_desc(fd.toThrift());
689  }
690  }
691 
692  return thriftHdfsPart;
693  }
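A minimal round-trip sketch, assuming table is the owning HdfsTable; includeFileDesc controls whether per-file block metadata is serialised along with the partition:

  THdfsPartition t = partition.toThrift(true);
  HdfsPartition copy = HdfsPartition.fromThrift(table, partition.getId(), t);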
694 
698  @Override
699  public int compareTo(HdfsPartition o) {
700  return comparePartitionKeyValues(partitionKeyValues_, o.getPartitionValues());
701  }
702 
703  @VisibleForTesting
704  public static int comparePartitionKeyValues(List<LiteralExpr> lhs,
705  List<LiteralExpr> rhs) {
706  int sizeDiff = lhs.size() - rhs.size();
707  if (sizeDiff != 0) return sizeDiff;
708  for(int i = 0; i < lhs.size(); ++i) {
709  int cmp = lhs.get(i).compareTo(rhs.get(i));
710  if (cmp != 0) return cmp;
711  }
712  return 0;
713  }
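Comparison is by key-list size first and then element-wise, so partitions keyed by (2014, 1) and (2014, 2) sort in that order. A sketch, assuming p1 and p2 are such partitions and java.util.Collections is imported:

  List<HdfsPartition> parts = Lists.newArrayList(p2, p1);
  Collections.sort(parts);  // uses compareTo(), i.e. comparePartitionKeyValues()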
714 }