Impala
Impala is the open source, native analytic database for Apache Hadoop.
AnalyzeExprsTest.java
1 // Copyright (c) 2012 Cloudera, Inc. All rights reserved.
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 // http://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14 
15 package com.cloudera.impala.analysis;
16 
17 import static org.junit.Assert.assertEquals;
18 import static org.junit.Assert.assertNotNull;
19 import static org.junit.Assert.assertTrue;
20 
21 import java.math.BigInteger;
22 import java.util.ArrayList;
23 import java.util.List;
24 
25 import junit.framework.Assert;
26 
27 import org.junit.Test;
 28 import org.junit.Test;
 29 
 30 import com.cloudera.impala.catalog.PrimitiveType;
 31 import com.cloudera.impala.catalog.ScalarType;
 32 import com.cloudera.impala.catalog.Type;
 33 import com.cloudera.impala.common.AnalysisException;
39 import com.cloudera.impala.thrift.TExpr;
40 import com.cloudera.impala.thrift.TQueryOptions;
41 import com.google.common.base.Joiner;
42 import com.google.common.base.Preconditions;
43 import com.google.common.base.Predicates;
44 import com.google.common.collect.Lists;
45 
46 public class AnalyzeExprsTest extends AnalyzerTest {
47 
48  @Test
 49  public void TestNumericLiteralMinMaxValues() throws AnalysisException {
 50  testNumericLiteral(Byte.toString(Byte.MIN_VALUE), Type.TINYINT);
51  testNumericLiteral(Byte.toString(Byte.MAX_VALUE), Type.TINYINT);
52  testNumericLiteral("- " + Byte.toString(Byte.MIN_VALUE), Type.SMALLINT);
53  testNumericLiteral("- " + Byte.toString(Byte.MAX_VALUE), Type.TINYINT);
54 
55  testNumericLiteral(Short.toString(Short.MIN_VALUE), Type.SMALLINT);
56  testNumericLiteral(Short.toString(Short.MAX_VALUE), Type.SMALLINT);
57  testNumericLiteral("- " + Short.toString(Short.MIN_VALUE), Type.INT);
58  testNumericLiteral("- " + Short.toString(Short.MAX_VALUE), Type.SMALLINT);
59 
60  testNumericLiteral(Integer.toString(Integer.MIN_VALUE), Type.INT);
61  testNumericLiteral(Integer.toString(Integer.MAX_VALUE), Type.INT);
62  testNumericLiteral("- " + Integer.toString(Integer.MIN_VALUE), Type.BIGINT);
63  testNumericLiteral("- " + Integer.toString(Integer.MAX_VALUE), Type.INT);
64 
65  testNumericLiteral(Long.toString(Long.MIN_VALUE), Type.BIGINT);
66  testNumericLiteral(Long.toString(Long.MAX_VALUE), Type.BIGINT);
 67  testNumericLiteral("- " + Long.toString(Long.MIN_VALUE), ScalarType.createDecimalType(19, 0));
68  testNumericLiteral("- " + Long.toString(Long.MAX_VALUE), Type.BIGINT);
69 
70  // Result type is a decimal because Long can't hold the value.
71  testNumericLiteral(Long.toString(Long.MIN_VALUE) + "1",
72  ScalarType.createDecimalType(20, 0));
 73  testNumericLiteral(Long.toString(Long.MAX_VALUE) + "1",
 74  ScalarType.createDecimalType(20, 0));
75  // Test min int64-1.
76  BigInteger minMinusOne = BigInteger.valueOf(Long.MIN_VALUE);
77  minMinusOne = minMinusOne.subtract(BigInteger.ONE);
78  testNumericLiteral(minMinusOne.toString(), ScalarType.createDecimalType(19, 0));
79  // Test max int64+1.
80  BigInteger maxPlusOne = BigInteger.valueOf(Long.MAX_VALUE);
81  maxPlusOne = maxPlusOne.add(BigInteger.ONE);
82  testNumericLiteral(maxPlusOne.toString(), ScalarType.createDecimalType(19, 0));
83 
84  // Test floating-point types.
85  testNumericLiteral(Float.toString(Float.MIN_VALUE), Type.DOUBLE);
86  testNumericLiteral(Float.toString(Float.MAX_VALUE), Type.DOUBLE);
87  testNumericLiteral("-" + Float.toString(Float.MIN_VALUE), Type.DOUBLE);
88  testNumericLiteral("-" + Float.toString(Float.MAX_VALUE), Type.DOUBLE);
89  testNumericLiteral(Double.toString(Double.MIN_VALUE), Type.DOUBLE);
90  testNumericLiteral(Double.toString(Double.MAX_VALUE), Type.DOUBLE);
91  testNumericLiteral("-" + Double.toString(Double.MIN_VALUE), Type.DOUBLE);
92  testNumericLiteral("-" + Double.toString(Double.MAX_VALUE), Type.DOUBLE);
93 
94  AnalysisError(String.format("select %s1", Double.toString(Double.MAX_VALUE)),
95  "Numeric literal '1.7976931348623157E+3081' exceeds maximum range of doubles.");
96  AnalysisError(String.format("select %s1", Double.toString(Double.MIN_VALUE)),
97  "Numeric literal '4.9E-3241' underflows minimum resolution of doubles.");
98 
 99  testNumericLiteral("0.99999999999999999999999999999999999999",
 100  ScalarType.createDecimalType(38, 38));
 101  testNumericLiteral("99999999999999999999999999999999999999.",
 102  ScalarType.createDecimalType(38, 0));
 103  testNumericLiteral("-0.99999999999999999999999999999999999999",
 104  ScalarType.createDecimalType(38, 38));
 105  testNumericLiteral("-99999999999999999999999999999999999999.",
 106  ScalarType.createDecimalType(38, 0));
 107  testNumericLiteral("999999999999999999999.99999999999999999",
 108  ScalarType.createDecimalType(38, 17));
 109  testNumericLiteral("-999999999999999999.99999999999999999999",
 110  ScalarType.createDecimalType(38, 20));
111  }
112 
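 113  // Analyzes "select <literal>" and asserts that the result expr has the expected type.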
117  private void testNumericLiteral(String literal, Type expectedType) {
118  SelectStmt selectStmt = (SelectStmt) AnalyzesOk("select " + literal);
119  Type actualType = selectStmt.resultExprs_.get(0).getType();
120  Assert.assertTrue("Expected Type: " + expectedType + " Actual type: " + actualType,
121  expectedType.equals(actualType));
122  }
123 
124  @Test
 125  public void TestTimestampValueExprs() throws AnalysisException {
 126  AnalyzesOk("select cast (0 as timestamp)");
127  AnalyzesOk("select cast (0.1 as timestamp)");
128  AnalyzesOk("select cast ('1970-10-10 10:00:00.123' as timestamp)");
129  }
130 
131  @Test
 132  public void TestBooleanValueExprs() throws AnalysisException {
 133  // Test predicates in where clause.
134  AnalyzesOk("select * from functional.AllTypes where true");
135  AnalyzesOk("select * from functional.AllTypes where false");
136  AnalyzesOk("select * from functional.AllTypes where NULL");
137  AnalyzesOk("select * from functional.AllTypes where bool_col = true");
138  AnalyzesOk("select * from functional.AllTypes where bool_col = false");
139  AnalyzesOk("select * from functional.AllTypes where bool_col = NULL");
140  AnalyzesOk("select * from functional.AllTypes where NULL = NULL");
141  AnalyzesOk("select * from functional.AllTypes where NULL and NULL or NULL");
142  AnalyzesOk("select * from functional.AllTypes where true or false");
143  AnalyzesOk("select * from functional.AllTypes where true and false");
144  AnalyzesOk("select * from functional.AllTypes " +
145  "where true or false and bool_col = false");
146  AnalyzesOk("select * from functional.AllTypes " +
147  "where true and false or bool_col = false");
148  // In select list.
149  AnalyzesOk("select bool_col = true from functional.AllTypes");
150  AnalyzesOk("select bool_col = false from functional.AllTypes");
151  AnalyzesOk("select bool_col = NULL from functional.AllTypes");
152  AnalyzesOk("select true or false and bool_col = false from functional.AllTypes");
153  AnalyzesOk("select true and false or bool_col = false from functional.AllTypes");
154  AnalyzesOk("select NULL or NULL and NULL from functional.AllTypes");
155  }
156 
157  @Test
 158  public void TestBinaryPredicates() throws AnalysisException {
 159  AnalyzesOk("select * from functional.alltypes where bool_col != true");
160  AnalyzesOk("select * from functional.alltypes where tinyint_col <> 1");
161  AnalyzesOk("select * from functional.alltypes where smallint_col <= 23");
162  AnalyzesOk("select * from functional.alltypes where int_col > 15");
163  AnalyzesOk("select * from functional.alltypes where bigint_col >= 17");
164  AnalyzesOk("select * from functional.alltypes where float_col < 15.0");
165  AnalyzesOk("select * from functional.alltypes where double_col > 7.7");
166  // automatic type cast if compatible
167  AnalyzesOk("select * from functional.alltypes where 1 = 0");
168  AnalyzesOk("select * from functional.alltypes where int_col = smallint_col");
169  AnalyzesOk("select * from functional.alltypes where bigint_col = float_col");
170  AnalyzesOk("select * from functional.alltypes where bool_col = 0");
171  AnalyzesOk("select * from functional.alltypes where int_col = cast('0' as int)");
172  AnalyzesOk("select * from functional.alltypes where cast(string_col as int) = 15");
173  // tests with NULL
174  AnalyzesOk("select * from functional.alltypes where bool_col != NULL");
175  AnalyzesOk("select * from functional.alltypes where tinyint_col <> NULL");
176  AnalyzesOk("select * from functional.alltypes where smallint_col <= NULL");
177  AnalyzesOk("select * from functional.alltypes where int_col > NULL");
178  AnalyzesOk("select * from functional.alltypes where bigint_col >= NULL");
179  AnalyzesOk("select * from functional.alltypes where float_col < NULL");
180  AnalyzesOk("select * from functional.alltypes where double_col > NULL");
181  AnalyzesOk("select * from functional.alltypes where string_col = NULL");
182  AnalyzesOk("select * from functional.alltypes where timestamp_col = NULL");
183 
184  AnalyzesOk("select cast('hi' as CHAR(2)) = cast('hi' as CHAR(3))");
185  AnalyzesOk("select cast('hi' as CHAR(2)) = 'hi'");
186 
187  // invalid casts
188  AnalysisError("select * from functional.alltypes where bool_col = '15'",
189  "operands of type BOOLEAN and STRING are not comparable: bool_col = '15'");
190  // Complex types are not comparable.
191  AnalysisError("select 1 from functional.allcomplextypes where int_array_col = 1",
192  "operands of type ARRAY<INT> and TINYINT are not comparable: int_array_col = 1");
193  AnalysisError("select 1 from functional.allcomplextypes where int_map_col = 1",
194  "operands of type MAP<STRING,INT> and TINYINT are not comparable: " +
195  "int_map_col = 1");
196  AnalysisError("select 1 from functional.allcomplextypes where int_struct_col = 1",
197  "operands of type STRUCT<f1:INT,f2:INT> and TINYINT are not comparable: " +
198  "int_struct_col = 1");
199  // Complex types are not comparable even if identical.
200  // TODO: Reconsider this behavior. Such a comparison should ideally work,
201  // but may require complex-typed SlotRefs and BE functions able to accept
202  // complex-typed parameters.
203  AnalysisError("select 1 from functional.allcomplextypes " +
204  "where int_map_col = int_map_col",
205  "operands of type MAP<STRING,INT> and MAP<STRING,INT> are not comparable: " +
206  "int_map_col = int_map_col");
207  // TODO: Enable once date and datetime are implemented.
208  // AnalysisError("select * from functional.alltypes where date_col = 15",
209  // "operands are not comparable: date_col = 15");
210  // AnalysisError("select * from functional.alltypes where datetime_col = 1.0",
211  // "operands are not comparable: datetime_col = 1.0");
212  }
213 
214 
215  @Test
216  public void TestDecimalCasts() throws AnalysisException {
217  AnalyzesOk("select cast(1.1 as boolean)");
218  AnalyzesOk("select cast(1.1 as timestamp)");
219 
220  AnalysisError("select cast(true as decimal)",
221  "Invalid type cast of TRUE from BOOLEAN to DECIMAL(9,0)");
222  AnalysisError("select cast(cast(1 as timestamp) as decimal)",
223  "Invalid type cast of CAST(1 AS TIMESTAMP) from TIMESTAMP to DECIMAL(9,0)");
224 
225  for (Type type: Type.getSupportedTypes()) {
226  if (type.isNull() || type.isDecimal() || type.isBoolean() || type.isDateType()
227  || type.getPrimitiveType() == PrimitiveType.VARCHAR
228  || type.getPrimitiveType() == PrimitiveType.CHAR) {
229  continue;
230  }
231  AnalyzesOk("select cast(1.1 as " + type + ")");
232  AnalyzesOk("select cast(cast(1 as " + type + ") as decimal)");
233  }
234 
235  // Casts to all other decimals are supported.
236  for (int precision = 1; precision <= ScalarType.MAX_PRECISION; ++precision) {
237  for (int scale = 0; scale < precision; ++scale) {
238  Type t = ScalarType.createDecimalType(precision, scale);
239  AnalyzesOk("select cast(1.1 as " + t.toSql() + ")");
240  AnalyzesOk("select cast(cast(1 as " + t.toSql() + ") as decimal)");
241  }
242  }
243 
244  AnalysisError("select cast(1 as decimal(0, 1))",
245  "Decimal precision must be > 0: 0");
246  }
247 
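 248  // Analyzes "select cast(<literal> as <expectedType>)" and asserts that the
 249  // result expr has the expected type.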
251  private void testExprCast(String literal, Type expectedType) {
252  SelectStmt selectStmt = (SelectStmt) AnalyzesOk("select cast(" + literal +
253  " as " + expectedType.toSql() + ")");
254  Type actualType = selectStmt.resultExprs_.get(0).getType();
255  Assert.assertTrue("Expected Type: " + expectedType + " Actual type: " + actualType,
256  expectedType.equals(actualType));
257  }
258 
259  @Test
260  public void TestStringCasts() throws AnalysisException {
261  // No implicit cast from STRING to numeric and boolean
262  AnalysisError("select * from functional.alltypes where tinyint_col = '1'",
263  "operands of type TINYINT and STRING are not comparable: tinyint_col = '1'");
264  AnalysisError("select * from functional.alltypes where bool_col = '0'",
265  "operands of type BOOLEAN and STRING are not comparable: bool_col = '0'");
266  // No explicit cast from STRING to boolean.
267  AnalysisError("select cast('false' as boolean) from functional.alltypes",
268  "Invalid type cast of 'false' from STRING to BOOLEAN");
269 
270  AnalyzesOk("select * from functional.alltypes where " +
271  "tinyint_col = cast('0.5' as float)");
272  AnalyzesOk("select * from functional.alltypes where " +
273  "smallint_col = cast('0.5' as float)");
274  AnalyzesOk("select * from functional.alltypes where int_col = cast('0.5' as float)");
275  AnalyzesOk("select * from functional.alltypes where " +
276  "bigint_col = cast('0.5' as float)");
277  AnalyzesOk("select 1.0 = cast('" + Double.toString(Double.MIN_VALUE) +
278  "' as double)");
279  AnalyzesOk("select 1.0 = cast('-" + Double.toString(Double.MIN_VALUE) +
280  "' as double)");
281  AnalyzesOk("select 1.0 = cast('" + Double.toString(Double.MAX_VALUE) +
282  "' as double)");
283  AnalyzesOk("select 1.0 = cast('-" + Double.toString(Double.MAX_VALUE) +
284  "' as double)");
 285  // Test chains of minus. Note that "--" is a comment symbol.
286  AnalyzesOk("select * from functional.alltypes where " +
287  "tinyint_col = cast('-1' as tinyint)");
288  AnalyzesOk("select * from functional.alltypes where " +
289  "tinyint_col = cast('- -1' as tinyint)");
290  AnalyzesOk("select * from functional.alltypes where " +
291  "tinyint_col = cast('- - -1' as tinyint)");
292  AnalyzesOk("select * from functional.alltypes where " +
293  "tinyint_col = cast('- - - -1' as tinyint)");
294  // Test correct casting to compatible type on bitwise ops.
295  AnalyzesOk("select 1 | cast('" + Byte.toString(Byte.MIN_VALUE) + "' as int)");
296  AnalyzesOk("select 1 | cast('" + Byte.toString(Byte.MAX_VALUE) + "' as int)");
297  AnalyzesOk("select 1 | cast('" + Short.toString(Short.MIN_VALUE) + "' as int)");
298  AnalyzesOk("select 1 | cast('" + Short.toString(Short.MAX_VALUE) + "' as int)");
299  AnalyzesOk("select 1 | cast('" + Integer.toString(Integer.MIN_VALUE) + "' as int)");
300  AnalyzesOk("select 1 | cast('" + Integer.toString(Integer.MAX_VALUE) + "' as int)");
301  // We need to add 1 to MIN_VALUE because there are no negative integer literals.
302  // The reason is that whether a minus belongs to an
303  // arithmetic expr or a literal must be decided by the parser, not the lexer.
304  AnalyzesOk("select 1 | cast('" + Long.toString(Long.MIN_VALUE + 1) + "' as bigint)");
305  AnalyzesOk("select 1 | cast('" + Long.toString(Long.MAX_VALUE) + "' as bigint)");
 306  // Casts to numeric types never overflow.
307  AnalyzesOk("select * from functional.alltypes where tinyint_col = " +
308  "cast('" + Long.toString(Long.MIN_VALUE) + "1' as tinyint)");
309  AnalyzesOk("select * from functional.alltypes where tinyint_col = " +
310  "cast('" + Long.toString(Long.MAX_VALUE) + "1' as tinyint)");
311  AnalyzesOk("select * from functional.alltypes where tinyint_col = " +
312  "cast('" + Double.toString(Double.MAX_VALUE) + "1' as tinyint)");
313  // Java converts a float underflow to 0.0.
314  // Since there is no easy, reliable way to detect underflow,
315  // we don't consider it an error.
316  AnalyzesOk("select * from functional.alltypes where tinyint_col = " +
317  "cast('" + Double.toString(Double.MIN_VALUE) + "1' as tinyint)");
 318  // Casts never raise an analysis exception.
319  AnalyzesOk("select * from functional.alltypes where " +
320  "tinyint_col = cast('--1' as tinyint)");
321 
322  // Cast string literal to string
323  AnalyzesOk("select cast('abc' as string)");
324 
325  // Cast decimal to string
326  AnalyzesOk("select cast(cast('1.234' as decimal) as string)");
327 
328  // Cast to / from VARCHAR
329  AnalyzesOk("select cast('helloworld' as VARCHAR(3))");
330  AnalyzesOk("select cast(cast('helloworld' as VARCHAR(3)) as string)");
331  AnalyzesOk("select cast(cast('3.0' as VARCHAR(5)) as float)");
332  AnalyzesOk("select NULL = cast('123' as CHAR(3))");
333  AnalysisError("select now() = cast('hi' as CHAR(3))",
334  "operands of type TIMESTAMP and CHAR(3) are not comparable: " +
335  "now() = CAST('hi' AS CHAR(3))");
336  testExprCast("cast('Hello' as VARCHAR(5))", ScalarType.createVarcharType(7));
337  testExprCast("cast('Hello' as VARCHAR(5))", ScalarType.createVarcharType(3));
338 
339  AnalysisError("select cast('foo' as varchar(0))",
340  "Varchar size must be > 0: 0");
341  AnalysisError("select cast('foo' as varchar(65356))",
342  "Varchar size must be <= 65355: 65356");
343  AnalysisError("select cast('foo' as char(0))",
344  "Char size must be > 0: 0");
345  AnalysisError("select cast('foo' as char(256))",
346  "Char size must be <= 255: 256");
347 
348  testExprCast("'Hello'", ScalarType.createCharType(5));
349  testExprCast("cast('Hello' as CHAR(5))", ScalarType.STRING);
350  testExprCast("cast('Hello' as CHAR(5))", ScalarType.createVarcharType(7));
351  testExprCast("cast('Hello' as VARCHAR(5))", ScalarType.createCharType(7));
352  testExprCast("cast('Hello' as CHAR(7))", ScalarType.createVarcharType(5));
353  testExprCast("cast('Hello' as VARCHAR(7))", ScalarType.createCharType(5));
354  testExprCast("cast('Hello' as CHAR(5))", ScalarType.createVarcharType(5));
355  testExprCast("cast('Hello' as VARCHAR(5))", ScalarType.createCharType(5));
357 
358  testExprCast("cast('abcde' as char(10)) IN " +
359  "(cast('abcde' as CHAR(20)), cast('abcde' as VARCHAR(10)), 'abcde')",
361  testExprCast("'abcde' IN " +
362  "(cast('abcde' as CHAR(20)), cast('abcde' as VARCHAR(10)), 'abcde')",
364  testExprCast("cast('abcde' as varchar(10)) IN " +
365  "(cast('abcde' as CHAR(20)), cast('abcde' as VARCHAR(10)), 'abcde')",
367 
368  }
369 
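 370  // Tests that NULL can be explicitly cast to every supported scalar type.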
373  @Test
374  public void TestNullCasts() throws AnalysisException {
375  for (Type type: Type.getSupportedTypes()) {
376  // Cannot cast to NULL_TYPE
377  if (type.isNull()) continue;
378  if (type.isDecimal()) type = Type.DEFAULT_DECIMAL;
379  if (type.getPrimitiveType() == PrimitiveType.VARCHAR) {
380  type = ScalarType.createVarcharType(1);
381  }
382  if (type.getPrimitiveType() == PrimitiveType.CHAR) {
383  type = ScalarType.createCharType(1);
384  }
385  checkExprType("select cast(null as " + type + ")", type);
386  }
387  }
388 
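 389  // Tests that explicit casts to complex types (ARRAY, MAP, STRUCT) are rejected.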
392  @Test
394  AnalysisError("select cast(1 as array<int>)",
395  "Unsupported cast to complex type: ARRAY<INT>");
396  AnalysisError("select cast(1 as map<int, int>)",
397  "Unsupported cast to complex type: MAP<INT,INT>");
398  AnalysisError("select cast(1 as struct<a:int,b:char(20)>)",
399  "Unsupported cast to complex type: STRUCT<a:INT,b:CHAR(20)>");
400  }
401 
402  @Test
403  public void TestLikePredicates() throws AnalysisException {
404  AnalyzesOk("select * from functional.alltypes where string_col like 'test%'");
405  AnalyzesOk("select * from functional.alltypes where string_col like string_col");
406  AnalyzesOk("select * from functional.alltypes where 'test' like string_col");
407  AnalyzesOk("select * from functional.alltypes where string_col rlike 'test%'");
408  AnalyzesOk("select * from functional.alltypes where string_col regexp 'test.*'");
409  AnalysisError("select * from functional.alltypes where string_col like 5",
410  "right operand of LIKE must be of type STRING");
411  AnalysisError("select * from functional.alltypes where 'test' like 5",
412  "right operand of LIKE must be of type STRING");
413  AnalysisError("select * from functional.alltypes where int_col like 'test%'",
414  "left operand of LIKE must be of type STRING");
415  AnalysisError("select * from functional.alltypes where string_col regexp 'test]['",
416  "invalid regular expression in 'string_col REGEXP 'test][''");
417  // Test NULLs.
418  String[] likePreds = new String[] {"LIKE", "RLIKE", "REGEXP"};
419  for (String likePred: likePreds) {
420  AnalyzesOk(String.format("select * from functional.alltypes " +
421  "where string_col %s NULL", likePred));
422  AnalyzesOk(String.format("select * from functional.alltypes " +
423  "where NULL %s string_col", likePred));
424  AnalyzesOk(String.format("select * from functional.alltypes " +
425  "where NULL %s NULL", likePred));
426  }
427  }
428 
429  @Test
 430  public void TestCompoundPredicates() throws AnalysisException {
 431  AnalyzesOk("select * from functional.alltypes where " +
432  "string_col = '5' and int_col = 5");
433  AnalyzesOk("select * from functional.alltypes where " +
434  "string_col = '5' or int_col = 5");
435  AnalyzesOk("select * from functional.alltypes where (string_col = '5' " +
436  "or int_col = 5) and string_col > '1'");
437  AnalyzesOk("select * from functional.alltypes where not string_col = '5'");
438  AnalyzesOk("select * from functional.alltypes where int_col = cast('5' as int)");
439 
440  // Test all combinations of truth values and bool_col with all boolean operators.
441  String[] operands = new String[]{ "true", "false", "NULL", "bool_col" };
442  for (String lop: operands) {
443  for (String rop: operands) {
444  for (CompoundPredicate.Operator op: CompoundPredicate.Operator.values()) {
445  // Unary operator tested elsewhere (below).
446  if (op == CompoundPredicate.Operator.NOT) continue;
447  String expr = String.format("%s %s %s", lop, op, rop);
448  AnalyzesOk(String.format("select %s from functional.alltypes where %s",
449  expr, expr));
450  }
451  }
452  String notExpr = String.format("%s %s", CompoundPredicate.Operator.NOT, lop);
453  AnalyzesOk(String.format("select %s from functional.alltypes where %s",
454  notExpr, notExpr));
455  }
456 
457  // arbitrary exprs as operands should fail to analyze
458  AnalysisError("select * from functional.alltypes where 1 + 2 and false",
459  "Operand '1 + 2' part of predicate '1 + 2 AND FALSE' should return " +
460  "type 'BOOLEAN' but returns type 'SMALLINT'.");
461  AnalysisError("select * from functional.alltypes where 1 + 2 or true",
462  "Operand '1 + 2' part of predicate '1 + 2 OR TRUE' should return " +
463  "type 'BOOLEAN' but returns type 'SMALLINT'.");
464  AnalysisError("select * from functional.alltypes where not 1 + 2",
465  "Operand '1 + 2' part of predicate 'NOT 1 + 2' should return " +
466  "type 'BOOLEAN' but returns type 'SMALLINT'.");
467  AnalysisError("select * from functional.alltypes where 1 + 2 and true",
468  "Operand '1 + 2' part of predicate '1 + 2 AND TRUE' should return " +
469  "type 'BOOLEAN' but returns type 'SMALLINT'.");
470  AnalysisError("select * from functional.alltypes where false and trim('abc')",
471  "Operand 'trim('abc')' part of predicate 'FALSE AND trim('abc')' should " +
472  "return type 'BOOLEAN' but returns type 'STRING'.");
473  AnalysisError("select * from functional.alltypes where bool_col or double_col",
474  "Operand 'double_col' part of predicate 'bool_col OR double_col' should " +
475  "return type 'BOOLEAN' but returns type 'DOUBLE'.");
476  AnalysisError("select int_array_col or true from functional.allcomplextypes",
477  "Operand 'int_array_col' part of predicate 'int_array_col OR TRUE' should " +
478  "return type 'BOOLEAN' but returns type 'ARRAY<INT>'");
479  AnalysisError("select false and int_struct_col from functional.allcomplextypes",
480  "Operand 'int_struct_col' part of predicate 'FALSE AND int_struct_col' should " +
481  "return type 'BOOLEAN' but returns type 'STRUCT<f1:INT,f2:INT>'.");
482  AnalysisError("select not int_map_col from functional.allcomplextypes",
483  "Operand 'int_map_col' part of predicate 'NOT int_map_col' should return " +
484  "type 'BOOLEAN' but returns type 'MAP<STRING,INT>'.");
485  }
486 
487  @Test
 488  public void TestIsNullPredicates() throws AnalysisException {
 489  AnalyzesOk("select * from functional.alltypes where int_col is null");
490  AnalyzesOk("select * from functional.alltypes where string_col is not null");
491  AnalyzesOk("select * from functional.alltypes where null is not null");
492  AnalyzesOk("select 1 from functional.allcomplextypes where int_map_col is null");
493  }
494 
495  @Test
 496  public void TestBetweenPredicates() throws AnalysisException {
 497  AnalyzesOk("select * from functional.alltypes " +
498  "where tinyint_col between smallint_col and int_col");
499  AnalyzesOk("select * from functional.alltypes " +
500  "where tinyint_col not between smallint_col and int_col");
501  AnalyzesOk("select * from functional.alltypes " +
502  "where 'abc' between string_col and date_string_col");
503  AnalyzesOk("select * from functional.alltypes " +
504  "where 'abc' not between string_col and date_string_col");
505  // Additional predicates before and/or after between predicate.
506  AnalyzesOk("select * from functional.alltypes " +
507  "where string_col = 'abc' and tinyint_col between 10 and 20");
508  AnalyzesOk("select * from functional.alltypes " +
509  "where tinyint_col between 10 and 20 and string_col = 'abc'");
510  AnalyzesOk("select * from functional.alltypes " +
511  "where bool_col and tinyint_col between 10 and 20 and string_col = 'abc'");
512  // Chaining/nesting of between predicates.
513  AnalyzesOk("select * from functional.alltypes " +
514  "where true between false and true and 'b' between 'a' and 'c'");
 515  // Parsed as: true between ('b' between 'a' and 'c') and ('bb' between 'aa' and 'cc')
516  AnalyzesOk("select * from functional.alltypes " +
517  "where true between 'b' between 'a' and 'c' and 'bb' between 'aa' and 'cc'");
518  // Test proper precedence with exprs before between.
519  AnalyzesOk("select 5 + 1 between 4 and 10");
520  AnalyzesOk("select 'abc' like '%a' between true and false");
521  AnalyzesOk("select false between (true and true) and (false and true)");
522  // Lower and upper bounds require implicit casts.
523  AnalyzesOk("select * from functional.alltypes " +
524  "where double_col between smallint_col and int_col");
525  // Comparison expr requires implicit cast.
526  AnalyzesOk("select * from functional.alltypes " +
527  "where smallint_col between float_col and double_col");
528  // Test NULLs.
529  AnalyzesOk("select * from functional.alltypes " +
530  "where NULL between float_col and double_col");
531  AnalyzesOk("select * from functional.alltypes " +
532  "where smallint_col between NULL and double_col");
533  AnalyzesOk("select * from functional.alltypes " +
534  "where smallint_col between float_col and NULL");
535  AnalyzesOk("select * from functional.alltypes " +
536  "where NULL between NULL and NULL");
537  // Incompatible types.
538  AnalysisError("select * from functional.alltypes " +
539  "where string_col between bool_col and double_col",
540  "Incompatible return types 'STRING' and 'BOOLEAN' " +
541  "of exprs 'string_col' and 'bool_col'.");
542  AnalysisError("select * from functional.alltypes " +
543  "where timestamp_col between int_col and double_col",
544  "Incompatible return types 'TIMESTAMP' and 'INT' " +
545  "of exprs 'timestamp_col' and 'int_col'.");
546  AnalysisError("select 1 from functional.allcomplextypes " +
547  "where int_struct_col between 10 and 20",
548  "Incompatible return types 'STRUCT<f1:INT,f2:INT>' and 'TINYINT' " +
549  "of exprs 'int_struct_col' and '10'.");
550  }
551 
552  @Test
553  public void TestInPredicates() throws AnalysisException {
554  AnalyzesOk("select * from functional.alltypes where int_col in (1, 2, 3, 4)");
555  AnalyzesOk("select * from functional.alltypes where int_col not in (1, 2, 3, 4)");
556  AnalyzesOk("select * from functional.alltypes where " +
557  "string_col in ('a', 'b', 'c', 'd')");
558  AnalyzesOk("select * from functional.alltypes where " +
559  "string_col not in ('a', 'b', 'c', 'd')");
560  // Test booleans.
561  AnalyzesOk("select * from functional.alltypes where " +
562  "true in (bool_col, true and false)");
563  AnalyzesOk("select * from functional.alltypes where " +
564  "true not in (bool_col, true and false)");
565  // In list requires implicit casts.
566  AnalyzesOk("select * from functional.alltypes where " +
567  "double_col in (int_col, bigint_col)");
568  // Comparison expr requires implicit cast.
569  AnalyzesOk("select * from functional.alltypes where " +
570  "int_col in (double_col, bigint_col)");
571  // Test predicates.
572  AnalyzesOk("select * from functional.alltypes where " +
573  "!true in (false or true, true and false)");
574  // Test NULLs.
575  AnalyzesOk("select * from functional.alltypes where " +
576  "NULL in (NULL, NULL)");
577  // Test IN in binary predicates
578  AnalyzesOk("select bool_col = (int_col in (1,2)), " +
579  "case when tinyint_col in (10, NULL) then tinyint_col else NULL end " +
580  "from functional.alltypestiny where int_col > (bool_col in (false)) " +
581  "and (int_col in (1,2)) = (select min(bool_col) from functional.alltypes) " +
582  "and (int_col in (3,4)) = (tinyint_col in (4,5))");
583  // Incompatible types.
584  AnalysisError("select * from functional.alltypes where " +
585  "string_col in (bool_col, double_col)",
586  "Incompatible return types 'STRING' and 'BOOLEAN' " +
587  "of exprs 'string_col' and 'bool_col'.");
588  AnalysisError("select * from functional.alltypes where " +
589  "timestamp_col in (int_col, double_col)",
590  "Incompatible return types 'TIMESTAMP' and 'INT' " +
591  "of exprs 'timestamp_col' and 'int_col'.");
592  AnalysisError("select * from functional.alltypes where " +
593  "timestamp_col in (NULL, int_col)",
594  "Incompatible return types 'TIMESTAMP' and 'INT' " +
595  "of exprs 'timestamp_col' and 'int_col'.");
596  AnalysisError("select 1 from functional.allcomplextypes where " +
597  "int_array_col in (id, NULL)",
598  "Incompatible return types 'ARRAY<INT>' and 'INT' " +
599  "of exprs 'int_array_col' and 'id'.");
600  }
601 
602  @Test
603  public void TestAnalyticExprs() throws AnalysisException {
604  AnalyzesOk("select int_col from functional.alltypessmall order by count(*) over () "
605  + "limit 10");
606  AnalyzesOk("select sum(int_col) over () from functional.alltypes");
607  AnalyzesOk("select avg(bigint_col) over (partition by id) from functional.alltypes");
608  AnalyzesOk("select count(smallint_col) over (partition by id order by tinyint_col) "
609  + "from functional.alltypes");
610  AnalyzesOk("select min(int_col) over (partition by id order by tinyint_col "
611  + "rows between unbounded preceding and current row) from functional.alltypes");
612  AnalyzesOk("select sum(int_col) over (partition by id order by tinyint_col "
613  + "rows 2 preceding) from functional.alltypes");
614  AnalyzesOk("select sum(int_col) over (partition by id order by tinyint_col "
615  + "rows between 2 preceding and unbounded following) from functional.alltypes");
616  AnalyzesOk("select min(int_col) over (partition by id order by tinyint_col "
617  + "rows between unbounded preceding and 2 preceding) from functional.alltypes");
618  AnalyzesOk("select sum(int_col) over (partition by id order by tinyint_col "
619  + "rows between 2 following and unbounded following) from functional.alltypes");
620  // TODO: Enable after RANGE windows with offset boundaries are supported
621  //AnalyzesOk("select sum(int_col) over (partition by id order by tinyint_col "
622  // + "range between 2 preceding and unbounded following) from functional.alltypes");
623  //AnalyzesOk("select sum(int_col) over (partition by id order by tinyint_col "
624  // + "range between 2 preceding and unbounded following) from functional.alltypes");
625  AnalyzesOk("select sum(int_col) over (partition by id order by tinyint_col "
626  + "rows between 2 preceding and 6 following) from functional.alltypes");
627  // TODO: substitute constants in-line so that 2*3 becomes implicitly castable
628  // to tinyint
629  //+ "range between 2 preceding and 2 * 3 following) from functional.alltypes");
630  AnalyzesOk("select sum(int_col) over (partition by id order by tinyint_col "
631  + "rows between 2 preceding and unbounded following) from functional.alltypes");
632  AnalyzesOk("select lead(int_col, 1, null) over "
633  + "(partition by id order by tinyint_col) from functional.alltypes");
634  AnalyzesOk("select rank() over "
635  + "(partition by id order by tinyint_col) from functional.alltypes");
636  AnalyzesOk(
637  "select id from functional.alltypes order by rank() over (order by tinyint_col)");
638 
639  // legal combinations of analytic and agg functions
640  AnalyzesOk("select sum(count(id)) over (partition by min(int_col) "
641  + "order by max(bigint_col)) from functional.alltypes group by id, tinyint_col "
642  + "order by rank() over (order by max(bool_col), tinyint_col)");
643  AnalyzesOk("select lead(count(id)) over (order by tinyint_col) "
644  + "from functional.alltypes group by id, tinyint_col "
645  + "order by rank() over (order by tinyint_col)");
646  AnalyzesOk("select min(count(id)) over (order by tinyint_col) "
647  + "from functional.alltypes group by id, tinyint_col "
648  + "order by rank() over (order by tinyint_col)");
649  // IMPALA-1231: COUNT(t1.int_col) shows up in two different ways: as agg output
650  // and as an analytic fn call
651  AnalyzesOk("select t1.int_col, COUNT(t1.int_col) OVER (ORDER BY t1.int_col) "
652  + "FROM functional.alltypes t1 GROUP BY t1.int_col HAVING COUNT(t1.int_col) > 1");
653 
654  // legal windows
655  AnalyzesOk(
656  "select sum(int_col) over (partition by id order by tinyint_col, int_col "
657  + "rows between 2 following and 4 following) from functional.alltypes");
658  AnalyzesOk(
659  "select max(int_col) over (partition by id order by tinyint_col, int_col "
660  + "range between unbounded preceding and current row) "
661  + "from functional.alltypes");
662  AnalyzesOk(
663  "select sum(int_col) over (partition by id order by tinyint_col, int_col "
664  + "range between current row and unbounded following) "
665  + "from functional.alltypes");
666  AnalyzesOk(
667  "select max(int_col) over (partition by id order by tinyint_col, int_col "
668  + "range between unbounded preceding and unbounded following) "
669  + "from functional.alltypes");
670  AnalyzesOk("select sum(int_col) over (partition by id order by tinyint_col "
671  + "rows between 4 preceding and 2 preceding) from functional.alltypes");
672  AnalyzesOk("select sum(int_col) over (partition by id order by tinyint_col "
673  + "rows between 2 following and 4 following) from functional.alltypes");
674  AnalyzesOk( "select "
675  + "2 * min(tinyint_col) over (partition by id order by tinyint_col "
676  + " rows between unbounded preceding and current row), "
677  + "concat(max(string_col) over (partition by timestamp_col, int_col "
678  + " order by tinyint_col, smallint_col "
679  + " rows between unbounded preceding and current row), "
680  + "min(string_col) over (partition by timestamp_col, int_col "
681  + " order by tinyint_col, smallint_col "
682  + " rows between unbounded preceding and current row)) "
683  + "from functional.alltypes");
684  AnalyzesOk(
685  "select sum(int_col) over (partition by id order by tinyint_col, int_col "
686  + "rows between current row and current row) from functional.alltypes");
 687  AnalysisError(
 688  "select sum(int_col) over (partition by id order by tinyint_col, int_col "
689  + "range between current row and current row) from functional.alltypes",
690  "RANGE is only supported with both the lower and upper bounds UNBOUNDED or one "
691  + "UNBOUNDED and the other CURRENT ROW.");
692  AnalysisError("select sum(int_col) over (partition by id order by tinyint_col "
693  + "range between 2 following and 2 following) from functional.alltypes",
694  "RANGE is only supported with both the lower and upper bounds UNBOUNDED or one "
695  + "UNBOUNDED and the other CURRENT ROW.");
696 
697  // Min/max do not support start bounds with offsets
698  AnalysisError("select max(int_col) over (partition by id order by tinyint_col "
699  + "rows 2 preceding) from functional.alltypes",
700  "'max(int_col)' is only supported with an UNBOUNDED PRECEDING start bound.");
701  // If the query can be re-written so that the start is unbounded, it should
702  // be supported (IMPALA-1433).
703  AnalyzesOk("select max(id) over (order by id rows between current row and "
704  + "unbounded following) from functional.alltypes");
705  AnalyzesOk("select min(int_col) over (partition by id order by tinyint_col "
706  + "rows between 2 preceding and unbounded following) from functional.alltypes");
707  // TODO: Enable after RANGE windows with offset boundaries are supported
708  //AnalysisError("select max(int_col) over (partition by id order by tinyint_col "
709  // + "range 2 preceding) from functional.alltypes",
710  // "'max(int_col)' is only supported with an UNBOUNDED PRECEDING start bound.");
711 
712  // missing grouping expr
 713  AnalysisError(
 714  "select lead(count(bigint_col)) over (order by tinyint_col) "
715  + "from functional.alltypes group by id, tinyint_col "
716  + "order by rank() over (order by smallint_col)",
717  "ORDER BY expression not produced by aggregation output (missing from GROUP "
718  + "BY clause?): rank() OVER (ORDER BY smallint_col ASC)");
719  // missing Over clause
720  AnalysisError("select 1, lag(int_col) from functional.alltypes",
721  "Analytic function requires an OVER clause: lag(int_col)");
722  // no FROM clause
723  AnalysisError("select 1, count(*) over()",
724  "Analytic expressions require FROM clause");
725  // can't mix DISTINCT and analytics
 726  AnalysisError(
 727  "select distinct int_col, sum(double_col) over () from functional.alltypes",
728  "cannot combine SELECT DISTINCT with analytic functions");
729  // analytic expr in Group By
 730  AnalysisError(
 731  "select id, count(*) from functional.alltypes "
732  + "group by 1, rank() over(order by int_col)",
733  "GROUP BY expression must not contain analytic expressions: rank() OVER "
734  + "(ORDER BY int_col ASC)");
 735  AnalysisError(
 736  "select id, rank() over(order by int_col), count(*) "
737  + "from functional.alltypes group by 1, 2",
738  "GROUP BY expression must not contain analytic expressions: rank() OVER "
739  + "(ORDER BY int_col ASC)");
740  // analytic expr in Having
 741  AnalysisError(
 742  "select id, count(*) from functional.alltypes group by 1 "
743  + "having rank() over(order by int_col) > 1",
744  "HAVING clause must not contain analytic expressions: rank() OVER "
745  + "(ORDER BY int_col ASC)");
746  // analytic expr in Where
 747  AnalysisError(
 748  "select id, tinyint_col from functional.alltypes "
749  + "where row_number() over(order by id) > 1",
750  "WHERE clause must not contain analytic expressions: row_number() OVER "
751  + "(ORDER BY id ASC)");
752  // analytic expr with Distinct
 753  AnalysisError(
 754  "select id, tinyint_col, sum(distinct tinyint_col) over(order by id) "
755  + "from functional.alltypes",
756  "DISTINCT not allowed in analytic function");
757  // select list alias needs to be ignored
 758  AnalysisError(
 759  "select min(id) over (order by tinyint_col) as X from functional.alltypes "
760  + "group by id, tinyint_col order by rank() over (order by X)",
761  "Nesting of analytic expressions is not allowed");
762  // IMPALA-1256: AnalyticExpr.resetAnalysisState() didn't sync up w/ orderByElements_
763  AnalyzesOk("with t as ("
764  + "select * from (select sum(t1.year) over ("
765  + " order by max(t1.id), t1.year "
766  + " rows between unbounded preceding and 5 preceding) "
767  + "from functional.alltypes t1 group by t1.year) t1) select * from t");
768  AnalyzesOk("with t as ("
769  + "select sum(t1.smallint_col) over () from functional.alltypes t1) "
770  + "select * from t");
771  // IMPALA-1234
772  AnalyzesOk("with t as (select 1 as int_col_1 from functional.alltypesagg t1) "
773  + "select count(t1.int_col_1) as int_col_1 from t t1 where t1.int_col_1 is null "
774  + "group by t1.int_col_1 union all "
775  + "select min(t1.day) over () from functional.alltypesagg t1");
776  // IMPALA-1354: Constant expressions in order by and partition by exprs
 777  AnalysisError(
 778  "select rank() over (order by 1) from functional.alltypestiny",
779  "Expressions in the ORDER BY clause must not be constant: 1");
 780  AnalysisError(
 781  "select rank() over (partition by 2 order by id) from functional.alltypestiny",
782  "Expressions in the PARTITION BY clause must not be constant: 2");
 783  AnalysisError(
 784  "select rank() over (partition by 2 order by 1) from functional.alltypestiny",
785  "Expressions in the PARTITION BY clause must not be constant: 2");
786 
787  // nested analytic exprs
 788  AnalysisError(
 789  "select sum(int_col) over (partition by id, rank() over (order by int_col) "
790  + "order by tinyint_col, int_col "
791  + "rows between 2 following and 4 following) from functional.alltypes",
792  "Nesting of analytic expressions is not allowed");
 793  AnalysisError(
 794  "select lead(rank() over (order by int_col)) over (partition by id "
795  + "order by tinyint_col, int_col) from functional.alltypes",
796  "Nesting of analytic expressions is not allowed");
 797  AnalysisError(
 798  "select max(int_col) over (partition by id "
799  + "order by rank() over (order by tinyint_col), int_col) "
800  + "from functional.alltypes",
801  "Nesting of analytic expressions is not allowed");
802 
803  // lead/lag variants
804  AnalyzesOk(
805  "select lag(int_col, 10, 5 + 1) over (partition by id, bool_col "
806  + "order by tinyint_col, int_col) from functional.alltypes");
807  AnalyzesOk(
808  "select lead(string_col, 1, 'default') over ("
809  + "order by tinyint_col, int_col) from functional.alltypes");
810  AnalyzesOk(
811  "select lag(bool_col, 3) over ("
812  + "order by id, int_col) from functional.alltypes");
813  AnalyzesOk(
814  "select lead(float_col, 2) over (partition by string_col, timestamp_col "
815  + "order by tinyint_col, int_col) from functional.alltypes");
816  AnalyzesOk(
817  "select lag(double_col) over ("
818  + "order by string_col, int_col) from functional.alltypes");
819  AnalyzesOk(
820  "select lead(timestamp_col) over (partition by id "
821  + "order by tinyint_col, int_col) from functional.alltypes");
822  // missing offset w/ default
 823  AnalysisError(
 824  "select lag(string_col, 'x') over (partition by id "
825  + "order by tinyint_col, int_col) from functional.alltypes",
826  "No matching function with signature: lag(STRING, STRING)");
827  // mismatched default type
 828  AnalysisError(
 829  "select lead(int_col, 1, 'x') over ("
830  + "order by tinyint_col, int_col) from functional.alltypes",
831  "No matching function with signature: lead(INT, TINYINT, STRING)");
832  // missing params
 833  AnalysisError(
 834  "select lag() over (partition by id "
835  + "order by tinyint_col, int_col) from functional.alltypes",
836  "No matching function with signature: lag()");
837  // bad offsets
 838  AnalysisError(
 839  "select lead(int_col, -1) over ("
840  + "order by tinyint_col, int_col) from functional.alltypes",
841  "The offset parameter of LEAD/LAG must be a constant positive integer");
 842  AnalysisError(
 843  "select lag(int_col, tinyint_col * 2, 5) over ("
844  + "order by tinyint_col, int_col) from functional.alltypes",
845  "The offset parameter of LEAD/LAG must be a constant positive integer");
 846  AnalysisError(
 847  "select lag(int_col, 1, int_col) over ("
848  + "order by tinyint_col, int_col) from functional.alltypes",
849  "The default parameter (parameter 3) of LEAD/LAG must be a constant");
850 
851  // wrong type of function
852  AnalysisError("select abs(float_col) over (partition by id order by tinyint_col "
853  + "rows between unbounded preceding and current row) from functional.alltypes",
854  "OVER clause requires aggregate or analytic function: abs(float_col)");
855  AnalysisError("select group_concat(string_col) over (order by tinyint_col "
856  + "rows between unbounded preceding and current row) from functional.alltypes",
857  "Aggregate function 'group_concat(string_col)' not supported with OVER clause.");
858  // Order By missing
859  AnalysisError("select sum(int_col) over (partition by id "
860  + "rows between unbounded preceding and current row) from functional.alltypes",
861  "Windowing clause requires ORDER BY clause");
862  // Order By missing for ranking fn
863  AnalysisError("select dense_rank() over (partition by id) from functional.alltypes",
864  "'dense_rank()' requires an ORDER BY clause");
865  // Order By missing for offset fn
866  AnalysisError("select lag(tinyint_col, 1, null) over (partition by id) "
867  + "from functional.alltypes",
868  "'lag(tinyint_col, 1, NULL)' requires an ORDER BY clause");
869  // Window for ranking fn
870  AnalysisError("select row_number() over (partition by id order by tinyint_col "
871  + "rows between unbounded preceding and current row) from functional.alltypes",
872  "Windowing clause not allowed with 'row_number()'");
873  // Window for offset fn
874  AnalysisError("select lead(tinyint_col, 1, null) over (partition by id "
875  + "order by tinyint_col rows between unbounded preceding and current row) "
876  + "from functional.alltypes",
877  "Windowing clause not allowed with 'lead(tinyint_col, 1, NULL)'");
878 
879  // windows
880  AnalysisError("select sum(tinyint_col) over (partition by id "
881  + "order by tinyint_col rows between unbounded following and current row) "
882  + "from functional.alltypes",
883  "UNBOUNDED FOLLOWING is only allowed for upper bound of BETWEEN");
884  AnalysisError("select sum(tinyint_col) over (partition by id "
885  + "order by tinyint_col rows unbounded following) from functional.alltypes",
886  "UNBOUNDED FOLLOWING is only allowed for upper bound of BETWEEN");
887  AnalysisError("select sum(tinyint_col) over (partition by id "
888  + "order by tinyint_col rows between current row and unbounded preceding) "
889  + "from functional.alltypes",
890  "UNBOUNDED PRECEDING is only allowed for lower bound of BETWEEN");
891  AnalysisError("select sum(tinyint_col) over (partition by id "
892  + "order by tinyint_col rows 2 following) from functional.alltypes",
893  "FOLLOWING requires a BETWEEN clause");
 894  AnalysisError(
 895  "select sum(tinyint_col) over (partition by id "
896  + "order by tinyint_col rows between 2 following and current row) "
897  + "from functional.alltypes",
898  "A lower window bound of FOLLOWING requires that the upper bound also be "
899  + "FOLLOWING");
 900  AnalysisError(
 901  "select sum(tinyint_col) over (partition by id "
902  + "order by tinyint_col rows between current row and 2 preceding) "
903  + "from functional.alltypes",
904  "An upper window bound of PRECEDING requires that the lower bound also be "
905  + "PRECEDING");
906 
907  // offset boundaries
 908  AnalysisError(
 909  "select min(int_col) over (partition by id order by tinyint_col "
910  + "rows between tinyint_col preceding and current row) "
911  + "from functional.alltypes",
912  "For ROWS window, the value of a PRECEDING/FOLLOWING offset must be a "
913  + "constant positive integer: tinyint_col PRECEDING");
 914  AnalysisError(
 915  "select min(int_col) over (partition by id order by tinyint_col "
916  + "rows between current row and '2' following) from functional.alltypes",
917  "For ROWS window, the value of a PRECEDING/FOLLOWING offset must be a "
918  + "constant positive integer: '2' FOLLOWING");
 919  AnalysisError(
 920  "select min(int_col) over (partition by id order by tinyint_col "
921  + "rows between -2 preceding and current row) from functional.alltypes",
922  "For ROWS window, the value of a PRECEDING/FOLLOWING offset must be a "
923  + "constant positive integer: -2 PRECEDING");
 924  AnalysisError(
 925  "select min(int_col) over (partition by id order by tinyint_col "
926  + "rows between 2 preceding and 3 preceding) from functional.alltypes",
927  "Offset boundaries are in the wrong order: ROWS BETWEEN 2 PRECEDING AND "
928  + "3 PRECEDING");
 929  AnalysisError(
 930  "select min(int_col) over (partition by id order by tinyint_col "
931  + "rows between count(*) preceding and current row) from functional.alltypes",
932  "For ROWS window, the value of a PRECEDING/FOLLOWING offset must be a "
933  + "constant positive integer: count(*) PRECEDING");
934 
935  // TODO: Enable after RANGE windows with offset boundaries are supported.
936  //AnalysisError(
937  // "select min(int_col) over (partition by id order by float_col "
938  // + "range between -2.1 preceding and current row) from functional.alltypes",
939  // "For RANGE window, the value of a PRECEDING/FOLLOWING offset must be a "
940  // + "constant positive number: -2.1 PRECEDING");
941  //AnalysisError(
942  // "select min(int_col) over (partition by id order by int_col "
943  // + "range between current row and 2.1 following) from functional.alltypes",
944  // "The value expression of a PRECEDING/FOLLOWING clause of a RANGE window must "
945  // + "be implicitly convertable to the ORDER BY expression's type: 2.1 cannot "
946  // + "be implicitly converted to INT");
947  //AnalysisError(
948  // "select min(int_col) over (partition by id order by int_col "
949  // + "range between 2 * tinyint_col preceding and current row) "
950  // + "from functional.alltypes",
951  // "For RANGE window, the value of a PRECEDING/FOLLOWING offset must be a "
952  // + "constant positive number");
953  //AnalysisError(
954  // "select min(int_col) over (partition by id order by int_col "
955  // + "range between 3.1 following and 2.0 following) "
956  // + "from functional.alltypes",
957  // "Offset boundaries are in the wrong order");
959  //AnalysisError(
960  // "select min(int_col) over (partition by id order by int_col, tinyint_col "
961  // + "range between 2 preceding and current row) "
962  // + "from functional.alltypes",
963  // "Only one ORDER BY expression allowed if used with a RANGE window with "
964  // + "PRECEDING/FOLLOWING");
965  }
966 
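 967  // Tests that the operands of arithmetic expressions are cast to a compatible type
 968  // and that the expression produces the expected result type.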
970  @Test
 971  public void TestArithmeticTypeCasts() throws AnalysisException {
 972  // Test all non-decimal numeric types and the null type.
973  // Decimal has custom type promotion rules which are tested elsewhere.
974  Type[] numericTypes = new Type[] { Type.TINYINT, Type.SMALLINT, Type.INT,
975  Type.BIGINT, Type.FLOAT, Type.DOUBLE , Type.NULL };
976  for (Type type1: numericTypes) {
977  for (Type type2: numericTypes) {
978  Type t = Type.getAssignmentCompatibleType(type1, type2);
979  assertTrue(t.isScalarType());
980  ScalarType compatibleType = (ScalarType) t;
981  Type promotedType = compatibleType.getNextResolutionType();
982  boolean inputsNull = false;
983  if (type1.isNull() && type2.isNull()) {
984  inputsNull = true;
985  promotedType = Type.DOUBLE;
986  compatibleType = Type.INT;
987  }
988 
989  // +, -, *, %
990  typeCastTest(type1, type2, false, ArithmeticExpr.Operator.ADD, null,
991  promotedType);
992  typeCastTest(type1, type2, true, ArithmeticExpr.Operator.ADD, null,
993  promotedType);
994  typeCastTest(type1, type2, false, ArithmeticExpr.Operator.SUBTRACT, null,
995  promotedType);
996  typeCastTest(type1, type2, true, ArithmeticExpr.Operator.SUBTRACT, null,
997  promotedType);
998  typeCastTest(type1, type2, false, ArithmeticExpr.Operator.MULTIPLY, null,
999  promotedType);
1000  typeCastTest(type1, type2, true, ArithmeticExpr.Operator.MULTIPLY, null,
1001  promotedType);
1002  typeCastTest(type1, type2, false, ArithmeticExpr.Operator.MOD, null,
1003  inputsNull ? Type.DOUBLE : compatibleType);
1004  typeCastTest(type1, type2, true, ArithmeticExpr.Operator.MOD, null,
1005  inputsNull ? Type.DOUBLE : compatibleType);
1006 
1007  // /
1008  typeCastTest(type1, type2, false, ArithmeticExpr.Operator.DIVIDE, null,
1009  Type.DOUBLE);
1010  typeCastTest(type1, type2, true, ArithmeticExpr.Operator.DIVIDE, null,
1011  Type.DOUBLE);
1012 
1013  // div, &, |, ^ only for fixed-point types
1014  if ((!type1.isFixedPointType() && !type1.isNull())
1015  || (!type2.isFixedPointType() && !type2.isNull())) {
1016  continue;
1017  }
1018  typeCastTest(type1, type2, false, ArithmeticExpr.Operator.INT_DIVIDE, null,
1019  compatibleType);
1020  typeCastTest(type1, type2, true, ArithmeticExpr.Operator.INT_DIVIDE, null,
1021  compatibleType);
1022  typeCastTest(type1, type2, false, ArithmeticExpr.Operator.BITAND, null,
1023  compatibleType);
1024  typeCastTest(type1, type2, true, ArithmeticExpr.Operator.BITAND, null,
1025  compatibleType);
1026  typeCastTest(type1, type2, false, ArithmeticExpr.Operator.BITOR, null,
1027  compatibleType);
1028  typeCastTest(type1, type2, true, ArithmeticExpr.Operator.BITOR, null,
1029  compatibleType);
1030  typeCastTest(type1, type2, false, ArithmeticExpr.Operator.BITXOR, null,
1031  compatibleType);
1032  typeCastTest(type1, type2, true, ArithmeticExpr.Operator.BITXOR, null,
1033  compatibleType);
1034  }
1035  }
1036 
1037  List<Type> fixedPointTypes = new ArrayList<Type>(Type.getIntegerTypes());
1038  fixedPointTypes.add(Type.NULL);
1039  for (Type type: fixedPointTypes) {
1040  typeCastTest(null, type, false, ArithmeticExpr.Operator.BITNOT, null,
1041  type.isNull() ? Type.INT : type);
1042  }
1043  }
1044 
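 1045  // Tests that both operands of a comparison predicate are cast to a compatible type.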
1048  @Test
 1049  public void TestComparisonTypeCasts() throws AnalysisException {
 1050  // Test all non-decimal numeric types and the null type.
1051  // Decimal has custom type promotion rules which are tested elsewhere.
1052  Type[] types = new Type[] { Type.TINYINT, Type.SMALLINT, Type.INT,
1053  Type.BIGINT, Type.FLOAT, Type.DOUBLE , Type.NULL };
1054 
1055  // test on all comparison ops
1056  for (BinaryPredicate.Operator cmpOp: BinaryPredicate.Operator.values()) {
1057  for (Type type1: types) {
1058  for (Type type2: types) {
1059  Type compatibleType =
1060  Type.getAssignmentCompatibleType(type1, type2);
1061  typeCastTest(type1, type2, false, null, cmpOp, compatibleType);
1062  typeCastTest(type1, type2, true, null, cmpOp, compatibleType);
1063  }
1064  }
1065  }
1066  }
1067 
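 1068  // Builds an arithmetic expr (if arithmeticOp is set) or a comparison predicate
 1069  // (if cmpOp is set) over the two given types, using a literal for the first operand
 1070  // when op1IsLiteral, analyzes it, and verifies that both children are either NULL
 1071  // or cast to the expected opType.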
1073  private void typeCastTest(Type type1, Type type2,
1074  boolean op1IsLiteral, ArithmeticExpr.Operator arithmeticOp,
1075  BinaryPredicate.Operator cmpOp, Type opType) throws AnalysisException {
1076  Preconditions.checkState((arithmeticOp == null) != (cmpOp == null));
1077  boolean arithmeticMode = arithmeticOp != null;
1078  String op1 = "";
1079  if (type1 != null) {
1080  if (op1IsLiteral) {
1081  op1 = typeToLiteralValue_.get(type1);
1082  } else {
1083  op1 = TestSchemaUtils.getAllTypesColumn(type1);
1084  }
1085  }
1086  String op2 = TestSchemaUtils.getAllTypesColumn(type2);
1087  String queryStr = null;
1088  if (arithmeticMode) {
1089  queryStr = "select " + op1 + " " + arithmeticOp.toString() + " " + op2 +
1090  " AS a from functional.alltypes";
1091  } else {
1092  queryStr = "select int_col from functional.alltypes " +
1093  "where " + op1 + " " + cmpOp.toString() + " " + op2;
1094  }
1095  SelectStmt select = (SelectStmt) AnalyzesOk(queryStr);
1096  Expr expr = null;
1097  if (arithmeticMode) {
1098  ArrayList<Expr> selectListExprs = select.getResultExprs();
1099  assertNotNull(selectListExprs);
1100  assertEquals(selectListExprs.size(), 1);
1101  // check the first expr in select list
1102  expr = selectListExprs.get(0);
1103  Assert.assertEquals("opType= " + opType + " exprType=" + expr.getType(),
1104  opType, expr.getType());
1105  } else {
1106  // check the where clause
1107  expr = select.getWhereClause();
1108  if (!expr.getType().isNull()) {
1109  assertEquals(Type.BOOLEAN, expr.getType());
1110  }
1111  }
1112  checkCasts(expr);
1113  // The children's types must be NULL or equal to the requested opType.
1114  Type child1Type = expr.getChild(0).getType();
1115  Type child2Type = type1 == null ? null : expr.getChild(1).getType();
1116  Assert.assertTrue("opType= " + opType + " child1Type=" + child1Type,
1117  opType.equals(child1Type) || opType.isNull() || child1Type.isNull());
1118  if (type1 != null) {
1119  Assert.assertTrue("opType= " + opType + " child2Type=" + child2Type,
1120  opType.equals(child2Type) || opType.isNull() || child2Type.isNull());
1121  }
1122  }
1123 
1124  private void checkReturnType(String stmt, Type resultType) {
1125  SelectStmt select = (SelectStmt) AnalyzesOk(stmt);
1126  ArrayList<Expr> selectListExprs = select.getResultExprs();
1127  assertNotNull(selectListExprs);
1128  assertEquals(selectListExprs.size(), 1);
1129  // check the first expr in select list
1130  Expr expr = selectListExprs.get(0);
1131  assertEquals("Expected: " + resultType + " != " + expr.getType(),
1132  resultType, expr.getType());
1133  }
1134 
1135  @Test
1137  checkReturnType("select 1", Type.TINYINT);
1138  checkReturnType("select 1.1", ScalarType.createDecimalType(2,1));
1139  checkReturnType("select 01.1", ScalarType.createDecimalType(2,1));
1140  checkReturnType("select 1 + 1.1", Type.DOUBLE);
1141  checkReturnType("select 0.23 + 1.1", ScalarType.createDecimalType(4,2));
1142 
1143  checkReturnType("select float_col + float_col from functional.alltypestiny",
1144  Type.DOUBLE);
1145  checkReturnType("select int_col + int_col from functional.alltypestiny",
1146  Type.BIGINT);
1147 
1148  // floating point + numeric literal = floating point
1149  checkReturnType("select float_col + 1.1 from functional.alltypestiny",
1150  Type.DOUBLE);
1151  // decimal + numeric literal = decimal
1152  checkReturnType("select d1 + 1.1 from functional.decimal_tbl",
1154  // int + numeric literal = floating point
1155  checkReturnType("select int_col + 1.1 from functional.alltypestiny",
1156  Type.DOUBLE);
1157 
1158  // Explicitly casting the literal to a decimal will override the behavior
1159  checkReturnType("select int_col + cast(1.1 as decimal(2,1)) from "
1160  + " functional.alltypestiny", ScalarType.createDecimalType(12,1));
1161  checkReturnType("select float_col + cast(1.1 as decimal(2,1)) from "
1162  + " functional.alltypestiny", ScalarType.createDecimalType(38,9));
1163  checkReturnType("select float_col + cast(1.1*1.2+1.3 as decimal(2,1)) from "
1164  + " functional.alltypestiny", ScalarType.createDecimalType(38,9));
1165 
1166  // The location and complexity of the expr should not matter.
1167  checkReturnType("select 1.0 + float_col + 1.1 from functional.alltypestiny",
1168  Type.DOUBLE);
1169  checkReturnType("select 1.0 + 2.0 + float_col from functional.alltypestiny",
1170  Type.DOUBLE);
1171  checkReturnType("select 1.0 + 2.0 + pi() * float_col from functional.alltypestiny",
1172  Type.DOUBLE);
1173  checkReturnType("select 1.0 + d1 + 1.1 from functional.decimal_tbl",
1175  checkReturnType("select 1.0 + 2.0 + d1 from functional.decimal_tbl",
1177  checkReturnType("select 1.0 + 2.0 + pi()*d1 from functional.decimal_tbl",
1178  ScalarType.createDecimalType(38,17));
1179 
1180  // Test with multiple cols
1181  checkReturnType("select double_col + 1.23 + float_col + 1.0 " +
1182  " from functional.alltypestiny", Type.DOUBLE);
1183  checkReturnType("select double_col + 1.23 + float_col + 1.0 + int_col " +
1184  " + bigint_col from functional.alltypestiny", Type.DOUBLE);
1185  checkReturnType("select d1 + 1.23 + d2 + 1.0 " +
1186  " from functional.decimal_tbl", ScalarType.createDecimalType(14,2));
1187 
1188  // Test with slots of both decimal and non-decimal types.
1189  checkReturnType("select t1.int_col + t2.c1 from functional.alltypestiny t1 " +
1190  " cross join functional.decimal_tiny t2", ScalarType.createDecimalType(15,4));
1191  checkReturnType("select 1.1 + t1.int_col + t2.c1 from functional.alltypestiny t1 " +
1192  " cross join functional.decimal_tiny t2", ScalarType.createDecimalType(38,17));
1193  }
1194 
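  // Note on the helper below: for each implicit CastExpr in the tree it asserts that the cast
  // actually changes the type and that its child is not a LiteralExpr -- presumably because
  // compatible literals are re-typed (folded) during analysis rather than wrapped in a cast.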
1201  private void checkCasts(Expr expr) {
1202  if (expr instanceof CastExpr) {
1203  CastExpr cast = (CastExpr)expr;
1204  if (cast.isImplicit()) {
1205  Assert.assertFalse(expr.getType() + " == " + expr.getChild(0).getType(),
1206  expr.getType().equals(expr.getChild(0).getType()));
1207  Assert.assertFalse(expr.debugString(), expr.getChild(0) instanceof LiteralExpr);
1208  }
1209  }
1210  for (Expr child: expr.getChildren()) {
1211  checkCasts(child);
1212  }
1213  }
1214 
1215  // TODO: re-enable tests as soon as we have date-related types
1216  // @Test
1218  // positive tests are included in TestComparisonTypeCasts
1219  AnalysisError("select int_col from functional.alltypes where date_col = 'ABCD'",
1220  "Unable to parse string 'ABCD' to date");
1221  AnalysisError("select int_col from functional.alltypes " +
1222  "where date_col = 'ABCD-EF-GH'",
1223  "Unable to parse string 'ABCD-EF-GH' to date");
1224  AnalysisError("select int_col from functional.alltypes where date_col = '2006'",
1225  "Unable to parse string '2006' to date");
1226  AnalysisError("select int_col from functional.alltypes where date_col = '0.5'",
1227  "Unable to parse string '0.5' to date");
1228  AnalysisError("select int_col from functional.alltypes where " +
1229  "date_col = '2006-10-10 ABCD'",
1230  "Unable to parse string '2006-10-10 ABCD' to date");
1231  AnalysisError("select int_col from functional.alltypes where " +
1232  "date_col = '2006-10-10 12:11:05.ABC'",
1233  "Unable to parse string '2006-10-10 12:11:05.ABC' to date");
1234  }
1235 
1236  // TODO: generate all possible error combinations of types and operands
1237  @Test
1239  // negative tests, no floating point types allowed
1240  AnalysisError("select ~float_col from functional.alltypes",
1241  "Bitwise operations only allowed on integer types");
1242  AnalysisError("select float_col ^ int_col from functional.alltypes",
1243  "Invalid non-integer argument to operation '^'");
1244  AnalysisError("select float_col & int_col from functional.alltypes",
1245  "Invalid non-integer argument to operation '&'");
1246  AnalysisError("select double_col | bigint_col from functional.alltypes",
1247  "Invalid non-integer argument to operation '|'");
1248  AnalysisError("select int_col from functional.alltypes where " +
1249  "float_col & bool_col > 5",
1250  "Arithmetic operation requires numeric operands");
1251  }
1252 
1261  @Test
1263  String[] valueTypeCols =
1264  new String[] {"tinyint_col", "smallint_col", "int_col", "bigint_col", "NULL"};
1265 
1266  // Tests all time units.
1267  for (TimeUnit timeUnit: TimeUnit.values()) {
1268  // Tests on all valid time value types (fixed points).
1269  for (String col: valueTypeCols) {
1270  // Non-function-call like version.
1271  AnalyzesOk("select timestamp_col + interval " + col + " " + timeUnit.toString() +
1272  " from functional.alltypes");
1273  AnalyzesOk("select timestamp_col - interval " + col + " " + timeUnit.toString() +
1274  " from functional.alltypes");
1275  AnalyzesOk("select NULL - interval " + col + " " + timeUnit.toString() +
1276  " from functional.alltypes");
1277  // Reversed interval and timestamp using addition.
1278  AnalyzesOk("select interval " + col + " " + timeUnit.toString() +
1279  " + timestamp_col from functional.alltypes");
1280  // Function-call like version.
1281  AnalyzesOk("select date_add(timestamp_col, interval " + col + " " +
1282  timeUnit.toString() + ") from functional.alltypes");
1283  AnalyzesOk("select date_sub(timestamp_col, interval " + col + " " +
1284  timeUnit.toString() + ") from functional.alltypes");
1285  AnalyzesOk("select date_add(NULL, interval " + col + " " +
1286  timeUnit.toString() + ") from functional.alltypes");
1287  AnalyzesOk("select date_sub(NULL, interval " + col + " " +
1288  timeUnit.toString() + ") from functional.alltypes");
1289  }
1290  }
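  // For a concrete (col, timeUnit) pair, e.g. int_col and YEAR, the loops above analyze
  // "select timestamp_col + interval int_col YEAR from functional.alltypes" as well as the
  // equivalent date_add()/date_sub() forms (assuming TimeUnit.YEAR.toString() is "YEAR").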
1291 
1292  // First operand does not return a timestamp. Non-function-call like version.
1293  AnalysisError("select float_col + interval 10 years from functional.alltypes",
1294  "Operand 'float_col' of timestamp arithmetic expression " +
1295  "'float_col + INTERVAL 10 years' returns type 'FLOAT'. " +
1296  "Expected type 'TIMESTAMP'.");
1297  AnalysisError("select string_col + interval 10 years from functional.alltypes",
1298  "Operand 'string_col' of timestamp arithmetic expression " +
1299  "'string_col + INTERVAL 10 years' returns type 'STRING'. " +
1300  "Expected type 'TIMESTAMP'.");
1301  AnalysisError(
1302  "select int_struct_col + interval 10 years from functional.allcomplextypes",
1303  "Operand 'int_struct_col' of timestamp arithmetic expression " +
1304  "'int_struct_col + INTERVAL 10 years' returns type 'STRUCT<f1:INT,f2:INT>'. " +
1305  "Expected type 'TIMESTAMP'.");
1306  // Reversed interval and timestamp using addition.
1307  AnalysisError("select interval 10 years + float_col from functional.alltypes",
1308  "Operand 'float_col' of timestamp arithmetic expression " +
1309  "'INTERVAL 10 years + float_col' returns type 'FLOAT'. " +
1310  "Expected type 'TIMESTAMP'");
1311  AnalysisError("select interval 10 years + string_col from functional.alltypes",
1312  "Operand 'string_col' of timestamp arithmetic expression " +
1313  "'INTERVAL 10 years + string_col' returns type 'STRING'. " +
1314  "Expected type 'TIMESTAMP'");
1315  AnalysisError(
1316  "select interval 10 years + int_array_col from functional.allcomplextypes",
1317  "Operand 'int_array_col' of timestamp arithmetic expression " +
1318  "'INTERVAL 10 years + int_array_col' returns type 'ARRAY<INT>'. " +
1319  "Expected type 'TIMESTAMP'.");
1320  // First operand does not return a timestamp. Function-call like version.
1321  AnalysisError("select date_add(float_col, interval 10 years) " +
1322  "from functional.alltypes",
1323  "Operand 'float_col' of timestamp arithmetic expression " +
1324  "'DATE_ADD(float_col, INTERVAL 10 years)' returns type 'FLOAT'. " +
1325  "Expected type 'TIMESTAMP'.");
1326  AnalysisError("select date_add(string_col, interval 10 years) " +
1327  "from functional.alltypes",
1328  "Operand 'string_col' of timestamp arithmetic expression " +
1329  "'DATE_ADD(string_col, INTERVAL 10 years)' returns type 'STRING'. " +
1330  "Expected type 'TIMESTAMP'.");
1331  AnalysisError("select date_add(int_map_col, interval 10 years) " +
1332  "from functional.allcomplextypes",
1333  "Operand 'int_map_col' of timestamp arithmetic expression " +
1334  "'DATE_ADD(int_map_col, INTERVAL 10 years)' returns type 'MAP<STRING,INT>'. " +
1335  "Expected type 'TIMESTAMP'.");
1336 
1337  // Second operand is not compatible with a fixed-point type.
1338  // Non-function-call like version.
1339  AnalysisError("select timestamp_col + interval 5.2 years from functional.alltypes",
1340  "Operand '5.2' of timestamp arithmetic expression " +
1341  "'timestamp_col + INTERVAL 5.2 years' returns type 'DECIMAL(2,1)'. " +
1342  "Expected an integer type.");
1343  AnalysisError("select cast(0 as timestamp) + interval int_array_col years " +
1344  "from functional.allcomplextypes",
1345  "Operand 'int_array_col' of timestamp arithmetic expression " +
1346  "'CAST(0 AS TIMESTAMP) + INTERVAL int_array_col years' " +
1347  "returns type 'ARRAY<INT>'. Expected an integer type.");
1348 
1349  // No implicit cast from STRING to integer types.
1350  AnalysisError("select timestamp_col + interval '10' years from functional.alltypes",
1351  "Operand ''10'' of timestamp arithmetic expression 'timestamp_col + " +
1352  "INTERVAL '10' years' returns type 'STRING'. " +
1353  "Expected an integer type.");
1354  AnalysisError("select date_add(timestamp_col, interval '10' years) " +
1355  "from functional.alltypes", "Operand ''10'' of timestamp arithmetic " +
1356  "expression 'DATE_ADD(timestamp_col, INTERVAL '10' years)' returns " +
1357  "type 'STRING'. Expected an integer type.");
1358 
1359  // Cast from STRING to INT.
1360  AnalyzesOk("select timestamp_col + interval cast('10' as int) years " +
1361  "from functional.alltypes");
1362  // Reversed interval and timestamp using addition.
1363  AnalysisError("select interval 5.2 years + timestamp_col from functional.alltypes",
1364  "Operand '5.2' of timestamp arithmetic expression " +
1365  "'INTERVAL 5.2 years + timestamp_col' returns type 'DECIMAL(2,1)'. " +
1366  "Expected an integer type.");
1367  // Cast from STRING to INT.
1368  AnalyzesOk("select interval cast('10' as int) years + timestamp_col " +
1369  "from functional.alltypes");
1370  // Second operand is not compatible with type INT. Function-call like version.
1371  AnalysisError("select date_add(timestamp_col, interval 5.2 years) " +
1372  "from functional.alltypes",
1373  "Operand '5.2' of timestamp arithmetic expression " +
1374  "'DATE_ADD(timestamp_col, INTERVAL 5.2 years)' returns type 'DECIMAL(2,1)'. " +
1375  "Expected an integer type.");
1376  // Cast from STRING to INT.
1377  AnalyzesOk("select date_add(timestamp_col, interval cast('10' as int) years) " +
1378  " from functional.alltypes");
1379 
1380  // Invalid time unit. Non-function-call like version.
1381  AnalysisError("select timestamp_col + interval 10 error from functional.alltypes",
1382  "Invalid time unit 'error' in timestamp arithmetic expression " +
1383  "'timestamp_col + INTERVAL 10 error'.");
1384  AnalysisError("select timestamp_col - interval 10 error from functional.alltypes",
1385  "Invalid time unit 'error' in timestamp arithmetic expression " +
1386  "'timestamp_col - INTERVAL 10 error'.");
1387  // Reversed interval and timestamp using addition.
1388  AnalysisError("select interval 10 error + timestamp_col from functional.alltypes",
1389  "Invalid time unit 'error' in timestamp arithmetic expression " +
1390  "'INTERVAL 10 error + timestamp_col'.");
1391  // Invalid time unit. Function-call like version.
1392  AnalysisError("select date_add(timestamp_col, interval 10 error) " +
1393  "from functional.alltypes",
1394  "Invalid time unit 'error' in timestamp arithmetic expression " +
1395  "'DATE_ADD(timestamp_col, INTERVAL 10 error)'.");
1396  AnalysisError("select date_sub(timestamp_col, interval 10 error) " +
1397  "from functional.alltypes",
1398  "Invalid time unit 'error' in timestamp arithmetic expression " +
1399  "'DATE_SUB(timestamp_col, INTERVAL 10 error)'.");
1400  }
1401 
1402  @Test
1404  AnalyzesOk("select pi()");
1405  AnalyzesOk("select sin(pi())");
1406  AnalyzesOk("select sin(cos(pi()))");
1407  AnalyzesOk("select sin(cos(tan(e())))");
1408  AnalysisError("select pi(*)", "Cannot pass '*' to scalar function.");
1409  AnalysisError("select sin(DISTINCT 1)",
1410  "Cannot pass 'DISTINCT' to scalar function.");
1411  AnalysisError("select * from functional.alltypes where pi(*) = 5",
1412  "Cannot pass '*' to scalar function.");
1413  // Invalid function name.
1414  AnalysisError("select a.b.sin()",
1415  "Invalid function name: 'a.b.sin'. Expected [dbname].funcname");
1416 
1417  // Call functions that only accept decimal arguments.
1418  AnalyzesOk("select precision(1)");
1419  AnalyzesOk("select precision(cast('1.1' as decimal))");
1420  AnalyzesOk("select scale(1.1)");
1421  AnalysisError("select scale('1.1')",
1422  "No matching function with signature: scale(STRING).");
1423 
1424  AnalyzesOk("select round(cast('1.1' as decimal), cast(1 as int))");
1425  // 1 is a tinyint, so the function is not a perfect match
1426  AnalyzesOk("select round(cast('1.1' as decimal), 1)");
1427 
1428  // No matching signature for complex type.
1429  AnalysisError("select lower(int_struct_col) from functional.allcomplextypes",
1430  "No matching function with signature: lower(STRUCT<f1:INT,f2:INT>).");
1431 
1432  // Special cases for FROM in function call
1433  AnalyzesOk("select extract(year from now())");
1434  AnalysisError("select extract(foo from now())",
1435  "Time unit 'foo' in expression 'EXTRACT(foo FROM now())' is invalid. Expected " +
1436  "one of YEAR, MONTH, DAY, HOUR, MINUTE, SECOND, MILLISECOND, EPOCH.");
1437  AnalysisError("select extract(year from 0)",
1438  "Expression '0' in 'EXTRACT(year FROM 0)' has a return type of TINYINT but a " +
1439  "TIMESTAMP is required.");
1440  AnalysisError("select functional.extract(year from now())",
1441  "Function functional.extract conflicts with the EXTRACT builtin");
1442  AnalysisError("select date_part(year from now())",
1443  "Function DATE_PART does not accept the keyword FROM");
1444  }
1445 
1446  @Test
1448  AnalyzesOk("select concat('a')");
1449  AnalyzesOk("select concat('a', 'b')");
1450  AnalyzesOk("select concat('a', 'b', 'c')");
1451  AnalyzesOk("select concat('a', 'b', 'c', 'd')");
1452  AnalyzesOk("select concat('a', 'b', 'c', 'd', 'e')");
1453  // Test different vararg type signatures for same function name.
1454  AnalyzesOk("select coalesce(true)");
1455  AnalyzesOk("select coalesce(true, false, true)");
1456  AnalyzesOk("select coalesce(5)");
1457  AnalyzesOk("select coalesce(5, 6, 7)");
1458  AnalyzesOk("select coalesce('a')");
1459  AnalyzesOk("select coalesce('a', 'b', 'c')");
1460  // Need at least one argument.
1461  AnalysisError("select concat()",
1462  "No matching function with signature: concat().");
1463  AnalysisError("select coalesce()",
1464  "No matching function with signature: coalesce().");
1465  }
1466 
1472  @Test
1474  // Test fixed arg functions using 'substring' as representative.
1475  AnalyzesOk("select substring(NULL, 1, 2)");
1476  AnalyzesOk("select substring('a', NULL, 2)");
1477  AnalyzesOk("select substring('a', 1, NULL)");
1478  AnalyzesOk("select substring(NULL, NULL, NULL)");
1479  // Cannot cast non-null args to match a signature.
1480  AnalysisError("select substring(1, NULL, NULL)",
1481  "No matching function with signature: " +
1482  "substring(TINYINT, NULL_TYPE, NULL_TYPE).");
1483  AnalysisError("select substring(NULL, 'a', NULL)",
1484  "No matching function with signature: " +
1485  "substring(NULL_TYPE, STRING, NULL_TYPE).");
1486 
1487  // Test vararg functions with 'concat' as representative.
1488  AnalyzesOk("select concat(NULL, 'a', 'b')");
1489  AnalyzesOk("select concat('a', NULL, 'b')");
1490  AnalyzesOk("select concat('a', 'b', NULL)");
1491  AnalyzesOk("select concat(NULL, NULL, NULL)");
1492  // Cannot cast non-null args to match a signature.
1493  AnalysisError("select concat(NULL, 1, 'b')",
1494  "No matching function with signature: concat(NULL_TYPE, TINYINT, STRING).");
1495  AnalysisError("select concat('a', NULL, 1)",
1496  "No matching function with signature: concat(STRING, NULL_TYPE, TINYINT).");
1497  AnalysisError("select concat(1, 'b', NULL)",
1498  "No matching function with signature: concat(TINYINT, STRING, NULL_TYPE).");
1499  }
1500 
1501  @Test
1502  public void TestCaseExpr() throws AnalysisException {
1503  // No case expr.
1504  AnalyzesOk("select case when 20 > 10 then 20 else 15 end");
1505  // No else.
1506  AnalyzesOk("select case when 20 > 10 then 20 end");
1507  // First when condition is a boolean slotref.
1508  AnalyzesOk("select case when bool_col then 20 else 15 end from functional.alltypes");
1509  // Requires casting then exprs.
1510  AnalyzesOk("select case when 20 > 10 then 20 when 1 > 2 then 1.0 else 15 end");
1511  // Requires casting then exprs.
1512  AnalyzesOk("select case when 20 > 10 then 20 when 1 > 2 then 1.0 " +
1513  "when 4 < 5 then 2 else 15 end");
1514  // First when expr doesn't return boolean.
1515  AnalysisError("select case when 20 then 20 when 1 > 2 then timestamp_col " +
1516  "when 4 < 5 then 2 else 15 end from functional.alltypes",
1517  "When expr '20' is not of type boolean and not castable to type boolean.");
1518  AnalysisError("select case when int_array_col then 20 when 1 > 2 then id end " +
1519  "from functional.allcomplextypes",
1520  "When expr 'int_array_col' is not of type boolean and not castable to " +
1521  "type boolean.");
1522  // Then exprs return incompatible types.
1523  AnalysisError("select case when 20 > 10 then 20 when 1 > 2 then timestamp_col " +
1524  "when 4 < 5 then 2 else 15 end from functional.alltypes",
1525  "Incompatible return types 'TINYINT' and 'TIMESTAMP' " +
1526  "of exprs '20' and 'timestamp_col'.");
1527  AnalysisError("select case when 20 > 10 then 20 when 1 > 2 then int_map_col " +
1528  "else 15 end from functional.allcomplextypes",
1529  "Incompatible return types 'TINYINT' and 'MAP<STRING,INT>' of exprs " +
1530  "'20' and 'int_map_col'.");
1531 
1532  // With case expr.
1533  AnalyzesOk("select case int_col when 20 then 30 else 15 end " +
1534  "from functional.alltypes");
1535  // No else.
1536  AnalyzesOk("select case int_col when 20 then 30 end " +
1537  "from functional.alltypes");
1538  // Requires casting case expr.
1539  AnalyzesOk("select case int_col when bigint_col then 30 else 15 end " +
1540  "from functional.alltypes");
1541  // Requires casting when expr.
1542  AnalyzesOk("select case bigint_col when int_col then 30 else 15 end " +
1543  "from functional.alltypes");
1544  // Requires multiple casts.
1545  AnalyzesOk("select case bigint_col when int_col then 30 " +
1546  "when double_col then 1.0 else 15 end from functional.alltypes");
1547  // Type of case expr is incompatible with first when expr.
1548  AnalysisError("select case bigint_col when timestamp_col then 30 " +
1549  "when double_col then 1.0 else 15 end from functional.alltypes",
1550  "Incompatible return types 'BIGINT' and 'TIMESTAMP' " +
1551  "of exprs 'bigint_col' and 'timestamp_col'.");
1552  // Then exprs return incompatible types.
1553  AnalysisError("select case bigint_col when int_col then 30 " +
1554  "when double_col then timestamp_col else 15 end from functional.alltypes",
1555  "Incompatible return types 'TINYINT' and 'TIMESTAMP' " +
1556  "of exprs '30' and 'timestamp_col'.");
1557 
1558  // Test different type classes (all types are tested in BE tests).
1559  AnalyzesOk("select case when true then 1 end");
1560  AnalyzesOk("select case when true then 1.0 end");
1561  AnalyzesOk("select case when true then 'abc' end");
1562  AnalyzesOk("select case when true then cast('2011-01-01 09:01:01' " +
1563  "as timestamp) end");
1564  // Test NULLs.
1565  AnalyzesOk("select case NULL when 1 then 2 else 3 end");
1566  AnalyzesOk("select case 1 when NULL then 2 else 3 end");
1567  AnalyzesOk("select case 1 when 2 then NULL else 3 end");
1568  AnalyzesOk("select case 1 when 2 then 3 else NULL end");
1569  AnalyzesOk("select case NULL when NULL then NULL else NULL end");
1570  }
1571 
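  // The test below relies on decode() being analyzed as an equivalent CASE expression;
  // assertCaseEquivalence() further down checks this by comparing the CASE SQL generated
  // via toCaseSql() and the thrift trees of both forms.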
1572  @Test
1573  public void TestDecodeExpr() throws AnalysisException {
1574  AnalyzesOk("select decode(1, 1, 1)");
1575  AnalyzesOk("select decode(1, 1, 'foo')");
1576  AnalyzesOk("select decode(1, 2, true, false)");
1577  AnalyzesOk("select decode(null, null, null, null, null, null)");
1578  assertCaseEquivalence(
1579  "CASE WHEN 1 = 2 THEN NULL ELSE 'foo' END",
1580  "decode(1, 2, NULL, 'foo')");
1581  assertCaseEquivalence(
1582  "CASE WHEN 1 = 2 THEN NULL ELSE 4 END",
1583  "decode(1, 2, NULL, 4)");
1584  assertCaseEquivalence(
1585  "CASE WHEN string_col = 'a' THEN 1 WHEN string_col = 'b' THEN 2 ELSE 3 END",
1586  "decode(string_col, 'a', 1, 'b', 2, 3)");
1587  assertCaseEquivalence(
1588  "CASE WHEN int_col IS NULL AND bigint_col IS NULL "
1589  + "OR int_col = bigint_col THEN tinyint_col ELSE smallint_col END",
1590  "decode(int_col, bigint_col, tinyint_col, smallint_col)");
1591  assertCaseEquivalence(
1592  "CASE WHEN int_col = 1 THEN 1 WHEN int_col IS NULL AND bigint_col IS NULL OR "
1593  + "int_col = bigint_col THEN 2 WHEN int_col IS NULL THEN 3 ELSE 4 END",
1594  "decode(int_col, 1, 1, bigint_col, 2, NULL, 3, 4)");
1595  assertCaseEquivalence(
1596  "CASE WHEN NULL IS NULL THEN NULL ELSE NULL END",
1597  "decode(null, null, null, null)");
1598 
1599  AnalysisError("select decode()",
1600  "DECODE in 'decode()' requires at least 3 arguments");
1601  AnalysisError("select decode(1)",
1602  "DECODE in 'decode(1)' requires at least 3 arguments");
1603  AnalysisError("select decode(1, 2)",
1604  "DECODE in 'decode(1, 2)' requires at least 3 arguments");
1605  AnalysisError("select decode(*)", "Cannot pass '*'");
1606  AnalysisError("select decode(distinct 1, 2, 3)", "Cannot pass 'DISTINCT'");
1607  AnalysisError("select decode(true, 'foo', 1)",
1608  "operands of type BOOLEAN and STRING are not comparable: TRUE = 'foo'");
1609  AnalysisError("select functional.decode(1, 1, 1)", "functional.decode() unknown");
1610  }
1611 
1616  void assertCaseEquivalence(String caseSql, String decodeSql)
1617  throws AnalysisException {
1618  String sqlTemplate = "select %s from functional.alltypes";
1619  SelectStmt stmt = (SelectStmt)AnalyzesOk(String.format(sqlTemplate, caseSql));
1620  CaseExpr caseExpr =
1621  (CaseExpr)stmt.getSelectList().getItems().get(0).getExpr();
1622  List<SlotRef> slotRefs = Lists.newArrayList();
1623  caseExpr.collect(Predicates.instanceOf(SlotRef.class), slotRefs);
1624  for (SlotRef slotRef: slotRefs) {
1625  slotRef.getDesc().setIsMaterialized(true);
1626  slotRef.getDesc().setByteOffset(0);
1627  }
1628  TExpr caseThrift = caseExpr.treeToThrift();
1629  stmt = (SelectStmt)AnalyzesOk(String.format(sqlTemplate, decodeSql));
1630  CaseExpr decodeExpr =
1631  (CaseExpr)stmt.getSelectList().getItems().get(0).getExpr();
1632  Assert.assertEquals(caseSql, decodeExpr.toCaseSql());
1633  slotRefs.clear();
1634  decodeExpr.collect(Predicates.instanceOf(SlotRef.class), slotRefs);
1635  for (SlotRef slotRef: slotRefs) {
1636  slotRef.getDesc().setIsMaterialized(true);
1637  slotRef.getDesc().setByteOffset(0);
1638  }
1639  Assert.assertEquals(caseThrift, decodeExpr.treeToThrift());
1640  }
1641 
1642  @Test
1643  public void TestConditionalExprs() {
1644  // Test IF conditional expr.
1645  AnalyzesOk("select if(true, false, false)");
1646  AnalyzesOk("select if(1 != 2, false, false)");
1647  AnalyzesOk("select if(bool_col, false, true) from functional.alltypes");
1648  AnalyzesOk("select if(bool_col, int_col, double_col) from functional.alltypes");
1649  // Test NULLs.
1650  AnalyzesOk("select if(NULL, false, true) from functional.alltypes");
1651  AnalyzesOk("select if(bool_col, NULL, true) from functional.alltypes");
1652  AnalyzesOk("select if(bool_col, false, NULL) from functional.alltypes");
1653  AnalyzesOk("select if(NULL, NULL, NULL) from functional.alltypes");
1654  // No matching signature.
1655  AnalysisError("select if(true, int_struct_col, int_struct_col) " +
1656  "from functional.allcomplextypes",
1657  "No matching function with signature: " +
1658  "if(BOOLEAN, STRUCT<f1:INT,f2:INT>, STRUCT<f1:INT,f2:INT>).");
1659 
1660  // if() only accepts three arguments
1661  AnalysisError("select if(true, false, true, true)",
1662  "No matching function with signature: if(BOOLEAN, BOOLEAN, BOOLEAN, " +
1663  "BOOLEAN).");
1664  AnalysisError("select if(true, false)",
1665  "No matching function with signature: if(BOOLEAN, BOOLEAN).");
1666  AnalysisError("select if(false)",
1667  "No matching function with signature: if(BOOLEAN).");
1668 
1669  // Test IsNull() conditional function.
1670  for (PrimitiveType t: PrimitiveType.values()) {
1671  String literal = typeToLiteralValue_.get(t);
1672  AnalyzesOk(String.format("select isnull(%s, %s)", literal, literal));
1673  AnalyzesOk(String.format("select isnull(%s, NULL)", literal));
1674  AnalyzesOk(String.format("select isnull(NULL, %s)", literal));
1675  }
1676  // IsNull() requires two arguments.
1677  AnalysisError("select isnull(1)",
1678  "No matching function with signature: isnull(TINYINT).");
1679  AnalysisError("select isnull(1, 2, 3)",
1680  "No matching function with signature: isnull(TINYINT, TINYINT, TINYINT).");
1681  // Incompatible types.
1682  AnalysisError("select isnull('a', true)",
1683  "No matching function with signature: isnull(STRING, BOOLEAN).");
1684  // No matching signature.
1685  AnalysisError("select isnull(1, int_array_col) from functional.allcomplextypes",
1686  "No matching function with signature: isnull(TINYINT, ARRAY<INT>).");
1687  }
1688 
1689  @Test
1690  public void TestUdfs() {
1691  HdfsUri dummyUri = new HdfsUri("");
1692 
1693  AnalysisError("select udf()", "default.udf() unknown");
1694  AnalysisError("select functional.udf()", "functional.udf() unknown");
1695  AnalysisError("select udf(1)", "default.udf() unknown");
1696 
1697  // Add a udf default.udf(), default.udf(int), default.udf(string...),
1698  // default.udf(int, string...) and functional.udf(double)
1699  catalog_.addFunction(new ScalarFunction(new FunctionName("default", "udf"),
1700  new ArrayList<Type>(), Type.INT, dummyUri, null, null, null));
1701  catalog_.addFunction(new ScalarFunction(new FunctionName("default", "udf"),
1702  Lists.<Type>newArrayList(Type.INT),
1703  Type.INT, dummyUri, null, null, null));
1704  ScalarFunction varArgsUdf1 = new ScalarFunction(new FunctionName("default", "udf"),
1705  Lists.<Type>newArrayList(Type.STRING),
1706  Type.INT, dummyUri, null, null, null);
1707  varArgsUdf1.setHasVarArgs(true);
1708  catalog_.addFunction(varArgsUdf1);
1709  ScalarFunction varArgsUdf2 = new ScalarFunction(new FunctionName("default", "udf"),
1710  Lists.<Type>newArrayList(Type.INT, Type.STRING),
1711  Type.INT, dummyUri, null, null, null);
1712  varArgsUdf2.setHasVarArgs(true);
1713  catalog_.addFunction(varArgsUdf2);
1714  ScalarFunction udf = new ScalarFunction(new FunctionName("functional", "udf"),
1715  Lists.<Type>newArrayList(Type.DOUBLE),
1716  Type.INT, dummyUri, null, null, null);
1717  catalog_.addFunction(udf);
1718 
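  // The calls below exercise exact matches, implicit widening of the TINYINT literal 1 to
  // INT, and vararg resolution. udf(1.1) fails because the DECIMAL(2,1) literal is not
  // implicitly cast to INT during function resolution, whereas functional.udf(1.1) matches
  // the DOUBLE overload.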
1719  AnalyzesOk("select udf()");
1720  AnalyzesOk("select default.udf()");
1721  AnalyzesOk("select udf(1)");
1722  AnalyzesOk("select udf(cast (1.1 as INT))");
1723  AnalyzesOk("select udf(cast(1.1 as TINYINT))");
1724 
1725  // Var args
1726  AnalyzesOk("select udf('a')");
1727  AnalyzesOk("select udf('a', 'b')");
1728  AnalyzesOk("select udf('a', 'b', 'c')");
1729  AnalysisError("select udf(1, 1)",
1730  "No matching function with signature: default.udf(TINYINT, TINYINT).");
1731  AnalyzesOk("select udf(1, 'a')");
1732  AnalyzesOk("select udf(1, 'a', 'b')");
1733  AnalyzesOk("select udf(1, 'a', 'b', 'c')");
1734  AnalysisError("select udf(1, 'a', 2)",
1735  "No matching function with signature: default.udf(TINYINT, STRING, TINYINT).");
1736 
1737  AnalysisError("select udf(1.1)",
1738  "No matching function with signature: default.udf(DECIMAL(2,1))");
1739 
1740  AnalyzesOk("select functional.udf(1.1)");
1741  AnalysisError("select functional.udf('Hello')",
1742  "No matching function with signature: functional.udf(STRING).");
1743 
1744  AnalysisError("select udf(1, 2)",
1745  "No matching function with signature: default.udf(TINYINT, TINYINT).");
1746  catalog_.removeFunction(udf);
1747  }
1748 
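  // In the test below the IN list holds EXPR_CHILDREN_LIMIT - 1 literals; together with the
  // left operand "1" the predicate has exactly EXPR_CHILDREN_LIMIT children and analyzes OK.
  // One extra literal (or one extra WHEN/THEN pair in the CASE variant) pushes the count to
  // EXPR_CHILDREN_LIMIT + 1 (respectively + 2) and triggers the error.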
1749  @Test
1750  public void TestExprChildLimit() {
1751  // Test IN predicate.
1752  StringBuilder inPredStr = new StringBuilder("select 1 IN(");
1753  for (int i = 0; i < Expr.EXPR_CHILDREN_LIMIT - 1; ++i) {
1754  inPredStr.append(i);
1755  if (i + 1 != Expr.EXPR_CHILDREN_LIMIT - 1) inPredStr.append(", ");
1756  }
1757  AnalyzesOk(inPredStr.toString() + ")");
1758  inPredStr.append(", " + 1234);
1759  AnalysisError(inPredStr.toString() + ")",
1760  String.format("Exceeded the maximum number of child expressions (%s).\n" +
1761  "Expression has %s children", Expr.EXPR_CHILDREN_LIMIT,
1762  Expr.EXPR_CHILDREN_LIMIT + 1));
1763 
1764  // Test CASE expr.
1765  StringBuilder caseExprStr = new StringBuilder("select case");
1766  for (int i = 0; i < Expr.EXPR_CHILDREN_LIMIT/2; ++i) {
1767  caseExprStr.append(" when true then 1");
1768  }
1769  AnalyzesOk(caseExprStr.toString() + " end");
1770  caseExprStr.append(" when true then 1");
1771  AnalysisError(caseExprStr.toString() + " end",
1772  String.format("Exceeded the maximum number of child expressions (%s).\n" +
1773  "Expression has %s children", Expr.EXPR_CHILDREN_LIMIT,
1774  Expr.EXPR_CHILDREN_LIMIT + 2));
1775  }
1776 
1777  @Test
1778  public void TestExprDepthLimit() {
1779  // Compound predicates.
1780  testInfixExprDepthLimit("select true", " and false");
1781  testInfixExprDepthLimit("select true", " or false");
1782 
1783  // Arithmetic expr. Use a bigint value to avoid casts that make reasoning about the
1784  // expr depth more difficult.
1785  testInfixExprDepthLimit("select " + String.valueOf(Long.MAX_VALUE),
1786  " + " + String.valueOf(Long.MAX_VALUE));
1787 
1788  // Function-call expr.
1789  testFuncExprDepthLimit("lower(", "'abc'", ")");
1790 
1791  // UDF.
1792  ScalarFunction udf = new ScalarFunction(new FunctionName("default", "udf"),
1793  Lists.<Type>newArrayList(Type.INT), Type.INT, new HdfsUri(""), null, null, null);
1794  catalog_.addFunction(udf);
1795  try {
1796  testFuncExprDepthLimit("udf(", "1", ")");
1797  } finally {
1798  catalog_.removeFunction(udf);
1799  }
1800 
1801  // Timestamp arithmetic expr.
1802  testFuncExprDepthLimit("date_add(", "now()", ", interval 1 day)");
1803 
1804  // Casts.
1805  testFuncExprDepthLimit("cast(", "1", " as int)");
1806  }
1807 
1808  // Verifies that the resulting expr's decimal type is expectedType.
1809  private void testDecimalExpr(String expr, Type expectedType) {
1810  SelectStmt selectStmt = (SelectStmt) AnalyzesOk("select " + expr);
1811  Expr root = selectStmt.resultExprs_.get(0);
1812  Type actualType = root.getType();
1813  Assert.assertTrue(
1814  "Expr: " + expr + " Expected: " + expectedType + " Actual: " + actualType,
1815  expectedType.equals(actualType));
1816  }
1817 
1818  @Test
1819  public void TestDecimalArithmetic() {
1820  String decimal_10_0 = "cast(1 as decimal(10,0))";
1821  String decimal_5_5 = "cast(1 as decimal(5, 5))";
1822  String decimal_38_34 = "cast(1 as decimal(38, 34))";
1823 
1824  testDecimalExpr(decimal_10_0, ScalarType.createDecimalType(10, 0));
1825  testDecimalExpr(decimal_5_5, ScalarType.createDecimalType(5, 5));
1826  testDecimalExpr(decimal_38_34, ScalarType.createDecimalType(38, 34));
1827 
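  // Summary of the result-type pattern implied by the expectations below (an inferred
  // sketch, not an authoritative statement of the rules), with precision capped at 38:
  //   add/sub:  scale = max(s1, s2); precision = max(p1 - s1, p2 - s2) + scale + 1
  //   multiply: scale = s1 + s2;     precision = p1 + p2
  //   divide:   scale = s1 + p2 + 1; precision = (p1 - s1) + s2 + scale
  //   modulo:   scale = max(s1, s2); precision = min(p1 - s1, p2 - s2) + scale
  // Worked example: decimal(10,0) + decimal(5,5) -> scale = 5,
  // precision = max(10, 0) + 5 + 1 = 16, i.e. decimal(16,5). In the overflow cases further
  // down, both precision and scale are reduced when the unconstrained result exceeds 38.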
1828  // Test arithmetic operations.
1829  testDecimalExpr(decimal_10_0 + " + " + decimal_10_0,
1830  ScalarType.createDecimalType(11, 0));
1831  testDecimalExpr(decimal_10_0 + " - " + decimal_10_0,
1832  ScalarType.createDecimalType(11, 0));
1833  testDecimalExpr(decimal_10_0 + " * " + decimal_10_0,
1834  ScalarType.createDecimalType(20, 0));
1835  testDecimalExpr(decimal_10_0 + " / " + decimal_10_0,
1836  ScalarType.createDecimalType(21, 11));
1837  testDecimalExpr(decimal_10_0 + " % " + decimal_10_0,
1838  ScalarType.createDecimalType(10, 0));
1839 
1840  testDecimalExpr(decimal_10_0 + " + " + decimal_5_5,
1841  ScalarType.createDecimalType(16, 5));
1842  testDecimalExpr(decimal_10_0 + " - " + decimal_5_5,
1843  ScalarType.createDecimalType(16, 5));
1844  testDecimalExpr(decimal_10_0 + " * " + decimal_5_5,
1845  ScalarType.createDecimalType(15, 5));
1846  testDecimalExpr(decimal_10_0 + " / " + decimal_5_5,
1847  ScalarType.createDecimalType(21, 6));
1848  testDecimalExpr(decimal_10_0 + " % " + decimal_5_5,
1850 
1851  testDecimalExpr(decimal_5_5 + " + " + decimal_10_0,
1852  ScalarType.createDecimalType(16, 5));
1853  testDecimalExpr(decimal_5_5 + " - " + decimal_10_0,
1854  ScalarType.createDecimalType(16, 5));
1855  testDecimalExpr(decimal_5_5 + " * " + decimal_10_0,
1856  ScalarType.createDecimalType(15, 5));
1857  testDecimalExpr(decimal_5_5 + " / " + decimal_10_0,
1858  ScalarType.createDecimalType(16, 16));
1859  testDecimalExpr(decimal_5_5 + " % " + decimal_10_0,
1861 
1862  // Test some overflow cases.
1863  testDecimalExpr(decimal_10_0 + " + " + decimal_38_34,
1864  ScalarType.createDecimalType(38, 34));
1865  testDecimalExpr(decimal_10_0 + " - " + decimal_38_34,
1866  ScalarType.createDecimalType(38, 34));
1867  testDecimalExpr(decimal_10_0 + " * " + decimal_38_34,
1868  ScalarType.createDecimalType(38, 34));
1869  testDecimalExpr(decimal_10_0 + " / " + decimal_38_34,
1870  ScalarType.createDecimalType(38, 34));
1871  testDecimalExpr(decimal_10_0 + " % " + decimal_38_34,
1872  ScalarType.createDecimalType(38, 34));
1873 
1874  testDecimalExpr(decimal_38_34 + " + " + decimal_5_5,
1875  ScalarType.createDecimalType(38, 34));
1876  testDecimalExpr(decimal_38_34 + " - " + decimal_5_5,
1877  ScalarType.createDecimalType(38, 34));
1878  testDecimalExpr(decimal_38_34 + " * " + decimal_5_5,
1879  ScalarType.createDecimalType(38, 38));
1880  testDecimalExpr(decimal_38_34 + " / " + decimal_5_5,
1881  ScalarType.createDecimalType(38, 34));
1882  testDecimalExpr(decimal_38_34 + " % " + decimal_5_5,
1883  ScalarType.createDecimalType(34, 34));
1884 
1885  testDecimalExpr(decimal_10_0 + " + " + decimal_10_0 + " + " + decimal_10_0,
1886  ScalarType.createDecimalType(12, 0));
1887  testDecimalExpr(decimal_10_0 + " - " + decimal_10_0 + " * " + decimal_10_0,
1888  ScalarType.createDecimalType(21, 0));
1889  testDecimalExpr(decimal_10_0 + " / " + decimal_10_0 + " / " + decimal_10_0,
1890  ScalarType.createDecimalType(32, 22));
1891  testDecimalExpr(decimal_10_0 + " % " + decimal_10_0 + " + " + decimal_10_0,
1892  ScalarType.createDecimalType(11, 0));
1893 
1894  // Operators between decimal and numeric types should be supported. The int
1895  // should be cast to the appropriate decimal (e.g. tinyint -> decimal(3,0)).
1896  testDecimalExpr(decimal_10_0 + " + cast(1 as tinyint)",
1897  ScalarType.createDecimalType(11, 0));
1898  testDecimalExpr(decimal_10_0 + " + cast(1 as smallint)",
1899  ScalarType.createDecimalType(11, 0));
1900  testDecimalExpr(decimal_10_0 + " + cast(1 as int)",
1901  ScalarType.createDecimalType(11, 0));
1902  testDecimalExpr(decimal_10_0 + " + cast(1 as bigint)",
1903  ScalarType.createDecimalType(20, 0));
1904  testDecimalExpr(decimal_10_0 + " + cast(1 as float)",
1905  ScalarType.createDecimalType(38, 9));
1906  testDecimalExpr(decimal_10_0 + " + cast(1 as double)",
1907  ScalarType.createDecimalType(38, 17));
1908 
1909  testDecimalExpr(decimal_5_5 + " + cast(1 as tinyint)",
1911  testDecimalExpr(decimal_5_5 + " - cast(1 as smallint)",
1912  ScalarType.createDecimalType(11, 5));
1913  testDecimalExpr(decimal_5_5 + " * cast(1 as int)",
1914  ScalarType.createDecimalType(15, 5));
1915  testDecimalExpr(decimal_5_5 + " % cast(1 as bigint)",
1917  testDecimalExpr(decimal_5_5 + " / cast(1 as float)",
1918  ScalarType.createDecimalType(38, 9));
1919  testDecimalExpr(decimal_5_5 + " + cast(1 as double)",
1920  ScalarType.createDecimalType(38, 17));
1921 
1922  AnalyzesOk("select " + decimal_5_5 + " = cast(1 as tinyint)");
1923  AnalyzesOk("select " + decimal_5_5 + " != cast(1 as smallint)");
1924  AnalyzesOk("select " + decimal_5_5 + " > cast(1 as int)");
1925  AnalyzesOk("select " + decimal_5_5 + " < cast(1 as bigint)");
1926  AnalyzesOk("select " + decimal_5_5 + " >= cast(1 as float)");
1927  AnalyzesOk("select " + decimal_5_5 + " <= cast(1 as double)");
1928 
1929  AnalysisError("select " + decimal_5_5 + " + 'abcd'",
1930  "Arithmetic operation requires numeric operands: "
1931  + "CAST(1 AS DECIMAL(5,5)) + 'abcd'");
1932  AnalysisError("select " + decimal_5_5 + " + 'cast(1 as timestamp)'",
1933  "Arithmetic operation requires numeric operands: "
1934  + "CAST(1 AS DECIMAL(5,5)) + 'cast(1 as timestamp)'");
1935 
1936  AnalysisError("select " + decimal_5_5 + " = 'abcd'",
1937  "operands of type DECIMAL(5,5) and STRING are not comparable: " +
1938  "CAST(1 AS DECIMAL(5,5)) = 'abcd'");
1939  AnalysisError("select " + decimal_5_5 + " > 'cast(1 as timestamp)'",
1940  "operands of type DECIMAL(5,5) and STRING are not comparable: "
1941  + "CAST(1 AS DECIMAL(5,5)) > 'cast(1 as timestamp)'");
1942  }
1943 
1944  @Test
1946  AnalyzesOk("select d2 % d5 from functional.decimal_tbl");
1947 
1948  AnalyzesOk("select d1 from functional.decimal_tbl");
1949  AnalyzesOk("select cast(d2 as decimal(1)) from functional.decimal_tbl");
1950  AnalyzesOk("select d3 + d4 from functional.decimal_tbl");
1951  AnalyzesOk("select d5 - d1 from functional.decimal_tbl");
1952  AnalyzesOk("select d2 * d2 from functional.decimal_tbl");
1953  AnalyzesOk("select d4 / d1 from functional.decimal_tbl");
1954  AnalyzesOk("select d2 % d5 from functional.decimal_tbl");
1955 
1956  AnalysisError("select d1 & d1 from functional.decimal_tbl",
1957  "Invalid non-integer argument to operation '&': d1 & d1");
1958  AnalysisError("select d1 | d1 from functional.decimal_tbl",
1959  "Invalid non-integer argument to operation '|': d1 | d1");
1960  AnalysisError("select d1 ^ d1 from functional.decimal_tbl",
1961  "Invalid non-integer argument to operation '^': d1 ^ d1");
1962  AnalysisError("select ~d1 from functional.decimal_tbl",
1963  "Bitwise operations only allowed on integer types: ~d1");
1964 
1965  AnalyzesOk("select d3 = d4 from functional.decimal_tbl");
1966  AnalyzesOk("select d5 != d1 from functional.decimal_tbl");
1967  AnalyzesOk("select d2 > d2 from functional.decimal_tbl");
1968  AnalyzesOk("select d4 >= d1 from functional.decimal_tbl");
1969  AnalyzesOk("select d2 < d5 from functional.decimal_tbl");
1970  AnalyzesOk("select d2 <= d5 from functional.decimal_tbl");
1971  }
1972 
1973  @Test
1974  public void TestDecimalCast() throws AnalysisException {
1975  AnalyzesOk("select cast(1 as decimal)");
1976  AnalyzesOk("select cast(1 as decimal(1))");
1977  AnalyzesOk("select cast(1 as decimal(38))");
1978  AnalyzesOk("select cast(1 as decimal(1, 0))");
1979  AnalyzesOk("select cast(1 as decimal(10, 5))");
1980  AnalyzesOk("select cast(1 as decimal(38, 0))");
1981  AnalyzesOk("select cast(1 as decimal(38, 38))");
1982 
1983  AnalysisError("select cast(1 as decimal(0))",
1984  "Decimal precision must be > 0: 0");
1985  AnalysisError("select cast(1 as decimal(39))",
1986  "Decimal precision must be <= 38: 39");
1987  AnalysisError("select cast(1 as decimal(1, 2))",
1988  "Decimal scale (2) must be <= precision (1)");
1989  }
1990 
1991  @Test
1993  AnalyzesOk("select abs(cast(1 as decimal))");
1994  AnalyzesOk("select abs(cast(-1.1 as decimal(10,3)))");
1995 
1996  AnalyzesOk("select floor(cast(-1.1 as decimal(10,3)))");
1997  AnalyzesOk("select ceil(cast(1.123 as decimal(10,3)))");
1998 
1999  AnalyzesOk("select round(cast(1.123 as decimal(10,3)))");
2000  AnalyzesOk("select round(cast(1.123 as decimal(10,3)), 0)");
2001  AnalyzesOk("select round(cast(1.123 as decimal(10,3)), 2)");
2002  AnalyzesOk("select round(cast(1.123 as decimal(10,3)), 5)");
2003  AnalyzesOk("select round(cast(1.123 as decimal(10,3)), -2)");
2004 
2005  AnalyzesOk("select truncate(cast(1.123 as decimal(10,3)))");
2006  AnalyzesOk("select truncate(cast(1.123 as decimal(10,3)), 0)");
2007  AnalyzesOk("select truncate(cast(1.123 as decimal(10,3)), 2)");
2008  AnalyzesOk("select truncate(cast(1.123 as decimal(10,3)), 5)");
2009  AnalyzesOk("select truncate(cast(1.123 as decimal(10,3)), -1)");
2010 
2011  AnalysisError("select round(cast(1.123 as decimal(10,3)), 5.1)",
2012  "No matching function with signature: round(DECIMAL(10,3), DECIMAL(2,1))");
2013  AnalysisError("select round(cast(1.123 as decimal(30,20)), 40)",
2014  "Cannot round/truncate to scales greater than 38.");
2015  AnalysisError("select truncate(cast(1.123 as decimal(10,3)), 40)",
2016  "Cannot round/truncate to scales greater than 38.");
2017  AnalysisError("select round(cast(1.123 as decimal(10,3)), NULL)",
2018  "round() cannot be called with a NULL second argument.");
2019 
2020  // This has 39 digits and can only be represented as a DOUBLE.
2021  AnalysisError("select precision(999999999999999999999999999999999999999.)",
2022  "No matching function with signature: precision(DOUBLE).");
2023 
2024  AnalysisError("select precision(cast(1 as float))",
2025  "No matching function with signature: precision(FLOAT)");
2026 
2027  AnalysisError("select precision(NULL)",
2028  "Cannot resolve DECIMAL precision and scale from NULL type.");
2029  AnalysisError("select scale(NULL)",
2030  "Cannot resolve DECIMAL precision and scale from NULL type.");
2031 
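  // The expectations below suggest that round() reserves one extra integer digit for a
  // possible carry (e.g. 9.9 rounded to scale 0 is 10) while truncate() does not, which is
  // why round(1.23) is DECIMAL(2,0) but truncate(1.23) is DECIMAL(1,0).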
2032  testDecimalExpr("round(1.23)", ScalarType.createDecimalType(2, 0));
2033  testDecimalExpr("round(1.23, 1)", ScalarType.createDecimalType(3, 1));
2034  testDecimalExpr("round(1.23, 0)", ScalarType.createDecimalType(2, 0));
2035  testDecimalExpr("round(1.23, 3)", ScalarType.createDecimalType(4, 3));
2036  testDecimalExpr("round(1.23, -1)", ScalarType.createDecimalType(2, 0));
2037  testDecimalExpr("round(1.23, -2)", ScalarType.createDecimalType(2, 0));
2038  testDecimalExpr("round(cast(1.23 as decimal(3,2)), -2)",
2040 
2041  testDecimalExpr("ceil(123.45)", ScalarType.createDecimalType(4, 0));
2042  testDecimalExpr("floor(12.345)", ScalarType.createDecimalType(3, 0));
2043 
2044  testDecimalExpr("truncate(1.23)", ScalarType.createDecimalType(1, 0));
2045  testDecimalExpr("truncate(1.23, 1)", ScalarType.createDecimalType(2, 1));
2046  testDecimalExpr("truncate(1.23, 0)", ScalarType.createDecimalType(1, 0));
2047  testDecimalExpr("truncate(1.23, 3)", ScalarType.createDecimalType(4, 3));
2048  testDecimalExpr("truncate(1.23, -1)", ScalarType.createDecimalType(1, 0));
2049  testDecimalExpr("truncate(1.23, -2)", ScalarType.createDecimalType(1, 0));
2050  }
2051 
2056  private void testInfixExprDepthLimit(String prefix, String repeatSuffix) {
2057  StringBuilder exprStr = new StringBuilder(prefix);
2058  for (int i = 0; i < Expr.EXPR_DEPTH_LIMIT - 1; ++i) {
2059  exprStr.append(repeatSuffix);
2060  }
2061  AnalyzesOk(exprStr.toString());
2062  exprStr.append(repeatSuffix);
2063  AnalysisError(exprStr.toString(),
2064  String.format("Exceeded the maximum depth of an expression tree (%s).",
2065  Expr.EXPR_DEPTH_LIMIT));
2066 
2067  // Test 10x the safe depth (already at 1x, append 9x).
2068  for (int i = 0; i < Expr.EXPR_DEPTH_LIMIT * 9; ++i) {
2069  exprStr.append(repeatSuffix);
2070  }
2071  AnalysisError(exprStr.toString(),
2072  String.format("Exceeded the maximum depth of an expression tree (%s).",
2073  Expr.EXPR_DEPTH_LIMIT));
2074  }
2075 
2080  private void testFuncExprDepthLimit(String openFunc, String baseArg,
2081  String closeFunc) {
2082  AnalyzesOk("select " + getNestedFuncExpr(openFunc, baseArg, closeFunc,
2083  Expr.EXPR_DEPTH_LIMIT - 1));
2084  AnalysisError("select " + getNestedFuncExpr(openFunc, baseArg, closeFunc,
2085  Expr.EXPR_DEPTH_LIMIT),
2086  String.format("Exceeded the maximum depth of an expression tree (%s).",
2087  Expr.EXPR_DEPTH_LIMIT));
2088  // Test 10x the safe depth.
2089  AnalysisError("select " + getNestedFuncExpr(openFunc, baseArg, closeFunc,
2090  Expr.EXPR_DEPTH_LIMIT * 10),
2091  String.format("Exceeded the maximum depth of an expression tree (%s).",
2092  Expr.EXPR_DEPTH_LIMIT));
2093  }
2094 
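  // For illustration, getNestedFuncExpr("lower(", "'abc'", ")", 3) returns
  // "lower(lower(lower('abc')))", i.e. numFuncs nested calls wrapped around baseArg.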
2099  private String getNestedFuncExpr(String openFunc, String baseArg,
2100  String closeFunc, int numFuncs) {
2101  StringBuilder exprStr = new StringBuilder();
2102  for (int i = 0; i < numFuncs; ++i) {
2103  exprStr.append(openFunc);
2104  }
2105  exprStr.append(baseArg);
2106  for (int i = 0; i < numFuncs; ++i) {
2107  exprStr.append(closeFunc);
2108  }
2109  return exprStr.toString();
2110  }
2111 
2112  @Test
2114  TQueryOptions queryOptions = new TQueryOptions();
2115  queryOptions.setAppx_count_distinct(true);
2116 
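  // With APPX_COUNT_DISTINCT enabled, each count(distinct col) is expected to be rewritten
  // into its NDV-based approximation, which is why the queries below may combine
  // count(distinct) over many different columns; the AnalysisError cases show that the
  // rewrite covers neither multi-argument count(distinct ...) nor other distinct aggregates.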
2117  // Accumulates count(distinct) for all columns of alltypesTbl or decimalTbl.
2118  List<String> countDistinctFns = Lists.newArrayList();
2119  // Accumulates count(distinct) for all columns of both alltypesTbl and decimalTbl.
2120  List<String> allCountDistinctFns = Lists.newArrayList();
2121 
2122  Table alltypesTbl = catalog_.getTable("functional", "alltypes");
2123  for (Column col: alltypesTbl.getColumns()) {
2124  String colName = col.getName();
2125  // Test a single count(distinct) with some other aggs.
2126  AnalyzesOk(String.format(
2127  "select count(distinct %s), sum(distinct smallint_col), " +
2128  "avg(float_col), min(%s) " +
2129  "from functional.alltypes",
2130  colName, colName), createAnalyzer(queryOptions));
2131  countDistinctFns.add(String.format("count(distinct %s)", colName));
2132  }
2133  // Test a single query with a count(distinct) on all columns of alltypesTbl.
2134  AnalyzesOk(String.format("select %s from functional.alltypes",
2135  Joiner.on(",").join(countDistinctFns)), createAnalyzer(queryOptions));
2136 
2137  allCountDistinctFns.addAll(countDistinctFns);
2138  countDistinctFns.clear();
2139  Table decimalTbl = catalog_.getTable("functional", "decimal_tbl");
2140  for (Column col: decimalTbl.getColumns()) {
2141  String colName = col.getName();
2142  // Test a single count(distinct) with some other aggs.
2143  AnalyzesOk(String.format(
2144  "select count(distinct %s), sum(distinct d1), " +
2145  "avg(d2), min(%s) " +
2146  "from functional.decimal_tbl",
2147  colName, colName), createAnalyzer(queryOptions));
2148  countDistinctFns.add(String.format("count(distinct %s)", colName));
2149  }
2150  // Test a single query with a count(distinct) on all columns of decimalTbl.
2151  AnalyzesOk(String.format("select %s from functional.decimal_tbl",
2152  Joiner.on(",").join(countDistinctFns)), createAnalyzer(queryOptions));
2153 
2154  allCountDistinctFns.addAll(countDistinctFns);
2155 
2156  // Test a single query with a count(distinct) on all columns of both
2157  // alltypes/decimalTbl.
2158  AnalyzesOk(String.format(
2159  "select %s from functional.alltypes cross join functional.decimal_tbl",
2160  Joiner.on(",").join(countDistinctFns)), createAnalyzer(queryOptions));
2161 
2162  // The rewrite does not work for multiple count() arguments.
2163  AnalysisError("select count(distinct int_col, bigint_col), " +
2164  "count(distinct string_col, float_col) from functional.alltypes",
2165  createAnalyzer(queryOptions),
2166  "all DISTINCT aggregate functions need to have the same set of parameters as " +
2167  "count(DISTINCT int_col, bigint_col); deviating function: " +
2168  "count(DISTINCT string_col, float_col)");
2169  // The rewrite only applies to the count() function.
2170  AnalysisError(
2171  "select avg(distinct int_col), sum(distinct float_col) from functional.alltypes",
2172  createAnalyzer(queryOptions),
2173  "all DISTINCT aggregate functions need to have the same set of parameters as " +
2174  "avg(DISTINCT int_col); deviating function: sum(DISTINCT");
2175  }
2176 }