sqlglot.dialects.redshift
from __future__ import annotations

import typing as t

from sqlglot import exp, transforms
from sqlglot.dialects.dialect import (
    NormalizationStrategy,
    concat_to_dpipe_sql,
    concat_ws_to_dpipe_sql,
    date_delta_sql,
    generatedasidentitycolumnconstraint_sql,
    json_extract_segments,
    no_tablesample_sql,
    rename_func,
)
from sqlglot.dialects.postgres import Postgres
from sqlglot.helper import seq_get
from sqlglot.tokens import TokenType

if t.TYPE_CHECKING:
    from sqlglot._typing import E


def _build_date_delta(expr_type: t.Type[E]) -> t.Callable[[t.List], E]:
    def _builder(args: t.List) -> E:
        expr = expr_type(this=seq_get(args, 2), expression=seq_get(args, 1), unit=seq_get(args, 0))
        if expr_type is exp.TsOrDsAdd:
            expr.set("return_type", exp.DataType.build("TIMESTAMP"))

        return expr

    return _builder


class Redshift(Postgres):
    # https://docs.aws.amazon.com/redshift/latest/dg/r_names.html
    NORMALIZATION_STRATEGY = NormalizationStrategy.CASE_INSENSITIVE

    SUPPORTS_USER_DEFINED_TYPES = False
    INDEX_OFFSET = 0

    TIME_FORMAT = "'YYYY-MM-DD HH:MI:SS'"
    TIME_MAPPING = {
        **Postgres.TIME_MAPPING,
        "MON": "%b",
        "HH": "%H",
    }

    class Parser(Postgres.Parser):
        FUNCTIONS = {
            **Postgres.Parser.FUNCTIONS,
            "ADD_MONTHS": lambda args: exp.TsOrDsAdd(
                this=seq_get(args, 0),
                expression=seq_get(args, 1),
                unit=exp.var("month"),
                return_type=exp.DataType.build("TIMESTAMP"),
            ),
            "DATEADD": _build_date_delta(exp.TsOrDsAdd),
            "DATE_ADD": _build_date_delta(exp.TsOrDsAdd),
            "DATEDIFF": _build_date_delta(exp.TsOrDsDiff),
            "DATE_DIFF": _build_date_delta(exp.TsOrDsDiff),
            "GETDATE": exp.CurrentTimestamp.from_arg_list,
            "LISTAGG": exp.GroupConcat.from_arg_list,
            "STRTOL": exp.FromBase.from_arg_list,
        }

        NO_PAREN_FUNCTION_PARSERS = {
            **Postgres.Parser.NO_PAREN_FUNCTION_PARSERS,
            "APPROXIMATE": lambda self: self._parse_approximate_count(),
            "SYSDATE": lambda self: self.expression(exp.CurrentTimestamp, transaction=True),
        }

        SUPPORTS_IMPLICIT_UNNEST = True

        def _parse_table(
            self,
            schema: bool = False,
            joins: bool = False,
            alias_tokens: t.Optional[t.Collection[TokenType]] = None,
            parse_bracket: bool = False,
            is_db_reference: bool = False,
            parse_partition: bool = False,
        ) -> t.Optional[exp.Expression]:
            # Redshift supports UNPIVOTing SUPER objects, e.g. `UNPIVOT foo.obj[0] AS val AT attr`
            unpivot = self._match(TokenType.UNPIVOT)
            table = super()._parse_table(
                schema=schema,
                joins=joins,
                alias_tokens=alias_tokens,
                parse_bracket=parse_bracket,
                is_db_reference=is_db_reference,
            )

            return self.expression(exp.Pivot, this=table, unpivot=True) if unpivot else table

        def _parse_convert(
            self, strict: bool, safe: t.Optional[bool] = None
        ) -> t.Optional[exp.Expression]:
            to = self._parse_types()
            self._match(TokenType.COMMA)
            this = self._parse_bitwise()
            return self.expression(exp.TryCast, this=this, to=to, safe=safe)

        def _parse_approximate_count(self) -> t.Optional[exp.ApproxDistinct]:
            index = self._index - 1
            func = self._parse_function()

            if isinstance(func, exp.Count) and isinstance(func.this, exp.Distinct):
                return self.expression(exp.ApproxDistinct, this=seq_get(func.this.expressions, 0))
            self._retreat(index)
            return None

    class Tokenizer(Postgres.Tokenizer):
        BIT_STRINGS = []
        HEX_STRINGS = []
        STRING_ESCAPES = ["\\", "'"]

        KEYWORDS = {
            **Postgres.Tokenizer.KEYWORDS,
            "HLLSKETCH": TokenType.HLLSKETCH,
            "SUPER": TokenType.SUPER,
            "TOP": TokenType.TOP,
            "UNLOAD": TokenType.COMMAND,
            "VARBYTE": TokenType.VARBINARY,
        }
        KEYWORDS.pop("VALUES")

        # Redshift allows # to appear as a table identifier prefix
        SINGLE_TOKENS = Postgres.Tokenizer.SINGLE_TOKENS.copy()
        SINGLE_TOKENS.pop("#")

    class Generator(Postgres.Generator):
        LOCKING_READS_SUPPORTED = False
        QUERY_HINTS = False
        VALUES_AS_TABLE = False
        TZ_TO_WITH_TIME_ZONE = True
        NVL2_SUPPORTED = True
        LAST_DAY_SUPPORTS_DATE_PART = False
        CAN_IMPLEMENT_ARRAY_ANY = False
        MULTI_ARG_DISTINCT = True

        TYPE_MAPPING = {
            **Postgres.Generator.TYPE_MAPPING,
            exp.DataType.Type.BINARY: "VARBYTE",
            exp.DataType.Type.INT: "INTEGER",
            exp.DataType.Type.TIMETZ: "TIME",
            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
            exp.DataType.Type.VARBINARY: "VARBYTE",
        }

        TRANSFORMS = {
            **Postgres.Generator.TRANSFORMS,
            exp.Concat: concat_to_dpipe_sql,
            exp.ConcatWs: concat_ws_to_dpipe_sql,
            exp.ApproxDistinct: lambda self, e: f"APPROXIMATE COUNT(DISTINCT {self.sql(e, 'this')})",
            exp.CurrentTimestamp: lambda self, e: (
                "SYSDATE" if e.args.get("transaction") else "GETDATE()"
            ),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DistKeyProperty: lambda self, e: self.func("DISTKEY", e.this),
            exp.DistStyleProperty: lambda self, e: self.naked_property(e),
            exp.FromBase: rename_func("STRTOL"),
            exp.GeneratedAsIdentityColumnConstraint: generatedasidentitycolumnconstraint_sql,
            exp.JSONExtract: json_extract_segments("JSON_EXTRACT_PATH_TEXT"),
            exp.JSONExtractScalar: json_extract_segments("JSON_EXTRACT_PATH_TEXT"),
            exp.GroupConcat: rename_func("LISTAGG"),
            exp.ParseJSON: rename_func("JSON_PARSE"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_distinct_on,
                    transforms.eliminate_semi_and_anti_joins,
                    transforms.unqualify_unnest,
                ]
            ),
            exp.SortKeyProperty: lambda self, e: f"{'COMPOUND ' if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})",
            exp.StartsWith: lambda self, e: f"{self.sql(e.this)} LIKE {self.sql(e.expression)} || '%'",
            exp.TableSample: no_tablesample_sql,
            exp.TsOrDsAdd: date_delta_sql("DATEADD"),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.UnixToTime: lambda self, e: f"(TIMESTAMP 'epoch' + {self.sql(e.this)} * INTERVAL '1 SECOND')",
        }

        # Postgres maps exp.Pivot to no_pivot_sql, but Redshift supports pivots
        TRANSFORMS.pop(exp.Pivot)

        # Redshift uses the POW | POWER (expr1, expr2) syntax instead of expr1 ^ expr2 (postgres)
        TRANSFORMS.pop(exp.Pow)

        # Redshift supports ANY_VALUE(..)
        TRANSFORMS.pop(exp.AnyValue)

        # Redshift supports LAST_DAY(..)
        TRANSFORMS.pop(exp.LastDay)

        RESERVED_KEYWORDS = {
            "aes128",
            "aes256",
            "all",
            "allowoverwrite",
            "analyse",
            "analyze",
            "and",
            "any",
            "array",
            "as",
            "asc",
            "authorization",
            "az64",
            "backup",
            "between",
            "binary",
            "blanksasnull",
            "both",
            "bytedict",
            "bzip2",
            "case",
            "cast",
            "check",
            "collate",
            "column",
            "constraint",
            "create",
            "credentials",
            "cross",
            "current_date",
            "current_time",
            "current_timestamp",
            "current_user",
            "current_user_id",
            "default",
            "deferrable",
            "deflate",
            "defrag",
            "delta",
            "delta32k",
            "desc",
            "disable",
            "distinct",
            "do",
            "else",
            "emptyasnull",
            "enable",
            "encode",
            "encrypt",
            "encryption",
            "end",
            "except",
            "explicit",
            "false",
            "for",
            "foreign",
            "freeze",
            "from",
            "full",
            "globaldict256",
            "globaldict64k",
            "grant",
            "group",
            "gzip",
            "having",
            "identity",
            "ignore",
            "ilike",
            "in",
            "initially",
            "inner",
            "intersect",
            "interval",
            "into",
            "is",
            "isnull",
            "join",
            "leading",
            "left",
            "like",
            "limit",
            "localtime",
            "localtimestamp",
            "lun",
            "luns",
            "lzo",
            "lzop",
            "minus",
            "mostly16",
            "mostly32",
            "mostly8",
            "natural",
            "new",
            "not",
            "notnull",
            "null",
            "nulls",
            "off",
            "offline",
            "offset",
            "oid",
            "old",
            "on",
            "only",
            "open",
            "or",
            "order",
            "outer",
            "overlaps",
            "parallel",
            "partition",
            "percent",
            "permissions",
            "pivot",
            "placing",
            "primary",
            "raw",
            "readratio",
            "recover",
            "references",
            "rejectlog",
            "resort",
            "respect",
            "restore",
            "right",
            "select",
            "session_user",
            "similar",
            "snapshot",
            "some",
            "sysdate",
            "system",
            "table",
            "tag",
            "tdes",
            "text255",
            "text32k",
            "then",
            "timestamp",
            "to",
            "top",
            "trailing",
            "true",
            "truncatecolumns",
            "type",
            "union",
            "unique",
            "unnest",
            "unpivot",
            "user",
            "using",
            "verbose",
            "wallet",
            "when",
            "where",
            "with",
            "without",
        }

        def unnest_sql(self, expression: exp.Unnest) -> str:
            args = expression.expressions
            num_args = len(args)

            if num_args > 1:
                self.unsupported(f"Unsupported number of arguments in UNNEST: {num_args}")
                return ""

            arg = self.sql(seq_get(args, 0))
            alias = self.expressions(expression.args.get("alias"), key="columns", flat=True)
            return f"{arg} AS {alias}" if alias else arg

        def with_properties(self, properties: exp.Properties) -> str:
            """Redshift doesn't have `WITH` as part of its with_properties, so we remove it"""
            return self.properties(properties, prefix=" ", suffix="")

        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
            if expression.is_type(exp.DataType.Type.JSON):
                # Redshift doesn't support a JSON type, so casting to it is treated as a noop
                return self.sql(expression, "this")

            return super().cast_sql(expression, safe_prefix=safe_prefix)

        def datatype_sql(self, expression: exp.DataType) -> str:
            """
            Redshift converts the `TEXT` data type to `VARCHAR(255)` by default, but people generally
            mean a VARCHAR of maximum length, which is `VARCHAR(MAX)` in Redshift. Therefore, if we
            get a `TEXT` data type without precision, we convert it to `VARCHAR(MAX)`; if it does
            have precision, we just convert `TEXT` to `VARCHAR`.
            """
            if expression.is_type("text"):
                expression.set("this", exp.DataType.Type.VARCHAR)
                precision = expression.args.get("expressions")

                if not precision:
                    expression.append("expressions", exp.var("MAX"))

            return super().datatype_sql(expression)
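A short usage sketch (not part of the module source; it assumes only the public sqlglot entry points, and the expected outputs are indicative and may vary slightly across sqlglot versions):

    import sqlglot

    # GETDATE() parses into exp.CurrentTimestamp, so it re-renders naturally in
    # other dialects; SYSDATE survives a Redshift round trip via the
    # transaction=True flag set in NO_PAREN_FUNCTION_PARSERS.
    print(sqlglot.transpile("SELECT GETDATE()", read="redshift", write="postgres")[0])
    # expected: SELECT CURRENT_TIMESTAMP

    print(sqlglot.transpile("SELECT SYSDATE", read="redshift", write="redshift")[0])
    # expected: SELECT SYSDATE

    # APPROXIMATE COUNT(DISTINCT ...) is folded into a single exp.ApproxDistinct node.
    print(sqlglot.transpile("SELECT APPROXIMATE COUNT(DISTINCT x) FROM t", read="redshift", write="duckdb")[0])
    # expected: SELECT APPROX_COUNT_DISTINCT(x) FROM t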
NORMALIZATION_STRATEGY: Specifies the strategy according to which identifiers should be normalized.
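A sketch of what this means in practice, assuming sqlglot's normalize_identifiers helper (which is not defined in this module): under a case-insensitive strategy, quoted and unquoted identifiers alike should normalize to lowercase.

    import sqlglot
    from sqlglot.optimizer.normalize_identifiers import normalize_identifiers

    expr = sqlglot.parse_one('SELECT "Foo".BAR FROM "Foo"', read="redshift")
    print(normalize_identifiers(expr, dialect="redshift").sql(dialect="redshift"))
    # expected (identifiers lowercased): SELECT "foo".bar FROM "foo"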
TIME_MAPPING: Associates this dialect's time formats with their equivalent Python strftime formats.
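For illustration (a hedged sketch; DuckDB is an arbitrary target and the exact output may differ by version), the mapping lets format strings such as 'YYYY-MM-DD HH:MI:SS' be rewritten token by token when transpiling:

    import sqlglot

    sql = "SELECT TO_CHAR(created_at, 'YYYY-MM-DD HH:MI:SS') FROM t"
    print(sqlglot.transpile(sql, read="redshift", write="duckdb")[0])
    # expected (approximately): SELECT STRFTIME(created_at, '%Y-%m-%d %H:%M:%S') FROM t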
Mapping of an escaped sequence (e.g. the two-character "\n") to its unescaped version (a literal newline character).
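As a small illustration (a hypothetical snippet), the Redshift tokenizer's STRING_ESCAPES = ["\\", "'"] means a doubled quote escapes a quote inside string literals:

    import sqlglot

    literal = sqlglot.parse_one("SELECT 'it''s'", read="redshift").expressions[0]
    print(literal.this)
    # expected: it's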
Inherited Members
- sqlglot.dialects.dialect.Dialect
- Dialect
- WEEK_OFFSET
- UNNEST_COLUMN_ONLY
- ALIAS_POST_TABLESAMPLE
- TABLESAMPLE_SIZE_IS_PERCENT
- IDENTIFIERS_CAN_START_WITH_DIGIT
- DPIPE_IS_STRING_CONCAT
- STRICT_STRING_CONCAT
- SUPPORTS_SEMI_ANTI_JOIN
- NORMALIZE_FUNCTIONS
- LOG_BASE_FIRST
- SAFE_DIVISION
- DATE_FORMAT
- DATEINT_FORMAT
- FORMAT_MAPPING
- PSEUDOCOLUMNS
- PREFER_CTE_ALIAS_COLUMN
- get_or_raise
- format_time
- normalize_identifier
- case_sensitive
- can_identify
- quote_identifier
- to_json_path
- parse
- parse_into
- generate
- transpile
- tokenize
- tokenizer
- parser
- generator
class Redshift.Parser(Postgres.Parser):
Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.
Arguments:
- error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
- error_message_context: The amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
- max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
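A parsing sketch tying these options to this dialect (standard top-level sqlglot API; the assertion reflects the APPROXIMATE parser shown in the source above):

    import sqlglot
    from sqlglot import exp
    from sqlglot.errors import ErrorLevel

    # Parser options such as error_level are forwarded through sqlglot.parse_one.
    expr = sqlglot.parse_one(
        "SELECT APPROXIMATE COUNT(DISTINCT x) FROM t",
        read="redshift",
        error_level=ErrorLevel.RAISE,
    )
    assert isinstance(expr.expressions[0], exp.ApproxDistinct)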
Inherited Members
- sqlglot.parser.Parser
- Parser
- NO_PAREN_FUNCTIONS
- STRUCT_TYPE_TOKENS
- NESTED_TYPE_TOKENS
- ENUM_TYPE_TOKENS
- AGGREGATE_TYPE_TOKENS
- TYPE_TOKENS
- SIGNED_TO_UNSIGNED_TYPE_TOKEN
- SUBQUERY_PREDICATES
- RESERVED_TOKENS
- DB_CREATABLES
- CREATABLES
- ID_VAR_TOKENS
- INTERVAL_VARS
- TABLE_ALIAS_TOKENS
- ALIAS_TOKENS
- COMMENT_TABLE_ALIAS_TOKENS
- UPDATE_ALIAS_TOKENS
- TRIM_TYPES
- FUNC_TOKENS
- CONJUNCTION
- EQUALITY
- COMPARISON
- TERM
- FACTOR
- TIMES
- TIMESTAMPS
- SET_OPERATIONS
- JOIN_METHODS
- JOIN_SIDES
- JOIN_KINDS
- JOIN_HINTS
- LAMBDAS
- EXPRESSION_PARSERS
- UNARY_PARSERS
- STRING_PARSERS
- NUMERIC_PARSERS
- PRIMARY_PARSERS
- PLACEHOLDER_PARSERS
- CONSTRAINT_PARSERS
- ALTER_PARSERS
- SCHEMA_UNNAMED_CONSTRAINTS
- INVALID_FUNC_NAME_TOKENS
- FUNCTIONS_WITH_ALIASED_ARGS
- KEY_VALUE_DEFINITIONS
- QUERY_MODIFIER_PARSERS
- SET_PARSERS
- SHOW_PARSERS
- TYPE_LITERAL_PARSERS
- DDL_SELECT_TOKENS
- PRE_VOLATILE_TOKENS
- TRANSACTION_KIND
- TRANSACTION_CHARACTERISTICS
- CONFLICT_ACTIONS
- CREATE_SEQUENCE
- ISOLATED_LOADING_OPTIONS
- USABLES
- CAST_ACTIONS
- INSERT_ALTERNATIVES
- CLONE_KEYWORDS
- HISTORICAL_DATA_KIND
- OPCLASS_FOLLOW_KEYWORDS
- OPTYPE_FOLLOW_TOKENS
- TABLE_INDEX_HINT_TOKENS
- VIEW_ATTRIBUTES
- WINDOW_ALIAS_TOKENS
- WINDOW_BEFORE_PAREN_TOKENS
- WINDOW_SIDES
- JSON_KEY_VALUE_SEPARATOR_TOKENS
- FETCH_TOKENS
- ADD_CONSTRAINT_TOKENS
- DISTINCT_TOKENS
- NULL_TOKENS
- UNNEST_OFFSET_ALIAS_TOKENS
- SELECT_START_TOKENS
- STRICT_CAST
- PREFIXED_PIVOT_COLUMNS
- IDENTIFY_PIVOT_STRINGS
- LOG_DEFAULTS_TO_LN
- ALTER_TABLE_ADD_REQUIRED_FOR_EACH_COLUMN
- TABLESAMPLE_CSV
- SET_REQUIRES_ASSIGNMENT_DELIMITER
- TRIM_PATTERN_FIRST
- STRING_ALIASES
- MODIFIERS_ATTACHED_TO_UNION
- UNION_MODIFIERS
- NO_PAREN_IF_COMMANDS
- VALUES_FOLLOWED_BY_PAREN
- INTERVAL_SPANS
- SUPPORTS_PARTITION_SELECTION
- error_level
- error_message_context
- max_errors
- dialect
- reset
- parse
- parse_into
- check_errors
- raise_error
- expression
- validate_expression
- errors
- sql
class Redshift.Tokenizer(Postgres.Tokenizer):
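A tokenizer sketch (assuming the inherited Dialect.tokenize helper listed above): because '#' is removed from SINGLE_TOKENS, temp-table names such as #staging should survive as part of a single identifier-like token rather than being split at the '#'.

    from sqlglot.dialects.dialect import Dialect

    redshift = Dialect.get_or_raise("redshift")
    for token in redshift.tokenize("SELECT TOP 5 col FROM #staging"):
        print(token.token_type, token.text)
    # TOP tokenizes as TokenType.TOP; #staging is not split at the '#'.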
class Redshift.Generator(Postgres.Generator):
Generator converts a given syntax tree to the corresponding SQL string.
Arguments:
- pretty: Whether to format the produced SQL string. Default: False.
- identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case insensitive.
- normalize: Whether to normalize identifiers to lowercase. Default: False.
- pad: The pad size in a formatted string. For example, this affects the indentation of a projection in a query, relative to its nesting level. Default: 2.
- indent: The indentation size in a formatted string. For example, this affects the indentation of subqueries and filters under a WHERE clause. Default: 2.
- normalize_functions: How to normalize function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
- unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default: ErrorLevel.WARN.
- max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
- leading_comma: Whether the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
- max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
- comments: Whether to preserve comments in the output SQL code. Default: True
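A generation sketch using a few of these options (the pretty layout shown is indicative):

    import sqlglot

    expr = sqlglot.parse_one("select listagg(name, ',') from users", read="redshift")
    print(expr.sql(dialect="redshift", pretty=True))
    # expected (approximately):
    # SELECT
    #   LISTAGG(name, ',')
    # FROM users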
def with_properties(self, properties: exp.Properties) -> str:
Redshift doesn't have `WITH` as part of its with_properties, so we remove it.
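For example (a sketch that assumes the base parser's DISTSTYLE/DISTKEY/SORTKEY property support), the table properties render bare, with no WITH (...) wrapper:

    import sqlglot

    ddl = "CREATE TABLE t (c INTEGER) DISTSTYLE KEY DISTKEY(c) SORTKEY(c)"
    print(sqlglot.transpile(ddl, read="redshift", write="redshift")[0])
    # expected (approximately): CREATE TABLE t (c INTEGER) DISTSTYLE KEY DISTKEY(c) SORTKEY(c)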
def datatype_sql(self, expression: exp.DataType) -> str:
Redshift converts the `TEXT` data type to `VARCHAR(255)` by default, but people generally mean a VARCHAR of maximum length, which is `VARCHAR(MAX)` in Redshift. Therefore, a `TEXT` type without precision is converted to `VARCHAR(MAX)`, while `TEXT` with precision is converted to plain `VARCHAR`.
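A sketch of the effect when transpiling into Redshift:

    import sqlglot

    print(sqlglot.transpile("CREATE TABLE t (a TEXT)", read="postgres", write="redshift")[0])
    # expected: CREATE TABLE t (a VARCHAR(MAX))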
Inherited Members
- sqlglot.generator.Generator
- Generator
- NULL_ORDERING_SUPPORTED
- IGNORE_NULLS_IN_FUNC
- EXPLICIT_UNION
- WRAP_DERIVED_VALUES
- CREATE_FUNCTION_RETURN_AS
- MATCHED_BY_SOURCE
- INTERVAL_ALLOWS_PLURAL_FORM
- LIMIT_FETCH
- LIMIT_ONLY_LITERALS
- GROUPINGS_SEP
- INDEX_ON
- QUERY_HINT_SEP
- IS_BOOL_ALLOWED
- DUPLICATE_KEY_UPDATE_WITH_SET
- LIMIT_IS_TOP
- RETURNING_END
- COLUMN_JOIN_MARKS_SUPPORTED
- EXTRACT_ALLOWS_QUOTES
- ALTER_TABLE_INCLUDE_COLUMN_KEYWORD
- UNNEST_WITH_ORDINALITY
- AGGREGATE_FILTER_SUPPORTED
- SEMI_ANTI_JOIN_WITH_SIDE
- COMPUTED_COLUMN_WITH_TYPE
- SUPPORTS_TABLE_COPY
- TABLESAMPLE_REQUIRES_PARENS
- TABLESAMPLE_KEYWORDS
- TABLESAMPLE_WITH_METHOD
- COLLATE_IS_FUNC
- DATA_TYPE_SPECIFIERS_ALLOWED
- ENSURE_BOOLS
- CTE_RECURSIVE_KEYWORD_REQUIRED
- SUPPORTS_SINGLE_ARG_CONCAT
- SUPPORTS_TABLE_ALIAS_COLUMNS
- UNPIVOT_ALIASES_ARE_IDENTIFIERS
- JSON_KEY_VALUE_PAIR_SEP
- INSERT_OVERWRITE
- SUPPORTS_CREATE_TABLE_LIKE
- JSON_PATH_BRACKETED_KEY_SUPPORTED
- JSON_PATH_SINGLE_QUOTE_ESCAPE
- SUPPORTS_TO_NUMBER
- OUTER_UNION_MODIFIERS
- STAR_MAPPING
- TIME_PART_SINGULARS
- TOKEN_MAPPING
- STRUCT_DELIMITER
- NAMED_PLACEHOLDER_TOKEN
- WITH_SEPARATED_COMMENTS
- EXCLUDE_COMMENTS
- UNWRAPPED_INTERVAL_VALUES
- PARAMETERIZABLE_TEXT_TYPES
- EXPRESSIONS_WITHOUT_NESTED_CTES
- SENTINEL_LINE_BREAK
- pretty
- identify
- normalize
- pad
- unsupported_level
- max_unsupported
- leading_comma
- max_text_width
- comments
- dialect
- normalize_functions
- unsupported_messages
- generate
- preprocess
- unsupported
- sep
- seg
- pad_comment
- maybe_comment
- wrap
- no_identify
- normalize_func
- indent
- sql
- uncache_sql
- cache_sql
- characterset_sql
- column_parts
- column_sql
- columnposition_sql
- columndef_sql
- columnconstraint_sql
- computedcolumnconstraint_sql
- autoincrementcolumnconstraint_sql
- compresscolumnconstraint_sql
- generatedasidentitycolumnconstraint_sql
- generatedasrowcolumnconstraint_sql
- periodforsystemtimeconstraint_sql
- notnullcolumnconstraint_sql
- transformcolumnconstraint_sql
- primarykeycolumnconstraint_sql
- uniquecolumnconstraint_sql
- createable_sql
- create_sql
- sequenceproperties_sql
- clone_sql
- describe_sql
- heredoc_sql
- prepend_ctes
- with_sql
- cte_sql
- tablealias_sql
- bitstring_sql
- hexstring_sql
- bytestring_sql
- unicodestring_sql
- rawstring_sql
- datatypeparam_sql
- directory_sql
- delete_sql
- drop_sql
- except_sql
- except_op
- fetch_sql
- filter_sql
- hint_sql
- indexparameters_sql
- index_sql
- identifier_sql
- inputoutputformat_sql
- national_sql
- partition_sql
- properties_sql
- root_properties
- properties
- locate_properties
- property_name
- property_sql
- likeproperty_sql
- fallbackproperty_sql
- journalproperty_sql
- freespaceproperty_sql
- checksumproperty_sql
- mergeblockratioproperty_sql
- datablocksizeproperty_sql
- blockcompressionproperty_sql
- isolatedloadingproperty_sql
- partitionboundspec_sql
- partitionedofproperty_sql
- lockingproperty_sql
- withdataproperty_sql
- withsystemversioningproperty_sql
- insert_sql
- intersect_sql
- intersect_op
- introducer_sql
- kill_sql
- pseudotype_sql
- objectidentifier_sql
- onconflict_sql
- returning_sql
- rowformatdelimitedproperty_sql
- withtablehint_sql
- indextablehint_sql
- historicaldata_sql
- table_parts
- table_sql
- tablesample_sql
- pivot_sql
- version_sql
- tuple_sql
- update_sql
- values_sql
- var_sql
- into_sql
- from_sql
- group_sql
- having_sql
- connect_sql
- prior_sql
- join_sql
- lambda_sql
- lateral_op
- lateral_sql
- limit_sql
- offset_sql
- setitem_sql
- set_sql
- pragma_sql
- lock_sql
- literal_sql
- escape_str
- loaddata_sql
- null_sql
- boolean_sql
- order_sql
- withfill_sql
- cluster_sql
- distribute_sql
- sort_sql
- ordered_sql
- matchrecognizemeasure_sql
- matchrecognize_sql
- query_modifiers
- queryoption_sql
- offset_limit_modifiers
- after_limit_modifiers
- select_sql
- schema_sql
- schema_columns_sql
- star_sql
- parameter_sql
- sessionparameter_sql
- placeholder_sql
- subquery_sql
- qualify_sql
- set_operations
- union_sql
- union_op
- prewhere_sql
- where_sql
- window_sql
- partition_by_sql
- windowspec_sql
- withingroup_sql
- between_sql
- bracket_offset_expressions
- all_sql
- any_sql
- exists_sql
- case_sql
- constraint_sql
- nextvaluefor_sql
- extract_sql
- trim_sql
- convert_concat_args
- concat_sql
- concatws_sql
- check_sql
- foreignkey_sql
- primarykey_sql
- if_sql
- jsonkeyvalue_sql
- jsonpath_sql
- json_path_part
- formatjson_sql
- jsonobject_sql
- jsonobjectagg_sql
- jsonarray_sql
- jsonarrayagg_sql
- jsoncolumndef_sql
- jsonschema_sql
- jsontable_sql
- openjsoncolumndef_sql
- openjson_sql
- in_sql
- in_unnest_op
- interval_sql
- return_sql
- reference_sql
- anonymous_sql
- paren_sql
- neg_sql
- not_sql
- alias_sql
- pivotalias_sql
- aliases_sql
- atindex_sql
- attimezone_sql
- fromtimezone_sql
- add_sql
- and_sql
- or_sql
- xor_sql
- connector_sql
- bitwiseand_sql
- bitwiseleftshift_sql
- bitwisenot_sql
- bitwiseor_sql
- bitwiserightshift_sql
- bitwisexor_sql
- currentdate_sql
- currenttimestamp_sql
- collate_sql
- command_sql
- comment_sql
- mergetreettlaction_sql
- mergetreettl_sql
- transaction_sql
- commit_sql
- rollback_sql
- altercolumn_sql
- renametable_sql
- renamecolumn_sql
- altertable_sql
- add_column_sql
- droppartition_sql
- addconstraint_sql
- distinct_sql
- ignorenulls_sql
- respectnulls_sql
- havingmax_sql
- intdiv_sql
- dpipe_sql
- div_sql
- overlaps_sql
- distance_sql
- dot_sql
- eq_sql
- propertyeq_sql
- escape_sql
- glob_sql
- gt_sql
- gte_sql
- ilike_sql
- ilikeany_sql
- is_sql
- like_sql
- likeany_sql
- similarto_sql
- lt_sql
- lte_sql
- mod_sql
- mul_sql
- neq_sql
- nullsafeeq_sql
- nullsafeneq_sql
- slice_sql
- sub_sql
- trycast_sql
- log_sql
- use_sql
- binary
- function_fallback_sql
- func
- format_args
- too_wide
- format_time
- expressions
- op_expressions
- naked_property
- tag_sql
- token_sql
- userdefinedfunction_sql
- joinhint_sql
- kwarg_sql
- when_sql
- merge_sql
- tochar_sql
- tonumber_sql
- dictproperty_sql
- dictrange_sql
- dictsubproperty_sql
- oncluster_sql
- clusteredbyproperty_sql
- anyvalue_sql
- querytransform_sql
- indexconstraintoption_sql
- checkcolumnconstraint_sql
- indexcolumnconstraint_sql
- nvl2_sql
- comprehension_sql
- columnprefix_sql
- opclass_sql
- predict_sql
- forin_sql
- refresh_sql
- operator_sql
- toarray_sql
- tsordstotime_sql
- tsordstotimestamp_sql
- tsordstodate_sql
- unixdate_sql
- lastday_sql
- dateadd_sql
- arrayany_sql
- generateseries_sql
- struct_sql
- partitionrange_sql
- truncatetable_sql
- convert_sql
- sqlglot.dialects.postgres.Postgres.Generator
- SINGLE_STRING_INTERVAL
- RENAME_TABLE_WITH_DB
- JOIN_HINTS
- TABLE_HINTS
- PARAMETER_TOKEN
- TABLESAMPLE_SIZE_IS_ROWS
- TABLESAMPLE_SEED_KEYWORD
- SUPPORTS_SELECT_INTO
- JSON_TYPE_REQUIRED_FOR_EXTRACTION
- SUPPORTS_UNLOGGED_TABLES
- LIKE_PROPERTY_INSIDE_SCHEMA
- SUPPORTED_JSON_PATH_PARTS
- PROPERTIES_LOCATION
- bracket_sql
- matchagainst_sql