sqlglot.dialects.snowflake — module source
   1from __future__ import annotations
   2
   3import typing as t
   4
   5from sqlglot import exp, generator, parser, tokens, transforms
   6from sqlglot.dialects.dialect import (
   7    Dialect,
   8    NormalizationStrategy,
   9    binary_from_function,
  10    build_default_decimal_type,
  11    build_timestamp_from_parts,
  12    date_delta_sql,
  13    date_trunc_to_time,
  14    datestrtodate_sql,
  15    build_formatted_time,
  16    if_sql,
  17    inline_array_sql,
  18    max_or_greatest,
  19    min_or_least,
  20    rename_func,
  21    timestamptrunc_sql,
  22    timestrtotime_sql,
  23    var_map_sql,
  24    map_date_part,
  25    no_safe_divide_sql,
  26    no_timestamp_sql,
  27    timestampdiff_sql,
  28)
  29from sqlglot.generator import unsupported_args
  30from sqlglot.helper import flatten, is_float, is_int, seq_get
  31from sqlglot.tokens import TokenType
  32
  33if t.TYPE_CHECKING:
  34    from sqlglot._typing import E
  35
  36
  37# from https://docs.snowflake.com/en/sql-reference/functions/to_timestamp.html
# from https://docs.snowflake.com/en/sql-reference/functions/to_timestamp.html
def _build_datetime(
    name: str, kind: exp.DataType.Type, safe: bool = False
) -> t.Callable[[t.List], exp.Func]:
    """Return a builder for TO_DATE/TO_TIME/TO_TIMESTAMP-style functions.

    Args:
        name: The Snowflake function name, used as a fallback Anonymous node.
        kind: The target data type of the conversion.
        safe: True for the TRY_* variants, which must not raise on bad input.
    """

    def _builder(args: t.List) -> exp.Func:
        value = seq_get(args, 0)
        int_value = value is not None and is_int(value.name)

        if isinstance(value, exp.Literal):
            # Converts calls like `TO_TIME('01:02:03')` into casts
            if len(args) == 1 and value.is_string and not int_value:
                return (
                    exp.TryCast(this=value, to=exp.DataType.build(kind))
                    if safe
                    else exp.cast(value, kind)
                )

            # Handles `TO_TIMESTAMP(str, fmt)` and `TO_TIMESTAMP(num, scale)` as special
            # cases so we can transpile them, since they're relatively common
            if kind == exp.DataType.Type.TIMESTAMP:
                if int_value and not safe:
                    # TRY_TO_TIMESTAMP('integer') is not parsed into exp.UnixToTime as
                    # it's not easily transpilable
                    return exp.UnixToTime(this=value, scale=seq_get(args, 1))
                if not is_float(value.this):
                    # String literal with an (optional) format argument
                    expr = build_formatted_time(exp.StrToTime, "snowflake")(args)
                    expr.set("safe", safe)
                    return expr

        if kind == exp.DataType.Type.DATE and not int_value:
            formatted_exp = build_formatted_time(exp.TsOrDsToDate, "snowflake")(args)
            formatted_exp.set("safe", safe)
            return formatted_exp

        # Anything we can't specialize is kept as an opaque function call
        return exp.Anonymous(this=name, expressions=args)

    return _builder
  74
  75
  76def _build_object_construct(args: t.List) -> t.Union[exp.StarMap, exp.Struct]:
  77    expression = parser.build_var_map(args)
  78
  79    if isinstance(expression, exp.StarMap):
  80        return expression
  81
  82    return exp.Struct(
  83        expressions=[
  84            exp.PropertyEQ(this=k, expression=v) for k, v in zip(expression.keys, expression.values)
  85        ]
  86    )
  87
  88
  89def _build_datediff(args: t.List) -> exp.DateDiff:
  90    return exp.DateDiff(
  91        this=seq_get(args, 2), expression=seq_get(args, 1), unit=map_date_part(seq_get(args, 0))
  92    )
  93
  94
  95def _build_date_time_add(expr_type: t.Type[E]) -> t.Callable[[t.List], E]:
  96    def _builder(args: t.List) -> E:
  97        return expr_type(
  98            this=seq_get(args, 2),
  99            expression=seq_get(args, 1),
 100            unit=map_date_part(seq_get(args, 0)),
 101        )
 102
 103    return _builder
 104
 105
 106# https://docs.snowflake.com/en/sql-reference/functions/div0
 107def _build_if_from_div0(args: t.List) -> exp.If:
 108    cond = exp.EQ(this=seq_get(args, 1), expression=exp.Literal.number(0)).and_(
 109        exp.Is(this=seq_get(args, 0), expression=exp.null()).not_()
 110    )
 111    true = exp.Literal.number(0)
 112    false = exp.Div(this=seq_get(args, 0), expression=seq_get(args, 1))
 113    return exp.If(this=cond, true=true, false=false)
 114
 115
 116# https://docs.snowflake.com/en/sql-reference/functions/zeroifnull
 117def _build_if_from_zeroifnull(args: t.List) -> exp.If:
 118    cond = exp.Is(this=seq_get(args, 0), expression=exp.Null())
 119    return exp.If(this=cond, true=exp.Literal.number(0), false=seq_get(args, 0))
 120
 121
 122# https://docs.snowflake.com/en/sql-reference/functions/zeroifnull
 123def _build_if_from_nullifzero(args: t.List) -> exp.If:
 124    cond = exp.EQ(this=seq_get(args, 0), expression=exp.Literal.number(0))
 125    return exp.If(this=cond, true=exp.Null(), false=seq_get(args, 0))
 126
 127
 128def _regexpilike_sql(self: Snowflake.Generator, expression: exp.RegexpILike) -> str:
 129    flag = expression.text("flag")
 130
 131    if "i" not in flag:
 132        flag += "i"
 133
 134    return self.func(
 135        "REGEXP_LIKE", expression.this, expression.expression, exp.Literal.string(flag)
 136    )
 137
 138
 139def _build_regexp_replace(args: t.List) -> exp.RegexpReplace:
 140    regexp_replace = exp.RegexpReplace.from_arg_list(args)
 141
 142    if not regexp_replace.args.get("replacement"):
 143        regexp_replace.set("replacement", exp.Literal.string(""))
 144
 145    return regexp_replace
 146
 147
 148def _show_parser(*args: t.Any, **kwargs: t.Any) -> t.Callable[[Snowflake.Parser], exp.Show]:
 149    def _parse(self: Snowflake.Parser) -> exp.Show:
 150        return self._parse_show_snowflake(*args, **kwargs)
 151
 152    return _parse
 153
 154
 155def _date_trunc_to_time(args: t.List) -> exp.DateTrunc | exp.TimestampTrunc:
 156    trunc = date_trunc_to_time(args)
 157    trunc.set("unit", map_date_part(trunc.args["unit"]))
 158    return trunc
 159
 160
 161def _unqualify_unpivot_columns(expression: exp.Expression) -> exp.Expression:
 162    """
 163    Snowflake doesn't allow columns referenced in UNPIVOT to be qualified,
 164    so we need to unqualify them.
 165
 166    Example:
 167        >>> from sqlglot import parse_one
 168        >>> expr = parse_one("SELECT * FROM m_sales UNPIVOT(sales FOR month IN (m_sales.jan, feb, mar, april))")
 169        >>> print(_unqualify_unpivot_columns(expr).sql(dialect="snowflake"))
 170        SELECT * FROM m_sales UNPIVOT(sales FOR month IN (jan, feb, mar, april))
 171    """
 172    if isinstance(expression, exp.Pivot) and expression.unpivot:
 173        expression = transforms.unqualify_columns(expression)
 174
 175    return expression
 176
 177
def _flatten_structured_types_unless_iceberg(expression: exp.Expression) -> exp.Expression:
    """Strip inner type parameters from nested column types (e.g. ARRAY<INT> ->
    ARRAY) in a CREATE statement, unless the table has an IcebergProperty."""
    assert isinstance(expression, exp.Create)

    def _flatten_structured_type(expression: exp.DataType) -> exp.DataType:
        # Clearing "expressions" drops the parameters of a nested type
        if expression.this in exp.DataType.NESTED_TYPES:
            expression.set("expressions", None)
        return expression

    props = expression.args.get("properties")
    # Iceberg tables keep their structured types, so flatten only when no
    # IcebergProperty is present on the CREATE
    if isinstance(expression.this, exp.Schema) and not (props and props.find(exp.IcebergProperty)):
        for schema_expression in expression.this.expressions:
            if isinstance(schema_expression, exp.ColumnDef):
                column_type = schema_expression.kind
                if isinstance(column_type, exp.DataType):
                    column_type.transform(_flatten_structured_type, copy=False)

    return expression
 195
 196
def _unnest_generate_date_array(expression: exp.Expression) -> exp.Expression:
    """Rewrite `UNNEST(GENERATE_DATE_ARRAY(start, end, step))` FROM/JOIN sources
    into an UNNEST over a generated number sequence whose values are added to
    the start date.

    Only the common case is handled: a single GENERATE_DATE_ARRAY argument with
    both endpoints present and an interval step of exactly 1 (any unit).
    """
    if isinstance(expression, exp.Select):
        for unnest in expression.find_all(exp.Unnest):
            if (
                isinstance(unnest.parent, (exp.From, exp.Join))
                and len(unnest.expressions) == 1
                and isinstance(unnest.expressions[0], exp.GenerateDateArray)
            ):
                generate_date_array = unnest.expressions[0]
                start = generate_date_array.args.get("start")
                end = generate_date_array.args.get("end")
                step = generate_date_array.args.get("step")

                # Skip ranges we can't rewrite safely (missing bounds or step != 1)
                if not start or not end or not isinstance(step, exp.Interval) or step.name != "1":
                    continue

                unit = step.args.get("unit")

                unnest_alias = unnest.args.get("alias")
                if unnest_alias:
                    # Copy so the original alias node isn't shared with the new subquery
                    unnest_alias = unnest_alias.copy()
                    sequence_value_name = seq_get(unnest_alias.columns, 0) or "value"
                else:
                    sequence_value_name = "value"

                # We'll add the next sequence value to the starting date and project the result
                date_add = _build_date_time_add(exp.DateAdd)(
                    [unit, exp.cast(sequence_value_name, "int"), exp.cast(start, "date")]
                ).as_(sequence_value_name)

                # We use DATEDIFF to compute the number of sequence values needed
                number_sequence = Snowflake.Parser.FUNCTIONS["ARRAY_GENERATE_RANGE"](
                    [exp.Literal.number(0), _build_datediff([unit, start, end]) + 1]
                )

                unnest.set("expressions", [number_sequence])
                unnest.replace(exp.select(date_add).from_(unnest.copy()).subquery(unnest_alias))

    return expression
 236
 237
 238def _build_regexp_extract(expr_type: t.Type[E]) -> t.Callable[[t.List], E]:
 239    def _builder(args: t.List) -> E:
 240        return expr_type(
 241            this=seq_get(args, 0),
 242            expression=seq_get(args, 1),
 243            position=seq_get(args, 2),
 244            occurrence=seq_get(args, 3),
 245            parameters=seq_get(args, 4),
 246            group=seq_get(args, 5) or exp.Literal.number(0),
 247        )
 248
 249    return _builder
 250
 251
def _regexpextract_sql(
    self: Snowflake.Generator, expression: exp.RegexpExtract | exp.RegexpExtractAll
) -> str:
    """Render RegexpExtract(All) as REGEXP_SUBSTR / REGEXP_EXTRACT_ALL."""
    # Other dialects don't support all of the following parameters, so we need to
    # generate default values as necessary to ensure the transpilation is correct
    group = expression.args.get("group")

    # To avoid generating all these default values, we set group to None if
    # it's 0 (also default value) which doesn't trigger the following chain
    if group and group.name == "0":
        group = None

    # Arguments are positional, so each present argument forces defaults for
    # every argument to its left — hence the cascading `or` chain
    parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
    occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
    position = expression.args.get("position") or (occurrence and exp.Literal.number(1))

    return self.func(
        "REGEXP_SUBSTR" if isinstance(expression, exp.RegexpExtract) else "REGEXP_EXTRACT_ALL",
        expression.this,
        expression.expression,
        position,
        occurrence,
        parameters,
        group,
    )
 275
 276
 277def _json_extract_value_array_sql(
 278    self: Snowflake.Generator, expression: exp.JSONValueArray | exp.JSONExtractArray
 279) -> str:
 280    json_extract = exp.JSONExtract(this=expression.this, expression=expression.expression)
 281    ident = exp.to_identifier("x")
 282
 283    if isinstance(expression, exp.JSONValueArray):
 284        this: exp.Expression = exp.cast(ident, to=exp.DataType.Type.VARCHAR)
 285    else:
 286        this = exp.ParseJSON(this=f"TO_JSON({ident})")
 287
 288    transform_lambda = exp.Lambda(expressions=[ident], this=this)
 289
 290    return self.func("TRANSFORM", json_extract, transform_lambda)
 291
 292
class Snowflake(Dialect):
    """Snowflake SQL dialect."""

    # https://docs.snowflake.com/en/sql-reference/identifiers-syntax
    NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE
    NULL_ORDERING = "nulls_are_large"
    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
    SUPPORTS_USER_DEFINED_TYPES = False
    SUPPORTS_SEMI_ANTI_JOIN = False
    PREFER_CTE_ALIAS_COLUMN = True
    TABLESAMPLE_SIZE_IS_PERCENT = True
    COPY_PARAMS_ARE_CSV = False
    ARRAY_AGG_INCLUDES_NULLS = None

    # Snowflake time-format tokens mapped to strftime-style directives
    TIME_MAPPING = {
        "YYYY": "%Y",
        "yyyy": "%Y",
        "YY": "%y",
        "yy": "%y",
        "MMMM": "%B",
        "mmmm": "%B",
        "MON": "%b",
        "mon": "%b",
        "MM": "%m",
        "mm": "%m",
        "DD": "%d",
        # NOTE(review): lowercase "dd"/"dy" map to "%-d"/"%w" while the uppercase
        # forms map to "%d"/"%a" — confirm this asymmetry is intentional
        "dd": "%-d",
        "DY": "%a",
        "dy": "%w",
        "HH24": "%H",
        "hh24": "%H",
        "HH12": "%I",
        "hh12": "%I",
        "MI": "%M",
        "mi": "%M",
        "SS": "%S",
        "ss": "%S",
        "FF": "%f",
        "ff": "%f",
        "FF6": "%f",
        "ff6": "%f",
    }
 333
 334    def quote_identifier(self, expression: E, identify: bool = True) -> E:
 335        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
 336        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
 337        if (
 338            isinstance(expression, exp.Identifier)
 339            and isinstance(expression.parent, exp.Table)
 340            and expression.name.lower() == "dual"
 341        ):
 342            return expression  # type: ignore
 343
 344        return super().quote_identifier(expression, identify=identify)
 345
    class Parser(parser.Parser):
        """Parser for Snowflake-specific SQL syntax."""

        IDENTIFY_PIVOT_STRINGS = True
        DEFAULT_SAMPLING_METHOD = "BERNOULLI"
        COLON_IS_VARIANT_EXTRACT = True

        ID_VAR_TOKENS = {
            *parser.Parser.ID_VAR_TOKENS,
            TokenType.MATCH_CONDITION,
        }

        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
        TABLE_ALIAS_TOKENS.discard(TokenType.MATCH_CONDITION)

        # Snowflake function name -> sqlglot expression builder
        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list,
            "ARRAY_CONSTRUCT": lambda args: exp.Array(expressions=args),
            # ARRAY_CONTAINS takes (value, array); ArrayContains stores (array, value)
            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
                this=seq_get(args, 1), expression=seq_get(args, 0)
            ),
            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
                # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
                start=seq_get(args, 0),
                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
                step=seq_get(args, 2),
            ),
            "BITXOR": binary_from_function(exp.BitwiseXor),
            "BIT_XOR": binary_from_function(exp.BitwiseXor),
            "BOOLXOR": binary_from_function(exp.Xor),
            "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
            "DATE_TRUNC": _date_trunc_to_time,
            "DATEADD": _build_date_time_add(exp.DateAdd),
            "DATEDIFF": _build_datediff,
            "DIV0": _build_if_from_div0,
            "EDITDISTANCE": lambda args: exp.Levenshtein(
                this=seq_get(args, 0), expression=seq_get(args, 1), max_dist=seq_get(args, 2)
            ),
            "FLATTEN": exp.Explode.from_arg_list,
            "GET_PATH": lambda args, dialect: exp.JSONExtract(
                this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
            ),
            "IFF": exp.If.from_arg_list,
            "LAST_DAY": lambda args: exp.LastDay(
                this=seq_get(args, 0), unit=map_date_part(seq_get(args, 1))
            ),
            "LEN": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "LENGTH": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "LISTAGG": exp.GroupConcat.from_arg_list,
            "NULLIFZERO": _build_if_from_nullifzero,
            "OBJECT_CONSTRUCT": _build_object_construct,
            "REGEXP_EXTRACT_ALL": _build_regexp_extract(exp.RegexpExtractAll),
            "REGEXP_REPLACE": _build_regexp_replace,
            "REGEXP_SUBSTR": _build_regexp_extract(exp.RegexpExtract),
            "REGEXP_SUBSTR_ALL": _build_regexp_extract(exp.RegexpExtractAll),
            "RLIKE": exp.RegexpLike.from_arg_list,
            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
            "TIMEADD": _build_date_time_add(exp.TimeAdd),
            "TIMEDIFF": _build_datediff,
            "TIMESTAMPADD": _build_date_time_add(exp.DateAdd),
            "TIMESTAMPDIFF": _build_datediff,
            "TIMESTAMPFROMPARTS": build_timestamp_from_parts,
            "TIMESTAMP_FROM_PARTS": build_timestamp_from_parts,
            "TIMESTAMPNTZFROMPARTS": build_timestamp_from_parts,
            "TIMESTAMP_NTZ_FROM_PARTS": build_timestamp_from_parts,
            "TRY_PARSE_JSON": lambda args: exp.ParseJSON(this=seq_get(args, 0), safe=True),
            "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
            "TRY_TO_TIMESTAMP": _build_datetime(
                "TRY_TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP, safe=True
            ),
            "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
            "TO_NUMBER": lambda args: exp.ToNumber(
                this=seq_get(args, 0),
                format=seq_get(args, 1),
                precision=seq_get(args, 2),
                scale=seq_get(args, 3),
            ),
            "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
            "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
            "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
            "TO_VARCHAR": exp.ToChar.from_arg_list,
            "ZEROIFNULL": _build_if_from_zeroifnull,
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "DATE_PART": lambda self: self._parse_date_part(),
            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
        }
        # TRIM is parsed as a regular function in Snowflake
        FUNCTION_PARSERS.pop("TRIM")

        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}

        RANGE_PARSERS = {
            **parser.Parser.RANGE_PARSERS,
            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
        }

        ALTER_PARSERS = {
            **parser.Parser.ALTER_PARSERS,
            "UNSET": lambda self: self.expression(
                exp.Set,
                tag=self._match_text_seq("TAG"),
                expressions=self._parse_csv(self._parse_id_var),
                unset=True,
            ),
        }

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.SHOW: lambda self: self._parse_show(),
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "LOCATION": lambda self: self._parse_location_property(),
        }

        TYPE_CONVERTERS = {
            # https://docs.snowflake.com/en/sql-reference/data-types-numeric#number
            exp.DataType.Type.DECIMAL: build_default_decimal_type(precision=38, scale=0),
        }

        # SHOW <kind> statements supported by _parse_show_snowflake
        SHOW_PARSERS = {
            "SCHEMAS": _show_parser("SCHEMAS"),
            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
            "OBJECTS": _show_parser("OBJECTS"),
            "TERSE OBJECTS": _show_parser("OBJECTS"),
            "TABLES": _show_parser("TABLES"),
            "TERSE TABLES": _show_parser("TABLES"),
            "VIEWS": _show_parser("VIEWS"),
            "TERSE VIEWS": _show_parser("VIEWS"),
            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "SEQUENCES": _show_parser("SEQUENCES"),
            "TERSE SEQUENCES": _show_parser("SEQUENCES"),
            "COLUMNS": _show_parser("COLUMNS"),
            "USERS": _show_parser("USERS"),
            "TERSE USERS": _show_parser("USERS"),
        }

        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            "WITH": lambda self: self._parse_with_constraint(),
            "MASKING": lambda self: self._parse_with_constraint(),
            "PROJECTION": lambda self: self._parse_with_constraint(),
            "TAG": lambda self: self._parse_with_constraint(),
        }

        STAGED_FILE_SINGLE_TOKENS = {
            TokenType.DOT,
            TokenType.MOD,
            TokenType.SLASH,
        }

        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]

        SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}

        NON_TABLE_CREATABLES = {"STORAGE INTEGRATION", "TAG", "WAREHOUSE", "STREAMLIT"}

        LAMBDAS = {
            **parser.Parser.LAMBDAS,
            TokenType.ARROW: lambda self, expressions: self.expression(
                exp.Lambda,
                this=self._replace_lambda(
                    self._parse_assignment(),
                    expressions,
                ),
                # Lambda parameters may carry type annotations parsed as casts;
                # keep only the underlying identifier here
                expressions=[e.this if isinstance(e, exp.Cast) else e for e in expressions],
            ),
        }
 524
 525        def _negate_range(
 526            self, this: t.Optional[exp.Expression] = None
 527        ) -> t.Optional[exp.Expression]:
 528            if not this:
 529                return this
 530
 531            query = this.args.get("query")
 532            if isinstance(this, exp.In) and isinstance(query, exp.Query):
 533                # Snowflake treats `value NOT IN (subquery)` as `VALUE <> ALL (subquery)`, so
 534                # we do this conversion here to avoid parsing it into `NOT value IN (subquery)`
 535                # which can produce different results (most likely a SnowFlake bug).
 536                #
 537                # https://docs.snowflake.com/en/sql-reference/functions/in
 538                # Context: https://github.com/tobymao/sqlglot/issues/3890
 539                return self.expression(
 540                    exp.NEQ, this=this.this, expression=exp.All(this=query.unnest())
 541                )
 542
 543            return self.expression(exp.Not, this=this)
 544
        def _parse_with_constraint(self) -> t.Optional[exp.Expression]:
            """Parse a WITH MASKING POLICY / PROJECTION POLICY / TAG column constraint.

            Returns None when none of the three constraint forms matches.
            """
            # The constraint may appear without a leading WITH; if the token just
            # consumed wasn't WITH, step back so the keyword (MASKING/PROJECTION/TAG)
            # can be re-matched below.
            if self._prev.token_type != TokenType.WITH:
                self._retreat(self._index - 1)

            if self._match_text_seq("MASKING", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.MaskingPolicyColumnConstraint,
                    # Qualified policy names are kept as dotted identifiers
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                    expressions=self._match(TokenType.USING)
                    and self._parse_wrapped_csv(self._parse_id_var),
                )
            if self._match_text_seq("PROJECTION", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.ProjectionPolicyColumnConstraint,
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                )
            if self._match(TokenType.TAG):
                return self.expression(
                    exp.TagColumnConstraint,
                    expressions=self._parse_wrapped_csv(self._parse_property),
                )

            return None
 570
 571        def _parse_create(self) -> exp.Create | exp.Command:
 572            expression = super()._parse_create()
 573            if isinstance(expression, exp.Create) and expression.kind in self.NON_TABLE_CREATABLES:
 574                # Replace the Table node with the enclosed Identifier
 575                expression.this.replace(expression.this.this)
 576
 577            return expression
 578
 579        # https://docs.snowflake.com/en/sql-reference/functions/date_part.html
 580        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
 581        def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
 582            this = self._parse_var() or self._parse_type()
 583
 584            if not this:
 585                return None
 586
 587            self._match(TokenType.COMMA)
 588            expression = self._parse_bitwise()
 589            this = map_date_part(this)
 590            name = this.name.upper()
 591
 592            if name.startswith("EPOCH"):
 593                if name == "EPOCH_MILLISECOND":
 594                    scale = 10**3
 595                elif name == "EPOCH_MICROSECOND":
 596                    scale = 10**6
 597                elif name == "EPOCH_NANOSECOND":
 598                    scale = 10**9
 599                else:
 600                    scale = None
 601
 602                ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
 603                to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)
 604
 605                if scale:
 606                    to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))
 607
 608                return to_unix
 609
 610            return self.expression(exp.Extract, this=this, expression=expression)
 611
 612        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
 613            if is_map:
 614                # Keys are strings in Snowflake's objects, see also:
 615                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
 616                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
 617                return self._parse_slice(self._parse_string())
 618
 619            return self._parse_slice(self._parse_alias(self._parse_assignment(), explicit=True))
 620
 621        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
 622            lateral = super()._parse_lateral()
 623            if not lateral:
 624                return lateral
 625
 626            if isinstance(lateral.this, exp.Explode):
 627                table_alias = lateral.args.get("alias")
 628                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
 629                if table_alias and not table_alias.args.get("columns"):
 630                    table_alias.set("columns", columns)
 631                elif not table_alias:
 632                    exp.alias_(lateral, "_flattened", table=columns, copy=False)
 633
 634            return lateral
 635
        def _parse_table_parts(
            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
        ) -> exp.Table:
            """Parse table parts, including Snowflake staged-file references.

            A staged file is either a quoted string or an @-prefixed location path,
            optionally followed by (FILE_FORMAT => ..., PATTERN => ...) options.
            https://docs.snowflake.com/en/user-guide/querying-stage
            """
            if self._match(TokenType.STRING, advance=False):
                table = self._parse_string()
            elif self._match_text_seq("@", advance=False):
                table = self._parse_location_path()
            else:
                table = None

            if table:
                file_format = None
                pattern = None

                # Optional parenthesized options after the staged-file reference
                wrapped = self._match(TokenType.L_PAREN)
                while self._curr and wrapped and not self._match(TokenType.R_PAREN):
                    if self._match_text_seq("FILE_FORMAT", "=>"):
                        # FILE_FORMAT may be a string or a (possibly qualified) name
                        file_format = self._parse_string() or super()._parse_table_parts(
                            is_db_reference=is_db_reference
                        )
                    elif self._match_text_seq("PATTERN", "=>"):
                        pattern = self._parse_string()
                    else:
                        break

                    self._match(TokenType.COMMA)

                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
            else:
                table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)

            return table
 669
 670        def _parse_id_var(
 671            self,
 672            any_token: bool = True,
 673            tokens: t.Optional[t.Collection[TokenType]] = None,
 674        ) -> t.Optional[exp.Expression]:
 675            if self._match_text_seq("IDENTIFIER", "("):
 676                identifier = (
 677                    super()._parse_id_var(any_token=any_token, tokens=tokens)
 678                    or self._parse_string()
 679                )
 680                self._match_r_paren()
 681                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])
 682
 683            return super()._parse_id_var(any_token=any_token, tokens=tokens)
 684
        def _parse_show_snowflake(self, this: str) -> exp.Show:
            """Parse the remainder of a SHOW statement for object kind `this`."""
            scope = None
            scope_kind = None

            # will identify SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
            # which is syntactically valid but has no effect on the output
            terse = self._tokens[self._index - 2].text.upper() == "TERSE"

            history = self._match_text_seq("HISTORY")

            like = self._parse_string() if self._match(TokenType.LIKE) else None

            if self._match(TokenType.IN):
                if self._match_text_seq("ACCOUNT"):
                    scope_kind = "ACCOUNT"
                elif self._match_set(self.DB_CREATABLES):
                    scope_kind = self._prev.text.upper()
                    if self._curr:
                        scope = self._parse_table_parts()
                elif self._curr:
                    # No explicit kind keyword: infer SCHEMA vs TABLE from the shown kind
                    scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
                    scope = self._parse_table_parts()

            return self.expression(
                exp.Show,
                **{
                    "terse": terse,
                    "this": this,
                    "history": history,
                    "like": like,
                    "scope": scope,
                    "scope_kind": scope_kind,
                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
                    "limit": self._parse_limit(),
                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
                },
            )
 722
 723        def _parse_location_property(self) -> exp.LocationProperty:
 724            self._match(TokenType.EQ)
 725            return self.expression(exp.LocationProperty, this=self._parse_location_path())
 726
 727        def _parse_file_location(self) -> t.Optional[exp.Expression]:
 728            # Parse either a subquery or a staged file
 729            return (
 730                self._parse_select(table=True, parse_subquery_alias=False)
 731                if self._match(TokenType.L_PAREN, advance=False)
 732                else self._parse_table_parts()
 733            )
 734
 735        def _parse_location_path(self) -> exp.Var:
 736            parts = [self._advance_any(ignore_reserved=True)]
 737
 738            # We avoid consuming a comma token because external tables like @foo and @bar
 739            # can be joined in a query with a comma separator, as well as closing paren
 740            # in case of subqueries
 741            while self._is_connected() and not self._match_set(
 742                (TokenType.COMMA, TokenType.L_PAREN, TokenType.R_PAREN), advance=False
 743            ):
 744                parts.append(self._advance_any(ignore_reserved=True))
 745
 746            return exp.var("".join(part.text for part in parts if part))
 747
 748        def _parse_lambda_arg(self) -> t.Optional[exp.Expression]:
 749            this = super()._parse_lambda_arg()
 750
 751            if not this:
 752                return this
 753
 754            typ = self._parse_types()
 755
 756            if typ:
 757                return self.expression(exp.Cast, this=this, to=typ)
 758
 759            return this
 760
    class Tokenizer(tokens.Tokenizer):
        """Snowflake-specific tokenizer settings."""

        # Both backslash and a doubled quote escape inside string literals
        STRING_ESCAPES = ["\\", "'"]
        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
        # $$ ... $$ delimits raw (dollar-quoted) strings
        RAW_STRINGS = ["$$"]
        COMMENTS = ["--", "//", ("/*", "*/")]
        NESTED_COMMENTS = False

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "BYTEINT": TokenType.INT,
            "CHAR VARYING": TokenType.VARCHAR,
            "CHARACTER VARYING": TokenType.VARCHAR,
            "EXCLUDE": TokenType.EXCEPT,
            "ILIKE ANY": TokenType.ILIKE_ANY,
            "LIKE ANY": TokenType.LIKE_ANY,
            "MATCH_CONDITION": TokenType.MATCH_CONDITION,
            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
            "MINUS": TokenType.EXCEPT,
            "NCHAR VARYING": TokenType.VARCHAR,
            "PUT": TokenType.COMMAND,
            "REMOVE": TokenType.COMMAND,
            "RM": TokenType.COMMAND,
            "SAMPLE": TokenType.TABLE_SAMPLE,
            "SQL_DOUBLE": TokenType.DOUBLE,
            "SQL_VARCHAR": TokenType.VARCHAR,
            "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
            "TAG": TokenType.TAG,
            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
            "TOP": TokenType.TOP,
            "WAREHOUSE": TokenType.WAREHOUSE,
            "STREAMLIT": TokenType.STREAMLIT,
        }
        # Snowflake has no /*+ ... */ hint comments
        KEYWORDS.pop("/*+")

        SINGLE_TOKENS = {
            **tokens.Tokenizer.SINGLE_TOKENS,
            # $ introduces parameters (e.g. $1) and session variables
            "$": TokenType.PARAMETER,
        }

        VAR_SINGLE_TOKENS = {"$"}

        # SHOW is parsed properly (see _parse_show_snowflake), not as a raw command
        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
 803
    class Generator(generator.Generator):
        """Generates Snowflake SQL text from a sqlglot expression tree."""

        # Dialect capability/formatting flags consumed by the base generator
        PARAMETER_TOKEN = "$"
        MATCHED_BY_SOURCE = False
        SINGLE_STRING_INTERVAL = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        QUERY_HINTS = False
        AGGREGATE_FILTER_SUPPORTED = False
        SUPPORTS_TABLE_COPY = False
        COLLATE_IS_FUNC = True
        LIMIT_ONLY_LITERALS = True
        JSON_KEY_VALUE_PAIR_SEP = ","
        INSERT_OVERWRITE = " OVERWRITE INTO"
        STRUCT_DELIMITER = ("(", ")")
        COPY_PARAMS_ARE_WRAPPED = False
        COPY_PARAMS_EQ_REQUIRED = True
        STAR_EXCEPT = "EXCLUDE"
        SUPPORTS_EXPLODING_PROJECTIONS = False
        ARRAY_CONCAT_IS_VAR_LEN = False
        SUPPORTS_CONVERT_TIMEZONE = True
        EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
        SUPPORTS_MEDIAN = True
        ARRAY_SIZE_NAME = "ARRAY_SIZE"

        # Per-node SQL renderers; mostly renames onto Snowflake spellings
        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
            exp.ArgMax: rename_func("MAX_BY"),
            exp.ArgMin: rename_func("MIN_BY"),
            exp.Array: inline_array_sql,
            exp.ArrayConcat: lambda self, e: self.arrayconcat_sql(e, name="ARRAY_CAT"),
            # ARRAY_CONTAINS takes (needle, haystack), i.e. reversed argument order
            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
            exp.AtTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
            ),
            exp.BitwiseXor: rename_func("BITXOR"),
            exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DatetimeAdd: date_delta_sql("TIMESTAMPADD"),
            exp.DatetimeDiff: timestampdiff_sql,
            exp.DateStrToDate: datestrtodate_sql,
            exp.DayOfMonth: rename_func("DAYOFMONTH"),
            exp.DayOfWeek: rename_func("DAYOFWEEK"),
            exp.DayOfYear: rename_func("DAYOFYEAR"),
            exp.Explode: rename_func("FLATTEN"),
            exp.Extract: rename_func("DATE_PART"),
            exp.FromTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
            ),
            # GenerateSeries is end-inclusive while ARRAY_GENERATE_RANGE is
            # end-exclusive, hence the + 1
            exp.GenerateSeries: lambda self, e: self.func(
                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
            ),
            exp.GroupConcat: rename_func("LISTAGG"),
            exp.If: if_sql(name="IFF", false_value="NULL"),
            exp.JSONExtractArray: _json_extract_value_array_sql,
            exp.JSONExtractScalar: lambda self, e: self.func(
                "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
            ),
            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
            exp.JSONPathRoot: lambda *_: "",
            exp.JSONValueArray: _json_extract_value_array_sql,
            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
            exp.LogicalOr: rename_func("BOOLOR_AGG"),
            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.ParseJSON: lambda self, e: self.func(
                "TRY_PARSE_JSON" if e.args.get("safe") else "PARSE_JSON", e.this
            ),
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.PercentileCont: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.PercentileDisc: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
            exp.RegexpExtract: _regexpextract_sql,
            exp.RegexpExtractAll: _regexpextract_sql,
            exp.RegexpILike: _regexpilike_sql,
            exp.Rand: rename_func("RANDOM"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_distinct_on,
                    transforms.explode_to_unnest(),
                    transforms.eliminate_semi_and_anti_joins,
                    _unnest_generate_date_array,
                ]
            ),
            exp.SafeDivide: lambda self, e: no_safe_divide_sql(self, e, "IFF"),
            exp.SHA: rename_func("SHA1"),
            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
            exp.StartsWith: rename_func("STARTSWITH"),
            exp.StrPosition: lambda self, e: self.func(
                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
            ),
            exp.StrToDate: lambda self, e: self.func("DATE", e.this, self.format_time(e)),
            exp.Stuff: rename_func("INSERT"),
            exp.TimeAdd: date_delta_sql("TIMEADD"),
            exp.Timestamp: no_timestamp_sql,
            exp.TimestampAdd: date_delta_sql("TIMESTAMPADD"),
            exp.TimestampDiff: lambda self, e: self.func(
                "TIMESTAMPDIFF", e.unit, e.expression, e.this
            ),
            exp.TimestampTrunc: timestamptrunc_sql(),
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeToStr: lambda self, e: self.func(
                "TO_CHAR", exp.cast(e.this, exp.DataType.Type.TIMESTAMP), self.format_time(e)
            ),
            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
            exp.ToArray: rename_func("TO_ARRAY"),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.ToDouble: rename_func("TO_DOUBLE"),
            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.TsOrDsToDate: lambda self, e: self.func(
                "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
            ),
            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
            exp.Uuid: rename_func("UUID_STRING"),
            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
            exp.Xor: rename_func("BOOLXOR"),
            # EDITDISTANCE doesn't support the cost arguments of Levenshtein
            exp.Levenshtein: unsupported_args("ins_cost", "del_cost", "sub_cost")(
                rename_func("EDITDISTANCE")
            ),
        }

        SUPPORTED_JSON_PATH_PARTS = {
            exp.JSONPathKey,
            exp.JSONPathRoot,
            exp.JSONPathSubscript,
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.NESTED: "OBJECT",
            exp.DataType.Type.STRUCT: "OBJECT",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        # Expressions that cannot appear inside a VALUES table (see values_sql)
        UNSUPPORTED_VALUES_EXPRESSIONS = {
            exp.Map,
            exp.StarMap,
            exp.Struct,
            exp.VarMap,
        }

        def with_properties(self, properties: exp.Properties) -> str:
            """Render properties space-separated, unwrapped, with no WITH prefix."""
            return self.properties(properties, wrapped=False, prefix=self.sep(""), sep=" ")

        def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
            """Render VALUES, disabling the tabular form for unsupported row contents."""
            # Snowflake can't represent these expressions inside a VALUES table,
            # so fall back to the base generator's non-table rendering
            if expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS):
                values_as_table = False

            return super().values_sql(expression, values_as_table=values_as_table)

        def datatype_sql(self, expression: exp.DataType) -> str:
            """Render a data type, collapsing typed structs to a plain OBJECT."""
            expressions = expression.expressions
            if (
                expressions
                and expression.is_type(*exp.DataType.STRUCT_TYPES)
                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
            ):
                # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
                return "OBJECT"

            return super().datatype_sql(expression)

        def tonumber_sql(self, expression: exp.ToNumber) -> str:
            """Render TO_NUMBER(value[, format][, precision][, scale])."""
            return self.func(
                "TO_NUMBER",
                expression.this,
                expression.args.get("format"),
                expression.args.get("precision"),
                expression.args.get("scale"),
            )

        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
            """Render TIMESTAMP_FROM_PARTS, converting a milli arg to nanoseconds."""
            milli = expression.args.get("milli")
            if milli is not None:
                # TIMESTAMP_FROM_PARTS takes a nanoseconds argument, not milliseconds
                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
                expression.set("nano", milli_to_nano)

            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)

        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
            """Render casts, using TO_GEOGRAPHY/TO_GEOMETRY for spatial types."""
            if expression.is_type(exp.DataType.Type.GEOGRAPHY):
                return self.func("TO_GEOGRAPHY", expression.this)
            if expression.is_type(exp.DataType.Type.GEOMETRY):
                return self.func("TO_GEOMETRY", expression.this)

            return super().cast_sql(expression, safe_prefix=safe_prefix)

        def trycast_sql(self, expression: exp.TryCast) -> str:
            """Render TRY_CAST, downgrading to a plain CAST for non-string operands."""
            value = expression.this

            if value.type is None:
                # Type info is needed to decide; annotate lazily to avoid the
                # optimizer import at module load time
                from sqlglot.optimizer.annotate_types import annotate_types

                value = annotate_types(value)

            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
                return super().trycast_sql(expression)

            # TRY_CAST only works for string values in Snowflake
            return self.cast_sql(expression)

        def log_sql(self, expression: exp.Log) -> str:
            """Render LOG; a single-argument LOG is the natural log, spelled LN."""
            if not expression.expression:
                return self.func("LN", expression.this)

            return super().log_sql(expression)

        def unnest_sql(self, expression: exp.Unnest) -> str:
            """Render UNNEST as TABLE(FLATTEN(INPUT => ...)) with positional aliases.

            FLATTEN exposes the columns SEQ, KEY, PATH, INDEX, VALUE, THIS, so the
            caller's alias columns are mapped onto those positions.
            """
            unnest_alias = expression.args.get("alias")
            offset = expression.args.get("offset")

            columns = [
                exp.to_identifier("seq"),
                exp.to_identifier("key"),
                exp.to_identifier("path"),
                # An explicit offset alias takes FLATTEN's INDEX position
                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
                seq_get(unnest_alias.columns if unnest_alias else [], 0)
                or exp.to_identifier("value"),
                exp.to_identifier("this"),
            ]

            if unnest_alias:
                unnest_alias.set("columns", columns)
            else:
                unnest_alias = exp.TableAlias(this="_u", columns=columns)

            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
            alias = self.sql(unnest_alias)
            alias = f" AS {alias}" if alias else ""
            return f"{explode}{alias}"

        def show_sql(self, expression: exp.Show) -> str:
            """Render a Snowflake SHOW statement from an exp.Show node."""
            terse = "TERSE " if expression.args.get("terse") else ""
            history = " HISTORY" if expression.args.get("history") else ""
            like = self.sql(expression, "like")
            like = f" LIKE {like}" if like else ""

            scope = self.sql(expression, "scope")
            scope = f" {scope}" if scope else ""

            scope_kind = self.sql(expression, "scope_kind")
            if scope_kind:
                scope_kind = f" IN {scope_kind}"

            starts_with = self.sql(expression, "starts_with")
            if starts_with:
                starts_with = f" STARTS WITH {starts_with}"

            limit = self.sql(expression, "limit")

            from_ = self.sql(expression, "from")
            if from_:
                from_ = f" FROM {from_}"

            return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"

        def describe_sql(self, expression: exp.Describe) -> str:
            """Render DESCRIBE, defaulting the kind to TABLE when unknown."""
            # Default to table if kind is unknown
            kind_value = expression.args.get("kind") or "TABLE"
            # NOTE(review): kind_value is always truthy here, so the conditional
            # below never yields ""
            kind = f" {kind_value}" if kind_value else ""
            this = f" {self.sql(expression, 'this')}"
            expressions = self.expressions(expression, flat=True)
            expressions = f" {expressions}" if expressions else ""
            return f"DESCRIBE{kind}{this}{expressions}"

        def generatedasidentitycolumnconstraint_sql(
            self, expression: exp.GeneratedAsIdentityColumnConstraint
        ) -> str:
            """Render GENERATED AS IDENTITY as AUTOINCREMENT [START n] [INCREMENT n]."""
            start = expression.args.get("start")
            start = f" START {start}" if start else ""
            increment = expression.args.get("increment")
            increment = f" INCREMENT {increment}" if increment else ""
            return f"AUTOINCREMENT{start}{increment}"

        def cluster_sql(self, expression: exp.Cluster) -> str:
            """Render a clustering key as CLUSTER BY (expr, ...)."""
            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"

        def struct_sql(self, expression: exp.Struct) -> str:
            """Render a struct literal as OBJECT_CONSTRUCT(k1, v1, ...).

            Unnamed fields get positional keys '_0', '_1', ...
            """
            keys = []
            values = []

            for i, e in enumerate(expression.expressions):
                if isinstance(e, exp.PropertyEQ):
                    keys.append(
                        exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this
                    )
                    values.append(e.expression)
                else:
                    keys.append(exp.Literal.string(f"_{i}"))
                    values.append(e)

            return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))

        @unsupported_args("weight", "accuracy")
        def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
            """Render APPROX_PERCENTILE(value, quantile)."""
            return self.func("APPROX_PERCENTILE", expression.this, expression.args.get("quantile"))

        def alterset_sql(self, expression: exp.AlterSet) -> str:
            """Render the SET clause of an ALTER statement."""
            exprs = self.expressions(expression, flat=True)
            exprs = f" {exprs}" if exprs else ""
            file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
            file_format = f" STAGE_FILE_FORMAT = ({file_format})" if file_format else ""
            copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
            copy_options = f" STAGE_COPY_OPTIONS = ({copy_options})" if copy_options else ""
            tag = self.expressions(expression, key="tag", flat=True)
            tag = f" TAG {tag}" if tag else ""

            return f"SET{exprs}{file_format}{copy_options}{tag}"

        def strtotime_sql(self, expression: exp.StrToTime):
            """Render StrToTime as [TRY_]TO_TIMESTAMP(value, format)."""
            safe_prefix = "TRY_" if expression.args.get("safe") else ""
            return self.func(
                f"{safe_prefix}TO_TIMESTAMP", expression.this, self.format_time(expression)
            )

        def timestampsub_sql(self, expression: exp.TimestampSub):
            """Render a timestamp subtraction as an addition of the negated amount."""
            return self.sql(
                exp.TimestampAdd(
                    this=expression.this,
                    expression=expression.expression * -1,
                    unit=expression.unit,
                )
            )

        def jsonextract_sql(self, expression: exp.JSONExtract):
            """Render JSON extraction as GET_PATH, parsing string inputs first."""
            this = expression.this

            # JSON strings are valid coming from other dialects such as BQ
            return self.func(
                "GET_PATH",
                exp.ParseJSON(this=this) if this.is_string else this,
                expression.expression,
            )
class Snowflake(sqlglot.dialects.dialect.Dialect):
class Snowflake(Dialect):
    """The Snowflake SQL dialect."""

    # https://docs.snowflake.com/en/sql-reference/identifiers-syntax
    NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE
    NULL_ORDERING = "nulls_are_large"
    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
    SUPPORTS_USER_DEFINED_TYPES = False
    SUPPORTS_SEMI_ANTI_JOIN = False
    PREFER_CTE_ALIAS_COLUMN = True
    TABLESAMPLE_SIZE_IS_PERCENT = True
    COPY_PARAMS_ARE_CSV = False
    ARRAY_AGG_INCLUDES_NULLS = None

    # Maps Snowflake time-format tokens to python strftime-style tokens.
    # NOTE(review): the lowercase variants "dd" -> "%-d" and "dy" -> "%w" differ
    # from their uppercase counterparts ("DD" -> "%d", "DY" -> "%a") — confirm
    # this asymmetry is intended
    TIME_MAPPING = {
        "YYYY": "%Y",
        "yyyy": "%Y",
        "YY": "%y",
        "yy": "%y",
        "MMMM": "%B",
        "mmmm": "%B",
        "MON": "%b",
        "mon": "%b",
        "MM": "%m",
        "mm": "%m",
        "DD": "%d",
        "dd": "%-d",
        "DY": "%a",
        "dy": "%w",
        "HH24": "%H",
        "hh24": "%H",
        "HH12": "%I",
        "hh12": "%I",
        "MI": "%M",
        "mi": "%M",
        "SS": "%S",
        "ss": "%S",
        "FF": "%f",
        "ff": "%f",
        "FF6": "%f",
        "ff6": "%f",
    }
 334
 335    def quote_identifier(self, expression: E, identify: bool = True) -> E:
 336        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
 337        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
 338        if (
 339            isinstance(expression, exp.Identifier)
 340            and isinstance(expression.parent, exp.Table)
 341            and expression.name.lower() == "dual"
 342        ):
 343            return expression  # type: ignore
 344
 345        return super().quote_identifier(expression, identify=identify)
 346
    class Parser(parser.Parser):
        """Parses Snowflake SQL text into a sqlglot expression tree."""

        IDENTIFY_PIVOT_STRINGS = True
        DEFAULT_SAMPLING_METHOD = "BERNOULLI"
        # In Snowflake, `:` extracts from VARIANT values (e.g. col:field)
        COLON_IS_VARIANT_EXTRACT = True

        ID_VAR_TOKENS = {
            *parser.Parser.ID_VAR_TOKENS,
            TokenType.MATCH_CONDITION,
        }

        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
        TABLE_ALIAS_TOKENS.discard(TokenType.MATCH_CONDITION)

        # Builders that map Snowflake function calls onto sqlglot expressions
        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list,
            "ARRAY_CONSTRUCT": lambda args: exp.Array(expressions=args),
            # ARRAY_CONTAINS takes (needle, haystack); ArrayContains stores them reversed
            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
                this=seq_get(args, 1), expression=seq_get(args, 0)
            ),
            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
                # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
                start=seq_get(args, 0),
                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
                step=seq_get(args, 2),
            ),
            "BITXOR": binary_from_function(exp.BitwiseXor),
            "BIT_XOR": binary_from_function(exp.BitwiseXor),
            "BOOLXOR": binary_from_function(exp.Xor),
            "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
            "DATE_TRUNC": _date_trunc_to_time,
            "DATEADD": _build_date_time_add(exp.DateAdd),
            "DATEDIFF": _build_datediff,
            "DIV0": _build_if_from_div0,
            "EDITDISTANCE": lambda args: exp.Levenshtein(
                this=seq_get(args, 0), expression=seq_get(args, 1), max_dist=seq_get(args, 2)
            ),
            "FLATTEN": exp.Explode.from_arg_list,
            "GET_PATH": lambda args, dialect: exp.JSONExtract(
                this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
            ),
            "IFF": exp.If.from_arg_list,
            "LAST_DAY": lambda args: exp.LastDay(
                this=seq_get(args, 0), unit=map_date_part(seq_get(args, 1))
            ),
            "LEN": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "LENGTH": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "LISTAGG": exp.GroupConcat.from_arg_list,
            "NULLIFZERO": _build_if_from_nullifzero,
            "OBJECT_CONSTRUCT": _build_object_construct,
            "REGEXP_EXTRACT_ALL": _build_regexp_extract(exp.RegexpExtractAll),
            "REGEXP_REPLACE": _build_regexp_replace,
            "REGEXP_SUBSTR": _build_regexp_extract(exp.RegexpExtract),
            "REGEXP_SUBSTR_ALL": _build_regexp_extract(exp.RegexpExtractAll),
            "RLIKE": exp.RegexpLike.from_arg_list,
            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
            "TIMEADD": _build_date_time_add(exp.TimeAdd),
            "TIMEDIFF": _build_datediff,
            "TIMESTAMPADD": _build_date_time_add(exp.DateAdd),
            "TIMESTAMPDIFF": _build_datediff,
            "TIMESTAMPFROMPARTS": build_timestamp_from_parts,
            "TIMESTAMP_FROM_PARTS": build_timestamp_from_parts,
            "TIMESTAMPNTZFROMPARTS": build_timestamp_from_parts,
            "TIMESTAMP_NTZ_FROM_PARTS": build_timestamp_from_parts,
            "TRY_PARSE_JSON": lambda args: exp.ParseJSON(this=seq_get(args, 0), safe=True),
            "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
            "TRY_TO_TIMESTAMP": _build_datetime(
                "TRY_TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP, safe=True
            ),
            "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
            "TO_NUMBER": lambda args: exp.ToNumber(
                this=seq_get(args, 0),
                format=seq_get(args, 1),
                precision=seq_get(args, 2),
                scale=seq_get(args, 3),
            ),
            "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
            "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
            "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
            "TO_VARCHAR": exp.ToChar.from_arg_list,
            "ZEROIFNULL": _build_if_from_zeroifnull,
        }
 431
        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "DATE_PART": lambda self: self._parse_date_part(),
            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
        }
        # TRIM is parsed as a regular function in Snowflake
        FUNCTION_PARSERS.pop("TRIM")

        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}

        RANGE_PARSERS = {
            **parser.Parser.RANGE_PARSERS,
            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
        }

        ALTER_PARSERS = {
            **parser.Parser.ALTER_PARSERS,
            "UNSET": lambda self: self.expression(
                exp.Set,
                tag=self._match_text_seq("TAG"),
                expressions=self._parse_csv(self._parse_id_var),
                unset=True,
            ),
        }

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.SHOW: lambda self: self._parse_show(),
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "LOCATION": lambda self: self._parse_location_property(),
        }

        TYPE_CONVERTERS = {
            # https://docs.snowflake.com/en/sql-reference/data-types-numeric#number
            exp.DataType.Type.DECIMAL: build_default_decimal_type(precision=38, scale=0),
        }

        # Maps the object kind in `SHOW [TERSE] <kind> ...` to its parser
        SHOW_PARSERS = {
            "SCHEMAS": _show_parser("SCHEMAS"),
            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
            "OBJECTS": _show_parser("OBJECTS"),
            "TERSE OBJECTS": _show_parser("OBJECTS"),
            "TABLES": _show_parser("TABLES"),
            "TERSE TABLES": _show_parser("TABLES"),
            "VIEWS": _show_parser("VIEWS"),
            "TERSE VIEWS": _show_parser("VIEWS"),
            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "SEQUENCES": _show_parser("SEQUENCES"),
            "TERSE SEQUENCES": _show_parser("SEQUENCES"),
            "COLUMNS": _show_parser("COLUMNS"),
            "USERS": _show_parser("USERS"),
            "TERSE USERS": _show_parser("USERS"),
        }

        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            "WITH": lambda self: self._parse_with_constraint(),
            "MASKING": lambda self: self._parse_with_constraint(),
            "PROJECTION": lambda self: self._parse_with_constraint(),
            "TAG": lambda self: self._parse_with_constraint(),
        }

        STAGED_FILE_SINGLE_TOKENS = {
            TokenType.DOT,
            TokenType.MOD,
            TokenType.SLASH,
        }

        # Output columns produced by Snowflake's FLATTEN table function
        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]

        SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}

        # CREATE kinds whose name is an identifier, not a table (see _parse_create)
        NON_TABLE_CREATABLES = {"STORAGE INTEGRATION", "TAG", "WAREHOUSE", "STREAMLIT"}

        LAMBDAS = {
            **parser.Parser.LAMBDAS,
            TokenType.ARROW: lambda self, expressions: self.expression(
                exp.Lambda,
                this=self._replace_lambda(
                    self._parse_assignment(),
                    expressions,
                ),
                # Typed lambda params are parsed as casts; strip the type here
                expressions=[e.this if isinstance(e, exp.Cast) else e for e in expressions],
            ),
        }
 525
 526        def _negate_range(
 527            self, this: t.Optional[exp.Expression] = None
 528        ) -> t.Optional[exp.Expression]:
 529            if not this:
 530                return this
 531
 532            query = this.args.get("query")
 533            if isinstance(this, exp.In) and isinstance(query, exp.Query):
 534                # Snowflake treats `value NOT IN (subquery)` as `VALUE <> ALL (subquery)`, so
 535                # we do this conversion here to avoid parsing it into `NOT value IN (subquery)`
 536                # which can produce different results (most likely a SnowFlake bug).
 537                #
 538                # https://docs.snowflake.com/en/sql-reference/functions/in
 539                # Context: https://github.com/tobymao/sqlglot/issues/3890
 540                return self.expression(
 541                    exp.NEQ, this=this.this, expression=exp.All(this=query.unnest())
 542                )
 543
 544            return self.expression(exp.Not, this=this)
 545
        def _parse_with_constraint(self) -> t.Optional[exp.Expression]:
            """Parse a [WITH] MASKING POLICY / PROJECTION POLICY / TAG column constraint.

            Returns None when none of the three constraint forms matches.
            """
            # WITH is optional; if the trigger keyword wasn't WITH, back up so the
            # _match_text_seq calls below can re-consume it
            if self._prev.token_type != TokenType.WITH:
                self._retreat(self._index - 1)

            if self._match_text_seq("MASKING", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.MaskingPolicyColumnConstraint,
                    # Qualified policy names are stored as dotted identifiers
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                    expressions=self._match(TokenType.USING)
                    and self._parse_wrapped_csv(self._parse_id_var),
                )
            if self._match_text_seq("PROJECTION", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.ProjectionPolicyColumnConstraint,
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                )
            if self._match(TokenType.TAG):
                return self.expression(
                    exp.TagColumnConstraint,
                    expressions=self._parse_wrapped_csv(self._parse_property),
                )

            return None
 571
 572        def _parse_create(self) -> exp.Create | exp.Command:
 573            expression = super()._parse_create()
 574            if isinstance(expression, exp.Create) and expression.kind in self.NON_TABLE_CREATABLES:
 575                # Replace the Table node with the enclosed Identifier
 576                expression.this.replace(expression.this.this)
 577
 578            return expression
 579
 580        # https://docs.snowflake.com/en/sql-reference/functions/date_part.html
 581        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
 582        def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
 583            this = self._parse_var() or self._parse_type()
 584
 585            if not this:
 586                return None
 587
 588            self._match(TokenType.COMMA)
 589            expression = self._parse_bitwise()
 590            this = map_date_part(this)
 591            name = this.name.upper()
 592
 593            if name.startswith("EPOCH"):
 594                if name == "EPOCH_MILLISECOND":
 595                    scale = 10**3
 596                elif name == "EPOCH_MICROSECOND":
 597                    scale = 10**6
 598                elif name == "EPOCH_NANOSECOND":
 599                    scale = 10**9
 600                else:
 601                    scale = None
 602
 603                ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
 604                to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)
 605
 606                if scale:
 607                    to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))
 608
 609                return to_unix
 610
 611            return self.expression(exp.Extract, this=this, expression=expression)
 612
 613        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
 614            if is_map:
 615                # Keys are strings in Snowflake's objects, see also:
 616                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
 617                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
 618                return self._parse_slice(self._parse_string())
 619
 620            return self._parse_slice(self._parse_alias(self._parse_assignment(), explicit=True))
 621
 622        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
 623            lateral = super()._parse_lateral()
 624            if not lateral:
 625                return lateral
 626
 627            if isinstance(lateral.this, exp.Explode):
 628                table_alias = lateral.args.get("alias")
 629                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
 630                if table_alias and not table_alias.args.get("columns"):
 631                    table_alias.set("columns", columns)
 632                elif not table_alias:
 633                    exp.alias_(lateral, "_flattened", table=columns, copy=False)
 634
 635            return lateral
 636
        def _parse_table_parts(
            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
        ) -> exp.Table:
            """Parse a table reference, including Snowflake staged-file locations.

            https://docs.snowflake.com/en/user-guide/querying-stage
            """
            if self._match(TokenType.STRING, advance=False):
                table = self._parse_string()
            elif self._match_text_seq("@", advance=False):
                # A stage reference such as @my_stage/path
                table = self._parse_location_path()
            else:
                table = None

            if table:
                file_format = None
                pattern = None

                # Staged files accept optional named arguments, e.g.
                # @stage (FILE_FORMAT => 'fmt', PATTERN => '.*csv')
                wrapped = self._match(TokenType.L_PAREN)
                while self._curr and wrapped and not self._match(TokenType.R_PAREN):
                    if self._match_text_seq("FILE_FORMAT", "=>"):
                        # FILE_FORMAT accepts either a string or a format name
                        file_format = self._parse_string() or super()._parse_table_parts(
                            is_db_reference=is_db_reference
                        )
                    elif self._match_text_seq("PATTERN", "=>"):
                        pattern = self._parse_string()
                    else:
                        break

                    self._match(TokenType.COMMA)

                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
            else:
                table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)

            return table
 670
 671        def _parse_id_var(
 672            self,
 673            any_token: bool = True,
 674            tokens: t.Optional[t.Collection[TokenType]] = None,
 675        ) -> t.Optional[exp.Expression]:
 676            if self._match_text_seq("IDENTIFIER", "("):
 677                identifier = (
 678                    super()._parse_id_var(any_token=any_token, tokens=tokens)
 679                    or self._parse_string()
 680                )
 681                self._match_r_paren()
 682                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])
 683
 684            return super()._parse_id_var(any_token=any_token, tokens=tokens)
 685
        def _parse_show_snowflake(self, this: str) -> exp.Show:
            """Parse the clauses of a SHOW <this> command into an exp.Show node."""
            scope = None
            scope_kind = None

            # will identify SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
            # which is syntactically valid but has no effect on the output
            terse = self._tokens[self._index - 2].text.upper() == "TERSE"

            history = self._match_text_seq("HISTORY")

            like = self._parse_string() if self._match(TokenType.LIKE) else None

            if self._match(TokenType.IN):
                if self._match_text_seq("ACCOUNT"):
                    scope_kind = "ACCOUNT"
                elif self._match_set(self.DB_CREATABLES):
                    scope_kind = self._prev.text.upper()
                    if self._curr:
                        scope = self._parse_table_parts()
                elif self._curr:
                    # No explicit kind given: infer schema vs. table scope from
                    # the SHOW target
                    scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
                    scope = self._parse_table_parts()

            return self.expression(
                exp.Show,
                **{
                    "terse": terse,
                    "this": this,
                    "history": history,
                    "like": like,
                    "scope": scope,
                    "scope_kind": scope_kind,
                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
                    "limit": self._parse_limit(),
                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
                },
            )
 723
 724        def _parse_location_property(self) -> exp.LocationProperty:
 725            self._match(TokenType.EQ)
 726            return self.expression(exp.LocationProperty, this=self._parse_location_path())
 727
 728        def _parse_file_location(self) -> t.Optional[exp.Expression]:
 729            # Parse either a subquery or a staged file
 730            return (
 731                self._parse_select(table=True, parse_subquery_alias=False)
 732                if self._match(TokenType.L_PAREN, advance=False)
 733                else self._parse_table_parts()
 734            )
 735
        def _parse_location_path(self) -> exp.Var:
            """Consume a stage/location path (e.g. @stage/some/path) into one Var."""
            parts = [self._advance_any(ignore_reserved=True)]

            # We avoid consuming a comma token because external tables like @foo and @bar
            # can be joined in a query with a comma separator, as well as closing paren
            # in case of subqueries
            while self._is_connected() and not self._match_set(
                (TokenType.COMMA, TokenType.L_PAREN, TokenType.R_PAREN), advance=False
            ):
                parts.append(self._advance_any(ignore_reserved=True))

            return exp.var("".join(part.text for part in parts if part))
 748
 749        def _parse_lambda_arg(self) -> t.Optional[exp.Expression]:
 750            this = super()._parse_lambda_arg()
 751
 752            if not this:
 753                return this
 754
 755            typ = self._parse_types()
 756
 757            if typ:
 758                return self.expression(exp.Cast, this=this, to=typ)
 759
 760            return this
 761
    class Tokenizer(tokens.Tokenizer):
        # Both backslash and a doubled single quote escape inside strings
        STRING_ESCAPES = ["\\", "'"]
        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
        # $$ ... $$ delimits raw (dollar-quoted) strings
        RAW_STRINGS = ["$$"]
        COMMENTS = ["--", "//", ("/*", "*/")]
        NESTED_COMMENTS = False

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "BYTEINT": TokenType.INT,
            "CHAR VARYING": TokenType.VARCHAR,
            "CHARACTER VARYING": TokenType.VARCHAR,
            "EXCLUDE": TokenType.EXCEPT,
            "ILIKE ANY": TokenType.ILIKE_ANY,
            "LIKE ANY": TokenType.LIKE_ANY,
            "MATCH_CONDITION": TokenType.MATCH_CONDITION,
            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
            "MINUS": TokenType.EXCEPT,
            "NCHAR VARYING": TokenType.VARCHAR,
            "PUT": TokenType.COMMAND,
            "REMOVE": TokenType.COMMAND,
            "RM": TokenType.COMMAND,
            "SAMPLE": TokenType.TABLE_SAMPLE,
            "SQL_DOUBLE": TokenType.DOUBLE,
            "SQL_VARCHAR": TokenType.VARCHAR,
            "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
            "TAG": TokenType.TAG,
            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
            "TOP": TokenType.TOP,
            "WAREHOUSE": TokenType.WAREHOUSE,
            "STREAMLIT": TokenType.STREAMLIT,
        }
        # Drop the hint-comment opener so /*+ ... */ is treated like a plain comment
        KEYWORDS.pop("/*+")

        SINGLE_TOKENS = {
            **tokens.Tokenizer.SINGLE_TOKENS,
            "$": TokenType.PARAMETER,
        }

        VAR_SINGLE_TOKENS = {"$"}

        # SHOW is fully parsed (see Parser._parse_show_snowflake), not treated
        # as an opaque command
        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
 804
    class Generator(generator.Generator):
        PARAMETER_TOKEN = "$"
        MATCHED_BY_SOURCE = False
        SINGLE_STRING_INTERVAL = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        QUERY_HINTS = False
        AGGREGATE_FILTER_SUPPORTED = False
        SUPPORTS_TABLE_COPY = False
        COLLATE_IS_FUNC = True
        LIMIT_ONLY_LITERALS = True
        JSON_KEY_VALUE_PAIR_SEP = ","
        INSERT_OVERWRITE = " OVERWRITE INTO"
        STRUCT_DELIMITER = ("(", ")")
        COPY_PARAMS_ARE_WRAPPED = False
        COPY_PARAMS_EQ_REQUIRED = True
        STAR_EXCEPT = "EXCLUDE"
        SUPPORTS_EXPLODING_PROJECTIONS = False
        ARRAY_CONCAT_IS_VAR_LEN = False
        SUPPORTS_CONVERT_TIMEZONE = True
        EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
        SUPPORTS_MEDIAN = True
        ARRAY_SIZE_NAME = "ARRAY_SIZE"

        # Maps sqlglot expression nodes to their Snowflake SQL renderings
        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
            exp.ArgMax: rename_func("MAX_BY"),
            exp.ArgMin: rename_func("MIN_BY"),
            exp.Array: inline_array_sql,
            exp.ArrayConcat: lambda self, e: self.arrayconcat_sql(e, name="ARRAY_CAT"),
            # Snowflake's ARRAY_CONTAINS takes (value, array), i.e. reversed args
            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
            exp.AtTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
            ),
            exp.BitwiseXor: rename_func("BITXOR"),
            exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DatetimeAdd: date_delta_sql("TIMESTAMPADD"),
            exp.DatetimeDiff: timestampdiff_sql,
            exp.DateStrToDate: datestrtodate_sql,
            exp.DayOfMonth: rename_func("DAYOFMONTH"),
            exp.DayOfWeek: rename_func("DAYOFWEEK"),
            exp.DayOfYear: rename_func("DAYOFYEAR"),
            exp.Explode: rename_func("FLATTEN"),
            exp.Extract: rename_func("DATE_PART"),
            exp.FromTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
            ),
            # GenerateSeries is inclusive while ARRAY_GENERATE_RANGE's end is
            # exclusive, hence the + 1
            exp.GenerateSeries: lambda self, e: self.func(
                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
            ),
            exp.GroupConcat: rename_func("LISTAGG"),
            exp.If: if_sql(name="IFF", false_value="NULL"),
            exp.JSONExtractArray: _json_extract_value_array_sql,
            exp.JSONExtractScalar: lambda self, e: self.func(
                "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
            ),
            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
            exp.JSONPathRoot: lambda *_: "",
            exp.JSONValueArray: _json_extract_value_array_sql,
            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
            exp.LogicalOr: rename_func("BOOLOR_AGG"),
            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.ParseJSON: lambda self, e: self.func(
                "TRY_PARSE_JSON" if e.args.get("safe") else "PARSE_JSON", e.this
            ),
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.PercentileCont: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.PercentileDisc: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
            exp.RegexpExtract: _regexpextract_sql,
            exp.RegexpExtractAll: _regexpextract_sql,
            exp.RegexpILike: _regexpilike_sql,
            exp.Rand: rename_func("RANDOM"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_distinct_on,
                    transforms.explode_to_unnest(),
                    transforms.eliminate_semi_and_anti_joins,
                    _unnest_generate_date_array,
                ]
            ),
            exp.SafeDivide: lambda self, e: no_safe_divide_sql(self, e, "IFF"),
            exp.SHA: rename_func("SHA1"),
            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
            exp.StartsWith: rename_func("STARTSWITH"),
            exp.StrPosition: lambda self, e: self.func(
                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
            ),
            exp.StrToDate: lambda self, e: self.func("DATE", e.this, self.format_time(e)),
            exp.Stuff: rename_func("INSERT"),
            exp.TimeAdd: date_delta_sql("TIMEADD"),
            exp.Timestamp: no_timestamp_sql,
            exp.TimestampAdd: date_delta_sql("TIMESTAMPADD"),
            exp.TimestampDiff: lambda self, e: self.func(
                "TIMESTAMPDIFF", e.unit, e.expression, e.this
            ),
            exp.TimestampTrunc: timestamptrunc_sql(),
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeToStr: lambda self, e: self.func(
                "TO_CHAR", exp.cast(e.this, exp.DataType.Type.TIMESTAMP), self.format_time(e)
            ),
            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
            exp.ToArray: rename_func("TO_ARRAY"),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.ToDouble: rename_func("TO_DOUBLE"),
            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.TsOrDsToDate: lambda self, e: self.func(
                "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
            ),
            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
            exp.Uuid: rename_func("UUID_STRING"),
            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
            exp.Xor: rename_func("BOOLXOR"),
            exp.Levenshtein: unsupported_args("ins_cost", "del_cost", "sub_cost")(
                rename_func("EDITDISTANCE")
            ),
        }

        # JSON path components that can be rendered natively in Snowflake
        SUPPORTED_JSON_PATH_PARTS = {
            exp.JSONPathKey,
            exp.JSONPathRoot,
            exp.JSONPathSubscript,
        }

        # Snowflake models nested/struct types as the semi-structured OBJECT type
        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.NESTED: "OBJECT",
            exp.DataType.Type.STRUCT: "OBJECT",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        # Constructs that can't appear inside a VALUES table (see values_sql)
        UNSUPPORTED_VALUES_EXPRESSIONS = {
            exp.Map,
            exp.StarMap,
            exp.Struct,
            exp.VarMap,
        }
 958
        def with_properties(self, properties: exp.Properties) -> str:
            # Snowflake properties are rendered space-separated without wrapping parens
            return self.properties(properties, wrapped=False, prefix=self.sep(""), sep=" ")
 961
 962        def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
 963            if expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS):
 964                values_as_table = False
 965
 966            return super().values_sql(expression, values_as_table=values_as_table)
 967
 968        def datatype_sql(self, expression: exp.DataType) -> str:
 969            expressions = expression.expressions
 970            if (
 971                expressions
 972                and expression.is_type(*exp.DataType.STRUCT_TYPES)
 973                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
 974            ):
 975                # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
 976                return "OBJECT"
 977
 978            return super().datatype_sql(expression)
 979
 980        def tonumber_sql(self, expression: exp.ToNumber) -> str:
 981            return self.func(
 982                "TO_NUMBER",
 983                expression.this,
 984                expression.args.get("format"),
 985                expression.args.get("precision"),
 986                expression.args.get("scale"),
 987            )
 988
        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
            """Render TIMESTAMP_FROM_PARTS, folding milliseconds into nanoseconds.

            Snowflake's function takes a nanosecond argument rather than a
            millisecond one, so `milli` is popped and rewritten as `nano`.
            """
            milli = expression.args.get("milli")
            if milli is not None:
                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
                expression.set("nano", milli_to_nano)

            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
 996
 997        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
 998            if expression.is_type(exp.DataType.Type.GEOGRAPHY):
 999                return self.func("TO_GEOGRAPHY", expression.this)
1000            if expression.is_type(exp.DataType.Type.GEOMETRY):
1001                return self.func("TO_GEOMETRY", expression.this)
1002
1003            return super().cast_sql(expression, safe_prefix=safe_prefix)
1004
        def trycast_sql(self, expression: exp.TryCast) -> str:
            """Render TRY_CAST, falling back to a plain CAST for non-string input."""
            value = expression.this

            if value.type is None:
                # Local import; presumably deferred to avoid an import cycle — TODO confirm
                from sqlglot.optimizer.annotate_types import annotate_types

                value = annotate_types(value)

            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
                return super().trycast_sql(expression)

            # TRY_CAST only works for string values in Snowflake
            return self.cast_sql(expression)
1018
1019        def log_sql(self, expression: exp.Log) -> str:
1020            if not expression.expression:
1021                return self.func("LN", expression.this)
1022
1023            return super().log_sql(expression)
1024
        def unnest_sql(self, expression: exp.Unnest) -> str:
            """Render UNNEST as Snowflake's TABLE(FLATTEN(INPUT => ...)).

            FLATTEN exposes a fixed set of output columns, so the table alias is
            padded with identifiers for each of them, slotting any caller-supplied
            value column and/or offset alias into the right positions.
            """
            unnest_alias = expression.args.get("alias")
            offset = expression.args.get("offset")

            columns = [
                exp.to_identifier("seq"),
                exp.to_identifier("key"),
                exp.to_identifier("path"),
                # WITH OFFSET maps onto FLATTEN's INDEX column
                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
                # Keep the first user-supplied column name for the VALUE column
                seq_get(unnest_alias.columns if unnest_alias else [], 0)
                or exp.to_identifier("value"),
                exp.to_identifier("this"),
            ]

            if unnest_alias:
                unnest_alias.set("columns", columns)
            else:
                unnest_alias = exp.TableAlias(this="_u", columns=columns)

            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
            alias = self.sql(unnest_alias)
            alias = f" AS {alias}" if alias else ""
            return f"{explode}{alias}"
1048
1049        def show_sql(self, expression: exp.Show) -> str:
1050            terse = "TERSE " if expression.args.get("terse") else ""
1051            history = " HISTORY" if expression.args.get("history") else ""
1052            like = self.sql(expression, "like")
1053            like = f" LIKE {like}" if like else ""
1054
1055            scope = self.sql(expression, "scope")
1056            scope = f" {scope}" if scope else ""
1057
1058            scope_kind = self.sql(expression, "scope_kind")
1059            if scope_kind:
1060                scope_kind = f" IN {scope_kind}"
1061
1062            starts_with = self.sql(expression, "starts_with")
1063            if starts_with:
1064                starts_with = f" STARTS WITH {starts_with}"
1065
1066            limit = self.sql(expression, "limit")
1067
1068            from_ = self.sql(expression, "from")
1069            if from_:
1070                from_ = f" FROM {from_}"
1071
1072            return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"
1073
1074        def describe_sql(self, expression: exp.Describe) -> str:
1075            # Default to table if kind is unknown
1076            kind_value = expression.args.get("kind") or "TABLE"
1077            kind = f" {kind_value}" if kind_value else ""
1078            this = f" {self.sql(expression, 'this')}"
1079            expressions = self.expressions(expression, flat=True)
1080            expressions = f" {expressions}" if expressions else ""
1081            return f"DESCRIBE{kind}{this}{expressions}"
1082
1083        def generatedasidentitycolumnconstraint_sql(
1084            self, expression: exp.GeneratedAsIdentityColumnConstraint
1085        ) -> str:
1086            start = expression.args.get("start")
1087            start = f" START {start}" if start else ""
1088            increment = expression.args.get("increment")
1089            increment = f" INCREMENT {increment}" if increment else ""
1090            return f"AUTOINCREMENT{start}{increment}"
1091
1092        def cluster_sql(self, expression: exp.Cluster) -> str:
1093            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"
1094
1095        def struct_sql(self, expression: exp.Struct) -> str:
1096            keys = []
1097            values = []
1098
1099            for i, e in enumerate(expression.expressions):
1100                if isinstance(e, exp.PropertyEQ):
1101                    keys.append(
1102                        exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this
1103                    )
1104                    values.append(e.expression)
1105                else:
1106                    keys.append(exp.Literal.string(f"_{i}"))
1107                    values.append(e)
1108
1109            return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))
1110
        @unsupported_args("weight", "accuracy")
        def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
            # Snowflake's APPROX_PERCENTILE only takes (value, quantile); the
            # decorator warns about args that can't be transpiled
            return self.func("APPROX_PERCENTILE", expression.this, expression.args.get("quantile"))
1114
1115        def alterset_sql(self, expression: exp.AlterSet) -> str:
1116            exprs = self.expressions(expression, flat=True)
1117            exprs = f" {exprs}" if exprs else ""
1118            file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
1119            file_format = f" STAGE_FILE_FORMAT = ({file_format})" if file_format else ""
1120            copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
1121            copy_options = f" STAGE_COPY_OPTIONS = ({copy_options})" if copy_options else ""
1122            tag = self.expressions(expression, key="tag", flat=True)
1123            tag = f" TAG {tag}" if tag else ""
1124
1125            return f"SET{exprs}{file_format}{copy_options}{tag}"
1126
1127        def strtotime_sql(self, expression: exp.StrToTime):
1128            safe_prefix = "TRY_" if expression.args.get("safe") else ""
1129            return self.func(
1130                f"{safe_prefix}TO_TIMESTAMP", expression.this, self.format_time(expression)
1131            )
1132
1133        def timestampsub_sql(self, expression: exp.TimestampSub):
1134            return self.sql(
1135                exp.TimestampAdd(
1136                    this=expression.this,
1137                    expression=expression.expression * -1,
1138                    unit=expression.unit,
1139                )
1140            )
1141
1142        def jsonextract_sql(self, expression: exp.JSONExtract):
1143            this = expression.this
1144
1145            # JSON strings are valid coming from other dialects such as BQ
1146            return self.func(
1147                "GET_PATH",
1148                exp.ParseJSON(this=this) if this.is_string else this,
1149                expression.expression,
1150            )
NORMALIZATION_STRATEGY = <NormalizationStrategy.UPPERCASE: 'UPPERCASE'>

Specifies the strategy according to which identifiers should be normalized.

NULL_ORDERING = 'nulls_are_large'

Default NULL ordering method to use if not explicitly set. Possible values: "nulls_are_small", "nulls_are_large", "nulls_are_last"

TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
SUPPORTS_USER_DEFINED_TYPES = False

Whether user-defined data types are supported.

SUPPORTS_SEMI_ANTI_JOIN = False

Whether SEMI or ANTI joins are supported.

PREFER_CTE_ALIAS_COLUMN = True

Some dialects, such as Snowflake, allow you to reference a CTE column alias in the HAVING clause of the CTE. This flag will cause the CTE alias columns to override any projection aliases in the subquery.

For example, WITH y(c) AS ( SELECT SUM(a) FROM (SELECT 1 a) AS x HAVING c > 0 ) SELECT c FROM y;

will be rewritten as

WITH y(c) AS (
    SELECT SUM(a) AS c FROM (SELECT 1 AS a) AS x HAVING c > 0
) SELECT c FROM y;
TABLESAMPLE_SIZE_IS_PERCENT = True

Whether a size in the table sample clause represents percentage.

COPY_PARAMS_ARE_CSV = False

Whether COPY statement parameters are separated by comma or whitespace.

ARRAY_AGG_INCLUDES_NULLS: Optional[bool] = None

Whether ArrayAgg needs to filter NULL values.

TIME_MAPPING: Dict[str, str] = {'YYYY': '%Y', 'yyyy': '%Y', 'YY': '%y', 'yy': '%y', 'MMMM': '%B', 'mmmm': '%B', 'MON': '%b', 'mon': '%b', 'MM': '%m', 'mm': '%m', 'DD': '%d', 'dd': '%-d', 'DY': '%a', 'dy': '%w', 'HH24': '%H', 'hh24': '%H', 'HH12': '%I', 'hh12': '%I', 'MI': '%M', 'mi': '%M', 'SS': '%S', 'ss': '%S', 'FF': '%f', 'ff': '%f', 'FF6': '%f', 'ff6': '%f'}

Associates this dialect's time formats with their equivalent Python strftime formats.

def quote_identifier(self, expression: ~E, identify: bool = True) -> ~E:
335    def quote_identifier(self, expression: E, identify: bool = True) -> E:
336        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
337        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
338        if (
339            isinstance(expression, exp.Identifier)
340            and isinstance(expression.parent, exp.Table)
341            and expression.name.lower() == "dual"
342        ):
343            return expression  # type: ignore
344
345        return super().quote_identifier(expression, identify=identify)

Adds quotes to a given identifier.

Arguments:
  • expression: The expression of interest. If it's not an Identifier, this method is a no-op.
  • identify: If set to False, the quotes will only be added if the identifier is deemed "unsafe", with respect to its characters and this dialect's normalization strategy.
SUPPORTS_COLUMN_JOIN_MARKS = False

Whether the old-style outer join (+) syntax is supported.

UNESCAPED_SEQUENCES: Dict[str, str] = {'\\a': '\x07', '\\b': '\x08', '\\f': '\x0c', '\\n': '\n', '\\r': '\r', '\\t': '\t', '\\v': '\x0b', '\\\\': '\\'}

Mapping of an escaped sequence (e.g. the two characters `\n`) to its unescaped version (the literal newline character).

tokenizer_class = <class 'Snowflake.Tokenizer'>
jsonpath_tokenizer_class = <class 'sqlglot.tokens.JSONPathTokenizer'>
parser_class = <class 'Snowflake.Parser'>
generator_class = <class 'Snowflake.Generator'>
TIME_TRIE: Dict = {'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'y': {'y': {'y': {'y': {0: True}}, 0: True}}, 'M': {'M': {'M': {'M': {0: True}}, 0: True}, 'O': {'N': {0: True}}, 'I': {0: True}}, 'm': {'m': {'m': {'m': {0: True}}, 0: True}, 'o': {'n': {0: True}}, 'i': {0: True}}, 'D': {'D': {0: True}, 'Y': {0: True}}, 'd': {'d': {0: True}, 'y': {0: True}}, 'H': {'H': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'h': {'h': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'S': {'S': {0: True}}, 's': {'s': {0: True}}, 'F': {'F': {0: True, '6': {0: True}}}, 'f': {'f': {0: True, '6': {0: True}}}}
FORMAT_TRIE: Dict = {'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'y': {'y': {'y': {'y': {0: True}}, 0: True}}, 'M': {'M': {'M': {'M': {0: True}}, 0: True}, 'O': {'N': {0: True}}, 'I': {0: True}}, 'm': {'m': {'m': {'m': {0: True}}, 0: True}, 'o': {'n': {0: True}}, 'i': {0: True}}, 'D': {'D': {0: True}, 'Y': {0: True}}, 'd': {'d': {0: True}, 'y': {0: True}}, 'H': {'H': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'h': {'h': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'S': {'S': {0: True}}, 's': {'s': {0: True}}, 'F': {'F': {0: True, '6': {0: True}}}, 'f': {'f': {0: True, '6': {0: True}}}}
INVERSE_TIME_MAPPING: Dict[str, str] = {'%Y': 'yyyy', '%y': 'yy', '%B': 'mmmm', '%b': 'mon', '%m': 'mm', '%d': 'DD', '%-d': 'dd', '%a': 'DY', '%w': 'dy', '%H': 'hh24', '%I': 'hh12', '%M': 'mi', '%S': 'ss', '%f': 'ff6'}
INVERSE_TIME_TRIE: Dict = {'%': {'Y': {0: True}, 'y': {0: True}, 'B': {0: True}, 'b': {0: True}, 'm': {0: True}, 'd': {0: True}, '-': {'d': {0: True}}, 'a': {0: True}, 'w': {0: True}, 'H': {0: True}, 'I': {0: True}, 'M': {0: True}, 'S': {0: True}, 'f': {0: True}}}
INVERSE_FORMAT_MAPPING: Dict[str, str] = {}
INVERSE_FORMAT_TRIE: Dict = {}
INVERSE_CREATABLE_KIND_MAPPING: dict[str, str] = {}
ESCAPED_SEQUENCES: Dict[str, str] = {'\x07': '\\a', '\x08': '\\b', '\x0c': '\\f', '\n': '\\n', '\r': '\\r', '\t': '\\t', '\x0b': '\\v', '\\': '\\\\'}
QUOTE_START = "'"
QUOTE_END = "'"
IDENTIFIER_START = '"'
IDENTIFIER_END = '"'
BIT_START: Optional[str] = None
BIT_END: Optional[str] = None
HEX_START: Optional[str] = "x'"
HEX_END: Optional[str] = "'"
BYTE_START: Optional[str] = None
BYTE_END: Optional[str] = None
UNICODE_START: Optional[str] = None
UNICODE_END: Optional[str] = None
class Snowflake.Parser(sqlglot.parser.Parser):
class Parser(parser.Parser):
    """Parser for the Snowflake SQL dialect."""

    # Pivot IN-list strings are treated as identifying names rather than literals.
    IDENTIFY_PIVOT_STRINGS = True
    DEFAULT_SAMPLING_METHOD = "BERNOULLI"
    # In Snowflake, `:` performs semi-structured (VARIANT) path extraction.
    COLON_IS_VARIANT_EXTRACT = True

    ID_VAR_TOKENS = {
        *parser.Parser.ID_VAR_TOKENS,
        TokenType.MATCH_CONDITION,
    }

    TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
    TABLE_ALIAS_TOKENS.discard(TokenType.MATCH_CONDITION)

    # Snowflake-specific function-name -> AST builder overrides.
    FUNCTIONS = {
        **parser.Parser.FUNCTIONS,
        "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list,
        "ARRAY_CONSTRUCT": lambda args: exp.Array(expressions=args),
        # Note: argument order is (value, array) in Snowflake, hence the swap.
        "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
            this=seq_get(args, 1), expression=seq_get(args, 0)
        ),
        "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
            # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
            start=seq_get(args, 0),
            end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
            step=seq_get(args, 2),
        ),
        "BITXOR": binary_from_function(exp.BitwiseXor),
        "BIT_XOR": binary_from_function(exp.BitwiseXor),
        "BOOLXOR": binary_from_function(exp.Xor),
        "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
        "DATE_TRUNC": _date_trunc_to_time,
        "DATEADD": _build_date_time_add(exp.DateAdd),
        "DATEDIFF": _build_datediff,
        "DIV0": _build_if_from_div0,
        "EDITDISTANCE": lambda args: exp.Levenshtein(
            this=seq_get(args, 0), expression=seq_get(args, 1), max_dist=seq_get(args, 2)
        ),
        "FLATTEN": exp.Explode.from_arg_list,
        "GET_PATH": lambda args, dialect: exp.JSONExtract(
            this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
        ),
        "IFF": exp.If.from_arg_list,
        "LAST_DAY": lambda args: exp.LastDay(
            this=seq_get(args, 0), unit=map_date_part(seq_get(args, 1))
        ),
        "LEN": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
        "LENGTH": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
        "LISTAGG": exp.GroupConcat.from_arg_list,
        "NULLIFZERO": _build_if_from_nullifzero,
        "OBJECT_CONSTRUCT": _build_object_construct,
        "REGEXP_EXTRACT_ALL": _build_regexp_extract(exp.RegexpExtractAll),
        "REGEXP_REPLACE": _build_regexp_replace,
        "REGEXP_SUBSTR": _build_regexp_extract(exp.RegexpExtract),
        "REGEXP_SUBSTR_ALL": _build_regexp_extract(exp.RegexpExtractAll),
        "RLIKE": exp.RegexpLike.from_arg_list,
        "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
        "TIMEADD": _build_date_time_add(exp.TimeAdd),
        "TIMEDIFF": _build_datediff,
        "TIMESTAMPADD": _build_date_time_add(exp.DateAdd),
        "TIMESTAMPDIFF": _build_datediff,
        "TIMESTAMPFROMPARTS": build_timestamp_from_parts,
        "TIMESTAMP_FROM_PARTS": build_timestamp_from_parts,
        "TIMESTAMPNTZFROMPARTS": build_timestamp_from_parts,
        "TIMESTAMP_NTZ_FROM_PARTS": build_timestamp_from_parts,
        "TRY_PARSE_JSON": lambda args: exp.ParseJSON(this=seq_get(args, 0), safe=True),
        "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
        "TRY_TO_TIMESTAMP": _build_datetime(
            "TRY_TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP, safe=True
        ),
        "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
        "TO_NUMBER": lambda args: exp.ToNumber(
            this=seq_get(args, 0),
            format=seq_get(args, 1),
            precision=seq_get(args, 2),
            scale=seq_get(args, 3),
        ),
        "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
        "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
        "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
        "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
        "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
        "TO_VARCHAR": exp.ToChar.from_arg_list,
        "ZEROIFNULL": _build_if_from_zeroifnull,
    }

    FUNCTION_PARSERS = {
        **parser.Parser.FUNCTION_PARSERS,
        "DATE_PART": lambda self: self._parse_date_part(),
        "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
    }
    # Snowflake's TRIM doesn't need the special-cased parsing of the base parser.
    FUNCTION_PARSERS.pop("TRIM")

    TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}

    RANGE_PARSERS = {
        **parser.Parser.RANGE_PARSERS,
        TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
        TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
    }

    ALTER_PARSERS = {
        **parser.Parser.ALTER_PARSERS,
        # ALTER ... UNSET [TAG] <id>, <id>, ...
        "UNSET": lambda self: self.expression(
            exp.Set,
            tag=self._match_text_seq("TAG"),
            expressions=self._parse_csv(self._parse_id_var),
            unset=True,
        ),
    }

    STATEMENT_PARSERS = {
        **parser.Parser.STATEMENT_PARSERS,
        TokenType.SHOW: lambda self: self._parse_show(),
    }

    PROPERTY_PARSERS = {
        **parser.Parser.PROPERTY_PARSERS,
        "LOCATION": lambda self: self._parse_location_property(),
    }

    TYPE_CONVERTERS = {
        # https://docs.snowflake.com/en/sql-reference/data-types-numeric#number
        exp.DataType.Type.DECIMAL: build_default_decimal_type(precision=38, scale=0),
    }

    # SHOW <kind> variants; TERSE entries map to the same underlying parser.
    SHOW_PARSERS = {
        "SCHEMAS": _show_parser("SCHEMAS"),
        "TERSE SCHEMAS": _show_parser("SCHEMAS"),
        "OBJECTS": _show_parser("OBJECTS"),
        "TERSE OBJECTS": _show_parser("OBJECTS"),
        "TABLES": _show_parser("TABLES"),
        "TERSE TABLES": _show_parser("TABLES"),
        "VIEWS": _show_parser("VIEWS"),
        "TERSE VIEWS": _show_parser("VIEWS"),
        "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
        "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
        "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
        "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
        "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
        "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
        "SEQUENCES": _show_parser("SEQUENCES"),
        "TERSE SEQUENCES": _show_parser("SEQUENCES"),
        "COLUMNS": _show_parser("COLUMNS"),
        "USERS": _show_parser("USERS"),
        "TERSE USERS": _show_parser("USERS"),
    }

    CONSTRAINT_PARSERS = {
        **parser.Parser.CONSTRAINT_PARSERS,
        # All four keywords funnel into the same WITH-constraint parser, since
        # the leading WITH is optional in Snowflake.
        "WITH": lambda self: self._parse_with_constraint(),
        "MASKING": lambda self: self._parse_with_constraint(),
        "PROJECTION": lambda self: self._parse_with_constraint(),
        "TAG": lambda self: self._parse_with_constraint(),
    }

    # Single-character tokens that may appear inside a staged file path.
    STAGED_FILE_SINGLE_TOKENS = {
        TokenType.DOT,
        TokenType.MOD,
        TokenType.SLASH,
    }

    # Fixed output columns produced by FLATTEN (see _parse_lateral).
    FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]

    # SHOW kinds whose IN scope defaults to SCHEMA rather than TABLE.
    SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}

    # CREATE kinds whose name is a bare identifier, not a table reference.
    NON_TABLE_CREATABLES = {"STORAGE INTEGRATION", "TAG", "WAREHOUSE", "STREAMLIT"}

    LAMBDAS = {
        **parser.Parser.LAMBDAS,
        TokenType.ARROW: lambda self, expressions: self.expression(
            exp.Lambda,
            this=self._replace_lambda(
                self._parse_assignment(),
                expressions,
            ),
            # Strip the Cast produced by a typed lambda arg (see _parse_lambda_arg)
            expressions=[e.this if isinstance(e, exp.Cast) else e for e in expressions],
        ),
    }
525
def _negate_range(
    self, this: t.Optional[exp.Expression] = None
) -> t.Optional[exp.Expression]:
    """Build the negated form of a range expression, special-casing IN (subquery)."""
    if not this:
        return this

    subquery = this.args.get("query")

    if isinstance(this, exp.In) and isinstance(subquery, exp.Query):
        # Snowflake treats `value NOT IN (subquery)` as `VALUE <> ALL (subquery)`, so
        # we do this conversion here to avoid parsing it into `NOT value IN (subquery)`
        # which can produce different results (most likely a SnowFlake bug).
        #
        # https://docs.snowflake.com/en/sql-reference/functions/in
        # Context: https://github.com/tobymao/sqlglot/issues/3890
        all_node = exp.All(this=subquery.unnest())
        return self.expression(exp.NEQ, this=this.this, expression=all_node)

    return self.expression(exp.Not, this=this)
545
def _parse_with_constraint(self) -> t.Optional[exp.Expression]:
    """Parse a WITH column constraint: MASKING POLICY, PROJECTION POLICY or TAG.

    Returns None when none of the three forms matches.
    """
    if self._prev.token_type != TokenType.WITH:
        # The WITH keyword is optional; back up one token so the policy/tag
        # keyword that triggered this parser can be re-matched below.
        self._retreat(self._index - 1)

    if self._match_text_seq("MASKING", "POLICY"):
        policy = self._parse_column()
        return self.expression(
            exp.MaskingPolicyColumnConstraint,
            # Flatten a qualified column reference into a Dot chain
            this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
            # Optional USING (<id>, ...) argument list
            expressions=self._match(TokenType.USING)
            and self._parse_wrapped_csv(self._parse_id_var),
        )
    if self._match_text_seq("PROJECTION", "POLICY"):
        policy = self._parse_column()
        return self.expression(
            exp.ProjectionPolicyColumnConstraint,
            this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
        )
    if self._match(TokenType.TAG):
        return self.expression(
            exp.TagColumnConstraint,
            expressions=self._parse_wrapped_csv(self._parse_property),
        )

    return None
571
def _parse_create(self) -> exp.Create | exp.Command:
    """Parse CREATE, unwrapping the name of non-table creatables.

    For kinds like TAG or WAREHOUSE the name is a bare identifier, so the
    Table node produced by the base parser is replaced by its inner Identifier.
    """
    created = super()._parse_create()

    if isinstance(created, exp.Create) and created.kind in self.NON_TABLE_CREATABLES:
        # Replace the Table node with the enclosed Identifier
        created.this.replace(created.this.this)

    return created
579
# https://docs.snowflake.com/en/sql-reference/functions/date_part.html
# https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
    """Parse the interior of DATE_PART(<part>, <expr>).

    EPOCH* parts are normalized into a TimeToUnix (optionally scaled) node;
    every other part becomes a plain Extract.
    """
    part = self._parse_var() or self._parse_type()
    if not part:
        return None

    self._match(TokenType.COMMA)
    operand = self._parse_bitwise()
    part = map_date_part(part)
    part_name = part.name.upper()

    if part_name.startswith("EPOCH"):
        # Sub-second epoch parts multiply the unix timestamp by a fixed scale.
        scale = {
            "EPOCH_MILLISECOND": 10**3,
            "EPOCH_MICROSECOND": 10**6,
            "EPOCH_NANOSECOND": 10**9,
        }.get(part_name)

        ts = self.expression(exp.Cast, this=operand, to=exp.DataType.build("TIMESTAMP"))
        unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)

        if scale:
            unix = exp.Mul(this=unix, expression=exp.Literal.number(scale))

        return unix

    return self.expression(exp.Extract, this=part, expression=operand)
612
def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
    """Parse one bracketed key/value entry; map keys must be string literals."""
    if not is_map:
        return self._parse_slice(self._parse_alias(self._parse_assignment(), explicit=True))

    # Keys are strings in Snowflake's objects, see also:
    # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
    # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
    return self._parse_slice(self._parse_string())
621
def _parse_lateral(self) -> t.Optional[exp.Lateral]:
    """Parse LATERAL, attaching FLATTEN's fixed output columns to its alias."""
    lateral = super()._parse_lateral()

    if lateral and isinstance(lateral.this, exp.Explode):
        flatten_cols = [exp.to_identifier(name) for name in self.FLATTEN_COLUMNS]
        alias = lateral.args.get("alias")

        if not alias:
            # No alias at all: synthesize one so the columns have a home
            exp.alias_(lateral, "_flattened", table=flatten_cols, copy=False)
        elif not alias.args.get("columns"):
            # Alias present but without explicit columns: fill in the defaults
            alias.set("columns", flatten_cols)

    return lateral
636
def _parse_table_parts(
    self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
) -> exp.Table:
    """Parse table parts, additionally supporting staged-file references.

    https://docs.snowflake.com/en/user-guide/querying-stage
    """
    # A string literal or an @-prefixed path denotes a staged file location
    if self._match(TokenType.STRING, advance=False):
        table = self._parse_string()
    elif self._match_text_seq("@", advance=False):
        table = self._parse_location_path()
    else:
        table = None

    if table:
        file_format = None
        pattern = None

        # Optional parenthesized options: (FILE_FORMAT => ..., PATTERN => ...)
        wrapped = self._match(TokenType.L_PAREN)
        while self._curr and wrapped and not self._match(TokenType.R_PAREN):
            if self._match_text_seq("FILE_FORMAT", "=>"):
                # FILE_FORMAT may be a string or a (schema-qualified) name
                file_format = self._parse_string() or super()._parse_table_parts(
                    is_db_reference=is_db_reference
                )
            elif self._match_text_seq("PATTERN", "=>"):
                pattern = self._parse_string()
            else:
                break

            self._match(TokenType.COMMA)

        table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
    else:
        table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)

    return table
670
def _parse_id_var(
    self,
    any_token: bool = True,
    tokens: t.Optional[t.Collection[TokenType]] = None,
) -> t.Optional[exp.Expression]:
    """Parse an identifier, supporting Snowflake's IDENTIFIER(...) indirection."""
    if not self._match_text_seq("IDENTIFIER", "("):
        return super()._parse_id_var(any_token=any_token, tokens=tokens)

    # IDENTIFIER(...) wraps either a plain identifier or a string
    inner = (
        super()._parse_id_var(any_token=any_token, tokens=tokens)
        or self._parse_string()
    )
    self._match_r_paren()
    return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[inner])
685
def _parse_show_snowflake(self, this: str) -> exp.Show:
    """Parse the remainder of a Snowflake SHOW command for kind `this`."""
    scope = None
    scope_kind = None

    # will identify SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
    # which is syntactically valid but has no effect on the output
    terse = self._tokens[self._index - 2].text.upper() == "TERSE"

    history = self._match_text_seq("HISTORY")

    like = self._parse_string() if self._match(TokenType.LIKE) else None

    # Optional IN clause restricting the scope of the listing
    if self._match(TokenType.IN):
        if self._match_text_seq("ACCOUNT"):
            scope_kind = "ACCOUNT"
        elif self._match_set(self.DB_CREATABLES):
            # Explicit scope kind, e.g. IN DATABASE mydb
            scope_kind = self._prev.text.upper()
            if self._curr:
                scope = self._parse_table_parts()
        elif self._curr:
            # No explicit kind: infer SCHEMA vs TABLE from the SHOW kind
            scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
            scope = self._parse_table_parts()

    return self.expression(
        exp.Show,
        **{
            "terse": terse,
            "this": this,
            "history": history,
            "like": like,
            "scope": scope,
            "scope_kind": scope_kind,
            "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
            "limit": self._parse_limit(),
            "from": self._parse_string() if self._match(TokenType.FROM) else None,
        },
    )
723
def _parse_location_property(self) -> exp.LocationProperty:
    """Parse `LOCATION [=] <path>` into a LocationProperty node."""
    self._match(TokenType.EQ)  # the `=` is optional
    path = self._parse_location_path()
    return self.expression(exp.LocationProperty, this=path)
727
def _parse_file_location(self) -> t.Optional[exp.Expression]:
    """Parse either a parenthesized subquery or a staged file reference."""
    if self._match(TokenType.L_PAREN, advance=False):
        return self._parse_select(table=True, parse_subquery_alias=False)

    return self._parse_table_parts()
735
def _parse_location_path(self) -> exp.Var:
    """Consume tokens forming a stage/location path and return them as a Var."""
    consumed = [self._advance_any(ignore_reserved=True)]

    # We avoid consuming a comma token because external tables like @foo and @bar
    # can be joined in a query with a comma separator, as well as closing paren
    # in case of subqueries
    terminators = (TokenType.COMMA, TokenType.L_PAREN, TokenType.R_PAREN)
    while self._is_connected() and not self._match_set(terminators, advance=False):
        consumed.append(self._advance_any(ignore_reserved=True))

    return exp.var("".join(token.text for token in consumed if token))
748
def _parse_lambda_arg(self) -> t.Optional[exp.Expression]:
    """Parse a lambda argument, folding an optional trailing type into a Cast."""
    arg = super()._parse_lambda_arg()

    if not arg:
        return arg

    # Snowflake lambda args may be typed, e.g. `x INT -> x + 1`
    declared_type = self._parse_types()
    if declared_type:
        return self.expression(exp.Cast, this=arg, to=declared_type)

    return arg

The Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

Arguments:
  • error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
  • error_message_context: The amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
  • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
IDENTIFY_PIVOT_STRINGS = True
DEFAULT_SAMPLING_METHOD = 'BERNOULLI'
COLON_IS_VARIANT_EXTRACT = True
ID_VAR_TOKENS = {<TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.ASOF: 'ASOF'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.DECIMAL64: 'DECIMAL64'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.MODEL: 'MODEL'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.STREAMLIT: 'STREAMLIT'>, <TokenType.UINT: 'UINT'>, <TokenType.FIRST: 'FIRST'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.LIST: 'LIST'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.UUID: 'UUID'>, <TokenType.SOME: 'SOME'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.TABLE: 'TABLE'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.ANTI: 'ANTI'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.OFFSET: 'OFFSET'>, <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.INDEX: 'INDEX'>, <TokenType.NAME: 'NAME'>, <TokenType.SHOW: 'SHOW'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.FINAL: 'FINAL'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.ENUM: 'ENUM'>, <TokenType.FULL: 'FULL'>, <TokenType.LOAD: 'LOAD'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.FALSE: 'FALSE'>, <TokenType.BIT: 'BIT'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.UINT128: 'UINT128'>, <TokenType.INT4RANGE: 
'INT4RANGE'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.SET: 'SET'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.DATE: 'DATE'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.LEFT: 'LEFT'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.APPLY: 'APPLY'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.INT128: 'INT128'>, <TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.VIEW: 'VIEW'>, <TokenType.JSONB: 'JSONB'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.INT: 'INT'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.POINT: 'POINT'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.ROWS: 'ROWS'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.MULTILINESTRING: 'MULTILINESTRING'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.TRUNCATE: 'TRUNCATE'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.NESTED: 'NESTED'>, <TokenType.BINARY: 'BINARY'>, <TokenType.NATURAL: 'NATURAL'>, <TokenType.RENAME: 'RENAME'>, <TokenType.INT256: 'INT256'>, <TokenType.RIGHT: 'RIGHT'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.DATE32: 'DATE32'>, <TokenType.JSON: 'JSON'>, <TokenType.DIV: 'DIV'>, <TokenType.KEEP: 'KEEP'>, <TokenType.OVERWRITE: 'OVERWRITE'>, 
<TokenType.TOP: 'TOP'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.SEMI: 'SEMI'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.DESC: 'DESC'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.TRUE: 'TRUE'>, <TokenType.MATCH_CONDITION: 'MATCH_CONDITION'>, <TokenType.SUPER: 'SUPER'>, <TokenType.MAP: 'MAP'>, <TokenType.NEXT: 'NEXT'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.TEXT: 'TEXT'>, <TokenType.POLYGON: 'POLYGON'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.MONEY: 'MONEY'>, <TokenType.ROW: 'ROW'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.ALL: 'ALL'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.WAREHOUSE: 'WAREHOUSE'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.XML: 'XML'>, <TokenType.DECIMAL32: 'DECIMAL32'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.COPY: 'COPY'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.KILL: 'KILL'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.MERGE: 'MERGE'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.DECIMAL128: 'DECIMAL128'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.CACHE: 'CACHE'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.SEQUENCE: 'SEQUENCE'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.IS: 'IS'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.ROLLUP: 'ROLLUP'>, <TokenType.DECIMAL256: 'DECIMAL256'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.LINESTRING: 'LINESTRING'>, <TokenType.IPV4: 'IPV4'>, <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, <TokenType.INET: 'INET'>, 
<TokenType.RING: 'RING'>, <TokenType.TIME: 'TIME'>, <TokenType.RANGE: 'RANGE'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.FILTER: 'FILTER'>, <TokenType.VAR: 'VAR'>, <TokenType.IPV6: 'IPV6'>, <TokenType.TAG: 'TAG'>, <TokenType.UINT256: 'UINT256'>, <TokenType.CASE: 'CASE'>, <TokenType.TDIGEST: 'TDIGEST'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.YEAR: 'YEAR'>, <TokenType.ANY: 'ANY'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.USE: 'USE'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.DELETE: 'DELETE'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.END: 'END'>, <TokenType.CHAR: 'CHAR'>, <TokenType.IDENTIFIER: 'IDENTIFIER'>, <TokenType.MULTIPOLYGON: 'MULTIPOLYGON'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.CUBE: 'CUBE'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.ASC: 'ASC'>, <TokenType.NULL: 'NULL'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.VECTOR: 'VECTOR'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.UNNEST: 'UNNEST'>}
TABLE_ALIAS_TOKENS = {<TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.DECIMAL64: 'DECIMAL64'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.MODEL: 'MODEL'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.STREAMLIT: 'STREAMLIT'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.UINT: 'UINT'>, <TokenType.FIRST: 'FIRST'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.LIST: 'LIST'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.UUID: 'UUID'>, <TokenType.SOME: 'SOME'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.TABLE: 'TABLE'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.ANTI: 'ANTI'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.INDEX: 'INDEX'>, <TokenType.NAME: 'NAME'>, <TokenType.SHOW: 'SHOW'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.FINAL: 'FINAL'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.ENUM: 'ENUM'>, <TokenType.LOAD: 'LOAD'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.FALSE: 'FALSE'>, <TokenType.BIT: 'BIT'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.UINT128: 'UINT128'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.SET: 'SET'>, 
<TokenType.COMMIT: 'COMMIT'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.DATE: 'DATE'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.INT128: 'INT128'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.VIEW: 'VIEW'>, <TokenType.JSONB: 'JSONB'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.INT: 'INT'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.POINT: 'POINT'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.ROWS: 'ROWS'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.MULTILINESTRING: 'MULTILINESTRING'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.TRUNCATE: 'TRUNCATE'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.NESTED: 'NESTED'>, <TokenType.BINARY: 'BINARY'>, <TokenType.RENAME: 'RENAME'>, <TokenType.INT256: 'INT256'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.DATE32: 'DATE32'>, <TokenType.JSON: 'JSON'>, <TokenType.DIV: 'DIV'>, <TokenType.KEEP: 'KEEP'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.TOP: 'TOP'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.SEMI: 'SEMI'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.DESCRIBE: 'DESCRIBE'>, 
<TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.DESC: 'DESC'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.TRUE: 'TRUE'>, <TokenType.SUPER: 'SUPER'>, <TokenType.MAP: 'MAP'>, <TokenType.NEXT: 'NEXT'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.TEXT: 'TEXT'>, <TokenType.POLYGON: 'POLYGON'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.MONEY: 'MONEY'>, <TokenType.ROW: 'ROW'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.ALL: 'ALL'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.WAREHOUSE: 'WAREHOUSE'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.XML: 'XML'>, <TokenType.DECIMAL32: 'DECIMAL32'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.COPY: 'COPY'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.KILL: 'KILL'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.MERGE: 'MERGE'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.DECIMAL128: 'DECIMAL128'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.CACHE: 'CACHE'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.SEQUENCE: 'SEQUENCE'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.IS: 'IS'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.ROLLUP: 'ROLLUP'>, <TokenType.DECIMAL256: 'DECIMAL256'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.LINESTRING: 'LINESTRING'>, <TokenType.IPV4: 'IPV4'>, <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, <TokenType.INET: 'INET'>, <TokenType.RING: 'RING'>, <TokenType.TIME: 'TIME'>, <TokenType.RANGE: 'RANGE'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.FILTER: 'FILTER'>, <TokenType.VAR: 'VAR'>, <TokenType.IPV6: 'IPV6'>, <TokenType.TAG: 'TAG'>, 
<TokenType.UINT256: 'UINT256'>, <TokenType.CASE: 'CASE'>, <TokenType.TDIGEST: 'TDIGEST'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.YEAR: 'YEAR'>, <TokenType.ANY: 'ANY'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.USE: 'USE'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.DELETE: 'DELETE'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.END: 'END'>, <TokenType.CHAR: 'CHAR'>, <TokenType.IDENTIFIER: 'IDENTIFIER'>, <TokenType.MULTIPOLYGON: 'MULTIPOLYGON'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.CUBE: 'CUBE'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.ASC: 'ASC'>, <TokenType.NULL: 'NULL'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.VECTOR: 'VECTOR'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.UNNEST: 'UNNEST'>}
FUNCTIONS = {'ABS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Abs'>>, 'ADD_MONTHS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AddMonths'>>, 'ANONYMOUS_AGG_FUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnonymousAggFunc'>>, 'ANY_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnyValue'>>, 'APPLY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Apply'>>, 'APPROX_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_COUNT_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'APPROX_TOP_K': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxTopK'>>, 'ARG_MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARGMAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'MAX_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARG_MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARGMIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'MIN_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARRAY': <function Parser.<lambda>>, 'ARRAY_AGG': <function Parser.<lambda>>, 'ARRAY_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAll'>>, 'ARRAY_ANY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAny'>>, 'ARRAY_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CONSTRUCT_COMPACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConstructCompact'>>, 'ARRAY_CONTAINS': <function 
Snowflake.Parser.<lambda>>, 'ARRAY_HAS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContains'>>, 'ARRAY_CONTAINS_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContainsAll'>>, 'ARRAY_HAS_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContainsAll'>>, 'FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_OVERLAPS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayOverlaps'>>, 'ARRAY_SIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_SORT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySort'>>, 'ARRAY_SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySum'>>, 'ARRAY_TO_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayToString'>>, 'ARRAY_JOIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayToString'>>, 'ARRAY_UNION_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUnionAgg'>>, 'ARRAY_UNIQUE_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUniqueAgg'>>, 'AVG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Avg'>>, 'CASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Case'>>, 'CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cast'>>, 'CAST_TO_STR_TYPE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CastToStrType'>>, 'CBRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cbrt'>>, 'CEIL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CEILING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CHR': <function 
Parser.<lambda>>, 'CHAR': <function Parser.<lambda>>, 'COALESCE': <function build_coalesce>, 'IFNULL': <function build_coalesce>, 'NVL': <function build_coalesce>, 'COLLATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Collate'>>, 'COLUMNS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Columns'>>, 'COMBINED_AGG_FUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CombinedAggFunc'>>, 'COMBINED_PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CombinedParameterizedAgg'>>, 'CONCAT': <function Parser.<lambda>>, 'CONCAT_WS': <function Parser.<lambda>>, 'CONNECT_BY_ROOT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ConnectByRoot'>>, 'CONVERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Convert'>>, 'CONVERT_TIMEZONE': <function build_convert_timezone>, 'CORR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Corr'>>, 'COUNT': <function Parser.<lambda>>, 'COUNT_IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'COUNTIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'COVAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CovarPop'>>, 'COVAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CovarSamp'>>, 'CURRENT_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDate'>>, 'CURRENT_DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDatetime'>>, 'CURRENT_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTime'>>, 'CURRENT_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTimestamp'>>, 'CURRENT_USER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentUser'>>, 'DATE': <function _build_datetime.<locals>._builder>, 'DATE_ADD': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.DateAdd'>>, 'DATEDIFF': <function _build_datediff>, 'DATE_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateDiff'>>, 'DATE_FROM_PARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATE_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateStrToDate'>>, 'DATE_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateSub'>>, 'DATE_TO_DATE_STR': <function Parser.<lambda>>, 'DATE_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateToDi'>>, 'DATE_TRUNC': <function _date_trunc_to_time>, 'DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Datetime'>>, 'DATETIME_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeAdd'>>, 'DATETIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeDiff'>>, 'DATETIME_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeSub'>>, 'DATETIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeTrunc'>>, 'DAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Day'>>, 'DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAYOFMONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAY_OF_WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAYOFWEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAYOFWEEK_ISO': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeekIso'>>, 'ISODOW': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeekIso'>>, 'DAY_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DAYOFYEAR': <bound method Func.from_arg_list of 
<class 'sqlglot.expressions.DayOfYear'>>, 'DECODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Decode'>>, 'DI_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DiToDate'>>, 'ENCODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Encode'>>, 'EXISTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Exists'>>, 'EXP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Exp'>>, 'EXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'EXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ExplodeOuter'>>, 'EXPLODING_GENERATE_SERIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ExplodingGenerateSeries'>>, 'EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Extract'>>, 'FIRST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.First'>>, 'FIRST_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FirstValue'>>, 'FLATTEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'FLOOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Floor'>>, 'FROM_BASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase'>>, 'FROM_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase64'>>, 'FROM_ISO8601_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromISO8601Timestamp'>>, 'GAP_FILL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GapFill'>>, 'GENERATE_DATE_ARRAY': <function Parser.<lambda>>, 'GENERATE_SERIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateSeries'>>, 'GENERATE_TIMESTAMP_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateTimestampArray'>>, 'GREATEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Greatest'>>, 'GROUP_CONCAT': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'HEX': <function build_hex>, 'HLL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hll'>>, 'IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'IIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'INITCAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Initcap'>>, 'INLINE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Inline'>>, 'IS_INF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsInf'>>, 'ISINF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsInf'>>, 'IS_NAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'ISNAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'J_S_O_N_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArray'>>, 'J_S_O_N_ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayAgg'>>, 'JSON_ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayContains'>>, 'JSONB_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBContains'>>, 'JSONB_EXISTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExists'>>, 'JSONB_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtract'>>, 'JSONB_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtractScalar'>>, 'J_S_O_N_EXISTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExists'>>, 'JSON_EXTRACT': <function build_extract_json_with_path.<locals>._builder>, 'J_S_O_N_EXTRACT_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExtractArray'>>, 'JSON_EXTRACT_SCALAR': <function build_extract_json_with_path.<locals>._builder>, 'JSON_FORMAT': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.JSONFormat'>>, 'J_S_O_N_OBJECT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObject'>>, 'J_S_O_N_OBJECT_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObjectAgg'>>, 'J_S_O_N_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONTable'>>, 'J_S_O_N_VALUE_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONValueArray'>>, 'LAG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lag'>>, 'LAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Last'>>, 'LAST_DAY': <function Snowflake.Parser.<lambda>>, 'LAST_DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastDay'>>, 'LAST_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastValue'>>, 'LEAD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lead'>>, 'LEAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Least'>>, 'LEFT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Left'>>, 'LENGTH': <function Snowflake.Parser.<lambda>>, 'LEN': <function Snowflake.Parser.<lambda>>, 'LEVENSHTEIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Levenshtein'>>, 'LIST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.List'>>, 'LN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ln'>>, 'LOG': <function build_logarithm>, 'LOGICAL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOLAND_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'LOGICAL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOLOR_AGG': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.LogicalOr'>>, 'LOWER': <function build_lower>, 'LCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'LOWER_HEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LowerHex'>>, 'MD5': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5'>>, 'MD5_DIGEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5Digest'>>, 'MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Map'>>, 'MAP_FROM_ENTRIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MapFromEntries'>>, 'MATCH_AGAINST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MatchAgainst'>>, 'MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Max'>>, 'MEDIAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Median'>>, 'MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Min'>>, 'MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Month'>>, 'MONTHS_BETWEEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MonthsBetween'>>, 'NEXT_VALUE_FOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NextValueFor'>>, 'NORMALIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Normalize'>>, 'NTH_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NthValue'>>, 'NULLIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nullif'>>, 'NUMBER_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NumberToStr'>>, 'NVL2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nvl2'>>, 'OBJECT_INSERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ObjectInsert'>>, 'OPEN_J_S_O_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.OpenJSON'>>, 'OVERLAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Overlay'>>, 'PAD': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.Pad'>>, 'PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParameterizedAgg'>>, 'PARSE_JSON': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'JSON_PARSE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'PERCENTILE_CONT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileCont'>>, 'PERCENTILE_DISC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileDisc'>>, 'POSEXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Posexplode'>>, 'POSEXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PosexplodeOuter'>>, 'POWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'POW': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'PREDICT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Predict'>>, 'QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quantile'>>, 'QUARTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quarter'>>, 'RAND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Rand'>>, 'RANDOM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Rand'>>, 'RANDN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Randn'>>, 'RANGE_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RangeN'>>, 'READ_CSV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ReadCSV'>>, 'REDUCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Reduce'>>, 'REGEXP_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpExtract'>>, 'REGEXP_EXTRACT_ALL': <function _build_regexp_extract.<locals>._builder>, 'REGEXP_I_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpILike'>>, 'REGEXP_LIKE': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.RegexpLike'>>, 'REGEXP_REPLACE': <function _build_regexp_replace>, 'REGEXP_SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpSplit'>>, 'REPEAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Repeat'>>, 'RIGHT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Right'>>, 'ROUND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Round'>>, 'ROW_NUMBER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RowNumber'>>, 'SHA': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA1': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA2'>>, 'SAFE_DIVIDE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SafeDivide'>>, 'SIGN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sign'>>, 'SIGNUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sign'>>, 'SORT_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SortArray'>>, 'SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Split'>>, 'SPLIT_PART': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SplitPart'>>, 'SQRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sqrt'>>, 'STANDARD_HASH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StandardHash'>>, 'STAR_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StarMap'>>, 'STARTS_WITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STARTSWITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STDDEV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stddev'>>, 'STDEV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stddev'>>, 'STDDEV_POP': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.StddevPop'>>, 'STDDEV_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevSamp'>>, 'STR_POSITION': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrPosition'>>, 'STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToDate'>>, 'STR_TO_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToMap'>>, 'STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToTime'>>, 'STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToUnix'>>, 'STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.String'>>, 'STRING_TO_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StringToArray'>>, 'SPLIT_BY_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StringToArray'>>, 'STRUCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Struct'>>, 'STRUCT_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StructExtract'>>, 'STUFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'INSERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'SUBSTRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Substring'>>, 'SUBSTR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Substring'>>, 'SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sum'>>, 'TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Time'>>, 'TIME_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeAdd'>>, 'TIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeDiff'>>, 'TIME_FROM_PARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeFromParts'>>, 'TIMEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeFromParts'>>, 'TIME_STR_TO_DATE': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToDate'>>, 'TIME_STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToTime'>>, 'TIME_STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToUnix'>>, 'TIME_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeSub'>>, 'TIME_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToStr'>>, 'TIME_TO_TIME_STR': <function Parser.<lambda>>, 'TIME_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToUnix'>>, 'TIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeTrunc'>>, 'TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Timestamp'>>, 'TIMESTAMP_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampAdd'>>, 'TIMESTAMPDIFF': <function _build_datediff>, 'TIMESTAMP_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampDiff'>>, 'TIMESTAMP_FROM_PARTS': <function build_timestamp_from_parts>, 'TIMESTAMPFROMPARTS': <function build_timestamp_from_parts>, 'TIMESTAMP_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampSub'>>, 'TIMESTAMP_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampTrunc'>>, 'TO_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToArray'>>, 'TO_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToBase64'>>, 'TO_CHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'TO_DAYS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToDays'>>, 'TO_DOUBLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToDouble'>>, 'TO_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToMap'>>, 'TO_NUMBER': <function Snowflake.Parser.<lambda>>, 'TRANSFORM': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.Transform'>>, 'TRIM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Trim'>>, 'TRY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Try'>>, 'TRY_CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TryCast'>>, 'TS_OR_DI_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDiToDi'>>, 'TS_OR_DS_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsAdd'>>, 'TS_OR_DS_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsDiff'>>, 'TS_OR_DS_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToDate'>>, 'TS_OR_DS_TO_DATE_STR': <function Parser.<lambda>>, 'TS_OR_DS_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToTime'>>, 'TS_OR_DS_TO_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToTimestamp'>>, 'UNHEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Unhex'>>, 'UNIX_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixDate'>>, 'UNIX_SECONDS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixSeconds'>>, 'UNIX_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToStr'>>, 'UNIX_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTime'>>, 'UNIX_TO_TIME_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTimeStr'>>, 'UNNEST': <function Parser.<lambda>>, 'UPPER': <function build_upper>, 'UCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'UUID': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Uuid'>>, 'GEN_RANDOM_UUID': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Uuid'>>, 'GENERATE_UUID': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Uuid'>>, 'UUID_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Uuid'>>, 
'VAR_MAP': <function build_var_map>, 'VARIANCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'VAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Week'>>, 'WEEK_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WEEKOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WHEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.When'>>, 'X_M_L_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.XMLTable'>>, 'XOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Xor'>>, 'YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Year'>>, 'ARRAYAGG': <function Parser.<lambda>>, 'GLOB': <function Parser.<lambda>>, 'INSTR': <function Parser.<lambda>>, 'JSON_EXTRACT_PATH_TEXT': <function build_extract_json_with_path.<locals>._builder>, 'LIKE': <function build_like>, 'LOG2': <function Parser.<lambda>>, 'LOG10': <function Parser.<lambda>>, 'LPAD': <function Parser.<lambda>>, 'LEFTPAD': <function Parser.<lambda>>, 'LTRIM': <function Parser.<lambda>>, 'MOD': <function build_mod>, 'RIGHTPAD': <function Parser.<lambda>>, 'RPAD': <function Parser.<lambda>>, 'RTRIM': <function Parser.<lambda>>, 'SCOPE_RESOLUTION': <function Parser.<lambda>>, 'TO_HEX': <function build_hex>, 'APPROX_PERCENTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'ARRAY_CONSTRUCT': <function Snowflake.Parser.<lambda>>, 'ARRAY_GENERATE_RANGE': <function Snowflake.Parser.<lambda>>, 'BITXOR': <function 
binary_from_function.<locals>.<lambda>>, 'BIT_XOR': <function binary_from_function.<locals>.<lambda>>, 'BOOLXOR': <function binary_from_function.<locals>.<lambda>>, 'DATEADD': <function _build_date_time_add.<locals>._builder>, 'DIV0': <function _build_if_from_div0>, 'EDITDISTANCE': <function Snowflake.Parser.<lambda>>, 'GET_PATH': <function Snowflake.Parser.<lambda>>, 'IFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'LISTAGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'NULLIFZERO': <function _build_if_from_nullifzero>, 'OBJECT_CONSTRUCT': <function _build_object_construct>, 'REGEXP_SUBSTR': <function _build_regexp_extract.<locals>._builder>, 'REGEXP_SUBSTR_ALL': <function _build_regexp_extract.<locals>._builder>, 'RLIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'SQUARE': <function Snowflake.Parser.<lambda>>, 'TIMEADD': <function _build_date_time_add.<locals>._builder>, 'TIMEDIFF': <function _build_datediff>, 'TIMESTAMPADD': <function _build_date_time_add.<locals>._builder>, 'TIMESTAMPNTZFROMPARTS': <function build_timestamp_from_parts>, 'TIMESTAMP_NTZ_FROM_PARTS': <function build_timestamp_from_parts>, 'TRY_PARSE_JSON': <function Snowflake.Parser.<lambda>>, 'TRY_TO_DATE': <function _build_datetime.<locals>._builder>, 'TRY_TO_TIMESTAMP': <function _build_datetime.<locals>._builder>, 'TO_DATE': <function _build_datetime.<locals>._builder>, 'TO_TIME': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP_LTZ': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP_NTZ': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP_TZ': <function _build_datetime.<locals>._builder>, 'TO_VARCHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'ZEROIFNULL': <function _build_if_from_zeroifnull>}
FUNCTION_PARSERS = {'CAST': <function Parser.<lambda>>, 'CONVERT': <function Parser.<lambda>>, 'DECODE': <function Parser.<lambda>>, 'EXTRACT': <function Parser.<lambda>>, 'GAP_FILL': <function Parser.<lambda>>, 'JSON_OBJECT': <function Parser.<lambda>>, 'JSON_OBJECTAGG': <function Parser.<lambda>>, 'JSON_TABLE': <function Parser.<lambda>>, 'MATCH': <function Parser.<lambda>>, 'NORMALIZE': <function Parser.<lambda>>, 'OPENJSON': <function Parser.<lambda>>, 'OVERLAY': <function Parser.<lambda>>, 'POSITION': <function Parser.<lambda>>, 'PREDICT': <function Parser.<lambda>>, 'SAFE_CAST': <function Parser.<lambda>>, 'STRING_AGG': <function Parser.<lambda>>, 'SUBSTRING': <function Parser.<lambda>>, 'TRY_CAST': <function Parser.<lambda>>, 'TRY_CONVERT': <function Parser.<lambda>>, 'DATE_PART': <function Snowflake.Parser.<lambda>>, 'OBJECT_CONSTRUCT_KEEP_NULL': <function Snowflake.Parser.<lambda>>}
TIMESTAMPS = {<TokenType.TIMETZ: 'TIMETZ'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>}
RANGE_PARSERS = {<TokenType.AT_GT: 'AT_GT'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.BETWEEN: 'BETWEEN'>: <function Parser.<lambda>>, <TokenType.GLOB: 'GLOB'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.ILIKE: 'ILIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.IN: 'IN'>: <function Parser.<lambda>>, <TokenType.IRLIKE: 'IRLIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.IS: 'IS'>: <function Parser.<lambda>>, <TokenType.LIKE: 'LIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.LT_AT: 'LT_AT'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.OVERLAPS: 'OVERLAPS'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.RLIKE: 'RLIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.SIMILAR_TO: 'SIMILAR_TO'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.FOR: 'FOR'>: <function Parser.<lambda>>, <TokenType.LIKE_ANY: 'LIKE_ANY'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.ILIKE_ANY: 'ILIKE_ANY'>: <function binary_range_parser.<locals>._parse_binary_range>}
ALTER_PARSERS = {'ADD': <function Parser.<lambda>>, 'AS': <function Parser.<lambda>>, 'ALTER': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'DELETE': <function Parser.<lambda>>, 'DROP': <function Parser.<lambda>>, 'RENAME': <function Parser.<lambda>>, 'SET': <function Parser.<lambda>>, 'SWAP': <function Parser.<lambda>>, 'UNSET': <function Snowflake.Parser.<lambda>>}
STATEMENT_PARSERS = {<TokenType.ALTER: 'ALTER'>: <function Parser.<lambda>>, <TokenType.BEGIN: 'BEGIN'>: <function Parser.<lambda>>, <TokenType.CACHE: 'CACHE'>: <function Parser.<lambda>>, <TokenType.COMMENT: 'COMMENT'>: <function Parser.<lambda>>, <TokenType.COMMIT: 'COMMIT'>: <function Parser.<lambda>>, <TokenType.COPY: 'COPY'>: <function Parser.<lambda>>, <TokenType.CREATE: 'CREATE'>: <function Parser.<lambda>>, <TokenType.DELETE: 'DELETE'>: <function Parser.<lambda>>, <TokenType.DESC: 'DESC'>: <function Parser.<lambda>>, <TokenType.DESCRIBE: 'DESCRIBE'>: <function Parser.<lambda>>, <TokenType.DROP: 'DROP'>: <function Parser.<lambda>>, <TokenType.GRANT: 'GRANT'>: <function Parser.<lambda>>, <TokenType.INSERT: 'INSERT'>: <function Parser.<lambda>>, <TokenType.KILL: 'KILL'>: <function Parser.<lambda>>, <TokenType.LOAD: 'LOAD'>: <function Parser.<lambda>>, <TokenType.MERGE: 'MERGE'>: <function Parser.<lambda>>, <TokenType.PIVOT: 'PIVOT'>: <function Parser.<lambda>>, <TokenType.PRAGMA: 'PRAGMA'>: <function Parser.<lambda>>, <TokenType.REFRESH: 'REFRESH'>: <function Parser.<lambda>>, <TokenType.ROLLBACK: 'ROLLBACK'>: <function Parser.<lambda>>, <TokenType.SET: 'SET'>: <function Parser.<lambda>>, <TokenType.TRUNCATE: 'TRUNCATE'>: <function Parser.<lambda>>, <TokenType.UNCACHE: 'UNCACHE'>: <function Parser.<lambda>>, <TokenType.UPDATE: 'UPDATE'>: <function Parser.<lambda>>, <TokenType.USE: 'USE'>: <function Parser.<lambda>>, <TokenType.SEMICOLON: 'SEMICOLON'>: <function Parser.<lambda>>, <TokenType.SHOW: 'SHOW'>: <function Snowflake.Parser.<lambda>>}
PROPERTY_PARSERS = {'ALLOWED_VALUES': <function Parser.<lambda>>, 'ALGORITHM': <function Parser.<lambda>>, 'AUTO': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'BACKUP': <function Parser.<lambda>>, 'BLOCKCOMPRESSION': <function Parser.<lambda>>, 'CHARSET': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECKSUM': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'CONTAINS': <function Parser.<lambda>>, 'COPY': <function Parser.<lambda>>, 'DATABLOCKSIZE': <function Parser.<lambda>>, 'DATA_DELETION': <function Parser.<lambda>>, 'DEFINER': <function Parser.<lambda>>, 'DETERMINISTIC': <function Parser.<lambda>>, 'DISTRIBUTED': <function Parser.<lambda>>, 'DUPLICATE': <function Parser.<lambda>>, 'DYNAMIC': <function Parser.<lambda>>, 'DISTKEY': <function Parser.<lambda>>, 'DISTSTYLE': <function Parser.<lambda>>, 'EMPTY': <function Parser.<lambda>>, 'ENGINE': <function Parser.<lambda>>, 'EXECUTE': <function Parser.<lambda>>, 'EXTERNAL': <function Parser.<lambda>>, 'FALLBACK': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'FREESPACE': <function Parser.<lambda>>, 'GLOBAL': <function Parser.<lambda>>, 'HEAP': <function Parser.<lambda>>, 'ICEBERG': <function Parser.<lambda>>, 'IMMUTABLE': <function Parser.<lambda>>, 'INHERITS': <function Parser.<lambda>>, 'INPUT': <function Parser.<lambda>>, 'JOURNAL': <function Parser.<lambda>>, 'LANGUAGE': <function Parser.<lambda>>, 'LAYOUT': <function Parser.<lambda>>, 'LIFETIME': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'LOCATION': <function Snowflake.Parser.<lambda>>, 'LOCK': <function Parser.<lambda>>, 'LOCKING': <function Parser.<lambda>>, 'LOG': <function Parser.<lambda>>, 'MATERIALIZED': <function Parser.<lambda>>, 'MERGEBLOCKRATIO': <function Parser.<lambda>>, 'MODIFIES': <function 
Parser.<lambda>>, 'MULTISET': <function Parser.<lambda>>, 'NO': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'ORDER BY': <function Parser.<lambda>>, 'OUTPUT': <function Parser.<lambda>>, 'PARTITION': <function Parser.<lambda>>, 'PARTITION BY': <function Parser.<lambda>>, 'PARTITIONED BY': <function Parser.<lambda>>, 'PARTITIONED_BY': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'RANGE': <function Parser.<lambda>>, 'READS': <function Parser.<lambda>>, 'REMOTE': <function Parser.<lambda>>, 'RETURNS': <function Parser.<lambda>>, 'STRICT': <function Parser.<lambda>>, 'STREAMING': <function Parser.<lambda>>, 'ROW': <function Parser.<lambda>>, 'ROW_FORMAT': <function Parser.<lambda>>, 'SAMPLE': <function Parser.<lambda>>, 'SECURE': <function Parser.<lambda>>, 'SECURITY': <function Parser.<lambda>>, 'SET': <function Parser.<lambda>>, 'SETTINGS': <function Parser.<lambda>>, 'SHARING': <function Parser.<lambda>>, 'SORTKEY': <function Parser.<lambda>>, 'SOURCE': <function Parser.<lambda>>, 'STABLE': <function Parser.<lambda>>, 'STORED': <function Parser.<lambda>>, 'SYSTEM_VERSIONING': <function Parser.<lambda>>, 'TBLPROPERTIES': <function Parser.<lambda>>, 'TEMP': <function Parser.<lambda>>, 'TEMPORARY': <function Parser.<lambda>>, 'TO': <function Parser.<lambda>>, 'TRANSIENT': <function Parser.<lambda>>, 'TRANSFORM': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'USING': <function Parser.<lambda>>, 'UNLOGGED': <function Parser.<lambda>>, 'VOLATILE': <function Parser.<lambda>>, 'WITH': <function Parser.<lambda>>}
TYPE_CONVERTERS = {<Type.DECIMAL: 'DECIMAL'>: <function build_default_decimal_type.<locals>._builder>}
SHOW_PARSERS = {'SCHEMAS': <function _show_parser.<locals>._parse>, 'TERSE SCHEMAS': <function _show_parser.<locals>._parse>, 'OBJECTS': <function _show_parser.<locals>._parse>, 'TERSE OBJECTS': <function _show_parser.<locals>._parse>, 'TABLES': <function _show_parser.<locals>._parse>, 'TERSE TABLES': <function _show_parser.<locals>._parse>, 'VIEWS': <function _show_parser.<locals>._parse>, 'TERSE VIEWS': <function _show_parser.<locals>._parse>, 'PRIMARY KEYS': <function _show_parser.<locals>._parse>, 'TERSE PRIMARY KEYS': <function _show_parser.<locals>._parse>, 'IMPORTED KEYS': <function _show_parser.<locals>._parse>, 'TERSE IMPORTED KEYS': <function _show_parser.<locals>._parse>, 'UNIQUE KEYS': <function _show_parser.<locals>._parse>, 'TERSE UNIQUE KEYS': <function _show_parser.<locals>._parse>, 'SEQUENCES': <function _show_parser.<locals>._parse>, 'TERSE SEQUENCES': <function _show_parser.<locals>._parse>, 'COLUMNS': <function _show_parser.<locals>._parse>, 'USERS': <function _show_parser.<locals>._parse>, 'TERSE USERS': <function _show_parser.<locals>._parse>}
CONSTRAINT_PARSERS = {'AUTOINCREMENT': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'CASESPECIFIC': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECK': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'COMPRESS': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'NONCLUSTERED': <function Parser.<lambda>>, 'DEFAULT': <function Parser.<lambda>>, 'ENCODE': <function Parser.<lambda>>, 'EPHEMERAL': <function Parser.<lambda>>, 'EXCLUDE': <function Parser.<lambda>>, 'FOREIGN KEY': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'GENERATED': <function Parser.<lambda>>, 'IDENTITY': <function Parser.<lambda>>, 'INLINE': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'NOT': <function Parser.<lambda>>, 'NULL': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'PATH': <function Parser.<lambda>>, 'PERIOD': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'REFERENCES': <function Parser.<lambda>>, 'TITLE': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'UNIQUE': <function Parser.<lambda>>, 'UPPERCASE': <function Parser.<lambda>>, 'WITH': <function Snowflake.Parser.<lambda>>, 'MASKING': <function Snowflake.Parser.<lambda>>, 'PROJECTION': <function Snowflake.Parser.<lambda>>, 'TAG': <function Snowflake.Parser.<lambda>>}
STAGED_FILE_SINGLE_TOKENS = {<TokenType.MOD: 'MOD'>, <TokenType.SLASH: 'SLASH'>, <TokenType.DOT: 'DOT'>}
FLATTEN_COLUMNS = ['SEQ', 'KEY', 'PATH', 'INDEX', 'VALUE', 'THIS']
SCHEMA_KINDS = {'SEQUENCES', 'IMPORTED KEYS', 'VIEWS', 'OBJECTS', 'TABLES', 'UNIQUE KEYS'}
NON_TABLE_CREATABLES = {'STREAMLIT', 'WAREHOUSE', 'STORAGE INTEGRATION', 'TAG'}
LAMBDAS = {<TokenType.ARROW: 'ARROW'>: <function Snowflake.Parser.<lambda>>, <TokenType.FARROW: 'FARROW'>: <function Parser.<lambda>>}
SHOW_TRIE: Dict = {'SCHEMAS': {0: True}, 'TERSE': {'SCHEMAS': {0: True}, 'OBJECTS': {0: True}, 'TABLES': {0: True}, 'VIEWS': {0: True}, 'PRIMARY': {'KEYS': {0: True}}, 'IMPORTED': {'KEYS': {0: True}}, 'UNIQUE': {'KEYS': {0: True}}, 'SEQUENCES': {0: True}, 'USERS': {0: True}}, 'OBJECTS': {0: True}, 'TABLES': {0: True}, 'VIEWS': {0: True}, 'PRIMARY': {'KEYS': {0: True}}, 'IMPORTED': {'KEYS': {0: True}}, 'UNIQUE': {'KEYS': {0: True}}, 'SEQUENCES': {0: True}, 'COLUMNS': {0: True}, 'USERS': {0: True}}
SET_TRIE: Dict = {'GLOBAL': {0: True}, 'LOCAL': {0: True}, 'SESSION': {0: True}, 'TRANSACTION': {0: True}}
Inherited Members
sqlglot.parser.Parser
Parser
NO_PAREN_FUNCTIONS
STRUCT_TYPE_TOKENS
NESTED_TYPE_TOKENS
ENUM_TYPE_TOKENS
AGGREGATE_TYPE_TOKENS
TYPE_TOKENS
SIGNED_TO_UNSIGNED_TYPE_TOKEN
SUBQUERY_PREDICATES
RESERVED_TOKENS
DB_CREATABLES
CREATABLES
ALTERABLES
INTERVAL_VARS
ALIAS_TOKENS
ARRAY_CONSTRUCTORS
COMMENT_TABLE_ALIAS_TOKENS
UPDATE_ALIAS_TOKENS
TRIM_TYPES
FUNC_TOKENS
CONJUNCTION
ASSIGNMENT
DISJUNCTION
EQUALITY
COMPARISON
BITWISE
TERM
FACTOR
EXPONENT
TIMES
SET_OPERATIONS
JOIN_METHODS
JOIN_SIDES
JOIN_KINDS
JOIN_HINTS
COLUMN_OPERATORS
EXPRESSION_PARSERS
UNARY_PARSERS
STRING_PARSERS
NUMERIC_PARSERS
PRIMARY_PARSERS
PLACEHOLDER_PARSERS
ALTER_ALTER_PARSERS
SCHEMA_UNNAMED_CONSTRAINTS
NO_PAREN_FUNCTION_PARSERS
INVALID_FUNC_NAME_TOKENS
FUNCTIONS_WITH_ALIASED_ARGS
KEY_VALUE_DEFINITIONS
QUERY_MODIFIER_PARSERS
SET_PARSERS
TYPE_LITERAL_PARSERS
DDL_SELECT_TOKENS
PRE_VOLATILE_TOKENS
TRANSACTION_KIND
TRANSACTION_CHARACTERISTICS
CONFLICT_ACTIONS
CREATE_SEQUENCE
ISOLATED_LOADING_OPTIONS
USABLES
CAST_ACTIONS
SCHEMA_BINDING_OPTIONS
PROCEDURE_OPTIONS
EXECUTE_AS_OPTIONS
KEY_CONSTRAINT_OPTIONS
INSERT_ALTERNATIVES
CLONE_KEYWORDS
HISTORICAL_DATA_PREFIX
HISTORICAL_DATA_KIND
OPCLASS_FOLLOW_KEYWORDS
OPTYPE_FOLLOW_TOKENS
TABLE_INDEX_HINT_TOKENS
VIEW_ATTRIBUTES
WINDOW_ALIAS_TOKENS
WINDOW_BEFORE_PAREN_TOKENS
WINDOW_SIDES
JSON_KEY_VALUE_SEPARATOR_TOKENS
FETCH_TOKENS
ADD_CONSTRAINT_TOKENS
DISTINCT_TOKENS
NULL_TOKENS
UNNEST_OFFSET_ALIAS_TOKENS
SELECT_START_TOKENS
COPY_INTO_VARLEN_OPTIONS
IS_JSON_PREDICATE_KIND
ODBC_DATETIME_LITERALS
ON_CONDITION_TOKENS
PRIVILEGE_FOLLOW_TOKENS
DESCRIBE_STYLES
OPERATION_MODIFIERS
STRICT_CAST
PREFIXED_PIVOT_COLUMNS
LOG_DEFAULTS_TO_LN
ALTER_TABLE_ADD_REQUIRED_FOR_EACH_COLUMN
TABLESAMPLE_CSV
SET_REQUIRES_ASSIGNMENT_DELIMITER
TRIM_PATTERN_FIRST
STRING_ALIASES
MODIFIERS_ATTACHED_TO_SET_OP
SET_OP_MODIFIERS
NO_PAREN_IF_COMMANDS
JSON_ARROWS_REQUIRE_JSON_TYPE
VALUES_FOLLOWED_BY_PAREN
SUPPORTS_IMPLICIT_UNNEST
INTERVAL_SPANS
SUPPORTS_PARTITION_SELECTION
error_level
error_message_context
max_errors
dialect
reset
parse
parse_into
check_errors
raise_error
expression
validate_expression
errors
sql
class Snowflake.Tokenizer(sqlglot.tokens.Tokenizer):
762    class Tokenizer(tokens.Tokenizer):
763        STRING_ESCAPES = ["\\", "'"]
764        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
765        RAW_STRINGS = ["$$"]
766        COMMENTS = ["--", "//", ("/*", "*/")]
767        NESTED_COMMENTS = False
768
769        KEYWORDS = {
770            **tokens.Tokenizer.KEYWORDS,
771            "BYTEINT": TokenType.INT,
772            "CHAR VARYING": TokenType.VARCHAR,
773            "CHARACTER VARYING": TokenType.VARCHAR,
774            "EXCLUDE": TokenType.EXCEPT,
775            "ILIKE ANY": TokenType.ILIKE_ANY,
776            "LIKE ANY": TokenType.LIKE_ANY,
777            "MATCH_CONDITION": TokenType.MATCH_CONDITION,
778            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
779            "MINUS": TokenType.EXCEPT,
780            "NCHAR VARYING": TokenType.VARCHAR,
781            "PUT": TokenType.COMMAND,
782            "REMOVE": TokenType.COMMAND,
783            "RM": TokenType.COMMAND,
784            "SAMPLE": TokenType.TABLE_SAMPLE,
785            "SQL_DOUBLE": TokenType.DOUBLE,
786            "SQL_VARCHAR": TokenType.VARCHAR,
787            "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
788            "TAG": TokenType.TAG,
789            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
790            "TOP": TokenType.TOP,
791            "WAREHOUSE": TokenType.WAREHOUSE,
792            "STREAMLIT": TokenType.STREAMLIT,
793        }
794        KEYWORDS.pop("/*+")
795
796        SINGLE_TOKENS = {
797            **tokens.Tokenizer.SINGLE_TOKENS,
798            "$": TokenType.PARAMETER,
799        }
800
801        VAR_SINGLE_TOKENS = {"$"}
802
803        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
STRING_ESCAPES = ['\\', "'"]
HEX_STRINGS = [("x'", "'"), ("X'", "'")]
RAW_STRINGS = ['$$']
COMMENTS = ['--', '//', ('/*', '*/')]
NESTED_COMMENTS = False
KEYWORDS = {'{%': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%-': <TokenType.BLOCK_START: 'BLOCK_START'>, '%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '+%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '{{+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{{-': <TokenType.BLOCK_START: 'BLOCK_START'>, '+}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '==': <TokenType.EQ: 'EQ'>, '::': <TokenType.DCOLON: 'DCOLON'>, '||': <TokenType.DPIPE: 'DPIPE'>, '>=': <TokenType.GTE: 'GTE'>, '<=': <TokenType.LTE: 'LTE'>, '<>': <TokenType.NEQ: 'NEQ'>, '!=': <TokenType.NEQ: 'NEQ'>, ':=': <TokenType.COLON_EQ: 'COLON_EQ'>, '<=>': <TokenType.NULLSAFE_EQ: 'NULLSAFE_EQ'>, '->': <TokenType.ARROW: 'ARROW'>, '->>': <TokenType.DARROW: 'DARROW'>, '=>': <TokenType.FARROW: 'FARROW'>, '#>': <TokenType.HASH_ARROW: 'HASH_ARROW'>, '#>>': <TokenType.DHASH_ARROW: 'DHASH_ARROW'>, '<->': <TokenType.LR_ARROW: 'LR_ARROW'>, '&&': <TokenType.DAMP: 'DAMP'>, '??': <TokenType.DQMARK: 'DQMARK'>, '~~~': <TokenType.GLOB: 'GLOB'>, '~~': <TokenType.LIKE: 'LIKE'>, '~~*': <TokenType.ILIKE: 'ILIKE'>, '~*': <TokenType.IRLIKE: 'IRLIKE'>, 'ALL': <TokenType.ALL: 'ALL'>, 'ALWAYS': <TokenType.ALWAYS: 'ALWAYS'>, 'AND': <TokenType.AND: 'AND'>, 'ANTI': <TokenType.ANTI: 'ANTI'>, 'ANY': <TokenType.ANY: 'ANY'>, 'ASC': <TokenType.ASC: 'ASC'>, 'AS': <TokenType.ALIAS: 'ALIAS'>, 'ASOF': <TokenType.ASOF: 'ASOF'>, 'AUTOINCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'AUTO_INCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'BEGIN': <TokenType.BEGIN: 'BEGIN'>, 'BETWEEN': <TokenType.BETWEEN: 'BETWEEN'>, 'CACHE': <TokenType.CACHE: 'CACHE'>, 'UNCACHE': <TokenType.UNCACHE: 'UNCACHE'>, 'CASE': <TokenType.CASE: 'CASE'>, 'CHARACTER SET': <TokenType.CHARACTER_SET: 'CHARACTER_SET'>, 'CLUSTER BY': <TokenType.CLUSTER_BY: 'CLUSTER_BY'>, 'COLLATE': <TokenType.COLLATE: 'COLLATE'>, 'COLUMN': <TokenType.COLUMN: 'COLUMN'>, 
'COMMIT': <TokenType.COMMIT: 'COMMIT'>, 'CONNECT BY': <TokenType.CONNECT_BY: 'CONNECT_BY'>, 'CONSTRAINT': <TokenType.CONSTRAINT: 'CONSTRAINT'>, 'COPY': <TokenType.COPY: 'COPY'>, 'CREATE': <TokenType.CREATE: 'CREATE'>, 'CROSS': <TokenType.CROSS: 'CROSS'>, 'CUBE': <TokenType.CUBE: 'CUBE'>, 'CURRENT_DATE': <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, 'CURRENT_TIME': <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, 'CURRENT_TIMESTAMP': <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, 'CURRENT_USER': <TokenType.CURRENT_USER: 'CURRENT_USER'>, 'DATABASE': <TokenType.DATABASE: 'DATABASE'>, 'DEFAULT': <TokenType.DEFAULT: 'DEFAULT'>, 'DELETE': <TokenType.DELETE: 'DELETE'>, 'DESC': <TokenType.DESC: 'DESC'>, 'DESCRIBE': <TokenType.DESCRIBE: 'DESCRIBE'>, 'DISTINCT': <TokenType.DISTINCT: 'DISTINCT'>, 'DISTRIBUTE BY': <TokenType.DISTRIBUTE_BY: 'DISTRIBUTE_BY'>, 'DIV': <TokenType.DIV: 'DIV'>, 'DROP': <TokenType.DROP: 'DROP'>, 'ELSE': <TokenType.ELSE: 'ELSE'>, 'END': <TokenType.END: 'END'>, 'ENUM': <TokenType.ENUM: 'ENUM'>, 'ESCAPE': <TokenType.ESCAPE: 'ESCAPE'>, 'EXCEPT': <TokenType.EXCEPT: 'EXCEPT'>, 'EXECUTE': <TokenType.EXECUTE: 'EXECUTE'>, 'EXISTS': <TokenType.EXISTS: 'EXISTS'>, 'FALSE': <TokenType.FALSE: 'FALSE'>, 'FETCH': <TokenType.FETCH: 'FETCH'>, 'FILTER': <TokenType.FILTER: 'FILTER'>, 'FIRST': <TokenType.FIRST: 'FIRST'>, 'FULL': <TokenType.FULL: 'FULL'>, 'FUNCTION': <TokenType.FUNCTION: 'FUNCTION'>, 'FOR': <TokenType.FOR: 'FOR'>, 'FOREIGN KEY': <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, 'FORMAT': <TokenType.FORMAT: 'FORMAT'>, 'FROM': <TokenType.FROM: 'FROM'>, 'GEOGRAPHY': <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, 'GEOMETRY': <TokenType.GEOMETRY: 'GEOMETRY'>, 'GLOB': <TokenType.GLOB: 'GLOB'>, 'GROUP BY': <TokenType.GROUP_BY: 'GROUP_BY'>, 'GROUPING SETS': <TokenType.GROUPING_SETS: 'GROUPING_SETS'>, 'HAVING': <TokenType.HAVING: 'HAVING'>, 'ILIKE': <TokenType.ILIKE: 'ILIKE'>, 'IN': <TokenType.IN: 'IN'>, 'INDEX': <TokenType.INDEX: 'INDEX'>, 'INET': <TokenType.INET: 'INET'>, 'INNER': 
<TokenType.INNER: 'INNER'>, 'INSERT': <TokenType.INSERT: 'INSERT'>, 'INTERVAL': <TokenType.INTERVAL: 'INTERVAL'>, 'INTERSECT': <TokenType.INTERSECT: 'INTERSECT'>, 'INTO': <TokenType.INTO: 'INTO'>, 'IS': <TokenType.IS: 'IS'>, 'ISNULL': <TokenType.ISNULL: 'ISNULL'>, 'JOIN': <TokenType.JOIN: 'JOIN'>, 'KEEP': <TokenType.KEEP: 'KEEP'>, 'KILL': <TokenType.KILL: 'KILL'>, 'LATERAL': <TokenType.LATERAL: 'LATERAL'>, 'LEFT': <TokenType.LEFT: 'LEFT'>, 'LIKE': <TokenType.LIKE: 'LIKE'>, 'LIMIT': <TokenType.LIMIT: 'LIMIT'>, 'LOAD': <TokenType.LOAD: 'LOAD'>, 'LOCK': <TokenType.LOCK: 'LOCK'>, 'MERGE': <TokenType.MERGE: 'MERGE'>, 'NATURAL': <TokenType.NATURAL: 'NATURAL'>, 'NEXT': <TokenType.NEXT: 'NEXT'>, 'NOT': <TokenType.NOT: 'NOT'>, 'NOTNULL': <TokenType.NOTNULL: 'NOTNULL'>, 'NULL': <TokenType.NULL: 'NULL'>, 'OBJECT': <TokenType.OBJECT: 'OBJECT'>, 'OFFSET': <TokenType.OFFSET: 'OFFSET'>, 'ON': <TokenType.ON: 'ON'>, 'OR': <TokenType.OR: 'OR'>, 'XOR': <TokenType.XOR: 'XOR'>, 'ORDER BY': <TokenType.ORDER_BY: 'ORDER_BY'>, 'ORDINALITY': <TokenType.ORDINALITY: 'ORDINALITY'>, 'OUTER': <TokenType.OUTER: 'OUTER'>, 'OVER': <TokenType.OVER: 'OVER'>, 'OVERLAPS': <TokenType.OVERLAPS: 'OVERLAPS'>, 'OVERWRITE': <TokenType.OVERWRITE: 'OVERWRITE'>, 'PARTITION': <TokenType.PARTITION: 'PARTITION'>, 'PARTITION BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED_BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PERCENT': <TokenType.PERCENT: 'PERCENT'>, 'PIVOT': <TokenType.PIVOT: 'PIVOT'>, 'PRAGMA': <TokenType.PRAGMA: 'PRAGMA'>, 'PRIMARY KEY': <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>, 'PROCEDURE': <TokenType.PROCEDURE: 'PROCEDURE'>, 'QUALIFY': <TokenType.QUALIFY: 'QUALIFY'>, 'RANGE': <TokenType.RANGE: 'RANGE'>, 'RECURSIVE': <TokenType.RECURSIVE: 'RECURSIVE'>, 'REGEXP': <TokenType.RLIKE: 'RLIKE'>, 'RENAME': <TokenType.RENAME: 'RENAME'>, 'REPLACE': <TokenType.REPLACE: 'REPLACE'>, 'RETURNING': <TokenType.RETURNING: 'RETURNING'>, 'REFERENCES': 
<TokenType.REFERENCES: 'REFERENCES'>, 'RIGHT': <TokenType.RIGHT: 'RIGHT'>, 'RLIKE': <TokenType.RLIKE: 'RLIKE'>, 'ROLLBACK': <TokenType.ROLLBACK: 'ROLLBACK'>, 'ROLLUP': <TokenType.ROLLUP: 'ROLLUP'>, 'ROW': <TokenType.ROW: 'ROW'>, 'ROWS': <TokenType.ROWS: 'ROWS'>, 'SCHEMA': <TokenType.SCHEMA: 'SCHEMA'>, 'SELECT': <TokenType.SELECT: 'SELECT'>, 'SEMI': <TokenType.SEMI: 'SEMI'>, 'SET': <TokenType.SET: 'SET'>, 'SETTINGS': <TokenType.SETTINGS: 'SETTINGS'>, 'SHOW': <TokenType.SHOW: 'SHOW'>, 'SIMILAR TO': <TokenType.SIMILAR_TO: 'SIMILAR_TO'>, 'SOME': <TokenType.SOME: 'SOME'>, 'SORT BY': <TokenType.SORT_BY: 'SORT_BY'>, 'START WITH': <TokenType.START_WITH: 'START_WITH'>, 'STRAIGHT_JOIN': <TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>, 'TABLE': <TokenType.TABLE: 'TABLE'>, 'TABLESAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'TEMP': <TokenType.TEMPORARY: 'TEMPORARY'>, 'TEMPORARY': <TokenType.TEMPORARY: 'TEMPORARY'>, 'THEN': <TokenType.THEN: 'THEN'>, 'TRUE': <TokenType.TRUE: 'TRUE'>, 'TRUNCATE': <TokenType.TRUNCATE: 'TRUNCATE'>, 'UNION': <TokenType.UNION: 'UNION'>, 'UNKNOWN': <TokenType.UNKNOWN: 'UNKNOWN'>, 'UNNEST': <TokenType.UNNEST: 'UNNEST'>, 'UNPIVOT': <TokenType.UNPIVOT: 'UNPIVOT'>, 'UPDATE': <TokenType.UPDATE: 'UPDATE'>, 'USE': <TokenType.USE: 'USE'>, 'USING': <TokenType.USING: 'USING'>, 'UUID': <TokenType.UUID: 'UUID'>, 'VALUES': <TokenType.VALUES: 'VALUES'>, 'VIEW': <TokenType.VIEW: 'VIEW'>, 'VOLATILE': <TokenType.VOLATILE: 'VOLATILE'>, 'WHEN': <TokenType.WHEN: 'WHEN'>, 'WHERE': <TokenType.WHERE: 'WHERE'>, 'WINDOW': <TokenType.WINDOW: 'WINDOW'>, 'WITH': <TokenType.WITH: 'WITH'>, 'APPLY': <TokenType.APPLY: 'APPLY'>, 'ARRAY': <TokenType.ARRAY: 'ARRAY'>, 'BIT': <TokenType.BIT: 'BIT'>, 'BOOL': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BOOLEAN': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BYTE': <TokenType.TINYINT: 'TINYINT'>, 'MEDIUMINT': <TokenType.MEDIUMINT: 'MEDIUMINT'>, 'INT1': <TokenType.TINYINT: 'TINYINT'>, 'TINYINT': <TokenType.TINYINT: 'TINYINT'>, 'INT16': <TokenType.SMALLINT: 
'SMALLINT'>, 'SHORT': <TokenType.SMALLINT: 'SMALLINT'>, 'SMALLINT': <TokenType.SMALLINT: 'SMALLINT'>, 'INT128': <TokenType.INT128: 'INT128'>, 'HUGEINT': <TokenType.INT128: 'INT128'>, 'UHUGEINT': <TokenType.UINT128: 'UINT128'>, 'INT2': <TokenType.SMALLINT: 'SMALLINT'>, 'INTEGER': <TokenType.INT: 'INT'>, 'INT': <TokenType.INT: 'INT'>, 'INT4': <TokenType.INT: 'INT'>, 'INT32': <TokenType.INT: 'INT'>, 'INT64': <TokenType.BIGINT: 'BIGINT'>, 'LONG': <TokenType.BIGINT: 'BIGINT'>, 'BIGINT': <TokenType.BIGINT: 'BIGINT'>, 'INT8': <TokenType.TINYINT: 'TINYINT'>, 'UINT': <TokenType.UINT: 'UINT'>, 'DEC': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL32': <TokenType.DECIMAL32: 'DECIMAL32'>, 'DECIMAL64': <TokenType.DECIMAL64: 'DECIMAL64'>, 'DECIMAL128': <TokenType.DECIMAL128: 'DECIMAL128'>, 'DECIMAL256': <TokenType.DECIMAL256: 'DECIMAL256'>, 'BIGDECIMAL': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'BIGNUMERIC': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'LIST': <TokenType.LIST: 'LIST'>, 'MAP': <TokenType.MAP: 'MAP'>, 'NULLABLE': <TokenType.NULLABLE: 'NULLABLE'>, 'NUMBER': <TokenType.DECIMAL: 'DECIMAL'>, 'NUMERIC': <TokenType.DECIMAL: 'DECIMAL'>, 'FIXED': <TokenType.DECIMAL: 'DECIMAL'>, 'REAL': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT4': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT8': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE PRECISION': <TokenType.DOUBLE: 'DOUBLE'>, 'JSON': <TokenType.JSON: 'JSON'>, 'JSONB': <TokenType.JSONB: 'JSONB'>, 'CHAR': <TokenType.CHAR: 'CHAR'>, 'CHARACTER': <TokenType.CHAR: 'CHAR'>, 'NCHAR': <TokenType.NCHAR: 'NCHAR'>, 'VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'VARCHAR2': <TokenType.VARCHAR: 'VARCHAR'>, 'NVARCHAR': <TokenType.NVARCHAR: 'NVARCHAR'>, 'NVARCHAR2': <TokenType.NVARCHAR: 'NVARCHAR'>, 'BPCHAR': <TokenType.BPCHAR: 'BPCHAR'>, 'STR': <TokenType.TEXT: 'TEXT'>, 'STRING': <TokenType.TEXT: 'TEXT'>, 'TEXT': <TokenType.TEXT: 'TEXT'>, 'LONGTEXT': <TokenType.LONGTEXT: 
'LONGTEXT'>, 'MEDIUMTEXT': <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, 'TINYTEXT': <TokenType.TINYTEXT: 'TINYTEXT'>, 'CLOB': <TokenType.TEXT: 'TEXT'>, 'LONGVARCHAR': <TokenType.TEXT: 'TEXT'>, 'BINARY': <TokenType.BINARY: 'BINARY'>, 'BLOB': <TokenType.VARBINARY: 'VARBINARY'>, 'LONGBLOB': <TokenType.LONGBLOB: 'LONGBLOB'>, 'MEDIUMBLOB': <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, 'TINYBLOB': <TokenType.TINYBLOB: 'TINYBLOB'>, 'BYTEA': <TokenType.VARBINARY: 'VARBINARY'>, 'VARBINARY': <TokenType.VARBINARY: 'VARBINARY'>, 'TIME': <TokenType.TIME: 'TIME'>, 'TIMETZ': <TokenType.TIMETZ: 'TIMETZ'>, 'TIMESTAMP': <TokenType.TIMESTAMP: 'TIMESTAMP'>, 'TIMESTAMPTZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPLTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'TIMESTAMP_LTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'TIMESTAMPNTZ': <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, 'TIMESTAMP_NTZ': <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, 'DATE': <TokenType.DATE: 'DATE'>, 'DATETIME': <TokenType.DATETIME: 'DATETIME'>, 'INT4RANGE': <TokenType.INT4RANGE: 'INT4RANGE'>, 'INT4MULTIRANGE': <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, 'INT8RANGE': <TokenType.INT8RANGE: 'INT8RANGE'>, 'INT8MULTIRANGE': <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, 'NUMRANGE': <TokenType.NUMRANGE: 'NUMRANGE'>, 'NUMMULTIRANGE': <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, 'TSRANGE': <TokenType.TSRANGE: 'TSRANGE'>, 'TSMULTIRANGE': <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, 'TSTZRANGE': <TokenType.TSTZRANGE: 'TSTZRANGE'>, 'TSTZMULTIRANGE': <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, 'DATERANGE': <TokenType.DATERANGE: 'DATERANGE'>, 'DATEMULTIRANGE': <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, 'UNIQUE': <TokenType.UNIQUE: 'UNIQUE'>, 'VECTOR': <TokenType.VECTOR: 'VECTOR'>, 'STRUCT': <TokenType.STRUCT: 'STRUCT'>, 'SEQUENCE': <TokenType.SEQUENCE: 'SEQUENCE'>, 'VARIANT': <TokenType.VARIANT: 'VARIANT'>, 'ALTER': <TokenType.ALTER: 'ALTER'>, 'ANALYZE': <TokenType.COMMAND: 'COMMAND'>, 'CALL': <TokenType.COMMAND: 'COMMAND'>, 'COMMENT': 
<TokenType.COMMENT: 'COMMENT'>, 'EXPLAIN': <TokenType.COMMAND: 'COMMAND'>, 'GRANT': <TokenType.GRANT: 'GRANT'>, 'OPTIMIZE': <TokenType.COMMAND: 'COMMAND'>, 'PREPARE': <TokenType.COMMAND: 'COMMAND'>, 'VACUUM': <TokenType.COMMAND: 'COMMAND'>, 'USER-DEFINED': <TokenType.USERDEFINED: 'USERDEFINED'>, 'FOR VERSION': <TokenType.VERSION_SNAPSHOT: 'VERSION_SNAPSHOT'>, 'FOR TIMESTAMP': <TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>, 'BYTEINT': <TokenType.INT: 'INT'>, 'CHAR VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'CHARACTER VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'EXCLUDE': <TokenType.EXCEPT: 'EXCEPT'>, 'ILIKE ANY': <TokenType.ILIKE_ANY: 'ILIKE_ANY'>, 'LIKE ANY': <TokenType.LIKE_ANY: 'LIKE_ANY'>, 'MATCH_CONDITION': <TokenType.MATCH_CONDITION: 'MATCH_CONDITION'>, 'MATCH_RECOGNIZE': <TokenType.MATCH_RECOGNIZE: 'MATCH_RECOGNIZE'>, 'MINUS': <TokenType.EXCEPT: 'EXCEPT'>, 'NCHAR VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'PUT': <TokenType.COMMAND: 'COMMAND'>, 'REMOVE': <TokenType.COMMAND: 'COMMAND'>, 'RM': <TokenType.COMMAND: 'COMMAND'>, 'SAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'SQL_DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'SQL_VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'STORAGE INTEGRATION': <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, 'TAG': <TokenType.TAG: 'TAG'>, 'TIMESTAMP_TZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TOP': <TokenType.TOP: 'TOP'>, 'WAREHOUSE': <TokenType.WAREHOUSE: 'WAREHOUSE'>, 'STREAMLIT': <TokenType.STREAMLIT: 'STREAMLIT'>}
SINGLE_TOKENS = {'(': <TokenType.L_PAREN: 'L_PAREN'>, ')': <TokenType.R_PAREN: 'R_PAREN'>, '[': <TokenType.L_BRACKET: 'L_BRACKET'>, ']': <TokenType.R_BRACKET: 'R_BRACKET'>, '{': <TokenType.L_BRACE: 'L_BRACE'>, '}': <TokenType.R_BRACE: 'R_BRACE'>, '&': <TokenType.AMP: 'AMP'>, '^': <TokenType.CARET: 'CARET'>, ':': <TokenType.COLON: 'COLON'>, ',': <TokenType.COMMA: 'COMMA'>, '.': <TokenType.DOT: 'DOT'>, '-': <TokenType.DASH: 'DASH'>, '=': <TokenType.EQ: 'EQ'>, '>': <TokenType.GT: 'GT'>, '<': <TokenType.LT: 'LT'>, '%': <TokenType.MOD: 'MOD'>, '!': <TokenType.NOT: 'NOT'>, '|': <TokenType.PIPE: 'PIPE'>, '+': <TokenType.PLUS: 'PLUS'>, ';': <TokenType.SEMICOLON: 'SEMICOLON'>, '/': <TokenType.SLASH: 'SLASH'>, '\\': <TokenType.BACKSLASH: 'BACKSLASH'>, '*': <TokenType.STAR: 'STAR'>, '~': <TokenType.TILDA: 'TILDA'>, '?': <TokenType.PLACEHOLDER: 'PLACEHOLDER'>, '@': <TokenType.PARAMETER: 'PARAMETER'>, '#': <TokenType.HASH: 'HASH'>, "'": <TokenType.UNKNOWN: 'UNKNOWN'>, '`': <TokenType.UNKNOWN: 'UNKNOWN'>, '"': <TokenType.UNKNOWN: 'UNKNOWN'>, '$': <TokenType.PARAMETER: 'PARAMETER'>}
VAR_SINGLE_TOKENS = {'$'}
COMMANDS = {<TokenType.RENAME: 'RENAME'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.FETCH: 'FETCH'>}
    class Generator(generator.Generator):
        # Snowflake-specific generator configuration. Each flag below tunes how
        # the base Generator renders SQL so the output matches Snowflake syntax.
        PARAMETER_TOKEN = "$"  # parameters are written as $name
        MATCHED_BY_SOURCE = False
        SINGLE_STRING_INTERVAL = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        QUERY_HINTS = False
        AGGREGATE_FILTER_SUPPORTED = False
        SUPPORTS_TABLE_COPY = False
        COLLATE_IS_FUNC = True
        LIMIT_ONLY_LITERALS = True
        JSON_KEY_VALUE_PAIR_SEP = ","
        INSERT_OVERWRITE = " OVERWRITE INTO"
        STRUCT_DELIMITER = ("(", ")")
        COPY_PARAMS_ARE_WRAPPED = False
        COPY_PARAMS_EQ_REQUIRED = True
        # Snowflake spells SELECT * EXCEPT (...) as EXCLUDE.
        STAR_EXCEPT = "EXCLUDE"
        SUPPORTS_EXPLODING_PROJECTIONS = False
        ARRAY_CONCAT_IS_VAR_LEN = False
        SUPPORTS_CONVERT_TIMEZONE = True
        EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
        SUPPORTS_MEDIAN = True
        ARRAY_SIZE_NAME = "ARRAY_SIZE"
 828
        # Expression-class -> renderer overrides, layered on top of the base
        # Generator's TRANSFORMS. Entries are either simple renames
        # (rename_func), tree rewrites applied before generation
        # (transforms.preprocess), or inline lambdas building the SQL directly.
        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
            exp.ArgMax: rename_func("MAX_BY"),
            exp.ArgMin: rename_func("MIN_BY"),
            exp.Array: inline_array_sql,
            exp.ArrayConcat: lambda self, e: self.arrayconcat_sql(e, name="ARRAY_CAT"),
            # ARRAY_CONTAINS takes (value, array) — note the swapped operand order.
            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
            exp.AtTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
            ),
            exp.BitwiseXor: rename_func("BITXOR"),
            exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DatetimeAdd: date_delta_sql("TIMESTAMPADD"),
            exp.DatetimeDiff: timestampdiff_sql,
            exp.DateStrToDate: datestrtodate_sql,
            exp.DayOfMonth: rename_func("DAYOFMONTH"),
            exp.DayOfWeek: rename_func("DAYOFWEEK"),
            exp.DayOfYear: rename_func("DAYOFYEAR"),
            exp.Explode: rename_func("FLATTEN"),
            exp.Extract: rename_func("DATE_PART"),
            # Converting FROM a zone: go through UTC explicitly.
            exp.FromTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
            ),
            # ARRAY_GENERATE_RANGE's upper bound is exclusive, hence end + 1.
            exp.GenerateSeries: lambda self, e: self.func(
                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
            ),
            exp.GroupConcat: rename_func("LISTAGG"),
            exp.If: if_sql(name="IFF", false_value="NULL"),
            exp.JSONExtractArray: _json_extract_value_array_sql,
            exp.JSONExtractScalar: lambda self, e: self.func(
                "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
            ),
            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
            # The JSON path root ("$") is implicit in Snowflake paths.
            exp.JSONPathRoot: lambda *_: "",
            exp.JSONValueArray: _json_extract_value_array_sql,
            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
            exp.LogicalOr: rename_func("BOOLOR_AGG"),
            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.ParseJSON: lambda self, e: self.func(
                "TRY_PARSE_JSON" if e.args.get("safe") else "PARSE_JSON", e.this
            ),
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.PercentileCont: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.PercentileDisc: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
            exp.RegexpExtract: _regexpextract_sql,
            exp.RegexpExtractAll: _regexpextract_sql,
            exp.RegexpILike: _regexpilike_sql,
            exp.Rand: rename_func("RANDOM"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_distinct_on,
                    transforms.explode_to_unnest(),
                    transforms.eliminate_semi_and_anti_joins,
                    _unnest_generate_date_array,
                ]
            ),
            exp.SafeDivide: lambda self, e: no_safe_divide_sql(self, e, "IFF"),
            exp.SHA: rename_func("SHA1"),
            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
            exp.StartsWith: rename_func("STARTSWITH"),
            exp.StrPosition: lambda self, e: self.func(
                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
            ),
            exp.StrToDate: lambda self, e: self.func("DATE", e.this, self.format_time(e)),
            exp.Stuff: rename_func("INSERT"),
            exp.TimeAdd: date_delta_sql("TIMEADD"),
            exp.Timestamp: no_timestamp_sql,
            exp.TimestampAdd: date_delta_sql("TIMESTAMPADD"),
            # TIMESTAMPDIFF(unit, start, end): operands are emitted reversed.
            exp.TimestampDiff: lambda self, e: self.func(
                "TIMESTAMPDIFF", e.unit, e.expression, e.this
            ),
            exp.TimestampTrunc: timestamptrunc_sql(),
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeToStr: lambda self, e: self.func(
                "TO_CHAR", exp.cast(e.this, exp.DataType.Type.TIMESTAMP), self.format_time(e)
            ),
            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
            exp.ToArray: rename_func("TO_ARRAY"),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.ToDouble: rename_func("TO_DOUBLE"),
            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.TsOrDsToDate: lambda self, e: self.func(
                "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
            ),
            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
            exp.Uuid: rename_func("UUID_STRING"),
            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
            exp.Xor: rename_func("BOOLXOR"),
            # EDITDISTANCE has no cost parameters; warn if any were supplied.
            exp.Levenshtein: unsupported_args("ins_cost", "del_cost", "sub_cost")(
                rename_func("EDITDISTANCE")
            ),
        }
 933
        # JSON path components that can be rendered natively in Snowflake paths.
        SUPPORTED_JSON_PATH_PARTS = {
            exp.JSONPathKey,
            exp.JSONPathRoot,
            exp.JSONPathSubscript,
        }

        # Both NESTED and STRUCT render as Snowflake's OBJECT type.
        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.NESTED: "OBJECT",
            exp.DataType.Type.STRUCT: "OBJECT",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            # These properties have no rendering location in Snowflake DDL.
            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        # Constructs that cannot appear inside a VALUES table; values_sql falls
        # back to non-table VALUES rendering when any of these is present.
        UNSUPPORTED_VALUES_EXPRESSIONS = {
            exp.Map,
            exp.StarMap,
            exp.Struct,
            exp.VarMap,
        }
 958
 959        def with_properties(self, properties: exp.Properties) -> str:
 960            return self.properties(properties, wrapped=False, prefix=self.sep(""), sep=" ")
 961
 962        def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
 963            if expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS):
 964                values_as_table = False
 965
 966            return super().values_sql(expression, values_as_table=values_as_table)
 967
 968        def datatype_sql(self, expression: exp.DataType) -> str:
 969            expressions = expression.expressions
 970            if (
 971                expressions
 972                and expression.is_type(*exp.DataType.STRUCT_TYPES)
 973                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
 974            ):
 975                # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
 976                return "OBJECT"
 977
 978            return super().datatype_sql(expression)
 979
 980        def tonumber_sql(self, expression: exp.ToNumber) -> str:
 981            return self.func(
 982                "TO_NUMBER",
 983                expression.this,
 984                expression.args.get("format"),
 985                expression.args.get("precision"),
 986                expression.args.get("scale"),
 987            )
 988
        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
            """Render TIMESTAMP_FROM_PARTS, folding a milliseconds argument into
            the nanoseconds slot, since the function takes nano, not milli."""
            milli = expression.args.get("milli")
            if milli is not None:
                # 1 ms == 1_000_000 ns; detach "milli" and move it to "nano".
                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
                expression.set("nano", milli_to_nano)

            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
 996
 997        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
 998            if expression.is_type(exp.DataType.Type.GEOGRAPHY):
 999                return self.func("TO_GEOGRAPHY", expression.this)
1000            if expression.is_type(exp.DataType.Type.GEOMETRY):
1001                return self.func("TO_GEOMETRY", expression.this)
1002
1003            return super().cast_sql(expression, safe_prefix=safe_prefix)
1004
        def trycast_sql(self, expression: exp.TryCast) -> str:
            """Render TRY_CAST only for string-typed inputs; otherwise fall back
            to a plain CAST, since Snowflake's TRY_CAST requires strings."""
            value = expression.this

            if value.type is None:
                # The tree may not have been type-annotated yet; infer types on
                # demand so the string/non-string decision below is reliable.
                from sqlglot.optimizer.annotate_types import annotate_types

                value = annotate_types(value)

            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
                return super().trycast_sql(expression)

            # TRY_CAST only works for string values in Snowflake
            return self.cast_sql(expression)
1018
1019        def log_sql(self, expression: exp.Log) -> str:
1020            if not expression.expression:
1021                return self.func("LN", expression.this)
1022
1023            return super().log_sql(expression)
1024
        def unnest_sql(self, expression: exp.Unnest) -> str:
            """Transpile UNNEST to Snowflake's TABLE(FLATTEN(INPUT => ...)).

            FLATTEN exposes a fixed, positional set of output columns (SEQ, KEY,
            PATH, INDEX, VALUE, THIS), so the alias column list is assembled by
            position: the caller's offset alias (if any) lands on the INDEX slot
            and the first user alias column lands on the VALUE slot.
            """
            unnest_alias = expression.args.get("alias")
            offset = expression.args.get("offset")

            columns = [
                exp.to_identifier("seq"),
                exp.to_identifier("key"),
                exp.to_identifier("path"),
                # A WITH OFFSET alias maps onto FLATTEN's INDEX column.
                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
                # The first user-provided alias column names the VALUE column.
                seq_get(unnest_alias.columns if unnest_alias else [], 0)
                or exp.to_identifier("value"),
                exp.to_identifier("this"),
            ]

            if unnest_alias:
                unnest_alias.set("columns", columns)
            else:
                # No alias given: synthesize "_u" so the columns are addressable.
                unnest_alias = exp.TableAlias(this="_u", columns=columns)

            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
            alias = self.sql(unnest_alias)
            alias = f" AS {alias}" if alias else ""
            return f"{explode}{alias}"
1048
        def show_sql(self, expression: exp.Show) -> str:
            """Render Snowflake's SHOW command with its optional modifiers.

            The pieces are assembled in this clause order:
            SHOW [TERSE] <object> [HISTORY] [LIKE ...] [IN <scope_kind>]
            [<scope>] [STARTS WITH ...] [<limit>] [FROM ...].
            """
            terse = "TERSE " if expression.args.get("terse") else ""
            history = " HISTORY" if expression.args.get("history") else ""
            like = self.sql(expression, "like")
            like = f" LIKE {like}" if like else ""

            scope = self.sql(expression, "scope")
            scope = f" {scope}" if scope else ""

            scope_kind = self.sql(expression, "scope_kind")
            if scope_kind:
                scope_kind = f" IN {scope_kind}"

            starts_with = self.sql(expression, "starts_with")
            if starts_with:
                starts_with = f" STARTS WITH {starts_with}"

            # The limit clause renders itself (including any leading keyword).
            limit = self.sql(expression, "limit")

            from_ = self.sql(expression, "from")
            if from_:
                from_ = f" FROM {from_}"

            return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"
1073
1074        def describe_sql(self, expression: exp.Describe) -> str:
1075            # Default to table if kind is unknown
1076            kind_value = expression.args.get("kind") or "TABLE"
1077            kind = f" {kind_value}" if kind_value else ""
1078            this = f" {self.sql(expression, 'this')}"
1079            expressions = self.expressions(expression, flat=True)
1080            expressions = f" {expressions}" if expressions else ""
1081            return f"DESCRIBE{kind}{this}{expressions}"
1082
1083        def generatedasidentitycolumnconstraint_sql(
1084            self, expression: exp.GeneratedAsIdentityColumnConstraint
1085        ) -> str:
1086            start = expression.args.get("start")
1087            start = f" START {start}" if start else ""
1088            increment = expression.args.get("increment")
1089            increment = f" INCREMENT {increment}" if increment else ""
1090            return f"AUTOINCREMENT{start}{increment}"
1091
1092        def cluster_sql(self, expression: exp.Cluster) -> str:
1093            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"
1094
1095        def struct_sql(self, expression: exp.Struct) -> str:
1096            keys = []
1097            values = []
1098
1099            for i, e in enumerate(expression.expressions):
1100                if isinstance(e, exp.PropertyEQ):
1101                    keys.append(
1102                        exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this
1103                    )
1104                    values.append(e.expression)
1105                else:
1106                    keys.append(exp.Literal.string(f"_{i}"))
1107                    values.append(e)
1108
1109            return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))
1110
        @unsupported_args("weight", "accuracy")
        def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
            """Map ApproxQuantile to APPROX_PERCENTILE(value, quantile); the
            weight/accuracy arguments are declared unsupported via the decorator."""
            return self.func("APPROX_PERCENTILE", expression.this, expression.args.get("quantile"))
1114
1115        def alterset_sql(self, expression: exp.AlterSet) -> str:
1116            exprs = self.expressions(expression, flat=True)
1117            exprs = f" {exprs}" if exprs else ""
1118            file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
1119            file_format = f" STAGE_FILE_FORMAT = ({file_format})" if file_format else ""
1120            copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
1121            copy_options = f" STAGE_COPY_OPTIONS = ({copy_options})" if copy_options else ""
1122            tag = self.expressions(expression, key="tag", flat=True)
1123            tag = f" TAG {tag}" if tag else ""
1124
1125            return f"SET{exprs}{file_format}{copy_options}{tag}"
1126
        def strtotime_sql(self, expression: exp.StrToTime) -> str:
            """Emit TO_TIMESTAMP(value, format), or TRY_TO_TIMESTAMP when the
            expression is flagged as safe."""
            safe_prefix = "TRY_" if expression.args.get("safe") else ""
            return self.func(
                f"{safe_prefix}TO_TIMESTAMP", expression.this, self.format_time(expression)
            )
1132
1133        def timestampsub_sql(self, expression: exp.TimestampSub):
1134            return self.sql(
1135                exp.TimestampAdd(
1136                    this=expression.this,
1137                    expression=expression.expression * -1,
1138                    unit=expression.unit,
1139                )
1140            )
1141
1142        def jsonextract_sql(self, expression: exp.JSONExtract):
1143            this = expression.this
1144
1145            # JSON strings are valid coming from other dialects such as BQ
1146            return self.func(
1147                "GET_PATH",
1148                exp.ParseJSON(this=this) if this.is_string else this,
1149                expression.expression,
1150            )

Generator converts a given syntax tree to the corresponding SQL string.

Arguments:
  • pretty: Whether to format the produced SQL string. Default: False.
  • identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case insensitive.
  • normalize: Whether to normalize identifiers to lowercase. Default: False.
  • pad: The pad size in a formatted string. For example, this affects the indentation of a projection in a query, relative to its nesting level. Default: 2.
  • indent: The indentation size in a formatted string. For example, this affects the indentation of subqueries and filters under a WHERE clause. Default: 2.
  • normalize_functions: How to normalize function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default: ErrorLevel.WARN.
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3.
  • leading_comma: Whether the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
  • comments: Whether to preserve comments in the output SQL code. Default: True
PARAMETER_TOKEN = '$'
MATCHED_BY_SOURCE = False
SINGLE_STRING_INTERVAL = True
JOIN_HINTS = False
TABLE_HINTS = False
QUERY_HINTS = False
AGGREGATE_FILTER_SUPPORTED = False
SUPPORTS_TABLE_COPY = False
COLLATE_IS_FUNC = True
LIMIT_ONLY_LITERALS = True
JSON_KEY_VALUE_PAIR_SEP = ','
INSERT_OVERWRITE = ' OVERWRITE INTO'
STRUCT_DELIMITER = ('(', ')')
COPY_PARAMS_ARE_WRAPPED = False
COPY_PARAMS_EQ_REQUIRED = True
STAR_EXCEPT = 'EXCLUDE'
SUPPORTS_EXPLODING_PROJECTIONS = False
ARRAY_CONCAT_IS_VAR_LEN = False
SUPPORTS_CONVERT_TIMEZONE = True
EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
SUPPORTS_MEDIAN = True
ARRAY_SIZE_NAME = 'ARRAY_SIZE'
TRANSFORMS = {<class 'sqlglot.expressions.JSONPathKey'>: <function <lambda>>, <class 'sqlglot.expressions.JSONPathRoot'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.JSONPathSubscript'>: <function <lambda>>, <class 'sqlglot.expressions.AllowedValuesProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ArrayContainsAll'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ArrayOverlaps'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.AutoRefreshProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.BackupProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CaseSpecificColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CollateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CommentColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ConnectByRoot'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DateFormatColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DefaultColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DynamicProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EmptyProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EncodeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EphemeralColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExcludeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <function Generator.<lambda>>, <class 
'sqlglot.expressions.Except'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExternalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.GlobalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.HeapProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IcebergProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InheritsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InlineLengthColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Intersect'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IntervalSpan'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LanguageProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LocationProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LogProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.MaterializedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NonClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NotForReplicationColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnCommitProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnUpdateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Operator'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OutputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PathColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PivotAny'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ProjectionPolicyColumnConstraint'>: <function Generator.<lambda>>, 
<class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ReturnsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SampleProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SecureProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SecurityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SetConfigProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SettingsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SharingProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlReadWriteProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StabilityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Stream'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StreamingTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StrictProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SwapTable'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TemporaryProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TagColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TitleColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToMap'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransformModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransientProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Union'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UnloggedProperty'>: <function Generator.<lambda>>, <class 
'sqlglot.expressions.Uuid'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.UppercaseColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VarMap'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ViewAttributeProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VolatileProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithProcedureOptions'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithSchemaBindingProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithOperator'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ApproxDistinct'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArgMax'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArgMin'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Array'>: <function inline_array_sql>, <class 'sqlglot.expressions.ArrayConcat'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ArrayContains'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.AtTimeZone'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.BitwiseXor'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Create'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.DateAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.DateDiff'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.DatetimeAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.DatetimeDiff'>: <function timestampdiff_sql>, <class 'sqlglot.expressions.DateStrToDate'>: <function datestrtodate_sql>, <class 'sqlglot.expressions.DayOfMonth'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DayOfWeek'>: 
<function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DayOfYear'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Explode'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Extract'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.FromTimeZone'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.GenerateSeries'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.GroupConcat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.If'>: <function if_sql.<locals>._if_sql>, <class 'sqlglot.expressions.JSONExtractArray'>: <function _json_extract_value_array_sql>, <class 'sqlglot.expressions.JSONExtractScalar'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.JSONObject'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.JSONValueArray'>: <function _json_extract_value_array_sql>, <class 'sqlglot.expressions.LogicalAnd'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.LogicalOr'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Map'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Max'>: <function max_or_greatest>, <class 'sqlglot.expressions.Min'>: <function min_or_least>, <class 'sqlglot.expressions.ParseJSON'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.PartitionedByProperty'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.PercentileCont'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.PercentileDisc'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.Pivot'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.RegexpExtract'>: <function _regexpextract_sql>, <class 'sqlglot.expressions.RegexpExtractAll'>: <function _regexpextract_sql>, <class 'sqlglot.expressions.RegexpILike'>: <function _regexpilike_sql>, <class 
'sqlglot.expressions.Rand'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Select'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.SafeDivide'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.SHA'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StarMap'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StartsWith'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StrPosition'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.StrToDate'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Stuff'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.TimeAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.Timestamp'>: <function no_timestamp_sql>, <class 'sqlglot.expressions.TimestampAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TimestampDiff'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.TimestampTrunc'>: <function timestamptrunc_sql.<locals>._timestamptrunc_sql>, <class 'sqlglot.expressions.TimeStrToTime'>: <function timestrtotime_sql>, <class 'sqlglot.expressions.TimeToStr'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.TimeToUnix'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ToArray'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ToChar'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ToDouble'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.TsOrDsAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TsOrDsDiff'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TsOrDsToDate'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.UnixToTime'>: <function rename_func.<locals>.<lambda>>, <class 
'sqlglot.expressions.WeekOfYear'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Xor'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Levenshtein'>: <function rename_func.<locals>.<lambda>>}
TYPE_MAPPING = {<Type.NCHAR: 'NCHAR'>: 'CHAR', <Type.NVARCHAR: 'NVARCHAR'>: 'VARCHAR', <Type.MEDIUMTEXT: 'MEDIUMTEXT'>: 'TEXT', <Type.LONGTEXT: 'LONGTEXT'>: 'TEXT', <Type.TINYTEXT: 'TINYTEXT'>: 'TEXT', <Type.MEDIUMBLOB: 'MEDIUMBLOB'>: 'BLOB', <Type.LONGBLOB: 'LONGBLOB'>: 'BLOB', <Type.TINYBLOB: 'TINYBLOB'>: 'BLOB', <Type.INET: 'INET'>: 'INET', <Type.ROWVERSION: 'ROWVERSION'>: 'VARBINARY', <Type.NESTED: 'NESTED'>: 'OBJECT', <Type.STRUCT: 'STRUCT'>: 'OBJECT'}
PROPERTIES_LOCATION = {<class 'sqlglot.expressions.AllowedValuesProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.AlgorithmProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.AutoIncrementProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.AutoRefreshProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BackupProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BlockCompressionProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CharacterSetProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ChecksumProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CollateProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Cluster'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ClusteredByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistributedByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DuplicateKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DataBlocksizeProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.DataDeletionProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DefinerProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.DictRange'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DynamicProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.DistKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistStyleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EmptyProperty'>: 
<Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EngineProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExternalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.FallbackProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.FileFormatProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.FreespaceProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.GlobalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.HeapProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.InheritsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.IcebergProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.InputModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.IsolatedLoadingProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.JournalProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.LanguageProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LikeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LocationProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockingProperty'>: <Location.POST_ALIAS: 'POST_ALIAS'>, <class 'sqlglot.expressions.LogProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.MaterializedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.MergeBlockRatioProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.OnProperty'>: <Location.POST_SCHEMA: 
'POST_SCHEMA'>, <class 'sqlglot.expressions.OnCommitProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.Order'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.OutputModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PartitionedByProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.PartitionedOfProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PrimaryKey'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Property'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ReturnsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatDelimitedProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatSerdeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SampleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SchemaCommentProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SecureProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.SecurityProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SerdeProperties'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Set'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SettingsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SetProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.SetConfigProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SharingProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.SequenceProperties'>: 
<Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.SortKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlReadWriteProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.StabilityProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.StreamingTableProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.StrictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TemporaryProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.ToTableProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TransientProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.TransformModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.MergeTreeTTL'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.UnloggedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.ViewAttributeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.VolatileProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.WithDataProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.WithProcedureOptions'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.WithSchemaBindingProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.WithSystemVersioningProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>}
UNSUPPORTED_VALUES_EXPRESSIONS = {<class 'sqlglot.expressions.StarMap'>, <class 'sqlglot.expressions.Map'>, <class 'sqlglot.expressions.VarMap'>, <class 'sqlglot.expressions.Struct'>}
def with_properties(self, properties: exp.Properties) -> str:
    """Render CREATE-statement properties unwrapped and space-separated."""
    prefix = self.sep("")
    return self.properties(properties, wrapped=False, prefix=prefix, sep=" ")
def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
    """Render a VALUES clause, dropping the table form when the expression
    contains constructs Snowflake cannot represent inside VALUES."""
    has_unsupported = expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS) is not None
    return super().values_sql(
        expression, values_as_table=values_as_table and not has_unsupported
    )
def datatype_sql(self, expression: exp.DataType) -> str:
    """Render a data type, collapsing struct types with typed fields to OBJECT.

    The correct Snowflake syntax is OBJECT [ (<key> <value_type> [NOT NULL] [, ...]) ],
    which the base generator cannot produce, so a bare OBJECT is emitted instead.
    """
    fields = expression.expressions
    is_typed_struct = (
        bool(fields)
        and expression.is_type(*exp.DataType.STRUCT_TYPES)
        and any(isinstance(field, exp.DataType) for field in fields)
    )
    if is_typed_struct:
        return "OBJECT"

    return super().datatype_sql(expression)
def tonumber_sql(self, expression: exp.ToNumber) -> str:
    """Render TO_NUMBER(value[, format[, precision[, scale]]])."""
    # self.func drops absent (None) trailing arguments.
    optional = (expression.args.get(key) for key in ("format", "precision", "scale"))
    return self.func("TO_NUMBER", expression.this, *optional)
def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
    """Render TIMESTAMP_FROM_PARTS, folding milliseconds into nanoseconds.

    Snowflake's TIMESTAMP_FROM_PARTS takes nanoseconds rather than
    milliseconds, so `milli` is rewritten as `nano` (1 ms = 1_000_000 ns).
    """
    milli = expression.args.get("milli")
    if milli is not None:
        expression.set("nano", milli.pop() * exp.Literal.number(1000000))

    return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
    """Render casts, mapping GEOGRAPHY/GEOMETRY casts onto Snowflake's
    TO_GEOGRAPHY / TO_GEOMETRY conversion functions."""
    conversions = {
        exp.DataType.Type.GEOGRAPHY: "TO_GEOGRAPHY",
        exp.DataType.Type.GEOMETRY: "TO_GEOMETRY",
    }
    for data_type, func_name in conversions.items():
        if expression.is_type(data_type):
            return self.func(func_name, expression.this)

    return super().cast_sql(expression, safe_prefix=safe_prefix)
def trycast_sql(self, expression: exp.TryCast) -> str:
    """Render TRY_CAST, which Snowflake only supports for string inputs.

    Values with an unknown type are annotated first so the decision is
    based on the inferred type; non-string values fall back to plain CAST.
    """
    value = expression.this

    if value.type is None:
        from sqlglot.optimizer.annotate_types import annotate_types

        value = annotate_types(value)

    is_stringy = value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN)
    if not is_stringy:
        # TRY_CAST only works for string values in Snowflake
        return self.cast_sql(expression)

    return super().trycast_sql(expression)
def log_sql(self, expression: exp.Log) -> str:
    """Render LOG; a single-argument log is the natural logarithm (LN)."""
    if expression.expression:
        return super().log_sql(expression)

    return self.func("LN", expression.this)
def unnest_sql(self, expression: exp.Unnest) -> str:
    """Transpile UNNEST into Snowflake's TABLE(FLATTEN(INPUT => ...)).

    FLATTEN produces the fixed columns SEQ, KEY, PATH, INDEX, VALUE, THIS,
    so an alias naming all six is built (or the existing alias extended),
    reusing a caller-supplied offset alias for INDEX and the alias's first
    column for VALUE.
    """
    unnest_alias = expression.args.get("alias")
    offset = expression.args.get("offset")

    existing_columns = unnest_alias.columns if unnest_alias else []
    index_col = (
        offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index")
    )
    value_col = seq_get(existing_columns, 0) or exp.to_identifier("value")

    columns = [
        exp.to_identifier("seq"),
        exp.to_identifier("key"),
        exp.to_identifier("path"),
        index_col,
        value_col,
        exp.to_identifier("this"),
    ]

    if unnest_alias:
        unnest_alias.set("columns", columns)
    else:
        unnest_alias = exp.TableAlias(this="_u", columns=columns)

    flatten_sql = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
    alias_sql = self.sql(unnest_alias)
    return f"{flatten_sql} AS {alias_sql}" if alias_sql else flatten_sql
def show_sql(self, expression: exp.Show) -> str:
    """Render a Snowflake SHOW command with its optional modifiers."""

    def opt(key: str, template: str) -> str:
        # Render an optional sub-clause, or nothing when it's absent.
        part = self.sql(expression, key)
        return template.format(part) if part else ""

    terse = "TERSE " if expression.args.get("terse") else ""
    history = " HISTORY" if expression.args.get("history") else ""
    like = opt("like", " LIKE {}")
    scope = opt("scope", " {}")
    scope_kind = opt("scope_kind", " IN {}")
    starts_with = opt("starts_with", " STARTS WITH {}")
    limit = self.sql(expression, "limit")
    from_ = opt("from", " FROM {}")

    return (
        f"SHOW {terse}{expression.name}{history}{like}"
        f"{scope_kind}{scope}{starts_with}{limit}{from_}"
    )
def describe_sql(self, expression: exp.Describe) -> str:
    """Render DESCRIBE <kind> <name>, defaulting the kind to TABLE."""
    # Default to table if kind is unknown; the fallback makes it always present.
    kind = f" {expression.args.get('kind') or 'TABLE'}"
    this = f" {self.sql(expression, 'this')}"
    exprs = self.expressions(expression, flat=True)
    suffix = f" {exprs}" if exprs else ""
    return f"DESCRIBE{kind}{this}{suffix}"
def generatedasidentitycolumnconstraint_sql(
    self, expression: exp.GeneratedAsIdentityColumnConstraint
) -> str:
    """Render identity columns as AUTOINCREMENT [START n] [INCREMENT n]."""
    parts = ["AUTOINCREMENT"]

    start = expression.args.get("start")
    if start:
        parts.append(f"START {start}")

    increment = expression.args.get("increment")
    if increment:
        parts.append(f"INCREMENT {increment}")

    return " ".join(parts)
def cluster_sql(self, expression: exp.Cluster) -> str:
    """Snowflake clustering keys are parenthesized: CLUSTER BY (...)."""
    keys = self.expressions(expression, flat=True)
    return f"CLUSTER BY ({keys})"
def struct_sql(self, expression: exp.Struct) -> str:
    """Transpile STRUCT(...) into OBJECT_CONSTRUCT(k1, v1, k2, v2, ...).

    Named fields (PropertyEQ) keep their names as string keys; positional
    fields receive synthetic "_<index>" keys.
    """
    pairs: t.List[exp.Expression] = []

    for i, field in enumerate(expression.expressions):
        if isinstance(field, exp.PropertyEQ):
            if isinstance(field.this, exp.Identifier):
                key: exp.Expression = exp.Literal.string(field.name)
            else:
                key = field.this
            pairs.extend((key, field.expression))
        else:
            pairs.extend((exp.Literal.string(f"_{i}"), field))

    return self.func("OBJECT_CONSTRUCT", *pairs)
@unsupported_args("weight", "accuracy")
def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
    """Snowflake's equivalent is APPROX_PERCENTILE(expr, quantile); the
    weight/accuracy arguments have no counterpart and are flagged."""
    quantile = expression.args.get("quantile")
    return self.func("APPROX_PERCENTILE", expression.this, quantile)
def alterset_sql(self, expression: exp.AlterSet) -> str:
    """Render ALTER ... SET with optional stage file-format, copy options and tags."""
    exprs = self.expressions(expression, flat=True)
    file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
    copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
    tag = self.expressions(expression, key="tag", flat=True)

    parts = [
        f" {exprs}" if exprs else "",
        f" STAGE_FILE_FORMAT = ({file_format})" if file_format else "",
        f" STAGE_COPY_OPTIONS = ({copy_options})" if copy_options else "",
        f" TAG {tag}" if tag else "",
    ]
    return "SET" + "".join(parts)
def strtotime_sql(self, expression: exp.StrToTime):
    """Render STR_TO_TIME as [TRY_]TO_TIMESTAMP(value, format)."""
    name = "TRY_TO_TIMESTAMP" if expression.args.get("safe") else "TO_TIMESTAMP"
    return self.func(name, expression.this, self.format_time(expression))
def timestampsub_sql(self, expression: exp.TimestampSub):
    """Snowflake has no TIMESTAMP_SUB; negate the interval and emit TIMESTAMPADD."""
    negated = expression.expression * -1
    add = exp.TimestampAdd(
        this=expression.this,
        expression=negated,
        unit=expression.unit,
    )
    return self.sql(add)
def jsonextract_sql(self, expression: exp.JSONExtract):
    """Render JSON extraction as GET_PATH(target, path).

    JSON strings are valid coming from other dialects such as BigQuery,
    so a string-literal target is first wrapped in PARSE_JSON.
    """
    target = expression.this
    if target.is_string:
        target = exp.ParseJSON(this=target)

    return self.func("GET_PATH", target, expression.expression)
SELECT_KINDS: Tuple[str, ...] = ()
TRY_SUPPORTED = False
SUPPORTS_UESCAPE = False
AFTER_HAVING_MODIFIER_TRANSFORMS = {'windows': <function Generator.<lambda>>, 'qualify': <function Generator.<lambda>>}
Inherited Members
sqlglot.generator.Generator
Generator
NULL_ORDERING_SUPPORTED
IGNORE_NULLS_IN_FUNC
LOCKING_READS_SUPPORTED
WRAP_DERIVED_VALUES
CREATE_FUNCTION_RETURN_AS
INTERVAL_ALLOWS_PLURAL_FORM
LIMIT_FETCH
RENAME_TABLE_WITH_DB
GROUPINGS_SEP
INDEX_ON
QUERY_HINT_SEP
IS_BOOL_ALLOWED
DUPLICATE_KEY_UPDATE_WITH_SET
LIMIT_IS_TOP
RETURNING_END
EXTRACT_ALLOWS_QUOTES
TZ_TO_WITH_TIME_ZONE
NVL2_SUPPORTED
VALUES_AS_TABLE
ALTER_TABLE_INCLUDE_COLUMN_KEYWORD
UNNEST_WITH_ORDINALITY
SEMI_ANTI_JOIN_WITH_SIDE
COMPUTED_COLUMN_WITH_TYPE
TABLESAMPLE_REQUIRES_PARENS
TABLESAMPLE_SIZE_IS_ROWS
TABLESAMPLE_KEYWORDS
TABLESAMPLE_WITH_METHOD
TABLESAMPLE_SEED_KEYWORD
DATA_TYPE_SPECIFIERS_ALLOWED
ENSURE_BOOLS
CTE_RECURSIVE_KEYWORD_REQUIRED
SUPPORTS_SINGLE_ARG_CONCAT
LAST_DAY_SUPPORTS_DATE_PART
SUPPORTS_TABLE_ALIAS_COLUMNS
UNPIVOT_ALIASES_ARE_IDENTIFIERS
SUPPORTS_SELECT_INTO
SUPPORTS_UNLOGGED_TABLES
SUPPORTS_CREATE_TABLE_LIKE
LIKE_PROPERTY_INSIDE_SCHEMA
MULTI_ARG_DISTINCT
JSON_TYPE_REQUIRED_FOR_EXTRACTION
JSON_PATH_BRACKETED_KEY_SUPPORTED
JSON_PATH_SINGLE_QUOTE_ESCAPE
CAN_IMPLEMENT_ARRAY_ANY
SUPPORTS_TO_NUMBER
SET_OP_MODIFIERS
COPY_HAS_INTO_KEYWORD
HEX_FUNC
WITH_PROPERTIES_PREFIX
QUOTE_JSON_PATH
PAD_FILL_PATTERN_IS_REQUIRED
SUPPORTS_UNIX_SECONDS
PARSE_JSON_NAME
ARRAY_SIZE_DIM_REQUIRED
TIME_PART_SINGULARS
TOKEN_MAPPING
NAMED_PLACEHOLDER_TOKEN
RESERVED_KEYWORDS
WITH_SEPARATED_COMMENTS
EXCLUDE_COMMENTS
UNWRAPPED_INTERVAL_VALUES
PARAMETERIZABLE_TEXT_TYPES
EXPRESSIONS_WITHOUT_NESTED_CTES
SENTINEL_LINE_BREAK
pretty
identify
normalize
pad
unsupported_level
max_unsupported
leading_comma
max_text_width
comments
dialect
normalize_functions
unsupported_messages
generate
preprocess
unsupported
sep
seg
pad_comment
maybe_comment
wrap
no_identify
normalize_func
indent
sql
uncache_sql
cache_sql
characterset_sql
column_parts
column_sql
columnposition_sql
columndef_sql
columnconstraint_sql
computedcolumnconstraint_sql
autoincrementcolumnconstraint_sql
compresscolumnconstraint_sql
generatedasrowcolumnconstraint_sql
periodforsystemtimeconstraint_sql
notnullcolumnconstraint_sql
transformcolumnconstraint_sql
primarykeycolumnconstraint_sql
uniquecolumnconstraint_sql
createable_sql
create_sql
sequenceproperties_sql
clone_sql
heredoc_sql
prepend_ctes
with_sql
cte_sql
tablealias_sql
bitstring_sql
hexstring_sql
bytestring_sql
unicodestring_sql
rawstring_sql
datatypeparam_sql
directory_sql
delete_sql
drop_sql
set_operation
set_operations
fetch_sql
filter_sql
hint_sql
indexparameters_sql
index_sql
identifier_sql
hex_sql
lowerhex_sql
inputoutputformat_sql
national_sql
partition_sql
properties_sql
root_properties
properties
locate_properties
property_name
property_sql
likeproperty_sql
fallbackproperty_sql
journalproperty_sql
freespaceproperty_sql
checksumproperty_sql
mergeblockratioproperty_sql
datablocksizeproperty_sql
blockcompressionproperty_sql
isolatedloadingproperty_sql
partitionboundspec_sql
partitionedofproperty_sql
lockingproperty_sql
withdataproperty_sql
withsystemversioningproperty_sql
insert_sql
introducer_sql
kill_sql
pseudotype_sql
objectidentifier_sql
onconflict_sql
returning_sql
rowformatdelimitedproperty_sql
withtablehint_sql
indextablehint_sql
historicaldata_sql
table_parts
table_sql
tablesample_sql
pivot_sql
version_sql
tuple_sql
update_sql
var_sql
into_sql
from_sql
groupingsets_sql
rollup_sql
cube_sql
group_sql
having_sql
connect_sql
prior_sql
join_sql
lambda_sql
lateral_op
lateral_sql
limit_sql
offset_sql
setitem_sql
set_sql
pragma_sql
lock_sql
literal_sql
escape_str
loaddata_sql
null_sql
boolean_sql
order_sql
withfill_sql
distribute_sql
sort_sql
ordered_sql
matchrecognizemeasure_sql
matchrecognize_sql
query_modifiers
options_modifier
queryoption_sql
offset_limit_modifiers
after_limit_modifiers
select_sql
schema_sql
schema_columns_sql
star_sql
parameter_sql
sessionparameter_sql
placeholder_sql
subquery_sql
qualify_sql
prewhere_sql
where_sql
window_sql
partition_by_sql
windowspec_sql
withingroup_sql
between_sql
bracket_offset_expressions
bracket_sql
all_sql
any_sql
exists_sql
case_sql
constraint_sql
nextvaluefor_sql
extract_sql
trim_sql
convert_concat_args
concat_sql
concatws_sql
check_sql
foreignkey_sql
primarykey_sql
if_sql
matchagainst_sql
jsonkeyvalue_sql
jsonpath_sql
json_path_part
formatjson_sql
jsonobject_sql
jsonobjectagg_sql
jsonarray_sql
jsonarrayagg_sql
jsoncolumndef_sql
jsonschema_sql
jsontable_sql
openjsoncolumndef_sql
openjson_sql
in_sql
in_unnest_op
interval_sql
return_sql
reference_sql
anonymous_sql
paren_sql
neg_sql
not_sql
alias_sql
pivotalias_sql
aliases_sql
atindex_sql
attimezone_sql
fromtimezone_sql
add_sql
and_sql
or_sql
xor_sql
connector_sql
bitwiseand_sql
bitwiseleftshift_sql
bitwisenot_sql
bitwiseor_sql
bitwiserightshift_sql
bitwisexor_sql
currentdate_sql
collate_sql
command_sql
comment_sql
mergetreettlaction_sql
mergetreettl_sql
transaction_sql
commit_sql
rollback_sql
altercolumn_sql
alterdiststyle_sql
altersortkey_sql
alterrename_sql
renamecolumn_sql
alter_sql
add_column_sql
droppartition_sql
addconstraint_sql
distinct_sql
ignorenulls_sql
respectnulls_sql
havingmax_sql
intdiv_sql
dpipe_sql
div_sql
overlaps_sql
distance_sql
dot_sql
eq_sql
propertyeq_sql
escape_sql
glob_sql
gt_sql
gte_sql
ilike_sql
ilikeany_sql
is_sql
like_sql
likeany_sql
similarto_sql
lt_sql
lte_sql
mod_sql
mul_sql
neq_sql
nullsafeeq_sql
nullsafeneq_sql
slice_sql
sub_sql
try_sql
use_sql
binary
function_fallback_sql
func
format_args
too_wide
format_time
expressions
op_expressions
naked_property
tag_sql
token_sql
userdefinedfunction_sql
joinhint_sql
kwarg_sql
when_sql
merge_sql
tochar_sql
dictproperty_sql
dictrange_sql
dictsubproperty_sql
duplicatekeyproperty_sql
distributedbyproperty_sql
oncluster_sql
clusteredbyproperty_sql
anyvalue_sql
querytransform_sql
indexconstraintoption_sql
checkcolumnconstraint_sql
indexcolumnconstraint_sql
nvl2_sql
comprehension_sql
columnprefix_sql
opclass_sql
predict_sql
forin_sql
refresh_sql
toarray_sql
tsordstotime_sql
tsordstotimestamp_sql
tsordstodate_sql
unixdate_sql
lastday_sql
dateadd_sql
arrayany_sql
partitionrange_sql
truncatetable_sql
convert_sql
copyparameter_sql
credentials_sql
copy_sql
semicolon_sql
datadeletionproperty_sql
maskingpolicycolumnconstraint_sql
gapfill_sql
scope_resolution
scoperesolution_sql
parsejson_sql
rand_sql
changes_sql
pad_sql
summarize_sql
explodinggenerateseries_sql
arrayconcat_sql
converttimezone_sql
json_sql
jsonvalue_sql
conditionalinsert_sql
multitableinserts_sql
oncondition_sql
jsonexists_sql
arrayagg_sql
apply_sql
grant_sql
grantprivilege_sql
grantprincipal_sql
columns_sql
overlay_sql
todouble_sql
string_sql
median_sql
overflowtruncatebehavior_sql
unixseconds_sql
arraysize_sql