sqlglot.tokens
from __future__ import annotations

import typing as t
from enum import auto

from sqlglot.helper import AutoName
from sqlglot.trie import in_trie, new_trie


class TokenType(AutoName):
    L_PAREN = auto()
    R_PAREN = auto()
    L_BRACKET = auto()
    R_BRACKET = auto()
    L_BRACE = auto()
    R_BRACE = auto()
    COMMA = auto()
    DOT = auto()
    DASH = auto()
    PLUS = auto()
    COLON = auto()
    DCOLON = auto()
    SEMICOLON = auto()
    STAR = auto()
    BACKSLASH = auto()
    SLASH = auto()
    LT = auto()
    LTE = auto()
    GT = auto()
    GTE = auto()
    NOT = auto()
    EQ = auto()
    NEQ = auto()
    NULLSAFE_EQ = auto()
    AND = auto()
    OR = auto()
    AMP = auto()
    DPIPE = auto()
    PIPE = auto()
    CARET = auto()
    TILDA = auto()
    ARROW = auto()
    DARROW = auto()
    FARROW = auto()
    HASH = auto()
    HASH_ARROW = auto()
    DHASH_ARROW = auto()
    LR_ARROW = auto()
    LT_AT = auto()
    AT_GT = auto()
    DOLLAR = auto()
    PARAMETER = auto()
    SESSION_PARAMETER = auto()
    NATIONAL = auto()
    DAMP = auto()

    BLOCK_START = auto()
    BLOCK_END = auto()

    SPACE = auto()
    BREAK = auto()

    STRING = auto()
    NUMBER = auto()
    IDENTIFIER = auto()
    DATABASE = auto()
    COLUMN = auto()
    COLUMN_DEF = auto()
    SCHEMA = auto()
    TABLE = auto()
    VAR = auto()
    BIT_STRING = auto()
    HEX_STRING = auto()
    BYTE_STRING = auto()

    # types
    BIT = auto()
    BOOLEAN = auto()
    TINYINT = auto()
    UTINYINT = auto()
    SMALLINT = auto()
    USMALLINT = auto()
    INT = auto()
    UINT = auto()
    BIGINT = auto()
    UBIGINT = auto()
    INT128 = auto()
    UINT128 = auto()
    INT256 = auto()
    UINT256 = auto()
    FLOAT = auto()
    DOUBLE = auto()
    DECIMAL = auto()
    BIGDECIMAL = auto()
    CHAR = auto()
    NCHAR = auto()
    VARCHAR = auto()
    NVARCHAR = auto()
    TEXT = auto()
    MEDIUMTEXT = auto()
    LONGTEXT = auto()
    MEDIUMBLOB = auto()
    LONGBLOB = auto()
    BINARY = auto()
    VARBINARY = auto()
    JSON = auto()
    JSONB = auto()
    TIME = auto()
    TIMESTAMP = auto()
    TIMESTAMPTZ = auto()
    TIMESTAMPLTZ = auto()
    DATETIME = auto()
    DATETIME64 = auto()
    DATE = auto()
    UUID = auto()
    GEOGRAPHY = auto()
    NULLABLE = auto()
    GEOMETRY = auto()
    HLLSKETCH = auto()
    HSTORE = auto()
    SUPER = auto()
    SERIAL = auto()
    SMALLSERIAL = auto()
    BIGSERIAL = auto()
    XML = auto()
    UNIQUEIDENTIFIER = auto()
    MONEY = auto()
    SMALLMONEY = auto()
    ROWVERSION = auto()
    IMAGE = auto()
    VARIANT = auto()
    OBJECT = auto()
    INET = auto()

    # keywords
    ALIAS = auto()
    ALTER = auto()
    ALWAYS = auto()
    ALL = auto()
    ANTI = auto()
    ANY = auto()
    APPLY = auto()
    ARRAY = auto()
    ASC = auto()
    ASOF = auto()
    AT_TIME_ZONE = auto()
    AUTO_INCREMENT = auto()
    BEGIN = auto()
    BETWEEN = auto()
    BOTH = auto()
    BUCKET = auto()
    BY_DEFAULT = auto()
    CACHE = auto()
    CASCADE = auto()
    CASE = auto()
    CHARACTER_SET = auto()
    CLUSTER_BY = auto()
    COLLATE = auto()
    COMMAND = auto()
    COMMENT = auto()
    COMMIT = auto()
    COMPOUND = auto()
    CONSTRAINT = auto()
    CREATE = auto()
    CROSS = auto()
    CUBE = auto()
    CURRENT_DATE = auto()
    CURRENT_DATETIME = auto()
    CURRENT_ROW = auto()
    CURRENT_TIME = auto()
    CURRENT_TIMESTAMP = auto()
    CURRENT_USER = auto()
    DEFAULT = auto()
    DELETE = auto()
    DESC = auto()
    DESCRIBE = auto()
    DISTINCT = auto()
    DISTINCT_FROM = auto()
    DISTRIBUTE_BY = auto()
    DIV = auto()
    DROP = auto()
    ELSE = auto()
    END = auto()
    ESCAPE = auto()
    EXCEPT = auto()
    EXECUTE = auto()
    EXISTS = auto()
    FALSE = auto()
    FETCH = auto()
    FILTER = auto()
    FINAL = auto()
    FIRST = auto()
    FOLLOWING = auto()
    FOR = auto()
    FOREIGN_KEY = auto()
    FORMAT = auto()
    FROM = auto()
    FULL = auto()
    FUNCTION = auto()
    GLOB = auto()
    GLOBAL = auto()
    GROUP_BY = auto()
    GROUPING_SETS = auto()
    HAVING = auto()
    HINT = auto()
    IF = auto()
    IGNORE_NULLS = auto()
    ILIKE = auto()
    ILIKE_ANY = auto()
    IN = auto()
    INDEX = auto()
    INNER = auto()
    INSERT = auto()
    INTERSECT = auto()
    INTERVAL = auto()
    INTO = auto()
    INTRODUCER = auto()
    IRLIKE = auto()
    IS = auto()
    ISNULL = auto()
    JOIN = auto()
    JOIN_MARKER = auto()
    KEEP = auto()
    LANGUAGE = auto()
    LATERAL = auto()
    LAZY = auto()
    LEADING = auto()
    LEFT = auto()
    LIKE = auto()
    LIKE_ANY = auto()
    LIMIT = auto()
    LOAD_DATA = auto()
    LOCAL = auto()
    MAP = auto()
    MATCH_RECOGNIZE = auto()
    MATERIALIZED = auto()
    MERGE = auto()
    MOD = auto()
    NATURAL = auto()
    NEXT = auto()
    NEXT_VALUE_FOR = auto()
    NO_ACTION = auto()
    NOTNULL = auto()
    NULL = auto()
    NULLS_FIRST = auto()
    NULLS_LAST = auto()
    OFFSET = auto()
    ON = auto()
    ONLY = auto()
    OPTIONS = auto()
    ORDER_BY = auto()
    ORDERED = auto()
    ORDINALITY = auto()
    OUTER = auto()
    OUT_OF = auto()
    OVER = auto()
    OVERLAPS = auto()
    OVERWRITE = auto()
    PARTITION = auto()
    PARTITION_BY = auto()
    PERCENT = auto()
    PIVOT = auto()
    PLACEHOLDER = auto()
    PRAGMA = auto()
    PRECEDING = auto()
    PRIMARY_KEY = auto()
    PROCEDURE = auto()
    PROPERTIES = auto()
    PSEUDO_TYPE = auto()
    QUALIFY = auto()
    QUOTE = auto()
    RANGE = auto()
    RECURSIVE = auto()
    REPLACE = auto()
    RESPECT_NULLS = auto()
    RETURNING = auto()
    REFERENCES = auto()
    RIGHT = auto()
    RLIKE = auto()
    ROLLBACK = auto()
    ROLLUP = auto()
    ROW = auto()
    ROWS = auto()
    SEED = auto()
    SELECT = auto()
    SEMI = auto()
    SEPARATOR = auto()
    SERDE_PROPERTIES = auto()
    SET = auto()
    SETTINGS = auto()
    SHOW = auto()
    SIMILAR_TO = auto()
    SOME = auto()
    SORTKEY = auto()
    SORT_BY = auto()
    STRUCT = auto()
    TABLE_SAMPLE = auto()
    TEMPORARY = auto()
    TOP = auto()
    THEN = auto()
    TRAILING = auto()
    TRUE = auto()
    UNBOUNDED = auto()
    UNCACHE = auto()
    UNION = auto()
    UNLOGGED = auto()
    UNNEST = auto()
    UNPIVOT = auto()
    UPDATE = auto()
    USE = auto()
    USING = auto()
    VALUES = auto()
    VIEW = auto()
    VOLATILE = auto()
    WHEN = auto()
    WHERE = auto()
    WINDOW = auto()
    WITH = auto()
    WITH_TIME_ZONE = auto()
    WITH_LOCAL_TIME_ZONE = auto()
    WITHIN_GROUP = auto()
    WITHOUT_TIME_ZONE = auto()
    UNIQUE = auto()


class Token:
    __slots__ = ("token_type", "text", "line", "col", "end", "comments")

    @classmethod
    def number(cls, number: int) -> Token:
        """Returns a NUMBER token with `number` as its text."""
        return cls(TokenType.NUMBER, str(number))

    @classmethod
    def string(cls, string: str) -> Token:
        """Returns a STRING token with `string` as its text."""
        return cls(TokenType.STRING, string)

    @classmethod
    def identifier(cls, identifier: str) -> Token:
        """Returns an IDENTIFIER token with `identifier` as its text."""
        return cls(TokenType.IDENTIFIER, identifier)

    @classmethod
    def var(cls, var: str) -> Token:
        """Returns a VAR token with `var` as its text."""
        return cls(TokenType.VAR, var)

    def __init__(
        self,
        token_type: TokenType,
        text: str,
        line: int = 1,
        col: int = 1,
        end: int = 0,
        comments: t.List[str] = [],
    ) -> None:
        self.token_type = token_type
        self.text = text
        self.line = line
        size = len(text)
        self.col = col
        self.end = end if end else size
        self.comments = comments

    @property
    def start(self) -> int:
        """Returns the start of the token."""
        return self.end - len(self.text)

    def __repr__(self) -> str:
        attributes = ", ".join(f"{k}: {getattr(self, k)}" for k in self.__slots__)
        return f"<Token {attributes}>"


class _Tokenizer(type):
    def __new__(cls, clsname, bases, attrs):
        klass = super().__new__(cls, clsname, bases, attrs)

        klass._QUOTES = {
            f"{prefix}{s}": e
            for s, e in cls._delimeter_list_to_dict(klass.QUOTES).items()
            for prefix in (("",) if s[0].isalpha() else ("", "n", "N"))
        }
        klass._BIT_STRINGS = cls._delimeter_list_to_dict(klass.BIT_STRINGS)
        klass._HEX_STRINGS = cls._delimeter_list_to_dict(klass.HEX_STRINGS)
        klass._BYTE_STRINGS = cls._delimeter_list_to_dict(klass.BYTE_STRINGS)
        klass._IDENTIFIERS = cls._delimeter_list_to_dict(klass.IDENTIFIERS)
        klass._STRING_ESCAPES = set(klass.STRING_ESCAPES)
        klass._IDENTIFIER_ESCAPES = set(klass.IDENTIFIER_ESCAPES)
        klass._COMMENTS = dict(
            (comment, None) if isinstance(comment, str) else (comment[0], comment[1])
            for comment in klass.COMMENTS
        )

        klass.KEYWORD_TRIE = new_trie(
            key.upper()
            for key in {
                **klass.KEYWORDS,
                **{comment: TokenType.COMMENT for comment in klass._COMMENTS},
                **{quote: TokenType.QUOTE for quote in klass._QUOTES},
                **{bit_string: TokenType.BIT_STRING for bit_string in klass._BIT_STRINGS},
                **{hex_string: TokenType.HEX_STRING for hex_string in klass._HEX_STRINGS},
                **{byte_string: TokenType.BYTE_STRING for byte_string in klass._BYTE_STRINGS},
            }
            if " " in key or any(single in key for single in klass.SINGLE_TOKENS)
        )

        return klass

    @staticmethod
    def _delimeter_list_to_dict(list: t.List[str | t.Tuple[str, str]]) -> t.Dict[str, str]:
        return dict((item, item) if isinstance(item, str) else (item[0], item[1]) for item in list)


class Tokenizer(metaclass=_Tokenizer):
    SINGLE_TOKENS = {
        "(": TokenType.L_PAREN,
        ")": TokenType.R_PAREN,
        "[": TokenType.L_BRACKET,
        "]": TokenType.R_BRACKET,
        "{": TokenType.L_BRACE,
        "}": TokenType.R_BRACE,
        "&": TokenType.AMP,
        "^": TokenType.CARET,
        ":": TokenType.COLON,
        ",": TokenType.COMMA,
        ".": TokenType.DOT,
        "-": TokenType.DASH,
        "=": TokenType.EQ,
        ">": TokenType.GT,
        "<": TokenType.LT,
        "%": TokenType.MOD,
        "!": TokenType.NOT,
        "|": TokenType.PIPE,
        "+": TokenType.PLUS,
        ";": TokenType.SEMICOLON,
        "/": TokenType.SLASH,
        "\\": TokenType.BACKSLASH,
        "*": TokenType.STAR,
        "~": TokenType.TILDA,
        "?": TokenType.PLACEHOLDER,
        "@": TokenType.PARAMETER,
        # used for breaking a var like x'y' but nothing else
        # the token type doesn't matter
        "'": TokenType.QUOTE,
        "`": TokenType.IDENTIFIER,
        '"': TokenType.IDENTIFIER,
        "#": TokenType.HASH,
    }

    BIT_STRINGS: t.List[str | t.Tuple[str, str]] = []
    BYTE_STRINGS: t.List[str | t.Tuple[str, str]] = []
    HEX_STRINGS: t.List[str | t.Tuple[str, str]] = []
    IDENTIFIERS: t.List[str | t.Tuple[str, str]] = ['"']
    IDENTIFIER_ESCAPES = ['"']
    QUOTES: t.List[t.Tuple[str, str] | str] = ["'"]
    STRING_ESCAPES = ["'"]
    VAR_SINGLE_TOKENS: t.Set[str] = set()

    _COMMENTS: t.Dict[str, str] = {}
    _BIT_STRINGS: t.Dict[str, str] = {}
    _BYTE_STRINGS: t.Dict[str, str] = {}
    _HEX_STRINGS: t.Dict[str, str] = {}
    _IDENTIFIERS: t.Dict[str, str] = {}
    _IDENTIFIER_ESCAPES: t.Set[str] = set()
    _QUOTES: t.Dict[str, str] = {}
    _STRING_ESCAPES: t.Set[str] = set()

    KEYWORDS: t.Dict[t.Optional[str], TokenType] = {
        **{f"{{%{postfix}": TokenType.BLOCK_START for postfix in ("", "+", "-")},
        **{f"{prefix}%}}": TokenType.BLOCK_END for prefix in ("", "+", "-")},
        "{{+": TokenType.BLOCK_START,
        "{{-": TokenType.BLOCK_START,
        "+}}": TokenType.BLOCK_END,
        "-}}": TokenType.BLOCK_END,
        "/*+": TokenType.HINT,
        "==": TokenType.EQ,
        "::": TokenType.DCOLON,
        "||": TokenType.DPIPE,
        ">=": TokenType.GTE,
        "<=": TokenType.LTE,
        "<>": TokenType.NEQ,
        "!=": TokenType.NEQ,
        "<=>": TokenType.NULLSAFE_EQ,
        "->": TokenType.ARROW,
        "->>": TokenType.DARROW,
        "=>": TokenType.FARROW,
        "#>": TokenType.HASH_ARROW,
        "#>>": TokenType.DHASH_ARROW,
        "<->": TokenType.LR_ARROW,
        "&&": TokenType.DAMP,
        "ALL": TokenType.ALL,
        "ALWAYS": TokenType.ALWAYS,
        "AND": TokenType.AND,
        "ANTI": TokenType.ANTI,
        "ANY": TokenType.ANY,
        "ASC": TokenType.ASC,
        "AS": TokenType.ALIAS,
        "AT TIME ZONE": TokenType.AT_TIME_ZONE,
        "AUTOINCREMENT": TokenType.AUTO_INCREMENT,
        "AUTO_INCREMENT": TokenType.AUTO_INCREMENT,
        "BEGIN": TokenType.BEGIN,
        "BETWEEN": TokenType.BETWEEN,
        "BOTH": TokenType.BOTH,
        "BUCKET": TokenType.BUCKET,
        "BY DEFAULT": TokenType.BY_DEFAULT,
        "CACHE": TokenType.CACHE,
        "UNCACHE": TokenType.UNCACHE,
        "CASE": TokenType.CASE,
        "CASCADE": TokenType.CASCADE,
        "CHARACTER SET": TokenType.CHARACTER_SET,
        "CLUSTER BY": TokenType.CLUSTER_BY,
        "COLLATE": TokenType.COLLATE,
        "COLUMN": TokenType.COLUMN,
        "COMMIT": TokenType.COMMIT,
        "COMPOUND": TokenType.COMPOUND,
        "CONSTRAINT": TokenType.CONSTRAINT,
        "CREATE": TokenType.CREATE,
        "CROSS": TokenType.CROSS,
        "CUBE": TokenType.CUBE,
        "CURRENT_DATE": TokenType.CURRENT_DATE,
        "CURRENT ROW": TokenType.CURRENT_ROW,
        "CURRENT_TIME": TokenType.CURRENT_TIME,
        "CURRENT_TIMESTAMP": TokenType.CURRENT_TIMESTAMP,
        "CURRENT_USER": TokenType.CURRENT_USER,
        "DATABASE": TokenType.DATABASE,
        "DEFAULT": TokenType.DEFAULT,
        "DELETE": TokenType.DELETE,
        "DESC": TokenType.DESC,
        "DESCRIBE": TokenType.DESCRIBE,
        "DISTINCT": TokenType.DISTINCT,
        "DISTINCT FROM": TokenType.DISTINCT_FROM,
        "DISTRIBUTE BY": TokenType.DISTRIBUTE_BY,
        "DIV": TokenType.DIV,
        "DROP": TokenType.DROP,
        "ELSE": TokenType.ELSE,
        "END": TokenType.END,
        "ESCAPE": TokenType.ESCAPE,
        "EXCEPT": TokenType.EXCEPT,
        "EXECUTE": TokenType.EXECUTE,
        "EXISTS": TokenType.EXISTS,
        "FALSE": TokenType.FALSE,
        "FETCH": TokenType.FETCH,
        "FILTER": TokenType.FILTER,
        "FIRST": TokenType.FIRST,
        "FULL": TokenType.FULL,
        "FUNCTION": TokenType.FUNCTION,
        "FOLLOWING": TokenType.FOLLOWING,
        "FOR": TokenType.FOR,
        "FOREIGN KEY": TokenType.FOREIGN_KEY,
        "FORMAT": TokenType.FORMAT,
        "FROM": TokenType.FROM,
        "GLOB": TokenType.GLOB,
        "GROUP BY": TokenType.GROUP_BY,
        "GROUPING SETS": TokenType.GROUPING_SETS,
        "HAVING": TokenType.HAVING,
        "IF": TokenType.IF,
        "ILIKE": TokenType.ILIKE,
        "IGNORE NULLS": TokenType.IGNORE_NULLS,
        "IN": TokenType.IN,
        "INDEX": TokenType.INDEX,
        "INET": TokenType.INET,
        "INNER": TokenType.INNER,
        "INSERT": TokenType.INSERT,
        "INTERVAL": TokenType.INTERVAL,
        "INTERSECT": TokenType.INTERSECT,
        "INTO": TokenType.INTO,
        "IS": TokenType.IS,
        "ISNULL": TokenType.ISNULL,
        "JOIN": TokenType.JOIN,
        "KEEP": TokenType.KEEP,
        "LATERAL": TokenType.LATERAL,
        "LAZY": TokenType.LAZY,
        "LEADING": TokenType.LEADING,
        "LEFT": TokenType.LEFT,
        "LIKE": TokenType.LIKE,
        "LIMIT": TokenType.LIMIT,
        "LOAD DATA": TokenType.LOAD_DATA,
        "LOCAL": TokenType.LOCAL,
        "MATERIALIZED": TokenType.MATERIALIZED,
        "MERGE": TokenType.MERGE,
        "NATURAL": TokenType.NATURAL,
        "NEXT": TokenType.NEXT,
        "NEXT VALUE FOR": TokenType.NEXT_VALUE_FOR,
        "NO ACTION": TokenType.NO_ACTION,
        "NOT": TokenType.NOT,
        "NOTNULL": TokenType.NOTNULL,
        "NULL": TokenType.NULL,
        "NULLS FIRST": TokenType.NULLS_FIRST,
        "NULLS LAST": TokenType.NULLS_LAST,
        "OBJECT": TokenType.OBJECT,
        "OFFSET": TokenType.OFFSET,
        "ON": TokenType.ON,
        "ONLY": TokenType.ONLY,
        "OPTIONS": TokenType.OPTIONS,
        "OR": TokenType.OR,
        "ORDER BY": TokenType.ORDER_BY,
        "ORDINALITY": TokenType.ORDINALITY,
        "OUTER": TokenType.OUTER,
        "OUT OF": TokenType.OUT_OF,
        "OVER": TokenType.OVER,
        "OVERLAPS": TokenType.OVERLAPS,
        "OVERWRITE": TokenType.OVERWRITE,
        "PARTITION": TokenType.PARTITION,
        "PARTITION BY": TokenType.PARTITION_BY,
        "PARTITIONED BY": TokenType.PARTITION_BY,
        "PARTITIONED_BY": TokenType.PARTITION_BY,
        "PERCENT": TokenType.PERCENT,
        "PIVOT": TokenType.PIVOT,
        "PRAGMA": TokenType.PRAGMA,
        "PRECEDING": TokenType.PRECEDING,
        "PRIMARY KEY": TokenType.PRIMARY_KEY,
        "PROCEDURE": TokenType.PROCEDURE,
        "QUALIFY": TokenType.QUALIFY,
        "RANGE": TokenType.RANGE,
        "RECURSIVE": TokenType.RECURSIVE,
        "REGEXP": TokenType.RLIKE,
        "REPLACE": TokenType.REPLACE,
        "RESPECT NULLS": TokenType.RESPECT_NULLS,
        "REFERENCES": TokenType.REFERENCES,
        "RIGHT": TokenType.RIGHT,
        "RLIKE": TokenType.RLIKE,
        "ROLLBACK": TokenType.ROLLBACK,
        "ROLLUP": TokenType.ROLLUP,
        "ROW": TokenType.ROW,
        "ROWS": TokenType.ROWS,
        "SCHEMA": TokenType.SCHEMA,
        "SEED": TokenType.SEED,
        "SELECT": TokenType.SELECT,
        "SEMI": TokenType.SEMI,
        "SET": TokenType.SET,
        "SETTINGS": TokenType.SETTINGS,
        "SHOW": TokenType.SHOW,
        "SIMILAR TO": TokenType.SIMILAR_TO,
        "SOME": TokenType.SOME,
        "SORTKEY": TokenType.SORTKEY,
        "SORT BY": TokenType.SORT_BY,
        "TABLE": TokenType.TABLE,
        "TABLESAMPLE": TokenType.TABLE_SAMPLE,
        "TEMP": TokenType.TEMPORARY,
        "TEMPORARY": TokenType.TEMPORARY,
        "THEN": TokenType.THEN,
        "TRUE": TokenType.TRUE,
        "TRAILING": TokenType.TRAILING,
        "UNBOUNDED": TokenType.UNBOUNDED,
        "UNION": TokenType.UNION,
        "UNLOGGED": TokenType.UNLOGGED,
        "UNNEST": TokenType.UNNEST,
        "UNPIVOT": TokenType.UNPIVOT,
        "UPDATE": TokenType.UPDATE,
        "USE": TokenType.USE,
        "USING": TokenType.USING,
        "UUID": TokenType.UUID,
        "VALUES": TokenType.VALUES,
        "VIEW": TokenType.VIEW,
        "VOLATILE": TokenType.VOLATILE,
        "WHEN": TokenType.WHEN,
        "WHERE": TokenType.WHERE,
        "WINDOW": TokenType.WINDOW,
        "WITH": TokenType.WITH,
        "WITH TIME ZONE": TokenType.WITH_TIME_ZONE,
        "WITH LOCAL TIME ZONE": TokenType.WITH_LOCAL_TIME_ZONE,
        "WITHIN GROUP": TokenType.WITHIN_GROUP,
        "WITHOUT TIME ZONE": TokenType.WITHOUT_TIME_ZONE,
        "APPLY": TokenType.APPLY,
        "ARRAY": TokenType.ARRAY,
        "BIT": TokenType.BIT,
        "BOOL": TokenType.BOOLEAN,
        "BOOLEAN": TokenType.BOOLEAN,
        "BYTE": TokenType.TINYINT,
        "TINYINT": TokenType.TINYINT,
        "SHORT": TokenType.SMALLINT,
        "SMALLINT": TokenType.SMALLINT,
        "INT2": TokenType.SMALLINT,
        "INTEGER": TokenType.INT,
        "INT": TokenType.INT,
        "INT4": TokenType.INT,
        "LONG": TokenType.BIGINT,
        "BIGINT": TokenType.BIGINT,
        "INT8": TokenType.BIGINT,
        "DEC": TokenType.DECIMAL,
        "DECIMAL": TokenType.DECIMAL,
        "BIGDECIMAL": TokenType.BIGDECIMAL,
        "BIGNUMERIC": TokenType.BIGDECIMAL,
        "MAP": TokenType.MAP,
        "NULLABLE": TokenType.NULLABLE,
        "NUMBER": TokenType.DECIMAL,
        "NUMERIC": TokenType.DECIMAL,
        "FIXED": TokenType.DECIMAL,
        "REAL": TokenType.FLOAT,
        "FLOAT": TokenType.FLOAT,
        "FLOAT4": TokenType.FLOAT,
        "FLOAT8": TokenType.DOUBLE,
        "DOUBLE": TokenType.DOUBLE,
        "DOUBLE PRECISION": TokenType.DOUBLE,
        "JSON": TokenType.JSON,
        "CHAR": TokenType.CHAR,
        "CHARACTER": TokenType.CHAR,
        "NCHAR": TokenType.NCHAR,
        "VARCHAR": TokenType.VARCHAR,
        "VARCHAR2": TokenType.VARCHAR,
        "NVARCHAR": TokenType.NVARCHAR,
        "NVARCHAR2": TokenType.NVARCHAR,
        "STR": TokenType.TEXT,
        "STRING": TokenType.TEXT,
        "TEXT": TokenType.TEXT,
        "CLOB": TokenType.TEXT,
        "LONGVARCHAR": TokenType.TEXT,
        "BINARY": TokenType.BINARY,
        "BLOB": TokenType.VARBINARY,
        "BYTEA": TokenType.VARBINARY,
        "VARBINARY": TokenType.VARBINARY,
        "TIME": TokenType.TIME,
        "TIMESTAMP": TokenType.TIMESTAMP,
        "TIMESTAMPTZ": TokenType.TIMESTAMPTZ,
        "TIMESTAMPLTZ": TokenType.TIMESTAMPLTZ,
        "DATE": TokenType.DATE,
        "DATETIME": TokenType.DATETIME,
        "UNIQUE": TokenType.UNIQUE,
        "STRUCT": TokenType.STRUCT,
        "VARIANT": TokenType.VARIANT,
        "ALTER": TokenType.ALTER,
        "ALTER AGGREGATE": TokenType.COMMAND,
        "ALTER DEFAULT": TokenType.COMMAND,
        "ALTER DOMAIN": TokenType.COMMAND,
        "ALTER ROLE": TokenType.COMMAND,
        "ALTER RULE": TokenType.COMMAND,
        "ALTER SEQUENCE": TokenType.COMMAND,
        "ALTER TYPE": TokenType.COMMAND,
        "ALTER USER": TokenType.COMMAND,
        "ALTER VIEW": TokenType.COMMAND,
        "ANALYZE": TokenType.COMMAND,
        "CALL": TokenType.COMMAND,
        "COMMENT": TokenType.COMMENT,
        "COPY": TokenType.COMMAND,
        "EXPLAIN": TokenType.COMMAND,
        "GRANT": TokenType.COMMAND,
        "OPTIMIZE": TokenType.COMMAND,
        "PREPARE": TokenType.COMMAND,
        "TRUNCATE": TokenType.COMMAND,
        "VACUUM": TokenType.COMMAND,
    }

    WHITE_SPACE: t.Dict[t.Optional[str], TokenType] = {
        " ": TokenType.SPACE,
        "\t": TokenType.SPACE,
        "\n": TokenType.BREAK,
        "\r": TokenType.BREAK,
        "\r\n": TokenType.BREAK,
    }

    COMMANDS = {
        TokenType.COMMAND,
        TokenType.EXECUTE,
        TokenType.FETCH,
        TokenType.SHOW,
    }

    COMMAND_PREFIX_TOKENS = {TokenType.SEMICOLON, TokenType.BEGIN}

    # handle numeric literals like in hive (3L = BIGINT)
    NUMERIC_LITERALS: t.Dict[str, str] = {}
    ENCODE: t.Optional[str] = None

    COMMENTS = ["--", ("/*", "*/"), ("{#", "#}")]
    KEYWORD_TRIE: t.Dict = {}  # autofilled

    IDENTIFIER_CAN_START_WITH_DIGIT = False

    __slots__ = (
        "sql",
        "size",
        "tokens",
        "_start",
        "_current",
        "_line",
        "_col",
        "_comments",
        "_char",
        "_end",
        "_peek",
        "_prev_token_line",
    )

    def __init__(self) -> None:
        self.reset()

    def reset(self) -> None:
        self.sql = ""
        self.size = 0
        self.tokens: t.List[Token] = []
        self._start = 0
        self._current = 0
        self._line = 1
        self._col = 1
        self._comments: t.List[str] = []

        self._char = ""
        self._end = False
        self._peek = ""
        self._prev_token_line = -1

    def tokenize(self, sql: str) -> t.List[Token]:
        """Returns a list of tokens corresponding to the SQL string `sql`."""
        self.reset()
        self.sql = sql
        self.size = len(sql)
        try:
            self._scan()
        except Exception as e:
            start = self._current - 50
            end = self._current + 50
            start = start if start > 0 else 0
            end = end if end < self.size else self.size - 1
            context = self.sql[start:end]
            raise ValueError(f"Error tokenizing '{context}'") from e

        return self.tokens

    def _scan(self, until: t.Optional[t.Callable] = None) -> None:
        while self.size and not self._end:
            self._start = self._current
            self._advance()

            if self._char is None:
                break

            if self._char not in self.WHITE_SPACE:
                if self._char.isdigit():
                    self._scan_number()
                elif self._char in self._IDENTIFIERS:
                    self._scan_identifier(self._IDENTIFIERS[self._char])
                else:
                    self._scan_keywords()

            if until and until():
                break

        if self.tokens:
            self.tokens[-1].comments.extend(self._comments)

    def _chars(self, size: int) -> str:
        if size == 1:
            return self._char
        start = self._current - 1
        end = start + size
        if end <= self.size:
            return self.sql[start:end]
        return ""

    def _advance(self, i: int = 1, alnum: bool = False) -> None:
        if self.WHITE_SPACE.get(self._char) is TokenType.BREAK:
            self._col = 1
            self._line += 1
        else:
            self._col += i

        self._current += i
        self._end = self._current >= self.size
        self._char = self.sql[self._current - 1]
        self._peek = "" if self._end else self.sql[self._current]

        if alnum and self._char.isalnum():
            _col = self._col
            _current = self._current
            _end = self._end
            _peek = self._peek

            while _peek.isalnum():
                _col += 1
                _current += 1
                _end = _current >= self.size
                _peek = "" if _end else self.sql[_current]

            self._col = _col
            self._current = _current
            self._end = _end
            self._peek = _peek
            self._char = self.sql[_current - 1]

    @property
    def _text(self) -> str:
        return self.sql[self._start : self._current]

    def _add(self, token_type: TokenType, text: t.Optional[str] = None) -> None:
        self._prev_token_line = self._line
        self.tokens.append(
            Token(
                token_type,
                self._text if text is None else text,
                self._line,
                self._col,
                self._current,
                self._comments,
            )
        )
        self._comments = []

        # If we have either a semicolon or a begin token before the command's token, we'll parse
        # whatever follows the command's token as a string
        if (
            token_type in self.COMMANDS
            and self._peek != ";"
            and (len(self.tokens) == 1 or self.tokens[-2].token_type in self.COMMAND_PREFIX_TOKENS)
        ):
            start = self._current
            tokens = len(self.tokens)
            self._scan(lambda: self._peek == ";")
            self.tokens = self.tokens[:tokens]
            text = self.sql[start : self._current].strip()
            if text:
                self._add(TokenType.STRING, text)

    def _scan_keywords(self) -> None:
        size = 0
        word = None
        chars = self._text
        char = chars
        prev_space = False
        skip = False
        trie = self.KEYWORD_TRIE
        single_token = char in self.SINGLE_TOKENS

        while chars:
            if skip:
                result = 1
            else:
                result, trie = in_trie(trie, char.upper())

            if result == 0:
                break
            if result == 2:
                word = chars

            size += 1
            end = self._current - 1 + size

            if end < self.size:
                char = self.sql[end]
                single_token = single_token or char in self.SINGLE_TOKENS
                is_space = char in self.WHITE_SPACE

                if not is_space or not prev_space:
                    if is_space:
                        char = " "
                    chars += char
                    prev_space = is_space
                    skip = False
                else:
                    skip = True
            else:
                char = ""
                chars = " "

        word = None if not single_token and chars[-1] not in self.WHITE_SPACE else word

        if not word:
            if self._char in self.SINGLE_TOKENS:
                self._add(self.SINGLE_TOKENS[self._char], text=self._char)
                return
            self._scan_var()
            return

        if self._scan_string(word):
            return
        if self._scan_formatted_string(word):
            return
        if self._scan_comment(word):
            return

        self._advance(size - 1)
        word = word.upper()
        self._add(self.KEYWORDS[word], text=word)

    def _scan_comment(self, comment_start: str) -> bool:
        if comment_start not in self._COMMENTS:
            return False

        comment_start_line = self._line
        comment_start_size = len(comment_start)
        comment_end = self._COMMENTS[comment_start]

        if comment_end:
            # Skip the comment's start delimiter
            self._advance(comment_start_size)

            comment_end_size = len(comment_end)
            while not self._end and self._chars(comment_end_size) != comment_end:
                self._advance(alnum=True)

            self._comments.append(self._text[comment_start_size : -comment_end_size + 1])
            self._advance(comment_end_size - 1)
        else:
            while not self._end and not self.WHITE_SPACE.get(self._peek) is TokenType.BREAK:
                self._advance(alnum=True)
            self._comments.append(self._text[comment_start_size:])

        # Leading comment is attached to the succeeding token, whilst trailing comment to the preceding.
        # Multiple consecutive comments are preserved by appending them to the current comments list.
        if comment_start_line == self._prev_token_line:
            self.tokens[-1].comments.extend(self._comments)
            self._comments = []
            self._prev_token_line = self._line

        return True

    def _scan_number(self) -> None:
        if self._char == "0":
            peek = self._peek.upper()
            if peek == "B":
                return self._scan_bits() if self._BIT_STRINGS else self._add(TokenType.NUMBER)
            elif peek == "X":
                return self._scan_hex() if self._HEX_STRINGS else self._add(TokenType.NUMBER)

        decimal = False
        scientific = 0

        while True:
            if self._peek.isdigit():
                self._advance()
            elif self._peek == "." and not decimal:
                decimal = True
                self._advance()
            elif self._peek in ("-", "+") and scientific == 1:
                scientific += 1
                self._advance()
            elif self._peek.upper() == "E" and not scientific:
                scientific += 1
                self._advance()
            elif self._peek.isidentifier():
                number_text = self._text
                literal = ""

                while self._peek.strip() and self._peek not in self.SINGLE_TOKENS:
                    literal += self._peek.upper()
                    self._advance()

                token_type = self.KEYWORDS.get(self.NUMERIC_LITERALS.get(literal))

                if token_type:
                    self._add(TokenType.NUMBER, number_text)
                    self._add(TokenType.DCOLON, "::")
                    return self._add(token_type, literal)
                elif self.IDENTIFIER_CAN_START_WITH_DIGIT:
                    return self._add(TokenType.VAR)

                self._add(TokenType.NUMBER, number_text)
                return self._advance(-len(literal))
            else:
                return self._add(TokenType.NUMBER)

    def _scan_bits(self) -> None:
        self._advance()
        value = self._extract_value()
        try:
            # If `value` can't be converted to a binary, fallback to tokenizing it as an identifier
            int(value, 2)
            self._add(TokenType.BIT_STRING, value[2:])  # Drop the 0b
        except ValueError:
            self._add(TokenType.IDENTIFIER)

    def _scan_hex(self) -> None:
        self._advance()
        value = self._extract_value()
        try:
            # If `value` can't be converted to a hex, fallback to tokenizing it as an identifier
            int(value, 16)
            self._add(TokenType.HEX_STRING, value[2:])  # Drop the 0x
        except ValueError:
            self._add(TokenType.IDENTIFIER)

    def _extract_value(self) -> str:
        while True:
            char = self._peek.strip()
            if char and char not in self.SINGLE_TOKENS:
                self._advance(alnum=True)
            else:
                break

        return self._text

    def _scan_string(self, quote: str) -> bool:
        quote_end = self._QUOTES.get(quote)
        if quote_end is None:
            return False

        self._advance(len(quote))
        text = self._extract_string(quote_end)
        text = text.encode(self.ENCODE).decode(self.ENCODE) if self.ENCODE else text
        self._add(TokenType.NATIONAL if quote[0].upper() == "N" else TokenType.STRING, text)
        return True

    # X'1234', b'0110', E'\\\\\' etc.
    def _scan_formatted_string(self, string_start: str) -> bool:
        if string_start in self._HEX_STRINGS:
            delimiters = self._HEX_STRINGS
            token_type = TokenType.HEX_STRING
            base = 16
        elif string_start in self._BIT_STRINGS:
            delimiters = self._BIT_STRINGS
            token_type = TokenType.BIT_STRING
            base = 2
        elif string_start in self._BYTE_STRINGS:
            delimiters = self._BYTE_STRINGS
            token_type = TokenType.BYTE_STRING
            base = None
        else:
            return False

        self._advance(len(string_start))
        string_end = delimiters[string_start]
        text = self._extract_string(string_end)

        if base:
            try:
                int(text, base)
            except:
                raise RuntimeError(
                    f"Numeric string contains invalid characters from {self._line}:{self._start}"
                )

        self._add(token_type, text)
        return True

    def _scan_identifier(self, identifier_end: str) -> None:
        self._advance()
        text = self._extract_string(identifier_end, self._IDENTIFIER_ESCAPES)
        self._add(TokenType.IDENTIFIER, text)

    def _scan_var(self) -> None:
        while True:
            char = self._peek.strip()
            if char and (char in self.VAR_SINGLE_TOKENS or char not in self.SINGLE_TOKENS):
                self._advance(alnum=True)
            else:
                break

        self._add(
            TokenType.VAR
            if self.tokens and self.tokens[-1].token_type == TokenType.PARAMETER
            else self.KEYWORDS.get(self._text.upper(), TokenType.VAR)
        )

    def _extract_string(self, delimiter: str, escapes=None) -> str:
        text = ""
        delim_size = len(delimiter)
        escapes = self._STRING_ESCAPES if escapes is None else escapes

        while True:
            if self._char in escapes and (self._peek == delimiter or self._peek in escapes):
                if self._peek == delimiter:
                    text += self._peek
                else:
                    text += self._char + self._peek

                if self._current + 1 < self.size:
                    self._advance(2)
                else:
                    raise RuntimeError(f"Missing {delimiter} from {self._line}:{self._current}")
            else:
                if self._chars(delim_size) == delimiter:
                    if delim_size > 1:
                        self._advance(delim_size - 1)
                    break

                if self._end:
                    raise RuntimeError(f"Missing {delimiter} from {self._line}:{self._start}")

                current = self._current - 1
                self._advance(alnum=True)
                text += self.sql[current : self._current - 1]

        return text
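The typical entry point is Tokenizer.tokenize, documented above as returning "a list of tokens corresponding to the SQL string `sql`". A minimal usage sketch (not part of the module source; it assumes sqlglot is installed and importable):

from sqlglot.tokens import Tokenizer

tokens = Tokenizer().tokenize("SELECT a + 1 AS b  -- projection")

# Each Token records its type, text, and position, per the Token class above.
for token in tokens:
    print(token.token_type, repr(token.text), token.line, token.col)

# Per _scan_comment, a trailing comment on the same line is attached to the
# preceding token's `comments` list instead of being emitted as its own token.
assert tokens[-1].comments == [" projection"]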
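The Token classmethods (number, string, identifier, var) are small conveniences over the constructor. A short sketch of how they and the start property behave, based on the definitions above:

from sqlglot.tokens import Token, TokenType

token = Token.number(42)  # equivalent to Token(TokenType.NUMBER, "42")
assert token.token_type is TokenType.NUMBER and token.text == "42"

# When `end` is omitted it defaults to len(text), so `start` works out to 0 here.
assert token.start == token.end - len(token.text) == 0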
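Dialect tokenizers in sqlglot customize behavior by subclassing Tokenizer and overriding the class-level tables; the _Tokenizer metaclass then rebuilds the derived lookups (_QUOTES, _IDENTIFIERS, and so on) and KEYWORD_TRIE for the subclass. A hypothetical subclass as an illustration (MyTokenizer and its MINUS mapping are invented for this sketch, not taken from a real dialect):

from sqlglot.tokens import Tokenizer, TokenType

class MyTokenizer(Tokenizer):
    IDENTIFIERS = ['"', "`"]  # also treat backticks as identifier quotes
    KEYWORDS = {**Tokenizer.KEYWORDS, "MINUS": TokenType.EXCEPT}

tokens = MyTokenizer().tokenize("SELECT `x` MINUS SELECT `y`")
assert tokens[1].token_type is TokenType.IDENTIFIER  # `x`
assert any(token.token_type is TokenType.EXCEPT for token in tokens)

Note that MINUS never enters KEYWORD_TRIE (the trie only holds keys containing a space or a single-token character); it is still recognized because _scan_var falls back to a KEYWORDS lookup on the scanned word.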
class
Tokenizer:
417class Tokenizer(metaclass=_Tokenizer): 418 SINGLE_TOKENS = { 419 "(": TokenType.L_PAREN, 420 ")": TokenType.R_PAREN, 421 "[": TokenType.L_BRACKET, 422 "]": TokenType.R_BRACKET, 423 "{": TokenType.L_BRACE, 424 "}": TokenType.R_BRACE, 425 "&": TokenType.AMP, 426 "^": TokenType.CARET, 427 ":": TokenType.COLON, 428 ",": TokenType.COMMA, 429 ".": TokenType.DOT, 430 "-": TokenType.DASH, 431 "=": TokenType.EQ, 432 ">": TokenType.GT, 433 "<": TokenType.LT, 434 "%": TokenType.MOD, 435 "!": TokenType.NOT, 436 "|": TokenType.PIPE, 437 "+": TokenType.PLUS, 438 ";": TokenType.SEMICOLON, 439 "/": TokenType.SLASH, 440 "\\": TokenType.BACKSLASH, 441 "*": TokenType.STAR, 442 "~": TokenType.TILDA, 443 "?": TokenType.PLACEHOLDER, 444 "@": TokenType.PARAMETER, 445 # used for breaking a var like x'y' but nothing else 446 # the token type doesn't matter 447 "'": TokenType.QUOTE, 448 "`": TokenType.IDENTIFIER, 449 '"': TokenType.IDENTIFIER, 450 "#": TokenType.HASH, 451 } 452 453 BIT_STRINGS: t.List[str | t.Tuple[str, str]] = [] 454 BYTE_STRINGS: t.List[str | t.Tuple[str, str]] = [] 455 HEX_STRINGS: t.List[str | t.Tuple[str, str]] = [] 456 IDENTIFIERS: t.List[str | t.Tuple[str, str]] = ['"'] 457 IDENTIFIER_ESCAPES = ['"'] 458 QUOTES: t.List[t.Tuple[str, str] | str] = ["'"] 459 STRING_ESCAPES = ["'"] 460 VAR_SINGLE_TOKENS: t.Set[str] = set() 461 462 _COMMENTS: t.Dict[str, str] = {} 463 _BIT_STRINGS: t.Dict[str, str] = {} 464 _BYTE_STRINGS: t.Dict[str, str] = {} 465 _HEX_STRINGS: t.Dict[str, str] = {} 466 _IDENTIFIERS: t.Dict[str, str] = {} 467 _IDENTIFIER_ESCAPES: t.Set[str] = set() 468 _QUOTES: t.Dict[str, str] = {} 469 _STRING_ESCAPES: t.Set[str] = set() 470 471 KEYWORDS: t.Dict[t.Optional[str], TokenType] = { 472 **{f"{{%{postfix}": TokenType.BLOCK_START for postfix in ("", "+", "-")}, 473 **{f"{prefix}%}}": TokenType.BLOCK_END for prefix in ("", "+", "-")}, 474 "{{+": TokenType.BLOCK_START, 475 "{{-": TokenType.BLOCK_START, 476 "+}}": TokenType.BLOCK_END, 477 "-}}": TokenType.BLOCK_END, 478 "/*+": TokenType.HINT, 479 "==": TokenType.EQ, 480 "::": TokenType.DCOLON, 481 "||": TokenType.DPIPE, 482 ">=": TokenType.GTE, 483 "<=": TokenType.LTE, 484 "<>": TokenType.NEQ, 485 "!=": TokenType.NEQ, 486 "<=>": TokenType.NULLSAFE_EQ, 487 "->": TokenType.ARROW, 488 "->>": TokenType.DARROW, 489 "=>": TokenType.FARROW, 490 "#>": TokenType.HASH_ARROW, 491 "#>>": TokenType.DHASH_ARROW, 492 "<->": TokenType.LR_ARROW, 493 "&&": TokenType.DAMP, 494 "ALL": TokenType.ALL, 495 "ALWAYS": TokenType.ALWAYS, 496 "AND": TokenType.AND, 497 "ANTI": TokenType.ANTI, 498 "ANY": TokenType.ANY, 499 "ASC": TokenType.ASC, 500 "AS": TokenType.ALIAS, 501 "AT TIME ZONE": TokenType.AT_TIME_ZONE, 502 "AUTOINCREMENT": TokenType.AUTO_INCREMENT, 503 "AUTO_INCREMENT": TokenType.AUTO_INCREMENT, 504 "BEGIN": TokenType.BEGIN, 505 "BETWEEN": TokenType.BETWEEN, 506 "BOTH": TokenType.BOTH, 507 "BUCKET": TokenType.BUCKET, 508 "BY DEFAULT": TokenType.BY_DEFAULT, 509 "CACHE": TokenType.CACHE, 510 "UNCACHE": TokenType.UNCACHE, 511 "CASE": TokenType.CASE, 512 "CASCADE": TokenType.CASCADE, 513 "CHARACTER SET": TokenType.CHARACTER_SET, 514 "CLUSTER BY": TokenType.CLUSTER_BY, 515 "COLLATE": TokenType.COLLATE, 516 "COLUMN": TokenType.COLUMN, 517 "COMMIT": TokenType.COMMIT, 518 "COMPOUND": TokenType.COMPOUND, 519 "CONSTRAINT": TokenType.CONSTRAINT, 520 "CREATE": TokenType.CREATE, 521 "CROSS": TokenType.CROSS, 522 "CUBE": TokenType.CUBE, 523 "CURRENT_DATE": TokenType.CURRENT_DATE, 524 "CURRENT ROW": TokenType.CURRENT_ROW, 525 "CURRENT_TIME": TokenType.CURRENT_TIME, 526 
"CURRENT_TIMESTAMP": TokenType.CURRENT_TIMESTAMP, 527 "CURRENT_USER": TokenType.CURRENT_USER, 528 "DATABASE": TokenType.DATABASE, 529 "DEFAULT": TokenType.DEFAULT, 530 "DELETE": TokenType.DELETE, 531 "DESC": TokenType.DESC, 532 "DESCRIBE": TokenType.DESCRIBE, 533 "DISTINCT": TokenType.DISTINCT, 534 "DISTINCT FROM": TokenType.DISTINCT_FROM, 535 "DISTRIBUTE BY": TokenType.DISTRIBUTE_BY, 536 "DIV": TokenType.DIV, 537 "DROP": TokenType.DROP, 538 "ELSE": TokenType.ELSE, 539 "END": TokenType.END, 540 "ESCAPE": TokenType.ESCAPE, 541 "EXCEPT": TokenType.EXCEPT, 542 "EXECUTE": TokenType.EXECUTE, 543 "EXISTS": TokenType.EXISTS, 544 "FALSE": TokenType.FALSE, 545 "FETCH": TokenType.FETCH, 546 "FILTER": TokenType.FILTER, 547 "FIRST": TokenType.FIRST, 548 "FULL": TokenType.FULL, 549 "FUNCTION": TokenType.FUNCTION, 550 "FOLLOWING": TokenType.FOLLOWING, 551 "FOR": TokenType.FOR, 552 "FOREIGN KEY": TokenType.FOREIGN_KEY, 553 "FORMAT": TokenType.FORMAT, 554 "FROM": TokenType.FROM, 555 "GLOB": TokenType.GLOB, 556 "GROUP BY": TokenType.GROUP_BY, 557 "GROUPING SETS": TokenType.GROUPING_SETS, 558 "HAVING": TokenType.HAVING, 559 "IF": TokenType.IF, 560 "ILIKE": TokenType.ILIKE, 561 "IGNORE NULLS": TokenType.IGNORE_NULLS, 562 "IN": TokenType.IN, 563 "INDEX": TokenType.INDEX, 564 "INET": TokenType.INET, 565 "INNER": TokenType.INNER, 566 "INSERT": TokenType.INSERT, 567 "INTERVAL": TokenType.INTERVAL, 568 "INTERSECT": TokenType.INTERSECT, 569 "INTO": TokenType.INTO, 570 "IS": TokenType.IS, 571 "ISNULL": TokenType.ISNULL, 572 "JOIN": TokenType.JOIN, 573 "KEEP": TokenType.KEEP, 574 "LATERAL": TokenType.LATERAL, 575 "LAZY": TokenType.LAZY, 576 "LEADING": TokenType.LEADING, 577 "LEFT": TokenType.LEFT, 578 "LIKE": TokenType.LIKE, 579 "LIMIT": TokenType.LIMIT, 580 "LOAD DATA": TokenType.LOAD_DATA, 581 "LOCAL": TokenType.LOCAL, 582 "MATERIALIZED": TokenType.MATERIALIZED, 583 "MERGE": TokenType.MERGE, 584 "NATURAL": TokenType.NATURAL, 585 "NEXT": TokenType.NEXT, 586 "NEXT VALUE FOR": TokenType.NEXT_VALUE_FOR, 587 "NO ACTION": TokenType.NO_ACTION, 588 "NOT": TokenType.NOT, 589 "NOTNULL": TokenType.NOTNULL, 590 "NULL": TokenType.NULL, 591 "NULLS FIRST": TokenType.NULLS_FIRST, 592 "NULLS LAST": TokenType.NULLS_LAST, 593 "OBJECT": TokenType.OBJECT, 594 "OFFSET": TokenType.OFFSET, 595 "ON": TokenType.ON, 596 "ONLY": TokenType.ONLY, 597 "OPTIONS": TokenType.OPTIONS, 598 "OR": TokenType.OR, 599 "ORDER BY": TokenType.ORDER_BY, 600 "ORDINALITY": TokenType.ORDINALITY, 601 "OUTER": TokenType.OUTER, 602 "OUT OF": TokenType.OUT_OF, 603 "OVER": TokenType.OVER, 604 "OVERLAPS": TokenType.OVERLAPS, 605 "OVERWRITE": TokenType.OVERWRITE, 606 "PARTITION": TokenType.PARTITION, 607 "PARTITION BY": TokenType.PARTITION_BY, 608 "PARTITIONED BY": TokenType.PARTITION_BY, 609 "PARTITIONED_BY": TokenType.PARTITION_BY, 610 "PERCENT": TokenType.PERCENT, 611 "PIVOT": TokenType.PIVOT, 612 "PRAGMA": TokenType.PRAGMA, 613 "PRECEDING": TokenType.PRECEDING, 614 "PRIMARY KEY": TokenType.PRIMARY_KEY, 615 "PROCEDURE": TokenType.PROCEDURE, 616 "QUALIFY": TokenType.QUALIFY, 617 "RANGE": TokenType.RANGE, 618 "RECURSIVE": TokenType.RECURSIVE, 619 "REGEXP": TokenType.RLIKE, 620 "REPLACE": TokenType.REPLACE, 621 "RESPECT NULLS": TokenType.RESPECT_NULLS, 622 "REFERENCES": TokenType.REFERENCES, 623 "RIGHT": TokenType.RIGHT, 624 "RLIKE": TokenType.RLIKE, 625 "ROLLBACK": TokenType.ROLLBACK, 626 "ROLLUP": TokenType.ROLLUP, 627 "ROW": TokenType.ROW, 628 "ROWS": TokenType.ROWS, 629 "SCHEMA": TokenType.SCHEMA, 630 "SEED": TokenType.SEED, 631 "SELECT": TokenType.SELECT, 632 
"SEMI": TokenType.SEMI, 633 "SET": TokenType.SET, 634 "SETTINGS": TokenType.SETTINGS, 635 "SHOW": TokenType.SHOW, 636 "SIMILAR TO": TokenType.SIMILAR_TO, 637 "SOME": TokenType.SOME, 638 "SORTKEY": TokenType.SORTKEY, 639 "SORT BY": TokenType.SORT_BY, 640 "TABLE": TokenType.TABLE, 641 "TABLESAMPLE": TokenType.TABLE_SAMPLE, 642 "TEMP": TokenType.TEMPORARY, 643 "TEMPORARY": TokenType.TEMPORARY, 644 "THEN": TokenType.THEN, 645 "TRUE": TokenType.TRUE, 646 "TRAILING": TokenType.TRAILING, 647 "UNBOUNDED": TokenType.UNBOUNDED, 648 "UNION": TokenType.UNION, 649 "UNLOGGED": TokenType.UNLOGGED, 650 "UNNEST": TokenType.UNNEST, 651 "UNPIVOT": TokenType.UNPIVOT, 652 "UPDATE": TokenType.UPDATE, 653 "USE": TokenType.USE, 654 "USING": TokenType.USING, 655 "UUID": TokenType.UUID, 656 "VALUES": TokenType.VALUES, 657 "VIEW": TokenType.VIEW, 658 "VOLATILE": TokenType.VOLATILE, 659 "WHEN": TokenType.WHEN, 660 "WHERE": TokenType.WHERE, 661 "WINDOW": TokenType.WINDOW, 662 "WITH": TokenType.WITH, 663 "WITH TIME ZONE": TokenType.WITH_TIME_ZONE, 664 "WITH LOCAL TIME ZONE": TokenType.WITH_LOCAL_TIME_ZONE, 665 "WITHIN GROUP": TokenType.WITHIN_GROUP, 666 "WITHOUT TIME ZONE": TokenType.WITHOUT_TIME_ZONE, 667 "APPLY": TokenType.APPLY, 668 "ARRAY": TokenType.ARRAY, 669 "BIT": TokenType.BIT, 670 "BOOL": TokenType.BOOLEAN, 671 "BOOLEAN": TokenType.BOOLEAN, 672 "BYTE": TokenType.TINYINT, 673 "TINYINT": TokenType.TINYINT, 674 "SHORT": TokenType.SMALLINT, 675 "SMALLINT": TokenType.SMALLINT, 676 "INT2": TokenType.SMALLINT, 677 "INTEGER": TokenType.INT, 678 "INT": TokenType.INT, 679 "INT4": TokenType.INT, 680 "LONG": TokenType.BIGINT, 681 "BIGINT": TokenType.BIGINT, 682 "INT8": TokenType.BIGINT, 683 "DEC": TokenType.DECIMAL, 684 "DECIMAL": TokenType.DECIMAL, 685 "BIGDECIMAL": TokenType.BIGDECIMAL, 686 "BIGNUMERIC": TokenType.BIGDECIMAL, 687 "MAP": TokenType.MAP, 688 "NULLABLE": TokenType.NULLABLE, 689 "NUMBER": TokenType.DECIMAL, 690 "NUMERIC": TokenType.DECIMAL, 691 "FIXED": TokenType.DECIMAL, 692 "REAL": TokenType.FLOAT, 693 "FLOAT": TokenType.FLOAT, 694 "FLOAT4": TokenType.FLOAT, 695 "FLOAT8": TokenType.DOUBLE, 696 "DOUBLE": TokenType.DOUBLE, 697 "DOUBLE PRECISION": TokenType.DOUBLE, 698 "JSON": TokenType.JSON, 699 "CHAR": TokenType.CHAR, 700 "CHARACTER": TokenType.CHAR, 701 "NCHAR": TokenType.NCHAR, 702 "VARCHAR": TokenType.VARCHAR, 703 "VARCHAR2": TokenType.VARCHAR, 704 "NVARCHAR": TokenType.NVARCHAR, 705 "NVARCHAR2": TokenType.NVARCHAR, 706 "STR": TokenType.TEXT, 707 "STRING": TokenType.TEXT, 708 "TEXT": TokenType.TEXT, 709 "CLOB": TokenType.TEXT, 710 "LONGVARCHAR": TokenType.TEXT, 711 "BINARY": TokenType.BINARY, 712 "BLOB": TokenType.VARBINARY, 713 "BYTEA": TokenType.VARBINARY, 714 "VARBINARY": TokenType.VARBINARY, 715 "TIME": TokenType.TIME, 716 "TIMESTAMP": TokenType.TIMESTAMP, 717 "TIMESTAMPTZ": TokenType.TIMESTAMPTZ, 718 "TIMESTAMPLTZ": TokenType.TIMESTAMPLTZ, 719 "DATE": TokenType.DATE, 720 "DATETIME": TokenType.DATETIME, 721 "UNIQUE": TokenType.UNIQUE, 722 "STRUCT": TokenType.STRUCT, 723 "VARIANT": TokenType.VARIANT, 724 "ALTER": TokenType.ALTER, 725 "ALTER AGGREGATE": TokenType.COMMAND, 726 "ALTER DEFAULT": TokenType.COMMAND, 727 "ALTER DOMAIN": TokenType.COMMAND, 728 "ALTER ROLE": TokenType.COMMAND, 729 "ALTER RULE": TokenType.COMMAND, 730 "ALTER SEQUENCE": TokenType.COMMAND, 731 "ALTER TYPE": TokenType.COMMAND, 732 "ALTER USER": TokenType.COMMAND, 733 "ALTER VIEW": TokenType.COMMAND, 734 "ANALYZE": TokenType.COMMAND, 735 "CALL": TokenType.COMMAND, 736 "COMMENT": TokenType.COMMENT, 737 "COPY": TokenType.COMMAND, 
738 "EXPLAIN": TokenType.COMMAND, 739 "GRANT": TokenType.COMMAND, 740 "OPTIMIZE": TokenType.COMMAND, 741 "PREPARE": TokenType.COMMAND, 742 "TRUNCATE": TokenType.COMMAND, 743 "VACUUM": TokenType.COMMAND, 744 } 745 746 WHITE_SPACE: t.Dict[t.Optional[str], TokenType] = { 747 " ": TokenType.SPACE, 748 "\t": TokenType.SPACE, 749 "\n": TokenType.BREAK, 750 "\r": TokenType.BREAK, 751 "\r\n": TokenType.BREAK, 752 } 753 754 COMMANDS = { 755 TokenType.COMMAND, 756 TokenType.EXECUTE, 757 TokenType.FETCH, 758 TokenType.SHOW, 759 } 760 761 COMMAND_PREFIX_TOKENS = {TokenType.SEMICOLON, TokenType.BEGIN} 762 763 # handle numeric literals like in hive (3L = BIGINT) 764 NUMERIC_LITERALS: t.Dict[str, str] = {} 765 ENCODE: t.Optional[str] = None 766 767 COMMENTS = ["--", ("/*", "*/"), ("{#", "#}")] 768 KEYWORD_TRIE: t.Dict = {} # autofilled 769 770 IDENTIFIER_CAN_START_WITH_DIGIT = False 771 772 __slots__ = ( 773 "sql", 774 "size", 775 "tokens", 776 "_start", 777 "_current", 778 "_line", 779 "_col", 780 "_comments", 781 "_char", 782 "_end", 783 "_peek", 784 "_prev_token_line", 785 ) 786 787 def __init__(self) -> None: 788 self.reset() 789 790 def reset(self) -> None: 791 self.sql = "" 792 self.size = 0 793 self.tokens: t.List[Token] = [] 794 self._start = 0 795 self._current = 0 796 self._line = 1 797 self._col = 1 798 self._comments: t.List[str] = [] 799 800 self._char = "" 801 self._end = False 802 self._peek = "" 803 self._prev_token_line = -1 804 805 def tokenize(self, sql: str) -> t.List[Token]: 806 """Returns a list of tokens corresponding to the SQL string `sql`.""" 807 self.reset() 808 self.sql = sql 809 self.size = len(sql) 810 try: 811 self._scan() 812 except Exception as e: 813 start = self._current - 50 814 end = self._current + 50 815 start = start if start > 0 else 0 816 end = end if end < self.size else self.size - 1 817 context = self.sql[start:end] 818 raise ValueError(f"Error tokenizing '{context}'") from e 819 820 return self.tokens 821 822 def _scan(self, until: t.Optional[t.Callable] = None) -> None: 823 while self.size and not self._end: 824 self._start = self._current 825 self._advance() 826 827 if self._char is None: 828 break 829 830 if self._char not in self.WHITE_SPACE: 831 if self._char.isdigit(): 832 self._scan_number() 833 elif self._char in self._IDENTIFIERS: 834 self._scan_identifier(self._IDENTIFIERS[self._char]) 835 else: 836 self._scan_keywords() 837 838 if until and until(): 839 break 840 841 if self.tokens: 842 self.tokens[-1].comments.extend(self._comments) 843 844 def _chars(self, size: int) -> str: 845 if size == 1: 846 return self._char 847 start = self._current - 1 848 end = start + size 849 if end <= self.size: 850 return self.sql[start:end] 851 return "" 852 853 def _advance(self, i: int = 1, alnum: bool = False) -> None: 854 if self.WHITE_SPACE.get(self._char) is TokenType.BREAK: 855 self._col = 1 856 self._line += 1 857 else: 858 self._col += i 859 860 self._current += i 861 self._end = self._current >= self.size 862 self._char = self.sql[self._current - 1] 863 self._peek = "" if self._end else self.sql[self._current] 864 865 if alnum and self._char.isalnum(): 866 _col = self._col 867 _current = self._current 868 _end = self._end 869 _peek = self._peek 870 871 while _peek.isalnum(): 872 _col += 1 873 _current += 1 874 _end = _current >= self.size 875 _peek = "" if _end else self.sql[_current] 876 877 self._col = _col 878 self._current = _current 879 self._end = _end 880 self._peek = _peek 881 self._char = self.sql[_current - 1] 882 883 @property 884 def 
    @property
    def _text(self) -> str:
        return self.sql[self._start : self._current]

    def _add(self, token_type: TokenType, text: t.Optional[str] = None) -> None:
        self._prev_token_line = self._line
        self.tokens.append(
            Token(
                token_type,
                self._text if text is None else text,
                self._line,
                self._col,
                self._current,
                self._comments,
            )
        )
        self._comments = []

        # If we have either a semicolon or a begin token before the command's token, we'll parse
        # whatever follows the command's token as a string
        if (
            token_type in self.COMMANDS
            and self._peek != ";"
            and (len(self.tokens) == 1 or self.tokens[-2].token_type in self.COMMAND_PREFIX_TOKENS)
        ):
            start = self._current
            tokens = len(self.tokens)
            self._scan(lambda: self._peek == ";")
            self.tokens = self.tokens[:tokens]
            text = self.sql[start : self._current].strip()

            if text:
                self._add(TokenType.STRING, text)
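    # Illustrative example (not part of the original source): given the command
    # handling above, tokenizing "SHOW TABLES" yields a SHOW token followed by a
    # single STRING token with text "TABLES", because everything after a command
    # token, up to the next semicolon, is swallowed as one string.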
    def _scan_keywords(self) -> None:
        size = 0
        word = None
        chars = self._text
        char = chars
        prev_space = False
        skip = False
        trie = self.KEYWORD_TRIE
        single_token = char in self.SINGLE_TOKENS

        while chars:
            if skip:
                result = 1
            else:
                result, trie = in_trie(trie, char.upper())

            if result == 0:
                break
            if result == 2:
                word = chars

            size += 1
            end = self._current - 1 + size

            if end < self.size:
                char = self.sql[end]
                single_token = single_token or char in self.SINGLE_TOKENS
                is_space = char in self.WHITE_SPACE

                if not is_space or not prev_space:
                    if is_space:
                        char = " "
                    chars += char
                    prev_space = is_space
                    skip = False
                else:
                    skip = True
            else:
                char = ""
                chars = " "

        word = None if not single_token and chars[-1] not in self.WHITE_SPACE else word

        if not word:
            if self._char in self.SINGLE_TOKENS:
                self._add(self.SINGLE_TOKENS[self._char], text=self._char)
                return
            self._scan_var()
            return

        if self._scan_string(word):
            return
        if self._scan_formatted_string(word):
            return
        if self._scan_comment(word):
            return

        self._advance(size - 1)
        word = word.upper()
        self._add(self.KEYWORDS[word], text=word)

    def _scan_comment(self, comment_start: str) -> bool:
        if comment_start not in self._COMMENTS:
            return False

        comment_start_line = self._line
        comment_start_size = len(comment_start)
        comment_end = self._COMMENTS[comment_start]

        if comment_end:
            # Skip the comment's start delimiter
            self._advance(comment_start_size)

            comment_end_size = len(comment_end)
            while not self._end and self._chars(comment_end_size) != comment_end:
                self._advance(alnum=True)

            self._comments.append(self._text[comment_start_size : -comment_end_size + 1])
            self._advance(comment_end_size - 1)
        else:
            while not self._end and self.WHITE_SPACE.get(self._peek) is not TokenType.BREAK:
                self._advance(alnum=True)
            self._comments.append(self._text[comment_start_size:])

        # Leading comment is attached to the succeeding token, whilst trailing comment to the preceding.
        # Multiple consecutive comments are preserved by appending them to the current comments list.
        if comment_start_line == self._prev_token_line:
            self.tokens[-1].comments.extend(self._comments)
            self._comments = []
            self._prev_token_line = self._line

        return True

    def _scan_number(self) -> None:
        if self._char == "0":
            peek = self._peek.upper()
            if peek == "B":
                return self._scan_bits() if self._BIT_STRINGS else self._add(TokenType.NUMBER)
            elif peek == "X":
                return self._scan_hex() if self._HEX_STRINGS else self._add(TokenType.NUMBER)

        decimal = False
        scientific = 0

        while True:
            if self._peek.isdigit():
                self._advance()
            elif self._peek == "." and not decimal:
                decimal = True
                self._advance()
            elif self._peek in ("-", "+") and scientific == 1:
                scientific += 1
                self._advance()
            elif self._peek.upper() == "E" and not scientific:
                scientific += 1
                self._advance()
            elif self._peek.isidentifier():
                number_text = self._text
                literal = ""

                while self._peek.strip() and self._peek not in self.SINGLE_TOKENS:
                    literal += self._peek.upper()
                    self._advance()

                token_type = self.KEYWORDS.get(self.NUMERIC_LITERALS.get(literal))

                if token_type:
                    self._add(TokenType.NUMBER, number_text)
                    self._add(TokenType.DCOLON, "::")
                    return self._add(token_type, literal)
                elif self.IDENTIFIER_CAN_START_WITH_DIGIT:
                    return self._add(TokenType.VAR)

                self._add(TokenType.NUMBER, number_text)
                return self._advance(-len(literal))
            else:
                return self._add(TokenType.NUMBER)
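    # Illustrative example (not part of the original source): a dialect that sets
    # NUMERIC_LITERALS = {"L": "BIGINT"}, as Hive does, makes _scan_number emit
    # three tokens for the literal 3L -- NUMBER "3", DCOLON "::" and BIGINT "L" --
    # i.e. the suffix is tokenized as a cast of the number to BIGINT.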
    def _scan_bits(self) -> None:
        self._advance()
        value = self._extract_value()
        try:
            # If `value` can't be converted to a binary, fallback to tokenizing it as an identifier
            int(value, 2)
            self._add(TokenType.BIT_STRING, value[2:])  # Drop the 0b
        except ValueError:
            self._add(TokenType.IDENTIFIER)

    def _scan_hex(self) -> None:
        self._advance()
        value = self._extract_value()
        try:
            # If `value` can't be converted to a hex, fallback to tokenizing it as an identifier
            int(value, 16)
            self._add(TokenType.HEX_STRING, value[2:])  # Drop the 0x
        except ValueError:
            self._add(TokenType.IDENTIFIER)

    def _extract_value(self) -> str:
        while True:
            char = self._peek.strip()
            if char and char not in self.SINGLE_TOKENS:
                self._advance(alnum=True)
            else:
                break

        return self._text

    def _scan_string(self, quote: str) -> bool:
        quote_end = self._QUOTES.get(quote)
        if quote_end is None:
            return False

        self._advance(len(quote))
        text = self._extract_string(quote_end)
        text = text.encode(self.ENCODE).decode(self.ENCODE) if self.ENCODE else text
        self._add(TokenType.NATIONAL if quote[0].upper() == "N" else TokenType.STRING, text)
        return True

    # X'1234', b'0110', E'\\\\' etc.
    def _scan_formatted_string(self, string_start: str) -> bool:
        if string_start in self._HEX_STRINGS:
            delimiters = self._HEX_STRINGS
            token_type = TokenType.HEX_STRING
            base = 16
        elif string_start in self._BIT_STRINGS:
            delimiters = self._BIT_STRINGS
            token_type = TokenType.BIT_STRING
            base = 2
        elif string_start in self._BYTE_STRINGS:
            delimiters = self._BYTE_STRINGS
            token_type = TokenType.BYTE_STRING
            base = None
        else:
            return False

        self._advance(len(string_start))
        string_end = delimiters[string_start]
        text = self._extract_string(string_end)

        if base:
            try:
                int(text, base)
            except ValueError:
                raise RuntimeError(
                    f"Numeric string contains invalid characters from {self._line}:{self._start}"
                )

        self._add(token_type, text)
        return True

    def _scan_identifier(self, identifier_end: str) -> None:
        self._advance()
        text = self._extract_string(identifier_end, self._IDENTIFIER_ESCAPES)
        self._add(TokenType.IDENTIFIER, text)

    def _scan_var(self) -> None:
        while True:
            char = self._peek.strip()
            if char and (char in self.VAR_SINGLE_TOKENS or char not in self.SINGLE_TOKENS):
                self._advance(alnum=True)
            else:
                break

        self._add(
            TokenType.VAR
            if self.tokens and self.tokens[-1].token_type == TokenType.PARAMETER
            else self.KEYWORDS.get(self._text.upper(), TokenType.VAR)
        )

    def _extract_string(self, delimiter: str, escapes=None) -> str:
        text = ""
        delim_size = len(delimiter)
        escapes = self._STRING_ESCAPES if escapes is None else escapes

        while True:
            if self._char in escapes and (self._peek == delimiter or self._peek in escapes):
                if self._peek == delimiter:
                    text += self._peek
                else:
                    text += self._char + self._peek

                if self._current + 1 < self.size:
                    self._advance(2)
                else:
                    raise RuntimeError(f"Missing {delimiter} from {self._line}:{self._current}")
            else:
                if self._chars(delim_size) == delimiter:
                    if delim_size > 1:
                        self._advance(delim_size - 1)
                    break

                if self._end:
                    raise RuntimeError(f"Missing {delimiter} from {self._line}:{self._start}")

                current = self._current - 1
                self._advance(alnum=True)
                text += self.sql[current : self._current - 1]

        return text
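The public class attributes above (QUOTES, IDENTIFIERS, COMMENTS, KEYWORDS, and so on) are what dialects override; the _Tokenizer metaclass then rebuilds the underscore-prefixed lookup tables and KEYWORD_TRIE for the subclass. A minimal sketch under that assumption (the dialect below is hypothetical, not one that ships with sqlglot):

class MyDialectTokenizer(Tokenizer):
    # Hypothetical dialect: dollar-quoted strings and backtick identifiers.
    # Creating the subclass re-runs the _Tokenizer metaclass, which recompiles
    # _QUOTES, _IDENTIFIERS and the keyword trie from these overrides.
    QUOTES = ["'", ("$$", "$$")]
    IDENTIFIERS = ['"', "`"]
    KEYWORDS = {
        **Tokenizer.KEYWORDS,
        "MY COMMAND": TokenType.COMMAND,  # multi-word keywords match via the trie
    }

Overriding KEYWORDS with a merged copy, rather than mutating the base dict, keeps Tokenizer itself intact; the trie rebuild is what lets a multi-word entry such as "MY COMMAND" match inside _scan_keywords.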
def reset(self) -> None:

def tokenize(self, sql: str) -> t.List[Token]:
    Returns a list of tokens corresponding to the SQL string `sql`.
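For reference, a short usage sketch of the public API documented above (the expected output shape is inferred from _add and _scan_comment, not stated in the module itself):

from sqlglot.tokens import Tokenizer

# Tokenize a statement with a trailing block comment; the comment is not
# emitted as a token of its own but attached to the preceding token.
tokens = Tokenizer().tokenize("SELECT 1 /* one */")
for token in tokens:
    print(token.token_type, repr(token.text), token.line, token.col)

# Expected shape: a SELECT token and a NUMBER token, with the comment text
# stored in tokens[-1].comments rather than in the token stream.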