sqlglot.dialects.redshift
from __future__ import annotations

import typing as t

from sqlglot import exp, transforms
from sqlglot.dialects.postgres import Postgres
from sqlglot.helper import seq_get
from sqlglot.tokens import TokenType


def _json_sql(self, e) -> str:
    return f'{self.sql(e, "this")}."{e.expression.name}"'


class Redshift(Postgres):
    time_format = "'YYYY-MM-DD HH:MI:SS'"
    time_mapping = {
        **Postgres.time_mapping,  # type: ignore
        "MON": "%b",
        "HH": "%H",
    }

    class Parser(Postgres.Parser):
        FUNCTIONS = {
            **Postgres.Parser.FUNCTIONS,  # type: ignore
            "DATEADD": lambda args: exp.DateAdd(
                this=seq_get(args, 2),
                expression=seq_get(args, 1),
                unit=seq_get(args, 0),
            ),
            "DATEDIFF": lambda args: exp.DateDiff(
                this=seq_get(args, 2),
                expression=seq_get(args, 1),
                unit=seq_get(args, 0),
            ),
            "NVL": exp.Coalesce.from_arg_list,
        }

        CONVERT_TYPE_FIRST = True

        def _parse_types(self, check_func: bool = False) -> t.Optional[exp.Expression]:
            this = super()._parse_types(check_func=check_func)

            if (
                isinstance(this, exp.DataType)
                and this.this == exp.DataType.Type.VARCHAR
                and this.expressions
                and this.expressions[0].this == exp.column("MAX")
            ):
                this.set("expressions", [exp.Var(this="MAX")])

            return this

    class Tokenizer(Postgres.Tokenizer):
        BIT_STRINGS = []
        HEX_STRINGS = []
        STRING_ESCAPES = ["\\"]

        KEYWORDS = {
            **Postgres.Tokenizer.KEYWORDS,  # type: ignore
            "GEOMETRY": TokenType.GEOMETRY,
            "GEOGRAPHY": TokenType.GEOGRAPHY,
            "HLLSKETCH": TokenType.HLLSKETCH,
            "SUPER": TokenType.SUPER,
            "SYSDATE": TokenType.CURRENT_TIMESTAMP,
            "TIME": TokenType.TIMESTAMP,
            "TIMETZ": TokenType.TIMESTAMPTZ,
            "TOP": TokenType.TOP,
            "UNLOAD": TokenType.COMMAND,
            "VARBYTE": TokenType.VARBINARY,
        }

        # Redshift allows # to appear as a table identifier prefix
        SINGLE_TOKENS = Postgres.Tokenizer.SINGLE_TOKENS.copy()
        SINGLE_TOKENS.pop("#")

    class Generator(Postgres.Generator):
        LOCKING_READS_SUPPORTED = False
        SINGLE_STRING_INTERVAL = True
        RENAME_TABLE_WITH_DB = False

        TYPE_MAPPING = {
            **Postgres.Generator.TYPE_MAPPING,  # type: ignore
            exp.DataType.Type.BINARY: "VARBYTE",
            exp.DataType.Type.VARBINARY: "VARBYTE",
            exp.DataType.Type.INT: "INTEGER",
        }

        PROPERTIES_LOCATION = {
            **Postgres.Generator.PROPERTIES_LOCATION,  # type: ignore
            exp.LikeProperty: exp.Properties.Location.POST_WITH,
        }

        TRANSFORMS = {
            **Postgres.Generator.TRANSFORMS,  # type: ignore
            exp.CurrentTimestamp: lambda self, e: "SYSDATE",
            exp.DateAdd: lambda self, e: self.func(
                "DATEADD", exp.var(e.text("unit") or "day"), e.expression, e.this
            ),
            exp.DateDiff: lambda self, e: self.func(
                "DATEDIFF", exp.var(e.text("unit") or "day"), e.expression, e.this
            ),
            exp.DistKeyProperty: lambda self, e: f"DISTKEY({e.name})",
            exp.DistStyleProperty: lambda self, e: self.naked_property(e),
            exp.JSONExtract: _json_sql,
            exp.JSONExtractScalar: _json_sql,
            exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),
            exp.SortKeyProperty: lambda self, e: f"{'COMPOUND ' if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})",
        }

        # Redshift uses the POW | POWER (expr1, expr2) syntax instead of expr1 ^ expr2 (postgres)
        TRANSFORMS.pop(exp.Pow)

        RESERVED_KEYWORDS = {*Postgres.Generator.RESERVED_KEYWORDS, "snapshot"}

        def values_sql(self, expression: exp.Values) -> str:
            """
            Converts `VALUES...` expression into a series of unions.

            Note: If you have a lot of unions then this will result in a large number of recursive statements to
            evaluate the expression. You may need to increase `sys.setrecursionlimit` to run and it can also be
            very slow.
            """
            if not isinstance(expression.unnest().parent, exp.From):
                return super().values_sql(expression)
            rows = [tuple_exp.expressions for tuple_exp in expression.expressions]
            selects = []
            for i, row in enumerate(rows):
                if i == 0 and expression.alias:
                    row = [
                        exp.alias_(value, column_name)
                        for value, column_name in zip(row, expression.args["alias"].args["columns"])
                    ]
                selects.append(exp.Select(expressions=row))
            subquery_expression = selects[0]
            if len(selects) > 1:
                for select in selects[1:]:
                    subquery_expression = exp.union(subquery_expression, select, distinct=False)
            return self.subquery_sql(subquery_expression.subquery(expression.alias))

        def with_properties(self, properties: exp.Properties) -> str:
            """Redshift doesn't have `WITH` as part of their with_properties so we remove it"""
            return self.properties(properties, prefix=" ", suffix="")

        def datatype_sql(self, expression: exp.DataType) -> str:
            """
            Redshift converts the `TEXT` data type to `VARCHAR(255)` by default when people more generally mean
            VARCHAR of max length which is `VARCHAR(max)` in Redshift. Therefore if we get a `TEXT` data type
            without precision we convert it to `VARCHAR(max)` and if it does have precision then we just convert
            `TEXT` to `VARCHAR`.
            """
            if expression.this == exp.DataType.Type.TEXT:
                expression = expression.copy()
                expression.set("this", exp.DataType.Type.VARCHAR)
                precision = expression.args.get("expressions")
                if not precision:
                    expression.append("expressions", exp.Var(this="MAX"))
            return super().datatype_sql(expression)
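The dialect is usually exercised through sqlglot's top-level API rather than used directly. A minimal sketch (the queries, table names, and column names are illustrative, and exact output strings can vary between sqlglot versions):

import sqlglot

# NVL is parsed into exp.Coalesce (see Parser.FUNCTIONS below), so the
# Postgres output renders it as COALESCE.
print(sqlglot.transpile("SELECT NVL(a, b) FROM t", read="redshift", write="postgres")[0])

# Redshift's unit-first DATEADD argument order is normalized into exp.DateAdd.
print(sqlglot.transpile("SELECT DATEADD(month, 1, created_at) FROM t", read="redshift", write="duckdb")[0])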
class Redshift(Postgres):
    time_format = "'YYYY-MM-DD HH:MI:SS'"
    time_mapping = {
        **Postgres.time_mapping,  # type: ignore
        "MON": "%b",
        "HH": "%H",
    }

    class Parser(Postgres.Parser):
        FUNCTIONS = {
            **Postgres.Parser.FUNCTIONS,  # type: ignore
            "DATEADD": lambda args: exp.DateAdd(
                this=seq_get(args, 2),
                expression=seq_get(args, 1),
                unit=seq_get(args, 0),
            ),
            "DATEDIFF": lambda args: exp.DateDiff(
                this=seq_get(args, 2),
                expression=seq_get(args, 1),
                unit=seq_get(args, 0),
            ),
            "NVL": exp.Coalesce.from_arg_list,
        }

        CONVERT_TYPE_FIRST = True

        def _parse_types(self, check_func: bool = False) -> t.Optional[exp.Expression]:
            this = super()._parse_types(check_func=check_func)

            if (
                isinstance(this, exp.DataType)
                and this.this == exp.DataType.Type.VARCHAR
                and this.expressions
                and this.expressions[0].this == exp.column("MAX")
            ):
                this.set("expressions", [exp.Var(this="MAX")])

            return this

    class Tokenizer(Postgres.Tokenizer):
        BIT_STRINGS = []
        HEX_STRINGS = []
        STRING_ESCAPES = ["\\"]

        KEYWORDS = {
            **Postgres.Tokenizer.KEYWORDS,  # type: ignore
            "GEOMETRY": TokenType.GEOMETRY,
            "GEOGRAPHY": TokenType.GEOGRAPHY,
            "HLLSKETCH": TokenType.HLLSKETCH,
            "SUPER": TokenType.SUPER,
            "SYSDATE": TokenType.CURRENT_TIMESTAMP,
            "TIME": TokenType.TIMESTAMP,
            "TIMETZ": TokenType.TIMESTAMPTZ,
            "TOP": TokenType.TOP,
            "UNLOAD": TokenType.COMMAND,
            "VARBYTE": TokenType.VARBINARY,
        }

        # Redshift allows # to appear as a table identifier prefix
        SINGLE_TOKENS = Postgres.Tokenizer.SINGLE_TOKENS.copy()
        SINGLE_TOKENS.pop("#")

    class Generator(Postgres.Generator):
        LOCKING_READS_SUPPORTED = False
        SINGLE_STRING_INTERVAL = True
        RENAME_TABLE_WITH_DB = False

        TYPE_MAPPING = {
            **Postgres.Generator.TYPE_MAPPING,  # type: ignore
            exp.DataType.Type.BINARY: "VARBYTE",
            exp.DataType.Type.VARBINARY: "VARBYTE",
            exp.DataType.Type.INT: "INTEGER",
        }

        PROPERTIES_LOCATION = {
            **Postgres.Generator.PROPERTIES_LOCATION,  # type: ignore
            exp.LikeProperty: exp.Properties.Location.POST_WITH,
        }

        TRANSFORMS = {
            **Postgres.Generator.TRANSFORMS,  # type: ignore
            exp.CurrentTimestamp: lambda self, e: "SYSDATE",
            exp.DateAdd: lambda self, e: self.func(
                "DATEADD", exp.var(e.text("unit") or "day"), e.expression, e.this
            ),
            exp.DateDiff: lambda self, e: self.func(
                "DATEDIFF", exp.var(e.text("unit") or "day"), e.expression, e.this
            ),
            exp.DistKeyProperty: lambda self, e: f"DISTKEY({e.name})",
            exp.DistStyleProperty: lambda self, e: self.naked_property(e),
            exp.JSONExtract: _json_sql,
            exp.JSONExtractScalar: _json_sql,
            exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),
            exp.SortKeyProperty: lambda self, e: f"{'COMPOUND ' if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})",
        }

        # Redshift uses the POW | POWER (expr1, expr2) syntax instead of expr1 ^ expr2 (postgres)
        TRANSFORMS.pop(exp.Pow)

        RESERVED_KEYWORDS = {*Postgres.Generator.RESERVED_KEYWORDS, "snapshot"}

        def values_sql(self, expression: exp.Values) -> str:
            """
            Converts `VALUES...` expression into a series of unions.

            Note: If you have a lot of unions then this will result in a large number of recursive statements to
            evaluate the expression. You may need to increase `sys.setrecursionlimit` to run and it can also be
            very slow.
            """
            if not isinstance(expression.unnest().parent, exp.From):
                return super().values_sql(expression)
            rows = [tuple_exp.expressions for tuple_exp in expression.expressions]
            selects = []
            for i, row in enumerate(rows):
                if i == 0 and expression.alias:
                    row = [
                        exp.alias_(value, column_name)
                        for value, column_name in zip(row, expression.args["alias"].args["columns"])
                    ]
                selects.append(exp.Select(expressions=row))
            subquery_expression = selects[0]
            if len(selects) > 1:
                for select in selects[1:]:
                    subquery_expression = exp.union(subquery_expression, select, distinct=False)
            return self.subquery_sql(subquery_expression.subquery(expression.alias))

        def with_properties(self, properties: exp.Properties) -> str:
            """Redshift doesn't have `WITH` as part of their with_properties so we remove it"""
            return self.properties(properties, prefix=" ", suffix="")

        def datatype_sql(self, expression: exp.DataType) -> str:
            """
            Redshift converts the `TEXT` data type to `VARCHAR(255)` by default when people more generally mean
            VARCHAR of max length which is `VARCHAR(max)` in Redshift. Therefore if we get a `TEXT` data type
            without precision we convert it to `VARCHAR(max)` and if it does have precision then we just convert
            `TEXT` to `VARCHAR`.
            """
            if expression.this == exp.DataType.Type.TEXT:
                expression = expression.copy()
                expression.set("this", exp.DataType.Type.VARCHAR)
                precision = expression.args.get("expressions")
                if not precision:
                    expression.append("expressions", exp.Var(this="MAX"))
            return super().datatype_sql(expression)
    class Parser(Postgres.Parser):
        FUNCTIONS = {
            **Postgres.Parser.FUNCTIONS,  # type: ignore
            "DATEADD": lambda args: exp.DateAdd(
                this=seq_get(args, 2),
                expression=seq_get(args, 1),
                unit=seq_get(args, 0),
            ),
            "DATEDIFF": lambda args: exp.DateDiff(
                this=seq_get(args, 2),
                expression=seq_get(args, 1),
                unit=seq_get(args, 0),
            ),
            "NVL": exp.Coalesce.from_arg_list,
        }

        CONVERT_TYPE_FIRST = True

        def _parse_types(self, check_func: bool = False) -> t.Optional[exp.Expression]:
            this = super()._parse_types(check_func=check_func)

            if (
                isinstance(this, exp.DataType)
                and this.this == exp.DataType.Type.VARCHAR
                and this.expressions
                and this.expressions[0].this == exp.column("MAX")
            ):
                this.set("expressions", [exp.Var(this="MAX")])

            return this
Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces a parsed syntax tree.
Arguments:
- error_level: the desired error level. Default: ErrorLevel.RAISE
- error_message_context: determines the amount of context to capture from a query string when displaying the error message (in number of characters). Default: 50.
- index_offset: Index offset for arrays, e.g. ARRAY[0] vs ARRAY[1] as the head of a list. Default: 0
- alias_post_tablesample: If the table alias comes after tablesample. Default: False
- max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
- null_ordering: Indicates the default null ordering method to use if not explicitly set. Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". Default: "nulls_are_small"
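To see the Redshift-specific argument reordering from FUNCTIONS in action, the parsed tree can be inspected directly. A small sketch (the query and column name are made up; the args layout reflects this version of sqlglot):

from sqlglot import exp, parse_one

# DATEADD(unit, number, expression) is remapped so that `this` holds the date
# expression, `expression` the number, and `unit` the interval unit.
node = parse_one("SELECT DATEADD(day, 7, order_date)", read="redshift")
date_add = node.find(exp.DateAdd)
assert date_add is not None
print(date_add.this, date_add.args["expression"], date_add.args["unit"])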
    class Tokenizer(Postgres.Tokenizer):
        BIT_STRINGS = []
        HEX_STRINGS = []
        STRING_ESCAPES = ["\\"]

        KEYWORDS = {
            **Postgres.Tokenizer.KEYWORDS,  # type: ignore
            "GEOMETRY": TokenType.GEOMETRY,
            "GEOGRAPHY": TokenType.GEOGRAPHY,
            "HLLSKETCH": TokenType.HLLSKETCH,
            "SUPER": TokenType.SUPER,
            "SYSDATE": TokenType.CURRENT_TIMESTAMP,
            "TIME": TokenType.TIMESTAMP,
            "TIMETZ": TokenType.TIMESTAMPTZ,
            "TOP": TokenType.TOP,
            "UNLOAD": TokenType.COMMAND,
            "VARBYTE": TokenType.VARBINARY,
        }

        # Redshift allows # to appear as a table identifier prefix
        SINGLE_TOKENS = Postgres.Tokenizer.SINGLE_TOKENS.copy()
        SINGLE_TOKENS.pop("#")
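A short sketch of how these keyword overrides surface during tokenization (the query is illustrative; token types are those of this sqlglot version):

from sqlglot.dialects.redshift import Redshift

# SYSDATE tokenizes as CURRENT_TIMESTAMP, and because "#" was removed from
# SINGLE_TOKENS, the leading # of a temp table name stays attached to the
# identifier instead of being emitted as its own token.
for token in Redshift.Tokenizer().tokenize("SELECT SYSDATE FROM #tmp_table"):
    print(token.token_type, token.text)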
    class Generator(Postgres.Generator):
        LOCKING_READS_SUPPORTED = False
        SINGLE_STRING_INTERVAL = True
        RENAME_TABLE_WITH_DB = False

        TYPE_MAPPING = {
            **Postgres.Generator.TYPE_MAPPING,  # type: ignore
            exp.DataType.Type.BINARY: "VARBYTE",
            exp.DataType.Type.VARBINARY: "VARBYTE",
            exp.DataType.Type.INT: "INTEGER",
        }

        PROPERTIES_LOCATION = {
            **Postgres.Generator.PROPERTIES_LOCATION,  # type: ignore
            exp.LikeProperty: exp.Properties.Location.POST_WITH,
        }

        TRANSFORMS = {
            **Postgres.Generator.TRANSFORMS,  # type: ignore
            exp.CurrentTimestamp: lambda self, e: "SYSDATE",
            exp.DateAdd: lambda self, e: self.func(
                "DATEADD", exp.var(e.text("unit") or "day"), e.expression, e.this
            ),
            exp.DateDiff: lambda self, e: self.func(
                "DATEDIFF", exp.var(e.text("unit") or "day"), e.expression, e.this
            ),
            exp.DistKeyProperty: lambda self, e: f"DISTKEY({e.name})",
            exp.DistStyleProperty: lambda self, e: self.naked_property(e),
            exp.JSONExtract: _json_sql,
            exp.JSONExtractScalar: _json_sql,
            exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),
            exp.SortKeyProperty: lambda self, e: f"{'COMPOUND ' if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})",
        }

        # Redshift uses the POW | POWER (expr1, expr2) syntax instead of expr1 ^ expr2 (postgres)
        TRANSFORMS.pop(exp.Pow)

        RESERVED_KEYWORDS = {*Postgres.Generator.RESERVED_KEYWORDS, "snapshot"}

        def values_sql(self, expression: exp.Values) -> str:
            """
            Converts `VALUES...` expression into a series of unions.

            Note: If you have a lot of unions then this will result in a large number of recursive statements to
            evaluate the expression. You may need to increase `sys.setrecursionlimit` to run and it can also be
            very slow.
            """
            if not isinstance(expression.unnest().parent, exp.From):
                return super().values_sql(expression)
            rows = [tuple_exp.expressions for tuple_exp in expression.expressions]
            selects = []
            for i, row in enumerate(rows):
                if i == 0 and expression.alias:
                    row = [
                        exp.alias_(value, column_name)
                        for value, column_name in zip(row, expression.args["alias"].args["columns"])
                    ]
                selects.append(exp.Select(expressions=row))
            subquery_expression = selects[0]
            if len(selects) > 1:
                for select in selects[1:]:
                    subquery_expression = exp.union(subquery_expression, select, distinct=False)
            return self.subquery_sql(subquery_expression.subquery(expression.alias))

        def with_properties(self, properties: exp.Properties) -> str:
            """Redshift doesn't have `WITH` as part of their with_properties so we remove it"""
            return self.properties(properties, prefix=" ", suffix="")

        def datatype_sql(self, expression: exp.DataType) -> str:
            """
            Redshift converts the `TEXT` data type to `VARCHAR(255)` by default when people more generally mean
            VARCHAR of max length which is `VARCHAR(max)` in Redshift. Therefore if we get a `TEXT` data type
            without precision we convert it to `VARCHAR(max)` and if it does have precision then we just convert
            `TEXT` to `VARCHAR`.
            """
            if expression.this == exp.DataType.Type.TEXT:
                expression = expression.copy()
                expression.set("this", exp.DataType.Type.VARCHAR)
                precision = expression.args.get("expressions")
                if not precision:
                    expression.append("expressions", exp.Var(this="MAX"))
            return super().datatype_sql(expression)
Generator interprets the given syntax tree and produces a SQL string as an output.
Arguments:
- time_mapping (dict): the dictionary of custom time mappings in which the key represents a python time format and the output the target time format
- time_trie (trie): a trie of the time_mapping keys
- pretty (bool): if set to True the returned string will be formatted. Default: False.
- quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
- quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
- identifier_start (str): specifies which starting character to use to delimit identifiers. Default: ".
- identifier_end (str): specifies which ending character to use to delimit identifiers. Default: ".
- bit_start (str): specifies which starting character to use to delimit bit literals. Default: None.
- bit_end (str): specifies which ending character to use to delimit bit literals. Default: None.
- hex_start (str): specifies which starting character to use to delimit hex literals. Default: None.
- hex_end (str): specifies which ending character to use to delimit hex literals. Default: None.
- byte_start (str): specifies which starting character to use to delimit byte literals. Default: None.
- byte_end (str): specifies which ending character to use to delimit byte literals. Default: None.
- identify (bool | str): 'always': always quote; 'safe': quote identifiers if they don't contain an uppercase character; True defaults to 'always'.
- normalize (bool): if set to True all identifiers will be lowercased
- string_escape (str): specifies a string escape character. Default: '.
- identifier_escape (str): specifies an identifier escape character. Default: ".
- pad (int): determines padding in a formatted string. Default: 2.
- indent (int): determines the size of indentation in a formatted string. Default: 4.
- unnest_column_only (bool): if True, unnest table aliases are considered only as column aliases
- normalize_functions (str): normalize function names, "upper", "lower", or None. Default: "upper"
- alias_post_tablesample (bool): if the table alias comes after tablesample. Default: False
- unsupported_level (ErrorLevel): determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
- null_ordering (str): Indicates the default null ordering method to use if not explicitly set. Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". Default: "nulls_are_small"
- max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
- leading_comma (bool): if the comma is leading or trailing in select statements. Default: False
- max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
- comments: Whether or not to preserve comments in the output SQL code. Default: True
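The transforms above are easiest to observe from the writer side. A minimal sketch (exact output strings may differ across sqlglot versions):

import sqlglot

# exp.CurrentTimestamp renders as SYSDATE under the Redshift generator.
print(sqlglot.transpile("SELECT CURRENT_TIMESTAMP", write="redshift")[0])

# With exp.Pow popped from TRANSFORMS, Postgres's x ^ 2 falls back to the
# POWER(expr1, expr2) function form instead of the ^ operator.
print(sqlglot.transpile("SELECT x ^ 2", read="postgres", write="redshift")[0])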
        def values_sql(self, expression: exp.Values) -> str:
            """
            Converts `VALUES...` expression into a series of unions.

            Note: If you have a lot of unions then this will result in a large number of recursive statements to
            evaluate the expression. You may need to increase `sys.setrecursionlimit` to run and it can also be
            very slow.
            """
            if not isinstance(expression.unnest().parent, exp.From):
                return super().values_sql(expression)
            rows = [tuple_exp.expressions for tuple_exp in expression.expressions]
            selects = []
            for i, row in enumerate(rows):
                if i == 0 and expression.alias:
                    row = [
                        exp.alias_(value, column_name)
                        for value, column_name in zip(row, expression.args["alias"].args["columns"])
                    ]
                selects.append(exp.Select(expressions=row))
            subquery_expression = selects[0]
            if len(selects) > 1:
                for select in selects[1:]:
                    subquery_expression = exp.union(subquery_expression, select, distinct=False)
            return self.subquery_sql(subquery_expression.subquery(expression.alias))
Converts a `VALUES...` expression into a series of unions.

Note: If there are many unions, this results in a large number of recursive expressions to evaluate. You may need to increase `sys.setrecursionlimit`, and generation can also be very slow.
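A sketch of the rewrite this performs when a VALUES list appears as a FROM source (the input query is illustrative; the output should be a subquery of SELECTs joined with UNION ALL):

import sqlglot

sql = "SELECT a, b FROM (VALUES (1, 'x'), (2, 'y')) AS t(a, b)"

# Redshift can't select from a bare VALUES list, so each row becomes a
# SELECT and the rows are combined with UNION ALL inside a subquery.
print(sqlglot.transpile(sql, read="postgres", write="redshift")[0])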
        def with_properties(self, properties: exp.Properties) -> str:
            """Redshift doesn't have `WITH` as part of their with_properties so we remove it"""
            return self.properties(properties, prefix=" ", suffix="")
Redshift doesn't use `WITH` as part of its properties clause, so we remove it.
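For instance, properties such as DISTKEY and SORTKEY are emitted bare after the column list rather than inside a WITH (...) clause. A small sketch (the table definition is illustrative):

import sqlglot

# Round-tripping a Redshift CREATE TABLE keeps the properties unprefixed.
sql = "CREATE TABLE t (c INT) DISTKEY(c) SORTKEY(c)"
print(sqlglot.transpile(sql, read="redshift", write="redshift")[0])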
        def datatype_sql(self, expression: exp.DataType) -> str:
            """
            Redshift converts the `TEXT` data type to `VARCHAR(255)` by default when people more generally mean
            VARCHAR of max length which is `VARCHAR(max)` in Redshift. Therefore if we get a `TEXT` data type
            without precision we convert it to `VARCHAR(max)` and if it does have precision then we just convert
            `TEXT` to `VARCHAR`.
            """
            if expression.this == exp.DataType.Type.TEXT:
                expression = expression.copy()
                expression.set("this", exp.DataType.Type.VARCHAR)
                precision = expression.args.get("expressions")
                if not precision:
                    expression.append("expressions", exp.Var(this="MAX"))
            return super().datatype_sql(expression)
Redshift converts the `TEXT` data type to `VARCHAR(255)` by default, when what is more generally meant is a VARCHAR of maximum length, which is `VARCHAR(MAX)` in Redshift. Therefore, if we get a `TEXT` data type without precision, we convert it to `VARCHAR(MAX)`; if it does have a precision, we just convert `TEXT` to `VARCHAR`.
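A quick sketch of that behavior (the table definitions are illustrative; outputs indicative of this sqlglot version):

import sqlglot

# TEXT without a precision widens to VARCHAR(MAX)...
print(sqlglot.transpile("CREATE TABLE t (c TEXT)", read="postgres", write="redshift")[0])

# ...while an explicit VARCHAR precision passes through unchanged.
print(sqlglot.transpile("CREATE TABLE t (c VARCHAR(10))", read="postgres", write="redshift")[0])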
Inherited Members
- sqlglot.generator.Generator
- Generator
- generate
- unsupported
- sep
- seg
- pad_comment
- maybe_comment
- wrap
- no_identify
- normalize_func
- indent
- sql
- uncache_sql
- cache_sql
- characterset_sql
- column_sql
- columnposition_sql
- columndef_sql
- columnconstraint_sql
- autoincrementcolumnconstraint_sql
- compresscolumnconstraint_sql
- generatedasidentitycolumnconstraint_sql
- notnullcolumnconstraint_sql
- primarykeycolumnconstraint_sql
- uniquecolumnconstraint_sql
- create_sql
- clone_sql
- describe_sql
- prepend_ctes
- with_sql
- cte_sql
- tablealias_sql
- bitstring_sql
- hexstring_sql
- bytestring_sql
- datatypesize_sql
- directory_sql
- delete_sql
- drop_sql
- except_sql
- except_op
- fetch_sql
- filter_sql
- hint_sql
- index_sql
- identifier_sql
- inputoutputformat_sql
- national_sql
- partition_sql
- properties_sql
- root_properties
- properties
- locate_properties
- property_sql
- likeproperty_sql
- fallbackproperty_sql
- journalproperty_sql
- freespaceproperty_sql
- afterjournalproperty_sql
- checksumproperty_sql
- mergeblockratioproperty_sql
- datablocksizeproperty_sql
- blockcompressionproperty_sql
- isolatedloadingproperty_sql
- lockingproperty_sql
- withdataproperty_sql
- insert_sql
- intersect_sql
- intersect_op
- introducer_sql
- pseudotype_sql
- onconflict_sql
- returning_sql
- rowformatdelimitedproperty_sql
- table_sql
- tablesample_sql
- pivot_sql
- tuple_sql
- update_sql
- var_sql
- into_sql
- from_sql
- group_sql
- having_sql
- join_sql
- lambda_sql
- lateral_sql
- limit_sql
- offset_sql
- setitem_sql
- set_sql
- pragma_sql
- lock_sql
- literal_sql
- loaddata_sql
- null_sql
- boolean_sql
- order_sql
- cluster_sql
- distribute_sql
- sort_sql
- ordered_sql
- matchrecognize_sql
- query_modifiers
- after_having_modifiers
- after_limit_modifiers
- select_sql
- schema_sql
- star_sql
- parameter_sql
- sessionparameter_sql
- placeholder_sql
- subquery_sql
- qualify_sql
- union_sql
- union_op
- unnest_sql
- where_sql
- window_sql
- partition_by_sql
- windowspec_sql
- withingroup_sql
- between_sql
- bracket_sql
- all_sql
- any_sql
- exists_sql
- case_sql
- constraint_sql
- nextvaluefor_sql
- extract_sql
- trim_sql
- concat_sql
- check_sql
- foreignkey_sql
- primarykey_sql
- unique_sql
- if_sql
- matchagainst_sql
- jsonkeyvalue_sql
- jsonobject_sql
- openjsoncolumndef_sql
- openjson_sql
- in_sql
- in_unnest_op
- interval_sql
- return_sql
- reference_sql
- anonymous_sql
- paren_sql
- neg_sql
- not_sql
- alias_sql
- aliases_sql
- attimezone_sql
- add_sql
- and_sql
- connector_sql
- bitwiseand_sql
- bitwiseleftshift_sql
- bitwisenot_sql
- bitwiseor_sql
- bitwiserightshift_sql
- bitwisexor_sql
- cast_sql
- currentdate_sql
- collate_sql
- command_sql
- comment_sql
- mergetreettlaction_sql
- mergetreettl_sql
- transaction_sql
- commit_sql
- rollback_sql
- altercolumn_sql
- renametable_sql
- altertable_sql
- droppartition_sql
- addconstraint_sql
- distinct_sql
- ignorenulls_sql
- respectnulls_sql
- intdiv_sql
- dpipe_sql
- div_sql
- overlaps_sql
- distance_sql
- dot_sql
- eq_sql
- escape_sql
- glob_sql
- gt_sql
- gte_sql
- ilike_sql
- ilikeany_sql
- is_sql
- like_sql
- likeany_sql
- similarto_sql
- lt_sql
- lte_sql
- mod_sql
- mul_sql
- neq_sql
- nullsafeeq_sql
- nullsafeneq_sql
- or_sql
- slice_sql
- sub_sql
- trycast_sql
- use_sql
- binary
- function_fallback_sql
- func
- format_args
- text_width
- format_time
- expressions
- op_expressions
- naked_property
- set_operation
- tag_sql
- token_sql
- userdefinedfunction_sql
- joinhint_sql
- kwarg_sql
- when_sql
- merge_sql
- tochar_sql