"""Helper methods and functions for pytermgui."""

from __future__ import annotations

from typing import Iterator

from .markup import tokenize_ansi
from .markup.parsing import LINK_TEMPLATE, PARSERS
from .regex import real_length

__all__ = [
    "break_line",
]


def break_line(  # pylint: disable=too-many-branches
    line: str, limit: int, non_first_limit: int | None = None, fill: str | None = None
) -> Iterator[str]:
    """Breaks a line into a `list[str]` with maximum `limit` length per line.

    It keeps ongoing ANSI sequences between lines, and inserts a reset sequence
    at the end of each style-containing line.

    At the moment it splits strings exactly on the limit, and not on word
    boundaries. That functionality would be preferred, so it will end up being
    implemented at some point.

    Args:
        line: The line to split. May or may not contain ANSI sequences.
        limit: The maximum amount of characters allowed in each line, excluding
            non-printing sequences.
        non_first_limit: The limit after the first line. If not given, defaults
            to `limit`.
        fill: An optional character used to pad each yielded line up to `limit`
            printable characters. No padding is applied when `None`.
    """

    # Empty (or reset-only) input yields a single empty line.
    if line in ["", "\x1b[0m"]:
        yield ""
        return

    def _pad_and_link(line: str, link: str | None) -> str:
        """Pads `line` to `limit` with `fill` and wraps it in `link`, if given."""

        # Measure padding before the link wrapper is added, since the OSC
        # sequence must not count towards printable width.
        count = limit - real_length(line)

        if link is not None:
            line = LINK_TEMPLATE.format(uri=link, label=line)

        if fill is None:
            return line

        line += count * fill

        return line

    used = 0
    current = ""
    sequences = ""

    if non_first_limit is None:
        non_first_limit = limit

    parsers = PARSERS
    link = None

    for token in tokenize_ansi(line):
        if token.is_plain():
            for char in token.value:
                # Break on explicit newlines or when the printable budget
                # for the current output line is exhausted.
                if char == "\n" or used >= limit:
                    if sequences != "":
                        current += "\x1b[0m"

                    yield _pad_and_link(current, link)
                    link = None

                    # Re-open the active styles on the next line.
                    current = sequences
                    used = 0

                    limit = non_first_limit

                if char != "\n":
                    current += char
                    used += 1

            # If the link wasn't yielded along with its token, remove and add it
            # to current manually.
            if link is not None:
                current = current[: -len(token.value)]
                current += LINK_TEMPLATE.format(uri=link, label=token.value)
                link = None

            continue

        # A clear-all token replaces any accumulated style state.
        if token.value == "/":
            sequences = "\x1b[0m"

            if len(current) > 0:
                current += sequences

            continue

        if token.is_hyperlink():
            link = token.value
            continue

        sequence = parsers[type(token)](token, {}, lambda: line)  # type: ignore
        sequences += sequence
        current += sequence

    if current == "":
        return

    if sequences != "" and not current.endswith("\x1b[0m"):
        current += "\x1b[0m"

    yield _pad_and_link(current, link)
def break_line(  # pylint: disable=too-many-branches
    line: str, limit: int, non_first_limit: int | None = None, fill: str | None = None
) -> Iterator[str]:
    """Splits `line` into chunks of at most `limit` printable characters.

    Ongoing ANSI sequences are carried over between the yielded chunks, and a
    reset sequence is appended to every chunk that contains styling.

    Splitting currently happens exactly at the limit, never at word
    boundaries; word-aware breaking would be preferable and may be added
    at some point.

    Args:
        line: The text to break up. May or may not contain ANSI sequences.
        limit: Maximum number of printable characters allowed per chunk,
            excluding non-printing sequences.
        non_first_limit: The limit applied to every chunk after the first
            one. Defaults to `limit` when omitted.
    """

    if line in ["", "\x1b[0m"]:
        yield ""
        return

    def _finish(chunk: str, uri: str | None) -> str:
        """Applies the pending hyperlink and optional fill padding to `chunk`."""

        # Padding width must be measured before the hyperlink wrapper is
        # added, so the link escape codes don't count as printable width.
        pad_width = limit - real_length(chunk)

        if uri is not None:
            chunk = LINK_TEMPLATE.format(uri=uri, label=chunk)

        if fill is None:
            return chunk

        return chunk + pad_width * fill

    if non_first_limit is None:
        non_first_limit = limit

    width = 0
    built = ""
    active_styles = ""
    parser_map = PARSERS
    link = None

    for token in tokenize_ansi(line):
        if token.is_plain():
            for character in token.value:
                # Emit a chunk on a literal newline, or once the printable
                # budget is used up.
                if character == "\n" or width >= limit:
                    if active_styles != "":
                        built += "\x1b[0m"

                    yield _finish(built, link)
                    link = None

                    # Start the next chunk with the still-active styles.
                    built = active_styles
                    width = 0
                    limit = non_first_limit

                if character != "\n":
                    built += character
                    width += 1

            # A link that wasn't emitted together with its token gets wrapped
            # around the token's text here instead.
            if link is not None:
                built = built[: -len(token.value)]
                built += LINK_TEMPLATE.format(uri=link, label=token.value)
                link = None

            continue

        # A clear-all token resets the accumulated style state.
        if token.value == "/":
            active_styles = "\x1b[0m"

            if built:
                built += active_styles

            continue

        if token.is_hyperlink():
            link = token.value
            continue

        parsed = parser_map[type(token)](token, {}, lambda: line)  # type: ignore
        active_styles += parsed
        built += parsed

    if built == "":
        return

    if active_styles != "" and not built.endswith("\x1b[0m"):
        built += "\x1b[0m"

    yield _finish(built, link)
Breaks a line into a `list[str]` with maximum `limit` length per line.
It keeps ongoing ANSI sequences between lines, and inserts a reset sequence at the end of each style-containing line.
At the moment it splits strings exactly on the limit, and not on word boundaries. That functionality would be preferred, so it will end up being implemented at some point.
Args:
- line: The line to split. May or may not contain ANSI sequences.
- limit: The maximum amount of characters allowed in each line, excluding non-printing sequences.
- non_first_limit: The limit after the first line. If not given, defaults to `limit`.