Parser/lexerTypes

Source   Edit  

Types

Lexer = ref object
  tokenNodes*: seq[TokenNode]
  fileName*: string
  metContChar*: bool         ## whether the lexer is positioned after a line
                             ## whose last token was a line continuation character '\'
  parenLevel*: int
  tripleStr*: tuple[within: bool, val: string, quote: char, tokenKind: Token,
                    raw: bool, escape: LexerEscaper,
                    start: tuple[lineNo, colNo: int]] ## whether currently handling a triple-quoted (multiline) string, and its state
For CPython 3.13, this is roughly equivalent to `tok_state`. Source   Edit  
LexerEscaper = proc (s: string): string {....raises: [SyntaxError].}
Source   Edit  
Mode {.pure.} = enum
  Single = "single", File = "exec", Eval = "eval"
Source   Edit  
string_kind_t {.pure.} = enum
  FSTRING, TSTRING
Source   Edit  
tokenizer_mode = object
  kind*: tokenizer_mode_kind_t
  quote*: char
  quote_size*: int
  raw*: bool
  curly_bracket_depth*: int ## number of unmatched `{`, unconditional,
                            ## e.g. including those in nested f-strings like f"{ {1} }" -> '{1}'
  curly_bracket_expr_start_depth*: int ## depth at which the current f-string expression started,
                            ## -1 if not in an f-string expression;
                            ## only affected by the `{`, `}` that start or end f-string expressions,
                            ## so `>= 0` if in an f-string expression
  start*, multi_line_start*: int
  first_line*: int
  start_offset*, multi_line_start_offset*: int
  last_expr_size*, last_expr_end*: int
  when not defined(release):
    in_debug*: bool
  string_kind*: string_kind_t
Source   Edit  
tokenizer_mode_kind_t = enum
  TOK_REGULAR_MODE, TOK_FSTRING_MODE
Source   Edit  

Procs

proc cont(lexer: Lexer): bool {.inline, ...raises: [], tags: [], forbids: [].}
Source   Edit  
proc getMode(lexer: Lexer): var tokenizer_mode {.inline, ...raises: [], tags: [],
    forbids: [].}
TOK_GET_MODE Source   Edit  
proc indentStack(lexer: Lexer): var seq[int] {.inline, ...raises: [], tags: [],
    forbids: [].}
Source   Edit  
proc lineNo(lexer: Lexer): var int {.inline, ...raises: [], tags: [], forbids: [].}
Source   Edit  
proc new_tokenizer_mode(kind: tokenizer_mode_kind_t): tokenizer_mode {.inline,
    ...raises: [], tags: [], forbids: [].}
Source   Edit  
proc new_tokenizer_mode(kind: tokenizer_mode_kind_t; quote: char;
                        quote_size: int; raw: bool; strkind: string_kind_t): tokenizer_mode {.
    inline, ...raises: [], tags: [], forbids: [].}
Source   Edit  
proc parseModeEnum(s: string; res: var Mode): bool {....raises: [], tags: [],
    forbids: [].}
Source   Edit  
proc popMode(lexer: Lexer): tokenizer_mode {.inline, ...raises: [], tags: [],
    forbids: [].}
Source   Edit  
func withinFStringExpr(tok: tokenizer_mode): bool {....raises: [], tags: [],
    forbids: [].}
Source   Edit  

Templates

template enter_FSTRING_EXPR(tok: tokenizer_mode)
Source   Edit  
template INSIDE_FSTRING(lexer: Lexer): bool
Source   Edit  
template INSIDE_FSTRING_EXPR(tok: tokenizer_mode): bool
Source   Edit  
template INSIDE_FSTRING_EXPR_AT_TOP(tok: tokenizer_mode): bool
Source   Edit  
template withNextMode(lexer; doWithIt)
Source   Edit  
template withNextMode(lexer; it; doWithIt)
Source   Edit