compiler: rename Tok/Token to Token/TokenKind
parent 89ea8a0275
commit c620da9089
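In short: the struct formerly named `Tok` becomes `Token`, and the enum formerly named `Token` becomes `TokenKind` (the old TODO suggested `TokenType`; the commit settles on `TokenKind`). A before/after sketch condensed from the hunks below — the two halves are alternatives, not one program, and the member lists are abbreviated:

	// Before: the enum held the name `Token`, so the struct made do with `Tok`.
	enum Token {
		eof
		name
		// ...
	}
	struct Tok {
		tok     Token  // the token number/enum; for quick comparisons
		lit     string // literal representation of the token
	}

	// After: the struct owns `Token`; the enum becomes `TokenKind`.
	enum TokenKind {
		eof
		name
		// ...
	}
	struct Token {
		tok TokenKind
		lit string
	}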
@@ -208,7 +208,7 @@ fn (s mut Scanner) goto_scanner_position(scp ScannerPos) {
 }
 
 // get_scanner_pos_of_token rescans *the whole source* till it reaches {t.line_nr, t.col} .
-fn (s mut Scanner) get_scanner_pos_of_token(t &Tok) ScannerPos {
+fn (s mut Scanner) get_scanner_pos_of_token(t &Token) ScannerPos {
 	// This rescanning is done just once on error, so it is fine for now.
 	// Be careful for the performance implications, if you want to
 	// do it more frequently. The alternative would be to store
@@ -296,7 +296,7 @@ fn (p mut Parser) fn_decl() {
 	}
 	// Returns a type?
 	mut typ := 'void'
-	if p.tok in [Token.name, .mul, .amp, .lsbr, .question, .lpar] {
+	if p.tok in [TokenKind.name, .mul, .amp, .lsbr, .question, .lpar] {
 		p.fgen(' ')
 		typ = p.get_type()
 	}
@@ -9,10 +9,8 @@ import (
 	strings
 )
 
-// TODO rename to Token
-// TODO rename enum Token to TokenType
-struct Tok {
-	tok      Token // the token number/enum; for quick comparisons
+struct Token {
+	tok      TokenKind // the token number/enum; for quick comparisons
 	lit      string // literal representation of the token
 	line_nr  int // the line number in the source where the token occured
 	name_idx int // name table index for O(1) lookup
@@ -32,11 +30,11 @@ struct Parser {
 	pref &Preferences // Preferences shared from V struct
 mut:
 	scanner &Scanner
-	tokens []Tok
+	tokens []Token
 	token_idx int
-	tok Token
-	prev_tok Token
-	prev_tok2 Token // TODO remove these once the tokens are cached
+	tok TokenKind
+	prev_tok TokenKind
+	prev_tok2 TokenKind // TODO remove these once the tokens are cached
 	lit string
 	cgen &CGen
 	table &Table
@@ -165,7 +163,7 @@ fn (v mut V) new_parser(scanner &Scanner, id string) Parser {
 fn (p mut Parser) scan_tokens() {
 	for {
 		res := p.scanner.scan()
-		p.tokens << Tok{
+		p.tokens << Token{
 			tok: res.tok
 			lit: res.lit
 			line_nr: p.scanner.line_nr
@@ -188,7 +186,7 @@ fn (p mut Parser) next() {
 	p.prev_tok = p.tok
 	p.scanner.prev_tok = p.tok
 	if p.token_idx >= p.tokens.len {
-		p.tok = Token.eof
+		p.tok = TokenKind.eof
 		p.lit = ''
 		return
 	}
@@ -199,25 +197,25 @@ fn (p mut Parser) next() {
 	p.scanner.line_nr = res.line_nr
 }
 
-fn (p & Parser) peek() Token {
+fn (p & Parser) peek() TokenKind {
 	if p.token_idx >= p.tokens.len - 2 {
-		return Token.eof
+		return TokenKind.eof
 	}
 	tok := p.tokens[p.token_idx]
 	return tok.tok
 }
 
 // TODO remove dups
-[inline] fn (p &Parser) prev_token() Tok {
+[inline] fn (p &Parser) prev_token() Token {
 	return p.tokens[p.token_idx - 2]
 }
 
-[inline] fn (p &Parser) cur_tok() Tok {
+[inline] fn (p &Parser) cur_tok() Token {
 	return p.tokens[p.token_idx - 1]
 }
-[inline] fn (p &Parser) peek_token() Tok {
+[inline] fn (p &Parser) peek_token() Token {
 	if p.token_idx >= p.tokens.len - 2 {
-		return Tok{ tok:Token.eof }
+		return Token{ tok:TokenKind.eof }
 	}
 	return p.tokens[p.token_idx]
 }
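With the rename, the lookahead helpers above read unambiguously: `peek()` yields only a `TokenKind` (it returns `tok.tok`), while `peek_token()` yields the whole cached `Token`. A hypothetical call site, only to illustrate the distinction (the identifier check and interpolation are not from this commit):

	k := p.peek()       // TokenKind: a cheap enum comparison
	t := p.peek_token() // full Token: kind, literal, position
	if k == .name {
		println('next identifier: $t.lit on line $t.line_nr')
	}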
@@ -286,7 +284,7 @@ fn (p mut Parser) parse(pass Pass) {
 				p.fgenln('')
 			}
 		}
-	case Token.key_enum:
+	case TokenKind.key_enum:
 		p.next()
 		if p.tok == .name {
 			p.fgen('enum ')
@@ -303,7 +301,7 @@ fn (p mut Parser) parse(pass Pass) {
 		else {
 			p.check(.name)
 		}
-	case Token.key_pub:
+	case TokenKind.key_pub:
 		if p.peek() == .func {
 			p.fn_decl()
 		} else if p.peek() == .key_struct {
@@ -312,27 +310,27 @@ fn (p mut Parser) parse(pass Pass) {
 		} else {
 			p.error('wrong pub keyword usage')
 		}
-	case Token.func:
+	case TokenKind.func:
 		p.fn_decl()
-	case Token.key_type:
+	case TokenKind.key_type:
 		p.type_decl()
-	case Token.lsbr:
+	case TokenKind.lsbr:
 		// `[` can only mean an [attribute] before a function
 		// or a struct definition
 		p.attribute()
-	case Token.key_struct, Token.key_interface, Token.key_union, Token.lsbr:
+	case TokenKind.key_struct, TokenKind.key_interface, TokenKind.key_union, TokenKind.lsbr:
 		p.struct_decl()
-	case Token.key_const:
+	case TokenKind.key_const:
 		p.const_decl()
-	case Token.hash:
+	case TokenKind.hash:
 		// insert C code, TODO this is going to be removed ASAP
 		// some libraries (like UI) still have lots of C code
 		// # puts("hello");
 		p.chash()
-	case Token.dollar:
+	case TokenKind.dollar:
 		// $if, $else
 		p.comp_time()
-	case Token.key_global:
+	case TokenKind.key_global:
 		if !p.pref.translated && !p.pref.is_live &&
 			!p.builtin_mod && !p.pref.building_v && !os.getwd().contains('/volt') {
 			p.error('__global is only allowed in translated code')
@@ -355,7 +353,7 @@ fn (p mut Parser) parse(pass Pass) {
 		// p.genln('; // global')
 		g += '; // global'
 		p.cgen.consts << g
-	case Token.eof:
+	case TokenKind.eof:
 		//p.log('end of parse()')
 		// TODO: check why this was added? everything seems to work
 		// without it, and it's already happening in fn_decl
@@ -581,12 +579,12 @@ fn (p mut Parser) interface_method(field_name, receiver string) &Fn {
 	return method
 }
 
-fn key_to_type_cat(tok Token) TypeCategory {
+fn key_to_type_cat(tok TokenKind) TypeCategory {
 	switch tok {
-	case Token.key_interface: return TypeCategory.interface_
-	case Token.key_struct: return TypeCategory.struct_
-	case Token.key_union: return TypeCategory.union_
-	//Token.key_ => return .interface_
+	case TokenKind.key_interface: return TypeCategory.interface_
+	case TokenKind.key_struct: return TypeCategory.struct_
+	case TokenKind.key_union: return TypeCategory.union_
+	//TokenKind.key_ => return .interface_
 	}
 	verror('Unknown token: $tok')
 	return TypeCategory.builtin
@@ -862,13 +860,13 @@ fn (p &Parser) strtok() string {
 
 // same as check(), but adds a space to the formatter output
 // TODO bad name
-fn (p mut Parser) check_space(expected Token) {
+fn (p mut Parser) check_space(expected TokenKind) {
 	p.fspace()
 	p.check(expected)
 	p.fspace()
 }
 
-fn (p mut Parser) check(expected Token) {
+fn (p mut Parser) check(expected TokenKind) {
 	if p.tok != expected {
 		println('check()')
 		s := 'expected `${expected.str()}` but got `${p.strtok()}`'
@@ -1265,52 +1263,52 @@ fn (p mut Parser) statement(add_semi bool) string {
 		// `a + 3`, `a(7)`, or just `a`
 		q = p.bool_expression()
 	}
-	case Token.key_goto:
+	case TokenKind.key_goto:
 		p.check(.key_goto)
 		p.fgen(' ')
 		label := p.check_name()
 		p.genln('goto $label;')
 		return ''
-	case Token.key_defer:
+	case TokenKind.key_defer:
 		p.defer_st()
 		return ''
-	case Token.hash:
+	case TokenKind.hash:
 		p.chash()
 		return ''
-	case Token.dollar:
+	case TokenKind.dollar:
 		p.comp_time()
-	case Token.key_if:
+	case TokenKind.key_if:
 		p.if_st(false, 0)
-	case Token.key_for:
+	case TokenKind.key_for:
 		p.for_st()
-	case Token.key_switch:
+	case TokenKind.key_switch:
 		p.switch_statement()
-	case Token.key_match:
+	case TokenKind.key_match:
 		p.match_statement(false)
-	case Token.key_mut, Token.key_static:
+	case TokenKind.key_mut, TokenKind.key_static:
 		p.var_decl()
-	case Token.key_return:
+	case TokenKind.key_return:
 		p.return_st()
-	case Token.lcbr:// {} block
+	case TokenKind.lcbr:// {} block
 		p.check(.lcbr)
 		p.genln('{')
 		p.statements()
 		return ''
-	case Token.key_continue:
+	case TokenKind.key_continue:
 		if p.for_expr_cnt == 0 {
 			p.error('`continue` statement outside `for`')
 		}
 		p.genln('continue')
 		p.check(.key_continue)
-	case Token.key_break:
+	case TokenKind.key_break:
 		if p.for_expr_cnt == 0 {
 			p.error('`break` statement outside `for`')
 		}
 		p.genln('break')
 		p.check(.key_break)
-	case Token.key_go:
+	case TokenKind.key_go:
 		p.go_statement()
-	case Token.key_assert:
+	case TokenKind.key_assert:
 		p.assert_statement()
 	default:
 		// An expression as a statement
@@ -1357,11 +1355,11 @@ fn ($v.name mut $v.typ) $p.cur_fn.name (...) {
 	is_str := v.typ == 'string'
 	is_ustr := v.typ == 'ustring'
 	switch tok {
-	case Token.assign:
+	case TokenKind.assign:
 		if !is_map && !p.is_empty_c_struct_init {
 			p.gen(' = ')
 		}
-	case Token.plus_assign:
+	case TokenKind.plus_assign:
 		if is_str && !p.is_js {
 			p.gen('= string_add($v.name, ')// TODO can't do `foo.bar += '!'`
 		}
@@ -1628,42 +1626,42 @@ fn (p mut Parser) bterm() string {
 	if is_str && !p.is_js { //&& !p.is_sql {
 		p.gen(')')
 		switch tok {
-		case Token.eq: p.cgen.set_placeholder(ph, 'string_eq(')
-		case Token.ne: p.cgen.set_placeholder(ph, 'string_ne(')
-		case Token.le: p.cgen.set_placeholder(ph, 'string_le(')
-		case Token.ge: p.cgen.set_placeholder(ph, 'string_ge(')
-		case Token.gt: p.cgen.set_placeholder(ph, 'string_gt(')
-		case Token.lt: p.cgen.set_placeholder(ph, 'string_lt(')
+		case TokenKind.eq: p.cgen.set_placeholder(ph, 'string_eq(')
+		case TokenKind.ne: p.cgen.set_placeholder(ph, 'string_ne(')
+		case TokenKind.le: p.cgen.set_placeholder(ph, 'string_le(')
+		case TokenKind.ge: p.cgen.set_placeholder(ph, 'string_ge(')
+		case TokenKind.gt: p.cgen.set_placeholder(ph, 'string_gt(')
+		case TokenKind.lt: p.cgen.set_placeholder(ph, 'string_lt(')
 		}
 		/*
-		Token.eq => p.cgen.set_placeholder(ph, 'string_eq(')
-		Token.ne => p.cgen.set_placeholder(ph, 'string_ne(')
-		Token.le => p.cgen.set_placeholder(ph, 'string_le(')
-		Token.ge => p.cgen.set_placeholder(ph, 'string_ge(')
-		Token.gt => p.cgen.set_placeholder(ph, 'string_gt(')
-		Token.lt => p.cgen.set_placeholder(ph, 'string_lt(')
+		TokenKind.eq => p.cgen.set_placeholder(ph, 'string_eq(')
+		TokenKind.ne => p.cgen.set_placeholder(ph, 'string_ne(')
+		TokenKind.le => p.cgen.set_placeholder(ph, 'string_le(')
+		TokenKind.ge => p.cgen.set_placeholder(ph, 'string_ge(')
+		TokenKind.gt => p.cgen.set_placeholder(ph, 'string_gt(')
+		TokenKind.lt => p.cgen.set_placeholder(ph, 'string_lt(')
 		*/
 	}
 	if is_ustr {
 		p.gen(')')
 		switch tok {
-		case Token.eq: p.cgen.set_placeholder(ph, 'ustring_eq(')
-		case Token.ne: p.cgen.set_placeholder(ph, 'ustring_ne(')
-		case Token.le: p.cgen.set_placeholder(ph, 'ustring_le(')
-		case Token.ge: p.cgen.set_placeholder(ph, 'ustring_ge(')
-		case Token.gt: p.cgen.set_placeholder(ph, 'ustring_gt(')
-		case Token.lt: p.cgen.set_placeholder(ph, 'ustring_lt(')
+		case TokenKind.eq: p.cgen.set_placeholder(ph, 'ustring_eq(')
+		case TokenKind.ne: p.cgen.set_placeholder(ph, 'ustring_ne(')
+		case TokenKind.le: p.cgen.set_placeholder(ph, 'ustring_le(')
+		case TokenKind.ge: p.cgen.set_placeholder(ph, 'ustring_ge(')
+		case TokenKind.gt: p.cgen.set_placeholder(ph, 'ustring_gt(')
+		case TokenKind.lt: p.cgen.set_placeholder(ph, 'ustring_lt(')
 		}
 	}
 	if is_float {
 		p.gen(')')
 		switch tok {
-		case Token.eq: p.cgen.set_placeholder(ph, '${expr_type}_eq(')
-		case Token.ne: p.cgen.set_placeholder(ph, '${expr_type}_ne(')
-		case Token.le: p.cgen.set_placeholder(ph, '${expr_type}_le(')
-		case Token.ge: p.cgen.set_placeholder(ph, '${expr_type}_ge(')
-		case Token.gt: p.cgen.set_placeholder(ph, '${expr_type}_gt(')
-		case Token.lt: p.cgen.set_placeholder(ph, '${expr_type}_lt(')
+		case TokenKind.eq: p.cgen.set_placeholder(ph, '${expr_type}_eq(')
+		case TokenKind.ne: p.cgen.set_placeholder(ph, '${expr_type}_ne(')
+		case TokenKind.le: p.cgen.set_placeholder(ph, '${expr_type}_le(')
+		case TokenKind.ge: p.cgen.set_placeholder(ph, '${expr_type}_ge(')
+		case TokenKind.gt: p.cgen.set_placeholder(ph, '${expr_type}_gt(')
+		case TokenKind.lt: p.cgen.set_placeholder(ph, '${expr_type}_lt(')
 		}
 	}
 }
@@ -2477,7 +2475,7 @@ fn (p mut Parser) expression() string {
 		return 'int'
 	}
 	// + - | ^
-	for p.tok in [Token.plus, .minus, .pipe, .amp, .xor] {
+	for p.tok in [TokenKind.plus, .minus, .pipe, .amp, .xor] {
 		tok_op := p.tok
 		if typ == 'bool' {
 			p.error('operator ${p.tok.str()} not defined on bool ')
@@ -2576,7 +2574,7 @@ fn (p mut Parser) unary() string {
 	mut typ := ''
 	tok := p.tok
 	switch tok {
-	case Token.not:
+	case TokenKind.not:
 		p.gen('!')
 		p.check(.not)
 		// typ should be bool type
@@ -2585,7 +2583,7 @@ fn (p mut Parser) unary() string {
 			p.error('operator ! requires bool type, not `$typ`')
 		}
 
-	case Token.bit_not:
+	case TokenKind.bit_not:
 		p.gen('~')
 		p.check(.bit_not)
 		typ = p.bool_expression()
@@ -2606,7 +2604,7 @@ fn (p mut Parser) factor() string {
 		p.gen('opt_none()')
 		p.check(.key_none)
 		return p.expected_type
-	case Token.number:
+	case TokenKind.number:
 		typ = 'int'
 		// Check if float (`1.0`, `1e+3`) but not if is hexa
 		if (p.lit.contains('.') || (p.lit.contains('e') || p.lit.contains('E'))) &&
@@ -2624,13 +2622,13 @@ fn (p mut Parser) factor() string {
 		}
 		p.gen(p.lit)
 		p.fgen(p.lit)
-	case Token.minus:
+	case TokenKind.minus:
 		p.gen('-')
 		p.fgen('-')
 		p.next()
 		return p.factor()
 		// Variable
-	case Token.key_sizeof:
+	case TokenKind.key_sizeof:
 		p.gen('sizeof(')
 		p.fgen('sizeof(')
 		p.next()
@@ -2640,10 +2638,10 @@ fn (p mut Parser) factor() string {
 		p.gen('$sizeof_typ)')
 		p.fgen('$sizeof_typ)')
 		return 'int'
-	case Token.amp, Token.dot, Token.mul:
+	case TokenKind.amp, TokenKind.dot, TokenKind.mul:
 		// (dot is for enum vals: `.green`)
 		return p.name_expr()
-	case Token.name:
+	case TokenKind.name:
 		// map[string]int
 		if p.lit == 'map' && p.peek() == .lsbr {
 			return p.map_init()
@@ -2660,7 +2658,7 @@ fn (p mut Parser) factor() string {
 		//}
 		typ = p.name_expr()
 		return typ
-	case Token.key_default:
+	case TokenKind.key_default:
 		p.next()
 		p.next()
 		name := p.check_name()
@@ -2670,7 +2668,7 @@ fn (p mut Parser) factor() string {
 		p.gen('default(T)')
 		p.next()
 		return 'T'
-	case Token.lpar:
+	case TokenKind.lpar:
 		//p.gen('(/*lpar*/')
 		p.gen('(')
 		p.check(.lpar)
@@ -2684,38 +2682,38 @@ fn (p mut Parser) factor() string {
 		p.ptr_cast = false
 		p.gen(')')
 		return typ
-	case Token.chartoken:
+	case TokenKind.chartoken:
 		p.char_expr()
 		typ = 'byte'
 		return typ
-	case Token.str:
+	case TokenKind.str:
 		p.string_expr()
 		typ = 'string'
 		return typ
-	case Token.key_false:
+	case TokenKind.key_false:
 		typ = 'bool'
 		p.gen('0')
 		p.fgen('false')
-	case Token.key_true:
+	case TokenKind.key_true:
 		typ = 'bool'
 		p.gen('1')
 		p.fgen('true')
-	case Token.lsbr:
+	case TokenKind.lsbr:
 		// `[1,2,3]` or `[]` or `[20]byte`
 		// TODO have to return because arrayInit does next()
 		// everything should do next()
 		return p.array_init()
-	case Token.lcbr:
+	case TokenKind.lcbr:
 		// `m := { 'one': 1 }`
 		if p.peek() == .str {
 			return p.map_init()
 		}
 		// { user | name :'new name' }
 		return p.assoc()
-	case Token.key_if:
+	case TokenKind.key_if:
 		typ = p.if_st(true, 0)
 		return typ
-	case Token.key_match:
+	case TokenKind.key_match:
 		typ = p.match_statement(true)
 		return typ
 	default:
@@ -33,7 +33,7 @@ mut:
 	fmt_out strings.Builder
 	fmt_indent int
 	fmt_line_empty bool
-	prev_tok Token
+	prev_tok TokenKind
 	fn_name string // needed for @FN
 	should_print_line_on_error bool
 	should_print_errors_in_color bool
@@ -84,11 +84,11 @@ fn new_scanner(text string) &Scanner {
 
 // TODO remove once multiple return values are implemented
 struct ScanRes {
-	tok Token
+	tok TokenKind
 	lit string
 }
 
-fn scan_res(tok Token, lit string) ScanRes {
+fn scan_res(tok TokenKind, lit string) ScanRes {
 	return ScanRes{tok, lit}
 }
 
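As the TODO above notes, `ScanRes` exists only because V lacked multiple return values at this point, so `scan_res()` bundles the (kind, literal) pair into one struct. A hypothetical construction, assuming the `.name` shorthand resolves against the `TokenKind` parameter:

	res := scan_res(.name, 'foo') // ScanRes{tok: TokenKind.name, lit: 'foo'}
	if res.tok == .name {
		println(res.lit) // foo
	}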
compiler/token.v (224 changes)
@@ -4,7 +4,7 @@
 
 module main
 
-enum Token {
+enum TokenKind {
 	eof
 	name // user
 	number // 123
@@ -116,115 +116,115 @@ enum Token {
 // Keywords['return'] == .key_return
 fn build_keys() map[string]int {
 	mut res := map[string]int
-	for t := int(Token.keyword_beg) + 1; t < int(Token.keyword_end); t++ {
+	for t := int(TokenKind.keyword_beg) + 1; t < int(TokenKind.keyword_end); t++ {
 		key := TokenStr[t]
 		res[key] = int(t)
 	}
 	return res
 }
 
-// TODO remove once we have `enum Token { name('name') if('if') ... }`
+// TODO remove once we have `enum TokenKind { name('name') if('if') ... }`
 fn build_token_str() []string {
 	mut s := [''].repeat(NrTokens)
-	s[Token.keyword_beg] = ''
-	s[Token.keyword_end] = ''
-	s[Token.eof] = 'eof'
-	s[Token.name] = 'name'
-	s[Token.number] = 'number'
-	s[Token.str] = 'STR'
-	s[Token.chartoken] = 'char'
-	s[Token.plus] = '+'
-	s[Token.minus] = '-'
-	s[Token.mul] = '*'
-	s[Token.div] = '/'
-	s[Token.mod] = '%'
-	s[Token.xor] = '^'
-	s[Token.bit_not] = '~'
-	s[Token.pipe] = '|'
-	s[Token.hash] = '#'
-	s[Token.amp] = '&'
-	s[Token.inc] = '++'
-	s[Token.dec] = '--'
-	s[Token.and] = '&&'
-	s[Token.logical_or] = '||'
-	s[Token.not] = '!'
-	s[Token.dot] = '.'
-	s[Token.dotdot] = '..'
-	s[Token.ellipsis] = '...'
-	s[Token.comma] = ','
-	//s[Token.at] = '@'
-	s[Token.semicolon] = ';'
-	s[Token.colon] = ':'
-	s[Token.arrow] = '=>'
-	s[Token.assign] = '='
-	s[Token.decl_assign] = ':='
-	s[Token.plus_assign] = '+='
-	s[Token.minus_assign] = '-='
-	s[Token.mult_assign] = '*='
-	s[Token.div_assign] = '/='
-	s[Token.xor_assign] = '^='
-	s[Token.mod_assign] = '%='
-	s[Token.or_assign] = '|='
-	s[Token.and_assign] = '&='
-	s[Token.righ_shift_assign] = '>>='
-	s[Token.left_shift_assign] = '<<='
-	s[Token.lcbr] = '{'
-	s[Token.rcbr] = '}'
-	s[Token.lpar] = '('
-	s[Token.rpar] = ')'
-	s[Token.lsbr] = '['
-	s[Token.rsbr] = ']'
-	s[Token.eq] = '=='
-	s[Token.ne] = '!='
-	s[Token.gt] = '>'
-	s[Token.lt] = '<'
-	s[Token.ge] = '>='
-	s[Token.le] = '<='
-	s[Token.question] = '?'
-	s[Token.left_shift] = '<<'
-	s[Token.righ_shift] = '>>'
-	//s[Token.line_com] = '//'
-	s[Token.nl] = 'NLL'
-	s[Token.dollar] = '$'
-	s[Token.key_assert] = 'assert'
-	s[Token.key_struct] = 'struct'
-	s[Token.key_if] = 'if'
-	s[Token.key_else] = 'else'
-	s[Token.key_return] = 'return'
-	s[Token.key_module] = 'module'
-	s[Token.key_sizeof] = 'sizeof'
-	s[Token.key_go] = 'go'
-	s[Token.key_goto] = 'goto'
-	s[Token.key_const] = 'const'
-	s[Token.key_mut] = 'mut'
-	s[Token.key_type] = 'type'
-	s[Token.key_for] = 'for'
-	s[Token.key_switch] = 'switch'
-	s[Token.key_case] = 'case'
-	s[Token.func] = 'fn'
-	s[Token.key_true] = 'true'
-	s[Token.key_false] = 'false'
-	s[Token.key_continue] = 'continue'
-	s[Token.key_break] = 'break'
-	s[Token.key_import] = 'import'
-	s[Token.key_embed] = 'embed'
+	s[TokenKind.keyword_beg] = ''
+	s[TokenKind.keyword_end] = ''
+	s[TokenKind.eof] = 'eof'
+	s[TokenKind.name] = 'name'
+	s[TokenKind.number] = 'number'
+	s[TokenKind.str] = 'STR'
+	s[TokenKind.chartoken] = 'char'
+	s[TokenKind.plus] = '+'
+	s[TokenKind.minus] = '-'
+	s[TokenKind.mul] = '*'
+	s[TokenKind.div] = '/'
+	s[TokenKind.mod] = '%'
+	s[TokenKind.xor] = '^'
+	s[TokenKind.bit_not] = '~'
+	s[TokenKind.pipe] = '|'
+	s[TokenKind.hash] = '#'
+	s[TokenKind.amp] = '&'
+	s[TokenKind.inc] = '++'
+	s[TokenKind.dec] = '--'
+	s[TokenKind.and] = '&&'
+	s[TokenKind.logical_or] = '||'
+	s[TokenKind.not] = '!'
+	s[TokenKind.dot] = '.'
+	s[TokenKind.dotdot] = '..'
+	s[TokenKind.ellipsis] = '...'
+	s[TokenKind.comma] = ','
+	//s[TokenKind.at] = '@'
+	s[TokenKind.semicolon] = ';'
+	s[TokenKind.colon] = ':'
+	s[TokenKind.arrow] = '=>'
+	s[TokenKind.assign] = '='
+	s[TokenKind.decl_assign] = ':='
+	s[TokenKind.plus_assign] = '+='
+	s[TokenKind.minus_assign] = '-='
+	s[TokenKind.mult_assign] = '*='
+	s[TokenKind.div_assign] = '/='
+	s[TokenKind.xor_assign] = '^='
+	s[TokenKind.mod_assign] = '%='
+	s[TokenKind.or_assign] = '|='
+	s[TokenKind.and_assign] = '&='
+	s[TokenKind.righ_shift_assign] = '>>='
+	s[TokenKind.left_shift_assign] = '<<='
+	s[TokenKind.lcbr] = '{'
+	s[TokenKind.rcbr] = '}'
+	s[TokenKind.lpar] = '('
+	s[TokenKind.rpar] = ')'
+	s[TokenKind.lsbr] = '['
+	s[TokenKind.rsbr] = ']'
+	s[TokenKind.eq] = '=='
+	s[TokenKind.ne] = '!='
+	s[TokenKind.gt] = '>'
+	s[TokenKind.lt] = '<'
+	s[TokenKind.ge] = '>='
+	s[TokenKind.le] = '<='
+	s[TokenKind.question] = '?'
+	s[TokenKind.left_shift] = '<<'
+	s[TokenKind.righ_shift] = '>>'
+	//s[TokenKind.line_com] = '//'
+	s[TokenKind.nl] = 'NLL'
+	s[TokenKind.dollar] = '$'
+	s[TokenKind.key_assert] = 'assert'
+	s[TokenKind.key_struct] = 'struct'
+	s[TokenKind.key_if] = 'if'
+	s[TokenKind.key_else] = 'else'
+	s[TokenKind.key_return] = 'return'
+	s[TokenKind.key_module] = 'module'
+	s[TokenKind.key_sizeof] = 'sizeof'
+	s[TokenKind.key_go] = 'go'
+	s[TokenKind.key_goto] = 'goto'
+	s[TokenKind.key_const] = 'const'
+	s[TokenKind.key_mut] = 'mut'
+	s[TokenKind.key_type] = 'type'
+	s[TokenKind.key_for] = 'for'
+	s[TokenKind.key_switch] = 'switch'
+	s[TokenKind.key_case] = 'case'
+	s[TokenKind.func] = 'fn'
+	s[TokenKind.key_true] = 'true'
+	s[TokenKind.key_false] = 'false'
+	s[TokenKind.key_continue] = 'continue'
+	s[TokenKind.key_break] = 'break'
+	s[TokenKind.key_import] = 'import'
+	s[TokenKind.key_embed] = 'embed'
 	//Tokens[key_typeof] = 'typeof'
-	s[Token.key_default] = 'default'
-	s[Token.key_enum] = 'enum'
-	s[Token.key_interface] = 'interface'
-	s[Token.key_pub] = 'pub'
-	s[Token.key_import_const] = 'import_const'
-	s[Token.key_in] = 'in'
-	s[Token.key_atomic] = 'atomic'
-	s[Token.key_orelse] = 'or'
-	s[Token.key_global] = '__global'
-	s[Token.key_union] = 'union'
-	s[Token.key_static] = 'static'
-	s[Token.key_as] = 'as'
-	s[Token.key_defer] = 'defer'
-	s[Token.key_match] = 'match'
-	s[Token.key_select] = 'select'
-	s[Token.key_none] = 'none'
+	s[TokenKind.key_default] = 'default'
+	s[TokenKind.key_enum] = 'enum'
+	s[TokenKind.key_interface] = 'interface'
+	s[TokenKind.key_pub] = 'pub'
+	s[TokenKind.key_import_const] = 'import_const'
+	s[TokenKind.key_in] = 'in'
+	s[TokenKind.key_atomic] = 'atomic'
+	s[TokenKind.key_orelse] = 'or'
+	s[TokenKind.key_global] = '__global'
+	s[TokenKind.key_union] = 'union'
+	s[TokenKind.key_static] = 'static'
+	s[TokenKind.key_as] = 'as'
+	s[TokenKind.key_defer] = 'defer'
+	s[TokenKind.key_match] = 'match'
+	s[TokenKind.key_select] = 'select'
+	s[TokenKind.key_none] = 'none'
 	return s
 }
 
@@ -234,8 +234,8 @@ const (
 	KEYWORDS = build_keys()
 )
 
-fn key_to_token(key string) Token {
-	a := Token(KEYWORDS[key])
+fn key_to_token(key string) TokenKind {
+	a := TokenKind(KEYWORDS[key])
 	return a
 }
 
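Together, `KEYWORDS` (from `build_keys()`) and `TokenStr` (from `build_token_str()`) give O(1) lookups in both directions between keyword strings and `TokenKind` ordinals; `key_to_token()` above and `str()`/`is_key()` in the next hunk are thin wrappers over them. A small round-trip sketch, with illustrative results in the comments:

	k := key_to_token('return') // KEYWORDS['return'] -> TokenKind.key_return
	println(k.str())            // 'return', read back via TokenStr[int(k)]
	println(is_key('foo'))      // false: a missing map key yields ordinal 0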
@@ -243,11 +243,11 @@ fn is_key(key string) bool {
 	return int(key_to_token(key)) > 0
 }
 
-fn (t Token) str() string {
+fn (t TokenKind) str() string {
 	return TokenStr[int(t)]
 }
 
-fn (t Token) is_decl() bool {
+fn (t TokenKind) is_decl() bool {
 	// TODO i
 	//return t in [.key_enum, .key_interface, .func, .typ, .key_const,
 	//.key_import_const, .key_struct, .key_pub, .eof]
@@ -258,20 +258,20 @@ fn (t Token) is_decl() bool {
 
 const (
 	AssignTokens = [
-		Token.assign, Token.plus_assign, Token.minus_assign,
-		Token.mult_assign, Token.div_assign, Token.xor_assign,
-		Token.mod_assign,
-		Token.or_assign, Token.and_assign, Token.righ_shift_assign,
-		Token.left_shift_assign
+		TokenKind.assign, TokenKind.plus_assign, TokenKind.minus_assign,
+		TokenKind.mult_assign, TokenKind.div_assign, TokenKind.xor_assign,
+		TokenKind.mod_assign,
+		TokenKind.or_assign, TokenKind.and_assign, TokenKind.righ_shift_assign,
+		TokenKind.left_shift_assign
 	]
 
 )
 
-fn (t Token) is_assign() bool {
+fn (t TokenKind) is_assign() bool {
 	return t in AssignTokens
 }
 
-fn (t []Token) contains(val Token) bool {
+fn (t []TokenKind) contains(val TokenKind) bool {
 	for tt in t {
 		if tt == val {
 			return true
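`is_assign()` leans on V's `in` operator over the `AssignTokens` list, while `contains()` spells out the same membership test for any `[]TokenKind`. A hypothetical parser-side check after the rename (`var_assign()` is an invented handler name):

	if p.tok.is_assign() {
		// `=`, `+=`, `<<=`, ... all take this branch
		p.var_assign()
	}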
@@ -38,7 +38,7 @@ fn (p mut Parser) fgenln(s string) {
 }
 
 /*
-fn (p mut Parser) peek() Token {
+fn (p mut Parser) peek() TokenKind {
 	for {
 		p.cgen.line = p.scanner.line_nr + 1
 		tok := p.scanner.peek()