clean up parser_test.v
parent d9835c1ecf
commit 9b37fc7310
@@ -158,7 +158,7 @@ fn (p mut Parser) fnext() {
	//println('eof ret')
	//return
	//}
	if p.tok == .rcbr && !p.inside_if_expr { //&& p.prev_tok != .lcbr {
	if p.tok == .rcbr && !p.inside_if_expr && p.prev_tok != .lcbr {
		p.fmt_dec()
	}
	s := p.strtok()
@@ -167,7 +167,7 @@ fn (p mut Parser) fnext() {
	}
	// vfmt: increase indentation on `{` unless it's `{}`
	inc_indent := false
	if p.tok == .lcbr && !p.inside_if_expr {// && p.peek() != .rcbr {
	if p.tok == .lcbr && !p.inside_if_expr && p.peek() != .rcbr {
		p.fgen_nl()
		p.fmt_inc()
	}
@@ -267,7 +267,7 @@ fn (p &Parser) gen_fmt() {
	])
	*/
	//.replace('\n\n\n\n', '\n\n')


	s2 := s1.replace(' \n', '\n')
	s3 := s2.replace(') or{', ') or {')
	s4 := s3.replace(')or{', ') or {')
@@ -305,4 +305,4 @@ fn write_formatted_source(file_name string, s string) string {
	out.writeln(s.trim_space())//p.scanner.fmt_out.str().trim_space())
	out.close()
	return path
}
}
@@ -48,6 +48,15 @@ fn (g mut Gen) expr(node ast.Expr) Type {
			g.write(it.val.str())
			return int_type
		}
		ast.ScalarExpr {
			g.expr(it.left)
			g.write(' $it.val ')

		}
		ast.UnaryExpr {
			g.expr(it.left)
			g.write(' $it.op ')
		}
		ast.StringLiteral {
			g.write('"$it.val"')
			return string_type
@@ -52,8 +52,7 @@ pub fn (p mut Parser) expr(rbp int) ast.Expr {
	tok := p.tok
	lit := p.lit
	p.next()
	mut node := ast.Expr{
	}
	mut node := ast.Expr{}
	match tok {
		.lpar {
			node = p.expr(0)
@@ -122,6 +121,5 @@ fn (p mut Parser) stmt() ast.Stmt {
	}
	*/

	return ast.VarDecl{
	}
	return ast.VarDecl{}
}
@@ -6,12 +6,12 @@ import (
)

fn test_parser() {
	//if true { return }
	//expr := ast.IntegerExpr {val:10}
	//expr := ast.BinaryExpr{}

	// print using walk
	expr := parse_expr('3 + 7')
	walk(expr)
	println('\n')

	text_expr := [
@@ -23,8 +23,10 @@ fn test_parser() {
		'3 + (7 * 6)',
		'2 ^ 8 * (7 * 6)',
		'20 + (10 * 15) / 5', // 50
		'(2) + (17*2-30) * (5)+2 - (8/2)*4' // 8
		'(2) + (17*2-30) * (5)+2 - (8/2)*4', // 8
		'2 + "hi"'
	]

	for s in text_expr {
		// print using str method
		x := parse_expr(s)
@@ -35,47 +37,17 @@ fn test_parser() {
	}


fn walk(node ast.Expr) {
	//println('walk()')
	match node {
		ast.BinaryExpr {
			print(' (')
			walk(it.left)
			// print('$it.op.str()')
			match it.op {
				.plus {
					print(' + ')
				}
				.minus {
					print(' - ')
				}
				else {}

			}
			walk(it.right)
			print(') ')
		}
		ast.ScalarExpr {
			walk(it.left)
			print(' $it.val ')
		}
		ast.UnaryExpr {
			walk(it.left)
			print(' $it.op ')
		}
		else { }
	}
/*
fn test_cgen() {
	//expr := parse_expr('3 + 7 * 2')
	expr := parse_stmt('a := 3 + "f"')
	//expr2 := parse_stmt('a := 3 + "f"')
	expr2 := parse_expr('2 +3 ')//"helo"')
	program := ast.Program{
		exprs: [
			expr,
			expr2,
			//parse_expr('2 * 2'),
		]
	}
	cgen.gen(program)
	//cgen.save()
*/
}
@@ -329,10 +329,7 @@ pub fn (tok Token) precedence() int {

// is_scalar returns true if the token is a scalar
pub fn (tok Token) is_scalar() bool {
	match tok {
		.number { return true }
		else { return false }
	}
	return tok in [.number, .str]
}

// is_unary returns true if the token can be in a unary expression
@@ -344,8 +341,8 @@ pub fn (tok Token) is_unary() bool {
	}
}

// NOTE: do we need this for all tokens (is_left_assoc / is_right_assoc),
// or only ones with the same precedence?
// NOTE: do we need this for all tokens (is_left_assoc / is_right_assoc),
// or only ones with the same precedence?

// is_left_assoc returns true if the token is left associative
pub fn (tok Token) is_left_assoc() bool {
@@ -366,7 +363,7 @@ pub fn (tok Token) is_right_assoc() bool {
	match tok {
		// `+` | `-` | `!` | `++` | `--`
		.plus, .minus, .not, .inc, .dec,
		// `=` | `+=` | `-=` | `*=` | `/=`
		// `=` | `+=` | `-=` | `*=` | `/=`
		.assign, .plus_assign, .minus_assign, .mult_assign, .div_assign,
		// `%=` | `>>=` | `<<=`
		.mod_assign, .righ_shift_assign, .left_shift_assign,