示例#1
0
def test_tokenize():
    """A define form and an arithmetic form both split into flat token lists."""
    define_src = ['(define a 3)']
    sum_src = ['(+ 10 25 0)']
    define_tokens = tokenize(define_src)
    sum_tokens = tokenize(sum_src)
    assert list(define_tokens) == ['(', 'define', 'a', '3', ')']
    assert list(sum_tokens) == ['(', '+', '10', '25', '0', ')']
示例#2
0
def test_list():
    """(list ...) evaluates each argument and yields a Python list."""
    program = ['(list 1 2 (+ 1 2) 4)']
    parsed = parse_tokens(tokenize(program))
    assert eval_in_env(parsed[0], []) == [1, 2, 3, 4]
示例#3
0
def test_cdr():
    """cdr of a single-element pair is the empty list."""
    program = ['(cdr (cons 1 null))']
    parsed = parse_tokens(tokenize(program))
    assert eval_in_env(parsed[0], []) == []
示例#4
0
 def test_tokenise(self):
     """A set expression splits into paren and atom tokens."""
     expected = ['(', 'set', 'var', '123', ')']
     self.assertEqual(tokenize('(set var 123)'), expected)
示例#5
0
def test_multiply():
    """(* ...) multiplies all of its arguments together."""
    program = ['(* 1 3 5)']
    parsed = parse_tokens(tokenize(program))
    assert eval_in_env(parsed[0], []) == 15
示例#6
0
def test_or_false():
    """(or ...) is false when every clause evaluates to false."""
    program = ['(or (> 2 10) (= 1 2) #f)']
    parsed = parse_tokens(tokenize(program))
    result = eval_in_env(parsed[0], [])
    assert result == False
示例#7
0
def test_parse_add():
    """A define with a nested addition parses into nested lists with ints."""
    program = ['(define a (+ 3 3))']
    parsed = parse_tokens(tokenize(program))
    assert parsed == [['define', 'a', ['+', 3, 3]]]
示例#8
0
def test_nullcheck_3():
    """cdr of a one-element list is null."""
    program = ['(null? (cdr (list 1)))']
    parsed = parse_tokens(tokenize(program))
    result = eval_in_env(parsed[0], [])
    assert result == True
示例#9
0
def test_add():
    """(+ ...) sums all of its arguments."""
    program = ['(+ 1 3 5)']
    parsed = parse_tokens(tokenize(program))
    assert eval_in_env(parsed[0], []) == 9
示例#10
0
def test_divide():
    """(/ ...) divides, truncating to an integer result."""
    program = ['(/ 10 3)']
    parsed = parse_tokens(tokenize(program))
    assert eval_in_env(parsed[0], []) == 3
示例#11
0
def test_subtract():
    """(- ...) subtracts and can produce a negative result."""
    program = ['(- 10 14)']
    parsed = parse_tokens(tokenize(program))
    assert eval_in_env(parsed[0], []) == -4
示例#12
0
 def test_begin_statement(self):
     """A begin wrapping empty parens tokenizes each paren separately."""
     expected = ['(', 'begin', '(', ')', ')']
     self.assertEqual(tokenize('(begin ())'), expected)
示例#13
0
 def test_empty_brackets(self):
     """An empty expression is just the two paren tokens."""
     expected = ['(', ')']
     self.assertEqual(tokenize('()'), expected)
示例#14
0
def test_parse_define_lambda():
    """A lambda definition parses into a nested list structure."""
    program = ['(define add (lambda (x y) (+ x y)))']
    parsed = parse_tokens(tokenize(program))
    expected = [['define', 'add', ['lambda', ['x', 'y'], ['+', 'x', 'y']]]]
    assert parsed == expected
示例#15
0
def test_nullcheck():
    """null? recognizes the null constant itself."""
    program = ['(null? null)']
    parsed = parse_tokens(tokenize(program))
    result = eval_in_env(parsed[0], [])
    assert result == True
示例#16
0
def test_gt():
    """(>) is false for equal operands and true when the first is larger."""
    equal_case = parse_tokens(tokenize(['(> 2 2)']))[0]
    greater_case = parse_tokens(tokenize(['(> 3 2)']))[0]
    assert eval_in_env(equal_case, []) == False
    assert eval_in_env(greater_case, []) == True
示例#17
0
def test_nullcheck_2():
    """A non-empty cons pair is not null."""
    program = ['(null? (cons 1 null))']
    parsed = parse_tokens(tokenize(program))
    result = eval_in_env(parsed[0], [])
    assert result == False
示例#18
0
def test_and_false():
    """(and ...) is false as soon as any clause is false."""
    program = ['(and (> 2 1) (= 1 2) #t)']
    parsed = parse_tokens(tokenize(program))
    result = eval_in_env(parsed[0], [])
    assert result == False
示例#19
0
def test_cons_2():
    """Nested cons onto null builds a proper two-element list, with args evaluated."""
    program = ['(cons (+ 1 3 5) (cons (+ 1 3 5) null))']
    parsed = parse_tokens(tokenize(program))
    assert eval_in_env(parsed[0], []) == [9, 9]
示例#20
0
def test_parse_define():
    """A simple define parses with its numeric literal converted to int."""
    program = ['(define a 3)']
    parsed = parse_tokens(tokenize(program))
    assert parsed == [['define', 'a', 3]]