/
test_functions.py
90 lines (67 loc) · 2.56 KB
/
test_functions.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
from lex import tokenize
from parse import parse_tokens
from evaluate import eval_in_env, Environment
def test_add():
    """The `+` form sums all of its arguments."""
    tokens = tokenize(['(+ 1 3 5)'])
    expression = parse_tokens(tokens)[0]
    result = eval_in_env(expression, Environment([]))
    assert result == 9
def test_multiply():
    """The `*` form multiplies all of its arguments."""
    tokens = tokenize(['(* 1 3 5)'])
    expression = parse_tokens(tokens)[0]
    result = eval_in_env(expression, Environment([]))
    assert result == 15
def test_subtract():
    """The `-` form subtracts and may produce a negative result."""
    tokens = tokenize(['(- 10 14)'])
    expression = parse_tokens(tokens)[0]
    result = eval_in_env(expression, Environment([]))
    assert result == -4
def test_divide():
    """The `/` form divides; 10/3 yields 3, so division appears to truncate.

    NOTE(review): integer-truncation semantics inferred from this expected
    value only — confirm against evaluate.py.
    """
    tokens = tokenize(['(/ 10 3)'])
    expression = parse_tokens(tokens)[0]
    result = eval_in_env(expression, Environment([]))
    assert result == 3
def test_equals():
    """The `=` form compares for equality in both directions."""
    false_case = parse_tokens(tokenize(['(= 1 2)']))[0]
    true_case = parse_tokens(tokenize(['(= 2 2)']))[0]
    assert eval_in_env(false_case, Environment([])) == False
    assert eval_in_env(true_case, Environment([])) == True
def test_lt():
    """The `<` form is strict: equal operands are not less-than."""
    false_case = parse_tokens(tokenize(['(< 2 2)']))[0]
    true_case = parse_tokens(tokenize(['(< 1 2)']))[0]
    assert eval_in_env(false_case, Environment([])) == False
    assert eval_in_env(true_case, Environment([])) == True
def test_gt():
    """The `>` form is strict: equal operands are not greater-than."""
    false_case = parse_tokens(tokenize(['(> 2 2)']))[0]
    true_case = parse_tokens(tokenize(['(> 3 2)']))[0]
    assert eval_in_env(false_case, Environment([])) == False
    assert eval_in_env(true_case, Environment([])) == True
def test_and_true():
    """`and` yields True when every sub-expression is truthy."""
    tokens = tokenize(['(and (> 2 1) (= 1 1) #t)'])
    expression = parse_tokens(tokens)[0]
    assert eval_in_env(expression, Environment([])) == True
def test_and_false():
    """`and` yields False as soon as any sub-expression is falsy."""
    tokens = tokenize(['(and (> 2 1) (= 1 2) #t)'])
    expression = parse_tokens(tokens)[0]
    assert eval_in_env(expression, Environment([])) == False
def test_and_shortcircuit():
    """`and` must stop at the first falsy operand without evaluating the rest.

    `never-reached` is an unbound variable, so if `and` evaluated it,
    eval_in_env would raise.  A clean False result therefore proves the
    short-circuit works.
    """
    source = ['(and #f never-reached)']
    exp = parse_tokens(tokenize(source))[0]
    # No try/except: the old version passed vacuously when no exception was
    # raised (its assert never ran) and also passed when any *unrelated*
    # exception was raised.  Here any exception propagates and fails the
    # test, and the result is actually checked.
    assert eval_in_env(exp, Environment([])) == False
def test_or_true():
    """`or` yields True as soon as any sub-expression is truthy."""
    tokens = tokenize(['(or (> 2 10) (= 1 1) #f)'])
    expression = parse_tokens(tokens)[0]
    assert eval_in_env(expression, Environment([])) == True
def test_or_false():
    """`or` yields False when every sub-expression is falsy."""
    tokens = tokenize(['(or (> 2 10) (= 1 2) #f)'])
    expression = parse_tokens(tokens)[0]
    assert eval_in_env(expression, Environment([])) == False
def test_or_shortcircuit():
    """`or` must stop at the first truthy operand without evaluating the rest.

    `never-reached` is an unbound variable, so if `or` evaluated it,
    eval_in_env would raise.  A clean True result therefore proves the
    short-circuit works.
    """
    source = ['(or #t never-reached)']
    exp = parse_tokens(tokenize(source))[0]
    # No try/except: the old version passed vacuously when no exception was
    # raised (its assert never ran) and also passed when any *unrelated*
    # exception was raised.  Here any exception propagates and fails the
    # test, and the result is actually checked.
    assert eval_in_env(exp, Environment([])) == True