Example #1
 def evaluate(self, expression: str):
     # Lex the raw string into tokens, parse the tokens into an AST,
     # then evaluate the AST against the current environment.
     lexer = Lexer(expression)
     tokens = lexer.tokenize()
     parser = Parser(tokens)
     tree = parser.parse()  # avoid shadowing the str parameter
     evaluator = Evaluator(self.env)
     return evaluator.evaluate(tree)
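For context, a minimal sketch of how this method might be driven end to end. The import path, the Interpreter wrapper, and the shape of self.env are assumptions inferred from the snippet above, not the library's actual API.

 # Hypothetical driver for the lex -> parse -> evaluate pipeline shown above.
 # The module path and the env dict are assumptions.
 from interpreter import Lexer, Parser, Evaluator  # assumed import path

 class Interpreter:
     def __init__(self, env=None):
         self.env = env if env is not None else {}

     def evaluate(self, expression: str):
         tokens = Lexer(expression).tokenize()
         tree = Parser(tokens).parse()
         return Evaluator(self.env).evaluate(tree)

 if __name__ == "__main__":
     interp = Interpreter(env={"foo": 1})
     # Grammar inferred from the token tests below: comparisons, "or", parentheses.
     print(interp.evaluate("(foo >= 1) or (foo < 0)"))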
Example #2
 def test_tokenize_integer(self):
     expression = "1"
     tokens = Lexer(expression).tokenize()
     self.assertEqual(len(tokens), 1)
     self.assertEqual(tokens[0].type, ttypes.INTEGER)
Example #3
 def test_tokenize_operator_gt_eq(self):
     expression = ">="
     tokens = Lexer(expression).tokenize()
     self.assertEqual(len(tokens), 1)
     self.assertEqual(tokens[0].type, ttypes.GREATER_EQ_THAN)
Example #4
 def test_tokenize_identifier_starts_digit_ko(self):
     expression = "123asdf"
     # An identifier may not start with a digit, so tokenizing must fail.
     with self.assertRaises(UnknownTokenException):
         Lexer(expression).tokenize()
Example #5
 def test_tokenize_operator_lt(self):
     expression = "<"
     tokens = Lexer(expression).tokenize()
     self.assertEqual(len(tokens), 1)
     self.assertEqual(tokens[0].type, ttypes.LESS_THAN)
Example #6
 def test_tokenize_identifier_phrase_mixed_upper(self):
     expression = "AbCdEFGhIjk"
     tokens = Lexer(expression).tokenize()
     self.assertEqual(len(tokens), 1)
     self.assertEqual(tokens[0].type, ttypes.IDENTIFIER)
Example #7
 def test_tokenize_identifier_underscore_also_mixed(self):
     expression = "aB123c_D45efG"
     tokens = Lexer(expression).tokenize()
     self.assertEqual(len(tokens), 1)
     self.assertEqual(tokens[0].type, ttypes.IDENTIFIER)
Example #8
 def test_tokenize_identifier_char_upper(self):
     expression = "A"
     tokens = Lexer(expression).tokenize()
     self.assertEqual(len(tokens), 1)
     self.assertEqual(tokens[0].type, ttypes.IDENTIFIER)
Example #9
 def test_tokenize_identifier_phrase_mixed_lower(self):
     expression = "aBcDEfGHI"
     tokens = Lexer(expression).tokenize()
     self.assertEqual(len(tokens), 1)
     self.assertEqual(tokens[0].type, ttypes.IDENTIFIER)
Example #10
 def test_tokenize_float(self):
     expression = "1.23"
     tokens = Lexer(expression).tokenize()
     self.assertEqual(len(tokens), 1)
     self.assertEqual(tokens[0].type, ttypes.FLOAT)
Example #11
 def test_tokenize_several_tokens_several_spaces(self):
     expression = "( foo    )"
     tokens = Lexer(expression).tokenize()
     self.assertEqual(len(tokens), 3)
Example #12
 def test_tokenize_close_par(self):
     expression = ")"
     tokens = Lexer(expression).tokenize()
     self.assertEqual(len(tokens), 1)
     self.assertEqual(tokens[0].type, ttypes.CLOSEPAR)
Example #13
 def test_tokenize_open_par(self):
     expression = "("
     tokens = Lexer(expression).tokenize()
     self.assertEqual(len(tokens), 1)
     self.assertEqual(tokens[0].type, ttypes.OPENPAR)
Example #14
 def test_tokenize_operator_or(self):
     expression = "or"
     tokens = Lexer(expression).tokenize()
     self.assertEqual(len(tokens), 1)
     self.assertEqual(tokens[0].type, ttypes.OR)
Example #15
 def test_tokenize_operator_not_eq(self):
     expression = "<>"
     tokens = Lexer(expression).tokenize()
     self.assertEqual(len(tokens), 1)
     self.assertEqual(tokens[0].type, ttypes.NOT_EQUALS)
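Taken together, the tests above pin down the token types this lexer must emit. The sketch below collects them into the shape of the ttypes module the assertions imply; the constant values are guesses (the real module may use an enum or different spellings), and only the types exercised here are listed.

 # Inferred sketch of the ttypes module referenced in the tests above.
 # Values are assumptions; the sample lexemes come from the tests themselves.
 INTEGER = "INTEGER"                    # "1"
 FLOAT = "FLOAT"                        # "1.23"
 IDENTIFIER = "IDENTIFIER"              # "A", "aB123c_D45efG" (must not start with a digit)
 OPENPAR = "OPENPAR"                    # "("
 CLOSEPAR = "CLOSEPAR"                  # ")"
 OR = "OR"                              # "or"
 LESS_THAN = "LESS_THAN"                # "<"
 GREATER_EQ_THAN = "GREATER_EQ_THAN"    # ">="
 NOT_EQUALS = "NOT_EQUALS"              # "<>"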
Example #16
 def parse(expression):
     # Convenience helper: lex and parse an expression in one step.
     tokens = Lexer(expression).tokenize()
     return Parser(tokens).parse()
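A possible call site for this helper, assuming it is importable alongside the classes above; the module path and the expression are illustrative.

 # Hypothetical usage of the parse() helper above.
 from interpreter import parse  # assumed import path

 # Expression grammar inferred from the token tests: identifiers, floats,
 # comparison operators, "or", and parentheses.
 tree = parse("(foo >= 1.23) or (bar <> baz)")
 print(tree)  # whatever AST representation Parser.parse() returns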