Example #1
    def test_integer_expressions(self) -> None:
        program: Program = Program(statements=[
            ExpressionStatement(token=Token(TokenType.INT, literal='5'),
                                expression=Integer(token=Token(TokenType.INT,
                                                               literal='5'),
                                                   value=5)),
        ])

        program_str = str(program)

        self.assertEqual(program_str, '5')
Example #2
    def test_return_statement(self) -> None:
        program: Program = Program(statements=[
            ReturnStatement(token=Token(TokenType.RETURN, literal='=>'),
                            return_value=Identifier(token=Token(
                                TokenType.IDENT, literal='my_val'),
                                                    value='my_val'))
        ])

        program_str = str(program)

        self.assertEqual(program_str, '=> my_val;')
Example #3
    def test_eof(self) -> None:
        source: str = '+'
        lexer: Lexer = Lexer(source)

        tokens: List[Token] = []
        for i in range(len(source) + 1):
            tokens.append(lexer.next_token())

        expected_tokens: List[Token] = [
            Token(TokenType.PLUS, '+'),
            Token(TokenType.EOF, '')
        ]

        self.assertEqual(tokens, expected_tokens)
Example #4
    def test_let_statement(self) -> None:
        program: Program = Program(statements=[
            LetStatement(token=Token(TokenType.LET, literal='let'),
                         name=Identifier(token=Token(TokenType.IDENT,
                                                     literal='my_val'),
                                         value='my_val'),
                         value=Identifier(token=Token(TokenType.IDENT,
                                                      literal='other_val'),
                                          value='other_val'))
        ])

        program_str = str(program)

        self.assertEqual(program_str, 'let my_val = other_val;')
Example #5
    def test_illegal(self) -> None:
        source: str = '¡¿@'
        lexer: Lexer = Lexer(source)

        tokens: List[Token] = []
        for i in range(len(source)):
            tokens.append(lexer.next_token())

        expected_token: List[Token] = [
            Token(TokenType.ILLEGAL, '¡'),
            Token(TokenType.ILLEGAL, '¿'),
            Token(TokenType.ILLEGAL, '@')
        ]

        self.assertEqual(tokens, expected_token)
Example #6
    def _make_a_lot_of_character_token(self, character: str,
                                       token_type: TokenType) -> Token:
        # Consume characters up to and including the next occurrence of
        # `character` (e.g. the closing quote of a string literal) and
        # return the whole delimited span as a single token.
        initial_position = self._read_position

        self._read_character()

        while self._character != character:
            self._read_character()

        return Token(token_type,
                     self._source[initial_position - 1:self._read_position])
Example #7
    def test_funtion_call(self) -> None:
        source: str = 'let variable = suma(2,3)'
        lexer: Lexer = Lexer(source)

        tokens: List[Token] = []
        for i in range(9):
            tokens.append(lexer.next_token())

        expected_tokens: List[Token] = [
            Token(TokenType.LET, 'let'),
            Token(TokenType.IDENT, 'variable'),
            Token(TokenType.ASSIGN, '='),
            Token(TokenType.IDENT, 'suma'),
            Token(TokenType.LPAREN, '('),
            Token(TokenType.INT, '2'),
            Token(TokenType.COMMA, ','),
            Token(TokenType.INT, '3'),
            Token(TokenType.RPAREN, ')')
        ]
        self.assertEqual(tokens, expected_tokens)
Example #8
    def _parse_tuple(self,
                     fst_value: Optional[Expression]) -> Optional[Expression]:
        assert self._current_token is not None
        tuple_values = TupleValues(token=Token(TokenType.COMMA, '('))

        values = [fst_value]

        self._advance_tokens()

        while self._current_token.token_type == TokenType.COMMA:
            self._advance_tokens()

            if expression := self._parse_expression(Precedence.LOWEST):
                values.append(expression)

                self._advance_tokens()
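
The excerpt above is cut off inside the collection loop, so the end of _parse_tuple is not shown here. As a standalone illustration of the same comma-separated-collection pattern (an assumption for illustration only, not the sigmaF parser itself), a minimal sketch that gathers items until a closing parenthesis could look like this:

from typing import List


def parse_tuple_literal(tokens: List[str]) -> List[str]:
    # Hypothetical sketch: gather values separated by ',' until ')'.
    values: List[str] = []
    i = 0
    while i < len(tokens) and tokens[i] != ')':
        values.append(tokens[i])   # the value itself
        i += 1
        if i < len(tokens) and tokens[i] == ',':
            i += 1                 # skip the separator
    return values


# parse_tuple_literal(['1', ',', '2', ',', '3', ')']) returns ['1', '2', '3']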
Example #9
    def test_delimiters(self) -> None:
        source: str = '(){}[],;'
        lexer: Lexer = Lexer(source)

        tokens: List[Token] = []
        for i in range(len(source)):
            tokens.append(lexer.next_token())

        expected_tokens: List[Token] = [
            Token(TokenType.LPAREN, '('),
            Token(TokenType.RPAREN, ')'),
            Token(TokenType.LBRACE, '{'),
            Token(TokenType.RBRACE, '}'),
            Token(TokenType.LBRAKET, '['),
            Token(TokenType.RBRAKET, ']'),
            Token(TokenType.COMMA, ','),
            Token(TokenType.SEMICOLON, ';')
        ]
        self.assertEqual(tokens, expected_tokens)
Example #10
    def test_one_character_operator(self) -> None:
        source: str = '=+-/*<>%'
        lexer: Lexer = Lexer(source)

        tokens: List[Token] = []
        for i in range(len(source)):
            tokens.append(lexer.next_token())

        expected_tokens: List[Token] = [
            Token(TokenType.ASSIGN, '='),
            Token(TokenType.PLUS, '+'),
            Token(TokenType.MINUS, '-'),
            Token(TokenType.DIVISION, '/'),
            Token(TokenType.MULTIPLICATION, '*'),
            Token(TokenType.LT, '<'),
            Token(TokenType.GT, '>'),
            Token(TokenType.MODULUS, '%'),
        ]

        self.assertEqual(tokens, expected_tokens)
Example #11
    def test_control_statements(self) -> None:
        source: str = '''
            if 5 < 10 then true else false
        '''
        lexer: Lexer = Lexer(source)

        tokens: List[Token] = []
        for i in range(8):
            tokens.append(lexer.next_token())

        expected_tokens: List[Token] = [
            Token(TokenType.IF, 'if'),
            Token(TokenType.INT, '5'),
            Token(TokenType.LT, '<'),
            Token(TokenType.INT, '10'),
            Token(TokenType.THEN, 'then'),
            Token(TokenType.TRUE, 'true'),
            Token(TokenType.ELSE, 'else'),
            Token(TokenType.FALSE, 'false')
        ]
        self.assertEqual(tokens, expected_tokens)
Example #12
    def test_two_character_operator(self) -> None:

        source: str = '''
            10 == 10
            10 != 10
            10 >= 10
            10 <= 10
            10 ** 10
            10 || 10
            10 && 10
        '''
        lexer: Lexer = Lexer(source)

        tokens: List[Token] = []
        for i in range(21):
            tokens.append(lexer.next_token())

        expected_tokens: List[Token] = [
            Token(TokenType.INT, '10'),
            Token(TokenType.EQ, '=='),
            Token(TokenType.INT, '10'),
            Token(TokenType.INT, '10'),
            Token(TokenType.NOT_EQ, '!='),
            Token(TokenType.INT, '10'),
            Token(TokenType.INT, '10'),
            Token(TokenType.G_OR_EQ_T, '>='),
            Token(TokenType.INT, '10'),
            Token(TokenType.INT, '10'),
            Token(TokenType.L_OR_EQ_T, '<='),
            Token(TokenType.INT, '10'),
            Token(TokenType.INT, '10'),
            Token(TokenType.EXPONENTIATION, '**'),
            Token(TokenType.INT, '10'),
            Token(TokenType.INT, '10'),
            Token(TokenType.OR, '||'),
            Token(TokenType.INT, '10'),
            Token(TokenType.INT, '10'),
            Token(TokenType.AND, '&&'),
            Token(TokenType.INT, '10'),
        ]
        self.assertEqual(tokens, expected_tokens)
Example #13
    def _make_two_character_token(self, token_type: TokenType) -> Token:
        # Join the current character with the one that follows it
        # (e.g. '==', '=>', '->') and emit them as a single token.
        prefix = self._character
        self._read_character()
        suffix = self._character

        return Token(token_type, f'{prefix}{suffix}')
Example #14
import re
from os import system, name

from typing import (Optional, List)

from sigmaF.ast import Program
from sigmaF.object import (Environment, ObjectType)
from sigmaF.parser import Parser
from sigmaF.lexer import Lexer
from sigmaF.token import (
    Token,
    TokenType,
)
from sigmaF.evaluator import evaluate

EOF_TOKEN: Token = Token(TokenType.EOF, '')

_FILENOTFOUNT = "File not found on {}"
_MAXIMUMRECURSIONDEPTH = 'Maximum recursion depth exceeded while being evaluated {}'
_EVALUATIONERROR = 'There was an error in the evaluation process {}'


def _print_parse_errors(errors: List[str]):
    for error in errors:
        print(error)


def _clean_comments(source: str) -> str:
    pattern_single_line_comment = re.compile(r'\-\-.*(\n|\b)')
    pattern_multiline_comment = re.compile(r'\/\*(\s|.)*?\*\/')
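
The body of _clean_comments is truncated above. A minimal sketch of how those two patterns are usually applied (an assumption, not necessarily the project's own implementation) would substitute the matches away before the source reaches the lexer:

import re


def clean_comments(source: str) -> str:
    # Hypothetical sketch: drop multi-line (/* ... */) comments first,
    # then single-line (-- ...) comments.
    source = re.sub(r'\/\*(\s|.)*?\*\/', '', source)
    source = re.sub(r'\-\-.*(\n|\b)', '', source)
    return source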
Example #15
    def next_token(self) -> Token:
        self._skip_whitespace()

        if match(r'^=$', self._character):
            if self._peek_character() == '=':
                token = self._make_two_character_token(TokenType.EQ)
            elif self._peek_character() == '>':
                token = self._make_two_character_token(TokenType.RETURN)
            else:
                token = Token(TokenType.ASSIGN, self._character)
        elif match(r'^\"$', self._character):
            if self._peek_ahead_character('\"'):
                token = self._make_a_lot_of_character_token(
                    self._character, TokenType.STRING)
            else:
                token = Token(TokenType.ILLEGAL, "\"")
        elif match(r'^\+$', self._character):
            token = Token(TokenType.PLUS, self._character)
        elif match(r'^$', self._character):
            token = Token(TokenType.EOF, self._character)
        elif match(r'^\($', self._character):
            token = Token(TokenType.LPAREN, self._character)
        elif match(r'^\)$', self._character):
            token = Token(TokenType.RPAREN, self._character)
        elif match(r'^\{$', self._character):
            token = Token(TokenType.LBRACE, self._character)
        elif match(r'^\}$', self._character):
            token = Token(TokenType.RBRACE, self._character)
        elif match(r'^\[$', self._character):
            token = Token(TokenType.LBRAKET, self._character)
        elif match(r'^\]$', self._character):
            token = Token(TokenType.RBRAKET, self._character)
        elif match(r'^\,$', self._character):
            token = Token(TokenType.COMMA, self._character)
        elif match(r'^;$', self._character):
            token = Token(TokenType.SEMICOLON, self._character)
        elif match(r'^<$', self._character):
            if self._peek_character() == '=':
                token = self._make_two_character_token(TokenType.L_OR_EQ_T)
            else:
                token = Token(TokenType.LT, self._character)
        elif match(r'^>$', self._character):
            if self._peek_character() == '=':
                token = self._make_two_character_token(TokenType.G_OR_EQ_T)
            else:
                token = Token(TokenType.GT, self._character)
        elif match(r'^-$', self._character):
            if self._peek_character() == '>':
                token = self._make_two_character_token(TokenType.OUTPUTFUNTION)
            else:
                token = Token(TokenType.MINUS, self._character)
        elif match(r'^:$', self._character):
            if self._peek_character() == ':':
                token = self._make_two_character_token(TokenType.TYPEASSIGN)
            else:
                token = Token(TokenType.ILLEGAL, self._character)
        elif match(r'^\|$', self._character):
            if self._peek_character() == '|':
                token = self._make_two_character_token(TokenType.OR)
            else:
                token = Token(TokenType.ILLEGAL, self._character)
        elif match(r'^&$', self._character):
            if self._peek_character() == '&':
                token = self._make_two_character_token(TokenType.AND)
            else:
                token = Token(TokenType.ILLEGAL, self._character)
        elif match(r'^/$', self._character):
            token = Token(TokenType.DIVISION, self._character)
        elif match(r'^\*$', self._character):
            if self._peek_character() == '*':
                token = self._make_two_character_token(
                    TokenType.EXPONENTIATION)
            else:
                token = Token(TokenType.MULTIPLICATION, self._character)
        elif match(r'^%$', self._character):
            token = Token(TokenType.MODULUS, self._character)
        elif match(r'^!$', self._character):
            if self._peek_character() == '=':
                token = self._make_two_character_token(TokenType.NOT_EQ)
            else:
                token = Token(TokenType.ILLEGAL, self._character)
        elif self._is_letter(self._character):
            literal = self._read_identifier()
            token_type = lookup_token_type(literal)

            return Token(token_type, literal)
        elif self._is_number(self._character):
            literal = self._read_number()

            if self._character == '.':
                self._read_character()
                suffix = self._read_number()
                return Token(TokenType.FLOAT, f'{literal}.{suffix}')

            return Token(TokenType.INT, literal)
        else:
            token = Token(TokenType.ILLEGAL, self._character)

        self._read_character()

        return token
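
The tests in this section call next_token a fixed number of times. A short usage sketch (assuming only the sigmaF imports shown in Example #14, with a hypothetical tokenize helper) can instead drain the lexer until the EOF token appears:

from typing import List

from sigmaF.lexer import Lexer
from sigmaF.token import Token, TokenType


def tokenize(source: str) -> List[Token]:
    # Hypothetical helper: collect tokens until the lexer reports EOF.
    lexer = Lexer(source)
    tokens: List[Token] = []
    token = lexer.next_token()
    while token.token_type != TokenType.EOF:
        tokens.append(token)
        token = lexer.next_token()
    return tokens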
Example #16
    def test_assignment(self) -> None:
        source: str = '''
            let x = 5;
            let y = "cinco";
            let foo = 5.0;
        '''
        lexer: Lexer = Lexer(source)

        tokens: List[Token] = []
        for i in range(15):
            tokens.append(lexer.next_token())

        expected_tokens: List[Token] = [
            Token(TokenType.LET, 'let'),
            Token(TokenType.IDENT, 'x'),
            Token(TokenType.ASSIGN, '='),
            Token(TokenType.INT, '5'),
            Token(TokenType.SEMICOLON, ';'),
            Token(TokenType.LET, 'let'),
            Token(TokenType.IDENT, 'y'),
            Token(TokenType.ASSIGN, '='),
            Token(TokenType.STRING, '"cinco"'),
            Token(TokenType.SEMICOLON, ';'),
            Token(TokenType.LET, 'let'),
            Token(TokenType.IDENT, 'foo'),
            Token(TokenType.ASSIGN, '='),
            Token(TokenType.FLOAT, '5.0'),
            Token(TokenType.SEMICOLON, ';'),
        ]
        self.assertEqual(tokens, expected_tokens)
Example #17
    def test_funtion_declaration(self) -> None:
        source: str = '''
            let sum = x::int, y::int -> int {
                => x + y
            }
        '''
        lexer: Lexer = Lexer(source)

        tokens: List[Token] = []
        for i in range(18):
            tokens.append(lexer.next_token())

        expected_tokens: List[Token] = [
            Token(TokenType.LET, 'let'),
            Token(TokenType.IDENT, 'sum'),
            Token(TokenType.ASSIGN, '='),
            Token(TokenType.IDENT, 'x'),
            Token(TokenType.TYPEASSIGN, '::'),
            Token(TokenType.CLASSNAME, 'int'),
            Token(TokenType.COMMA, ','),
            Token(TokenType.IDENT, 'y'),
            Token(TokenType.TYPEASSIGN, '::'),
            Token(TokenType.CLASSNAME, 'int'),
            Token(TokenType.OUTPUTFUNTION, '->'),
            Token(TokenType.CLASSNAME, 'int'),
            Token(TokenType.LBRACE, '{'),
            Token(TokenType.RETURN, '=>'),
            Token(TokenType.IDENT, 'x'),
            Token(TokenType.PLUS, '+'),
            Token(TokenType.IDENT, 'y'),
            Token(TokenType.RBRACE, '}'),
        ]

        self.assertEqual(tokens, expected_tokens)