Example #1
def test_valid_string(lexer_input, expected_value):
    lexer.input(lexer_input)
    tokens = list(lexer)

    assert len(tokens) == 1
    assert tokens[0].type == 'STRING'
    assert tokens[0].value == expected_value
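The lexer_input and expected_value arguments come from a parametrization the excerpt omits. A minimal sketch of how this test could be driven, assuming pytest and the same lexer import as the originals; the sample pairs are illustrative, not from the original suite:
import pytest

# Hypothetical cases: raw source on the left, expected token value on the right.
@pytest.mark.parametrize('lexer_input, expected_value', [
    (r'"hello"', 'hello'),
    (r'"a b c"', 'a b c'),
])
def test_valid_string(lexer_input, expected_value):
    lexer.input(lexer_input)
    tokens = list(lexer)

    assert len(tokens) == 1
    assert tokens[0].type == 'STRING'
    assert tokens[0].value == expected_value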
Example #2
def test_identifier_prefix_reserved_word(identifier):
    lexer.input(identifier)
    tokens = list(lexer)

    assert len(tokens) == 1
    assert tokens[0].type == 'ID'
    assert tokens[0].value == identifier
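This test passes only when the identifier rule matches the longest run of identifier characters and consults a reserved-word table afterwards, so an input like 'iffy' never splits into IF plus a tail. A sketch of the usual PLY idiom; the table contents are an assumption:
reserved = {'if': 'IF', 'else': 'ELSE', 'for': 'FOR', 'while': 'WHILE'}

def t_ID(t):
    r'[a-zA-Z_][a-zA-Z_0-9]*'
    # The regex consumes the whole word first; only an exact table hit
    # turns the match into a keyword token instead of an ID.
    t.type = reserved.get(t.value, 'ID')
    return t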
Example #3
def test_1e42_is_floatnum():
    lexer_input = '1e42'
    lexer.input(lexer_input)
    tokens = list(lexer)

    assert len(tokens) == 1
    assert tokens[0].type == 'FLOATNUM'
    assert tokens[0].value == 1e42
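For '1e42' to come back as a single FLOATNUM with value 1e42, the float rule must accept an exponent without a decimal point and must be tried before the integer rule. A plausible PLY rule, offered as an assumption about the tested lexer:
def t_FLOATNUM(t):
    r'(\d+\.\d*|\.\d+)([eE][+-]?\d+)?|\d+[eE][+-]?\d+'
    # Matches '1.5', '1.', '.5', and bare-exponent forms such as '1e42';
    # defining this before t_INTNUM keeps '1e42' from lexing as INTNUM + ID.
    t.value = float(t.value)
    return t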
Example #4
def test_digit_dot_op_digit(lexer_input):
    """
    (1)(.+)(2) vs. (1.)(+)(2) vs. (1.)(+2)
    """
    lexer.input(lexer_input)
    # tokens = list(lexer)

    pass  # no requirements specified
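The docstring records a real ambiguity: with an element-wise '.+' operator and a float rule that accepts a trailing dot, '1.+2' has several defensible splits. A throwaway probe, assuming the same lexer object, that shows which split a concrete lexer actually makes:
def show_tokenization(source):
    # Diagnostic helper, not part of the suite: show how the lexer splits a string.
    lexer.input(source)
    print([(tok.type, tok.value) for tok in lexer])

# show_tokenization('1.+2')
# A maximal-munch float rule typically yields (1.)(+)(2):
# [('FLOATNUM', 1.0), ('PLUS', '+'), ('INTNUM', 2)]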
Example #5
def test_hash_in_string():
    lexer_input = r'"Hello #world"'
    lexer.input(lexer_input)
    tokens = list(lexer)

    assert len(tokens) == 1
    assert tokens[0].type == 'STRING'
    assert tokens[0].value == "Hello #world"
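Because comments open with '#', this test effectively pins rule precedence: the string rule must claim the opening quote before the comment rule ever sees the '#'. A sketch of rules that satisfy it, an assumption rather than the tested source:
def t_STRING(t):
    r'"[^"\n]*"'
    # Defined before the comment rule, so a '#' inside quotes is swallowed
    # here and never opens a comment.
    t.value = t.value[1:-1]
    return t

def t_COMMENT(t):
    r'\#.*'
    pass  # no return value, so the comment produces no token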
Example #6
def test_ignored(lexer_input, expected_number_of_tokens):
    """
    Następujące znaki powinny być pomijane:
    * białe znaki: spacje, tabulatory, znaki nowej linii
    * komentarze: komentarze rozpoczynające się znakiem # do znaku końca linii
    """
    lexer.input(lexer_input)
    tokens = list(lexer)

    assert len(tokens) == expected_number_of_tokens
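The whitespace half of this requirement is typically covered by PLY's t_ignore string plus a newline rule that only advances the line counter; a minimal sketch under that assumption (a matching comment rule is sketched under Example #5):
t_ignore = ' \t'

def t_newline(t):
    r'\n+'
    t.lexer.lineno += len(t.value)  # track line numbers; no token is emitted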
Example #7
def test_valid_identifier(lexer_input):
    """
    pierwszy znak identyfikatora to litera lub znak _, w kolejnych znakach mogą dodatkowo wystąpić cyfry
    """
    lexer.input(lexer_input)
    tokens = list(lexer)

    assert len(tokens) == 1
    assert tokens[0].type == 'ID'
    assert tokens[0].value == lexer_input
Example #8
def test_valid_number(lexer_input, expected_type, expected_value):
    lexer.input(lexer_input)
    tokens = list(lexer)

    assert len(tokens) == 1
    assert tokens[0].type == expected_type
    assert tokens[0].value == expected_value
    assert type(tokens[0].value) is {
        'INTNUM': int,
        'FLOATNUM': float
    }[expected_type]
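The final assertion checks the Python type of the token value, which holds only if each number rule converts the matched text itself. The integer half as a sketch; the float rule appears under Example #3:
def t_INTNUM(t):
    r'\d+'
    t.value = int(t.value)  # conversion here is what makes the type check pass
    return t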
Example #9
def run():
    filename = sys.argv[1] if len(sys.argv) > 1 else "example1"
    try:
        with open(filename, "r") as file:
            text = file.read()
    except IOError:
        print("Cannot open {0} file".format(filename))
        sys.exit(1)

    parser = Mparser.parser
    lexer.input(text)
    ast = parser.parse(text, lexer=lexer)
    print(ast.printTree())
    typeChecker = TypeChecker()
    typeChecker.visit(ast)  # or alternatively ast.accept(typeChecker)
    print('Checked')

    ast.accept(Interpreter())
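A conventional entry point, assuming run() lives in the script invoked from the shell; the main.py name is illustrative:
if __name__ == '__main__':
    run()  # e.g. invoked as: python main.py example1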
Example #10
def test_token(lexer_input, token_name):
    """
    Analizator leksykalny powinien rozpoznawać następujące leksemy:
    * operatory binare: +, -, *, /
    * macierzowe operatory binarne (dla operacji element po elemencie): .+, .-, .*, ./
    * operatory przypisania: =, +=, -=, *=, /=
    * operatory relacyjne: <, >, <=, >=, !=, ==
    * nawiasy: (,), [,], {,}
    * operator zakresu: :
    * transpozycja macierzy: '
    * przecinek i średnik: , ;
    * słowa kluczowe: if, else, for, while
    * słowa kluczowe: break, continue oraz return
    * słowa kluczowe: eye, zeros oraz ones
    * słowa kluczowe: print
    """
    lexer.input(lexer_input)
    token = next(lexer)

    assert token.type == token_name
    assert token.value == lexer_input
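A compressed sketch of the token inventory this docstring implies, in the usual PLY shape; every name below is a conventional guess rather than the tested module's actual identifiers:
reserved = {w: w.upper() for w in (
    'if', 'else', 'for', 'while',
    'break', 'continue', 'return',
    'eye', 'zeros', 'ones', 'print',
)}

tokens = [
    'PLUS', 'MINUS', 'TIMES', 'DIVIDE',
    'DOTADD', 'DOTSUB', 'DOTMUL', 'DOTDIV',
    'ASSIGN', 'ADDASSIGN', 'SUBASSIGN', 'MULASSIGN', 'DIVASSIGN',
    'LT', 'GT', 'LE', 'GE', 'NE', 'EQ',
    'LPAREN', 'RPAREN', 'LBRACKET', 'RBRACKET', 'LBRACE', 'RBRACE',
    'RANGE', 'TRANSPOSE', 'COMMA', 'SEMICOLON',
    'ID', 'INTNUM', 'FLOATNUM', 'STRING',
] + list(reserved.values())

t_DOTADD = r'\.\+'  # PLY tries longer string rules first, so '.+' beats '+'
t_PLUS = r'\+'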
Example #11
def test_parser():
    filename = "all_examples"
    try:
        with open(filename, "r") as file:
            text = file.read()
    except IOError:
        print("Cannot open {0} file".format(filename))
        sys.exit(1)

    parser = Mparser.parser
    lexer.input(text)
    ast = parser.parse(text, lexer=lexer)

    old_stdout = sys.stdout
    sys.stdout = mystdout = StringIO()
    try:
        print(ast.printTree())
    finally:
        sys.stdout = old_stdout

    with open('actual_tree', 'w') as test_file:
        test_file.write(mystdout.getvalue())
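The test stops at writing the dump; a natural companion assertion compares it against a stored reference dump. The 'expected_tree' file name here is an assumption:
def test_tree_matches_reference():
    with open('actual_tree') as actual, open('expected_tree') as expected:
        assert actual.read() == expected.read()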
Example #12
def test_invalid_number(lexer_input):
    lexer.input(lexer_input)
    tokens = list(lexer)

    assert len(tokens) != 1 or tokens[0].type not in ('INTNUM', 'FLOATNUM')
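The assertion accepts either failure mode: the input does not lex to a single token, or the single token is not numeric. Illustrative inputs, assuming pytest parametrization; these cases are not from the original suite:
import pytest

# Under the number rules sketched above, '1.2.3' splits into two floats and
# '1e' into a number plus an identifier; either way the single-numeric-token
# property fails.
@pytest.mark.parametrize('lexer_input', ['1.2.3', '1e', '12a'])
def test_invalid_number(lexer_input):
    ...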
Example #13
def test_invalid_string(lexer_input):
    lexer.input(lexer_input)
    tokens = list(lexer)

    assert len(tokens) != 1 or tokens[0].type != 'STRING' or tokens[0].value != lexer_input
Example #14
def test_invalid_identifier(lexer_input):
    lexer.input(lexer_input)
    tokens = list(lexer)

    assert len(tokens) != 1 or tokens[0].type != 'ID' or tokens[0].value != lexer_input
Example #15
def test_reserved_word_is_not_identifier(reserved_word):
    lexer.input(reserved_word)
    tokens = list(lexer)

    assert len(tokens) == 1
    assert tokens[0].type == reserved_word.upper()
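Assuming the reserved table sketched under Example #10, the test can be driven over every keyword directly:
import pytest

# Reuses the hypothetical reserved mapping sketched earlier.
@pytest.mark.parametrize('reserved_word', sorted(reserved))
def test_reserved_word_is_not_identifier(reserved_word):
    lexer.input(reserved_word)
    tokens = list(lexer)

    assert len(tokens) == 1
    assert tokens[0].type == reserved_word.upper()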