Ejemplo n.º 1
0
def test_table():
    """Exercise TableElement lookup, update, insertion and serialization.

    Verifies that untouched entries keep their original formatting
    (e.g. ``id=42 # My id``) when the table is serialized after edits.
    """
    toml_text = """name = "first"
id=42 # My id


"""

    toks = tuple(lexer.tokenize(toml_text))

    # (element class, start, stop) describing each token slice in order.
    layout = [
        (AtomicElement, 0, 1),
        (WhitespaceElement, 1, 2),
        (PunctuationElement, 2, 3),
        (WhitespaceElement, 3, 4),
        (AtomicElement, 4, 5),
        (NewlineElement, 5, 6),
        (AtomicElement, 6, 7),
        (PunctuationElement, 7, 8),
        (AtomicElement, 8, 9),
        (WhitespaceElement, 9, 10),
        (CommentElement, 10, 12),
        (NewlineElement, 12, 13),
        (NewlineElement, 13, 14),
    ]
    table = TableElement(tuple(cls(toks[a:b]) for cls, a, b in layout))

    # Initial contents are readable both as items and by key.
    assert set(table.items()) == {('name', 'first'), ('id', 42)}
    assert table['name'] == 'first'
    assert table['id'] == 42

    # Inserting a brand-new key.
    table['relation'] = 'another'
    assert set(table.items()) == {('name', 'first'), ('id', 42),
                                  ('relation', 'another')}

    # Overwriting an existing key.
    table['name'] = 'fawzy'
    assert set(table.items()) == {('name', 'fawzy'), ('id', 42),
                                  ('relation', 'another')}

    # Formatting of the untouched `id` entry is preserved on output.
    assert table.serialized() == """name = "fawzy"
id=42 # My id
relation = "another"


"""
Ejemplo n.º 2
0
def test_array_element():
    """Exercise ArrayElement indexing, mutation, append, delete and serialization."""
    toks = tuple(lexer.tokenize('[4, 8, 42, \n 23, 15]'))
    assert len(toks) == 17

    # One element class per token, in stream order (every slice is one token wide).
    kinds = (
        PunctuationElement, AtomicElement, PunctuationElement,
        WhitespaceElement, AtomicElement, PunctuationElement,
        WhitespaceElement, AtomicElement, PunctuationElement,
        WhitespaceElement, NewlineElement, WhitespaceElement,
        AtomicElement, PunctuationElement, WhitespaceElement,
        AtomicElement, PunctuationElement,
    )
    array_element = ArrayElement(
        tuple(cls(toks[i:i + 1]) for i, cls in enumerate(kinds)))

    # Length counts only the atomic values, not punctuation/whitespace.
    assert len(array_element) == 5

    # Positive and negative indexing read the primitive values.
    for index, expected in ((0, 4), (1, 8), (2, 42), (3, 23), (-1, 15)):
        assert array_element[index] == expected

    # Assignment via a negative index.
    array_element[-1] = 12

    # The original whitespace/newline formatting survives the edit.
    assert array_element.serialized() == '[4, 8, 42, \n 23, 12]'

    # Out-of-range access raises IndexError.
    with pytest.raises(IndexError):
        array_element[5]

    # Appending a new value extends the serialized form.
    array_element.append(77)
    assert array_element.serialized() == '[4, 8, 42, \n 23, 12, 77]'

    # Deleting an entry also drops its associated formatting.
    del array_element[3]
    assert array_element.serialized() == '[4, 8, 42, 12, 77]'

    # primitive_value exposes the contents as a plain Python list.
    assert array_element.primitive_value == [4, 8, 42, 12, 77]
Ejemplo n.º 3
0
def test_inline_table():
    """Exercise InlineTableElement lookup, mutation, deletion and serialization."""
    toks = tuple(lexer.tokenize('{ name= "first", id=42}'))

    # One element class per token, in stream order (every slice is one token wide).
    kinds = (
        PunctuationElement, WhitespaceElement, AtomicElement,
        PunctuationElement, WhitespaceElement, AtomicElement,
        PunctuationElement, WhitespaceElement, AtomicElement,
        PunctuationElement, AtomicElement, PunctuationElement,
    )
    table = InlineTableElement(
        tuple(cls(toks[i:i + 1]) for i, cls in enumerate(kinds)))

    # Initial contents by key.
    assert table['name'] == 'first'
    assert table['id'] == 42

    # Overwrite an existing key and insert a new one.
    table['name'] = 'fawzy'
    table['nickname'] = 'nickfawzy'
    assert set(table.items()) == {('name', 'fawzy'), ('id', 42),
                                  ('nickname', 'nickfawzy')}

    # Existing entries keep their exact formatting; the new one gets defaults.
    assert table.serialized(
    ) == '{ name= "fawzy", id=42, nickname = "nickfawzy"}'

    # Deleting entries one by one down to an empty table.
    del table['name']
    assert table.serialized() == '{ id=42, nickname = "nickfawzy"}'

    del table['nickname']
    assert table.serialized() == '{ id=42}'

    del table['id']
    assert table.serialized() == '{ }'

    # Re-populating an emptied table.
    table['item1'] = 11
    table['item2'] = 22
    assert table.serialized() == '{ item1 = 11, item2 = 22}'
Ejemplo n.º 4
0
def primitive_token_to_primitive_element(token):
    """Map a single lexer token to its corresponding primitive element.

    Parameters:
        token: a lexer token; its ``type`` attribute selects the element class.

    Returns:
        A NewlineElement, AtomicElement, PunctuationElement,
        WhitespaceElement, or CommentElement wrapping ``token``.

    Raises:
        RuntimeError: if the token type has no mapped primitive element.
    """
    if token.type == tokens.TYPE_NEWLINE:
        return NewlineElement((token,))
    elif token.type in atomic_token_types:
        return AtomicElement((token,))
    # NOTE(review): an unreachable duplicate of the TYPE_NEWLINE branch
    # used to sit here (the first branch already handles it); removed.
    elif token.type in punctuation_token_types:
        return PunctuationElement((token,))
    elif token.type == tokens.TYPE_WHITESPACE:
        return WhitespaceElement((token,))
    elif token.type == tokens.TYPE_COMMENT:
        return CommentElement((token,))
    else:
        raise RuntimeError("{} has no mapped primitive element".format(token))
Ejemplo n.º 5
0
def test_table():
    """Deleting a key from a TableElement removes it from the items view."""
    source = """id=42 # My id\nage=14"""
    toks = tuple(lexer.tokenize(source))

    # (element class, start, stop) describing each token slice in order.
    layout = [
        (AtomicElement, 0, 1),
        (PunctuationElement, 1, 2),
        (AtomicElement, 2, 3),
        (WhitespaceElement, 3, 4),
        (CommentElement, 4, 6),
        (AtomicElement, 6, 7),
        (PunctuationElement, 7, 8),
        (AtomicElement, 8, 9),
    ]
    table = TableElement([cls(toks[a:b]) for cls, a, b in layout])

    assert set(table.items()) == {('id', 42), ('age', 14)}

    # Removing `id` leaves only `age`.
    del table['id']
    assert set(table.items()) == {('age', 14)}
Ejemplo n.º 6
0
def test_whitespace_element():
    """A WhitespaceElement serializes back to its exact source text."""
    source = ' \t   '
    assert WhitespaceElement(tuple(lexer.tokenize(source))).serialized() == source
Ejemplo n.º 7
0
def create_whitespace_element(length=1, char=' '):
    """Create a WhitespaceElement of `length` repetitions of `char`.

    By default produces a single space.
    """
    token = tokens.Token(tokens.TYPE_WHITESPACE, char)
    return WhitespaceElement((token,) * length)
Ejemplo n.º 8
0
def test_whitespace_element():
    """Serialization round-trips the original whitespace text."""
    text = ' \t   '
    element = WhitespaceElement(tuple(lexer.tokenize(text)))
    assert text == element.serialized()
Ejemplo n.º 9
0
def space_element(token_stream):
    """Consume zero or more leading whitespace tokens from `token_stream`.

    Returns a ``(WhitespaceElement, pending_tokens)`` pair, where
    ``pending_tokens`` is the remainder of the stream after the capture.
    """
    matcher = zero_or_more_tokens(tokens.TYPE_WHITESPACE)
    captured = capture_from(token_stream).find(matcher)
    # Drop falsy placeholders left by a zero-length match.
    whitespace_tokens = list(filter(None, captured.value()))
    return WhitespaceElement(whitespace_tokens), captured.pending_tokens