def test_cannot_convert_EBNF_to_strategy_directly():
    with pytest.raises(InvalidArgument):
        # Not a Lark object
        from_lark(EBNF_GRAMMAR).example()
    with pytest.raises(TypeError):
        # Not even the right number of arguments
        from_lark(EBNF_GRAMMAR, start="value").example()
def test_can_not_use_undefined_terminals_yet():
    grammar = r"""
    list : "[" ELEMENT ("," ELEMENT)* "]"
    %declare ELEMENT
    """

    with pytest.raises(InvalidArgument):
        from_lark(Lark(grammar, start="list")).example()
Example #5
def test_undefined_terminals_require_explicit_strategies():
    elem_grammar = r"""
    list : "[" [ELEMENT ("," ELEMENT)*] "]"
    %declare ELEMENT
    """
    with pytest.raises(InvalidArgument):
        from_lark(Lark(elem_grammar, start="list")).example()
    strategy = {"ELEMENT": just("200")}
    from_lark(Lark(elem_grammar, start="list"), explicit=strategy).example()
def test_generation_without_whitespace():
    list_grammar = r"""
    list : "[" [NUMBER ("," NUMBER)*] "]"
    NUMBER: /[0-9]+/
    """

    @given(from_lark(Lark(list_grammar, start="list")))
    def test(g):
        assert " " not in g

    test()
Example #7
def test_can_generate_ignored_tokens():
    list_grammar = r"""
    list : "[" [STRING ("," STRING)*] "]"
    STRING : /"[a-z]*"/
    WS : /[ \t\r\n]+/
    %ignore WS
    """
    strategy = from_lark(Lark(list_grammar, start="list"))
    # A generated string containing a tab must include ignorable whitespace,
    # since tabs cannot appear inside STRING tokens in this grammar.
    find_any(strategy, lambda s: "\t" in s)
Example #10
def test_cannot_convert_EBNF_to_strategy_directly():
    with pytest.raises(InvalidArgument):
        # Not a Lark object
        from_lark(EBNF_GRAMMAR).example()
    with pytest.raises(TypeError):
        # Not even the right number of arguments
        from_lark(EBNF_GRAMMAR, start="value").example()
    with pytest.raises(InvalidArgument):
        # Wrong type for explicit_strategies
        from_lark(Lark(LIST_GRAMMAR, start="list"), explicit=[]).example()
Example #11
def test_can_specify_start_rule(data, start, type_):
    string = data.draw(
        from_lark(Lark(EBNF_GRAMMAR, start="value"), start=start))
    value = json.loads(string)
    assert isinstance(value, type_)
Example #12
def test_non_string_explicit_strategies_are_invalid():
    with pytest.raises(InvalidArgument):
        from_lark(Lark(LIST_GRAMMAR, start="list"),
                  explicit={
                      "NUMBER": just(0)
                  }).example()
Example #13
def test_cannot_use_explicit_strategies_for_unknown_terminals():
    with pytest.raises(InvalidArgument):
        from_lark(Lark(LIST_GRAMMAR, start="list"),
                  explicit={
                      "unused_name": just("")
                  }).example()
Example #14
import hypothesis
from hypothesis.extra.lark import from_lark
from lark import Lark

grammar_source = r"""

prog: stmt+
stmt: typ id "=" num ";"
typ: "int"
   | "float"
id: /[a-z]+/
num: /[0-9]+/

%ignore /[ ]+/

"""

grammar = Lark(grammar_source, start='prog')


@hypothesis.given(from_lark(grammar, start='prog'))
def test_lark_token_concatenation(prog):
    print('===========')
    print(prog)
    print('----')
    p = grammar.parse(prog)
    print(p)
    print('===========')


if __name__ == "__main__":
    test_lark_token_concatenation()
Example #15
    dict : "{" [STRING ":" value ("," STRING ":" value)*] "}"

    STRING : /"[a-z]*"/
    NUMBER : /-?[1-9][0-9]*(\.[0-9]+)?([eE][+-]?[0-9]+)?/

    WS : /[ \t\r\n]+/
    %ignore WS
"""

LIST_GRAMMAR = r"""
list : "[" [NUMBER ("," NUMBER)*] "]"
NUMBER: /[0-9]+/
"""


@given(from_lark(Lark(EBNF_GRAMMAR, start="value")))
def test_generates_valid_json(string):
    json.loads(string)


@pytest.mark.parametrize(
    "start, type_",
    [
        ("dict", dict),
        ("list", list),
        ("STRING", str),
        ("NUMBER", (int, float)),
        ("TRUE", bool),
        ("FALSE", bool),
        ("NULL", type(None)),
    ],
)
@given(data())  # assumes `data` is imported from hypothesis.strategies
def test_can_specify_start_rule(data, start, type_):
    string = data.draw(from_lark(Lark(EBNF_GRAMMAR, start="value"), start=start))
    value = json.loads(string)
    assert isinstance(value, type_)
# generative grammar: a toy Portuguese horoscope generator
# !pip install lark-parser --user
# !pip install hypothesis --user

from lark import Lark
from hypothesis.extra import lark

grammar = Lark("""
start : PLANETA S "em" S SIGNO S "indica" S evento "."

evento : "que você terá" S PROBLEMA S ALVO | "uma boa fase" S ALVO
PLANETA : "Mercúrio" | "Vênus"
SIGNO : "Capricórnio" | "Peixes"
PROBLEMA : "problemas" | "decepções"
ALVO : "no amor" | "no trabalho" | "na vida financeira" | "na universidade"
S : " "
""")

frase = "Mercúrio em Peixes indica que você terá problemas na universidade."

tree = grammar.parse(frase)

print(tree.pretty())

gen = lark.from_lark(grammar)
for i in range(10):
    print(gen.example())
            | "let me down,\nyou have found her, now go and get her"  

rememberto  : "remember to" WS rememberwhat NL
rememberwhat: "let her into your heart"
            | "let her under your skin"      

then        : "then you" WS thenwhat WS "to make it better" NL
thenwhat    : "can start" 
            | "begin"

better      : starstbetter "waaaaa" NL

starstbetter: "better" WS
            | "better" WS starstbetter

nanana      : beginnanana "na-na-na-na" NL NL

startnanana : "na" WS
            | "na" WS startnanana
            
WS : " "
NL : "\n"
""")

generator = from_lark(grammar)

ex = generator.example()

print()
print("=" * 50)
print(ex)
Example #19
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from lark import Lark
from hypothesis.extra.lark import from_lark
import argparse
import sys

if __name__ == '__main__':

    parser = argparse.ArgumentParser(description='Generate grammar samples.')
    parser.add_argument('--grammar',
                        dest='grammar',
                        help='file with grammar syntax')
    parser.add_argument('--start', dest='start', help='start terminal')
    args = parser.parse_args()
    if not args.grammar:
        sys.exit(1)
    if args.grammar == "mime.lark":
        sys.setrecursionlimit(10000)

    with open(args.grammar, 'r') as grammar:
        # Reuse the already-open file handle instead of reopening the file,
        # and honour --start when it is given.
        sample = from_lark(Lark(grammar,
                                start=args.start or "start")).example()
        print('{}'.format(sample))
Example #20
  | "|"
  | "&"

%ignore / +/
%declare NUM ID

"""

grammar = Lark(grammar_text)
explicit = {
    "NUM": st.integers().map(str),
    "ID": st.text(alphabet="abcdefghijUVWXYZ", min_size=6),
}


@given(from_lark(grammar, explicit=explicit))
def test_c(prog):
    """ Test various randomly generated slabs of C-ish code. """
    print(prog)
    try:
        ast = parse_text(prog)
    except CompilerError as ex:
        print("Compilation error", ex)
    else:
        print(ast)
        print_ast(ast)


if __name__ == "__main__":
    test_c()
         | NUMBER
         | "true"  -> true
         | "false" -> false
         | "null"  -> null
    list : "[" [value ("," value)*] "]"
    dict : "{" [STRING ":" value ("," STRING ":" value)*] "}"

    STRING : /"[a-z]*"/
    NUMBER : /-?[1-9][0-9]*(\.[0-9]+)?([eE][+-]?[0-9]+)?/

    WS : /[ \t\r\n]+/
    %ignore WS
"""


@given(from_lark(Lark(EBNF_GRAMMAR, start="value")))
def test_generates_valid_json(string):
    json.loads(string)


@pytest.mark.parametrize(
    "start, type_",
    [
        ("dict", dict),
        ("list", list),
        ("STRING", text_type),
        ("NUMBER", integer_types + (float, )),
        ("TRUE", bool),
        ("FALSE", bool),
        ("NULL", type(None)),
    ],
Example #22
def test_generation_without_whitespace():
    find_any(from_lark(Lark(LIST_GRAMMAR, start="list")),
             lambda g: " " not in g)
Example #23
    'rfc_1738': _build_path('rfc_1738.lark'),
    'rfc_2397': _build_path('rfc_2397.lark'),
    'rfc_2396': _build_path('rfc_2396.lark'),
    'rfc_6531': _build_path('rfc_6531.lark'),
    'rfc_5321': _build_path('rfc_5321.lark'),
    'rfc_5545': _build_path('rfc_5545.lark'),
    'robotstxt': _build_path('robotstxt.lark'),
    'subunit_v1': _build_path('subunit_v1.lark'),
    'tap13': _build_path('tap13.lark'),
    'toml': _build_path('toml.lark'),
    'yaml': _build_path('yaml.lark')
}

if __name__ == '__main__':
    hypothesis.seed(time.time())

    parser = argparse.ArgumentParser(description='Generate grammar samples.')
    parser.add_argument('--grammar',
                        dest='grammar',
                        help='file with grammar syntax')
    parser.add_argument('--start', dest='start', default=DEFAULT_START,
                        help='start terminal')
    args = parser.parse_args()
    if not args.grammar:
        sys.exit(1)
    if args.grammar == 'mime.lark':
        sys.setrecursionlimit(10000)

    with open(args.grammar, 'r') as grammar:
        sample = from_lark(Lark(grammar, start=args.start)).example()
        print('{}'.format(sample))
         | NUMBER
         | "true"  -> true
         | "false" -> false
         | "null"  -> null
    list : "[" [value ("," value)*] "]"
    dict : "{" [STRING ":" value ("," STRING ":" value)*] "}"

    STRING : /"[a-z]*"/
    NUMBER : /-?[1-9][0-9]*(\.[0-9]+)?([eE][+-]?[0-9]+)?/

    WS : /[ \t\r\n]+/
    %ignore WS
"""


@given(from_lark(Lark(EBNF_GRAMMAR, start="value")))
def test_generates_valid_json(string):
    json.loads(string)


@pytest.mark.parametrize(
    "start, type_",
    [
        ("dict", dict),
        ("list", list),
        ("STRING", text_type),
        ("NUMBER", integer_types + (float,)),
        ("TRUE", bool),
        ("FALSE", bool),
        ("NULL", type(None)),
    ],
Example #25
from hypothesis import given, strategies
from hypothesis.extra.lark import from_lark
import lark

from beboppy import __version__
from beboppy import encode, decode, generate, parse

parser = parse.get_parser()

bop_strategy = from_lark(parser, start="start")
msg_strategy = from_lark(parser, start="message")
struct_strategy = from_lark(parser, start="struct")
enum_strategy = from_lark(parser, start="enum")


def test_version():
    assert __version__ == '0.1.0'


def assert_roundtrips(val, e_func, d_func):
    encoded = e_func(val)
    decoded = d_func(encoded)
    assert decoded == val, f"Given {val}, encoded: {encoded}, decoded: {decoded}"


@given(strategies.booleans())
def test_roundtrip_bool(val: bool):
    assert_roundtrips(val, encode.encode_bool, decode.decode_bool)


def assert_parses(text: str) -> lark.Tree:
Example #26
def test_grammars(name, path):
    if os.path.basename(path) in skipped:
        pytest.skip()
    with open(path, 'r') as fh:
        from_lark(Lark(fh)).example()