Code example #1
        print(div)

        return items


def input_queries(prompt):
    s = input(prompt)
    if not s.strip():
        return []
    while not s.rstrip().endswith(';'):
        s += '\n' + input()
    return [x + ';' for x in s.split(';')[:-1]]


if __name__ == "__main__":
    prompt = "DB_example> "

    with open('grammar.lark') as file:
        parser = lark.Lark(file.read(), start="command", lexer='standard')
    transformer = Transformer()

    while True:
        for query in input_queries(prompt):
            try:
                tree = parser.parse(query)
                transformer.clean()
                msg = transformer.transform(tree)[0]
            except Exception as e:
                if isinstance(e, lark.exceptions.UnexpectedInput):
                    print(prompt + "Syntax error")
                break
Code example #2
File: main.py Project: 085astatine/lark_test
def main() -> None:
    with open('./expr.lark') as file:
        parser = lark.Lark(file.read(), start='expr')
    tree = parser.parse(sys.argv[1])
    print(tree)
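
The script assumes an expr.lark grammar file next to it, which this listing does not show. A minimal stand-in grammar (hypothetical, just to make the example runnable) could look like:

import lark

# Hypothetical stand-in for the project's expr.lark; the real grammar
# in 085astatine/lark_test is not shown in this listing.
STAND_IN_GRAMMAR = '''
expr: expr "+" term | term
term: NUMBER
%import common.NUMBER
%import common.WS
%ignore WS
'''
parser = lark.Lark(STAND_IN_GRAMMAR, start='expr')
print(parser.parse('1 + 2').pretty())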
Code example #3
              | "false"
              | "{}"
              | NUMBER
              | SIGNED_NUMBER
              | HEXNUMBER
              | ESCAPED_STRING

    %import common.CNAME -> CNAME
    %import common.NUMBER -> NUMBER
    %import common.SIGNED_NUMBER -> SIGNED_NUMBER
    %import common.ESCAPED_STRING -> ESCAPED_STRING
    %import common.WS
    %ignore WS
    """

_PARSER = lark.Lark(_GRAMMAR, parser='lalr', propagate_positions=True)

_XPARSER = lark.Lark(_GRAMMAR,
                     parser='lalr',
                     propagate_positions=True,
                     keep_all_tokens=True)

# _FN_WHITELIST & _FN_BLACKLIST take either a name or a mapsig.
_FN_BLACKLIST = set([])

_FN_WHITELIST = set([
    'copy_(Tensor, Tensor, bool) -> Tensor',
    'einsum',
    'resize_',
])
Code example #4
File: ast.py Project: timothyrenner/svl
        # Don't touch the case of the transforms - that gets passed to SQL
        # as-is.
        return {"transform": str(items[0])[1:-1]}

    def aggregation(self, items):
        return {"agg": str(items[0]).upper()}

    def sort(self, items):
        return {"sort": str(items[0]).upper()}

    def color_scale(self, items):
        return {"color_scale": str(items[0][1:-1])}


debug_parser = lark.Lark(
    pkg_resources.resource_string("resources", "svl.lark").decode("utf-8")
)


parser = lark.Lark(
    pkg_resources.resource_string("resources", "svl.lark").decode("utf-8"),
    parser="lalr",
    transformer=SVLTransformer(),
)


def parse_svl(svl_string, debug=False, **kwargs):
    if debug:
        return debug_parser.parse(svl_string)
    else:
Code example #5
        return eval(st)

    def array(self, children):
        return children

    def object(self, children):
        return dict(children)

    def member(self, children):
        return tuple(children)

    def start(self, children):
        return children[0]


parser = lark.Lark(grammar)


def loads(text: str) -> object:
    """
    Carrega um documento JSON e retorna o valor Python correspondente.
    """
    tree = parser.parse(text)
    transformer = JSONTransformer()
    tree = transformer.transform(tree)
    if hasattr(tree, "pretty"):
        return tree.pretty()
    return tree


# Examples
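
The example calls themselves are truncated in this listing. Assuming the grammar (not shown above) accepts standard JSON, a hypothetical invocation would look like:

print(loads('{"answer": 42, "tags": ["a", "b"]}'))
# expected, given the transformer above: {'answer': 42, 'tags': ['a', 'b']}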
Code example #6
        for con in context[len(before) + len(words):]:
            after += [con]

        # for con in context:
        #     if not (con in (before+output+after)):
        #         logging.error("AUFSCHREI! " + con["word"] + " | " + " ".join([c["word"] for c in context]))

        logging.debug("before", " ".join([x["word"] for x in before]))
        logging.debug("output", " ".join([x["word"] for x in output]))
        logging.debug("after",  " ".join([x["word"] for x in after]))
        logging.debug("\n")

        # res.append({"result": " ".join([linked_word(x) for x in output])})
        res.append({"l_context": before, "keywords": output, "r_context": after})

    logging.info("done")
    return jsonify({'sparql': html.escape(sparql), 'results': res, "last_page": lst, "first_page": fst, "page": page})


if __name__ == '__main__':
    with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), config['grammar'])) as inp_file:
        parser = lark.Lark(inp_file, debug=True)
        logging.info('loaded')
        logging.debug(parser)

    conn = SPARQLWrapper(urllib.parse.urljoin(config['sparql']['host'], config['sparql']['endpoint']))

    app.run(debug=True, host=config['api']['host'], port=config['api']['port'])

Code example #7
File: grammar.py Project: neelsenc/synapse
        return s_ast.SwitchCase(newkids)

    def casevalu(self, kids):
        assert len(kids) == 1
        kid = kids[0]

        if kid.type == 'DOUBLEQUOTEDSTRING':
            return self._convert_child(kid)

        return s_ast.Const(kid.value[:-1])  # drop the trailing ':'


with s_datfile.openDatFile('synapse.lib/storm.lark') as larkf:
    _grammar = larkf.read().decode()

QueryParser = lark.Lark(_grammar, start='query', propagate_positions=True)
CmdrParser = lark.Lark(_grammar,
                       start='query',
                       propagate_positions=True,
                       keep_all_tokens=True)
StormCmdParser = lark.Lark(_grammar,
                           start='stormcmdargs',
                           propagate_positions=True)

_eofre = regex.compile(r'''Terminal\('(\w+)'\)''')


class Parser:
    '''
    Storm query parser
    '''
Code example #8
import lark
import re
from copy import deepcopy
from functools import reduce, partial

from etl.common.utils import remove_none, resolve_path, as_list, flatten, distinct, is_list_like, \
    remove_empty

field_template_string_parser = lark.Lark('''
WS: /[ ]/+

LCASE_LETTER: "a".."z"
UCASE_LETTER: "A".."Z"

FIELD: (UCASE_LETTER | LCASE_LETTER)+ 
field_path: ("." FIELD?)+

object_path: field_path WS* "=>" WS*

value_path: field_path (WS* "+" WS* field_path)* 

start: "{" WS* object_path* WS* value_path WS* "}"
''')


def get_field_path(tree):
    return resolve_path(tree, ['field_path', 'FIELD'])


def tree_to_dict(tree):
    if isinstance(tree, lark.tree.Tree):
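
The excerpt cuts off above, but the grammar at the top of this example is complete. A quick check with an illustrative template string (not taken from the ETL project's data) shows the shape of the tree:

# Illustrative input: one object_path ("=>" segment) followed by a value_path.
tree = field_template_string_parser.parse('{.result => .countryName + .countryCode}')
print(tree.pretty())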
Code example #9
    raise Exception('failed to transform lark tree')


def transform(lark_object):
    if isinstance(lark_object, lark.lexer.Token):
        return transform_lark_token(lark_object)

    elif isinstance(lark_object, lark.tree.Tree):
        return transform_lark_tree(lark_object)

    raise Exception('Something went wrong transforming!')


with open('lambda_grammar.lark') as grammar_file:
    GRAMMAR = grammar_file.read()

LISP_PARSER = lark.Lark(
    GRAMMAR,
    parser='lalr',
    lexer='standard',
    lexer_callbacks={},
    # transformer=LispTransformer(),
    # increase speed:
    propagate_positions=False,
    maybe_placeholders=False)

ast = LISP_PARSER.parse('(a b c)')

s = transform(ast)
print(s)
Code example #10
WHOLE_WORD = False # only effective when USE_REGEX is True
UNDER_THE_CURSOR = True
CLEAR_ON_ESCAPE = False
FILE_SIZE_LIMIT = 4194304
SETTINGS = {}
KEYWORD_MAP = []


# Debugger settings: 0 - disabled, 127 - enabled
log = getLogger( 1, __name__ )

_parser = lark.Lark( r"""
start: SEARCH* | WORDS* | SEARCH+ WORDS+

WORDS: /[^\/].*/
SEARCH: / *(?<!\\)\/[^\/]+(?<!\\)\/ */
SPACES: /[\t \f]+/

%ignore SPACES
""", start='start', parser='lalr', lexer='contextual' )

g_view_selections = {}
g_regionkey = "HighlightWords"

class Data(object):

    def __init__(self, view):
        self.view = view
        self.added_regions = []
        self.added_regions_set = []
        self.last_caret_begin = 0
Code example #11
import lark
import pkg_resources

# todo: signed numbers
# todo: string quoted members
# todo: last - number  (ibm)
# todo: $."$varname" uses the variable name as lookup key
# todo: json string Unicode escapes
# todo: match json string semantics (\n and other escapes)
# todo: arithmetic expressions
# todo: case insensitive keywords? (allowed at all?)
# todo: list subscripts can be expressions
# todo: a lot more :)

grammar = pkg_resources.resource_string(__package__, "sql_json.lark").decode()
parser = lark.Lark(grammar, parser="lalr", start="query", debug=True)


class QueryError(Exception):
    pass


def compile(input):
    print(input)
    try:
        tree = parser.parse(input)
    except lark.UnexpectedInput as exc:
        raise QueryError(f"invalid query: {exc}") from exc
    print(tree.pretty())
    try:
        transformed = Transformer().transform(tree)
Code example #12
def ParseGN(contents):
    parser = lark.Lark(GN_GRAMMAR, parser='lalr', start='file')
    return parser.parse(contents)
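
One note on this pattern: every call rebuilds the LALR parser, including its table construction. If ParseGN is called frequently, building the parser once is the usual alternative; a sketch, assuming the same GN_GRAMMAR string is in scope:

import lark

# Sketch: construct the parser a single time at import, then reuse it.
# GN_GRAMMAR is assumed to be defined elsewhere in the module, as in the original.
_GN_PARSER = lark.Lark(GN_GRAMMAR, parser='lalr', start='file')

def ParseGN(contents):
    return _GN_PARSER.parse(contents)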
Code example #13

@lark.v_args(inline=True)
class Calc(lark.Transformer):
    from operator import add, mul
    number = int
    start = staticmethod(lambda *a: sum(a))


# Part 1
print(
    lark.Lark(r"""
start: (expr "\n")* expr "\n"?
?atom: /\d+/ -> number
     | "(" expr ")"
?expr: atom
     | expr " + " atom -> add
     | expr " * " atom -> mul
""",
              parser="lalr",
              transformer=Calc()).parse(text))

# Part 2
print(
    lark.Lark(r"""
start: (expr "\n")* expr "\n"?
?atom: /\d+/ -> number
     | "(" expr ")"
?exp1: atom
     | exp1 " + " atom -> add
?expr: exp1
     | expr " * " exp1 -> mul
Code example #14
              | NUMBER
              | SIGNED_NUMBER
              | HEXNUMBER
              | ESCAPED_STRING
              | CNAME

    %import common.CNAME -> CNAME
    %import common.NUMBER -> NUMBER
    %import common.SIGNED_NUMBER -> SIGNED_NUMBER
    %import common.ESCAPED_STRING -> ESCAPED_STRING
    %import common.WS
    %ignore WS
    """

_CPP_SIG_PARSER = lark.Lark(_CPP_SIG_GRAMMAR,
                            parser='lalr',
                            propagate_positions=True)


class CPPSig(SigParser):
    def __init__(self, cpp_sig):
        super(CPPSig, self).__init__(cpp_sig, _CPP_SIG_PARSER)

        self._def_name = self.__get_function_name(self._sig_tree)

    def __param_name(self, t):
        assert isinstance(t, lark.tree.Tree)
        c = t.children[1]
        assert isinstance(c, lark.tree.Tree)
        assert c.data == 'param_name'
        token = c.children[0]
Code example #15
 def __init__(self):
     self._parser = lark.Lark(GRAMMAR, start="simple_select")
     self._transformer = TreeToSqlAlchemy()
Code example #16
        return ('asm', _[0])

    _('auto: "auto" lvar ("," lvar)* ";"')

    def auto(self, _):
        return ('auto', _)

    _('lvar: NAME [constant]')

    def lvar(self, _):
        return (_[0].value, _[1] if len(_) == 2 else 0)


parser = lark.Lark(_.grammar,
                   parser='lalr',
                   lexer='standard',
                   propagate_positions=False,
                   maybe_placeholders=False,
                   transformer=Transformer())


class Codegen:
    def __init__(self, ast, opt):
        # Optimization level.
        self.opt_level = opt

        # Output list of lines.
        # First line is empty and is getting removed by
        # `get_output`. We need it when some function
        # checks last line, but there are no already
        # emitted lines.
        self.output = ['']
Code example #17
File: briltxt.py Project: dz333/bril
def parse_bril(txt):
    parser = lark.Lark(GRAMMAR)
    tree = parser.parse(txt)
    data = JSONTransformer().transform(tree)
    return json.dumps(data, indent=2, sort_keys=True)
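
A plausible driver for this function, mirroring the pipe-based workflow of the Bril tools (this harness is an assumption, not part of the excerpt):

import sys

# Hypothetical entry point: read textual Bril on stdin, print the JSON form.
if __name__ == '__main__':
    print(parse_bril(sys.stdin.read()))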
Code example #18
File: larker.py Project: 0xSMT/bf2c
    right = lamparse(">")

    inp = lamparse(",")
    out = lamparse(".")

    plbrace = lamparse("[")
    prbrace = lamparse("]")

    def loop(self, items):
        return "".join(items)

    def lbrace(self, parameter_list):
        global indent

        s = self.plbrace("[")
        indent += 1

        return s

    def rbrace(self, parameter_list):
        global indent

        indent -= 1
        return self.prbrace("]")


_parser = lark.Lark(_grammar,
                    start='prog',
                    parser="lalr",
                    transformer=BFTransform())
tree = _parser.parse(inputdata)
Code example #19
partition_size: "SIZE"i DISK_SIZE DISK_UNITS
partition_mount: "MOUNT"i PATH
partition_format: "FORMAT"i PART_FORMAT
partition_flags: "FLAGS"i PART_FLAG ("," PART_FLAG)*
PART_FLAG: "BOOT"i | "EFI"i | "BIOS_GRUB"i
PART_FORMAT: /[^ \t\n\\]/i+

COMMENT: /#[^\n]*/

%import common.NEWLINE -> _NEWLINE
%import common.WS_INLINE
%ignore WS_INLINE
%ignore COMMENT
%ignore /\\[\t \f]*\r?\n/   // LINE_CONT
"""
IMAGEFILE_PARSER = lark.Lark(IMAGEFILE_GRAMMAR, parser="lalr").parse


class GuestChrootCommand(editor.GuestCommand):
    def __init__(
        self,
        cmd: str,
        ssh_config: ssh.SshConfig,
        connect_timeout: int,
        run_timeout: Optional[int] = None,
        stdin: utils.FILE_TYPE = subprocess.DEVNULL,
        capture_stdout: bool = False,
        capture_stderr: bool = False,
    ):
        escaped_command = cmd.replace("'", "'\\''")
        chroot_command = (
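
(The class above is truncated in this listing.) One detail worth noting from the top of the example: assigning lark.Lark(...).parse binds the parse method itself, so IMAGEFILE_PARSER is directly callable. A minimal illustration of the same pattern with a toy grammar (not the IMAGEFILE_GRAMMAR above):

import lark

# Toy grammar, only to demonstrate binding the parse method itself.
TOY_PARSER = lark.Lark('''
start: WORD+
%import common.WORD
%import common.WS
%ignore WS
''', parser='lalr').parse

print(TOY_PARSER('hello world'))  # returns a lark.Tree, as parser.parse() would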
Code example #20
File: parsing.py Project: petersn/language-play
#!/usr/bin/python

import easy
import lark

with open("grammar.txt") as f:
	grammar_text = f.read()

# Strip comments from the grammar.
grammar_text = "\n".join(
	line for line in grammar_text.split("\n")
	if not line.strip().startswith("//")
)
term_parser = lark.Lark(grammar_text, start="term")
# FIXME: Is there some way to reuse with the above grammar?
vernac_parser = lark.Lark(grammar_text, start="vernacular")

def unpack_typed_params(ctx, typed_params):
	"""unpack_typed_params(typed_params) -> [(var1, ty1), ...]"""
	results = []
	for child in typed_params.children:
		if child.data == "untyped_param":
			var_name, = child.children
			results.append((easy.Var(str(var_name)), easy.Hole()))
		elif child.data == "param_group":
			ty = unpack_term_ast(ctx, child.children[-1])
			for var_name in child.children[:-1]:
				results.append((easy.Var(str(var_name)), ty))
		else:
			assert False
	return results
Code example #21
import operator
import random
import re
import sys
from functools import reduce

parser = lark.Lark(r'''
start  : _exprs
_exprs : _e* _e
_e     : ATOM
       | _num
       | BOOL
       | list
TRUE   : "#t"
FALSE  : "#f"
BOOL   : TRUE | FALSE
list   : "(" _exprs? ")"
INT    : /[-+]?[0-9]+/
ATOM   : /[a-zA-Z]+[a-zA-Z0-9\-\?]*/
       | /[\*\/\=\>\<]/
       | /[\-\+](?![0-9])/
FLOAT  : /[-+]?[0-9]+\.[0-9]*/
_num   : INT | FLOAT
%import common.WS
%ignore WS
    ''')


def atom(x):
    return 'atom', x
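
A quick sanity check of the grammar above (the input is illustrative; the rest of the interpreter is cut off in this listing):

# Illustrative parse: atoms, ints, and a nested list.
print(parser.parse('(define x (add 1 2))').pretty())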

Code example #22
#!/usr/bin/env python3

import sys
import lark

lark_src = None
with open('struct.lark', 'r') as f:
    lark_src = f.read()

struct_parser = lark.Lark(lark_src, start='entry')

source = None
with open(sys.argv[1], 'r') as f:
    source = f.read()

ast = struct_parser.parse(source)
# print(ast)
# print(ast.pretty())


class StructToIR(lark.Transformer):
    def declaration(self, e):
        return {'type': e[0], 'name': e[1]}

    def var_type(self, v):
        return v[0]

    def identifier(self, s):
        return str(s[0])

    def struct(self, s):
Code example #23
 def __init__(self):
     self._grammar_parser = lark.Lark(self.RETRIEVER_GRAMMAR,
                                      start='generate_retrieve_func')
Code example #24
    def parse_nnf(nnf_file):
        # parse input file
        parser = lark.Lark(r'''
        identifier : CNAME

        inputsize : "inputsize" "=" INT ";"

        assign : identifier "=" value ";"

        layer : "layer" "[" assign * "]"

        atom_net : "atom_net" WORD "$" layer * "$"

        start: inputsize atom_net

        nans: "-"?"nan"

        value : SIGNED_INT
              | SIGNED_FLOAT
              | nans
              | "FILE" ":" FILENAME "[" INT "]"

        FILENAME : ("_"|"-"|"."|LETTER|DIGIT)+

        %import common.SIGNED_NUMBER
        %import common.LETTER
        %import common.WORD
        %import common.DIGIT
        %import common.INT
        %import common.SIGNED_INT
        %import common.SIGNED_FLOAT
        %import common.CNAME
        %import common.WS
        %ignore WS
        ''')
        tree = parser.parse(nnf_file)

        # execute parse tree
        class TreeExec(lark.Transformer):
            def identifier(self, v):
                v = v[0].value
                return v

            def value(self, v):
                if len(v) == 1:
                    v = v[0]
                    if isinstance(v, lark.tree.Tree):
                        assert v.data == 'nans'
                        return math.nan
                    assert isinstance(v, lark.lexer.Token)
                    if v.type == 'FILENAME':
                        v = v.value
                    elif v.type == 'SIGNED_INT' or v.type == 'INT':
                        v = int(v.value)
                    elif v.type == 'SIGNED_FLOAT' or v.type == 'FLOAT':
                        v = float(v.value)
                    else:
                        raise ValueError('unexpected type')
                elif len(v) == 2:
                    v = self.value([v[0]]), self.value([v[1]])
                else:
                    raise ValueError('length of value can only be 1 or 2')
                return v

            def assign(self, v):
                name = v[0]
                value = v[1]
                return name, value

            def layer(self, v):
                return dict(v)

            def atom_net(self, v):
                layers = v[1:]
                return layers

            def start(self, v):
                return v[1]

        layer_setups = TreeExec().transform(tree)
        return layer_setups
Code example #25
import lark
import re
import common

lines = common.read_file('2020/19/data.txt').splitlines()
divider = lines.index('')
rules_str = lines[:divider]
tickets = lines[divider + 1:]
grammar = ''

for line in rules_str:
    new_line = re.sub(r'(\d+)', r'rule\1', line)
    grammar += new_line + '\n'

# part 1
lrk = lark.Lark(grammar, start='rule0')
acc = 0
for t in tickets:
    try:
        lrk.parse(t)
        acc += 1
    except lark.exceptions.LarkError:
        # a parse failure just means this ticket doesn't match rule0
        pass

print(acc)

# part 2
grammar = re.sub(r'rule8:.*\n', 'rule8: rule42 | rule42 rule8\n', grammar)
grammar = re.sub(r'rule11:.*\n',
                 'rule11: rule42 rule31 | rule42 rule11 rule31\n', grammar)
lrk = lark.Lark(grammar, start='rule0')
Code example #26
        def _parse(self, txt):
            parser = lark.Lark(r'''
            identifier : CNAME

            outer_assign : identifier "=" value
            params : outer_assign *

            inner_assign : identifier "=" value ";"
            input_size : "inputsize" "=" INT ";"

            layer : "layer" "[" inner_assign * "]"

            atom_type : WORD

            atom_net : "atom_net" atom_type "$" layer * "$"

            network_setup: "network_setup" "{" input_size atom_net * "}"

            start: params network_setup params

            value : SIGNED_INT
                | SIGNED_FLOAT
                | STRING_VALUE

            STRING_VALUE : ("_"|"-"|"."|"/"|LETTER)("_"|"-"|"."|"/"|LETTER|DIGIT)*

            %import common.SIGNED_NUMBER
            %import common.LETTER
            %import common.WORD
            %import common.DIGIT
            %import common.INT
            %import common.SIGNED_INT
            %import common.SIGNED_FLOAT
            %import common.CNAME
            %import common.WS
            %ignore WS
            %ignore /!.*/
            ''')  # noqa: E501
            tree = parser.parse(txt)

            class TreeExec(lark.Transformer):
                def identifier(self, v):
                    v = v[0].value
                    return v

                def value(self, v):
                    if len(v) == 1:
                        v = v[0]
                        if v.type == 'STRING_VALUE':
                            v = v.value
                        elif v.type == 'SIGNED_INT' or v.type == 'INT':
                            v = int(v.value)
                        elif v.type == 'SIGNED_FLOAT' or v.type == 'FLOAT':
                            v = float(v.value)
                        else:
                            raise ValueError('unexpected type')
                    else:
                        raise ValueError('length of value can only be 1 or 2')
                    return v

                def outer_assign(self, v):
                    name = v[0]
                    value = v[1]
                    return name, value

                inner_assign = outer_assign

                def params(self, v):
                    return v

                def network_setup(self, v):
                    input_size = int(v[0])
                    atomic_nets = dict(v[1:])
                    return input_size, atomic_nets

                def layer(self, v):
                    return dict(v)

                def atom_net(self, v):
                    atom_type = v[0]
                    layers = v[1:]
                    return atom_type, layers

                def atom_type(self, v):
                    return v[0].value

                def start(self, v):
                    network_setup = v[1]
                    del v[1]
                    return network_setup, dict(itertools.chain(*v))

                def input_size(self, v):
                    return v[0].value

            return TreeExec().transform(tree)
Code example #27
import lark

parser = lark.Lark("""

start: CHAR*

CHAR: /./s

""")

#  parser = lark.Lark("""
#  //-----------------------------
#  start: item*
#
#  item:    STRING
#         | CHAR
#
#  // STRING: "\"" /[^\"]*/ "\""
#  STRING: "\"" /[abc]/ "\""
#
#  CHAR: /.|\n|\r/x
#
#  //-----------------------------
#  """)

parsed = parser.parse('''
foo bar baz
''')

for i in parsed.children:
    print(str(i.value))
Code example #28
File: osg_parse.py Project: q4a/opensourcegames
def create(grammar, Transformer):
    parser = lark.Lark(grammar, debug=False, parser='lalr')
    transformer = Transformer()
    return partial(parse, parser, transformer)
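
create pre-binds the parser and transformer with functools.partial, so callers get back a one-argument parsing function. The parse helper itself is not shown in the excerpt; a plausible shape, inferred from the call (hypothetical):

# Hypothetical: the free function that partial() binds in create().
def parse(parser, transformer, text):
    tree = parser.parse(text)
    return transformer.transform(tree)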
Code example #29
File: core.py Project: 0u812/obj_tables
class ReactionEquation(list):
    """ Reaction equation
    """

    with open(
            pkg_resources.resource_filename(
                'obj_tables', os.path.join('chem', 'reaction_equation.lark')),
            'r') as file:
        parser = lark.Lark(file.read())

    def is_equal(self, other):
        """ Determine if two reaction equations are semantically equivalent

        Args:
            other (:obj:`ReactionEquation`): other reaction equation

        Returns:
            :obj:`bool`: :obj:`True` if the objects are semantically equivalent
        """
        if self.__class__ != other.__class__:
            return False

        if len(self) != len(other):
            return False

        other_participants = list(other)
        for part in self:
            part_in_other = False
            for other_part in list(other_participants):
                if part.is_equal(other_part):
                    part_in_other = True
                    other_participants.remove(other_part)
                    break
            if not part_in_other:
                return False

        return True

    def to_dict(self):
        """ Get a simple Python representation compatible with JSON

        Returns:
            :obj:`list`: simple Python representation
        """
        return [part.to_dict() for part in self]

    def serialize(self):
        """ Generate a string representation

        Returns:
            :obj:`str`: string representation
        """
        compartments = set()
        for part in self:
            if isinstance(part.compartment, core.Model):
                compartments.add(part.compartment.serialize())
            else:
                compartments.add(part.compartment)

        lhs = []
        rhs = []
        for part in self:
            if part.stoichiometry < 0:
                lhs.append(
                    part.serialize(include_compartment=len(compartments) > 1))
            elif part.stoichiometry > 0:
                rhs.append(
                    part.serialize(include_compartment=len(compartments) > 1))

        serialized_value = '{} <=> {}'.format(' + '.join(sorted(lhs)),
                                              ' + '.join(sorted(rhs))).strip()

        if len(compartments) == 1:
            serialized_value = '[{}]: '.format(
                list(compartments)[0]) + serialized_value

        return serialized_value

    def deserialize(self, value, species=None, compartments=None):
        """ Set the participants from a string representation

        Args:
            value (:obj:`str`): string representation
            species (:obj:`dict`, optional): dictionary that maps species ids to instances
            compartments (:obj:`dict`, optional): dictionary that maps compartment ids to instances
        """
        tree = self.parser.parse(value)
        transformer = self.ParseTreeTransformer(species=species,
                                                compartments=compartments)
        parts = transformer.transform(tree)
        self.clear()
        self.extend(parts)
        return self

    class ParseTreeTransformer(lark.Transformer):
        """ Transforms parse trees into an instance of :obj:`ReactionEquation`

        Attributes:
            species (:obj:`dict`): dictionary that maps species ids to instances
            compartments (:obj:`dict`): dictionary that maps compartment ids to instances
        """
        def __init__(self, species=None, compartments=None):
            """
            Args:
                species (:obj:`dict`, optional): dictionary that maps species ids to instances
                compartments (:obj:`dict`, optional): dictionary that maps compartment ids to instances
            """
            self.species = species
            self.compartments = compartments

        @lark.v_args(inline=True)
        def start(self, parts):
            if len(set([part.serialize() for part in parts])) < len(parts):
                raise ValueError('Reaction participants cannot be repeated')

            if self.species:
                for part in parts:
                    species = self.species.get(part.species, None)
                    if not species:
                        raise ValueError('Species "{}" must be defined'.format(
                            part.species))
                    part.species = species

            if self.compartments:
                for part in parts:
                    compartment = self.compartments.get(part.compartment, None)
                    if not compartment:
                        raise ValueError(
                            'Compartment "{}" must be defined'.format(
                                part.compartment))
                    part.compartment = compartment

            return parts

        @lark.v_args(inline=True)
        def gbl(self, *args):
            parts = []
            for arg in args:
                if isinstance(arg, lark.lexer.Token) and \
                        arg.type == 'SPECIES_STOICHIOMETRY__SPECIES__COMPARTMENT__ID':
                    compartment = arg.value

                elif isinstance(arg, lark.tree.Tree):
                    if arg.data == 'gbl_reactants':
                        sign = -1
                    else:
                        sign = 1

                    for part in arg.children[0]:
                        part.stoichiometry *= sign
                        part.compartment = compartment
                        parts.append(part)
            return parts

        @lark.v_args(inline=True)
        def gbl_parts(self, *args):
            val = []
            for arg in args:
                if isinstance(arg, ReactionParticipant):
                    val.append(arg)
            return val

        @lark.v_args(inline=True)
        def gbl_part(self, *args):
            stoichiometry = 1.
            for arg in args:
                if arg.type == 'SPECIES_STOICHIOMETRY__SPECIES__SPECIES_TYPE__ID':
                    species = arg.value
                elif arg.type == 'SPECIES_STOICHIOMETRY__STOICHIOMETRY':
                    stoichiometry = float(arg.value)
            return ReactionParticipant(species, None, stoichiometry)

        @lark.v_args(inline=True)
        def lcl(self, *args):
            parts = []
            for arg in args:
                if isinstance(arg, lark.tree.Tree):
                    if arg.data == 'lcl_reactants':
                        sign = -1
                    else:
                        sign = 1
                    for part in arg.children[0]:
                        part.stoichiometry *= sign
                        parts.append(part)
            return parts

        @lark.v_args(inline=True)
        def lcl_parts(self, *args):
            val = []
            for arg in args:
                if isinstance(arg, ReactionParticipant):
                    val.append(arg)
            return val

        @lark.v_args(inline=True)
        def lcl_part(self, *args):
            stoichiometry = 1.
            for arg in args:
                if arg.type == 'SPECIES_STOICHIOMETRY__SPECIES__SPECIES_TYPE__ID':
                    species = arg.value
                elif arg.type == 'SPECIES_STOICHIOMETRY__SPECIES__COMPARTMENT__ID':
                    compartment = arg.value
                elif arg.type == 'SPECIES_STOICHIOMETRY__STOICHIOMETRY':
                    stoichiometry = float(arg.value)

            return ReactionParticipant(species, compartment, stoichiometry)
Code example #30
_parser = lark.Lark(r"""
    productions   : new_line* ( non_terminal_start "->" non_terminals end_symbol )* non_terminal_start "->" non_terminals end_symbol?
    non_terminals : production ( "|" production )*
    production    : ( non_terminal? ( space+ | epsilon | terminal ) )*

    // Forces them to appear in the tree as branches
    epsilon         : "&"+
    end_symbol      : ( ";" | new_line )+
    terminal        : ( SIGNED_NUMBER | LCASE_LETTER | dash_phi_hyphen )+
    non_terminal    : UCASE_LETTER ( UCASE_LETTER | DIGIT | quote )*
    new_line        : NEWLINE
    quote           : "'"
    dash_phi_hyphen : "-"
    space           : " "

    // Rename the start symbol, so when parsing the tree it is simple to find it
    non_terminal_start : non_terminal

    // Stops Lark from automatically filtering out these literals from the tree
    null   : "null"
    true   : "true"
    false  : "false"

    // Import common definitions
    %import common.INT
    %import common.DIGIT
    %import common.UCASE_LETTER
    %import common.LCASE_LETTER
    %import common.SIGNED_NUMBER
    %import common.NEWLINE
    %import common.WS_INLINE

    // Set to ignore white spaces
    %ignore WS_INLINE
""",
                    start='productions',
                    parser='earley',
                    ambiguity="explicit")