Ejemplo n.º 1
0
 def get_regex(self, r):
     """Return (and cache) the compiled lexing code for regex string *r*.

     Results are memoized in ``self.matchers`` keyed by the raw regex
     text, so each distinct regex is compiled to an automaton only once.

     Raises ValueError if *r* cannot be parsed as a regular expression.
     """
     from pypy.rlib.parsing.regexparse import parse_regex
     if r in self.matchers:
         return self.matchers[r]
     regex = parse_regex(r)
     if regex is None:
         # BUG FIX: the original referenced an undefined name `regextext`,
         # which raised NameError instead of the intended ValueError.
         raise ValueError(
             "%s is not a valid regular expression" % r)
     # NFA -> DFA, minimize, then emit the matcher source for this regex.
     automaton = regex.make_automaton().make_deterministic()
     automaton.optimize()
     # BUG FIX: cache and return the *same* object.  The original cached
     # py.code.Source(matcher) but returned the raw matcher string, so the
     # first call returned a different type than all later (cached) calls.
     matcher = py.code.Source(automaton.make_lexing_code())
     self.matchers[r] = matcher
     return matcher
Ejemplo n.º 2
0
 def get_regex(self, r):
     """Return (and cache) the compiled lexing code for regex string *r*.

     Results are memoized in ``self.matchers`` keyed by the raw regex
     text, so each distinct regex is compiled to an automaton only once.

     Raises ValueError if *r* cannot be parsed as a regular expression.
     """
     from pypy.rlib.parsing.regexparse import parse_regex
     if r in self.matchers:
         return self.matchers[r]
     regex = parse_regex(r)
     if regex is None:
         # BUG FIX: the original referenced an undefined name `regextext`,
         # which raised NameError instead of the intended ValueError.
         raise ValueError("%s is not a valid regular expression" %
                          r)
     # NFA -> DFA, minimize, then emit the matcher source for this regex.
     automaton = regex.make_automaton().make_deterministic()
     automaton.optimize()
     # BUG FIX: cache and return the *same* object.  The original cached
     # py.code.Source(matcher) but returned the raw matcher string, so the
     # first call returned a different type than all later (cached) calls.
     matcher = py.code.Source(automaton.make_lexing_code())
     self.matchers[r] = matcher
     return matcher
def make_lexer():
    """Build a Lexer from the regex strings named by the module-level
    ``tokens`` list (each entry is the name of a global holding a regex)."""
    patterns = [parse_regex(globals()[token_name]) for token_name in tokens]
    return Lexer(patterns, list(tokens))
Ejemplo n.º 4
0
Archivo: parse.py Proyecto: bivab/pytri
# C-style /* ... */ block comment: the body is any char that is not '*',
# or a '*' not followed by '/'; the optional trailing '\*?' lets the
# comment close with '**/' as well as '*/'.
slashStarComment = r'(/\*([^\*]|\*[^/])*\*?\*/)'
# Line comments (// ... and # ...), both terminated by the newline.
slashSlashComment = r'//[^\n]*\n'
poundComment = r'#[^\n]*\n'
# Any of the three comment forms.  `group` is not defined in this chunk;
# presumably it builds an alternation of its arguments — confirm upstream.
Comment = group(slashStarComment, slashSlashComment, poundComment)

# Petri Net Tokens #
Places = r'P:'
States = r'S:'
Transition = r'T:'
From = r'->'
# Non-negative integer: either no leading zero, or a run of zeros.
Number = r'([1-9][0-9]*)|0+'
Separator = r'\|'
# NOTE(review): `rexs` and `names` are matched strictly by position, so
# From -> 'Arrow' and States -> 'State'.  `Whitespace` is not defined in
# this chunk; presumably declared earlier in the file — confirm.
rexs = [Places, Transition, From, Number, Separator, Comment, Whitespace, States]
names = ['Places', 'Transition', 'Arrow', 'Number', 'Separator', 'Comment', 'Whitespace', 'State']
# Token classes the lexer drops instead of handing to the parser.
ignores = ['Whitespace', 'Comment']
lexer = Lexer([parse_regex(r) for r in rexs], names, ignores)


def parse_net(inp):
    """Parse a Petri-net description from *inp* via PetriNetParser."""
    return PetriNetParser(inp).parse()

def parse_props(inp):
    """Parse a property description from *inp* via PropParser."""
    return PropParser(inp).parse()

class PetriNetParser(object):
    """
P:5
T:0|2|3 -> 1|2|3
T:1 -> 1|2|3
Ejemplo n.º 5
0
def make_lexer():
    """Build a Lexer from the regex strings named by the module-level
    ``tokens`` list (each entry is the name of a global holding a regex)."""
    compiled = [parse_regex(globals()[name]) for name in tokens]
    return Lexer(compiled, list(tokens))
Ejemplo n.º 6
0
def make_lexer():
    """Build a Lexer from the regex strings named by the module-level
    ``tokens`` list (each entry is the name of a global holding a regex)."""
    from pypy.rlib.parsing.regexparse import parse_regex
    compiled = []
    for token_name in tokens:
        compiled.append(parse_regex(globals()[token_name]))
    return Lexer(compiled, list(tokens))