Example #1
def test3():
    from langscape.langlets.ls_grammar.grammar_object import GrammarObject

    # NFA2GrammarTranslator is assumed to be in scope in the enclosing module.
    class NFA2GrammarObjTranslator(NFA2GrammarTranslator):
        def __init__(self, langlet):
            self.langlet = langlet
            nfas = self.langlet.nfagenerator.nfas
            self.names = {}
            for r in nfas:
                name = nfas[r][0].split(":")[0]
                self.names[r] = name
            super(NFA2GrammarObjTranslator, self).__init__(langlet)

        def get_constants(self):
            return {}  # self.langlet.nfagenerator.nfadata.constants

        def get_node_name(self, nid):
            return self.names[nid]

    with open("tests/rulefile.txt") as f:
        rules = f.readlines()
    for i, rule in enumerate(rules):
        print "RULE", rule
        go = GrammarObject.grammar_from_rule(rule)
        nfa = go.nfagenerator.nfas[go.get_start_symbol()]
        nfa2gra = NFA2GrammarObjTranslator(go)
        # check_translation returns a (rule, matched) pair; a tuple is always
        # truthy, so unpack it before testing the comparison result.
        R, matched = nfa2gra.check_translation(nfa, False)
        if not matched:
            print (i, rule)
        if i == 10:
            break
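
A minimal sketch of the round trip this test exercises, using only calls that
appear in this section (the rule string is a hypothetical placeholder):

    go = GrammarObject.grammar_from_rule("R: 'a' ['b']")   # hypothetical rule
    nfa = go.get_nfas()[go.get_start_symbol()]
    nfa2gra = NFA2GrammarObjTranslator(go)
    R, matched = nfa2gra.check_translation(nfa, False)
    assert matched, "regenerated rule does not reproduce the NFA"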
Example #2
    def check_refinement(self, parser_rules):
        rules = []
        templates = []
        for name, R in parser_rules:
            if isinstance(R, RuleTemplate):
                rules.append(ls_grammar.tokenize(R.get_subgrammar_rule()))
                templates.append(R)
            else:
                rules.append(ls_grammar.tokenize(R))
        go = GrammarObject(rules)
        go.set_langlet_id(self.langlet.langlet_id)
        go.langlet.lex_nfa = self.langlet.lex_nfa
        go.langlet.token = go.langlet.parse_token = self.langlet.token
        go.create_grammar(expansion=False)
        go.langlet._load_unparser()
        nfas = go.get_nfas()
        # check that each templated rule is a genuine refinement: every span
        # trace of its NFA must also be accepted by the original grammar
        for rt in templates:
            nid = rt.get_nid()
            nfa = nfas[nid]
            traces = compute_span_traces(nfa)
            for tr in traces:
                tro = TraceObject(tr[:-1])
                if not self.trchecker.checktrace(tro, start_symbol=nid):
                    raise GrammarError("Not a refinement: '%s'" % rt.rule_name)
        return go
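
A hedged sketch of how check_refinement might be driven; the rule strings and
the langlet and refiner instances are assumptions for illustration, not taken
from langscape's documentation:

    parser_rules = [
        ("file_input", "file_input: (NEWLINE | stmt)* ENDMARKER"),  # plain rule
        ("stmt", RuleTemplate(langlet, "stmt: simple_stmt")),       # refinement
    ]
    go = refiner.check_refinement(parser_rules)  # raises GrammarError on failure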
Example #3
    def check_translation(self, nfa, dbg=True):
        # Translate the NFA back into a grammar rule, rebuild an NFA from
        # that rule, and report whether the two coincide.
        global __DEBUG__
        __DEBUG__ = dbg
        if __DEBUG__:
            print "----------------------------------------------"
            pprint.pprint(nfa)
            print "----------------------------------------------"
        R = self.translate(nfa)
        if __DEBUG__:
            print "[check-translation : Rule]", R
        go = GrammarObject.grammar_from_rule(R)
        nfa_R = go.get_nfas()[go.get_start_symbol()]
        __DEBUG__ = False
        return R, nfa_comp(nfa, nfa_R)
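
Note that the method returns a pair, not a plain truth value, so callers must
unpack it before testing success; the tuple itself is always truthy. A short
usage sketch, with the translator built as in Example #1:

    R, matched = nfa2gra.check_translation(nfa, dbg=False)
    if not matched:
        print "translation mismatch for rule", R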
Example #4
    def create_refined_grammar(self):
        parser_rules = self.get_parse_gen()
        variables    = {}
        n = 0
        for name in dir(self):
            m = getattr(self, name)
            if hasattr(m, "refined"):
                # A refined method stores its grammar rule in the docstring,
                # beginning with the rule name followed by a space or colon.
                doc = (m.__doc__ or "").strip()
                if doc.startswith(name) and doc[len(name):len(name)+1] in " :":
                    rt = RuleTemplate(self.langlet, doc)
                    for i, (nm, _) in enumerate(parser_rules):
                        if name == nm:
                            parser_rules[i] = (name, rt)
                #variables[name] = m.__call__() or []
        go = self.check_refinement(parser_rules)
        go.variables = variables
        return go

        # NOTE: everything below is unreachable (the 'return go' above always
        # fires, and n is never incremented past 0); it appears to be a
        # disabled variant that expands template variables into '$var$' rules.
        if n == 0:
            return go
        else:
            rules = []
            for name, R in parser_rules:
                if isinstance(R, RuleTemplate):
                    g_rule = R.get_grammar_rule()
                    rules.append(ls_grammar.tokenize(g_rule))
                    for varname in R.variables:
                        rules.append(ls_grammar.tokenize(varname + ": '$%s$'" % varname))
                else:
                    rules.append(ls_grammar.tokenize(R))
            go = GrammarObject(rules)
            go.set_langlet_id(self.langlet.langlet_id)
            go.langlet.lex_nfa = self.langlet.lex_nfa
            go.langlet.token = go.langlet.parse_token = self.langlet.token
            go.create_grammar(expansion=False)
            go.langlet._load_unparser()
            return go
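
For orientation, a hedged sketch of a method this scan would pick up; only the
"refined" attribute check and the docstring convention are taken from the code
above, while the decorator, class name, and rule body are hypothetical:

    def refined(f):
        # create_refined_grammar only tests for the attribute's presence.
        f.refined = True
        return f

    class MyLangletRefinement(object):
        @refined
        def atom(self):
            "atom: '(' expr ')'"   # the docstring doubles as the refined rule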