Example #1
    def and_test(self, node, line=-1, idx=0):
        if find_node(node, self.keyword["and"], depth=1):
            _not_tests = find_all(node, self.symbol.not_test, depth=1)
            for sub in _not_tests:
                if find_node(sub, self.symbol.test):
                    self.run(sub, line=line, idx=idx)
                else:
                    # find the first atom node that carries line information
                    for item in find_all_gen(node, self.symbol.atom):
                        if len(item) > 2:
                            first_line = item[1][2]
                        else:
                            continue
                        if isinstance(first_line, int):
                            break
                    else:
                        continue
                    if first_line == line:
                        idx += 1
                    else:
                        line = first_line
                        idx = 1
                    _num = self.fn.Number(len(monitor.Monitor().expr_sensors))
                    monitor.ExprSensor(first_line, idx)
                    self.run(sub, line=line, idx=idx)
                    cloned = clone_node(sub)
                    call_measure_expr = self.fn.CallFunc(
                        "measure_expr", [cloned, _num])
                    replace_node(sub, self.fn.not_test(call_measure_expr))
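What this transform compiles in, as a minimal self-contained sketch in plain Python rather than langscape CSTs (expr_sensors and measure_expr below are hypothetical stand-ins for monitor.Monitor().expr_sensors and the injected measure_expr call):

# Hypothetical sensor registry standing in for the monitor machinery.
expr_sensors = {0: False, 1: False}

def measure_expr(value, num):
    expr_sensors[num] = True   # operand num was actually evaluated
    return value               # passing the value through keeps `and` semantics

# Instrumented form of `x and y`; short-circuiting is preserved, so if x
# were falsy, sensor 1 would never fire.
x, y = True, False
result = measure_expr(x, 0) and measure_expr(y, 1)
print(result, expr_sensors)    # False {0: True, 1: True}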
Example #2
    def suite(self, node):
        # special case: no use of sensors in 'if __main__...' stmts of modules that are not __main__.
        _stmts = find_all(node, self.symbol.stmt, depth=1)
        _num = self.fn.Number(len(monitor.Monitor().stmt_sensors))

        # compile a call 'measure_stmt(_num)' into each suite
        call_measure_stmt = self.fn.CallFunc("measure_stmt", [_num])
        _sensor_stmt = self.fn.stmt(call_measure_stmt)
        IDX = 0
        for i, item in enumerate(node[1:]):
            if item[0] == self.symbol.stmt:
                # measure_stmt shall be executed before return, break, continue
                if find_node(item, self.symbol.flow_stmt, depth=3):
                    IDX = i
                    break
                IDX = i
        if IDX:
            suite_begin, suite_end = self.get_line_info(node)
            monitor.StmtSensor(suite_begin, suite_end)
            _small = find_node(node[i], self.symbol.small_stmt, depth=3)
            if _small and self.fn.is_atomic(_small) and find_node(
                    _small, self.token.STRING):
                node.insert(IDX + 2, _sensor_stmt)
            else:
                node.insert(IDX + 1, _sensor_stmt)
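The same idea one level up, as a plain-Python sketch (stmt_sensors and measure_stmt are hypothetical stand-ins for monitor's statement sensors): suite() compiles a measure_stmt(num) call into each suite, placed before a return/break/continue so that it still executes.

stmt_sensors = {}

def measure_stmt(num):
    stmt_sensors[num] = True   # the suite holding this call was entered

def f(x):
    measure_stmt(0)            # sensor compiled into f's body suite
    if x > 0:
        measure_stmt(1)        # placed before the flow statement `return`
        return "positive"
    return "non-positive"

f(3)
print(stmt_sensors)            # {0: True, 1: True}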
Example #3
def create_bnf_langlet(bnf_grammar_file, lexer_file):
    '''
    Construct an ad-hoc langlet from a BNF grammar file.
    '''
    # parser-rules
    cst = bnfreader.parse_file(bnf_grammar_file)
    parser_rules = []
    # normalize the grammar rules: collapse whitespace, one rule per line
    for rule in find_all(cst, bnfreader.symbol.rule):
        ls_rule = " ".join(bnfreader.unparse(rule)[:-1].split()) + "\n"
        parser_rules.append(ls_rule)
    bnf_grammar = "".join(parser_rules)
    langlet_id = 1000 * 100
    parse_symbol = SymbolObject(langlet_id)
    parse_symbol.create(parser_rules)

    # lexer-rules
    with open(lexer_file) as f_lex:
        lexer_rules = ls_grammar.unparse(ls_grammar.parse(
            f_lex.read())).split("\n")
    lex_symbol = SymbolObject(langlet_id, 100)
    lex_symbol.create(lexer_rules)
    # create NFAs but don't compute properties. This won't work because
    # left recursion prevents first-sets (reachables) from being derived.
    langlet = LangletObject(langlet_id, parse_symbol, lex_symbol)
    nfagen = NFAGenerator(langlet)
    nfas = nfagen.create_all(bnf_grammar)
    langlet.nfas = nfas
    langlet.keywords = nfagen.keywords
    return langlet
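A hedged usage sketch; the input file names are hypothetical and assume a BNF grammar plus a matching lexer definition on disk:

# Hypothetical input files; nfas and keywords come from the function above.
langlet = create_bnf_langlet("mylang.bnf", "mylang.lex")
print(langlet.keywords)    # keywords collected by the NFA generator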
Example #4
def replace_all_nodes(old_node, new_node, contains=None):
    '''
    Replace each node N within i{old_node} whose nid equals the nid of
    i{new_node} by i{new_node}. If i{contains} is given, N is replaced only
    when a node M with nid(M) = i{contains} can be found in N.

    @param old_node: contextual cst node that is searched.
    @param new_node: the substitution; its nid selects the nodes to replace.
    @param contains: optional node id constraining the nodes to be replaced.
    '''
    from langscape.csttools.cstsearch import find_node, find_all
    nid = new_node[0]
    for node in find_all(old_node, nid):
        if contains:
            if find_node(node, contains):
                replace_node(node, new_node)
        else:
            replace_node(node, new_node)
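The node shape this relies on, inferred from the code itself rather than from langscape documentation: a CST node is a list whose first element is its node id (nid), followed by child nodes or token entries. A toy illustration with hypothetical node ids:

# Hypothetical node ids, for illustration only.
NAME, NUMBER, expr = 1, 2, 300

tree = [expr, [NAME, "x"], [expr, [NUMBER, "1"]]]
replacement = [expr, [NUMBER, "0"]]

# replace_all_nodes(tree, replacement, contains=NUMBER) targets every node
# whose nid equals replacement[0] (here: expr) and that contains a NUMBER
# node, swapping in `replacement`; with contains=None every expr node is
# replaced.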
Example #5
    def test_or(self):
        nd = self.p4d.fn.or_test("x", "y", "0")
        self.p4d.check_node(nd)
        kwd_or = self.p4d.parse_nfa.keywords["or"]
        self.assertTrue(len(find_all(nd, kwd_or)) == 2)
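The asserted count can be sanity-checked outside the framework: an or-chain over three operands, "x or y or 0", contains exactly two `or` keywords. A plain-Python analogue using the standard tokenize module:

import io
import tokenize

toks = [t.string for t in
        tokenize.generate_tokens(io.StringIO("x or y or 0").readline)]
print(toks.count("or"))    # 2 -- matching the find_all count above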