def read_w2pfile(self, fileName):
    """ Function to read a web2py language file and
        return a list of translation string pairs
    """
    f = open(fileName)
    fileContent = f.read()
    fileContent = "%s\n" % fileContent.replace("\r", "")
    tmpstr = []
    # Create a parse tree list
    st = parser.suite(fileContent)
    stList = parser.st2list(st, line_info=1)
    f.close()
    P = TranslateParseFiles()
    for element in stList:
        P.parseList(element, tmpstr)
    strings = []
    # Store the strings as (original string, translated string) tuple
    for i in range(0, len(tmpstr)):
        if i % 2 == 0:
            strings.append((tmpstr[i][1:-1],
                            tmpstr[i + 1][1:-1]))
    return strings
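# Usage sketch for read_w2pfile (the file contents below are illustrative).
# A web2py language file is a Python dict literal mapping original strings
# to their translations, e.g. languages/fr.py:
#     {
#     'Hello': 'Bonjour',
#     'World': 'Monde',
#     }
# read_w2pfile("languages/fr.py") would then return
#     [('Hello', 'Bonjour'), ('World', 'Monde')]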
def _replace_constants(current_attribute, formula):
    formula = formula.replace(" ", "")
    parsed_formula = _build_simple_list(parser.st2list(parser.expr(formula)))
    replaced_formula = []
    for i in parsed_formula:
        # power operator is different in Python
        if i == "^":
            i = "**"
        # interface for `round` is a little different
        elif i == "10":
            i = "-1"
        elif i == "0.01":
            i = "10"
        elif i == "ROUND":
            # Python round method is different
            i = "round"
        # operator/numbers
        elif len(i) > 1 and i != current_attribute:
            i = _spacify(i)
            char_attr = attributes["archetypes"]["Character"]["attributes"]
            if i in char_attr:
                i = _replace_constants(current_attribute,
                                       char_attr[i]["calculation"])
            else:
                i = str(attributes["constants"][i])
        replaced_formula.append(i)
    return "".join(replaced_formula)
def file_to_ast(file_path, raw=False, line_number=False):
    m = get_token_symbol_map()
    st_object = suite(open(file_path, 'r').read())
    raw_ast = st2list(st_object, line_info=line_number)
    if raw:
        return raw_ast
    return shallow(raw_ast)
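# Hypothetical result sketch for file_to_ast, assuming get_token_symbol_map
# and shallow behave like the id-to-name map and single-child-collapsing
# helper that appear with assert_rewrite further down. For a file containing
# "x = 1":
#     file_to_ast("example.py")
#     ['file_input', ['simple_stmt', ['expr_stmt', 'x', '=', '1'], ''], '', '']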
def run_static_analisys(source_code_str):
    try:
        a = compile(source_code_str, '', 'exec')
    except Exception as error:
        # test the exception instance, not a literal "";
        # SyntaxError stores its text in .msg
        if isinstance(error, SyntaxError):
            message = {'type': 'F', 'row': error.lineno,
                       'column': error.offset, 'text': error.msg}
        else:
            message = {'type': 'F', 'row': -1, 'column': -1,
                       'text': str(error)}
        print(message)
        #exit()
        return
    GLOBAL_SYMBOL_LIST.append(Variable("__file__"))
    st_main = parser.suite(source_code_str)
    statements = parser.st2list(st_main, line_info=True, col_info=True)
    parse_main(statements)
    for s in GLOBAL_SYMBOL_LIST:
        if isinstance(s, Function) and len(s.body) >= 1:
            s.do_all(s.body, GLOBAL_SYMBOL_LIST)
def to_dot(infile, outfile):
    print("Parsing {0}...".format(infile))
    source = open(infile).read()
    st = parser.suite(source)
    tree = parser.st2list(st)
    ct = convert_tree(tree)
    converted_str = "graph {\n" + "{0};\n".format(ct[1]) + tree2dot(ct) + "}"
    print("Writing to `{0}'...".format(outfile))
    open(outfile, "w").write(converted_str)
def close(self):
    self.show("Colorizing Python source text - parsing...")
    import parser
    try:
        nodes = parser.st2list(parser.suite(self.__source), 1)
    except parser.ParserError, err:  # legacy Python 2 except syntax
        self.__viewer.context.message(
            "Syntax error in Python source: %s" % err)
        return
def Snippetize(f):
    """Return the syntax tree of the given file."""
    f.seek(0)
    syntax_tree = parser.st2list(parser.suite(f.read()))
    tokens = offset_token.Tokenize(f)
    snippet = _SnippetizeNode(syntax_tree, tokens)
    assert not tokens
    return snippet
def main(fn=""): """ Returns a readable (c)oncrete (s)yntax (t)ree (CST) list for the given filename. """ if not fn: return [] src = open(fn,'r').read() cst = parser.suite(src) lis = parser.st2list(cst, True) replaceNodeType(lis) return lis
def lint(self):
    print('Linting with %s in Python %s... ' %
          (_module, platform.python_version()), end='')
    self.errors = []
    if (self.code == None):
        with open(self.filename, 'rt', encoding="utf-8") as f:
            try:
                self.code = f.read()
            except Exception as e:  # bare `except e:` is a syntax error
                msg = 'Error when trying to read file:\n' + str(e)
                expl = ("This usually means something got corrupted in "
                        "your file\n\t\t\t and you should remove the "
                        "corrupted portions or\n\t\t\t start a new file.")
                self.oops(msg, expl)
                raise _LintError(self.errors)
    if (self.code in [None, '']):
        self.oops('Could not read code from "%s"' % self.filename)
        raise _LintError(self.errors)
    self.lines = self.code.splitlines()
    self.st = parser.suite(self.code)
    self.stList = parser.st2list(self.st, line_info=True, col_info=True)
    self.astList = self.buildSimpleAST(self.stList, textOnly=False)
    self.astTextOnlyList = self.buildSimpleAST(self.stList, textOnly=True)
    # allow if...main() last line...
    if (self.astTextOnlyList[-1] in [
            ['if', ['__name__', '==', "'__main__'"], ':', ['main', ['(', ')']]],
            ['if', ['(', ['__name__', '==', "'__main__'"], ')'], ':', ['main', ['(', ')']]],
            ['if', ['__name__', '==', '"__main__"'], ':', ['main', ['(', ')']]],
            ['if', ['(', ['__name__', '==', '"__main__"'], ')'], ':', ['main', ['(', ')']]]]):
        # just remove it...
        self.astTextOnlyList.pop()
        self.astList.pop()
    # now do the actual linting...
    #self.lintLineWidths()
    #self.lintTopLevel()  # just import, def, class, or if...main()
    self.lintAllLevels(self.astList)
    if (self.errors != []):
        raise _LintError(self.errors)
    print("Passed!")
def test_position(self):
    # An absolutely minimal test of position information.  Better
    # tests would be a big project.
    code = "def f(x):\n    return x + 1"
    st = parser.suite(code)

    def walk(tree):
        node_type = tree[0]
        next = tree[1]
        if isinstance(next, (tuple, list)):
            for elt in tree[1:]:
                for x in walk(elt):
                    yield x
        else:
            yield tree

    expected = [
        (1, 'def', 1, 0),
        (1, 'f', 1, 4),
        (7, '(', 1, 5),
        (1, 'x', 1, 6),
        (8, ')', 1, 7),
        (11, ':', 1, 8),
        (4, '', 1, 9),
        (5, '', 2, -1),
        (1, 'return', 2, 4),
        (1, 'x', 2, 11),
        (14, '+', 2, 13),
        (2, '1', 2, 15),
        (4, '', 2, 16),
        (6, '', 2, -1),
        (4, '', 2, -1),
        (0, '', 2, -1),
    ]
    self.assertEqual(list(walk(st.totuple(line_info=True, col_info=True))),
                     expected)
    self.assertEqual(list(walk(st.totuple())),
                     [(t, n) for t, n, l, c in expected])
    self.assertEqual(list(walk(st.totuple(line_info=True))),
                     [(t, n, l) for t, n, l, c in expected])
    self.assertEqual(list(walk(st.totuple(col_info=True))),
                     [(t, n, c) for t, n, l, c in expected])
    self.assertEqual(list(walk(st.tolist(line_info=True, col_info=True))),
                     [list(x) for x in expected])
    self.assertEqual(list(walk(parser.st2tuple(st, line_info=True,
                                               col_info=True))),
                     expected)
    self.assertEqual(list(walk(parser.st2list(st, line_info=True,
                                              col_info=True))),
                     [list(x) for x in expected])
def lint(self):
    print('Linting... ', end='')
    self.errors = []
    if (self.code == None):
        with open(self.filename, 'rt') as f:
            try:
                self.code = f.read()
            except UnicodeDecodeError as e:
                self.oops('Non-Ascii Character in File:\n' + str(e))
                raise _LintError(self.errors)
    if (self.code in [None, '']):
        self.oops('Could not read code from "%s"' % self.filename)
        raise _LintError(self.errors)
    self.lines = self.code.splitlines()
    self.st = parser.suite(self.code)
    self.stList = parser.st2list(self.st, line_info=True, col_info=True)
    self.astList = self.buildSimpleAST(self.stList, textOnly=False)
    self.astTextOnlyList = self.buildSimpleAST(self.stList, textOnly=True)
    # allow if...main() last line...
    if (self.astTextOnlyList[-1] in [
            ['if', ['__name__', '==', "'__main__'"], ':', ['main', ['(', ')']]],
            ['if', ['(', ['__name__', '==', "'__main__'"], ')'], ':', ['main', ['(', ')']]],
            ['if', ['__name__', '==', '"__main__"'], ':', ['main', ['(', ')']]],
            ['if', ['(', ['__name__', '==', '"__main__"'], ')'], ':', ['main', ['(', ')']]],
            ['main', ['(', ')']]]):
        # just remove it...
        self.astTextOnlyList.pop()
        self.astList.pop()
    # now do the actual linting...
    self.lintLineWidths()
    self.lintTopLevel()  # just import, def, class, or if...main()
    self.lintAllLevels(self.astList)
    if (self.errors != []):
        raise _LintError(self.errors)
    print("Passed!")
def generate_dot_code(python_file):
    """
    generate_dot_code
    :param python_file: path of the Python source file to graph
    :return: dot source for the call graph
    """
    with open(python_file) as source:
        ast_module_obj = parser.suite(source.read())
        ast_obj_list = parser.st2list(ast_module_obj)
        # ast_module_obj = ast.parse(source.read(), python_file, mode='exec')
        # ast_module_obj_body = ast_module_obj.body
    ast_obj = ast_obj_list
    print('ast_list\n', repr(ast_module_obj))
    print('ast_iter_tuple\n', ast.iter_fields(ast_module_obj))
    # print('ast_body\n', ast_module_obj_body)
    print('ast_obj\n\n', repr(ast_obj))
    # print('ast.iter_child_nodes\n', ast.iter_child_nodes(ast_obj))
    # for b in ast.walk(ast_obj):
    #     print('ast_obj\n', repr(b))

    call_graph = {}
    construct_call_graph(ast_obj, call_graph)
    # pprint.pprint(call_graph)

    dot = []
    dot.append("digraph G {")
    dot.append("rankdir=LR")
    for from_fn, to_fns in call_graph.items():
        if not to_fns:
            dot.append('%s;' % from_fn)
        for to_fn in to_fns:
            if to_fn not in call_graph:
                continue
            dot.append('%s -> %s;' % (from_fn, to_fn))
    dot.append("}")
    return '\n'.join(dot)
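# Shape of the dot source this returns (the actual nodes and edges depend on
# construct_call_graph, which is defined elsewhere; names are illustrative):
#     digraph G {
#     rankdir=LR
#     main -> helper;
#     helper;
#     }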
def lex(expression):
    symbols = {v: k for k, v in symbol.__dict__.items() if isinstance(v, int)}
    tokens = {v: k for k, v in token.__dict__.items() if isinstance(v, int)}
    lexicon = {**symbols, **tokens}
    st = parser.expr(expression)
    st_list = parser.st2list(st)

    def replace(l: list):
        r = []
        for i in l:
            if isinstance(i, list):
                r.append(replace(i))
            else:
                if i in lexicon:
                    r.append(lexicon[i])
                else:
                    r.append(i)
        return r

    return replace(st_list)
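# Example (Python <= 3.9, where the parser module still exists): lex("a + 1")
# returns the st2list nesting with the numeric node ids replaced by their
# grammar names, so the leaf [1, 'a'] becomes ['NAME', 'a'], [14, '+'] becomes
# ['PLUS', '+'], and [2, '1'] becomes ['NUMBER', '1'], nested under symbol
# names such as 'eval_input' and 'arith_expr'.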
def load_python_parse_tree(code, filter_test=False, line_info=True, col_info=True):
    """
    Parse Python source code and return its parse tree, represented as a
    nested list ("list-tree").
    :param code: the Python source code to parse
    :param filter_test: filter the redundant nodes in the parse tree; the
        redundant nodes are introduced by the grammar.
    :param line_info: include line numbers on terminal nodes
    :param col_info: include column offsets on terminal nodes
    :return: the list-tree, or None if the code does not parse
    """
    try:
        st_obj = parser.suite(code)
    except Exception as e:
        return None
    st_list = parser.st2list(st_obj, line_info=line_info, col_info=col_info)
    if filter_test:
        st_list = construct_filter_list_tree(st_list)
    # print_list_tree(st_list)
    return st_list
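# Example call (construct_filter_list_tree is defined elsewhere in the module):
#     tree = load_python_parse_tree("x = 1\n", filter_test=True)
# Leaves of the returned list-tree carry (token id, text, line, column), and a
# syntactically invalid input returns None instead of raising.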
def parse_python_test():
    code = r'''
import os

def add(a:int, b):
    a = a + b
    if a > b and a > 1:
        pass
    return a

c = add(((1>2) and (2>3)), 2)
print(c)
'''
    code2 = r'''def hello_world():
    return "hello world"'''
    st_obj = parser.suite(code)
    st_list = parser.st2list(st_obj, line_info=True, col_info=True)
    print(st_list)
    filtered_list = construct_filter_list_tree(st_list)
    print(filtered_list)
    print_list_tree(filtered_list)
    # root = construct_filter_tree(st_list)
    # root.print_tree(0)
    print('root')
def ParseAndCompileUserFunctionString(self, inString):
    # shift user functions into numpy namespace at run time, not import time
    numpySafeTokenList = []
    for key in list(self.functionDictionary.keys()):
        numpySafeTokenList += self.functionDictionary[key]
    for key in list(self.constantsDictionary.keys()):
        numpySafeTokenList += self.constantsDictionary[key]

    # to shift user functions such as "power" into the numpy
    # namespace "numpy.power" for evaluation
    for token in numpySafeTokenList:
        exec(token + " = numpy." + token)

    # no blank lines of text, StringIO() allows using file methods on text
    stringToConvert = ""
    rawData = StringIO.StringIO(inString).readlines()
    for line in rawData:
        stripped = line.strip()
        if len(stripped) > 0:  # no empty strings
            if stripped[0] != "#":  # no comment-only lines
                stringToConvert += stripped + "\n"

    # convert brackets to parentheses
    stringToConvert = stringToConvert.replace("[", "(").replace("]", ")")

    if stringToConvert == "":
        raise Exception("You must enter some function text for the software to use.")
    if -1 != stringToConvert.find("="):
        raise Exception('Please do not use an equals sign "=" in your text.')

    st = parser.expr(stringToConvert)
    tup = st.totuple()
    tokens = self.GetTokensFromTupleParsingHelper(tup)

    if "^" in tokens:
        raise Exception('The caret symbol "^" is not recognized by the parser, '
                        'please substitute double asterisks "**" for "^".')
    if "ln" in tokens:
        raise Exception("The parser uses log() for the natural log function, "
                        "not ln(). Please use log() in your text.")
    if "abs" in tokens:
        raise Exception("The parser uses fabs() for the absolute value, "
                        "not abs(). Please use fabs() in your text.")
    if "EXP" in tokens:
        raise Exception("The parser uses lower case exp(), not upper case "
                        "EXP(). Please use lower case exp() in your text.")
    if "LOG" in tokens:
        raise Exception("The parser uses lower case log(), not upper case "
                        "LOG(). Please use lower case log() in your text.")

    # test for required reserved tokens
    tokenNames = list(set(tokens) - set(numpySafeTokenList))
    if "X" not in tokenNames:
        raise Exception('You must use a separate upper case "X" in your '
                        'function to enter a valid function of X.')
    if "Y" not in tokenNames:
        raise Exception('You must use a separate upper case "Y" in your '
                        'function to enter a valid function of Y.')

    self._coefficientDesignators = sorted(list(set(tokenNames) - set(["X", "Y"])))
    if len(self._coefficientDesignators) == 0:
        raise Exception("I could not find any equation parameter or "
                        "coefficient names, please check the function text")

    # now compile code object using safe tokens
    self.safe_dict = dict([(k, locals().get(k, None)) for k in numpySafeTokenList])

    # now compile code object using safe tokens with integer conversion
    self.safe_dict = dict([(k, locals().get(k, None)) for k in numpySafeTokenList])

    # convert integer use such as (3/2) into floats such as (3.0/2.0)
    st = parser.expr(stringToConvert)
    stList = parser.st2list(st)
    stList = self.RecursivelyConvertIntStringsToFloatStrings(stList)
    st = parser.sequence2st(stList)

    # later evals re-use this compiled code for improved
    # performance in EvaluateCachedData() methods
    self.userFunctionCodeObject = parser.compilest(st)
def match(self, expr, maxindex=None):
    """Return the packet that matches the given expression, also the
       packet index points to the next packet after the matched packet.
       Returns None if packet is not found and the packet index points
       to the packet at the beginning of the search.

       expr:
           String of expressions to be evaluated
       maxindex:
           The match fails if packet index hits this limit

       Examples:
           # Find the packet with both the ACK and SYN TCP flags set to 1
           pkt = x.match("TCP.flags.ACK == 1 and TCP.flags.SYN == 1")

           # Find the next NFS EXCHANGE_ID request
           pkt = x.match("NFS.argop == 42")

           # Find the next NFS EXCHANGE_ID or CREATE_SESSION request
           pkt = x.match("NFS.argop in [42,43]")

           # Find the next NFS OPEN request or reply
           pkt = x.match("NFS.op == 18")

           # Find all packets coming from subnet 192.168.1.0/24 using
           # a regular expression
           while x.match(r"IP.src == re('192\.168\.1\.\d*')"):
               print x.pkt.tcp

           # Find packet having a GETATTR asking for
           # FATTR4_FS_LAYOUT_TYPE(bit 62)
           pkt_call = x.match("NFS.attr_request & 0x4000000000000000L != 0")
           if pkt_call:
               # Find GETATTR reply
               xid = pkt_call.rpc.xid
               # Find reply where the number 62 is in the array
               # NFS.obj_attributes
               pkt_reply = x.match("RPC.xid == %d and 62 in NFS.obj_attributes" % xid)

           # Find the next WRITE request
           pkt = x.match("NFS.argop == 38")
           if pkt:
               print pkt.nfs

           # Same as above, but using membership test operator instead
           if ("NFS.argop == 38" in x):
               print x.pkt.nfs

       See also:
           match_ethernet(), match_ip(), match_tcp(), match_rpc(), match_nfs()
    """
    # Save current position
    save_index = self.index

    # Parse match expression
    st = parser.expr(expr)
    smap = parser.st2list(st)
    pdata = self._convert_match(smap)
    self.dprint('PKT1', ">>> %d: match(%s)" % (self.index, expr))

    # Search one packet at a time
    for pkt in self:
        if maxindex and self.index > maxindex:
            # Hit maxindex limit
            break
        try:
            if eval(pdata):
                # Return matched packet
                self.dprint('PKT1', ">>> %d: match() -> True" % pkt.record.index)
                return pkt
        except Exception:
            pass

    # No packet matched, re-position the file pointer back to where
    # the search started
    self.rewind(save_index)
    self.pkt = None
    self.dprint('PKT1', ">>> match() -> False")
    return None
def assert_rewrite(line, debug):
    org_line = line
    map = dict(token.tok_name)
    map.update(symbol.sym_name)

    # https://stackoverflow.com/a/5454348
    def shallow(ast):
        if not isinstance(ast, list):
            return ast
        if len(ast) == 2:
            return shallow(ast[1])
        return [map[ast[0]]] + [shallow(a) for a in ast[1:]]

    try:
        ast = shallow(parser.st2list(parser.suite(line.strip())))
    except SyntaxError as e:
        if debug:
            print('assert_rewrite: Parsing error', e)
        return org_line

    if debug:
        print('XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX', line)
        import pprint
        pprint.pprint(ast)

    try:
        if ast[0] != 'file_input' or ast[1][0] != 'simple_stmt' \
           or ast[1][1][0] != 'assert_stmt':
            return org_line
        simple_stmt = ast[1]
        comment = simple_stmt[2]
        assert_stmt = ast[1][1]
        expression = assert_stmt[2]
    except IndexError:
        return org_line

    if len(assert_stmt) > 3:
        # Already has a message
        return org_line

    if debug:
        print('comment', comment)
    if comment:
        line = line[:line.index(comment)].rstrip()
        if debug:
            print('new line: %r' % line)

    if debug:
        print('assert_stmt', len(assert_stmt), assert_stmt)
        print('expression', expression)

    ws, assrt, rest = line.partition('assert')
    if not isinstance(expression, list):
        rewrite = ws + '____l = ' + expression + '; assert ____l, "%r" % ____l'
        return rewrite

    # TODO: expression ['not_test', 'not', 'False']
    #       expression ['not_test', 'not', ['comparison', '1', '==', '2']]
    if len(expression) < 4 or expression[0] != 'comparison':
        return org_line
    # TODO: expression ['comparison', '1', '<=', '1', '<=', '1']
    if len(expression) > 4:
        return org_line

    op = expression[2]
    if debug:
        print('left', expression[1])
        print('op', op)
        print('right', expression[3])
    # TODO: op ['comp_op', 'is', 'not']
    if isinstance(op, list):
        return org_line

    # In case of multiple op's in the line, find the correct one to split on
    def flatten(lst):
        for e in lst:
            if isinstance(e, list):
                yield from flatten(e)
            else:
                yield e

    num_ops = list(flatten([expression[1]])).count(op)
    if debug:
        print('num_ops', num_ops)
        print('rest', rest)

    index = -1
    for num in range(num_ops + 1):
        index = rest.find(op, index + 1)
        if debug:
            print('index', index)
        if index == -1:
            return org_line

    left = rest[:index].strip()
    right = rest[index + len(op):].strip()
    if debug:
        print('left: %r' % left)
        print('right: %r' % right)

    rewrite = (ws + '____l = ' + left + '; ____r = ' + right +
               '; assert ____l ' + op + ' ____r, "%r ' + op + ' %r" % (____l, ____r)')
    if debug:
        print('rewrite', rewrite)
    return rewrite
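# Worked example, traced through assert_rewrite above:
#     assert_rewrite("assert x == y", debug=False)
# returns
#     '____l = x; ____r = y; assert ____l == ____r, "%r == %r" % (____l, ____r)'
# so a failing assert reports both operand values instead of a bare
# AssertionError.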
def findstr(self, fileName, spmod, modlist):
    """ Using the methods in TranslateParseFiles to extract the strings
        fileName -> the file to be used for extraction
        spmod -> the required module
        modlist -> a list of all modules in Eden
    """
    try:
        f = open(fileName)
    except:
        path = os.path.split(__file__)[0]
        fileName = os.path.join(path, fileName)
        try:
            f = open(fileName)
        except:
            return
    # Read all contents of file
    fileContent = f.read()
    # Remove CR-LF and NOEOL characters
    fileContent = fileContent.replace("\r", "") + "\n"

    P = TranslateParseFiles()

    try:
        st = parser.suite(fileContent)
    except:
        return []
    f.close()

    # Create a parse tree list for traversal
    stList = parser.st2list(st, line_info=1)

    # List which holds the extracted strings
    strings = []

    if spmod == "ALL":
        # If all strings are to be extracted, call ParseAll()
        for element in stList:
            P.parseAll(strings, element)
    else:
        # Handle cases for special files which contain
        # strings belonging to different modules
        appname = current.request.application
        if fileName.endswith("/%s/modules/eden/menus.py" % appname) == True:
            for element in stList:
                P.parseMenu(spmod, strings, element, 0)
        elif fileName.endswith("/%s/modules/s3cfg.py" % appname) == True:
            for element in stList:
                P.parseS3cfg(spmod, strings, element, modlist)
        elif os.path.basename(fileName) == "000_config.py" or \
             os.path.basename(fileName) == "config.py":
            for element in stList:
                P.parseConfig(spmod, strings, element, modlist)

    # Extract strings from deployment_settings.variable() calls
    final_strings = []
    settings = current.deployment_settings
    for (loc, s) in strings:
        if s[0] != '"' and s[0] != "'" and "settings." in s:
            # Convert the call to a standard form
            s = s.replace("current.deployment_settings", "settings")
            s = s.replace("()", "")
            l = s.split(".")
            obj = settings
            # Get the actual value
            for atr in l[1:]:
                obj = getattr(obj, atr)()
            s = obj
        final_strings.append((loc, s))
    return final_strings
if hasattr(st_list, "__iter__"): code = st_list[0] if code == symbol.expr_stmt and len(st_list) == 4: return [(st2str(st_list[1]), st2str(st_list[3]))] results = [] for i in st_list: if type(i) != str: r = find_expr_stmt(i) if r: results.extend(r) return results code = inspect.getsource(recipipe) st = parser.suite(code) st_list = parser.st2list(st) assign_expr = find_expr_stmt(st_list) if len(sys.argv) < 2: raise ValueError("Required output file as a parameter.") filename = sys.argv[1] filename = os.path.join(os.path.dirname(__file__), filename) with open(filename, "w") as f: f.write("Alias,Definition\n") for i, j in assign_expr: if i[0] != "_": j = j.replace('"', "'") o = getattr(recipipe, i) if inspect.isclass(o): # Remove modules starting from '_' to avoid wrong references # to the SKLearn documentation.
def cprint(code):
    pprint.pprint(shallow(parser.st2list(parser.expr(code))))
for j in range(1, len(sys.argv)):
    file_str = str(sys.argv[j])
    file = open(file_str, "r")
    source_code_str = file.read()
    file.close()
    print(file_str + ":")
    try:
        a = compile(source_code_str, "", "exec")
    except Exception as error:
        # test the exception instance, not a literal "";
        # SyntaxError stores its text in .msg
        if isinstance(error, SyntaxError):
            message = {"type": "F", "row": error.lineno,
                       "column": error.offset, "text": error.msg}
        else:
            message = {"type": "F", "row": -1, "column": -1,
                       "text": str(error)}
        print(message)
        exit()
    st_main = parser.suite(source_code_str)
    statements = parser.st2list(st_main, line_info=True, col_info=True)
    parse_main(statements)
    for s in GLOBAL_SYMBOL_LIST:
        if type(s) == type(Function("")) and len(s.body) >= 1:
            s.parse(s.body, GLOBAL_SYMBOL_LIST)
    for err in ERROR_LIST:
        print(err.info)
    GLOBAL_SYMBOL_LIST.clear()
    ERROR_LIST.clear()
rootdir = sys.argv[1]
files = build_recursive_dir_tree(rootdir)

results_arrays = []
for file in files:
    result_array = [file, []]
    results_arrays.append(result_array)
    fd = open(file, "r", errors="ignore")
    st = parser.suite(fd.read())
    st_list = parser.st2list(st, line_info=True)
    search(st_list, result_array[1])
    fd.close()

graphs = {}
nodes_by_name = {}
nodes_by_file = {}
nodes_by_id = []
num_nodes = 0

for results_for_file in results_arrays:
    file_name = results_for_file[0]
    edges = []
a = 1

def my_max(b):
    c = a
    if c < b:
        return b
    else:
        return c

code_str = """def my_max(b):
    global a
    if a<b:
        return b
    else:
        return a
"""

import parser
from pprint import pprint
st = parser.suite(code_str)
pprint(parser.st2list(st, line_info=True))

import ast
tree = ast.parse(code_str, mode="exec")
pprint(ast.dump(tree))

import dis
print(dis.dis(my_max))
def _parseIt(self, goodif):
    parsed = shallow(parser.st2list(parser.expr(goodif)))[0]
    if (not isinstance(parsed, list)):
        parsed = [parsed]
    return parsed
try:  # opening try: reconstructed; this excerpt began mid-statement
    a = compile(source_code_str, '', 'exec')
except Exception as error:
    # test the exception instance, not a literal "";
    # SyntaxError stores its text in .msg
    if isinstance(error, SyntaxError):
        message = {
            'type': 'F',
            'row': error.lineno,
            'column': error.offset,
            'text': error.msg
        }
    else:
        message = {
            'type': 'F',
            'row': -1,
            'column': -1,
            'text': str(error)
        }
    print(message)
    exit()

GLOBAL_SYMBOL_LIST.append(Variable("__file__"))
st_main = parser.suite(source_code_str)
statements = parser.st2list(st_main, line_info=True, col_info=True)
parse_main(statements)

for s in GLOBAL_SYMBOL_LIST:
    if isinstance(s, Function) and len(s.body) >= 1:
        s.do_all(s.body, GLOBAL_SYMBOL_LIST)

for err in ERROR_LIST:
    print(err.info)
def match(self, expr, maxindex=None, rewind=True, reply=False):
    """Return the packet that matches the given expression, also the
       packet index points to the next packet after the matched packet.
       Returns None if packet is not found and the packet index points
       to the packet at the beginning of the search.

       expr:
           String of expressions to be evaluated
       maxindex:
           The match fails if packet index hits this limit
       rewind:
           Rewind to index where matching started if match fails
       reply:
           Match RPC replies of previously matched calls as well

       Examples:
           # Find the packet with both the ACK and SYN TCP flags set to 1
           pkt = x.match("TCP.flags.ACK == 1 and TCP.flags.SYN == 1")

           # Find the next NFS EXCHANGE_ID request
           pkt = x.match("NFS.argop == 42")

           # Find the next NFS EXCHANGE_ID or CREATE_SESSION request
           pkt = x.match("NFS.argop in [42,43]")

           # Find the next NFS OPEN request or reply
           pkt = x.match("NFS.op == 18")

           # Find all packets coming from subnet 192.168.1.0/24 using
           # a regular expression
           while x.match(r"IP.src == re('192\.168\.1\.\d*')"):
               print x.pkt.tcp

           # Find packet having a GETATTR asking for
           # FATTR4_FS_LAYOUT_TYPES(bit 62)
           pkt_call = x.match("NFS.attr_request & 0x4000000000000000L != 0")
           if pkt_call:
               # Find GETATTR reply
               xid = pkt_call.rpc.xid
               # Find reply where the number 62 is in the array
               # NFS.attributes
               pkt_reply = x.match("RPC.xid == %d and 62 in NFS.attributes" % xid)

           # Find the next WRITE request
           pkt = x.match("NFS.argop == 38")
           if pkt:
               print pkt.nfs

           # Same as above, but using membership test operator instead
           if ("NFS.argop == 38" in x):
               print x.pkt.nfs

       See also:
           match_ethernet(), match_ip(), match_tcp(), match_rpc(), match_nfs()
    """
    # Save current position
    save_index = self.index

    # Parse match expression
    st = parser.expr(expr)
    smap = parser.st2list(st)
    pdata = self._convert_match(smap)
    self.dprint('PKT1', ">>> %d: match(%s)" % (self.index, expr))
    self.reply_matched = False

    # Search one packet at a time
    for pkt in self:
        if maxindex and self.index > maxindex:
            # Hit maxindex limit
            break
        try:
            if reply and pkt == "rpc" and pkt.rpc.type == 1 \
               and pkt.rpc.xid in self._match_xid_list:
                self.dprint('PKT1',
                            ">>> %d: match() -> True: reply" % pkt.record.index)
                self._match_xid_list.remove(pkt.rpc.xid)
                self.reply_matched = True
                return pkt
            if eval(pdata):
                # Return matched packet
                self.dprint('PKT1', ">>> %d: match() -> True" % pkt.record.index)
                if reply and pkt == "rpc" and pkt.rpc.type == 0:
                    # Save xid of matched call
                    self._match_xid_list.append(pkt.rpc.xid)
                return pkt
        except Exception:
            pass

    if rewind:
        # No packet matched, re-position the file pointer back to where
        # the search started
        self.rewind(save_index)
    self.pkt = None
    self.dprint('PKT1', ">>> match() -> False")
    return None
def ParseAndCompileUserFunctionString(self, inString):
    # shift user functions into numpy namespace at run time, not import time
    numpySafeTokenList = []
    for key in list(self.functionDictionary.keys()):
        numpySafeTokenList += self.functionDictionary[key]
    for key in list(self.constantsDictionary.keys()):
        numpySafeTokenList += self.constantsDictionary[key]

    # no blank lines of text, StringIO() allows using file methods on text
    stringToConvert = ''
    rawData = io.StringIO(inString).readlines()
    for line in rawData:
        stripped = line.strip()
        if len(stripped) > 0:  # no empty strings
            if stripped[0] != '#':  # no comment-only lines
                stringToConvert += stripped + '\n'

    # convert brackets to parentheses
    stringToConvert = stringToConvert.replace('[', '(').replace(']', ')')

    if stringToConvert == '':
        raise Exception(
            'You must enter some function text for the software to use.')
    if -1 != stringToConvert.find('='):
        raise Exception(
            'Please do not use an equals sign "=" in your text.')

    st = parser.expr(stringToConvert)
    tup = st.totuple()
    tokens = self.GetTokensFromTupleParsingHelper(tup)

    if '^' in tokens:
        raise Exception(
            'The caret symbol "^" is not recognized by the parser, '
            'please substitute double asterisks "**" for "^".')
    if 'ln' in tokens:
        raise Exception(
            "The parser uses log() for the natural log function, "
            "not ln(). Please use log() in your text.")
    if 'abs' in tokens:
        raise Exception(
            "The parser uses fabs() for the absolute value, "
            "not abs(). Please use fabs() in your text.")
    if 'EXP' in tokens:
        raise Exception(
            "The parser uses lower case exp(), not upper case "
            "EXP(). Please use lower case exp() in your text.")
    if 'LOG' in tokens:
        raise Exception(
            "The parser uses lower case log(), not upper case "
            "LOG(). Please use lower case log() in your text.")

    # test for required reserved tokens
    tokenNames = list(set(tokens) - set(numpySafeTokenList))
    if 'X' not in tokenNames:
        raise Exception(
            'You must use a separate upper case "X" in your '
            'function to enter a valid function of X.')
    if 'Y' not in tokenNames:
        raise Exception(
            'You must use a separate upper case "Y" in your '
            'function to enter a valid function of Y.')

    self._coefficientDesignators = sorted(
        list(set(tokenNames) - set(['X', 'Y'])))
    if len(self._coefficientDesignators) == 0:
        raise Exception(
            'I could not find any equation parameter or '
            'coefficient names, please check the function text')

    # now compile code object using safe tokens with integer conversion
    self.safe_dict = locals()
    for f in numpySafeTokenList:
        self.safe_dict[f] = eval('numpy.' + f)

    # convert integer use such as (3/2) into floats such as (3.0/2.0)
    st = parser.expr(stringToConvert)
    stList = parser.st2list(st)
    stList = self.RecursivelyConvertIntStringsToFloatStrings(stList)
    st = parser.sequence2st(stList)

    # later evals re-use this compiled code for improved performance
    # in EvaluateCachedData() methods
    self.userFunctionCodeObject = parser.compilest(st)
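# The integer-to-float rewrite above relies on
# RecursivelyConvertIntStringsToFloatStrings, which is not shown. A minimal
# standalone sketch of the same technique, assuming a plain st2list walk
# (Python <= 3.9, where the parser module still exists):
import parser
import token

def ints_to_floats(node):
    # Terminal nodes look like [token_id, text]; rewrite integer literals
    # such as "3" into "3.0" so that (3/2) evaluates as float division.
    if isinstance(node, list):
        if node[0] == token.NUMBER and node[1].isdigit():
            return [token.NUMBER, node[1] + ".0"]
        return [ints_to_floats(child) for child in node]
    return node

st_list = parser.st2list(parser.expr("(3/2) * X"))
code_obj = parser.compilest(parser.sequence2st(ints_to_floats(st_list)))
print(eval(code_obj, {"X": 2.0}))  # prints 3.0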
def get_file_trans_strings(file_path):
    src = open(file_path, 'rb').read()
    tree = parser.st2list(parser.suite(src))
    return [x for x in walk_for_trans(tree) if x]