def main():
    # Parse config and arguments
    args, config = parse_arguments()
    logger.info("Arguments: %s, Config: %s" % (args, config))

    # Init run directories
    output_dir = init_directories(args.section)

    # Instantiate a new adaptive padding object
    wtfpad = ap.AdaptiveSimulator(config)

    # Run simulation on all traces
    latencies, bandwidths = [], []
    for fname in listdir(args.traces_path):
        trace = parse(join(args.traces_path, fname))

        logger.info("Simulating trace: %s" % fname)
        simulated = wtfpad.simulate(Trace(trace))

        # Dump simulated trace to results directory
        dump(simulated, join(output_dir, fname))

        # Calculate overheads
        bw_ovhd = oh.bandwidth_ovhd(simulated, trace)
        bandwidths.append(bw_ovhd)
        logger.debug("Bandwidth overhead: %s" % bw_ovhd)

        lat_ovhd = oh.latency_ovhd(simulated, trace)
        latencies.append(lat_ovhd)
        logger.debug("Latency overhead: %s" % lat_ovhd)

    logger.info("Latency overhead: %s" % np.median([l for l in latencies if l > 0.0]))
    logger.info("Bandwidth overhead: %s" % np.median([b for b in bandwidths if b > 0.0]))
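# --- Hypothetical sketch, not part of the original code ---
# The loop above relies on oh.bandwidth_ovhd() and oh.latency_ovhd(), which are
# not shown here. The functions below are an assumption about what they might
# compute (volume and duration ratios between the padded and original traces);
# the packet attributes `length` and `timestamp` are likewise assumptions.
def bandwidth_ovhd(simulated, original):
    # Ratio of padded traffic volume to original traffic volume
    orig_bytes = sum(abs(p.length) for p in original)
    if orig_bytes == 0:
        return 0.0
    return sum(abs(p.length) for p in simulated) / float(orig_bytes)

def latency_ovhd(simulated, original):
    # Ratio of total trace durations, assuming packets are time-ordered
    orig_time = original[-1].timestamp - original[0].timestamp
    if orig_time == 0:
        return 0.0
    return (simulated[-1].timestamp - simulated[0].timestamp) / float(orig_time)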
def runTest(self):
    # Lex and parse the sample program
    scanner = lexer.MiniJavaScanner()
    with open(p) as f:
        s = f.read()
    tokens = scanner.tokenize(s)

    javaParser = parser.ProgramParser()
    tree = javaParser.parse(tokens)

    # Dump the parse tree to a temp file and diff it against the expected output
    with TempFile() as fout:
        parser.dump(tree, fout.f)
        fout.flush()
        self.diff(expected, fout.name)
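# --- Hypothetical sketch, not part of the original code ---
# TempFile is used above but not shown. This is one plausible implementation,
# inferred only from how the test uses it (a context manager exposing .f,
# .flush(), and .name); the real helper may differ.
import tempfile

class TempFile:
    def __enter__(self):
        # delete=False so the file can still be read back by name for the diff
        self.f = tempfile.NamedTemporaryFile(mode='w', delete=False)
        self.name = self.f.name
        return self

    def flush(self):
        self.f.flush()

    def __exit__(self, exc_type, exc, tb):
        self.f.close()
        return False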
def save(self, path=None):
    '''Dumps the data to the given path using Kerbal format.'''
    if path is None:
        path = self.path
    with open(path, 'w') as fp:
        fp.write(parser.dump(self.data))
import os
import time
from sys import argv

from parser import parse_data, load_js, dump
from urllib.request import urlopen
from splash import show_splash


if __name__ == '__main__':
    os.system('clear')
    show_splash()
    time.sleep(1)

    # Parse all given arguments and return needed values
    html, path, op_type = check_args(len(argv), argv)

    # Process link as a single post url
    if op_type == "single_post":
        base_data, type_name = parse_data(html)
        select_media(type_name, base_data, path)
        print("[*] Done!")

    # Process comprehensive dump of all media
    else:
        # Use selenium to preload embedded js
        html = load_js(argv[2])
        links, dump_dir = dump(html, argv[2], path)

        for l in links:
            link_html = urlopen(l).read()
            base_data, type_name = parse_data(link_html)
            select_media(type_name, base_data, dump_dir)

        print("[*] Done!")
def main():
    args = getArguments()
    if not args:
        sys.exit(1)
    if len(args.files) > 1:
        print("Only one input file is supported")
        sys.exit(1)

    outfile = args.out_file + '.pyc'
    verbose = args.verbose
    dumpbin = args.dump_binary
    if args.pedantic:
        settings.MODE_PEDANTIC = True
    if args.no_fastgen:
        settings.MODE_FASTGEN = False
    settings.VERBOSITY = verbose

    import lexer
    import parser
    import typechecker
    import optimizer
    import codegen

    for inputFile in args.files:
        with inputFile as f:
            s = f.read()

        # Lexical analysis
        scanner = lexer.MiniJavaScanner()
        tokens = scanner.tokenize(s)
        if args.phase == 'lex':
            lexer.dump(tokens, sys.stdout)
            break

        # Parsing
        p = parser.ProgramParser()
        tree = p.parse(tokens)
        if args.phase == 'parse':
            parser.dump(tree, sys.stdout)
            break

        # Typecheck parse tree
        try:
            typechecker.typecheck(tree)
        except typechecker.TypecheckException as ex:
            print('Nope', file=sys.stderr)
            if verbose:
                print(ex, file=sys.stderr)
            if verbose > 1:
                raise ex
            sys.exit(1)
        if args.phase == 'typecheck':
            print('Looks good')
            break

        # Optimization
        if args.phase == 'optimize':
            parser.dump(tree, sys.stdout)
            print()
        if args.phase == 'optimize' or args.optimize:
            if args.phase == 'optimize' and args.optimize == 0:
                args.optimize = 1
            print("running", args.optimize, "rounds of optimize()")
            for roundNum in range(args.optimize):
                tree.optimize()
            if args.phase == 'optimize':
                parser.dump(tree, sys.stdout)
                break

        # Generate code
        codegen.codegen(outfile, tree, dumpbin)
        if args.phase == 'codegen':
            break

    if args.phase == 'run':
        import importlib
        mod = importlib.import_module(args.out_file)
        mod.main()
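# --- Hypothetical sketch, not part of the original code ---
# getArguments() is not shown. This argparse-based version is an assumption
# reconstructed from the attributes main() reads (files, out_file, verbose,
# dump_binary, pedantic, no_fastgen, phase, optimize); the real option names
# and defaults may differ.
import argparse

def getArguments():
    ap = argparse.ArgumentParser(description='MiniJava compiler driver (sketch)')
    ap.add_argument('files', nargs='+', type=argparse.FileType('r'))
    ap.add_argument('-o', '--out-file', dest='out_file', default='out')
    ap.add_argument('-v', '--verbose', action='count', default=0)
    ap.add_argument('--dump-binary', dest='dump_binary', action='store_true')
    ap.add_argument('--pedantic', action='store_true')
    ap.add_argument('--no-fastgen', dest='no_fastgen', action='store_true')
    ap.add_argument('--phase', default='codegen',
                    choices=['lex', 'parse', 'typecheck', 'optimize', 'codegen', 'run'])
    ap.add_argument('-O', '--optimize', type=int, default=0)
    return ap.parse_args()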