def parse_and_generate(example):
    """Regression check for one example: parse ``<example>.bex``, run it
    through the CVC4 encoder, and compare the generated JS program and the
    computed outputs against the golden files stored next to the example.

    :param example: path prefix of the example (without the ``.bex`` suffix)
    """
    # Raw strings fix the original non-raw patterns ("//.*(\n|\Z)" and
    # "(\n)+"), whose "\Z" only worked because Python leaves unknown escapes
    # alone — a SyntaxWarning today and a SyntaxError in future versions.
    # Compiling once also avoids re-parsing the same pattern four times.
    comment_re = re.compile(r"//.*(\n|\Z)")
    blank_re = re.compile(r"(\n)+", re.MULTILINE | re.DOTALL)

    with open("%s.bex" % example, "r") as f:
        strp = f.read()
    # Result unused; call kept in case the factory registers printers lazily.
    PrintersFactory.get_printers()
    parser = BeParser()
    program = parser.program_from_string(strp)
    if program.params:
        # Instantiate the first parameter assignment of a parametric program.
        program.apply_param(dict(program.get_params()[0]))
    c4printer = CVC4Encoder()
    c4printer.print_program(program)
    c4printer.print_data_type(program)
    c4printer.print_block_type(program)
    config = Config()
    jprinters = PrintersFactory.get_printers_by_type(PrinterType.JS)
    jprinter = PrintersFactory.printer_by_name(config.jsprinter)
    assert (jprinter in jprinters)
    if not program.params:
        with open("%s/models.txt" % example, "r") as f:
            execsstr = f.read()
        execs = parser.executions_from_string(execsstr, program)
        # Smoke-check only; the returned string was never used.
        c4printer.print_executions(execs)
        jprog = jprinter.print_program(program, execs)
        with open("%s/program.js" % example, "r") as f:
            a = f.read()
        b = jprog
        # Strip // comments and collapse blank-line runs before comparing.
        a = comment_re.sub("", a)
        b = comment_re.sub("", b)
        a = blank_re.sub('\n', a)
        b = blank_re.sub('\n', b)
        if a != b:
            print(example)
            print("\"%s\"" % a)
            print("\"%s\"" % b)
        assert a == b
        with open("%s/outputs.txt" % example, "r") as f:
            a = f.read().split("\n")
        b = jprinter.print_executions(program, execs).split("\n")
        # Order-insensitive comparison of expected outputs.
        a.sort()
        b.sort()
        if a != b:
            print(example)
            print("\"%s\"" % a)
            print("\"%s\"" % b)
        assert a == b
def prune_not_eq(self, ao_execs, executions, model, program, threads):
    """Keep only the valid AO executions and derive a program from each.

    Validity is checked against ``model``/``executions`` (multi-threaded
    when ``threads > 1``); each surviving execution has its AO relation
    re-bound to the event objects of ``program`` and is then turned into
    a program via the Be parser.

    Returns the list of derived programs.
    """
    self.c4vexecsmanager.preload = False
    beparser = BeParser()
    eq_progs = []
    equivalent_AOs = []
    # Name -> event object, used to re-bind the AO tuples below.
    events_dic = {ev.name: ev for ev in program.get_events()}
    if threads > 1:
        valid_aos = self.__check_all_mt(model, ao_execs, executions, threads)
    else:
        valid_aos = self.__check_all(model, ao_execs, executions)
    for exe in valid_aos:
        rebound = Relation(AO)
        rebound.tuples = [(events_dic[str(pair[0])], events_dic[str(pair[1])])
                          for pair in exe.get_AO().tuples]
        exe.set_AO(rebound)
        exe.program = program
        equivalent_AOs.append(exe)
        # NOTE(review): `program` is deliberately rebound here, so later
        # iterations attach the program derived from the previous execution
        # — preserved as in the original; confirm this is intended.
        program = beparser.program_from_execution(exe)
        eq_progs.append(program)
    Logger.log(" DONE", 1)
    if Logger.level(1):
        for el in equivalent_AOs:
            Logger.log("EQUIVALENT: %s" % el.get_AO(), 1)
    return eq_progs
def be_parsing(example):
    """Round-trip check: parse ``<example>.bex`` and verify that printing
    the parsed program with the Be printer reproduces the original source,
    modulo comments, whitespace, and the ``Params{...}`` section.

    :param example: path prefix of the example (without the ``.bex`` suffix)
    """
    with open("%s.bex" % example, "r") as f:
        strp = f.read()
    parser = BeParser()
    program = parser.program_from_string(strp)
    beprinter = PrintersFactory.printer_by_name(BePrinter().get_name())
    beprogram = beprinter.print_program(program)
    # Normalize both sides: collapse newline runs, drop // comments, spaces.
    strp = re.sub(r"\n+", "\n", strp)
    strp = re.sub(r"//.*\n", "", strp)
    beprogram = re.sub(r"//.*\n", "", beprogram)
    strp = strp.replace(" ", "")
    beprogram = beprogram.replace(" ", "")
    # The Params{...} block is not reproduced by the printer, so remove it
    # from both sides. The raw string fixes the original non-raw
    # "Params\{.*\}", whose "\{"/"\}" are invalid escape sequences
    # (SyntaxWarning now, SyntaxError in future Python); compiled once.
    params_re = re.compile(r"Params\{.*\}", re.MULTILINE | re.DOTALL)
    strp = params_re.sub('', strp)
    beprogram = params_re.sub('', beprogram)
    if not (strp == beprogram):
        print(example)
        print(strp)
        print(beprogram)
    assert (strp == beprogram)
def printers_coherence(example):
    """Check that every JS printer agrees: all of them must compute the
    same possible executions for the example, and each must successfully
    print the program.
    """
    with open("%s.bex" % example, "r") as f:
        source_text = f.read()
    parser = BeParser()
    program = parser.program_from_string(source_text)
    if program.params:
        # Instantiate the first parameter assignment of a parametric program.
        program.apply_param(dict(program.get_params()[0]))
    jprinters = PrintersFactory.get_printers_by_type(PrinterType.JS)
    if not program.params:
        with open("%s/models.txt" % example, "r") as f:
            execs = parser.executions_from_string(f.read(), program)
        config = Config()
        reference_printer = PrintersFactory.printer_by_name(config.jsprinter)
        # Baseline produced by the configured printer; all others must match.
        jprog = reference_printer.compute_possible_executions(program, execs)
        for candidate in jprinters:
            assert candidate.compute_possible_executions(program, execs) == jprog
        for candidate in jprinters:
            assert candidate.print_program(program, execs)
    assert True
def is_done(self):
    """Report completion based on the models file.

    Returns the parsed executions' ``allexecs`` flag when the models file
    exists and parses to a truthy result, ``False`` otherwise.
    """
    parser = BeParser()
    execs = None
    if self.models_file and os.path.exists(self.models_file):
        with open(self.models_file, "r") as f:
            execs = parser.executions_from_string(f.read())
    return execs.allexecs if execs else False
def load_models(self):
    """Read executions from ``self.models_file`` (when it exists), cache
    them on ``self.models``, and return them.

    Returns an empty list when the file is unset or missing.
    """
    loaded = []
    parser = BeParser()
    if self.models_file and os.path.exists(self.models_file):
        with open(self.models_file, "r") as f:
            loaded = parser.executions_from_string(f.read()).executions
    self.models = loaded
    return loaded
def parse_program(config):
    """Parse the bounded-execution input file named by ``config.inputfile``
    and make sure the output directory ``config.prefix`` exists.

    Returns the parsed program.
    """
    parser = BeParser()
    parser.DEBUG = config.debug
    # Parsing of the bounded execution #
    with open(config.inputfile, "r") as f:
        source_text = f.read()
    program = parser.program_from_string(source_text)
    if not os.path.exists(config.prefix):
        os.makedirs(config.prefix)
    return program
def load_models(self):
    """Return the models for this manager.

    Order of precedence: a previously cached result (``self.prevmodels``),
    then an empty list when preloading is disabled, then the executions
    parsed from ``self.models_file`` if that file exists.
    """
    if self.prevmodels is not None:
        # Cached from an earlier call — reuse.
        return self.prevmodels
    if not self.preload:
        return []
    loaded = []
    parser = BeParser()
    if self.models_file and os.path.exists(self.models_file):
        with open(self.models_file, "r") as f:
            loaded = parser.executions_from_string(f.read()).executions
    return loaded
def parse(example, valid):
    """Parse ``<example>.bex``, expecting success exactly when ``valid``.

    On parse failure: re-raise for valid examples, otherwise print the
    error and return ``None``. An unexpected success on an invalid example
    fails an assertion. Returns the parsed program on success.
    """
    try:
        with open("%s.bex" % example, "r") as f:
            source_text = f.read()
        # Result unused; call kept in case the factory registers printers.
        PrintersFactory.get_printers()
        program = BeParser().program_from_string(source_text)
        if program.params:
            # Instantiate the first parameter assignment.
            program.apply_param(dict(program.get_params()[0]))
    except Exception as e:
        if valid:
            raise
        print(e)
        return
    if not valid:
        # Parsing was expected to fail but did not.
        assert (False)
    return program
def analyze_constraints(self, program, model, jsengine, runs, threads,
                        jsprogram, use_alloy):
    """Run the JS litmus program, split the observed outputs into matched
    and unmatched models, and analyze the labelling constraints of each
    set with the Alloy or CVC4 backend.

    Returns ``(mmodels, nmodels, diffmodels)`` — the simplified matched,
    unmatched, and difference formulas — or ``None`` when no labelling
    variables are defined or no unmatched models were found.
    """
    matched = None
    unmatched = None
    config = Config()
    config.command = jsengine
    config.input_file = jsprogram
    config.threads = threads
    config.number = runs
    config.silent = True
    config.models = True
    # Collect the labelling variables declared in the model text; the two
    # backends use different line layouts.
    if use_alloy:
        labelling_vars = [y[2] for y in
                          [x.split(" ") for x in model.split("\n")
                           if len(x.split(" ")) > 2]
                          if y[2][:len(LABELLING_VAR_PREF)] == LABELLING_VAR_PREF]
    else:
        labelling_vars = [x.split(" ")[0] for x in model.split("\n")
                          if x[:len(LABELLING_VAR_PREF)] == LABELLING_VAR_PREF]
    if len(labelling_vars) == 0:
        Logger.error("No labelling vars defined")
        return None
    if use_alloy:
        self.al_consamanager.labelling_vars = labelling_vars
    else:
        self.c4_consamanager.labelling_vars = labelling_vars
    # Reuse cached litmus outputs when available, otherwise run and cache.
    (matched, unmatched) = self.__load_outputs(config.number, self.outfile,
                                               jsengine)
    if (matched is None) and (unmatched is None):
        timer = Logger.start_timer("Run Litmus")
        (matched, unmatched) = run_litmus(config)
        Logger.stop_timer(timer)
        self.__save_outputs(config.number, self.outfile, jsengine,
                            matched, unmatched)
    timer = Logger.start_timer("Analyze output")
    parser = BeParser()
    mexecs = parser.executions_from_string("\n".join(matched), program)
    uexecs = parser.executions_from_string("\n".join(unmatched), program)
    Logger.log(" -> Found %s matched models" % (len(matched)), 0)
    Logger.log(" -> Found %s unmatched models" % (len(unmatched)), 0)
    if len(unmatched) == 0:
        Logger.error("No unmatched models")
        Logger.stop_timer(timer)
        return None
    # AO is excluded: it is the relation being solved for.
    rels = [x for x in RELATIONS if x != AO]
    if use_alloy:
        matched = self.al_encoder.print_assert_exl_execs(mexecs, rels)
        unmatched = self.al_encoder.print_assert_exl_execs(uexecs, rels)
    else:
        matched = self.c4_encoder.print_assert_exl_execs(mexecs, rels)
        unmatched = self.c4_encoder.print_assert_exl_execs(uexecs, rels)
    objs = []
    Logger.log("\nMatched models analysis", 0)
    Logger.msg("Solving... ", 0)
    if use_alloy:
        vmodel = "\n".join([model, matched,
                            self.al_encoder.print_run_condition(program, True)])
        objs = self.al_solver.compute_models(vmodel, self.al_consamanager, objs)
        mmodels = " | ".join([x[1] for x in objs])
    else:
        vmodel = "%s\n%s" % (model, matched)
        objs = self.c4_solver.compute_models(vmodel, self.c4_consamanager, objs)
        mmodels = " | ".join(objs)
    Logger.log(" DONE", 0)
    mmodels = self.bsolver.simplify(mmodels, True)
    self.__print_models(mmodels)
    objs = []
    Logger.log("Unmatched models analysis", 0)
    Logger.msg("Solving... ", 0)
    if use_alloy:
        vmodel = "\n".join([model, unmatched,
                            self.al_encoder.print_run_condition(program, True)])
        objs = self.al_solver.compute_models(vmodel, self.al_consamanager, objs)
        nmodels = " | ".join([x[1] for x in objs])
    else:
        vmodel = "%s\n%s" % (model, unmatched)
        objs = self.c4_solver.compute_models(vmodel, self.c4_consamanager, objs)
        nmodels = " | ".join(objs)
    Logger.log(" DONE", 0)
    nmodels = self.bsolver.simplify(nmodels, True)
    self.__print_models(nmodels)
    Logger.log("Difference analysis (exist support(matched) in unmatched)", 0)
    diffmodels = self.bsolver.support_exist(" | ".join(mmodels),
                                            " | ".join(nmodels), True)
    self.__print_models(diffmodels)
    self.user_defined_analyses(mmodels, nmodels)
    Logger.stop_timer(timer)
    return (mmodels, nmodels, diffmodels)
def analyze_program(config):
    """Top-level program analysis: parse the input, generate the Alloy or
    CVC4 model, solve it, and — for each parameter configuration — emit
    the JS litmus test, its expected outputs, and the dot graphs of the
    memory-model interpretations.

    Returns 0.
    """
    config.generate_filenames()
    Logger.log("\n** Program Analysis **", 0)
    Logger.msg("Generating bounded execution... ", 0)
    program = parse_program(config)
    Logger.log("DONE", 0)
    Logger.msg("Generating model... ", 0)
    if config.use_alloy:
        strmodel = generate_alloy_model(config, program)
    else:
        strmodel = generate_cvc_model(config, program)
    Logger.log("DONE", 0)
    if config.only_model:
        return 0
    if (not config.skip_solving):
        Logger.msg("Solving... ", 0)
    # solve() is always called; the messages are skipped when solving is.
    totmodels = solve(config, program, strmodel)
    if (not config.skip_solving):
        Logger.log(" DONE", 0)
    if totmodels > 0:
        Logger.log(" -> Found %s possible model%s"
                   % (totmodels, "" if totmodels == 1 else "s"), 0)
    else:
        Logger.log(" -> No viable executions found", 0)
    # Generation of the JS litmus test #
    pprinter = PrintersFactory.printer_by_name(config.jsprinter)
    dprinter = PrintersFactory.printer_by_name(DotPrinter().get_name())
    dprinter.set_printing_relations(config.printing_relations)
    prefix = config.prefix
    params = program.get_params()
    models = config.models
    for idparam in range(program.param_size()):
        if program.params:
            # Per-parameter output directory and filenames.
            config.prefix = "%sparam%03d/" % (prefix, idparam + 1)
            config.generate_filenames()
            program.apply_param(dict(params[idparam]))
            if config.verbosity > 0:
                conf = params[idparam]
                pconf = ["%s=\"%s\"" % (x[0], x[1]) for x in conf]
                Logger.log("\nParameter configuration (%03d): %s"
                           % (idparam + 1, (", ".join(pconf))), 0)
        executions = None
        if (totmodels > 0):
            Logger.msg("Computing expected outputs... ", 0)
            parser = BeParser()
            parser.DEBUG = config.debug
            with open(models, "r") as modelfile:
                executions = parser.executions_from_string(modelfile.read(),
                                                           program)
            Logger.log("DONE", 0)
        Logger.msg("Generating program... ", 0)
        outfiles = [config.outprogram]
        if config.jsdir:
            # Flatten the path into a single filename under jsdir.
            filename = config.outprogram.replace("../", "") \
                                        .replace("/", "-").replace("..", "")
            outprogram = "%s/%s" % (config.jsdir, filename)
            outfiles = [outprogram]
        extension = pprinter.get_extension()
        for outfile in outfiles:
            with open(outfile + extension, "w") as f:
                f.write(pprinter.print_program(program, executions))
        Logger.log("DONE", 0)
        if (totmodels > 0):
            Logger.msg("Generating expected outputs... ", 0)
            # Generation of all possible outputs for the JS litmus test #
            execs = pprinter.compute_possible_executions(program, executions)
            if config.debug:
                if execs is not None:
                    with open(config.execs, "w") as exefile:
                        exefile.write("\n".join(execs))
            # Generation of all possible MM interpretations #
            mms = dprinter.print_executions(program, executions)
            for i in range(len(mms)):
                with open(config.dots % (str(i + 1)), "w") as dot:
                    dot.write(mms[i])
                if config.graphviz:
                    # NOTE(review): the opened handle is unused here in the
                    # original as well; graphviz_gen writes the file itself.
                    with open(config.grap % (str(i + 1)), "w") as dot:
                        graphviz_gen(config, config.dots % (str(i + 1)),
                                     config.grap % (str(i + 1)))
            Logger.log("DONE", 0)
            Logger.log(" -> Found %s possible output%s"
                       % (len(execs), "" if len(execs) == 1 else "s"), 0)
    return 0
def run_litmus(config):
    """Run the litmus command ``config.number`` times across
    ``config.threads`` worker processes, tally how often each expected
    output is observed, and report coverage.

    Expected outputs come either from ``config.outputs`` (one per line) or
    from the compressed model data embedded in ``config.input_file``.

    Returns 1 on error or unmatched outputs in silent mode, 0 otherwise —
    except when ``config.models`` is set, where it returns the matched /
    unmatched model strings (silent mode) or prints a differential
    evaluation of them.
    """
    command = config.command.split(" ")
    number = config.number if type(config.number) == str else str(config.number)
    input_file_has_models = False
    if config.input_file:
        command.append(config.input_file)
    outputs_dic = {}
    # Expand the K/M suffixes of the run count.
    factor = 1
    if K in number:
        number = number.replace(K, "")
        factor = factor * (10**3)
    if M in number:
        number = number.replace(M, "")
        factor = factor * (10**6)
    try:
        number = int(number)
    except Exception:
        number = 1
    number = int(number) * factor
    if config.outputs:
        # Expected outputs listed one per line; each is normalized by
        # sorting its ";"-separated fields.
        try:
            with open(config.outputs, "r") as f:
                i = 1
                for line in f.readlines():
                    line = line.replace("\n", "")
                    line = line.split(";")
                    line.sort()
                    line = ";".join(line)
                    outputs_dic[line] = [i, 0]
                    i += 1
                input_file_has_models = True
        except Exception:
            print("File not found \"%s\"" % config.outputs)
            return 1
    else:
        # Expected outputs embedded in the program file after the DATA
        # marker, compressed; each decompressed line is OUT...MOD<model>.
        try:
            modelstr = None
            with open(config.input_file, "r") as f:
                strfile = f.read()
                if EPrinter.DATA in strfile:
                    modelstr = strfile[strfile.find(EPrinter.DATA)
                                       + len(EPrinter.DATA):]
                    modelstr = modelstr.split("\n")
                    modelstr = [x[2:] for x in modelstr]
                    modelstr = "".join(modelstr)
            if modelstr is not None:
                input_file_has_models = True
                i = 1
                for line in decompress_string(modelstr).split("\n"):
                    model = line[line.find(EPrinter.MOD) + len(EPrinter.MOD):]
                    output = line[len(EPrinter.OUT):line.find(EPrinter.MOD)]
                    output = output.split(";")
                    output.sort()
                    output = ";".join(output)
                    outputs_dic[output] = [i, 0, model]
                    i += 1
        except Exception:
            print("Error while reading \"%s\"" % config.input_file)
            return 1
    if not input_file_has_models:
        print("ERROR: the input file does not contain model information")
        return 0
    # Ignore SIGINT while the pool forks so workers inherit the ignore,
    # then restore the handler in the parent.
    original_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
    num_t = config.threads
    pool = multiprocessing.Pool(num_t)
    async_results = []
    outputs_t = []
    signal.signal(signal.SIGINT, original_sigint_handler)
    for i in range(num_t):
        async_results.append(pool.apply_async(
            run_command, (command, int(number / num_t), config.silent)))
    if config.silent:
        sys.stdout.write("Running (x%s) \"%s\"..."
                         % (number, " ".join(command)))
        sys.stdout.flush()
    try:
        if not config.silent:
            print("** Processing command \"%s\" **" % (" ".join(command)))
        if not config.silent:
            print("Running...")
        time.sleep(5)
    except KeyboardInterrupt:
        print("Caught KeyboardInterrupt, terminating workers")
        pool.terminate()
        return 1
    for i in range(num_t):
        outputs_t.append(async_results[i].get())
    # Tally observed outputs against the expected set.
    not_matched = []
    for outputs in outputs_t:
        for el in outputs:
            if el not in outputs_dic:
                not_matched.append(el)
            else:
                outputs_dic[el][1] += outputs[el]
    not_matched = list(set(not_matched))
    results = [(outputs_dic[x][1], x) for x in outputs_dic]
    results.sort()
    results.reverse()
    if not config.silent:
        sys.stdout.write('\n=== Results ===\n')
        sys.stdout.flush()
    lines = []
    for el in not_matched:
        if not config.silent:
            lines.append("NOT MATCHED OUTPUT ERROR: \"%s\"" % el)
    if config.pretty:
        table = PrettyTable()
    mmatched = set([])
    matches = 0
    for result in results:
        if result[0] > 0:
            num = result[0] if not config.percent else float(
                float(result[0] * 100) / float(number))
            res_val = result[1]
            row = None
            if config.percent:
                if not config.silent:
                    if config.pretty:
                        row = ["%.2f%%" % num] + res_val.split(";")
                    else:
                        lines.append("%.2f%%\t\"%s\"" % (num, result[1]))
            else:
                if not config.silent:
                    if config.pretty:
                        row = [num] + res_val.split(";")
                    else:
                        lines.append("%s\t\"%s\"" % (num, result[1]))
            matches += 1
            mmatched.add(tuple(outputs_dic[res_val]))
            if row:
                table.add_row(row)
    if (not config.silent) and (config.pretty):
        table.field_names = ["Frequency"] + [
            "Output %s" % (x + 1)
            for x in range(len(results[0][1].split(";")))]
        table.align = "l"
        lines.append(str(table))
    if config.percent:
        if not config.silent:
            lines.append("Coverage: %.2f%%"
                         % (float(float(matches * 100) / float(len(results)))))
    else:
        if not config.silent:
            lines.append("Coverage: %s/%s" % (matches, len(results)))
    if not config.silent:
        print("\n".join(lines))
    if config.silent:
        if len(not_matched) > 0:
            print("FAIL")
            if not config.models:
                return 1
        else:
            print("ok")
            if not config.models:
                return 0
    if config.models:
        # Entry index 2 is the model string (only present when outputs were
        # read from the input file's embedded data).
        mmatched = [str(x[2]) for x in mmatched]
        amatched = [str(outputs_dic[x][2]) for x in outputs_dic]
        nmatched = [x for x in amatched if x not in mmatched]
        if config.silent:
            return (mmatched, nmatched)
        print("\n== Matched models ==")
        print("\n".join(mmatched))
        print("\n== Unmatched models ==")
        print("\n".join([x for x in nmatched if x not in mmatched]))
        beparser = BeParser()
        mmatched = beparser.executions_from_string("\n".join(mmatched))
        nmatched = beparser.executions_from_string("\n".join(nmatched))
        evaluator = Evaluator(mmatched, nmatched)
        emod = evaluator.differential_evaluation()
        print("\n== Missing (single) Reads Bytes From ==")
        out_str = ["%s := [%s]" % (x[0], ", ".join(x[1]))
                   for x in emod.get_u_RBF(MatchType.UM)]
        out_str.sort()
        print("\n".join(out_str))
        print("\n== Difference Between Happens Before ==")
        print(", ".join(["(%s, %s)" % (x) for x in emod.get_u_HB(MatchType.UM)]))