def generate_STS(self, var_str, init_str, invar_str, trans_str):
    ts = TS("Additional system")
    init = []
    trans = []
    invar = []

    sparser = StringParser()

    for var in var_str:
        ts.add_state_var(self._define_var(var))

    for init_s in init_str:
        init.append(sparser.parse_formula(init_s))

    for invar_s in invar_str:
        invar.append(sparser.parse_formula(invar_s))

    for trans_s in trans_str:
        trans.append(sparser.parse_formula(trans_s))

    ts.init = And(init)
    ts.invar = And(invar)
    ts.trans = And(trans)

    return ts

def test_next():
    reset_env()

    arrtype = ArrayType(BVType(3), BVType(4))
    arr = Symbol("arr", arrtype)
    idx = Symbol("idx", BVType(3))

    parser = StringParser()
    [(_, f, _)] = parser.parse_formulae(["next(arr)[idx]"])

    assert f.args()[1] == idx

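# --- Added sketch (not from the original sources) ---------------------------
# A minimal companion test, assuming the same API surface shown above:
# StringParser.parse_formulae takes a list of strings and returns one
# (string, formula, types) triple per input, and array selects use the
# "arr[idx]" / "next(arr)[idx]" syntax exercised in test_next. Treat it as an
# illustrative sketch rather than part of the original test suite.
def test_parse_formulae_triples():
    reset_env()

    arrtype = ArrayType(BVType(3), BVType(4))
    arr = Symbol("arr", arrtype)
    idx = Symbol("idx", BVType(3))

    parser = StringParser()
    # two input strings should yield two (string, formula, types) triples
    results = parser.parse_formulae(["arr[idx]", "next(arr)[idx]"])
    assert len(results) == 2
    for (strprop, formula, _) in results:
        assert idx in get_free_variables(formula)
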
def compile_sts(self, name, params):
    ts = TS()

    parsize = params[0]
    size = None

    if type(parsize) == str:
        sparser = StringParser()
        parsize = sparser.parse_formula(parsize)

    if parsize.is_constant():
        size = parsize.constant_value()

    if get_type(parsize).is_bv_type():
        size = get_type(parsize).width

    if size is None:
        Logger.error("Undefined size for symbol \"%s\"" % (params[0]))

    value = Symbol("%s.value" % name, BVType(size))

    ts.add_var(value)
    ts.trans = EqualsOrIff(value, TS.get_prime(value))

    return ts

def parse_string(self, lines):
    [none, var, state, input, output, init, invar, trans, ftrans] = range(9)
    section = none

    inits = TRUE()
    invars = TRUE()
    transs = TRUE()
    ftranss = {}

    sparser = StringParser()

    count = 0
    vars = set([])
    states = set([])
    inputs = set([])
    outputs = set([])
    invar_props = []
    ltl_props = []

    for line in lines:
        count += 1

        if (line.strip() in ["", "\n"]) or line[0] == T_COM:
            continue

        if T_VAR == line[:len(T_VAR)]:
            section = var
            continue

        if T_STATE == line[:len(T_STATE)]:
            section = state
            continue

        if T_INPUT == line[:len(T_INPUT)]:
            section = input
            continue

        if T_OUTPUT == line[:len(T_OUTPUT)]:
            section = output
            continue

        if T_INIT == line[:len(T_INIT)]:
            section = init
            continue

        if T_INVAR == line[:len(T_INVAR)]:
            section = invar
            continue

        if T_TRANS == line[:len(T_TRANS)]:
            section = trans
            continue

        if T_FTRANS == line[:len(T_FTRANS)]:
            section = ftrans
            continue

        if section in [var, state, input, output]:
            varname, vartype = line[:-2].replace(" ", "").split(":")
            if varname[0] == "'":
                varname = varname[1:-1]
            vartype = parse_typestr(vartype)
            vardef = self._define_var(varname, vartype)
            vars.add(vardef)

            if section == state:
                states.add(vardef)
            if section == input:
                inputs.add(vardef)
            if section == output:
                outputs.add(vardef)

        if section in [init, invar, trans]:
            line = line.replace(T_SC, "").strip()
            qline = quote_names(line, replace_ops=False)

            if section == init:
                inits = And(inits, sparser.parse_formula(qline))
            if section == invar:
                invars = And(invars, sparser.parse_formula(qline))
            if section == trans:
                transs = And(transs, sparser.parse_formula(qline))

        if section == ftrans:
            strvar = line[:line.find(":=")]
            var = sparser.parse_formula(quote_names(strvar, replace_ops=False))
            cond_ass = line[line.find(":=") + 2:].strip()
            ftranss[var] = []

            for cond_as in cond_ass.split("{"):
                if cond_as == "":
                    continue
                cond = cond_as[:cond_as.find(",")]
                ass = cond_as[cond_as.find(",") + 1:cond_as.find("}")]
                ftranss[var].append((sparser.parse_formula(quote_names(cond, replace_ops=False)),
                                     sparser.parse_formula(quote_names(ass, replace_ops=False))))

    hts = HTS("STS")
    ts = TS()

    ts.vars = vars
    ts.state_vars = states
    ts.input_vars = inputs
    ts.output_vars = outputs
    ts.init = inits
    ts.invar = invars
    ts.trans = transs
    ts.ftrans = ftranss

    hts.add_ts(ts)

    return (hts, invar_props, ltl_props)

def generate_HTS(self, module, modulesdic):
    hts = HTS(module.name)
    ts = TS("TS %s" % module.name)

    init = []
    trans = []
    invar = []
    params = []

    sparser = StringParser()

    (vars, states, inputs, outputs) = self._collect_sub_variables(module, modulesdic, path=[],
                                                                  varlist=[], statelist=[],
                                                                  inputlist=[], outputlist=[])

    for var in vars:
        ts.add_var(self._define_var(var, module.name))

    for var in states:
        ts.add_state_var(self._define_var(var, module.name))

    for var in inputs:
        ts.add_input_var(self._define_var(var, module.name))

    for var in outputs:
        ts.add_output_var(self._define_var(var, module.name))

    self._check_parameters(module, modulesdic, ts.vars)

    for par in module.pars:
        assert len(par) == 2, "Expecting a variable"
        hts.add_param(self._define_var((par[0], par[1]), module.name))

    for init_s in module.init:
        formula = sparser.parse_formula(quote_names(init_s, module.name), False)
        init.append(formula)

    for invar_s in module.invar:
        formula = sparser.parse_formula(quote_names(invar_s, module.name), False)
        invar.append(formula)

    for trans_s in module.trans:
        formula = sparser.parse_formula(quote_names(trans_s, module.name), False)
        trans.append(formula)

    for sub in module.subs:
        hts.add_sub(sub[0],
                    self.generate_HTS(modulesdic[sub[1]], modulesdic),
                    tuple([v[0] for v in sub[2]]))

    ts.init = And(init)
    ts.invar = And(invar)
    ts.trans = And(trans)

    hts.add_ts(ts)

    return hts

def solve_problems(self, problems, config):
    encoder_config = self.problems2encoder_config(config, problems)

    self.sparser = StringParser(encoder_config)
    self.lparser = LTLParser()
    self.coi = ConeOfInfluence()

    invar_props = []
    ltl_props = []

    si = False
    if len(problems.symbolic_inits) == 0:
        problems.symbolic_inits.add(si)

    HTSM = 0
    HTS2 = 1
    HTSD = (HTSM, si)

    model_extension = config.model_extension if problems.model_extension is None else problems.model_extension
    assume_if_true = config.assume_if_true or problems.assume_if_true
    cache_files = config.cache_files or problems.cache_files
    clean_cache = config.clean_cache

    modifier = None
    if model_extension is not None:
        modifier = lambda hts: ModelExtension.extend(
            hts, ModelModifiersFactory.modifier_by_name(model_extension))

    # generate systems for each problem configuration
    systems = {}
    for si in problems.symbolic_inits:
        encoder_config.symbolic_init = si
        (systems[(HTSM, si)], invar_props, ltl_props) = self.parse_model(problems.relative_path,
                                                                         problems.model_file,
                                                                         encoder_config,
                                                                         "System 1",
                                                                         modifier,
                                                                         cache_files=cache_files,
                                                                         clean_cache=clean_cache)

        if problems.equivalence is not None:
            (systems[(HTS2, si)], _, _) = self.parse_model(problems.relative_path,
                                                           problems.equivalence,
                                                           encoder_config,
                                                           "System 2",
                                                           cache_files=cache_files,
                                                           clean_cache=clean_cache)
        else:
            systems[(HTS2, si)] = None

    if config.safety or config.problems:
        for invar_prop in invar_props:
            inv_prob = problems.new_problem()
            inv_prob.verification = VerificationType.SAFETY
            inv_prob.name = invar_prop[0]
            inv_prob.description = invar_prop[1]
            inv_prob.formula = invar_prop[2]
            problems.add_problem(inv_prob)

    if config.ltl or config.problems:
        for ltl_prop in ltl_props:
            ltl_prob = problems.new_problem()
            ltl_prob.verification = VerificationType.LTL
            ltl_prob.name = ltl_prop[0]
            ltl_prob.description = ltl_prop[1]
            ltl_prob.formula = ltl_prop[2]
            problems.add_problem(ltl_prob)

    if HTSD in systems:
        problems._hts = systems[HTSD]

    for problem in problems.problems:
        problem.hts = systems[(HTSM, problem.symbolic_init)]
        if problems._hts is None:
            problems._hts = problem.hts
        problem.hts2 = systems[(HTS2, problem.symbolic_init)]
        if problems._hts2 is None:
            problems._hts2 = problem.hts2

        problem.vcd = problems.vcd or config.vcd or problem.vcd
        problem.abstract_clock = problems.abstract_clock or config.abstract_clock
        problem.add_clock = problems.add_clock or config.add_clock
        problem.coi = problems.coi or config.coi
        problem.run_coreir_passes = problems.run_coreir_passes
        problem.relative_path = problems.relative_path
        problem.cardinality = max(problems.cardinality, config.cardinality)

        if not problem.full_trace:
            problem.full_trace = problems.full_trace
        if not problem.trace_vars_change:
            problem.trace_vars_change = problems.trace_vars_change
        if not problem.trace_all_vars:
            problem.trace_all_vars = problems.trace_all_vars
        if not problem.clock_behaviors:
            clk_bhvs = [p for p in [problems.clock_behaviors, config.clock_behaviors] if p is not None]
            if len(clk_bhvs) > 0:
                problem.clock_behaviors = ";".join(clk_bhvs)
        if not problem.generators:
            problem.generators = config.generators

        Logger.log("Solving with abstract_clock=%s, add_clock=%s" %
                   (problem.abstract_clock, problem.add_clock), 2)

        if problem.trace_prefix is not None:
            problem.trace_prefix = "".join([problem.relative_path, problem.trace_prefix])

        if config.time or problems.time:
            timer_solve = Logger.start_timer("Problem %s" % problem.name, False)

        try:
            self.__solve_problem(problem, config)

            if problem.verification is None:
                Logger.log("Unset verification", 2)
                continue

            Logger.msg(" %s\n" % problem.status, 0, not (Logger.level(1)))

            if (assume_if_true) and \
               (problem.status == VerificationStatus.TRUE) and \
               (problem.assumptions is None) and \
               (problem.verification == VerificationType.SAFETY):

                ass_ts = TS("Previous assumption from property")
                if TS.has_next(problem.formula):
                    ass_ts.trans = problem.formula
                else:
                    ass_ts.invar = problem.formula
                problem.hts.reset_formulae()
                problem.hts.add_ts(ass_ts)

            if config.time or problems.time:
                problem.time = Logger.get_timer(timer_solve, False)

        except KeyboardInterrupt as e:
            Logger.msg("\b\b Skipped!\n", 0)

def compile_sts(self, name, params):
    sparser = StringParser()

    in_port, max_val, c_push, c_pop = list(params)
    max_val = int(max_val)

    if type(c_push) == str:
        c_push = sparser.parse_formula(c_push)
    if type(c_pop) == str:
        c_pop = sparser.parse_formula(c_pop)

    tracking = Symbol("%s.tracking" % name, BOOL)
    end = Symbol("%s.end" % name, BOOL)
    done = Symbol("%s.done" % name, BOOL)
    packet = Symbol("%s.packet" % name, BVType(in_port.symbol_type().width))

    max_width = math.ceil(math.log(max_val) / math.log(2))

    max_bvval = BV(max_val, max_width)
    zero = BV(0, max_width)
    one = BV(1, max_width)
    count = Symbol("%s.count" % name, BVType(max_width))
    size = Symbol("%s.size" % name, BVType(max_width))

    pos_c_push = BV2B(c_push)
    neg_c_push = Not(BV2B(c_push))
    pos_c_pop = BV2B(c_pop)
    neg_c_pop = Not(BV2B(c_pop))

    init = []
    trans = []
    invar = []

    # INIT DEFINITION #

    # count = 0
    init.append(EqualsOrIff(count, BV(0, max_width)))
    # tracking = False
    init.append(EqualsOrIff(tracking, FALSE()))
    # size = 0
    init.append(EqualsOrIff(size, BV(0, max_width)))
    # end = false
    init.append(EqualsOrIff(end, FALSE()))

    # INVAR DEFINITION #

    # !done -> (end = (tracking & (size = count)))
    invar.append(Implies(Not(done), EqualsOrIff(end, And(tracking, EqualsOrIff(size, count)))))
    # count <= size
    invar.append(BVULE(count, size))
    # count <= maxval
    invar.append(BVULE(count, max_bvval))
    # size <= maxval
    invar.append(BVULE(size, max_bvval))

    # done -> (end <-> False);
    invar.append(Implies(done, EqualsOrIff(end, FALSE())))
    # done -> (count = 0_8);
    invar.append(Implies(done, EqualsOrIff(count, BV(0, max_width))))
    # done -> (size = 0_8);
    invar.append(Implies(done, EqualsOrIff(size, BV(0, max_width))))
    # done -> (packet = 0_8);
    invar.append(Implies(done, EqualsOrIff(packet, BV(0, in_port.symbol_type().width))))

    # TRANS DEFINITION #

    # (!end & !done) -> next(!done);
    trans.append(Implies(And(Not(end), Not(done)), TS.to_next(Not(done))))
    # end -> next(done);
    trans.append(Implies(end, TS.to_next(done)))
    # done -> next(done);
    trans.append(Implies(done, TS.to_next(done)))

    # tracking -> next(tracking);
    trans.append(Implies(Not(done), Implies(tracking, TS.to_next(tracking))))
    # tracking -> (next(packet) = packet);
    trans.append(Implies(Not(done), Implies(tracking, EqualsOrIff(TS.to_next(packet), packet))))
    # !tracking & next(tracking) -> c_push;
    trans.append(Implies(Not(done), Implies(And(Not(tracking), TS.to_next(tracking)), pos_c_push)))
    # (c_push & next(tracking)) -> ((packet = in) & (next(packet) = in));
    trans.append(Implies(Not(done),
                         Implies(And(pos_c_push, TS.to_next(tracking)),
                                 And(EqualsOrIff(packet, in_port),
                                     EqualsOrIff(TS.to_next(packet), in_port)))))

    # (c_push & !c_pop & tracking) -> (next(count) = (count + 1_8));
    trans.append(Implies(Not(done),
                         Implies(And(pos_c_push, neg_c_pop, tracking),
                                 EqualsOrIff(TS.to_next(count), BVAdd(count, BV(1, max_width))))))
    # (c_push & size < maxval) -> (next(size) = (size + 1_8));
    trans.append(Implies(Not(done),
                         Implies(And(pos_c_push, BVULT(size, max_bvval)),
                                 EqualsOrIff(TS.to_next(size), BVAdd(size, BV(1, max_width))))))
    # (c_pop & size > 0) -> (next(size) = (size - 1_8));
    trans.append(Implies(Not(done),
                         Implies(And(pos_c_pop, BVUGT(size, zero)),
                                 EqualsOrIff(TS.to_next(size), BVSub(size, BV(1, max_width))))))
    # (!(c_push | c_pop)) -> (next(count) = count);
    trans.append(Implies(Not(done),
                         Implies(Not(Or(pos_c_push, pos_c_pop)),
                                 EqualsOrIff(count, TS.to_next(count)))))
    # ((c_push | c_pop) & !tracking) -> (next(count) = count);
    trans.append(Implies(Not(done),
                         Implies(And(Or(pos_c_push, pos_c_pop), Not(tracking)),
                                 EqualsOrIff(count, TS.to_next(count)))))
    # (c_push & size = maxval) -> (next(size) = size);
    trans.append(Implies(Not(done),
                         Implies(And(pos_c_push, EqualsOrIff(size, max_bvval)),
                                 EqualsOrIff(TS.to_next(size), size))))
    # (!(c_push | c_pop)) -> (next(size) = size);
    trans.append(Implies(Not(done),
                         Implies(Not(Or(pos_c_push, pos_c_pop)),
                                 EqualsOrIff(size, TS.to_next(size)))))
    # (!(c_push | c_pop)) -> (next(count) = count);
    trans.append(Implies(Not(done),
                         Implies(Not(Or(pos_c_push, pos_c_pop)),
                                 EqualsOrIff(count, TS.to_next(count)))))
    # (c_pop & size = 0) -> (next(size) = 0);
    trans.append(Implies(Not(done),
                         Implies(And(pos_c_pop, EqualsOrIff(size, zero)),
                                 EqualsOrIff(TS.to_next(size), zero))))
    # (!c_push) -> (next(count) = count);
    trans.append(Implies(Not(done),
                         Implies(neg_c_push, EqualsOrIff(TS.to_next(count), count))))

    init = And(init)
    invar = And(invar)
    trans = And(trans)

    ts = TS()
    ts.vars, ts.init, ts.invar, ts.trans = set([tracking, end, packet, count, size]), init, invar, trans

    return ts

def run_verification(config):
    reset_env()
    Logger.verbosity = config.verbosity

    coreir_parser = None
    ets_parser = None
    sts_parser = None

    if config.ltl:
        ltl_reset_env()

    hts = HTS("Top level")

    if config.strfiles[0][-4:] != ".pkl":
        ps = ProblemSolver()
        (hts, invar_props, ltl_props) = ps.parse_model("./",
                                                       config.strfiles,
                                                       config.abstract_clock,
                                                       config.symbolic_init,
                                                       deterministic=config.deterministic,
                                                       boolean=config.boolean,
                                                       no_clock=config.no_clock)
        config.parser = ps.parser

        if config.pickle_file:
            Logger.msg("Pickling model to %s\n" % (config.pickle_file), 1)
            sys.setrecursionlimit(50000)
            with open(config.pickle_file, "wb") as f:
                pickle.dump(hts, f)
    else:
        if config.pickle_file:
            raise RuntimeError("Don't need to re-pickle the input file %s" % (config.strfile))

        Logger.msg("Loading pickle file %s\n" % (config.strfile), 0)
        with open(config.pickle_file, "rb") as f:
            hts = pickle.load(f)
        Logger.log("DONE", 0)

    printsmv = True

    mc_config = MCConfig()

    sparser = StringParser()
    sparser.remap_or2an = config.parser.remap_or2an
    ltlparser = LTLParser()

    # if equivalence checking, wait to add assumptions to the combined system
    if config.assumptions is not None and config.equivalence is None:
        Logger.log("Adding %d assumptions... " % len(config.assumptions), 1)
        assumps = [t[1] for t in sparser.parse_formulae(config.assumptions)]
        hts.assumptions = assumps

    lemmas = None
    if config.lemmas is not None:
        Logger.log("Adding %d lemmas... " % len(config.lemmas), 1)
        parsed_formulae = sparser.parse_formulae(config.lemmas)
        if list(set([t[2] for t in parsed_formulae]))[0][0] != False:
            Logger.error("Lemmas do not support \"next\" operators")
        lemmas = [t[1] for t in parsed_formulae]
        hts.lemmas = lemmas

    mc_config.smt2file = config.smt2file
    mc_config.full_trace = config.full_trace
    mc_config.trace_vars_change = config.trace_vars_change
    mc_config.trace_all_vars = config.trace_all_vars
    mc_config.prefix = config.prefix
    mc_config.strategy = config.strategy
    mc_config.skip_solving = config.skip_solving
    mc_config.map_function = config.parser.remap_an2or
    mc_config.solver_name = config.solver_name
    mc_config.vcd_trace = config.vcd
    mc_config.prove = config.prove
    mc_config.incremental = config.incremental

    if config.ltl:
        bmc_ltl = BMCLTL(hts, mc_config)
    else:
        bmc_safety = BMCSafety(hts, mc_config)

    if config.translate:
        Logger.log("Writing system to \"%s\"" % (config.translate), 0)
        printer = PrintersFactory.printer_by_name(config.printer)

        props = []
        if config.ltl:
            props += ltlparser.parse_formulae(config.properties)
            props += [(str(p), p, None) for p in ltl_props]
        else:
            props += sparser.parse_formulae(config.properties)
            props += [(str(p), p, None) for p in invar_props]

        with open(config.translate, "w") as f:
            f.write(printer.print_hts(hts, props))

    if config.simulate:
        count = 0
        if config.properties is None:
            props = [("True", TRUE(), None)]
        else:
            props = sparser.parse_formulae(config.properties)

        for (strprop, prop, _) in props:
            Logger.log("Simulation for property \"%s\":" % (strprop), 0)
            res, trace = bmc_safety.simulate(prop, config.bmc_length)
            if res == VerificationStatus.TRUE:
                count += 1
                print_trace("Execution", trace, count, config.prefix)
            else:
                Logger.log("No execution found", 0)

    if config.safety:
        count = 0
        props = sparser.parse_formulae(config.properties)
        props += [(str(p), p, None) for p in invar_props]
        if len(props) == 0:
            Logger.warning("Safety verification requires at least a property")

        for (strprop, prop, _) in props:
            Logger.log("Safety verification for property \"%s\":" % (strprop), 0)
            res, trace, t = bmc_safety.safety(prop, config.bmc_length, config.bmc_length_min)
            Logger.log("\nProperty is %s" % res, 0)
            if res == VerificationStatus.FALSE:
                count += 1
                print_trace("Counterexample", trace, count, config.prefix)

        return 0

    if config.equivalence or config.fsm_check:

        if config.equivalence:
            parser2 = CoreIRParser(config.abstract_clock, config.symbolic_init, config.run_passes)

            Logger.msg("Parsing file \"%s\"... " % (config.equivalence), 0)
            hts2 = parser2.parse_file(config.equivalence)
            Logger.log("DONE", 0)

            symb = " (symbolic init)" if config.symbolic_init else ""
            Logger.log("Equivalence checking%s with k=%s:" % (symb, config.bmc_length), 0)

            if Logger.level(1):
                print(hts2.print_statistics("System 2", Logger.level(2)))
        else:
            hts2 = hts

        # TODO: Make incremental solving optional
        htseq, miter_out = Miter.combine_systems(hts, hts2, config.bmc_length,
                                                 config.symbolic_init, config.properties, True)

        if config.assumptions is not None:
            Logger.log("Adding %d assumptions to combined system... " % len(config.assumptions), 1)
            assumps = [t[1] for t in sparser.parse_formulae(config.assumptions)]
            htseq.assumptions = assumps

        # create bmc object for combined system
        bmcseq = BMC(htseq, mc_config)
        res, trace, t = bmcseq.safety(miter_out, config.bmc_length, config.bmc_length_min)

        msg = "Systems are%s equivalent" if config.equivalence else "System is%s deterministic"

        if res == VerificationStatus.FALSE:
            Logger.log(msg % (" not"), 0)
            print_trace("Counterexample", trace, 1, config.prefix)
        elif res == VerificationStatus.UNK:
            if config.symbolic_init:
                # strong equivalence with symbolic initial state
                Logger.log(msg % (""), 0)
            else:
                Logger.log(msg % ("") + " up to k=%i" % t, 0)
        else:
            Logger.log(msg % ("") + " up to k=%i" % t, 0)

    if config.ltl:
        count = 0
        props = ltlparser.parse_formulae(config.properties)
        props += [(str(p), p, None) for p in ltl_props]
        if len(props) == 0:
            Logger.warning("LTL verification requires at least a property")

        for (strprop, prop, _) in props:
            Logger.log("LTL verification for property \"%s\":" % (strprop), 0)
            res, trace, t = bmc_ltl.ltl(prop, config.bmc_length, config.bmc_length_min)
            Logger.log("\nProperty is %s" % res, 0)
            if res == VerificationStatus.FALSE:
                count += 1
                print_trace("Counterexample", trace, count, config.prefix)

    return 0

def combine_systems(hts, hts2, k, symbolic_init, eqprop=None, inc=True, non_deterministic=False):
    htseq = HTS("eq")

    hts1_varnames = [v.symbol_name() for v in hts.vars]
    hts2_varnames = [v.symbol_name() for v in hts2.vars]

    map1 = dict([(v, TS.get_prefix_name(v, S1)) for v in hts1_varnames] +
                [(TS.get_prime_name(v), TS.get_prefix_name(TS.get_prime_name(v), S1)) for v in hts1_varnames])
    map2 = dict([(v, TS.get_prefix_name(v, S2)) for v in hts2_varnames] +
                [(TS.get_prime_name(v), TS.get_prefix_name(TS.get_prime_name(v), S2)) for v in hts2_varnames])

    ts1_init = TRUE()
    ts2_init = TRUE()

    if not symbolic_init:
        ts1_init = substitute(hts.single_init(), map1)
        ts2_init = substitute(hts2.single_init(), map2)

    ts1 = TS()
    ts1.vars = set([TS.get_prefix(v, S1) for v in hts.vars])
    ts1.set_behavior(ts1_init,
                     substitute(hts.single_trans(), map1),
                     substitute(hts.single_invar(), map1))
    ts1.state_vars = set([TS.get_prefix(v, S1) for v in hts.state_vars])

    ts2 = TS()
    ts2.vars = set([TS.get_prefix(v, S2) for v in hts2.vars])
    ts2.set_behavior(ts2_init,
                     substitute(hts2.single_trans(), map2),
                     substitute(hts2.single_invar(), map2))
    ts2.state_vars = set([TS.get_prefix(v, S2) for v in hts2.state_vars])

    htseq.add_ts(ts1)
    htseq.add_ts(ts2)

    assumptions = []
    lemmas = []

    def sets_intersect(set1, set2):
        for el in set1:
            if not el in set2:
                return False
        return True

    if hts.assumptions is not None:
        for assumption in hts.assumptions:
            assumptions.append(assumption)

    if hts.lemmas is not None:
        for lemma in hts.lemmas:
            lemmas.append(lemma)

    if hts2.assumptions is not None:
        for assumption in hts2.assumptions:
            assumptions.append(assumption)

    if hts2.lemmas is not None:
        for lemma in hts2.lemmas:
            lemmas.append(lemma)

    for assumption in assumptions:
        fv_assumption = get_free_variables(assumption)
        c_assumption = TRUE()

        if sets_intersect(fv_assumption, hts.vars):
            c_assumption = And(c_assumption, substitute(assumption, map1))
        if sets_intersect(fv_assumption, hts2.vars):
            c_assumption = And(c_assumption, substitute(assumption, map2))

        if c_assumption != TRUE():
            htseq.add_assumption(c_assumption)

    for lemma in lemmas:
        fv_lemma = get_free_variables(lemma)
        c_lemma = TRUE()

        if sets_intersect(fv_lemma, hts.vars):
            c_lemma = And(c_lemma, substitute(lemma, map1))
        if sets_intersect(fv_lemma, hts2.vars):
            c_lemma = And(c_lemma, substitute(lemma, map2))

        if c_lemma != TRUE():
            htseq.add_lemma(c_lemma)

    miter_out = Symbol(EQS, BOOL)

    inputs = hts.input_vars.intersection(hts2.input_vars)
    outputs = hts.output_vars.intersection(hts2.output_vars)

    htseq.input_vars = set([TS.get_prefix(v, S1) for v in hts.input_vars]).union(
        set([TS.get_prefix(v, S2) for v in hts2.input_vars]))
    htseq.output_vars = set([TS.get_prefix(v, S1) for v in hts.output_vars]).union(
        set([TS.get_prefix(v, S2) for v in hts2.output_vars]))

    if symbolic_init or (not non_deterministic):
        states = hts.state_vars.intersection(hts2.state_vars)
    else:
        states = []

    eqinputs = TRUE()
    eqoutputs = TRUE()
    eqstates = TRUE()

    for inp in inputs:
        eqinputs = And(eqinputs, EqualsOrIff(TS.get_prefix(inp, S1), TS.get_prefix(inp, S2)))

    for out in outputs:
        eqoutputs = And(eqoutputs, EqualsOrIff(TS.get_prefix(out, S1), TS.get_prefix(out, S2)))

    for svar in states:
        eqstates = And(eqstates, EqualsOrIff(TS.get_prefix(svar, S1), TS.get_prefix(svar, S2)))

    if eqprop is None:
        if symbolic_init or (not non_deterministic):
            invar = And(eqinputs, Iff(miter_out, Implies(eqstates, eqoutputs)))
        else:
            invar = And(eqinputs, Iff(miter_out, eqoutputs))
        Logger.log('Inferring equivalence property: {}'.format(invar), 2)
    else:
        sparser = StringParser()
        eqprop = sparser.parse_formulae(eqprop)
        if len(eqprop) > 1:
            Logger.error("Expecting a single equivalence property")
        eqprop = eqprop[0][1]
        invar = Iff(miter_out, eqprop)
        Logger.log('Using provided equivalence property: {}'.format(invar), 2)

    tsmo = TS()
    tsmo.vars = set([miter_out])
    tsmo.invar = invar
    htseq.add_ts(tsmo)

    return (htseq, miter_out)

def solve_problems(self, problems_config: ProblemsManager) -> None:
    general_config = problems_config.general_config
    model_extension = general_config.model_extension
    assume_if_true = general_config.assume_if_true

    self.sparser = StringParser(general_config)
    self.lparser = LTLParser()

    self.coi = ConeOfInfluence()

    modifier = None
    if general_config.model_extension is not None:
        modifier = lambda hts: ModelExtension.extend(
            hts, ModelModifiersFactory.modifier_by_name(general_config.model_extension))

    # generate main system
    hts, invar_props, ltl_props = self.parse_model(general_config.model_files,
                                                   problems_config.relative_path,
                                                   general_config,
                                                   "System 1",
                                                   modifier)

    # Generate second models if any are necessary
    for problem in problems_config.problems:
        if problem.verification == VerificationType.EQUIVALENCE:
            if problem.equal_to is None:
                raise RuntimeError("No second model for equivalence "
                                   "checking provided for problem {}".format(problem.name))

            hts2, _, _ = self.parse_model(problem.equal_to,
                                          problems_config.relative_path,
                                          general_config,
                                          "System 2",
                                          modifier)
            problems_config.add_second_model(problem, hts2)

    # TODO : contain these types of passes in functions
    #        they should be registered as passes
    if general_config.init is not None:
        iparser = InitParser()
        init_hts, inv_a, ltl_a = iparser.parse_file(general_config.init, general_config)
        assert inv_a is None and ltl_a is None, "Not expecting assertions from init state file"

        # remove old inits
        for ts in hts.tss:
            ts.init = TRUE()

        hts.combine(init_hts)
        hts.single_init(rebuild=True)

    # set default bit-wise initial values (0 or 1)
    if general_config.default_initial_value is not None:
        def_init_val = int(general_config.default_initial_value)

        try:
            if int(def_init_val) not in {0, 1}:
                raise RuntimeError
        except:
            raise RuntimeError("Expecting 0 or 1 for default_initial_value, "
                               "but received {}".format(def_init_val))

        def_init_ts = TS("Default initial values")
        new_init = []
        initialized_vars = get_free_variables(hts.single_init())
        state_vars = hts.state_vars
        num_def_init_vars = 0
        num_state_vars = len(state_vars)

        const_arr_supported = True

        if hts.logic == L_ABV:
            for p in problems_config.problems:
                if p.solver_name not in CONST_ARRAYS_SUPPORT:
                    const_arr_supported = False
                    Logger.warning("Using default_initial_value with arrays, "
                                   "but one of the selected solvers, "
                                   "{} does not support constant arrays. "
                                   "Any assumptions on initial array values will "
                                   "have to be done manually".format(p.solver_name))
                    break

        for sv in state_vars - initialized_vars:
            if sv.get_type().is_bv_type():
                width = sv.get_type().width
                if int(def_init_val) == 1:
                    val = BV((2**width) - 1, width)
                else:
                    val = BV(0, width)

                num_def_init_vars += 1
            elif sv.get_type().is_array_type() and \
                 sv.get_type().elem_type.is_bv_type() and \
                 const_arr_supported:
                svtype = sv.get_type()
                width = svtype.elem_type.width
                if int(def_init_val) == 1:
                    val = BV((2**width) - 1, width)
                else:
                    val = BV(0, width)

                # create a constant array with a default value
                val = Array(svtype.index_type, val)
            else:
                continue

            def_init_ts.add_state_var(sv)
            new_init.append(EqualsOrIff(sv, val))

        def_init_ts.set_behavior(simplify(And(new_init)), TRUE(), TRUE())
        hts.add_ts(def_init_ts)
        Logger.msg("Set {}/{} state elements to zero "
                   "in initial state\n".format(num_def_init_vars, num_state_vars), 1)

    problems_config.hts = hts

    # TODO: Update this so that we can control whether embedded assertions are solved automatically
    if not general_config.skip_embedded:
        for invar_prop in invar_props:
            problems_config.add_problem(verification=VerificationType.SAFETY,
                                        name=invar_prop[0],
                                        description=invar_prop[1],
                                        properties=invar_prop[2])
            self.properties.append(invar_prop[2])
        for ltl_prop in ltl_props:
            problems_config.add_problem(verification=VerificationType.LTL,
                                        name=ltl_prop[0],
                                        description=ltl_prop[1],
                                        properties=ltl_prop[2])
            self.properties.append(ltl_prop[2])

    Logger.log("Solving with abstract_clock=%s, add_clock=%s" %
               (general_config.abstract_clock, general_config.add_clock), 2)

    # ensure the miter_out variable exists
    miter_out = None

    for problem in problems_config.problems:
        if problem.name is not None:
            Logger.log("\n*** Analyzing problem \"%s\" ***" % (problem.name), 1)
            Logger.msg("Solving \"%s\" " % problem.name, 0, not (Logger.level(1)))

        # apply parametric behaviors (such as toggling the clock)
        # Note: This is supposed to be *before* creating the combined system for equivalence checking
        #       we want this assumption to be applied to both copies of the clock
        problem_hts = ParametricBehavior.apply_to_problem(problems_config.hts, problem,
                                                          general_config, self.model_info)

        if problem.verification == VerificationType.EQUIVALENCE:
            hts2 = problems_config.get_second_model(problem)
            problem_hts, miter_out = Miter.combine_systems(hts, hts2, problem.bmc_length,
                                                           general_config.symbolic_init,
                                                           problem.properties, True)

        try:
            # convert the formulas to PySMT FNodes
            # lemmas, assumptions and precondition always use the regular parser
            lemmas, assumptions, precondition = self.convert_formulae(
                [problem.lemmas, problem.assumptions, problem.precondition],
                parser=self.sparser,
                relative_path=problems_config.relative_path)

            if problem.verification != VerificationType.LTL:
                parser = self.sparser
            else:
                parser = self.lparser

            prop = None
            if problem.properties is not None:
                prop = self.convert_formula(problem.properties,
                                            relative_path=problems_config.relative_path,
                                            parser=parser)
                assert len(prop) == 1, "Properties should already have been split into " \
                    "multiple problems but found {} properties here".format(len(prop))
                prop = prop[0]
                self.properties.append(prop)
            else:
                if problem.verification == VerificationType.SIMULATION:
                    prop = TRUE()
                elif (problem.verification is not None) and \
                     (problem.verification != VerificationType.EQUIVALENCE):
                    Logger.error("Property not provided for problem {}".format(problem.name))

            if problem.verification == VerificationType.EQUIVALENCE:
                assert miter_out is not None
                # set property to be the miter output
                # if user provided a different equivalence property, this has already
                # been incorporated in the miter_out
                prop = miter_out
                # reset the miter output
                miter_out = None

            if precondition:
                assert len(precondition) == 1, "There should only be one precondition"
                prop = Implies(precondition[0], prop)

            # TODO: keep assumptions separate from the hts
            # IMPORTANT: CLEAR ANY PREVIOUS ASSUMPTIONS AND LEMMAS
            #            This was previously done in __solve_problem and has been moved here
            #            during the frontend refactor in April 2019
            #            this is necessary because the problem hts is just a reference to the
            #            overall (shared) HTS
            problem_hts.assumptions = None
            problem_hts.lemmas = None

            # Compute the Cone Of Influence
            # Returns a *new* hts (not pointing to the original one anymore)
            if problem.coi:
                if Logger.level(2):
                    timer = Logger.start_timer("COI")
                hts = self.coi.compute(hts, prop)
                if Logger.level(2):
                    Logger.get_timer(timer)

            if general_config.time:
                timer_solve = Logger.start_timer("Problem %s" % problem.name, False)

            status, trace, traces, region = self.__solve_problem(problem_hts, prop, lemmas,
                                                                 assumptions, problem)

            # set status for this problem
            problems_config.set_problem_status(problem, status)

            # TODO: Determine whether we need both trace and traces
            assert trace is None or traces is None, "Expecting either a trace or a list of traces"
            if trace is not None:
                problem_traces = self.__process_trace(hts, trace, general_config, problem)
                problems_config.set_problem_traces(problem, problem_traces)

            if traces is not None:
                traces_to_add = []
                for trace in traces:
                    problem_trace = self.__process_trace(hts, trace, general_config, problem)
                    for pt in problem_trace:
                        traces_to_add.append(pt)
                problems_config.set_problem_traces(problem, traces_to_add)

            if problem.verification == VerificationType.PARAMETRIC:
                assert region is not None
                problems_config.set_problem_region(problem, region)

            if status is not None:
                Logger.msg(" %s\n" % status, 0, not (Logger.level(1)))

            if (assume_if_true) and \
               (status == VerificationStatus.TRUE) and \
               (problem.assumptions is None) and \
               (problem.verification == VerificationType.SAFETY):

                # TODO: relax the assumption on problem.assumptions
                #       can still add it, just need to make it an implication

                ass_ts = TS("Previous assumption from property")
                if TS.has_next(prop):
                    ass_ts.trans = prop
                else:
                    ass_ts.invar = prop
                # add assumptions to main system
                problem_hts.reset_formulae()
                problem_hts.add_ts(ass_ts)

            if general_config.time:
                problems_config.set_problem_time(problem, Logger.get_timer(timer_solve, False))

        except KeyboardInterrupt as e:
            Logger.msg("\b\b Skipped!\n", 0)

def solve_problem(self, problem, config):
    Logger.log("\n*** Analyzing problem \"%s\" ***" % (problem), 1)
    Logger.msg("Solving \"%s\" " % problem.name, 0, not (Logger.level(1)))

    sparser = StringParser()
    lparser = LTLParser()

    mc_config = self.problem2mc_config(problem, config)
    bmc_safety = BMCSafety(problem.hts, mc_config)
    bmc_ltl = BMCLTL(problem.hts, mc_config)
    res = VerificationStatus.UNC
    bmc_length = max(problem.bmc_length, config.bmc_length)
    bmc_length_min = max(problem.bmc_length_min, config.bmc_length_min)

    parsing_defs = [mc_config.properties, mc_config.lemmas, mc_config.assumptions]
    for i in range(len(parsing_defs)):
        if parsing_defs[i] is not None:
            pdef_file = problem.relative_path + parsing_defs[i]
            if os.path.isfile(pdef_file):
                with open(pdef_file) as f:
                    parsing_defs[i] = [p.strip() for p in f.read().strip().split("\n")]
            else:
                parsing_defs[i] = [p.strip() for p in parsing_defs[i].split(MODEL_SP)]
        else:
            parsing_defs[i] = None

    [mc_config.properties, mc_config.lemmas, mc_config.assumptions] = parsing_defs

    assumps = None
    lemmas = None

    accepted_ver = False

    if problem.monitors is not None:
        varsdict = dict([(var.symbol_name(), var) for var in problem.hts.vars])
        for strmonitor in problem.monitors.split(")"):
            strmonitor = strmonitor.replace(" ", "")
            if strmonitor == "":
                continue
            instance, mtype = strmonitor.split("=")
            mtype, pars = mtype.split("(")
            pars = pars.split(",")

            monitor = MonitorsFactory.monitor_by_name(mtype)
            pars = [varsdict[v] if v in varsdict else v for v in pars]
            ts = monitor.get_sts(instance, pars)
            problem.hts.add_ts(ts)

    if problem.verification != VerificationType.EQUIVALENCE:
        assumps = [t[1] for t in sparser.parse_formulae(mc_config.assumptions)]
        lemmas = [t[1] for t in sparser.parse_formulae(mc_config.lemmas)]
        for ass in assumps:
            problem.hts.add_assumption(ass)
        for lemma in lemmas:
            problem.hts.add_lemma(lemma)

        if problem.verification != VerificationType.LTL:
            (strprop, prop, types) = sparser.parse_formulae(mc_config.properties)[0]
        else:
            (strprop, prop, types) = lparser.parse_formulae(mc_config.properties)[0]

    if problem.verification == VerificationType.SAFETY:
        accepted_ver = True
        res, trace, _ = bmc_safety.safety(prop, bmc_length, bmc_length_min)

    if problem.verification == VerificationType.LTL:
        accepted_ver = True
        res, trace, _ = bmc_ltl.ltl(prop, bmc_length, bmc_length_min)

    if problem.verification == VerificationType.SIMULATION:
        accepted_ver = True
        res, trace = bmc_safety.simulate(prop, bmc_length)

    if problem.verification == VerificationType.EQUIVALENCE:
        accepted_ver = True
        if problem.equivalence:
            (problem.hts2, _, _) = self.parse_model(problem.relative_path,
                                                    problem.equivalence,
                                                    problem.abstract_clock,
                                                    problem.symbolic_init,
                                                    "System 2",
                                                    no_clock=problem.no_clock,
                                                    run_passes=problem.run_coreir_passes)

        htseq, miter_out = Miter.combine_systems(problem.hts,
                                                 problem.hts2,
                                                 bmc_length,
                                                 problem.symbolic_init,
                                                 mc_config.properties,
                                                 True)

        if mc_config.assumptions is not None:
            assumps = [t[1] for t in sparser.parse_formulae(mc_config.assumptions)]
        if mc_config.lemmas is not None:
            lemmas = [t[1] for t in sparser.parse_formulae(mc_config.lemmas)]

        if assumps is not None:
            for assumption in assumps:
                htseq.add_assumption(assumption)

        if lemmas is not None:
            for lemma in lemmas:
                htseq.add_lemma(lemma)

        bmcseq = BMCSafety(htseq, mc_config)
        res, trace, t = bmcseq.safety(miter_out, bmc_length, bmc_length_min)

    if not accepted_ver:
        Logger.error("Invalid verification type")

    problem.status = res
    problem.trace = trace

    if problem.assumptions is not None:
        problem.hts.assumptions = None

    Logger.log("\n*** Problem \"%s\" is %s ***" % (problem, res), 1)

def parse_string(self, lines):
    [none, var, state, input, output, init, invar, trans] = range(8)
    section = none

    inits = TRUE()
    invars = TRUE()
    transs = TRUE()

    sparser = StringParser()

    count = 0
    vars = set([])
    states = set([])
    inputs = set([])
    outputs = set([])
    invar_props = []
    ltl_props = []

    for line in lines:
        count += 1
        if line.strip() in ["", "\n"]:
            continue

        if T_VAR == line[:len(T_VAR)]:
            section = var
            continue

        if T_STATE == line[:len(T_STATE)]:
            section = state
            continue

        if T_INPUT == line[:len(T_INPUT)]:
            section = input
            continue

        if T_OUTPUT == line[:len(T_OUTPUT)]:
            section = output
            continue

        if T_INIT == line[:len(T_INIT)]:
            section = init
            continue

        if T_INVAR == line[:len(T_INVAR)]:
            section = invar
            continue

        if T_TRANS == line[:len(T_TRANS)]:
            section = trans
            continue

        if section in [var, state, input, output]:
            line = line[:-2].replace(" ", "").split(":")
            varname, vartype = line[0], (line[1][:-1].split("("))
            if varname[0] == "'":
                varname = varname[1:-1]
            vardef = self._define_var(varname, vartype)
            vars.add(vardef)

            if section == state:
                states.add(vardef)
            if section == input:
                inputs.add(vardef)
            if section == output:
                outputs.add(vardef)

        if section in [init, invar, trans]:
            qline = quote_names(line[:-2], replace_ops=False)
            if section == init:
                inits = And(inits, sparser.parse_formula(qline))
            if section == invar:
                invars = And(invars, sparser.parse_formula(qline))
            if section == trans:
                transs = And(transs, sparser.parse_formula(qline))

    hts = HTS("STS")
    ts = TS()

    ts.vars = vars
    ts.state_vars = states
    ts.input_vars = inputs
    ts.output_vars = outputs
    ts.init = inits
    ts.invar = invars
    ts.trans = transs

    hts.add_ts(ts)

    return (hts, invar_props, ltl_props)

class InitParser(ModelParser):
    extensions = ['init']
    name = "INIT"

    def __init__(self):
        self.parser = StringParser()
        self._pysmt_formula_manager = get_env().formula_manager

    def parse_line(self, string: str) -> Union[FNode, None]:
        if '=' not in string:
            raise RuntimeError("Expecting a single equality but got: {}".format(string))

        split = string.split("=")
        if len(split) > 2:
            raise RuntimeError("Expecting exactly one equality but got: {}".format(string))

        lhs, rhs = split

        try:
            lhs = self.parser.parse_formula(lhs)
            rhs = self.parser.parse_formula(rhs)
        except UndefinedSymbolError:
            return None

        for fv in get_free_variables(lhs):
            if not self._pysmt_formula_manager.is_state_symbol(fv):
                return None

        if not rhs.is_constant():
            raise RuntimeError("Expecting a constant on the right side but got: {}".format(rhs))

        return EqualsOrIff(lhs, rhs)

    # implementing ModelParser interface
    @staticmethod
    def get_extensions():
        return InitParser.extensions

    def is_available(self):
        return True

    def get_model_info(self):
        return None

    def parse_string(self, contents: str) -> HTS:
        '''
        Parses a string representation of an initial state file
        '''
        hts = HTS("INIT")
        ts = TS("TS INIT")
        init = []

        for line in contents.split('\n'):
            line = line.strip()
            if not line:
                continue
            else:
                res = self.parse_line(line)
                if res is not None:
                    init.append(res)

        Logger.msg("Initial state file set concrete values for {} state variables".format(len(init)), 1)

        ts.init = And(init)
        ts.invar = TRUE()
        ts.trans = TRUE()

        hts.add_ts(ts)
        return hts

    def parse_file(self, filepath: Path, config: NamedTuple, flags: str = None) -> Tuple[HTS, List[FNode], List[FNode]]:
        '''
        Reads an initial state file and produces (HTS, invariants, ltl_invariants)
        '''
        hts = HTS(filepath.name)
        ts = TS("TS %s" % filepath.name)
        init = []

        with filepath.open("r") as f:
            hts = self.parse_string(f.read())

        return hts, None, None
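
# --- Added sketch (not from the original sources) ---------------------------
# Hedged usage example for InitParser above. It assumes an init file written as
# one "lhs = constant" equality per line (the shape parse_line accepts), a
# hypothetical variable name "reg.out", and the "0_8" bit-vector constant
# syntax seen elsewhere in this listing. If the parser reports "reg.out" as
# undefined (UndefinedSymbolError), parse_line returns None and the line is
# simply skipped, so parse_string still returns an HTS (with an empty init).
def init_parser_example():
    iparser = InitParser()

    # well-formed init file contents: one equality per line
    hts = iparser.parse_string("reg.out = 0_8\n")

    # malformed lines (no equality) raise a RuntimeError from parse_line
    try:
        iparser.parse_line("not an equality")
    except RuntimeError as e:
        Logger.msg("%s\n" % e, 1)

    return hts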