def model_if_none(clauses1, implied, model): h = model if h == None: s = z3.Solver() z3c = clauses_to_z3(clauses1) s.add(z3c) if implied != None: s.add(not_clauses_to_z3(implied)) sort_size = 1 while True: s.push() for sort in ivy_logic.uninterpreted_sorts(): s.add(formula_to_z3(sort_size_constraint(sort, sort_size))) if s.check() != z3.unsat: m = get_model(s) print "model = {}, size = {}".format(m, sort_size) ## print "clauses1 = {}".format(clauses1) ## print "z3c = {}".format(str(z3c)) syms = used_symbols_clauses(clauses1) if implied != None: syms.update(used_symbols_clauses(implied)) h = HerbrandModel(s, m, syms) s.pop() return h sort_size += 1 s.pop() return h
def rename_distinct(clauses1, clauses2): """ rename skolems in clauses1 so they don't occur in clauses2. """ # print "rename_distinct clauses1 = {}".format(clauses1) # print "rename_distinct clauses2 = {!r}".format(clauses2) used1 = used_symbols_clauses(clauses1) used2 = used_symbols_clauses(clauses2) rn = UniqueRenamer('', used2) map1 = dict() for s in used1: if is_skolem(s) and not is_global_skolem(s): map1[s] = rename(s, rn) return rename_clauses(clauses1, map1)
def rename_distinct(clauses1, clauses2):
    """Rename skolem symbols of clauses1 so they do not occur in clauses2."""
    taken = used_symbols_clauses(clauses2)
    fresh = UniqueRenamer('', taken)
    subst = dict((sym, rename(sym, fresh))
                 for sym in used_symbols_clauses(clauses1)
                 if is_skolem(sym))
    return rename_clauses(clauses1, subst)
def check_final_cond(ag, post, final_cond, rels_to_min=[], shrink=False, handler_class=None):
    """Check whether `final_cond` can fail in state `post` of analysis graph `ag`.

    Searches for a small counterexample model of the history of `post`
    conjoined with the background theory.  If one is found, replays the
    action annotation through a trace handler and returns the handler;
    otherwise returns None.

    `final_cond` may be a single clauses object, or a list of failure
    condition objects exposing cond() and failed (the interface consumed
    by get_small_model).
    """
    history = ag.get_history(post)
    axioms = im.module.background_theory()
    clauses = history.post
    clauses = lut.and_clauses(clauses, axioms)
    model = slv.get_small_model(clauses,
                                lg.uninterpreted_sorts(),
                                rels_to_min,
                                final_cond=final_cond,
                                shrink=shrink)
    if model is not None:
        # BUG FIX: the failing conditions come from final_cond itself;
        # the previous code referenced an undefined name `ffcs`, which
        # raised NameError whenever final_cond was a list.
        failed = ([final_cond] if not isinstance(final_cond, list)
                  else [c.cond() for c in final_cond if c.failed])
        mclauses = lut.and_clauses(*([clauses] + failed))
        vocab = lut.used_symbols_clauses(mclauses)
        handler = (handler_class(mclauses, model, vocab)
                   if handler_class is not None
                   else Trace(mclauses, model, vocab))
        assert all(x is not None for x in history.actions)
        # work around a bug in ivy_interp
        actions = [im.module.actions[a] if isinstance(a, str) else a
                   for a in history.actions]
        action = act.Sequence(*actions)
        act.match_annotation(action, clauses.annot, handler)
        handler.end()
        return handler
    return None
def check_fcs_in_state(mod, ag, post, fcs):
    """Check the failure conditions `fcs` in state `post` of analysis graph `ag`.

    If tracing is enabled (opt_trace), searches for a small counterexample
    model; when one is found, a trace is printed via MatchHandler and the
    process exits.  Otherwise the conditions are checked with
    history.satisfy and a counterexample may be shown in the GUI.
    Returns True iff no condition failed.
    """
    history = ag.get_history(post)
    # model finder used by history.satisfy; shrinks only when diagnosing
    gmc = lambda cls, final_cond: itr.small_model_clauses(cls, final_cond, shrink=diagnose.get())
    axioms = im.module.background_theory()
    if opt_trace.get():
        clauses = history.post
        clauses = lut.and_clauses(clauses, axioms)
        ffcs = filter_fcs(fcs)
        model = itr.small_model_clauses(clauses, ffcs, shrink=True)
        if model is not None:
            # conjoin the conditions that actually failed so that the
            # printed trace explains them
            mclauses = lut.and_clauses(*([clauses] + [c.cond() for c in ffcs if c.failed]))
            vocab = lut.used_symbols_clauses(mclauses)
            handler = MatchHandler(mclauses, model, vocab)
            assert all(x is not None for x in history.actions)
            # work around a bug in ivy_interp
            actions = [im.module.actions[a] if isinstance(a, str) else a for a in history.actions]
            action = act.Sequence(*actions)
            act.match_annotation(action, clauses.annot, handler)
            handler.end()
            # trace printed; terminate the whole process
            exit(0)
        # NOTE(review): when tracing is on but no model is found, the
        # satisfy() check below is skipped entirely — confirm intended.
    else:
        res = history.satisfy(axioms, gmc, filter_fcs(fcs))
        if res is not None and diagnose.get():
            show_counterexample(ag, post, res)
    return not any(fc.failed for fc in fcs)
def compose_updates(update1, axioms, update2):
    """Sequentially compose two updates into a single update.

    Each update is a triple (updated symbols, transition clauses,
    precondition).  Symbols written by both updates are routed through
    fresh '__m_' mid-state symbols so that update2 reads the values
    written by update1.  Returns (new_updated, new_clauses, new_pre).
    """
    updated1, clauses1, pre1 = update1
    updated2, clauses2, pre2 = update2
    # make skolems of update2 distinct from those of update1
    clauses2 = rename_distinct(clauses2, clauses1)
    pre2 = rename_distinct(pre2, clauses1)
    us1 = set(updated1)
    us2 = set(updated2)
    mid = us1.intersection(us2)           # symbols written by both updates
    mid_ax = clauses_using_symbols(mid, axioms)
    used = used_symbols_clauses(and_clauses(clauses1, clauses2))
    rn = UniqueRenamer('__m_', used)
    map1 = dict()                         # for update1: new(v) -> mid-state name
    map2 = dict()                         # for update2: v -> new(v) or mid-state name
    for v in updated1:
        map2[v] = new(v)
    for mv in mid:
        mvf = rename(mv, rn)
        map1[new(mv)] = mvf
        map2[mv] = mvf
    clauses1 = rename_clauses(clauses1, map1)
    new_clauses = and_clauses(clauses1, rename_clauses(and_clauses(clauses2, mid_ax), map2))
    new_updated = list(us1.union(us2))
    # keep track of post-state of assertion failure
    pre1 = and_clauses(pre1, diff_frame(updated1, updated2, None, new))
    # failure happens if update1 fails, or update1 succeeds and update2 fails
    new_pre = or_clauses(pre1, and_clauses(clauses1, rename_clauses(and_clauses(pre2, mid_ax), map2)))
    return (new_updated, new_clauses, new_pre)
def compose_updates(update1, axioms, update2):
    """Sequentially compose two (updated, clauses, pre) updates.

    Symbols updated by both are routed through fresh '__m_' mid-state
    symbols, so the second update reads the values produced by the
    first.  Returns the composed triple (new_updated, new_clauses,
    new_pre).
    """
    updated1, clauses1, pre1 = update1
    updated2, clauses2, pre2 = update2
    # rename skolems in update2 apart from update1
    clauses2 = rename_distinct(clauses2, clauses1)
    pre2 = rename_distinct(pre2, clauses1)
    us1 = set(updated1)
    us2 = set(updated2)
    mid = us1.intersection(us2)           # written by both updates
    mid_ax = clauses_using_symbols(mid, axioms)
    used = used_symbols_clauses(and_clauses(clauses1, clauses2))
    rn = UniqueRenamer('__m_', used)
    map1 = dict()                         # renames new(v) of update1 to the mid-state symbol
    map2 = dict()                         # renames update2's reads to update1's writes
    for v in updated1:
        map2[v] = new(v)
    for mv in mid:
        mvf = rename(mv, rn)
        map1[new(mv)] = mvf
        map2[mv] = mvf
    clauses1 = rename_clauses(clauses1, map1)
    new_clauses = and_clauses(clauses1, rename_clauses(and_clauses(clauses2, mid_ax), map2))
    new_updated = list(us1.union(us2))
    # keep track of post-state of assertion failure
    pre1 = and_clauses(pre1, diff_frame(updated1, updated2, None, new))
    # overall failure: update1 fails, or it succeeds and update2 fails
    new_pre = or_clauses(pre1, and_clauses(clauses1, rename_clauses(and_clauses(pre2, mid_ax), map2)))
    return (new_updated, new_clauses, new_pre)
def exist_quant_map(syms, clauses):
    """Existentially project the symbols `syms` out of `clauses` by renaming
    them to fresh '__'-prefixed names.

    Returns the pair (renaming map, renamed clauses).
    """
    taken = used_symbols_clauses(clauses)
    fresh = UniqueRenamer('__', taken)
    mapping = dict((sym, rename(sym, fresh)) for sym in syms)
    return mapping, rename_clauses(clauses, mapping)
def exist_quant_map(syms, clauses):
    """Rename each symbol in `syms` to a fresh name not used in `clauses`.

    Returns (map, clauses') where the map takes each symbol of syms to
    its fresh replacement and clauses' is clauses under that map.
    """
    renamer = UniqueRenamer('__', used_symbols_clauses(clauses))
    subst = {}
    for sym in syms:
        subst[sym] = rename(sym, renamer)
    return subst, rename_clauses(clauses, subst)
def interp_from_unsat_core(clauses1, clauses2, core, interpreted):
    """Attempt to build an interpolant from the unsat core of clauses1.

    Constants of the core that do not occur in clauses2 (or that are
    skolems) are generalized to fresh variables, and the negated core is
    returned as a simplified clauses object.  Returns None when the core
    contains variables (the interpolant would require skolem constants).

    Fix: removed the unused local `used_syms`.
    """
    vars = used_variables_clauses(core)
    if vars:
        # interpolant would require skolem constants
        return None
    core_consts = used_constants_clauses(core)
    clauses2_consts = used_constants_clauses(clauses2)
    # generalize constants local to the core into fresh variables V0, V1, ...
    renaming = dict()
    i = 0
    for v in core_consts:
        if v not in clauses2_consts or v.is_skolem():  # and v not in interpreted:
            renaming[v] = Variable('V' + str(i), Constant(v).get_sort())
            i += 1
    renamed_core = substitute_constants_clauses(core, renaming)
    # the interpolant is the negation of the (generalized) core
    res = simplify_clauses(Clauses([Or(*[negate(c) for c in renamed_core.fmlas])]))
    return res
def get_small_model(clauses, sorts_to_minimize, relations_to_minimize):
    """
    Return a HerbrandModel with a "small" model of clauses, or None if
    clauses is unsatisfiable.

    sorts_to_minimize is a list of sorts, and relations_to_minimize is a
    list of relations.  The model minimization occurs in 2 ways:

    First, minimize universe size lexicographically according to the order
    of sorts_to_minimize.

    Second, minimize the number of positive entries in the relations
    according to the order of relations_to_minimize.
    """
    s = z3.Solver()
    s.add(clauses_to_z3(clauses))
    if s.check() == z3.unsat:
        return None
    print "shrinking model {"
    # for each sort/relation, find the least bound n that stays sat;
    # the successful constraint is left pushed on the solver stack
    for x in chain(sorts_to_minimize, relations_to_minimize):
        for n in itertools.count(1):
            s.push()
            s.add(formula_to_z3(size_constraint(x, n)))
            if s.check() == z3.sat:
                break
            else:
                s.pop()
    print "} shrinking model"
    m = get_model(s)
    h = HerbrandModel(s, m, used_symbols_clauses(clauses))
    return h
def get_model_clauses(clauses1):
    """Return a HerbrandModel satisfying clauses1, or None if unsat."""
    solver = z3.Solver()
    solver.add(clauses_to_z3(clauses1))
    if solver.check() == z3.unsat:
        return None
    model = get_model(solver)
    return HerbrandModel(solver, model, used_symbols_clauses(clauses1))
def get_model_clauses(clauses1):
    """Solve clauses1 with Z3 and return a HerbrandModel, or None when
    the clauses are unsatisfiable."""
    solver = z3.Solver()
    solver.add(clauses_to_z3(clauses1))
    outcome = solver.check()
    if outcome == z3.unsat:
        return None
    return HerbrandModel(solver, get_model(solver), used_symbols_clauses(clauses1))
def action_to_state(update):
    """ convert from the "action" style to the "state" style """
    updated, tr, pre = update
    renaming = dict((sym, old(sym)) for sym in updated)
    for sym in used_symbols_clauses(tr):
        if is_new(sym):
            renaming[sym] = new_of(sym)
    return (updated, rename_clauses(tr, renaming), pre)
def mine_constants2(mod, trans, invariant):
    """Collect the non-function constants used by `trans` and `invariant`,
    grouped by sort.

    Returns a defaultdict mapping each sort to the list of constant
    symbols of that sort.

    Fix: removed the unused local `defnd` (a set of defined symbols that
    was computed but never read).
    """
    res = defaultdict(list)
    syms = ilu.used_symbols_ast(invariant)
    syms.update(ilu.used_symbols_clauses(trans))
    for c in syms:
        if not il.is_function_sort(c.sort):
            res[c.sort].append(c)
    return res
def action_to_state(update):
    """Convert an "action"-style update triple to "state" style.

    Updated symbols are mapped to their old-state versions, and every
    next-state symbol occurring in the transition is mapped back to its
    base version.
    """
    updated, transition, precond = update
    subst = {}
    for sym in updated:
        subst[sym] = old(sym)
    subst.update((sym, new_of(sym))
                 for sym in used_symbols_clauses(transition) if is_new(sym))
    return (updated, rename_clauses(transition, subst), precond)
def get_model_clauses(clauses1):
    """Return a HerbrandModel of clauses1, or None if unsatisfiable.

    This variant logs debug markers around the (potentially
    long-running) solver call.
    """
    s = z3.Solver()
    z3c = clauses_to_z3(clauses1)
    s.add(z3c)
    iu.dbg('"before check"')
    res = s.check()
    iu.dbg('"after check"')
    if res == z3.unsat:
        return None
    m = get_model(s)
    return HerbrandModel(s, m, used_symbols_clauses(clauses1))
def state_to_action(update):
    """ convert from the "state" style to the "action" style """
    updated, postcond, pre = update
    postcond = clausify(postcond)
    pre = clausify(pre)
    renaming = dict((sym, new(sym)) for sym in updated)
    for sym in used_symbols_clauses(postcond):
        if is_old(sym):
            renaming[sym] = old_of(sym)
    return (updated, rename_clauses(postcond, renaming), pre)
def action_to_state(update):
    """Convert a SemActionValue update to a SemStateValue.

    Updated symbols are renamed to their old-state versions, and
    next-state symbols in the transition back to their base versions.

    Fix: the assert previously referenced the undefined name `state`
    (NameError); the parameter being checked is `update`.
    """
    assert isinstance(update, SemActionValue)
    updated, tr, pre = update.comps
    renaming = dict()
    for s in updated:
        renaming[s] = old(s)
    for s in used_symbols_clauses(tr):
        if is_new(s):
            renaming[s] = new_of(s)
    return SemStateValue(updated, rename_clauses(tr, renaming), pre)
def action_to_state(update):
    """Convert a SemActionValue update to a SemStateValue.

    Fix: the assert previously referenced the undefined name `state`
    (NameError); the argument being validated is `update`.
    """
    assert isinstance(update, SemActionValue)
    updated, tr, pre = update.comps
    renaming = dict()
    for s in updated:
        renaming[s] = old(s)
    for s in used_symbols_clauses(tr):
        if is_new(s):
            renaming[s] = new_of(s)
    return SemStateValue(updated, rename_clauses(tr, renaming), pre)
def state_to_action(update):
    """Convert a "state"-style update triple to "action" style.

    Each updated symbol is renamed to its next-state version, and each
    old-state symbol of the (clausified) postcondition is renamed back
    to its base version.
    """
    updated, postcond, precond = update
    postcond, precond = clausify(postcond), clausify(precond)
    subst = {}
    for sym in updated:
        subst[sym] = new(sym)
    subst.update((sym, old_of(sym))
                 for sym in used_symbols_clauses(postcond) if is_old(sym))
    return (updated, rename_clauses(postcond, subst), precond)
def state_to_action(update):
    """Convert a SemStateValue update to a SemActionValue.

    Updated symbols become next-state symbols; old-state symbols in the
    clausified postcondition are mapped back to their base versions.
    """
    assert isinstance(update, SemStateValue)
    updated, postcond, pre = update.comps
    postcond = clausify(postcond)
    pre = clausify(pre)
    subst = dict((sym, new(sym)) for sym in updated)
    for sym in used_symbols_clauses(postcond):
        if is_old(sym):
            subst[sym] = old_of(sym)
    return SemActionValue(updated, rename_clauses(postcond, subst), pre)
def state_to_action(update):
    """Turn a SemStateValue into the equivalent SemActionValue."""
    assert isinstance(update, SemStateValue)
    updated, postcond, pre = update.comps
    postcond, pre = clausify(postcond), clausify(pre)
    # updated symbols go to next-state form ...
    renaming = {}
    for sym in updated:
        renaming[sym] = new(sym)
    # ... while explicit old-state symbols revert to base form
    renaming.update((sym, old_of(sym))
                    for sym in used_symbols_clauses(postcond) if is_old(sym))
    return SemActionValue(updated, rename_clauses(postcond, renaming), pre)
def compose_updates(update1, axioms, update2):
    """Sequentially compose two updates, tracking trace annotations.

    Each update is a triple (updated symbols, transition clauses,
    precondition).  Symbols written by both updates are routed through
    fresh '__m_' mid-state symbols.  Annotations of the two transitions
    are composed via annot_op / my_annot_op.  Returns (new_updated,
    new_clauses, new_pre).
    """
    updated1, clauses1, pre1 = update1
    updated2, clauses2, pre2 = update2
    # make skolems of update2 distinct from those of update1
    clauses2 = rename_distinct(clauses2, clauses1)
    pre2 = rename_distinct(pre2, clauses1)
    us1 = set(updated1)
    us2 = set(updated2)
    mid = us1.intersection(us2)           # symbols written by both updates
    mid_ax = clauses_using_symbols(mid, axioms)
    # fresh names must avoid everything used in both transitions and preconditions
    used = used_symbols_clauses(and_clauses(clauses1, clauses2))
    used.update(symbols_clauses(pre1))
    used.update(symbols_clauses(pre2))
    rn = UniqueRenamer('__m_', used)
    map1 = dict()                         # update1: new(v) -> mid-state name
    map2 = dict()                         # update2: v -> new(v) or mid-state name
    for v in updated1:
        map2[v] = new(v)
    for mv in mid:
        mvf = rename(mv, rn)
        map1[new(mv)] = mvf
        map2[mv] = mvf
    clauses1 = rename_clauses(clauses1, map1)
    # compose annotations only when both sides carry one
    annot_op = lambda x, y: x.compose(y) if x is not None and y is not None else None
    new_clauses = and_clauses(clauses1,
                              rename_clauses(and_clauses(clauses2, mid_ax), map2),
                              annot_op=annot_op)
    new_updated = list(us1.union(us2))
    # keep track of post-state of assertion failure
    pre1 = and_clauses(pre1, diff_frame(updated1, updated2, new, axioms))
    # failure: update1 fails, or it succeeds and update2 fails
    temp = and_clauses(clauses1,
                       rename_clauses(and_clauses(pre2, mid_ax), map2),
                       annot_op=my_annot_op)
    new_pre = or_clauses(pre1, temp)
    return (new_updated, new_clauses, new_pre)
def get_small_model(clauses, sorts_to_minimize, relations_to_minimize, final_cond=None):
    """
    Return a HerbrandModel with a "small" model of clauses, or None if
    unsatisfiable (also when final_cond makes the problem unsat).

    sorts_to_minimize is a list of sorts, and relations_to_minimize is a
    list of relations.  The model minimization occurs in 2 ways:

    First, minimize universe size lexicographically according to the order
    of sorts_to_minimize.

    Second, minimize the number of positive entries in the relations
    according to the order of relations_to_minimize.
    """
    s = z3.Solver()
    s.add(clauses_to_z3(clauses))
    res = decide(s)
    if res == z3.unsat:
        return None
    if final_cond is not None:
        # the final condition is conjoined and must also be satisfiable
        s.add(clauses_to_z3(final_cond))
        res = decide(s)
        if res == z3.unsat:
            return None
    # for each sort/relation, find the least bound n that stays sat;
    # the successful bound remains pushed on the solver stack
    for x in chain(sorts_to_minimize, relations_to_minimize):
        for n in itertools.count(1):
            s.push()
            sc = size_constraint(x, n)
            s.add(formula_to_z3(sc))
            res = decide(s)
            if res == z3.sat:
                break
            else:
                s.pop()
    m = get_model(s)
    h = HerbrandModel(s, m, used_symbols_clauses(clauses))
    return h
def witness(self, node):
    """Return a witness term for `node`.

    If the node's formula already equates a variable with some term,
    that term is used as the witness.  Otherwise a fresh constant of the
    node's sort is created, constrained as the witness, and the node is
    split on equality with it.
    """
    for lit in node.fmla:
        if is_equality_lit(lit) and isinstance(lit.atom.args[0], Variable):
            term = lit.atom.args[1]
            self.add_witness_constraint(node, term)
            return term
    # no explicit witness: invent a constant unused anywhere in the graph
    taken = used_symbols_clauses(self.state)
    for other in self.all_nodes:
        taken.update(used_symbols_clause(other.fmla))
    nc = unused_constant(taken, node.sort)
    self.add_witness_constraint(node, nc)
    self.split(node, eq_lit(Variable('X', node.sort), nc))
    return nc
def witness(self, node):
    """Produce a witness term for `node`.

    Prefers a term the node's formula already equates with a variable;
    failing that, constructs a fresh constant of the node's sort, adds
    it as a witness constraint, and splits the node on it.
    """
    for lit in node.fmla:
        if is_equality_lit(lit) and isinstance(lit.atom.args[0], Variable):
            existing = lit.atom.args[1]
            self.add_witness_constraint(node, existing)
            return existing
    # gather every symbol in use so the new constant is genuinely fresh
    in_use = used_symbols_clauses(self.state)
    for n in self.all_nodes:
        in_use.update(used_symbols_clause(n.fmla))
    fresh = unused_constant(in_use, node.sort)
    self.add_witness_constraint(node, fresh)
    self.split(node, eq_lit(Variable('X', node.sort), fresh))
    return fresh
def check_fcs_in_state(mod, ag, post, fcs):
    """Check the failure conditions `fcs` in state `post` of analysis graph `ag`.

    When tracing or diagnosis is requested, searches for a small
    counterexample model; if found, a trace is built (preferring the
    failed condition's own annotation), shown in the GUI or printed, and
    the process exits.  Otherwise the conditions are checked with
    history.satisfy.  Returns True iff no condition failed.
    """
    history = ag.get_history(post)
    # model finder used by history.satisfy; shrinks only when diagnosing
    gmc = lambda cls, final_cond: itr.small_model_clauses(
        cls, final_cond, shrink=diagnose.get())
    axioms = im.module.background_theory()
    if opt_trace.get() or diagnose.get():
        clauses = history.post
        clauses = lut.and_clauses(clauses, axioms)
        ffcs = filter_fcs(fcs)
        model = itr.small_model_clauses(clauses, ffcs, shrink=True)
        if model is not None:
            failed = [c for c in ffcs if c.failed]
            # conjoin the failed conditions so the trace explains them
            mclauses = lut.and_clauses(*([clauses] + [c.cond() for c in failed]))
            vocab = lut.used_symbols_clauses(mclauses)
            handler = ivy_trace.Trace(mclauses, model, vocab)
            # prefer the failed condition's own (action, annotation) pair
            thing = failed[-1].get_annot()
            if thing is None:
                assert all(x is not None for x in history.actions)
                # work around a bug in ivy_interp
                actions = [im.module.actions[a] if isinstance(a, str) else a
                           for a in history.actions]
                action = act.Sequence(*actions)
                annot = clauses.annot
            else:
                action, annot = thing
            act.match_annotation(action, annot, handler)
            handler.end()
            ff = failed[0]
            # record the CTI formula only for conjecture checkers
            handler.is_cti = (lut.formula_to_clauses(ff.lf.formula)
                              if isinstance(ff, ConjChecker) else None)
            if not opt_trace.get():
                gui_art(handler)
            else:
                print str(handler)
            # counterexample reported; terminate the whole process
            exit(0)
        # NOTE(review): when tracing/diagnosing but no model is found,
        # the satisfy() branch below is skipped — confirm intended.
    else:
        res = history.satisfy(axioms, gmc, filter_fcs(fcs))
        if res is not None and diagnose.get():
            show_counterexample(ag, post, res)
    return not any(fc.failed for fc in fcs)
def minimize_conjecture(self, button=None, bound=None):
    """Shrink the currently selected facts to those needed for BMC validity.

    First runs BMC to look for a counterexample; if none is found within
    the bound, replays the executions symbolically, extracts an unsat
    core of the active facts against the final state, and keeps only the
    facts appearing in the core.  The result is offered to the user as a
    candidate conjecture.
    """
    import ivy_transrel
    import ivy_solver
    from proof import ProofGoal
    from ivy_logic_utils import Clauses, and_clauses, dual_clauses, used_symbols_clauses, negate
    from ivy_solver import unsat_core
    from logic_util import free_variables, substitute

    if self.bmc_conjecture(bound=bound):
        # found a BMC counter-example
        return
    with self.ui_parent.run_context():
        step_action = im.module.actions['ext']
        n_steps = self.current_bound
        # rebuild the bounded execution: init, initialize, then n 'ext' steps
        ag = self.parent.new_ag()
        with ag.context as ac:
            post = ac.new_state(ag.init_cond)
        if 'initialize' in im.module.actions:
            init_action = im.module.actions['initialize']
            post = ag.execute(init_action, None, None, 'initialize')
        for n in range(n_steps):
            post = ag.execute(step_action, None, None, 'ext')
        axioms = im.module.background_theory()
        post_clauses = and_clauses(post.clauses, axioms)
        used_names = (
            frozenset(x.name for x in il.sig.symbols.values()) |
            frozenset(x.name for x in used_symbols_clauses(post_clauses))
        )
        facts = self.get_active_facts()
        # skolems in the facts must not clash with names used by the post-state
        assert not any(
            c.is_skolem() and c.name in used_names
            for c in lu.used_constants(*facts)
        )
        core = unsat_core(Clauses(facts), post_clauses)
        if core is None:
            core = Clauses([])  ## can happen if we are proving true
        core_formulas = frozenset(core.fmlas)
        # keep only facts that participate in the unsat core
        self.set_facts([fact for fact in facts if fact in core_formulas])
        self.highlight_selected_facts()
        self.ui_parent.text_dialog("BMC found the following possible conjecture:",
                                   str(self.get_selected_conjecture()))
def minimize_conjecture(self, button=None, bound=None):
    """Reduce the selected facts to an unsat core sufficient for BMC.

    Runs BMC first; if no counterexample is found within the bound, the
    bounded execution is replayed, an unsat core of the active facts
    against the final state is computed, and only the facts in the core
    are retained and proposed to the user as a conjecture.
    """
    import ivy_transrel
    import ivy_solver
    from proof import ProofGoal
    from ivy_logic_utils import Clauses, and_clauses, dual_clauses, used_symbols_clauses, negate
    from ivy_solver import unsat_core
    from logic_util import free_variables, substitute

    if self.bmc_conjecture(bound=bound):
        # found a BMC counter-example
        return
    with self.ui_parent.run_context():
        step_action = im.module.actions['ext']
        n_steps = self.current_bound
        # reconstruct the bounded run: init state, initialize, n 'ext' steps
        ag = self.parent.new_ag()
        with ag.context as ac:
            post = ac.new_state(ag.init_cond)
        if 'initialize' in im.module.actions:
            init_action = im.module.actions['initialize']
            post = ag.execute(init_action, None, None, 'initialize')
        for n in range(n_steps):
            post = ag.execute(step_action, None, None, 'ext')
        axioms = im.module.background_theory()
        post_clauses = and_clauses(post.clauses, axioms)
        used_names = (frozenset(x.name for x in il.sig.symbols.values()) |
                      frozenset(x.name
                                for x in used_symbols_clauses(post_clauses)))
        facts = self.get_active_facts()
        # fact skolems must not collide with names in the post-state
        assert not any(c.is_skolem() and c.name in used_names
                       for c in lu.used_constants(*facts))
        core = unsat_core(Clauses(facts), post_clauses)
        if core is None:
            core = Clauses([])  ## can happen if we are proving true
        core_formulas = frozenset(core.fmlas)
        # retain only the facts that the core needs
        self.set_facts([fact for fact in facts if fact in core_formulas])
        self.highlight_selected_facts()
        self.ui_parent.text_dialog(
            "BMC found the following possible conjecture:",
            str(self.get_selected_conjecture()))
def compose_updates(update1, axioms, update2):
    """Sequentially compose two updates, composing trace annotations.

    Each update is (updated symbols, transition clauses, precondition).
    Shared updated symbols are routed through fresh '__m_' mid-state
    names.  Returns (new_updated, new_clauses, new_pre).
    """
    updated1, clauses1, pre1 = update1
    updated2, clauses2, pre2 = update2
    # make skolems of update2 distinct from those of update1
    clauses2 = rename_distinct(clauses2, clauses1)
    pre2 = rename_distinct(pre2, clauses1)
    us1 = set(updated1)
    us2 = set(updated2)
    mid = us1.intersection(us2)           # symbols written by both updates
    mid_ax = clauses_using_symbols(mid, axioms)
    used = used_symbols_clauses(and_clauses(clauses1, clauses2))
    rn = UniqueRenamer('__m_', used)
    map1 = dict()                         # update1: new(v) -> mid-state name
    map2 = dict()                         # update2: v -> new(v) or mid-state name
    for v in updated1:
        map2[v] = new(v)
    for mv in mid:
        mvf = rename(mv, rn)
        map1[new(mv)] = mvf
        map2[mv] = mvf
    clauses1 = rename_clauses(clauses1, map1)
    # compose annotations only when both sides carry one
    annot_op = lambda x, y: x.compose(y) if x is not None and y is not None else None
    new_clauses = and_clauses(clauses1,
                              rename_clauses(and_clauses(clauses2, mid_ax), map2),
                              annot_op=annot_op)
    new_updated = list(us1.union(us2))
    # keep track of post-state of assertion failure
    pre1 = and_clauses(pre1, diff_frame(updated1, updated2, None, new))
    # failure: update1 fails, or it succeeds and update2 fails
    temp = and_clauses(clauses1,
                       rename_clauses(and_clauses(pre2, mid_ax), map2),
                       annot_op=my_annot_op)
    new_pre = or_clauses(pre1, temp)
    return (new_updated, new_clauses, new_pre)
def check_vc(clauses, action, final_cond=None, rels_to_min=[], shrink=False, handler_class=None):
    """Check a verification condition given by `clauses` for `action`.

    Searches for a small counterexample model; if one exists, replays the
    action annotation through a trace handler and returns the handler,
    otherwise returns None.

    `final_cond` may be None, a single clauses object, or a list of
    failure condition objects exposing cond() and failed (the interface
    consumed by get_small_model).
    """
    model = slv.get_small_model(clauses,
                                lg.uninterpreted_sorts(),
                                rels_to_min,
                                final_cond=final_cond,
                                shrink=shrink)
    if model is not None:
        # BUG FIX: the failing conditions come from final_cond itself;
        # the previous code referenced an undefined name `ffcs`, which
        # raised NameError whenever final_cond was a list.
        failed = ([] if final_cond is None
                  else [final_cond] if not isinstance(final_cond, list)
                  else [c.cond() for c in final_cond if c.failed])
        mclauses = lut.and_clauses(*([clauses] + failed))
        vocab = lut.used_symbols_clauses(mclauses)
        handler = (handler_class(mclauses, model, vocab)
                   if handler_class is not None
                   else Trace(mclauses, model, vocab))
        act.match_annotation(action, clauses.annot, handler)
        handler.end()
        return handler
    return None
def interp_from_unsat_core(clauses1, clauses2, core, interpreted):
    """Build an interpolant candidate from an unsat core.

    Constants of the core that are local to clauses1 (or skolems) are
    generalized to fresh variables V0, V1, ... and the negated core is
    returned as simplified clauses.  Returns None if the core contains
    variables, since the interpolant would then need skolem constants.

    Fix: removed the unused local `used_syms`.
    """
    vars = used_variables_clauses(core)
    if vars:
        # interpolant would require skolem constants
        return None
    core_consts = used_constants_clauses(core)
    clauses2_consts = used_constants_clauses(clauses2)
    renaming = dict()
    i = 0
    for v in core_consts:
        if v not in clauses2_consts or v.is_skolem():  # and v not in interpreted:
            renaming[v] = Variable('V' + str(i), Constant(v).get_sort())
            i += 1
    renamed_core = substitute_constants_clauses(core, renaming)
    res = simplify_clauses(Clauses([Or(*[negate(c) for c in renamed_core.fmlas])]))
    return res
def forward_clauses(clauses, inflex):
    """Advance `clauses` one step by renaming every flexible symbol
    (everything except '=' and the symbols in `inflex`) to its
    next-state version."""
    subst = {}
    for sym in lu.used_symbols_clauses(clauses):
        if sym != '=' and sym not in inflex:
            subst[sym] = tr.new(sym)
    return lu.rename_clauses(clauses, subst)
def forward_clauses(clauses, inflex):
    """Rename all flexible symbols of `clauses` to their next-state
    versions, leaving '=' and the inflexible symbols alone."""
    subst = {sym: tr.new(sym)
             for sym in lu.used_symbols_clauses(clauses)
             if sym != '=' and sym not in inflex}
    return lu.rename_clauses(clauses, subst)
def bind_olds_clauses(clauses):
    """Rename each old-state symbol in `clauses` to its base version."""
    subst = {}
    for sym in used_symbols_clauses(clauses):
        if is_old(sym):
            subst[sym] = old_of(sym)
    return rename_clauses(clauses, subst)
def to_aiger(mod, ext_act):
    """Compile module `mod` into an AIGER circuit checking its invariants.

    Builds a single transition relation (init step or environment step),
    propositionally abstracts it over finite sorts, and encodes it as an
    and-inverter graph whose output '__fail' is true when an invariant or
    assertion fails in a reachable state.

    Returns (aiger, decoder, annot, cnsts, action, stvarset):
      aiger    - the Encoder holding the circuit
      decoder  - map from abstract circuit symbols back to formulas/symbols
      annot    - trace annotation of the transition relation
      cnsts    - set of constants used for quantifier instantiation
      action   - the combined init/step action
      stvarset - set of (pre-abstraction) state variables
    """
    erf = il.Symbol('err_flag', il.find_sort('bool'))
    errconds = []
    add_err_flag_mod(mod, erf, errconds)
    # we use a special state variable __init to indicate the initial state
    ext_acts = [mod.actions[x] for x in sorted(mod.public_actions)]
    ext_act = ia.EnvAction(*ext_acts)
    init_var = il.Symbol('__init', il.find_sort('bool'))
    init = add_err_flag(ia.Sequence(*([a for n, a in mod.initializers] + [ia.AssignAction(init_var, il.And())])), erf, errconds)
    # one step: clear the error flag, then either initialize or take an env step
    action = ia.Sequence(ia.AssignAction(erf, il.Or()), ia.IfAction(init_var, ext_act, init))
    # get the invariant to be proved, replacing free variables with
    # skolems. First, we apply any proof tactics.
    pc = ivy_proof.ProofChecker(mod.axioms, mod.definitions, mod.schemata)
    pmap = dict((lf.id, p) for lf, p in mod.proofs)
    conjs = []
    for lf in mod.labeled_conjs:
        if lf.id in pmap:
            proof = pmap[lf.id]
            subgoals = pc.admit_proposition(lf, proof)
            conjs.extend(subgoals)
        else:
            conjs.append(lf)
    invariant = il.And(*[il.drop_universals(lf.formula) for lf in conjs])
    skolemizer = lambda v: ilu.var_to_skolem('__', il.Variable(v.rep, v.sort))
    vs = ilu.used_variables_in_order_ast(invariant)
    sksubs = dict((v.rep, skolemizer(v)) for v in vs)
    invariant = ilu.substitute_ast(invariant, sksubs)
    invar_syms = ilu.used_symbols_ast(invariant)
    # compute the transition relation
    stvars, trans, error = action.update(mod, None)
    annot = trans.annot
    # inductive hypotheses: the conjectures hold in non-initial states
    indhyps = [il.close_formula(il.Implies(init_var, lf.formula)) for lf in mod.labeled_conjs]
    # save the original symbols for trace
    orig_syms = ilu.used_symbols_clauses(trans)
    orig_syms.update(ilu.used_symbols_ast(invariant))
    # TODO: get the axioms (or maybe only the ground ones?)
#    axioms = mod.background_theory()
#    rn = dict((sym,tr.new(sym)) for sym in stvars)
#    next_axioms = ilu.rename_clauses(axioms,rn)
#    return ilu.and_clauses(axioms,next_axioms)
    # collect the function symbols appearing anywhere (for axiom instantiation)
    funs = set()
    for df in trans.defs:
        funs.update(ilu.used_symbols_ast(df.args[1]))
    for fmla in trans.fmlas:
        funs.update(ilu.used_symbols_ast(fmla))
#    funs = ilu.used_symbols_clauses(trans)
    funs.update(ilu.used_symbols_ast(invariant))
    funs = set(sym for sym in funs if il.is_function_sort(sym.sort))
    iu.dbg('[str(fun) for fun in funs]')
    # Propositionally abstract

    # step 1: get rid of definitions of non-finite symbols by turning
    # them into constraints
    new_defs = []
    new_fmlas = []
    for df in trans.defs:
        if len(df.args[0].args) == 0 and is_finite_sort(df.args[0].sort):
            new_defs.append(df)
        else:
            fmla = df.to_constraint()
            new_fmlas.append(fmla)
    trans = ilu.Clauses(new_fmlas + trans.fmlas, new_defs)

    # step 2: get rid of ite's over non-finite sorts, by introducing constraints
    cnsts = []
    new_defs = [elim_ite(df, cnsts) for df in trans.defs]
    new_fmlas = [elim_ite(fmla, cnsts) for fmla in trans.fmlas]
    trans = ilu.Clauses(new_fmlas + cnsts, new_defs)

    # step 3: eliminate quantfiers using finite instantiations
    from_asserts = il.And(*[il.Equals(x, x) for x in ilu.used_symbols_ast(il.And(*errconds)) if tr.is_skolem(x) and not il.is_function_sort(x.sort)])
    iu.dbg('from_asserts')
    invar_syms.update(ilu.used_symbols_ast(from_asserts))
    sort_constants = mine_constants(mod, trans, il.And(invariant, from_asserts))
    sort_constants2 = mine_constants2(mod, trans, invariant)
    print '\ninstantiations:'
    trans, invariant = Qelim(sort_constants, sort_constants2)(trans, invariant, indhyps)

    # step 4: instantiate the axioms using patterns
    # We have to condition both the transition relation and the
    # invariant on the axioms, so we define a boolean symbol '__axioms'
    # to represent the axioms.
    axs = instantiate_axioms(mod, stvars, trans, invariant, sort_constants, funs)
    ax_conj = il.And(*axs)
    ax_var = il.Symbol('__axioms', ax_conj.sort)
    ax_def = il.Definition(ax_var, ax_conj)
    invariant = il.Implies(ax_var, invariant)
    trans = ilu.Clauses(trans.fmlas + [ax_var], trans.defs + [ax_def])

    # step 5: eliminate all non-propositional atoms by replacing with fresh booleans
    # An atom with next-state symbols is converted to a next-state symbol if possible
    stvarset = set(stvars)
    prop_abs = dict()        # map from atoms to proposition variables
    global prop_abs_ctr      # sigh -- python lameness
    prop_abs_ctr = 0         # counter for fresh symbols
    new_stvars = []          # list of fresh symbols

    # get the propositional abstraction of an atom
    def new_prop(expr):
        res = prop_abs.get(expr, None)
        if res is None:
            prev = prev_expr(stvarset, expr, sort_constants)
            if prev is not None:
                # the pre-state version of this atom is already abstract:
                # make the new atom the next-state version of its variable
                pva = new_prop(prev)
                res = tr.new(pva)
                new_stvars.append(pva)
                prop_abs[expr] = res  # prevent adding this again to new_stvars
            else:
                global prop_abs_ctr
                res = il.Symbol('__abs[{}]'.format(prop_abs_ctr), expr.sort)
                prop_abs[expr] = res
                prop_abs_ctr += 1
        return res

    # propositionally abstract an expression
    global mk_prop_fmlas
    mk_prop_fmlas = []
    def mk_prop_abs(expr):
        if il.is_quantifier(expr) or len(expr.args) > 0 and any(not is_finite_sort(a.sort) for a in expr.args):
            return new_prop(expr)
        return expr.clone(map(mk_prop_abs, expr.args))

    # apply propositional abstraction to the transition relation
    new_defs = map(mk_prop_abs, trans.defs)
    new_fmlas = [mk_prop_abs(il.close_formula(fmla)) for fmla in trans.fmlas]

    # find any immutable abstract variables, and give them a next definition
    def my_is_skolem(x):
        res = tr.is_skolem(x) and x not in invar_syms
        return res
    def is_immutable_expr(expr):
        res = not any(my_is_skolem(sym) or tr.is_new(sym) or sym in stvarset for sym in ilu.used_symbols_ast(expr))
        return res
    for expr, v in prop_abs.iteritems():
        if is_immutable_expr(expr):
            new_stvars.append(v)
            print 'new state: {}'.format(expr)
            new_defs.append(il.Definition(tr.new(v), v))
    trans = ilu.Clauses(new_fmlas + mk_prop_fmlas, new_defs)

    # apply propositional abstraction to the invariant
    invariant = mk_prop_abs(invariant)

    # create next-state symbols for atoms in the invariant (is this needed?)
    rn = dict((sym, tr.new(sym)) for sym in stvars)
    mk_prop_abs(ilu.rename_ast(invariant, rn))  # this is to pick up state variables from invariant

    # update the state variables by removing the non-finite ones and adding the fresh state booleans
    stvars = [sym for sym in stvars if is_finite_sort(sym.sort)] + new_stvars

    # For each state var, create a variable that corresponds to the input of its latch
    # Also, havoc all the state bits except the init flag at the initial time. This
    # is needed because in aiger, all latches start at 0!
    def fix(v):
        return v.prefix('nondet')
    def curval(v):
        return v.prefix('curval')
    def initchoice(v):
        return v.prefix('initchoice')
    stvars_fix_map = dict((tr.new(v), fix(v)) for v in stvars)
    stvars_fix_map.update((v, curval(v)) for v in stvars if v != init_var)
    trans = ilu.rename_clauses(trans, stvars_fix_map)
    new_defs = trans.defs + [il.Definition(ilu.sym_inst(tr.new(v)), ilu.sym_inst(fix(v))) for v in stvars]
    new_defs.extend(il.Definition(curval(v), il.Ite(init_var, v, initchoice(v))) for v in stvars if v != init_var)
    trans = ilu.Clauses(trans.fmlas, new_defs)

    # Turn the transition constraint into a definition
    cnst_var = il.Symbol('__cnst', il.find_sort('bool'))
    new_defs = list(trans.defs)
    new_defs.append(il.Definition(tr.new(cnst_var), fix(cnst_var)))
    new_defs.append(il.Definition(fix(cnst_var), il.Or(cnst_var, il.Not(il.And(*trans.fmlas)))))
    stvars.append(cnst_var)
    trans = ilu.Clauses([], new_defs)

    # Input are all the non-defined symbols. Output indicates invariant is false.
    def_set = set(df.defines() for df in trans.defs)
    def_set.update(stvars)
    used = ilu.used_symbols_clauses(trans)
    used.update(ilu.symbols_ast(invariant))
    inputs = [sym for sym in used if sym not in def_set and not il.is_interpreted_symbol(sym)]
    fail = il.Symbol('__fail', il.find_sort('bool'))
    outputs = [fail]

    # make an aiger
    aiger = Encoder(inputs, stvars, outputs)
    comb_defs = [df for df in trans.defs if not tr.is_new(df.defines())]
    invar_fail = il.Symbol('invar__fail', il.find_sort('bool'))  # make a name for invariant fail cond
    comb_defs.append(il.Definition(invar_fail, il.Not(invariant)))
    aiger.deflist(comb_defs)
    for df in trans.defs:
        if tr.is_new(df.defines()):
            aiger.set(tr.new_of(df.defines()), aiger.eval(df.args[1]))
    # fail iff past init, transition constraint holds, and either the
    # invariant fails or an assertion error was flagged
    miter = il.And(init_var, il.Not(cnst_var), il.Or(invar_fail, il.And(fix(erf), il.Not(fix(cnst_var)))))
    aiger.set(fail, aiger.eval(miter))

    # make a decoder for the abstract propositions
    decoder = dict((y, x) for x, y in prop_abs.iteritems())
    for sym in aiger.inputs + aiger.latches:
        if sym not in decoder and sym in orig_syms:
            decoder[sym] = sym
    cnsts = set(sym for syms in sort_constants.values() for sym in syms)
    return aiger, decoder, annot, cnsts, action, stvarset
def get_small_model(clauses, sorts_to_minimize, relations_to_minimize, final_cond=None, shrink=True):
    """
    Return a HerbrandModel with a "small" model of clauses, or None when
    no final condition is satisfiable.

    sorts_to_minimize is a list of sorts, and relations_to_minimize is a
    list of relations.  The model minimization occurs in 2 ways:

    First, minimize universe size lexicographically according to the order
    of sorts_to_minimize.

    Second, minimize the number of positive entries in the relations
    according to the order of relations_to_minimize.

    The parameter final_cond can be a clauses object, or a list of
    objects with the following interface:

        cond():   returns a final condition as a clauses object
        start():  called before starting
        sat():    called if sat, return True if should ignore result
        unsat():  called if unsat
        assume(): if returns true, assume rather than check
    """
    s = z3.Solver()
    s.add(clauses_to_z3(clauses))
#    res = decide(s)
#    if res == z3.unsat:
#        return None
    if final_cond is not None:
        if isinstance(final_cond, list):
            # Try each condition in turn; stop at the first satisfiable
            # one (unless its sat() callback says to ignore it).  On a
            # break, the condition is left pushed so the model includes it.
            res = z3.unsat
            for fc in final_cond:
                fc.start()
                if fc.assume():
                    s.add(clauses_to_z3(fc.cond()))
                else:
                    s.push()
                    s.add(clauses_to_z3(fc.cond()))
                    res = decide(s)
                    if res != z3.unsat:
                        if fc.sat():
                            res = z3.unsat
                        else:
                            break
                    else:
                        fc.unsat()
                    s.pop()
        else:
            s.add(clauses_to_z3(final_cond))
            res = decide(s)
    else:
        res = decide(s)
    if res == z3.unsat:
        return None
    if shrink:
        print "searching for a small model...",
        sys.stdout.flush()
        # for each sort/relation, find the least bound n that stays sat;
        # the successful bound remains pushed on the solver stack
        for x in chain(sorts_to_minimize, relations_to_minimize):
            for n in itertools.count(1):
                s.push()
                sc = size_constraint(x, n)
                s.add(formula_to_z3(sc))
                res = decide(s)
                if res == z3.sat:
                    break
                else:
                    s.pop()
        print "done"
    m = get_model(s)
#    print "model = {}".format(m)
#    f = open("ivy.smt2","w")
#    f.write(s.to_smt2())
#    f.close()
    h = HerbrandModel(s, m, used_symbols_clauses(clauses))
    return h
def bind_olds_clauses(clauses):
    """Replace every old-state symbol of `clauses` with its base version."""
    subst = dict((sym, old_of(sym))
                 for sym in used_symbols_clauses(clauses)
                 if is_old(sym))
    return rename_clauses(clauses, subst)