def queries_equivalent(q1: Query, q2: Query,
        state_vars: [EVar],
        extern_funcs: {str: TFunc},
        assumptions: Exp = ETRUE):
    """Determine whether two queries always return the same result.

    This function also checks that the two queries have semantically
    equivalent preconditions.  Checking the preconditions is necessary to
    ensure semantic equivalence of the queries: a query object should be
    interpreted to mean "if my preconditions hold then I compute and return
    my body expression".  If two queries do not have semantically equivalent
    preconditions, then there might be cases where one is obligated to return
    a value and the other has no defined behavior.
    """
    with task("checking query equivalence", q1=q1.name, q2=q2.name):
        if q1.ret.type != q2.ret.type:
            return False
        q1args = dict(q1.args)
        q2args = dict(q2.args)
        if q1args != q2args:
            return False

        checker = solver_for_context(
            context=RootCtx(
                state_vars=state_vars,
                args=[EVar(a).with_type(t) for (a, t) in q1.args],
                funcs=extern_funcs),
            assumptions=assumptions)

        q1a = EAll(q1.assumptions)
        q2a = EAll(q2.assumptions)
        return checker.valid(EEq(q1a, q2a)) and checker.valid(
            EImplies(q1a, EEq(q1.ret, q2.ret)))
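# A minimal usage sketch, not part of the implementation above.  It assumes
# `q_a` and `q_b` are Query nodes built elsewhere and that `impl` exposes
# `abstract_state`, `extern_funcs`, and `abstract_invariants` (the same
# attributes `_add_subquery` below reads); none of these names are defined
# in this listing.
def _example_dedup_check(impl, q_a, q_b):
    same = queries_equivalent(
        q_a, q_b,
        state_vars=impl.abstract_state,
        extern_funcs=impl.extern_funcs,
        assumptions=EAll(impl.abstract_invariants))
    if same:
        event("{} can be reused in place of {}".format(q_b.name, q_a.name))
    return same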
def queries_equivalent(q1: Query, q2: Query,
        state_vars: [EVar],
        extern_funcs: {str: TFunc},
        assumptions: Exp = T):
    with task("checking query equivalence", q1=q1.name, q2=q2.name):
        if q1.ret.type != q2.ret.type:
            return False
        q1args = dict(q1.args)
        q2args = dict(q2.args)
        if q1args != q2args:
            return False

        args = FrozenDict(q1args)
        key = (args, assumptions)
        checker = _qe_cache.get(key)
        if checker is None:
            checker = ModelCachingSolver(
                vars=list(itertools.chain(
                    state_vars,
                    (EVar(v).with_type(t) for v, t in args.items()))),
                funcs=extern_funcs,
                assumptions=assumptions)
            _qe_cache[key] = checker

        q1a = EAll(q1.assumptions)
        q2a = EAll(q2.assumptions)
        return checker.valid(EEq(q1a, q2a)) and checker.valid(
            EImplies(q1a, EEq(q1.ret, q2.ret)))
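# The variant above memoizes its solver.  A plausible (hypothetical) backing
# declaration is a module-level dict keyed on the frozen argument map plus
# the assumption expression, so that queries with the same signature and the
# same invariants share one ModelCachingSolver:
_qe_cache = {}  # (FrozenDict of arg name -> type, assumptions Exp) -> solver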
def queries_equivalent(q1: Query, q2: Query):
    if q1.ret.type != q2.ret.type:
        return False
    q1args = dict(q1.args)
    q2args = dict(q2.args)
    if q1args != q2args:
        return False
    q1a = EAll(q1.assumptions)
    q2a = EAll(q2.assumptions)
    return valid(EImplies(EAny([q1a, q2a]), EEq(q1.ret, q2.ret)))
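# How this older check differs from the versions above (illustrative only):
# if q1 and q2 both return `x` but q1 assumes `x > 0` while q2 assumes
# `x < 0`, then EImplies(EAny([q1a, q2a]), EEq(q1.ret, q2.ret)) is valid and
# this version reports the queries as equivalent, even though their
# preconditions are not.  The versions above close that gap by additionally
# requiring checker.valid(EEq(q1a, q2a)).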
def can_elim_vars(spec: Exp, assumptions: Exp, vs: [EVar]):
    """Does any execution of `spec` actually depend on any of `vs`?

    It is possible for a variable to appear in an expression like `spec`
    without affecting its value.  This function uses the solver to determine
    whether any of the given variables can affect the output of `spec`; it
    returns True only when they cannot, i.e. when the variables can safely be
    eliminated.
    """
    spec = strip_EStateVar(spec)
    sub = {v.id: fresh_var(v.type) for v in vs}
    return valid(EImplies(
        EAll([assumptions, subst(assumptions, sub)]),
        EEq(spec, subst(spec, sub))))
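# A small sketch of what the check means, using hypothetical variables; it
# assumes EVar, EBinOp, INT, and ETRUE are importable in this module, as they
# are elsewhere in this listing:
def _example_can_elim():
    x = EVar("x").with_type(INT)
    y = EVar("y").with_type(INT)
    # x + (y - y) has the same value for every choice of y, so y can be
    # eliminated even though it appears syntactically...
    spec = EBinOp(x, "+", EBinOp(y, "-", y).with_type(INT)).with_type(INT)
    assert can_elim_vars(spec, ETRUE, [y])
    # ...but x genuinely affects the result, so it cannot be.
    assert not can_elim_vars(spec, ETRUE, [x])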
def _add_subquery(self, sub_q: Query, used_by: Stm) -> Stm:
    """Add a query that helps maintain some other state.

    Parameters:
        sub_q   - the specification of the helper query
        used_by - the statement that calls `sub_q`

    If a query already exists that is equivalent to `sub_q`, this method
    returns `used_by` rewritten to use the existing query and does not add
    the query to the implementation.  Otherwise it returns `used_by`
    unchanged (after adding `sub_q` as a new query).
    """
    with task("adding query", query=sub_q.name):
        sub_q = shallow_copy(sub_q)

        with task("checking whether we need more handle assumptions"):
            new_a = implicit_handle_assumptions(
                reachable_handles_at_method(self.spec, sub_q))
            if not valid(EImplies(EAll(sub_q.assumptions), EAll(new_a))):
                event("we do!")
                sub_q.assumptions = list(
                    itertools.chain(sub_q.assumptions, new_a))

        with task("repairing state var boundaries"):
            extra_available_state = [
                e for v, e in self._concretization_functions]
            sub_q.ret = repair_well_formedness(
                strip_EStateVar(sub_q.ret),
                self.context_for_method(sub_q),
                extra_available_state)

        with task("simplifying"):
            orig_a = sub_q.assumptions
            orig_a_size = sum(a.size() for a in sub_q.assumptions)
            orig_ret_size = sub_q.ret.size()
            sub_q.assumptions = tuple(
                simplify_or_ignore(a) for a in sub_q.assumptions)
            sub_q.ret = simplify(sub_q.ret)
            a_size = sum(a.size() for a in sub_q.assumptions)
            ret_size = sub_q.ret.size()
            event("|assumptions|: {} -> {}".format(orig_a_size, a_size))
            event("|ret|: {} -> {}".format(orig_ret_size, ret_size))

            if a_size > orig_a_size:
                print("NO, BAD SIMPLIFICATION")
                print("original")
                for a in orig_a:
                    print("  - {}".format(pprint(a)))
                print("simplified")
                for a in sub_q.assumptions:
                    print("  - {}".format(pprint(a)))
                assert False

        state_vars = self.abstract_state
        funcs = self.extern_funcs
        qq = find_one(
            self.query_specs,
            lambda qq: dedup_queries.value and queries_equivalent(
                qq, sub_q,
                state_vars=state_vars,
                extern_funcs=funcs,
                assumptions=EAll(self.abstract_invariants)))

        if qq is not None:
            event("subgoal {} is equivalent to {}".format(sub_q.name, qq.name))
            arg_reorder = [[x[0] for x in sub_q.args].index(a)
                           for (a, t) in qq.args]

            class Repl(BottomUpRewriter):
                def visit_ECall(self, e):
                    args = tuple(self.visit(a) for a in e.args)
                    if e.func == sub_q.name:
                        args = tuple(args[idx] for idx in arg_reorder)
                        return ECall(qq.name, args).with_type(e.type)
                    else:
                        return ECall(e.func, args).with_type(e.type)

            used_by = Repl().visit(used_by)
        else:
            self.add_query(sub_q)

        return used_by
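# Worked illustration of the argument-reordering step above, using
# hypothetical parameter lists rather than real Query nodes:
def _example_arg_reorder():
    sub_q_args = [("x", INT), ("y", BOOL)]  # new subgoal's parameters
    qq_args = [("y", BOOL), ("x", INT)]     # equivalent existing query's parameters
    arg_reorder = [[x[0] for x in sub_q_args].index(a) for (a, t) in qq_args]
    assert arg_reorder == [1, 0]
    # Repl then turns a call sub_q(e0, e1) inside `used_by` into qq(e1, e0),
    # permuting the arguments to match qq's signature.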
def possibly_useful_nonrecursive(solver, e: Exp, context: Context,
        pool=RUNTIME_POOL,
        assumptions: Exp = ETRUE,
        ops: [Op] = ()) -> bool:
    """Heuristic filter to ignore expressions that are almost certainly useless."""
    state_vars = OrderedSet(v for v, p in context.vars() if p == STATE_POOL)
    args = OrderedSet(v for v, p in context.vars() if p == RUNTIME_POOL)
    assumptions = EAll([assumptions, context.path_condition()])
    at_runtime = pool == RUNTIME_POOL

    h = extension_handler(type(e))
    if h is not None:
        res = h.possibly_useful(e, context, pool, assumptions, ops, solver)
        if not res:
            return res

    if isinstance(e, EStateVar) and not free_vars(e.e):
        return No("constant value in state position")
    if (isinstance(e, EDropFront) or isinstance(e, EDropBack)) and not at_runtime:
        return No("EDrop* in state position")
    if not allow_big_sets.value and isinstance(e, EFlatMap) and not at_runtime:
        return No("EFlatMap in state position")
    if (not allow_int_arithmetic_state.value and not at_runtime
            and isinstance(e, EBinOp) and e.type == INT):
        return No("integer arithmetic in state position")
    if is_collection(e.type) and not is_scalar(e.type.elem_type):
        return No("collection of nonscalar: e {}\n elem_type: {}\n".format(
            e, e.type.elem_type))
    if isinstance(e.type, TMap) and not is_scalar(e.type.k):
        return No("bad key type {}".format(pprint(e.type.k)))
    if isinstance(e.type, TMap) and isinstance(e.type.v, TMap):
        return No("map to map")

    # This check is probably a bad idea: whether `the` is legal may depend on
    # the context that the expression is embedded within, so we can't skip it
    # during synthesis just because it looks invalid now.
    # if isinstance(e, EUnaryOp) and e.op == UOp.The:
    #     len = EUnaryOp(UOp.Length, e.e).with_type(INT)
    #     if not valid(EImplies(assumptions, EBinOp(len, "<=", ENum(1).with_type(INT)).with_type(BOOL))):
    #         return No("illegal application of 'the': could have >1 elems")

    if (not at_runtime and isinstance(e, EBinOp) and e.op == "-"
            and is_collection(e.type)):
        return No("collection subtraction in state position")
    # if not at_runtime and isinstance(e, ESingleton):
    #     return No("singleton in state position")
    if (not allow_nonzero_state_constants.value and not at_runtime
            and isinstance(e, ENum) and e.val != 0):
        return No("nonzero integer constant in state position")
    if (not allow_binop_state.value and at_runtime and isinstance(e, EStateVar)
            and isinstance(e.e, EBinOp)
            and is_scalar(e.e.e1.type) and is_scalar(e.e.e2.type)):
        return No("constant-time binary operator {!r} in state position".format(
            e.e.op))
    if (not allow_conditional_state.value and not at_runtime
            and isinstance(e, ECond)):
        return No("conditional in state position")
    if isinstance(e, EMakeMap2) and isinstance(e.e, EEmptyList):
        return No("trivially empty map")
    if isinstance(e, EMakeMap2) and isinstance(e.e, ESingleton):
        return No("really tiny map")

    if not at_runtime and (isinstance(e, EArgMin) or isinstance(e, EArgMax)):
        # Cozy has no way to efficiently implement mins/maxes when more than
        # one element may leave the collection.
        from cozy.state_maintenance import mutate
        for op in ops:
            elems = e.e
            elems_prime = mutate(elems, op.body)
            formula = EAll([assumptions] + list(op.assumptions) + [
                EGt(ELen(EBinOp(elems, "-", elems_prime).with_type(elems.type)), ONE)])
            if solver.satisfiable(formula):
                return No("more than one element might be removed during {}".format(
                    op.name))

    if not allow_peels.value and not at_runtime and isinstance(e, EFilter):
        # catch "peels": removal of zero or one elements
        if solver.valid(EImplies(
                assumptions,
                ELe(ELen(EFilter(
                    e.e,
                    ELambda(e.predicate.arg, ENot(e.predicate.body))).with_type(e.type)), ONE))):
            return No("filter is a peel")

    if (not allow_big_maps.value and not at_runtime
            and isinstance(e, EMakeMap2) and is_collection(e.type.v)):
        all_collections = [sv for sv in state_vars if is_collection(sv.type)]
        total_size = ENum(0).with_type(INT)
        for c in all_collections:
            total_size = EBinOp(
                total_size, "+",
                EUnaryOp(UOp.Length, c).with_type(INT)).with_type(INT)
        my_size = EUnaryOp(UOp.Length, EFlatMap(
            EUnaryOp(UOp.Distinct, e.e).with_type(e.e.type),
            e.value_function).with_type(e.type.v)).with_type(INT)
        s = EImplies(assumptions,
                     EBinOp(total_size, ">=", my_size).with_type(BOOL))
        if not solver.valid(s):
            return No("non-polynomial-sized map")

    return True
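# A hypothetical usage sketch: an enumerator would call the filter above as a
# cheap pre-check before spending verification effort on a candidate.  The
# solver construction mirrors the ModelCachingSolver call earlier in this
# listing, but the exact arguments and the surrounding enumeration loop are
# assumptions, not part of the real implementation:
def _example_candidate_filter(candidate, context, state_invariants, ops):
    solver = ModelCachingSolver(vars=[], funcs={},
                                assumptions=EAll(state_invariants))
    verdict = possibly_useful_nonrecursive(
        solver, candidate, context,
        pool=STATE_POOL,
        assumptions=EAll(state_invariants),
        ops=ops)
    if not verdict:
        # No(...) is falsy and carries a human-readable rejection reason.
        event("rejecting {}: {}".format(pprint(candidate), verdict))
    return bool(verdict)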