Example #1
    def typecheck(self, e : Exp, typecheck, report_err):
        """Typecheck expression `e`.

        This function must write a type to `e.type` or call `report_err` to
        indicate a type error.  It is allowed to do both.

        The `typecheck` parameter should be used to make a recursive call to
        typecheck child nodes.
        """
        from cozy.typecheck import is_scalar
        if isinstance(e, EMakeMaxTreeMultiset):
            assert is_scalar(e.e.type.elem_type)
            typecheck(e.e)
            e.type = TMaxTreeMultiset(e.e.type.elem_type)
        elif isinstance(e, EMakeMinTreeMultiset):
            assert is_scalar(e.e.type.elem_type)
            typecheck(e.e)
            e.type = TMinTreeMultiset(e.e.type.elem_type)
        elif isinstance(e, ETreeMultisetPeek):
            typecheck(e.e)
            typecheck(e.index)
            ok = True
            if not (isinstance(e.e.type, (TMaxTreeMultiset, TMinTreeMultiset)) and e.index.type == INT):
                report_err(e, "cannot peek a non-ordered")
                ok = False
            if ok:
                e.type = e.e.type.elem_type
        elif isinstance(e, ETreeMultisetElems):
            typecheck(e.e)
            if isinstance(e.e.type, TMinTreeMultiset) or isinstance(e.e.type, TMaxTreeMultiset):
                e.type = TList(e.e.type.elem_type)
            else:
                report_err(e, "cannot get ordered elems of non-ordered")
        else:
            raise NotImplementedError(e)
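
A minimal, self-contained sketch of the typecheck contract described in the docstring above (toy AST classes, not Cozy's real nodes): the checker must either set `e.type` or call `report_err`, and it recurses through the `typecheck` callback it is handed.

class IntLit:
    def __init__(self, val):
        self.val = val
        self.type = None

class Neg:
    def __init__(self, e):
        self.e = e
        self.type = None

def toy_typecheck(e, typecheck, report_err):
    # Same contract as above: write e.type or report an error; recurse via
    # the supplied `typecheck` callback rather than calling ourselves.
    if isinstance(e, IntLit):
        e.type = "Int"
    elif isinstance(e, Neg):
        typecheck(e.e)
        if e.e.type == "Int":
            e.type = "Int"
        else:
            report_err(e, "negation of non-int")
    else:
        raise NotImplementedError(e)

errors = []
def check(e):
    toy_typecheck(e, check, lambda node, msg: errors.append(msg))

expr = Neg(IntLit(3))
check(expr)
print(expr.type, errors)   # Int []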
Example #2
 def construct_concrete(self, t: Type, e: Exp, out: Exp):
     """
     Construct a value of type `t` from the expression `e` and store it in
     lvalue `out`.
     """
     if hasattr(t, "construct_concrete"):
         return t.construct_concrete(e, out)
     elif isinstance(t, TBag) or isinstance(t, TList):
         assert out not in free_vars(e)
         x = self.fv(t.t, "x")
         return SSeq(self.initialize_native_list(out),
                     SForEach(x, e, SCall(out, "add", [x])))
     elif isinstance(t, TSet):
         if isinstance(e, EUnaryOp) and e.op == UOp.Distinct:
             return self.construct_concrete(t, e.e, out)
         x = self.fv(t.t, "x")
         return SSeq(self.initialize_native_set(out),
                     SForEach(x, e, SCall(out, "add", [x])))
     elif isinstance(t, TMap):
         return SSeq(self.initialize_native_map(out),
                     self.construct_map(t, e, out))
     elif isinstance(t, THandle):
         return SEscape("{indent}{lhs} = {rhs};\n", ["lhs", "rhs"],
                        [out, e])
     elif is_scalar(t):
         return SEscape("{indent}{lhs} = {rhs};\n", ["lhs", "rhs"],
                        [out, e])
     else:
         h = extension_handler(type(t))
         if h is not None:
             return h.codegen(e, self.state_exps, out=out)
         raise NotImplementedError(t, e, out)
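
The method above follows a fixed dispatch order: a `construct_concrete` hook on the type itself, then the built-in cases, then the extension handler. A toy sketch of that ordering (hypothetical types, not Cozy's code generator):

class TFancy:
    # A toy extension type that knows how to construct itself.
    def construct_concrete(self, e, out):
        return "{} = make_fancy({});".format(out, e)

def construct_concrete(t, e, out):
    if hasattr(t, "construct_concrete"):
        return t.construct_concrete(e, out)   # type-provided hook wins
    if t in ("Int", "Bool"):                  # stand-in for is_scalar(t)
        return "{} = {};".format(out, e)      # scalars become plain assignment
    raise NotImplementedError(t)

print(construct_concrete(TFancy(), "x + 1", "result"))   # result = make_fancy(x + 1);
print(construct_concrete("Int", "42", "n"))              # n = 42;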
Example #3
 def visit_SAssign(self, s):
     if is_scalar(s.rhs.type):
         self.write_stmt(self.visit(s.lhs), " = ", self.visit(s.rhs), ";")
     else:
         v = self.fv(s.lhs.type)
         self.declare(v, s.rhs)
         self.write_stmt(self.visit(s.lhs), " = ",
                         self.visit(EMove(v).with_type(v.type)), ";")
Example #4
 def declare(self, v: EVar, initial_value: Exp = None):
     if initial_value is not None and is_scalar(v.type):
         iv = self.visit(initial_value)
         self.write_stmt(self.visit(v.type, v.id), " = ", iv, ";")
     else:
         self.write_stmt(self.visit(v.type, v.id), ";")
         if initial_value is not None:
             self.visit(self.construct_concrete(v.type, initial_value, v))
Example #5
 def _eq(self, e1, e2):
     if not self.boxed and self.is_primitive(e1.type):
         return self.visit(
             EEscape("({e1} == {e2})", ("e1", "e2"),
                     (e1, e2)).with_type(BOOL))
     if is_scalar(e1.type):
         return self.visit(
             EEscape("java.util.Objects.equals({e1}, {e2})", ["e1", "e2"],
                     [e1, e2]).with_type(BOOL))
     return super()._eq(e1, e2)
Example #6
File: cxx.py  Project: uwplse/cozy
 def compute_hash_1(self, hc: Exp, e : Exp) -> Stm:
     if is_scalar(e.type):
         return SAssign(hc, self.compute_hash_scalar(e))
     elif isinstance(e.type, TArray):
         x = fresh_var(e.type.elem_type, "x")
         s = SSeq(SAssign(hc, ZERO.with_type(hc.type)),
                  SForEach(x, e,
                      SAssign(hc, EEscape("({hc} * 31) ^ ({h})", ("hc", "h"),
                                          (hc, self.compute_hash_scalar(x))).with_type(INT))))
         return s
     else:
         raise NotImplementedError("can't compute hash for type {}".format(e.type))
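
The array branch above emits code that folds element hashes together as `(hc * 31) ^ h`, starting from zero. The same fold in plain Python (the element hashes below are arbitrary placeholders):

def combine_hashes(element_hashes):
    # hc starts at zero; each element hash is mixed in as (hc * 31) ^ h.
    hc = 0
    for h in element_hashes:
        hc = (hc * 31) ^ h
    return hc

print(combine_hashes([11, 22, 33]))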
Example #7
    def typecheck(self, e: Exp, typecheck, report_err):
        """Typecheck expression `e`.

        This function must write a type to `e.type` or call `report_err` to
        indicate a type error.  It is allowed to do both.

        The `typecheck` parameter should be used to make a recursive call to
        typecheck child nodes.
        """
        from cozy.typecheck import is_scalar
        if isinstance(e, EMakeMaxTreeMultiset):
            assert is_scalar(e.e.type.elem_type)
            typecheck(e.e)
            e.type = TMaxTreeMultiset(e.e.type.elem_type)
        elif isinstance(e, EMakeMinTreeMultiset):
            assert is_scalar(e.e.type.elem_type)
            typecheck(e.e)
            e.type = TMinTreeMultiset(e.e.type.elem_type)
        elif isinstance(e, ETreeMultisetPeek):
            typecheck(e.e)
            typecheck(e.index)
            ok = True
            if not (isinstance(e.e.type, (TMaxTreeMultiset, TMinTreeMultiset))
                    and e.index.type == INT):
                report_err(e, "cannot peek a non-ordered")
                ok = False
            if ok:
                e.type = e.e.type.elem_type
        elif isinstance(e, ETreeMultisetElems):
            typecheck(e.e)
            if isinstance(e.e.type, TMinTreeMultiset) or isinstance(
                    e.e.type, TMaxTreeMultiset):
                e.type = TList(e.e.type.elem_type)
            else:
                report_err(e, "cannot get ordered elems of non-ordered")
        else:
            raise NotImplementedError(e)
Example #8
 def compute_hash_1(self, hc: Exp, e: Exp) -> Stm:
     if is_scalar(e.type):
         return SAssign(hc, self.compute_hash_scalar(e))
     elif isinstance(e.type, TArray):
         x = fresh_var(e.type.elem_type, "x")
         s = SSeq(
             SAssign(hc, ZERO.with_type(hc.type)),
             SForEach(
                 x, e,
                 SAssign(
                     hc,
                     EEscape(
                         "({hc} * 31) ^ ({h})", ("hc", "h"),
                         (hc,
                          self.compute_hash_scalar(x))).with_type(INT))))
         return s
     else:
         raise NotImplementedError("can't compute hash for type {}".format(
             e.type))
Example #9
    def enumerate_core(self, context: Context, size: int, pool: Pool) -> [Exp]:
        """
        Arguments:
            context : a Context object describing the vars in scope
            size   : size to enumerate
            pool   : pool to enumerate

        Yields all expressions of the given size legal in the given context and
        pool.
        """

        if size < 0:
            return

        if size == 0:
            for (e, p) in LITERALS:
                if p == pool:
                    yield e
            for (v, p) in context.vars():
                if p == pool:
                    yield v
                for t in all_types(v):
                    yield construct_value(t)
            for (e, ctx, p) in self.hints:
                if p == pool and ctx.alpha_equivalent(context):
                    yield context.adapt(e, ctx)
                for t in all_types(e):
                    yield construct_value(t)
            return

        yield from self.heuristic_enumeration(context, size, pool)

        for e in collections(self.enumerate(context, size - 1, pool)):
            yield EEmptyList().with_type(e.type)
            if is_numeric(e.type.t):
                yield EUnaryOp(UOp.Sum, e).with_type(e.type.t)

        for e in self.enumerate(context, size - 1, pool):
            yield ESingleton(e).with_type(TBag(e.type))

        for e in self.enumerate(context, size - 1, pool):
            if isinstance(e.type, TRecord):
                for (f, t) in e.type.fields:
                    yield EGetField(e, f).with_type(t)

        for e in self.enumerate(context, size - 1, pool):
            if isinstance(e.type, THandle):
                yield EGetField(e, "val").with_type(e.type.value_type)

        for e in self.enumerate(context, size - 1, pool):
            if isinstance(e.type, TTuple):
                for n in range(len(e.type.ts)):
                    yield ETupleGet(e, n).with_type(e.type.ts[n])

        for e in of_type(self.enumerate(context, size - 1, pool), BOOL):
            yield EUnaryOp(UOp.Not, e).with_type(BOOL)

        for e in self.enumerate(context, size - 1, pool):
            if is_numeric(e.type):
                yield EUnaryOp("-", e).with_type(e.type)

        for m in self.enumerate(context, size - 1, pool):
            if isinstance(m.type, TMap):
                yield EMapKeys(m).with_type(TBag(m.type.k))

        for (sz1, sz2) in pick_to_sum(2, size - 1):
            for a1 in self.enumerate(context, sz1, pool):
                t = a1.type
                if not is_numeric(t):
                    continue
                for a2 in of_type(self.enumerate(context, sz2, pool), t):
                    yield EBinOp(a1, "+", a2).with_type(t)
                    yield EBinOp(a1, "-", a2).with_type(t)
                    yield EBinOp(a1, ">", a2).with_type(BOOL)
                    yield EBinOp(a1, "<", a2).with_type(BOOL)
                    yield EBinOp(a1, ">=", a2).with_type(BOOL)
                    yield EBinOp(a1, "<=", a2).with_type(BOOL)
            for a1 in collections(self.enumerate(context, sz1, pool)):
                for a2 in of_type(self.enumerate(context, sz2, pool), a1.type):
                    yield EBinOp(a1, "+", a2).with_type(a1.type)
                    yield EBinOp(a1, "-", a2).with_type(a1.type)
                for a2 in of_type(self.enumerate(context, sz2, pool),
                                  a1.type.t):
                    yield EBinOp(a2, BOp.In, a1).with_type(BOOL)
            for a1 in of_type(self.enumerate(context, sz1, pool), BOOL):
                for a2 in of_type(self.enumerate(context, sz2, pool), BOOL):
                    yield EBinOp(a1, BOp.And, a2).with_type(BOOL)
                    yield EBinOp(a1, BOp.Or, a2).with_type(BOOL)
            for a1 in self.enumerate(context, sz1, pool):
                if not isinstance(a1.type, TMap):
                    for a2 in of_type(self.enumerate(context, sz2, pool),
                                      a1.type):
                        yield EEq(a1, a2)
                        yield EBinOp(a1, "!=", a2).with_type(BOOL)
            for m in self.enumerate(context, sz1, pool):
                if isinstance(m.type, TMap):
                    for k in of_type(self.enumerate(context, sz2, pool),
                                     m.type.k):
                        yield EMapGet(m, k).with_type(m.type.v)
                        yield EHasKey(m, k).with_type(BOOL)
            for l in self.enumerate(context, sz1, pool):
                if not isinstance(l.type, TList):
                    continue
                for i in of_type(self.enumerate(context, sz2, pool), INT):
                    yield EListGet(l, i).with_type(l.type.t)

        for (sz1, sz2, sz3) in pick_to_sum(3, size - 1):
            for cond in of_type(self.enumerate(context, sz1, pool), BOOL):
                for then_branch in self.enumerate(context, sz2, pool):
                    for else_branch in of_type(
                            self.enumerate(context, sz2, pool),
                            then_branch.type):
                        yield ECond(cond, then_branch,
                                    else_branch).with_type(then_branch.type)

            for l in self.enumerate(context, sz1, pool):
                if not isinstance(l.type, TList):
                    continue
                for st in of_type(self.enumerate(context, sz2, pool), INT):
                    for ed in of_type(self.enumerate(context, sz3, pool), INT):
                        yield EListSlice(l, st, ed).with_type(l.type)

        for bag in collections(self.enumerate(context, size - 1, pool)):
            # len of bag
            count = EUnaryOp(UOp.Length, bag).with_type(INT)
            yield count
            # empty?
            yield EUnaryOp(UOp.Empty, bag).with_type(BOOL)
            # exists?
            yield EUnaryOp(UOp.Exists, bag).with_type(BOOL)
            # singleton?
            yield EEq(count, ONE)

            yield EUnaryOp(UOp.The, bag).with_type(bag.type.t)
            yield EUnaryOp(UOp.Distinct, bag).with_type(bag.type)
            yield EUnaryOp(UOp.AreUnique, bag).with_type(BOOL)

            if bag.type.t == BOOL:
                yield EUnaryOp(UOp.Any, bag).with_type(BOOL)
                yield EUnaryOp(UOp.All, bag).with_type(BOOL)

        def build_lambdas(bag, pool, body_size):
            v = fresh_var(bag.type.t, omit=set(v for v, p in context.vars()))
            inner_context = UnderBinder(context, v=v, bag=bag, bag_pool=pool)
            for lam_body in self.enumerate(inner_context, body_size, pool):
                yield ELambda(v, lam_body)

        # Iteration
        for (sz1, sz2) in pick_to_sum(2, size - 1):
            for bag in collections(self.enumerate(context, sz1, pool)):
                for lam in build_lambdas(bag, pool, sz2):
                    body_type = lam.body.type
                    yield EMap(bag, lam).with_type(TBag(body_type))
                    if body_type == BOOL:
                        yield EFilter(bag, lam).with_type(bag.type)
                    if is_numeric(body_type):
                        yield EArgMin(bag, lam).with_type(bag.type.t)
                        yield EArgMax(bag, lam).with_type(bag.type.t)
                    if is_collection(body_type):
                        yield EFlatMap(bag, lam).with_type(TBag(body_type.t))

        # Enable use of a state-pool expression at runtime
        if pool == RUNTIME_POOL:
            for e in self.enumerate(context, size - 1, STATE_POOL):
                yield EStateVar(e).with_type(e.type)

        # Create maps
        if pool == STATE_POOL:
            for (sz1, sz2) in pick_to_sum(2, size - 1):
                for bag in collections(self.enumerate(context, sz1,
                                                      STATE_POOL)):
                    if not is_scalar(bag.type.t):
                        continue
                    for lam in build_lambdas(bag, STATE_POOL, sz2):
                        t = TMap(bag.type.t, lam.body.type)
                        m = EMakeMap2(bag, lam).with_type(t)
                        yield m
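
`enumerate_core` splits its size budget across subexpressions with `pick_to_sum`. A sketch of the behavior assumed here (an illustrative reimplementation, not Cozy's): yield every way to split `total` into `n` non-negative parts.

def pick_to_sum(n, total):
    # Yield every n-tuple of non-negative ints summing to `total`.
    if n == 1:
        yield (total,)
        return
    for first in range(total + 1):
        for rest in pick_to_sum(n - 1, total - first):
            yield (first,) + rest

print(list(pick_to_sum(2, 3)))   # [(0, 3), (1, 2), (2, 1), (3, 0)]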
Example #10
File: java.py  Project: uwplse/cozy
 def _eq(self, e1, e2):
     if not self.boxed and self.is_primitive(e1.type):
         return self.visit(EEscape("({e1} == {e2})", ("e1", "e2"), (e1, e2)).with_type(BOOL))
     if is_scalar(e1.type):
         return self.visit(EEscape("java.util.Objects.equals({e1}, {e2})", ["e1", "e2"], [e1, e2]).with_type(BOOL))
     return super()._eq(e1, e2)
Example #11
def _maintenance_cost(e: Exp, op: Op, freebies: [Exp] = []):
    """Determines the cost of maintaining the expression when there are
    freebies and ops being considered.

    The cost is the result of mutating the expression and getting the storage
    size of the difference between the mutated expression and the original.
    """
    e_prime = mutate(e, op.body)
    if alpha_equivalent(e, e_prime):
        return ZERO

    h = extension_handler(type(e.type))
    if h is not None:
        return h.maintenance_cost(old_value=e,
                                  new_value=e_prime,
                                  op=op,
                                  freebies=freebies,
                                  storage_size=storage_size,
                                  maintenance_cost=_maintenance_cost)

    if is_scalar(e.type):
        return storage_size(e, freebies)
    elif isinstance(e.type, TBag) or isinstance(e.type, TSet):
        things_added = storage_size(
            EBinOp(e_prime, "-", e).with_type(e.type), freebies).with_type(INT)
        things_remov = storage_size(
            EBinOp(e, "-", e_prime).with_type(e.type), freebies).with_type(INT)

        return ESum([things_added, things_remov])
    elif isinstance(e.type, TList):
        return storage_size(e_prime, freebies)
    elif isinstance(e.type, TMap):
        keys = EMapKeys(e).with_type(TBag(e.type.k))
        vals = EMap(
            keys,
            mk_lambda(e.type.k,
                      lambda k: EMapGet(e, k).with_type(e.type.v))).with_type(
                          TBag(e.type.v))

        keys_prime = EMapKeys(e_prime).with_type(TBag(e_prime.type.k))
        vals_prime = EMap(
            keys_prime,
            mk_lambda(e_prime.type.k, lambda k: EMapGet(e_prime, k).with_type(
                e_prime.type.v))).with_type(TBag(e_prime.type.v))

        keys_added = storage_size(
            EBinOp(keys_prime, "-", keys).with_type(keys.type),
            freebies).with_type(INT)
        keys_rmved = storage_size(
            EBinOp(keys, "-", keys_prime).with_type(keys.type),
            freebies).with_type(INT)

        vals_added = storage_size(
            EBinOp(vals_prime, "-", vals).with_type(vals.type),
            freebies).with_type(INT)
        vals_rmved = storage_size(
            EBinOp(vals, "-", vals_prime).with_type(vals.type),
            freebies).with_type(INT)

        keys_difference = ESum([keys_added, keys_rmved])
        vals_difference = ESum([vals_added, vals_rmved])
        return EBinOp(keys_difference, "*", vals_difference).with_type(INT)

    else:
        raise NotImplementedError(repr(e.type))
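
For bags and sets, the cost above is the storage size of the elements added plus the elements removed. The same quantity illustrated in plain Python, with ordinary lists standing in for Cozy bag expressions:

from collections import Counter

def bag_maintenance_cost(old, new):
    # Elements added plus elements removed, counted with multiplicity,
    # mirroring ESum([things_added, things_remov]) above.
    added   = Counter(new) - Counter(old)
    removed = Counter(old) - Counter(new)
    return sum(added.values()) + sum(removed.values())

print(bag_maintenance_cost([1, 2, 2, 3], [2, 3, 3, 4]))   # 2 added + 2 removed = 4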
Example #12
File: wf.py  Project: Nathan-Fenner/cozy
def exp_wf_nonrecursive(solver, e : Exp, context : Context, pool = RUNTIME_POOL, assumptions : Exp = T):
    state_vars = OrderedSet(v for v, p in context.vars() if p == STATE_POOL)
    args       = OrderedSet(v for v, p in context.vars() if p == RUNTIME_POOL)
    assumptions = EAll([assumptions, context.path_condition()])

    h = extension_handler(type(e))
    if h is not None:
        msg = h.check_wf(e, state_vars=state_vars, args=args, pool=pool, assumptions=assumptions, is_valid=solver.valid)
        if msg is not None:
            raise ExpIsNotWf(e, e, msg)
        return
    at_runtime = pool == RUNTIME_POOL
    if isinstance(e, EStateVar) and not at_runtime:
        raise ExpIsNotWf(e, e, "EStateVar in state pool position")
    if isinstance(e, EStateVar):
        fvs = free_vars(e.e)
        if not fvs:
            raise ExpIsNotWf(e, e, "constant value in state position")
        bad = [v for v in fvs if v not in state_vars]
        if bad:
            raise ExpIsNotWf(e, e, "free non-statevars in state position: {}".format(", ".join(v.id for v in bad)))
    if (isinstance(e, EDropFront) or isinstance(e, EDropBack)) and not at_runtime:
        raise ExpIsNotWf(e, e, "EDrop* in state position")
    if isinstance(e, EFlatMap) and not at_runtime:
        raise ExpIsNotWf(e, e, "EFlatMap in state position")
    if not allow_int_arithmetic_state.value and not at_runtime and isinstance(e, EBinOp) and e.type == INT:
        raise ExpIsNotWf(e, e, "integer arithmetic in state position")
    # if isinstance(e, EUnaryOp) and e.op == UOp.Distinct and not at_runtime:
    #     raise ExpIsNotWf(e, e, "'distinct' in state position")
    # if isinstance(e, EMapKeys) and not at_runtime:
    #     raise ExpIsNotWf(e, e, "'mapkeys' in state position")
    if isinstance(e, EVar):
        if at_runtime and e in state_vars:
            raise ExpIsNotWf(e, e, "state var at runtime")
        elif not at_runtime and e in args:
            raise ExpIsNotWf(e, e, "arg in state exp")
    # if is_collection(e.type) and is_collection(e.type.t):
    #     raise ExpIsNotWf(e, e, "collection of collection")
    if is_collection(e.type) and not is_scalar(e.type.t):
        raise ExpIsNotWf(e, e, "collection of nonscalar")
    if isinstance(e.type, TMap) and not is_scalar(e.type.k):
        raise ExpIsNotWf(e, e, "bad key type {}".format(pprint(e.type.k)))
    if isinstance(e.type, TMap) and isinstance(e.type.v, TMap):
        raise ExpIsNotWf(e, e, "map to map")
    # This check is probably a bad idea: whether `the` is legal may depend on
    # the context that the expression is embedded within, so we can't skip it
    # during synthesis just because it looks invalid now.
    # if isinstance(e, EUnaryOp) and e.op == UOp.The:
    #     len = EUnaryOp(UOp.Length, e.e).with_type(INT)
    #     if not valid(EImplies(assumptions, EBinOp(len, "<=", ENum(1).with_type(INT)).with_type(BOOL))):
    #         raise ExpIsNotWf(e, e, "illegal application of 'the': could have >1 elems")
    if not at_runtime and isinstance(e, EBinOp) and e.op == "-" and is_collection(e.type):
        raise ExpIsNotWf(e, e, "collection subtraction in state position")
    if not at_runtime and isinstance(e, ESingleton):
        raise ExpIsNotWf(e, e, "singleton in state position")
    # if not at_runtime and isinstance(e, ENum) and e.val != 0 and e.type == INT:
    #     raise ExpIsNotWf(e, e, "nonzero integer constant in state position")
    if not allow_conditional_state.value and not at_runtime and isinstance(e, ECond):
        raise ExpIsNotWf(e, e, "conditional in state position")
    if isinstance(e, EMakeMap2) and isinstance(e.e, EEmptyList):
        raise ExpIsNotWf(e, e, "trivially empty map")
    if do_expensive_checks.value and not at_runtime and isinstance(e, EFilter):
        # catch "peels": removal of zero or one elements
        if solver.valid(EImplies(assumptions, ELe(ELen(EFilter(e.e, ELambda(e.p.arg, ENot(e.p.body))).with_type(e.type)), ONE))):
            raise ExpIsNotWf(e, e, "filter is a peel")
    if do_expensive_checks.value and not at_runtime and isinstance(e, EMakeMap2) and is_collection(e.type.v):
        all_collections = [sv for sv in state_vars if is_collection(sv.type)]
        total_size = ENum(0).with_type(INT)
        for c in all_collections:
            total_size = EBinOp(total_size, "+", EUnaryOp(UOp.Length, c).with_type(INT)).with_type(INT)
        my_size = EUnaryOp(UOp.Length, EFlatMap(EUnaryOp(UOp.Distinct, e.e).with_type(e.e.type), e.value).with_type(e.type.v)).with_type(INT)
        s = EImplies(
            assumptions,
            EBinOp(total_size, ">=", my_size).with_type(BOOL))
        if not solver.valid(s):
            # from cozy.evaluation import eval
            # from cozy.solver import satisfy
            # model = satisfy(EAll([assumptions, EBinOp(total_size, "<", my_size).with_type(BOOL)]), collection_depth=3, validate_model=True)
            # assert model is not None
            # raise ExpIsNotWf(e, e, "non-polynomial-sized map ({}); total_size={}, this_size={}".format(model, eval(total_size, model), eval(my_size, model)))
            raise ExpIsNotWf(e, e, "non-polynomial-sized map")
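
The "peel" check near the end asks the solver whether, under the assumptions, the filter's predicate rejects at most one element. The same property illustrated on concrete lists rather than symbolically:

def is_peel(xs, keep):
    # A filter is a "peel" if its predicate drops at most one element,
    # i.e. len([x for x in xs if not keep(x)]) <= 1.
    return len([x for x in xs if not keep(x)]) <= 1

print(is_peel([1, 2, 3, 4], lambda x: x != 3))      # True: only 3 is dropped
print(is_peel([1, 2, 3, 4], lambda x: x % 2 == 1))  # False: 2 and 4 are dropped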
Example #13
def improve_implementation(impl: Implementation,
                           timeout: datetime.timedelta = datetime.timedelta(
                               seconds=60),
                           progress_callback=None) -> Implementation:

    start_time = datetime.datetime.now()

    # we statefully modify `impl`, so let's make a defensive copy
    impl = Implementation(impl.spec, list(impl.concrete_state),
                          list(impl.query_specs),
                          OrderedDict(impl.query_impls),
                          defaultdict(SNoOp, impl.updates),
                          defaultdict(SNoOp, impl.handle_updates))

    # gather root types
    types = list(all_types(impl.spec))
    basic_types = set(t for t in types if is_scalar(t))
    basic_types |= {BOOL, INT}
    print("basic types:")
    for t in basic_types:
        print("  --> {}".format(pprint(t)))
    basic_types = list(basic_types)
    ctx = SynthCtx(all_types=types, basic_types=basic_types)

    # the actual worker threads
    improvement_jobs = []

    with jobs.SafeQueue() as solutions_q:

        def stop_jobs(js):
            js = list(js)
            jobs.stop_jobs(js)
            for j in js:
                improvement_jobs.remove(j)

        def reconcile_jobs():
            # figure out what new jobs we need
            job_query_names = set(j.q.name for j in improvement_jobs)
            new = []
            for q in impl.query_specs:
                if q.name not in job_query_names:
                    states_maintained_by_q = impl.states_maintained_by(q)
                    new.append(
                        ImproveQueryJob(
                            ctx,
                            impl.abstract_state,
                            list(impl.spec.assumptions) + list(q.assumptions),
                            q,
                            k=(lambda q: lambda new_rep, new_ret: solutions_q.
                               put((q, new_rep, new_ret)))(q),
                            hints=[
                                EStateVar(c).with_type(c.type) for c in
                                impl.concretization_functions.values()
                            ],
                            freebies=[
                                e for (v, e) in impl.concrete_state
                                if v in states_maintained_by_q
                            ],
                            ops=impl.op_specs,
                            funcs=impl.extern_funcs))

            # figure out what old jobs we can stop
            impl_query_names = set(q.name for q in impl.query_specs)
            old = [
                j for j in improvement_jobs if j.q.name not in impl_query_names
            ]

            # make it so
            stop_jobs(old)
            for j in new:
                j.start()
            improvement_jobs.extend(new)

        # start jobs
        reconcile_jobs()

        # wait for results
        timeout = Timeout(timeout)
        done = False
        while not done and not timeout.is_timed_out():
            for j in improvement_jobs:
                if j.done:
                    if j.successful:
                        j.join()
                    else:
                        print("failed job: {}".format(j), file=sys.stderr)
                        # raise Exception("failed job: {}".format(j))

            done = all(j.done for j in improvement_jobs)

            try:
                # list of (Query, new_rep, new_ret) objects
                results = solutions_q.drain(block=True, timeout=0.5)
            except Empty:
                continue

            # group by query name, favoring later (i.e. better) solutions
            print("updating with {} new solutions".format(len(results)))
            improved_queries_by_name = OrderedDict()
            killed = 0
            for r in results:
                q, new_rep, new_ret = r
                if q.name in improved_queries_by_name:
                    killed += 1
                improved_queries_by_name[q.name] = r
            if killed:
                print(" --> dropped {} worse solutions".format(killed))

            improvements = list(improved_queries_by_name.values())

            def index_of(l, p):
                if not isinstance(l, list):
                    l = list(l)
                for i in range(len(l)):
                    if p(l[i]):
                        return i
                return -1

            improvements.sort(key=lambda i: index_of(
                impl.query_specs, lambda qq: qq.name == i[0].name))
            print("update order:")
            for (q, _, _) in improvements:
                print("  --> {}".format(q.name))

            # update query implementations
            i = 1
            for (q, new_rep, new_ret) in improvements:
                if timeout.is_timed_out():
                    break

                print("considering update {}/{}...".format(
                    i, len(improvements)))
                i += 1
                # this guard might be false if a better solution was
                # enqueued but the job has already been cleaned up
                if q.name in [qq.name for qq in impl.query_specs]:
                    elapsed = datetime.datetime.now() - start_time
                    print("SOLUTION FOR {} AT {} [size={}]".format(
                        q.name, elapsed,
                        new_ret.size() + sum(proj.size()
                                             for (v, proj) in new_rep)))
                    print("-" * 40)
                    for (sv, proj) in new_rep:
                        print("  {} : {} = {}".format(sv.id, pprint(sv.type),
                                                      pprint(proj)))
                    print("  return {}".format(pprint(new_ret)))
                    print("-" * 40)
                    impl.set_impl(q, new_rep, new_ret)

                    # clean up
                    impl.cleanup()
                    if progress_callback is not None:
                        progress_callback(
                            (impl, impl.code, impl.concretization_functions))
                    reconcile_jobs()
                else:
                    print("  (skipped)")

        # stop jobs
        print("Stopping jobs")
        stop_jobs(list(improvement_jobs))
        return impl
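
The result-handling loop above keeps only the newest solution per query name (later solutions are treated as better) and then applies them in spec order. That grouping-and-ordering step, sketched with plain strings in place of Cozy's Query objects:

from collections import OrderedDict

spec_order = ["lookup", "size", "contains"]                   # hypothetical query names
results = [("size", "v1"), ("lookup", "v1"), ("size", "v2")]  # later entries are better

best = OrderedDict()
for name, solution in results:
    best[name] = solution            # later solutions overwrite earlier ones

improvements = sorted(best.items(), key=lambda kv: spec_order.index(kv[0]))
print(improvements)                  # [('lookup', 'v1'), ('size', 'v2')]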
Example #14
def possibly_useful_nonrecursive(
    solver,
    e: Exp,
    context: Context,
    pool=RUNTIME_POOL,
    assumptions: Exp = ETRUE,
    ops: [Op] = ()) -> bool:
    """Heuristic filter to ignore expressions that are almost certainly useless."""

    state_vars = OrderedSet(v for v, p in context.vars() if p == STATE_POOL)
    args = OrderedSet(v for v, p in context.vars() if p == RUNTIME_POOL)
    assumptions = EAll([assumptions, context.path_condition()])
    at_runtime = pool == RUNTIME_POOL

    h = extension_handler(type(e))
    if h is not None:
        res = h.possibly_useful(e, context, pool, assumptions, ops, solver)
        if not res:
            return res

    if isinstance(e, EStateVar) and not free_vars(e.e):
        return No("constant value in state position")
    if (isinstance(e, EDropFront)
            or isinstance(e, EDropBack)) and not at_runtime:
        return No("EDrop* in state position")
    if not allow_big_sets.value and isinstance(e, EFlatMap) and not at_runtime:
        return No("EFlatMap in state position")
    if not allow_int_arithmetic_state.value and not at_runtime and isinstance(
            e, EBinOp) and e.type == INT:
        return No("integer arithmetic in state position")
    if is_collection(e.type) and not is_scalar(e.type.elem_type):
        return No("collection of nonscalar: e {}\n elem_type: {}\n".format(
            e, e.type.elem_type))
    if isinstance(e.type, TMap) and not is_scalar(e.type.k):
        return No("bad key type {}".format(pprint(e.type.k)))
    if isinstance(e.type, TMap) and isinstance(e.type.v, TMap):
        return No("map to map")
    # This check is probably a bad idea: whether `the` is legal may depend on
    # the context that the expression is embedded within, so we can't skip it
    # during synthesis just because it looks invalid now.
    # if isinstance(e, EUnaryOp) and e.op == UOp.The:
    #     len = EUnaryOp(UOp.Length, e.e).with_type(INT)
    #     if not valid(EImplies(assumptions, EBinOp(len, "<=", ENum(1).with_type(INT)).with_type(BOOL))):
    #         return No("illegal application of 'the': could have >1 elems")
    if not at_runtime and isinstance(
            e, EBinOp) and e.op == "-" and is_collection(e.type):
        return No("collection subtraction in state position")
    # if not at_runtime and isinstance(e, ESingleton):
    #     return No("singleton in state position")
    if not allow_nonzero_state_constants.value and not at_runtime and isinstance(
            e, ENum) and e.val != 0:
        return No("nonzero integer constant in state position")
    if not allow_binop_state.value and at_runtime and isinstance(
            e, EStateVar) and isinstance(e.e, EBinOp) and is_scalar(
                e.e.e1.type) and is_scalar(e.e.e2.type):
        return No(
            "constant-time binary operator {!r} in state position".format(
                e.e.op))
    if not allow_conditional_state.value and not at_runtime and isinstance(
            e, ECond):
        return No("conditional in state position")
    if isinstance(e, EMakeMap2) and isinstance(e.e, EEmptyList):
        return No("trivially empty map")
    if isinstance(e, EMakeMap2) and isinstance(e.e, ESingleton):
        return No("really tiny map")
    if not at_runtime and (isinstance(e, EArgMin) or isinstance(e, EArgMax)):
        # Cozy has no way to efficiently implement mins/maxes when more than
        # one element may leave the collection.
        from cozy.state_maintenance import mutate
        for op in ops:
            elems = e.e
            elems_prime = mutate(elems, op.body)
            formula = EAll([assumptions] + list(op.assumptions) + [
                EGt(
                    ELen(
                        EBinOp(elems, "-", elems_prime).with_type(elems.type)),
                    ONE)
            ])
            if solver.satisfiable(formula):
                return No(
                    "more than one element might be removed during {}".format(
                        op.name))
    if not allow_peels.value and not at_runtime and isinstance(e, EFilter):
        # catch "peels": removal of zero or one elements
        if solver.valid(
                EImplies(
                    assumptions,
                    ELe(
                        ELen(
                            EFilter(
                                e.e,
                                ELambda(e.predicate.arg, ENot(
                                    e.predicate.body))).with_type(e.type)),
                        ONE))):
            return No("filter is a peel")
    if not allow_big_maps.value and not at_runtime and isinstance(
            e, EMakeMap2) and is_collection(e.type.v):
        all_collections = [sv for sv in state_vars if is_collection(sv.type)]
        total_size = ENum(0).with_type(INT)
        for c in all_collections:
            total_size = EBinOp(total_size, "+",
                                EUnaryOp(UOp.Length,
                                         c).with_type(INT)).with_type(INT)
        my_size = EUnaryOp(
            UOp.Length,
            EFlatMap(
                EUnaryOp(UOp.Distinct, e.e).with_type(e.e.type),
                e.value_function).with_type(e.type.v)).with_type(INT)
        s = EImplies(assumptions,
                     EBinOp(total_size, ">=", my_size).with_type(BOOL))
        if not solver.valid(s):
            return No("non-polynomial-sized map")

    return True
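
`possibly_useful_nonrecursive` returns either `True` or a `No(...)` value. The sketch below shows the convention this relies on (an assumption about `No`, not Cozy's actual class): a falsy object that still carries the rejection reason.

class No:
    def __init__(self, reason):
        self.reason = reason
    def __bool__(self):
        return False          # falsy, so callers can write `if not res:`
    def __str__(self):
        return "no: " + self.reason

def reject_negatives(x):
    if x < 0:
        return No("negative input")
    return True

res = reject_negatives(-1)
if not res:
    print(res)                # no: negative input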
Example #15
def good_idea(solver,
              e: Exp,
              context: Context,
              pool=RUNTIME_POOL,
              assumptions: Exp = T) -> bool:
    """Heuristic filter to ignore expressions that are almost certainly useless."""

    state_vars = OrderedSet(v for v, p in context.vars() if p == STATE_POOL)
    args = OrderedSet(v for v, p in context.vars() if p == RUNTIME_POOL)
    assumptions = EAll([assumptions, context.path_condition()])
    at_runtime = pool == RUNTIME_POOL

    if isinstance(e, EStateVar) and not free_vars(e.e):
        return No("constant value in state position")
    if (isinstance(e, EDropFront)
            or isinstance(e, EDropBack)) and not at_runtime:
        return No("EDrop* in state position")
    if not allow_big_sets.value and isinstance(e, EFlatMap) and not at_runtime:
        return No("EFlatMap in state position")
    if not allow_int_arithmetic_state.value and not at_runtime and isinstance(
            e, EBinOp) and e.type == INT:
        return No("integer arithmetic in state position")
    if is_collection(e.type) and not is_scalar(e.type.t):
        return No("collection of nonscalar")
    if isinstance(e.type, TMap) and not is_scalar(e.type.k):
        return No("bad key type {}".format(pprint(e.type.k)))
    if isinstance(e.type, TMap) and isinstance(e.type.v, TMap):
        return No("map to map")
    # This check is probably a bad idea: whether `the` is legal may depend on
    # the context that the expression is embedded within, so we can't skip it
    # during synthesis just because it looks invalid now.
    # if isinstance(e, EUnaryOp) and e.op == UOp.The:
    #     len = EUnaryOp(UOp.Length, e.e).with_type(INT)
    #     if not valid(EImplies(assumptions, EBinOp(len, "<=", ENum(1).with_type(INT)).with_type(BOOL))):
    #         return No("illegal application of 'the': could have >1 elems")
    if not at_runtime and isinstance(
            e, EBinOp) and e.op == "-" and is_collection(e.type):
        return No("collection subtraction in state position")
    # if not at_runtime and isinstance(e, ESingleton):
    #     return No("singleton in state position")
    # if not at_runtime and isinstance(e, ENum) and e.val != 0 and e.type == INT:
    #     return No("nonzero integer constant in state position")
    if at_runtime and isinstance(e, EStateVar) and isinstance(
            e.e, EBinOp) and is_scalar(e.e.e1.type) and is_scalar(e.e.e2.type):
        return No("constant-time binary operator in state position")
    if not allow_conditional_state.value and not at_runtime and isinstance(
            e, ECond):
        return No("conditional in state position")
    if isinstance(e, EMakeMap2) and isinstance(e.e, EEmptyList):
        return No("trivially empty map")
    if not allow_peels.value and not at_runtime and isinstance(e, EFilter):
        # catch "peels": removal of zero or one elements
        if solver.valid(
                EImplies(
                    assumptions,
                    ELe(
                        ELen(
                            EFilter(e.e, ELambda(e.p.arg, ENot(
                                e.p.body))).with_type(e.type)), ONE))):
            return No("filter is a peel")
    if not allow_big_maps.value and not at_runtime and isinstance(
            e, EMakeMap2) and is_collection(e.type.v):
        all_collections = [sv for sv in state_vars if is_collection(sv.type)]
        total_size = ENum(0).with_type(INT)
        for c in all_collections:
            total_size = EBinOp(total_size, "+",
                                EUnaryOp(UOp.Length,
                                         c).with_type(INT)).with_type(INT)
        my_size = EUnaryOp(
            UOp.Length,
            EFlatMap(EUnaryOp(UOp.Distinct, e.e).with_type(e.e.type),
                     e.value).with_type(e.type.v)).with_type(INT)
        s = EImplies(assumptions,
                     EBinOp(total_size, ">=", my_size).with_type(BOOL))
        if not solver.valid(s):
            # from cozy.evaluation import eval
            # from cozy.solver import satisfy
            # model = satisfy(EAll([assumptions, EBinOp(total_size, "<", my_size).with_type(BOOL)]), collection_depth=3, validate_model=True)
            # assert model is not None
            # return No("non-polynomial-sized map ({}); total_size={}, this_size={}".format(model, eval(total_size, model), eval(my_size, model)))
            return No("non-polynomial-sized map")

    return True
Example #16
File: core.py  Project: uwplse/cozy
def possibly_useful_nonrecursive(solver, e : Exp, context : Context, pool = RUNTIME_POOL, assumptions : Exp = ETRUE, ops : [Op] = ()) -> bool:
    """Heuristic filter to ignore expressions that are almost certainly useless."""

    state_vars  = OrderedSet(v for v, p in context.vars() if p == STATE_POOL)
    args        = OrderedSet(v for v, p in context.vars() if p == RUNTIME_POOL)
    assumptions = EAll([assumptions, context.path_condition()])
    at_runtime  = pool == RUNTIME_POOL

    h = extension_handler(type(e))
    if h is not None:
        res = h.possibly_useful(e, context, pool, assumptions, ops, solver)
        if not res:
            return res

    if isinstance(e, EStateVar) and not free_vars(e.e):
        return No("constant value in state position")
    if (isinstance(e, EDropFront) or isinstance(e, EDropBack)) and not at_runtime:
        return No("EDrop* in state position")
    if not allow_big_sets.value and isinstance(e, EFlatMap) and not at_runtime:
        return No("EFlatMap in state position")
    if not allow_int_arithmetic_state.value and not at_runtime and isinstance(e, EBinOp) and e.type == INT:
        return No("integer arithmetic in state position")
    if is_collection(e.type) and not is_scalar(e.type.elem_type):
        return No("collection of nonscalar: e {}\n elem_type: {}\n".format(e, e.type.elem_type))
    if isinstance(e.type, TMap) and not is_scalar(e.type.k):
        return No("bad key type {}".format(pprint(e.type.k)))
    if isinstance(e.type, TMap) and isinstance(e.type.v, TMap):
        return No("map to map")
    # This check is probably a bad idea: whether `the` is legal may depend on
    # the context that the expression is embedded within, so we can't skip it
    # during synthesis just because it looks invalid now.
    # if isinstance(e, EUnaryOp) and e.op == UOp.The:
    #     len = EUnaryOp(UOp.Length, e.e).with_type(INT)
    #     if not valid(EImplies(assumptions, EBinOp(len, "<=", ENum(1).with_type(INT)).with_type(BOOL))):
    #         return No("illegal application of 'the': could have >1 elems")
    if not at_runtime and isinstance(e, EBinOp) and e.op == "-" and is_collection(e.type):
        return No("collection subtraction in state position")
    # if not at_runtime and isinstance(e, ESingleton):
    #     return No("singleton in state position")
    if not allow_nonzero_state_constants.value and not at_runtime and isinstance(e, ENum) and e.val != 0:
        return No("nonzero integer constant in state position")
    if not allow_binop_state.value and at_runtime and isinstance(e, EStateVar) and isinstance(e.e, EBinOp) and is_scalar(e.e.e1.type) and is_scalar(e.e.e2.type):
        return No("constant-time binary operator {!r} in state position".format(e.e.op))
    if not allow_conditional_state.value and not at_runtime and isinstance(e, ECond):
        return No("conditional in state position")
    if isinstance(e, EMakeMap2) and isinstance(e.e, EEmptyList):
        return No("trivially empty map")
    if isinstance(e, EMakeMap2) and isinstance(e.e, ESingleton):
        return No("really tiny map")
    if not at_runtime and (isinstance(e, EArgMin) or isinstance(e, EArgMax)):
        # Cozy has no way to efficiently implement mins/maxes when more than
        # one element may leave the collection.
        from cozy.state_maintenance import mutate
        for op in ops:
            elems = e.e
            elems_prime = mutate(elems, op.body)
            formula = EAll([assumptions] + list(op.assumptions) + [EGt(ELen(EBinOp(elems, "-", elems_prime).with_type(elems.type)), ONE)])
            if solver.satisfiable(formula):
                return No("more than one element might be removed during {}".format(op.name))
    if not allow_peels.value and not at_runtime and isinstance(e, EFilter):
        # catch "peels": removal of zero or one elements
        if solver.valid(EImplies(assumptions, ELe(ELen(EFilter(e.e, ELambda(e.predicate.arg, ENot(e.predicate.body))).with_type(e.type)), ONE))):
            return No("filter is a peel")
    if not allow_big_maps.value and not at_runtime and isinstance(e, EMakeMap2) and is_collection(e.type.v):
        all_collections = [sv for sv in state_vars if is_collection(sv.type)]
        total_size = ENum(0).with_type(INT)
        for c in all_collections:
            total_size = EBinOp(total_size, "+", EUnaryOp(UOp.Length, c).with_type(INT)).with_type(INT)
        my_size = EUnaryOp(UOp.Length, EFlatMap(EUnaryOp(UOp.Distinct, e.e).with_type(e.e.type), e.value_function).with_type(e.type.v)).with_type(INT)
        s = EImplies(
            assumptions,
            EBinOp(total_size, ">=", my_size).with_type(BOOL))
        if not solver.valid(s):
            return No("non-polynomial-sized map")

    return True