def context_complexity(context):
    """Total complexity of everything a context holds.

    Sums ``complexity(...)`` over the context's stack entries and over its
    bound variables.
    """
    stack_cost = sum(complexity(arg) for arg in iter_shared_list(context.stack))
    bound_cost = sum(complexity(var) for var in iter_shared_list(context.bound))
    return stack_cost + bound_cost
def _collect(continuations, nonlinear):
    """Close each continuation and join the resulting unique samples.

    Returns TOP as soon as any continuation closes to TOP, BOT when there
    are no samples, and otherwise the JOIN of all non-dominated samples,
    folded in increasing (complexity, code) order.
    """
    PROFILE_COUNTERS[_collect, '...'] += 1
    # Collect unique samples.
    samples = set()
    for continuation in continuations:
        sample = _close(continuation, nonlinear)
        if sample is TOP:
            return TOP
        samples.add(sample)
    if not samples:
        return BOT
    # Filter out dominated samples.
    # When two samples are equivalent (x [= y and y [= x), each dominates
    # the other; the old code dropped both, which could leave the filtered
    # list empty and crash below. Instead, keep the canonical member of an
    # equivalence class: the one that sorts first by (complexity, code).
    ordered = sorted(samples, key=lambda code: (complexity(code), code))
    filtered_samples = []
    for i, sample in enumerate(ordered):
        dominated = False
        for j, other in enumerate(ordered):
            if i == j:
                continue
            if oracle.try_decide_less(sample, other):
                if i < j and oracle.try_decide_less(other, sample):
                    # Equivalent pair; the earlier (canonical) one survives.
                    continue
                dominated = True
                break
        if not dominated:
            filtered_samples.append(sample)
    # Construct a join term (filtered_samples is already sorted).
    result = filtered_samples[0]
    for sample in filtered_samples[1:]:
        result = JOIN(result, sample)
    return result
def priority(code):
    """Sort key for a code: its normality flag, then its complexity,
    then the code itself as a final tie-breaker."""
    normal = is_normal(code)
    cost = complexity(code)
    return (normal, cost, code)
def test_complexity(code, expected):
    """Check that complexity(code) equals the expected value."""
    actual = complexity(code)
    assert actual == expected
def test_complexity_runs(code):
    """Smoke test: complexity(code) should run without raising.

    The return value is intentionally ignored.
    """
    complexity(code)
def continuation_complexity(continuation):
    """Total complexity of a continuation: its head plus its context.

    The continuation is a (head, context) pair.
    """
    head_cost = complexity(continuation[0])
    context_cost = context_complexity(continuation[1])
    return head_cost + context_cost