def test_isfinite(self):
    space = SimpleStateSpace()
    with Patched(), StateSpaceContext(space):
        x = SymbolicFloat("symfloat")
        self.assertTrue(math.isfinite(x))
        self.assertTrue(math.isfinite(2.3))
        self.assertFalse(math.isfinite(float("nan")))
def test_isfinite(self):
    space = SimpleStateSpace()
    with Patched(enabled=lambda: True), StateSpaceContext(space):
        x = SmtFloat('symfloat')
        self.assertTrue(math.isfinite(x))
        self.assertTrue(math.isfinite(2.3))
        self.assertFalse(math.isfinite(float('nan')))
def test_isinstance(self):
    with StateSpaceContext(SimpleStateSpace()):
        f = SmtFloat("f")
        self.assertFalse(isinstance(f, float))
        self.assertFalse(isinstance(f, int))
        self.assertTrue(_isinstance(f, float))
        self.assertFalse(_isinstance(f, int))
def symbolic_run(
    self, fn: Callable[[TrackingStateSpace], object]
) -> Tuple[object, Optional[BaseException], TrackingStateSpace]:
    search_root = SinglePathNode(True)
    with Patched(enabled=lambda: True):
        for itr in range(1, 200):
            debug('iteration', itr)
            space = TrackingStateSpace(time.monotonic() + 10.0, 1.0,
                                       search_root=search_root)
            try:
                with StateSpaceContext(space):
                    ret = (realize(fn(space)), None, space)
                    space.check_deferred_assumptions()
                    return ret
            except IgnoreAttempt as e:
                debug('ignore iteration attempt: ', str(e))
            except BaseException as e:
                debug(traceback.format_exc())
                return (None, e, space)
            top_analysis, space_exhausted = space.bubble_status(CallAnalysis())
            if space_exhausted:
                return (None,
                        CrosshairInternal(f'exhausted after {itr} iterations'),
                        space)
    return (None,
            CrosshairInternal('Unable to find a successful symbolic execution'),
            space)
def test_copy(self) -> None:
    with StateSpaceContext(SimpleStateSpace()):
        poke1 = make_fake_object(Pokeable, 'ppoke')
        poke1.poke()
        poke2 = copy.copy(poke1)
        self.assertIsNot(poke1, poke2)
        self.assertEqual(type(poke1), type(poke2))
        self.assertIs(poke1.x, poke2.x)
        poke1.poke()
        self.assertIsNot(poke1.x, poke2.x)
        self.assertNotEqual(str(poke1.x.var), str(poke2.x.var))
def test_copy(self):
    with StateSpaceContext(SimpleStateSpace()):
        poke1 = proxy_class_as_masquerade(Pokeable, "ppoke")
        poke1.poke()
        poke2 = copy.copy(poke1)
        self.assertIsNot(poke1, poke2)
        self.assertEqual(type(poke1), type(poke2))
        self.assertIs(poke1.x, poke2.x)
        poke1.poke()
        self.assertIsNot(poke1.x, poke2.x)
        self.assertNotEqual(str(poke1.x.var), str(poke2.x.var))
def path_cover(
    ctxfn: FunctionInfo, options: AnalysisOptions, coverage_type: CoverageType
) -> List[PathSummary]:
    fn, sig = ctxfn.callable()
    search_root = SinglePathNode(True)
    condition_start = time.monotonic()
    paths: List[PathSummary] = []
    for i in range(1, options.max_iterations):
        debug("Iteration ", i)
        itr_start = time.monotonic()
        if itr_start > condition_start + options.per_condition_timeout:
            debug(
                "Stopping due to --per_condition_timeout=",
                options.per_condition_timeout,
            )
            break
        space = StateSpace(
            execution_deadline=itr_start + options.per_path_timeout,
            model_check_timeout=options.per_path_timeout / 2,
            search_root=search_root,
        )
        with condition_parser(
            options.analysis_kind
        ), Patched(), COMPOSITE_TRACER, StateSpaceContext(space):
            summary = None
            try:
                summary = run_iteration(fn, sig, space)
                verification_status = VerificationStatus.CONFIRMED
            except UnexploredPath:
                verification_status = VerificationStatus.UNKNOWN
            debug("Verification status:", verification_status)
            top_analysis, exhausted = space.bubble_status(
                CallAnalysis(verification_status))
            debug("Path tree stats", search_root.stats())
            if summary:
                paths.append(summary)
            if exhausted:
                debug("Stopping due to code path exhaustion. (yay!)")
                break
    opcodes_found: Set[int] = set()
    selected: List[PathSummary] = []
    while paths:
        next_best = max(
            paths, key=lambda p: len(p.coverage.offsets_covered - opcodes_found))
        cur_offsets = next_best.coverage.offsets_covered
        if coverage_type == CoverageType.OPCODE:
            if len(cur_offsets - opcodes_found) == 0:
                break
        selected.append(next_best)
        opcodes_found |= cur_offsets
        paths = [p for p in paths if p is not next_best]
    return selected
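The selection pass at the end of path_cover is a greedy set cover over opcode offsets: it repeatedly takes the path that adds the most not-yet-covered offsets and stops once nothing new would be covered. A minimal standalone sketch of that idea follows; the names greedy_cover and path_offsets are illustrative only and not part of the module above.

from typing import List, Set

def greedy_cover(path_offsets: List[Set[int]]) -> List[int]:
    # Return indices of paths, chosen greedily by marginal coverage.
    chosen: List[int] = []
    seen: Set[int] = set()
    remaining = list(enumerate(path_offsets))
    while remaining:
        # Pick the path whose offsets add the most unseen coverage.
        idx, best = max(remaining, key=lambda pair: len(pair[1] - seen))
        if not (best - seen):
            break  # nothing new would be covered; stop
        chosen.append(idx)
        seen |= best
        remaining = [(i, offs) for i, offs in remaining if i != idx]
    return chosen

# e.g. greedy_cover([{1, 2}, {2, 3, 4}, {4}]) returns [1, 0]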
def symbolic_run(
    self,
    fn: Callable[[StateSpace, Dict[str, object]], object],
    typed_args: Dict[str, type],
) -> Tuple[
    object,  # return value
    Optional[Dict[str, object]],  # arguments after execution
    Optional[BaseException],  # exception thrown, if any
    StateSpace,
]:
    search_root = SinglePathNode(True)
    with COMPOSITE_TRACER, Patched():
        for itr in range(1, 200):
            debug("iteration", itr)
            space = StateSpace(time.monotonic() + 10.0, 1.0,
                               search_root=search_root)
            symbolic_args = {}
            try:
                with StateSpaceContext(space):
                    symbolic_args = {
                        name: proxy_for_type(typ, name)
                        for name, typ in typed_args.items()
                    }
                    ret = fn(space, symbolic_args)
                    ret = (deep_realize(ret), symbolic_args, None, space)
                    space.check_deferred_assumptions()
                    return ret
            except IgnoreAttempt as e:
                debug("ignore iteration attempt: ", str(e))
            except BaseException as e:
                debug(traceback.format_exc())
                return (None, symbolic_args, e, space)
            top_analysis, space_exhausted = space.bubble_status(CallAnalysis())
            if space_exhausted:
                return (
                    None,
                    symbolic_args,
                    CrosshairInternal(f"exhausted after {itr} iterations"),
                    space,
                )
    return (
        None,
        None,
        CrosshairInternal("Unable to find a successful symbolic execution"),
        space,
    )
def diff_behavior_with_signature(
        fn1: Callable, fn2: Callable, sig: inspect.Signature,
        options: AnalysisOptions) -> Iterable[str]:
    search_root = SinglePathNode(True)
    condition_start = time.monotonic()
    for i in range(1, options.max_iterations):
        debug('Iteration ', i)
        itr_start = time.monotonic()
        if itr_start > condition_start + options.per_condition_timeout:
            yield f'(stopping due to --per_condition_timeout={options.per_condition_timeout})'
            break
        space = TrackingStateSpace(
            execution_deadline=itr_start + options.per_path_timeout,
            model_check_timeout=options.per_path_timeout / 2,
            search_root=search_root)
        with StateSpaceContext(space):
            (verification_status, output) = run_iteration(fn1, fn2, sig, space)
            debug('Verification status:', verification_status)
            top_analysis, space_exhausted = space.bubble_status(
                CallAnalysis(verification_status))
            if (top_analysis and
                    top_analysis.verification_status == VerificationStatus.CONFIRMED):
                break
            yield from output
def diff_behavior_with_signature(
    fn1: Callable, fn2: Callable, sig: inspect.Signature, options: AnalysisOptions
) -> Iterable[BehaviorDiff]:
    search_root = SinglePathNode(True)
    condition_start = time.monotonic()
    for i in range(1, options.max_iterations):
        debug("Iteration ", i)
        itr_start = time.monotonic()
        if itr_start > condition_start + options.per_condition_timeout:
            debug(
                "Stopping due to --per_condition_timeout=",
                options.per_condition_timeout,
            )
            return
        options.incr("num_paths")
        space = StateSpace(
            execution_deadline=itr_start + options.per_path_timeout,
            model_check_timeout=options.per_path_timeout / 2,
            search_root=search_root,
        )
        with StateSpaceContext(space):
            output = None
            try:
                (verification_status, output) = run_iteration(fn1, fn2, sig, space)
            except UnexploredPath:
                verification_status = VerificationStatus.UNKNOWN
            debug("Verification status:", verification_status)
            top_analysis, space_exhausted = space.bubble_status(
                CallAnalysis(verification_status)
            )
            if (
                top_analysis
                and top_analysis.verification_status == VerificationStatus.CONFIRMED
            ):
                debug("Stopping due to code path exhaustion. (yay!)")
                options.incr("exhaustion")
                break
            if output:
                yield output
def run_trial(self, expr_str: str, arg_type_roots: Dict[str, Type],
              trial_desc: str) -> TrialStatus:
    expr = expr_str.format(*arg_type_roots.keys())
    typed_args = {
        name: gen_type(self.r, type_root)
        for name, type_root in arg_type_roots.items()
    }
    literal_args = {
        name: value_for_type(typ, self.r)
        for name, typ in typed_args.items()
    }

    def symbolic_checker(space: TrackingStateSpace) -> object:
        symbolic_args = {
            name: proxy_for_type(typ, name)
            for name, typ in typed_args.items()
        }
        for name in typed_args.keys():
            literal, symbolic = literal_args[name], symbolic_args[name]
            if isinstance(literal, (set, dict)):
                # We need not only equality, but equal ordering, because some
                # operations like pop() are order-dependent:
                if len(literal) != len(symbolic):
                    raise IgnoreAttempt(
                        f'symbolic "{name}" not equal to literal "{name}"')
                if isinstance(literal, set):
                    literal, symbolic = list(literal), list(symbolic)
                else:
                    literal, symbolic = list(literal.items()), list(
                        symbolic.items())
                if literal != symbolic:
                    raise IgnoreAttempt(
                        f'symbolic "{name}" not equal to literal "{name}"')
        return eval(expr, symbolic_args)

    with self.subTest(
            msg=f'Trial {trial_desc}: evaluating {expr} with {literal_args}'):
        debug(f' ===== {expr} with {literal_args} ===== ')
        compiled = compile(expr, '<string>', 'eval')
        literal_ret, literal_exc = self.runexpr(expr,
                                                copy.deepcopy(literal_args))
        symbolic_ret, symbolic_exc, space = self.symbolic_run(symbolic_checker)
        if isinstance(symbolic_exc, CrosshairUnsupported):
            return TrialStatus.UNSUPPORTED
        with StateSpaceContext(space):
            rets_differ = bool(literal_ret != symbolic_ret)
            if (rets_differ or type(literal_exc) != type(symbolic_exc)):
                debug(
                    f' ***** BEGIN FAILURE FOR {expr} WITH {literal_args} ***** ')
                debug(f' ***** Expected: {literal_ret} / {literal_exc}')
                debug(f' ***** Symbolic result: {symbolic_ret} / {symbolic_exc}')
                debug(f' ***** END FAILURE FOR {expr} ***** ')
                self.assertEqual((literal_ret, literal_exc),
                                 (symbolic_ret, symbolic_exc))
            debug(' OK ret= ', literal_ret, symbolic_ret)
            debug(' OK exc= ', literal_exc, symbolic_exc)
    return TrialStatus.NORMAL
def setUp(self):
    self.space_ctx_ = StateSpaceContext(SimpleStateSpace())
    self.space_ctx_.__enter__()
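A setUp that enters the context manually needs a matching tearDown to exit it; a minimal sketch, mirroring the tearDown used by the regular-expression test class below:

def tearDown(self):
    # Exit the state-space context entered in setUp.
    self.space_ctx_.__exit__(None, None, None)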
class RegularExpressionUnitTests(unittest.TestCase):
    def setUp(self):
        self.space_ctx_ = StateSpaceContext(SimpleStateSpace())
        self.space_ctx_.__enter__()

    def tearDown(self):
        self.space_ctx_.__exit__(None, None, None)

    def test_handle_simple(self):
        self.assertIsNotNone(eval_regex("abc", 0, "abc", 0))
        self.assertIsNone(eval_regex("abc", 0, "ab", 0))

    def test_handle_or(self):
        self.assertIsNotNone(eval_regex("a|bc", 0, "bc", 0))
        self.assertEqual(eval_regex("a|bc", 0, "bc", 0).span(), (0, 2))
        self.assertIsNotNone(eval_regex("a|bc", 0, "ab", 0))
        self.assertEqual(eval_regex("a|bc", 0, "ab", 0).span(), (0, 1))
        self.assertIsNone(eval_regex("a|bc", 0, "c", 0))
        self.assertIsNone(eval_regex("a|bc", 0, "bd", 0))

    def test_handle_start_markers(self):
        self.assertIsNotNone(eval_regex(r"^ab", 0, "abc", 0))
        self.assertIsNotNone(eval_regex(r"\Aab", 0, "abc", 0))
        with self.assertRaises(ReUnhandled):
            # Surprisingly!: re.compile('^bc').match('abc', 1) is None
            # Even more surprisingly, the end markers work differently.
            # We simply don't handle start markers with offset:
            self.assertIsNone(eval_regex(r"^bc", 0, "abc", 1))

    def test_handle_end_markers(self):
        self.assertIsNotNone(eval_regex(r"abc$", 0, "abc", 0))
        self.assertIsNotNone(eval_regex(r"abc$", 0, "abcd", 0, 3))
        self.assertIsNotNone(eval_regex(r"abc\Z", 0, "abc", 0))
        self.assertIsNotNone(eval_regex(r"abc\Z", re.MULTILINE, "abc", 0))
        with self.assertRaises(ReUnhandled):
            self.assertIsNone(eval_regex("abc$", re.MULTILINE, "abc", 0))

    def test_handle_range(self):
        self.assertIsNotNone(eval_regex("[a-z]7", 0, "b7", 0))
        self.assertIsNotNone(eval_regex("[a-z]7", 0, "z7", 0))
        self.assertIsNone(eval_regex("[a-z]7", 0, "A7", 0))

    def test_handle_ascii_wildcard(self):
        self.assertIsNotNone(eval_regex("1.2", re.A, "1x2", 0))
        self.assertIsNotNone(eval_regex("1.2", re.A, "1\x002", 0))
        self.assertIsNone(eval_regex("1.2", re.A, "111", 0))

    def test_handle_repeats(self):
        self.assertIsNotNone(eval_regex("a+a", 0, "aa", 0))
        self.assertEqual(eval_regex("s", 0, "ssss", 0).span(), (0, 1))
        self.assertEqual(eval_regex("ss", 0, "ssss", 0).span(), (0, 2))
        self.assertIsNotNone(eval_regex("s{1,2}x", 0, "sx", 0))
        self.assertIsNotNone(eval_regex("s{1,2}x", 0, "ssx", 0))
        self.assertIsNone(eval_regex("s{1,2}x", 0, "sssx", 0))
        self.assertIsNone(eval_regex("s{1,2}x", 0, "x", 0))
        self.assertIsNotNone(eval_regex("s{2,3}", 0, "ssss", 0))
        self.assertEqual(eval_regex("s{2,3}", 0, "ssss", 0).span(), (0, 3))
        self.assertIsNotNone(eval_regex("y*", 0, "y", 0))
        self.assertEqual(eval_regex("y*", 0, "y", 0).span(), (0, 1))
        self.assertIsNotNone(eval_regex("y*e+", 0, "ye", 0))
        self.assertIsNotNone(eval_regex("y*e", 0, "yye", 0))
        self.assertEqual(eval_regex("y*e", 0, "yye", 0).span(), (0, 3))
        self.assertIsNotNone(eval_regex("y*e+s{2,3}x", 0, "yessx", 0))
        self.assertIsNotNone(eval_regex("y*e+s{2,3}x", 0, "essx", 0))
        self.assertIsNone(eval_regex("y*e+s{2,3}x", 0, "yyessssx", 0))
        self.assertIsNone(eval_regex("y*e+s{2,3}x", 0, "yssx", 0))
        self.assertIsNone(eval_regex("y*e+s{2,3}x", 0, "ex", 0))

    def test_handle_ascii_numeric(self):
        self.assertIsNotNone(eval_regex(r"a\d", re.A, "a3", 0))
        self.assertIsNotNone(eval_regex(r"a\d", re.A, "a0", 0))
        self.assertIsNone(eval_regex(r"a\d", re.A, "a-", 0))

    def test_handle_noncapturing_group(self):
        self.assertIsNotNone(eval_regex("(?:a|b)c", 0, "ac", 0))
        self.assertIsNotNone(eval_regex("(?:a|b)c", 0, "bc", 0))
        self.assertIsNone(eval_regex("(?:a|b)c", 0, "a", 0))

    def test_handle_capturing_group(self):
        self.assertIsNotNone(eval_regex("(a|b)c", 0, "ac", 0))
        self.assertIsNone(eval_regex("(a|b)c", 0, "a", 0))
        self.assertEqual(eval_regex("(a|b)c", 0, "bc", 0).groups(), ("b",))

    def test_handle_named_groups(self):
        self.assertIsNotNone(eval_regex("(?P<foo>a|b)c", 0, "bc", 0))
        self.assertEqual(eval_regex("(?P<foo>a|b)c", 0, "bc", 0)["foo"], "b")

    def test_handle_nested_groups(self):
        self.assertIsNotNone(eval_regex("(a|b(xx))+(c)?", 0, "bxxc", 0))
        self.assertEqual(
            eval_regex("(bxx)(c)?", 0, "bxxc", 0).groups(), ("bxx", "c"))
        self.assertEqual(
            eval_regex("(a|b(xx))+(c)?", 0, "bxxc", 0).groups(),
            ("bxx", "xx", "c"))
        self.assertEqual(
            eval_regex("(a|b(xx))+(c)?", 0, "a", 0).groups(), ("a", None, None))

    def test_with_fuzzed_inputs(self) -> None:
        rand = random.Random(253209)

        def check(pattern, literal_string, offset):
            flags = re.ASCII | re.DOTALL
            sym_match = eval_regex(pattern, flags, literal_string, offset)
            py_match = re.compile(pattern, flags).match(literal_string, offset)
            if (sym_match is None) != (py_match is None):
                self.assertEqual(py_match, sym_match)
            if py_match is None:
                return
            self.assertEqual(py_match.span(), sym_match.span())
            self.assertEqual(py_match.group(0), sym_match.group(0))
            self.assertEqual(py_match.groups(), sym_match.groups())
            self.assertEqual(py_match.pos, sym_match.pos)
            self.assertEqual(py_match.endpos, sym_match.endpos)
            self.assertEqual(py_match.lastgroup, sym_match.lastgroup)

        for iter in range(100):
            literal_string = "".join(
                rand.choice(["a", "5", "_"])
                for _ in range(rand.choice([0, 1, 1, 2, 2, 3, 4])))
            pattern = "".join(
                rand.choice(["a", "5", "."]) + rand.choice(["", "", "+", "*"])
                for _ in range(rand.choice([0, 1, 1, 2, 2])))
            offset = rand.choice([0, 0, 0, 0, 1])
            with self.subTest(
                msg=f'Trial {iter}: evaluating pattern "{pattern}" against "{literal_string}" at {offset}'
            ):
                check(pattern, literal_string, offset)
def test_proxy_type(self) -> None:
    with StateSpaceContext(SimpleStateSpace()):
        poke = make_fake_object(Pokeable, 'ppoke')
        self.assertIs(type(poke), Pokeable)
def test_proxy_type(self) -> None:
    with StateSpaceContext(SimpleStateSpace()):
        poke = proxy_class_as_masquerade(Pokeable, "ppoke")
        self.assertIs(type(poke), Pokeable)
class RegularExpressionUnitTests(unittest.TestCase):
    def setUp(self):
        self.space_ctx_ = StateSpaceContext(SimpleStateSpace())
        self.space_ctx_.__enter__()

    def tearDown(self):
        self.space_ctx_.__exit__(None, None, None)

    def test_handle_simple(self):
        self.assertIsNotNone(eval_regex('abc', 0, 'abc', 0))
        self.assertIsNone(eval_regex('abc', 0, 'ab', 0))

    def test_handle_or(self):
        self.assertIsNotNone(eval_regex('a|bc', 0, 'bc', 0))
        self.assertEqual(eval_regex('a|bc', 0, 'bc', 0).span(), (0, 2))
        self.assertIsNotNone(eval_regex('a|bc', 0, 'ab', 0))
        self.assertEqual(eval_regex('a|bc', 0, 'ab', 0).span(), (0, 1))
        self.assertIsNone(eval_regex('a|bc', 0, 'c', 0))
        self.assertIsNone(eval_regex('a|bc', 0, 'bd', 0))

    def test_handle_range(self):
        self.assertIsNotNone(eval_regex('[a-z]7', 0, 'b7', 0))
        self.assertIsNotNone(eval_regex('[a-z]7', 0, 'z7', 0))
        self.assertIsNone(eval_regex('[a-z]7', 0, 'A7', 0))

    def test_handle_ascii_wildcard(self):
        self.assertIsNotNone(eval_regex('1.2', re.A, '1x2', 0))
        self.assertIsNotNone(eval_regex('1.2', re.A, '1\x002', 0))
        self.assertIsNone(eval_regex('1.2', re.A, '111', 0))

    def test_handle_repeats(self):
        self.assertIsNotNone(eval_regex('a+a', 0, 'aa', 0))
        self.assertEqual(eval_regex('s', 0, 'ssss', 0).span(), (0, 1))
        self.assertEqual(eval_regex('ss', 0, 'ssss', 0).span(), (0, 2))
        self.assertIsNotNone(eval_regex('s{1,2}x', 0, 'sx', 0))
        self.assertIsNotNone(eval_regex('s{1,2}x', 0, 'ssx', 0))
        self.assertIsNone(eval_regex('s{1,2}x', 0, 'sssx', 0))
        self.assertIsNone(eval_regex('s{1,2}x', 0, 'x', 0))
        self.assertIsNotNone(eval_regex('s{2,3}', 0, 'ssss', 0))
        self.assertEqual(eval_regex('s{2,3}', 0, 'ssss', 0).span(), (0, 3))
        self.assertIsNotNone(eval_regex('y*', 0, 'y', 0))
        self.assertEqual(eval_regex('y*', 0, 'y', 0).span(), (0, 1))
        self.assertIsNotNone(eval_regex('y*e+', 0, 'ye', 0))
        self.assertIsNotNone(eval_regex('y*e', 0, 'yye', 0))
        self.assertEqual(eval_regex('y*e', 0, 'yye', 0).span(), (0, 3))
        self.assertIsNotNone(eval_regex('y*e+s{2,3}x', 0, 'yessx', 0))
        self.assertIsNotNone(eval_regex('y*e+s{2,3}x', 0, 'essx', 0))
        self.assertIsNone(eval_regex('y*e+s{2,3}x', 0, 'yyessssx', 0))
        self.assertIsNone(eval_regex('y*e+s{2,3}x', 0, 'yssx', 0))
        self.assertIsNone(eval_regex('y*e+s{2,3}x', 0, 'ex', 0))

    def test_handle_ascii_numeric(self):
        self.assertIsNotNone(eval_regex(r'a\d', re.A, 'a3', 0))
        self.assertIsNotNone(eval_regex(r'a\d', re.A, 'a0', 0))
        self.assertIsNone(eval_regex(r'a\d', re.A, 'a-', 0))

    def test_handle_noncapturing_subgroup(self):
        self.assertIsNotNone(eval_regex('(?:a|b)c', 0, 'ac', 0))
        self.assertIsNotNone(eval_regex('(?:a|b)c', 0, 'bc', 0))
        self.assertIsNone(eval_regex('(?:a|b)c', 0, 'a', 0))

    def test_handle_capturing_subgroup(self):
        self.assertIsNotNone(eval_regex('(a|b)c', 0, 'ac', 0))
        self.assertIsNone(eval_regex('(a|b)c', 0, 'a', 0))
        self.assertEqual(eval_regex('(a|b)c', 0, 'bc', 0).groups(), ('b',))

    def test_handle_nested_subgroups(self):
        self.assertIsNotNone(eval_regex('(a|b(xx))+(c)?', 0, 'bxxc', 0))
        self.assertEqual(
            eval_regex('(bxx)(c)?', 0, 'bxxc', 0).groups(), ('bxx', 'c'))
        self.assertEqual(
            eval_regex('(a|b(xx))+(c)?', 0, 'bxxc', 0).groups(),
            ('bxx', 'xx', 'c'))
        self.assertEqual(
            eval_regex('(a|b(xx))+(c)?', 0, 'a', 0).groups(), ('a', None, None))

    def test_with_fuzzed_inputs(self) -> None:
        rand = random.Random(253209)

        def check(pattern, literal_string, offset):
            flags = re.ASCII | re.DOTALL
            sym_match = eval_regex(pattern, flags, literal_string, offset)
            py_match = re.compile(pattern, flags).match(literal_string, offset)
            if (sym_match is None) != (py_match is None):
                self.assertEqual(py_match, sym_match)
            if py_match is None:
                return
            self.assertEqual(py_match.span(), sym_match.span())
            self.assertEqual(py_match.group(0), sym_match.group(0))
            self.assertEqual(py_match.groups(), sym_match.groups())
            self.assertEqual(py_match.pos, sym_match.pos)
            self.assertEqual(py_match.endpos, sym_match.endpos)
            self.assertEqual(py_match.lastgroup, sym_match.lastgroup)

        for iter in range(100):
            literal_string = ''.join(
                rand.choice(['a', '5', '_'])
                for _ in range(rand.choice([0, 1, 1, 2, 2, 3, 4])))
            pattern = ''.join(
                rand.choice(['a', '5', '.']) + rand.choice(['', '', '+', '*'])
                for _ in range(rand.choice([0, 1, 1, 2, 2])))
            offset = rand.choice([0, 0, 0, 0, 1])
            with self.subTest(
                msg=f'Trial {iter}: evaluating pattern "{pattern}" against "{literal_string}" at {offset}'
            ):
                check(pattern, literal_string, offset)
def run_trial(self, expr_str: str, arg_type_roots: Dict[str, Type],
              trial_desc: str) -> TrialStatus:
    expr = expr_str.format(*arg_type_roots.keys())
    typed_args = {
        name: gen_type(self.r, type_root)
        for name, type_root in arg_type_roots.items()
    }
    literal_args = {
        name: value_for_type(typ, self.r) for name, typ in typed_args.items()
    }

    def symbolic_checker(space: StateSpace,
                         symbolic_args: Dict[str, object]) -> object:
        for name in typed_args.keys():
            literal, symbolic = literal_args[name], symbolic_args[name]
            if isinstance(literal, (set, frozenset, dict)):
                assert isinstance(symbolic, Sized)
                # We need not only equality, but equal ordering, because some
                # operations like pop() are order-dependent:
                if len(literal) != len(symbolic):
                    raise IgnoreAttempt(
                        f'symbolic "{name}" not equal to literal "{name}"')
                if isinstance(literal, Mapping):
                    assert isinstance(symbolic, Mapping)
                    literal, symbolic = list(literal.items()), list(
                        symbolic.items())
                else:
                    assert isinstance(symbolic, Iterable)
                    literal, symbolic = list(literal), list(symbolic)
                if literal != symbolic:
                    raise IgnoreAttempt(
                        f'symbolic "{name}" not equal to literal "{name}"')
        return eval(expr, symbolic_args.copy())

    with self.subTest(
            msg=f"Trial {trial_desc}: evaluating {expr} with {literal_args}"):
        debug(f" ===== {expr} with {literal_args} ===== ")
        compiled = compile(expr, "<string>", "eval")
        postexec_literal_args = copy.deepcopy(literal_args)
        literal_ret, literal_exc = self.runexpr(expr, postexec_literal_args)
        (
            symbolic_ret,
            postexec_symbolic_args,
            symbolic_exc,
            space,
        ) = self.symbolic_run(symbolic_checker, typed_args)
        if isinstance(symbolic_exc, CrosshairUnsupported):
            return TrialStatus.UNSUPPORTED
        with StateSpaceContext(space):
            # compare iterators as the values they produce:
            if isinstance(literal_ret, Iterable) and isinstance(
                    symbolic_ret, Iterable):
                literal_ret = list(literal_ret)
                symbolic_ret = list(symbolic_ret)
            rets_differ = bool(literal_ret != symbolic_ret)
            postexec_args_differ = bool(
                postexec_literal_args != postexec_symbolic_args)
            if (rets_differ or postexec_args_differ
                    or type(literal_exc) != type(symbolic_exc)):
                debug(
                    f" ***** BEGIN FAILURE FOR {expr} WITH {literal_args} ***** ")
                debug(f" ***** Expected: {literal_ret} / {literal_exc}")
                debug(f" ***** {postexec_literal_args}")
                debug(f" ***** Symbolic result: {symbolic_ret} / {symbolic_exc}")
                debug(f" ***** {postexec_symbolic_args}")
                debug(f" ***** END FAILURE FOR {expr} ***** ")
                self.assertEqual((literal_ret, literal_exc),
                                 (symbolic_ret, symbolic_exc))
            debug(" OK ret= ", literal_ret, symbolic_ret)
            debug(" OK exc= ", literal_exc, symbolic_exc)
    return TrialStatus.NORMAL
def analyze_calltree(options: AnalysisOptions,
                     conditions: Conditions) -> CallTreeAnalysis:
    fn = conditions.fn
    debug('Begin analyze calltree ', fn.__name__)

    all_messages = MessageCollector()
    search_root = SinglePathNode(True)
    space_exhausted = False
    failing_precondition: Optional[ConditionExpr] = (
        conditions.pre[0] if conditions.pre else None)
    failing_precondition_reason: str = ''
    num_confirmed_paths = 0

    _ = get_subclass_map()  # ensure loaded
    short_circuit = ShortCircuitingContext()
    top_analysis: Optional[CallAnalysis] = None
    enforced_conditions = EnforcedConditions(
        options.condition_parser(), fn_globals(fn), builtin_patches(),
        interceptor=short_circuit.make_interceptor)

    def in_symbolic_mode():
        space = optional_context_statespace()
        return space and not space.running_framework_code

    patched = Patched(in_symbolic_mode)
    with enforced_conditions, patched, enforced_conditions.disabled_enforcement():
        for i in itertools.count(1):
            start = time.monotonic()
            if start > options.deadline:
                debug('Exceeded condition timeout, stopping')
                break
            options.incr('num_paths')
            debug('Iteration ', i)
            space = TrackingStateSpace(
                execution_deadline=start + options.per_path_timeout,
                model_check_timeout=options.per_path_timeout / 2,
                search_root=search_root)
            try:
                # The real work happens here!:
                with StateSpaceContext(space):
                    call_analysis = attempt_call(conditions, fn, short_circuit,
                                                 enforced_conditions)
                if failing_precondition is not None:
                    cur_precondition = call_analysis.failing_precondition
                    if cur_precondition is None:
                        if call_analysis.verification_status is not None:
                            # We escaped all of the preconditions on this try:
                            failing_precondition = None
                    elif (cur_precondition.line == failing_precondition.line
                          and call_analysis.failing_precondition_reason):
                        failing_precondition_reason = call_analysis.failing_precondition_reason
                    elif cur_precondition.line > failing_precondition.line:
                        failing_precondition = cur_precondition
                        failing_precondition_reason = call_analysis.failing_precondition_reason
            except UnexploredPath:
                call_analysis = CallAnalysis(VerificationStatus.UNKNOWN)
            except IgnoreAttempt:
                call_analysis = CallAnalysis()
            status = call_analysis.verification_status
            if status == VerificationStatus.CONFIRMED:
                num_confirmed_paths += 1
            top_analysis, space_exhausted = space.bubble_status(call_analysis)
            overall_status = top_analysis.verification_status if top_analysis else None
            debug('Iter complete. Worst status found so far:',
                  overall_status.name if overall_status else 'None')
            if space_exhausted or overall_status == VerificationStatus.REFUTED:
                break

    top_analysis = search_root.child.get_result()
    if top_analysis.messages:
        #log = space.execution_log()
        all_messages.extend(
            replace(
                m,
                #execution_log=log,
                test_fn=fn.__qualname__,
                condition_src=conditions.post[0].expr_source)
            for m in top_analysis.messages)
    if top_analysis.verification_status is None:
        top_analysis.verification_status = VerificationStatus.UNKNOWN

    if failing_precondition:
        assert num_confirmed_paths == 0
        addl_ctx = (' ' + failing_precondition.addl_context
                    if failing_precondition.addl_context else '')
        message = f'Unable to meet precondition{addl_ctx}'
        if failing_precondition_reason:
            message += f' (possibly because {failing_precondition_reason}?)'
        all_messages.extend([
            AnalysisMessage(MessageType.PRE_UNSAT, message + '.',
                            failing_precondition.filename,
                            failing_precondition.line, 0, '')
        ])
        top_analysis = CallAnalysis(VerificationStatus.REFUTED)

    assert top_analysis.verification_status is not None
    debug(('Exhausted' if space_exhausted else 'Aborted'),
          ' calltree search with', top_analysis.verification_status.name,
          'and', len(all_messages.get()), 'messages.',
          'Number of iterations: ', i)
    return CallTreeAnalysis(
        messages=all_messages.get(),
        verification_status=top_analysis.verification_status,
        num_confirmed_paths=num_confirmed_paths)
def test_smtfloat_like_a_float(self):
    with StateSpaceContext(SimpleStateSpace()):
        self.assertEqual(type(SymbolicFloat(12)), float)
        self.assertEqual(SymbolicFloat(12), 12.0)