Example #1
def run_state_machine_as_test(state_machine_factory, settings=None):
    """Run a state machine definition as a test, either silently doing nothing
    or printing a minimal breaking program and raising an exception.

    state_machine_factory is anything which returns an instance of
    GenericStateMachine when called with no arguments - it can be a class or a
    function. settings will be used to control the execution of the test.
    """
    if settings is None:
        try:
            settings = state_machine_factory.TestCase.settings
            check_type(Settings, settings,
                       "state_machine_factory.TestCase.settings")
        except AttributeError:
            settings = Settings(deadline=None,
                                suppress_health_check=HealthCheck.all())
    check_type(Settings, settings, "settings")

    @settings
    @given(st.data())
    def run_state_machine(factory, data):
        machine = factory()
        check_type(GenericStateMachine, machine, "state_machine_factory()")
        data.conjecture_data.hypothesis_runner = machine

        n_steps = settings.stateful_step_count
        should_continue = cu.many(data.conjecture_data,
                                  min_size=1,
                                  max_size=n_steps,
                                  average_size=n_steps)

        print_steps = (current_build_context().is_final
                       or current_verbosity() >= Verbosity.debug)
        try:
            if print_steps:
                machine.print_start()
            machine.check_invariants()

            while should_continue.more():
                value = data.conjecture_data.draw(machine.steps())
                if print_steps:
                    machine.print_step(value)
                machine.execute_step(value)
                machine.check_invariants()
        finally:
            if print_steps:
                machine.print_end()
            machine.teardown()

    # Use a machine digest to identify stateful tests in the example database
    run_state_machine.hypothesis.inner_test._hypothesis_internal_add_digest = function_digest(
        state_machine_factory)
    # Copy some attributes so @seed and @reproduce_failure "just work"
    run_state_machine._hypothesis_internal_use_seed = getattr(
        state_machine_factory, "_hypothesis_internal_use_seed", None)
    run_state_machine._hypothesis_internal_use_reproduce_failure = getattr(
        state_machine_factory, "_hypothesis_internal_use_reproduce_failure",
        None)

    run_state_machine(state_machine_factory)
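
As a usage sketch (not part of the source above): a hypothetical GenericStateMachine subclass supplying the steps(), execute_step() and check_invariants() hooks this runner calls, written against the historical stateful API that this version of the code targets.

from hypothesis.stateful import GenericStateMachine, run_state_machine_as_test
import hypothesis.strategies as st

class SortedList(GenericStateMachine):
    def __init__(self):
        self.items = []

    def steps(self):
        # Strategy for the next step; the runner draws from this.
        return st.integers()

    def execute_step(self, step):
        self.items.append(step)
        self.items.sort()

    def check_invariants(self):
        # Checked before the first step and after every step, as above.
        assert self.items == sorted(self.items)

run_state_machine_as_test(SortedList)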
Example #2
def test_digest_is_stable_across_process_runs():
    # Hard coded as the only sensible way to check this doesn't change between
    # process runs. There's nothing special about these values. If you update
    # the code just update them to match.
    digest = function_digest(test_digests_are_reasonably_unique)
    print(repr(digest))
    assert digest == b'\x8d\x07\xdb\xe1\xbeC\x92\xec-\xb4PWj\x0c%\x87'
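
The stability pinned down by this test is what lets function_digest serve as a persistent database key. A minimal sketch, assuming function_digest is imported from its internal location, hypothesis.internal.reflection; the property function is illustrative.

from hypothesis.internal.reflection import function_digest

def some_property(x):
    return x == x

# Deterministic: the same function yields the same bytes on every call,
# and (as the test above asserts) across separate process runs.
assert function_digest(some_property) == function_digest(some_property)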
Example #3
def run_state_machine_as_test(state_machine_factory, settings=None):
    """Run a state machine definition as a test, either silently doing nothing
    or printing a minimal breaking program and raising an exception.

    state_machine_factory is anything which returns an instance of
    GenericStateMachine when called with no arguments - it can be a class or a
    function. settings will be used to control the execution of the test.
    """
    if settings is None:
        try:
            settings = state_machine_factory.TestCase.settings
            check_type(Settings, settings, "state_machine_factory.TestCase.settings")
        except AttributeError:
            settings = Settings(deadline=None, suppress_health_check=HealthCheck.all())
    check_type(Settings, settings, "settings")

    @settings
    @given(st.data())
    def run_state_machine(factory, data):
        machine = factory()
        check_type(GenericStateMachine, machine, "state_machine_factory()")
        data.conjecture_data.hypothesis_runner = machine

        n_steps = settings.stateful_step_count
        should_continue = cu.many(
            data.conjecture_data, min_size=1, max_size=n_steps, average_size=n_steps
        )

        print_steps = (
            current_build_context().is_final or current_verbosity() >= Verbosity.debug
        )
        try:
            if print_steps:
                machine.print_start()
            machine.check_invariants()

            while should_continue.more():
                value = data.conjecture_data.draw(machine.steps())
                if print_steps:
                    machine.print_step(value)
                machine.execute_step(value)
                machine.check_invariants()
        finally:
            if print_steps:
                machine.print_end()
            machine.teardown()

    # Use a machine digest to identify stateful tests in the example database
    run_state_machine.hypothesis.inner_test._hypothesis_internal_add_digest = function_digest(
        state_machine_factory
    )
    # Copy some attributes so @seed and @reproduce_failure "just work"
    run_state_machine._hypothesis_internal_use_seed = getattr(
        state_machine_factory, "_hypothesis_internal_use_seed", None
    )
    run_state_machine._hypothesis_internal_use_reproduce_failure = getattr(
        state_machine_factory, "_hypothesis_internal_use_reproduce_failure", None
    )

    run_state_machine(state_machine_factory)
Example #4
def find(specifier, condition, settings=None, random=None, storage=None):
    settings = settings or Settings(
        max_examples=2000,
        min_satisfying_examples=0,
        max_shrinks=2000,
    )

    search = strategy(specifier, settings)

    if storage is None and settings.database is not None:
        storage = settings.database.storage(
            u'find(%s)' %
            (binascii.hexlify(function_digest(condition)).decode(u'ascii'), ))

    random = random or Random()
    successful_examples = [0]

    def template_condition(template):
        with BuildContext():
            result = search.reify(template)
            success = condition(result)

        if success:
            successful_examples[0] += 1

        if not successful_examples[0]:
            verbose_report(lambda: u'Trying example %s' % (repr(result), ))
        elif success:
            if successful_examples[0] == 1:
                verbose_report(lambda: u'Found satisfying example %s' %
                               (repr(result), ))
            else:
                verbose_report(lambda: u'Shrunk example to %s' %
                               (repr(result), ))
        return success

    template_condition.__name__ = condition.__name__
    tracker = Tracker()

    try:
        template = best_satisfying_template(
            search,
            random,
            template_condition,
            settings,
            tracker=tracker,
            max_parameter_tries=2,
            storage=storage,
        )
        with BuildContext():
            return search.reify(template)
    except Timeout:
        raise
    except NoSuchExample:
        if search.template_upper_bound <= len(tracker):
            raise DefinitelyNoSuchExample(
                get_pretty_function_description(condition),
                search.template_upper_bound,
            )
        raise NoSuchExample(get_pretty_function_description(condition))
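
A usage sketch for this entry point: find() shrinks towards the minimal satisfying example, so the assertion below holds rather than returning an arbitrary large integer (assuming the public hypothesis.find and hypothesis.strategies APIs).

from hypothesis import find
import hypothesis.strategies as st

# The smallest integer satisfying the condition is 101.
assert find(st.integers(), lambda x: x > 100) == 101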
Example #5
def find(specifier, condition, settings=None, random=None, storage=None):
    settings = settings or Settings(
        max_examples=2000,
        min_satisfying_examples=0,
        max_shrinks=2000,
    )

    search = strategy(specifier, settings)

    if storage is None and settings.database is not None:
        storage = settings.database.storage(
            'find(%s)' % (
                binascii.hexlify(function_digest(condition)).decode('ascii'),
            )
        )

    random = random or Random()
    successful_examples = [0]

    def template_condition(template):
        result = search.reify(template)
        success = condition(result)

        if success:
            successful_examples[0] += 1

        if not successful_examples[0]:
            verbose_report(lambda: 'Trying example %s' % (
                repr(result),
            ))
        elif success:
            if successful_examples[0] == 1:
                verbose_report(lambda: 'Found satisfying example %s' % (
                    repr(result),
                ))
            else:
                verbose_report(lambda: 'Shrunk example to %s' % (
                    repr(result),
                ))
        return success

    template_condition.__name__ = condition.__name__
    tracker = Tracker()

    try:
        return search.reify(best_satisfying_template(
            search, random, template_condition, settings,
            tracker=tracker, max_parameter_tries=2,
            storage=storage,
        ))
    except Timeout:
        raise
    except NoSuchExample:
        if search.template_upper_bound <= len(tracker):
            raise DefinitelyNoSuchExample(
                get_pretty_function_description(condition),
                search.template_upper_bound,
            )
        raise NoSuchExample(get_pretty_function_description(condition))
Example #6
def get_random_for_wrapped_test(test, wrapped_test):
    settings = wrapped_test._hypothesis_internal_use_settings

    if wrapped_test._hypothesis_internal_use_seed is not None:
        return Random(wrapped_test._hypothesis_internal_use_seed)
    elif settings.derandomize:
        return Random(function_digest(test))
    else:
        return new_random()
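
Derandomization is the interesting branch here: the PRNG seed is derived from function_digest(test), so the same test source always replays the same examples. A sketch of how a user opts in, via the public API rather than this internal helper; the test body is illustrative.

from hypothesis import given, settings, strategies as st

@settings(derandomize=True)
@given(st.integers())
def test_addition_commutes(x):
    # Same generated examples on every run, with no stored seed needed.
    assert x + 1 == 1 + x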
Example #7
def get_random_for_wrapped_test(test, wrapped_test):
    settings = wrapped_test._hypothesis_internal_use_settings
    wrapped_test._hypothesis_internal_use_generated_seed = None

    if wrapped_test._hypothesis_internal_use_seed is not None:
        return Random(wrapped_test._hypothesis_internal_use_seed)
    elif settings.derandomize:
        return Random(int_from_bytes(function_digest(test)))
    elif global_force_seed is not None:
        return Random(global_force_seed)
    else:
        seed = rnd_module.getrandbits(128)
        wrapped_test._hypothesis_internal_use_generated_seed = seed
        return Random(seed)
Example #8
        def fuzz_one_input(
            buffer: Union[bytes, bytearray, memoryview, BinaryIO]
        ) -> Optional[bytes]:
            """Run the test as a fuzz target, driven with the `buffer` of bytes.

            Returns None if the buffer was invalid for the strategy, the
            canonical pruned bytes if it was valid, and leaves any exception
            raised by the test to propagate.

            Note: this feature is experimental and may change or be removed.
            """
            if isinstance(buffer, io.IOBase):
                buffer = buffer.read()
            if isinstance(buffer, (bytearray, memoryview)):
                buffer = bytes(buffer)
            assert isinstance(buffer, bytes)

            test = wrapped_test.hypothesis.inner_test
            settings = wrapped_test._hypothesis_internal_use_settings
            random = get_random_for_wrapped_test(test, wrapped_test)
            _args, _kwargs, test_runner, search_strategy = process_arguments_to_given(
                wrapped_test,
                (),
                {},
                given_kwargs,
                argspec,
                settings,
            )
            assert not _args
            assert not _kwargs
            state = StateForActualGivenExecution(
                test_runner,
                search_strategy,
                test,
                settings,
                random,
                wrapped_test,
            )

            data = ConjectureData.for_buffer(buffer)
            try:
                state.execute_once(data)
            except (StopTest, UnsatisfiedAssumption):
                return None
            except BaseException:
                if settings.database is not None:
                    settings.database.save(function_digest(test),
                                           bytes(data.buffer))
                raise
            return bytes(data.buffer)
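
A sketch of how this hook is wired into a fuzzing harness; the test function and the buffer contents are illustrative, and the feature is marked experimental in the docstring above.

from hypothesis import given, strategies as st

@given(st.text())
def test_utf8_round_trip(s):
    assert s.encode("utf-8").decode("utf-8") == s

# A fuzzer feeds raw bytes; None signals the buffer was invalid for the
# strategy, otherwise the canonical pruned buffer comes back.
test_utf8_round_trip.hypothesis.fuzz_one_input(bytes(100))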
Example #9
def find(
    specifier: SearchStrategy[Ex],
    condition: Callable[[Any], bool],
    *,
    settings: Settings = None,
    random: Random = None,
    database_key: bytes = None
) -> Ex:
    """Returns the minimal example from the given strategy ``specifier`` that
    matches the predicate function ``condition``."""
    if settings is None:
        settings = Settings(max_examples=2000)
    settings = Settings(
        settings, suppress_health_check=HealthCheck.all(), report_multiple_bugs=False
    )

    if database_key is None and settings.database is not None:
        database_key = function_digest(condition)

    if not isinstance(specifier, SearchStrategy):
        raise InvalidArgument(
            "Expected SearchStrategy but got %r of type %s"
            % (specifier, type(specifier).__name__)
        )
    specifier.validate()

    last = []  # type: List[Ex]

    @settings
    @given(specifier)
    def test(v):
        if condition(v):
            last[:] = [v]
            raise Found()

    if random is not None:
        test = seed(random.getrandbits(64))(test)

    test._hypothesis_internal_is_find = True
    test._hypothesis_internal_database_key = database_key

    try:
        test()
    except Found:
        return last[0]

    raise NoSuchExample(get_pretty_function_description(condition))
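
Since the default database key is function_digest(condition), editing the lambda's source changes the key. A sketch of pinning it explicitly via the database_key parameter shown in the signature above; the key bytes are arbitrary.

from hypothesis import find
import hypothesis.strategies as st

# Stable key regardless of how the predicate's source evolves.
find(st.integers(), lambda x: x > 100, database_key=b"my-find-key")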
Example #10
def find(specifier, condition, settings=None, random=None, storage=None):
    settings = settings or Settings(max_examples=2000, min_satisfying_examples=0, max_shrinks=2000)

    from hypothesis.internal.strategymethod import strategy

    search = strategy(specifier, settings)

    if storage is None and settings.database is not None:
        storage = settings.database.storage(
            u"find(%s)" % (binascii.hexlify(function_digest(condition)).decode(u"ascii"),)
        )

    random = random or new_random()
    successful_examples = [0]

    def template_condition(template):
        with BuildContext():
            result = search.reify(template)
            success = condition(result)

        if success:
            successful_examples[0] += 1

        if not successful_examples[0]:
            verbose_report(lambda: u"Trying example %s" % (repr(result),))
        elif success:
            if successful_examples[0] == 1:
                verbose_report(lambda: u"Found satisfying example %s" % (repr(result),))
            else:
                verbose_report(lambda: u"Shrunk example to %s" % (repr(result),))
        return success

    template_condition.__name__ = condition.__name__
    tracker = Tracker()

    try:
        template = best_satisfying_template(
            search, random, template_condition, settings, tracker=tracker, max_parameter_tries=2, storage=storage
        )
        with BuildContext(is_final=True, close_on_capture=False):
            return search.reify(template)
    except Timeout:
        raise
    except NoSuchExample:
        if search.template_upper_bound <= len(tracker):
            raise DefinitelyNoSuchExample(get_pretty_function_description(condition), search.template_upper_bound)
        raise NoSuchExample(get_pretty_function_description(condition))
Example #11
    def run_test_with_generator(test):
        if settings.derandomize:
            assert provided_random is None
            random = Random(
                function_digest(test)
            )
        else:
            random = provided_random or Random()

        original_argspec = inspect.getargspec(test)
        if generator_arguments and original_argspec.varargs:
            raise InvalidArgument(
                'varargs are not supported with positional arguments to @given'
            )
        extra_kwargs = [
            k for k in generator_kwargs if k not in original_argspec.args]
        if extra_kwargs and not original_argspec.keywords:
            raise InvalidArgument(
                '%s() got an unexpected keyword argument %r' % (
                    test.__name__,
                    extra_kwargs[0]
                ))
        if (
            len(generator_arguments) > len(original_argspec.args)
        ):
            raise InvalidArgument((
                'Too many positional arguments for %s() (got %d but'
                ' expected at most %d') % (
                    test.__name__, len(generator_arguments),
                    len(original_argspec.args)))
        arguments = original_argspec.args
        specifiers = list(generator_arguments)
        seen_kwarg = None
        for a in arguments:
            if a in generator_kwargs:
                seen_kwarg = seen_kwarg or a
                specifiers.append(generator_kwargs[a])
            else:
                if seen_kwarg is not None:
                    raise InvalidArgument((
                        'Argument %s comes after keyword %s which has been '
                        'specified, but does not itself have a '
                        'specification') % (
                        a, seen_kwarg
                    ))

        argspec = inspect.ArgSpec(
            args=arguments,
            keywords=original_argspec.keywords,
            varargs=original_argspec.varargs,
            defaults=tuple(map(HypothesisProvided, specifiers))
        )

        unused_kwargs = {}
        for k in extra_kwargs:
            unused_kwargs[k] = HypothesisProvided(generator_kwargs[k])

        @copy_argspec(
            test.__name__, argspec
        )
        def wrapped_test(*arguments, **kwargs):
            selfy = None
            arguments, kwargs = convert_positional_arguments(
                wrapped_test, arguments, kwargs)
            # Anything in unused_kwargs hasn't been injected through
            # argspec.defaults, so we need to add them.
            for k in unused_kwargs:
                if k not in kwargs:
                    kwargs[k] = unused_kwargs[k]
            # If the test function is a method of some kind, the bound object
            # will be the first named argument if there are any, otherwise the
            # first vararg (if any).
            if argspec.args:
                selfy = kwargs.get(argspec.args[0])
            elif arguments:
                selfy = arguments[0]
            if isinstance(selfy, HypothesisProvided):
                selfy = None
            test_runner = executor(selfy)

            for example in getattr(
                wrapped_test, 'hypothesis_explicit_examples', ()
            ):
                if example.args:
                    example_kwargs = dict(zip(
                        argspec.args[-len(example.args):], example.args
                    ))
                else:
                    example_kwargs = dict(example.kwargs)

                for k, v in kwargs.items():
                    if not isinstance(v, HypothesisProvided):
                        example_kwargs[k] = v
                # Note: Test may mutate arguments and we can't rerun explicit
                # examples, so we have to calculate the failure message at this
                # point rather than later.
                message_on_failure = 'Falsifying example: %s(%s)' % (
                    test.__name__, arg_string(test, arguments, example_kwargs)
                )
                try:
                    test_runner(
                        lambda: test(*arguments, **example_kwargs)
                    )
                except BaseException:
                    report(message_on_failure)
                    raise

            if not any(
                isinstance(x, HypothesisProvided)
                for xs in (arguments, kwargs.values())
                for x in xs
            ):
                # All arguments have been satisfied without needing to invoke
                # hypothesis
                test_runner(lambda: test(*arguments, **kwargs))
                return

            def convert_to_specifier(v):
                if isinstance(v, HypothesisProvided):
                    return strategy(v.value, settings)
                else:
                    return sd.just(v)

            given_specifier = sd.tuples(
                sd.tuples(*map(convert_to_specifier, arguments)),
                sd.fixed_dictionaries(dict(
                    (k, convert_to_specifier(v)) for (k, v) in kwargs.items()))
            )

            search_strategy = strategy(given_specifier, settings)

            if settings.database:
                storage = settings.database.storage(
                    fully_qualified_name(test))
            else:
                storage = None

            last_exception = [None]
            repr_for_last_exception = [None]

            def is_template_example(xs):
                record_repr = [None]
                try:
                    test_runner(reify_and_execute(
                        search_strategy, xs, test,
                        always_print=settings.max_shrinks <= 0,
                        record_repr=record_repr,
                    ))
                    return False
                except UnsatisfiedAssumption as e:
                    raise e
                except Exception as e:
                    if settings.max_shrinks <= 0:
                        raise e
                    last_exception[0] = traceback.format_exc()
                    repr_for_last_exception[0] = record_repr[0]
                    verbose_report(last_exception[0])
                    return True

            is_template_example.__name__ = test.__name__
            is_template_example.__qualname__ = qualname(test)

            falsifying_template = None
            try:
                falsifying_template = best_satisfying_template(
                    search_strategy, random, is_template_example,
                    settings, storage
                )
            except NoSuchExample:
                return

            assert last_exception[0] is not None

            with settings:
                test_runner(reify_and_execute(
                    search_strategy, falsifying_template, test,
                    print_example=True
                ))

                report(
                    'Failed to reproduce exception. Expected: \n' +
                    last_exception[0],
                )

                test_runner(reify_and_execute(
                    search_strategy, falsifying_template,
                    test_is_flaky(test, repr_for_last_exception[0]),
                    print_example=True
                ))

        wrapped_test.__name__ = test.__name__
        wrapped_test.__doc__ = test.__doc__
        wrapped_test.is_hypothesis_test = True
        wrapped_test.hypothesis_explicit_examples = getattr(
            test, 'hypothesis_explicit_examples', []
        )
        return wrapped_test
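
The loop over hypothesis_explicit_examples above is what powers the public @example decorator: explicit examples run through the same test_runner before any generated ones. A minimal sketch of the user-facing side; the test body is illustrative.

from hypothesis import example, given, strategies as st

@example(0)  # always executed, in addition to generated integers
@given(st.integers())
def test_abs_is_nonnegative(x):
    assert abs(x) >= 0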
Example #12
def run_state_machine_as_test(state_machine_factory, settings=None):
    """Run a state machine definition as a test, either silently doing nothing
    or printing a minimal breaking program and raising an exception.

    state_machine_factory is anything which returns an instance of
    RuleBasedStateMachine when called with no arguments - it can be a class or a
    function. settings will be used to control the execution of the test.
    """
    if settings is None:
        try:
            settings = state_machine_factory.TestCase.settings
            check_type(Settings, settings,
                       "state_machine_factory.TestCase.settings")
        except AttributeError:
            settings = Settings(deadline=None,
                                suppress_health_check=HealthCheck.all())
    check_type(Settings, settings, "settings")

    @settings
    @given(st.data())
    def run_state_machine(factory, data):
        machine = factory()
        if not isinstance(machine, _GenericStateMachine):
            raise InvalidArgument(
                "Expected RuleBasedStateMachine but state_machine_factory() "
                "returned %r (type=%s)" % (machine, type(machine).__name__))
        data.conjecture_data.hypothesis_runner = machine

        print_steps = (current_build_context().is_final
                       or current_verbosity() >= Verbosity.debug)
        try:
            if print_steps:
                machine.print_start()
            machine.check_invariants()
            max_steps = settings.stateful_step_count
            steps_run = 0

            cd = data.conjecture_data

            while True:
                # We basically always want to run the maximum number of steps,
                # but need to leave a small probability of terminating early
                # in order to allow for reducing the number of steps once we
                # find a failing test case, so we stop with probability of
                # 2 ** -16 during normal operation but force a stop when we've
                # generated enough steps.
                cd.start_example(STATE_MACHINE_RUN_LABEL)
                if steps_run == 0:
                    cd.draw_bits(16, forced=1)
                elif steps_run >= max_steps:
                    cd.draw_bits(16, forced=0)
                    break
                else:
                    # All we really care about is whether this value is zero
                    # or non-zero, so if it's > 1 we discard it and insert a
                    # replacement value after
                    cd.start_example(SHOULD_CONTINUE_LABEL)
                    should_continue_value = cd.draw_bits(16)
                    if should_continue_value > 1:
                        cd.stop_example(discard=True)
                        cd.draw_bits(16,
                                     forced=int(bool(should_continue_value)))
                    else:
                        cd.stop_example()
                        if should_continue_value == 0:
                            break
                steps_run += 1

                value = data.conjecture_data.draw(machine.steps())
                # Assign 'result' here in case 'execute_step' fails below
                result = multiple()
                try:
                    result = machine.execute_step(value)
                finally:
                    if print_steps:
                        # 'result' is only used if the step has target bundles.
                        # If it does, and the result is a 'MultipleResult',
                        # then 'print_step' prints a multi-variable assignment.
                        machine.print_step(value, result)
                machine.check_invariants()
                data.conjecture_data.stop_example()
        finally:
            if print_steps:
                machine.print_end()
            machine.teardown()

    # Use a machine digest to identify stateful tests in the example database
    run_state_machine.hypothesis.inner_test._hypothesis_internal_add_digest = function_digest(
        state_machine_factory)
    # Copy some attributes so @seed and @reproduce_failure "just work"
    run_state_machine._hypothesis_internal_use_seed = getattr(
        state_machine_factory, "_hypothesis_internal_use_seed", None)
    run_state_machine._hypothesis_internal_use_reproduce_failure = getattr(
        state_machine_factory, "_hypothesis_internal_use_reproduce_failure",
        None)
    run_state_machine._hypothesis_internal_print_given_args = False

    run_state_machine(state_machine_factory)
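
For the rule-based flavour this docstring names, a sketch against the public stateful API; the Counter machine and its bounds are illustrative.

from hypothesis.stateful import (
    RuleBasedStateMachine, invariant, rule, run_state_machine_as_test,
)
import hypothesis.strategies as st

class Counter(RuleBasedStateMachine):
    def __init__(self):
        super().__init__()
        self.total = 0

    @rule(n=st.integers(min_value=0, max_value=100))
    def add(self, n):
        self.total += n

    @invariant()
    def non_negative(self):
        # Checked before the first step and after every step, as above.
        assert self.total >= 0

run_state_machine_as_test(Counter)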
Example #13
def test_can_digest_a_function_with_no_name():
    def foo(x, y):
        pass

    function_digest(partial(foo, 1))
Example #14
def test_can_digest_a_built_in_function():
    import math

    assert function_digest(math.isnan) != function_digest(range)
Example #15
def test_digests_are_reasonably_unique():
    assert function_digest(test_simple_conversion) != function_digest(
        test_does_not_error_on_confused_sources)
Example #16
        def wrapped_test(*arguments, **kwargs):
            settings = wrapped_test._hypothesis_internal_use_settings
            if wrapped_test._hypothesis_internal_use_seed is not None:
                random = Random(wrapped_test._hypothesis_internal_use_seed)
            elif settings.derandomize:
                random = Random(function_digest(test))
            else:
                random = new_random()

            import hypothesis.strategies as sd

            selfy = None
            arguments, kwargs = convert_positional_arguments(wrapped_test, arguments, kwargs)

            # If the test function is a method of some kind, the bound object
            # will be the first named argument if there are any, otherwise the
            # first vararg (if any).
            if argspec.args:
                selfy = kwargs.get(argspec.args[0])
            elif arguments:
                selfy = arguments[0]
            test_runner = new_style_executor(selfy)

            for example in reversed(getattr(wrapped_test, "hypothesis_explicit_examples", ())):
                if example.args:
                    if len(example.args) > len(original_argspec.args):
                        raise InvalidArgument(
                            "example has too many arguments for test. "
                            "Expected at most %d but got %d" % (len(original_argspec.args), len(example.args))
                        )
                    example_kwargs = dict(zip(original_argspec.args[-len(example.args) :], example.args))
                else:
                    example_kwargs = example.kwargs
                if Phase.explicit not in settings.phases:
                    continue
                example_kwargs.update(kwargs)
                # Note: Test may mutate arguments and we can't rerun explicit
                # examples, so we have to calculate the failure message at this
                # point rather than later.
                message_on_failure = "Falsifying example: %s(%s)" % (
                    test.__name__,
                    arg_string(test, arguments, example_kwargs),
                )
                try:
                    with BuildContext() as b:
                        test_runner(None, lambda data: test(*arguments, **example_kwargs))
                except BaseException:
                    report(message_on_failure)
                    for n in b.notes:
                        report(n)
                    raise
            if settings.max_examples <= 0:
                return

            arguments = tuple(arguments)

            given_specifier = sd.tuples(
                sd.just(arguments), sd.fixed_dictionaries(generator_kwargs).map(lambda args: dict(args, **kwargs))
            )

            def fail_health_check(message, label):
                if label in settings.suppress_health_check:
                    return
                message += (
                    "\nSee https://hypothesis.readthedocs.io/en/latest/health"
                    "checks.html for more information about this. "
                )
                message += (
                    "If you want to disable just this health check, add %s "
                    "to the suppress_health_check settings for this test."
                ) % (label,)
                raise FailedHealthCheck(message)

            search_strategy = given_specifier
            if selfy is not None:
                search_strategy = WithRunner(search_strategy, selfy)

            search_strategy.validate()

            perform_health_check = settings.perform_health_check
            perform_health_check &= Settings.default.perform_health_check

            from hypothesis.internal.conjecture.data import TestData, Status, StopTest

            if not (Phase.reuse in settings.phases or Phase.generate in settings.phases):
                return

            if perform_health_check:
                health_check_random = Random(random.getrandbits(128))
                # We "pre warm" the health check with one draw to give it some
                # time to calculate any cached data. This prevents the case
                # where the first draw of the health check takes ages because
                # of loading unicode data the first time.
                data = TestData(
                    max_length=settings.buffer_size,
                    draw_bytes=lambda data, n, distribution: distribution(health_check_random, n),
                )
                with Settings(settings, verbosity=Verbosity.quiet):
                    try:
                        test_runner(data, reify_and_execute(search_strategy, lambda *args, **kwargs: None))
                    except BaseException:
                        pass
                count = 0
                overruns = 0
                filtered_draws = 0
                start = time.time()
                while count < 10 and time.time() < start + 1 and filtered_draws < 50 and overruns < 20:
                    try:
                        data = TestData(
                            max_length=settings.buffer_size,
                            draw_bytes=lambda data, n, distribution: distribution(health_check_random, n),
                        )
                        with Settings(settings, verbosity=Verbosity.quiet):
                            test_runner(data, reify_and_execute(search_strategy, lambda *args, **kwargs: None))
                        count += 1
                    except UnsatisfiedAssumption:
                        filtered_draws += 1
                    except StopTest:
                        if data.status == Status.INVALID:
                            filtered_draws += 1
                        else:
                            assert data.status == Status.OVERRUN
                            overruns += 1
                    except InvalidArgument:
                        raise
                    except Exception:
                        if HealthCheck.exception_in_generation in settings.suppress_health_check:
                            raise
                        report(traceback.format_exc())
                        if test_runner is default_new_style_executor:
                            fail_health_check(
                                "An exception occurred during data "
                                "generation in initial health check. "
                                "This indicates a bug in the strategy. "
                                "This could either be a Hypothesis bug or "
                                "an error in a function you've passed to "
                                "it to construct your data.",
                                HealthCheck.exception_in_generation,
                            )
                        else:
                            fail_health_check(
                                "An exception occurred during data "
                                "generation in initial health check. "
                                "This indicates a bug in the strategy. "
                                "This could either be a Hypothesis bug or "
                                "an error in a function you've passed to "
                                "it to construct your data. Additionally, "
                                "you have a custom executor, which means "
                                "that this could be your executor failing "
                                "to handle a function which returns None. ",
                                HealthCheck.exception_in_generation,
                            )
                if overruns >= 20 or (not count and overruns > 0):
                    fail_health_check(
                        (
                            "Examples routinely exceeded the max allowable size. "
                            "(%d examples overran while generating %d valid ones)"
                            ". Generating examples this large will usually lead to"
                            " bad results. You should try setting average_size or "
                            "max_size parameters on your collections and turning "
                            "max_leaves down on recursive() calls."
                        )
                        % (overruns, count),
                        HealthCheck.data_too_large,
                    )
                if filtered_draws >= 50 or (not count and filtered_draws > 0):
                    fail_health_check(
                        (
                            "It looks like your strategy is filtering out a lot "
                            "of data. Health check found %d filtered examples but "
                            "only %d good ones. This will make your tests much "
                            "slower, and also will probably distort the data "
                            "generation quite a lot. You should adapt your "
                            "strategy to filter less. This can also be caused by "
                            "a low max_leaves parameter in recursive() calls"
                        )
                        % (filtered_draws, count),
                        HealthCheck.filter_too_much,
                    )
                runtime = time.time() - start
                if runtime > 1.0 or count < 10:
                    fail_health_check(
                        (
                            "Data generation is extremely slow: Only produced "
                            "%d valid examples in %.2f seconds (%d invalid ones "
                            "and %d exceeded maximum size). Try decreasing "
                            "size of the data you're generating (with e.g."
                            "average_size or max_leaves parameters)."
                        )
                        % (count, runtime, filtered_draws, overruns),
                        HealthCheck.too_slow,
                    )
            last_exception = [None]
            repr_for_last_exception = [None]

            def evaluate_test_data(data):
                try:
                    result = test_runner(data, reify_and_execute(search_strategy, test))
                    if result is not None and settings.perform_health_check:
                        fail_health_check(
                            ("Tests run under @given should return None, but " "%s returned %r instead.")
                            % (test.__name__, result),
                            HealthCheck.return_value,
                        )
                    return False
                except UnsatisfiedAssumption:
                    data.mark_invalid()
                except (HypothesisDeprecationWarning, FailedHealthCheck, StopTest):
                    raise
                except Exception:
                    last_exception[0] = traceback.format_exc()
                    verbose_report(last_exception[0])
                    data.mark_interesting()

            from hypothesis.internal.conjecture.engine import TestRunner

            falsifying_example = None
            database_key = str_to_bytes(fully_qualified_name(test))
            start_time = time.time()
            runner = TestRunner(evaluate_test_data, settings=settings, random=random, database_key=database_key)
            runner.run()
            run_time = time.time() - start_time
            timed_out = settings.timeout > 0 and run_time >= settings.timeout
            if runner.last_data is None:
                return
            if runner.last_data.status == Status.INTERESTING:
                falsifying_example = runner.last_data.buffer
                if settings.database is not None:
                    settings.database.save(database_key, falsifying_example)
            else:
                if runner.valid_examples < min(settings.min_satisfying_examples, settings.max_examples):
                    if timed_out:
                        raise Timeout(
                            (
                                "Ran out of time before finding a satisfying "
                                "example for "
                                "%s. Only found %d examples in " + "%.2fs."
                            )
                            % (get_pretty_function_description(test), runner.valid_examples, run_time)
                        )
                    else:
                        raise Unsatisfiable(
                            (
                                "Unable to satisfy assumptions of hypothesis "
                                "%s. Only %d examples considered "
                                "satisfied assumptions"
                            )
                            % (get_pretty_function_description(test), runner.valid_examples)
                        )
                return

            assert last_exception[0] is not None

            try:
                with settings:
                    test_runner(
                        TestData.for_buffer(falsifying_example),
                        reify_and_execute(search_strategy, test, print_example=True, is_final=True),
                    )
            except (UnsatisfiedAssumption, StopTest):
                report(traceback.format_exc())
                raise Flaky(
                    "Unreliable assumption: An example which satisfied " "assumptions on the first run now fails it."
                )

            report("Failed to reproduce exception. Expected: \n" + last_exception[0])

            filter_message = (
                "Unreliable test data: Failed to reproduce a failure "
                "and then when it came to recreating the example in "
                "order to print the test data with a flaky result "
                "the example was filtered out (by e.g. a "
                "call to filter in your strategy) when we didn't "
                "expect it to be."
            )

            try:
                test_runner(
                    TestData.for_buffer(falsifying_example),
                    reify_and_execute(
                        search_strategy,
                        test_is_flaky(test, repr_for_last_exception[0]),
                        print_example=True,
                        is_final=True,
                    ),
                )
            except (UnsatisfiedAssumption, StopTest):
                raise Flaky(filter_message)
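
The fail_health_check helper above returns early when the label appears in settings.suppress_health_check. On the user side that corresponds to a sketch like this (the test body is illustrative):

from hypothesis import HealthCheck, given, settings, strategies as st

@settings(suppress_health_check=[HealthCheck.too_slow])
@given(st.lists(st.integers()))
def test_sorting_is_idempotent(xs):
    assert sorted(sorted(xs)) == sorted(xs)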
Example #17
def test_can_digest_a_unicode_lambda():
    function_digest(lambda x: '☃' in str(x))
Example #18
        def wrapped_test(*arguments, **kwargs):
            settings = wrapped_test._hypothesis_internal_use_settings
            if wrapped_test._hypothesis_internal_use_seed is not None:
                random = Random(
                    wrapped_test._hypothesis_internal_use_seed)
            elif settings.derandomize:
                random = Random(function_digest(test))
            else:
                random = new_random()

            import hypothesis.strategies as sd

            selfy = None
            arguments, kwargs = convert_positional_arguments(
                wrapped_test, arguments, kwargs)

            # If the test function is a method of some kind, the bound object
            # will be the first named argument if there are any, otherwise the
            # first vararg (if any).
            if argspec.args:
                selfy = kwargs.get(argspec.args[0])
            elif arguments:
                selfy = arguments[0]
            test_runner = new_style_executor(selfy)

            for example in reversed(getattr(
                wrapped_test, 'hypothesis_explicit_examples', ()
            )):
                if example.args:
                    if len(example.args) > len(original_argspec.args):
                        raise InvalidArgument(
                            'example has too many arguments for test. '
                            'Expected at most %d but got %d' % (
                                len(original_argspec.args), len(example.args)))
                    example_kwargs = dict(zip(
                        original_argspec.args[-len(example.args):],
                        example.args
                    ))
                else:
                    example_kwargs = example.kwargs
                example_kwargs.update(kwargs)
                # Note: Test may mutate arguments and we can't rerun explicit
                # examples, so we have to calculate the failure message at this
                # point rather than later.
                message_on_failure = 'Falsifying example: %s(%s)' % (
                    test.__name__, arg_string(test, arguments, example_kwargs)
                )
                try:
                    with BuildContext() as b:
                        test_runner(
                            None,
                            lambda data: test(*arguments, **example_kwargs)
                        )
                except BaseException:
                    report(message_on_failure)
                    for n in b.notes:
                        report(n)
                    raise
            if settings.max_examples <= 0:
                return

            arguments = tuple(arguments)

            given_specifier = sd.tuples(
                sd.just(arguments),
                sd.fixed_dictionaries(generator_kwargs).map(
                    lambda args: dict(args, **kwargs)
                )
            )

            def fail_health_check(message):
                message += (
                    '\nSee http://hypothesis.readthedocs.org/en/latest/health'
                    'checks.html for more information about this.'
                )
                raise FailedHealthCheck(message)

            search_strategy = given_specifier
            search_strategy.validate()

            perform_health_check = settings.perform_health_check
            perform_health_check &= Settings.default.perform_health_check

            from hypothesis.internal.conjecture.data import TestData, Status, \
                StopTest

            if perform_health_check:
                initial_state = getglobalrandomstate()
                health_check_random = Random(random.getrandbits(128))
                # We "pre warm" the health check with one draw to give it some
                # time to calculate any cached data. This prevents the case
                # where the first draw of the health check takes ages because
                # of loading unicode data the first time.
                data = TestData(
                    max_length=settings.buffer_size,
                    draw_bytes=lambda data, n, distribution:
                    distribution(health_check_random, n)
                )
                with Settings(settings, verbosity=Verbosity.quiet):
                    try:
                        test_runner(data, reify_and_execute(
                            search_strategy,
                            lambda *args, **kwargs: None,
                        ))
                    except BaseException:
                        pass
                count = 0
                overruns = 0
                filtered_draws = 0
                start = time.time()
                while (
                    count < 10 and time.time() < start + 1 and
                    filtered_draws < 50 and overruns < 20
                ):
                    try:
                        data = TestData(
                            max_length=settings.buffer_size,
                            draw_bytes=lambda data, n, distribution:
                            distribution(health_check_random, n)
                        )
                        with Settings(settings, verbosity=Verbosity.quiet):
                            test_runner(data, reify_and_execute(
                                search_strategy,
                                lambda *args, **kwargs: None,
                            ))
                        count += 1
                    except UnsatisfiedAssumption:
                        filtered_draws += 1
                    except StopTest:
                        if data.status == Status.INVALID:
                            filtered_draws += 1
                        else:
                            assert data.status == Status.OVERRUN
                            overruns += 1
                    except Exception:
                        report(traceback.format_exc())
                        if test_runner is default_new_style_executor:
                            fail_health_check(
                                'An exception occurred during data '
                                'generation in initial health check. '
                                'This indicates a bug in the strategy. '
                                'This could either be a Hypothesis bug or '
                                "an error in a function yo've passed to "
                                'it to construct your data.'
                            )
                        else:
                            fail_health_check(
                                'An exception occurred during data '
                                'generation in initial health check. '
                                'This indicates a bug in the strategy. '
                                'This could either be a Hypothesis bug or '
                                'an error in a function you\'ve passed to '
                                'it to construct your data. Additionally, '
                                'you have a custom executor, which means '
                                'that this could be your executor failing '
                                'to handle a function which returns None. '
                            )
                if overruns >= 20 or (
                    not count and overruns > 0
                ):
                    fail_health_check((
                        'Examples routinely exceeded the max allowable size. '
                        '(%d examples overran while generating %d valid ones)'
                        '. Generating examples this large will usually lead to'
                        ' bad results. You should try setting average_size or '
                        'max_size parameters on your collections and turning '
                        'max_leaves down on recursive() calls.') % (
                        overruns, count
                    ))
                if filtered_draws >= 50 or (
                    not count and filtered_draws > 0
                ):
                    fail_health_check((
                        'It looks like your strategy is filtering out a lot '
                        'of data. Health check found %d filtered examples but '
                        'only %d good ones. This will make your tests much '
                        'slower, and also will probably distort the data '
                        'generation quite a lot. You should adapt your '
                        'strategy to filter less. This can also be caused by '
                        'a low max_leaves parameter in recursive() calls') % (
                        filtered_draws, count
                    ))
                runtime = time.time() - start
                if runtime > 1.0 or count < 10:
                    fail_health_check((
                        'Data generation is extremely slow: Only produced '
                        '%d valid examples in %.2f seconds (%d invalid ones '
                        'and %d exceeded maximum size). Try decreasing '
                        "size of the data you're generating (with e.g."
                        'average_size or max_leaves parameters).'
                    ) % (count, runtime, filtered_draws, overruns))
                if getglobalrandomstate() != initial_state:
                    fail_health_check(
                        'Data generation depends on global random module. '
                        'This makes results impossible to replay, which '
                        'prevents Hypothesis from working correctly. '
                        'If you want to use methods from random, use '
                        'randoms() from hypothesis.strategies to get an '
                        'instance of Random you can use. Alternatively, you '
                        'can use the random_module() strategy to explicitly '
                        'seed the random module.'
                    )
            last_exception = [None]
            repr_for_last_exception = [None]
            performed_random_check = [False]

            def evaluate_test_data(data):
                if perform_health_check and not performed_random_check[0]:
                    initial_state = getglobalrandomstate()
                    performed_random_check[0] = True
                else:
                    initial_state = None
                try:
                    result = test_runner(data, reify_and_execute(
                        search_strategy, test,
                    ))
                    if result is not None and settings.perform_health_check:
                        raise FailedHealthCheck((
                            'Tests run under @given should return None, but '
                            '%s returned %r instead.'
                        ) % (test.__name__, result), settings)
                    return False
                except UnsatisfiedAssumption:
                    data.mark_invalid()
                except (
                    HypothesisDeprecationWarning, FailedHealthCheck,
                    StopTest,
                ):
                    raise
                except Exception:
                    last_exception[0] = traceback.format_exc()
                    verbose_report(last_exception[0])
                    data.mark_interesting()
                finally:
                    if (
                        initial_state is not None and
                        getglobalrandomstate() != initial_state
                    ):
                        fail_health_check(
                            'Your test used the global random module. '
                            'This is unlikely to work correctly. You should '
                            'consider using the randoms() strategy from '
                            'hypothesis.strategies instead. Alternatively, '
                            'you can use the random_module() strategy to '
                            'explicitly seed the random module.')

            from hypothesis.internal.conjecture.engine import TestRunner

            falsifying_example = None
            database_key = str_to_bytes(fully_qualified_name(test))
            start_time = time.time()
            runner = TestRunner(
                evaluate_test_data,
                settings=settings, random=random,
                database_key=database_key,
            )
            runner.run()
            run_time = time.time() - start_time
            timed_out = (
                settings.timeout > 0 and
                run_time >= settings.timeout
            )
            if runner.last_data.status == Status.INTERESTING:
                falsifying_example = runner.last_data.buffer
                if settings.database is not None:
                    settings.database.save(
                        database_key, falsifying_example
                    )
            else:
                if runner.valid_examples < min(
                    settings.min_satisfying_examples,
                    settings.max_examples,
                ):
                    if timed_out:
                        raise Timeout((
                            'Ran out of time before finding a satisfying '
                            'example for '
                            '%s. Only found %d examples in ' +
                            '%.2fs.'
                        ) % (
                            get_pretty_function_description(test),
                            runner.valid_examples, run_time
                        ))
                    else:
                        raise Unsatisfiable((
                            'Unable to satisfy assumptions of hypothesis '
                            '%s. Only %d examples considered '
                            'satisfied assumptions'
                        ) % (
                            get_pretty_function_description(test),
                            runner.valid_examples,))
                return

            assert last_exception[0] is not None

            try:
                with settings:
                    test_runner(
                        TestData.for_buffer(falsifying_example),
                        reify_and_execute(
                            search_strategy, test,
                            print_example=True, is_final=True
                        ))
            except (UnsatisfiedAssumption, StopTest):
                report(traceback.format_exc())
                raise Flaky(
                    'Unreliable assumption: An example which satisfied '
                    'assumptions on the first run now fails it.'
                )

            report(
                'Failed to reproduce exception. Expected: \n' +
                last_exception[0],
            )

            filter_message = (
                'Unreliable test data: Failed to reproduce a failure '
                'and then when it came to recreating the example in '
                'order to print the test data with a flaky result '
                'the example was filtered out (by e.g. a '
                'call to filter in your strategy) when we didn\'t '
                'expect it to be.'
            )

            try:
                test_runner(
                    TestData.for_buffer(falsifying_example),
                    reify_and_execute(
                        search_strategy,
                        test_is_flaky(test, repr_for_last_exception[0]),
                        print_example=True, is_final=True
                    ))
            except (UnsatisfiedAssumption, StopTest):
                raise Flaky(filter_message)
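
A hedged aside on the health-check message above: hypothesis.strategies exposes randoms() (and random_module()) as the supported alternatives to the global random module. A minimal usage sketch with the public API; the test name and body are illustrative:

from hypothesis import given, strategies as st

@given(st.randoms())
def test_shuffle_keeps_elements(rnd):
    # rnd is a Random instance drawn by Hypothesis, not the global module,
    # so runs stay replayable.
    items = [1, 2, 3, 4]
    shuffled = list(items)
    rnd.shuffle(shuffled)
    assert sorted(shuffled) == items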
Example #20
def find(specifier, condition, settings=None, random=None, database_key=None):
    settings = settings or Settings(
        max_examples=2000,
        min_satisfying_examples=0,
        max_shrinks=2000,
    )

    if database_key is None and settings.database is not None:
        database_key = function_digest(condition)

    if not isinstance(specifier, SearchStrategy):
        raise InvalidArgument(
            'Expected SearchStrategy but got %r of type %s' % (
                specifier, type(specifier).__name__
            ))
    specifier.validate()

    search = specifier

    random = random or new_random()
    successful_examples = [0]
    last_data = [None]

    def template_condition(data):
        with BuildContext(data):
            try:
                data.is_find = True
                result = data.draw(search)
                data.note(result)
                success = condition(result)
            except UnsatisfiedAssumption:
                data.mark_invalid()

        if success:
            successful_examples[0] += 1

        if settings.verbosity == Verbosity.verbose:
            if not successful_examples[0]:
                report(lambda: u'Trying example %s' % (
                    nicerepr(result),
                ))
            elif success:
                if successful_examples[0] == 1:
                    report(lambda: u'Found satisfying example %s' % (
                        nicerepr(result),
                    ))
                else:
                    report(lambda: u'Shrunk example to %s' % (
                        nicerepr(result),
                    ))
                last_data[0] = data
        if success and not data.frozen:
            data.mark_interesting()
    from hypothesis.internal.conjecture.engine import ConjectureRunner
    from hypothesis.internal.conjecture.data import ConjectureData, Status

    start = time.time()
    runner = ConjectureRunner(
        template_condition, settings=settings, random=random,
        database_key=database_key,
    )
    runner.run()
    note_engine_for_statistics(runner)
    run_time = time.time() - start
    if runner.last_data.status == Status.INTERESTING:
        data = ConjectureData.for_buffer(runner.last_data.buffer)
        with BuildContext(data):
            return data.draw(search)
    if runner.valid_examples <= settings.min_satisfying_examples:
        if settings.timeout > 0 and run_time > settings.timeout:
            raise Timeout((
                'Ran out of time before finding enough valid examples for '
                '%s. Only %d valid examples found in %.2f seconds.'
            ) % (
                get_pretty_function_description(condition),
                runner.valid_examples, run_time))

        else:
            raise Unsatisfiable((
                'Unable to satisfy assumptions of '
                '%s. Only %d examples considered satisfied assumptions'
            ) % (
                get_pretty_function_description(condition),
                runner.valid_examples,))

    raise NoSuchExample(get_pretty_function_description(condition))
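
A hedged usage sketch for the find() defined above; the strategy and predicate are illustrative:

from hypothesis import find, strategies as st

# find() searches for an example satisfying the condition, then shrinks it.
smallest = find(st.lists(st.integers()), lambda xs: sum(xs) >= 10)
# After shrinking this is typically a minimal list such as [10].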
Example #21
        def wrapped_test(*arguments, **kwargs):
            settings = wrapped_test._hypothesis_internal_use_settings
            if wrapped_test._hypothesis_internal_use_seed is not None:
                random = Random(
                    wrapped_test._hypothesis_internal_use_seed)
            elif settings.derandomize:
                random = Random(function_digest(test))
            else:
                random = new_random()

            import hypothesis.strategies as sd

            selfy = None
            arguments, kwargs = convert_positional_arguments(
                wrapped_test, arguments, kwargs)

            # If the test function is a method of some kind, the bound object
            # will be the first named argument if there are any, otherwise the
            # first vararg (if any).
            if argspec.args:
                selfy = kwargs.get(argspec.args[0])
            elif arguments:
                selfy = arguments[0]
            test_runner = new_style_executor(selfy)

            for example in reversed(getattr(
                wrapped_test, 'hypothesis_explicit_examples', ()
            )):
                if example.args:
                    if len(example.args) > len(original_argspec.args):
                        raise InvalidArgument(
                            'example has too many arguments for test. '
                            'Expected at most %d but got %d' % (
                                len(original_argspec.args), len(example.args)))
                    example_kwargs = dict(zip(
                        original_argspec.args[-len(example.args):],
                        example.args
                    ))
                else:
                    example_kwargs = example.kwargs
                if Phase.explicit not in settings.phases:
                    continue
                example_kwargs.update(kwargs)
                # Note: Test may mutate arguments and we can't rerun explicit
                # examples, so we have to calculate the failure message at this
                # point rather than later.
                message_on_failure = 'Falsifying example: %s(%s)' % (
                    test.__name__, arg_string(test, arguments, example_kwargs)
                )
                try:
                    with BuildContext(None) as b:
                        test_runner(
                            None,
                            lambda data: test(*arguments, **example_kwargs)
                        )
                except BaseException:
                    traceback.print_exc()
                    report(message_on_failure)
                    for n in b.notes:
                        report(n)
                    raise
            if settings.max_examples <= 0:
                return

            arguments = tuple(arguments)

            given_specifier = sd.tuples(
                sd.just(arguments),
                sd.fixed_dictionaries(generator_kwargs).map(
                    lambda args: dict(args, **kwargs)
                )
            )

            def fail_health_check(message, label):
                if label in settings.suppress_health_check:
                    return
                message += (
                    '\nSee https://hypothesis.readthedocs.io/en/latest/health'
                    'checks.html for more information about this. '
                )
                message += (
                    'If you want to disable just this health check, add %s '
                    'to the suppress_health_check settings for this test.'
                ) % (label,)
                raise FailedHealthCheck(message)

            search_strategy = given_specifier
            if selfy is not None:
                search_strategy = WithRunner(search_strategy, selfy)

            search_strategy.validate()

            perform_health_check = settings.perform_health_check
            perform_health_check &= Settings.default.perform_health_check

            from hypothesis.internal.conjecture.data import ConjectureData, \
                Status, StopTest
            if not (
                Phase.reuse in settings.phases or
                Phase.generate in settings.phases
            ):
                return

            if perform_health_check:
                health_check_random = Random(random.getrandbits(128))
                # We "pre warm" the health check with one draw to give it some
                # time to calculate any cached data. This prevents the case
                # where the first draw of the health check takes ages because
                # of loading unicode data the first time.
                data = ConjectureData(
                    max_length=settings.buffer_size,
                    draw_bytes=lambda data, n, distribution:
                    distribution(health_check_random, n)
                )
                with Settings(settings, verbosity=Verbosity.quiet):
                    try:
                        test_runner(data, reify_and_execute(
                            search_strategy,
                            lambda *args, **kwargs: None,
                        ))
                    except BaseException:
                        pass
                count = 0
                overruns = 0
                filtered_draws = 0
                start = time.time()
                while (
                    count < 10 and time.time() < start + 1 and
                    filtered_draws < 50 and overruns < 20
                ):
                    try:
                        data = ConjectureData(
                            max_length=settings.buffer_size,
                            draw_bytes=lambda data, n, distribution:
                            distribution(health_check_random, n)
                        )
                        with Settings(settings, verbosity=Verbosity.quiet):
                            test_runner(data, reify_and_execute(
                                search_strategy,
                                lambda *args, **kwargs: None,
                            ))
                        count += 1
                    except UnsatisfiedAssumption:
                        filtered_draws += 1
                    except StopTest:
                        if data.status == Status.INVALID:
                            filtered_draws += 1
                        else:
                            assert data.status == Status.OVERRUN
                            overruns += 1
                    except InvalidArgument:
                        raise
                    except Exception:
                        if (
                            HealthCheck.exception_in_generation in
                            settings.suppress_health_check
                        ):
                            raise
                        report(traceback.format_exc())
                        if test_runner is default_new_style_executor:
                            fail_health_check(
                                'An exception occurred during data '
                                'generation in initial health check. '
                                'This indicates a bug in the strategy. '
                                'This could either be a Hypothesis bug or '
                                "an error in a function you've passed to "
                                'it to construct your data.',
                                HealthCheck.exception_in_generation,
                            )
                        else:
                            fail_health_check(
                                'An exception occurred during data '
                                'generation in initial health check. '
                                'This indicates a bug in the strategy. '
                                'This could either be a Hypothesis bug or '
                                'an error in a function you\'ve passed to '
                                'it to construct your data. Additionally, '
                                'you have a custom executor, which means '
                                'that this could be your executor failing '
                                'to handle a function which returns None. ',
                                HealthCheck.exception_in_generation,
                            )
                if overruns >= 20 or (
                    not count and overruns > 0
                ):
                    fail_health_check((
                        'Examples routinely exceeded the max allowable size. '
                        '(%d examples overran while generating %d valid ones)'
                        '. Generating examples this large will usually lead to'
                        ' bad results. You should try setting average_size or '
                        'max_size parameters on your collections and turning '
                        'max_leaves down on recursive() calls.') % (
                        overruns, count
                    ), HealthCheck.data_too_large)
                if filtered_draws >= 50 or (
                    not count and filtered_draws > 0
                ):
                    fail_health_check((
                        'It looks like your strategy is filtering out a lot '
                        'of data. Health check found %d filtered examples but '
                        'only %d good ones. This will make your tests much '
                        'slower, and also will probably distort the data '
                        'generation quite a lot. You should adapt your '
                        'strategy to filter less. This can also be caused by '
                        'a low max_leaves parameter in recursive() calls.') % (
                        filtered_draws, count
                    ), HealthCheck.filter_too_much)
                runtime = time.time() - start
                if runtime > 1.0 or count < 10:
                    fail_health_check((
                        'Data generation is extremely slow: Only produced '
                        '%d valid examples in %.2f seconds (%d invalid ones '
                        'and %d exceeded maximum size). Try decreasing '
                        "size of the data you're generating (with e.g."
                        'average_size or max_leaves parameters).'
                    ) % (count, runtime, filtered_draws, overruns),
                        HealthCheck.too_slow,
                    )
            last_exception = [None]
            repr_for_last_exception = [None]

            def evaluate_test_data(data):
                try:
                    result = test_runner(data, reify_and_execute(
                        search_strategy, test,
                    ))
                    if result is not None and settings.perform_health_check:
                        fail_health_check((
                            'Tests run under @given should return None, but '
                            '%s returned %r instead.'
                        ) % (test.__name__, result), HealthCheck.return_value)
                    return False
                except UnsatisfiedAssumption:
                    data.mark_invalid()
                except (
                    HypothesisDeprecationWarning, FailedHealthCheck,
                    StopTest,
                ):
                    raise
                except Exception:
                    last_exception[0] = traceback.format_exc()
                    verbose_report(last_exception[0])
                    data.mark_interesting()

            from hypothesis.internal.conjecture.engine import ConjectureRunner

            falsifying_example = None
            database_key = str_to_bytes(fully_qualified_name(test))
            start_time = time.time()
            runner = ConjectureRunner(
                evaluate_test_data,
                settings=settings, random=random,
                database_key=database_key,
            )
            runner.run()
            note_engine_for_statistics(runner)
            run_time = time.time() - start_time
            timed_out = (
                settings.timeout > 0 and
                run_time >= settings.timeout
            )
            if runner.last_data is None:
                return
            if runner.last_data.status == Status.INTERESTING:
                falsifying_example = runner.last_data.buffer
                if settings.database is not None:
                    settings.database.save(
                        database_key, falsifying_example
                    )
            else:
                if runner.valid_examples < min(
                    settings.min_satisfying_examples,
                    settings.max_examples,
                ):
                    if timed_out:
                        raise Timeout((
                            'Ran out of time before finding a satisfying '
                            'example for '
                            '%s. Only found %d examples in %.2fs.'
                        ) % (
                            get_pretty_function_description(test),
                            runner.valid_examples, run_time
                        ))
                    else:
                        raise Unsatisfiable((
                            'Unable to satisfy assumptions of hypothesis '
                            '%s. Only %d examples considered '
                            'satisfied assumptions'
                        ) % (
                            get_pretty_function_description(test),
                            runner.valid_examples,))
                return

            assert last_exception[0] is not None

            try:
                with settings:
                    test_runner(
                        ConjectureData.for_buffer(falsifying_example),
                        reify_and_execute(
                            search_strategy, test,
                            print_example=True, is_final=True
                        ))
            except (UnsatisfiedAssumption, StopTest):
                report(traceback.format_exc())
                raise Flaky(
                    'Unreliable assumption: An example which satisfied '
                    'assumptions on the first run now fails it.'
                )

            report(
                'Failed to reproduce exception. Expected: \n' +
                last_exception[0],
            )

            filter_message = (
                'Unreliable test data: Failed to reproduce a failure '
                'and then when it came to recreating the example in '
                'order to print the test data with a flaky result '
                'the example was filtered out (by e.g. a '
                'call to filter in your strategy) when we didn\'t '
                'expect it to be.'
            )

            try:
                test_runner(
                    ConjectureData.for_buffer(falsifying_example),
                    reify_and_execute(
                        search_strategy,
                        test_is_flaky(test, repr_for_last_exception[0]),
                        print_example=True, is_final=True
                    ))
            except (UnsatisfiedAssumption, StopTest):
                raise Flaky(filter_message)
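
The fail_health_check() helper above returns early when its label is listed in suppress_health_check. A minimal sketch of that suppression via the public settings API; the test body is illustrative:

from hypothesis import HealthCheck, given, settings, strategies as st

@settings(suppress_health_check=[HealthCheck.too_slow])  # silence this check only
@given(st.integers())
def test_tolerates_slow_generation(x):
    assert isinstance(x, int)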
Example #22
    def run(self):
        # Tell pytest to omit the body of this function from tracebacks
        __tracebackhide__ = True
        if global_force_seed is None:
            database_key = function_digest(self.test)
        else:
            database_key = None
        runner = ConjectureRunner(
            self.evaluate_test_data,
            settings=self.settings,
            random=self.random,
            database_key=database_key,
        )
        try:
            runner.run()
        finally:
            self.used_examples_from_database = runner.used_examples_from_database
        note_engine_for_statistics(runner)

        if runner.call_count == 0:
            return
        if runner.interesting_examples:
            self.falsifying_examples = sorted(
                [d for d in runner.interesting_examples.values()],
                key=lambda d: sort_key(d.buffer),
                reverse=True,
            )
        else:
            if runner.valid_examples == 0:
                raise Unsatisfiable(
                    "Unable to satisfy assumptions of hypothesis %s."
                    % (get_pretty_function_description(self.test),)
                )

        if not self.falsifying_examples:
            return
        elif not self.settings.report_multiple_bugs:
            del self.falsifying_examples[:-1]

        self.failed_normally = True

        flaky = 0

        for falsifying_example in self.falsifying_examples:
            info = falsifying_example.extra_information

            ran_example = ConjectureData.for_buffer(falsifying_example.buffer)
            self.__was_flaky = False
            assert info.__expected_exception is not None
            try:
                self.execute(
                    ran_example,
                    print_example=True,
                    is_final=True,
                    expected_failure=(
                        info.__expected_exception,
                        info.__expected_traceback,
                    ),
                )
            except (UnsatisfiedAssumption, StopTest):
                report(traceback.format_exc())
                self.__flaky(
                    "Unreliable assumption: An example which satisfied "
                    "assumptions on the first run now fails it."
                )
            except BaseException as e:
                if len(self.falsifying_examples) <= 1:
                    raise
                tb = get_trimmed_traceback()
                report("".join(traceback.format_exception(type(e), e, tb)))
            finally:  # pragma: no cover
                # This section is in fact entirely covered by the tests in
                # test_reproduce_failure, but it seems to trigger a lovely set
                # of coverage bugs: The branches show up as uncovered (despite
                # definitely being covered - you can add an assert False else
                # branch to verify this and see it fail) - and additionally the
                # second branch still complains about lack of coverage even if
                # you add a pragma: no cover to it!
                # See https://bitbucket.org/ned/coveragepy/issues/623/
                if self.settings.print_blob is not PrintSettings.NEVER:
                    failure_blob = encode_failure(falsifying_example.buffer)
                    # Have to use the example we actually ran, not the original
                    # falsifying example! Otherwise we won't catch problems
                    # where the repr of the generated example doesn't parse.
                    can_use_repr = ran_example.can_reproduce_example_from_repr
                    if self.settings.print_blob is PrintSettings.ALWAYS or (
                        self.settings.print_blob is PrintSettings.INFER
                        and self.settings.verbosity >= Verbosity.normal
                        and not can_use_repr
                        and len(failure_blob) < 200
                    ):
                        report(
                            (
                                "\nYou can reproduce this example by temporarily "
                                "adding @reproduce_failure(%r, %r) as a decorator "
                                "on your test case"
                            )
                            % (__version__, failure_blob)
                        )
            if self.__was_flaky:
                flaky += 1

        # If we only have one example then we should have raised an error or
        # flaky prior to this point.
        assert len(self.falsifying_examples) > 1

        if flaky > 0:
            raise Flaky(
                (
                    "Hypothesis found %d distinct failures, but %d of them "
                    "exhibited some sort of flaky behaviour."
                )
                % (len(self.falsifying_examples), flaky)
            )
        else:
            raise MultipleFailures(
                ("Hypothesis found %d distinct failures.")
                % (len(self.falsifying_examples))
            )
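
The sort above orders failures by sort_key on each shrunk buffer. A hedged sketch of the shortlex ordering this is generally understood to be (not the library's own code): shorter buffers are simpler, with ties broken bytewise:

def sort_key_sketch(buffer):
    # Shortlex: compare by length first, then lexicographically.
    return (len(buffer), buffer)

assert sort_key_sketch(b'\x00') < sort_key_sketch(b'\x01')
assert sort_key_sketch(b'\x01') < sort_key_sketch(b'\x00\x00')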
Example #23
def test_digests_are_reasonably_unique():
    assert (
        function_digest(test_simple_conversion) !=
        function_digest(test_does_not_error_on_confused_sources)
    )
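
function_digest() is used throughout these examples to derive database keys and derandomized seeds. A hedged sketch of the idea only (hash the function's name plus its source where available); the real implementation in hypothesis.internal.reflection differs in detail:

import hashlib
import inspect

def function_digest_sketch(f):
    hasher = hashlib.md5()
    hasher.update(f.__name__.encode('utf-8'))
    try:
        hasher.update(inspect.getsource(f).encode('utf-8'))
    except (OSError, TypeError):
        # No retrievable source (e.g. eval'd lambdas): hash the name alone.
        pass
    return hasher.digest()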
Example #24
    def run_test_with_generator(test):
        if settings.derandomize:
            assert provided_random is None
            random = Random(function_digest(test))
        else:
            random = provided_random or Random()

        original_argspec = inspect.getargspec(test)
        if original_argspec.varargs:
            raise InvalidArgument('varargs are not supported with @given')
        extra_kwargs = [
            k for k in generator_kwargs if k not in original_argspec.args
        ]
        if extra_kwargs and not original_argspec.keywords:
            raise InvalidArgument(
                '%s() got an unexpected keyword argument %r' %
                (test.__name__, extra_kwargs[0]))
        if (len(generator_arguments) > len(original_argspec.args)):
            raise InvalidArgument(
                ('Too many positional arguments for %s() (got %d but'
                 ' expected at most %d)') %
                (test.__name__, len(generator_arguments),
                 len(original_argspec.args)))
        arguments = original_argspec.args + sorted(extra_kwargs)
        specifiers = list(generator_arguments)
        seen_kwarg = None
        for a in arguments:
            if a in generator_kwargs:
                seen_kwarg = seen_kwarg or a
                specifiers.append(generator_kwargs[a])
            else:
                if seen_kwarg is not None:
                    raise InvalidArgument(
                        ('Argument %s comes after keyword %s which has been '
                         'specified, but does not itself have a '
                         'specification') % (a, seen_kwarg))

        argspec = inspect.ArgSpec(args=arguments,
                                  keywords=original_argspec.keywords,
                                  varargs=original_argspec.varargs,
                                  defaults=tuple(
                                      map(HypothesisProvided, specifiers)))

        @copy_argspec(test.__name__, argspec)
        def wrapped_test(*arguments, **kwargs):
            selfy = None
            # Because we converted all kwargs to given into real args and
            # error if we have neither args nor kwargs, this should always
            # be valid
            assert argspec.args
            selfy = kwargs.get(argspec.args[0])
            if isinstance(selfy, HypothesisProvided):
                selfy = None
            test_runner = executor(selfy)

            for example in getattr(wrapped_test,
                                   'hypothesis_explicit_examples', ()):
                if example.args:
                    example_kwargs = dict(
                        zip(argspec.args[-len(example.args):], example.args))
                else:
                    example_kwargs = dict(example.kwargs)

                for k, v in kwargs.items():
                    if not isinstance(v, HypothesisProvided):
                        example_kwargs[k] = v

                test_runner(lambda: test(*arguments, **example_kwargs))

            if not any(
                    isinstance(x, HypothesisProvided)
                    for xs in (arguments, kwargs.values()) for x in xs):
                # All arguments have been satisfied without needing to invoke
                # hypothesis
                test_runner(lambda: test(*arguments, **kwargs))
                return

            def convert_to_specifier(v):
                if isinstance(v, HypothesisProvided):
                    return v.value
                else:
                    return just(v)

            given_specifier = (tuple(map(convert_to_specifier, arguments)),
                               dict([(k, convert_to_specifier(v))
                                     for (k, v) in kwargs.items()]))

            search_strategy = strategy(given_specifier, settings)

            if settings.database:
                storage = settings.database.storage_for(
                    given_specifier, search_strategy)
            else:
                storage = None

            def is_template_example(xs):
                try:
                    test_runner(reify_and_execute(search_strategy, xs, test))
                    return False
                except UnsatisfiedAssumption as e:
                    raise e
                except Exception:
                    return True

            is_template_example.__name__ = test.__name__
            is_template_example.__qualname__ = getattr(test, '__qualname__',
                                                       test.__name__)

            falsifying_template = None
            try:
                falsifying_template = best_satisfying_template(
                    search_strategy, random, is_template_example, settings,
                    storage)
            except NoSuchExample:
                return

            test_runner(
                reify_and_execute(search_strategy,
                                  falsifying_template,
                                  test,
                                  print_example=True))

            test_runner(
                reify_and_execute(search_strategy,
                                  falsifying_template,
                                  test_is_flaky(test),
                                  print_example=True))

        wrapped_test.__name__ = test.__name__
        wrapped_test.__doc__ = test.__doc__
        wrapped_test.is_hypothesis_test = True
        wrapped_test.hypothesis_explicit_examples = getattr(
            test, 'hypothesis_explicit_examples', [])
        return wrapped_test
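
The hypothesis_explicit_examples attribute consumed above is populated by the public @example decorator. A minimal usage sketch; the test body is illustrative:

from hypothesis import example, given, strategies as st

@given(st.integers())
@example(0)  # always run x=0 in addition to generated examples
def test_abs_is_non_negative(x):
    assert abs(x) >= 0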
Example #25
def test_digest_returns_the_same_value_for_two_calls():
    assert (
        function_digest(test_simple_conversion) ==
        function_digest(test_simple_conversion)
    )
Example #26
def find(specifier, condition, settings=None, random=None, storage=None):
    settings = settings or Settings(
        max_examples=2000,
        min_satisfying_examples=0,
        max_shrinks=2000,
    )

    if not isinstance(specifier, SearchStrategy):
        raise InvalidArgument(
            'Expected SearchStrategy but got %r of type %s' % (
                specifier, type(specifier).__name__
            ))

    search = specifier

    if storage is None and settings.database is not None:
        storage = settings.database.storage(
            'find(%s)' % (
                binascii.hexlify(function_digest(condition)).decode('ascii'),
            )
        )

    random = random or new_random()
    successful_examples = [0]

    def template_condition(template):
        with BuildContext():
            result = search.reify(template)
            success = condition(result)

        if success:
            successful_examples[0] += 1

        if not successful_examples[0]:
            verbose_report(lambda: 'Trying example %s' % (
                repr(result),
            ))
        elif success:
            if successful_examples[0] == 1:
                verbose_report(lambda: 'Found satisfying example %s' % (
                    repr(result),
                ))
            else:
                verbose_report(lambda: 'Shrunk example to %s' % (
                    repr(result),
                ))
        return success

    template_condition.__name__ = condition.__name__
    tracker = Tracker()

    try:
        template = best_satisfying_template(
            search, random, template_condition, settings,
            tracker=tracker, max_parameter_tries=2,
            storage=storage,
        )
        with BuildContext(is_final=True, close_on_capture=False):
            return search.reify(template)
    except Timeout:
        raise
    except NoSuchExample:
        if search.template_upper_bound <= len(tracker):
            raise DefinitelyNoSuchExample(
                get_pretty_function_description(condition),
                search.template_upper_bound,
            )
        raise NoSuchExample(get_pretty_function_description(condition))
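
A hedged illustration of the exhaustion branch above: with a small finite strategy, an always-false condition can enumerate every template, in which case this template-based find raises DefinitelyNoSuchExample rather than plain NoSuchExample. The names below are the ones from this snippet's era, not today's public API:

import hypothesis.strategies as st

try:
    find(st.booleans(), lambda x: False)  # only two templates to try
except DefinitelyNoSuchExample:
    pass  # the whole search space was enumerated without a match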
Example #27
def test_evalled_lambda_digests_are_stable_across_process_runs():
    digest = function_digest(eval('lambda x: 42'))
    print(repr(digest))
    assert digest == b'\x86\x95\xf2\xab\x8a\x83\x91F|1\xbdi\x05\xdeC?'
Example #28
    def falsify(self, hypothesis, *argument_types, **kwargs):  # pylint: disable=too-many-locals,too-many-branches
        """
        Attempt to construct an example tuple x matching argument_types such
        that hypothesis(*x) returns a falsey value
        """
        teardown_example = kwargs.get('teardown_example') or (lambda x: None)
        setup_example = kwargs.get('setup_example') or (lambda: None)
        random = self.random
        if random is None:
            random = Random(function_digest(hypothesis))

        build_context = BuildContext(random)

        search_strategy = strategy(argument_types, self.settings)
        storage = None
        if self.database is not None:
            storage = self.database.storage_for(argument_types)

        def falsifies(args):  # pylint: disable=missing-docstring
            example = None
            try:
                try:
                    setup_example()
                    example = search_strategy.reify(args)
                    return not hypothesis(*example)
                except UnsatisfiedAssumption:
                    return False
            finally:
                teardown_example(example)

        track_seen = Tracker()
        falsifying_examples = []
        if storage:
            for example in storage.fetch():
                track_seen.track(example)
                if falsifies(example):
                    # Stop at the first stored example that still fails.
                    falsifying_examples = [example]
                    break

        satisfying_examples = 0
        timed_out = False
        max_examples = self.max_examples
        min_satisfying_examples = self.min_satisfying_examples

        parameter_source = ParameterSource(context=build_context,
                                           strategy=search_strategy,
                                           min_parameters=max(
                                               2,
                                               int(float(max_examples) / 10)))
        start_time = time.time()

        def time_to_call_it_a_day():
            """Have we exceeded our timeout?"""
            if self.timeout <= 0:
                return False
            return time.time() >= start_time + self.timeout

        for parameter in islice(parameter_source,
                                max_examples - len(track_seen)):
            if len(track_seen) >= search_strategy.size_upper_bound:
                break

            if falsifying_examples:
                break
            if time_to_call_it_a_day():
                break

            args = search_strategy.produce_template(build_context, parameter)

            if track_seen.track(args) > 1:
                parameter_source.mark_bad()
                continue
            try:
                setup_example()
                a = None
                try:
                    a = search_strategy.reify(args)
                    is_falsifying_example = not hypothesis(*a)
                finally:
                    teardown_example(a)
            except UnsatisfiedAssumption:
                parameter_source.mark_bad()
                continue
            satisfying_examples += 1
            if is_falsifying_example:
                falsifying_examples.append(args)
        run_time = time.time() - start_time
        timed_out = self.timeout >= 0 and run_time >= self.timeout
        if not falsifying_examples:
            if (satisfying_examples
                    and len(track_seen) >= search_strategy.size_lower_bound):
                raise Exhausted(hypothesis, satisfying_examples)
            elif satisfying_examples < min_satisfying_examples:
                if timed_out:
                    raise Timeout(hypothesis, satisfying_examples, run_time)
                else:
                    raise Unsatisfiable(hypothesis, satisfying_examples,
                                        run_time)
            else:
                raise Unfalsifiable(hypothesis)

        for example in falsifying_examples:
            if not falsifies(example):
                raise Flaky(hypothesis, example)

        best_example = falsifying_examples[0]

        for simpler in search_strategy.simplify_such_that(
                random,
                best_example,
                falsifies,
                tracker=track_seen,
        ):
            best_example = simpler
            if time_to_call_it_a_day():
                # We exclude this from coverage because it's a bit sensitive to timing
                # and tends to make tests flaky. There are tests that mean
                # this is definitely covered most of the time.
                break  # pragma: no cover

        if storage is not None:
            storage.save(best_example)

        setup_example()
        return search_strategy.reify(best_example)
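
The falsifies() helper above treats a filtered-out input (UnsatisfiedAssumption) as "not a counterexample". A self-contained sketch of that contract; the exception class here is a stand-in, not Hypothesis's own:

class UnsatisfiedAssumptionDemo(Exception):
    pass

def falsifies_sketch(hypothesis, example):
    # A counterexample makes the hypothesis falsey; inputs rejected by an
    # assumption never count against it.
    try:
        return not hypothesis(*example)
    except UnsatisfiedAssumptionDemo:
        return False

assert falsifies_sketch(lambda x: x < 100, (100,))
assert not falsifies_sketch(lambda x: x < 100, (5,))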
Example #29
def find(specifier, condition, settings=None, random=None, database_key=None):
    settings = settings or Settings(max_examples=2000, min_satisfying_examples=0, max_shrinks=2000)

    if database_key is None and settings.database is not None:
        database_key = function_digest(condition)

    if not isinstance(specifier, SearchStrategy):
        raise InvalidArgument("Expected SearchStrategy but got %r of type %s" % (specifier, type(specifier).__name__))

    search = specifier

    random = random or new_random()
    successful_examples = [0]
    last_data = [None]

    def template_condition(data):
        with BuildContext():
            try:
                data.is_find = True
                result = data.draw(search)
                data.note(result)
                success = condition(result)
            except UnsatisfiedAssumption:
                data.mark_invalid()

        if success:
            successful_examples[0] += 1

        if settings.verbosity == Verbosity.verbose:
            if not successful_examples[0]:
                report(lambda: u"Trying example %s" % (nicerepr(result),))
            elif success:
                if successful_examples[0] == 1:
                    report(lambda: u"Found satisfying example %s" % (nicerepr(result),))
                else:
                    report(lambda: u"Shrunk example to %s" % (nicerepr(result),))
                last_data[0] = data
        if success and not data.frozen:
            data.mark_interesting()

    from hypothesis.internal.conjecture.engine import TestRunner
    from hypothesis.internal.conjecture.data import TestData, Status

    start = time.time()
    runner = TestRunner(template_condition, settings=settings, random=random, database_key=database_key)
    runner.run()
    run_time = time.time() - start
    if runner.last_data.status == Status.INTERESTING:
        with BuildContext():
            return TestData.for_buffer(runner.last_data.buffer).draw(search)
    if runner.valid_examples <= settings.min_satisfying_examples:
        if settings.timeout > 0 and run_time > settings.timeout:
            raise Timeout(
                (
                    "Ran out of time before finding enough valid examples for "
                    "%s. Only %d valid examples found in %.2f seconds."
                )
                % (get_pretty_function_description(condition), runner.valid_examples, run_time)
            )

        else:
            raise Unsatisfiable(
                ("Unable to satisfy assumptions of " "%s. Only %d examples considered satisfied assumptions")
                % (get_pretty_function_description(condition), runner.valid_examples)
            )

    raise NoSuchExample(get_pretty_function_description(condition))
Example #30
        def wrapped_test(*arguments, **kwargs):
            if settings.derandomize:
                random = Random(function_digest(test))
            else:
                random = provided_random or new_random()

            import hypothesis.strategies as sd
            from hypothesis.internal.strategymethod import strategy

            selfy = None
            arguments, kwargs = convert_positional_arguments(
                wrapped_test, arguments, kwargs)

            for arg in hypothesis_owned_arguments:
                try:
                    value = kwargs[arg]
                except KeyError:
                    continue
                if not isinstance(value, HypothesisProvided):
                    note_deprecation(
                        'Passing in explicit values to override Hypothesis '
                        'provided values is deprecated and will no longer '
                        'work in Hypothesis 2.0. If you need to do this, '
                        'extract a common function and call that from a '
                        'Hypothesis based test.', settings
                    )

            # Anything in unused_kwargs hasn't been injected through
            # argspec.defaults, so we need to add them.
            for k in unused_kwargs:
                if k not in kwargs:
                    kwargs[k] = unused_kwargs[k]
            # If the test function is a method of some kind, the bound object
            # will be the first named argument if there are any, otherwise the
            # first vararg (if any).
            if argspec.args:
                selfy = kwargs.get(argspec.args[0])
            elif arguments:
                selfy = arguments[0]
            if isinstance(selfy, HypothesisProvided):
                selfy = None
            test_runner = executor(selfy)

            for example in reversed(getattr(
                wrapped_test, u'hypothesis_explicit_examples', ()
            )):
                if example.args:
                    example_kwargs = dict(zip(
                        argspec.args[-len(example.args):], example.args
                    ))
                else:
                    example_kwargs = dict(example.kwargs)

                for k, v in kwargs.items():
                    if not isinstance(v, HypothesisProvided):
                        example_kwargs[k] = v
                # Note: Test may mutate arguments and we can't rerun explicit
                # examples, so we have to calculate the failure message at this
                # point rather than later.
                message_on_failure = u'Falsifying example: %s(%s)' % (
                    test.__name__, arg_string(test, arguments, example_kwargs)
                )
                try:
                    with BuildContext() as b:
                        test_runner(
                            lambda: test(*arguments, **example_kwargs)
                        )
                except BaseException:
                    report(message_on_failure)
                    for n in b.notes:
                        report(n)
                    raise

            if not any(
                isinstance(x, HypothesisProvided)
                for xs in (arguments, kwargs.values())
                for x in xs
            ):
                # All arguments have been satisfied without needing to invoke
                # hypothesis
                test_runner(lambda: test(*arguments, **kwargs))
                return

            def convert_to_specifier(v):
                if isinstance(v, HypothesisProvided):
                    return strategy(v.value, settings)
                else:
                    return sd.just(v)

            given_specifier = sd.tuples(
                sd.tuples(*map(convert_to_specifier, arguments)),
                sd.fixed_dictionaries(dict(
                    (k, convert_to_specifier(v)) for (k, v) in kwargs.items()))
            )

            def fail_health_check(message):
                message += (
                    '\nSee http://hypothesis.readthedocs.org/en/latest/health'
                    'checks.html for more information about this.'
                )
                if settings.strict:
                    raise FailedHealthCheck(message)
                else:
                    warnings.warn(FailedHealthCheck(message))

            search_strategy = strategy(given_specifier, settings)
            search_strategy.validate()

            if settings.database:
                storage = settings.database.storage(
                    fully_qualified_name(test))
            else:
                storage = None

            start = time.time()
            warned_random = [False]
            perform_health_check = settings.perform_health_check
            if Settings.default is not None:
                perform_health_check &= Settings.default.perform_health_check

            if perform_health_check:
                initial_state = getglobalrandomstate()
                health_check_random = Random(random.getrandbits(128))
                count = 0
                bad_draws = 0
                filtered_draws = 0
                errors = 0
                while (
                    count < 10 and time.time() < start + 1 and
                    filtered_draws < 50 and bad_draws < 50
                ):
                    try:
                        with Settings(settings, verbosity=Verbosity.quiet):
                            test_runner(reify_and_execute(
                                search_strategy,
                                search_strategy.draw_template(
                                    health_check_random,
                                    search_strategy.draw_parameter(
                                        health_check_random,
                                    )),
                                lambda *args, **kwargs: None,
                            ))
                        count += 1
                    except BadTemplateDraw:
                        bad_draws += 1
                    except UnsatisfiedAssumption:
                        filtered_draws += 1
                    except Exception:
                        if errors == 0:
                            report(traceback.format_exc())
                        errors += 1
                        if test_runner is default_executor:
                            fail_health_check(
                                'An exception occurred during data '
                                'generation in initial health check. '
                                'This indicates a bug in the strategy. '
                                'This could either be a Hypothesis bug or '
                                "an error in a function you've passed to "
                                'it to construct your data.'
                            )
                        else:
                            fail_health_check(
                                'An exception occurred during data '
                                'generation in initial health check. '
                                'This indicates a bug in the strategy. '
                                'This could either be a Hypothesis bug or '
                                'an error in a function you\'ve passed to '
                                'it to construct your data. Additionally, '
                                'you have a custom executor, which means '
                                'that this could be your executor failing '
                                'to handle a function which returns None. '
                            )
                if filtered_draws >= 50:
                    fail_health_check((
                        'It looks like your strategy is filtering out a lot '
                        'of data. Health check found %d filtered examples but '
                        'only %d good ones. This will make your tests much '
                        'slower, and also will probably distort the data '
                        'generation quite a lot. You should adapt your '
                        'strategy to filter less.') % (
                        filtered_draws, count
                    ))
                if bad_draws >= 50:
                    fail_health_check(
                        'Hypothesis is struggling to generate examples. '
                        'This is often a sign of a recursive strategy which '
                        'fans out too broadly. If you\'re using recursive, '
                        'try to reduce the size of the recursive step or '
                        'increase the maximum permitted number of leaves.'
                    )
                runtime = time.time() - start
                if runtime > 1.0 or count < 10:
                    fail_health_check((
                        'Data generation is extremely slow: Only produced '
                        '%d valid examples in %.2f seconds. Try decreasing '
                        "size of the data you're generating (with e.g."
                        'average_size or max_leaves parameters).'
                    ) % (count, runtime))
                if getglobalrandomstate() != initial_state:
                    warned_random[0] = True
                    fail_health_check(
                        'Data generation depends on global random module. '
                        'This makes results impossible to replay, which '
                        'prevents Hypothesis from working correctly. '
                        'If you want to use methods from random, use '
                        'randoms() from hypothesis.strategies to get an '
                        'instance of Random you can use. Alternatively, you '
                        'can use the random_module() strategy to explicitly '
                        'seed the random module.'
                    )

            last_exception = [None]
            repr_for_last_exception = [None]

            def is_template_example(xs):
                if perform_health_check and not warned_random[0]:
                    initial_state = getglobalrandomstate()
                record_repr = [None]
                try:
                    result = test_runner(reify_and_execute(
                        search_strategy, xs, test,
                        record_repr=record_repr,
                    ))
                    if result is not None:
                        note_deprecation((
                            'Tests run under @given should return None, but '
                            '%s returned %r instead. '
                            'In Hypothesis 2.0 this will become an error.'
                        ) % (test.__name__, result), settings)
                    return False
                except HypothesisDeprecationWarning:
                    raise
                except UnsatisfiedAssumption as e:
                    raise e
                except Exception as e:
                    last_exception[0] = traceback.format_exc()
                    repr_for_last_exception[0] = record_repr[0]
                    verbose_report(last_exception[0])
                    return True
                finally:
                    if (
                        not warned_random[0] and
                        perform_health_check and
                        getglobalrandomstate() != initial_state
                    ):
                        warned_random[0] = True
                        fail_health_check(
                            'Your test used the global random module. '
                            'This is unlikely to work correctly. You should '
                            'consider using the randoms() strategy from '
                            'hypothesis.strategies instead. Alternatively, '
                            'you can use the random_module() strategy to '
                            'explicitly seed the random module.'
                        )
            is_template_example.__name__ = test.__name__
            is_template_example.__qualname__ = qualname(test)

            falsifying_template = None
            try:
                falsifying_template = best_satisfying_template(
                    search_strategy, random, is_template_example,
                    settings, storage, start_time=start,
                )
            except NoSuchExample:
                return

            assert last_exception[0] is not None

            with settings:
                test_runner(reify_and_execute(
                    search_strategy, falsifying_template, test,
                    print_example=True, is_final=True
                ))

                report(
                    u'Failed to reproduce exception. Expected: \n' +
                    last_exception[0],
                )

                test_runner(reify_and_execute(
                    search_strategy, falsifying_template,
                    test_is_flaky(test, repr_for_last_exception[0]),
                    print_example=True, is_final=True
                ))
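
Several of these wrappers seed their Random from function_digest(test) when settings.derandomize is set, so every run of an unchanged test sees identical data. A usage sketch with the public API; the test body is illustrative:

from hypothesis import given, settings, strategies as st

@settings(derandomize=True)  # same test source => same digest => same examples
@given(st.integers())
def test_runs_deterministically(x):
    assert isinstance(x, int)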
Example #31
def test_digest_returns_the_same_value_for_two_calls():
    assert function_digest(test_simple_conversion) == function_digest(
        test_simple_conversion)
Example #32
        def wrapped_test(*arguments, **kwargs):
            settings = wrapped_test._hypothesis_internal_use_settings
            if wrapped_test._hypothesis_internal_use_seed is not None:
                random = Random(wrapped_test._hypothesis_internal_use_seed)
            elif settings.derandomize:
                random = Random(function_digest(test))
            else:
                random = new_random()

            import hypothesis.strategies as sd

            selfy = None
            arguments, kwargs = convert_positional_arguments(
                wrapped_test, arguments, kwargs)

            # If the test function is a method of some kind, the bound object
            # will be the first named argument if there are any, otherwise the
            # first vararg (if any).
            if argspec.args:
                selfy = kwargs.get(argspec.args[0])
            elif arguments:
                selfy = arguments[0]
            test_runner = executor(selfy)

            for example in reversed(
                    getattr(wrapped_test, 'hypothesis_explicit_examples', ())):
                if example.args:
                    example_kwargs = dict(
                        zip(original_argspec.args[-len(example.args):],
                            example.args))
                else:
                    example_kwargs = example.kwargs
                example_kwargs.update(kwargs)
                # Note: Test may mutate arguments and we can't rerun explicit
                # examples, so we have to calculate the failure message at this
                # point rather than later.
                message_on_failure = 'Falsifying example: %s(%s)' % (
                    test.__name__, arg_string(test, arguments, example_kwargs))
                try:
                    with BuildContext() as b:
                        test_runner(lambda: test(*arguments, **example_kwargs))
                except BaseException:
                    report(message_on_failure)
                    for n in b.notes:
                        report(n)
                    raise

            arguments = tuple(arguments)

            given_specifier = sd.tuples(
                sd.just(arguments),
                sd.fixed_dictionaries(generator_kwargs).map(
                    lambda args: dict(args, **kwargs)))

            def fail_health_check(message):
                message += (
                    '\nSee http://hypothesis.readthedocs.org/en/latest/health'
                    'checks.html for more information about this.')
                if settings.strict:
                    raise FailedHealthCheck(message)
                else:
                    warnings.warn(FailedHealthCheck(message))

            search_strategy = given_specifier
            search_strategy.validate()

            if settings.database:
                storage = settings.database.storage(fully_qualified_name(test))
            else:
                storage = None

            start = time.time()
            warned_random = [False]
            perform_health_check = settings.perform_health_check
            if Settings.default is not None:
                perform_health_check &= Settings.default.perform_health_check

            if perform_health_check:
                initial_state = getglobalrandomstate()
                health_check_random = Random(random.getrandbits(128))
                count = 0
                bad_draws = 0
                filtered_draws = 0
                errors = 0
                while (count < 10 and time.time() < start + 1
                       and filtered_draws < 50 and bad_draws < 50):
                    try:
                        with Settings(settings, verbosity=Verbosity.quiet):
                            test_runner(
                                reify_and_execute(
                                    search_strategy,
                                    search_strategy.draw_template(
                                        health_check_random,
                                        search_strategy.draw_parameter(
                                            health_check_random)),
                                    lambda *args, **kwargs: None,
                                ))
                        count += 1
                    except BadTemplateDraw:
                        bad_draws += 1
                    except UnsatisfiedAssumption:
                        filtered_draws += 1
                    except Exception:
                        if errors == 0:
                            report(traceback.format_exc())
                        errors += 1
                        if test_runner is default_executor:
                            fail_health_check(
                                'An exception occurred during data '
                                'generation in initial health check. '
                                'This indicates a bug in the strategy. '
                                'This could either be a Hypothesis bug or '
                                "an error in a function yo've passed to "
                                'it to construct your data.')
                        else:
                            fail_health_check(
                                'An exception occurred during data '
                                'generation in initial health check. '
                                'This indicates a bug in the strategy. '
                                'This could either be a Hypothesis bug or '
                                'an error in a function you\'ve passed to '
                                'it to construct your data. Additionally, '
                                'you have a custom executor, which means '
                                'that this could be your executor failing '
                                'to handle a function which returns None. ')
                if filtered_draws >= 50:
                    fail_health_check((
                        'It looks like your strategy is filtering out a lot '
                        'of data. Health check found %d filtered examples but '
                        'only %d good ones. This will make your tests much '
                        'slower, and also will probably distort the data '
                        'generation quite a lot. You should adapt your '
                        'strategy to filter less.') % (filtered_draws, count))
                if bad_draws >= 50:
                    fail_health_check(
                        'Hypothesis is struggling to generate examples. '
                        'This is often a sign of a recursive strategy which '
                        'fans out too broadly. If you\'re using recursive, '
                        'try to reduce the size of the recursive step or '
                        'increase the maximum permitted number of leaves.')
                runtime = time.time() - start
                if runtime > 1.0 or count < 10:
                    fail_health_check(
                        ('Data generation is extremely slow: Only produced '
                         '%d valid examples in %.2f seconds. Try decreasing '
                         "size of the data yo're generating (with e.g."
                         'average_size or max_leaves parameters).') %
                        (count, runtime))
                if getglobalrandomstate() != initial_state:
                    warned_random[0] = True
                    fail_health_check(
                        'Data generation depends on global random module. '
                        'This makes results impossible to replay, which '
                        'prevents Hypothesis from working correctly. '
                        'If you want to use methods from random, use '
                        'randoms() from hypothesis.strategies to get an '
                        'instance of Random you can use. Alternatively, you '
                        'can use the random_module() strategy to explicitly '
                        'seed the random module.')

            last_exception = [None]
            repr_for_last_exception = [None]

            def is_template_example(xs):
                if perform_health_check and not warned_random[0]:
                    initial_state = getglobalrandomstate()
                record_repr = [None]
                try:
                    result = test_runner(
                        reify_and_execute(
                            search_strategy,
                            xs,
                            test,
                            record_repr=record_repr,
                        ))
                    if result is not None and settings.perform_health_check:
                        raise FailedHealthCheck(
                            ('Tests run under @given should return None, but '
                             '%s returned %r instead.') %
                            (test.__name__, result), settings)
                    return False
                except (HypothesisDeprecationWarning, FailedHealthCheck,
                        UnsatisfiedAssumption):
                    raise
                except Exception:
                    last_exception[0] = traceback.format_exc()
                    repr_for_last_exception[0] = record_repr[0]
                    verbose_report(last_exception[0])
                    return True
                finally:
                    if (not warned_random[0] and perform_health_check
                            and getglobalrandomstate() != initial_state):
                        warned_random[0] = True
                        fail_health_check(
                            'Your test used the global random module. '
                            'This is unlikely to work correctly. You should '
                            'consider using the randoms() strategy from '
                            'hypothesis.strategies instead. Alternatively, '
                            'you can use the random_module() strategy to '
                            'explicitly seed the random module.')

            is_template_example.__name__ = test.__name__
            is_template_example.__qualname__ = qualname(test)

            with settings:
                falsifying_template = None
                try:
                    falsifying_template = best_satisfying_template(
                        search_strategy,
                        random,
                        is_template_example,
                        settings,
                        storage,
                        start_time=start,
                    )
                except NoSuchExample:
                    return

                assert last_exception[0] is not None

                try:
                    test_runner(
                        reify_and_execute(search_strategy,
                                          falsifying_template,
                                          test,
                                          print_example=True,
                                          is_final=True))
                except UnsatisfiedAssumption:
                    report(traceback.format_exc())
                    raise Flaky(
                        'Unreliable assumption: An example which satisfied '
                        'assumptions on the first run now fails it.')

                report(
                    'Failed to reproduce exception. Expected: \n' +
                    last_exception[0], )

                try:
                    test_runner(
                        reify_and_execute(search_strategy,
                                          falsifying_template,
                                          test_is_flaky(
                                              test,
                                              repr_for_last_exception[0]),
                                          print_example=True,
                                          is_final=True))
                except UnsatisfiedAssumption:
                    raise Flaky(
                        'Unreliable test data: Failed to reproduce a failure '
                        'and then when it came to recreating the example in '
                        'order to print the test data with a flaky result '
                        'the example was filtered out (by e.g. a '
                        'call to filter in your strategy) when we didn\'t '
                        'expect it to be.')
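
For context, a hedged sketch (standard strategies API, not part of the source above) of a strategy that would trip the filtered_draws health check implemented above: almost every draw is rejected, so 50 filtered draws accumulate long before 10 valid examples do.

import hypothesis.strategies as st
from hypothesis import given

# Roughly one integer in a thousand survives the filter, so data
# generation raises UnsatisfiedAssumption far more than 50 times before
# collecting 10 good examples, and fail_health_check() fires.
@given(st.integers().filter(lambda x: x % 1000 == 0))
def test_overly_filtered(x):
    assert x % 1000 == 0
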
Example #33
def test_can_digest_a_unicode_lambda():
    function_digest(lambda x: "☃" in str(x))
Example #34
    def run_test_with_generator(test):
        if settings.derandomize:
            assert provided_random is None
            random = Random(function_digest(test))
        else:
            random = provided_random or Random()

        original_argspec = inspect.getargspec(test)
        if generator_arguments and original_argspec.varargs:
            raise InvalidArgument("varargs are not supported with positional arguments to @given")
        extra_kwargs = [k for k in generator_kwargs if k not in original_argspec.args]
        if extra_kwargs and not original_argspec.keywords:
            raise InvalidArgument("%s() got an unexpected keyword argument %r" % (test.__name__, extra_kwargs[0]))
        if len(generator_arguments) > len(original_argspec.args):
            raise InvalidArgument(
                ("Too many positional arguments for %s() (got %d but" " expected at most %d")
                % (test.__name__, len(generator_arguments), len(original_argspec.args))
            )
        arguments = original_argspec.args + sorted(extra_kwargs)
        specifiers = list(generator_arguments)
        seen_kwarg = None
        for a in arguments:
            if a in generator_kwargs:
                seen_kwarg = seen_kwarg or a
                specifiers.append(generator_kwargs[a])
            else:
                if seen_kwarg is not None:
                    raise InvalidArgument(
                        (
                            "Argument %s comes after keyword %s which has been "
                            "specified, but does not itself have a "
                            "specification"
                        )
                        % (a, seen_kwarg)
                    )

        argspec = inspect.ArgSpec(
            args=arguments,
            keywords=original_argspec.keywords,
            varargs=original_argspec.varargs,
            defaults=tuple(map(HypothesisProvided, specifiers)),
        )

        @copy_argspec(test.__name__, argspec)
        def wrapped_test(*arguments, **kwargs):
            selfy = None
            arguments, kwargs = convert_positional_arguments(wrapped_test, arguments, kwargs)
            # Because we converted all kwargs to given into real args and
            # error if we have neither args nor kwargs, this should always
            # be valid
            assert argspec.args
            selfy = kwargs.get(argspec.args[0])
            if isinstance(selfy, HypothesisProvided):
                selfy = None
            test_runner = executor(selfy)

            for example in getattr(wrapped_test, "hypothesis_explicit_examples", ()):
                if example.args:
                    example_kwargs = dict(zip(argspec.args[-len(example.args) :], example.args))
                else:
                    example_kwargs = dict(example.kwargs)

                for k, v in kwargs.items():
                    if not isinstance(v, HypothesisProvided):
                        example_kwargs[k] = v
                # Note: Test may mutate arguments and we can't rerun explicit
                # examples, so we have to calculate the failure message at this
                # point rather than later.
                message_on_failure = "Falsifying example: %s(%s)" % (
                    test.__name__,
                    arg_string(test, arguments, example_kwargs),
                )
                try:
                    test_runner(lambda: test(*arguments, **example_kwargs))
                except BaseException:
                    report(message_on_failure)
                    raise

            if not any(isinstance(x, HypothesisProvided) for xs in (arguments, kwargs.values()) for x in xs):
                # All arguments have been satisfied without needing to invoke
                # hypothesis
                test_runner(lambda: test(*arguments, **kwargs))
                return

            def convert_to_specifier(v):
                if isinstance(v, HypothesisProvided):
                    return strategy(v.value, settings)
                else:
                    return sd.just(v)

            given_specifier = sd.tuples(
                sd.tuples(*map(convert_to_specifier, arguments)),
                sd.fixed_dictionaries(dict((k, convert_to_specifier(v)) for (k, v) in kwargs.items())),
            )

            search_strategy = strategy(given_specifier, settings)

            if settings.database:
                storage = settings.database.storage(fully_qualified_name(test))
            else:
                storage = None

            last_exception = [None]
            repr_for_last_exception = [None]

            def is_template_example(xs):
                record_repr = [None]
                try:
                    test_runner(
                        reify_and_execute(
                            search_strategy, xs, test, always_print=settings.max_shrinks <= 0, record_repr=record_repr
                        )
                    )
                    return False
                except UnsatisfiedAssumption as e:
                    raise e
                except Exception as e:
                    if settings.max_shrinks <= 0:
                        raise e
                    last_exception[0] = traceback.format_exc()
                    repr_for_last_exception[0] = record_repr[0]
                    verbose_report(last_exception[0])
                    return True

            is_template_example.__name__ = test.__name__
            is_template_example.__qualname__ = qualname(test)

            falsifying_template = None
            try:
                falsifying_template = best_satisfying_template(
                    search_strategy, random, is_template_example, settings, storage
                )
            except NoSuchExample:
                return

            assert last_exception[0] is not None

            with settings:
                test_runner(reify_and_execute(search_strategy, falsifying_template, test, print_example=True))

                report("Failed to reproduce exception. Expected: \n" + last_exception[0])

                test_runner(
                    reify_and_execute(
                        search_strategy,
                        falsifying_template,
                        test_is_flaky(test, repr_for_last_exception[0]),
                        print_example=True,
                    )
                )

        wrapped_test.__name__ = test.__name__
        wrapped_test.__doc__ = test.__doc__
        wrapped_test.is_hypothesis_test = True
        wrapped_test.hypothesis_explicit_examples = getattr(test, "hypothesis_explicit_examples", [])
        return wrapped_test
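
A hedged sketch of how the wrapper above is driven from user code (standard public API, not taken from the source): @given fills arguments from strategies, and any @example cases are replayed first by the hypothesis_explicit_examples loop.

from hypothesis import example, given, strategies as st

@example(0)            # replayed by the explicit-examples loop above
@given(st.integers())  # remaining runs use generated data
def test_abs_is_non_negative(x):
    assert abs(x) >= 0
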
Example #35
def find(
    specifier,  # type: SearchStrategy
    condition,  # type: Callable[[Any], bool]
    settings=None,  # type: Settings
    random=None,   # type: Any
    database_key=None,  # type: bytes
):
    # type: (...) -> Any
    """Returns the minimal example from the given strategy ``specifier`` that
    matches the predicate function ``condition``."""
    if settings is None:
        settings = Settings(max_examples=2000)
    settings = Settings(settings, suppress_health_check=HealthCheck.all())

    if database_key is None and settings.database is not None:
        database_key = function_digest(condition)

    if not isinstance(specifier, SearchStrategy):
        raise InvalidArgument(
            'Expected SearchStrategy but got %r of type %s' % (
                specifier, type(specifier).__name__
            ))
    specifier.validate()

    search = specifier

    random = random or new_random()
    successful_examples = [0]
    last_data = [None]
    last_repr = [None]

    def template_condition(data):
        with BuildContext(data):
            try:
                data.is_find = True
                with deterministic_PRNG():
                    result = data.draw(search)
                    data.note(result)
                    success = condition(result)
            except UnsatisfiedAssumption:
                data.mark_invalid()

        if success:
            successful_examples[0] += 1

        if settings.verbosity >= Verbosity.verbose:
            if not successful_examples[0]:
                report(
                    u'Tried non-satisfying example %s' % (nicerepr(result),))
            elif success:
                if successful_examples[0] == 1:
                    last_repr[0] = nicerepr(result)
                    report(u'Found satisfying example %s' % (last_repr[0],))
                    last_data[0] = data
                elif (
                    sort_key(hbytes(data.buffer)) <
                    sort_key(last_data[0].buffer)
                ) and nicerepr(result) != last_repr[0]:
                    last_repr[0] = nicerepr(result)
                    report(u'Shrunk example to %s' % (last_repr[0],))
                    last_data[0] = data
        if success and not data.frozen:
            data.mark_interesting()
    start = benchmark_time()
    runner = ConjectureRunner(
        template_condition, settings=settings, random=random,
        database_key=database_key,
    )
    runner.run()
    note_engine_for_statistics(runner)
    run_time = benchmark_time() - start
    if runner.interesting_examples:
        data = ConjectureData.for_buffer(
            list(runner.interesting_examples.values())[0].buffer)
        with BuildContext(data):
            with deterministic_PRNG():
                return data.draw(search)
    if runner.valid_examples == 0 and (
        runner.exit_reason != ExitReason.finished
    ):
        if settings.timeout > 0 and run_time > settings.timeout:
            raise Timeout((  # pragma: no cover
                'Ran out of time before finding enough valid examples for '
                '%s. Only %d valid examples found in %.2f seconds.'
            ) % (
                get_pretty_function_description(condition),
                runner.valid_examples, run_time))

        else:
            raise Unsatisfiable(
                'Unable to satisfy assumptions of %s.' %
                (get_pretty_function_description(condition),)
            )

    raise NoSuchExample(get_pretty_function_description(condition))
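
A usage sketch for the find() defined above: the Conjecture engine shrinks the first satisfying example down to the minimal one.

import hypothesis.strategies as st
from hypothesis import find  # released-package equivalent of the find() above

# Shrinks to the smallest integer satisfying the predicate.
assert find(st.integers(), lambda x: x >= 13) == 13
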
Example #36
    def run(self):
        # Tell pytest to omit the body of this function from tracebacks
        __tracebackhide__ = True
        if global_force_seed is None:
            database_key = function_digest(self.test)
        else:
            database_key = None
        runner = ConjectureRunner(
            self.evaluate_test_data,
            settings=self.settings,
            random=self.random,
            database_key=database_key,
        )
        try:
            runner.run()
        finally:
            self.used_examples_from_database = runner.used_examples_from_database
        note_engine_for_statistics(runner)

        if runner.call_count == 0:
            return
        if runner.interesting_examples:
            self.falsifying_examples = sorted(
                [d for d in runner.interesting_examples.values()],
                key=lambda d: sort_key(d.buffer),
                reverse=True,
            )
        else:
            if runner.valid_examples == 0:
                raise Unsatisfiable(
                    "Unable to satisfy assumptions of hypothesis %s." %
                    (get_pretty_function_description(self.test), ))

        if not self.falsifying_examples:
            return

        self.failed_normally = True

        flaky = 0

        for falsifying_example in self.falsifying_examples:
            ran_example = ConjectureData.for_buffer(falsifying_example.buffer)
            self.__was_flaky = False
            assert falsifying_example.__expected_exception is not None
            try:
                self.execute(
                    ran_example,
                    print_example=True,
                    is_final=True,
                    expected_failure=(
                        falsifying_example.__expected_exception,
                        falsifying_example.__expected_traceback,
                    ),
                )
            except (UnsatisfiedAssumption, StopTest):
                report(traceback.format_exc())
                self.__flaky(
                    "Unreliable assumption: An example which satisfied "
                    "assumptions on the first run now fails it.")
            except BaseException as e:
                if len(self.falsifying_examples) <= 1:
                    raise
                tb = get_trimmed_traceback()
                report("".join(traceback.format_exception(type(e), e, tb)))
            finally:  # pragma: no cover
                # This section is in fact entirely covered by the tests in
                # test_reproduce_failure, but it seems to trigger a lovely set
                # of coverage bugs: The branches show up as uncovered (despite
                # definitely being covered - you can add an assert False else
                # branch to verify this and see it fail), and additionally the
                # second branch still complains about lack of coverage even if
                # you add a pragma: no cover to it!
                # See https://bitbucket.org/ned/coveragepy/issues/623/
                if self.settings.print_blob is not PrintSettings.NEVER:
                    failure_blob = encode_failure(falsifying_example.buffer)
                    # Have to use the example we actually ran, not the original
                    # falsifying example! Otherwise we won't catch problems
                    # where the repr of the generated example doesn't parse.
                    can_use_repr = ran_example.can_reproduce_example_from_repr
                    if self.settings.print_blob is PrintSettings.ALWAYS or (
                            self.settings.print_blob is PrintSettings.INFER
                            and self.settings.verbosity >= Verbosity.normal
                            and not can_use_repr and len(failure_blob) < 200):
                        report((
                            "\nYou can reproduce this example by temporarily "
                            "adding @reproduce_failure(%r, %r) as a decorator "
                            "on your test case") % (__version__, failure_blob))
            if self.__was_flaky:
                flaky += 1

        # If we only have one example then we should have raised an error or
        # flaky prior to this point.
        assert len(self.falsifying_examples) > 1

        if flaky > 0:
            raise Flaky(
                ("Hypothesis found %d distinct failures, but %d of them "
                 "exhibited some sort of flaky behaviour.") %
                (len(self.falsifying_examples), flaky))
        else:
            raise MultipleFailures(("Hypothesis found %d distinct failures.") %
                                   (len(self.falsifying_examples)))
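
A hedged sketch (not from the source above) of a test that exercises the multiple-failure path handled above: two distinct exception types yield two interesting examples, so the loop reports both and raises MultipleFailures, or Flaky if a replay misbehaves.

from hypothesis import given, strategies as st

@given(st.integers())
def test_two_distinct_bugs(x):
    # Shrinks to two separate falsifying examples, roughly x=101
    # raising ValueError and x=-101 raising TypeError.
    if x > 100:
        raise ValueError("too big")
    if x < -100:
        raise TypeError("too small")
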
Example #37
    def run_test_with_generator(test):
        if settings.derandomize:
            assert provided_random is None
            random = Random(
                function_digest(test)
            )
        else:
            random = provided_random or Random()

        original_argspec = inspect.getargspec(test)
        if original_argspec.varargs:
            raise InvalidArgument(
                'varargs are not supported with @given'
            )
        extra_kwargs = [
            k for k in generator_kwargs if k not in original_argspec.args]
        if extra_kwargs and not original_argspec.keywords:
            raise InvalidArgument(
                '%s() got an unexpected keyword argument %r' % (
                    test.__name__,
                    extra_kwargs[0]
                ))
        if (
            len(generator_arguments) > len(original_argspec.args)
        ):
            raise InvalidArgument((
                'Too many positional arguments for %s() (got %d but'
                ' expected at most %d') % (
                    test.__name__, len(generator_arguments),
                    len(original_argspec.args)))
        arguments = original_argspec.args + sorted(extra_kwargs)
        specifiers = list(generator_arguments)
        seen_kwarg = None
        for a in arguments:
            if a in generator_kwargs:
                seen_kwarg = seen_kwarg or a
                specifiers.append(generator_kwargs[a])
            else:
                if seen_kwarg is not None:
                    raise InvalidArgument((
                        'Argument %s comes after keyword %s which has been '
                        'specified, but does not itself have a '
                        'specification') % (
                        a, seen_kwarg
                    ))

        argspec = inspect.ArgSpec(
            args=arguments,
            keywords=original_argspec.keywords,
            varargs=original_argspec.varargs,
            defaults=tuple(map(HypothesisProvided, specifiers))
        )

        @copy_argspec(
            test.__name__, argspec
        )
        def wrapped_test(*arguments, **kwargs):
            selfy = None
            # Because we converted all kwargs to given into real args and
            # error if we have neither args nor kwargs, this should always
            # be valid
            assert argspec.args
            selfy = kwargs.get(argspec.args[0])
            if isinstance(selfy, HypothesisProvided):
                selfy = None
            test_runner = executor(selfy)

            for example in getattr(
                wrapped_test, 'hypothesis_explicit_examples', ()
            ):
                if example.args:
                    example_kwargs = dict(zip(
                        argspec.args[-len(example.args):], example.args
                    ))
                else:
                    example_kwargs = dict(example.kwargs)

                for k, v in kwargs.items():
                    if not isinstance(v, HypothesisProvided):
                        example_kwargs[k] = v

                test_runner(
                    lambda: test(*arguments, **example_kwargs)
                )

            if not any(
                isinstance(x, HypothesisProvided)
                for xs in (arguments, kwargs.values())
                for x in xs
            ):
                # All arguments have been satisfied without needing to invoke
                # hypothesis
                test_runner(lambda: test(*arguments, **kwargs))
                return

            def convert_to_specifier(v):
                if isinstance(v, HypothesisProvided):
                    return v.value
                else:
                    return just(v)

            given_specifier = (
                tuple(map(convert_to_specifier, arguments)),
                dict([(k, convert_to_specifier(v)) for (k, v) in kwargs.items()])
            )

            search_strategy = strategy(given_specifier, settings)

            if settings.database:
                storage = settings.database.storage_for(
                    given_specifier, search_strategy)
            else:
                storage = None

            def is_template_example(xs):
                try:
                    test_runner(reify_and_execute(search_strategy, xs, test))
                    return False
                except UnsatisfiedAssumption as e:
                    raise e
                except Exception:
                    return True

            is_template_example.__name__ = test.__name__
            is_template_example.__qualname__ = getattr(
                test, '__qualname__', test.__name__)

            falsifying_template = None
            try:
                falsifying_template = best_satisfying_template(
                    search_strategy, random, is_template_example,
                    settings, storage
                )
            except NoSuchExample:
                return

            test_runner(reify_and_execute(
                search_strategy, falsifying_template, test,
                print_example=True
            ))

            test_runner(reify_and_execute(
                search_strategy, falsifying_template, test_is_flaky(test),
                print_example=True
            ))

        wrapped_test.__name__ = test.__name__
        wrapped_test.__doc__ = test.__doc__
        wrapped_test.is_hypothesis_test = True
        wrapped_test.hypothesis_explicit_examples = getattr(
            test, 'hypothesis_explicit_examples', []
        )
        return wrapped_test
Example #38
def find(specifier, condition, settings=None, random=None, database_key=None):
    """Returns the minimal example from the given strategy ``specifier`` that
    matches the predicate function ``condition``."""
    settings = settings or Settings(
        max_examples=2000,
        min_satisfying_examples=0,
        max_shrinks=2000,
    )

    if database_key is None and settings.database is not None:
        database_key = function_digest(condition)

    if not isinstance(specifier, SearchStrategy):
        raise InvalidArgument(
            'Expected SearchStrategy but got %r of type %s' % (
                specifier, type(specifier).__name__
            ))
    specifier.validate()

    search = specifier

    random = random or new_random()
    successful_examples = [0]
    last_data = [None]

    def template_condition(data):
        with BuildContext(data):
            try:
                data.is_find = True
                result = data.draw(search)
                data.note(result)
                success = condition(result)
            except UnsatisfiedAssumption:
                data.mark_invalid()

        if success:
            successful_examples[0] += 1

        if settings.verbosity == Verbosity.verbose:
            if not successful_examples[0]:
                report(lambda: u'Trying example %s' % (
                    nicerepr(result),
                ))
            elif success:
                if successful_examples[0] == 1:
                    report(lambda: u'Found satisfying example %s' % (
                        nicerepr(result),
                    ))
                else:
                    report(lambda: u'Shrunk example to %s' % (
                        nicerepr(result),
                    ))
                last_data[0] = data
        if success and not data.frozen:
            data.mark_interesting()
    start = time.time()
    runner = ConjectureRunner(
        template_condition, settings=settings, random=random,
        database_key=database_key,
    )
    runner.run()
    note_engine_for_statistics(runner)
    run_time = time.time() - start
    if runner.last_data.status == Status.INTERESTING:
        data = ConjectureData.for_buffer(runner.last_data.buffer)
        with BuildContext(data):
            return data.draw(search)
    if (
        runner.valid_examples <= settings.min_satisfying_examples and
        runner.exit_reason != ExitReason.finished
    ):
        if settings.timeout > 0 and run_time > settings.timeout:
            raise Timeout((
                'Ran out of time before finding enough valid examples for '
                '%s. Only %d valid examples found in %.2f seconds.'
            ) % (
                get_pretty_function_description(condition),
                runner.valid_examples, run_time))

        else:
            raise Unsatisfiable((
                'Unable to satisfy assumptions of '
                '%s. Only %d examples considered satisfied assumptions'
            ) % (
                get_pretty_function_description(condition),
                runner.valid_examples,))

    raise NoSuchExample(get_pretty_function_description(condition))
Example #39
def find(
        specifier,  # type: SearchStrategy
        condition,  # type: Callable[[Any], bool]
        settings=None,  # type: Settings
        random=None,  # type: Any
        database_key=None,  # type: bytes
):
    # type: (...) -> Any
    """Returns the minimal example from the given strategy ``specifier`` that
    matches the predicate function ``condition``."""
    if settings is None:
        settings = Settings(max_examples=2000)
    settings = Settings(settings, suppress_health_check=HealthCheck.all())

    if database_key is None and settings.database is not None:
        database_key = function_digest(condition)

    if not isinstance(specifier, SearchStrategy):
        raise InvalidArgument("Expected SearchStrategy but got %r of type %s" %
                              (specifier, type(specifier).__name__))
    specifier.validate()

    search = specifier

    random = random or new_random()
    successful_examples = [0]
    last_data = [None]
    last_repr = [None]

    def template_condition(data):
        with BuildContext(data):
            try:
                data.is_find = True
                with deterministic_PRNG():
                    result = data.draw(search)
                    data.note(result)
                    success = condition(result)
            except UnsatisfiedAssumption:
                data.mark_invalid()

        if success:
            successful_examples[0] += 1

        if settings.verbosity >= Verbosity.verbose:
            if not successful_examples[0]:
                report(u"Tried non-satisfying example %s" %
                       (nicerepr(result), ))
            elif success:
                if successful_examples[0] == 1:
                    last_repr[0] = nicerepr(result)
                    report(u"Found satisfying example %s" % (last_repr[0], ))
                    last_data[0] = data
                elif (sort_key(hbytes(data.buffer)) < sort_key(
                        last_data[0].buffer)
                      ) and nicerepr(result) != last_repr[0]:
                    last_repr[0] = nicerepr(result)
                    report(u"Shrunk example to %s" % (last_repr[0], ))
                    last_data[0] = data
        if success and not data.frozen:
            data.mark_interesting()

    runner = ConjectureRunner(template_condition,
                              settings=settings,
                              random=random,
                              database_key=database_key)
    runner.run()
    note_engine_for_statistics(runner)
    if runner.interesting_examples:
        data = ConjectureData.for_buffer(
            list(runner.interesting_examples.values())[0].buffer)
        with BuildContext(data):
            with deterministic_PRNG():
                return data.draw(search)
    if runner.valid_examples == 0 and (runner.exit_reason !=
                                       ExitReason.finished):
        raise Unsatisfiable("Unable to satisfy assumptions of %s." %
                            (get_pretty_function_description(condition), ))

    raise NoSuchExample(get_pretty_function_description(condition))
Example #40
def run_state_machine_as_test(state_machine_factory, settings=None):
    """Run a state machine definition as a test, either silently doing nothing
    or printing a minimal breaking program and raising an exception.

    state_machine_factory is anything which returns an instance of
    GenericStateMachine when called with no arguments - it can be a class or a
    function. settings will be used to control the execution of the test.
    """
    if settings is None:
        try:
            settings = state_machine_factory.TestCase.settings
            check_type(Settings, settings,
                       "state_machine_factory.TestCase.settings")
        except AttributeError:
            settings = Settings(deadline=None,
                                suppress_health_check=HealthCheck.all())
    check_type(Settings, settings, "settings")

    @settings
    @given(st.data())
    def run_state_machine(factory, data):
        machine = factory()
        if isinstance(machine, GenericStateMachine) and not isinstance(
                machine, RuleBasedStateMachine):
            note_deprecation(
                "%s inherits from GenericStateMachine, which is deprecated.  Use a "
                "RuleBasedStateMachine, or a test function with st.data(), instead."
                % (type(machine).__name__, ),
                since="2019-05-29",
            )
        else:
            check_type(RuleBasedStateMachine, machine,
                       "state_machine_factory()")
        data.conjecture_data.hypothesis_runner = machine

        n_steps = settings.stateful_step_count
        should_continue = cu.many(data.conjecture_data,
                                  min_size=1,
                                  max_size=n_steps,
                                  average_size=n_steps)

        print_steps = (current_build_context().is_final
                       or current_verbosity() >= Verbosity.debug)
        try:
            if print_steps:
                machine.print_start()
            machine.check_invariants()

            while should_continue.more():
                value = data.conjecture_data.draw(machine.steps())
                # Assign 'result' here in case 'execute_step' fails below
                result = multiple()
                try:
                    result = machine.execute_step(value)
                finally:
                    if print_steps:
                        # 'result' is only used if the step has target bundles.
                        # If it does, and the result is a 'MultipleResult',
                        # then 'print_step' prints a multi-variable assignment.
                        machine.print_step(value, result)
                machine.check_invariants()
        finally:
            if print_steps:
                machine.print_end()
            machine.teardown()

    # Use a machine digest to identify stateful tests in the example database
    run_state_machine.hypothesis.inner_test._hypothesis_internal_add_digest = function_digest(
        state_machine_factory)
    # Copy some attributes so @seed and @reproduce_failure "just work"
    run_state_machine._hypothesis_internal_use_seed = getattr(
        state_machine_factory, "_hypothesis_internal_use_seed", None)
    run_state_machine._hypothesis_internal_use_reproduce_failure = getattr(
        state_machine_factory, "_hypothesis_internal_use_reproduce_failure",
        None)
    run_state_machine._hypothesis_internal_print_given_args = False

    run_state_machine(state_machine_factory)
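
A usage sketch for run_state_machine_as_test() as defined above, using the RuleBasedStateMachine subclass its check_type branch now expects (public hypothesis.stateful API):

from hypothesis.stateful import RuleBasedStateMachine, rule

class CounterMachine(RuleBasedStateMachine):
    def __init__(self):
        super().__init__()
        self.value = 0

    @rule()
    def increment(self):
        self.value += 1

    def teardown(self):
        # Invoked by the finally block of run_state_machine() above.
        assert self.value >= 0

run_state_machine_as_test(CounterMachine)
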
Example #41
    def run_engine(self):
        """Run the test function many times, on database input and generated
        input, using the Conjecture engine.
        """
        # Tell pytest to omit the body of this function from tracebacks
        __tracebackhide__ = True
        try:
            database_key = self.wrapped_test._hypothesis_internal_database_key
        except AttributeError:
            if global_force_seed is None:
                database_key = function_digest(self.test)
            else:
                database_key = None

        runner = ConjectureRunner(
            self._execute_once_for_engine,
            settings=self.settings,
            random=self.random,
            database_key=database_key,
        )
        # Use the Conjecture engine to run the test function many times
        # on different inputs.
        runner.run()
        note_engine_for_statistics(runner)

        if runner.call_count == 0:
            return
        if runner.interesting_examples:
            self.falsifying_examples = sorted(
                runner.interesting_examples.values(),
                key=lambda d: sort_key(d.buffer),
                reverse=True,
            )
        else:
            if runner.valid_examples == 0:
                raise Unsatisfiable(
                    "Unable to satisfy assumptions of hypothesis %s." %
                    (get_pretty_function_description(self.test), ))

        if not self.falsifying_examples:
            return
        elif not self.settings.report_multiple_bugs:
            # Pretend that we only found one failure, by discarding the others.
            del self.falsifying_examples[:-1]

        # The engine found one or more failures, so we need to reproduce and
        # report them.

        self.failed_normally = True

        flaky = 0

        for falsifying_example in self.falsifying_examples:
            info = falsifying_example.extra_information

            ran_example = ConjectureData.for_buffer(falsifying_example.buffer)
            self.__was_flaky = False
            assert info.__expected_exception is not None
            try:
                self.execute_once(
                    ran_example,
                    print_example=not self.is_find,
                    is_final=True,
                    expected_failure=(
                        info.__expected_exception,
                        info.__expected_traceback,
                    ),
                )
            except (UnsatisfiedAssumption, StopTest):
                report(traceback.format_exc())
                self.__flaky(
                    "Unreliable assumption: An example which satisfied "
                    "assumptions on the first run now fails it.")
            except BaseException as e:
                if len(self.falsifying_examples) <= 1:
                    # There is only one failure, so we can report it by raising
                    # it directly.
                    raise

                # We are reporting multiple failures, so we need to manually
                # print each exception's stack trace and information.
                tb = get_trimmed_traceback()
                report("".join(traceback.format_exception(type(e), e, tb)))

            finally:  # pragma: no cover
                # Mostly useful for ``find`` and ensuring that objects that
                # hold on to a reference to ``data`` know that it's now been
                # finished and they shouldn't attempt to draw more data from
                # it.
                ran_example.freeze()

                # This section is in fact entirely covered by the tests in
                # test_reproduce_failure, but it seems to trigger a lovely set
                # of coverage bugs: The branches show up as uncovered (despite
                # definitely being covered - you can add an assert False else
                # branch to verify this and see it fail - and additionally the
                # second branch still complains about lack of coverage even if
                # you add a pragma: no cover to it!
                # See https://bitbucket.org/ned/coveragepy/issues/623/
                if self.settings.print_blob:
                    report(("\nYou can reproduce this example by temporarily "
                            "adding @reproduce_failure(%r, %r) as a decorator "
                            "on your test case") %
                           (__version__,
                            encode_failure(falsifying_example.buffer)))
            if self.__was_flaky:
                flaky += 1

        # If we only have one example then we should have raised an error or
        # flaky prior to this point.
        assert len(self.falsifying_examples) > 1

        if flaky > 0:
            raise Flaky(
                ("Hypothesis found %d distinct failures, but %d of them "
                 "exhibited some sort of flaky behaviour.") %
                (len(self.falsifying_examples), flaky))
        else:
            raise MultipleFailures(("Hypothesis found %d distinct failures.") %
                                   (len(self.falsifying_examples)))
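
A hedged sketch of the report_multiple_bugs setting consulted above (standard settings API, not part of the source): disabling it discards all but the smallest falsifying example, so a single failure is raised directly instead of MultipleFailures.

from hypothesis import given, settings, strategies as st

@settings(report_multiple_bugs=False)
@given(st.integers())
def test_reports_only_one_bug(x):
    if x > 100:
        raise ValueError("too big")
    if x < -100:
        raise TypeError("too small")
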
Example #42
    def run_test_with_generator(test):
        if settings.derandomize:
            assert provided_random is None
            random = Random(function_digest(test))
        else:
            random = provided_random or Random()

        original_argspec = getargspec(test)
        if generator_arguments and original_argspec.varargs:
            raise InvalidArgument(
                u'varargs are not supported with positional arguments to '
                u'@given'
            )
        extra_kwargs = [
            k for k in generator_kwargs if k not in original_argspec.args]
        if extra_kwargs and not original_argspec.keywords:
            raise InvalidArgument(
                u'%s() got an unexpected keyword argument %r' % (
                    test.__name__,
                    extra_kwargs[0]
                ))
        if (
            len(generator_arguments) > len(original_argspec.args)
        ):
            raise InvalidArgument((
                u'Too many positional arguments for %s() (got %d but'
                u' expected at most %d') % (
                    test.__name__, len(generator_arguments),
                    len(original_argspec.args)))
        arguments = original_argspec.args
        specifiers = list(generator_arguments)
        seen_kwarg = None
        for a in arguments:
            if isinstance(a, list):  # pragma: no cover
                raise InvalidArgument((
                    u'Cannot decorate function %s() because it has '
                    u'destructuring arguments') % (
                        test.__name__,
                ))
            if a in generator_kwargs:
                seen_kwarg = seen_kwarg or a
                specifiers.append(generator_kwargs[a])
            else:
                if seen_kwarg is not None:
                    raise InvalidArgument((
                        u'Argument %s comes after keyword %s which has been '
                        u'specified, but does not itself have a '
                        u'specification') % (
                        a, seen_kwarg
                    ))

        argspec = inspect.ArgSpec(
            args=arguments,
            keywords=original_argspec.keywords,
            varargs=original_argspec.varargs,
            defaults=tuple(map(HypothesisProvided, specifiers))
        )

        unused_kwargs = {}
        for k in extra_kwargs:
            unused_kwargs[k] = HypothesisProvided(generator_kwargs[k])

        @impersonate(test)
        @copy_argspec(
            test.__name__, argspec
        )
        def wrapped_test(*arguments, **kwargs):
            import hypothesis.strategies as sd
            from hypothesis.internal.strategymethod import strategy

            selfy = None
            arguments, kwargs = convert_positional_arguments(
                wrapped_test, arguments, kwargs)
            # Anything in unused_kwargs hasn't been injected through
            # argspec.defaults, so we need to add them.
            for k in unused_kwargs:
                if k not in kwargs:
                    kwargs[k] = unused_kwargs[k]
            # If the test function is a method of some kind, the bound object
            # will be the first named argument if there are any, otherwise the
            # first vararg (if any).
            if argspec.args:
                selfy = kwargs.get(argspec.args[0])
            elif arguments:
                selfy = arguments[0]
            if isinstance(selfy, HypothesisProvided):
                selfy = None
            test_runner = executor(selfy)

            for example in getattr(
                wrapped_test, u'hypothesis_explicit_examples', ()
            ):
                if example.args:
                    example_kwargs = dict(zip(
                        argspec.args[-len(example.args):], example.args
                    ))
                else:
                    example_kwargs = dict(example.kwargs)

                for k, v in kwargs.items():
                    if not isinstance(v, HypothesisProvided):
                        example_kwargs[k] = v
                # Note: Test may mutate arguments and we can't rerun explicit
                # examples, so we have to calculate the failure message at this
                # point rather than later.
                message_on_failure = u'Falsifying example: %s(%s)' % (
                    test.__name__, arg_string(test, arguments, example_kwargs)
                )
                try:
                    test_runner(
                        lambda: test(*arguments, **example_kwargs)
                    )
                except BaseException:
                    report(message_on_failure)
                    raise

            if not any(
                isinstance(x, HypothesisProvided)
                for xs in (arguments, kwargs.values())
                for x in xs
            ):
                # All arguments have been satisfied without needing to invoke
                # hypothesis
                test_runner(lambda: test(*arguments, **kwargs))
                return

            def convert_to_specifier(v):
                if isinstance(v, HypothesisProvided):
                    return strategy(v.value, settings)
                else:
                    return sd.just(v)

            given_specifier = sd.tuples(
                sd.tuples(*map(convert_to_specifier, arguments)),
                sd.fixed_dictionaries(dict(
                    (k, convert_to_specifier(v)) for (k, v) in kwargs.items()))
            )

            search_strategy = strategy(given_specifier, settings)

            if settings.database:
                storage = settings.database.storage(
                    fully_qualified_name(test))
            else:
                storage = None

            last_exception = [None]
            repr_for_last_exception = [None]

            def is_template_example(xs):
                record_repr = [None]
                try:
                    test_runner(reify_and_execute(
                        search_strategy, xs, test,
                        always_print=settings.max_shrinks <= 0,
                        record_repr=record_repr,
                    ))
                    return False
                except UnsatisfiedAssumption as e:
                    raise e
                except Exception as e:
                    if settings.max_shrinks <= 0:
                        raise e
                    last_exception[0] = traceback.format_exc()
                    repr_for_last_exception[0] = record_repr[0]
                    verbose_report(last_exception[0])
                    return True

            is_template_example.__name__ = test.__name__
            is_template_example.__qualname__ = qualname(test)

            falsifying_template = None
            try:
                falsifying_template = best_satisfying_template(
                    search_strategy, random, is_template_example,
                    settings, storage
                )
            except NoSuchExample:
                return

            assert last_exception[0] is not None

            with settings:
                test_runner(reify_and_execute(
                    search_strategy, falsifying_template, test,
                    print_example=True, is_final=True
                ))

                report(
                    u'Failed to reproduce exception. Expected: \n' +
                    last_exception[0],
                )

                test_runner(reify_and_execute(
                    search_strategy, falsifying_template,
                    test_is_flaky(test, repr_for_last_exception[0]),
                    print_example=True, is_final=True
                ))
        for attr in dir(test):
            if attr[0] != '_' and not hasattr(wrapped_test, attr):
                setattr(wrapped_test, attr, getattr(test, attr))
        wrapped_test.is_hypothesis_test = True
        return wrapped_test
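
A hedged sketch of the HypothesisProvided convention used by this older decorator (assumed-era API, not part of the source): defaulted arguments are generated from strategies, while explicitly passed values leave no HypothesisProvided markers behind, so the wrapper takes the "all arguments have been satisfied" branch and calls the test once, directly.

from hypothesis import given
from hypothesis.strategies import integers

@given(x=integers())
def test_parity(x):
    assert x % 2 in (0, 1)

test_parity()     # x is drawn from the integers() strategy
test_parity(x=4)  # no HypothesisProvided left: the test runs once, directly
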
Example #43
def test_digests_are_reasonably_unique():
    assert (
        function_digest(test_simple_conversion) !=
        function_digest(test_does_not_error_on_dynamically_defined_functions)
    )
Example #44
def find(specifier, condition, settings=None, random=None, database_key=None):
    """Returns the minimal example from the given strategy ``specifier`` that
    matches the predicate function ``condition``."""
    settings = settings or Settings(
        max_examples=2000,
        min_satisfying_examples=0,
        max_shrinks=2000,
    )
    settings = Settings(settings, perform_health_check=False)

    if database_key is None and settings.database is not None:
        database_key = function_digest(condition)

    if not isinstance(specifier, SearchStrategy):
        raise InvalidArgument(
            'Expected SearchStrategy but got %r of type %s' % (
                specifier, type(specifier).__name__
            ))
    specifier.validate()

    search = specifier

    random = random or new_random()
    successful_examples = [0]
    last_data = [None]

    def template_condition(data):
        with BuildContext(data):
            try:
                data.is_find = True
                result = data.draw(search)
                data.note(result)
                success = condition(result)
            except UnsatisfiedAssumption:
                data.mark_invalid()

        if success:
            successful_examples[0] += 1

        if settings.verbosity == Verbosity.verbose:
            if not successful_examples[0]:
                report(lambda: u'Trying example %s' % (
                    nicerepr(result),
                ))
            elif success:
                if successful_examples[0] == 1:
                    report(lambda: u'Found satisfying example %s' % (
                        nicerepr(result),
                    ))
                    last_data[0] = data
                elif (
                    sort_key(hbytes(data.buffer)) <
                    sort_key(last_data[0].buffer)
                ):
                    report(lambda: u'Shrunk example to %s' % (
                        nicerepr(result),
                    ))
                    last_data[0] = data
        if success and not data.frozen:
            data.mark_interesting()
    start = time.time()
    runner = ConjectureRunner(
        template_condition, settings=settings, random=random,
        database_key=database_key,
    )
    runner.run()
    note_engine_for_statistics(runner)
    run_time = time.time() - start
    if runner.interesting_examples:
        data = ConjectureData.for_buffer(
            list(runner.interesting_examples.values())[0].buffer)
        with BuildContext(data):
            return data.draw(search)
    if (
        runner.valid_examples <= settings.min_satisfying_examples and
        runner.exit_reason != ExitReason.finished
    ):
        if settings.timeout > 0 and run_time > settings.timeout:
            raise Timeout((
                'Ran out of time before finding enough valid examples for '
                '%s. Only %d valid examples found in %.2f seconds.'
            ) % (
                get_pretty_function_description(condition),
                runner.valid_examples, run_time))

        else:
            raise Unsatisfiable((
                'Unable to satisfy assumptions of '
                '%s. Only %d of the examples considered satisfied its '
                'assumptions.'
            ) % (
                get_pretty_function_description(condition),
                runner.valid_examples,))

    raise NoSuchExample(get_pretty_function_description(condition))
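
Since the docstring above describes find as returning the minimal example matching a predicate, a short usage sketch may help; it assumes the public from hypothesis import find entry point, which was current for this code but has been deprecated in later releases:

from hypothesis import find, strategies as st

# Ask for the minimal integer satisfying the predicate; shrinking then
# drives the answer down to the smallest satisfying example, here 101.
smallest = find(st.integers(), lambda x: x > 100)
print(smallest)  # expected: 101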
Example #45
def test_can_digest_a_built_in_function():
    import math
    assert function_digest(math.isnan) != function_digest(range)
Example #46
    def falsify(
            self, hypothesis,
            *argument_types,
            **kwargs
    ):  # pylint: disable=too-many-locals,too-many-branches
        """
        Attempt to construct an example tuple x matching argument_types such
        that hypothesis(*x) returns a falsey value.
        """
        teardown_example = kwargs.get('teardown_example') or (lambda x: None)
        setup_example = kwargs.get('setup_example') or (lambda: None)
        random = self.random
        if random is None:
            random = Random(
                function_digest(hypothesis)
            )

        build_context = BuildContext(random)

        search_strategy = strategy(argument_types, self.settings)
        storage = None
        if self.database is not None:
            storage = self.database.storage_for(argument_types)

        def falsifies(args):  # pylint: disable=missing-docstring
            example = None
            try:
                try:
                    setup_example()
                    example = search_strategy.reify(args)
                    return not hypothesis(*example)
                except UnsatisfiedAssumption:
                    return False
            finally:
                teardown_example(example)

        track_seen = Tracker()
        falsifying_examples = []
        if storage:
            for example in storage.fetch():
                track_seen.track(example)
                # Stop at the first stored example that still falsifies.
                if falsifies(example):
                    falsifying_examples = [example]
                    break

        satisfying_examples = 0
        timed_out = False
        max_examples = self.max_examples
        min_satisfying_examples = self.min_satisfying_examples

        parameter_source = ParameterSource(
            context=build_context, strategy=search_strategy,
            min_parameters=max(2, int(float(max_examples) / 10))
        )
        start_time = time.time()

        def time_to_call_it_a_day():
            """Have we exceeded our timeout?"""
            if self.timeout <= 0:
                return False
            return time.time() >= start_time + self.timeout

        for parameter in islice(
            parameter_source, max_examples - len(track_seen)
        ):
            if len(track_seen) >= search_strategy.size_upper_bound:
                break

            if falsifying_examples:
                break
            if time_to_call_it_a_day():
                break

            args = search_strategy.produce_template(
                build_context, parameter
            )

            if track_seen.track(args) > 1:
                parameter_source.mark_bad()
                continue
            try:
                setup_example()
                a = None
                try:
                    a = search_strategy.reify(args)
                    is_falsifying_example = not hypothesis(*a)
                finally:
                    teardown_example(a)
            except UnsatisfiedAssumption:
                parameter_source.mark_bad()
                continue
            satisfying_examples += 1
            if is_falsifying_example:
                falsifying_examples.append(args)
        run_time = time.time() - start_time
        timed_out = self.timeout > 0 and run_time >= self.timeout
        if not falsifying_examples:
            if (
                satisfying_examples and
                len(track_seen) >= search_strategy.size_lower_bound
            ):
                raise Exhausted(
                    hypothesis, satisfying_examples)
            elif satisfying_examples < min_satisfying_examples:
                if timed_out:
                    raise Timeout(hypothesis, satisfying_examples, run_time)
                else:
                    raise Unsatisfiable(
                        hypothesis, satisfying_examples, run_time)
            else:
                raise Unfalsifiable(hypothesis)

        for example in falsifying_examples:
            if not falsifies(example):
                raise Flaky(hypothesis, example)

        best_example = falsifying_examples[0]

        for simpler in search_strategy.simplify_such_that(
            random,
            best_example, falsifies,
            tracker=track_seen,
        ):
            best_example = simpler
            if time_to_call_it_a_day():
                # This branch is marked 'no cover' because it is sensitive
                # to timing and tends to make tests flaky; other tests mean
                # it is still exercised most of the time.
                break  # pragma: no cover

        if storage is not None:
            storage.save(best_example)

        setup_example()
        return search_strategy.reify(best_example)
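
The falsify method above comes from the legacy Verifier API that predates @given; its modern moral equivalent is to search for an input on which the hypothesis is falsey. A hedged sketch of the same query expressed through find (assuming the public find/strategies imports; Verifier itself is long gone from the library):

from hypothesis import find, strategies as st

def my_hypothesis(x):
    # The claim under test: every integer is below 100.
    return x < 100

# falsify(my_hypothesis, int) asked for arguments making the hypothesis
# falsey; with find we invert the predicate and search for exactly that,
# letting shrinking return the minimal counterexample.
counterexample = find(st.integers(), lambda x: not my_hypothesis(x))
print(counterexample)  # expected minimal counterexample: 100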
Example #47
def test_can_digest_a_function_with_no_name():
    def foo(x, y):
        pass
    function_digest(partial(foo, 1))
Example #48
def test_lambda_digests_are_stable_across_process_runs():
    digest = function_digest(lambda x: 42)
    print(repr(digest))
    assert digest == b'\x96=\xed6\x07\x1d\xb6:\x1a\x8b\x8eA\xd0\x19\xd7\x9c'
Example #49
def run_state_machine_as_test(state_machine_factory, *, settings=None):
    """Run a state machine definition as a test, either silently doing nothing
    or printing a minimal breaking program and raising an exception.

    state_machine_factory is anything which returns an instance of
    RuleBasedStateMachine when called with no arguments - it can be a class or a
    function. settings will be used to control the execution of the test.
    """
    if settings is None:
        try:
            settings = state_machine_factory.TestCase.settings
            check_type(Settings, settings,
                       "state_machine_factory.TestCase.settings")
        except AttributeError:
            settings = Settings(deadline=None,
                                suppress_health_check=HealthCheck.all())
    check_type(Settings, settings, "settings")

    @settings
    @given(st.data())
    def run_state_machine(factory, data):
        cd = data.conjecture_data
        machine = factory()
        check_type(RuleBasedStateMachine, machine, "state_machine_factory()")
        cd.hypothesis_runner = machine

        print_steps = (current_build_context().is_final
                       or current_verbosity() >= Verbosity.debug)
        try:
            if print_steps:
                report("state = %s()" % (machine.__class__.__name__, ))
            machine.check_invariants()
            max_steps = settings.stateful_step_count
            steps_run = 0

            while True:
                # We basically always want to run the maximum number of steps,
                # but need to leave a small probability of terminating early
                # so that the shrinker can reduce the number of steps once we
                # find a failing test case. During normal operation we
                # therefore stop with probability 2 ** -16, and force a stop
                # once we've generated enough steps.
                cd.start_example(STATE_MACHINE_RUN_LABEL)
                if steps_run == 0:
                    cd.draw_bits(16, forced=1)
                elif steps_run >= max_steps:
                    cd.draw_bits(16, forced=0)
                    break
                else:
                    # All we really care about is whether this value is zero
                    # or non-zero, so if it's > 1 we discard it and write a
                    # canonical non-zero replacement value in its place.
                    cd.start_example(SHOULD_CONTINUE_LABEL)
                    should_continue_value = cd.draw_bits(16)
                    if should_continue_value > 1:
                        cd.stop_example(discard=True)
                        cd.draw_bits(16,
                                     forced=int(bool(should_continue_value)))
                    else:
                        cd.stop_example()
                        if should_continue_value == 0:
                            break
                steps_run += 1

                # Choose a rule to run, preferring an initialize rule if there are
                # any which have not been run yet.
                if machine._initialize_rules_to_run:
                    init_rules = [
                        st.tuples(st.just(rule),
                                  st.fixed_dictionaries(rule.arguments))
                        for rule in machine._initialize_rules_to_run
                    ]
                    rule, data = cd.draw(st.one_of(init_rules))
                    machine._initialize_rules_to_run.remove(rule)
                else:
                    rule, data = cd.draw(machine._rules_strategy)

                # Pretty-print the values this rule was called with *before* calling
                # _add_result_to_targets, to avoid printing arguments which are also
                # a return value using the variable name they are assigned to.
                # See https://github.com/HypothesisWorks/hypothesis/issues/2341
                if print_steps:
                    data_to_print = {
                        k: machine._pretty_print(v)
                        for k, v in data.items()
                    }

                # Assign 'result' here in case executing the rule fails below
                result = multiple()
                try:
                    data = dict(data)
                    for k, v in list(data.items()):
                        if isinstance(v, VarReference):
                            data[k] = machine.names_to_values[v.name]
                    result = rule.function(machine, **data)
                    if rule.targets:
                        if isinstance(result, MultipleResults):
                            for single_result in result.values:
                                machine._add_result_to_targets(
                                    rule.targets, single_result)
                        else:
                            machine._add_result_to_targets(
                                rule.targets, result)
                finally:
                    if print_steps:
                        # 'result' is only used if the step has target bundles.
                        # If it does, and the result is a 'MultipleResult',
                        # then 'print_step' prints a multi-variable assignment.
                        machine._print_step(rule, data_to_print, result)
                machine.check_invariants()
                cd.stop_example()
        finally:
            if print_steps:
                report("state.teardown()")
            machine.teardown()

    # Use a machine digest to identify stateful tests in the example database
    run_state_machine.hypothesis.inner_test._hypothesis_internal_add_digest = function_digest(
        state_machine_factory)
    # Copy some attributes so @seed and @reproduce_failure "just work"
    run_state_machine._hypothesis_internal_use_seed = getattr(
        state_machine_factory, "_hypothesis_internal_use_seed", None)
    run_state_machine._hypothesis_internal_use_reproduce_failure = getattr(
        state_machine_factory, "_hypothesis_internal_use_reproduce_failure",
        None)
    run_state_machine._hypothesis_internal_print_given_args = False

    run_state_machine(state_machine_factory)
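
To see this runner in action it can be pointed at any RuleBasedStateMachine subclass. A minimal sketch in which CounterMachine is invented for illustration, while rule, RuleBasedStateMachine, and run_state_machine_as_test are the hypothesis.stateful exports this function ships alongside:

from hypothesis import settings
from hypothesis.stateful import (
    RuleBasedStateMachine,
    rule,
    run_state_machine_as_test,
)

class CounterMachine(RuleBasedStateMachine):
    """A toy machine whose single rule asserts its own invariant."""

    def __init__(self):
        super().__init__()
        self.count = 0

    @rule()
    def increment(self):
        self.count += 1
        assert self.count > 0

# Runs up to stateful_step_count rules per generated example, exactly as
# the while-loop above describes.
run_state_machine_as_test(
    CounterMachine,
    settings=settings(max_examples=20, stateful_step_count=10, deadline=None),
)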
Example #50
def test_digests_are_reasonably_unique():
    assert (
        function_digest(test_simple_conversion) !=
        function_digest(test_does_not_error_on_dynamically_defined_functions))