示例#1
0
def find(specifier, condition, settings=None, random=None):
    """Return the minimal reified example from ``specifier`` satisfying ``condition``.

    Raises DefinitelyNoSuchExample when the whole (finite) template space
    was explored without success, and NoSuchExample when the search simply
    gave up before exhausting it.
    """
    # Generous default budget; min_satisfying_examples=0 stops the runner
    # complaining when very few examples satisfy the predicate.
    settings = settings or Settings(
        max_examples=2000,
        min_satisfying_examples=0,
    )

    search = strategy(specifier, settings)
    random = random or Random()

    def template_condition(template):
        # assume() turns a falsey result into an UnsatisfiedAssumption, so
        # non-matching templates are treated as invalid rather than failing.
        return assume(condition(search.reify(template)))

    # Preserve the predicate's name for nicer reporting.
    template_condition.__name__ = condition.__name__
    tracker = Tracker()

    try:
        return search.reify(best_satisfying_template(
            search, random, template_condition, settings, None,
            tracker=tracker,
        ))
    except NoSuchExample:
        # If we tried at least as many distinct templates as the strategy
        # can ever produce, no satisfying example can exist at all.
        if search.size_upper_bound <= len(tracker):
            raise DefinitelyNoSuchExample(
                get_pretty_function_description(condition),
                search.size_upper_bound,
            )
        raise NoSuchExample(get_pretty_function_description(condition))
def test_does_not_error_on_confused_sources():
    """Source extraction must not crash when the call site is ambiguous.

    The lambda is buried in a call with extra arguments, which confuses
    naive source-line extraction; we only require that describing it does
    not raise.
    """
    def ed(f, *args):
        return f

    x = ed(lambda x, y: (x * y).conjugate() == x.conjugate() * y.conjugate(), complex, complex)  # pragma: no cover

    get_pretty_function_description(x)
示例#3
0
def find(specifier, condition, settings=None, random=None, storage=None):
    """Return the minimal reified example from ``specifier`` satisfying ``condition``.

    Optionally reuses/records examples via ``storage``, which defaults to a
    database bucket derived from the predicate when settings provide one.
    """
    settings = settings or Settings(
        max_examples=2000,
        min_satisfying_examples=0,
        max_shrinks=2000,
    )

    search = strategy(specifier, settings)

    # Key the example database on a digest of the predicate so repeated
    # find() calls with the same condition can reuse earlier results.
    if storage is None and settings.database is not None:
        storage = settings.database.storage(
            'find(%s)' % (
                binascii.hexlify(function_digest(condition)).decode('ascii'),
            )
        )

    random = random or Random()
    # Single-element list so the nested closure can mutate the counter.
    successful_examples = [0]

    def template_condition(template):
        result = search.reify(template)
        success = condition(result)

        if success:
            successful_examples[0] += 1

        # Verbose progress: before the first success we are still "trying";
        # the first success is "found"; later successes are "shrunk".
        if not successful_examples[0]:
            verbose_report(lambda: 'Trying example %s' % (
                repr(result),
            ))
        elif success:
            if successful_examples[0] == 1:
                verbose_report(lambda: 'Found satisfying example %s' % (
                    repr(result),
                ))
            else:
                verbose_report(lambda: 'Shrunk example to %s' % (
                    repr(result),
                ))
        return success

    # Preserve the predicate's name for nicer reporting.
    template_condition.__name__ = condition.__name__
    tracker = Tracker()

    try:
        return search.reify(best_satisfying_template(
            search, random, template_condition, settings,
            tracker=tracker, max_parameter_tries=2,
            storage=storage,
        ))
    except Timeout:
        # Timeouts propagate unchanged rather than being re-labelled below.
        raise
    except NoSuchExample:
        # Having tried every distinct template proves no example can exist.
        if search.template_upper_bound <= len(tracker):
            raise DefinitelyNoSuchExample(
                get_pretty_function_description(condition),
                search.template_upper_bound,
            )
        raise NoSuchExample(get_pretty_function_description(condition))
示例#4
0
def find(specifier, condition, settings=None, random=None):
    """Return the minimal reified example from ``specifier`` satisfying ``condition``.

    Variant without database storage; uses ``show()`` for verbose output and
    also converts Unsatisfiable into a no-example failure.
    """
    settings = settings or Settings(
        max_examples=2000,
        min_satisfying_examples=0,
        max_shrinks=2000,
    )

    search = strategy(specifier, settings)
    random = random or Random()
    # Single-element list so the nested closure can mutate the counter.
    successful_examples = [0]

    def template_condition(template):
        result = search.reify(template)
        success = condition(result)

        if success:
            successful_examples[0] += 1

        # Verbose progress: "trying" until the first success, then "found",
        # then "shrunk" for each subsequent improvement.
        if not successful_examples[0]:
            verbose_report(lambda: 'Trying example %s' % (
                show(result),
            ))
        elif success:
            if successful_examples[0] == 1:
                verbose_report(lambda: 'Found satisfying example %s' % (
                    show(result),
                ))
            else:
                verbose_report(lambda: 'Shrunk example to %s' % (
                    show(result),
                ))
        return success

    # Preserve the predicate's name for nicer reporting.
    template_condition.__name__ = condition.__name__
    tracker = Tracker()

    try:
        return search.reify(best_satisfying_template(
            search, random, template_condition, settings, None,
            tracker=tracker, max_parameter_tries=2,
        ))
    except Timeout:
        raise
    except (NoSuchExample, Unsatisfiable):
        # Exhausting the finite template space proves non-existence.
        if search.size_upper_bound <= len(tracker):
            raise DefinitelyNoSuchExample(
                get_pretty_function_description(condition),
                search.size_upper_bound,
            )
        raise NoSuchExample(get_pretty_function_description(condition))
示例#5
0
 def __repr__(self):
     """Lazily build and memoize the repr of this mapped strategy."""
     try:
         return self._cached_repr
     except AttributeError:
         description = get_pretty_function_description(self.pack)
         self._cached_repr = '%r.map(%s)' % (self.mapped_strategy, description)
         return self._cached_repr
示例#6
0
 def __repr__(self):
     """Compute the filter repr once and cache it on the instance."""
     if hasattr(self, "_cached_repr"):
         return self._cached_repr
     condition_text = get_pretty_function_description(self.condition)
     self._cached_repr = "%r.filter(%s)" % (self.filtered_strategy, condition_text)
     return self._cached_repr
示例#7
0
 def __repr__(self):
     """Return a cached, human-readable description of the filtered strategy."""
     try:
         return self._cached_repr
     except AttributeError:
         pretty = get_pretty_function_description(self.condition)
         self._cached_repr = '%r.filter(%s)' % (self.filtered_strategy, pretty)
         return self._cached_repr
 def __repr__(self):
     """Build the map repr lazily and store it for later calls."""
     if hasattr(self, "_cached_repr"):
         return self._cached_repr
     self._cached_repr = "%r.map(%s)" % (
         self.mapped_strategy, get_pretty_function_description(self.pack))
     return self._cached_repr
def test_collapses_whitespace_nicely():
    """Internal runs of whitespace in a lambda's source collapse to single spaces.

    The odd spacing inside the lambda is the test input, so the formatter
    must be kept off for this region.
    """
    # fmt: off
    t = (
        lambda x,       y:           1
    )
    # fmt: on
    assert get_pretty_function_description(t) == "lambda x, y: 1"
示例#10
0
 def __repr__(self):
     """Cache and return a 'recursive(...)' description of this strategy."""
     if hasattr(self, '_cached_repr'):
         return self._cached_repr
     extend_text = get_pretty_function_description(self.extend)
     self._cached_repr = 'recursive(%r, %s, max_leaves=%d)' % (
         self.base, extend_text, self.max_leaves)
     return self._cached_repr
 def __repr__(self):
     """Memoized repr of this flatmapped strategy."""
     try:
         return self._cached_repr
     except AttributeError:
         self._cached_repr = u"%r.flatmap(%s)" % (
             self.flatmapped_strategy,
             get_pretty_function_description(self.expand),
         )
         return self._cached_repr
def test_does_not_error_on_unparsable_source():
    """Falls back to '<unknown>' for the body when extracted source cannot be parsed."""
    t = [
        lambda x: \
        # This will break ast.parse, but the brackets are needed for the real
        # parser to accept this lambda
        x][0]
    assert get_pretty_function_description(t) == 'lambda x: <unknown>'
示例#13
0
def find(specifier, condition, settings=None, random=None, storage=None):
    """Return the minimal reified example from ``specifier`` satisfying ``condition``.

    Reification runs inside a BuildContext; the winning template is reified
    a final time under an is_final context before being returned.
    """
    settings = settings or Settings(max_examples=2000, min_satisfying_examples=0, max_shrinks=2000)

    from hypothesis.internal.strategymethod import strategy

    search = strategy(specifier, settings)

    # Database bucket keyed on a digest of the predicate, so repeated calls
    # with the same condition can reuse earlier results.
    if storage is None and settings.database is not None:
        storage = settings.database.storage(
            u"find(%s)" % (binascii.hexlify(function_digest(condition)).decode(u"ascii"),)
        )

    random = random or new_random()
    # Single-element list so the nested closure can mutate the counter.
    successful_examples = [0]

    def template_condition(template):
        with BuildContext():
            result = search.reify(template)
            success = condition(result)

        if success:
            successful_examples[0] += 1

        # Verbose progress: "trying" until the first success, then "found",
        # then "shrunk" for each later improvement.
        if not successful_examples[0]:
            verbose_report(lambda: u"Trying example %s" % (repr(result),))
        elif success:
            if successful_examples[0] == 1:
                verbose_report(lambda: u"Found satisfying example %s" % (repr(result),))
            else:
                verbose_report(lambda: u"Shrunk example to %s" % (repr(result),))
        return success

    # Preserve the predicate's name for nicer reporting.
    template_condition.__name__ = condition.__name__
    tracker = Tracker()

    try:
        template = best_satisfying_template(
            search, random, template_condition, settings, tracker=tracker, max_parameter_tries=2, storage=storage
        )
        # Final reification: don't capture output, mark as the final example.
        with BuildContext(is_final=True, close_on_capture=False):
            return search.reify(template)
    except Timeout:
        raise
    except NoSuchExample:
        # Having tried every distinct template proves no example can exist.
        if search.template_upper_bound <= len(tracker):
            raise DefinitelyNoSuchExample(get_pretty_function_description(condition), search.template_upper_bound)
        raise NoSuchExample(get_pretty_function_description(condition))
    def run_test():
        """Statistically check that ``predicate`` holds often enough under ``condition``.

        Closure variables (condition, specifier, predicate, RUNS,
        REQUIRED_RUNS, HypothesisFalsified, ...) come from the enclosing
        scope, which is outside this view.
        """
        if condition is None:
            # No conditioning: accept every drawn value.
            def _condition(x):
                return True
            condition_string = u''
        else:
            _condition = condition
            condition_string = strip_lambda(
                reflection.get_pretty_function_description(condition))

        def test_function(data):
            # Values failing the draw or the condition are marked invalid;
            # values satisfying the predicate end the run as "interesting".
            try:
                value = data.draw(specifier)
            except UnsatisfiedAssumption:
                data.mark_invalid()
            if not _condition(value):
                data.mark_invalid()
            if predicate(value):
                data.mark_interesting()

        successes = 0
        for _ in range(RUNS):
            runner = ConConjectureRunner(
                test_function,
                settings=Settings(
                    max_examples=100,
                    max_iterations=1000,
                    max_shrinks=0
                ))
            runner.run()
            if runner.last_data.status == Status.INTERESTING:
                successes += 1
                # Early exit as soon as the success quota is met.
                if successes >= REQUIRED_RUNS:
                    return
        event = reflection.get_pretty_function_description(predicate)
        if condition is not None:
            event += '|'
            event += condition_string

        # NOTE(review): relies on true division for the ratios; on Python 2
        # this needs a `from __future__ import division` in the module.
        description = (
            u'P(%s) ~ %d / %d = %.2f < %.2f'
        ) % (
            event,
            successes, RUNS,
            successes / RUNS, (REQUIRED_RUNS / RUNS)
        )
        raise HypothesisFalsified(description + u' rejected')
def test_lambda_source_break_after_def_with_brackets():
    """A lambda whose body continues on the next line (inside brackets) is rejoined."""
    # fmt: off
    f = (lambda n:
         'aaa')
    # fmt: on

    source = get_pretty_function_description(f)
    assert source == "lambda n: 'aaa'"
def test_lambda_source_break_after_def_with_line_continuation():
    """A lambda split with a backslash continuation is rejoined onto one line."""
    # fmt: off
    f = lambda n:\
        'aaa'
    # fmt: on

    source = get_pretty_function_description(f)
    assert source == "lambda n: 'aaa'"
示例#17
0
 def __init__(self, hypothesis, examples, run_time):
     """Format the unsatisfiable-assumptions message and pass it to the base class."""
     message = (
         'Unable to satisfy assumptions of hypothesis %s. ' +
         'Only %s examples found after %g seconds'
     ) % (
         get_pretty_function_description(hypothesis),
         str(examples),
         run_time,
     )
     super(Unsatisfiable, self).__init__(message)
示例#18
0
    def __call__(self, test):
        """Make the settings object (self) an attribute of the test.

        The settings are later discovered by looking them up on the test
        itself.

        Also, we want to issue a deprecation warning for settings used alone
        (without @given) so, note the deprecation in the new test, but also
        attach the version without the warning as an attribute, so that @given
        can unwrap it (since if @given is used, that means we don't want the
        deprecation warning).

        When it's time to turn the warning into an error, we'll raise an
        exception instead of calling note_deprecation (and can delete
        "test(*args, **kwargs)").
        """
        if not callable(test):
            raise InvalidArgument(
                'settings objects can be called as a decorator with @given, '
                'but test=%r' % (test,)
            )
        # Detect double application of @settings on the same test.
        if hasattr(test, '_hypothesis_internal_settings_applied'):
            note_deprecation(
                '%s has already been decorated with a settings object, which '
                'will be overridden.  This will be an error in a future '
                'version of Hypothesis.\n    Previous:  %r\n    This:  %r' % (
                    get_pretty_function_description(test),
                    test._hypothesis_internal_use_settings,
                    self
                )
            )

        test._hypothesis_internal_use_settings = self

        # For double-@settings check:
        test._hypothesis_internal_settings_applied = True

        @proxies(test)
        def new_test(*args, **kwargs):
            note_deprecation(
                'Using `@settings` without `@given` does not make sense and '
                'will be an error in a future version of Hypothesis.'
            )
            test(*args, **kwargs)

        # @given will get the test from this attribution (rather than use the
        # version with the deprecation warning)
        new_test._hypothesis_internal_test_function_without_warning = test

        # This means @given has been applied, so we don't need to worry about
        # warning for @settings alone.
        has_given_applied = getattr(test, 'is_hypothesis_test', False)
        test_to_use = test if has_given_applied else new_test
        test_to_use._hypothesis_internal_use_settings = self
        # Can't use _hypothesis_internal_use_settings as an indicator that
        # @settings was applied, because @given also assigns that attribute.
        # NOTE(review): this flag was already set on `test` above, before
        # @proxies ran; this second assignment looks redundant — confirm
        # proxies()'s attribute-copying semantics before removing it.
        test._hypothesis_internal_settings_applied = True
        return test_to_use
 def __repr__(self):
     """Memoize a repr that shows every stacked .filter(...) condition."""
     if hasattr(self, "_cached_repr"):
         return self._cached_repr
     suffix = "".join(
         ".filter(%s)" % get_pretty_function_description(cond)
         for cond in self.flat_conditions
     )
     self._cached_repr = "%r%s" % (self.filtered_strategy, suffix)
     return self._cached_repr
    def run_test():
        """Estimate how often ``predicate`` holds and falsify if below the target.

        Closure variables (condition, specifier, predicate, q, MAX_RUNS,
        MIN_RUNS, REQUIRED_P, ...) come from the enclosing scope, which is
        outside this view.
        """
        if condition is None:
            # No conditioning: accept every drawn value.
            def _condition(x):
                return True
            condition_string = u''
        else:
            _condition = condition
            condition_string = strip_lambda(
                reflection.get_pretty_function_description(condition))

        # Single-element lists so the nested closure can mutate the counters.
        count = [0]
        successful_runs = [0]

        def test_function(data):
            try:
                value = data.draw(specifier)
            except UnsatisfiedAssumption:
                data.mark_invalid()
            if not _condition(value):
                data.mark_invalid()
            successful_runs[0] += 1
            if predicate(value):
                count[0] += 1
        ConTestRunner(
            test_function,
            settings=Settings(
                max_examples=MAX_RUNS,
                max_iterations=MAX_RUNS * 10,
            )).run()
        # Deliberately rebind the list names to their scalar contents for
        # the remainder of the function.
        successful_runs = successful_runs[0]
        count = count[0]
        if successful_runs < MIN_RUNS:
            raise ConditionTooHard((
                u'Unable to find enough examples satisfying predicate %s '
                u'only found %d but required at least %d for validity'
            ) % (
                condition_string, successful_runs, MIN_RUNS
            ))

        result = Result(
            count,
            successful_runs,
            q,
            predicate,
            condition_string,
            specifier,
        )

        p = cumulative_binomial_probability(successful_runs, q, count)
        # Expose the result on the function object for later inspection.
        run_test.test_result = result
        # The test passes if we fail to reject the null hypothesis that
        # the probability is at least q
        if p < REQUIRED_P:
            result.failed = True
            raise HypothesisFalsified(result.description() + u' rejected')
 def __repr__(self):
     """Repr that guards against infinite recursion through self-reference."""
     if self.__wrapped_strategy is None:
         # Not resolved yet: describe the deferred definition itself.
         return "deferred(%s)" % (get_pretty_function_description(self.__definition))
     if self.__in_repr:
         # Already printing this strategy further up the stack.
         return "(deferred@%r)" % (id(self),)
     self.__in_repr = True
     try:
         return repr(self.__wrapped_strategy)
     finally:
         self.__in_repr = False
 def description(self):
     """One-line summary of the statistical test outcome for reporting."""
     if self.condition_string:
         condition_part = u' | ' + self.condition_string
     else:
         condition_part = u''
     predicate_part = strip_lambda(
         reflection.get_pretty_function_description(self.predicate))
     success_ratio = float(self.success_count) / self.total_runs
     return (
         u'P(%s%s) >= %g: p = %g. Occurred in %d / %d = %g of runs. '
     ) % (
         predicate_part,
         condition_part,
         self.desired_probability,
         self.p,
         self.success_count,
         self.total_runs,
         success_ratio,
     )
    def run_test():
        """Estimate how often ``predicate`` holds and falsify if below the target.

        Closure variables (condition, specifier, predicate, q, random,
        MAX_RUNS, MIN_RUNS, REQUIRED_P, ...) come from the enclosing scope,
        which is outside this view.
        """
        if condition is None:
            # No conditioning: accept every drawn value.  Use a def rather
            # than an assigned lambda (PEP 8 E731), matching the other
            # run_test variants in this file.
            def _condition(x):
                return True
            condition_string = u''
        else:
            _condition = condition
            condition_string = strip_lambda(
                reflection.get_pretty_function_description(condition))

        count = 0
        successful_runs = 0
        s = specifier
        for _ in hrange(MAX_RUNS):
            pv = s.draw_parameter(random)
            try:
                x = s.reify(s.draw_template(random, pv))
            except UnsatisfiedAssumption:
                # Draw failed its own assumptions; skip it entirely.
                continue
            if not _condition(x):
                continue
            successful_runs += 1
            if predicate(x):
                count += 1
        if successful_runs < MIN_RUNS:
            raise ConditionTooHard((
                u'Unable to find enough examples satisfying predicate %s '
                u'only found %d but required at least %d for validity'
            ) % (
                condition_string, successful_runs, MIN_RUNS
            ))

        result = Result(
            count,
            successful_runs,
            q,
            predicate,
            condition_string,
            specifier,
        )

        p = cumulative_binomial_probability(successful_runs, q, count)
        # Expose the result on the function object for later inspection.
        run_test.test_result = result
        # The test passes if we fail to reject the null hypothesis that
        # the probability is at least q
        if p < REQUIRED_P:
            result.failed = True
            raise HypothesisFalsified(result.description() + u' rejected')
示例#24
0
def test_collapses_whitespace_nicely():
    """Internal runs of whitespace in a lambda's source collapse to single spaces."""
    t = (
        lambda x,       y:           1  # pragma: no cover
    )
    assert get_pretty_function_description(t) == 'lambda x, y: 1'
示例#25
0
def test_is_not_confused_by_tuples():
    """Only the lambda, not the surrounding tuple literal, is extracted."""
    p = (lambda x: x > 1, 2)[0]  # pragma: no cover

    assert get_pretty_function_description(p) == 'lambda x: x > 1'
示例#26
0
def test_does_not_strip_hashes_within_a_string():
    """A '#' inside a string literal must not be treated as a comment marker."""
    t = lambda x: '#'  # pragma: no cover
    assert get_pretty_function_description(t) == "lambda x: '#'"
示例#27
0
def test_does_not_error_if_it_cannot_distinguish_between_two_lambdas():
    """Two lambdas on one source line: both still get a 'lambda x:' description."""
    a, b = (lambda x: 1, lambda x: 2)  # pragma: no cover
    assert 'lambda x:' in get_pretty_function_description(a)
    assert 'lambda x:' in get_pretty_function_description(b)
示例#28
0
 def __init__(self, hypothesis, extra=''):
     """Build the 'unable to falsify' message and delegate to the base class."""
     description = get_pretty_function_description(hypothesis)
     super(Unfalsifiable, self).__init__(
         'Unable to falsify hypothesis %s%s' % (description, extra)
     )
示例#29
0
def test_does_not_error_if_it_cannot_distinguish_between_two_lambdas():
    """Two lambdas on one source line: both still get a 'lambda x:' description."""
    a, b = (lambda x: 1, lambda x: 2)  # pragma: no cover
    assert 'lambda x:' in get_pretty_function_description(a)
    assert 'lambda x:' in get_pretty_function_description(b)
示例#30
0
def test_can_handle_keyword_argument_lambdas():
    """A lambda taking only **kwargs is described faithfully."""
    assert get_pretty_function_description(lambda **x: 1) == 'lambda **x: 1'
示例#31
0
def test_can_handle_variadic_argument_lambdas():
    """A lambda taking only *args is described faithfully."""
    assert get_pretty_function_description(lambda *x: 1) == 'lambda *x: 1'
示例#32
0
    def run(self):
        """Run the Conjecture engine for this test and report the outcome.

        Raises Timeout/Unsatisfiable when too few valid examples were found,
        and replays any falsifying example to produce the final report
        (raising Flaky when the replay behaves differently).
        """
        # Tell pytest to omit the body of this function from tracebacks
        __tracebackhide__ = True
        database_key = str_to_bytes(fully_qualified_name(self.test))
        start_time = time.time()
        runner = ConjectureRunner(
            self.evaluate_test_data,
            settings=self.settings,
            random=self.random,
            database_key=database_key,
        )
        runner.run()
        note_engine_for_statistics(runner)
        run_time = time.time() - start_time
        # timeout <= 0 disables the time limit entirely.
        timed_out = (self.settings.timeout > 0
                     and run_time >= self.settings.timeout)
        if runner.last_data is None:
            return
        if runner.last_data.status == Status.INTERESTING:
            # Remember the failing buffer and persist it for replay.
            self.falsifying_example = runner.last_data.buffer
            if self.settings.database is not None:
                self.settings.database.save(database_key,
                                            self.falsifying_example)
        else:
            # No failure found: check whether enough valid examples ran.
            if runner.valid_examples < min(
                    self.settings.min_satisfying_examples,
                    self.settings.max_examples,
            ) and not (runner.exit_reason == ExitReason.finished
                       and self.at_least_one_success):
                if timed_out:
                    raise Timeout(
                        ('Ran out of time before finding a satisfying '
                         'example for '
                         '%s. Only found %d examples in ' + '%.2fs.') %
                        (get_pretty_function_description(
                            self.test), runner.valid_examples, run_time))
                else:
                    raise Unsatisfiable(
                        ('Unable to satisfy assumptions of hypothesis '
                         '%s. Only %d examples considered '
                         'satisfied assumptions') % (
                             get_pretty_function_description(self.test),
                             runner.valid_examples,
                         ))

        if self.falsifying_example is None:
            return

        assert self.last_exception is not None

        # Replay the falsifying example so its failure is reported with a
        # printed example; a now-passing assumption failure means flakiness.
        try:
            with self.settings:
                self.test_runner(
                    ConjectureData.for_buffer(self.falsifying_example),
                    reify_and_execute(self.search_strategy,
                                      self.test,
                                      print_example=True,
                                      is_final=True))
        except (UnsatisfiedAssumption, StopTest):
            report(traceback.format_exc())
            raise Flaky('Unreliable assumption: An example which satisfied '
                        'assumptions on the first run now fails it.')

        report(
            'Failed to reproduce exception. Expected: \n' +
            self.last_exception, )

        filter_message = (
            'Unreliable test data: Failed to reproduce a failure '
            'and then when it came to recreating the example in '
            'order to print the test data with a flaky result '
            'the example was filtered out (by e.g. a '
            'call to filter in your strategy) when we didn\'t '
            'expect it to be.')

        # Replay once more with a wrapper that raises Flaky, so the user
        # still sees the example even though it no longer fails.
        try:
            self.test_runner(
                ConjectureData.for_buffer(self.falsifying_example),
                reify_and_execute(self.search_strategy,
                                  test_is_flaky(self.test,
                                                self.repr_for_last_exception),
                                  print_example=True,
                                  is_final=True))
        except (UnsatisfiedAssumption, StopTest):
            raise Flaky(filter_message)
示例#33
0
def find(
        specifier,  # type: SearchStrategy
        condition,  # type: Callable[[Any], bool]
        settings=None,  # type: Settings
        random=None,  # type: Any
        database_key=None,  # type: bytes
):
    # type: (...) -> Any
    """Returns the minimal example from the given strategy ``specifier`` that
    matches the predicate function ``condition``.

    Raises Timeout/Unsatisfiable when no valid examples could be generated,
    and NoSuchExample when valid examples exist but none satisfied
    ``condition``.
    """
    if settings is None:
        settings = Settings(max_examples=2000)
    # Health checks are aimed at @given-style tests; suppress them here.
    settings = Settings(settings, suppress_health_check=HealthCheck.all())

    if database_key is None and settings.database is not None:
        database_key = function_digest(condition)

    if not isinstance(specifier, SearchStrategy):
        raise InvalidArgument('Expected SearchStrategy but got %r of type %s' %
                              (specifier, type(specifier).__name__))
    specifier.validate()

    search = specifier

    random = random or new_random()
    # Single-element lists so the nested closure can mutate this state.
    successful_examples = [0]
    last_data = [None]
    last_repr = [None]

    def template_condition(data):
        with BuildContext(data):
            try:
                data.is_find = True
                with deterministic_PRNG():
                    result = data.draw(search)
                    data.note(result)
                    success = condition(result)
            except UnsatisfiedAssumption:
                data.mark_invalid()

        if success:
            successful_examples[0] += 1

        if settings.verbosity >= Verbosity.verbose:
            if not successful_examples[0]:
                report(u'Tried non-satisfying example %s' %
                       (nicerepr(result), ))
            elif success:
                if successful_examples[0] == 1:
                    last_repr[0] = nicerepr(result)
                    report(u'Found satisfying example %s' % (last_repr[0], ))
                    last_data[0] = data
                # Only report a shrink when the buffer is strictly smaller
                # (by shrink order) AND the repr actually changed.
                elif (sort_key(hbytes(data.buffer)) < sort_key(
                        last_data[0].buffer)
                      ) and nicerepr(result) != last_repr[0]:
                    last_repr[0] = nicerepr(result)
                    report(u'Shrunk example to %s' % (last_repr[0], ))
                    last_data[0] = data
        if success and not data.frozen:
            data.mark_interesting()

    start = benchmark_time()
    runner = ConjectureRunner(
        template_condition,
        settings=settings,
        random=random,
        database_key=database_key,
    )
    runner.run()
    note_engine_for_statistics(runner)
    run_time = benchmark_time() - start
    if runner.interesting_examples:
        # Replay the best (first) interesting buffer to rebuild the value.
        data = ConjectureData.for_buffer(
            list(runner.interesting_examples.values())[0].buffer)
        with BuildContext(data):
            with deterministic_PRNG():
                return data.draw(search)
    if runner.valid_examples == 0 and (runner.exit_reason !=
                                       ExitReason.finished):
        if settings.timeout > 0 and run_time > settings.timeout:
            raise Timeout((  # pragma: no cover
                'Ran out of time before finding enough valid examples for '
                '%s. Only %d valid examples found in %.2f seconds.') %
                          (get_pretty_function_description(condition),
                           runner.valid_examples, run_time))

        else:
            raise Unsatisfiable('Unable to satisfy assumptions of %s.' %
                                (get_pretty_function_description(condition), ))

    raise NoSuchExample(get_pretty_function_description(condition))
示例#34
0
    def run(self):
        """Run the Conjecture engine for this test and report the outcome.

        Handles database-key selection, deprecation notes for database use
        with derandomize/@seed, Timeout/Unsatisfiable for too few valid
        examples, and replay of every falsifying example (tracking flaky
        behaviour and multiple distinct failures).
        """
        # Tell pytest to omit the body of this function from tracebacks
        __tracebackhide__ = True
        # A forced global seed makes database reuse meaningless.
        if global_force_seed is None:
            database_key = str_to_bytes(fully_qualified_name(self.test))
        else:
            database_key = None
        self.start_time = benchmark_time()
        runner = ConjectureRunner(
            self.evaluate_test_data,
            settings=self.settings,
            random=self.random,
            database_key=database_key,
        )
        try:
            runner.run()
        finally:
            # Record this even if the engine raised.
            self.used_examples_from_database = \
                runner.used_examples_from_database
        note_engine_for_statistics(runner)
        run_time = benchmark_time() - self.start_time

        self.used_examples_from_database = runner.used_examples_from_database

        if runner.used_examples_from_database:
            if self.settings.derandomize:
                note_deprecation((
                    'In future derandomize will imply database=None, but your '
                    'test: %s is currently using examples from the database. '
                    'To get the future behaviour, update your settings to '
                    'include database=None.') % (self.test.__name__, ))
            if self.__had_seed:
                note_deprecation(
                    ('In future use of @seed will imply database=None in your '
                     'settings, but your test: %s is currently using examples '
                     'from the database. To get the future behaviour, update '
                     'your settings for this test to include database=None.') %
                    (self.test.__name__, ))

        timed_out = runner.exit_reason == ExitReason.timeout
        if runner.call_count == 0:
            return
        if runner.interesting_examples:
            # Largest buffers first so the worst failure is replayed last.
            self.falsifying_examples = sorted(
                [d for d in runner.interesting_examples.values()],
                key=lambda d: sort_key(d.buffer),
                reverse=True)
        else:
            if runner.valid_examples == 0:
                if timed_out:
                    raise Timeout(
                        ('Ran out of time before finding a satisfying '
                         'example for %s. Only found %d examples in %.2fs.') %
                        (get_pretty_function_description(
                            self.test), runner.valid_examples, run_time))
                else:
                    raise Unsatisfiable(
                        'Unable to satisfy assumptions of hypothesis %s.' %
                        (get_pretty_function_description(self.test), ))

        if not self.falsifying_examples:
            return

        self.failed_normally = True

        flaky = 0

        # Replay each falsifying example so its failure is reported with a
        # printed example; count examples that no longer fail as flaky.
        for falsifying_example in self.falsifying_examples:
            ran_example = ConjectureData.for_buffer(falsifying_example.buffer)
            self.__was_flaky = False
            assert falsifying_example.__expected_exception is not None
            try:
                self.execute(ran_example,
                             print_example=True,
                             is_final=True,
                             expected_failure=(
                                 falsifying_example.__expected_exception,
                                 falsifying_example.__expected_traceback,
                             ))
            except (UnsatisfiedAssumption, StopTest):
                report(traceback.format_exc())
                self.__flaky(
                    'Unreliable assumption: An example which satisfied '
                    'assumptions on the first run now fails it.')
            except BaseException:
                # With multiple failures we keep going and report them all.
                if len(self.falsifying_examples) <= 1:
                    raise
                report(traceback.format_exc())
            finally:  # pragma: no cover
                # This section is in fact entirely covered by the tests in
                # test_reproduce_failure, but it seems to trigger a lovely set
                # of coverage bugs: The branches show up as uncovered (despite
                # definitely being covered - you can add an assert False else
                # branch to verify this and see it fail - and additionally the
                # second branch still complains about lack of coverage even if
                # you add a pragma: no cover to it!
                # See https://bitbucket.org/ned/coveragepy/issues/623/
                if self.settings.print_blob is not PrintSettings.NEVER:
                    failure_blob = encode_failure(falsifying_example.buffer)
                    # Have to use the example we actually ran, not the original
                    # falsifying example! Otherwise we won't catch problems
                    # where the repr of the generated example doesn't parse.
                    can_use_repr = ran_example.can_reproduce_example_from_repr
                    if (self.settings.print_blob is PrintSettings.ALWAYS or
                        (self.settings.print_blob is PrintSettings.INFER
                         and self.settings.verbosity >= Verbosity.normal
                         and not can_use_repr and len(failure_blob) < 200)):
                        report((
                            '\n'
                            'You can reproduce this example by temporarily '
                            'adding @reproduce_failure(%r, %r) as a decorator '
                            'on your test case') % (
                                __version__,
                                failure_blob,
                            ))
            if self.__was_flaky:
                flaky += 1

        # If we only have one example then we should have raised an error or
        # flaky prior to this point.
        assert len(self.falsifying_examples) > 1

        if flaky > 0:
            raise Flaky(
                ('Hypothesis found %d distinct failures, but %d of them '
                 'exhibited some sort of flaky behaviour.') %
                (len(self.falsifying_examples), flaky))
        else:
            raise MultipleFailures(('Hypothesis found %d distinct failures.') %
                                   (len(self.falsifying_examples, )))
示例#35
0
def find(specifier, condition, settings=None, random=None, database_key=None):
    """Returns the minimal example from the given strategy ``specifier`` that
    matches the predicate function ``condition``.

    Raises InvalidArgument if ``specifier`` is not a SearchStrategy, Timeout
    if the time budget ran out before enough valid examples were seen,
    Unsatisfiable if too few examples satisfied the assumptions, and
    NoSuchExample if the search completed without finding a match.
    """
    # Default settings deliberately allow a generous search and shrink budget.
    settings = settings or Settings(
        max_examples=2000,
        min_satisfying_examples=0,
        max_shrinks=2000,
    )

    # Derive a stable database key from the predicate so previously found
    # examples can be replayed from the example database on later runs.
    if database_key is None and settings.database is not None:
        database_key = function_digest(condition)

    if not isinstance(specifier, SearchStrategy):
        raise InvalidArgument('Expected SearchStrategy but got %r of type %s' %
                              (specifier, type(specifier).__name__))
    specifier.validate()

    search = specifier

    random = random or new_random()
    # Single-element lists act as mutable cells the closure below can update.
    successful_examples = [0]
    last_data = [None]

    def template_condition(data):
        # Draw a value from the strategy and test the predicate. Marking the
        # data object (invalid/interesting) tells the Conjecture engine how
        # to classify this run.
        with BuildContext(data):
            try:
                data.is_find = True
                result = data.draw(search)
                data.note(result)
                success = condition(result)
            except UnsatisfiedAssumption:
                data.mark_invalid()

        if success:
            successful_examples[0] += 1

        if settings.verbosity == Verbosity.verbose:
            if not successful_examples[0]:
                report(lambda: u'Trying example %s' % (nicerepr(result), ))
            elif success:
                if successful_examples[0] == 1:
                    report(lambda: u'Found satisfying example %s' %
                           (nicerepr(result), ))
                else:
                    report(lambda: u'Shrunk example to %s' %
                           (nicerepr(result), ))
                last_data[0] = data
        if success and not data.frozen:
            data.mark_interesting()

    start = time.time()
    runner = ConjectureRunner(
        template_condition,
        settings=settings,
        random=random,
        database_key=database_key,
    )
    runner.run()
    note_engine_for_statistics(runner)
    run_time = time.time() - start
    # An INTERESTING final status means the predicate was satisfied; replay
    # the (shrunk) buffer to reconstruct and return the actual value.
    if runner.last_data.status == Status.INTERESTING:
        data = ConjectureData.for_buffer(runner.last_data.buffer)
        with BuildContext(data):
            return data.draw(search)
    # No example found: decide which flavour of failure to report based on
    # why the engine stopped.
    if (runner.valid_examples <= settings.min_satisfying_examples
            and runner.exit_reason != ExitReason.finished):
        if settings.timeout > 0 and run_time > settings.timeout:
            raise Timeout(
                ('Ran out of time before finding enough valid examples for '
                 '%s. Only %d valid examples found in %.2f seconds.') %
                (get_pretty_function_description(condition),
                 runner.valid_examples, run_time))

        else:
            raise Unsatisfiable(
                ('Unable to satisfy assumptions of '
                 '%s. Only %d examples considered satisfied assumptions') % (
                     get_pretty_function_description(condition),
                     runner.valid_examples,
                 ))

    raise NoSuchExample(get_pretty_function_description(condition))
示例#36
0
def test_is_not_confused_by_tuples():
    # The lambda shares its source line with another tuple element; the
    # pretty-printer must isolate only the lambda's own text.
    p = (lambda x: x > 1, 2)[0]  # pragma: no cover

    assert get_pretty_function_description(p) == 'lambda x: x > 1'
示例#37
0
def test_does_not_strip_hashes_within_a_string():
    # A '#' inside a string literal is not a comment: only real trailing
    # comments (like the pragma here) may be stripped from the description.
    t = lambda x: '#'  # pragma: no cover
    assert get_pretty_function_description(t) == "lambda x: '#'"
示例#38
0
    def __call__(self, test):
        """Make the settings object (self) an attribute of the test.

        The settings are later discovered by looking them up on the test
        itself.

        Also, we want to issue a deprecation warning for settings used alone
        (without @given) so, note the deprecation in the new test, but also
        attach the version without the warning as an attribute, so that @given
        can unwrap it (since if @given is used, that means we don't want the
        deprecation warning).

        When it's time to turn the warning into an error, we'll raise an
        exception instead of calling note_deprecation (and can delete
        "test(*args, **kwargs)").
        """
        # Settings can only decorate callables: test functions, or
        # GenericStateMachine subclasses.
        if not callable(test):
            raise InvalidArgument(
                "settings objects can be called as a decorator with @given, "
                "but test=%r" % (test,)
            )
        if inspect.isclass(test):
            # Imported lazily, presumably to avoid an import cycle at module
            # load time -- TODO confirm against the module's import graph.
            from hypothesis.stateful import GenericStateMachine

            if issubclass(test, GenericStateMachine):
                # Refuse a second @settings application on the same state
                # machine class: it would silently replace the first one.
                attr_name = "_hypothesis_internal_settings_applied"
                if getattr(test, attr_name, False):
                    raise InvalidArgument(
                        "Applying the @settings decorator twice would "
                        "overwrite the first version; merge their arguments "
                        "instead."
                    )
                setattr(test, attr_name, True)
                test.TestCase.settings = self
                return test
            else:
                raise InvalidArgument(
                    "@settings(...) can only be used as a decorator on "
                    "functions, or on subclasses of GenericStateMachine."
                )
        # Same double-application guard, for plain test functions.
        if hasattr(test, "_hypothesis_internal_settings_applied"):
            raise InvalidArgument(
                "%s has already been decorated with a settings object."
                "\n    Previous:  %r\n    This:  %r"
                % (
                    get_pretty_function_description(test),
                    test._hypothesis_internal_use_settings,
                    self,
                )
            )

        test._hypothesis_internal_use_settings = self

        # For double-@settings check:
        test._hypothesis_internal_settings_applied = True

        @proxies(test)
        def new_test(*args, **kwargs):
            raise InvalidArgument(
                "Using `@settings` on a test without `@given` is completely pointless."
            )

        # @given will get the test from this attribution (rather than use the
        # version with the deprecation warning)
        new_test._hypothesis_internal_test_function_without_warning = test

        # This means @given has been applied, so we don't need to worry about
        # warning for @settings alone.
        has_given_applied = getattr(test, "is_hypothesis_test", False)
        test_to_use = test if has_given_applied else new_test
        test_to_use._hypothesis_internal_use_settings = self
        # Can't use _hypothesis_internal_use_settings as an indicator that
        # @settings was applied, because @given also assigns that attribute.
        test._hypothesis_internal_settings_applied = True
        return test_to_use
示例#39
0
def test_does_not_error_on_dynamically_defined_functions():
    """Functions built via eval have no file-backed source; describing one
    must still not raise."""
    dynamic = eval('lambda t: 1')
    get_pretty_function_description(dynamic)
示例#40
0
def test_handles_if_else_in_lambda():
    # A conditional expression in the lambda body is reproduced verbatim.
    assert get_pretty_function_description(lambda x: 1 if x else 2) == \
        u'lambda x: 1 if x else 2'
示例#41
0
def test_lambda_source_break_after_def_with_line_continuation():
    # The backslash continuation splits the lambda over two source lines;
    # the pretty-printer must join them into a single-line description.
    f = lambda n:\
        'aaa'

    source = get_pretty_function_description(f)
    assert source == "lambda n: 'aaa'"
示例#42
0
def test_class_names_are_not_included_in_class_method_prettiness():
    """A method accessed through its class is described by its bare name,
    with no ``Foo.`` prefix."""
    description = get_pretty_function_description(Foo.bar)
    assert description == "bar"
示例#43
0
def test_lambda_source_break_after_def_with_brackets():
    # Parentheses let the lambda body continue on the next line; the
    # description must still collapse it to a single line.
    f = (lambda n:
         'aaa')

    source = get_pretty_function_description(f)
    assert source == "lambda n: 'aaa'"
示例#44
0
def test_repr_is_included_in_bound_method_prettiness():
    """A bound method's description begins with the repr of the instance it
    is bound to (here Foo's repr is apparently 'SoNotFoo()')."""
    description = get_pretty_function_description(Foo().baz)
    assert description == "SoNotFoo().baz"
示例#45
0
def test_can_distinguish_between_two_lambdas_with_different_args():
    # Both lambdas live on the same source line; the argument names are what
    # lets extraction pick the right one for each function object.
    a, b = (lambda x: 1, lambda y: 2)  # pragma: no cover
    assert get_pretty_function_description(a) == 'lambda x: 1'
    assert get_pretty_function_description(b) == 'lambda y: 2'
示例#46
0
def test_class_is_not_included_in_unbound_method():
    """An unbound method is described by its bare name alone."""
    description = get_pretty_function_description(Foo.baz)
    assert description == "baz"
示例#47
0
def test_strips_comments_from_the_end():
    # The trailing pragma comment must not leak into the description.
    t = lambda x: 1  # pragma: no cover
    assert get_pretty_function_description(t) == 'lambda x: 1'
示例#48
0
def test_can_handle_unicode_identifier_in_same_line_as_lambda_def():
    """A unicode identifier on the lambda's defining line must not confuse
    source extraction (``is_str_pi`` is defined elsewhere in the file)."""
    description = get_pretty_function_description(is_str_pi)
    assert description == "lambda x: x == pi"
示例#49
0
 def test_or_flaky(*args, **kwargs):
     # Replacement test body: reaching this point means the falsifying
     # example did not fail again on re-execution, so the result is flaky.
     # ``test`` and ``example`` are free variables from the enclosing scope.
     raise Flaky(
         (
             'Hypothesis %r produces unreliable results: %r falsified it on'
             ' the first call but did not on a subsequent one'
         ) % (get_pretty_function_description(test), example))
示例#50
0
def find_satisfying_template(
    search_strategy, random, condition, tracker, settings, storage=None,
    max_parameter_tries=None,
):
    """Attempt to find a template for search_strategy such that condition is
    truthy.

    Exceptions other than UnsatisfiedAssumption will be immediately propagated.
    UnsatisfiedAssumption will indicate that similar examples should be avoided
    in future.

    Returns such a template as soon as it is found, otherwise stops after
    settings.max_examples examples have been considered or settings.timeout
    seconds have passed (if settings.timeout > 0).

    May raise a variety of exceptions depending on exact circumstances, but
    these will all subclass either Unsatisfiable (to indicate not enough
    examples were found which did not raise UnsatisfiedAssumption to consider
    this a valid test) or NoSuchExample (to indicate that this probably means
    that condition is true with very high probability).

    """
    satisfying_examples = 0
    examples_considered = 0
    timed_out = False
    # The iteration budget always covers the example budget, and we can never
    # require more satisfying examples than we are allowed to generate.
    max_iterations = max(settings.max_iterations, settings.max_examples)
    max_examples = min(max_iterations, settings.max_examples)
    min_satisfying_examples = min(
        settings.min_satisfying_examples,
        max_examples,
    )
    start_time = time.time()

    # First replay examples previously saved to the database: these are
    # likely candidates, and they count against the same budgets.
    if storage:
        for example in storage.fetch(search_strategy):
            if examples_considered >= max_iterations:
                break
            examples_considered += 1
            if time_to_call_it_a_day(settings, start_time):
                break
            tracker.track(example)
            try:
                if condition(example):
                    return example
                satisfying_examples += 1
            except UnsatisfiedAssumption:
                pass
            if satisfying_examples >= max_examples:
                break

    # Fresh templates are drawn from parameterised regions of the strategy;
    # marking a parameter bad steers future draws away from it.
    parameter_source = ParameterSource(
        random=random, strategy=search_strategy,
        max_tries=max_parameter_tries,
    )

    # template_upper_bound is either a non-negative int or infinity (float).
    assert search_strategy.template_upper_bound >= 0
    if isinstance(search_strategy.template_upper_bound, float):
        assert math.isinf(search_strategy.template_upper_bound)
    else:
        assert isinstance(search_strategy.template_upper_bound, int)

    for parameter in parameter_source:  # pragma: no branch
        # Stop once the strategy's whole template space has been seen, or any
        # of the iteration/example/time budgets is exhausted.
        if len(tracker) >= search_strategy.template_upper_bound:
            break
        if examples_considered >= max_iterations:
            break
        if satisfying_examples >= max_examples:
            break
        if time_to_call_it_a_day(settings, start_time):
            break
        examples_considered += 1

        example = search_strategy.draw_template(
            random, parameter
        )
        # A repeat count > 1 means we have seen this exact template before.
        if tracker.track(example) > 1:
            debug_report('Skipping duplicate example')
            parameter_source.mark_bad()
            continue
        try:
            if condition(example):
                return example
        except UnsatisfiedAssumption:
            parameter_source.mark_bad()
            continue
        satisfying_examples += 1
    run_time = time.time() - start_time
    timed_out = settings.timeout >= 0 and run_time >= settings.timeout
    # Nothing matched: decide which flavour of failure to report.
    if (
        satisfying_examples and
        len(tracker) >= search_strategy.template_upper_bound
    ):
        # The entire (finite) template space was explored without success.
        raise DefinitelyNoSuchExample(
            get_pretty_function_description(condition),
            satisfying_examples,
        )
    elif satisfying_examples < min_satisfying_examples:
        if timed_out:
            raise Timeout((
                'Ran out of time before finding a satisfying example for %s.' +
                ' Only found %d examples (%d satisfying assumptions) in %.2fs.'
            ) % (
                get_pretty_function_description(condition),
                len(tracker), satisfying_examples, run_time
            ))
        else:
            raise Unsatisfiable((
                'Unable to satisfy assumptions of hypothesis %s. ' +
                'Only %d out of %d examples considered satisfied assumptions'
            ) % (
                get_pretty_function_description(condition),
                satisfying_examples, len(tracker)))
    else:
        raise NoSuchExample(get_pretty_function_description(condition))
示例#51
0
def test_collapses_whitespace_nicely():
    # The exaggerated internal spacing must be normalised to single spaces.
    t = (
        lambda x,       y:           1  # pragma: no cover
    )
    assert get_pretty_function_description(t) == 'lambda x, y: 1'
示例#52
0
def test_handles_brackets():
    # Parentheses inside the lambda body are preserved verbatim.
    assert get_pretty_function_description(lambda x, y, z: (x + y) * z) == \
        u'lambda x, y, z: (x + y) * z'
示例#53
0
def test_strips_comments_from_the_end():
    # The trailing pragma comment must not appear in the description.
    t = lambda x: 1  # pragma: no cover
    assert get_pretty_function_description(t) == 'lambda x: 1'
示例#54
0
 def __repr__(self):
     """Return ``<strategy repr>.map(<pretty pack>)``, computing it at most
     once and caching the result on the instance."""
     try:
         return self._cached_repr
     except AttributeError:
         self._cached_repr = '%r.map(%s)' % (
             self.mapped_strategy,
             get_pretty_function_description(self.pack),
         )
         return self._cached_repr
示例#55
0
def test_can_distinguish_between_two_lambdas_with_different_args():
    # Both lambdas share one source line; their argument names are the only
    # way to match each function object to its own text.
    a, b = (lambda x: 1, lambda y: 2)  # pragma: no cover
    assert get_pretty_function_description(a) == 'lambda x: 1'
    assert get_pretty_function_description(b) == 'lambda y: 2'
示例#56
0
def test_can_see_references_to_enclosing_variables():
    # Free variables from the enclosing scope appear by name in the
    # description, not by their current value.
    x = 1
    assert get_pretty_function_description(lambda y: x * y) == \
        u'lambda y: x * y'
示例#57
0
def find_satisfying_template(
    search_strategy, random, condition, tracker, settings, storage=None,
    max_parameter_tries=None,
):
    """Attempt to find a template for search_strategy such that condition is
    truthy.

    Exceptions other than UnsatisfiedAssumption will be immediately propagated.
    UnsatisfiedAssumption will indicate that similar examples should be avoided
    in future.

    Returns such a template as soon as it is found, otherwise stops after
    settings.max_examples examples have been considered or settings.timeout
    seconds have passed (if settings.timeout > 0).

    May raise a variety of exceptions depending on exact circumstances, but
    these will all subclass either Unsatisfiable (to indicate not enough
    examples were found which did not raise UnsatisfiedAssumption to consider
    this a valid test) or NoSuchExample (to indicate that this probably means
    that condition is true with very high probability).

    """
    satisfying_examples = 0
    examples_considered = 0
    timed_out = False
    # The iteration budget always covers the example budget, and we can never
    # require more satisfying examples than we are allowed to generate.
    max_iterations = max(settings.max_iterations, settings.max_examples)
    max_examples = min(max_iterations, settings.max_examples)
    min_satisfying_examples = min(
        settings.min_satisfying_examples,
        max_examples,
    )
    start_time = time.time()

    # First replay examples previously saved to the database: these are
    # likely candidates, and they count against the same budgets.
    if storage:
        for example in storage.fetch(search_strategy):
            if examples_considered >= max_iterations:
                break
            examples_considered += 1
            if time_to_call_it_a_day(settings, start_time):
                break
            tracker.track(example)
            try:
                if condition(example):
                    return example
                satisfying_examples += 1
            except UnsatisfiedAssumption:
                pass
            if satisfying_examples >= max_examples:
                break

    # Fresh templates are drawn from parameterised regions of the strategy;
    # marking a parameter bad steers future draws away from it.
    parameter_source = ParameterSource(
        random=random, strategy=search_strategy,
        max_tries=max_parameter_tries,
    )

    # template_upper_bound is either a non-negative int or infinity (float).
    assert search_strategy.template_upper_bound >= 0
    if isinstance(search_strategy.template_upper_bound, float):
        assert math.isinf(search_strategy.template_upper_bound)
    else:
        assert isinstance(search_strategy.template_upper_bound, int)

    for parameter in parameter_source:  # pragma: no branch
        # Stop once the strategy's whole template space has been seen, or any
        # of the iteration/example/time budgets is exhausted.
        if len(tracker) >= search_strategy.template_upper_bound:
            break
        if examples_considered >= max_iterations:
            break
        if satisfying_examples >= max_examples:
            break
        if time_to_call_it_a_day(settings, start_time):
            break
        examples_considered += 1

        # Drawing itself can fail; treat that like a bad parameter choice.
        try:
            example = search_strategy.draw_template(
                random, parameter
            )
        except BadTemplateDraw:
            debug_report(u'Failed attempt to draw a template')
            parameter_source.mark_bad()
            continue
        # A repeat count > 1 means we have seen this exact template before.
        if tracker.track(example) > 1:
            debug_report(u'Skipping duplicate example')
            parameter_source.mark_bad()
            continue
        try:
            if condition(example):
                return example
        except UnsatisfiedAssumption:
            parameter_source.mark_bad()
            continue
        satisfying_examples += 1
    run_time = time.time() - start_time
    timed_out = settings.timeout >= 0 and run_time >= settings.timeout
    # Nothing matched: decide which flavour of failure to report.
    if (
        satisfying_examples and
        len(tracker) >= search_strategy.template_upper_bound
    ):
        # The entire (finite) template space was explored without success.
        raise DefinitelyNoSuchExample(
            get_pretty_function_description(condition),
            satisfying_examples,
        )
    elif satisfying_examples < min_satisfying_examples:
        if timed_out:
            raise Timeout((
                u'Ran out of time before finding a satisfying example for '
                u'%s. Only found %d examples (%d satisfying assumptions) in ' +
                u'%.2fs.'
            ) % (
                get_pretty_function_description(condition),
                len(tracker), satisfying_examples, run_time
            ))
        else:
            raise Unsatisfiable((
                u'Unable to satisfy assumptions of hypothesis %s. ' +
                u'Only %d out of %d examples considered satisfied assumptions'
            ) % (
                get_pretty_function_description(condition),
                satisfying_examples, len(tracker)))
    else:
        raise NoSuchExample(get_pretty_function_description(condition))
示例#58
0
def test_can_extract_lambdas_in_decorators():
    """A lambda passed inside a decorator/factory call (``foo()``, defined
    elsewhere in the file) is still recoverable as source."""
    description = get_pretty_function_description(foo())
    assert description == u'lambda x: x % 4 == 0'
示例#59
0
def test_names_of_functions_are_pretty():
    """Named (non-lambda) functions are described by their own name."""
    description = get_pretty_function_description(
        test_names_of_functions_are_pretty)
    assert description == "test_names_of_functions_are_pretty"
示例#60
0
 def __init__(self, hypothesis, example):
     """Build the Flaky error message describing how ``hypothesis``
     behaved inconsistently on ``example``."""
     message = (
         'Hypothesis %r produces unreliable results: %r falsified it on the'
         ' first call but did not on a subsequent one'
     ) % (get_pretty_function_description(hypothesis), example)
     super(Flaky, self).__init__(message)