Ejemplo n.º 1
0
    def evaluate_test_data(self, data):
        """Run the test once against ``data`` and translate the outcome
        into engine-level calls on ``data`` (mark_invalid /
        mark_interesting), or re-raise exceptions the engine must see.
        """
        try:
            result = self.execute(data)
            if result is not None:
                # Returning a value from a @given test is almost always an
                # accidental `return`, so it fails a health check.
                fail_health_check(
                    self.settings,
                    ('Tests run under @given should return None, but '
                     '%s returned %r instead.') % (self.test.__name__, result),
                    HealthCheck.return_value)
        except UnsatisfiedAssumption:
            # A failed assume(): this input is invalid, not failing.
            data.mark_invalid()
        except (
                HypothesisDeprecationWarning,
                FailedHealthCheck,
                StopTest,
        ) + EXCEPTIONS_TO_RERAISE:
            # Control-flow / fatal exceptions the engine handles itself.
            raise
        except EXCEPTIONS_TO_FAIL as e:
            # A genuine test failure.  Re-raise if it came from Hypothesis
            # internals; otherwise record it and flag the example.
            escalate_hypothesis_internal_error()
            tb = get_trimmed_traceback()
            data.__expected_traceback = ''.join(
                traceback.format_exception(type(e), e, tb))
            data.__expected_exception = e
            verbose_report(data.__expected_traceback)

            # Key interesting examples by exception type and raise site so
            # distinct failures are tracked separately.
            origin = traceback.extract_tb(tb)[-1]
            filename = origin[0]
            lineno = origin[1]
            data.mark_interesting((type(e), filename, lineno))
Ejemplo n.º 2
0
 def evaluate_test_data(data):
     """Execute the test for ``data`` and report the outcome to the engine.

     Returns False on a clean run; otherwise marks ``data`` invalid or
     interesting, or re-raises engine-level control exceptions.
     """
     try:
         result = test_runner(
             data, reify_and_execute(
                 search_strategy,
                 test,
             ))
         if result is not None and settings.perform_health_check:
             # Tests under @given should communicate via asserts, not
             # return values.
             fail_health_check(
                 ('Tests run under @given should return None, but '
                  '%s returned %r instead.') %
                 (test.__name__, result), HealthCheck.return_value)
         return False
     except UnsatisfiedAssumption:
         # assume() failed: invalid input, not a failure.
         data.mark_invalid()
     except (
             HypothesisDeprecationWarning,
             FailedHealthCheck,
             StopTest,
     ):
         # Control exceptions handled further up the stack.
         raise
     except Exception:
         # Genuine failure: record the traceback and flag the example.
         last_exception[0] = traceback.format_exc()
         verbose_report(last_exception[0])
         data.mark_interesting()
Ejemplo n.º 3
0
    def evaluate_test_data(self, data):
        """Execute the test once for ``data``; return False on success,
        mark the data invalid or interesting on the corresponding
        outcomes, or re-raise control exceptions."""
        try:
            result = self.execute(data)
            if result is not None:
                # Returning a value from a @given test is flagged as an
                # error, since it is usually an accidental `return`.
                fail_health_check(self.settings, (
                    'Tests run under @given should return None, but '
                    '%s returned %r instead.'
                ) % (self.test.__name__, result), HealthCheck.return_value)
            return False
        except UnsatisfiedAssumption:
            # assume() failed: invalid input rather than a failure.
            data.mark_invalid()
        except (
            HypothesisDeprecationWarning, FailedHealthCheck,
            StopTest,
        ) + EXCEPTIONS_TO_RERAISE:
            # Fatal / control exceptions the engine must see unchanged.
            raise
        except Exception as e:
            # Genuine failure: escalate internal errors, then record it.
            escalate_hypothesis_internal_error()
            data.__expected_traceback = traceback.format_exc()
            data.__expected_exception = e
            verbose_report(data.__expected_traceback)

            error_class, _, tb = sys.exc_info()

            # Distinct failures are keyed by exception type and raise site.
            origin = traceback.extract_tb(tb)[-1]
            filename = origin[0]
            lineno = origin[1]
            data.mark_interesting((error_class, filename, lineno))
Ejemplo n.º 4
0
 def evaluate_test_data(self, data):
     """Run the test against ``data`` and report the outcome; returns
     False on success and records that at least one success occurred."""
     try:
         result = self.test_runner(
             data, reify_and_execute(
                 self.search_strategy,
                 self.test,
             ))
         if result is not None and self.settings.perform_health_check:
             # @given tests should use asserts, not return values.
             fail_health_check(
                 self.settings,
                 ('Tests run under @given should return None, but '
                  '%s returned %r instead.') % (self.test.__name__, result),
                 HealthCheck.return_value)
         self.at_least_one_success = True
         return False
     except UnsatisfiedAssumption:
         # Failed assume(): invalid input rather than failing.
         data.mark_invalid()
     except (
             HypothesisDeprecationWarning,
             FailedHealthCheck,
             StopTest,
     ):
         # Control exceptions handled by the engine.
         raise
     except Exception:
         # Genuine failure: escalate internal errors, then record it.
         escalate_hypothesis_internal_error()
         self.last_exception = traceback.format_exc()
         verbose_report(self.last_exception)
         data.mark_interesting()
Ejemplo n.º 5
0
def target(observation: Union[int, float],
           *,
           label: str = "") -> Union[int, float]:
    """Record a numeric ``observation`` for the current test case, which
    Hypothesis will try to maximize over several examples in addition to
    its usual heuristics.  Observations must always be finite.

    Almost any metric works as long as it makes sense to increase it:
    ``-abs(error)`` grows as ``error`` approaches zero, and so do e.g.

    - Number of elements in a collection, or tasks in a queue
    - Mean or maximum runtime of a task (or both, if you use ``label``)
    - Compression ratio for data (perhaps per-algorithm or per-level)
    - Number of steps taken by a state machine

    Pass a distinct ``label`` to optimise several observations
    independently, such as the mean and standard deviation of a dataset.
    Calling ``target()`` twice with the same label in one test case is
    an error.

    .. note::
        **The more examples you run, the better this technique works.**

        As a rule of thumb, the targeting effect is noticeable above
        :obj:`max_examples=1000 <hypothesis.settings.max_examples>`,
        and immediately obvious by around ten thousand examples
        *per label* used by your test.

    :ref:`statistics` include the best score seen for each label,
    which can help avoid `the threshold problem
    <https://hypothesis.works/articles/threshold-problem/>`__ when the minimal
    example shrinks right down to the threshold of failure (:issue:`2180`).
    """
    # Validate arguments before touching any build-context state.
    check_type((int, float), observation, "observation")
    if not math.isfinite(observation):
        raise InvalidArgument(
            f"observation={observation!r} must be a finite float.")
    check_type(str, label, "label")

    ctx = _current_build_context.value
    if ctx is None:
        raise InvalidArgument(
            "Calling target() outside of a test is invalid.  "
            "Consider guarding this call with `if currently_in_test_context(): ...`"
        )
    verbose_report(f"Saw target(observation={observation!r}, label={label!r})")

    observations = ctx.data.target_observations
    if label in observations:
        # Each label may be targeted at most once per test case.
        raise InvalidArgument(
            "Calling target(%r, label=%r) would overwrite target(%r, label=%r)"
            % (observation, label, observations[label], label))
    observations[label] = observation

    return observation
Ejemplo n.º 6
0
    def evaluate_test_data(self, data):
        """Run the test for ``data``, first enforcing the overall
        hung-test time limit, and report the outcome via ``data`` or
        re-raised control exceptions."""
        # Abort runs whose total wall-clock time exceeds the limit.
        if (time.time() - self.start_time >= HUNG_TEST_TIME_LIMIT):
            fail_health_check(
                self.settings,
                ('Your test has been running for at least five minutes. This '
                 'is probably not what you intended, so by default Hypothesis '
                 'turns it into an error.'), HealthCheck.hung_test)

        try:
            result = self.test_runner(
                data, reify_and_execute(
                    self.search_strategy,
                    self.test,
                ))
            if result is not None and self.settings.perform_health_check:
                # Returning a value from a @given test is an error.
                fail_health_check(
                    self.settings,
                    ('Tests run under @given should return None, but '
                     '%s returned %r instead.') % (self.test.__name__, result),
                    HealthCheck.return_value)
            self.at_least_one_success = True
            return False
        except UnsatisfiedAssumption:
            # Failed assume(): invalid input, not a failure.
            data.mark_invalid()
        except (
                HypothesisDeprecationWarning,
                FailedHealthCheck,
                StopTest,
        ) + exceptions_to_reraise:
            # Control / fatal exceptions handled further up.
            raise
        except Exception:
            # Genuine failure: escalate internal errors, then record it.
            escalate_hypothesis_internal_error()
            self.last_exception = traceback.format_exc()
            verbose_report(self.last_exception)
            data.mark_interesting()
Ejemplo n.º 7
0
    def _execute_once_for_engine(self, data):
        """Wrapper around ``execute_once`` that intercepts test failure
        exceptions and single-test control exceptions, and turns them into
        appropriate method calls to `data` instead.

        This allows the engine to assume that any exception other than
        ``StopTest`` must be a fatal error, and should stop the entire engine.
        """
        try:
            result = self.execute_once(data)
            if result is not None:
                # Returning a value from a @given test is almost always an
                # accidental `return`, so it fails a health check.
                fail_health_check(
                    self.settings,
                    ("Tests run under @given should return None, but "
                     "%s returned %r instead.") % (self.test.__name__, result),
                    HealthCheck.return_value,
                )
        except UnsatisfiedAssumption:
            # An "assume" check failed, so instead we inform the engine that
            # this test run was invalid.
            data.mark_invalid()
        except StopTest:
            # The engine knows how to handle this control exception, so it's
            # OK to re-raise it.
            raise
        except (
                HypothesisDeprecationWarning,
                FailedHealthCheck,
        ) + skip_exceptions_to_reraise():
            # These are fatal errors or control exceptions that should stop the
            # engine, so we re-raise them.
            raise
        except failure_exceptions_to_catch() as e:
            # If the error was raised by Hypothesis-internal code, re-raise it
            # as a fatal error instead of treating it as a test failure.
            escalate_hypothesis_internal_error()

            if data.frozen:
                # This can happen if an error occurred in a finally
                # block somewhere, suppressing our original StopTest.
                # We raise a new one here to resume normal operation.
                raise StopTest(data.testcounter)
            else:
                # The test failed by raising an exception, so we inform the
                # engine that this test run was interesting. This is the normal
                # path for test runs that fail.

                tb = get_trimmed_traceback()
                info = data.extra_information
                info.__expected_traceback = "".join(
                    traceback.format_exception(type(e), e, tb))
                info.__expected_exception = e
                verbose_report(info.__expected_traceback)

                # Distinct failures are keyed by exception type plus the
                # file and line where the exception was raised.
                origin = traceback.extract_tb(tb)[-1]
                filename = origin[0]
                lineno = origin[1]
                data.mark_interesting((type(e), filename, lineno))
Ejemplo n.º 8
0
 def is_breaking_run(runner):
     """Return True if executing a freshly-built state machine under
     ``runner`` raises an unexpected exception, False if it completes
     cleanly.  Hypothesis' own control exceptions propagate unchanged."""
     try:
         runner.run(state_machine_factory())
     except (InvalidDefinition, UnsatisfiedAssumption):
         # These must be handled by Hypothesis itself, not treated as
         # breakage.
         raise
     except Exception:
         # Passed uncalled — presumably verbose_report formats the
         # traceback lazily when verbosity requires it; confirm.
         verbose_report(traceback.format_exc)
         return True
     else:
         return False
Ejemplo n.º 9
0
 def is_template_example(xs):
     """Return True if running the test on template ``xs`` raises an
     unexpected exception, False if it passes.  UnsatisfiedAssumption
     propagates to the caller."""
     try:
         test_runner(reify_and_execute(search_strategy, xs, test))
         return False
     except UnsatisfiedAssumption:
         # Bare `raise` re-raises with the original traceback intact
         # (`raise e` would restart the traceback on Python 2).
         raise
     except Exception:
         # Passed uncalled so the traceback is (presumably) only
         # formatted when verbose output is enabled.
         verbose_report(traceback.format_exc)
         return True
Ejemplo n.º 10
0
 def run():
     """Reify ``template`` into concrete call arguments, report the
     example being tried (or falsified), then execute the test."""
     args, kwargs = search_strategy.reify(template)

     def render(fmt):
         # Lazy message: only built if the reporter decides to print it.
         return lambda: fmt % (test.__name__, arg_string(test, args, kwargs))

     if print_example:
         report(render('Falsifying example: %s(%s)'))
     else:
         verbose_report(render('Trying example: %s(%s)'))
     return test(*args, **kwargs)
Ejemplo n.º 11
0
 def is_breaking_run(runner):
     """Run a new state machine under ``runner``; True means it broke
     (raised an unexpected exception), False means it ran cleanly."""
     try:
         runner.run(state_machine_factory())
     except (InvalidDefinition, UnsatisfiedAssumption):
         # Hypothesis-level conditions: let the caller deal with them.
         raise
     except Exception:
         # NOTE(review): passed as a callable — verbose_report appears to
         # evaluate lazily; confirm against its implementation.
         verbose_report(traceback.format_exc)
         return True
     return False
Ejemplo n.º 12
0
 def is_template_example(xs):
     """Return True if the test fails (raises) on template ``xs``,
     False if it passes; assumption failures propagate."""
     try:
         test_runner(reify_and_execute(search_strategy, xs, test))
         return False
     except UnsatisfiedAssumption:
         # Bare `raise` preserves the original traceback, unlike
         # `raise e` which restarts it on Python 2.
         raise
     except Exception:
         # Lazily formatted — presumably verbose_report calls this only
         # when verbose; confirm.
         verbose_report(traceback.format_exc)
         return True
Ejemplo n.º 13
0
 def is_breaking_run(runner):
     """Return True when running a freshly-built state machine raises an
     unexpected exception, False when it completes without error."""
     try:
         runner.run(state_machine_factory())
     except HypothesisException:
         # Hypothesis' own exceptions must propagate unchanged.
         raise
     except Exception:
         # Passed uncalled — presumably formatted lazily by
         # verbose_report when verbosity requires it; confirm.
         verbose_report(traceback.format_exc)
         return True
     else:
         return False
Ejemplo n.º 14
0
 def is_breaking_run(runner):
     """Drive one fresh state machine through ``runner`` and report
     whether it broke: True on an unexpected exception, False otherwise."""
     try:
         runner.run(state_machine_factory())
     except HypothesisException:
         # Let Hypothesis' own exceptions bubble up untouched.
         raise
     except Exception:
         # NOTE(review): callable handed to verbose_report for lazy
         # formatting — verify verbose_report invokes it.
         verbose_report(traceback.format_exc)
         return True
     return False
Ejemplo n.º 15
0
def target(observation, label=""):
    # type: (float, str) -> None
    """Calling this function with a ``float`` observation gives it feedback
    with which to guide our search for inputs that will cause an error, in
    addition to all the usual heuristics.  Observations must always be finite.

    Hypothesis will try to maximize the observed value over several examples;
    almost any metric will work so long as it makes sense to increase it.
    For example, ``-abs(error)`` is a metric that increases as ``error``
    approaches zero.

    Example metrics:

    - Number of elements in a collection, or tasks in a queue
    - Mean or maximum runtime of a task (or both, if you use ``label``)
    - Compression ratio for data (perhaps per-algorithm or per-level)
    - Number of steps taken by a state machine

    The optional ``label`` argument can be used to distinguish between
    and therefore separately optimise distinct observations, such as the
    mean and standard deviation of a dataset.  It is an error to call
    ``target()`` with any label more than once per test case.

    .. note::
        **The more examples you run, the better this technique works.**

        As a rule of thumb, the targeting effect is noticeable above
        :obj:`max_examples=1000 <hypothesis.settings.max_examples>`,
        and immediately obvious by around ten thousand examples
        *per label* used by your test.

    .. note::
        ``hypothesis.target`` is considered experimental, and may be radically
        changed or even removed in a future version.  If you find it useful,
        please let us know so we can share and build on that success!
    """
    # Validate arguments before touching any build-context state.
    check_type(float, observation, "observation")
    if math.isinf(observation) or math.isnan(observation):
        raise InvalidArgument("observation=%r must be a finite float." %
                              observation)
    check_type(string_types, label, "label")

    context = _current_build_context.value
    if context is None:
        raise InvalidArgument("Calling target() outside of a test is invalid.")
    verbose_report("Saw target(observation=%r, label=%r)" %
                   (observation, label))

    if context.data is not None:
        # Each label may be targeted at most once per test case.
        if label in context.data.target_observations:
            raise InvalidArgument(
                "Calling target(%r, label=%r) would overwrite target(%r, label=%r)"
                % (observation, label, context.data.target_observations[label],
                   label))
        else:
            context.data.target_observations[label] = observation
Ejemplo n.º 16
0
    def evaluate_test_data(self, data):
        """Run the test for ``data`` — under the coverage collector when
        one is configured — and report the outcome to the engine."""
        try:
            if self.collector is None:
                result = self.execute(data)
            else:  # pragma: no cover
                # This should always be a no-op, but the coverage tracer has
                # a bad habit of resurrecting itself.
                original = sys.gettrace()
                sys.settrace(None)
                try:
                    self.collector.data = {}
                    result = self.execute(data, collect=True)
                finally:
                    sys.settrace(original)
                    covdata = CoverageData()
                    self.collector.save_data(covdata)
                    self.coverage_data.update(covdata)
                    # Tag this run with every executed arc outside of
                    # Hypothesis' own source files.
                    for filename in covdata.measured_files():
                        if is_hypothesis_file(filename):
                            continue
                        data.tags.update(
                            arc(filename, source, target)
                            for source, target in covdata.arcs(filename))
            if result is not None and self.settings.perform_health_check:
                # Returning a value from a @given test is an error.
                fail_health_check(
                    self.settings,
                    ('Tests run under @given should return None, but '
                     '%s returned %r instead.') % (self.test.__name__, result),
                    HealthCheck.return_value)
            self.at_least_one_success = True
            return False
        except UnsatisfiedAssumption:
            # Failed assume(): invalid input, not a failure.
            data.mark_invalid()
        except (
                HypothesisDeprecationWarning,
                FailedHealthCheck,
                StopTest,
        ) + exceptions_to_reraise:
            # Control / fatal exceptions the engine handles itself.
            raise
        except Exception as e:
            # Genuine failure: escalate internal errors, then record it.
            escalate_hypothesis_internal_error()
            data.__expected_traceback = traceback.format_exc()
            data.__expected_exception = e
            verbose_report(data.__expected_traceback)

            error_class, _, tb = sys.exc_info()

            # Key distinct failures by exception type and raise location.
            origin = traceback.extract_tb(tb)[-1]
            filename = origin[0]
            lineno = origin[1]
            data.mark_interesting((error_class, filename, lineno))
Ejemplo n.º 17
0
 def is_template_example(xs):
     """Return True if the test fails on template ``xs``; when shrinking
     is disabled (max_shrinks <= 0) failures are re-raised immediately."""
     try:
         test_runner(reify_and_execute(
             search_strategy, xs, test,
             always_print=settings.max_shrinks <= 0
         ))
         return False
     except UnsatisfiedAssumption:
         # Bare `raise` keeps the original traceback (`raise e` would
         # restart it on Python 2).
         raise
     except Exception:
         if settings.max_shrinks <= 0:
             # No shrinking will happen, so surface the failure now.
             raise
         # Lazily formatted — presumably only evaluated when verbose.
         verbose_report(traceback.format_exc)
         return True
Ejemplo n.º 18
0
 def is_template_example(xs):
     """Return True if the test fails on ``xs``, recording the traceback
     and the example's repr for later reporting; False if it passes."""
     record_repr = [None]
     try:
         test_runner(reify_and_execute(
             search_strategy, xs, test,
             record_repr=record_repr,
         ))
         return False
     except UnsatisfiedAssumption:
         # Bare `raise` preserves the original traceback, unlike
         # `raise e` which restarts it on Python 2.
         raise
     except Exception:
         # Store failure details for the final report.
         last_exception[0] = traceback.format_exc()
         repr_for_last_exception[0] = record_repr[0]
         verbose_report(last_exception[0])
         return True
Ejemplo n.º 19
0
 def is_template_example(xs):
     """Return True if the test fails on ``xs``; re-raise immediately
     when shrinking is disabled (max_shrinks <= 0)."""
     try:
         test_runner(reify_and_execute(
             search_strategy, xs, test,
             always_print=settings.max_shrinks <= 0
         ))
         return False
     except UnsatisfiedAssumption:
         # Bare `raise` keeps the original traceback intact.
         raise
     except Exception:
         if settings.max_shrinks <= 0:
             # No shrinking phase — surface the failure right away.
             raise
         # Record the failure for later reporting.
         last_exception[0] = traceback.format_exc()
         verbose_report(last_exception[0])
         return True
Ejemplo n.º 20
0
 def is_template_example(xs):
     """Return True if the test fails on ``xs``, recording its traceback
     and repr; re-raise immediately when shrinking is disabled."""
     record_repr = [None]
     try:
         test_runner(
             reify_and_execute(
                 search_strategy, xs, test, always_print=settings.max_shrinks <= 0, record_repr=record_repr
             )
         )
         return False
     except UnsatisfiedAssumption:
         # Bare `raise` preserves the original traceback (`raise e`
         # restarts it on Python 2).
         raise
     except Exception:
         if settings.max_shrinks <= 0:
             # No shrinking will follow, so surface the failure now.
             raise
         # Store failure details for the final report.
         last_exception[0] = traceback.format_exc()
         repr_for_last_exception[0] = record_repr[0]
         verbose_report(last_exception[0])
         return True
Ejemplo n.º 21
0
 def evaluate_test_data(data):
     """Run the test on ``data``; return False on success, otherwise mark
     the data invalid/interesting or re-raise control exceptions."""
     try:
         result = test_runner(data, reify_and_execute(search_strategy, test))
         if result is not None and settings.perform_health_check:
             # Returning a value from a @given test is treated as an error.
             fail_health_check(
                 ("Tests run under @given should return None, but " "%s returned %r instead.")
                 % (test.__name__, result),
                 HealthCheck.return_value,
             )
         return False
     except UnsatisfiedAssumption:
         # A failed assume(): invalid input, not a failure.
         data.mark_invalid()
     except (HypothesisDeprecationWarning, FailedHealthCheck, StopTest):
         # Engine-level control exceptions must propagate.
         raise
     except Exception:
         # Any other exception is a genuine failure: record and flag it.
         last_exception[0] = traceback.format_exc()
         verbose_report(last_exception[0])
         data.mark_interesting()
Ejemplo n.º 22
0
def execute_explicit_examples(
    test_runner, test, wrapped_test, settings, arguments, kwargs
):
    """Run each @example(...) attached to ``wrapped_test`` through
    ``test_runner``.

    Examples are executed in reverse declaration order.  Positional
    example args are mapped onto the rightmost parameters of ``test``;
    keyword examples are merged over the signature's kwonly defaults.
    Raises InvalidArgument for an example with too many positional args.
    """
    original_argspec = getfullargspec(test)

    for example in reversed(getattr(
        wrapped_test, 'hypothesis_explicit_examples', ()
    )):
        example_kwargs = dict(original_argspec.kwonlydefaults or {})
        if example.args:
            if len(example.args) > len(original_argspec.args):
                raise InvalidArgument(
                    'example has too many arguments for test. '
                    'Expected at most %d but got %d' % (
                        len(original_argspec.args), len(example.args)))
            # Right-align positional example args against the parameter
            # list of the test function.
            example_kwargs.update(dict(zip(
                original_argspec.args[-len(example.args):],
                example.args
            )))
        else:
            example_kwargs.update(example.kwargs)
        # The validation above still runs even when the explicit phase is
        # disabled; only the execution itself is skipped here.
        if Phase.explicit not in settings.phases:
            continue
        example_kwargs.update(kwargs)
        # Note: Test may mutate arguments and we can't rerun explicit
        # examples, so we have to calculate the failure message at this
        # point rather than later.
        example_string = '%s(%s)' % (
            test.__name__, arg_string(test, arguments, example_kwargs)
        )
        with local_settings(settings):
            try:
                with BuildContext(None) as b:
                    verbose_report('Trying example: ' + example_string)
                    test_runner(
                        None, lambda data: test(*arguments, **example_kwargs)
                    )
            except BaseException:
                # Report the failing example plus any notes, then re-raise.
                report('Falsifying example: ' + example_string)
                for n in b.notes:
                    report(n)
                raise
Ejemplo n.º 23
0
def execute_explicit_examples(
    test_runner, test, wrapped_test, settings, arguments, kwargs
):
    """Execute every explicit @example(...) registered on ``wrapped_test``.

    Runs in reverse declaration order; positional example args map onto
    the rightmost test parameters, keyword examples merge over kwonly
    defaults.  Raises InvalidArgument when an example supplies more
    positional args than the test accepts.
    """
    original_argspec = getfullargspec(test)

    for example in reversed(getattr(
        wrapped_test, 'hypothesis_explicit_examples', ()
    )):
        example_kwargs = dict(original_argspec.kwonlydefaults or {})
        if example.args:
            if len(example.args) > len(original_argspec.args):
                raise InvalidArgument(
                    'example has too many arguments for test. '
                    'Expected at most %d but got %d' % (
                        len(original_argspec.args), len(example.args)))
            # Map positional example args onto the rightmost parameters.
            example_kwargs.update(dict(zip(
                original_argspec.args[-len(example.args):],
                example.args
            )))
        else:
            example_kwargs.update(example.kwargs)
        # Argument validation runs even when the explicit phase is
        # disabled; only execution is skipped.
        if Phase.explicit not in settings.phases:
            continue
        example_kwargs.update(kwargs)
        # Note: Test may mutate arguments and we can't rerun explicit
        # examples, so we have to calculate the failure message at this
        # point rather than later.
        example_string = '%s(%s)' % (
            test.__name__, arg_string(test, arguments, example_kwargs)
        )
        with local_settings(settings):
            try:
                with BuildContext(None) as b:
                    verbose_report('Trying example: ' + example_string)
                    test_runner(
                        None, lambda data: test(*arguments, **example_kwargs)
                    )
            except BaseException:
                # Report the failing example and its notes, then re-raise.
                report('Falsifying example: ' + example_string)
                for n in b.notes:
                    report(n)
                raise
Ejemplo n.º 24
0
 def evaluate_test_data(data):
     """Run the test on ``data``, also policing use of the global random
     module (checked once, on the first health-checked execution)."""
     # Snapshot the global random state so the finally block can detect
     # tests that use the `random` module directly.
     if perform_health_check and not performed_random_check[0]:
         initial_state = getglobalrandomstate()
         performed_random_check[0] = True
     else:
         initial_state = None
     try:
         result = test_runner(
             data, reify_and_execute(
                 search_strategy,
                 test,
             ))
         if result is not None and settings.perform_health_check:
             # @given tests should communicate via asserts, not returns.
             fail_health_check(
                 ('Tests run under @given should return None, but '
                  '%s returned %r instead.') %
                 (test.__name__, result), HealthCheck.return_value)
         return False
     except UnsatisfiedAssumption:
         # assume() failed: the input is invalid rather than failing.
         data.mark_invalid()
     except (
             HypothesisDeprecationWarning,
             FailedHealthCheck,
             StopTest,
     ):
         # Control exceptions the engine handles itself.
         raise
     except Exception:
         # Genuine failure: record the traceback and flag the example.
         last_exception[0] = traceback.format_exc()
         verbose_report(last_exception[0])
         data.mark_interesting()
     finally:
         # Fail if the global random state changed during the test run.
         if (initial_state is not None
                 and getglobalrandomstate() != initial_state):
             fail_health_check(
                 'Your test used the global random module. '
                 'This is unlikely to work correctly. You should '
                 'consider using the randoms() strategy from '
                 'hypothesis.strategies instead. Alternatively, '
                 'you can use the random_module() strategy to '
                 'explicitly seed the random module.',
                 HealthCheck.random_module,
             )
Ejemplo n.º 25
0
    def evaluate_test_data(self, data):
        """Run the test once for ``data`` and turn the outcome into engine
        calls on ``data`` (invalid/interesting) or re-raised exceptions."""
        try:
            result = self.execute(data)
            if result is not None:
                # Returning a value from a @given test is almost always an
                # accidental `return`, so it fails a health check.
                fail_health_check(
                    self.settings,
                    (
                        "Tests run under @given should return None, but "
                        "%s returned %r instead."
                    )
                    % (self.test.__name__, result),
                    HealthCheck.return_value,
                )
        except UnsatisfiedAssumption:
            # A failed assume(): invalid input, not a failure.
            data.mark_invalid()
        except (
            HypothesisDeprecationWarning,
            FailedHealthCheck,
            StopTest,
        ) + skip_exceptions_to_reraise():
            # Fatal or control exceptions that must stop the engine.
            raise
        except failure_exceptions_to_catch() as e:
            # Re-raise errors originating in Hypothesis internals.
            escalate_hypothesis_internal_error()
            if data.frozen:
                # This can happen if an error occurred in a finally
                # block somewhere, suppressing our original StopTest.
                # We raise a new one here to resume normal operation.
                raise StopTest(data.testcounter)
            else:
                tb = get_trimmed_traceback()
                info = data.extra_information
                info.__expected_traceback = "".join(
                    traceback.format_exception(type(e), e, tb)
                )
                info.__expected_exception = e
                verbose_report(info.__expected_traceback)

                # Distinct failures are keyed by type and raise site.
                origin = traceback.extract_tb(tb)[-1]
                filename = origin[0]
                lineno = origin[1]
                data.mark_interesting((type(e), filename, lineno))
Ejemplo n.º 26
0
    def evaluate_test_data(self, data):
        """Execute the test for ``data`` and report the result to the
        engine via ``data`` (mark_invalid / mark_interesting) or by
        re-raising control and fatal exceptions."""
        try:
            result = self.execute(data)
            if result is not None:
                # Returning a value from a @given test fails a health
                # check, since it is usually an accidental `return`.
                fail_health_check(
                    self.settings,
                    (
                        "Tests run under @given should return None, but "
                        "%s returned %r instead."
                    )
                    % (self.test.__name__, result),
                    HealthCheck.return_value,
                )
        except UnsatisfiedAssumption:
            # assume() failed: this input is invalid, not failing.
            data.mark_invalid()
        except (
            HypothesisDeprecationWarning,
            FailedHealthCheck,
            StopTest,
        ) + skip_exceptions_to_reraise():
            # Control / fatal exceptions the engine must see unchanged.
            raise
        except failure_exceptions_to_catch() as e:
            # Escalate errors raised by Hypothesis-internal code.
            escalate_hypothesis_internal_error()
            if data.frozen:
                # This can happen if an error occurred in a finally
                # block somewhere, suppressing our original StopTest.
                # We raise a new one here to resume normal operation.
                raise StopTest(data.testcounter)
            else:
                tb = get_trimmed_traceback()
                info = data.extra_information
                info.__expected_traceback = "".join(
                    traceback.format_exception(type(e), e, tb)
                )
                info.__expected_exception = e
                verbose_report(info.__expected_traceback)

                # Key interesting examples by exception type and the file
                # and line where the exception was raised.
                origin = traceback.extract_tb(tb)[-1]
                filename = origin[0]
                lineno = origin[1]
                data.mark_interesting((type(e), filename, lineno))
Ejemplo n.º 27
0
 def evaluate_test_data(data):
     """Run the test on ``data`` while watching for misuse of the global
     random module (state compared before/after, checked once)."""
     # Capture the global random state so the finally block can detect
     # direct use of the `random` module.
     if perform_health_check and not performed_random_check[0]:
         initial_state = getglobalrandomstate()
         performed_random_check[0] = True
     else:
         initial_state = None
     try:
         result = test_runner(data, reify_and_execute(
             search_strategy, test,
         ))
         if result is not None and settings.perform_health_check:
             # Returning a value from a @given test is an error.
             fail_health_check((
                 'Tests run under @given should return None, but '
                 '%s returned %r instead.'
             ) % (test.__name__, result), HealthCheck.return_value)
         return False
     except UnsatisfiedAssumption:
         # Failed assume(): invalid input, not a failure.
         data.mark_invalid()
     except (
         HypothesisDeprecationWarning, FailedHealthCheck,
         StopTest,
     ):
         # Control exceptions handled by the engine.
         raise
     except Exception:
         # Genuine failure: record the traceback and flag the example.
         last_exception[0] = traceback.format_exc()
         verbose_report(last_exception[0])
         data.mark_interesting()
     finally:
         # Fail if the global random state changed during the run.
         if (
             initial_state is not None and
             getglobalrandomstate() != initial_state
         ):
             fail_health_check(
                 'Your test used the global random module. '
                 'This is unlikely to work correctly. You should '
                 'consider using the randoms() strategy from '
                 'hypothesis.strategies instead. Alternatively, '
                 'you can use the random_module() strategy to '
                 'explicitly seed the random module.',
                 HealthCheck.random_module,
             )
Ejemplo n.º 28
0
 def run():
     """Reify ``template`` into call arguments, report the example (as a
     falsifying or trial example), then invoke the test with them."""
     args, kwargs = search_strategy.reify(template)

     def render(fmt):
         # Built lazily so formatting only happens if actually reported.
         return lambda: fmt % (test.__name__, arg_string(test, args, kwargs))

     if print_example:
         report(render('Falsifying example: %s(%s)'))
     else:
         verbose_report(render('Trying example: %s(%s)'))
     return test(*args, **kwargs)
Ejemplo n.º 29
0
def best_satisfying_template(
    search_strategy, random, condition, settings, storage, tracker=None, max_parameter_tries=None
):
    """Find and then minimize a satisfying template.

    First look in storage if it is not None, then attempt to generate
    one. May throw all the exceptions of find_satisfying_template. Once
    an example has been found it will be further minimized.

    """
    tracker = Tracker() if tracker is None else tracker
    start_time = time.time()

    # Starts at -1: presumably the simplifier re-yields the starting
    # example, which should not count as a shrink -- TODO confirm.
    shrinks = -1
    with settings:
        best = find_satisfying_template(
            search_strategy, random, condition, tracker, settings, storage,
            max_parameter_tries=max_parameter_tries,
        )
        for candidate in simplify_template_such_that(
            search_strategy, random, best, condition, tracker, settings,
            start_time,
        ):
            shrinks += 1
            best = candidate
        if storage is not None:
            storage.save(best, search_strategy)
        if not shrinks:
            verbose_report("Could not shrink example")
        elif shrinks == 1:
            verbose_report("Successfully shrunk example once")
        else:
            verbose_report("Successfully shrunk example %d times" % (shrinks,))
        return best
Ejemplo n.º 30
0
 def is_template_example(xs):
     """Run the test against the reified template *xs*.

     Returns False when the test passes, True when it raises an
     ordinary exception (i.e. *xs* is a counterexample).
     UnsatisfiedAssumption and HypothesisDeprecationWarning propagate
     unchanged to the caller.
     """
     # None marks "no snapshot taken".  The original code left
     # initial_state unbound in that case, which would raise NameError
     # in the finally block if warned_random[0] changed during the try.
     initial_state = None
     if perform_health_check and not warned_random[0]:
         initial_state = getglobalrandomstate()
     record_repr = [None]
     try:
         result = test_runner(reify_and_execute(
             search_strategy, xs, test,
             record_repr=record_repr,
         ))
         if result is not None:
             # Note the space after 'instead.' -- the original
             # concatenation produced 'instead.In Hypothesis 2.0'.
             note_deprecation((
                 'Tests run under @given should return None, but '
                 '%s returned %r instead. '
                 'In Hypothesis 2.0 this will become an error.'
             ) % (test.__name__, result), settings)
         return False
     except HypothesisDeprecationWarning:
         raise
     except UnsatisfiedAssumption:
         # Bare raise preserves the original traceback ('raise e' does not).
         raise
     except Exception:
         last_exception[0] = traceback.format_exc()
         repr_for_last_exception[0] = record_repr[0]
         verbose_report(last_exception[0])
         return True
     finally:
         # Only compare states when a snapshot was actually taken.
         if (
             initial_state is not None and
             not warned_random[0] and
             getglobalrandomstate() != initial_state
         ):
             warned_random[0] = True
             fail_health_check(
                 'Your test used the global random module. '
                 'This is unlikely to work correctly. You should '
                 'consider using the randoms() strategy from '
                 'hypothesis.strategies instead. Alternatively, '
                 'you can use the random_module() strategy to '
                 'explicitly seed the random module.'
             )
Ejemplo n.º 31
0
 def is_template_example(xs):
     """Run the test on the reified template *xs*; True means it failed.

     UnsatisfiedAssumption and HypothesisDeprecationWarning propagate;
     any other exception is recorded in last_exception and reported as
     a counterexample.
     """
     # None marks "no snapshot taken"; guards the finally-block health
     # check against an unbound-name NameError.
     initial_state = None
     if perform_health_check and not warned_random[0]:
         initial_state = getglobalrandomstate()
     record_repr = [None]
     try:
         result = test_runner(
             reify_and_execute(
                 search_strategy,
                 xs,
                 test,
                 record_repr=record_repr,
             ))
         if result is not None:
             # Space added after 'instead.' -- the original concatenation
             # rendered as 'instead.In Hypothesis 2.0'.
             note_deprecation(
                 ('Tests run under @given should return None, but '
                  '%s returned %r instead. '
                  'In Hypothesis 2.0 this will become an error.') %
                 (test.__name__, result), settings)
         return False
     except HypothesisDeprecationWarning:
         raise
     except UnsatisfiedAssumption:
         # Bare raise keeps the original traceback intact.
         raise
     except Exception:
         last_exception[0] = traceback.format_exc()
         repr_for_last_exception[0] = record_repr[0]
         verbose_report(last_exception[0])
         return True
     finally:
         if (initial_state is not None and not warned_random[0]
                 and getglobalrandomstate() != initial_state):
             warned_random[0] = True
             fail_health_check(
                 'Your test used the global random module. '
                 'This is unlikely to work correctly. You should '
                 'consider using the randoms() strategy from '
                 'hypothesis.strategies instead. Alternatively, '
                 'you can use the random_module() strategy to '
                 'explicitly seed the random module.')
Ejemplo n.º 32
0
def best_satisfying_template(
    search_strategy,
    random,
    condition,
    settings,
    storage,
    tracker=None,
    max_parameter_tries=None,
    start_time=None,
):
    """Find and then minimize a satisfying template.

    First look in storage if it is not None, then attempt to generate
    one. May throw all the exceptions of find_satisfying_template. Once
    an example has been found it will be further minimized.

    """
    tracker = Tracker() if tracker is None else tracker
    start_time = time.time() if start_time is None else start_time

    # Starts at -1: presumably the simplifier re-yields the starting
    # example, which should not count as a shrink -- TODO confirm.
    shrinks = -1
    with settings:
        best = find_satisfying_template(
            search_strategy,
            random,
            condition,
            tracker,
            settings,
            storage,
            max_parameter_tries=max_parameter_tries,
        )
        for candidate in simplify_template_such_that(
                search_strategy,
                random,
                best,
                condition,
                tracker,
                settings,
                start_time,
        ):
            shrinks += 1
            best = candidate
        if storage is not None:
            storage.save(best, search_strategy)
        if not shrinks:
            verbose_report('Could not shrink example')
        elif shrinks == 1:
            verbose_report('Successfully shrunk example once')
        else:
            verbose_report('Successfully shrunk example %d times' %
                           (shrinks, ))
        return best
Ejemplo n.º 33
0
    def template_condition(template):
        """Reify *template*, report progress, and return whether the
        reified value satisfies *condition*."""
        with BuildContext():
            result = search.reify(template)
            success = condition(result)

        if success:
            successful_examples[0] += 1
            # First hit is a find; subsequent hits are shrinks.
            if successful_examples[0] == 1:
                fmt = "Found satisfying example %s"
            else:
                fmt = "Shrunk example to %s"
            verbose_report(lambda: fmt % (repr(result),))
        elif not successful_examples[0]:
            # Nothing found yet: report what we are trying.
            verbose_report(lambda: "Trying example %s" % (repr(result),))
        return success
Ejemplo n.º 34
0
    def template_condition(template):
        """Reify *template*, report progress, and pass the outcome
        through assume()."""
        result = search.reify(template)
        success = condition(result)

        if success:
            successful_examples[0] += 1
            # First hit is a find; subsequent hits are shrinks.
            fmt = ('Found satisfying example %s'
                   if successful_examples[0] == 1
                   else 'Shrunk example to %s')
            verbose_report(lambda: fmt % (show(result), ))
        elif not successful_examples[0]:
            verbose_report(lambda: 'Trying example %s' % (show(result), ))
        return assume(success)
Ejemplo n.º 35
0
    def template_condition(template):
        """Reify *template* inside a BuildContext and return whether the
        reified value satisfies *condition*."""
        with BuildContext():
            result = search.reify(template)
            success = condition(result)

        if success:
            successful_examples[0] += 1
            # First hit is a find; subsequent hits are shrinks.
            fmt = (u'Found satisfying example %s'
                   if successful_examples[0] == 1
                   else u'Shrunk example to %s')
            verbose_report(lambda: fmt % (repr(result), ))
        elif not successful_examples[0]:
            verbose_report(lambda: u'Trying example %s' % (repr(result), ))
        return success
Ejemplo n.º 36
0
def best_satisfying_template(search_strategy,
                             random,
                             condition,
                             settings,
                             storage,
                             tracker=None):
    """Find and then minimize a satisfying template.

    First look in storage if it is not None, then attempt to generate
    one. May throw all the exceptions of find_satisfying_template. Once
    an example has been found it will be further minimized.

    """
    tracker = tracker if tracker is not None else Tracker()
    start_time = time.time()

    # Starts at -1: presumably the simplifier re-yields the starting
    # example, which should not count as a shrink -- TODO confirm.
    shrinks = -1
    with settings:
        best = find_satisfying_template(
            search_strategy, random, condition, tracker, settings, storage)

        for candidate in simplify_template_such_that(
                search_strategy, random, best, condition, tracker):
            shrinks += 1
            best = candidate
            if time_to_call_it_a_day(settings, start_time):
                # It's very hard to reliably hit this line even though we have
                # tests for it. No cover prevents this from causing a flaky
                # build.
                break  # pragma: no cover

        if storage is not None:
            storage.save(best)
    if not shrinks:
        verbose_report('Could not shrink example')
    elif shrinks == 1:
        verbose_report('Successfully shrunk example once')
    else:
        verbose_report('Successfully shrunk example %d times' % (shrinks, ))
    return best
Ejemplo n.º 37
0
def best_satisfying_template(
    search_strategy, random, condition, settings, storage, tracker=None,
    max_parameter_tries=None,
):
    """Find and then minimize a satisfying template.

    First look in storage if it is not None, then attempt to generate
    one. May throw all the exceptions of find_satisfying_template. Once
    an example has been found it will be further minimized.

    """
    tracker = tracker if tracker is not None else Tracker()
    start_time = time.time()

    # Starts at -1: presumably the simplifier re-yields the starting
    # example, which should not count as a shrink -- TODO confirm.
    shrinks = -1
    with settings:
        best = find_satisfying_template(
            search_strategy, random, condition, tracker, settings, storage,
            max_parameter_tries=max_parameter_tries,
        )

        for candidate in simplify_template_such_that(
            search_strategy, random, best, condition, tracker,
            settings, start_time,
        ):
            shrinks += 1
            best = candidate
            if time_to_call_it_a_day(settings, start_time):
                # It's very hard to reliably hit this line even though we have
                # tests for it. No cover prevents this from causing a flaky
                # build.
                break  # pragma: no cover

        if storage is not None:
            storage.save(best)
    if not shrinks:
        verbose_report('Could not shrink example')
    elif shrinks == 1:
        verbose_report('Successfully shrunk example once')
    else:
        verbose_report(
            'Successfully shrunk example %d times' % (shrinks,))
    return best
Ejemplo n.º 38
0
    def template_condition(template):
        """Reify *template*, report progress, and return whether the
        reified value satisfies *condition*."""
        result = search.reify(template)
        success = condition(result)

        if success:
            successful_examples[0] += 1
            # First hit is a find; subsequent hits are shrinks.
            fmt = ('Found satisfying example %s'
                   if successful_examples[0] == 1
                   else 'Shrunk example to %s')
            verbose_report(lambda: fmt % (show(result),))
        elif not successful_examples[0]:
            verbose_report(lambda: 'Trying example %s' % (show(result),))
        return success
Ejemplo n.º 39
0
def test_does_print_verbose_in_debug():
    """At debug verbosity, verbose_report output must reach stdout."""
    with settings(verbosity=Verbosity.debug), capture_out() as captured:
        verbose_report(u'Hi')
    assert u'Hi' in captured.getvalue()
Ejemplo n.º 40
0
 def f(x):
     """Emit a fixed verbose-level message; the argument is ignored."""
     message = 'Hi'
     verbose_report(message)
Ejemplo n.º 41
0
 def f(x):
     """Report a constant greeting at verbose level, ignoring *x*."""
     greeting = 'Hi'
     verbose_report(greeting)
Ejemplo n.º 42
0
    def evaluate_test_data(self, data):
        """Run the test once against ``data`` and classify the outcome.

        On success, sets ``self.at_least_one_success`` and returns False
        (the engine keeps searching); on a failed assumption, marks the
        data invalid; on any other test exception, records it and marks
        the data interesting, keyed by (exception type, file, line).
        """
        # Abort the whole run if it has exceeded the hung-test limit.
        if (
            time.time() - self.start_time >= HUNG_TEST_TIME_LIMIT
        ):
            fail_health_check(self.settings, (
                'Your test has been running for at least five minutes. This '
                'is probably not what you intended, so by default Hypothesis '
                'turns it into an error.'
            ), HealthCheck.hung_test)

        try:
            if self.collector is None:
                # Plain path: no coverage collection requested.
                result = self.test_runner(data, reify_and_execute(
                    self.search_strategy, self.test,
                ))
            else:  # pragma: no cover
                # This should always be a no-op, but the coverage tracer has
                # a bad habit of resurrecting itself.
                original = sys.gettrace()
                sys.settrace(None)
                try:
                    self.collector.data = {}
                    result = self.test_runner(data, reify_and_execute(
                        self.search_strategy, self.test,
                        collector=self.collector
                    ))
                finally:
                    # Restore the tracer, then fold the arcs covered by this
                    # example into data.tags so the engine can distinguish
                    # examples by the branches they executed.
                    sys.settrace(original)
                    covdata = CoverageData()
                    self.collector.save_data(covdata)
                    self.coverage_data.update(covdata)
                    for filename in covdata.measured_files():
                        # Skip Hypothesis's own files -- their coverage is
                        # noise for the user's test.
                        if is_hypothesis_file(filename):
                            continue
                        data.tags.update(
                            arc(filename, source, target)
                            for source, target in covdata.arcs(filename)
                        )
            if result is not None and self.settings.perform_health_check:
                # Tests under @given must not return a value.
                fail_health_check(self.settings, (
                    'Tests run under @given should return None, but '
                    '%s returned %r instead.'
                ) % (self.test.__name__, result), HealthCheck.return_value)
            self.at_least_one_success = True
            return False
        except UnsatisfiedAssumption:
            data.mark_invalid()
        except (
            HypothesisDeprecationWarning, FailedHealthCheck,
            StopTest,
        ) + exceptions_to_reraise:
            # Control-flow / framework exceptions must propagate untouched.
            raise
        except Exception as e:
            escalate_hypothesis_internal_error()
            # NOTE: the double-underscore attributes are name-mangled to
            # _<ClassName>__expected_* when set inside this class.
            data.__expected_traceback = traceback.format_exc()
            data.__expected_exception = e
            verbose_report(data.__expected_traceback)

            error_class, _, tb = sys.exc_info()

            # Key the failure by (type, file, line) of the innermost frame
            # so distinct bugs are tracked as distinct interesting origins.
            origin = traceback.extract_tb(tb)[-1]
            filename = origin[0]
            lineno = origin[1]
            data.mark_interesting((error_class, filename, lineno))
Ejemplo n.º 43
0
def test_does_print_verbose_in_debug():
    """verbose_report output is captured when verbosity is debug."""
    with settings(verbosity=Verbosity.debug):
        with capture_out() as captured:
            verbose_report(u'Hi')
    output = captured.getvalue()
    assert u'Hi' in output
Ejemplo n.º 44
0
 def f(x):
     """Emit a fixed verbose-level message; *x* is unused."""
     text = "Hi"
     verbose_report(text)
Ejemplo n.º 45
0
    def evaluate_test_data(self, data):
        """Run the test once against ``data`` and classify the outcome.

        On success, sets ``self.at_least_one_success`` and returns False
        so the engine keeps searching; on a failed assumption, marks the
        data invalid; on any other test exception, marks the data
        interesting, keyed by (exception class, file, line).
        """
        # Abort the whole run if it has exceeded the hung-test limit.
        if (time.time() - self.start_time >= HUNG_TEST_TIME_LIMIT):
            fail_health_check(
                self.settings,
                ('Your test has been running for at least five minutes. This '
                 'is probably not what you intended, so by default Hypothesis '
                 'turns it into an error.'), HealthCheck.hung_test)

        try:
            if self.collector is None:
                # Plain path: no coverage collection requested.
                result = self.test_runner(
                    data, reify_and_execute(
                        self.search_strategy,
                        self.test,
                    ))
            else:  # pragma: no cover
                # This should always be a no-op, but the coverage tracer has
                # a bad habit of resurrecting itself.
                original = sys.gettrace()
                sys.settrace(None)
                try:
                    try:
                        # Reset and start the collector only around the test
                        # call itself; the inner finally guarantees stop().
                        self.collector.data = {}
                        self.collector.start()
                        result = self.test_runner(
                            data,
                            reify_and_execute(
                                self.search_strategy,
                                self.test,
                            ))
                    finally:
                        self.collector.stop()
                finally:
                    # Restore the tracer, then fold lines and arcs covered by
                    # this example into data's tags so the engine can
                    # distinguish examples by the code they executed.
                    sys.settrace(original)
                    covdata = CoverageData()
                    self.collector.save_data(covdata)
                    self.coverage_data.update(covdata)
                    for filename in covdata.measured_files():
                        # Skip Hypothesis's own files -- noise for the user.
                        if is_hypothesis_file(filename):
                            continue
                        for lineno in covdata.lines(filename):
                            data.add_tag(Line(filename, lineno))
                        for source, target in covdata.arcs(filename):
                            data.add_tag(Arc(filename, source, target))
            if result is not None and self.settings.perform_health_check:
                # Tests under @given must not return a value.
                fail_health_check(
                    self.settings,
                    ('Tests run under @given should return None, but '
                     '%s returned %r instead.') % (self.test.__name__, result),
                    HealthCheck.return_value)
            self.at_least_one_success = True
            return False
        except UnsatisfiedAssumption:
            data.mark_invalid()
        except (
                HypothesisDeprecationWarning,
                FailedHealthCheck,
                StopTest,
        ) + exceptions_to_reraise:
            # Control-flow / framework exceptions must propagate untouched.
            raise
        except Exception:
            escalate_hypothesis_internal_error()
            # NOTE(review): here __expected_exception holds the formatted
            # traceback *string* (other versions store the exception object
            # in __expected_exception and the string in __expected_traceback)
            # -- confirm against consumers.  The double-underscore attribute
            # is name-mangled to _<ClassName>__expected_exception.
            data.__expected_exception = traceback.format_exc()
            verbose_report(data.__expected_exception)

            error_class, _, tb = sys.exc_info()

            # Key the failure by (type, file, line) of the innermost frame
            # so distinct bugs are tracked as distinct interesting origins.
            origin = traceback.extract_tb(tb)[-1]
            filename = origin[0]
            lineno = origin[1]
            data.mark_interesting((error_class, filename, lineno))
 def f(x):
     verbose_report("Hi")