def test_can_reduce_poison_from_any_subtree(size, seed):
    """This test validates that we can minimize to any leaf node of a binary
    tree, regardless of where in the tree the leaf is."""
    random = Random(seed)

    # Initially we create the minimal tree of the target size, regardless of
    # whether it is poisoned (which it won't be - the poison event
    # essentially never happens when drawing uniformly at random).

    # Choose p so that the expected size of the tree is equal to the desired
    # size.
    p = 1.0 / (2.0 - 1.0 / size)
    strat = PoisonedTree(p)

    def test_function(data):
        v = data.draw(strat)
        if len(v) >= size:
            data.mark_interesting()

    runner = ConjectureRunner(test_function,
                              random=random,
                              settings=settings(TEST_SETTINGS,
                                                buffer_size=LOTS))

    while not runner.interesting_examples:
        runner.test_function(
            runner.new_conjecture_data(lambda data, n: uniform(random, n)))

    runner.shrink_interesting_examples()

    data, = runner.interesting_examples.values()

    assert len(ConjectureData.for_buffer(data.buffer).draw(strat)) == size

    starts = [b.start for b in data.blocks if b.length == 2]
    assert len(starts) % 2 == 0

    for i in hrange(0, len(starts), 2):
        # Now for each leaf position in the tree we try inserting a poison
        # value artificially. Additionally, we add a marker to the end that
        # must be preserved. The marker means that we are not allowed to rely
        # on discarding the end of the buffer to get the desired shrink.
        u = starts[i]
        marker = hbytes([1, 2, 3, 4])

        def test_function_with_poison(data):
            v = data.draw(strat)
            m = data.draw_bytes(len(marker))
            if POISON in v and m == marker:
                data.mark_interesting()

        runner = ConjectureRunner(test_function_with_poison,
                                  random=random,
                                  settings=TEST_SETTINGS)

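        # Overwrite this leaf's four bytes with 0xFF so it decodes to
        # POISON, then append the marker so the shrinker cannot simply
        # discard the tail of the buffer.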
        runner.cached_test_function(data.buffer[:u] + hbytes([255]) * 4 +
                                    data.buffer[u + 4:] + marker)

        assert runner.interesting_examples
        runner.shrink_interesting_examples()

        shrunk, = runner.interesting_examples.values()

        assert ConjectureData.for_buffer(
            shrunk.buffer).draw(strat) == (POISON, )
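

# PoisonedTree and POISON are defined elsewhere in this test suite. What
# follows is a minimal sketch of such a strategy, consistent with how the
# test above uses it: each leaf draws two 2-byte blocks, and decodes to
# POISON exactly when all four bytes are 0xFF. The import paths and the
# biased_coin helper (assumed to return True with probability p) are
# assumptions that vary across Hypothesis versions.
import hypothesis.internal.conjecture.utils as cu
from hypothesis.searchstrategy.strategies import SearchStrategy

POISON = 'POISON'
MAX_INT = 2 ** 32 - 1


class PoisonedTree(SearchStrategy):
    """Generates variable sized tuples with an implicit tree structure.

    The result is flattened out, but the hierarchy shows up in how it
    shrinks.
    """

    def __init__(self, p):
        SearchStrategy.__init__(self)
        self.__p = p

    def do_draw(self, data):
        if cu.biased_coin(data, self.__p):
            # Branch: concatenate two recursively drawn subtrees.
            return data.draw(self) + data.draw(self)
        else:
            # Leaf: drawn as two separate 16-bit draws so that each leaf
            # shows up as a pair of length-2 blocks, matching the block
            # bookkeeping in the test above.
            n = (data.draw_bits(16) << 16) | data.draw_bits(16)
            if n == MAX_INT:
                return (POISON,)
            return (None,)
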
def perform_health_checks(random, settings, test_runner, search_strategy):
    # Tell pytest to omit the body of this function from tracebacks
    __tracebackhide__ = True
    if not settings.perform_health_check:
        return
    if not Settings.default.perform_health_check:
        return

    health_check_random = Random(random.getrandbits(128))
    # We "pre warm" the health check with one draw to give it some
    # time to calculate any cached data. This prevents the case
    # where the first draw of the health check takes ages because
    # of loading unicode data the first time.
    data = ConjectureData(
        max_length=settings.buffer_size,
        draw_bytes=lambda data, n: uniform(health_check_random, n))
    with Settings(settings, verbosity=Verbosity.quiet):
        try:
            test_runner(
                data,
                reify_and_execute(
                    search_strategy,
                    lambda *args, **kwargs: None,
                ))
        except BaseException:
            pass
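    # Any failure during the pre-warm run is deliberately swallowed; the
    # loop below repeats the generation and classifies failures properly.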
    count = 0
    overruns = 0
    filtered_draws = 0
    start = time.time()
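    # Generation budget: stop after 10 valid examples, one second of wall
    # clock time, 50 filtered draws, or 20 overruns, whichever comes first.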
    while (count < 10 and time.time() < start + 1 and filtered_draws < 50
           and overruns < 20):
        try:
            data = ConjectureData(
                max_length=settings.buffer_size,
                draw_bytes=lambda data, n: uniform(health_check_random, n))
            with Settings(settings, verbosity=Verbosity.quiet):
                test_runner(
                    data,
                    reify_and_execute(
                        search_strategy,
                        lambda *args, **kwargs: None,
                    ))
            count += 1
        except UnsatisfiedAssumption:
            filtered_draws += 1
        except StopTest:
            if data.status == Status.INVALID:
                filtered_draws += 1
            else:
                assert data.status == Status.OVERRUN
                overruns += 1
        except InvalidArgument:
            raise
        except Exception:
            escalate_hypothesis_internal_error()
            if (HealthCheck.exception_in_generation
                    in settings.suppress_health_check):
                raise
            report(traceback.format_exc())
            if test_runner is default_new_style_executor:
                fail_health_check(
                    settings,
                    'An exception occurred during data '
                    'generation in initial health check. '
                    'This indicates a bug in the strategy. '
                    'This could either be a Hypothesis bug or '
                    "an error in a function you've passed to "
                    'it to construct your data.',
                    HealthCheck.exception_in_generation,
                )
            else:
                fail_health_check(
                    settings,
                    'An exception occurred during data '
                    'generation in initial health check. '
                    'This indicates a bug in the strategy. '
                    'This could either be a Hypothesis bug or '
                    "an error in a function you've passed to "
                    'it to construct your data. Additionally, '
                    'you have a custom executor, which means '
                    'that this could be your executor failing '
                    'to handle a function which returns None.',
                    HealthCheck.exception_in_generation,
                )
    if overruns >= 20 or (not count and overruns > 0):
        fail_health_check(
            settings,
            ('Examples routinely exceeded the max allowable size. '
             '(%d examples overran while generating %d valid ones)'
             '. Generating examples this large will usually lead to'
             ' bad results. You should try setting average_size or '
             'max_size parameters on your collections and turning '
             'max_leaves down on recursive() calls.') % (overruns, count),
            HealthCheck.data_too_large)
    if filtered_draws >= 50 or (not count and filtered_draws > 0):
        fail_health_check(
            settings, ('It looks like your strategy is filtering out a lot '
                       'of data. Health check found %d filtered examples but '
                       'only %d good ones. This will make your tests much '
                       'slower, and also will probably distort the data '
                       'generation quite a lot. You should adapt your '
                       'strategy to filter less. This can also be caused by '
                       'a low max_leaves parameter in recursive() calls.') %
            (filtered_draws, count), HealthCheck.filter_too_much)
    runtime = time.time() - start
    if runtime > 1.0 or count < 10:
        fail_health_check(
            settings,
            ('Data generation is extremely slow: Only produced '
             '%d valid examples in %.2f seconds (%d invalid ones '
             'and %d exceeded maximum size). Try decreasing '
             "the size of the data you're generating (with e.g. "
             'average_size or max_leaves parameters).') %
            (count, runtime, filtered_draws, overruns),
            HealthCheck.too_slow,
        )
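

# fail_health_check is defined elsewhere in Hypothesis internals. A minimal
# sketch consistent with how it is used above, assuming it simply raises
# FailedHealthCheck unless the given label has been suppressed:
from hypothesis.errors import FailedHealthCheck


def fail_health_check(settings, message, label):
    # Mirror the suppress_health_check membership tests used above.
    if label in settings.suppress_health_check:
        return
    raise FailedHealthCheck(message)
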
    # Fragment: presumably defined inside a runner method, where
    # `self.random` is the enclosing object's Random instance.
    def draw_bytes(data, n):
        return uniform(self.random, n)
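
# `uniform` above is Hypothesis's internal helper for drawing n uniformly
# random bytes. A minimal sketch of an equivalent, assuming the usual
# int_to_bytes compat helper (fixed-width big-endian conversion):
from hypothesis.internal.compat import int_to_bytes


def uniform(random, n):
    return int_to_bytes(random.getrandbits(n * 8), n)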