Example no. 1
0
def test_can_grow_the_set_of_available_parameters_if_doing_badly():
    """Repeatedly marking examples bad should usually make the source grow
    its parameter set past the minimum, but not explode it in size."""
    runs = 10
    grown = 0
    grown_a_lot = 0
    for _ in hrange(runs):
        source = ExampleSource(
            random=random.Random(),
            strategy=StrategyTable.default().strategy(int),
            storage=None,
            min_parameters=1,
        )
        for seen, example in enumerate(source, 1):
            if example < 0:
                source.mark_bad()
            if seen >= 100:
                break
        if len(source.parameters) > 1:
            grown += 1
        if len(source.parameters) > 10:
            grown_a_lot += 1

    assert grown >= 0.5 * runs
    assert grown_a_lot <= 0.5 * runs
Example no. 2
0
def test_errors_if_you_mark_bad_before_fetching():
    """Calling mark_bad() before any example has been drawn must raise
    ValueError."""
    if hs.default.database is None:
        storage = None
    else:
        storage = hs.default.database.storage_for(int)
    source = ExampleSource(
        random=random.Random(),
        strategy=StrategyTable.default().strategy(int),
        storage=storage,
    )
    with pytest.raises(ValueError):
        source.mark_bad()
Example no. 3
0
def test_tries_each_parameter_at_least_min_index_times():
    """Each parameter slot the source finished with must have been tried at
    least min_tries times, even with frequent mark_bad() feedback."""
    source = ExampleSource(
        random=random.Random(),
        strategy=StrategyTable.default().strategy(int),
        storage=None,
        min_tries=5,
    )
    for draws, x in enumerate(source, 1):
        if draws > 500:
            break
        if draws % 2:
            source.mark_bad()
    # The last index may not have been fully populated
    assert all(count >= 5 for count in source.counts[:-1])
Example no. 4
0
def test_example_source_needs_random():
    """Constructing an ExampleSource with random=None must raise
    ValueError."""
    with pytest.raises(ValueError):
        ExampleSource(
            strategy=StrategyTable.default().strategy(int),
            storage=None,
            random=None,
        )
Example no. 5
0
def test_marking_negative_avoids_similar_examples():
    """Consistently rejecting negative examples should bias the stream
    strongly towards non-negative ones."""
    source = ExampleSource(
        random=random.Random(),
        strategy=StrategyTable.default().strategy(int),
        storage=None,
    )
    positive = 0
    for seen, example in enumerate(source, 1):
        if example < 0:
            source.mark_bad()
        else:
            positive += 1
        if seen >= N_EXAMPLES:
            break
    assert float(positive) / N_EXAMPLES >= 0.8
Example no. 6
0
def test_example_source_terminates_if_just_from_db():
    """With no strategy, the source replays stored examples and then stops
    rather than iterating forever."""
    database = ExampleDatabase()
    storage = database.storage_for(int)
    storage.save(1)
    source = ExampleSource(
        random=random.Random(), storage=storage, strategy=None)
    iterator = iter(source)
    assert next(iterator) == 1
    with pytest.raises(StopIteration):
        next(iterator)
Example no. 7
0
def test_negative_is_not_too_far_off_mean():
    """Without any feedback, roughly half of the drawn ints should be
    non-negative."""
    source = ExampleSource(
        random=random.Random(),
        strategy=StrategyTable.default().strategy(int),
        storage=None,
    )
    non_negative = 0
    for seen, example in enumerate(source, 1):
        non_negative += example >= 0
        if seen >= N_EXAMPLES:
            break
    proportion = float(non_negative) / N_EXAMPLES
    assert 0.3 <= proportion <= 0.7
Example no. 8
0
def test_example_source_needs_at_least_one_useful_argument():
    """A source with neither storage nor strategy has nothing to draw from
    and must refuse construction with ValueError."""
    with pytest.raises(ValueError):
        ExampleSource(strategy=None, storage=None, random=random.Random())
Example no. 9
0
    def falsify(self, hypothesis, *argument_types):  # pylint: disable=too-many-locals,too-many-branches
        """
        Attempt to construct an example tuple x matching argument_types such
        that hypothesis(*x) returns a falsey value or throws an AssertionError

        Returns the simplest falsifying example found. May raise Exhausted
        (too many consecutive duplicate examples), Unsatisfiable (too few
        examples satisfied the hypothesis' assumptions), Timeout (time ran
        out without finding enough examples), Unfalsifiable (no falsifying
        example turned up), or Flaky (a falsifying example stopped
        falsifying on re-execution).
        """
        # Fall back to a deterministic Random seeded from the hypothesis
        # itself, so runs without a configured RNG are reproducible.
        random = self.random
        if random is None:
            random = Random(function_digest(hypothesis))

        search_strategy = (
            self.strategy_table.specification_for(argument_types))
        # Persisted examples are optional: argument types that cannot be
        # serialized simply run without database-backed storage.
        storage = None
        if self.database is not None:
            try:
                storage = self.database.storage_for(argument_types)
            except NotSerializeable:
                pass

        def falsifies(args):  # pylint: disable=missing-docstring
            # True when hypothesis(*args) is falsey or asserts; an
            # UnsatisfiedAssumption example does not count as falsifying.
            try:
                return not hypothesis(*search_strategy.copy(args))
            except AssertionError:
                return True
            except UnsatisfiedAssumption:
                return False

        track_seen = Tracker()
        falsifying_examples = []
        examples_found = 0
        satisfying_examples = 0
        timed_out = False
        # With no argument types there is only the single empty example, so
        # one evaluation is both necessary and sufficient.
        if argument_types:
            max_examples = self.max_examples
            min_satisfying_examples = self.min_satisfying_examples
        else:
            max_examples = 1
            min_satisfying_examples = 1

        example_source = ExampleSource(random=random,
                                       strategy=search_strategy,
                                       storage=storage,
                                       min_parameters=max(
                                           2, int(float(max_examples) / 10)))
        start_time = time.time()

        def time_to_call_it_a_day():
            """Have we exceeded our timeout?"""
            return time.time() >= start_time + self.timeout

        skipped_examples = 0
        examples_seen = 0
        # At present this loop will never exit normally . This needs proper
        # testing when "database only" mode becomes available but right now
        # it's not.
        for args in example_source:  # pragma: no branch
            assert search_strategy.could_have_produced(args)

            # Stop as soon as we have a falsifying example, have used up
            # the example budget, or have run out of time.
            if falsifying_examples:
                break
            if examples_seen >= max_examples:
                break
            if time_to_call_it_a_day():
                break
            examples_seen += 1

            if track_seen.track(args) > 1:
                # Duplicate example: give the source negative feedback, and
                # abort entirely if we only ever see repeats.
                example_source.mark_bad()
                skipped_examples += 1
                if skipped_examples >= self.max_skipped_examples:
                    raise Exhausted(hypothesis, examples_found)
                else:
                    # This really is covered. I suspect a bug in coverage that
                    # I have not yet narrowed down. It is impossible to execute
                    # the other branch without first executing this one and
                    # there is a test that cannot pass without executing the
                    # other branch.
                    continue  # pragma: no cover
            else:
                skipped_examples = 0
            examples_found += 1
            try:
                is_falsifying_example = not hypothesis(
                    *search_strategy.copy(args))
            except AssertionError:
                is_falsifying_example = True
            except UnsatisfiedAssumption:
                # Example violates the hypothesis' assume() conditions;
                # steer the source away from this region.
                example_source.mark_bad()
                continue
            satisfying_examples += 1
            if is_falsifying_example:
                falsifying_examples.append(args)
        run_time = time.time() - start_time
        timed_out = run_time >= self.timeout

        if not falsifying_examples:
            if satisfying_examples < min_satisfying_examples:
                raise Unsatisfiable(hypothesis, satisfying_examples, run_time)
            elif timed_out:
                raise Timeout(hypothesis, satisfying_examples, run_time)
            else:
                raise Unfalsifiable(hypothesis)

        # Re-run each falsifying example; one that no longer falsifies
        # indicates nondeterministic behaviour in the hypothesis.
        for example in falsifying_examples:
            if not falsifies(example):
                raise Flaky(hypothesis, example)

        best_example = falsifying_examples[0]

        # Greedily shrink the first falsifying example, keeping each
        # simplification that still falsifies, until time runs out.
        for simpler in search_strategy.simplify_such_that(
                best_example, falsifies):
            best_example = simpler
            if time_to_call_it_a_day():
                break

        if storage is not None:
            storage.save(best_example)

        return best_example