Example #1
0
def test_flaky_plugin_raises_errors_in_fixture_setup(
        flaky_test,
        flaky_plugin,
        string_io,
        mock_io,
):
    """
    Test for Issue #57 - fixtures which raise an error should show up as
    test errors.

    This test ensures that exceptions occurring when running a test
    fixture are copied into the call info's excinfo field.
    """
    def broken_setup_hook(item):
        # The plugin must invoke the setup hook with the collected test item.
        assert item is flaky_test
        item.ran_setup = True
        return 5 / 0  # deliberately raises ZeroDivisionError

    flaky()(flaky_test)
    flaky_test.ihook = Mock()
    flaky_test.ihook.pytest_runtest_setup = broken_setup_hook
    flaky_plugin._call_infos[flaky_test] = {}  # pylint:disable=protected-access
    call_info = flaky_plugin.call_runtest_hook(flaky_test, 'setup')

    assert flaky_test.ran_setup
    assert string_io.getvalue() == mock_io.getvalue()
    assert call_info.excinfo.type is ZeroDivisionError
def test_flaky_plugin_raises_errors_in_fixture_setup(
    flaky_test,
    flaky_plugin,
    string_io,
    mock_io,
):
    """
    Test for Issue #57 - fixtures which raise an error should show up as
    test errors.

    This test ensures that exceptions occurring when running a test
    fixture are copied into the call info's excinfo field.
    """
    # NOTE(review): this duplicates an identically named definition earlier
    # in the file; if both live in one module, this later one wins at import.
    def error_raising_setup_function(item):
        # The plugin is expected to call the setup hook with the test item.
        assert item is flaky_test
        item.ran_setup = True
        return 5 / 0

    flaky()(flaky_test)
    # Replace the hook container so the error-raising setup hook is used.
    flaky_test.ihook = Mock()
    flaky_test.ihook.pytest_runtest_setup = error_raising_setup_function
    flaky_plugin._call_infos[flaky_test] = {}  # pylint:disable=protected-access
    call_info = flaky_plugin.call_runtest_hook(flaky_test, 'setup')
    assert flaky_test.ran_setup
    # The plugin should not have produced any unexpected output.
    assert string_io.getvalue() == mock_io.getvalue()
    assert call_info.excinfo.type is ZeroDivisionError
Example #3
0
    def _test_flaky_plugin_handles_success(
        self,
        test,
        plugin,
        info,
        stream,
        mock_stream,
        current_passes=0,
        current_runs=0,
        max_runs=2,
        min_passes=1,
    ):
        """Exercise ``plugin.add_success`` and verify bookkeeping and output.

        Marks *test* as flaky, seeds its pass/run counters, invokes
        ``plugin.add_success`` and asserts both the handler's return value
        and the report text written to *stream* match what the plugin
        actually wrote to *mock_stream*.
        """
        flaky(max_runs, min_passes)(test)
        setattr(
            test,
            FlakyNames.CURRENT_PASSES,
            current_passes,
        )
        setattr(
            test,
            FlakyNames.CURRENT_RUNS,
            current_runs,
        )

        # add_success counts this run itself, so compare against counters + 1.
        too_few_passes = current_passes + 1 < min_passes
        retries_remaining = current_runs + 1 < max_runs
        expected_plugin_handles_success = too_few_passes and retries_remaining

        info.when = 'call'
        actual_plugin_handles_success = plugin.add_success(
            info,
            test,
        )

        assert expected_plugin_handles_success == actual_plugin_handles_success
        self._assert_flaky_attributes_contains(
            {
                FlakyNames.CURRENT_PASSES: current_passes + 1,
                FlakyNames.CURRENT_RUNS: current_runs + 1,
            },
            test,
        )
        # Build the expected report on *stream*; the final assert compares it
        # with what the plugin wrote to *mock_stream*.
        stream.writelines([
            self._test_method_name,
            " passed {0} out of the required {1} times. ".format(
                current_passes + 1, min_passes,
            ),
        ])
        if expected_plugin_handles_success:
            stream.write(
                'Running test again until it passes {0} times.\n'.format(
                    min_passes,
                ),
            )
        else:
            stream.write('Success!\n')
        assert stream.getvalue() == mock_stream.getvalue()
    def _test_flaky_plugin_handles_success(
        self,
        test,
        plugin,
        info,
        stream,
        mock_stream,
        current_passes=0,
        current_runs=0,
        max_runs=2,
        min_passes=1,
    ):
        """Run plugin.add_success with seeded counters and check the result."""
        flaky(max_runs, min_passes)(test)
        for attr_name, value in (
            (FlakyNames.CURRENT_PASSES, current_passes),
            (FlakyNames.CURRENT_RUNS, current_runs),
        ):
            setattr(test, attr_name, value)

        # This call itself counts as one more pass and one more run.
        needs_more_passes = current_passes + 1 < min_passes
        can_retry = current_runs + 1 < max_runs
        expected_handled = needs_more_passes and can_retry

        info.when = 'call'
        actual_handled = plugin.add_success(info, test)

        assert expected_handled == actual_handled
        self._assert_flaky_attributes_contains(
            {
                FlakyNames.CURRENT_PASSES: current_passes + 1,
                FlakyNames.CURRENT_RUNS: current_runs + 1,
            },
            test,
        )
        # Reconstruct the expected report, then compare the two streams.
        stream.writelines([
            self._test_method_name,
            " passed {0} out of the required {1} times. ".format(
                current_passes + 1,
                min_passes,
            ),
        ])
        if expected_handled:
            stream.write(
                'Running test again until it passes {0} times.\n'.format(
                    min_passes,
                ),
            )
        else:
            stream.write('Success!\n')
        assert stream.getvalue() == mock_stream.getvalue()
def test_flaky_plugin_can_suppress_success_report(
    flaky_test,
    flaky_plugin,
    call_info,
    string_io,
    mock_io,
):
    """With success reports disabled, add_success writes no extra output."""
    flaky()(flaky_test)
    # pylint:disable=protected-access
    flaky_plugin._flaky_success_report = False
    # pylint:enable=protected-access
    call_info.when = 'call'
    handled = flaky_plugin.add_success(flaky_test)

    assert handled is False
    assert string_io.getvalue() == mock_io.getvalue()
Example #6
0
def test_flaky_plugin_can_suppress_success_report(
    flaky_test,
    flaky_plugin,
    call_info,
    string_io,
    mock_io,
):
    """Verify that disabling _flaky_success_report silences success output."""
    flaky()(flaky_test)
    # pylint:disable=protected-access
    flaky_plugin._flaky_success_report = False
    # pylint:enable=protected-access
    call_info.when = 'call'
    actual_plugin_handles_success = flaky_plugin.add_success(flaky_test)

    # Suppressed success is not "handled" and produces no report output.
    assert actual_plugin_handles_success is False
    assert string_io.getvalue() == mock_io.getvalue()
Example #7
0
def make_iter_test(is_flaky=False, **kwargs):
    """Build an iteration test method, marking it flaky when requested."""
    def itest(self):
        if is_flaky:
            # Give the system a moment to settle before a flaky iteration.
            self.sleep(1.0)
        self.run_iteration(**kwargs)

    if not is_flaky:
        return itest
    return flaky.flaky(max_runs=3)(itest)
    def test_flaky_plugin_exits_after_false_rerun_filter(
        self,
        flaky_test,
        flaky_plugin,
        call_info,
        string_io,
        mock_io,
        mock_error,
        mock_plugin_rerun,
    ):
        """A rerun filter returning False must stop the plugin from rerunning.

        Also checks that the filter receives the failure's exc_info tuple,
        the test name, the test item and the plugin itself, and that the
        "not selected for rerun" report is written.
        """
        err_tuple = (mock_error.type, mock_error.value, mock_error.traceback)

        def rerun_filter(err, name, test, plugin):
            # The plugin must forward the failure context unchanged.
            assert err == err_tuple
            assert name == flaky_test.name
            assert test is flaky_test
            assert plugin is flaky_plugin
            return False

        flaky(rerun_filter=rerun_filter)(flaky_test)
        call_info.when = 'call'

        actual_plugin_handles_failure = flaky_plugin.add_failure(
            call_info,
            flaky_test,
            mock_error,
        )
        # Filter said no: failure is not handled and no rerun is queued.
        assert actual_plugin_handles_failure is False
        assert not mock_plugin_rerun()

        # Build the expected report; mock_io holds what the plugin wrote.
        string_io.writelines([
            self._test_method_name,
            ' failed and was not selected for rerun.',
            '\n\t',
            unicode_type(mock_error.type),
            '\n\t',
            unicode_type(mock_error.value),
            '\n\t',
            unicode_type(mock_error.traceback),
            '\n',
        ])
        assert string_io.getvalue() == mock_io.getvalue()
    def test_flaky_plugin_exits_after_false_rerun_filter(
            self,
            flaky_test,
            flaky_plugin,
            call_info,
            string_io,
            mock_io,
            mock_error,
            mock_plugin_rerun,
    ):
        """A filter that rejects every failure must prevent any rerun."""
        expected_err = (mock_error.type, mock_error.value, mock_error.traceback)

        def always_reject(err, name, test, plugin):
            # The plugin must hand the filter the failure details unchanged.
            assert err == expected_err
            assert name == flaky_test.name
            assert test is flaky_test
            assert plugin is flaky_plugin
            return False

        flaky(rerun_filter=always_reject)(flaky_test)
        call_info.when = 'call'

        handled = flaky_plugin.add_failure(call_info, flaky_test, mock_error)
        assert handled is False
        assert not mock_plugin_rerun()

        # Build the expected report text, then compare the two streams.
        expected_lines = [
            self._test_method_name,
            ' failed and was not selected for rerun.',
        ]
        for detail in (mock_error.type, mock_error.value, mock_error.traceback):
            expected_lines.append('\n\t')
            expected_lines.append(unicode_type(detail))
        expected_lines.append('\n')
        string_io.writelines(expected_lines)
        assert string_io.getvalue() == mock_io.getvalue()
Example #10
0
def flaky_slow(test=None,
               max_runs=5,
               min_passes=1,
               rerun_filter=lambda *a: True):
    """A flaky test decorator that waits between reruns

    Use for tests that depend on eventual database consistency.
    """
    from flaky import flaky

    def delayed_filter(*args):
        # Pause before delegating so the backend has time to catch up.
        sleep(0.5)
        return rerun_filter(*args)

    decorator = flaky(max_runs=max_runs, min_passes=min_passes,
                      rerun_filter=delayed_filter)
    if test is None:
        return decorator
    return decorator(test)
Example #11
0
    def _test_flaky_plugin_handles_failure(
        self,
        test,
        plugin,
        info,
        stream,
        mock_stream,
        mock_error,
        current_errors=None,
        current_passes=0,
        current_runs=0,
        max_runs=2,
        min_passes=1,
    ):
        """Exercise ``plugin.add_failure`` and verify bookkeeping and output.

        Marks *test* as flaky, seeds its error/pass/run state, invokes
        ``plugin.add_failure`` and asserts both the handler's return value
        and the failure report written to *stream* match what the plugin
        actually wrote to *mock_stream*.
        """
        flaky(max_runs, min_passes)(test)
        # Mirror the plugin's convention of appending a placeholder entry
        # for the newest error; note the caller's list is mutated in place.
        if current_errors is None:
            current_errors = [None]
        else:
            current_errors.append(None)
        setattr(
            test,
            FlakyNames.CURRENT_ERRORS,
            current_errors,
        )
        setattr(
            test,
            FlakyNames.CURRENT_PASSES,
            current_passes,
        )
        setattr(
            test,
            FlakyNames.CURRENT_RUNS,
            current_runs,
        )

        # The failure should be handled (rerun) only while the test still
        # needs passes and has runs remaining after this one.
        too_few_passes = current_passes < min_passes
        retries_remaining = current_runs + 1 < max_runs
        expected_plugin_handles_failure = too_few_passes and retries_remaining

        info.when = 'call'
        actual_plugin_handles_failure = plugin.add_failure(
            info,
            test,
            mock_error,
        )

        assert expected_plugin_handles_failure == actual_plugin_handles_failure
        self._assert_flaky_attributes_contains(
            {
                FlakyNames.CURRENT_RUNS: current_runs + 1,
                FlakyNames.CURRENT_ERRORS: current_errors
            },
            test,
        )
        # Build the expected report on *stream*; the final assert compares
        # it with what the plugin wrote to *mock_stream*.
        if expected_plugin_handles_failure:
            stream.writelines([
                self._test_method_name,
                ' failed ({0} runs remaining out of {1}).'.format(
                    max_runs - current_runs - 1, max_runs
                ),
                '\n\t',
                unicode_type(mock_error.type),
                '\n\t',
                unicode_type(mock_error.value),
                '\n\t',
                unicode_type(mock_error.traceback),
                '\n',
            ])
        else:
            message = ' failed; it passed {0} out of the required {1} times.'
            stream.writelines([
                self._test_method_name,
                message.format(
                    current_passes,
                    min_passes
                ),
                '\n\t',
                unicode_type(mock_error.type),
                '\n\t',
                unicode_type(mock_error.value),
                '\n\t',
                unicode_type(mock_error.traceback),
                '\n',
            ])
        assert stream.getvalue() == mock_stream.getvalue()
Example #12
0
import pytest
from flaky import flaky
import random
"""
By default, flaky will retry a failing test once, but that behavior can be
overridden by passing values to the flaky decorator. It accepts two
parameters: max_runs, and min_passes; flaky will run tests up to max_runs
times, until it has succeeded min_passes times. Once a test passes min_passes
times, it’s considered a success; once it has been run max_runs times without
passing min_passes times, it’s considered a failure.
"""

flaky = flaky(max_runs=5, min_passes=1)


@flaky
@pytest.mark.parametrize('i', range(10))
def test_flaky_1(i):
    """Randomly failing test; the flaky decorator retries it on failure."""
    # randint(1, 10) is inclusive on both ends, so this passes 6 times in 10.
    assert 5 <= random.randint(1, 10)


# Conditional decorator: apply a decorator only when a condition holds.


def conditional_decorator(dec, condition):
    """Apply decorator ``dec`` to a function only when ``condition`` is true.

    Args:
        dec: A decorator — a callable taking a function and returning one.
        condition: When falsy, decorated functions pass through unchanged.

    Returns:
        A decorator that conditionally applies ``dec``.
    """
    def decorator(func):
        if not condition:
            # Return the function unchanged, not decorated.
            return func
        return dec(func)
    # Bug fix: the original fell off the end here and returned None,
    # making `@conditional_decorator(dec, cond)` bind the function to None.
    return decorator
Example #13
0
def mark_flaky(f):
    """Makes a test retry on remote service errors."""
    marked = pytest.mark.flaky(f)
    return flaky(max_runs=3, rerun_filter=flaky_filter)(marked)
Example #14
0
def mark_flaky(f):
    """Tag *f* as flaky and retry it when flaky_filter selects the error."""
    tagged = attr('flaky')(f)
    return flaky(max_runs=3, rerun_filter=flaky_filter)(tagged)
    def _test_flaky_plugin_handles_failure(
        self,
        test,
        plugin,
        info,
        stream,
        mock_stream,
        mock_error,
        current_errors=None,
        current_passes=0,
        current_runs=0,
        max_runs=2,
        min_passes=1,
        rerun_filter=None,
    ):
        """Drive plugin.add_failure and assert bookkeeping plus report text."""
        flaky(max_runs, min_passes, rerun_filter)(test)
        # Append a placeholder for the newest error; the caller's list is
        # mutated in place, matching the plugin's own bookkeeping.
        if current_errors is None:
            current_errors = [None]
        else:
            current_errors.append(None)
        for attr_name, value in (
            (FlakyNames.CURRENT_ERRORS, current_errors),
            (FlakyNames.CURRENT_PASSES, current_passes),
            (FlakyNames.CURRENT_RUNS, current_runs),
        ):
            setattr(test, attr_name, value)

        # Handled (rerun) only while passes are still needed and runs remain.
        should_rerun = (current_passes < min_passes
                        and current_runs + 1 < max_runs)

        info.when = 'call'
        handled = plugin.add_failure(info, test, mock_error)

        assert should_rerun == handled
        self._assert_flaky_attributes_contains(
            {
                FlakyNames.CURRENT_RUNS: current_runs + 1,
                FlakyNames.CURRENT_ERRORS: current_errors
            },
            test,
        )
        if should_rerun:
            headline = ' failed ({0} runs remaining out of {1}).'.format(
                max_runs - current_runs - 1, max_runs)
        else:
            headline = (
                ' failed; it passed {0} out of the required {1} times.'
            ).format(current_passes, min_passes)
        # Error details appear after the headline, one per indented line.
        error_details = []
        for part in (mock_error.type, mock_error.value, mock_error.traceback):
            error_details.append('\n\t')
            error_details.append(unicode_type(part))
        stream.writelines(
            [self._test_method_name, headline] + error_details + ['\n'])
        assert stream.getvalue() == mock_stream.getvalue()
Example #16
0
 def __call__(self, func):
     """Decorate *func* with flaky using the stored keyword arguments."""
     return flaky(**self.kwargs)(func)
Example #17
0
                  for _ in range(num_benchmarks))
    }
    assert len(random_benchmarks) == num_benchmarks


def test_csmith_from_seed_retry_count_exceeded(csmith_dataset: CsmithDataset):
    """benchmark_from_seed must raise once retry_count exceeds max_retries."""
    expected = "Csmith failed after 5 attempts with seed 1"
    with pytest.raises(OSError, match=expected):
        csmith_dataset.benchmark_from_seed(
            seed=1, max_retries=3, retry_count=5)


# Rerun Csmith runtime tests on transient service or timeout failures.
# flaky passes ``err`` as an exc_info triple, so err[0] is the exception
# *type* — both checks must therefore use issubclass. The original used
# isinstance(err[0], TimeoutError), which is always False for a type
# object, so timeouts were never actually selected for rerun.
csmith_runtime_flaky = flaky(
    max_runs=5,
    rerun_filter=lambda err, *args: issubclass(
        err[0], (ServiceError, TimeoutError)),
)


@csmith_runtime_flaky
def test_csmith_positive_runtimes(env: LlvmEnv, csmith_dataset: CsmithDataset):
    """Every measured runtime for a Csmith benchmark must be positive."""
    benchmark = next(csmith_dataset.benchmarks())
    env.reset(benchmark=benchmark)
    runtimes = env.observation["Runtime"]
    print(runtimes.tolist())
    assert np.all(np.greater(runtimes, 0))


@csmith_runtime_flaky
def test_csmith_positive_buildtimes(env: LlvmEnv,
from decimal import Decimal
from fractions import Fraction

import pytest
from flaky import flaky

from hypothesis import find, given, assume, example, settings
from tests.common import parametrize, ordered_pair, constant_list
from hypothesis.strategies import just, sets, text, lists, binary, \
    floats, tuples, randoms, booleans, decimals, integers, fractions, \
    recursive, frozensets, dictionaries, sampled_from, random_module
from hypothesis.internal.debug import minimal
from hypothesis.internal.compat import PY3, hrange, reduce, Counter, \
    OrderedDict, integer_types

# Shared decorator: allow up to 3 runs, succeeding after a single pass.
slightly_flaky = flaky(min_passes=1, max_runs=3)


@slightly_flaky
def test_minimize_list_on_large_structure():
    """Shrinking should reach the minimal list: sixty copies of 10."""
    def has_enough_large_elements(xs):
        # Satisfied when at least 60 elements are >= 10.
        large = [x for x in xs if x >= 10]
        return len(large) >= 60

    result = minimal(
        lists(integers(), min_size=60, average_size=120),
        has_enough_large_elements,
        timeout_after=30,
    )
    assert result == [10] * 60
Example #19
0
def mark_flaky(f):
    """Tag *f* as flaky and retry it when flaky_filter selects the error."""
    tagged = attr('flaky')(f)
    return flaky(max_runs=3, rerun_filter=flaky_filter)(tagged)