Example #1
    def wrapper(f):
        assert failure_source in valid_failure_sources
        assert isinstance(flaky, bool)

        tagged_func = attr(known_failure=failure_source, jira_url=jira_url)(f)
        if flaky:
            tagged_func = attr('known_flaky')(tagged_func)
        if notes:
            tagged_func = attr(failure_notes=notes)(tagged_func)
        return tagged_func
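
Note: Examples #1 through #3 show only the inner wrapper of a decorator factory. A hedged reconstruction of the enclosing decorator is sketched below; the signature and the valid_failure_sources tuple are assumptions modeled on the snippet, not code taken verbatim from the projects above.

from nose.plugins.attrib import attr

# Assumed set of allowed sources; the real projects define their own.
valid_failure_sources = ('cassandra', 'systemic', 'test')

def known_failure(failure_source, jira_url, flaky=False, notes=''):
    """Hypothetical outer decorator that returns the wrapper shown above."""
    def wrapper(f):
        assert failure_source in valid_failure_sources
        assert isinstance(flaky, bool)

        # Tag the test so nose's attrib plugin can select or report on it.
        tagged_func = attr(known_failure=failure_source, jira_url=jira_url)(f)
        if flaky:
            tagged_func = attr('known_flaky')(tagged_func)
        if notes:
            tagged_func = attr(failure_notes=notes)(tagged_func)
        return tagged_func
    return wrapper

# Illustrative usage (ticket URL is a placeholder):
#   @known_failure('test', 'https://issues.example.org/TICKET-1234', flaky=True)
#   def test_with_a_known_problem(self): ...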
Example #2
    def wrapper(f):
        assert failure_source in valid_failure_sources
        assert isinstance(flaky, bool)

        tagged_func = attr(known_failure=failure_source, jira_url=jira_url)(f)
        if flaky:
            tagged_func = attr("known_flaky")(tagged_func)
        if notes:
            tagged_func = attr(failure_notes=notes)(tagged_func)
        return tagged_func
Example #3
    def wrapper(f):
        assert_in(failure_source, valid_failure_sources)
        assert_is_instance(flaky, bool)

        tagged_func = attr(known_failure=failure_source,
                           jira_url=jira_url)(f)
        if flaky:
            tagged_func = attr('known_flaky')(tagged_func)

        tagged_func = attr(failure_notes=notes)(tagged_func)
        return tagged_func
Example #4
def use_vcr(func=None, **kwargs):
    """
    Decorator for test functions which go online. A vcr cassette will automatically be created and used to capture and
    play back online interactions. The nose 'vcr' attribute will be set, and the nose 'online' attribute will be set on
    it based on whether it might go online.

    The record mode of VCR can be set using the VCR_RECORD_MODE environment variable when running tests. Depending on
    the record mode, and the existence of an already recorded cassette, this decorator will also dynamically set the
    nose 'online' attribute.

    Keyword arguments to :func:`vcr.VCR.use_cassette` can be supplied.
    """
    if func is None:
        # When called with kwargs, e.g. @use_vcr(inject_cassette=True)
        return functools.partial(use_vcr, **kwargs)
    module = func.__module__.split('tests.')[-1]
    class_name = inspect.stack()[1][3]
    cassette_name = '.'.join([module, class_name, func.__name__])
    kwargs.setdefault('path', cassette_name)
    cassette_path = os.path.join(VCR_CASSETTE_DIR, cassette_name)
    online = True
    # Set our nose online attribute based on the VCR record mode
    if vcr.record_mode == 'none':
        online = False
    elif vcr.record_mode == 'once':
        online = not os.path.exists(cassette_path)
    func = attr(online=online, vcr=True)(func)
    # If we are not going online, disable domain delay during test
    if not online:
        func = mock.patch('flexget.utils.requests.wait_for_domain', new=mock.MagicMock())(func)
    if VCR_RECORD_MODE == 'off':
        return func
    else:
        return vcr.use_cassette(**kwargs)(func)
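
The attributes that use_vcr sets ('online' and 'vcr') are meant to be consumed by nose's attrib plugin on the command line. A minimal, self-contained sketch of that selection mechanism follows; the test name and commands are illustrative and not part of FlexGet itself.

from nose.plugins.attrib import attr

# use_vcr ultimately reduces to tagging the test function like this:
@attr(online=False, vcr=True)
def test_parses_recorded_response():
    pass

# Run only tests that stay offline:
#   nosetests -a '!online'
# Run every VCR-backed test (and allow re-recording cassettes):
#   VCR_RECORD_MODE=all nosetests -a vcr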
Example #5
def require(require_pattern, broken_in=None):
    """Skips the decorated class or method, unless the argument
    'require_pattern' is a case-insensitive regex match for the name of the git
    branch in the directory from which Cassandra is running. For example, the
    method defined here:

        @require('compaction-fixes')
        def compaction_test(self):
            ...

    will run if Cassandra is running from a directory whose current git branch
    is named 'compaction-fixes'. If 'require_pattern' were
    '.*compaction-fixes.*', it would run only when Cassandra is being run from a
    branch whose name contains 'compaction-fixes'.

    To accommodate current branch-naming conventions, it also will run if the
    current Cassandra branch matches 'CASSANDRA-{require_pattern}'. This allows
    users to run tests like:

        @require(4200)
        class TestNewFeature(self):
            ...

    on branches named 'CASSANDRA-4200'.

    If neither 'require_pattern' nor 'CASSANDRA-{require_pattern}' is a
    case-insensitive match for the name of Cassandra's current git branch, the
    test function or class will be skipped with unittest.skip.

    To run decorated methods as if they were not decorated with @require, set
    the environment variable IGNORE_REQUIRE to 'yes' or 'true'. To only run
    methods decorated with require, set IGNORE_REQUIRE to 'yes' or 'true' and
    run `nosetests` with `-a required`. (This uses the built-in `attrib`
    plugin.)
    """
    tagging_decorator = attr('required')
    if IGNORE_REQUIRE:
        return tagging_decorator
    require_pattern = str(require_pattern)
    git_branch = ''
    git_branch = cassandra_git_branch()

    if git_branch:
        git_branch = git_branch.lower()
        run_on_branch_patterns = (require_pattern, 'cassandra-{b}'.format(b=require_pattern))
        # always run the test if the git branch name matches
        if any(re.match(p, git_branch, re.IGNORECASE) for p in run_on_branch_patterns):
            return tagging_decorator
        # if skipping a buggy/flapping test, use since
        elif broken_in:
            def tag_and_skip_after_version(decorated):
                return since('0', broken_in)(tagging_decorator(decorated))
            return tag_and_skip_after_version
        # otherwise, skip with a message
        else:
            def tag_and_skip(decorated):
                return unittest.skip('require ' + str(require_pattern))(tagging_decorator(decorated))
            return tag_and_skip
    else:
        return tagging_decorator
Example #6
def require(require_pattern, broken_in=None):
    """Skips the decorated class or method, unless the argument
    'require_pattern' is a case-insensitive regex match for the name of the git
    branch in the directory from which Cassandra is running. For example, the
    method defined here:

        @require('compaction-fixes')
        def compaction_test(self):
            ...

    will run if Cassandra is running from a directory whose current git branch
    is named 'compaction-fixes'. If 'require_pattern' were
    '.*compaction-fixes.*', it would run only when Cassandra is being run from a
    branch whose name contains 'compaction-fixes'.

    To accommodate current branch-naming conventions, it also will run if the
    current Cassandra branch matches 'CASSANDRA-{require_pattern}'. This allows
    users to run tests like:

        @require(4200)
        class TestNewFeature(self):
            ...

    on branches named 'CASSANDRA-4200'.

    If neither 'require_pattern' nor 'CASSANDRA-{require_pattern}' is a
    case-insensitive match for the name of Cassandra's current git branch, the
    test function or class will be skipped with unittest.skip.

    To run decorated methods as if they were not decorated with @require, set
    the environment variable IGNORE_REQUIRE to 'yes' or 'true'. To only run
    methods decorated with require, set IGNORE_REQUIRE to 'yes' or 'true' and
    run `nosetests` with `-a required`. (This uses the built-in `attrib`
    plugin.)
    """
    tagging_decorator = attr('required')
    if IGNORE_REQUIRE:
        return tagging_decorator
    require_pattern = str(require_pattern)
    git_branch = ''
    git_branch = cassandra_git_branch()

    if git_branch:
        git_branch = git_branch.lower()
        run_on_branch_patterns = (require_pattern, 'cassandra-{b}'.format(b=require_pattern))
        # always run the test if the git branch name matches
        if any(re.match(p, git_branch, re.IGNORECASE) for p in run_on_branch_patterns):
            return tagging_decorator
        # if skipping a buggy/flapping test, use since
        elif broken_in:
            def tag_and_skip_after_version(decorated):
                return since('0', broken_in)(tagging_decorator(decorated))
            return tag_and_skip_after_version
        # otherwise, skip with a message
        else:
            def tag_and_skip(decorated):
                return unittest.skip('require ' + str(require_pattern))(tagging_decorator(decorated))
            return tag_and_skip
    else:
        return tagging_decorator
Example #7
def travis_only(func):
    @functools.wraps(func)
    def run_test(*args, **kwargs):
        if not travis:
            raise SkipTest('Tunnel tests are run in travis-ci only.')
        func(*args, **kwargs)
    return attr('travis_only')(run_test)
Example #8
def use_vcr(func=None, **kwargs):
    """
    Decorator for test functions which go online. A vcr cassette will automatically be created and used to capture and
    play back online interactions. The nose 'vcr' attribute will be set, and the nose 'online' attribute will be set on
    it based on whether it might go online.

    The record mode of VCR can be set using the VCR_RECORD_MODE environment variable when running tests. Depending on
    the record mode, and the existence of an already recorded cassette, this decorator will also dynamically set the
    nose 'online' attribute.

    Keyword arguments to :func:`vcr.VCR.use_cassette` can be supplied.
    """
    if func is None:
        # When called with kwargs, e.g. @use_vcr(inject_cassette=True)
        return functools.partial(use_vcr, **kwargs)
    module = func.__module__.split('tests.')[-1]
    class_name = inspect.stack()[1][3]
    cassette_name = '.'.join([module, class_name, func.__name__])
    kwargs.setdefault('path', cassette_name)
    cassette_path = os.path.join(VCR_CASSETTE_DIR, cassette_name)
    online = True
    # Set our nose online attribute based on the VCR record mode
    if vcr.record_mode == 'none':
        online = False
    elif vcr.record_mode == 'once':
        online = not os.path.exists(cassette_path)
    func = attr(online=online, vcr=True)(func)
    # If we are not going online, disable domain delay during test
    if not online:
        func = mock.patch('flexget.utils.requests.wait_for_domain', new=mock.MagicMock())(func)

    if VCR_RECORD_MODE == 'off':
        return func
    else:
        return vcr.use_cassette(**kwargs)(func)
Example #9
def test_values():
    # @attr(mood="hohum", colors=['red','blue'])
    def test():
        pass
    test = attr(mood="hohum", colors=['red','blue'])(test)
    
    eq_(test.mood, "hohum")
    eq_(test.colors, ['red','blue'])
Example #10
def test_values():
    # @attr(mood="hohum", colors=['red','blue'])
    def test():
        pass
    test = attr(mood="hohum", colors=['red','blue'])(test)
    
    eq_(test.mood, "hohum")
    eq_(test.colors, ['red','blue'])
Example #11
def test_flags():
    # @attr('one','two')
    def test():
        pass
    test = attr('one','two')(test)
    
    eq_(test.one, 1)
    eq_(test.two, 1)
Example #12
def test_flags():
    # @attr('one','two')
    def test():
        pass
    test = attr('one','two')(test)
    
    eq_(test.one, 1)
    eq_(test.two, 1)
Example #13
File: testclass.py Project: kiawin/aloe
    def make_steps(cls, step_container, steps, is_background, outline=None):
        """
        Construct either a scenario or a background calling the specified
        steps.

        The method will have debugging information corresponding to the lines
        in the feature file.
        """

        assert len(steps) > 0

        step_definitions = [cls.prepare_step(step) for step in steps]

        source = 'def run_steps(self):\n'
        if not is_background:
            source += '    self.background()\n'
        source += '\n'.join(
            '    func{i}(step{i}, *args{i}, **kwargs{i})'.format(i=i)
            for i in range(len(step_definitions)))
        source = ast.parse(source)

        # Set locations of the steps
        for step, step_call in zip(steps, source.body[0].body[1:]):
            for node in ast.walk(step_call):
                node.lineno = step.described_at.line

        # Supply all the step functions and arguments
        context = {
            k + str(i): v
            for i, definition in enumerate(step_definitions)
            for k, v in definition.items()
        }

        if is_background:
            func_name = 'background'
        else:
            func_name = step_container.name

        run_steps = make_function(
            source=source,
            context=context,
            source_file=step_container.described_at.file,
            name=func_name,
        )

        try:
            tags = step_container.tags
        except AttributeError:
            tags = ()

        for tag in tags:
            run_steps = attr(tag)(run_steps)

        if not is_background:
            run_steps = CALLBACK_REGISTRY.wrap('example', run_steps,
                                               step_container, outline, steps)

        return run_steps
Example #14
def _test_notebooks_in_path(root_path):
    with TemporaryDirectory() as tmp:
        tmp_path = Path(tmp)
        for nb_path in notebooks_in_path(root_path):
            rel_path = nb_path.relative_to(root_path)
            workdir = tmp_path / rel_path.parent
            workdir.mkdir(parents=True, exist_ok=True)
            description = "Running notebook {}".format(rel_path)
            yield attr(description=description)(run_notebook), nb_path, workdir
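
The yield above relies on nose's generator-test protocol: each yielded tuple is a callable followed by its arguments, and the 'description' attribute set via attr() controls how the generated case is reported. A minimal sketch of the same pattern with illustrative names (check_notebook_stub stands in for run_notebook):

from nose.plugins.attrib import attr

def check_notebook_stub(name):
    # Stand-in for run_notebook(nb_path, workdir).
    assert name

def test_notebook_stubs():
    for name in ('intro.ipynb', 'advanced.ipynb'):
        description = "Running notebook {}".format(name)
        # Each yielded (callable, arg) pair becomes its own test case.
        yield attr(description=description)(check_notebook_stub), name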
Example #15
def wip(f):
    @wraps(f)
    def run_test(*args, **kwargs):
        try:
            f(*args, **kwargs)
        except Exception as e:
            raise SkipTest("WIP test failed: " + str(e))
        fail("test passed but marked as work in progress")
    return attr('wip')(run_test)
Example #16
def test_mixed():
    # @attr('slow', 'net', role='integration')
    def test():
        pass
    test = attr('slow', 'net', role='integration')(test)
    
    eq_(test.slow, 1)
    eq_(test.net, 1)
    eq_(test.role, 'integration')
Example #17
def wip(f):
    @wraps(f)
    def run_test(*args, **kwargs):
        try:
            f(*args, **kwargs)
        except Exception as e:
            raise SkipTest("WIP test failed: " + str(e))
        raise AssertionError("Passing test marked as WIP")
    return attr('wip')(run_test)
Example #18
File: testing.py Project: mwilliamson/nope
def wip(func):
    @functools.wraps(func)
    def run_test(*args, **kwargs):
        try:
            func(*args, **kwargs)
        except Exception as e:
            raise SkipTest("WIP test failed: " + str(e))
        assert False, "test passed but marked as work in progress"
    return attr('wip')(run_test)
Example #19
def wip(f):
    @wraps(f)
    def run_test(*args, **kwargs):
        try:
            f(*args, **kwargs)
        except Exception as e:
            raise SkipTest("WIP test failed: " + str(e))
        raise AssertionError("Passing test marked as WIP")
    return attr('wip')(run_test)
Example #20
def integration(f):
    @wraps(f)
    def run_test(*args, **kwargs):
        integration_run = (os.getenv('INTEGRATION', None) is not None)
        if integration_run:
            f(*args, **kwargs)
        else:
            raise SkipTest("Skipping integration test")
    return attr('integration')(run_test)
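
A hedged usage sketch for the decorator above; it assumes the integration() definition (with its os, functools.wraps, SkipTest and attr imports) is in scope, and the test name is illustrative.

@integration
def test_talks_to_real_service():
    pass

# Skipped by default; enabled by setting the environment variable the decorator reads:
#   INTEGRATION=1 nosetests -a integration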
Example #21
def wip(f):
    @wraps(f)
    def run_test(*args, **kwargs):
        try:
            f(*args, **kwargs)
        except Exception as e:
            raise SkipTest("WIP test failed: " + str(e))
        fail("test passed but marked as work in progress")

    return attr('wip')(run_test)
Example #22
def integration(f):
    @wraps(f)
    def run_test(*args, **kwargs):
        integration_run = (os.getenv('INTEGRATION', None) is not None)
        if integration_run:
            f(*args, **kwargs)
        else:
            raise SkipTest("Skipping integration test")

    return attr('integration')(run_test)
Example #23
def wip(f):
    @wraps(f)
    def run_test(*args, **kwargs):
        try:
            # Set to success so we don't save out the output when we know things
            # are awry
            args[0].success = True
            f(*args, **kwargs)
        except Exception as e:
            raise SkipTest("WIP test failed: " + str(e))
        fail("test passed but marked as work in progress")

    return attr('wip')(run_test)
Example #24
    def make_example(cls, method, scenario, index):
        """
        Set the method attributes to associate it with given scenario and index.
        """

        method.is_example = True
        method.scenario = scenario
        method.scenario_index = index

        for tag in scenario.tags:
            method = attr(tag)(method)

        return method
Example #25
def wip(f):
    @wraps(f)
    def run_test(*args, **kwargs):
        try:
            # Set to success so we don't save out the output when we know things
            # are awry
            args[0].success = True
            f(*args, **kwargs)
        except Exception as e:
            raise SkipTest("WIP test failed: " + str(e))
        fail("test passed but marked as work in progress")
    
    return attr('wip')(run_test)
Example #26
def wip(f):
  """
  Use this as a decorator to mark tests that are "works in progress"
  """
  @wraps(f)
  def run_test(*args, **kwargs):
    try:
      f(*args, **kwargs)
    except Exception as e:
      raise SkipTest("WIP test failed: " + str(e))
    fail("test passed but marked as work in progress")

  return attr('wip')(run_test)
Example #27
    def make_example(cls, method, scenario, index):
        """
        Set the method attributes to associate it with given scenario and index.
        """

        method.is_example = True
        method.scenario = scenario
        method.scenario_index = index

        for tag in scenario.tags:
            method = attr(tag)(method)

        return method
Example #28
def wip(f):
    """
  Use this as a decorator to mark tests that are "works in progress"
  """
    @wraps(f)
    def run_test(*args, **kwargs):
        try:
            f(*args, **kwargs)
        except Exception as e:
            raise SkipTest("WIP test failed: " + str(e))
        fail("test passed but marked as work in progress")

    return attr('wip')(run_test)
Example #29
def wipd(f):
    """
    "Work in progress" decorator and attribute flag.

    "nosetests -a wip" can then be used at the command line to narrow the test run
    to the methods marked with @wipd.

    Usage:
        @wipd
        def test_channelisation(self):
            pass
    """
    return attr('wip')(f)
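
A minimal, self-contained sketch of the selection workflow the docstring above describes; the test name is illustrative.

from nose.plugins.attrib import attr

@attr('wip')
def test_feature_still_in_progress():
    pass

# Run only work-in-progress tests:
#   nosetests -a wip
# Exclude them from a normal run:
#   nosetests -a '!wip'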
Example #30
    def wrapper(f):
        assert_in(failure_source, valid_failure_sources)
        assert_is_instance(flaky, bool)

        try:
            existing_failure_annotations = f.known_failure
        except AttributeError:
            existing_failure_annotations = []

        new_annotation = [{'failure_source': failure_source, 'jira_url': jira_url, 'notes': notes, 'flaky': flaky}]

        failure_annotations = existing_failure_annotations + new_annotation

        tagged_func = attr(known_failure=failure_annotations)(f)

        return tagged_func
Example #31
    def make_scenario(cls, scenario, index):
        """
        Construct a method running the scenario steps.

        index is the 1-based number of the scenario in the feature.
        """

        if scenario.outlines:
            source = 'def run_outlines(self):\n' + '\n'.join(
                '    outline{i}(self)'.format(i=i)
                for i in range(len(scenario.outlines))
            )
            source = ast.parse(source)

            # Set locations of the steps
            for outline, outline_call in \
                    zip(scenario.outlines, source.body[0].body):
                for node in ast.walk(outline_call):
                    node.lineno = outline.line

            context = {
                'outline' + str(i): cls.make_steps(scenario,
                                                   steps,
                                                   is_background=False,
                                                   outline=outline)
                for i, (outline, steps) in enumerate(scenario.evaluated)
            }

            result = make_function(
                source=source,
                context=context,
                source_file=scenario.feature.filename,
                name=scenario.name,
            )
        else:
            result = cls.make_steps(scenario,
                                    scenario.steps,
                                    is_background=False)

        result.is_scenario = True
        result.scenario = scenario
        result.scenario_index = index

        for tag in scenario.tags:
            result = attr(tag)(result)

        return result
Example #32
File: testing.py Project: thomassa/xenrt
def wip(fn):
    """
    Work-in-progress decorator @wip
    This decorator lets you check tests into version control and not gate a push
    while allowing you to work on making them pass
     - If the test fails it will be skipped
     - If the test passes it will report a failure
    """
    @wraps(fn)
    def run_test(*args, **kwargs):
        try:
            fn(*args, **kwargs)
        except Exception as ex:
            raise SkipTest("WIP FAILURE: %s" % str(ex))
        raise AssertionError("Test passed but is work in progress")

    return attr('wip')(run_test)
Example #33
def wip(fn):
    """
    Work-in-progress decorator @wip
    This decorator lets you check tests into version control and not gate a push
    while allowing you to work on making them pass
     - If the test fails it will be skipped
     - If the test passes it will report a failure
    """
    @wraps(fn)
    def run_test(*args, **kwargs):
        try:
            fn(*args, **kwargs)
        except Exception as ex:
            raise SkipTest( "WIP FAILURE: %s" % str(ex))
        raise AssertionError("Test passed but is work in progress")

    return attr('wip')(run_test)
Example #34
def use_vcr(func):
    """
    Decorator for test functions which go online. A vcr cassette will automatically be created and used to capture and
    play back online interactions. The nose 'vcr' attribute will be set, and the nose 'online' attribute will be set on
    it based on whether it might go online.

    The record mode of VCR can be set using the VCR_RECORD_MODE environment variable when running tests. Depending on
    the record mode, and the existence of an already recorded cassette, this decorator will also dynamically set the
    nose 'online' attribute.
    """
    module = func.__module__.split('tests.')[-1]
    class_name = inspect.stack()[1][3]
    cassette_name = '.'.join([module, class_name, func.__name__])
    cassette_path, _ = vcr.get_path_and_merged_config(cassette_name)
    online = True
    # Set our nose online attribute based on the VCR record mode
    if vcr.record_mode == 'none':
        online = False
    elif vcr.record_mode == 'once':
        online = not os.path.exists(cassette_path)
    func = attr(online=online, vcr=True)(func)
    # If we are not going online, disable domain delay during test
    if not online:
        func = mock.patch('flexget.utils.requests.wait_for_domain',
                          new=mock.MagicMock())(func)
    # VCR playback on windows needs a bit of help https://github.com/kevin1024/vcrpy/issues/116
    if sys.platform.startswith(
            'win') and vcr.record_mode != 'all' and os.path.exists(
                cassette_path):
        func = mock.patch(
            'requests.packages.urllib3.connectionpool.is_connection_dropped',
            new=mock.MagicMock(return_value=False))(func)

    @wraps(func)
    def func_with_cassette(*args, **kwargs):
        with vcr.use_cassette(cassette_name) as cassette:
            try:
                func(*args, cassette=cassette, **kwargs)
            except TypeError:
                func(*args, **kwargs)

    if VCR_RECORD_MODE == 'off':
        return func
    else:
        return func_with_cassette
Example #35
def wip(f):
    """Allows running a single test using the @wip decorator

    i.e.
    from superdesk.tests import TestCase
    from tests import wip


    class TestWIP(TestCase):
        @wip
        def test_something(self):
            self.assertEqual(1, 1)

    then run with:
    nosetests -a wip
    """

    return attr("wip")(f)
Example #36
def flaky(max_runs=None, min_passes=None):
    """
    Decorator used to mark a test as "flaky". When used in conjunction with
    the flaky nosetests plugin, this will cause the decorated test to be retried
    until min_passes successes are achieved out of up to max_runs test runs.
    :param max_runs:
        The maximum number of times the decorated test will be run.
    :type max_runs:
        `int`
    :param min_passes:
        The minimum number of times the test must pass to be a success.
    :type min_passes:
        `int`
    :return:
        A wrapper function that includes attributes describing the flaky test.
    :rtype:
        `callable`
    """
    if max_runs is None:
        max_runs = 2
    if min_passes is None:
        min_passes = 1
    if min_passes <= 0:
        raise ValueError('min_passes must be positive')
    # In case @flaky is applied to a function or class without arguments
    # (and without parentheses), max_runs will refer to the wrapped object.
    # In this case, the default value can be used.
    wrapped = None
    if hasattr(max_runs, '__call__'):
        wrapped = max_runs
        max_runs = 2
    if max_runs < min_passes:
        raise ValueError('min_passes cannot be greater than max_runs!')
    wrapper = attr(
        **{
            FlakyNames.MAX_RUNS: max_runs,
            FlakyNames.MIN_PASSES: min_passes,
            FlakyNames.CURRENT_RUNS: 0,
            FlakyNames.CURRENT_PASSES: 0,
        })
    return wrapper(wrapped) if wrapped is not None else wrapper
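
A hedged usage sketch for the decorator above, assuming this flaky() definition and the flaky nose plugin are available; the test names are illustrative.

@flaky(max_runs=3, min_passes=1)
def test_occasionally_times_out():
    pass

# The bare form also works, because max_runs may receive the function itself:
@flaky
def test_rarely_fails():
    pass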
Example #37
def test_class_attrs():
    # @attr('slow', 'net', role='integration')
    class MyTest:
        def setUp():
            pass
        def test_one(self):
            pass
        def test_two(self):
            pass

    class SubClass(MyTest):
        pass

    MyTest = attr('slow', 'net', role='integration')(MyTest)
    eq_(MyTest.slow, 1)
    eq_(MyTest.net, 1)
    eq_(MyTest.role, 'integration')
    eq_(SubClass.slow, 1)

    assert not hasattr(MyTest.setUp, 'slow')
    assert not hasattr(MyTest.test_two, 'slow')
Example #38
    def wrapper(f):
        assert_in(failure_source, valid_failure_sources)
        assert_is_instance(flaky, bool)

        try:
            existing_failure_annotations = f.known_failure
        except AttributeError:
            existing_failure_annotations = []

        new_annotation = [{
            'failure_source': failure_source,
            'jira_url': jira_url,
            'notes': notes,
            'flaky': flaky
        }]

        failure_annotations = existing_failure_annotations + new_annotation

        tagged_func = attr(known_failure=failure_annotations)(f)

        return tagged_func
Example #39
def test_tutorials():
    with TemporaryDirectory() as tmp:
        tmp_path = Path(tmp)

        # Copy tutorial file resources
        for f_path in _TUTORIAL_FILES:
            src = _TUTORIALS_ROOT / f_path
            dest = tmp_path / f_path
            dest.parent.mkdir(parents=True, exist_ok=True)
            if src.is_dir():
                shutil.copytree(src, dest)
            else:
                shutil.copy(src, dest)

        # Emit a test for each notebook
        for nb_path in notebooks_in_path(_TUTORIALS_ROOT):
            rel_path = nb_path.relative_to(_TUTORIALS_ROOT)
            workdir = tmp_path / rel_path.parent
            workdir.mkdir(parents=True, exist_ok=True)
            description = "Running notebook {}".format(rel_path)
            yield attr(description=description)(run_notebook), nb_path, workdir
Example #40
def flaky(max_runs=None, min_passes=None):
    """
    Decorator used to mark a test as "flaky". When used in conjunction with
    the flaky nosetests plugin, this will cause the decorated test to be retried
    until min_passes successes are achieved out of up to max_runs test runs.
    :param max_runs:
        The maximum number of times the decorated test will be run.
    :type max_runs:
        `int`
    :param min_passes:
        The minimum number of times the test must pass to be a success.
    :type min_passes:
        `int`
    :return:
        A wrapper function that includes attributes describing the flaky test.
    :rtype:
        `callable`
    """
    if max_runs is None:
        max_runs = 2
    if min_passes is None:
        min_passes = 1
    if min_passes <= 0:
        raise ValueError('min_passes must be positive')
    # In case @flaky is applied to a function or class without arguments
    # (and without parentheses), max_runs will refer to the wrapped object.
    # In this case, the default value can be used.
    wrapped = None
    if hasattr(max_runs, '__call__'):
        wrapped = max_runs
        max_runs = 2
    if max_runs < min_passes:
        raise ValueError('min_passes cannot be greater than max_runs!')
    wrapper = attr(**{
        FlakyNames.MAX_RUNS: max_runs,
        FlakyNames.MIN_PASSES: min_passes,
        FlakyNames.CURRENT_RUNS: 0,
        FlakyNames.CURRENT_PASSES: 0,
    })
    return wrapper(wrapped) if wrapped is not None else wrapper
Example #41
File: __init__.py Project: H1ghT0p/Flexget
def use_vcr(func):
    """
    Decorator for test functions which go online. A vcr cassette will automatically be created and used to capture and
    play back online interactions. The nose 'vcr' attribute will be set, and the nose 'online' attribute will be set on
    it based on whether it might go online.

    The record mode of VCR can be set using the VCR_RECORD_MODE environment variable when running tests. Depending on
    the record mode, and the existence of an already recorded cassette, this decorator will also dynamically set the
    nose 'online' attribute.
    """
    module = func.__module__.split('tests.')[-1]
    class_name = inspect.stack()[1][3]
    cassette_name = '.'.join([module, class_name, func.__name__])
    cassette_path, _ = vcr.get_path_and_merged_config(cassette_name)
    online = True
    # Set our nose online attribute based on the VCR record mode
    if vcr.record_mode == 'none':
        online = False
    elif vcr.record_mode == 'once':
        online = not os.path.exists(cassette_path)
    func = attr(online=online, vcr=True)(func)
    # If we are not going online, disable domain delay during test
    if not online:
        func = mock.patch('flexget.utils.requests.wait_for_domain', new=mock.MagicMock())(func)
    # VCR playback on windows needs a bit of help https://github.com/kevin1024/vcrpy/issues/116
    if sys.platform.startswith('win') and vcr.record_mode != 'all' and os.path.exists(cassette_path):
        func = mock.patch('requests.packages.urllib3.connectionpool.is_connection_dropped',
                          new=mock.MagicMock(return_value=False))(func)
    @wraps(func)
    def func_with_cassette(*args, **kwargs):
        with vcr.use_cassette(cassette_name) as cassette:
            try:
                func(*args, cassette=cassette, **kwargs)
            except TypeError:
                func(*args, **kwargs)

    if VCR_RECORD_MODE == 'off':
        return func
    else:
        return func_with_cassette
Example #42
File: utils.py Project: stevenxing/qtile
    def __call__(self, function):
        def setup():
            args = [
                "Xephyr", "-keybd", "evdev", "-name", "qtile_test",
                self.display, "-ac", "-screen",
                "%sx%s" % (self.width, self.height)
            ]
            if self.two_screens:
                args.extend(
                    ["-screen",
                     "%sx%s+800+0" % (SECOND_WIDTH, SECOND_HEIGHT)])

            if self.xinerama:
                args.extend(["+xinerama"])
            if self.randr:
                args.extend(["+extension", "RANDR"])
            self.sub = subprocess.Popen(
                args,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
            )
            time.sleep(0.05)
            self.testwindows = []
            if self.start_qtile:
                self.startQtile(self.config)

        def teardown():
            if self.start_qtile:
                libqtile.hook.clear()
                self.stopQtile()
            os.kill(self.sub.pid, 9)
            os.waitpid(self.sub.pid, 0)

        @wraps(function)
        def wrapped_fun():
            return function(self)

        return attr('xephyr')(with_setup(setup, teardown)(wrapped_fun))
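
The return statement above stacks nose's with_setup fixture helper with an attribute tag. A minimal, self-contained sketch of that combination, with stand-in fixtures instead of the Xephyr launch:

from nose.tools import with_setup
from nose.plugins.attrib import attr

def fake_setup():
    pass  # stand-in for starting the Xephyr X server

def fake_teardown():
    pass  # stand-in for killing the Xephyr subprocess

def test_window_opens():
    pass

# Equivalent stacking to the return statement above:
test_window_opens = attr('xephyr')(with_setup(fake_setup, fake_teardown)(test_window_opens))

# Selected with: nosetests -a xephyr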
Example #43
File: utils.py Project: Cadair/qtile
    def __call__(self, function):
        def setup():
            args = [
                "Xephyr", "-keybd", "evdev",
                "-name", "qtile_test",
                self.display, "-ac",
                "-screen", "%sx%s" % (self.width, self.height)]
            if self.two_screens:
                args.extend(["-screen", "%sx%s+800+0" % (
                    SECOND_WIDTH, SECOND_HEIGHT)])

            if self.xinerama:
                args.extend(["+xinerama"])
            if self.randr:
                args.extend(["+extension", "RANDR"])
            self.sub = subprocess.Popen(
                            args,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                        )
            time.sleep(0.05)
            self.testwindows = []
            if self.start_qtile:
                self.startQtile(self.config)

        def teardown():
            if self.start_qtile:
                libqtile.hook.clear()
                self.stopQtile()
            os.kill(self.sub.pid, 9)
            os.waitpid(self.sub.pid, 0)

        @wraps(function)
        def wrapped_fun():
            return function(self)

        return attr('xephyr')(with_setup(setup, teardown)(wrapped_fun))
Example #44
File: utils.py Project: shots47s/datalad
def slow(f):
    """Mark test as slow, although not necessarily an integration or usecase test
    """
    return attr('slow')(f)
Example #45
File: utils.py Project: shots47s/datalad
def integration(f):
    """Mark test as an "integration" test which generally does not need to be run

    Such tests generally tend to be slower
    """
    return attr('integration')(f)
Example #46
File: attr.py Project: nihohi0428/chainer
from nose.plugins.attrib import attr

gpu = attr('gpu')
cudnn = attr('gpu', 'cudnn')
Example #47
File: test_lnorm.py Project: npinto/sthor
def get_suite(plugin, plugin_kwargs=None, tags=None):

    if plugin_kwargs is None:
        plugin_kwargs = {}

    if tags is None:
        tags = []

    def test_input_d_1_default_remove_mean_threshold_stretch():

        arr_in = np.zeros((20, 30, 1), dtype=DTYPE)
        inker_shape = 5, 5
        arr_out = np.zeros((16, 26, 1), dtype=DTYPE)
        np.random.seed(42)
        data = np.random.randn(np.prod(arr_in.shape))
        arr_in[:] = data.reshape(arr_in.shape)

        idx = [[4, 3], [20, 12]]
        gt = np.array([[0.20177312], [0.21249016]], dtype=DTYPE)

        lnorm(arr_in, arr_out=arr_out,
              inker_shape=inker_shape, outker_shape=inker_shape,
              plugin=plugin, plugin_kwargs=plugin_kwargs)
        gv = arr_out[idx]
        assert_allclose(gv, gt, rtol=RTOL, atol=ATOL)

        arr_out = lnorm(arr_in,
                        inker_shape=inker_shape, outker_shape=inker_shape,
                        plugin=plugin, plugin_kwargs=plugin_kwargs)
        gv = arr_out[idx]
        assert_allclose(gv, gt, rtol=RTOL, atol=ATOL)

    def test_input_d_4_div_euclidean_remove_mean_false_default_rest():

        arr_in = np.zeros((20, 30, 4), dtype=DTYPE)
        inker_shape = 5, 5
        arr_out = np.zeros((16, 26, 4), dtype=DTYPE)
        np.random.seed(42)
        arr_in[:] = np.random.randn(np.prod(arr_in.shape)).reshape(arr_in.shape)

        idx = [[4, 3], [20, 12]]
        gt = np.array([[ 0.13273999, -0.09456467,
                        -0.01975331, -0.04648187],
                       [ 0.00148955, -0.00257985,
                         0.02118244, -0.01543736]],
                      dtype=DTYPE)

        lnorm(arr_in, arr_out=arr_out,
              inker_shape=inker_shape, outker_shape=inker_shape,
              div_method='euclidean', remove_mean=False,
              plugin=plugin, plugin_kwargs=plugin_kwargs)
        gv = arr_out[idx]
        assert_allclose(gv, gt, rtol=RTOL, atol=ATOL)

        arr_out = lnorm(arr_in,
                        inker_shape=inker_shape, outker_shape=inker_shape,
                        div_method='euclidean', remove_mean=False,
                        plugin=plugin, plugin_kwargs=plugin_kwargs)
        gv = arr_out[idx]
        assert_allclose(gv, gt, rtol=RTOL, atol=ATOL)

    def test_div_euclidean_remove_mean_false_threshold_1_stretch_1e_2():
        arr_in = np.zeros((20, 30, 4), dtype=DTYPE)
        inker_shape = 5, 5
        arr_out = np.zeros((16, 26, 4), dtype=DTYPE)
        np.random.seed(42)
        arr_in[:] = np.random.randn(np.prod(arr_in.shape)).reshape(arr_in.shape)

        idx = [[4, 3], [20, 12]]
        gt = np.array([[ 0.01255756, -0.00894607,
                        -0.00186872, -0.00439731],
                       [ 0.00013929, -0.00024125,
                         0.00198085, -0.0014436 ]],
                      dtype=DTYPE)

        lnorm(arr_in, arr_out=arr_out,
              inker_shape=inker_shape, outker_shape=inker_shape,
              div_method='euclidean', remove_mean=False, threshold=1, stretch=1e-2,
              plugin=plugin, plugin_kwargs=plugin_kwargs)
        gv = arr_out[idx]
        assert_allclose(gv, gt, rtol=RTOL, atol=ATOL)

        arr_out = lnorm(arr_in,
                        inker_shape=inker_shape, outker_shape=inker_shape,
                        div_method='euclidean', remove_mean=False, threshold=1, stretch=1e-2,
                        plugin=plugin, plugin_kwargs=plugin_kwargs)
        gv = arr_out[idx]
        assert_allclose(gv, gt, rtol=RTOL, atol=ATOL)

    def test_div_euclidean_remove_mean_true():

        arr_in = np.zeros((20, 30, 4), dtype=DTYPE)
        inker_shape = 5, 5
        arr_out = np.zeros((16, 26, 4), dtype=DTYPE)
        np.random.seed(42)
        arr_in[:] = np.random.randn(np.prod(arr_in.shape)).reshape(arr_in.shape)

        idx = [[4, 3], [20, 12]]

        gt = np.array([[  1.27813682e-01, -9.97862518e-02,
                         -2.48777084e-02, -5.16409911e-02],
                       [ -2.00690944e-02, -2.42322776e-02,
                          7.76741435e-05, -3.73861268e-02]],
                      dtype=DTYPE)

        lnorm(arr_in, arr_out=arr_out,
              inker_shape=inker_shape, outker_shape=inker_shape,
              div_method='euclidean', remove_mean=True,
              plugin=plugin, plugin_kwargs=plugin_kwargs)
        gv = arr_out[idx]
        assert_allclose(gv, gt, rtol=RTOL, atol=ATOL)

        arr_out = lnorm(arr_in,
                        inker_shape=inker_shape, outker_shape=inker_shape,
                        div_method='euclidean', remove_mean=True,
                        plugin=plugin, plugin_kwargs=plugin_kwargs)
        gv = arr_out[idx]
        assert_allclose(gv, gt, rtol=RTOL, atol=ATOL)

    def test_div_std_remove_mean_false():

        arr_in = np.zeros((20, 30, 4), dtype=DTYPE)
        inker_shape = 5, 5
        arr_out = np.zeros((16, 26, 4), dtype=DTYPE)
        np.random.seed(42)
        arr_in[:] = np.random.randn(np.prod(arr_in.shape)).reshape(arr_in.shape)

        idx = [[4, 8], [20, 12]]

        gt = np.array([[  1.32899761, -0.94678491,
                         -0.19777086, -0.46537822],
                       [  1.67757177,  0.42027149,
                         -0.70711917, -0.05593578]],
                      dtype=DTYPE)

        lnorm(arr_in, arr_out=arr_out,
              inker_shape=inker_shape, outker_shape=inker_shape,
              div_method='std', remove_mean=False,
              plugin=plugin, plugin_kwargs=plugin_kwargs)
        gv = arr_out[idx]
        assert_allclose(gv, gt, rtol=RTOL, atol=ATOL)

        arr_out = lnorm(arr_in,
                        inker_shape=inker_shape, outker_shape=inker_shape,
                        div_method='std', remove_mean=False,
                        plugin=plugin, plugin_kwargs=plugin_kwargs)
        gv = arr_out[idx]
        assert_allclose(gv, gt, rtol=RTOL, atol=ATOL)

    def test_div_std_remove_mean_true():

        arr_in = np.zeros((20, 30, 4), dtype=DTYPE)
        inker_shape = 5, 5
        arr_out = np.zeros((16, 26, 4), dtype=DTYPE)
        np.random.seed(42)
        arr_in[:] = np.random.randn(np.prod(arr_in.shape)).reshape(arr_in.shape)

        idx = [[4, 8], [20, 12]]
        gt = np.array([[  1.27801514, -0.99776751,
                         -0.2487534 , -0.51636076],
                       [  1.42037416,  0.16307378,
                         -0.9643169 , -0.31313351]],
                      dtype=DTYPE)

        lnorm(arr_in, arr_out=arr_out,
              inker_shape=inker_shape, outker_shape=inker_shape,
              div_method='std', remove_mean=True,
              plugin=plugin, plugin_kwargs=plugin_kwargs)
        gv = arr_out[idx]
        assert_allclose(gv, gt, rtol=RTOL, atol=ATOL)

        arr_out = lnorm(arr_in,
                        inker_shape=inker_shape, outker_shape=inker_shape,
                        div_method='std', remove_mean=True,
                        plugin=plugin, plugin_kwargs=plugin_kwargs)
        gv = arr_out[idx]
        assert_allclose(gv, gt, rtol=RTOL, atol=ATOL)

    def test_outker_shape_0_div_mag_remove_mean_false():
        arr_in = np.zeros((20, 30, 4), dtype=DTYPE)
        inker_shape = 5, 5
        outker_shape = 0, 0
        arr_out = np.zeros((16, 26, 4), dtype=DTYPE)
        np.random.seed(42)
        arr_in[:] = np.random.randn(np.prod(arr_in.shape)).reshape(arr_in.shape)

        idx = [[4, 3], [20, 12]]

        gt = np.array([[ 0.24052431, -0.18180957,
                        -0.04978044, -0.0898783 ],
                       [ 0.00301287, -0.00500357,
                         0.04109935, -0.03260877]],
                      dtype=DTYPE)

        lnorm(arr_in, arr_out=arr_out,
              inker_shape=inker_shape, outker_shape=outker_shape,
              div_method='euclidean', remove_mean=False,
              plugin=plugin, plugin_kwargs=plugin_kwargs)
        gv = arr_out[idx]
        assert_allclose(gv, gt, rtol=RTOL, atol=ATOL)

        arr_out = lnorm(arr_in,
                        inker_shape=inker_shape, outker_shape=outker_shape,
                        div_method='euclidean', remove_mean=False,
                        plugin=plugin, plugin_kwargs=plugin_kwargs)
        gv = arr_out[idx]
        assert_allclose(gv, gt, rtol=RTOL, atol=ATOL)

    def test_outker_shape_0_div_mag_remove_mean_true():
        arr_in = np.zeros((20, 30, 4), dtype=DTYPE)
        inker_shape = 5, 5
        outker_shape = 0, 0
        arr_out = np.zeros((16, 26, 4), dtype=DTYPE)
        np.random.seed(42)
        arr_in[:] = np.random.randn(np.prod(arr_in.shape)).reshape(arr_in.shape)

        idx = [[4, 3], [20, 12]]

        gt = np.array([[ 0.18866782, -0.17986178,
                        -0.05663793, -0.06177634],
                       [-0.00420652, -0.03951693,
                        -0.0673274 , -0.05859426]],
                      dtype=DTYPE)

        lnorm(arr_in, arr_out=arr_out,
              inker_shape=inker_shape, outker_shape=outker_shape,
              div_method='euclidean', remove_mean=True,
              plugin=plugin, plugin_kwargs=plugin_kwargs)
        gv = arr_out[idx]
        assert_allclose(gv, gt, rtol=RTOL, atol=ATOL)

        arr_out = lnorm(arr_in,
                        inker_shape=inker_shape, outker_shape=outker_shape,
                        div_method='euclidean', remove_mean=True,
                        plugin=plugin, plugin_kwargs=plugin_kwargs)
        gv = arr_out[idx]
        assert_allclose(gv, gt, rtol=RTOL, atol=ATOL)

    def test_outker_shape_0_div_std_remove_mean_false():
        arr_in = np.zeros((20, 30, 4), dtype=DTYPE)
        inker_shape = 5, 5
        outker_shape = 0, 0
        arr_out = np.zeros((16, 26, 4), dtype=DTYPE)
        np.random.seed(42)
        arr_in[:] = np.random.randn(np.prod(arr_in.shape)).reshape(arr_in.shape)

        idx = [[4, 8], [20, 12]]

        gt = np.array([[ 1.26222396, -0.90901738,
                        -0.24902068, -0.45406818],
                       [ 1.54160333,  0.49371463,
                         -0.80440265, -0.05310058]],
                      dtype=DTYPE)

        lnorm(arr_in, arr_out=arr_out,
              inker_shape=inker_shape, outker_shape=outker_shape,
              div_method='std', remove_mean=False,
              plugin=plugin, plugin_kwargs=plugin_kwargs)
        gv = arr_out[idx]
        assert_allclose(gv, gt, rtol=RTOL, atol=ATOL)

        arr_out = lnorm(arr_in,
                        inker_shape=inker_shape, outker_shape=outker_shape,
                        div_method='std', remove_mean=False,
                        plugin=plugin, plugin_kwargs=plugin_kwargs)
        gv = arr_out[idx]
        assert_allclose(gv, gt, rtol=RTOL, atol=ATOL)


    def test_outker_shape_0_div_std_remove_mean_true():
        arr_in = np.zeros((20, 30, 4), dtype=DTYPE)
        inker_shape = 5, 5
        outker_shape = 0, 0
        arr_out = np.zeros((16, 26, 4), dtype=DTYPE)
        np.random.seed(42)
        arr_in[:] = np.random.randn(np.prod(arr_in.shape)).reshape(arr_in.shape)

        idx = [[4, 8], [20, 12]]
        gt = np.array([[ 0.94326323, -0.89923584,
                        -0.28315943, -0.30885619],
                       [ 1.27807069,  0.63492846,
                         -1.23798132, -0.50979644]],
                      dtype=DTYPE)

        lnorm(arr_in, arr_out=arr_out,
              inker_shape=inker_shape, outker_shape=outker_shape,
              div_method='std', remove_mean=True,
              plugin=plugin, plugin_kwargs=plugin_kwargs)
        gv = arr_out[idx]
        assert_allclose(gv, gt, rtol=RTOL, atol=ATOL)

        arr_out = lnorm(arr_in,
                        inker_shape=inker_shape, outker_shape=outker_shape,
                        div_method='std', remove_mean=True,
                        plugin=plugin, plugin_kwargs=plugin_kwargs)
        gv = arr_out[idx]
        assert_allclose(gv, gt, rtol=RTOL, atol=ATOL)


    def test_lena_npy_array():

        arr_in = lena()[::32, ::32].astype(DTYPE)

        idx = [[4, 2], [4, 2]]

        gt = np.array([0.2178068, 0.30647671],
                      dtype=DTYPE)

        arr_out = lnorm(arr_in,
                        plugin=plugin, plugin_kwargs=plugin_kwargs)
        gv = arr_out[idx]
        assert_allclose(gv, gt, rtol=RTOL, atol=ATOL)


    def test_lena_npy_array_non_C_contiguous():

        arr_in = lena()[::32, ::32].astype(DTYPE)
        arr_in = np.asfortranarray(arr_in)

        idx = [[4, 2], [4, 2]]

        gt = np.array([0.2178068, 0.30647671],
                      dtype=DTYPE)


        try:
            arr_out = lnorm(arr_in,
                            plugin=plugin, plugin_kwargs=plugin_kwargs)
            gv = arr_out[idx]
            assert_allclose(gv, gt, rtol=RTOL, atol=ATOL)
        except NotImplementedError:
            raise SkipTest

    def test_lena_pt3_array():

        lena32 = lena()[::32, ::32].astype(DTYPE)/255.
        arr_in = Array(lena32.shape, dtype=DTYPE)
        arr_in[:] = lena32

        idx = [[4, 2], [4, 2]]

        gt = np.array([0.21779411,  0.30645376],
                      dtype=DTYPE)

        arr_out = lnorm(arr_in,
                        plugin=plugin, plugin_kwargs=plugin_kwargs)
        gv = arr_out[idx]
        assert_allclose(gv, gt, rtol=RTOL, atol=ATOL)


    suite = {}
    if plugin_kwargs:
        plugin_kwarg_hash = '__kwargs_hash_' + get_pkl_sha1(plugin_kwargs)
    else:
        plugin_kwarg_hash = ""

    for key, value in locals().iteritems():
        if isinstance(value, types.FunctionType) and key.startswith('test_'):
            func = value
            func.__name__ += '__plugin_%s' % plugin + plugin_kwarg_hash
            func = attr(*tags)(info(plugin, plugin_kwargs, func))
            suite[func.__name__] = func

    return suite
Example #48
def multi_gpu(gpu_num):
    return attrib.attr(gpu=gpu_num)
Example #49
File: attr.py Project: 2php/chainer
from nose.plugins import attrib

gpu = attrib.attr('gpu')
cudnn = attrib.attr('gpu', 'cudnn')


def multi_gpu(gpu_num):
    return attrib.attr(gpu=gpu_num)
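
A hedged sketch of how module-level markers like these are applied and selected; the markers are restated here so the snippet is self-contained, and the test names are illustrative rather than taken from Chainer.

from nose.plugins import attrib

gpu = attrib.attr('gpu')

def multi_gpu(gpu_num):
    return attrib.attr(gpu=gpu_num)

@gpu
def test_forward_gpu():
    pass

@multi_gpu(2)
def test_data_parallel():
    pass

# nosetests -a gpu      -> runs both tests (any truthy 'gpu' attribute matches)
# nosetests -a gpu=2    -> runs only the multi-GPU test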
Example #50

class TestAttrClass:
    from_super = True

    def ends_with_test(self):
        pass

    def test_one(self):
        pass

    def test_two(self):
        pass
    test_two.from_super = False

TestAttrClass = attr('a')(TestAttrClass)


class TestAttrSubClass(TestAttrClass):
    def test_sub_three(self):
        pass

def added_later_test(self):
    pass

TestAttrSubClass.added_later_test = added_later_test

class TestClassAndMethodIterAttr(object):
    foo = [ 'a' ]

    def test_one(self):
Example #51
File: helpers.py Project: kjcontri/pymssql
import logging
from os import path
import time

try:
    from nose.tools import eq_
    from nose.plugins.skip import SkipTest
    from nose.plugins.attrib import attr

    def skip_test(reason="No reason given to skip_test"):
        raise SkipTest(reason)

    mark_slow = attr("slow")
except ImportError:
    import pytest

    def eq_(a, b):
        assert a == b

    def skip_test(reason="No reason given to skip_test"):
        pytest.skip(reason)

    def mark_slow(f):
        return f


import _mssql
import pymssql


class Config(object):
Example #52
File: utils.py Project: shots47s/datalad
def usecase(f):
    """Mark test as a usecase a user ran into and which (typically) caused a bug report
    to be filed/troubleshot
    """
    return attr('usecase')(f)
Example #53
def partitioned(cls):
    """
    Marks a test to be run with the partitioned database settings in
    addition to the non-partitioned database settings.
    """
    return attr(sql_backend=True)(cls)
Example #54
def wipd(f):
    return attr('wip')(f)
Example #55
from nose.plugins import attrib

gpu = attrib.attr('gpu')
cudnn = attrib.attr('gpu', 'cudnn')
slow = attrib.attr('slow')


def multi_gpu(gpu_num):
    return attrib.attr(gpu=gpu_num)
Example #56
File: tools.py Project: alexravitz/cyclus
import sys
import imp
import shutil
import unittest
import subprocess
import tempfile
from contextlib import contextmanager

from nose.tools import assert_true, assert_equal
from nose.plugins.attrib import attr
from nose.plugins.skip import SkipTest

if sys.version_info[0] >= 3:
    basestring = str

unit = attr('unit')
integration = attr('integration')

def cleanfs(paths):
    """Removes the paths from the file system."""
    for p in paths:
        p = os.path.join(*p)
        if os.path.isfile(p):
            os.remove(p)
        elif os.path.isdir(p):
            shutil.rmtree(p)

def check_cmd(args, cwd, holdsrtn):
    """Runs a command in a subprocess and verifies that it executed properly.
    """
    if not isinstance(args, basestring):
Example #57
File: common.py Project: AmesianX/angr
import os
import pickle

from nose.plugins.attrib import attr

try:
    import tracer
except ImportError:
    tracer = None

bin_location = str(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../binaries'))
if not os.path.isdir(bin_location):
    raise Exception("Can't find the angr/binaries repo for holding testcases. It should be cloned into the same folder as the rest of your angr modules.")

slow_test = attr(speed='slow')

TRACE_VERSION = 1

def do_trace(proj, test_name, input_data, **kwargs):
    """
    trace, magic, crash_mode, crash_addr = load_cached_trace(proj, "test_blurble")
    """
    fname = os.path.join(bin_location, 'tests_data', 'runner_traces', '%s_%s_%s.p' % (test_name, os.path.basename(proj.filename), proj.arch.name))

    if os.path.isfile(fname):
        try:
            with open(fname, 'rb') as f:
                r = pickle.load(f)
                if type(r) is tuple and len(r) == 2 and r[1] == TRACE_VERSION:
                    return r[0]
        except (pickle.UnpicklingError, UnicodeDecodeError):