Example #1
        def decorate(fn):
            def run_test(args):
                pip = pipeline.make()
                argspec = tuple(from_value(arg, broaden=True) for arg in args)

                result_py = fn(*args)

                try:
                    res = pip(input=fn, argspec=argspec)
                except InferenceError as ierr:
                    print_inference_error(ierr)
                    raise ierr
                except ValidationError as verr:
                    print('Collected the following errors:')
                    for err in verr.errors:
                        n = err.node
                        nlbl = lbl.label(n)
                        print(f'   {nlbl} ({type(n).__name__}) :: {n.type}')
                        print(f'      {err.args[0]}')
                    raise verr

                result_final = res['output'](*args)
                assert _eq(result_py, result_final)

            m = mark.parametrize('args', arglists)(run_test)
            m.__orig__ = fn
            return m
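Examples #1, #3, #5, #6, #16 and several others show only the inner `decorate` closure; the enclosing factory that supplies `pipeline`, `arglists`, and the other free names lies outside the snippet. A minimal, self-contained sketch of that pattern (the factory name `compare_against_python` and its `transform` hook are assumptions, not the original API):

from copy import copy

from pytest import mark


def compare_against_python(*arglists, transform=lambda fn: fn):
    # Factory: capture the argument lists and a compile/transform hook in a
    # closure, then return a decorator that turns a reference function into a
    # parametrized pytest test.
    def decorate(fn):
        def run_test(args):
            if not isinstance(args, tuple):
                args = (args,)
            expected = fn(*map(copy, args))            # plain Python result
            actual = transform(fn)(*map(copy, args))   # transformed result
            assert expected == actual

        m = mark.parametrize('args', arglists)(run_test)
        m.__orig__ = fn  # keep a handle on the undecorated function
        return m

    return decorate


# Hypothetical usage:
# @compare_against_python((2, 3), (4, 5))
# def test_add(x, y):
#     return x + y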
Example #2
def _parametrize_side_effect_response(wrapped):
    def func(websocket):
        return "respdata"

    def func2(websocket):
        websocket.send_message("respdata")

    def gen(websocket):
        yield "respdata"

    def gen2(websocket):
        websocket.send_message("respdata")
        return
        yield  # noqa

    def bytefunc(websocket):
        return b"respdata"

    return mark.parametrize(
        "expected_response,side_effect",
        [("respdata", "respdata"), (b"respdata", b"respdata"),
         ("respdata", ["respdata"]), (b"respdata", [b"respdata"]),
         (b"respdata", bytearray(b"respdata")), ("respdata", func),
         ("respdata", func2), ("respdata", gen), ("respdata", gen2),
         (b"respdata", bytefunc)])(wrapped)
Example #3
        def decorate(fn):
            def run_test(args):
                pip = pipeline.make()
                argspec = tuple({'value': arg} for arg in args)
                pip.resources.inferrer.fill_in(argspec)
                print(argspec)
                for arg in argspec:
                    del arg['value']

                result_py = fn(*args)

                try:
                    res = pip(input=fn, argspec=argspec)
                except InferenceError as ierr:
                    print_inference_error(ierr)
                    raise ierr
                except ValidationError as verr:
                    print('Collected the following errors:')
                    for err in verr.errors:
                        n = err.node
                        nlbl = lbl.label(n)
                        print(f'   {nlbl} ({type(n).__name__}) :: {n.type}')
                        print(f'      {err.args[0]}')
                    raise verr

                result_final = res['output'](*args)
                if isinstance(result_py, numpy.ndarray):
                    assert (result_py == result_final).all()
                else:
                    assert result_py == result_final

            m = mark.parametrize('args', arglists)(run_test)
            m.__orig__ = fn
            return m
Example #4
def parametrize_from(
    data: List[Tuple[str, str, Any]]
) -> Callable[[Callable[..., Any]], Callable[..., Any]]:
    """Utility function to create PyTest parameters from the lists above"""
    return mark.parametrize(
        ["definition", "expected"],
        [param(definition, expected, id=key) for key, definition, expected in data],
    )
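A hedged usage sketch for `parametrize_from`; the data entries and the test body below are illustrative assumptions, only the (key, definition, expected) tuple shape comes from the signature above:

DATA = [
    # (id, definition, expected) entries matching List[Tuple[str, str, Any]]
    ("int-literal", "1", 1),
    ("string-literal", "'a'", "a"),
]


@parametrize_from(DATA)
def test_definition_evaluates_to_expected(definition, expected):
    # Hypothetical check: evaluate the definition string and compare.
    assert eval(definition) == expected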
Example #5
    def decorate(fn):
        def run_test(args):
            result_py = fn(*args)
            res = cconv_pipeline.make()(input=fn)
            check_no_free_variables(res['graph'])
            result_final = res['output'](*args)
            assert result_py == result_final

        m = mark.parametrize('args', arglists)(run_test)
        m.__orig__ = fn
        return m
Example #6
    def decorate(fn):
        def run_test(args):
            result_py = fn(*args)
            res = cconv_pipeline.make()(input=fn, use_llift=use_llift)
            check_no_free_variables(res["graph"])
            result_final = res["output"](*args)
            assert result_py == result_final

        m = mark.parametrize("args", arglists)(run_test)
        m.__orig__ = fn
        return m
Example #7
def parametrize(cases: Mapping[Callable[..., Tensor],
                               Iterable[tuple[Iterable, Number]]]):
    return mark.parametrize(
        'func, args, truth',
        tuple((func, args, truth) for func, vals in cases.items()
              for args, truth in vals)
    )(
        # TODO: pytest fails with "duplicate 'func'"
        #  if you run multiple files that have dtyped cases
        #  if BaseCasesTest.test_case is not copied?!?....
        partial(BaseCasesTest.test_case))
Example #8
    def decorate(fn):
        def test(args):
            if not isinstance(args, tuple):
                args = (args, )
            py_result = fn(*map(copy, args))
            myia_fn = lang_pipeline.run(input=fn)['output']
            myia_result = myia_fn(*map(copy, args))
            assert py_result == myia_result

        m = mark.parametrize('args', list(tests))(test)
        m.__orig__ = fn
        return m
Example #9
    def decorate(fn):
        def test(args):
            if not isinstance(args, tuple):
                args = (args,)

            _fwd_test(fn, args,
                      pipeline=fwd_pipeline,
                      optimize=optimize,
                      python=python)

        m = mark.parametrize('args', list(tests))(test)
        m.__orig__ = fn
        return m
Example #10
    def decorate(fn):
        def test(args):
            if not isinstance(args, tuple):
                args = (args,)
            # TODO: avoid re-parsing every time
            fn2 = parse(fn)
            py_result = fn(*map(copy, args))
            myia_result = run(fn2, tuple(map(copy, args)))
            assert py_result == myia_result

        m = mark.parametrize('args', list(tests))(test)
        m.__orig__ = fn
        return m
Example #11
    def decorate(fn):
        def test(args):
            if not isinstance(args, tuple):
                args = (args,)
            py_result = fn(*map(copy, args))
            argspec = tuple(from_value(arg, broaden=True) for arg in args)
            myia_fn = pipeline.run(input=fn, argspec=argspec)['output']
            myia_result = myia_fn(*map(copy, args))
            assert py_result == myia_result

        m = mark.parametrize('args', list(tests))(test)
        m.__orig__ = fn
        return m
Example #12
    def decorate(fn):
        def test(args):
            if not isinstance(args, tuple):
                args = (args, )
            py_result = fn(*map(copy, args))
            argspec = tuple({'value': a} for a in args)
            res = debug_lin_pipeline.run(input=fn, argspec=argspec)
            myia_fn = res['output']
            myia_result = myia_fn(*map(copy, args))
            assert py_result == myia_result

        m = mark.parametrize('args', list(tests))(test)
        m.__orig__ = fn
        return m
Example #13
def bundles(versions):
    '''
    Parameterize the `bundle` fixture with bundle IDs and versions to test against

    Parameters
    ----------
    versions : list of (bundle_id, version) tuples
        Bundle IDs and versions to test against
    '''
    return mark.parametrize(
        'bundle',
        versions,
        ids=[f'{bundle_id}@{version}' for bundle_id, version in versions],
        indirect=True)
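A hedged usage sketch; the bundle IDs, versions, and assertion are assumptions — only the (bundle_id, version) pairing and the indirect routing through the `bundle` fixture come from the helper above:

@bundles([("analytics", 1), ("analytics", 2)])
def test_bundle_loads(bundle):
    # Each (bundle_id, version) pair reaches the `bundle` fixture as
    # request.param; test IDs read analytics@1 and analytics@2.
    assert bundle is not None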
Example #14
def parametrize(
        names: str,
        vals: typing.Sequence,
        ids: typing.Sequence[str]):
    """Simplify `pytest.mark.parametrize`

    Arguments:
        names {str} -- parameter names
        vals {typing.Sequence} -- parameter values
        ids {typing.Sequence[str]} -- test IDs
    """
    if not check_parametrize(vals, ids):
        breakpoint()

    return mark.parametrize(names, list(vals), ids=pad_to_longest(list(ids)))
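A hedged usage sketch; `check_parametrize` and `pad_to_longest` are helpers from the same module (not shown here), and the values and IDs below are assumptions:

@parametrize(
    "text,expected",
    [("a", 1), ("bb", 2)],
    ids=["single-char", "double-char"],
)
def test_length(text, expected):
    assert len(text) == expected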
Example #15
def bundle_versions(fixture_name, versions):
    '''
    Parameterize a bundle fixture with versions of the bundle to test against

    Parameters
    ----------
    fixture_name : str
        The name of the fixture to parameterize
    versions : list of int
        Versions of the bundle to test against
    '''
    return mark.parametrize(fixture_name,
                            versions,
                            ids=[f'{fixture_name}@{v}' for v in versions],
                            indirect=True)
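A hedged usage sketch; the fixture name and version list are assumptions — the helper simply forwards each version to the named fixture via `indirect=True`:

@bundle_versions("analytics_bundle", [1, 2, 3])
def test_bundle_contents(analytics_bundle):
    # Each version arrives at the `analytics_bundle` fixture as request.param;
    # test IDs read analytics_bundle@1, analytics_bundle@2, analytics_bundle@3.
    assert analytics_bundle is not None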
Example #16
        def decorate(fn):
            def run_test(args):
                # Wrapping the arguments in an Exception instance signals that
                # the test expects that exception type; its .args are used as
                # the call arguments.
                if isinstance(args, Exception):
                    exc = type(args)
                    args = args.args
                else:
                    exc = None
                pdef = pipeline
                if not validate:
                    pdef = pdef.configure(validate=False)
                pip = pdef.make()
                if abstract is None:
                    argspec = tuple(
                        from_value(arg, broaden=True) for arg in args)
                else:
                    argspec = tuple(to_abstract_test(a) for a in abstract)

                if exc is not None:
                    try:
                        mfn = pip(input=fn, argspec=argspec)
                        mfn['output'](*args)
                    except exc:
                        pass
                    return

                result_py = fn(*args)

                try:
                    res = pip(input=fn, argspec=argspec)
                except InferenceError as ierr:
                    print_inference_error(ierr)
                    raise ierr
                except ValidationError as verr:
                    print('Collected the following errors:')
                    for err in verr.errors:
                        n = err.node
                        nlbl = lbl.label(n)
                        tname = type(n).__name__
                        print(f'   {nlbl} ({tname}) :: {n.abstract}')
                        print(f'      {err.args[0]}')
                    raise verr

                result_final = res['output'](*args)
                assert _eq(result_py, result_final)

            m = mark.parametrize('args', arglists)(run_test)
            m.__orig__ = fn
            return m
Example #17
    def decorate(fn):
        def test(test_data):
            testfn = analysis('grad', fn).test
            results = testfn(test_data)
            print(results)
            if not results['match']:
                for row in ['python', 'myia', 'myiag']:
                    print(f'{row}:\t{results[row+"_result"]}')
                fail('Mismatch in output values (see stdout)')
            for arg, d in results["derivatives"].items():
                if not d['match']:
                    print(f'Argument {arg}:')
                    print(f'\tFinite differences: {d["difference"]}')
                    print(f'\tGradient output:    {d["exact"]}')
                    fail(f'Mismatch in gradients for {arg} (see stdout)')

        m = mark.parametrize('test_data', list(tests))(test)
        m.__orig__ = fn
        return m
Example #18
    def decorate(fn):
        def test(args):
            if not isinstance(args, tuple):
                args = (args, )
            if python:
                ref_result = fn(*map(copy, args))
            argspec = tuple(from_value(arg, broaden=True) for arg in args)
            res = pipeline.run(input=fn, argspec=argspec)
            myia_fn = res['output']
            myia_result = myia_fn(*map(copy, args))
            if python:
                if justeq:
                    assert ref_result == myia_result
                else:
                    np.testing.assert_allclose(ref_result, myia_result)

        m = mark.parametrize('args', list(tests))(test)
        m.__orig__ = fn
        return m
Example #19
    def decorate(fn):
        def test(args):
            nonlocal profile
            if not isinstance(args, tuple):
                args = (args, )
            if python:
                ref_result = fn(*map(copy, args))
            argspec = tuple(from_value(arg, broaden=True) for arg in args)
            if profile is True:
                profile = Profile()
            res = pipeline.run(input=fn, argspec=argspec, profile=profile)
            profile.print()
            myia_fn = res['output']
            myia_result = myia_fn(*map(copy, args))
            if python:
                np.testing.assert_allclose(ref_result, myia_result)

        m = mark.parametrize('args', list(tests))(test)
        m.__orig__ = fn
        return m
Example #20
    def decorate(fn):
        try:
            exc = None
            testfn = analysis('grad2', fn).test
        except Exception as e:
            exc = e

        def test(test_data):
            if exc:
                raise exc
            results = testfn(test_data)
            print(results)
            for arg, d in results.items():
                if not d['match']:
                    print(f'Argument {arg}:')
                    print(f'\tFinite differences: {d["difference"]}')
                    print(f'\tGradient output:    {d["computed"]}')
                    fail(f'Mismatch in gradients for {arg} (see stdout)')

        m = mark.parametrize('test_data', list(tests))(test)
        m.__orig__ = fn
        return m
Example #21
    SomeObjectType,
    SomeUnionType,
    SomeInterfaceType,
])

not_output_types: List[GraphQLInputType] = with_modifiers(
    [SomeInputObjectType])

input_types: List[GraphQLInputType] = with_modifiers(
    [GraphQLString, SomeScalarType, SomeEnumType, SomeInputObjectType])

not_input_types: List[GraphQLOutputType] = with_modifiers(
    [SomeObjectType, SomeUnionType, SomeInterfaceType])

parametrize_type = partial(
    mark.parametrize("type_", ids=lambda type_: type_.__class__.__name__))


def schema_with_field_type(type_):
    return GraphQLSchema(
        query=GraphQLObjectType(name="Query",
                                fields={"f": GraphQLField(type_)}),
        types=[type_],
    )


def describe_type_system_a_schema_must_have_object_root_types():
    def accepts_a_schema_whose_query_type_is_an_object_type():
        schema = build_schema("""
            type Query {
              test: String
Example #22
"""
Unit tests.
"""

from mock import (
    MagicMock,
    call,
)
from pytest import mark
from threading import Thread

from pyslot import ThreadSafeSignal as Signal

with_emit_arguments = mark.parametrize('args,kwargs', [
    ([], {}),
    ([1], {}),
    ([], {'a': 2}),
    ([1], {'a': 2}),
])


@with_emit_arguments
def test_signal_emit_no_connection(args, kwargs):
    signal = Signal()
    signal.emit(*args, **kwargs)


@with_emit_arguments
def test_signal_emit_single_connection(args, kwargs):
    callback = MagicMock()
    signal = Signal()
    signal.connect(callback)
Example #23
@fixture(scope="session")
def fixlib():
    "A fixture to guarantee that in pytest lib is finalized at the end"
    if not lib.initialized:
        lib.init_quda()
    yield lib
    if lib.initialized:
        lib.end_quda()


lattice_loop = mark.parametrize(
    "lattice",
    [
        # (2, 2, 2, 2),
        # (3, 3, 3, 3),
        (4, 4, 4, 4),
        (8, 8, 8, 8),
    ],
)

device_loop = mark.parametrize(
    "device",
    [
        True,
        # False,
    ],
)

dtype_loop = mark.parametrize(
    "dtype",
Example #24
def _as_async(code: str):
    return f"__import__('sys').__async_eval__({code!r}, globals(), locals())"


@fixture(autouse=True)
def _clear_modules():
    for name in [*sys.modules]:
        if name.startswith("pydevd") or name.startswith("async_pydevd"):
            del sys.modules[name]


params_mark = mark.parametrize(
    "code,result",
    [
        ("foo()", ) * 2,
        (_as_async("await foo()"), ) * 2,
        ("await foo()", _as_async("await foo()")),
    ],
)


@params_mark
def test_evaluate_expression(mocker, code, result):
    mock_eval: MagicMock = mocker.patch(
        "_pydevd_bundle.pydevd_vars.evaluate_expression")
    mock_find_frame: MagicMock = mocker.patch(
        "_pydevd_bundle.pydevd_vars.find_frame")

    from async_pydevd import pydevd_patch  # noqa # isort:skip
    from _pydevd_bundle.pydevd_vars import evaluate_expression  # isort:skip
Example #25
def pytest_parametrize(*args, **kwargs):
    from pytest import mark
    return mark.parametrize(*pytest_variants(*args, **kwargs))
Example #26
def parametrise_dictionaries(db):
    return mark.parametrize("dictionary", [
        get_mem_dictionary(),
        get_db_dictionary(db),
    ],
                            ids=lambda x: type(x).__name__)
Example #27
import lenskit.util.test as lktu
from lenskit.algorithms import Recommender
from lenskit.util import Stopwatch

from hypothesis import given
from hypothesis.strategies import randoms

_log = logging.getLogger(__name__)

simple_df = pd.DataFrame({
    'item': [1, 1, 2, 3],
    'user': [10, 12, 10, 13],
    'rating': [4.0, 3.0, 5.0, 2.0]
})

methods = mark.parametrize('m', ['lu', 'cg'])


@methods
def test_als_basic_build(m):
    algo = als.ImplicitMF(20,
                          iterations=10,
                          progress=util.no_progress,
                          method=m)
    algo.fit(simple_df)

    assert set(algo.user_index_) == set([10, 12, 13])
    assert set(algo.item_index_) == set([1, 2, 3])
    assert algo.user_features_.shape == (3, 20)
    assert algo.item_features_.shape == (3, 20)
Example #28
def parametrise_hashers(db):
    return mark.parametrize("hasher", [
        MemWordHasher(),
        DBWordHasher(db, 'test'),
    ],
                            ids=lambda x: type(x).__name__)
Example #29
import lenskit.util.test as lktu
from lenskit import sharing as lks
from lenskit.algorithms import Recommender
from lenskit.algorithms.basic import Popular
from lenskit.algorithms.als import BiasedMF
from lenskit.algorithms.item_knn import ItemItem

from pytest import mark

stores = [lks.FileModelStore]
if pickle.HIGHEST_PROTOCOL >= 5:
    # we have Python 3.8
    stores.append(lks.SHMModelStore)

store_param = mark.parametrize('store_cls', stores)


def test_sharing_mode():
    "Ensure sharing mode decorator turns on sharing"
    assert not lks.in_share_context()

    with lks.sharing_mode():
        assert lks.in_share_context()

    assert not lks.in_share_context()


@store_param
def test_store_init(store_cls):
    "Test that a store initializes and shuts down."
Example #30
    def expectJobDone(self):
        self.__nrFinishedJobs += 1

    def expectScheduleDone(self):
        self.__isDone = True


@fixture
def sim(request, databases):
    preparedTime = getattr(request, 'param', None)
    return Simulator(databases, preparedTime)


time2007 = mark.parametrize('sim',
                            [int(time.mktime((2007, 1, 1, 0, 0, 0, 0, 1, 0)))],
                            indirect=True)
"""Start Monday 2007-01-01 at midnight."""


def testScheduleNonExistingOnce(sim):
    """Test one-shot schedule with a non-existing config."""

    sim.prepare(-120, ScheduleRepeat.ONCE, missingConfig=True)
    sim.expectScheduleDone()
    sim.wait(60)


def testScheduleNonExistingRepeat(sim):
    """Test daily schedule with a non-existing config."""
Example #31
import json
import os

from hamcrest import *
from mock import patch
from pytest import mark

from pizzapy.address import Address
from pizzapy.urls import Urls, COUNTRY_USA

fixture_path = os.path.join('tests', 'fixtures', 'stores.json')
with open(fixture_path) as fp:
    stores_fixture = json.load(fp)

address_params = mark.parametrize(
    argnames=('street', 'city', 'region', 'zip'),
    argvalues=[('700 Pennsylvania Avenue NW', 'Washington', 'DC', '20408'),
               ('700 Pennsylvania Avenue NW ', ' Washington ', ' DC ',
                ' 20408 '),
               ('700 Pennsylvania Avenue NW', 'Washington', 'DC', 20408)])


def mocked_request_json(url, **kwargs):

    assert_that(url, equal_to(Urls(COUNTRY_USA).find_url()))
    assert_that(
        kwargs,
        has_entries(line1='700 Pennsylvania Avenue NW',
                    line2='Washington, DC, 20408',
                    type='Delivery'))
    return stores_fixture

Example #32
def backends(*backends):
    return mark.parametrize(('backend',), [(b(),) for b in backends])
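A hedged usage sketch; the backend classes and the assertion are assumptions — the helper instantiates each class once and injects the instance as the `backend` argument:

class MemoryBackend:
    def ping(self):
        return True


class DiskBackend:
    def ping(self):
        return True


@backends(MemoryBackend, DiskBackend)
def test_backend_responds(backend):
    assert backend.ping()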