Example #1
def pytest_funcarg__knl(request):
    pytest.importorskip("pexpect")

    from pymbolic.maxima import MaximaKernel
    knl = MaximaKernel()
    request.addfinalizer(knl.shutdown)
    return knl
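
A note on Example #1: the `pytest_funcarg__` prefix is pytest's long-deprecated funcarg protocol (removed in pytest 4). Under the modern fixture API the same setup and teardown would read roughly as follows (a sketch using the same imports):

import pytest

@pytest.fixture
def knl():
    pytest.importorskip("pexpect")

    from pymbolic.maxima import MaximaKernel
    knl = MaximaKernel()
    yield knl          # provide the kernel to the test
    knl.shutdown()     # teardown replaces request.addfinalizer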
Example #2
def test_imread_fspath():
    pytest.importorskip("PIL")
    from pathlib import Path
    img = plt.imread(
        Path(__file__).parent / 'baseline_images/test_image/uint16.tif')
    assert img.dtype == np.uint16
    assert np.sum(img) == 134184960
Example #3
def test_iterative_imputer_truncated_normal_posterior():
    # test that the values imputed using `sample_posterior=True` with
    # boundaries (`min_value` and `max_value` not None) are drawn from a
    # distribution that looks Gaussian, via the Kolmogorov-Smirnov test.
    # note that starting from the wrong random seed will make this test
    # fail because random sampling doesn't occur at all when the
    # imputation is outside of the (min_value, max_value) range
    pytest.importorskip("scipy", minversion="0.17.0")
    rng = np.random.RandomState(42)

    X = rng.normal(size=(5, 5))
    X[0][0] = np.nan

    imputer = IterativeImputer(min_value=0,
                               max_value=0.5,
                               sample_posterior=True,
                               random_state=rng)

    imputer.fit_transform(X)
    # generate multiple imputations for the single missing value
    imputations = np.array([imputer.transform(X)[0][0] for _ in range(100)])

    assert all(imputations >= 0)
    assert all(imputations <= 0.5)

    mu, sigma = imputations.mean(), imputations.std()
    if sigma == 0:
        sigma += 1e-12
    ks_statistic, p_value = kstest((imputations - mu) / sigma, 'norm')
    # we want to fail to reject null hypothesis
    # null hypothesis: distributions are the same
    assert ks_statistic < 0.2 or p_value > 0.1, \
        "The posterior does not appear to be normal"
Example #4
def test_scan(ctx_factory, dtype, scan_cls):
    from pytest import importorskip
    importorskip("mako")

    context = ctx_factory()
    queue = cl.CommandQueue(context)

    knl = scan_cls(context, dtype, "a+b", "0")

    for n in scan_test_counts:
        host_data = np.random.randint(0, 10, n).astype(dtype)
        dev_data = cl_array.to_device(queue, host_data)

        # /!\ fails on Nv GT2?? for some drivers
        assert (host_data == dev_data.get()).all()

        knl(dev_data)

        desired_result = np.cumsum(host_data, axis=0)
        if scan_cls is ExclusiveScanKernel:
            desired_result -= host_data

        is_ok = (dev_data.get() == desired_result).all()
        if not is_ok:
            print("something went wrong, summarizing error...")
            print(summarize_error(dev_data.get(), desired_result, host_data))

        print("dtype:%s n:%d %s worked:%s" % (dtype, n, scan_cls, is_ok))
        assert is_ok
        from gc import collect
        collect()
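
`summarize_error` in Example #4 is a test utility from the pyopencl suite that the snippet does not show; a minimal stand-in (an assumption about its intent, not the library's actual helper) could report the first few mismatching positions:

import numpy as np

def summarize_error(actual, desired, host_data, n_show=5):
    # indices where the device result disagrees with the reference
    bad = np.flatnonzero(actual != desired)[:n_show]
    return "\n".join(
        "i=%d: got %s, expected %s (input %s)"
        % (i, actual[i], desired[i], host_data[i])
        for i in bad
    )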
Example #5
    def test_get_store(self):
        pytest.importorskip('tables')
        with tm.ensure_clean() as path:
            with tm.assert_produces_warning(FutureWarning,
                                            check_stacklevel=False):
                s = pd.get_store(path)
                s.close()
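
`pd.get_store` was deprecated (hence the `FutureWarning` assertion in Example #5) and later removed; the direct constructor spells the same thing (a usage sketch, requires PyTables just like the importorskip above):

import pandas as pd

with pd.HDFStore("data.h5") as store:
    store.put("df", pd.DataFrame({"a": [1, 2]}))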
Example #6
    def test_mock(self, testdir):
        pytest.importorskip("mock", "1.0.1")
        testdir.makepyfile("""
            import os
            import unittest
            import mock

            class T(unittest.TestCase):
                @mock.patch("os.path.abspath")
                def test_hello(self, abspath):
                    os.path.abspath("hello")
                    abspath.assert_any_call("hello")
            def mock_basename(path):
                return "mock_basename"
            @mock.patch("os.path.abspath")
            @mock.patch("os.path.normpath")
            @mock.patch("os.path.basename", new=mock_basename)
            def test_someting(normpath, abspath, tmpdir):
                abspath.return_value = "this"
                os.path.normpath(os.path.abspath("hello"))
                normpath.assert_any_call("this")
                assert os.path.basename("123") == "mock_basename"
        """)
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=2)
        calls = reprec.getcalls("pytest_runtest_logreport")
        funcnames = [call.report.location[2] for call in calls
                        if call.report.when == "call"]
        assert funcnames == ["T.test_hello", "test_someting"]
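
Example #6 relies on stacked `@mock.patch` decorators injecting mocks bottom-up: the decorator closest to the function supplies the first argument, which is why the signature reads `(normpath, abspath, tmpdir)`. A self-contained illustration:

from unittest import mock
import os

@mock.patch("os.path.abspath")   # outermost -> second argument
@mock.patch("os.path.normpath")  # innermost -> first argument
def demo(normpath, abspath):
    assert normpath is os.path.normpath  # patched within this call
    assert abspath is os.path.abspath

demo()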
Example #7
def test_writing_parquet_with_kwargs(tmpdir, engine):
    fn = str(tmpdir)
    path1 = os.path.join(fn, 'normal')
    path2 = os.path.join(fn, 'partitioned')
    pytest.importorskip("snappy")

    df = pd.DataFrame({'a': np.random.choice(['A', 'B', 'C'], size=100),
                       'b': np.random.random(size=100),
                       'c': np.random.randint(1, 5, size=100)})
    ddf = dd.from_pandas(df, npartitions=3)

    engine_kwargs = {
        'pyarrow': {
            'compression': 'snappy',
            'coerce_timestamps': None,
            'use_dictionary': True
        },
        'fastparquet': {
            'compression': 'snappy',
            'times': 'int64',
            'fixed_text': None
        }
    }

    ddf.to_parquet(path1,  engine=engine, **engine_kwargs[engine])
    out = dd.read_parquet(path1, engine=engine, infer_divisions=should_check_divs(engine))
    assert_eq(out, ddf, check_index=(engine != 'fastparquet'), check_divisions=should_check_divs(engine))

    # Avoid race condition in pyarrow 0.8.0 on writing partitioned datasets
    with dask.config.set(scheduler='sync'):
        ddf.to_parquet(path2, engine=engine, partition_on=['a'],
                       **engine_kwargs[engine])
    out = dd.read_parquet(path2, engine=engine).compute()
    for val in df.a.unique():
        assert set(df.b[df.a == val]) == set(out.b[out.a == val])
Example #8
def test_hdf_globbing():
    pytest.importorskip("tables")
    df = pd.DataFrame({"x": ["a", "b", "c", "d"], "y": [1, 2, 3, 4]}, index=[1.0, 2.0, 3.0, 4.0])

    tdir = tempfile.mkdtemp()
    try:
        df.to_hdf(os.path.join(tdir, "one.h5"), "/foo/data", format="table")
        df.to_hdf(os.path.join(tdir, "two.h5"), "/bar/data", format="table")
        df.to_hdf(os.path.join(tdir, "two.h5"), "/foo/data", format="table")

        res = dd.read_hdf(os.path.join(tdir, "one.h5"), "/*/data", chunksize=2)
        assert res.npartitions == 2
        tm.assert_frame_equal(res.compute(), df)

        res = dd.read_hdf(os.path.join(tdir, "one.h5"), "/*/data", chunksize=2, start=1, stop=3)
        expected = pd.read_hdf(os.path.join(tdir, "one.h5"), "/foo/data", start=1, stop=3)
        tm.assert_frame_equal(res.compute(), expected)

        res = dd.read_hdf(os.path.join(tdir, "two.h5"), "/*/data", chunksize=2)
        assert res.npartitions == 2 + 2
        tm.assert_frame_equal(res.compute(), pd.concat([df] * 2))

        res = dd.read_hdf(os.path.join(tdir, "*.h5"), "/foo/data", chunksize=2)
        assert res.npartitions == 2 + 2
        tm.assert_frame_equal(res.compute(), pd.concat([df] * 2))

        res = dd.read_hdf(os.path.join(tdir, "*.h5"), "/*/data", chunksize=2)
        assert res.npartitions == 2 + 2 + 2
        tm.assert_frame_equal(res.compute(), pd.concat([df] * 3))
    finally:
        shutil.rmtree(tdir)
Example #9
def test_nose_setup_partial(testdir):
    pytest.importorskip("functools")
    p = testdir.makepyfile("""
        from functools import partial

        values = []

        def my_setup(x):
            a = x
            values.append(a)

        def my_teardown(x):
            b = x
            values.append(b)

        my_setup_partial = partial(my_setup, 1)
        my_teardown_partial = partial(my_teardown, 2)

        def test_hello():
            print (values)
            assert values == [1]

        def test_world():
            print (values)
            assert values == [1,2]

        test_hello.setup = my_setup_partial
        test_hello.teardown = my_teardown_partial
    """)
    result = testdir.runpytest(p, '-p', 'nose')
    result.stdout.fnmatch_lines([
        "*2 passed*"
    ])
Example #10
def test_read_hdf():
    pytest.importorskip('tables')
    df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
                       'y': [1, 2, 3, 4]}, index=[1., 2., 3., 4.])
    with tmpfile('h5') as fn:
        df.to_hdf(fn, '/data')
        try:
            dd.read_hdf(fn, 'data', chunksize=2)
            assert False
        except TypeError as e:
            assert "format='table'" in str(e)

    with tmpfile('h5') as fn:
        df.to_hdf(fn, '/data', format='table')
        a = dd.read_hdf(fn, '/data', chunksize=2)
        assert a.npartitions == 2
        assert a._known_dtype

        tm.assert_frame_equal(a.compute(), df)

        tm.assert_frame_equal(
              dd.read_hdf(fn, '/data', chunksize=2, start=1, stop=3).compute(),
              pd.read_hdf(fn, '/data', start=1, stop=3))

        assert sorted(dd.read_hdf(fn, '/data').dask) == \
               sorted(dd.read_hdf(fn, '/data').dask)
Example #11
def test_to_castra():
    pytest.importorskip("castra")
    df = pd.DataFrame({"x": ["a", "b", "c", "d"], "y": [2, 3, 4, 5]}, index=pd.Index([1.0, 2.0, 3.0, 4.0], name="ind"))
    a = dd.from_pandas(df, 2)

    c = a.to_castra()
    b = c.to_dask()
    try:
        tm.assert_frame_equal(df, c[:])
        tm.assert_frame_equal(b.compute(), df)
    finally:
        c.drop()

    c = a.to_castra(categories=["x"])
    try:
        assert c[:].dtypes["x"] == "category"
    finally:
        c.drop()

    c = a.to_castra(sorted_index_column="y")
    try:
        tm.assert_frame_equal(c[:], df.set_index("y"))
    finally:
        c.drop()

    dsk, keys = a.to_castra(compute=False)
    assert isinstance(dsk, dict)
    assert isinstance(keys, list)
    c, last = keys
    assert last[1] == a.npartitions - 1
Example #12
def test_no_outputs(nb_file):
    """Ensure that no cells have output."""
    pytest.importorskip("IPython", minversion="3.0")
    nb = load_notebook(os.path.join(examples_dir, "%s.ipynb" % nb_file))
    for cell in iter_cells(nb):
        assert cell.outputs == [], "Cell outputs not cleared"
        assert cell.execution_count is None, "Execution count not cleared"
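
`load_notebook` and `iter_cells` in Example #12 are project helpers not shown in the snippet; a plausible minimal version built on nbformat (an assumption, not the project's actual code):

import nbformat

def load_notebook(path):
    return nbformat.read(path, as_version=4)

def iter_cells(nb, cell_type="code"):
    # only code cells carry outputs / execution_count
    for cell in nb.cells:
        if cell.cell_type == cell_type:
            yield cell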
Example #13
def test_to_castra():
    pytest.importorskip('castra')
    df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
                       'y': [2, 3, 4, 5]},
                       index=pd.Index([1., 2., 3., 4.], name='ind'))
    a = dd.from_pandas(df, 2)

    c = a.to_castra()
    b = c.to_dask()
    try:
        tm.assert_frame_equal(df, c[:])
        tm.assert_frame_equal(b.compute(), df)
    finally:
        c.drop()

    c = a.to_castra(categories=['x'])
    try:
        assert c[:].dtypes['x'] == 'category'
    finally:
        c.drop()

    c = a.to_castra(sorted_index_column='y')
    try:
        tm.assert_frame_equal(c[:], df.set_index('y'))
    finally:
        c.drop()

    dsk, keys = a.to_castra(compute=False)
    assert isinstance(dsk, dict)
    assert isinstance(keys, list)
    c, last = keys
    assert last[1] == a.npartitions - 1
Example #14
def test_sympy_interaction():
    pytest.importorskip("sympy")

    import sympy as sp

    x, y = sp.symbols("x y")
    f = sp.symbols("f")

    s1_expr = 1/f(x/sp.sqrt(x**2+y**2)).diff(x, 5)

    from pymbolic.sympy_interface import (
            SympyToPymbolicMapper,
            PymbolicToSympyMapper)
    s2p = SympyToPymbolicMapper()
    p2s = PymbolicToSympyMapper()

    p1_expr = s2p(s1_expr)
    s2_expr = p2s(p1_expr)

    assert sp.ratsimp(s1_expr - s2_expr) == 0

    p2_expr = s2p(s2_expr)
    s3_expr = p2s(p2_expr)

    assert sp.ratsimp(s1_expr - s3_expr) == 0
Example #15
    def test_hdf_key(self, tmpdir):

        pytest.importorskip('tables')

        # tmp hdf store
        folder = str(tmpdir)
        vo = v.drop('o', axis=1)
        vs = pd.HDFStore(folder + 'vs.h5', mode='w')
        vs.put('v', vo, format='t', data_columns=True, index=False)

        e_true = e_full_true[(e_full_true.dsi <= 3) &
                             (e_full_true.dsf <= 1)]

        g = DeepGraph(vs)
        g.create_edges(selectors=[dsi_dsf_t], hdf_key='v')
        e_test = g.e

        pdt.assert_frame_equal(e_test.sort(axis=1), e_true.sort(axis=1))

        g.create_edges_ft(('si', v.si.max()),
                          selectors=[dsi_dsf_t], hdf_key='v')
        e_test = g.e.drop('ft_r', axis=1)
        vs.close()

        pdt.assert_frame_equal(e_test.sort(axis=1), e_true.sort(axis=1))
Example #16
def test_to_hdf_lock_delays():
    pytest.importorskip('tables')
    df16 = pd.DataFrame({'x': ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p'],
                       'y': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]},
                            index=[1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16.])
    a = dd.from_pandas(df16, 16)

    # add artificial delays so the last tasks finish first, simulating
    # partitions completing out of order
    def delayed_nop(i):
        if i[1] < 10:
            sleep(0.1*(10-i[1]))
        return i

    # saving to multiple hdf nodes
    with tmpfile() as fn:
        a = a.apply(delayed_nop, axis=1, columns=a.columns)
        a.to_hdf(fn, '/data*')
        out = dd.read_hdf(fn, '/data*')
        eq(df16, out)

    # saving to multiple hdf files
    # artificial delays again make the last tasks finish first
    with tmpdir() as dn:
        fn = os.path.join(dn, 'data*')
        a = a.apply(delayed_nop, axis=1, columns=a.columns)
        a.to_hdf(fn, '/data')
        out = dd.read_hdf(fn, '/data')
        eq(df16, out)
Example #17
def test_to_hdf_thread():
    pytest.importorskip('tables')
    df = pd.DataFrame({'x': ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p'],
                       'y': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]},
                            index=[1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16.])
    a = dd.from_pandas(df, 16)

    # test single file single node
    with tmpfile('h5') as fn:
        a.to_hdf(fn, '/data', get=dask.threaded.get)
        out = pd.read_hdf(fn, '/data')
        eq(df, out)

    # test multiple files single node
    with tmpdir() as dn:
        fn = os.path.join(dn, 'data_*.h5')
        a.to_hdf(fn, '/data', get=dask.threaded.get)
        out = dd.read_hdf(fn, '/data')
        eq(df, out)

    # test single file multiple nodes
    with tmpfile('h5') as fn:
        a.to_hdf(fn, '/data*', get=dask.threaded.get)
        out = dd.read_hdf(fn, '/data*')
        eq(df, out)
Example #18
def test_to_hdf():
    pytest.importorskip('tables')
    df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
                       'y': [1, 2, 3, 4]}, index=[1., 2., 3., 4.])
    a = dd.from_pandas(df, 2)

    with tmpfile('h5') as fn:
        a.to_hdf(fn, '/data')
        out = pd.read_hdf(fn, '/data')
        tm.assert_frame_equal(df, out[:])

    with tmpfile('h5') as fn:
        a.x.to_hdf(fn, '/data')
        out = pd.read_hdf(fn, '/data')
        tm.assert_series_equal(df.x, out[:])

    a = dd.from_pandas(df, 1)
    with tmpfile('h5') as fn:
        a.to_hdf(fn, '/data')
        out = pd.read_hdf(fn, '/data')
        tm.assert_frame_equal(df, out[:])

    # test compute = False
    with tmpfile('h5') as fn:
        r = a.to_hdf(fn, '/data', compute=False)
        r.compute()
        out = pd.read_hdf(fn, '/data')
        tm.assert_frame_equal(df, out[:])
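
The `compute=False` branch of Example #18 returns a lazy object, which allows several writes to be fused into one scheduler pass (a usage sketch with hypothetical output paths, assuming the modern dask API):

import dask
import dask.dataframe as dd
import pandas as pd

df = pd.DataFrame({'x': [1, 2, 3, 4]})
a = dd.from_pandas(df, npartitions=2)

r1 = a.to_hdf('out1.h5', '/data', compute=False)
r2 = a.to_hdf('out2.h5', '/data', compute=False)
dask.compute(r1, r2)  # one pass writes both files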
Example #19
def test_to_hdf_link_optimizations():
    """testing dask link levels is correct by calculating the depth of the dask graph"""
    pytest.importorskip('tables')
    df16 = pd.DataFrame({'x': ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p'],
                       'y': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]},
                            index=[1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16.])
    a = dd.from_pandas(df16, 16)

    # saving to multiple hdf files, no links are needed
    # expected layers: from_pandas, to_hdf, list = depth of 3
    with tmpdir() as dn:
        fn = os.path.join(dn, 'data*')
        d = a.to_hdf(fn, '/data', compute=False)
        assert dependency_depth(d.dask) == 3

    # saving to a single hdf file with multiple nodes
    # all subsequent nodes depend on the first
    # expected layers: from_pandas, first to_hdf(creates file+node), subsequent to_hdfs, list = 4
    with tmpfile() as fn:
        d = a.to_hdf(fn, '/data*', compute=False)
        assert dependency_depth(d.dask) == 4

    # saving to a single hdf file with a single node
    # every node depends on the previous node
    # expected layers: from_pandas, to_hdf once per partition (chained), list = 2 + npartitions
    with tmpfile() as fn:
        d = a.to_hdf(fn, '/data', compute=False)
        assert dependency_depth(d.dask) == 2 + a.npartitions
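
`dependency_depth` is not shown in Example #19; a sketch consistent with its assertions, walking the task graph's dependencies recursively (dask's test suite defines a similar helper):

from dask.core import get_deps

def dependency_depth(dsk):
    deps, _ = get_deps(dsk)

    def max_depth(key):
        if not deps[key]:
            return 1
        return 1 + max(max_depth(dep_key) for dep_key in deps[key])

    return max(max_depth(key) for key in dsk)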
Example #20
def test_key_value_sorter(ctx_factory):
    from pytest import importorskip
    importorskip("mako")

    context = ctx_factory()
    queue = cl.CommandQueue(context)

    n = 10**5
    nkeys = 2000
    from pyopencl.clrandom import rand as clrand
    keys = clrand(queue, n, np.int32, b=nkeys)
    values = clrand(queue, n, np.int32, b=n).astype(np.int64)

    assert np.max(keys.get()) < nkeys

    from pyopencl.algorithm import KeyValueSorter
    kvs = KeyValueSorter(context)
    starts, lists, evt = kvs(queue, keys, values, nkeys, starts_dtype=np.int32)

    starts = starts.get()
    lists = lists.get()

    mydict = dict()
    for k, v in zip(keys.get(), values.get()):
        mydict.setdefault(k, []).append(v)

    for i in range(nkeys):
        start, end = starts[i:i+2]
        assert sorted(mydict[i]) == sorted(lists[start:end])
Example #21
def test_hdf_globbing():
    pytest.importorskip('tables')
    df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
                       'y': [1, 2, 3, 4]}, index=[1., 2., 3., 4.])

    with tmpdir() as tdir:
        df.to_hdf(os.path.join(tdir, 'one.h5'), '/foo/data', format='table')
        df.to_hdf(os.path.join(tdir, 'two.h5'), '/bar/data', format='table')
        df.to_hdf(os.path.join(tdir, 'two.h5'), '/foo/data', format='table')

        with dask.set_options(get=dask.get):
            res = dd.read_hdf(os.path.join(tdir, 'one.h5'), '/*/data',
                              chunksize=2)
            assert res.npartitions == 2
            tm.assert_frame_equal(res.compute(), df)

            res = dd.read_hdf(os.path.join(tdir, 'one.h5'), '/*/data',
                              chunksize=2, start=1, stop=3)
            expected = pd.read_hdf(os.path.join(tdir, 'one.h5'), '/foo/data',
                                   start=1, stop=3)
            tm.assert_frame_equal(res.compute(), expected)

            res = dd.read_hdf(os.path.join(tdir, 'two.h5'), '/*/data', chunksize=2)
            assert res.npartitions == 2 + 2
            tm.assert_frame_equal(res.compute(), pd.concat([df] * 2))

            res = dd.read_hdf(os.path.join(tdir, '*.h5'), '/foo/data', chunksize=2)
            assert res.npartitions == 2 + 2
            tm.assert_frame_equal(res.compute(), pd.concat([df] * 2))

            res = dd.read_hdf(os.path.join(tdir, '*.h5'), '/*/data', chunksize=2)
            assert res.npartitions == 2 + 2 + 2
            tm.assert_frame_equal(res.compute(), pd.concat([df] * 3))
Example #22
def test_partition(ctx_factory):
    from pytest import importorskip
    importorskip("mako")

    context = ctx_factory()
    queue = cl.CommandQueue(context)

    from pyopencl.clrandom import rand as clrand
    for n in scan_test_counts:
        print("part", n)

        a_dev = clrand(queue, (n,), dtype=np.int32, a=0, b=1000)
        a = a_dev.get()

        crit = a_dev.dtype.type(300)
        true_host = a[a > crit]
        false_host = a[a <= crit]

        from pyopencl.algorithm import partition
        true_dev, false_dev, count_true_dev, evt = partition(
                a_dev, "ary[i] > myval", [("myval", crit)])

        count_true_dev = count_true_dev.get()

        assert (true_dev.get()[:count_true_dev] == true_host).all()
        assert (false_dev.get()[:n-count_true_dev] == false_host).all()
Example #23
def test_dot(ctx_factory):
    from pytest import importorskip
    importorskip("mako")

    context = ctx_factory()
    queue = cl.CommandQueue(context)

    dtypes = [np.float32, np.complex64]
    if has_double_support(context.devices[0]):
        dtypes.extend([np.float64, np.complex128])

    for a_dtype in dtypes:
        for b_dtype in dtypes:
            print(a_dtype, b_dtype)
            a_gpu = general_clrand(queue, (200000,), a_dtype)
            a = a_gpu.get()
            b_gpu = general_clrand(queue, (200000,), b_dtype)
            b = b_gpu.get()

            dot_ab = np.dot(a, b)
            dot_ab_gpu = cl_array.dot(a_gpu, b_gpu).get()

            assert abs(dot_ab_gpu - dot_ab) / abs(dot_ab) < 1e-4

            vdot_ab = np.vdot(a, b)
            vdot_ab_gpu = cl_array.vdot(a_gpu, b_gpu).get()

            assert abs(vdot_ab_gpu - vdot_ab) / abs(vdot_ab) < 1e-4
Example #24
def test_subsolvers_L2(rng, logger):
    pytest.importorskip('scipy', minversion='0.11')  # version for lsmr

    ref_solver = cholesky
    solvers = [conjgrad, block_conjgrad, conjgrad_scipy, lsmr_scipy]

    A, B = get_system(m=2000, n=1000, d=10, rng=rng)
    sigma = 0.1 * A.max()

    with Timer() as t0:
        x0, _ = ref_solver(A, B, sigma)

    xs = np.zeros((len(solvers),) + x0.shape)
    for i, solver in enumerate(solvers):
        with Timer() as t:
            xs[i], info = solver(A, B, sigma)
        logger.info('solver: %s', solver.__name__)
        logger.info('duration: %0.3f', t.duration)
        logger.info('duration relative to reference solver: %0.2f',
                    (t.duration / t0.duration))
        logger.info('info: %s', info)

    for solver, x in zip(solvers, xs):
        assert np.allclose(x0, x, atol=1e-5, rtol=1e-3), (
            "Solver %s" % solver.__name__)
Example #25
    def test_resources_provider_for_loader(self, testdir):
        """
        Attempts to load resources from a package should succeed normally,
        even when the AssertionRewriteHook is used to load the modules.

        See #366 for details.
        """
        pytest.importorskip("pkg_resources")

        testdir.mkpydir('testpkg')
        contents = {
            'testpkg/test_pkg': """
                import pkg_resources

                import pytest
                from _pytest.assertion.rewrite import AssertionRewritingHook

                def test_load_resource():
                    assert isinstance(__loader__, AssertionRewritingHook)
                    res = pkg_resources.resource_string(__name__, 'resource.txt')
                    res = res.decode('ascii')
                    assert res == 'Load me please.'
                """,
        }
        testdir.makepyfile(**contents)
        testdir.maketxtfile(**{'testpkg/resource': "Load me please."})

        result = testdir.runpytest_subprocess()
        result.assert_outcomes(passed=1)
Example #26
    def test_boxed_option_default(self, testdir):
        tmpdir = testdir.tmpdir.ensure("subdir", dir=1)
        config = testdir.parseconfig()
        assert not config.option.boxed
        pytest.importorskip("execnet")
        config = testdir.parseconfig('-d', tmpdir)
        assert not config.option.boxed
Example #27
def test_init(backend, qapp, tmpdir, monkeypatch, cleanup_init):
    if backend == usertypes.Backend.QtWebKit:
        pytest.importorskip('PyQt5.QtWebKitWidgets')
    else:
        assert backend == usertypes.Backend.QtWebEngine

    monkeypatch.setattr(history.objects, 'backend', backend)
    history.init(qapp)
    hist = objreg.get('web-history')
    assert hist.parent() is qapp

    try:
        from PyQt5.QtWebKit import QWebHistoryInterface
    except ImportError:
        QWebHistoryInterface = None

    if backend == usertypes.Backend.QtWebKit:
        default_interface = QWebHistoryInterface.defaultInterface()
        assert default_interface._history is hist
    else:
        assert backend == usertypes.Backend.QtWebEngine
        if QWebHistoryInterface is None:
            default_interface = None
        else:
            default_interface = QWebHistoryInterface.defaultInterface()
        # For this to work, nothing can ever have called setDefaultInterface
        # before (so we need to test webengine before webkit)
        assert default_interface is None
Example #28
def test_struct_reduce(ctx_factory):
    pytest.importorskip("mako")

    context = ctx_factory()
    queue = cl.CommandQueue(context)

    dev, = context.devices
    if (dev.vendor == "NVIDIA" and dev.platform.vendor == "Apple"
            and dev.driver_version == "8.12.47 310.40.00.05f01"):
        pytest.skip("causes a compiler hang on Apple/Nv GPU")

    mmc_dtype, mmc_c_decl = make_mmc_dtype(context.devices[0])

    preamble = mmc_c_decl + r"""//CL//

    minmax_collector mmc_neutral()
    {
        // FIXME: needs infinity literal in real use, ok here
        minmax_collector result;
        result.cur_min = 1<<30;
        result.cur_max = -(1<<30);
        return result;
    }

    minmax_collector mmc_from_scalar(float x)
    {
        minmax_collector result;
        result.cur_min = x;
        result.cur_max = x;
        return result;
    }

    minmax_collector agg_mmc(minmax_collector a, minmax_collector b)
    {
        minmax_collector result = a;
        if (b.cur_min < result.cur_min)
            result.cur_min = b.cur_min;
        if (b.cur_max > result.cur_max)
            result.cur_max = b.cur_max;
        return result;
    }

    """

    from pyopencl.clrandom import rand as clrand
    a_gpu = clrand(queue, (20000,), dtype=np.int32, a=0, b=10**6)
    a = a_gpu.get()

    from pyopencl.reduction import ReductionKernel
    red = ReductionKernel(context, mmc_dtype,
            neutral="mmc_neutral()",
            reduce_expr="agg_mmc(a, b)", map_expr="mmc_from_scalar(x[i])",
            arguments="__global int *x", preamble=preamble)

    minmax = red(a_gpu).get()
    #print minmax["cur_min"], minmax["cur_max"]
    #print np.min(a), np.max(a)

    assert abs(minmax["cur_min"] - np.min(a)) < 1e-5
    assert abs(minmax["cur_max"] - np.max(a)) < 1e-5
Example #29
def dont_test_dataframes(s, a):  # slow
    pytest.importorskip('pandas')
    n = 3000000
    fn = '/tmp/test/file.csv'
    with make_hdfs() as hdfs:
        data = (b'name,amount,id\r\n' +
                b'Alice,100,1\r\nBob,200,2\r\n' * n)
        with hdfs.open(fn, 'w') as f:
            f.write(data)

        e = Executor((s.ip, s.port), start=False)
        yield e._start()

        futures = read_bytes(fn, hdfs=hdfs, delimiter=b'\r\n')
        assert len(futures) > 1

        def load(b, **kwargs):
            assert b
            from io import BytesIO
            import pandas as pd
            bio = BytesIO(b)
            return pd.read_csv(bio, **kwargs)

        dfs = e.map(load, futures, names=['name', 'amount', 'id'], skiprows=1)
        dfs2 = yield e._gather(dfs)
        assert sum(map(len, dfs2)) == n * 2 - 1
Example #30
def test_index_preservation(ctx_factory):
    from pytest import importorskip
    importorskip("mako")

    context = ctx_factory()
    queue = cl.CommandQueue(context)

    from pyopencl.scan import GenericScanKernel, GenericDebugScanKernel
    classes = [GenericScanKernel]

    dev = context.devices[0]
    if dev.type & cl.device_type.CPU:
        classes.append(GenericDebugScanKernel)

    for cls in classes:
        for n in scan_test_counts:
            knl = cls(
                    context, np.int32,
                    arguments="__global int *out",
                    input_expr="i",
                    scan_expr="b", neutral="0",
                    output_statement="""
                        out[i] = item;
                        """)

            out = cl_array.empty(queue, n, dtype=np.int32)
            knl(out)

            assert (out.get() == np.arange(n)).all()
            from gc import collect
            collect()
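
The trick in Example #30 is that `scan_expr="b"` keeps the right-hand operand at every combine step, so an inclusive scan over `input_expr="i"` leaves each element equal to its own index. A pure-Python model of that scan (an illustration, not pyopencl code):

from itertools import accumulate

n = 8
inclusive = list(accumulate(range(n), lambda a, b: b))  # combine keeps b
assert inclusive == list(range(n))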
Example #31
"""Test trophic levels, trophic differences and trophic coherence
"""
import pytest

np = pytest.importorskip("numpy")

import networkx as nx
from networkx.testing import almost_equal


def test_trophic_levels():
    """Trivial example"""
    G = nx.DiGraph()
    G.add_edge("a", "b")
    G.add_edge("b", "c")

    d = nx.trophic_levels(G)
    assert d == {"a": 1, "b": 2, "c": 3}


def test_trophic_levels_levine():
    """Example from Figure 5 in Stephen Levine (1980) J. theor. Biol. 83,
    195-207
    """
    S = nx.DiGraph()
    S.add_edge(1, 2, weight=1.0)
    S.add_edge(1, 3, weight=0.2)
    S.add_edge(1, 4, weight=0.8)
    S.add_edge(2, 3, weight=0.2)
    S.add_edge(2, 5, weight=0.3)
    S.add_edge(4, 3, weight=0.6)
Example #32
def visualization():
    """"""
    return pytest.importorskip("rigid_body_motion.ros.visualization")
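
Example #32 is presumably decorated with `@pytest.fixture` in its original module (the extraction has dropped the decorator); consuming tests then receive the imported module or get skipped. A sketch of the pattern:

import pytest

@pytest.fixture
def visualization():
    return pytest.importorskip("rigid_body_motion.ros.visualization")

def test_uses_visualization(visualization):
    # skipped entirely when the ROS extras are unavailable
    assert visualization is not None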
Example #33
def ReferenceFrameTransformBroadcaster():
    """"""
    tf = pytest.importorskip("rigid_body_motion.ros.transformer")
    return tf.ReferenceFrameTransformBroadcaster
Example #34
import pytest
pytest.importorskip("firedrake")

from firedrake import *
from revolve_adjoint import *

from numpy.random import rand


def test_assign_linear_combination():
    tr = TimestepRegister()
    mesh = IntervalMesh(10, 0, 1)
    V = FunctionSpace(mesh, "CG", 1)

    x, = SpatialCoordinate(mesh)
    f = interpolate(x, V)
    g = interpolate(sin(x), V)
    u = Function(V)

    u.assign(3 * f + g)
    tr.mark_end_of_timestep()

    J = assemble(u**2 * dx)
    tr.mark_end_of_timestep()
    rf = RevolveReducedFunctional(J, Control(f), tr, 1)

    h = Function(V)
    h.vector()[:] = rand(V.dim())
    assert taylor_test(rf, f, h) > 1.9

Example #35
from dask.base import (
    get_scheduler,
)
from dask.core import literal
from dask.delayed import Delayed
from dask.utils import tmpdir, tmpfile, ignoring
from dask.utils_test import inc, dec
from dask.diagnostics import Profiler


def import_or_none(path):
    with ignoring(BaseException):
        return pytest.importorskip(path)
    return None


tz = pytest.importorskip("tlz")
da = import_or_none("dask.array")
db = import_or_none("dask.bag")
dd = import_or_none("dask.dataframe")
np = import_or_none("numpy")
sp = import_or_none("scipy.sparse")
pd = import_or_none("pandas")


def f1(a, b, c=1):
    pass


def f2(a, b=1, c=2):
    pass
Example #36
from __future__ import absolute_import, division, print_function, unicode_literals

import pytest
xr = pytest.importorskip('xarray')  # noqa

from cf2cdm import cfunits


def test_are_convertible():
    assert cfunits.are_convertible('m', 'm')
    assert cfunits.are_convertible('hPa', 'Pa')
    assert not cfunits.are_convertible('m', 'Pa')
Example #37
import re
from inspect import signature
import pkgutil
import inspect
import importlib
from typing import Optional

import pytest
from sklearn.utils import all_estimators
import sklearn

numpydoc_validation = pytest.importorskip("numpydoc.validate")

FUNCTION_DOCSTRING_IGNORE_LIST = [
    "sklearn.covariance._shrunk_covariance.ledoit_wolf",
    "sklearn.covariance._shrunk_covariance.ledoit_wolf_shrinkage",
    "sklearn.datasets._base.load_breast_cancer",
    "sklearn.datasets._base.load_digits",
    "sklearn.datasets._base.load_linnerud",
    "sklearn.datasets._base.load_sample_image",
    "sklearn.datasets._base.load_wine",
    "sklearn.datasets._california_housing.fetch_california_housing",
    "sklearn.datasets._covtype.fetch_covtype",
    "sklearn.datasets._kddcup99.fetch_kddcup99",
    "sklearn.datasets._lfw.fetch_lfw_pairs",
    "sklearn.datasets._lfw.fetch_lfw_people",
    "sklearn.datasets._olivetti_faces.fetch_olivetti_faces",
    "sklearn.datasets._openml.fetch_openml",
    "sklearn.datasets._samples_generator.make_biclusters",
    "sklearn.datasets._samples_generator.make_blobs",
    "sklearn.datasets._samples_generator.make_checkerboard",
Example #38
import os
import pytest
import subprocess
import sys
import time

from dask.bytes.core import open_files
from dask.compatibility import PY2
from dask.utils import tmpdir

files = ["a", "b"]
requests = pytest.importorskip("requests")


@pytest.fixture(scope="module")
def dir_server():
    with tmpdir() as d:
        for fn in files:
            with open(os.path.join(d, fn), "wb") as f:
                f.write(b"a" * 10000)

        if PY2:
            cmd = [sys.executable, "-m", "SimpleHTTPServer", "8999"]
        else:
            cmd = [sys.executable, "-m", "http.server", "8999"]
        p = subprocess.Popen(cmd, cwd=d)
        timeout = 10
        while True:
            try:
                requests.get("http://localhost:8999")
                break
Example #39
from decimal import Decimal

import pytest

from djmoney.money import Money

from ..testapp.models import ModelWithVanillaMoneyField, NullMoneyFieldModel, ValidatedMoneyModel

pytestmark = pytest.mark.django_db
serializers = pytest.importorskip("rest_framework.serializers")
fields = pytest.importorskip("rest_framework.fields")


class TestMoneyField:
    def get_serializer(self,
                       model_class,
                       field_name=None,
                       instance=None,
                       data=fields.empty,
                       fields_="__all__",
                       field_kwargs=None):
        class MetaSerializer(serializers.SerializerMetaclass):
            def __new__(cls, name, bases, attrs):
                from djmoney.contrib.django_rest_framework import MoneyField

                if field_name is not None and field_kwargs is not None:
                    attrs[field_name] = MoneyField(max_digits=10,
                                                   decimal_places=2,
                                                   **field_kwargs)
                return super().__new__(cls, name, bases, attrs)
Example #40
    def modify_solution(cls):
        # Skip this class if dask not available
        pytest.importorskip('dask.array')

        cls.solution['sst'] = cls.solution['sst'].chunk({'time': 1})
Example #41
import os

import dask
import pytest
from dask import array as da
from distributed.protocol import (
    deserialize,
    deserialize_bytes,
    serialize,
    serialize_bytelist,
)
from distributed.protocol.pickle import HIGHEST_PROTOCOL

from dask_cuda.device_host_file import (
    DeviceHostFile,
    device_to_host,
    host_to_device,
)

cupy = pytest.importorskip("cupy")


def test_device_host_file_config(tmp_path):
    dhf_disk_path = str(tmp_path / "dask-worker-space" / "storage")
    with dask.config.set(temporary_directory=str(tmp_path)):
        dhf = DeviceHostFile()
        assert os.path.exists(dhf_disk_path)
        assert dhf.disk_func_path == dhf_disk_path


@pytest.mark.parametrize("num_host_arrays", [1, 10, 100])
@pytest.mark.parametrize("num_device_arrays", [1, 10, 100])
@pytest.mark.parametrize("array_size_range", [(1, 1000), (100, 100),
                                              (1000, 1000)])
def test_device_host_file_short(tmp_path, num_device_arrays, num_host_arrays,
Example #42
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import pytest

from io import StringIO

from units.compat.mock import MagicMock
from assible.errors import AssibleConnectionFailure
from assible.module_utils._text import to_bytes
from assible.playbook.play_context import PlayContext
from assible.plugins.loader import connection_loader
from assible.plugins.connection import winrm

pytest.importorskip("winrm")


class TestConnectionWinRM(object):

    OPTIONS_DATA = (
        # default options
        (
            {'_extras': {}},
            {},
            {
                '_kerb_managed': False,
                '_kinit_cmd': 'kinit',
                '_winrm_connection_timeout': None,
                '_winrm_host': 'inventory_hostname',
                '_winrm_kwargs': {'username': None, 'password': None},
Example #43
import copy

import numpy as np
import pytest
import qcelemental as qcel
from qcelemental.testing import compare_recursive

import qcengine as qcng

# qcenginerecords not required, skips whole file
qcer = pytest.importorskip("qcenginerecords")

# Prep globals
molpro_info = qcer.get_info('molpro')


@pytest.mark.parametrize('test_case', molpro_info.list_test_cases())
def test_molpro_output_parser(test_case):

    # Get output file data
    data = molpro_info.get_test_data(test_case)
    inp = qcel.models.ResultInput.parse_raw(data["input.json"])

    output = qcng.get_program('molpro').parse_output(data, inp).dict()
    output.pop("provenance", None)

    output_ref = qcel.models.Result.parse_raw(data["output.json"]).dict()
    output_ref.pop("provenance", None)

    # TODO add `skip` to compare_recursive
    check = compare_recursive(output_ref, output)
Example #44
import sys
import unittest

import pytest

wptserve = pytest.importorskip("wptserve")
from .base import TestUsingServer


class TestResponseSetCookie(TestUsingServer):
    @pytest.mark.xfail(sys.version_info >= (3, ),
                       reason="wptserve only works on Py2")
    def test_name_value(self):
        @wptserve.handlers.handler
        def handler(request, response):
            response.set_cookie("name", "value")
            return "Test"

        route = ("GET", "/test/name_value", handler)
        self.server.router.register(*route)
        resp = self.request(route[1])

        self.assertEqual(resp.info()["Set-Cookie"], "name=value; Path=/")

    @pytest.mark.xfail(sys.version_info >= (3, ),
                       reason="wptserve only works on Py2")
    def test_unset(self):
        @wptserve.handlers.handler
        def handler(request, response):
            response.set_cookie("name", "value")
            response.unset_cookie("name")
Example #45
from sympy import cos
import numpy as np
from cached_property import cached_property

import pytest  # noqa

yask = pytest.importorskip('yask')  # Run only if YASK is available

from conftest import skipif  # noqa
from devito import (Eq, Grid, Dimension, ConditionalDimension, Operator, Constant,
                    Function, TimeFunction, SparseTimeFunction, configuration,
                    clear_cache, switchconfig)  # noqa
from devito.ir.iet import FindNodes, ForeignExpression, retrieve_iteration_tree  # noqa
from examples.seismic.acoustic import iso_stencil  # noqa
from examples.seismic import demo_model, TimeAxis, RickerSource, Receiver  # noqa

pytestmark = skipif('noyask')


def setup_module(module):
    """Get rid of any YASK modules generated and JIT-compiled in previous runs.
    This is not strictly necessary for the tests, but it helps in keeping the
    lib directory clean, which may be helpful for offline analysis.
    """
    from devito.yask.wrappers import contexts  # noqa
    contexts.dump()


@pytest.fixture(autouse=True)
def reset_isa():
    """Force back to NO-SIMD after each test, as some tests may optionally
Example #46
from __future__ import (absolute_import, division, print_function,
                        unicode_literals)

import os
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pytest

import dask
import dask.multiprocessing
from dask.utils import tmpdir, tmpfile
import dask.dataframe as dd
from dask.dataframe.io.parquet import read_parquet, to_parquet
from dask.dataframe.utils import assert_eq

fastparquet = pytest.importorskip('fastparquet')

try:
    import pyarrow.parquet as pyarrow  # noqa
except ImportError:
    pyarrow = False

df = pd.DataFrame({
    'x': [6, 2, 3, 4, 5],
    'y': [1.0, 2.0, 1.0, 2.0, 1.0]
},
                  index=pd.Index([10, 20, 30, 40, 50], name='myindex'))


@pytest.fixture
def fn(tmpdir):
Example #47
from __future__ import print_function, division, absolute_import

from io import BytesIO
import os
import gzip
from time import sleep

import pytest
pd = pytest.importorskip('pandas')
dd = pytest.importorskip('dask.dataframe')

from toolz import partition_all, valmap

import pandas.util.testing as tm

import dask
import dask.dataframe as dd
from dask.dataframe.io.csv import (text_blocks_to_pandas, pandas_read_text,
                                   auto_blocksize)
from dask.dataframe.utils import assert_eq, has_known_categories, PANDAS_VERSION
from dask.bytes.core import read_bytes
from dask.utils import filetexts, filetext, tmpfile, tmpdir
from dask.bytes.compression import compress, files as cfiles, seekable_files
fmt_bs = [(fmt, None) for fmt in cfiles] + [(fmt, 10)
                                            for fmt in seekable_files]


def normalize_text(s):
    return '\n'.join(map(str.strip, s.strip().split('\n')))

Example #48
"""
.. codeauthor:: Tsuyoshi Hombashi <*****@*****.**>
"""

from __future__ import unicode_literals

import itertools
from datetime import date, datetime

import pytest
import six
from dateutil.tz import tzoffset
from termcolor import colored
from typepy import DateTime, StrictLevel, Typecode

dateutil = pytest.importorskip("dateutil", minversion="2.7")

class_under_test = DateTime
nan = float("nan")
inf = float("inf")


class Test_DateTime_is_type(object):
    @pytest.mark.parametrize(
        ["value", "strict_level", "expected"],
        list(
            itertools.product(
                [
                    datetime(2017, 3, 22, 10, 0, tzinfo=tzoffset(None, 32400)),
                    date(2017, 3, 22)
                ],
Example #49
def test_db2(name):
    if name == 'postgresql':
        pytest.importorskip('psycopg2')
        if os.environ.get('POSTGRES_DB'):  # gitlab-ci
            name = 'postgresql://*****:*****@postgres:5432/testase'
        else:
            name = os.environ.get('ASE_TEST_POSTGRES_URL')
            if name is None:
                return
    elif name == 'mysql':
        pytest.importorskip('pymysql')
        if os.environ.get('CI_PROJECT_DIR'):  # gitlab-ci
            name = 'mysql://*****:*****@mysql:3306/testase_mysql'
        else:
            name = os.environ.get('MYSQL_DB_URL')

        if name is None:
            return
    elif name == 'mariadb':
        pytest.importorskip('pymysql')
        if os.environ.get('CI_PROJECT_DIR'):  # gitlab-ci
            name = 'mariadb://*****:*****@mariadb:3306/testase_mysql'
        else:
            name = os.environ.get('MYSQL_DB_URL')

        if name is None:
            return

    c = connect(name)
    print(name, c)

    if 'postgres' in name or 'mysql' in name or 'mariadb' in name:
        c.delete([row.id for row in c.select()])

    id = c.reserve(abc=7)
    c.delete([d.id for d in c.select(abc=7)])
    id = c.reserve(abc=7)
    assert c[id].abc == 7

    a = c.get_atoms(id)
    c.write(Atoms())
    ch4 = molecule('CH4', calculator=EMT())
    ch4.constraints = [FixAtoms(indices=[1]), FixBondLength(0, 2)]
    f1 = ch4.get_forces()
    print(f1)

    c.delete([d.id for d in c.select(C=1)])
    chi = np.array([1 + 0.5j, 0.5])
    if 'db' in name:
        kvp = {
            'external_tables': {
                'blabla': {
                    'a': 1,
                    'b': 2,
                    'c': 3
                },
                'lala': {
                    'a': 0.01,
                    'b': 0.02,
                    'c': 0.0
                }
            }
        }

    else:
        kvp = {'a': 1}

    id = c.write(ch4,
                 key_value_pairs=kvp,
                 data={
                     '1-butyne': 'bla-bla',
                     'chi': chi
                 })

    row = c.get(id)
    print(row.data['1-butyne'], row.data.chi)
    assert (row.data.chi == chi).all(), (row.data.chi, chi)
    print(row)

    assert len(c.get_atoms(C=1).constraints) == 2

    f2 = c.get(C=1).forces
    assert abs(f2.sum(0)).max() < 1e-14
    f3 = c.get_atoms(C=1).get_forces()
    assert abs(f1 - f3).max() < 1e-14

    a = read(name, index='id={}'.format(id))[0]
    f4 = a.get_forces()
    assert abs(f1 - f4).max() < 1e-14

    with pytest.raises(ValueError):
        c.update(id, abc={'a': 42})

    c.update(id, grr='hmm')
    row = c.get(C=1)
    assert row.id == id
    assert (row.data.chi == chi).all()

    for row in c.select(include_data=False):
        assert len(row.data) == 0

    with pytest.raises(ValueError):
        c.write(ch4, foo=['bar', 2])  # not int, bool, float or str

    with pytest.raises(ValueError):
        c.write(Atoms(), pi='3.14')  # number as a string

    with pytest.raises(ValueError):
        c.write(Atoms(), fmax=0.0)  # reserved word

    with pytest.raises(ValueError):
        c.write(Atoms(), S=42)  # chemical symbol as key

    id = c.write(Atoms(),
                 b=np.bool_(True),
                 i=np.int64(42),
                 n=np.nan,
                 x=np.inf,
                 s='NaN2',
                 A=42)
    row = c[id]
    assert isinstance(row.b, bool)
    assert isinstance(row.i, int)
    assert np.isnan(row.n)
    assert np.isinf(row.x)

    # Make sure deleting a single key works:
    id = c.write(Atoms(), key=7)
    c.update(id, delete_keys=['key'])
    assert 'key' not in c[id]

    e = [row.get('energy') for row in c.select(sort='energy')]
    assert len(e) == 5 and abs(e[0] - 1.991) < 0.0005

    # Test the offset keyword
    ids = [row.get('id') for row in c.select()]
    offset = 2
    assert next(c.select(offset=offset)).id == ids[offset]
Example #50
    def test_write_trajectory_netCDF4(self, universe, outfile):
        pytest.importorskip("netCDF4")
        return self._test_write_trajectory(universe, outfile)
Example #51
def test_tracers_as_arrays():
    numpy = pytest.importorskip("numpy")
Example #52
"""BNNS pattern detection check"""

import pytest

import tvm
from tvm import relay
from tvm.relay import transform
from tvm.contrib import utils, graph_executor
from tvm.contrib.download import download_testdata
from tvm.relay.op.contrib.bnns import partition_for_bnns

import numpy as np

pytest.importorskip("onnx")

bnns_is_absent = tvm.get_global_func("relay.ext.bnns", True) is None

TARGET = "llvm"
INPUT_SHAPE = [1, 3, 224, 224]

BASE_MODEL_URL = "https://github.com/onnx/models/raw/master/"
MODEL_URL_COLLECTION = {
    "BERT":
    "text/machine_comprehension/bert-squad/model/bertsquad-10.onnx",
    "MobileNet-v2":
    "vision/classification/mobilenet/model/mobilenetv2-7.onnx",
    "ResNet50-v1":
    "vision/classification/resnet/model/resnet50-v1-7.onnx",
    "ResNet50-v2":
Example #53
def test_imread_pil_uint16():
    pytest.importorskip("PIL")
    img = plt.imread(os.path.join(os.path.dirname(__file__),
                     'baseline_images', 'test_image', 'uint16.tif'))
    assert img.dtype == np.uint16
    assert np.sum(img) == 134184960
Example #54
def test_tracers_as_arrays_114():
    numpy = pytest.importorskip("numpy", minversion="1.14")
Example #55

import pytest  # isort:skip

pytest.importorskip("cassandra")  # isort:skip

import os

from cassandra.cluster import Cluster
from cassandra.query import SimpleStatement

from elasticapm.conf.constants import TRANSACTION
from elasticapm.instrumentation.packages.dbapi2 import extract_signature

pytestmark = pytest.mark.cassandra


@pytest.fixture()
def cassandra_cluster():
    cluster = Cluster([os.environ.get("CASSANDRA_HOST", "localhost")])
Example #56
def test_imread_fspath():
    pytest.importorskip("PIL")
    img = plt.imread(
        Path(__file__).parent / 'baseline_images/test_image/uint16.tif')
    assert img.dtype == np.uint16
    assert np.sum(img) == 134184960
Example #57
"""Unit tests for the fastText backend in Annif"""

import logging
import pytest
import annif.backend
import annif.corpus
from annif.exception import NotSupportedException

fasttext = pytest.importorskip("annif.backend.fasttext")


def test_fasttext_default_params(project):
    fasttext_type = annif.backend.get_backend("fasttext")
    fasttext = fasttext_type(
        backend_id='fasttext',
        config_params={},
        project=project)

    expected_default_params = {
        'limit': 100,
        'chunksize': 1,
        'dim': 100,
        'lr': 0.25,
        'epoch': 5,
        'loss': 'hs',
    }
    actual_params = fasttext.params
    for param, val in expected_default_params.items():
        assert param in actual_params and actual_params[param] == val

Example #58
import pytest

openshiftdynamic = pytest.importorskip("openshift.dynamic")

from ansible_collections.notmintest.not_a_real_collection.tests.unit.modules.utils import set_module_args
from ansible_collections.notmintest.not_a_real_collection.tests.unit.utils.kubevirt_fixtures import base_fixture, RESOURCE_DEFAULT_ARGS, AnsibleExitJson

from ansible_collections.notmintest.not_a_real_collection.plugins.module_utils.k8s.raw import KubernetesRawModule
from ansible_collections.notmintest.not_a_real_collection.plugins.modules import kubevirt_rs as mymodule

KIND = 'VirtualMachineInstanceReplicaSet'


@pytest.mark.usefixtures("base_fixture")
@pytest.mark.parametrize("_replicas, _changed", (
    (1, True),
    (3, True),
    (2, False),
    (5, True),
))
def test_scale_rs_nowait(_replicas, _changed):
    _name = 'test-rs'
    # Desired state:
    args = dict(name=_name, namespace='vms', replicas=_replicas, wait=False)
    set_module_args(args)

    # Mock pre-change state:
    resource_args = dict(kind=KIND, **RESOURCE_DEFAULT_ARGS)
    mymodule.KubeVirtVMIRS.find_supported_resource.return_value = openshiftdynamic.Resource(
        **resource_args)
    res_inst = openshiftdynamic.ResourceInstance(
Example #59
def test_non_splittable_reductions():
    np = pytest.importorskip('numpy')
    data = list(range(100))
    c = db.from_sequence(data, npartitions=10)
    assert c.mean().compute() == np.mean(data)
    assert c.std().compute(get=dask.get) == np.std(data)
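
Example #59 exercises dask.bag's mean/std, which cannot be computed by naively averaging per-partition results; they can still be combined from per-partition moments. A sketch of the moment-combination idea (not dask's internal code):

import math

def combined_std(chunks):
    # combine zeroth, first and second moments across chunks
    n = sum(len(c) for c in chunks)
    s1 = sum(sum(c) for c in chunks)
    s2 = sum(sum(x * x for x in c) for c in chunks)
    return math.sqrt(s2 / n - (s1 / n) ** 2)

data = list(range(100))
chunks = [data[i:i + 10] for i in range(0, 100, 10)]
mu = sum(data) / len(data)
direct = math.sqrt(sum((x - mu) ** 2 for x in data) / len(data))
assert abs(combined_std(chunks) - direct) < 1e-9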
Example #60
import math
import pytest
import operator
import pandas as pd
import pandas.util.testing as tm

from operator import methodcaller
from datetime import date, datetime

import ibis
import ibis.expr.types as ir
from ibis import literal as L

pytest.importorskip('clickhouse_driver')
pytestmark = pytest.mark.clickhouse


@pytest.mark.parametrize(('to_type', 'expected'),
                         [('int8', 'CAST(`double_col` AS Int8)'),
                          ('int16', 'CAST(`double_col` AS Int16)'),
                          ('float', 'CAST(`double_col` AS Float32)'),
                          ('double', '`double_col`')])
def test_cast_double_col(alltypes, translate, to_type, expected):
    expr = alltypes.double_col.cast(to_type)
    assert translate(expr) == expected


@pytest.mark.parametrize(('to_type', 'expected'),
                         [('int8', 'CAST(`string_col` AS Int8)'),
                          ('int16', 'CAST(`string_col` AS Int16)'),
                          ('string', '`string_col`'),