Example 1
class TestSparse:
    basic_options = FalkonOptions(debug=True,
                                  compute_arch_speed=False,
                                  max_cpu_mem=max_mem_sparse,
                                  max_gpu_mem=max_mem_sparse)
    # sparse_dim and sparse_density are chosen so that the sparse matrices A
    # and B have roughly n and m non-zero entries, respectively.
    sparse_dim = 10_000
    sparse_density = 1e-4

    @pytest.fixture(scope="class")
    def s_A(self):
        A = gen_sparse_matrix(n,
                              self.sparse_dim,
                              np.float64,
                              density=self.sparse_density,
                              seed=14)
        Ad = torch.from_numpy(A.to_scipy().todense())
        return A, Ad

    @pytest.fixture(scope="class")
    def s_B(self):
        B = gen_sparse_matrix(m,
                              self.sparse_dim,
                              np.float64,
                              density=self.sparse_density,
                              seed=14)
        Bd = torch.from_numpy(B.to_scipy().todense())
        return B, Bd

    @pytest.fixture(scope="class")
    def s_gram(self, kernel, s_A, s_B):
        opt = FalkonOptions(use_cpu=True, compute_arch_speed=False)
        return kernel(s_A[1], s_B[1], opt=opt)  # n x m kernel

    @pytest.fixture(scope="class")
    def s_expected_fmmv(self, s_gram, v):
        return s_gram @ v

    @pytest.fixture(scope="class")
    def s_e_dfmmv1(self, s_gram, v, w):
        return s_gram.T @ (s_gram @ v + w)

    @pytest.fixture(scope="class")
    def s_e_dfmmv2(self, s_gram, v):
        return s_gram.T @ (s_gram @ v)

    @pytest.fixture(scope="class")
    def s_e_dfmmv3(self, s_gram, w):
        return s_gram.T @ w

    @pytest.fixture(scope="class")
    def s_e_dfmmv(self, request):
        return request.getfixturevalue(request.param)

    @pytest.mark.parametrize("cpu", cpu_params, ids=["cpu", "gpu"])
    @pytest.mark.parametrize(
        "Adt,Bdt,vo,vdt", [
            (np.float32, np.float32, "F", np.float32),
            (np.float32, np.float32, "C", np.float32),
            (np.float64, np.float64, "F", np.float64),
            (np.float64, np.float64, "C", np.float64),
        ],
        ids=["A32-B32-vF32", "A32-B32-vC32", "A64-B64-vF64", "A64-B64-vC64"])
    def test_fmmv(self, s_A, s_B, v, Adt, Bdt, vo, vdt, kernel,
                  s_expected_fmmv, cpu):
        A = fix_sparse_mat(s_A[0], dtype=Adt)
        B = fix_sparse_mat(s_B[0], dtype=Bdt)
        v = fix_mat(v, dtype=vdt, order=vo, copy=True)

        opt = dataclasses.replace(self.basic_options, use_cpu=cpu)
        rtol = choose_on_dtype(A.dtype)

        # Test normal
        _run_fmmv_test(kernel.mmv,
                       s_expected_fmmv, (A, B, v),
                       out=None,
                       rtol=rtol,
                       opt=opt)
        # Test with out
        out = torch.empty(A.shape[0], v.shape[1], dtype=A.dtype)
        _run_fmmv_test(kernel.mmv,
                       s_expected_fmmv, (A, B, v),
                       out=out,
                       rtol=rtol,
                       opt=opt)

    @pytest.mark.skipif(not decide_cuda(), reason="No GPU found.")
    @pytest.mark.parametrize("Adt,Bdt,vo,vdt",
                             [(np.float32, np.float32, "F", np.float32)],
                             ids=["A32-B32-vF32"])
    @pytest.mark.xfail(reason="Squared-norm not implemented for CUDA tensors",
                       run=True)
    def test_fmmv_input_device(self, s_A, s_B, v, Adt, Bdt, vo, vdt, kernel,
                               s_expected_fmmv):
        input_device = "cuda:0"
        A = fix_sparse_mat(s_A[0], dtype=Adt, device=input_device)
        B = fix_sparse_mat(s_B[0], dtype=Bdt, device=input_device)
        v = fix_mat(v, dtype=vdt, order=vo, copy=True, device=input_device)

        opt = dataclasses.replace(self.basic_options, use_cpu=False)
        rtol = choose_on_dtype(A.dtype)

        # Test normal
        _run_fmmv_test(kernel.mmv,
                       s_expected_fmmv, (A, B, v),
                       out=None,
                       rtol=rtol,
                       opt=opt)
        # Test with out
        out = torch.empty(A.shape[0],
                          v.shape[1],
                          dtype=A.dtype,
                          device=input_device)
        _run_fmmv_test(kernel.mmv,
                       s_expected_fmmv, (A, B, v),
                       out=out,
                       rtol=rtol,
                       opt=opt)

    @pytest.mark.parametrize("cpu", cpu_params, ids=["cpu", "gpu"])
    @pytest.mark.parametrize("Adt,Bdt,vo,vdt,wo,wdt,s_e_dfmmv", [
        pytest.param(n32, n32, "F", n32, "F", n32, "s_e_dfmmv1", marks=mark.usefixtures("s_e_dfmmv1")),
        pytest.param(n32, n32, "C", n32, "C", n32, "s_e_dfmmv1", marks=mark.usefixtures("s_e_dfmmv1")),
        pytest.param(n64, n64, "F", n64, "F", n64, "s_e_dfmmv1", marks=mark.usefixtures("s_e_dfmmv1")),
        pytest.param(n64, n64, "C", n64, "C", n64, "s_e_dfmmv1", marks=mark.usefixtures("s_e_dfmmv1")),
        pytest.param(n32, n32, "F", n32, None, None, "s_e_dfmmv2", marks=mark.usefixtures("s_e_dfmmv2")),
        pytest.param(n32, n32, "C", n32, None, None, "s_e_dfmmv2", marks=mark.usefixtures("s_e_dfmmv2")),
        pytest.param(n64, n64, "F", n64, None, None, "s_e_dfmmv2", marks=mark.usefixtures("s_e_dfmmv2")),
        pytest.param(n64, n64, "C", n64, None, None, "s_e_dfmmv2", marks=mark.usefixtures("s_e_dfmmv2")),
        pytest.param(n32, n32, None, None, "F", n32, "s_e_dfmmv3", marks=mark.usefixtures("s_e_dfmmv3")),
        pytest.param(n32, n32, None, None, "C", n32, "s_e_dfmmv3", marks=mark.usefixtures("s_e_dfmmv3")),
        pytest.param(n64, n64, None, None, "F", n64, "s_e_dfmmv3", marks=mark.usefixtures("s_e_dfmmv3")),
        pytest.param(n64, n64, None, None, "C", n64, "s_e_dfmmv3", marks=mark.usefixtures("s_e_dfmmv3")),
        # A few mixed-contiguity examples
        pytest.param(n32, n32, "C", n32, "F", n32, "s_e_dfmmv1", marks=mark.usefixtures("s_e_dfmmv1")),
    ], ids=["32-32-vF32-wF32", "32-32-vC32-wC32", "64-64-vF64-wF64", "64-64-vC64-wC64",
            "32-32-vF32", "32-32-vC32", "64-64-vF64", "64-64-vC64",
            "32-32-wF32", "32-32-wC32", "64-64-wF64", "64-64-wC64",
            "32-32-vC32-wF32"],
        indirect=["s_e_dfmmv"])
    def test_dfmmv(self, s_A, s_B, v, w, Adt, Bdt, vo, vdt, wo, wdt, kernel,
                   s_e_dfmmv, cpu):
        A = fix_sparse_mat(s_A[0], dtype=Adt)
        B = fix_sparse_mat(s_B[0], dtype=Bdt)
        v = fix_mat(v, order=vo, dtype=vdt)
        w = fix_mat(w, order=wo, dtype=wdt)

        opt = dataclasses.replace(self.basic_options, use_cpu=cpu)
        rtol = choose_on_dtype(A.dtype)

        # Test normal
        _run_fmmv_test(kernel.dmmv,
                       s_e_dfmmv, (A, B, v, w),
                       out=None,
                       rtol=rtol,
                       opt=opt)
        # Test with out
        out = torch.empty(m, t, dtype=A.dtype)
        _run_fmmv_test(kernel.dmmv,
                       s_e_dfmmv, (A, B, v, w),
                       out=out,
                       rtol=rtol,
                       opt=opt)

    @pytest.mark.skipif(not decide_cuda(), reason="No GPU found.")
    @pytest.mark.xfail(reason="Squared-norm not implemented for CUDA tensors",
                       run=True)
    @pytest.mark.parametrize("Adt,Bdt,vo,vdt,wo,wdt,s_e_dfmmv", [
        pytest.param(n32, n32, "F", n32, "F", n32, "s_e_dfmmv1", marks=mark.usefixtures("s_e_dfmmv1")),
        pytest.param(n32, n32, "F", n32, None, None, "s_e_dfmmv2", marks=mark.usefixtures("s_e_dfmmv2")),
        pytest.param(n32, n32, None, None, "F", n32, "s_e_dfmmv3", marks=mark.usefixtures("s_e_dfmmv3")),
    ], ids=["32-32-vF32-wF32", "32-32-vF32", "32-32-wF32"],
        indirect=["s_e_dfmmv"])
    def test_dfmmv_input_devices(self, s_A, s_B, v, w, Adt, Bdt, vo, vdt, wo,
                                 wdt, kernel, s_e_dfmmv):
        input_device = "cuda:0"
        A = fix_sparse_mat(s_A[0], dtype=Adt, device=input_device)
        B = fix_sparse_mat(s_B[0], dtype=Bdt, device=input_device)
        v = fix_mat(v, order=vo, dtype=vdt, device=input_device)
        w = fix_mat(w, order=wo, dtype=wdt, device=input_device)

        opt = dataclasses.replace(self.basic_options, use_cpu=False)
        rtol = choose_on_dtype(A.dtype)

        # Test normal
        _run_fmmv_test(kernel.dmmv,
                       s_e_dfmmv, (A, B, v, w),
                       out=None,
                       rtol=rtol,
                       opt=opt)
        # Test with out
        out = torch.empty(m, t, dtype=A.dtype, device=input_device)
        _run_fmmv_test(kernel.dmmv,
                       s_e_dfmmv, (A, B, v, w),
                       out=out,
                       rtol=rtol,
                       opt=opt)
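
The s_e_dfmmv fixture in this example resolves request.param to one of the
concrete expected-value fixtures, which is what indirect=["s_e_dfmmv"] in the
parametrize decorators relies on. A minimal, self-contained sketch of this
dispatch-by-fixture-name pattern (all names below are illustrative, not part
of the Falkon suite):

import pytest

@pytest.fixture
def expected_sum():
    return 5

@pytest.fixture
def expected_prod():
    return 6

@pytest.fixture
def expected(request):
    # request.param holds a fixture *name*; resolve it lazily so that only
    # the fixture selected for this parametrization is ever computed.
    return request.getfixturevalue(request.param)

@pytest.mark.parametrize(
    "a,b,op,expected",
    [(2, 3, "sum", "expected_sum"), (2, 3, "prod", "expected_prod")],
    indirect=["expected"],
)
def test_op(a, b, op, expected):
    result = a + b if op == "sum" else a * b
    assert result == expected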
Example 2
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""Unit tests for `grizzly.reduce.main`."""
from unittest.mock import Mock

from pytest import mark, raises

from ..common.storage import TestCase, TestCaseLoadFailure
from ..common.utils import Exit
from ..target import AssetManager, TargetLaunchError, TargetLaunchTimeout
from . import ReduceManager
from .args import ReduceArgs, ReduceFuzzManagerIDArgs, ReduceFuzzManagerIDQualityArgs
from .exceptions import GrizzlyReduceBaseException

pytestmark = mark.usefixtures(
    "tmp_path_fm_config",
    "tmp_path_replay_status_db",
    "tmp_path_reduce_status_db",
)


def test_args_01(capsys, tmp_path, mocker):
    """test args in common with grizzly.replay"""
    # pylint: disable=import-outside-toplevel
    from ..replay.test_args import test_replay_args_01 as real_test

    mocker.patch("grizzly.replay.test_args.ReplayArgs", new=ReduceArgs)
    real_test(capsys, mocker, tmp_path)


def test_args_02(tmp_path):
    """test parsing args specific to grizzly.reduce"""
Example 3
@fixture()
def in_file(temp_dir, filename='from_python_tester'):
    in_file_name = join(temp_dir, filename)
    return in_file_name

@fixture()
def out_file(temp_dir, filename='from_python_tester'):
    return join(temp_dir, filename)

@fixture()
def in_file_contents(): return "generic_fcn_name,--,for in file"

@fixture()
def initialize_infile(in_file, in_file_contents):
    with open(in_file, 'w') as f:
        f.write(in_file_contents)

pytestmark = mark.usefixtures('initialize_infile')

def my_fcn1(arg): return 'my_fcn1 called with argument %s' % arg
def my_fcn2(arg): return 'my_fcn2 called with argument %s' % arg

@fixture()
def cd1():
    cd = CommandDispatcher()
    cd.generic_fcn_name = my_fcn1
    return cd

@fixture()
def cd2(cd1):
    cd = CommandDispatcher()
    cd.generic_fcn_name = my_fcn2
    cd.cd1 = cd1
    return cd
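
CommandDispatcher itself is not shown in this example; a minimal sketch that
is consistent with how the fixtures use it (purely an assumption about the
real class):

class CommandDispatcher:
    # Hypothetical stand-in: command handlers are plain attributes
    # (e.g. cd.generic_fcn_name = my_fcn1), looked up by name and called
    # with a single argument.
    def dispatch(self, name, arg):
        return getattr(self, name)(arg)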
Example 4
class TestDense:
    basic_options = FalkonOptions(debug=True,
                                  compute_arch_speed=False,
                                  keops_active="no",
                                  max_gpu_mem=max_mem_dense,
                                  max_cpu_mem=max_mem_dense)

    @pytest.mark.parametrize(
        "Ao,Adt,Bo,Bdt,vo,vdt",
        [
            ("F", np.float32, "F", np.float32, "F", np.float32),
            ("C", np.float32, "C", np.float32, "C", np.float32),
            ("F", np.float64, "F", np.float64, "F", np.float64),
            ("C", np.float64, "C", np.float64, "C", np.float64),
            # A few mixed-contiguity examples
            ("F", np.float32, "C", np.float32, "F", np.float32),
            ("F", np.float32, "C", np.float32, "C", np.float32),
        ],
        ids=[
            "AF32-BF32-vF32", "AC32-BC32-vC32", "AF64-BF64-vF64",
            "AC64-BC64-vC64", "AF32-BC32-vF32", "AF32-BC32-vC32"
        ])
    @pytest.mark.parametrize("cpu", cpu_params, ids=["cpu", "gpu"])
    def test_fmmv(self, A, B, v, Ao, Adt, Bo, Bdt, vo, vdt, kernel,
                  expected_fmmv, cpu):
        A = fix_mat(A, order=Ao, dtype=Adt)
        B = fix_mat(B, order=Bo, dtype=Bdt)
        v = fix_mat(v, order=vo, dtype=vdt)

        opt = dataclasses.replace(self.basic_options, use_cpu=cpu)
        rtol = choose_on_dtype(A.dtype)

        # Test normal
        _run_fmmv_test(kernel.mmv,
                       expected_fmmv, (A, B, v),
                       out=None,
                       rtol=rtol,
                       opt=opt)
        # Test with out
        out = torch.empty(A.shape[0], v.shape[1], dtype=A.dtype)
        _run_fmmv_test(kernel.mmv,
                       expected_fmmv, (A, B, v),
                       out=out,
                       rtol=rtol,
                       opt=opt)

    @pytest.mark.skipif(not decide_cuda(), reason="No GPU found.")
    @pytest.mark.parametrize("Ao,Adt,Bo,Bdt,vo,vdt", [
        ("F", np.float32, "F", np.float32, "F", np.float32),
    ], ids=["AF32-BF32-vF32"])
    def test_fmmv_input_device(self, A, B, v, Ao, Adt, Bo, Bdt, vo, vdt,
                               kernel, expected_fmmv):
        input_device = "cuda:0"
        A = fix_mat(A, order=Ao, dtype=Adt, device=input_device)
        B = fix_mat(B, order=Bo, dtype=Bdt, device=input_device)
        v = fix_mat(v, order=vo, dtype=vdt, device=input_device)

        opt = dataclasses.replace(self.basic_options, use_cpu=False)
        rtol = choose_on_dtype(A.dtype)

        # Test normal
        _run_fmmv_test(kernel.mmv,
                       expected_fmmv, (A, B, v),
                       out=None,
                       rtol=rtol,
                       opt=opt)
        # Test with out
        out = torch.empty(A.shape[0],
                          v.shape[1],
                          dtype=A.dtype,
                          device=input_device)
        _run_fmmv_test(kernel.mmv,
                       expected_fmmv, (A, B, v),
                       out=out,
                       rtol=rtol,
                       opt=opt)

    @pytest.mark.parametrize("cpu", cpu_params, ids=["cpu", "gpu"])
    @pytest.mark.parametrize("Ao,Adt,Bo,Bdt,vo,vdt,wo,wdt,e_dfmmv", [
        pytest.param("F", n32, "F", n32, "F", n32, "F", n32, "e_dfmmv1", marks=mark.usefixtures("e_dfmmv1")),
        pytest.param("C", n32, "C", n32, "C", n32, "C", n32, "e_dfmmv1", marks=mark.usefixtures("e_dfmmv1")),
        pytest.param("F", n64, "F", n64, "F", n64, "F", n64, "e_dfmmv1", marks=mark.usefixtures("e_dfmmv1")),
        pytest.param("C", n64, "C", n64, "C", n64, "C", n64, "e_dfmmv1", marks=mark.usefixtures("e_dfmmv1")),
        pytest.param("F", n32, "F", n32, "F", n32, None, None, "e_dfmmv2", marks=mark.usefixtures("e_dfmmv2")),
        pytest.param("C", n32, "C", n32, "C", n32, None, None, "e_dfmmv2", marks=mark.usefixtures("e_dfmmv2")),
        pytest.param("F", n64, "F", n64, "F", n64, None, None, "e_dfmmv2", marks=mark.usefixtures("e_dfmmv2")),
        pytest.param("C", n64, "C", n64, "C", n64, None, None, "e_dfmmv2", marks=mark.usefixtures("e_dfmmv2")),
        pytest.param("F", n32, "F", n32, None, None, "F", n32, "e_dfmmv3", marks=mark.usefixtures("e_dfmmv3")),
        pytest.param("C", n32, "C", n32, None, None, "C", n32, "e_dfmmv3", marks=mark.usefixtures("e_dfmmv3")),
        pytest.param("F", n64, "F", n64, None, None, "F", n64, "e_dfmmv3", marks=mark.usefixtures("e_dfmmv3")),
        pytest.param("C", n64, "C", n64, None, None, "C", n64, "e_dfmmv3", marks=mark.usefixtures("e_dfmmv3")),
        # A few mixed-contiguity examples
        pytest.param("F", n32, "C", n32, "C", n32, "F", n32, "e_dfmmv1", marks=mark.usefixtures("e_dfmmv1")),
    ], ids=["F32-F32-vF32-wF32", "C32-C32-vC32-wC32", "F64-F64-vF64-wF64", "C64-C64-vC64-wC64",
            "F32-F32-vF32", "C32-C32-vC32", "F64-F64-vF64", "C64-C64-vC64",
            "F32-F32-wF32", "C32-C32-wC32", "F64-F64-wF64", "C64-C64-wC64",
            "F32-C32-vC32-wF32"],
        indirect=["e_dfmmv"])
    def test_dfmmv(self, A, B, v, w, Ao, Adt, Bo, Bdt, vo, vdt, wo, wdt,
                   kernel, e_dfmmv, cpu):
        A = fix_mat(A, order=Ao, dtype=Adt)
        B = fix_mat(B, order=Bo, dtype=Bdt)
        v = fix_mat(v, order=vo, dtype=vdt)
        w = fix_mat(w, order=wo, dtype=wdt)

        opt = dataclasses.replace(self.basic_options, use_cpu=cpu)
        rtol = choose_on_dtype(A.dtype)

        # Test normal
        _run_fmmv_test(kernel.dmmv,
                       e_dfmmv, (A, B, v, w),
                       out=None,
                       rtol=rtol,
                       opt=opt)
        # Test with out
        out = torch.empty(m, t, dtype=A.dtype)
        _run_fmmv_test(kernel.dmmv,
                       e_dfmmv, (A, B, v, w),
                       out=out,
                       rtol=rtol,
                       opt=opt)

    @pytest.mark.parametrize("Ao,Adt,Bo,Bdt,vo,vdt,wo,wdt,e_dfmmv", [
        pytest.param("F", n32, "F", n32, "F", n32, "F", n32, "e_dfmmv1", marks=mark.usefixtures("e_dfmmv1")),
        pytest.param("F", n32, "F", n32, "F", n32, None, None, "e_dfmmv2", marks=mark.usefixtures("e_dfmmv2")),
        pytest.param("F", n32, "F", n32, None, None, "F", n32, "e_dfmmv3", marks=mark.usefixtures("e_dfmmv3")),
    ], ids=["F32-F32-vF32-wF32", "F32-F32-vF32", "F32-F32-wF32"],
        indirect=["e_dfmmv"])
    @pytest.mark.skipif(not decide_cuda(), reason="No GPU found.")
    def test_dfmmv_input_device(self, A, B, v, w, Ao, Adt, Bo, Bdt, vo, vdt,
                                wo, wdt, kernel, e_dfmmv):
        input_device = "cuda:0"
        A = fix_mat(A, order=Ao, dtype=Adt, device=input_device)
        B = fix_mat(B, order=Bo, dtype=Bdt, device=input_device)
        v = fix_mat(v, order=vo, dtype=vdt, device=input_device)
        w = fix_mat(w, order=wo, dtype=wdt, device=input_device)

        opt = dataclasses.replace(self.basic_options, use_cpu=False)
        rtol = choose_on_dtype(A.dtype)

        # Test normal
        _run_fmmv_test(kernel.dmmv,
                       e_dfmmv, (A, B, v, w),
                       out=None,
                       rtol=rtol,
                       opt=opt)
        # Test with out
        out = torch.empty(m, t, dtype=A.dtype, device=input_device)
        _run_fmmv_test(kernel.dmmv,
                       e_dfmmv, (A, B, v, w),
                       out=out,
                       rtol=rtol,
                       opt=opt)

    @pytest.mark.skipif(not decide_cuda(), reason="No GPU found.")
    def test_incorrect_dev_setting(self, A, B, v, w, kernel, e_dfmmv1,
                                   expected_fmmv):
        # Tests the case where use_cpu=True but the input tensors live on CUDA.
        A = A.cuda()
        B = B.cuda()
        v = v.cuda()
        w = w.cuda()
        opt = dataclasses.replace(self.basic_options, use_cpu=True)
        rtol = choose_on_dtype(A.dtype)

        with pytest.warns(
                UserWarning,
                match='backend was chosen to be CPU, but GPU input tensors found'):
            _run_fmmv_test(kernel.dmmv,
                           e_dfmmv1, (A, B, v, w),
                           out=None,
                           rtol=rtol,
                           opt=opt)

        with pytest.warns(
                UserWarning,
                match='backend was chosen to be CPU, but GPU input tensors found'):
            _run_fmmv_test(kernel.mmv,
                           expected_fmmv, (A, B, v),
                           out=None,
                           rtol=rtol,
                           opt=opt)
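
Every test in this class derives its options from basic_options via
dataclasses.replace, which copies a dataclass instance while overriding
selected fields. A minimal sketch of the pattern (the Options class here is
illustrative, not Falkon's):

import dataclasses

@dataclasses.dataclass(frozen=True)
class Options:
    debug: bool = False
    use_cpu: bool = False

base = Options(debug=True)
# replace() returns a new instance; base itself is left untouched.
cpu_opt = dataclasses.replace(base, use_cpu=True)
assert base.use_cpu is False and cpu_opt.use_cpu is True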
Example 5
def rp(request):
    process = BottleServer(ResourceProviderMock(access_tokens[0]),
                           port=rp_port)
    process.start()
    request.addfinalizer(process.terminate)


@fixture(scope='module')
def http():
    class HttpClient(requests.Session):
        def request(self, method, uri, **kwargs):
            url = urljoin(nginx_base_uri, uri)
            defaults = {'allow_redirects': False, 'verify': False}
            return super().request(method, url,
                                   **merge_dicts(defaults, kwargs))

        # Original get method sets allow_redirects to True, so we must override it.
        def get(self, url, **kwargs):
            return self.request('GET', url, **kwargs)

    return HttpClient()


@fixture
def logged_in_fixture(http):
    http.post('/_oauth/login', allow_redirects=True)
    assert len(http.cookies) == 3


logged_in = mark.usefixtures('logged_in_fixture')
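
The logged_in alias makes the fixture read like a marker at its use site; a
hypothetical test using it (not part of the example above):

@logged_in
def test_profile_requires_login(http):
    # Session cookies set by logged_in_fixture are reused by this client.
    response = http.get('/profile')
    assert response.status_code == 200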
Example 6
from logging import getLogger

from pytest import mark, raises

from sapphire import Sapphire

from ..common.reporter import Report
from ..common.storage import TestCase
from ..replay import ReplayResult
from ..target import AssetManager, Target
from . import ReduceManager
from .strategies import Strategy, _load_strategies

LOG = getLogger(__name__)
pytestmark = mark.usefixtures(
    "reporter_sequential_strftime",
    "tmp_path_fm_config",
    "tmp_path_replay_status_db",
    "tmp_path_reduce_status_db",
)


@mark.parametrize("is_hang", [True, False])
def test_strategy_tc_load(is_hang):
    """test that strategy base class dump and load doesn't change testcase metadata"""

    class _TestStrategy(Strategy):
        def __iter__(self):
            yield TestCase.load(str(self._testcase_root), False)

        def update(self, success, served=None):
            pass
Example 7
from pdb import set_trace as bp
from pytest import fixture, mark
from flask import current_app
from .utils import is_in
from .db_utils import add_note, add_user
import json
import tireta
import random
import string
from .conftest import build_user_payload, build_note_payload

# reset db before each test
pytestmark = mark.usefixtures("reset_db")

# ---------------------CLIENT--------------------------


def test_client_exist(client):
    assert client is not None


# ---------------------USER--------------------------


def test_user_resource_exist(client):
    response = client.get('/api/users')
    assert response.status_code == 200


def test_test_start_with_no_user(client):
Example 8
"""
unit tests for grizzly.Session
"""
from itertools import chain, count, repeat

from pytest import mark, raises

from sapphire import Sapphire, Served

from .adapter import Adapter
from .common.reporter import Report, Reporter
from .common.runner import RunResult
from .session import LogOutputLimiter, Session, SessionError
from .target import AssetManager, Result, Target, TargetLaunchError

pytestmark = mark.usefixtures("patch_collector", "tmp_path_status_db")


class SimpleAdapter(Adapter):
    def __init__(self, use_harness, remaining=None):
        super().__init__("simple")
        self.remaining = remaining
        self._use_harness = use_harness

    def setup(self, input_path, _server_map):
        if self._use_harness:
            self.enable_harness()
        self.fuzz["input"] = input_path

    def generate(self, testcase, _server_map):
        assert testcase.adapter_name == self.name
Example 9
class TestSparse:
    basic_options = FalkonOptions(debug=True, compute_arch_speed=False)

    @pytest.mark.parametrize("Adt,Bdt,vo,vdt", [
        (np.float32, np.float32, "F", np.float32),
        (np.float32, np.float32, "C", np.float32),
        (np.float64, np.float64, "F", np.float64),
        (np.float64, np.float64, "C", np.float64),
    ], ids=["A32-B32-vF32", "A32-B32-vC32", "A64-B64-vF64", "A64-B64-vC64"])
    @pytest.mark.parametrize("max_mem", [2 * 2 ** 20])
    def test_fmmv(self, getA, getB, getv, Adt, Bdt, vo, vdt, kernel,
                  s_expected_fmmv, max_mem, cpu):
        A = getA(dtype=Adt, sparse=True)
        B = getB(dtype=Bdt, sparse=True)
        v = getv(order=vo, dtype=vdt)

        opt = dataclasses.replace(self.basic_options, use_cpu=cpu, max_cpu_mem=max_mem, max_gpu_mem=max_mem)
        rtol = choose_on_dtype(A.dtype)

        # Test normal
        _run_fmmv_test(kernel.mmv, s_expected_fmmv, (A, B, v), out=None, rtol=rtol, opt=opt)
        # Test with out
        out = torch.empty(A.shape[0], v.shape[1], dtype=A.dtype)
        _run_fmmv_test(kernel.mmv, s_expected_fmmv, (A, B, v), out=out, rtol=rtol, opt=opt)

    @pytest.mark.parametrize("Adt,Bdt,vo,vdt,wo,wdt,s_e_dfmmv", [
        pytest.param(n32, n32, "F", n32, "F", n32, "s_e_dfmmv1", marks=mark.usefixtures("s_e_dfmmv1")),
        pytest.param(n32, n32, "C", n32, "C", n32, "s_e_dfmmv1", marks=mark.usefixtures("s_e_dfmmv1")),
        pytest.param(n64, n64, "F", n64, "F", n64, "s_e_dfmmv1", marks=mark.usefixtures("s_e_dfmmv1")),
        pytest.param(n64, n64, "C", n64, "C", n64, "s_e_dfmmv1", marks=mark.usefixtures("s_e_dfmmv1")),
        pytest.param(n32, n32, "F", n32, None, None, "s_e_dfmmv2", marks=mark.usefixtures("s_e_dfmmv2")),
        pytest.param(n32, n32, "C", n32, None, None, "s_e_dfmmv2", marks=mark.usefixtures("s_e_dfmmv2")),
        pytest.param(n64, n64, "F", n64, None, None, "s_e_dfmmv2", marks=mark.usefixtures("s_e_dfmmv2")),
        pytest.param(n64, n64, "C", n64, None, None, "s_e_dfmmv2", marks=mark.usefixtures("s_e_dfmmv2")),
        pytest.param(n32, n32, None, None, "F", n32, "s_e_dfmmv3", marks=mark.usefixtures("s_e_dfmmv3")),
        pytest.param(n32, n32, None, None, "C", n32, "s_e_dfmmv3", marks=mark.usefixtures("s_e_dfmmv3")),
        pytest.param(n64, n64, None, None, "F", n64, "s_e_dfmmv3", marks=mark.usefixtures("s_e_dfmmv3")),
        pytest.param(n64, n64, None, None, "C", n64, "s_e_dfmmv3", marks=mark.usefixtures("s_e_dfmmv3")),
        # A few mixed-contiguity examples
        pytest.param(n32, n32, "C", n32, "F", n32, "s_e_dfmmv1", marks=mark.usefixtures("s_e_dfmmv1")),
    ], ids=["32-32-vF32-wF32", "32-32-vC32-wC32", "64-64-vF64-wF64", "64-64-vC64-wC64",
            "32-32-vF32", "32-32-vC32", "64-64-vF64", "64-64-vC64",
            "32-32-wF32", "32-32-wC32", "64-64-wF64", "64-64-wC64",
            "32-32-vC32-wF32"
            ],
       indirect=["s_e_dfmmv"])
    @pytest.mark.parametrize("max_mem", [2 * 2 ** 20])
    def test_dfmmv(self, getA, getB, getv, getw, Adt, Bdt, vo, vdt, wo, wdt, kernel,
                   s_e_dfmmv, max_mem, cpu, m, t):
        A = getA(dtype=Adt, sparse=True)
        B = getB(dtype=Bdt, sparse=True)
        v = getv(order=vo, dtype=vdt)
        w = getw(order=wo, dtype=wdt)

        opt = dataclasses.replace(self.basic_options, use_cpu=cpu, max_cpu_mem=max_mem, max_gpu_mem=max_mem)
        rtol = choose_on_dtype(A.dtype)

        # Test normal
        _run_fmmv_test(kernel.dmmv, s_e_dfmmv, (A, B, v, w), out=None, rtol=rtol, opt=opt)
        # Test with out
        out = torch.empty(m, t, dtype=A.dtype)
        _run_fmmv_test(kernel.dmmv, s_e_dfmmv, (A, B, v, w), out=out, rtol=rtol, opt=opt)
Example 10
# applying fixture to all tests in a module

from pytest import fixture, mark


@fixture(scope='module')
def meta_fixture():
    print()
    print('=== begin meta fixture')
    yield
    print()
    print('=== end meta fixture')


# apply this fixture to everything in this module
pytestmark = mark.usefixtures('meta_fixture')


def test_with_meta_fixtures_a():
    print()
    print('running test with meta fixtures a')


def test_with_meta_fixtures_b():
    print()
    print('running test with meta fixtures b')


# marked_meta_fixtures.py::test_with_meta_fixtures_a
# === begin meta fixture
#
Example 11
    marker = request.node.get_marker('oaas_config')
    extra_conf = marker.kwargs if marker else {}
    config = merge_dicts(proxy_conf, oaas_state, extra_conf)

    process = BottleServer(OAuthServerMock(config), port=oaas_port)
    process.start()
    request.addfinalizer(process.terminate)


@fixture(scope='module')
def http():
    class HttpClient(requests.Session):
        def request(self, method, uri, **kwargs):
            url = urljoin(nginx_base_uri, uri)
            defaults = {'allow_redirects': False, 'verify': False}
            return super().request(method, url, **merge_dicts(defaults, kwargs))

        # Original get method sets allow_redirects to True, so we must override it.
        def get(self, url, **kwargs):
            return self.request('GET', url, **kwargs)

    return HttpClient()


@fixture
def logged_in_fixture(http):
    http.get('/_oauth/login', allow_redirects=True)
    assert len(http.cookies) == 3

logged_in = mark.usefixtures('logged_in_fixture')
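
request.node.get_marker, used at the top of this example, was removed in
pytest 4; the modern spelling is get_closest_marker. A sketch of the updated
idiom (the fixture name here is illustrative):

from pytest import fixture

@fixture
def oaas_overrides(request):
    # get_closest_marker also honours markers set on enclosing classes and
    # modules, with the nearest definition winning.
    marker = request.node.get_closest_marker('oaas_config')
    return marker.kwargs if marker else {}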
Example 12
"""
unit tests for grizzly.replay
"""
from itertools import cycle
from pathlib import Path

from pytest import mark, raises

from sapphire import Sapphire, Served

from ..common.reporter import Report
from ..common.storage import TestCase, TestCaseLoadFailure
from ..target import AssetManager, Result, Target
from .replay import ReplayManager, ReplayResult

pytestmark = mark.usefixtures("patch_collector", "tmp_path_grz_tmp",
                              "tmp_path_replay_status_db")


def _fake_save_logs(result_logs, _meta=False):
    """write fake log data to disk"""
    log_path = Path(result_logs)
    (log_path / "log_stderr.txt").write_text("STDERR log\n")
    (log_path / "log_stdout.txt").write_text("STDOUT log\n")
    with (log_path / "log_asan_blah.txt").open("w") as log_fp:
        log_fp.write("==1==ERROR: AddressSanitizer: ")
        log_fp.write("SEGV on unknown address 0x0 (pc 0x0 bp 0x0 sp 0x0 T0)\n")
        log_fp.write("    #0 0xbad000 in foo /file1.c:123:234\n")
        log_fp.write("    #1 0x1337dd in bar /file2.c:1806:19\n")


def test_replay_01(mocker):
Example 13
from pytest import fixture, mark
from sqlalchemy import text

pytestmark = mark.usefixtures("db_migration")


@fixture(scope="session")
def alembic_ini_path():
    return "./tests/alembic.ini"


def test_it(session):
    session.execute(text("select * from testuser"))
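
The db_migration and session fixtures are assumed by this example but not
shown. A minimal sketch of what they might look like with SQLAlchemy and
Alembic (the database URL and rollback policy below are assumptions, not
taken from the original suite):

from alembic import command
from alembic.config import Config
from pytest import fixture
from sqlalchemy import create_engine
from sqlalchemy.orm import Session

@fixture(scope="session")
def db_migration(alembic_ini_path):
    # Hypothetical: apply all Alembic migrations once per test session.
    command.upgrade(Config(alembic_ini_path), "head")

@fixture
def session(db_migration):
    # Hypothetical: one SQLAlchemy session per test, rolled back afterwards.
    engine = create_engine("sqlite:///./tests/test.db")
    with Session(engine) as sess:
        yield sess
        sess.rollback()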
Example 14
from pytest import fixture, mark


@fixture(scope="module")
def meta_fixture():
    print("\n*** begin meta_fixture ***")
    yield
    print("\n*** end meta_fixture ***")


# Apply this fixture to everything in this module!
pytestmark = mark.usefixtures("meta_fixture")


def test_with_meta_fixtures_a():
    print("\n   Running test_with_meta_fixtures_a")


def test_with_meta_fixtures_b():
    print("\n   Running test_with_meta_fixtures_b")


# How could we tell meta_fixture to only run once, "around" our tests?
# (See 16_scoped_and_meta_fixtures_test.py for a hint...)
Example 15
    extra_conf = marker.kwargs if marker else {}
    config = merge_dicts(proxy_conf, oaas_state, extra_conf)

    process = BottleServer(OAuthServerMock(config), port=oaas_port)
    process.start()
    request.addfinalizer(process.terminate)


@fixture(scope="module")
def http():
    class HttpClient(requests.Session):
        def request(self, method, uri, **kwargs):
            url = urljoin(nginx_base_uri, uri)
            defaults = {"allow_redirects": False, "verify": False}
            return super().request(method, url, **merge_dicts(defaults, kwargs))

        # Original get method sets allow_redirects to True, so we must override it.
        def get(self, url, **kwargs):
            return self.request("GET", url, **kwargs)

    return HttpClient()


@fixture
def logged_in_fixture(http):
    http.post("/_oauth/login", allow_redirects=True)
    assert len(http.cookies) == 3


logged_in = mark.usefixtures("logged_in_fixture")