Example #1
def pytest_generate_tests(metafunc):
    if 'example' in metafunc.fixturenames:
        config = metafunc.config
        examples = get_all_examples(config)

        def marks(example):
            result = []
            if example.is_skip:
                result.append(pytest.mark.skip(reason="skipping %s" % example.relpath))
            if example.is_xfail and not example.no_js:
                result.append(pytest.mark.xfail(reason="xfail %s" % example.relpath, strict=True))
            return result

        if 'js_example' in metafunc.fixturenames:
            params = [ pytest.param(e.path, e, config, marks=marks(e)) for e in examples if e.is_js ]
            metafunc.parametrize('js_example,example,config', params)
        if 'file_example' in metafunc.fixturenames:
            params = [ pytest.param(e.path, e, config, marks=marks(e)) for e in examples if e.is_file ]
            metafunc.parametrize('file_example,example,config', params)
        if 'server_example' in metafunc.fixturenames:
            params = [ pytest.param(e.path, e, config, marks=marks(e)) for e in examples if e.is_server ]
            metafunc.parametrize('server_example,example,config', params)
        if 'notebook_example' in metafunc.fixturenames:
            params = [ pytest.param(e.path, e, config, marks=marks(e)) for e in examples if e.is_notebook ]
            metafunc.parametrize('notebook_example,example,config', params)
Example #2
def iter_struct_object_dtypes():
    """
    Iterates over a few complex dtypes and object patterns which
    fill the array with a given object (defaults to a singleton).

    Yields
    ------
    dtype : dtype
    pattern : tuple
        Structured tuple for use with `np.array`.
    count : int
        Number of objects stored in the dtype.
    singleton : object
        A singleton object. The returned pattern is constructed so that
        all objects inside the datatype are set to the singleton.
    """
    obj = object()

    dt = np.dtype([('b', 'O', (2, 3))])
    p = ([[obj] * 3] * 2,)
    yield pytest.param(dt, p, 6, obj, id="<subarray>")

    dt = np.dtype([('a', 'i4'), ('b', 'O', (2, 3))])
    p = (0, [[obj] * 3] * 2)
    yield pytest.param(dt, p, 6, obj, id="<subarray in field>")

    dt = np.dtype([('a', 'i4'),
                   ('b', [('ba', 'O'), ('bb', 'i1')], (2, 3))])
    p = (0, [[(obj, 0)] * 3] * 2)
    yield pytest.param(dt, p, 6, obj, id="<structured subarray 1>")

    dt = np.dtype([('a', 'i4'),
                   ('b', [('ba', 'O'), ('bb', 'O')], (2, 3))])
    p = (0, [[(obj, obj)] * 3] * 2)
    yield pytest.param(dt, p, 12, obj, id="<structured subarray 2>")
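A generator like this is normally passed straight to pytest.mark.parametrize, so every yielded pytest.param becomes one test case with its own id. A minimal sketch of that hookup, assuming numpy and pytest are imported as in the surrounding tests; the test name and body are illustrative, not part of the original source:

@pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
                         iter_struct_object_dtypes())
def test_structured_object_dtype(dt, pat, count, singleton):
    # Each element built from the pattern holds `count` references to the singleton.
    arr = np.array([pat] * 3, dtype=dt)
    assert arr.dtype == dt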
Example #3
def test_pytest_param_id_requires_string():
    with pytest.raises(TypeError) as excinfo:
        pytest.param(id=True)
    msg, = excinfo.value.args
    if six.PY2:
        assert msg == "Expected id to be a string, got <type 'bool'>: True"
    else:
        assert msg == "Expected id to be a string, got <class 'bool'>: True"
Example #4
 def __call__(self, f):
     params = []
     for product in _products:
         if product not in _active_products:
             params.append(pytest.param(product, marks=pytest.mark.skip(reason="wrong toxenv")))
         elif product in self.marks:
             params.append(pytest.param(product, marks=self.marks[product]))
         else:
             params.append(product)
     return pytest.mark.parametrize(self.arg, params)(f)
Example #5
def pandas_skip(test):  # pragma: no cover
    """Skips a test if the pandas plugin is not available."""
    # Check libraries are present
    if not has_pandas():
        return pytest.param(test, marks=pytest.mark.skip(reason='the pandas plugin requires pandas'))
    # Check library versions
    minor = LooseVersion(pd.__version__).version[1]
    if minor not in (16, 17, 18, 20, 21, 22, 23):
        reason = 'these tests do not support pandas version %s' % pd.__version__
        return pytest.param(test, marks=pytest.mark.skip(reason=reason))
    return test
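A wrapper like this is applied to individual entries of a parametrize list rather than used as a decorator, since it returns either the original value or a pytest.param carrying a skip mark. A hedged sketch; the entries and the test below are hypothetical:

@pytest.mark.parametrize('case', [
    plain_case,                # hypothetical case that never touches pandas
    pandas_skip(pandas_case),  # hypothetical case skipped without a usable pandas
])
def test_plugin_case(case):
    ...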
Example #6
def write_read_engines(xfail_arrow_to_fastparquet=True):
    if xfail_arrow_to_fastparquet:
        xfail = (pytest.mark.xfail(reason="Can't read arrow directories with fastparquet"),)
    else:
        xfail = ()
    ff = () if fastparquet else (pytest.mark.skip(reason='fastparquet not found'),)
    aa = () if pq else (pytest.mark.skip(reason='pyarrow not found'),)
    engines = [pytest.param('fastparquet', 'fastparquet', marks=ff),
               pytest.param('pyarrow', 'pyarrow', marks=aa),
               pytest.param('fastparquet', 'pyarrow', marks=ff + aa),
               pytest.param('pyarrow', 'fastparquet', marks=ff + aa + xfail)]
    return pytest.mark.parametrize(('write_engine', 'read_engine'), engines)
Example #7
def filter_fixtures(all_fixtures, fixtures_base_dir, mark_fn=None, ignore_fn=None):
    """
    Helper function for filtering test fixtures.

    - `fixtures_base_dir` should be the base directory that the fixtures were collected from.
    - `mark_fn` should be a function which either returns `None` or a `pytest.mark` object.
    - `ignore_fn` should be a function which returns `True` for any fixture
       which should be ignored.
    """
    for fixture_data in all_fixtures:
        fixture_path = fixture_data[0]
        fixture_relpath = os.path.relpath(fixture_path, fixtures_base_dir)

        if ignore_fn:
            if ignore_fn(fixture_relpath, *fixture_data[1:]):
                continue

        if mark_fn is not None:
            mark = mark_fn(fixture_relpath, *fixture_data[1:])
            if mark:
                yield pytest.param(
                    (fixture_path, *fixture_data[1:]),
                    marks=mark,
                )
                continue

        yield fixture_data
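The helper sits between fixture collection and parametrization: whatever it yields (plain tuples, or pytest.param objects carrying marks) goes straight into pytest.mark.parametrize. A sketch under the assumption that all_fixtures and BASE_DIR are produced elsewhere; the mark_slow callback and the test are illustrative:

def mark_slow(fixture_relpath, *rest):
    # Return a mark to attach to this fixture, or None to leave it unmarked.
    if 'stress' in fixture_relpath:
        return pytest.mark.skip(reason='stress fixtures are too slow for CI')
    return None

@pytest.mark.parametrize(
    'fixture_data',
    filter_fixtures(all_fixtures, BASE_DIR, mark_fn=mark_slow),
)
def test_fixture(fixture_data):
    ...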
Example #8
def _get_pip_versions():
    # This fixture will attempt to detect if tests are being run without
    # network connectivity and if so skip some tests

    network = True
    if not os.environ.get('NETWORK_REQUIRED', False):  # pragma: nocover
        try:
            from urllib.request import urlopen
            from urllib.error import URLError
        except ImportError:
            from urllib2 import urlopen, URLError # Python 2.7 compat

        try:
            urlopen('https://pypi.org', timeout=1)
        except URLError:
            # No network, disable most of these tests
            network = False

    network_versions = [
        'pip==9.0.3',
        'pip==10.0.1',
        'pip==18.1',
        'pip==19.0.1',
        'https://github.com/pypa/pip/archive/master.zip',
    ]

    versions = [None] + [
        pytest.param(v, **({} if network else {'marks': pytest.mark.skip}))
        for v in network_versions
    ]

    return versions
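The returned list is typically used as fixture params, so each pip requirement string (or None for the pre-installed pip) produces its own fixture instance, with the network-dependent entries skipped when offline. A minimal sketch, assuming the fixture name:

@pytest.fixture(params=_get_pip_versions())
def pip_version(request):
    return request.param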
Example #9
def pytest_generate_tests(metafunc):
    """ zipper auth_modes and auth_prov together and drop the nonsensical combos """
    # TODO use supportability and provider type+version parametrization
    argnames = ['auth_mode', 'prov_key', 'user_type', 'auth_user']
    argvalues = []
    idlist = []
    if 'auth_providers' not in auth_data:
        metafunc.parametrize(argnames, [
            pytest.param(
                None, None, None, None,
                marks=pytest.mark.uncollect("auth providers data missing"))])
        return
    # Holy nested loops, batman
    # go through each mode, then each auth type, and find auth providers matching that type
    # go through each user type for the given mode+auth_type (from param_maps above)
    # for each user type, find users in the yaml matching user_type and on the given auth provider
    # add parametrization for matching set of mode, auth_provider key, user_type, and user_dict
    # set id, use the username from userdict instead of an auto-generated "auth_user[\d]" ID
    for mode in test_param_maps.keys():
        for auth_type in test_param_maps.get(mode, {}):
            eligible_providers = {key: prov_dict
                                  for key, prov_dict in iteritems(auth_data.auth_providers)
                                  if prov_dict.type == auth_type}
            for user_type in test_param_maps[mode][auth_type]['user_types']:
                for key, prov_dict in eligible_providers.items():
                    for user_dict in [u for u in auth_user_data(key, user_type) or []]:
                        if user_type in prov_dict.get('user_types', []):
                            argvalues.append((mode, key, user_type, user_dict))
                            idlist.append('-'.join([mode, key, user_type, user_dict.username]))
    metafunc.parametrize(argnames, argvalues, ids=idlist)
Example #10
def parametrize_test_working_set_resolve(*test_list):
    idlist = []
    argvalues = []
    for test in test_list:
        (
            name,
            installed_dists,
            installable_dists,
            requirements,
            expected1, expected2
        ) = [
            strip_comments(s.lstrip()) for s in
            textwrap.dedent(test).lstrip().split('\n\n', 5)
        ]
        installed_dists = list(parse_distributions(installed_dists))
        installable_dists = list(parse_distributions(installable_dists))
        requirements = list(pkg_resources.parse_requirements(requirements))
        for id_, replace_conflicting, expected in (
            (name, False, expected1),
            (name + '_replace_conflicting', True, expected2),
        ):
            idlist.append(id_)
            expected = strip_comments(expected.strip())
            if re.match(r'\w+$', expected):
                expected = getattr(pkg_resources, expected)
                assert issubclass(expected, Exception)
            else:
                expected = list(parse_distributions(expected))
            argvalues.append(pytest.param(installed_dists, installable_dists,
                                          requirements, replace_conflicting,
                                          expected))
    return pytest.mark.parametrize('installed_dists,installable_dists,'
                                   'requirements,replace_conflicting,'
                                   'resolved_dists_or_exception',
                                   argvalues, ids=idlist)
Example #11
def write_read_engines(**kwargs):
    """Product of both engines for write/read:

    To add custom marks, pass keyword of the form: `mark_writer_reader=reason`,
    or `mark_engine=reason` to apply to all parameters with that engine."""
    backends = {'pyarrow', 'fastparquet'}
    marks = {(w, r): [] for w in backends for r in backends}

    # Skip if uninstalled
    for name, exists in [('fastparquet', fastparquet), ('pyarrow', pq)]:
        val = pytest.mark.skip(reason='%s not found' % name)
        if not exists:
            for k in marks:
                if name in k:
                    marks[k].append(val)

    # Custom marks
    for kw, val in kwargs.items():
        kind, rest = kw.split('_', 1)
        key = tuple(rest.split('_'))
        if (kind not in ('xfail', 'skip') or len(key) > 2 or
                set(key).difference(backends)):
            raise ValueError("unknown keyword %r" % kw)
        val = getattr(pytest.mark, kind)(reason=val)
        if len(key) == 2:
            marks[key].append(val)
        else:
            for k in marks:
                if key in k:
                    marks[k].append(val)

    return pytest.mark.parametrize(('write_engine', 'read_engine'),
                                   [pytest.param(*k, marks=tuple(v))
                                    for (k, v) in sorted(marks.items())])
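Because the function returns a ready-made parametrize decorator, it is applied directly to round-trip tests; the keyword names encode the mark kind plus the affected engine (or writer/reader pair) as described in the docstring. A hedged sketch with illustrative test names and reasons:

@write_read_engines()
def test_roundtrip(tmpdir, write_engine, read_engine):
    ...

# xfail only the pyarrow-writes / fastparquet-reads combination.
@write_read_engines(xfail_pyarrow_fastparquet="fastparquet cannot read this layout")
def test_partitioned(tmpdir, write_engine, read_engine):
    ...

# Skip every combination that involves fastparquet, as writer or reader.
@write_read_engines(skip_fastparquet="temporarily disabled")
def test_statistics(tmpdir, write_engine, read_engine):
    ...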
Example #12
 def parametrize(*test_list, **format_dict):
     idlist = []
     argvalues = []
     for test in test_list:
         test_params = test.lstrip().split('\n\n', 3)
         name_kwargs = test_params.pop(0).split('\n')
         if len(name_kwargs) > 1:
             val = name_kwargs[1].strip()
             install_cmd_kwargs = ast.literal_eval(val)
         else:
             install_cmd_kwargs = {}
         name = name_kwargs[0].strip()
         setup_py_requires, setup_cfg_requires, expected_requires = (
             DALS(a).format(**format_dict) for a in test_params
         )
         for id_, requires, use_cfg in (
             (name, setup_py_requires, False),
             (name + '_in_setup_cfg', setup_cfg_requires, True),
         ):
             idlist.append(id_)
             marks = ()
             if requires.startswith('@xfail\n'):
                 requires = requires[7:]
                 marks = pytest.mark.xfail
             argvalues.append(pytest.param(requires, use_cfg,
                                           expected_requires,
                                           install_cmd_kwargs,
                                           marks=marks))
     return pytest.mark.parametrize(
         'requires,use_setup_cfg,'
         'expected_requires,install_cmd_kwargs',
         argvalues, ids=idlist,
     )
Example #13
def cases_test_cont_basic():
    for distname, arg in distcont[:] + [(histogram_test_instance, tuple())]:
        if distname == 'levy_stable':
            continue
        elif distname in distslow:
            yield pytest.param(distname, arg, marks=pytest.mark.slow)
        else:
            yield distname, arg
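The generator mixes bare tuples with pytest.param objects; pytest.mark.parametrize accepts both, so the slow distributions simply carry an extra mark. A sketch of the usual hookup, with an illustrative test body:

@pytest.mark.parametrize('distname,arg', cases_test_cont_basic())
def test_cont_basic(distname, arg):
    ...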
Example #14
 def __call__(self, f):
     params = []
     for product in _products:
         if product in self.marks:
             params.append(pytest.param(product, marks=self.marks[product]))
         else:
             params.append(product)
     return pytest.mark.parametrize(self.arg, params)(f)
Example #15
def skipif_32bit(param):
    """
    Skip parameters in a parametrize on 32bit systems. Specifically used
    here to skip leaf_size parameters related to GH 23440.
    """
    marks = pytest.mark.skipif(compat.is_platform_32bit(),
                               reason='GH 23440: int type mismatch on 32bit')
    return pytest.param(param, marks=marks)
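Typical usage wraps only the affected parameters and leaves the rest untouched; a sketch with illustrative leaf_size values:

@pytest.fixture(params=[
    skipif_32bit(1),
    skipif_32bit(2),
    10,
])
def leaf_size(request):
    return request.param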
Example #16
def create_gradient_acquisition_fixtures():
    # Create list of tuples of parameters with (fixture, tolerance) for acquisitions that have gradients only
    parameters = []
    for acquisition in acquisition_tests:
        if acquisition.has_gradients:
            acquisition_name = acquisition.name
            lazy_fixture = pytest_lazyfixture.lazy_fixture(acquisition.name)
            parameters.append(pytest.param(lazy_fixture, acquisition.rmse_gradient_tolerance, id=acquisition_name))
    return parameters
Example #17
def cases():
    skiplist = get_skiplist()
    for test_instance in get_cases():
        for index in range(test_instance.test_count):
            test = (test_instance.tool_id + "_test_%d" % (index + 1), test_instance, index)
            marks = []
            marks.append(pytest.mark.skipif(test_instance.tool_id in skiplist, reason="tool in skiplist"))
            if 'data_manager_' in test_instance.tool_id:
                marks.append(pytest.mark.data_manager(test))
            else:
                marks.append(pytest.mark.tool(test))
            yield pytest.param(test, marks=marks)
Example #18
def write_read_engines(xfail_arrow_to_fastparquet=True,
                       xfail_fastparquet_to_arrow=False):
    xfail = []
    if xfail_arrow_to_fastparquet:
        a2f = (pytest.mark.xfail(reason=("Can't read arrow directories "
                                         "with fastparquet")),)
    else:
        a2f = ()
    if xfail_fastparquet_to_arrow:
        f2a = (pytest.mark.xfail(reason=("Can't read this fastparquet "
                                         "file with pyarrow")),)
    else:
        f2a = ()

    xfail = tuple(xfail)
    ff = () if fastparquet else (pytest.mark.skip(reason='fastparquet not found'),)
    aa = () if pq else (pytest.mark.skip(reason='pyarrow not found'),)
    engines = [pytest.param('fastparquet', 'fastparquet', marks=ff),
               pytest.param('pyarrow', 'pyarrow', marks=aa),
               pytest.param('fastparquet', 'pyarrow', marks=ff + aa + f2a),
               pytest.param('pyarrow', 'fastparquet', marks=ff + aa + a2f)]
    return pytest.mark.parametrize(('write_engine', 'read_engine'), engines)
Example #19
def cases_test_moments():
    fail_normalization = set(['vonmises', 'ksone'])
    fail_higher = set(['vonmises', 'ksone', 'ncf'])

    for distname, arg in distcont[:] + [(histogram_test_instance, tuple())]:
        if distname == 'levy_stable':
            continue

        cond1 = distname not in fail_normalization
        cond2 = distname not in fail_higher

        yield distname, arg, cond1, cond2

        if not cond1 or not cond2:
            yield pytest.param(distname, arg, True, True, marks=pytest.mark.xfail)
Example #20
def _get_ufuncs():
    ufuncs = []
    ufunc_names = []
    for name in sorted(sc.__dict__):
        obj = sc.__dict__[name]
        if not isinstance(obj, np.ufunc):
            continue
        msg = KNOWNFAILURES.get(obj)
        if msg is None:
            ufuncs.append(obj)
            ufunc_names.append(name)
        else:
            fail = pytest.mark.xfail(run=False, reason=msg)
            ufuncs.append(pytest.param(obj, marks=fail))
            ufunc_names.append(name)
    return ufuncs, ufunc_names
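The two parallel lists are meant to be unpacked into a parametrize call, reusing the names as test ids; a minimal sketch with an illustrative test:

_ufuncs, _ufunc_names = _get_ufuncs()

@pytest.mark.parametrize('ufunc', _ufuncs, ids=_ufunc_names)
def test_ufunc(ufunc):
    ...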
Example #21
def _mark_skip_if_format_is_uncomparable(extension):
    import pytest
    if isinstance(extension, str):
        name = extension
        marks = []
    elif isinstance(extension, tuple):
        # Extension might be a pytest ParameterSet instead of a plain string.
        # Unfortunately, this type is not exposed, so since it's a namedtuple,
        # check for a tuple instead.
        name, = extension.values
        marks = [*extension.marks]
    else:
        # Extension might be a pytest marker instead of a plain string.
        name, = extension.args
        marks = [extension.mark]
    return pytest.param(name,
                        marks=[*marks, _skip_if_format_is_uncomparable(name)])
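A normalizer like this is mapped over a list of file extensions before parametrizing, so plain strings, existing markers and ParameterSets all come out as pytest.param objects carrying the extra skip condition. A hedged sketch; the extension list and test are illustrative:

extensions = [_mark_skip_if_format_is_uncomparable(ext)
              for ext in ['png', 'pdf', 'svg']]

@pytest.mark.parametrize('extension', extensions)
def test_image_output(extension):
    ...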
Example #22
def cases_test_moments():
    fail_normalization = set(['vonmises'])
    fail_higher = set(['vonmises', 'ncf'])

    for distname, arg in distcont[:] + [(histogram_test_instance, tuple())]:
        if distname == 'levy_stable':
            continue

        cond1 = distname not in fail_normalization
        cond2 = distname not in fail_higher

        yield distname, arg, cond1, cond2, False

        if not cond1 or not cond2:
            # Run the distributions that have issues twice, once skipping the
            # not_ok parts, once with the not_ok parts but marked as knownfail
            yield pytest.param(distname, arg, True, True, True,
                               marks=pytest.mark.xfail)
Example #23
def pytest_generate_tests(metafunc):
    """ Generates tests specific to RHSM or SAT6 with proxy-on or off """
    if metafunc.function in {test_rh_updates, test_rhsm_registration_check_repo_names}:
        return
    argnames = ['reg_method', 'reg_data', 'proxy_url', 'proxy_creds']
    argvalues = []
    idlist = []

    try:
        holder = metafunc.config.pluginmanager.get_plugin('appliance-holder')
        stream = holder.held_appliance.version.stream()
        all_reg_data = conf.cfme_data.get('redhat_updates', {})['streams'][stream]
    except KeyError:
        logger.warning('Could not find rhsm data for stream in yaml')
        metafunc.parametrize(argnames, [
            pytest.param(
                None, None, None, None,
                marks=pytest.mark.uncollect("Could not find rhsm data for stream in yaml"))])
        return

    if 'reg_method' in metafunc.fixturenames:
        for reg_method in REG_METHODS:

            reg_data = all_reg_data.get(reg_method)
            if not reg_data or not reg_data.get('test_registration', False):
                continue

            proxy_data = conf.cfme_data.get('redhat_updates', {}).get('http_proxy', False)
            if proxy_data and reg_data.get('use_http_proxy', False):
                proxy_url = proxy_data['url']
                proxy_creds_key = proxy_data['credentials']
                proxy_creds = conf.credentials[proxy_creds_key]
                argval = [reg_method, reg_data, proxy_url, proxy_creds]
                argid = '{}-{}'.format(reg_method, 'proxy_on')
                idlist.append(argid)
                argvalues.append(argval)

            argval = [reg_method, reg_data, None, None]
            argid = '{}-{}'.format(reg_method, 'proxy_off')
            idlist.append(argid)
            argvalues.append(argval)
        return metafunc.parametrize(argnames, argvalues, ids=idlist, scope="function")
Example #24
def find_modules():
    """
    Yields fully qualified module names in the vdsm package.
    """
    expected_to_fail = {
        # TODO: imports os_brick which is a soft dependency
        # remove when os_brick can be required.
        "vdsm.storage.nos_brick",
    }

    def error(name):
        raise

    vdsm_pkg = importlib.import_module("vdsm")
    for _, name, _ in pkgutil.walk_packages(vdsm_pkg.__path__,
                                            prefix="vdsm.",
                                            onerror=error):
        if name in expected_to_fail:
            name = pytest.param(name, marks=pytest.mark.xfail)
        yield name
Example #25
def _get_testable_interactive_backends():
    backends = []
    for deps, backend in [
            (["cairo", "gi"], "gtk3agg"),
            (["cairo", "gi"], "gtk3cairo"),
            (["PyQt5"], "qt5agg"),
            (["PyQt5", "cairocffi"], "qt5cairo"),
            (["tkinter"], "tkagg"),
            (["wx"], "wx"),
            (["wx"], "wxagg"),
    ]:
        reason = None
        if not os.environ.get("DISPLAY"):
            reason = "No $DISPLAY"
        elif any(importlib.util.find_spec(dep) is None for dep in deps):
            reason = "Missing dependency"
        if reason:
            backend = pytest.param(
                backend, marks=pytest.mark.skip(reason=reason))
        backends.append(backend)
    return backends
Example #26
def pytest_generate_tests(metafunc):
    """The following lines generate appliance versions based from the current build.
    Appliance version is split and minor_build is picked out for generating each version
    and appending it to the empty versions list"""
    versions = []
    version = find_appliance(metafunc).version

    split_ver = str(version).split(".")
    try:
        minor_build = split_ver[2]
        assert int(minor_build) != 0
    except IndexError:
        logger.exception('Caught IndexError generating for test_appliance_update, skipping')
    except AssertionError:
        logger.debug('Caught AssertionError: No previous z-stream version to update from')
        versions.append(pytest.param("bad:{!r}".format(version), marks=pytest.mark.uncollect(
            'Could not parse minor_build version from: {}'.format(version)
        )))
    else:
        for i in range(int(minor_build) - 1, -1, -1):
            versions.append("{}.{}.{}".format(split_ver[0], split_ver[1], i))
    metafunc.parametrize('old_version', versions, indirect=True)
Example #27
def _mark_xfail_if_format_is_uncomparable(extension):
    if isinstance(extension, str):
        name = extension
        marks = []
    elif isinstance(extension, tuple):
        # Extension might be a pytest ParameterSet instead of a plain string.
        # Unfortunately, this type is not exposed, so since it's a namedtuple,
        # check for a tuple instead.
        name = extension.values[0]
        marks = list(extension.marks)
    else:
        # Extension might be a pytest marker instead of a plain string.
        name = extension.args[0]
        marks = [extension.mark]

    if name not in comparable_formats():
        fail_msg = 'Cannot compare %s files on this system' % (name, )
        import pytest
        marks += [pytest.mark.xfail(reason=fail_msg, strict=False,
                                    raises=ImageComparisonFailure)]
        return pytest.param(name, marks=marks)
    else:
        return extension
Example #28
def generate_yaml_tests(directory):
    for yml_file in directory.glob("*/*.yml"):
        data = yaml.safe_load(yml_file.read_text())
        assert "cases" in data, "A fixture needs cases to be used in testing"

        # Strip the parts of the directory to only get a name without
        # extension and resolver directory
        base_name = str(yml_file)[len(str(directory)) + 1:-4]

        base = data.get("base", {})
        cases = data["cases"]

        for i, case_template in enumerate(cases):
            case = base.copy()
            case.update(case_template)

            case[":name:"] = base_name
            if len(cases) > 1:
                case[":name:"] += "-" + str(i)

            if case.pop("skip", False):
                case = pytest.param(case, marks=pytest.mark.xfail)

            yield case
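The generator yields plain case dicts, or pytest.param-wrapped dicts (marked xfail) for cases flagged as skip, so its output can be passed directly to parametrize. A minimal sketch, assuming a YAML_TESTS_DIR path object is defined elsewhere:

@pytest.mark.parametrize('case', generate_yaml_tests(YAML_TESTS_DIR))
def test_yaml_based(case):
    ...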
Example #29
        create_package(
            pearl_env,
            PackageArgs(
                name="mypkg",
                dest_dir=dest_dir
            )
        )


@pytest.mark.parametrize(
    'initial_package_list, package_deps, expected_result',
    [
        pytest.param(
            ["A"],
            {
                "A": [],
            },
            ["A"]
        ),
        pytest.param(
            ["A"],
            {
                "A": ["B"],
                "B": [],
            },
            ["B", "A"]
        ),
        pytest.param(
            ["A", "B"],
            {
                "A": ["C"],
Example #30
"Mr. Smith, throughout his distinguished"""
    tokens = en_tokenizer(text)
    assert len(tokens) == 76


@pytest.mark.parametrize(
    "text,length",
    [
        ("The U.S. Army likes Shock and Awe.", 8),
        ("U.N. regulations are not a part of their concern.", 10),
        ("“Isn't it?”", 6),
        ("""Yes! "I'd rather have a walk", Ms. Comble sighed. """, 15),
        ("""'Me too!', Mr. P. Delaware cried. """, 11),
        ("They ran about 10km.", 6),
        pytest.param(
            "But then the 6,000-year ice age came...", 10, marks=pytest.mark.xfail()
        ),
    ],
)
def test_en_tokenizer_handles_cnts(en_tokenizer, text, length):
    tokens = en_tokenizer(text)
    assert len(tokens) == length


@pytest.mark.parametrize(
    "text,match",
    [
        ("10", True),
        ("1", True),
        ("10,000", True),
        ("10,00", True),
Example #31
        assert_array_equal(spatial_smoothing(test_input)[0], test_output)
    except ARTTestException as e:
        art_warning(e)


@pytest.mark.only_with_platform("pytorch")
@pytest.mark.parametrize("channels_first", [True, False])
@pytest.mark.parametrize(
    "window_size",
    [
        1,
        2,
        pytest.param(
            10,
            marks=pytest.mark.xfail(
                reason=
                "Window size of 10 fails, because PyTorch requires that Padding size should be less than "
                "the corresponding input dimension."),
        ),
    ],
)
def test_spatial_smoothing_image_data(art_warning, image_batch, channels_first,
                                      window_size):
    try:
        test_input, test_output = image_batch
        spatial_smoothing = SpatialSmoothingPyTorch(
            channels_first=channels_first, window_size=window_size)

        assert_array_equal(spatial_smoothing(test_input)[0], test_output)
    except ARTTestException as e:
        art_warning(e)
Example #32
    assert adata.obs_names.tolist() == ["r1", "r2", "r3"]
    assert adata.var_names.tolist() == ["c1", "c2"]
    assert adata.X.tolist() == X_list


@pytest.mark.parametrize("typ", [np.array, csr_matrix])
def test_write_csv(typ, tmp_path):
    X = typ(X_list)
    adata = ad.AnnData(X, obs=obs_dict, var=var_dict, uns=uns_dict)
    adata.write_csvs(tmp_path / "test_csv_dir", skip_data=False)


@pytest.mark.parametrize(
    ["read", "write", "name"],
    [
        pytest.param(ad.read_h5ad, ad._io.write._write_h5ad,
                     "test_empty.h5ad"),
        pytest.param(
            ad.read_loom,
            ad._io.write_loom,
            "test_empty.loom",
            marks=pytest.mark.xfail(reason="Loom can’t handle 0×0 matrices"),
        ),
        pytest.param(ad.read_zarr, ad._io.write_zarr, "test_empty.zarr"),
        pytest.param(
            ad.read_zarr,
            ad._io.write_zarr,
            "test_empty.zip",
            marks=pytest.mark.xfail(
                reason="Zarr zip storage doesn’t seem to work…"),
        ),
    ],
Example #33
        for i in range(N):
            if s1[i] != s2[i]:
                if s1[i] == "x":
                    x1 += 1
                else:
                    y1 += 1
        if (x1 + y1) % 2 == 1:
            return -1
        res = x1 // 2 + y1 // 2
        if x1 % 2 == 1:
            res += 2
        # Two mismatches of the same kind (both x or both y) need one swap; a leftover x-mismatch paired with a leftover y-mismatch needs two swaps.
        return res


# leetcode submit region end(Prohibit modification and deletion)


@pytest.mark.parametrize("kwargs,expected", [
    (dict(s1="xx", s2="yy"), 1),
    pytest.param(dict(s1="xy", s2="yx"), 2),
    pytest.param(dict(s1="xx", s2="xy"), -1),
    pytest.param(dict(s1="xxyyxyxyxx", s2="xyyxyxxxyx"), 4),
])
def test_solutions(kwargs, expected):
    assert Solution().minimumSwap(**kwargs) == expected


if __name__ == '__main__':
    pytest.main(["-q", "--color=yes", "--capture=tee-sys", __file__])
Example #34
    room_id = "!roomofdoom:server"
    room = Room(client=None, room_id=room_id)  # type: ignore
    for member in room_members:
        room._mkmembers(member)

    return room, number_of_partners > 1


@pytest.mark.parametrize(
    "partner_config_for_room",
    [
        pytest.param(
            {
                "number_of_partners": 1,
                "users_per_address": 1,
                "number_of_base_users": 1
            },
            id="should_not_leave_one_partner",
        ),
        pytest.param(
            {
                "number_of_partners": 1,
                "users_per_address": 4,
                "number_of_base_users": 1
            },
            id="should_not_leave_multiple_use_one_address",
        ),
        pytest.param(
            {
                "number_of_partners": 0,
                "users_per_address": 0,
Example #35
import numpy as np
import pytest
import torch

from mmcv.ops import Voxelization


def _get_voxel_points_indices(points, coors, voxel):
    result_form = np.equal(coors, voxel)
    return result_form[:, 0] & result_form[:, 1] & result_form[:, 2]


@pytest.mark.parametrize('device_type', [
    'cpu',
    pytest.param('cuda:0',
                 marks=pytest.mark.skipif(not torch.cuda.is_available(),
                                          reason='requires CUDA support'))
])
def test_voxelization(device_type):
    voxel_size = [0.5, 0.5, 0.5]
    point_cloud_range = [0, -40, -3, 70.4, 40, 1]

    voxel_dict = np.load('tests/data/for_3d_ops/test_voxel.npy',
                         allow_pickle=True).item()
    expected_coors = voxel_dict['coors']
    expected_voxels = voxel_dict['voxels']
    expected_num_points_per_voxel = voxel_dict['num_points_per_voxel']
    points = voxel_dict['points']

    points = torch.tensor(points)
    max_num_points = -1
Example #36
import pytest

from auth.signedgrant import validate_signed_grant, generate_signed_token, SIGNATURE_PREFIX
from auth.validateresult import AuthKind, ValidateResult


@pytest.mark.parametrize(
    "header, expected_result",
    [
        pytest.param("",
                     ValidateResult(AuthKind.signed_grant, missing=True),
                     id="Missing"),
        pytest.param(
            "somerandomtoken",
            ValidateResult(AuthKind.signed_grant, missing=True),
            id="Invalid header",
        ),
        pytest.param(
            "token somerandomtoken",
            ValidateResult(AuthKind.signed_grant, missing=True),
            id="Random Token",
        ),
        pytest.param(
            "token " + SIGNATURE_PREFIX + "foo",
            ValidateResult(
                AuthKind.signed_grant,
                error_message="Signed grant could not be validated"),
            id="Invalid token",
        ),
    ],
)
Example #37
class TestConfigLoader:
    def test_load_configuration(self, path: str) -> None:
        config_loader = ConfigLoaderImpl(
            config_search_path=create_config_search_path(path))
        cfg = config_loader.load_configuration(
            config_name="config.yaml",
            strict=False,
            overrides=["abc=123"],
            run_mode=RunMode.RUN,
        )
        with open_dict(cfg):
            del cfg["hydra"]
        assert cfg == {"normal_yaml_config": True, "abc": 123}

    def test_load_with_missing_default(self, path: str) -> None:
        config_loader = ConfigLoaderImpl(
            config_search_path=create_config_search_path(path))
        with pytest.raises(MissingConfigException):
            config_loader.load_configuration(
                config_name="missing-default.yaml",
                overrides=[],
                strict=False,
                run_mode=RunMode.RUN,
            )

    def test_load_with_optional_default(self, path: str) -> None:
        config_loader = ConfigLoaderImpl(
            config_search_path=create_config_search_path(path))
        cfg = config_loader.load_configuration(
            config_name="optional-default.yaml",
            overrides=[],
            strict=False,
            run_mode=RunMode.RUN,
        )
        with open_dict(cfg):
            del cfg["hydra"]
        assert cfg == {"foo": 10}

    @pytest.mark.parametrize(
        "overrides,expected",
        [
            pytest.param(
                [],
                {
                    "pkg1": {
                        "group1_option1": True
                    },
                    "pkg2": {
                        "group1_option1": True
                    }
                },
                id="baseline",
            ),
            pytest.param(
                ["+group1@pkg3=option1"],
                {
                    "pkg1": {
                        "group1_option1": True
                    },
                    "pkg2": {
                        "group1_option1": True
                    },
                    "pkg3": {
                        "group1_option1": True
                    },
                },
                id="append",
            ),
            pytest.param(
                ["~group1@pkg1"],
                {"pkg2": {
                    "group1_option1": True
                }},
                id="delete_package",
            ),
        ],
    )
    def test_override_compose_two_package_one_group(self, path: str,
                                                    overrides: List[str],
                                                    expected: Any) -> None:
        config_loader = ConfigLoaderImpl(
            config_search_path=create_config_search_path(
                f"{path}/package_tests"))
        cfg = config_loader.load_configuration(
            config_name="two_packages_one_group",
            overrides=overrides,
            run_mode=RunMode.RUN,
        )
        with open_dict(cfg):
            del cfg["hydra"]
        assert cfg == expected

    def test_load_adding_group_not_in_default(self, path: str) -> None:
        config_loader = ConfigLoaderImpl(
            config_search_path=create_config_search_path(path))
        cfg = config_loader.load_configuration(
            config_name="optional-default.yaml",
            overrides=["+group2=file1"],
            strict=False,
            run_mode=RunMode.RUN,
        )
        with open_dict(cfg):
            del cfg["hydra"]
        assert cfg == {"foo": 10, "bar": 100}

    def test_change_run_dir_with_override(self, path: str) -> None:
        config_loader = ConfigLoaderImpl(
            config_search_path=create_config_search_path(path))
        cfg = config_loader.load_configuration(
            config_name="overriding_run_dir.yaml",
            overrides=["hydra.run.dir=abc"],
            strict=False,
            run_mode=RunMode.RUN,
        )
        assert cfg.hydra.run.dir == "abc"

    def test_change_run_dir_with_config(self, path: str) -> None:
        config_loader = ConfigLoaderImpl(
            config_search_path=create_config_search_path(path))
        cfg = config_loader.load_configuration(
            config_name="overriding_run_dir.yaml",
            overrides=[],
            strict=False,
            run_mode=RunMode.RUN,
        )
        assert cfg.hydra.run.dir == "cde"

    def test_load_strict(self, path: str) -> None:
        """
        Ensure that in strict mode we can override only things that are already in the config
        :return:
        """
        config_loader = ConfigLoaderImpl(
            config_search_path=create_config_search_path(path))
        # Test that overriding existing things works in strict mode
        cfg = config_loader.load_configuration(
            config_name="compose.yaml",
            overrides=["foo=ZZZ"],
            strict=True,
            run_mode=RunMode.RUN,
        )
        with open_dict(cfg):
            del cfg["hydra"]
        assert cfg == {"foo": "ZZZ", "bar": 100}

        # Test that accessing a key that is not there will fail
        with pytest.raises(AttributeError):
            # noinspection PyStatementEffect
            cfg.not_here

        # Test that bad overrides triggers the KeyError
        with pytest.raises(HydraException):
            config_loader.load_configuration(
                config_name="compose.yaml",
                overrides=["f00=ZZZ"],
                strict=True,
                run_mode=RunMode.RUN,
            )

    def test_load_history(self, path: str) -> None:
        config_loader = ConfigLoaderImpl(
            config_search_path=create_config_search_path(path))
        cfg = config_loader.load_configuration(
            config_name="missing-optional-default.yaml",
            overrides=[],
            run_mode=RunMode.RUN,
        )
        expected = deepcopy(hydra_load_list)
        expected.append(
            LoadTrace(
                config_path="missing-optional-default.yaml",
                package="",
                parent="<root>",
                is_self=True,
                search_path=path,
                provider="main",
            ))

        assert_same_composition_trace(cfg.hydra.composition_trace, expected)

    def test_load_history_with_basic_launcher(self, path: str) -> None:
        config_loader = ConfigLoaderImpl(
            config_search_path=create_config_search_path(path))
        cfg = config_loader.load_configuration(
            config_name="custom_default_launcher.yaml",
            overrides=["hydra/launcher=basic"],
            strict=False,
            run_mode=RunMode.RUN,
        )

        expected = deepcopy(hydra_load_list)
        expected.append(
            LoadTrace(
                config_path="custom_default_launcher.yaml",
                package="",
                parent="<root>",
                search_path=path,
                provider="main",
            ))
        assert_same_composition_trace(cfg.hydra.composition_trace, expected)

    def test_load_yml_file(self, path: str) -> None:
        config_loader = ConfigLoaderImpl(
            config_search_path=create_config_search_path(path))
        cfg = config_loader.load_configuration(config_name="config.yml",
                                               overrides=[],
                                               strict=False,
                                               run_mode=RunMode.RUN)
        with open_dict(cfg):
            del cfg["hydra"]

        assert cfg == {"yml_file_here": True}

    def test_override_with_equals(self, path: str) -> None:
        config_loader = ConfigLoaderImpl(
            config_search_path=create_config_search_path(path))
        cfg = config_loader.load_configuration(
            config_name="config.yaml",
            overrides=["abc='cde=12'"],
            strict=False,
            run_mode=RunMode.RUN,
        )
        with open_dict(cfg):
            del cfg["hydra"]
        assert cfg == OmegaConf.create({
            "normal_yaml_config": True,
            "abc": "cde=12"
        })

    def test_compose_file_with_dot(self, path: str) -> None:
        config_loader = ConfigLoaderImpl(
            config_search_path=create_config_search_path(path))
        cfg = config_loader.load_configuration(
            config_name="compose.yaml",
            overrides=["group1=abc.cde"],
            strict=False,
            run_mode=RunMode.RUN,
        )
        with open_dict(cfg):
            del cfg["hydra"]
        assert cfg == {"abc=cde": None, "bar": 100}

    def test_load_config_with_schema(self, hydra_restore_singletons: Any,
                                     path: str) -> None:

        ConfigStore.instance().store(name="config",
                                     node=TopLevelConfig,
                                     provider="this_test")
        ConfigStore.instance().store(group="db",
                                     name="mysql",
                                     node=MySQLConfig,
                                     provider="this_test")

        config_loader = ConfigLoaderImpl(
            config_search_path=create_config_search_path(path))

        cfg = config_loader.load_configuration(config_name="config",
                                               overrides=["+db=mysql"],
                                               run_mode=RunMode.RUN)

        expected = deepcopy(hydra_load_list)
        expected.append(
            LoadTrace(
                config_path="config",
                package="",
                parent="<root>",
                is_self=False,
                search_path=path,
                provider="main",
            ))
        expected.append(
            LoadTrace(
                config_path="db/mysql",
                package="db",
                parent="<root>",
                is_self=False,
                search_path=path,
                provider="main",
            ))
        assert_same_composition_trace(cfg.hydra.composition_trace, expected)

        with open_dict(cfg):
            del cfg["hydra"]
        assert cfg == {
            "normal_yaml_config": True,
            "db": {
                "driver": "mysql",
                "host": "???",
                "port": "???",
                "user": "******",
                "password": "******",
            },
        }

        # verify illegal modification is rejected at runtime
        with pytest.raises(ValidationError):
            cfg.db.port = "fail"

        # verify illegal override is rejected during load
        with pytest.raises(HydraException):
            config_loader.load_configuration(config_name="db/mysql",
                                             overrides=["db.port=fail"],
                                             run_mode=RunMode.RUN)

    def test_load_config_file_with_schema_validation(
            self, hydra_restore_singletons: Any, path: str) -> None:

        with ConfigStoreWithProvider("this_test") as cs:
            cs.store(name="config", node=TopLevelConfig)
            cs.store(group="db", name="mysql", node=MySQLConfig, package="db")

        config_loader = ConfigLoaderImpl(
            config_search_path=create_config_search_path(path))
        cfg = config_loader.load_configuration(
            config_name="config",
            overrides=["+db=mysql"],
            strict=False,
            run_mode=RunMode.RUN,
        )

        expected = deepcopy(hydra_load_list)
        expected.extend([
            LoadTrace(
                config_path="config",
                package="",
                parent="<root>",
                search_path=path,
                provider="main",
            ),
            LoadTrace(
                config_path="db/mysql",
                package="db",
                parent="<root>",
                search_path=path,
                provider="main",
            ),
        ])
        assert_same_composition_trace(cfg.hydra.composition_trace, expected)

        with open_dict(cfg):
            del cfg["hydra"]
        assert cfg == {
            "normal_yaml_config": True,
            "db": {
                "driver": "mysql",
                "host": "???",
                "port": "???",
                "user": "******",
                "password": "******",
            },
        }

    def test_assign_null(self, hydra_restore_singletons: Any,
                         path: str) -> None:
        config_loader = ConfigLoaderImpl(
            config_search_path=create_config_search_path(path))
        cfg = config_loader.load_configuration(config_name="config.yaml",
                                               overrides=["+abc=null"],
                                               run_mode=RunMode.RUN)
        with open_dict(cfg):
            del cfg["hydra"]
        assert cfg == {"normal_yaml_config": True, "abc": None}

    def test_sweep_config_cache(self, hydra_restore_singletons: Any, path: str,
                                monkeypatch: Any) -> None:
        setup_globals()

        config_loader = ConfigLoaderImpl(
            config_search_path=create_config_search_path(path))
        master_cfg = config_loader.load_configuration(
            config_name="config.yaml",
            strict=False,
            overrides=["+time=${now:%H-%M-%S}", "+home=${env:HOME}"],
            run_mode=RunMode.RUN,
        )

        # trigger resolution by type assertion
        assert type(master_cfg.time) == str
        assert type(master_cfg.home) == str

        master_cfg_cache = OmegaConf.get_cache(master_cfg)
        assert "now" in master_cfg_cache.keys(
        ) and "env" in master_cfg_cache.keys()
        assert master_cfg.home == os.environ["HOME"]

        sweep_cfg = config_loader.load_sweep_config(
            master_config=master_cfg,
            sweep_overrides=["+time=${now:%H-%M-%S}", "+home=${env:HOME}"],
        )

        sweep_cfg_cache = OmegaConf.get_cache(sweep_cfg)
        assert len(
            sweep_cfg_cache.keys()) == 1 and "now" in sweep_cfg_cache.keys()
        assert sweep_cfg_cache["now"] == master_cfg_cache["now"]
        monkeypatch.setenv("HOME", "/another/home/dir/")
        assert sweep_cfg.home == os.getenv("HOME")
Example #38
    ("http://\0www/", "May not contain NUL byte"),

    # Chromium: PARSE_ERROR_INVALID_HOST_WILDCARD
    ("http://*foo/bar", "Invalid host wildcard"),
    ("http://foo.*.bar/baz", "Invalid host wildcard"),
    ("http://fo.*.ba:123/baz", "Invalid host wildcard"),
    ("http://foo.*/bar", "TLD wildcards are not implemented yet"),

    # Chromium: PARSE_ERROR_INVALID_PORT
    ("http://foo:/", "Invalid port: Port is empty"),
    ("http://*.foo:/", "Invalid port: Port is empty"),
    ("http://foo:com/",
     "Invalid port: invalid literal for int() with base 10: 'com'"),
    pytest.param("http://foo:123456/",
                 "Invalid port: Port out of range 0-65535",
                 marks=pytest.mark.skipif(
                     sys.hexversion < 0x03060000,
                     reason="Doesn't show an error on Python 3.5")),
    ("http://foo:80:80/monkey",
     "Invalid port: invalid literal for int() with base 10: '80:80'"),
    ("chrome://foo:1234/bar", "Ports are unsupported with chrome scheme"),

    # Additional tests
    ("http://[", "Invalid IPv6 URL"),
    ("http://[fc2e:bb88::edac]:", "Invalid port: Port is empty"),
    ("http://[fc2e::bb88::edac]", 'Invalid IPv6 address; source was "fc2e::bb88::edac"; host = ""'),
    ("http://[fc2e:0e35:bb88::edac:fc2e:0e35:bb88:edac]", 'Invalid IPv6 address; source was "fc2e:0e35:bb88::edac:fc2e:0e35:bb88:edac"; host = ""'),
    ("http://[fc2e:0e35:bb88:af:edac:fc2e:0e35:bb88:edac]", 'Invalid IPv6 address; source was "fc2e:0e35:bb88:af:edac:fc2e:0e35:bb88:edac"; host = ""'),
    ("http://[127.0.0.1:fc2e::bb88:edac]", 'Invalid IPv6 address; source was "127.0.0.1:fc2e::bb88:edac'),
    ("http://[]:20", "Pattern without host"),
    ("http://[fc2e::bb88", "Invalid IPv6 URL"),
Example #39
try:
    import fastparquet

    _HAVE_FASTPARQUET = True
except ImportError:
    _HAVE_FASTPARQUET = False

pytestmark = pytest.mark.filterwarnings(
    "ignore:RangeIndex.* is deprecated:DeprecationWarning")


# setup engines & skips
@pytest.fixture(params=[
    pytest.param(
        "fastparquet",
        marks=pytest.mark.skipif(not _HAVE_FASTPARQUET,
                                 reason="fastparquet is not installed"),
    ),
    pytest.param(
        "pyarrow",
        marks=pytest.mark.skipif(not _HAVE_PYARROW,
                                 reason="pyarrow is not installed"),
    ),
])
def engine(request):
    return request.param


@pytest.fixture
def pa():
    if not _HAVE_PYARROW:
Example #40
@requires_scipy
@pytest.mark.parametrize(
    "x_new, expected",
    [
        (pd.date_range("2000-01-02", periods=3), [1, 2, 3]),
        (
            np.array([
                np.datetime64("2000-01-01T12:00"),
                np.datetime64("2000-01-02T12:00")
            ]),
            [0.5, 1.5],
        ),
        (["2000-01-01T12:00", "2000-01-02T12:00"], [0.5, 1.5]),
        (["2000-01-01T12:00", "2000-01-02T12:00", "NaT"], [0.5, 1.5, np.nan]),
        (["2000-01-01T12:00"], 0.5),
        pytest.param("2000-01-01T12:00", 0.5, marks=pytest.mark.xfail),
    ],
)
def test_datetime(x_new, expected):
    da = xr.DataArray(
        np.arange(24),
        dims="time",
        coords={"time": pd.date_range("2000-01-01", periods=24)},
    )

    actual = da.interp(time=x_new)
    expected_da = xr.DataArray(
        np.atleast_1d(expected),
        dims=["time"],
        coords={"time": (np.atleast_1d(x_new).astype("datetime64[ns]"))},
    )
                .withColumn('inc_count_c', f.count('c').over(inclusiveWindowSpec)) \
                .withColumn('inc_max_c', f.max('c').over(inclusiveWindowSpec)) \
                .withColumn('inc_min_c', f.min('c').over(inclusiveWindowSpec)) \
                .withColumn('row_num', f.row_number().over(baseWindowSpec))

    assert_gpu_and_cpu_are_equal_collect(
        do_it, conf={'spark.rapids.sql.hasNans': 'false'})


# Test for RANGE queries, with timestamp order-by expressions.
# Non-timestamp order-by columns are currently unsupported for RANGE queries.
# See https://github.com/NVIDIA/spark-rapids/issues/216
@ignore_order
@pytest.mark.parametrize('data_gen', [
    _grpkey_longs_with_timestamps,
    pytest.param(_grpkey_longs_with_nullable_timestamps)
],
                         ids=idfn)
def test_window_aggs_for_ranges(data_gen):
    assert_gpu_and_cpu_are_equal_sql(
        lambda spark: gen_df(spark, data_gen, length=2048),
        "window_agg_table", 'select '
        ' sum(c) over '
        '   (partition by a order by cast(b as timestamp) asc  '
        '       range between interval 1 day preceding and interval 1 day following) as sum_c_asc, '
        ' avg(c) over '
        '   (partition by a order by cast(b as timestamp) asc  '
        '       range between interval 1 day preceding and interval 1 day following) as avg_c_asc, '
        ' max(c) over '
        '   (partition by a order by cast(b as timestamp) desc '
        '       range between interval 2 days preceding and interval 1 days following) as max_c_desc, '
Example #42
import os

import numpy as np
import pytest

from pysip.regressors import FreqRegressor as Regressor
from pysip.statespace import Matern12, Matern52, Periodic, TwTi_RoRiAwAicv, TwTiTm_RoRiRmRbAwAicv
from pysip.utils import load_model, save_model


@pytest.mark.parametrize(
    'reg',
    [
        pytest.param(TwTi_RoRiAwAicv(), id="rc_model"),
        pytest.param(Matern12(), id="gp_model"),
        pytest.param(Periodic() * Matern12(), id="gp_product"),
        pytest.param(Periodic() + Matern12(), id="gp_sum"),
        pytest.param(TwTi_RoRiAwAicv(latent_forces='Qv') <= Matern12(),
                     id="lfm_rc_gp"),
        pytest.param(
            TwTi_RoRiAwAicv(latent_forces='Qv') <= Periodic() * Matern12(),
            id="lfm_rc_gp_product"),
        pytest.param(
            TwTi_RoRiAwAicv(latent_forces='Qv') <= Periodic() + Matern12(),
            id="lfm_rc_gp_sum"),
    ],
)
def test_save_model_to_pickle(reg):
    reg = Regressor(reg)
    reg.ss.parameters.theta = np.random.uniform(1e-1, 1,
                                                len(reg.ss.parameters.theta))
Example #43
import numpy as np
import pytest

from respy import RespyCls
from respy.pre_processing.model_processing import _options_spec_from_attributes
from respy.pre_processing.model_processing import _params_spec_from_attributes
from respy.python.shared.shared_constants import IS_FORTRAN
from respy.python.shared.shared_constants import IS_PARALLELISM_MPI
from respy.python.shared.shared_constants import IS_PARALLELISM_OMP
from respy.python.shared.shared_constants import OPT_EST_PYTH
from respy.python.shared.shared_constants import TOL
from respy.tests.codes.auxiliary import simulate_observed


params_fortran = [0, 1, 2, 3, 4, 5, 6, pytest.param(7, marks=pytest.mark.xfail), 8, 9]

params_python = [
    0,
    1,
    2,
    pytest.param(3, marks=pytest.mark.xfail),
    pytest.param(4, marks=pytest.mark.xfail),
    pytest.param(5, marks=pytest.mark.xfail),
    6,
    pytest.param(7, marks=pytest.mark.xfail),
    pytest.param(8, marks=pytest.mark.xfail),
    pytest.param(9, marks=pytest.mark.xfail),
]

params = params_fortran if IS_FORTRAN else params_python
Example #44
class TestAMQNodeReboot(E2ETest):
    """
    Test case to reboot or shutdown and recovery
    node when amq workload is running

    """
    @pytest.fixture(autouse=True)
    def init_sanity(self):
        """
        Initialize Sanity instance

        """
        self.sanity_helpers = Sanity()

    @pytest.fixture(autouse=True)
    def teardown(self, request, nodes):
        """
        Restart nodes that are in status NotReady
        for situations in which the test failed in between

        """
        def finalizer():

            # Validate all nodes are in READY state
            not_ready_nodes = [
                n for n in get_node_objs() if n.ocp.get_resource_status(n.name)
                == constants.NODE_NOT_READY
            ]
            log.warning(
                f"Nodes in NotReady status found: {[n.name for n in not_ready_nodes]}"
            )
            if not_ready_nodes:
                nodes.restart_nodes_by_stop_and_start(not_ready_nodes)
                wait_for_nodes_status()

            log.info("All nodes are in Ready status")

        request.addfinalizer(finalizer)

    @pytest.fixture()
    def amq_setup(self, amq_factory_fixture):
        """
        Creates amq cluster and run benchmarks
        """
        sc_name = default_storage_class(interface_type=constants.CEPHBLOCKPOOL)
        self.amq, self.threads = amq_factory_fixture(sc_name=sc_name.name)

    @pytest.mark.parametrize(
        argnames=["node_type"],
        argvalues=[
            pytest.param(*["worker"],
                         marks=pytest.mark.polarion_id("OCS-1282")),
            pytest.param(*["master"],
                         marks=pytest.mark.polarion_id("OCS-1281")),
        ],
    )
    def test_amq_after_rebooting_node(self, node_type, nodes, amq_setup):
        """
        Test case to validate that rebooting a node shouldn't affect
        amq workloads running in the background

        """
        # Get all amq pods
        pod_obj_list = get_all_pods(namespace=constants.AMQ_NAMESPACE)

        # Get the node list
        node = get_nodes(node_type, num_of_nodes=1)

        # Reboot one node of the given type
        nodes.restart_nodes(node, wait=False)

        # Wait some time after rebooting the node
        waiting_time = 40
        log.info(f"Waiting {waiting_time} seconds...")
        time.sleep(waiting_time)

        # Validate all nodes and services are in READY state and up
        retry(
            (CommandFailed, TimeoutError, AssertionError,
             ResourceWrongStatusException),
            tries=60,
            delay=15,
        )(ocp.wait_for_cluster_connectivity(tries=400))
        retry(
            (CommandFailed, TimeoutError, AssertionError,
             ResourceWrongStatusException),
            tries=60,
            delay=15,
        )(wait_for_nodes_status(timeout=1800))

        # Check the node are Ready state and check cluster is health ok
        self.sanity_helpers.health_check(tries=40)

        # Check all amq pods are up and running
        assert POD.wait_for_resource(condition="Running",
                                     resource_count=len(pod_obj_list),
                                     timeout=300)

        # Validate the results
        log.info("Validate message run completely")
        for thread in self.threads:
            thread.result(timeout=1800)

    @pytest.mark.polarion_id("OCS-1278")
    def test_amq_after_shutdown_and_recovery_worker_node(
            self, nodes, amq_setup):
        """
        Test case to validate that node shutdown and recovery
        shouldn't affect amq workloads running in the background

        """
        # Get all amq pods
        pod_obj_list = get_all_pods(namespace=constants.AMQ_NAMESPACE)

        # Get the node list
        node = get_nodes(node_type="worker", num_of_nodes=1)

        # Stop one worker node
        nodes.stop_nodes(nodes=node)

        waiting_time = 20
        log.info(f"Waiting for {waiting_time} seconds")
        time.sleep(waiting_time)

        nodes.start_nodes(nodes=node)

        # Validate all nodes are in READY state and up
        retry(
            (CommandFailed, TimeoutError, AssertionError,
             ResourceWrongStatusException),
            tries=30,
            delay=15,
        )(wait_for_nodes_status(timeout=1800))

        # Check the node are Ready state and check cluster is health ok
        self.sanity_helpers.health_check(tries=40)

        # Check all amq pods are up and running
        assert POD.wait_for_resource(condition="Running",
                                     resource_count=len(pod_obj_list),
                                     timeout=300)

        # Validate the results
        log.info("Validate message run completely")
        for thread in self.threads:
            thread.result(timeout=1800)
Example #45
base_completion_list: List[str] = [
    "dict.",
    "dict_prefix=",
    "group=",
    "hydra.",
    "hydra/",
    "list.",
    "list_prefix=",
    "test_hydra/",
]


@pytest.mark.parametrize(
    "line_prefix",
    [
        pytest.param("", id="no_prefix"),
        pytest.param("dict.key1=val1 ", id="prefix")
    ],
)
@pytest.mark.parametrize(
    "line,num_tabs,expected",
    [
        ("", 2, base_completion_list),
        (" ", 2, base_completion_list),
        ("dict", 2, ["dict.", "dict_prefix="]),
        ("dict.", 3, ["dict.key1=", "dict.key2=", "dict.key3="]),
        ("dict.key", 2, ["dict.key1=", "dict.key2=", "dict.key3="]),
        ("dict.key1=", 2, ["dict.key1=val1"]),
        ("dict.key3=", 2, ["dict.key3="]),  # no value because dict.key3 is ???
        ("list", 2, ["list.", "list_prefix="]),
        ("list.", 2, ["list.0=", "list.1=", "list.2="]),
Example #46
        search_path="structured://",
        provider="hydra",
    ),
]


def assert_same_composition_trace(composition_trace: Any,
                                  expected: Any) -> None:
    actual = [LoadTrace(**x) for x in composition_trace]
    assert actual == expected


@pytest.mark.parametrize(
    "path",
    [
        pytest.param("file://hydra/test_utils/configs", id="file"),
        pytest.param("pkg://hydra.test_utils.configs", id="pkg"),
    ],
)
class TestConfigLoader:
    def test_load_configuration(self, path: str) -> None:
        config_loader = ConfigLoaderImpl(
            config_search_path=create_config_search_path(path))
        cfg = config_loader.load_configuration(
            config_name="config.yaml",
            strict=False,
            overrides=["abc=123"],
            run_mode=RunMode.RUN,
        )
        with open_dict(cfg):
            del cfg["hydra"]
Example #47
    time = t[column].time()
    expr = time.between('01:00', '02:00', timezone=tz)
    result = expr.compile()
    tm.assert_series_equal(result.compute(), expected.compute())

    # Test that casting behavior is the same as using the timezone kwarg
    ts = t[column].cast(dt.Timestamp(timezone=tz))
    expr = ts.time().between('01:00', '02:00')
    result = expr.compile()
    tm.assert_series_equal(result.compute(), expected.compute())


@pytest.mark.parametrize(
    ('op', 'expected'),
    [
        param(lambda x, y: x + y, lambda x, y: x.values * 2, id='add'),
        param(lambda x, y: x - y, lambda x, y: x.values - y.values, id='sub'),
        param(lambda x, y: x * 2, lambda x, y: x.values * 2, id='mul'),
        param(
            lambda x, y: x // 2,
            lambda x, y: x.values // 2,
            id='floordiv',
            marks=pytest.mark.xfail(
                parse_version(pd.__version__) < parse_version('0.23.0'),
                raises=TypeError,
                reason=(
                    'pandas versions less than 0.23.0 do not support floor '
                    'division involving timedelta columns'),
            ),
        ),
    ],
Example #48
import numpy as np
import pytest

from pandas.compat import lrange
import pandas.util._test_decorators as td

import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, date_range
from pandas.core.computation.check import _NUMEXPR_INSTALLED
from pandas.tests.frame.common import TestData
from pandas.util.testing import (
    assert_frame_equal, assert_series_equal, makeCustomDataframe as mkdf)

PARSERS = 'python', 'pandas'
ENGINES = 'python', pytest.param('numexpr', marks=td.skip_if_no_ne)


@pytest.fixture(params=PARSERS, ids=lambda x: x)
def parser(request):
    return request.param


@pytest.fixture(params=ENGINES, ids=lambda x: x)
def engine(request):
    return request.param


def skip_if_no_pandas_parser(parser):
    if parser != 'pandas':
        pytest.skip("cannot evaluate with parser {0!r}".format(parser))
Example #49
    ".. start short_desc\n\n..end short_desc",
    f".. start short_desc\n{lorem.paragraph()}\n..end short_desc"
])
def test_short_desc_regex(value):
    m = short_desc_regex.sub(value, "hello world")
    assert m == "hello world"


@pytest.mark.parametrize("kwargs", [
    pytest.param(dict(
        username="******",
        repo_name="REPO_NAME",
        version="1.2.3",
        conda=True,
        tests=True,
        docs=True,
        pypi_name="PYPI_NAME",
        docker_shields=False,
        docker_name='',
        platforms=["Windows", "macOS", "Linux"],
    ),
                 id="case_1"),
    pytest.param(dict(
        username="******",
        repo_name="REPO_NAME",
        version="1.2.3",
        conda=False,
        tests=False,
        docs=False,
        pypi_name="PYPI_NAME",
        unique_name="_UNIQUE_NAME",