Example #1
def browser_implementation_quits(item):
    appliance = find_appliance(item, require=False)
    if appliance is not None:
        for implementation in [appliance.browser, appliance.ssui]:
            implementation.quit_browser()
    else:
        logger.debug('Browser isolation specified, but no appliance browsers available to quit on')
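
A note on the two calling styles seen throughout these examples: inferred from how the snippets use the return value (not from any documented contract), find_appliance(item) is expected to resolve to an appliance and is used directly (as in Example #2), while find_appliance(item, require=False) may return None, which is why callers such as this one guard with an explicit is-not-None check.
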
Example #2
def pytest_runtest_teardown(item, nextitem):
    name, location = get_test_idents(item)
    app = find_appliance(item)
    ip = app.hostname
    fire_art_test_hook(
        item, 'finish_test',
        slaveid=store.slaveid, ip=ip, wait_for_task=True)
    fire_art_test_hook(item, 'sanitize', words=words)
    jenkins_data = {
        'build_url': os.environ.get('BUILD_URL'),
        'build_number': os.environ.get('BUILD_NUMBER'),
        'git_commit': os.environ.get('GIT_COMMIT'),
        'job_name': os.environ.get('JOB_NAME')
    }
    param_dict = None
    try:
        caps = app.browser.widgetastic.selenium.capabilities
        param_dict = {
            'browserName': caps['browserName'],
            'browserPlatform': caps['platform'],
            'browserVersion': caps['version']
        }
    except Exception:
        logger.exception("Couldn't grab browser env_vars")
        pass  # already set param_dict

    fire_art_test_hook(
        item, 'ostriz_send', env_params=param_dict,
        slaveid=store.slaveid, polarion_ids=extract_polarion_ids(item), jenkins=jenkins_data)
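
Note that this variant indexes the legacy Selenium capability keys ('platform', 'version'), which newer W3C-compliant drivers may not report; the broad except clause then swallows the resulting KeyError and leaves param_dict as None. Example #28 further down rewrites the same lookup with caps.get(...) fallbacks.
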
Example #3
    def pytest_runtest_logreport(self, report):
        """pytest runtest logreport hook

        - sends serialized log reports to the master

        """
        self.send_event("runtest_logreport", report=serialize_report(report))
        if report.when == 'teardown':
            path, lineno, domaininfo = report.location
            test_status = _test_status(_format_nodeid(report.nodeid, False))
            if test_status == "failed":
                appliance = find_appliance(self)
                try:
                    self.log.info(
                        "Managed providers: {}".format(
                            ", ".join([
                                prov.key for prov in
                                appliance.managed_known_providers]))
                    )
                except KeyError as ex:
                    if 'ext_management_systems' in ex.msg:
                        self.log.warning("Unable to query ext_management_systems table; DB issue")
                    else:
                        raise
            self.log.info(log.format_marker('{} result: {}'.format(_format_nodeid(report.nodeid),
                                                                   test_status)),
                          extra={'source_file': path, 'source_lineno': lineno})
        if report.outcome == "skipped":
            self.log.info(log.format_marker(report.longreprtext))
Example #4
    def pytest_runtest_logreport(self, report):
        """pytest runtest logreport hook

        - sends serialized log reports to the master

        """
        self.send_event("runtest_logreport", report=serialize_report(report))
        if report.when == 'teardown':
            path, lineno, domaininfo = report.location
            test_status = _test_status(_format_nodeid(report.nodeid, False))
            if test_status == "failed":
                appliance = find_appliance(self)
                try:
                    self.log.info("Managed providers: {}".format(", ".join([
                        prov.key for prov in appliance.managed_known_providers
                    ])))
                except KeyError as ex:
                    if 'ext_management_systems' in ex.msg:
                        self.log.warning(
                            "Unable to query ext_management_systems table; DB issue"
                        )
                    else:
                        raise
            self.log.info(log.format_marker('{} result: {}'.format(
                _format_nodeid(report.nodeid), test_status)),
                          extra={
                              'source_file': path,
                              'source_lineno': lineno
                          })
        if report.outcome == "skipped":
            self.log.info(log.format_marker(report.longreprtext))
Example #5
def pytest_runtest_teardown(item, nextitem):
    yield
    if item.config.getoption("browser_isolation"):
        appliance = find_appliance(item, require=False)
        if appliance is not None:
            for implementation in [appliance.browser, appliance.ssui]:
                implementation.quit_browser()


def pytest_generate_tests(metafunc):
    """
    Build a list of tuples containing (group_name, context)
    Returns:
        tuple containing (group_name, context)
        where group_name is a string and context is ViaUI/SSUI
    """
    appliance = find_appliance(metafunc)
    parameter_list = []
    id_list = []
    # TODO: Include SSUI role_access dict and VIASSUI context
    role_access_ui = VersionPick({
        Version.lowest(): role_access_ui_58z,
        '5.9': role_access_ui_59z,
        '5.10': role_access_ui_510z
    }).pick(appliance.version)
    logger.info('Using the role access dict: %s', role_access_ui)
    roles_and_context = [(
        role_access_ui, ViaUI)
    ]
    for role_access, context in roles_and_context:
        for group in role_access.keys():
            parameter_list.append((group, role_access, context))
            id_list.append('{}-{}'.format(group, context))
    metafunc.parametrize('group_name, role_access, context', parameter_list)
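
The VersionPick mapping above chooses the role-access dict appropriate for the appliance version. As a rough mental model (a simplified sketch of the assumed semantics, not the helper's real implementation), the pick resolves to the value keyed by the highest version that does not exceed the appliance version:

def pick_for_version(mapping, current):
    # Simplified, assumed semantics: the highest mapped version <= current wins.
    def as_tuple(v):
        return tuple(int(p) for p in str(v).split('.'))
    eligible = [k for k in mapping if as_tuple(k) <= as_tuple(current)]
    return mapping[max(eligible, key=as_tuple)] if eligible else None

# pick_for_version({'5.8': 'dict_58z', '5.9': 'dict_59z', '5.10': 'dict_510z'}, '5.10.2') -> 'dict_510z'
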
Example #7
def pytest_runtest_teardown(item, nextitem):
    yield
    if item.config.getoption("browser_isolation"):
        appliance = find_appliance(item, require=False)
        if appliance is not None:
            for implementation in [appliance.browser, appliance.ssui]:
                implementation.quit_browser()


def pytest_generate_tests(metafunc):
    argnames, argvalues, idlist = testgen.providers_by_class(
        metafunc, [VMwareProvider])
    argnames = argnames + ["_host_provider"]

    new_idlist = []
    new_argvalues = []

    for i, argvalue_tuple in enumerate(argvalues):
        args = dict(zip(argnames, argvalue_tuple))
        # TODO
        # All this should be replaced with a proper ProviderFilter passed to testgen.providers()
        if args['provider'].type != "virtualcenter":
            continue
        hosts = args['provider'].data.get("hosts", [])
        if not hosts:
            continue

        version = args['provider'].data.get("version")
        if version is None:
            # No version, no test
            continue
        if Version(version) < "5.0":
            # Ignore lesser than 5
            continue

        host = hosts[0]
        ip_address = resolve_hostname(host["name"])
        endpoint = VirtualCenterEndpoint(
            credentials=host["credentials"]["default"],
            hostname=host["name"],
            ip_address=ip_address)
        # Mock provider data
        provider_data = {}
        provider_data.update(args['provider'].data)
        provider_data["name"] = host["name"]
        provider_data["hostname"] = host["name"]
        provider_data["ipaddress"] = ip_address
        provider_data.pop("host_provisioning", None)
        provider_data["hosts"] = [host]
        provider_data["discovery_range"] = {}
        provider_data["discovery_range"]["start"] = ip_address
        provider_data["discovery_range"]["end"] = ip_address
        appliance = find_appliance(metafunc, require=False)
        host_provider = appliance.collections.infra_providers.instantiate(
            prov_class=VMwareProvider,
            name=host["name"],
            endpoints=endpoint,
            provider_data=provider_data)
        argvalues[i].append(host_provider)
        idlist[i] = "{}/{}".format(args['provider'].key, host["name"])
        new_idlist.append(idlist[i])
        new_argvalues.append(argvalues[i])

    testgen.parametrize(metafunc,
                        argnames,
                        new_argvalues,
                        ids=new_idlist,
                        scope="module")
Example #9
def resolve_blockers(item, blockers):
    if not isinstance(blockers, (list, tuple, set)):
        raise ValueError("Type of the 'blockers' parameter must be one of: list, tuple, set")

    # Prepare the global env for the kwarg insertion
    appliance = find_appliance(item)
    global_env = dict(
        appliance_version=appliance.version,
        appliance_downstream=appliance.is_downstream,
        item=item,
        blockers=blockers,
    )
    # We will now extend the env with fixtures, so they can be used in the guard functions
    # We will however add only those that are not in the global_env otherwise we could overwrite
    # our own stuff.
    params = extract_fixtures_values(item)
    for funcarg, value in params.items():
        if funcarg not in global_env:
            global_env[funcarg] = value

    # Check blockers
    use_blockers = []
    # Bugzilla shortcut
    blockers = map(lambda b: "BZ#{}".format(b) if isinstance(b, int) else b, blockers)
    for blocker in map(Blocker.parse, blockers):
        if blocker.blocks:
            use_blockers.append(blocker)
    # Unblocking
    discard_blockers = set([])
    for blocker in use_blockers:
        unblock_func = kwargify(blocker.kwargs.get("unblock"))
        local_env = {"blocker": blocker}
        local_env.update(global_env)
        if unblock_func(**local_env):
            discard_blockers.add(blocker)
    for blocker in discard_blockers:
        use_blockers.remove(blocker)
    # We now have those that block testing, so we have to skip
    # Let's go in the order that they were added
    # Custom actions first
    for blocker in use_blockers:
        if "custom_action" in blocker.kwargs:
            action = kwargify(blocker.kwargs["custom_action"])
            local_env = {"blocker": blocker}
            local_env.update(global_env)
            action(**local_env)
    # And then skip
    if use_blockers:
        bugs = [bug.bug_id for bug in use_blockers if hasattr(bug, "bug_id")]
        skip_data = {'type': 'blocker', 'reason': bugs}
        fire_art_test_hook(item, 'skip_test', skip_data=skip_data)
        pytest.skip("Skipping due to these blockers:\n{}".format(
            "\n".join(
                "- {}".format(str(blocker))
                for blocker
                in use_blockers
            )
        ))
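
The "Bugzilla shortcut" line only normalizes bare integers into BZ references before Blocker.parse sees them; with hypothetical inputs:

blockers = [1234567, 'BZ#7654321', 'some-other-blocker-spec']
blockers = map(lambda b: "BZ#{}".format(b) if isinstance(b, int) else b, blockers)
# list(blockers) == ['BZ#1234567', 'BZ#7654321', 'some-other-blocker-spec']
# non-integer specs pass through unchanged for Blocker.parse to interpret
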
Example #11
def uncollectif(item):
    """ Evaluates if an item should be uncollected

    Tests markers against a supplied lambda from the markers object to determine
    if the item should be uncollected or not.
    """
    from cfme.utils.appliance import find_appliance

    from cfme.utils.pytest_shortcuts import extract_fixtures_values
    markers = item.get_marker('uncollectif')
    if not markers:
        return False, None
    for mark in markers:
        log_msg = 'Trying uncollecting {}: {}'.format(
            item.name, mark.kwargs.get('reason', 'No reason given'))
        logger.debug(log_msg)
        try:
            arg_names = inspect.getargspec(get_uncollect_function(mark)).args
        except TypeError:
            logger.debug(log_msg)
            return not bool(mark.args[0]), mark.kwargs.get(
                'reason', 'No reason given')

        app = find_appliance(item, require=False)
        if app:
            global_vars = {'appliance': app}
        else:
            logger.info("while uncollecting %s - appliance not known", item)
            global_vars = {}

        try:
            values = extract_fixtures_values(item)
            values.update(global_vars)
            # The test has already been uncollected
            if arg_names and not values:
                return True, None
            args = [values[arg] for arg in arg_names]
        except KeyError:
            missing_argnames = list(
                set(arg_names) - set(item._request.funcargnames))
            func_name = item.name
            if missing_argnames:
                raise Exception(
                    "You asked for a fixture which wasn't in the function {} "
                    "prototype {}".format(func_name, missing_argnames))
            else:
                raise Exception(
                    "Failed to uncollect {}, best guess a fixture wasn't "
                    "ready".format(func_name))
        retval = mark.args[0](*args)
        if retval:
            # shortcut
            return retval, mark.kwargs.get('reason', "No reason given")
        else:
            return False, None

    else:
        return False, None
Example #12
def uncollectif(item):
    """ Evaluates if an item should be uncollected

    Tests markers against a supplied lambda from the markers object to determine
    if the item should be uncollected or not.
    """
    from cfme.utils.appliance import find_appliance

    from cfme.utils.pytest_shortcuts import extract_fixtures_values
    for _, mark in item.iter_markers_with_node('uncollectif') or []:
        reason = mark.kwargs.get('reason')
        if reason is None:
            raise ValueError(REASON_REQUIRED.format(item.name))
        log_msg = f'Trying uncollectif {item.name}: {reason}'
        logger.debug(log_msg)
        try:
            arg_names = inspect.signature(
                mark.args[0]).parameters.keys()  # odict_keys
        except TypeError:
            logger.exception(log_msg)
            return not bool(mark.args[0]), reason

        app = find_appliance(item, require=False)
        if app:
            global_vars = {'appliance': app}
        else:
            logger.info("while uncollecting %s - appliance not known", item)
            global_vars = {}

        try:
            values = extract_fixtures_values(item)
            values.update(global_vars)
            # The test has already been uncollected
            if arg_names and not values:
                return True, None
            args = [values[arg] for arg in arg_names]
        except KeyError:
            missing_argnames = list(
                set(arg_names) - set(item._request.fixturenames))
            func_name = item.name
            if missing_argnames:
                raise Exception(
                    f'uncollectif lambda requesting fixture {missing_argnames} '
                    f'which is not in the test function {func_name} signature')
            else:
                raise Exception(f'uncollectif {func_name} hit KeyError, '
                                'best guess a fixture was not ready')
        retval = mark.args[0](*args)
        if retval:
            # shortcut
            return retval, reason
        else:
            return False, None
    else:
        # no uncollect markers
        return False, None
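
This newer variant swaps the deprecated inspect.getargspec for inspect.signature; for a hypothetical uncollectif lambda the introspection yields the fixture names it requests:

import inspect

condition = lambda appliance, provider: False   # hypothetical uncollectif condition
arg_names = inspect.signature(condition).parameters.keys()
# odict_keys(['appliance', 'provider']) -- the fixtures whose values are looked up below
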
Example #13
def pytest_runtest_setup(item):
    from cfme.utils.appliance import (
        DummyAppliance, )

    appliance = find_appliance(item, require=False)
    if isinstance(appliance, DummyAppliance):
        return

    if set(getattr(item, 'fixturenames', [])) & browser_fixtures:
        cfme.utils.browser.ensure_browser_open()
Example #14
def pytest_generate_tests(metafunc):
    argnames, argvalues, idlist = testgen.providers_by_class(metafunc, [VMwareProvider])
    argnames = argnames + ["_host_provider"]

    new_idlist = []
    new_argvalues = []

    for i, argvalue_tuple in enumerate(argvalues):
        args = dict(zip(argnames, argvalue_tuple))
        # TODO
        # All this should be replaced with a proper ProviderFilter passed to testgen.providers()
        if args['provider'].type != "virtualcenter":
            continue
        hosts = args['provider'].data.get("hosts", [])
        if not hosts:
            continue

        version = args['provider'].data.get("version")
        if version is None:
            # No version, no test
            continue
        if Version(version) < "5.0":
            # Ignore lesser than 5
            continue

        host = hosts[0]
        ip_address = resolve_hostname(host["name"])
        endpoint = VirtualCenterEndpoint(
            credentials=host["credentials"]["default"],
            hostname=host["name"],
            ip_address=ip_address
        )
        # Mock provider data
        provider_data = {}
        provider_data.update(args['provider'].data)
        provider_data["name"] = host["name"]
        provider_data["hostname"] = host["name"]
        provider_data["ipaddress"] = ip_address
        provider_data.pop("host_provisioning", None)
        provider_data["hosts"] = [host]
        provider_data["discovery_range"] = {}
        provider_data["discovery_range"]["start"] = ip_address
        provider_data["discovery_range"]["end"] = ip_address
        appliance = find_appliance(metafunc, require=False)
        host_provider = appliance.collections.infra_providers.instantiate(
            prov_class=VMwareProvider,
            name=host["name"],
            endpoints=endpoint,
            provider_data=provider_data)
        argvalues[i].append(host_provider)
        idlist[i] = "{}/{}".format(args['provider'].key, host["name"])
        new_idlist.append(idlist[i])
        new_argvalues.append(argvalues[i])

    testgen.parametrize(metafunc, argnames, new_argvalues, ids=new_idlist, scope="module")
Example #15
def uncollectif(item):
    """ Evaluates if an item should be uncollected

    Tests markers against a supplied lambda from the markers object to determine
    if the item should be uncollected or not.
    """
    from cfme.utils.appliance import find_appliance

    from cfme.utils.pytest_shortcuts import extract_fixtures_values
    markers = item.get_marker('uncollectif')
    if not markers:
        return False, None
    for mark in markers:
        log_msg = 'Trying uncollecting {}: {}'.format(
            item.name,
            mark.kwargs.get('reason', 'No reason given'))
        logger.debug(log_msg)
        try:
            arg_names = inspect.getargspec(get_uncollect_function(mark)).args
        except TypeError:
            logger.debug(log_msg)
            return not bool(mark.args[0]), mark.kwargs.get('reason', 'No reason given')

        app = find_appliance(item, require=False)
        if app:
            global_vars = {'appliance': app}
        else:
            logger.info("while uncollecting %s - appliance not known", item)
            global_vars = {}

        try:
            values = extract_fixtures_values(item)
            values.update(global_vars)
            # The test has already been uncollected
            if arg_names and not values:
                return True, None
            args = [values[arg] for arg in arg_names]
        except KeyError:
            missing_argnames = list(set(arg_names) - set(item._request.funcargnames))
            func_name = item.name
            if missing_argnames:
                raise Exception("You asked for a fixture which wasn't in the function {} "
                                "prototype {}".format(func_name, missing_argnames))
            else:
                raise Exception("Failed to uncollect {}, best guess a fixture wasn't "
                                "ready".format(func_name))
        retval = mark.args[0](*args)
        if retval:
            # shortcut
            return retval, mark.kwargs.get('reason', "No reason given")
        else:
            return False, None

    else:
        return False, None
Example #16
def pytest_runtest_setup(item):
    from cfme.utils.appliance import (
        DummyAppliance,
    )

    appliance = find_appliance(item, require=False)
    if isinstance(appliance, DummyAppliance):
        return

    if set(getattr(item, 'fixturenames', [])) & browser_fixtures:
        cfme.utils.browser.ensure_browser_open()
Example #17
def handle_assert_artifacts(request, fail_message=None):
    appliance = find_appliance(request)
    if isinstance(appliance, DummyAppliance):
        return
    if not fail_message:
        short_tb = '{}'.format(sys.exc_info()[1])
        short_tb = base64_from_text(short_tb)
        var_tb = traceback.format_tb(sys.exc_info()[2])
        full_tb = "".join(var_tb)
        full_tb = base64_from_text(full_tb)

    else:
        short_tb = full_tb = base64_from_text(fail_message)

    try:
        ss = cfme.utils.browser.browser().get_screenshot_as_base64()
        ss_error = None
    except Exception as b_ex:
        ss = None
        if str(b_ex):
            ss_error = '{}: {}'.format(type(b_ex).__name__, str(b_ex))
        else:
            ss_error = type(b_ex).__name__
    if ss_error:
        ss_error = base64_from_text(ss_error)

    # A simple id to match the artifacts together
    sa_id = "softassert-{}".format(fauxfactory.gen_alpha(length=3).upper())
    from cfme.fixtures.pytest_store import store
    node = request.node

    fire_art_test_hook(
        node, 'filedump',
        description="Soft Assert Traceback", contents=full_tb,
        file_type="soft_traceback", display_type="danger", display_glyph="align-justify",
        contents_base64=True, group_id=sa_id, slaveid=store.slaveid)
    fire_art_test_hook(
        node, 'filedump',
        description="Soft Assert Short Traceback", contents=short_tb,
        file_type="soft_short_tb", display_type="danger", display_glyph="align-justify",
        contents_base64=True, group_id=sa_id, slaveid=store.slaveid)
    if ss is not None:
        fire_art_test_hook(
            node, 'filedump',
            description="Soft Assert Exception screenshot",
            file_type="screenshot", mode="wb", contents_base64=True, contents=ss,
            display_glyph="camera", group_id=sa_id, slaveid=store.slaveid)
    if ss_error is not None:
        fire_art_test_hook(
            node, 'filedump',
            description="Soft Assert Screenshot error", mode="w",
            contents_base64=True, contents=ss_error, display_type="danger", group_id=sa_id,
            slaveid=store.slaveid)
Example #18
def handle_assert_artifacts(request, fail_message=None):
    appliance = find_appliance(request)
    if isinstance(appliance, DummyAppliance):
        return
    if not fail_message:
        short_tb = '{}'.format(sys.exc_info()[1])
        short_tb = short_tb.encode('base64')
        var_tb = traceback.format_tb(sys.exc_info()[2])
        full_tb = "".join(var_tb)
        full_tb = full_tb.encode('base64')

    else:
        short_tb = full_tb = fail_message.encode('base64')

    try:
        ss = cfme.utils.browser.browser().get_screenshot_as_base64()
        ss_error = None
    except Exception as b_ex:
        ss = None
        if str(b_ex):
            ss_error = '{}: {}'.format(type(b_ex).__name__, str(b_ex))
        else:
            ss_error = type(b_ex).__name__
    if ss_error:
        ss_error = ss_error.encode('base64')

    # A simple id to match the artifacts together
    sa_id = "softassert-{}".format(fauxfactory.gen_alpha(length=3).upper())
    from cfme.fixtures.pytest_store import store
    node = request.node

    fire_art_test_hook(
        node, 'filedump',
        description="Soft Assert Traceback", contents=full_tb,
        file_type="soft_traceback", display_type="danger", display_glyph="align-justify",
        contents_base64=True, group_id=sa_id, slaveid=store.slaveid)
    fire_art_test_hook(
        node, 'filedump',
        description="Soft Assert Short Traceback", contents=short_tb,
        file_type="soft_short_tb", display_type="danger", display_glyph="align-justify",
        contents_base64=True, group_id=sa_id, slaveid=store.slaveid)
    if ss is not None:
        fire_art_test_hook(
            node, 'filedump',
            description="Soft Assert Exception screenshot",
            file_type="screenshot", mode="wb", contents_base64=True, contents=ss,
            display_glyph="camera", group_id=sa_id, slaveid=store.slaveid)
    if ss_error is not None:
        fire_art_test_hook(
            node, 'filedump',
            description="Soft Assert Screenshot error", mode="w",
            contents_base64=True, contents=ss_error, display_type="danger", group_id=sa_id,
            slaveid=store.slaveid)
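
The two handle_assert_artifacts variants differ only in how they base64-encode the artifacts: this older one relies on the Python 2-only str.encode('base64') codec, while the newer one above (Example #17) goes through a base64_from_text helper. Assuming that helper is a thin wrapper over the stdlib (an assumption, not its actual source), a Python 3 equivalent is roughly:

import base64

def base64_from_text(text):
    # Assumed behaviour: UTF-8 encode the text, base64 it, return an ASCII str
    return base64.b64encode(text.encode('utf-8')).decode('ascii')
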
Example #19
def shutdown(config):
    app = find_appliance(config, require=False)
    if app is not None:
        with lock:
            proc = config._art_proc
            if proc and proc.returncode is None:
                if not store.slave_manager:
                    write_line('collecting artifacts')
                    fire_art_hook(config, 'finish_session')
                if not store.slave_manager:
                    config._art_client.terminate()
                    proc.wait()
Example #20
def shutdown(config):
    app = find_appliance(config, require=False)
    if app is not None:
        with lock:
            proc = config._art_proc
            if proc and proc.returncode is None:
                if not store.slave_manager:
                    write_line('collecting artifacts')
                    fire_art_hook(config, 'finish_session')
                fire_art_hook(config, 'teardown_merkyl',
                              ip=app.hostname)
                if not store.slave_manager:
                    config._art_client.terminate()
                    proc.wait()
Example #21
def pytest_generate_tests(metafunc):
    """
    Build a list of tuples containing (group_name, context)
    Returns:
        tuple containing (group_name, context)
        where group_name is a string and context is ViaUI/SSUI
    """
    appliance = find_appliance(metafunc)
    parameter_list = []
    # TODO: Include SSUI role_access dict and VIASSUI context
    roles_and_context = [(role_access_ui_59z if appliance.version >= '5.9' else
                          role_access_ui_58z, ViaUI)]
    for group_dict, context in roles_and_context:
        parameter_list.extend([(group, context)
                               for group in group_dict.keys()])
    metafunc.parametrize('group_name, context', parameter_list)
Example #22
def shutdown(config):
    app = find_appliance(config, require=False)
    if app is not None:
        with lock:
            proc = config._art_proc
            if proc:
                if not store.slave_manager:
                    write_line('collecting artifacts')
                    fire_art_hook(config, 'finish_session')
                fire_art_hook(config, 'teardown_merkyl',
                              ip=app.hostname)
                if not store.slave_manager:
                    config._art_client.terminate()
                    proc = config._art_proc
                    if proc:
                        proc.wait()


def pytest_generate_tests(metafunc):
    """
    Build a list of tuples containing (group_name, context)
    Returns:
        tuple containing (group_name, context)
        where group_name is a string and context is ViaUI/SSUI
    """
    appliance = find_appliance(metafunc)
    parameter_list = []
    # TODO: Include SSUI role_access dict and VIASSUI context
    roles_and_context = [(
        role_access_ui_59z if appliance.version >= '5.9' else role_access_ui_58z, ViaUI)
    ]
    for group_dict, context in roles_and_context:
        parameter_list.extend([(group, context) for group in group_dict.keys()])
    metafunc.parametrize('group_name, context', parameter_list)
Example #24
def add_server_roles(item, server_roles, server_roles_mode="add"):
    # Disable all server roles
    # and then figure out which ones should be enabled
    from cfme.utils.appliance import find_appliance
    current_appliance = find_appliance(item)
    server_settings = current_appliance.server.settings
    roles_with_vals = {k: False for k in available_roles}
    if server_roles is None:
        # Only user interface
        roles_with_vals['user_interface'] = True
    elif server_roles == "default":
        # The ones specified in YAML
        roles_list = cfme_data["server_roles"]["sets"]["default"]
        roles_with_vals.update({k: True for k in roles_list})
    elif server_roles_mode == "add":
        # The ones that are already enabled and enable/disable the ones specified
        # -server_role, +server_role or server_role
        roles_with_vals = server_settings.server_roles_db
        if isinstance(server_roles, six.string_types):
            server_roles = server_roles.split(' ')
        for role in server_roles:
            if role.startswith('-'):
                roles_with_vals[role[1:]] = False
            elif role.startswith('+'):
                roles_with_vals[role[1:]] = True
            else:
                roles_with_vals[role] = True
    elif server_roles_mode == "cfmedata":
        roles_list = cfme_data
        # Drills down into cfme_data YAML by selector, expecting a list
        # of roles at the end. A KeyError here probably means the YAML
        # selector is wrong
        for selector in server_roles:
            roles_list = roles_list[selector]
        roles_with_vals.update({k: True for k in roles_list})
    else:
        raise Exception('No server role changes defined.')

    if not available_roles.issuperset(set(roles_with_vals)):
        unknown_roles = ', '.join(set(roles_with_vals) - available_roles)
        raise Exception('Unknown server role(s): {}'.format(unknown_roles))

    server_settings.update_server_roles_db(roles_with_vals)
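
In the default "add" mode the server_roles argument is a whitespace-separated mix of bare, '+'-prefixed and '-'-prefixed role names layered over the roles already enabled; with hypothetical role names the loop behaves like this:

roles_with_vals = {'smartproxy': True}                 # pretend this came from server_roles_db
server_roles = '+ems_inventory -smartproxy reporting'  # hypothetical request
for role in server_roles.split(' '):
    if role.startswith('-'):
        roles_with_vals[role[1:]] = False   # explicit disable
    elif role.startswith('+'):
        roles_with_vals[role[1:]] = True    # explicit enable
    else:
        roles_with_vals[role] = True        # bare names enable too
# {'smartproxy': False, 'ems_inventory': True, 'reporting': True}
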
Example #25
def pytest_generate_tests(metafunc):
    """The following lines generate appliance versions based from the current build.
    Appliance version is split and z-version is picked out for generating each version
    and appending it to the empty versions list"""
    version = find_appliance(metafunc).version
    versions = []

    old_version_pytest_arg = metafunc.config.getoption('old_version')
    if old_version_pytest_arg == 'same':
        versions.append(version)
    elif old_version_pytest_arg is None:
        split_ver = str(version).split(".")
        try:
            z_version = int(split_ver[2])
        except (IndexError, ValueError) as e:
            logger.exception("Couldn't parse version: %s, skipping", e)
            versions.append(
                pytest.param(
                    "bad:{!r}".format(version),
                    marks=pytest.mark.uncollect(
                        reason='Could not parse z_version from: {}'.format(
                            version))))
        else:
            z_version = z_version - 1
            if z_version < 0:
                reason_str = ('No previous z-stream version to update from: {}'
                              .format(version))
                logger.debug(reason_str)
                versions.append(
                    pytest.param(
                        "bad:{!r}".format(version),
                        marks=pytest.mark.uncollect(reason=reason_str)))
            else:
                versions.append(
                    "{split_ver[0]}.{split_ver[1]}.{z_version}".format(
                        split_ver=split_ver, z_version=z_version))
    else:
        versions.append(old_version_pytest_arg)
    metafunc.parametrize('old_version', versions, indirect=True)
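
Concretely, the version handling above only decrements the z-stream digit of the current build:

version = '5.10.2'                   # hypothetical appliance version
split_ver = str(version).split(".")  # ['5', '10', '2']
z_version = int(split_ver[2]) - 1    # 1
old_version = "{split_ver[0]}.{split_ver[1]}.{z_version}".format(split_ver=split_ver, z_version=z_version)
# '5.10.1'; a negative result (starting from a '.0' build) is uncollected instead
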
Example #26
def pytest_generate_tests(metafunc):
    """
    Build a list of tuples containing (group_name, context)
    Returns:
        tuple containing (group_name, context)
        where group_name is a string and context is ViaUI/SSUI
    """
    appliance = find_appliance(metafunc)
    parameter_list = []
    id_list = []
    # TODO: Include SSUI role_access dict and VIASSUI context
    role_access_ui = VersionPicker({
        Version.lowest(): role_access_ui_59z,
        '5.10': role_access_ui_510z
    }).pick(appliance.version)
    logger.info('Using the role access dict: %s', role_access_ui)
    roles_and_context = [(role_access_ui, ViaUI)]
    for role_access, context in roles_and_context:
        for group in role_access.keys():
            parameter_list.append((group, role_access, context))
            id_list.append('{}-{}'.format(group, context))
    metafunc.parametrize('group_name, role_access, context', parameter_list)


def pytest_generate_tests(metafunc):
    """The following lines generate appliance versions based from the current build.
    Appliance version is split and minor_build is picked out for generating each version
    and appending it to the empty versions list"""
    versions = []
    version = find_appliance(metafunc).version

    split_ver = str(version).split(".")
    try:
        minor_build = split_ver[2]
        assert int(minor_build) != 0
    except IndexError:
        logger.exception('Caught IndexError generating for test_appliance_update, skipping')
    except AssertionError:
        logger.debug('Caught AssertionError: No previous z-stream version to update from')
        versions.append(pytest.param("bad:{!r}".format(version), marks=pytest.mark.uncollect(
            'Could not parse minor_build version from: {}'.format(version)
        )))
    else:
        for i in range(int(minor_build) - 1, -1, -1):
            versions.append("{}.{}.{}".format(split_ver[0], split_ver[1], i))
    metafunc.parametrize('old_version', versions, indirect=True)
Example #28
def pytest_runtest_teardown(item, nextitem):
    if not getattr(item.config, '_art_client', None):
        return
    name, location = get_test_idents(item)
    app = find_appliance(item)
    ip = app.hostname
    fire_art_test_hook(item,
                       'finish_test',
                       slaveid=store.slaveid,
                       ip=ip,
                       wait_for_task=True)
    fire_art_test_hook(item, 'sanitize', words=words)
    jenkins_data = {
        'build_url': os.environ.get('BUILD_URL'),
        'build_number': os.environ.get('BUILD_NUMBER'),
        'git_commit': os.environ.get('GIT_COMMIT'),
        'job_name': os.environ.get('JOB_NAME')
    }
    param_dict = None
    try:
        caps = app.browser.widgetastic.selenium.capabilities
        param_dict = {
            'browserName':
            caps.get('browserName', 'Unknown'),
            'browserPlatform':
            caps.get('platformName', caps.get('platform', 'Unknown')),
            'browserVersion':
            caps.get('browserVersion', caps.get('version', 'Unknown'))
        }
    except Exception:
        logger.exception("Couldn't grab browser env_vars")
        pass  # already set param_dict

    fire_art_test_hook(item,
                       'ostriz_send',
                       env_params=param_dict,
                       slaveid=store.slaveid,
                       polarion_ids=extract_polarion_ids(item),
                       jenkins=jenkins_data)
Example #29
def pytest_generate_tests(metafunc):
    """The following lines generate appliance versions based from the current build.
    Appliance version is split and minor_build is picked out for generating each version
    and appending it to the empty versions list"""
    versions = []
    version = find_appliance(metafunc).version

    split_ver = str(version).split(".")
    try:
        minor_build = split_ver[2]
        assert int(minor_build) != 0
    except IndexError:
        logger.exception('Caught IndexError generating for test_appliance_update, skipping')
    except AssertionError:
        logger.debug('Caught AssertionError: No previous z-stream version to update from')
        versions.append(pytest.param("bad:{!r}".format(version), marks=pytest.mark.uncollect(
            'Could not parse minor_build version from: {}'.format(version)
        )))
    else:
        for i in range(int(minor_build) - 1, -1, -1):
            # removing older 5.9 builds due to sprout limitation.
            if version < '5.10' and i > 1:
                versions.append("{}.{}.{}".format(split_ver[0], split_ver[1], i))
    metafunc.parametrize('old_version', versions, indirect=True)
Example #30
def pytest_runtest_protocol(item):
    global session_ver
    global session_build
    global session_stream
    appliance = find_appliance(item)
    if not session_ver:
        session_ver = str(appliance.version)
        session_build = appliance.build
        session_stream = appliance.version.stream()
        if str(session_ver) not in session_build:
            session_build = "{}-{}".format(str(session_ver), session_build)
        session_fw_version = None
        try:
            proc = subprocess.Popen(['git', 'describe', '--tags'],
                                    stdout=subprocess.PIPE)
            proc.wait()
            session_fw_version = proc.stdout.read().strip()
        except Exception:
            pass  # already set session_fw_version to None
        fire_art_hook(
            item.config, 'session_info',
            version=session_ver,
            build=session_build,
            stream=session_stream,
            fw_version=session_fw_version
        )

    tier = item.get_marker('tier')
    if tier:
        tier = tier.args[0]

    requirement = item.get_marker('requirement')
    if requirement:
        requirement = requirement.args[0]

    param_dict = {}
    try:
        params = item.callspec.params
        param_dict = {p: get_name(v) for p, v in params.items()}
    except Exception:
        pass  # already set param_dict
    ip = appliance.hostname
    # This pre_start_test hook is needed so that filedump is able to get the test
    # object set up before the logger starts logging. As the logger fires a nested hook
    # to the filedumper, and we can't specify order in riggerlib.
    meta = item.get_marker('meta')
    if meta and 'blockers' in meta.kwargs:
        blocker_spec = meta.kwargs['blockers']
        blockers = []
        for blocker in blocker_spec:
            if isinstance(blocker, int):
                blockers.append(BZ(blocker).url)
            else:
                blockers.append(Blocker.parse(blocker).url)
    else:
        blockers = []
    fire_art_test_hook(
        item, 'pre_start_test',
        slaveid=store.slaveid, ip=ip)
    fire_art_test_hook(
        item, 'start_test',
        slaveid=store.slaveid, ip=ip,
        tier=tier, requirement=requirement, param_dict=param_dict, issues=blockers)
    yield
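
The session firmware/framework version comes from git describe; the Popen/wait/read sequence could equally be written with subprocess.check_output, shown here only as an equivalent sketch rather than what the suite actually does:

import subprocess

try:
    session_fw_version = subprocess.check_output(
        ['git', 'describe', '--tags']).strip().decode('utf-8')
except Exception:
    session_fw_version = None   # e.g. not running from a git checkout
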
Example #31
def managed_known_providers(self):
    appliance = find_appliance(self.config)
    return [prov.key for prov in appliance.managed_known_providers]
Example #32
def pytest_runtest_protocol(item):
    global session_ver
    global session_build
    global session_stream
    appliance = find_appliance(item)
    if not session_ver:
        session_ver = str(appliance.version)
        session_build = appliance.build
        session_stream = appliance.version.stream()
        if str(session_ver) not in session_build:
            session_build = "{}-{}".format(str(session_ver), session_build)
        try:
            proc = subprocess.Popen(['git', 'describe', '--tags'],
                                    stdout=subprocess.PIPE)
            proc.wait()
            session_fw_version = proc.stdout.read().strip()
        except:
            session_fw_version = None
        fire_art_hook(item.config,
                      'session_info',
                      version=session_ver,
                      build=session_build,
                      stream=session_stream,
                      fw_version=session_fw_version)

    tier = item.get_marker('tier')
    if tier:
        tier = tier.args[0]

    requirement = item.get_marker('requirement')
    if requirement:
        requirement = requirement.args[0]

    try:
        params = item.callspec.params
        param_dict = {p: get_name(v) for p, v in params.items()}
    except:
        param_dict = {}
    ip = appliance.hostname
    # This pre_start_test hook is needed so that filedump is able to get the test
    # object set up before the logger starts logging. As the logger fires a nested hook
    # to the filedumper, and we can't specify order in riggerlib.
    meta = item.get_marker('meta')
    if meta and 'blockers' in meta.kwargs:
        blocker_spec = meta.kwargs['blockers']
        blockers = []
        for blocker in blocker_spec:
            if isinstance(blocker, int):
                blockers.append(BZ(blocker).url)
            else:
                blockers.append(Blocker.parse(blocker).url)
    else:
        blockers = []
    fire_art_test_hook(item, 'pre_start_test', slaveid=store.slaveid, ip=ip)
    fire_art_test_hook(item,
                       'start_test',
                       slaveid=store.slaveid,
                       ip=ip,
                       tier=tier,
                       requirement=requirement,
                       param_dict=param_dict,
                       issues=blockers)
    yield