Example #1
def is_open_bz(issue, data=None):
    """Check if specific BZ is open consulting a cached `data` dict or
    calling Bugzilla REST API.

    Arguments:
        issue {str} -- The BZ reference e.g: BZ:123456
        data {dict} -- Issue data indexed by <handler>:<number> or None
    """

    bz = try_from_cache(issue, data)
    if bz.get("is_open") is not None:  # bug has been already processed
        return bz["is_open"]

    bz = follow_duplicates(bz)

    # BZ is explicitly in OPEN status
    if bz.get('status') in OPEN_STATUSES:
        return True

    # BZ is CLOSED with a WONTFIX-like resolution, so it is not actually fixed; treat it as open
    if bz.get('status') in CLOSED_STATUSES and bz.get('resolution') in WONTFIX_RESOLUTIONS:
        return True

    # BZ is CLOSED with a resolution such as ERRATA or CURRENT_RELEASE.
    # If the server version is greater than or equal to the BZ's fixed-in
    # version, consider it fixed, i.e. the BZ is not open.
    return get_sat_version() < extract_min_version(bz)
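
For illustration, a minimal usage sketch, assuming `try_from_cache` looks the issue up directly in the `data` dict keyed by `<handler>:<number>` (the cache entry below is hypothetical):

# Hypothetical cache entry; a pre-set 'is_open' means the bug was already
# processed, so no Bugzilla REST call is made.
cached = {'BZ:123456': {'is_open': False}}

if is_open_bz('BZ:123456', data=cached):
    print('BZ 123456 is still open; keep the workaround in place')
else:
    print('BZ 123456 is considered fixed for this Satellite version')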
Example #2
def test_positive_recommended_repos(session, module_org):
    """list recommended repositories using
     On/Off 'Recommended Repositories' toggle.

    :id: 1ae197d5-88ba-4bb1-8ecf-4da5013403d7

    :expectedresults:

           1. Repositories are shown according to the On/Off 'Recommended Repositories' toggle.
           2. Capsule/Tools repos not matching the current Satellite version are not listed.

    :CaseLevel: Integration

    :BZ: 1776108
    """
    manifests.upload_manifest_locked(module_org.id)
    with session:
        session.organization.select(module_org.name)
        rrepos_on = session.redhatrepository.read(recommended_repo='on')
        assert REPOSET['rhel7'] in [repo['name'] for repo in rrepos_on]
        v = get_sat_version()
        sat_version = f'{v.major}.{v.minor}'
        cap_tool_repos = [
            repo['name'] for repo in rrepos_on
            if 'Tools' in repo['name'] or 'Capsule' in repo['name']
        ]
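        # Assumption: repo names look like
        # 'Red Hat Satellite Tools 6.9 (for RHEL 7 Server) (RPMs)',
        # so the whitespace-split token at index 4 is the version string.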
        mismatched_repos = [
            repo for repo in cap_tool_repos if repo.split()[4] != sat_version
        ]
        assert not mismatched_repos, 'Tools/Capsule repos do not match the Satellite version'
        rrepos_off = session.redhatrepository.read(recommended_repo='off')
        assert REPOSET['rhel8'] in [repo['name'] for repo in rrepos_off]
        assert len(rrepos_off) > len(rrepos_on)
Example #3
def pytest_collection_modifyitems(session, items, config):
    """Add markers and user_properties for testimony token metadata

    user_properties is used by the junit plugin, and thus by many test report systems
    Handle test function/class/module/session scope metadata coming from test docblocks
    Apply user_properties, ibutsu metadata, and pytest markers

    Markers for metadata use the testimony token name as the mark name
    The value of the token for the mark is the first mark arg

    Control test collection for custom options related to testimony metadata

    """
    # get RHEL version of the satellite
    rhel_version = get_sat_rhel_version().base_version
    sat_version = get_sat_version().base_version
    snap_version = settings.server.version.get('snap', '')

    # split the option string and handle no option, single option, multiple
    # config.getoption may return None even when a default is supplied, hence the `or ''`
    importance = [
        i for i in (config.getoption('importance') or '').split(',') if i != ''
    ]
    component = [
        c for c in (config.getoption('component') or '').split(',') if c != ''
    ]
    assignee = [
        a for a in (config.getoption('assignee') or '').split(',') if a != ''
    ]

    selected = []
    deselected = []
    logger.info('Processing test items to add testimony token markers')
    for item in items:
        if item.nodeid.startswith(
                'tests/robottelo/') and 'test_junit' not in item.nodeid:
            # Unit test, no testimony markers
            continue

        # apply the marks for importance, component, and assignee
        # Find matches from docstrings starting at smallest scope
        item_docstrings = [
            d for d in map(inspect.getdoc, (item.function,
                                            getattr(item, 'cls', None),
                                            item.module)) if d is not None
        ]
        item_mark_names = [m.name for m in item.iter_markers()]
        for docstring in item_docstrings:
            # Add marker starting at smallest docstring scope
            # only add the mark if it hasn't already been applied at a lower scope
            doc_component = component_regex.findall(docstring)
            if doc_component and 'component' not in item_mark_names:
                item.add_marker(pytest.mark.component(doc_component[0]))
            doc_importance = importance_regex.findall(docstring)
            if doc_importance and 'importance' not in item_mark_names:
                item.add_marker(pytest.mark.importance(doc_importance[0]))
            doc_assignee = assignee_regex.findall(docstring)
            if doc_assignee and 'assignee' not in item_mark_names:
                item.add_marker(pytest.mark.assignee(doc_assignee[0]))

        # add markers as user_properties so they are recorded in XML properties of the report
        for marker in item.iter_markers():
            item.user_properties.append(
                (marker.name, next(iter(marker.args), None)))
        item.user_properties.append(("BaseOS", rhel_version))
        item.user_properties.append(("SatelliteVersion", sat_version))
        item.user_properties.append(("SnapVersion", snap_version))
        item.user_properties.append(
            ("start_time",
             datetime.datetime.utcnow().strftime(FMT_XUNIT_TIME)))

        # add custom ibutsu metadata fields for test case grouping and heatmaps
        if hasattr(item, "_ibutsu"):
            item._ibutsu["data"]["metadata"].update({
                # TODO Work with ibutsu team, better mechanism for defining 'special' data
                # TODO Add sat version to this item data at test execution time
                "component":
                item.get_closest_marker('component').args[0]
            })

        # apply filtering only if at least one filter option was passed
        if importance or component or assignee:
            # Filter test collection based on CLI options for filtering
            # filters are applied together,
            # such that --component Repository --importance Critical --assignee jsmith
            # only collects tests which have all three of these marks

            # https://github.com/pytest-dev/pytest/issues/1373  Will make this way easier
            # testimony requires both importance and component; this will raise if either is missing
            importance_marker = item.get_closest_marker('importance').args[0]
            if importance and importance_marker not in importance:
                logger.debug(
                    f'Deselected test {item.nodeid} due to "--importance {importance}",'
                    f' test has importance mark: {importance_marker}')
                deselected.append(item)
                continue
            component_marker = item.get_closest_marker('component').args[0]
            if component and component_marker not in component:
                logger.debug(
                    f'Deselected test {item.nodeid} due to "--component {component}",'
                    f' test has component mark: {component_marker}')
                deselected.append(item)
                continue
            assignee_marker = item.get_closest_marker('assignee').args[0]
            if assignee and assignee_marker not in assignee:
                logger.debug(
                    f'Deselected test {item.nodeid} due to "--assignee {assignee}",'
                    f' test has assignee mark: {assignee_marker}')
                deselected.append(item)
                continue

            selected.append(item)

    # if no filter option was passed, deselected is empty and the full items list is kept
    items[:] = selected if deselected else items
    config.hook.pytest_deselected(items=deselected)
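
The `component_regex`, `importance_regex`, and `assignee_regex` objects are defined elsewhere in the module; a minimal sketch of what they might look like, assuming robottelo's testimony docstring tokens (`:CaseComponent:`, `:CaseImportance:`, `:Assignee:`):

import re

# Hypothetical definitions; the real module may anchor or scope these differently.
component_regex = re.compile(r':CaseComponent:\s*(\S+)')
importance_regex = re.compile(r':CaseImportance:\s*(\S+)')
assignee_regex = re.compile(r':Assignee:\s*(\S+)')

doc = """Create and sync a repository.

:CaseComponent: Repository

:CaseImportance: Critical

:Assignee: jsmith
"""
assert component_regex.findall(doc) == ['Repository']
assert importance_regex.findall(doc) == ['Critical']
assert assignee_regex.findall(doc) == ['jsmith']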
Example #4
def pytest_collection_modifyitems(items, config):
    """
    Collect and modify the test collection based on pytest options, selecting
    tests marked as failed/skipped and user-specific tests in Report Portal.
    """
    rp_url = settings.report_portal.portal_url or config.getini('rp_endpoint')
    rp_uuid = config.getini('rp_uuid') or settings.report_portal.api_key
    # prefer dynaconf setting before ini config as pytest-reportportal plugin uses default value
    # for `rp_launch` if none is set there
    rp_launch_name = settings.report_portal.launch_name or config.getini('rp_launch')
    rp_project = config.getini('rp_project') or settings.report_portal.project
    fail_args = config.getoption('only_failed', False)
    skip_arg = config.getoption('only_skipped', False)
    user_arg = config.getoption('user', False)
    ref_launch_uuid = config.getoption('rp_reference_launch_uuid', None) or config.getoption(
        'rp_rerun_of', None
    )
    tests = []
    if not any([fail_args, skip_arg, user_arg]):
        return
    rp = ReportPortal(rp_url=rp_url, rp_api_key=rp_uuid, rp_project=rp_project)

    if ref_launch_uuid:
        logger.info(f'Fetching a reference Report Portal launch {ref_launch_uuid}')
        ref_launches = rp.get_launches(uuid=ref_launch_uuid)
        if not ref_launches:
            raise LaunchError(
                f'Provided reference launch {ref_launch_uuid} was not found or is not finished'
            )
    else:
        sat_release = get_sat_version().base_version
        sat_snap = settings.server.version.get('snap', '')
        if not all([sat_release, sat_snap, (len(sat_release.split('.')) == 3)]):
            raise pytest.UsageError(
                '--only-failed|--only-skipped requires a reference launch id or'
                ' a full satellite version (x.y.z-a.b) to be provided.'
                f' sat_release: {sat_release}, sat_snap: {sat_snap} were provided instead'
            )
        sat_version = f'{sat_release}-{sat_snap}'
        logger.info(
            f'Fetching a reference Report Portal launch by Satellite version: {sat_version}'
        )

        ref_launches = rp.get_launches(name=rp_launch_name, sat_version=sat_version)
        if not ref_launches:
            raise LaunchError(
                f'No suitable Report Portal launches found for name: {rp_launch_name}'
                f' and version: {sat_version}'
            )

    test_args = {}
    test_args.setdefault('status', list())
    if skip_arg:
        test_args['status'].append('SKIPPED')
    if fail_args:
        test_args['status'].append('FAILED')
        if fail_args != 'all':
            defect_types = fail_args.split(',')
            allowed_args = [*rp.defect_types.keys()]
            if not set(defect_types).issubset(set(allowed_args)):
                raise pytest.UsageError(
                    f'Incorrect values \'{fail_args}\' provided to pytest option '
                    f'\'--only-failed\'. It should be one or a comma-separated mix of '
                    f'{allowed_args}'
                )
            test_args['defect_types'] = defect_types
    if user_arg:
        test_args['user'] = user_arg
    test_args['paths'] = config.args
    for ref_launch in ref_launches:
        _validate_launch(ref_launch)
        tests.extend(rp.get_tests(launch=ref_launch, **test_args))
    # remove inapplicable tests from the current test collection
    deselected = [
        i
        for i in items
        if f'{i.location[0]}.{i.location[2]}'.replace('::', '.')
        not in [t['name'].replace('::', '.') for t in tests]
    ]
    selected = list(set(items) - set(deselected))
    logger.debug(
        f'Selected {len(selected)} and deselected {len(deselected)} tests '
        'based on the latest/given launch test results.'
    )
    config.hook.pytest_deselected(items=deselected)
    items[:] = selected
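
The deselection step matches pytest items to Report Portal test names by normalizing both sides to a dot-separated form; a small illustration with hypothetical names, assuming `item.location` is pytest's usual `(path, lineno, domain)` tuple:

# pytest side: item.location = (path, lineno, domain)
location = ('tests/foo/test_repo.py', 42, 'TestRepo.test_create')
item_name = f'{location[0]}.{location[2]}'.replace('::', '.')

# Report Portal side: test names typically carry '::' separators
rp_name = 'tests/foo/test_repo.py::TestRepo::test_create'.replace('::', '.')

# both normalize to the same key, so this item would stay selected
assert item_name == rp_name == 'tests/foo/test_repo.py.TestRepo.test_create'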