コード例 #1
0
def setup_one_or_skip(request, filters=None, use_global_filters=True):
    """Set up a single matching provider, or skip the calling test.

    Args:
        request: pytest request object; needed so a skip is logged correctly
            in artifactor
        filters: list of :py:class:`ProviderFilter` instances, or None
        use_global_filters: when ``True``, global filters are applied on top
            of ``filters``

    Returns:
        The first provider that already exists or could be set up.
    """
    filters = filters or []
    providers = list_providers(filters=filters,
                               use_global_filters=use_global_filters)

    if not providers:
        # Distinguish "nothing matches this test's filters" from
        # "nothing matches at all" for a clearer skip message.
        if list_providers(filters=None, use_global_filters=use_global_filters):
            pytest.skip("No provider matching test-specific filters found")
        # This can also mean that there simply are no providers in the yamls!
        pytest.skip("No provider matching global filters found")

    # Every candidate already marked problematic? Then skip outright.
    if _problematic_providers.issuperset(providers):
        _artifactor_skip_providers(
            request, providers,
            f"All providers marked as problematic: {[p.key for p in providers]}")

    # Reuse an already-set-up provider when one matches the requirements.
    existing = next((p for p in providers if p.exists), None)
    if existing is not None:
        return existing

    # With several candidates, split them into preferred / not-preferred
    # groups, shuffle each group separately, and try preferred ones first.
    if len(providers) > 1:
        not_do_not_prefer = ProviderFilter(
            required_fields=[("do_not_prefer", True)], inverted=True)
        preferred = list_providers(
            filters=filters + [not_do_not_prefer],
            use_global_filters=use_global_filters)
        others = [p for p in providers if p not in preferred]
        random.shuffle(preferred)
        random.shuffle(others)
        providers = preferred + others

    # Attempt setup of each not-yet-existing provider in turn.
    non_existing = [p for p in providers if not p.exists]
    for candidate in non_existing:
        if _setup_provider_verbose(request, candidate):
            return candidate

    _artifactor_skip_providers(
        request, non_existing,
        f"Failed to set up any matching providers: {[p.key for p in providers]}")
コード例 #2
0
ファイル: provider.py プロジェクト: apagac/cfme_tests
def setup_one_or_skip(request, filters=None, use_global_filters=True):
    """ Sets up one of matching providers or skips the test

    Args:
        filters: List of :py:class:`ProviderFilter` or None
        request: Needed for logging a potential skip correctly in artifactor
        use_global_filters: Will apply global filters as well if `True`, will not otherwise

    Returns:
        The first provider that already exists or was set up successfully.
    """

    filters = filters or []
    providers = list_providers(filters=filters, use_global_filters=use_global_filters)

    # All providers filtered out?
    if not providers:
        global_providers = list_providers(filters=None, use_global_filters=use_global_filters)
        if not global_providers:
            # This can also mean that there simply are no providers in the yamls!
            pytest.skip("No provider matching global filters found")
        else:
            pytest.skip("No provider matching test-specific filters found")

    # Are all providers marked as problematic?
    if _problematic_providers.issuperset(providers):
        skip_msg = "All providers marked as problematic: {}".format([p.key for p in providers])
        _artifactor_skip_providers(request, providers, skip_msg)

    # If there is a provider already set up matching the user's requirements, reuse it
    for provider in providers:
        if provider.exists:
            return provider

    # If we have more than one provider, we create two separate groups of providers, preferred
    # and not preferred, that we shuffle separately and then join together
    if len(providers) > 1:
        only_preferred_filter = ProviderFilter(required_fields=[("do_not_prefer", True)],
                                               inverted=True)
        preferred_providers = list_providers(
            filters=filters + [only_preferred_filter], use_global_filters=use_global_filters)
        not_preferred_providers = [p for p in providers if p not in preferred_providers]
        random.shuffle(preferred_providers)
        random.shuffle(not_preferred_providers)
        providers = preferred_providers + not_preferred_providers

    # Try to set up one of matching providers
    non_existing = [prov for prov in providers if not prov.exists]
    for provider in non_existing:
        if _setup_provider_verbose(request, provider):
            return provider

    # BUG FIX: the original assigned a (template, list) tuple here instead of
    # calling str.format, so the skip message was never actually formatted.
    skip_msg = "Failed to set up any matching providers: {}".format([p.key for p in providers])
    _artifactor_skip_providers(request, non_existing, skip_msg)
コード例 #3
0
ファイル: test_providers.py プロジェクト: lcouzens/cfme_tests
def test_azure_multiple_subscription(appliance, request, soft_assert):
    """
    Verifies that different azure providers have different resources access

    Steps:
    1. Add all Azure providers
    2. Compare their VMs/Templates

    Metadata:
        test_flag: crud
    """
    # Only Azure providers that carry the 'crud' test flag are considered.
    azure_filter = ProviderFilter(classes=[AzureProvider], required_flags=['crud'])
    providers = list_providers([azure_filter])
    if len(providers) < 2:
        pytest.skip("this test needs at least 2 AzureProviders")

    # Record (name, vm count, template count) for each provider after adding it.
    inventory = []
    for prov in providers:
        # Ensure added providers are removed once the test finishes.
        request.addfinalizer(prov.clear_providers)
        prov.create()
        prov.validate_stats()
        inventory.append((prov.name, prov.num_vm(), prov.num_template()))

    # Compare every distinct pair of providers; identical counts suggest the
    # subscriptions expose the same resources.
    for i, first in enumerate(inventory[:-1]):
        for second in inventory[i + 1:]:
            soft_assert(first[1] != second[1],
                        "Same num_vms for {} and {}".format(first[0], second[0]))
            soft_assert(first[2] != second[2],
                        "Same num_templates for {} and {}".format(first[0], second[0]))
コード例 #4
0
def test_azure_multiple_subscription(appliance, request, soft_assert):
    """
    Verifies that different azure providers have different resources access

    Steps:
    1. Add all Azure providers
    2. Compare their VMs/Templates

    Metadata:
        test_flag: crud
    """
    # Only Azure providers flagged for CRUD testing qualify
    pf = ProviderFilter(classes=[AzureProvider], required_flags=['crud'])
    providers = list_providers([pf])
    if len(providers) < 2:
        pytest.skip("this test needs at least 2 AzureProviders")
    # Collect (name, vm count, template count) tuples, one per provider
    prov_inventory = []
    for provider in providers:
        # Ensure added providers get cleaned up when the test ends
        request.addfinalizer(provider.clear_providers)
        provider.create()
        provider.validate_stats()
        prov_inventory.append(
            (provider.name, provider.num_vm(), provider.num_template()))

    # Compare each distinct pair of providers: equal VM or template counts
    # would suggest the subscriptions see the same resources
    for index, prov_a in enumerate(prov_inventory[:-1]):
        for prov_b in prov_inventory[index + 1:]:
            soft_assert(
                prov_a[1] != prov_b[1],
                "Same num_vms for {} and {}".format(prov_a[0], prov_b[0]))
            soft_assert(
                prov_a[2] != prov_b[2],
                "Same num_templates for {} and {}".format(
                    prov_a[0], prov_b[0]))
コード例 #5
0
def pytest_configure(config):
    """ Filters the list of providers as part of pytest configuration

    Note:
        Additional filter is added to the global_filters dict of active filters here.
    """

    cmd_filter = config.getvalueorskip('use_provider')
    if not cmd_filter:
        # No provider selection given on the command line: fall back to "default"
        cmd_filter = ["default"]

    # NOTE(review): conjunctive=False presumably makes the key/tag criteria an
    # OR-match rather than an AND-match -- confirm against ProviderFilter.
    new_filter = ProviderFilter(keys=cmd_filter, required_tags=cmd_filter, conjunctive=False)
    global_filters['use_provider'] = new_filter

    # Use lazy %-style logging arguments instead of str.format so the message
    # string is only built when DEBUG logging is enabled (the surviving
    # provider list itself is still computed eagerly as an argument).
    logger.debug('Filtering providers with %s, leaves %s',
                 cmd_filter, [prov.key for prov in list_providers()])
コード例 #6
0
def az_pwsh_vm(appliance):
    """
    azure_pwsh contains powershell and necessary scripts to upload vhd, create VM, get ip of the
    resource and delete the VM.
    Find the provider that contains that template.

    The example of the configuration can be found in data/az_pwsh_cloudinit.cfg

    Yields:
        The created VM object; it is deleted from the provider on teardown.
    """
    # Only providers whose yaml declares a templates/powershell_vm entry qualify
    filter_pwsh_template = ProviderFilter(
        required_fields=[['templates', 'powershell_vm']])
    providers = list_providers(filters=[filter_pwsh_template])
    if not providers:
        pytest.skip(
            "There's no provider that contains a template with powershell")

    # If there's more than 1 provider that has the template, we select the first
    provider = providers[0]

    vm_name = random_vm_name(context="pwsh")
    pwsh_vm = provider.data.templates.powershell_vm.name

    collection = provider.appliance.provider_based_collection(provider)

    try:
        vm = collection.instantiate(vm_name, provider, pwsh_vm)
        vm.create_on_provider(allow_skip="default")
    except IndexError:
        # Imported lazily so the dependency stays local to this failure path
        from cfme.exceptions import ItemNotFound
        raise ItemNotFound('VM with powershell not found!')

    vm.mgmt.ensure_state(VmState.RUNNING)

    # Poll every 5s (up to 10 minutes) until the VM reports an IP address
    @wait_for_decorator(timeout="10m", delay=5)
    def get_ip_address():
        ip = vm.ip_address
        return ip is not None

    yield vm

    # Teardown: remove the VM from the provider after the test finishes
    vm.cleanup_on_provider()
コード例 #7
0
def providers(metafunc, filters=None):
    """Collect providers matching the given (plus global) filters for parametrization.

    Note:
        Using the default 'function' scope, each test will be run individually for each provider
        before moving on to the next test. To group all tests related to single provider together,
        parametrize tests in the 'module' scope.

    Note:
        testgen for providers now requires the usage of test_flags for collection to work.
        Please visit http://cfme-tests.readthedocs.org/guides/documenting.html#documenting-tests
        for more details.
    """
    active_filters = filters or []
    argnames, argvalues, idlist = [], [], []

    # Turn the test's documented flags (meta.kwargs['from_docs']['test_flag'],
    # a comma-separated string) into an extra ProviderFilter.
    meta = getattr(metafunc.function, 'meta', None)
    flag_string = getattr(meta, 'kwargs', {}).get('from_docs', {}).get('test_flag')
    if flag_string:
        active_filters = active_filters + [
            ProviderFilter(required_flags=flag_string.split(','))]

    for provider in list_providers(active_filters):
        argvalues.append([provider])
        # Provider keys make parametrized test ids readable.
        idlist.append(provider.key)
        # Register the 'provider' argname once, on first use.
        if 'provider' in metafunc.fixturenames and 'provider' not in argnames:
            metafunc.function = pytest.mark.uses_testgen()(metafunc.function)
            argnames.append('provider')
        # Under the 'sauce' option a single provider is enough.
        if metafunc.config.getoption('sauce'):
            break

    return argnames, argvalues, idlist
コード例 #8
0
ファイル: testgen.py プロジェクト: lcouzens/cfme_tests
def providers(metafunc, filters=None):
    """ Gets providers based on given (+ global) filters

    Note:
        Using the default 'function' scope, each test will be run individually for each provider
        before moving on to the next test. To group all tests related to single provider together,
        parametrize tests in the 'module' scope.

    Note:
        testgen for providers now requires the usage of test_flags for collection to work.
        Please visit http://cfme-tests.readthedocs.org/guides/documenting.html#documenting-tests
        for more details.

    Returns:
        tuple: (argnames, argvalues, idlist) suitable for metafunc.parametrize
    """
    filters = filters or []
    argnames = []
    argvalues = []
    idlist = []

    # Obtains the test's flags in form of a ProviderFilter
    meta = getattr(metafunc.function, 'meta', None)
    test_flag_str = getattr(meta, 'kwargs', {}).get('from_docs', {}).get('test_flag')
    if test_flag_str:
        # test_flag is a comma-separated string in the test's docstring metadata
        test_flags = test_flag_str.split(',')
        flags_filter = ProviderFilter(required_flags=test_flags)
        filters = filters + [flags_filter]

    for provider in list_providers(filters):
        argvalues.append([provider])
        # Use the provider key for idlist, helps with readable parametrized test output
        idlist.append(provider.key)
        # Add provider to argnames if missing
        if 'provider' in metafunc.fixturenames and 'provider' not in argnames:
            metafunc.function = pytest.mark.uses_testgen()(metafunc.function)
            argnames.append('provider')
        # Under the 'sauce' option a single provider suffices, so stop early
        if metafunc.config.getoption('sauce'):
            break

    return argnames, argvalues, idlist
コード例 #9
0
ファイル: test_providers.py プロジェクト: apagac/cfme_tests
def az_pwsh_vm(appliance):
    """
    azure_pwsh contains powershell and necessary scripts to upload vhd, create VM, get ip of the
    resource and delete the VM.
    Find the provider that contains that template.

    The example of the configuration can be found in data/az_pwsh_cloudinit.cfg

    Yields:
        The created VM object; it is deleted from the provider on teardown.
    """
    # Only providers whose yaml declares a templates/powershell_vm entry qualify
    filter_pwsh_template = ProviderFilter(required_fields=[['templates', 'powershell_vm']])
    providers = list_providers(filters=[filter_pwsh_template])
    if not providers:
        pytest.skip("There's no provider that contains a template with powershell")

    # If there's more than 1 provider that has the template, we select the first
    provider = providers[0]

    vm_name = random_vm_name(context="pwsh")
    pwsh_vm = provider.data.templates.powershell_vm.name

    collection = provider.appliance.provider_based_collection(provider)

    try:
        vm = collection.instantiate(vm_name, provider, pwsh_vm)
        vm.create_on_provider(allow_skip="default")
    except IndexError:
        # Imported lazily so the dependency stays local to this failure path
        from cfme.exceptions import ItemNotFound
        raise ItemNotFound('VM with powershell not found!')

    vm.mgmt.ensure_state(VmState.RUNNING)

    # Poll every 5s (up to 10 minutes) until the VM reports an IP address
    @wait_for_decorator(timeout="10m", delay=5)
    def get_ip_address():
        ip = vm.mgmt.ip
        return ip is not None
    yield vm

    # Teardown: remove the VM from the provider after the test finishes
    vm.cleanup_on_provider()
コード例 #10
0
ファイル: cleanup_old_vms.py プロジェクト: apagac/cfme_tests
def cleanup_vms(texts, max_hours=24, providers=None, tags=None, dryrun=True):
    """
    Main method for the cleanup process
    Generates regex match objects
    Checks providers for cleanup boolean in yaml
    Checks provider connectivity (using ping)
    Process Pool for provider scanning
    Each provider process will thread vm scanning and deletion

    Args:
        texts (list): List of regex strings to match with
        max_hours (int): age limit for deletion
        providers (list): List of provider keys to scan and cleanup
        tags (list): List of tags to filter providers by
        dryrun (bool): Whether or not to actually delete VMs or just report
    Returns:
        int: return code, 0 on success, otherwise raises exception
    """
    logger.info('Matching VM names against the following case-insensitive strings: %r', texts)
    # Compile regex, strip leading/trailing single quotes from cli arg
    matchers = [re.compile(text.strip("'"), re.IGNORECASE) for text in texts]

    # setup provider filter with cleanup (default), tags, and providers (from cli opts)
    filters = [ProviderFilter(required_fields=[('cleanup', True)])]
    if tags:
        logger.info('Adding required_tags ProviderFilter for: %s', tags)
        filters.append(ProviderFilter(required_tags=tags))
    if providers:
        logger.info('Adding keys ProviderFilter for: %s', providers)
        filters.append(ProviderFilter(keys=providers))

    # Just want keys, use list_providers with no global filters to include disabled.
    with DummyAppliance():
        providers_to_scan = [prov.key for prov in list_providers(filters, use_global_filters=False)]
    logger.info('Potential providers for cleanup, filtered with given tags and provider keys: \n%s',
                '\n'.join(providers_to_scan))

    # scan providers for vms with name matches
    # NOTE(review): `manager` looks like a module-level multiprocessing.Manager
    # shared with the workers -- confirm at module scope.
    scan_fail_queue = manager.Queue()
    with Pool(4) as pool:
        # Fan the per-provider scans out over 4 worker processes
        deleted_vms = pool.starmap(
            cleanup_provider,
            ((provider_key, matchers, scan_fail_queue, max_hours, dryrun)
             for provider_key in providers_to_scan)
        )

    # flatten deleted_vms list, as its top level is by provider process
    # at same time remove None responses
    deleted_vms = [report
                   for prov_list in deleted_vms if prov_list is not None
                   for report in prov_list]

    scan_fail_vms = []
    # add the scan failures into deleted vms for reporting sake
    while not scan_fail_queue.empty():
        scan_fail_vms.append(scan_fail_queue.get())

    # `args.outfile` comes from module-level CLI parsing (see __main__ section)
    with open(args.outfile, 'a') as report:
        report.write('## VM/Instances deleted via:\n'
                     '##   text matches: {}\n'
                     '##   age matches: {}\n'
                     .format(texts, max_hours))
        # Combined scan-failure + deletion report, sorted by each row's result field
        message = tabulate(
            sorted(scan_fail_vms + deleted_vms, key=attrgetter('result')),
            headers=['Provider', 'Name', 'Age', 'Status Before', 'Delete RC'],
            tablefmt='orgtbl'
        )
        report.write(message + '\n')
    logger.info(message)
    return 0
コード例 #11
0
                provider.mgmt.remove_sa_from_scc(scc_name=scc_name,
                                                 namespace=sa_namespace,
                                                 sa=sa_name)
            else:
                logger.debug(
                    "skipping sa %s in scc %s because project exists "
                    "or it doesn't match any pattern", sa, scc_name)


if __name__ == "__main__":
    args = parse_cmd_line()
    errors = 0
    pf = ProviderFilter(classes=[OpenshiftProvider],
                        required_fields=[('use_for_sprout', True)])
    with DummyAppliance():
        providers = list_providers(filters=[pf], use_global_filters=False)
    for prov in providers:
        # ping provider
        try:
            prov.mgmt.list_project()
        except Exception as e:
            logger.error('Connection to provider %s cannot be estabilished',
                         prov.key)
            logger.error('Error: %s', e)
            errors += 1
            continue

        # remove all sa records from scc
        if args.cleanup_scc:
            try:
                text_to_match = [re.compile(r) for r in args.text_to_match]
コード例 #12
0
ファイル: provider.py プロジェクト: apagac/cfme_tests
def providers(metafunc, filters=None, selector=ALL, fixture_name='provider'):
    """ Gets providers based on given (+ global) filters

    Note:
        Using the default 'function' scope, each test will be run individually for each provider
        before moving on to the next test. To group all tests related to single provider together,
        parametrize tests in the 'module' scope.

    Note:
        testgen for providers now requires the usage of test_flags for collection to work.
        Please visit http://cfme-tests.readthedocs.org/guides/documenting.html#documenting-tests
        for more details.

    Args:
        metafunc: pytest metafunc object of the test being parametrized
        filters: optional list of ProviderFilter objects applied on top of global filters
        selector: one of ALL / ONE / LATEST / ONE_PER_TYPE / ONE_PER_CATEGORY / ONE_PER_VERSION
        fixture_name: name of the fixture argument to parametrize

    Returns:
        tuple: (argnames, argvalues, idlist) suitable for metafunc.parametrize
    """
    filters = filters or []
    argnames = []
    argvalues = []
    idlist = []

    # Obtains the test's flags in form of a ProviderFilter
    meta = getattr(metafunc.function, 'meta', None)
    test_flag_str = getattr(meta, 'kwargs', {}).get('from_docs', {}).get('test_flag')
    if test_flag_str:
        test_flags = test_flag_str.split(',')
        flags_filter = ProviderFilter(required_flags=test_flags)
        filters = filters + [flags_filter]

    # available_providers are the ones "available" from the yamls after all of the global and
    # local filters have been applied. It will be a list of crud objects.
    available_providers = list_providers(filters)

    # supported_providers are the ones "supported" in the supportability.yaml file. It will
    # be a list of DataProvider objects and will be filtered based upon what the test has asked for
    holder = metafunc.config.pluginmanager.get_plugin('appliance-holder')
    series = holder.held_appliance.version.series()
    supported_providers = all_required(series, filters)

    def get_valid_providers(provider):
        # We now search through all the available providers looking for ones that match the
        # criteria. If we don't find any, we return an empty list.
        prov_tuples = []
        for a_prov in available_providers:
            try:
                if not a_prov.version:
                    raise ValueError("provider {p} has no version".format(p=a_prov))
                elif (a_prov.version == provider.version and
                        a_prov.type == provider.type_name and
                        a_prov.category == provider.category):
                    prov_tuples.append((provider, a_prov))
            except (KeyError, ValueError):
                # Version info missing or unusable: fall back to type + category match
                if (a_prov.type == provider.type_name and
                        a_prov.category == provider.category):
                    prov_tuples.append((provider, a_prov))
        return prov_tuples

    # A small routine to check if we need to supply the idlist a provider type or
    # a real type/version
    need_prov_keys = False
    for prov_filter in filters:  # renamed from 'filter' to avoid shadowing the builtin
        if isinstance(prov_filter, ProviderFilter) and prov_filter.classes:
            for klass in prov_filter.classes:
                if hasattr(klass, 'type_name'):
                    need_prov_keys = True
                    break

    matching_provs = [valid_provider
                      for prov in supported_providers
                      for valid_provider in get_valid_providers(prov)]

    # Now we run through the selectors and build up a list of supported providers which match our
    # requirements. This then forms the providers that the test should run against.
    if selector == ONE:
        if matching_provs:
            allowed_providers = [matching_provs[0]]
        else:
            allowed_providers = []
    elif selector == LATEST:
        # BUG FIX: previously an empty matching_provs raised IndexError here;
        # now it yields an empty parametrization, consistent with ONE.
        if matching_provs:
            allowed_providers = [max(
                matching_provs, key=lambda k: LooseVersion(str(k[0].version)))]
        else:
            allowed_providers = []
    elif selector == ONE_PER_TYPE:
        types = set()

        def add_prov(prov):
            # Record the type as seen so later providers of this type are dropped
            types.add(prov[0].type_name)
            return prov

        allowed_providers = [
            add_prov(prov) for prov in matching_provs if prov[0].type_name not in types
        ]
    elif selector == ONE_PER_CATEGORY:
        categories = set()

        def add_prov(prov):
            categories.add(prov[0].category)
            return prov

        allowed_providers = [
            add_prov(prov) for prov in matching_provs if prov[0].category not in categories
        ]
    elif selector == ONE_PER_VERSION:
        # This needs to handle versions per type
        versions = defaultdict(set)

        def add_prov(prov):
            versions[prov[0].type_name].add(prov[0].version)
            return prov

        allowed_providers = [
            add_prov(prov)
            for prov in matching_provs
            if prov[0].version not in versions[prov[0].type_name]
        ]
    else:
        # If there are no selectors, then the allowed providers are whichever are supported
        allowed_providers = matching_provs

    # Now we iterate through the required providers and try to match them to the available ones
    for data_prov, real_prov in allowed_providers:
        data_prov.key = real_prov.key
        argvalues.append(pytest.param(data_prov))

        # Use the provider key for idlist, helps with readable parametrized test output
        the_id = str(data_prov.key) if metafunc.config.getoption('legacy_ids') else data_prov.the_id

        # Now we modify the id based on what selector we chose
        if metafunc.config.getoption('disable_selectors'):
            idlist.append(the_id)
        else:
            if selector == ONE:
                if need_prov_keys:
                    idlist.append(data_prov.type_name)
                else:
                    idlist.append(data_prov.category)
            elif selector == ONE_PER_CATEGORY:
                idlist.append(data_prov.category)
            elif selector == ONE_PER_TYPE:
                idlist.append(data_prov.type_name)
            else:
                idlist.append(the_id)

        # Add provider to argnames if missing
        if fixture_name in metafunc.fixturenames and fixture_name not in argnames:
            metafunc.function = pytest.mark.uses_testgen()(metafunc.function)
            argnames.append(fixture_name)
        # A single provider suffices under 'sauce' or with the ONE selector
        if metafunc.config.getoption('sauce') or selector == ONE:
            break
    return argnames, argvalues, idlist
コード例 #13
0
ファイル: ocp_cleanup.py プロジェクト: apagac/cfme_tests
            sa_namespace, sa_name = sa.split(':')[-2:]
            if match(text_to_match, sa_namespace) and not provider.mgmt.does_vm_exist(sa_namespace):
                logger.info('removing sa %s from scc %s', sa, scc_name)
                provider.mgmt.remove_sa_from_scc(scc_name=scc_name, namespace=sa_namespace,
                                                 sa=sa_name)
            else:
                logger.debug("skipping sa %s in scc %s because project exists "
                             "or it doesn't match any pattern", sa, scc_name)


if __name__ == "__main__":
    args = parse_cmd_line()
    errors = 0
    pf = ProviderFilter(classes=[OpenshiftProvider], required_fields=[('use_for_sprout', True)])
    with DummyAppliance():
        providers = list_providers(filters=[pf], use_global_filters=False)
    for prov in providers:
        # ping provider
        try:
            prov.mgmt.list_project()
        except Exception as e:
            logger.error('Connection to provider %s cannot be estabilished', prov.key)
            logger.error('Error: %s', e)
            errors += 1
            continue

        # remove all sa records from scc
        if args.cleanup_scc:
            try:
                text_to_match = [re.compile(r) for r in args.text_to_match]
                delete_stale_sa(prov, text_to_match)
コード例 #14
0
ファイル: provider.py プロジェクト: nachandr/cfme_tests
def providers(metafunc, filters=None, selector=ONE_PER_VERSION, fixture_name='provider'):
    """ Gets providers based on given (+ global) filters

    Note:
        Using the default 'function' scope, each test will be run individually for each provider
        before moving on to the next test. To group all tests related to single provider together,
        parametrize tests in the 'module' scope.

    Note:
        testgen for providers now requires the usage of test_flags for collection to work.
        Please visit http://cfme-tests.readthedocs.org/guides/documenting.html#documenting-tests
        for more details.

    Returns:
        tuple: (argnames, argvalues, idlist) suitable for metafunc.parametrize
    """
    filters = filters or []
    argnames = []
    argvalues = []
    idlist = []

    # Obtains the test's flags in form of a ProviderFilter
    meta = getattr(metafunc.function, 'meta', None)
    test_flag_str = getattr(meta, 'kwargs', {}).get('from_docs', {}).get('test_flag')
    if test_flag_str:
        test_flags = test_flag_str.split(',')
        flags_filter = ProviderFilter(required_flags=test_flags)
        filters = filters + [flags_filter]

    # available_providers are the ones "available" from the yamls after all of the global and
    # local filters have been applied. It will be a list of crud objects.
    available_providers = list_providers(filters)

    # supported_providers are the ones "supported" in the supportability.yaml file. It will
    # be a list of DataProvider objects and will be filtered based upon what the test has asked for
    holder = metafunc.config.pluginmanager.get_plugin('appliance-holder')
    series = holder.held_appliance.version.series()
    supported_providers = all_required(series, filters)

    def get_valid_providers(data_provider):
        """Search through all available providers in yaml for ones that match the criteria in
        data_provider."""
        valid_providers = []
        for available_provider in available_providers:
            try:
                if available_provider.version != data_provider.version:
                    continue
            except KeyError:
                # No version info in the yaml; match on type and category alone
                pass
            if (
                available_provider.type == data_provider.type_name and
                available_provider.category == data_provider.category
            ):
                # Re-create the DataProvider with the matched yaml key attached
                data_provider_with_key = DataProvider.get_instance(data_provider.category,
                    data_provider.type_name, data_provider.version, available_provider.key)
                valid_providers.append((data_provider_with_key, available_provider))

        return valid_providers

    # Check whether we need to supply a provider type or a real type / version to idlist.
    need_prov_keys = False
    for filter in filters:
        if isinstance(filter, ProviderFilter) and filter.classes:
            for filt in filter.classes:
                if hasattr(filt, 'type_name'):
                    need_prov_keys = True
                    break

    matching_provs = [valid_provider
                      for data_provider in supported_providers
                      for valid_provider in get_valid_providers(data_provider)]

    # Run through the selectors and build a list of supported providers which match our
    # requirements. This then forms the providers against which the test should run.
    if selector == ONE:
        if matching_provs:
            allowed_providers = [matching_provs[0]]
        else:
            allowed_providers = []
    elif selector == SECOND:
        if matching_provs:
            try:
                allowed_providers = [matching_provs[1]]
            except IndexError:
                pytest.skip("no second provider was found")
        else:
            allowed_providers = []
    elif selector == LATEST:
        # NOTE(review): if matching_provs is empty this raises IndexError rather
        # than producing an empty parametrization like ONE/SECOND -- confirm
        # whether that is intended.
        allowed_providers = [sorted(
            matching_provs, key=lambda k:LooseVersion(
                str(k[0].version)), reverse=True
        )[0]]
    elif selector == ONE_PER_TYPE:
        types = set()

        def add_prov(prov):
            # Record the type as seen so later providers of this type are dropped
            types.add(prov[0].type_name)
            return prov

        allowed_providers = [
            add_prov(prov) for prov in matching_provs if prov[0].type_name not in types
        ]
    elif selector == ONE_PER_CATEGORY:
        categories = set()

        def add_prov(prov):
            categories.add(prov[0].category)
            return prov

        allowed_providers = [
            add_prov(prov) for prov in matching_provs if prov[0].category not in categories
        ]
    elif selector == ONE_PER_VERSION:
        # This needs to handle versions per type
        versions = defaultdict(set)

        def add_prov(prov):
            versions[prov[0].type_name].add(prov[0].version)
            return prov

        allowed_providers = [
            add_prov(prov)
            for prov in matching_provs
            if prov[0].version not in versions[prov[0].type_name]
        ]
    else:
        # If there are no selectors, then the allowed providers are whichever are supported
        allowed_providers = matching_provs

    # Iterate through the required providers and try to match them to the available ones
    for data_prov, real_prov in allowed_providers:
        argvalues.append(pytest.param(data_prov))

        # Use the provider key for idlist, helps with readable parametrized test output
        use_legacy_ids = metafunc.config.getoption('legacy_ids')
        legacy_key = data_prov.key
        the_id = legacy_key if use_legacy_ids else data_prov.the_id

        # Modify the test id based on what selector we chose.
        if metafunc.config.getoption('disable_selectors'):
            selected_id = the_id
        else:
            if selector == ONE:
                if need_prov_keys:
                    selected_id = data_prov.type_name
                else:
                    selected_id = data_prov.category
            elif selector == ONE_PER_CATEGORY:
                selected_id = data_prov.category
            elif selector == ONE_PER_TYPE:
                selected_id = data_prov.type_name
            elif selector == ALL:
                if use_legacy_ids:
                    # if we're already using legacy there shouldn't be duplicate ids
                    selected_id = legacy_key
                else:
                    # include the key to differentiate multiple providers of same type+version
                    selected_id = f'{the_id}-{legacy_key}'
            else:
                selected_id = the_id

        idlist.append(selected_id)

        # Add provider to argnames if missing
        if fixture_name in metafunc.fixturenames and fixture_name not in argnames:
            metafunc.function = pytest.mark.uses_testgen()(metafunc.function)
            argnames.append(fixture_name)
        # A single provider suffices under 'sauce' or with the ONE selector
        if metafunc.config.getoption('sauce') or selector == ONE:
            break
    return argnames, argvalues, idlist
コード例 #15
0
def cleanup_vms(texts, max_hours=24, providers=None, tags=None, prompt=True):
    """
    Main method for the cleanup process

    Generates regex match objects
    Checks providers for cleanup boolean in yaml
    Checks provider connectivity (using ping)
    Threads process_provider_vms to build list of vms to delete
    Prompts user to continue with delete
    Threads deleting of the vms

    Args:
        texts (list): List of regex strings to match with
        max_hours (int): age limit for deletion
        providers (list): List of provider keys to scan and cleanup
        tags (list): List of tags to filter providers by
        prompt (bool): Whether or not to prompt the user before deleting vms
    Returns:
        int: return code, 0 on success, otherwise raises exception
    """
    logger.info('Matching VM names against the following case-insensitive strings: %r', texts)
    # Compile regex, strip leading/trailing single quotes from cli arg
    matchers = [re.compile(text.strip("'"), re.IGNORECASE) for text in texts]

    # setup provider filter with cleanup (default), tags, and providers (from cli opts)
    filters = [ProviderFilter(required_fields=[('cleanup', True)])]
    if tags:
        logger.info('Adding required_tags ProviderFilter for: %s', tags)
        filters.append(ProviderFilter(required_tags=tags))
    if providers:
        logger.info('Adding keys ProviderFilter for: %s', providers)
        filters.append(ProviderFilter(keys=providers))

    # Just want keys, use list_providers with no global filters to include disabled.
    providers_to_scan = [prov.key for prov in list_providers(filters, use_global_filters=False)]
    logger.info('Potential providers for cleanup, filtered with given tags and provider keys: \n%s',
                '\n'.join(providers_to_scan))

    # scan providers for vms with name matches, collecting results and
    # failures through multiprocessing-manager queues
    text_match_queue = manager.Queue()
    scan_fail_queue = manager.Queue()
    provider_scan_args = [
        (provider_key, matchers, text_match_queue, scan_fail_queue)
        for provider_key in providers_to_scan]
    pool_manager(scan_provider, provider_scan_args)

    text_matched = []
    while not text_match_queue.empty():
        text_matched.append(text_match_queue.get())

    # scan name-matched vms for age matches; failures go to the same queue
    age_match_queue = manager.Queue()
    vm_scan_args = [
        (provider_key, vm_name, timedelta(hours=int(max_hours)), age_match_queue, scan_fail_queue)
        for provider_key, vm_name in text_matched]
    pool_manager(scan_vm, vm_scan_args)

    vms_to_delete = []
    while not age_match_queue.empty():
        vms_to_delete.append(age_match_queue.get())

    scan_fail_vms = []
    # add the scan failures into deleted vms for reporting sake
    while not scan_fail_queue.empty():
        scan_fail_vms.append(scan_fail_queue.get())

    if vms_to_delete and prompt:
        # FIX: raw_input() does not exist on Python 3 (NameError at runtime);
        # input() is the Python 3 equivalent.
        yesno = input('Delete these VMs? [y/N]: ')
        if str(yesno).lower() != 'y':
            logger.info('Exiting.')
            return 0

    # initialize this even if we don't have anything to delete, for report consistency
    deleted_vms = []
    if vms_to_delete:
        delete_queue = manager.Queue()
        delete_vm_args = [(provider_key, vm_name, age, delete_queue)
                          for provider_key, vm_name, age in vms_to_delete]
        pool_manager(delete_vm, delete_vm_args)

        while not delete_queue.empty():
            deleted_vms.append(delete_queue.get())  # Each item is a VmReport tuple

    else:
        logger.info('No VMs to delete.')

    # Append the run's report (scan failures included for visibility)
    with open(args.outfile, 'a') as report:
        report.write('## VM/Instances deleted via:\n'
                     '##   text matches: {}\n'
                     '##   age matches: {}\n'
                     .format(texts, max_hours))
        message = tabulate(sorted(scan_fail_vms + deleted_vms, key=attrgetter('result')),
                           headers=['Provider', 'Name', 'Age', 'Status Before', 'Delete RC'],
                           tablefmt='orgtbl')
        report.write(message + '\n')
    logger.info(message)
    return 0
コード例 #16
0
def cleanup_vms(texts, max_hours=24, providers=None, tags=None, prompt=True):
    """
    Main method for the cleanup process

    Generates regex match objects
    Checks providers for cleanup boolean in yaml
    Checks provider connectivity (using ping)
    Threads process_provider_vms to build list of vms to delete
    Prompts user to continue with delete
    Threads deleting of the vms

    Args:
        texts (list): List of regex strings to match with
        max_hours (int): age limit for deletion
        providers (list): List of provider keys to scan and cleanup
        tags (list): List of tags to filter providers by
        prompt (bool): Whether or not to prompt the user before deleting vms
    Returns:
        int: return code, 0 on success, otherwise raises exception
    """
    logger.info(
        'Matching VM names against the following case-insensitive strings: %r',
        texts)
    # Compile regex, strip leading/trailing single quotes from cli arg
    matchers = [re.compile(text.strip("'"), re.IGNORECASE) for text in texts]

    # setup provider filter with cleanup (default), tags, and providers (from cli opts)
    filters = [ProviderFilter(required_fields=[('cleanup', True)])]
    if tags:
        logger.info('Adding required_tags ProviderFilter for: %s', tags)
        filters.append(ProviderFilter(required_tags=tags))
    if providers:
        logger.info('Adding keys ProviderFilter for: %s', providers)
        filters.append(ProviderFilter(keys=providers))

    # Just want keys, use list_providers with no global filters to include disabled.
    with DummyAppliance():
        providers_to_scan = [
            prov.key
            for prov in list_providers(filters, use_global_filters=False)
        ]
    logger.info(
        'Potential providers for cleanup, filtered with given tags and provider keys: \n%s',
        '\n'.join(providers_to_scan))

    # scan providers for vms with name matches, collecting results and
    # failures through multiprocessing-manager queues
    text_match_queue = manager.Queue()
    scan_fail_queue = manager.Queue()
    provider_scan_args = [(provider_key, matchers, text_match_queue,
                           scan_fail_queue)
                          for provider_key in providers_to_scan]
    pool_manager(scan_provider, provider_scan_args)

    text_matched = []
    while not text_match_queue.empty():
        text_matched.append(text_match_queue.get())

    # scan name-matched vms for age matches; failures go to the same queue
    age_match_queue = manager.Queue()
    vm_scan_args = [(provider_key, vm_name, timedelta(hours=int(max_hours)),
                     age_match_queue, scan_fail_queue)
                    for provider_key, vm_name in text_matched]
    pool_manager(scan_vm, vm_scan_args)

    vms_to_delete = []
    while not age_match_queue.empty():
        vms_to_delete.append(age_match_queue.get())

    scan_fail_vms = []
    # add the scan failures into deleted vms for reporting sake
    while not scan_fail_queue.empty():
        scan_fail_vms.append(scan_fail_queue.get())

    if vms_to_delete and prompt:
        # FIX: raw_input() does not exist on Python 3 (NameError at runtime);
        # input() is the Python 3 equivalent.
        yesno = input('Delete these VMs? [y/N]: ')
        if str(yesno).lower() != 'y':
            logger.info('Exiting.')
            return 0

    # initialize this even if we don't have anything to delete, for report consistency
    deleted_vms = []
    if vms_to_delete:
        delete_queue = manager.Queue()
        delete_vm_args = [(provider_key, vm_name, age, delete_queue)
                          for provider_key, vm_name, age in vms_to_delete]
        pool_manager(delete_vm, delete_vm_args)

        while not delete_queue.empty():
            deleted_vms.append(
                delete_queue.get())  # Each item is a VmReport tuple

    else:
        logger.info('No VMs to delete.')

    # Append the run's report (scan failures included for visibility)
    with open(args.outfile, 'a') as report:
        report.write('## VM/Instances deleted via:\n'
                     '##   text matches: {}\n'
                     '##   age matches: {}\n'.format(texts, max_hours))
        message = tabulate(
            sorted(scan_fail_vms + deleted_vms, key=attrgetter('result')),
            headers=['Provider', 'Name', 'Age', 'Status Before', 'Delete RC'],
            tablefmt='orgtbl')
        report.write(message + '\n')
    logger.info(message)
    return 0
コード例 #17
0
    output_queue.put(output_list)
    return


if __name__ == "__main__":
    args = parse_cmd_line()
    # Build provider filters from the CLI options: explicit keys and/or tags.
    # providers as a set when processing tags to ensure unique entries
    filters = []
    if args.provider:
        filters.append(ProviderFilter(keys=args.provider))
    if args.tag:
        filters.append(ProviderFilter(required_tags=args.tag))

    # don't include global filter to keep disabled in the list
    providers = [prov.key for prov in list_providers(filters, use_global_filters=False)]

    # One process per provider; each list_vms call reports through the queue.
    queue = Queue()  # for MP output
    proc_list = [
        Process(target=list_vms, args=(provider, queue), name='list_vms:{}'.format(provider))
        for provider in providers
    ]
    for proc in proc_list:
        proc.start()
    # Wait for every scan process to finish before draining the queue.
    for proc in proc_list:
        proc.join()

    print('Done processing providers, assembling report...')

    # Now pull all the results off of the queue
    # Stacking the generator this way is equivalent to using list.extend instead of list.append
コード例 #18
0
def providers(metafunc, filters=None, selector=ALL):
    """Parametrize a test with providers matching the given (+ global) filters.

    Note:
        With the default 'function' scope each test runs once per provider
        before moving on to the next test. Parametrize in the 'module' scope
        to keep all of one provider's tests together.

    Note:
        testgen for providers requires test_flags to be documented for
        collection to work. See
        http://cfme-tests.readthedocs.org/guides/documenting.html#documenting-tests
        for more details.
    """
    filters = filters or []
    param_names = []
    param_values = []
    ids = []

    # Translate the test's documented test_flag string into a ProviderFilter
    doc_meta = getattr(metafunc.function, 'meta', None)
    flag_string = getattr(doc_meta, 'kwargs', {}).get('from_docs', {}).get('test_flag')
    if flag_string:
        filters = filters + [ProviderFilter(required_flags=flag_string.split(','))]

    candidates = list_providers(filters)

    def first_per_key(key_of):
        # Keep only the first candidate seen for each distinct key value.
        seen = set()
        kept = []
        for prov in candidates:
            marker = key_of(prov)
            if marker not in seen:
                seen.add(marker)
                kept.append(prov)
        return kept

    # Narrow the candidate list according to the requested selector
    if selector == ONE:
        allowed = candidates[:1]
    elif selector == LATEST:
        allowed = [sorted(
            candidates,
            key=lambda p: LooseVersion(str(p.data.get('version', 0))),
            reverse=True)[0]]
    elif selector == ONE_PER_TYPE:
        allowed = first_per_key(lambda p: p.type)
    elif selector == ONE_PER_CATEGORY:
        allowed = first_per_key(lambda p: p.category)
    elif selector == ONE_PER_VERSION:
        allowed = first_per_key(lambda p: p.data.get('version', 0))
    else:
        allowed = candidates

    for provider in allowed:
        param_values.append([provider])
        # Provider keys make the parametrized test ids readable
        ids.append(provider.key)
        # Register the 'provider' argname once, if the test asks for it
        if 'provider' in metafunc.fixturenames and 'provider' not in param_names:
            metafunc.function = pytest.mark.uses_testgen()(metafunc.function)
            param_names.append('provider')
        # Under sauce, or with the ONE selector, only the first provider is kept
        if metafunc.config.getoption('sauce') or selector == ONE:
            break

    return param_names, param_values, ids
コード例 #19
0
def cleanup_vms(texts, max_hours=24, providers=None, tags=None, dryrun=True):
    """Scan providers for matching VMs and clean them up.

    Compiles the given regexes, keeps only providers with the cleanup boolean
    set in yaml (optionally narrowed by tags/keys), then runs one process per
    provider; each provider process threads its own vm scanning and deletion.

    Args:
        texts (list): List of regex strings to match with
        max_hours (int): age limit for deletion
        providers (list): List of provider keys to scan and cleanup
        tags (list): List of tags to filter providers by
        dryrun (bool): Whether or not to actually delete VMs or just report
    Returns:
        int: return code, 0 on success, otherwise raises exception
    """
    logger.info('Matching VM names against the following case-insensitive strings: %r', texts)
    # CLI args may arrive wrapped in single quotes; strip them before compiling
    name_patterns = [re.compile(raw.strip("'"), re.IGNORECASE) for raw in texts]

    # Providers must opt in via the 'cleanup' yaml field; tags/keys narrow further
    provider_filters = [ProviderFilter(required_fields=[('cleanup', True)])]
    if tags:
        logger.info('Adding required_tags ProviderFilter for: %s', tags)
        provider_filters.append(ProviderFilter(required_tags=tags))
    if providers:
        logger.info('Adding keys ProviderFilter for: %s', providers)
        provider_filters.append(ProviderFilter(keys=providers))

    # Only keys are needed; skip global filters so disabled providers are included
    with DummyAppliance():
        scan_keys = [
            prov.key for prov in list_providers(provider_filters, use_global_filters=False)]
    logger.info('Potential providers for cleanup, filtered with given tags and provider keys: \n%s',
                '\n'.join(scan_keys))

    # Fan out one worker per provider (max 4 at a time); scan failures are
    # collected through a shared manager queue
    scan_fail_queue = manager.Queue()
    with Pool(4) as pool:
        per_provider_reports = pool.starmap(
            cleanup_provider,
            ((provider_key, name_patterns, scan_fail_queue, max_hours, dryrun)
             for provider_key in scan_keys)
        )

    # Flatten the per-provider report lists, dropping None responses
    deleted_vms = []
    for prov_reports in per_provider_reports:
        if prov_reports is not None:
            deleted_vms.extend(prov_reports)

    # Scan failures are reported alongside the deletions
    scan_fail_vms = []
    while not scan_fail_queue.empty():
        scan_fail_vms.append(scan_fail_queue.get())

    # Append this run's report to the output file
    with open(args.outfile, 'a') as report:
        report.write('## VM/Instances deleted via:\n'
                     '##   text matches: {}\n'
                     '##   age matches: {}\n'
                     .format(texts, max_hours))
        message = tabulate(
            sorted(scan_fail_vms + deleted_vms, key=attrgetter('result')),
            headers=['Provider', 'Name', 'Age', 'Status Before', 'Delete RC'],
            tablefmt='orgtbl'
        )
        report.write(message + '\n')
    logger.info(message)
    return 0
コード例 #20
0
def providers(metafunc, filters=None, selector=ALL):
    """Parametrize a test with providers matching the given (+ global) filters.

    Note:
        With the default 'function' scope each test runs once per provider
        before moving on to the next test. Parametrize in the 'module' scope
        to keep all of one provider's tests together.

    Note:
        testgen for providers requires test_flags to be documented for
        collection to work. See
        http://cfme-tests.readthedocs.org/guides/documenting.html#documenting-tests
        for more details.
    """
    filters = filters or []
    names = []
    values = []
    ids = []

    # Turn the test's documented test_flag string into an extra ProviderFilter
    meta_attr = getattr(metafunc.function, 'meta', None)
    flag_str = getattr(meta_attr, 'kwargs', {}).get('from_docs', {}).get('test_flag')
    if flag_str:
        filters = filters + [ProviderFilter(required_flags=flag_str.split(','))]

    candidates = list_providers(filters)

    def unique_by(key_of):
        # First candidate wins for each distinct key value.
        seen = set()
        result = []
        for prov in candidates:
            marker = key_of(prov)
            if marker not in seen:
                seen.add(marker)
                result.append(prov)
        return result

    # Narrow the candidate list according to the requested selector
    if selector == ONE:
        picked = candidates[:1]
    elif selector == LATEST:
        picked = [sorted(
            candidates,
            key=lambda p: LooseVersion(str(p.data.get('version', 0))),
            reverse=True)[0]]
    elif selector == ONE_PER_TYPE:
        picked = unique_by(lambda p: p.type)
    elif selector == ONE_PER_CATEGORY:
        picked = unique_by(lambda p: p.category)
    elif selector == ONE_PER_VERSION:
        picked = unique_by(lambda p: p.data.get('version', 0))
    else:
        picked = candidates

    for provider in picked:
        values.append([provider])
        # Provider keys make the parametrized test ids readable
        ids.append(provider.key)
        # Register the 'provider' argname once, if the test asks for it
        if 'provider' in metafunc.fixturenames and 'provider' not in names:
            metafunc.function = pytest.mark.uses_testgen()(metafunc.function)
            names.append('provider')
        # Under sauce, or with the ONE selector, only the first provider is kept
        if metafunc.config.getoption('sauce') or selector == ONE:
            break

    return names, values, ids
コード例 #21
0
def providers(metafunc, filters=None, selector=ALL, fixture_name='provider'):
    """ Gets providers based on given (+ global) filters

    Note:
        Using the default 'function' scope, each test will be run individually for each provider
        before moving on to the next test. To group all tests related to single provider together,
        parametrize tests in the 'module' scope.

    Note:
        testgen for providers now requires the usage of test_flags for collection to work.
        Please visit http://cfme-tests.readthedocs.org/guides/documenting.html#documenting-tests
        for more details.

    Args:
        metafunc: pytest metafunc object for the test being parametrized.
        filters: Optional list of ProviderFilter objects applied on top of
            the global filters.
        selector: One of ALL/ONE/LATEST/ONE_PER_TYPE/ONE_PER_CATEGORY/
            ONE_PER_VERSION; controls which matching providers are kept.
        fixture_name: Name of the fixture/argname the provider is injected as.

    Returns:
        Tuple of (argnames, argvalues, idlist) for metafunc parametrization.
    """
    filters = filters or []
    argnames = []
    argvalues = []
    idlist = []

    # Obtains the test's flags in form of a ProviderFilter
    meta = getattr(metafunc.function, 'meta', None)
    test_flag_str = getattr(meta, 'kwargs', {}).get('from_docs',
                                                    {}).get('test_flag')
    if test_flag_str:
        test_flags = test_flag_str.split(',')
        flags_filter = ProviderFilter(required_flags=test_flags)
        filters = filters + [flags_filter]

    # available_providers are the ones "available" from the yamls after all of the global and
    # local filters have been applied. It will be a list of crud objects.
    available_providers = list_providers(filters)

    # supported_providers are the ones "supported" in the supportability.yaml file. It will
    # be a list of DataProvider objects and will be filtered based upon what the test has asked for
    holder = metafunc.config.pluginmanager.get_plugin('appliance-holder')
    series = holder.held_appliance.version.series()
    supported_providers = all_required(series, filters)

    def get_valid_providers(provider):
        # Search through all available providers for ones matching the supported
        # provider's version, type and category; returns a (possibly empty) list
        # of (DataProvider, crud provider) tuples.
        prov_tuples = []
        for a_prov in available_providers:
            try:
                if not a_prov.version:
                    raise ValueError(
                        "provider {p} has no version".format(p=a_prov))
                elif (a_prov.version == provider.version
                      and a_prov.type == provider.type_name
                      and a_prov.category == provider.category):
                    prov_tuples.append((provider, a_prov))
            except (KeyError, ValueError):
                # No usable version on the yaml provider: fall back to matching
                # on type and category alone.
                if (a_prov.type == provider.type_name
                        and a_prov.category == provider.category):
                    prov_tuples.append((provider, a_prov))
        return prov_tuples

    # A small routine to check if we need to supply the idlist a provider type or
    # a real type/version
    # NOTE(review): 'filter' shadows the builtin of the same name; the inner
    # break only exits the inner loop, but need_prov_keys stays True once set.
    need_prov_keys = False
    for filter in filters:
        if isinstance(filter, ProviderFilter) and filter.classes:
            for filt in filter.classes:
                if hasattr(filt, 'type_name'):
                    need_prov_keys = True
                    break

    # Every (supported, available) pairing that survived the matching above
    matching_provs = [
        valid_provider for prov in supported_providers
        for valid_provider in get_valid_providers(prov)
    ]

    # Now we run through the selectors and build up a list of supported providers which match our
    # requirements. This then forms the providers that the test should run against.
    if selector == ONE:
        if matching_provs:
            allowed_providers = [matching_provs[0]]
        else:
            allowed_providers = []
    elif selector == LATEST:
        # Highest version wins; raises IndexError if matching_provs is empty
        allowed_providers = [
            sorted(matching_provs,
                   key=lambda k: LooseVersion(str(k[0].version)),
                   reverse=True)[0]
        ]
    elif selector == ONE_PER_TYPE:
        types = set()

        def add_prov(prov):
            # Side effect: records the type so later duplicates are skipped
            types.add(prov[0].type_name)
            return prov

        allowed_providers = [
            add_prov(prov) for prov in matching_provs
            if prov[0].type_name not in types
        ]
    elif selector == ONE_PER_CATEGORY:
        categories = set()

        def add_prov(prov):
            # Side effect: records the category so later duplicates are skipped
            categories.add(prov[0].category)
            return prov

        allowed_providers = [
            add_prov(prov) for prov in matching_provs
            if prov[0].category not in categories
        ]
    elif selector == ONE_PER_VERSION:
        # This needs to handle versions per type
        versions = defaultdict(set)

        def add_prov(prov):
            # Side effect: records the version under its type
            versions[prov[0].type_name].add(prov[0].version)
            return prov

        allowed_providers = [
            add_prov(prov) for prov in matching_provs
            if prov[0].version not in versions[prov[0].type_name]
        ]
    else:
        # If there are no selectors, then the allowed providers are whichever are supported
        allowed_providers = matching_provs

    # Now we iterate through the required providers and try to match them to the available ones
    for data_prov, real_prov in allowed_providers:
        # Mutates the DataProvider so downstream fixtures see the real yaml key
        data_prov.key = real_prov.key
        argvalues.append(pytest.param(data_prov))

        # Use the provider key for idlist, helps with readable parametrized test output
        if metafunc.config.getoption('legacy_ids'):
            the_id = "{}".format(data_prov.key)
        else:
            ver = data_prov.version if data_prov.version else None
            if ver:
                the_id = "{}-{}".format(data_prov.type_name, data_prov.version)
            else:
                the_id = "{}".format(data_prov.type_name)

        # Now we modify the id based on what selector we chose
        if metafunc.config.getoption('disable_selectors'):
            idlist.append(the_id)
        else:
            if selector == ONE:
                if need_prov_keys:
                    idlist.append(data_prov.type_name)
                else:
                    idlist.append(data_prov.category)
            elif selector == ONE_PER_CATEGORY:
                idlist.append(data_prov.category)
            elif selector == ONE_PER_TYPE:
                idlist.append(data_prov.type_name)
            else:
                idlist.append(the_id)

        # Add provider to argnames if missing
        if fixture_name in metafunc.fixturenames and fixture_name not in argnames:
            metafunc.function = pytest.mark.uses_testgen()(metafunc.function)
            argnames.append(fixture_name)
        if metafunc.config.getoption('sauce') or selector == ONE:
            break
    return argnames, argvalues, idlist
コード例 #22
0
    output_queue.put(output_list)
    return


if __name__ == "__main__":
    args = parse_cmd_line()
    # Assemble provider filters from CLI options (keys and/or required tags).
    # providers as a set when processing tags to ensure unique entries
    filters = []
    if args.provider:
        filters.append(ProviderFilter(keys=args.provider))
    if args.tag:
        filters.append(ProviderFilter(required_tags=args.tag))

    # don't include global filter to keep disabled in the list
    providers = [prov.key for prov in list_providers(filters, use_global_filters=False)]

    # Spawn one process per provider; list_vms reports through the shared queue.
    queue = Queue()  # for MP output
    proc_list = [
        Process(target=list_vms, args=(provider, queue), name='list_vms:{}'.format(provider))
        for provider in providers
    ]
    for proc in proc_list:
        proc.start()
    # Block until all scans complete before reading results off the queue.
    for proc in proc_list:
        proc.join()

    print('Done processing providers, assembling report...')

    # Now pull all the results off of the queue
    # Stacking the generator this way is equivalent to using list.extend instead of list.append