예제 #1
0
def a_provider(request):
    """Set up a single infra provider that defines the 'large' field, or skip.

    Args:
        request: the pytest request object, passed through to setup_one_or_skip.
    """
    # Only infrastructure providers that expose the 'large' field qualify.
    infra_with_large = ProviderFilter(
        classes=[InfraProvider],
        required_fields=['large'],
    )
    setup_one_or_skip(request, filters=[infra_with_large])
예제 #2
0
from cfme.infrastructure.pxe import get_template_from_config
from cfme.markers.env_markers.provider import providers
from cfme.tests.infrastructure.test_provisioning_dialog import check_all_tabs
from cfme.utils import ssh
from cfme.utils.blockers import BZ
from cfme.utils.generators import random_vm_name
from cfme.utils.log import logger
from cfme.utils.net import retry_connect
from cfme.utils.providers import ProviderFilter
from cfme.utils.ssh import connect_ssh
from cfme.utils.ssh import SSHClient
from cfme.utils.wait import wait_for

# Provisioning data every provider must define to be collected for these tests.
REQUIRED_FIELDS = [['provisioning', 'ci-template']]

# Include cloud/infra providers that carry the required provisioning fields...
pf1 = ProviderFilter(classes=[CloudProvider, InfraProvider],
                     required_fields=REQUIRED_FIELDS)
pf2 = ProviderFilter(classes=[SCVMMProvider],
                     inverted=True)  # SCVMM doesn't support cloud-init
# Parametrize all tests in this module over the filtered provider set.
pytestmark = [
    pytest.mark.meta(server_roles="+automate"),
    pytest.mark.provider(gen_func=providers,
                         filters=[pf1, pf2],
                         scope="module")
]


@pytest.fixture(scope="module")
def setup_ci_template(provider, appliance):
    cloud_init_template_name = provider.data['provisioning']['ci-template']
    get_template_from_config(cloud_init_template_name,
                             create=True,
            output_list.append([provider_key,
                                vm.name,
                                status or NULL,
                                creation or NULL,
                                str(vm_type) or NULL])

    output_queue.put(output_list)
    return


if __name__ == "__main__":
    args = parse_cmd_line()
    # providers as a set when processing tags to ensure unique entries
    filters = []
    if args.provider:
        filters.append(ProviderFilter(keys=args.provider))
    if args.tag:
        filters.append(ProviderFilter(required_tags=args.tag))

    # don't include global filter to keep disabled in the list
    with DummyAppliance('5.10.0.0'):
        providers = [prov.key for prov in list_providers(filters, use_global_filters=False)]

    queue = Queue()  # for MP output
    proc_list = [
        Process(target=list_vms, args=(provider, queue), name='list_vms:{}'.format(provider))
        for provider in providers
    ]
    for proc in proc_list:
        proc.start()
    for proc in proc_list:
예제 #4
0
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.grafana import get_scenario_dashboard_urls
from cfme.utils.log import logger
from cfme.utils.providers import ProviderFilter
from cfme.utils.smem_memory_monitor import add_workload_quantifiers, SmemMemoryMonitor
from cfme.utils.workloads import get_memory_leak_scenarios
from cfme.markers.env_markers.provider import providers


# Server roles enabled on the appliance while running the memory-leak workload.
roles_memory_leak = ['automate', 'database_operations', 'ems_inventory', 'ems_metrics_collector',
    'ems_metrics_coordinator', 'ems_metrics_processor', 'ems_operations', 'event', 'notifier',
    'reporting', 'scheduler', 'user_interface', 'web_services']


# An empty ProviderFilter matches any provider; module scope keeps all tests
# for one provider grouped together.
pytestmark = [pytest.mark.provider(gen_func=providers,
                                   filters=[ProviderFilter()],
                                   scope="module")]


def prepare_workers(appliance):
    """Set single instance of each worker type and maximum threshold"""
    view = navigate_to(appliance.server, 'Workers')
    view.workers.fill({
        "generic_worker_count": "1",
        "cu_data_collector_worker_count": "1",
        "ui_worker_count": "1",
        "reporting_worker_count": "1",
        "web_service_worker_count": "1",
        "priority_worker_count": "1",
        "cu_data_processor_worker_count": "1",
        "vm_analysis_collectors_worker_count": "1",
from cfme.infrastructure.provider import InfraProvider
from cfme.markers.env_markers.provider import providers
from cfme.services.service_catalogs import ServiceCatalogs
from cfme.utils.appliance import ViaSSUI
from cfme.utils.blockers import GH
from cfme.utils.providers import ProviderFilter

# Module-wide marks: automate role, SSUI requirement, and provider
# parametrization over infra/cloud providers that define provisioning data.
pytestmark = [
    pytest.mark.meta(server_roles="+automate",
                     blockers=[GH('ManageIQ/integration_tests:7297')]),
    test_requirements.ssui, pytest.mark.long_running,
    pytest.mark.ignore_stream("upstream"),
    pytest.mark.provider(gen_func=providers,
                         filters=[
                             ProviderFilter(
                                 classes=[InfraProvider, CloudProvider],
                                 required_fields=['provisioning'])
                         ])
]


@pytest.mark.rhv2
@pytest.mark.parametrize('context', [ViaSSUI])
def test_service_catalog_crud_ssui(appliance, setup_provider, context,
                                   order_service):
    """Tests Service Catalog in SSUI."""

    catalog_item = order_service
    with appliance.context.use(context):
        if appliance.version >= '5.9':
            dialog_values = {
예제 #6
0
from cfme.common.vm import VM
from cfme.infrastructure.provider.rhevm import RHEVMProvider
from cfme.infrastructure.provider.scvmm import SCVMMProvider
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.generators import random_vm_name
from cfme.utils.log import logger
from cfme.utils.providers import ProviderFilter
from markers.env_markers.provider import providers

# Run against any BaseProvider except SCVMM and RHEV (second, inverted filter
# removes them from the set produced by the first).
pytestmark = [
    pytest.mark.usefixtures('uses_infra_providers', 'uses_cloud_providers',
                            'provider'),
    pytest.mark.tier(2),
    pytest.mark.provider(gen_func=providers,
                         filters=[
                             ProviderFilter(classes=[BaseProvider]),
                             ProviderFilter(
                                 classes=[SCVMMProvider, RHEVMProvider],
                                 inverted=True)
                         ],
                         scope='module'),
]


@pytest.fixture(scope="function")
def vm_crud(provider, small_template):
    """Return a fresh VM object (named for genealogy testing) built from the
    provider's small template."""
    vm_name = random_vm_name(context='genealogy')
    return VM.factory(vm_name, provider, template_name=small_template.name)

예제 #7
0
"""This module tests events that are invoked by Cloud/Infra VMs."""
import fauxfactory
import pytest

from cfme import test_requirements
from cfme.cloud.provider import CloudProvider
from cfme.cloud.provider.gce import GCEProvider
from cfme.control.explorer.policies import VMControlPolicy
from cfme.infrastructure.provider import InfraProvider
from cfme.infrastructure.provider.kubevirt import KubeVirtProvider
from cfme.markers.env_markers.provider import providers
from cfme.utils.providers import ProviderFilter
from cfme.utils.wait import wait_for


# All infra/cloud providers that define provisioning data...
all_prov = ProviderFilter(classes=[InfraProvider, CloudProvider], required_fields=['provisioning'])
# ...except KubeVirt (inverted filter excludes it).
excluded = ProviderFilter(classes=[KubeVirtProvider], inverted=True)
pytestmark = [
    pytest.mark.usefixtures('uses_infra_providers', 'uses_cloud_providers'),
    pytest.mark.tier(2),
    pytest.mark.provider(gen_func=providers, filters=[all_prov, excluded],
                         scope='module'),
    test_requirements.events,
]


@pytest.fixture(scope="function")
def vm_crud(provider, setup_provider_modscope, small_template_modscope):
    template = small_template_modscope
    base_name = 'test-events-' if provider.one_of(GCEProvider) else 'test_events_'
    vm_name = base_name + fauxfactory.gen_alpha(length=8).lower()
예제 #8
0
from cfme.utils.generators import random_vm_name
from cfme.utils.log import logger
from cfme.utils.providers import ProviderFilter
from cfme.utils.timeutil import parsetime
from cfme.utils.version import LOWEST
from cfme.utils.version import VersionPicker
from cfme.utils.wait import wait_for

# Retirement tests run only on cloud/infra providers flagged for both
# provisioning and retirement support.
pytestmark = [
    pytest.mark.usefixtures('setup_provider'),
    pytest.mark.meta(blockers=[BZ(1626971, forced_streams=['5.10'])]),
    pytest.mark.tier(1),
    pytest.mark.long_running,
    test_requirements.retirement,
    pytest.mark.provider(gen_func=providers,
                         filters=[ProviderFilter(classes=[CloudProvider, InfraProvider],
                                                 required_flags=['provision', 'retire'])]),
]


# Pairs a parametrization id with the exact UI label of a retirement warning.
RetirementWarning = namedtuple('RetirementWarning', ['id', 'string'])

# NOTE(review): this module-level name shadows the stdlib ``warnings`` module;
# kept for backward compatibility with existing references.
warnings = [
    RetirementWarning(warning_id, label)
    for warning_id, label in (
        ('no_warning', 'None'),
        ('1_week_warning', '1 Week before retirement'),
        ('2_week_warning', '2 Weeks before retirement'),
        ('30_day_warning', '30 Days before retirement'),
    )
]


@pytest.fixture(scope="function")
def retire_vm(small_template, provider):
    """Fixture for creating a generic vm/instance
import pytest

from cfme.containers.provider import ContainersProvider
from cfme.markers.env_markers.provider import providers
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.providers import ProviderFilter
from cfme.utils.wait import wait_for


# Cockpit tests need container providers explicitly flagged with 'cockpit'.
pytestmark = [
    pytest.mark.usefixtures('setup_provider'),
    pytest.mark.tier(1),
    pytest.mark.provider(gen_func=providers,
                         filters=[ProviderFilter(classes=[ContainersProvider],
                                                 required_flags=['cockpit'])],
                         scope='function')]


@pytest.mark.uncollectif(lambda appliance: appliance.version < "5.9",
                         reason='Cockpit Feature is only available in 5.9 and greater')
@pytest.mark.parametrize('cockpit', [False, True], ids=['disabled', 'enabled'])
def test_cockpit_button_access(appliance, provider, cockpit, request):
    """ The test verifies the existence of cockpit "Web Console"
        button on each node, click the button if enabled, verify no errors are displayed.
    """

    request.addfinalizer(lambda: appliance.server.settings.disable_server_roles('cockpit_ws'))

    if cockpit:
        appliance.server.settings.enable_server_roles('cockpit_ws')
        wait_for(lambda: appliance.server_roles['cockpit_ws'] is True, delay=10, timeout=300)
예제 #10
0
from cfme.control.explorer.policies import VMControlPolicy
from cfme.infrastructure.provider import InfraProvider
from cfme.infrastructure.provider.kubevirt import KubeVirtProvider
from cfme.infrastructure.provider.rhevm import RHEVMProvider
from cfme.infrastructure.provider.scvmm import SCVMMProvider
from cfme.infrastructure.provider.virtualcenter import VMwareProvider
from cfme.markers.env_markers.provider import providers
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.blockers import BZ
from cfme.utils.generators import random_vm_name
from cfme.utils.log import logger
from cfme.utils.providers import ProviderFilter
from cfme.utils.wait import TimedOutError
from cfme.utils.wait import wait_for

# Timelines tests target all infrastructure providers...
all_infra_prov = ProviderFilter(classes=[InfraProvider])
# SCVMM timelines are not supported per the support matrix, KubeVirt also should not be collected
excluded = ProviderFilter(classes=[SCVMMProvider, KubeVirtProvider],
                          inverted=True)
pytestmark = [
    pytest.mark.tier(2),
    pytest.mark.provider(gen_func=providers,
                         filters=[excluded, all_infra_prov]),
    pytest.mark.usefixtures('setup_provider'),
    test_requirements.timelines,
    test_requirements.events,
]


@pytest.fixture()
def new_vm(provider):
예제 #11
0
from cfme import test_requirements
from cfme.base.credential import Credential
from cfme.cloud.provider import CloudProvider
from cfme.cloud.provider.azure import AzureProvider
from cfme.cloud.provider.ec2 import EC2Provider
from cfme.cloud.provider.gce import GCEProvider
from cfme.infrastructure.provider import InfraProvider
from cfme.infrastructure.provider.scvmm import SCVMMProvider
from cfme.markers.env_markers.provider import providers
from cfme.utils.blockers import GH
from cfme.utils.log import logger
from cfme.utils.providers import ProviderFilter
from cfme.utils.wait import wait_for


# Chargeback tests need providers with C&U test data configured in the yamls.
cloud_and_infra = ProviderFilter(classes=[CloudProvider, InfraProvider],
                                 required_fields=[(['cap_and_util', 'test_chargeback'], True)])
not_scvmm = ProviderFilter(classes=[SCVMMProvider], inverted=True)  # SCVMM doesn't support C&U
# not_cloud / not_ec2_gce are not used in pytestmark below -- presumably
# consumed by individual tests later in this module; verify before removing.
not_cloud = ProviderFilter(classes=[CloudProvider], inverted=True)
not_ec2_gce = ProviderFilter(classes=[GCEProvider, EC2Provider], inverted=True)


pytestmark = [
    pytest.mark.tier(2),
    pytest.mark.provider(gen_func=providers, filters=[cloud_and_infra, not_scvmm], scope='module'),
    pytest.mark.usefixtures('has_no_providers_modscope', 'setup_provider_modscope'),
    test_requirements.chargeback,
    pytest.mark.meta(blockers=[
        GH('ManageIQ/manageiq:20237', unblock=lambda provider: not provider.one_of(AzureProvider))
    ])
]
예제 #12
0
def a_provider(request):
    """Clear all existing providers, then set up a single infra provider
    (excluding SCVMM) or skip the test.

    Args:
        request: the pytest request object, passed through to setup_one_or_skip.
    """
    BaseProvider.clear_providers()
    # scvmm doesn't provide events, so it is filtered out of the selection.
    selection_filters = [
        ProviderFilter(classes=[SCVMMProvider], inverted=True),
        ProviderFilter(classes=[InfraProvider]),
    ]
    return setup_one_or_skip(request, filters=selection_filters)
예제 #13
0
from cfme.rest.gen_data import a_provider as _a_provider
from cfme.rest.gen_data import vm as _vm
from cfme.web_ui import InfoBlock
from cfme.utils import testgen
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.blockers import BZ
from cfme.utils.generators import random_vm_name
from cfme.utils.log import logger
from cfme.utils.providers import ProviderFilter
from cfme.utils.wait import wait_for

pytestmark = [
    pytest.mark.tier(2),
    pytest.mark.usefixtures("setup_provider_modscope")
]
# Parametrize via the legacy testgen mechanism: all infra providers
# except SCVMM.
not_scvmm = ProviderFilter(classes=[SCVMMProvider],
                           inverted=True)  # scvmm doesn't provide events
all_prov = ProviderFilter(classes=[InfraProvider])
pytest_generate_tests = testgen.generate(gen_func=testgen.providers,
                                         filters=[not_scvmm, all_prov],
                                         scope='module')


@pytest.fixture(scope="module")
def new_vm(request, provider):
    vm = VM.factory(random_vm_name("timelines", max_length=16), provider)

    request.addfinalizer(vm.delete_from_provider)

    if not provider.mgmt.does_vm_exist(vm.name):
        logger.info("deploying %s on provider %s", vm.name, provider.key)
        vm.create_on_provider(allow_skip="default", find_in_cfme=True)
예제 #14
0
def cleanup_vms(texts, max_hours=24, providers=None, tags=None, dryrun=True):
    """
    Main method for the cleanup process
    Generates regex match objects
    Checks providers for cleanup boolean in yaml
    Checks provider connectivity (using ping)
    Process Pool for provider scanning
    Each provider process will thread vm scanning and deletion

    Args:
        texts (list): List of regex strings to match with
        max_hours (int): age limit for deletion
        providers (list): List of provider keys to scan and cleanup
        tags (list): List of tags to filter providers by
        dryrun (bool): Whether or not to actually delete VMs or just report
    Returns:
        int: return code, 0 on success, otherwise raises exception
    """
    logger.info('Matching VM names against the following case-insensitive strings: %r', texts)
    # Compile regex, strip leading/trailing single quotes from cli arg
    matchers = [re.compile(text.strip("'"), re.IGNORECASE) for text in texts]

    # setup provider filter with cleanup (default), tags, and providers (from cli opts)
    filters = [ProviderFilter(required_fields=[('cleanup', True)])]
    if tags:
        logger.info('Adding required_tags ProviderFilter for: %s', tags)
        filters.append(ProviderFilter(required_tags=tags))
    if providers:
        logger.info('Adding keys ProviderFilter for: %s', providers)
        filters.append(ProviderFilter(keys=providers))

    # Just want keys, use list_providers with no global filters to include disabled.
    with DummyAppliance():
        providers_to_scan = [prov.key for prov in list_providers(filters, use_global_filters=False)]
    logger.info('Potential providers for cleanup, filtered with given tags and provider keys: \n%s',
                '\n'.join(providers_to_scan))

    # scan providers for vms with name matches
    # NOTE(review): ``manager`` appears to be a module-level multiprocessing
    # Manager created at import time -- confirm against the rest of the module.
    scan_fail_queue = manager.Queue()
    with Pool(4) as pool:
        # One (provider_key, ...) argument tuple per provider process.
        deleted_vms = pool.starmap(
            cleanup_provider,
            ((provider_key, matchers, scan_fail_queue, max_hours, dryrun)
             for provider_key in providers_to_scan)
        )

    # flatten deleted_vms list, as its top level is by provider process
    # at same time remove None responses
    deleted_vms = [report
                   for prov_list in deleted_vms if prov_list is not None
                   for report in prov_list]

    scan_fail_vms = []
    # add the scan failures into deleted vms for reporting sake
    while not scan_fail_queue.empty():
        scan_fail_vms.append(scan_fail_queue.get())

    # NOTE(review): ``args`` is read from module scope here rather than being a
    # parameter; this function only works after the CLI has parsed arguments.
    with open(args.outfile, 'a') as report:
        report.write('## VM/Instances deleted via:\n'
                     '##   text matches: {}\n'
                     '##   age matches: {}\n'
                     .format(texts, max_hours))
        # Tabulate deletions and scan failures together, sorted by result.
        message = tabulate(
            sorted(scan_fail_vms + deleted_vms, key=attrgetter('result')),
            headers=['Provider', 'Name', 'Age', 'Status Before', 'Delete RC'],
            tablefmt='orgtbl'
        )
        report.write(message + '\n')
    logger.info(message)
    return 0
from cfme import test_requirements
from cfme.base.credential import Credential
from cfme.cloud.provider.ec2 import EC2Provider
from cfme.cloud.provider.gce import GCEProvider
from cfme.common.provider import CloudInfraProvider
from cfme.cloud.provider import CloudProvider
from cfme.common.provider import BaseProvider
from cfme.infrastructure.provider.scvmm import SCVMMProvider
from cfme.markers.env_markers.provider import providers
from cfme.utils.blockers import BZ
from cfme.utils.log import logger
from cfme.utils.providers import ProviderFilter
from cfme.utils.wait import wait_for

# Metering/chargeback tests need providers with C&U test data in the yamls.
pf1 = ProviderFilter(classes=[CloudInfraProvider],
    required_fields=[(['cap_and_util', 'test_chargeback'], True)])
pf2 = ProviderFilter(classes=[SCVMMProvider], inverted=True)  # SCVMM doesn't support C&U

pytestmark = [
    pytest.mark.tier(2),
    pytest.mark.ignore_stream('5.8'),
    pytest.mark.meta(blockers=[BZ(1511099, forced_streams=["5.9"],
                                  unblock=lambda provider: not provider.one_of(GCEProvider)),
                               ]),
    pytest.mark.provider(gen_func=providers, filters=[pf1, pf2], scope='module'),
    test_requirements.chargeback,
]

# Allowed deviation between the reported value in the Metering report and the estimated value.
DEVIATION = 1
예제 #16
0
def a_provider(request):
    """Set up a single VMware provider or skip the test."""
    vmware_only = ProviderFilter(classes=[VMwareProvider])
    return setup_one_or_skip(request, filters=[vmware_only])
def a_provider(request):
    """Set up one cloud provider that defines provisioning stacks, or skip."""
    cloud_with_stacks = ProviderFilter(
        classes=[CloudProvider],
        required_fields=[['provisioning', 'stacks']],
    )
    return setup_one_or_skip(request, filters=[cloud_with_stacks])
예제 #18
0
def providers(metafunc, filters=None, selector=ALL, fixture_name='provider'):
    """ Gets providers based on given (+ global) filters

    Note:
        Using the default 'function' scope, each test will be run individually for each provider
        before moving on to the next test. To group all tests related to single provider together,
        parametrize tests in the 'module' scope.

    Note:
        testgen for providers now requires the usage of test_flags for collection to work.
        Please visit http://cfme-tests.readthedocs.org/guides/documenting.html#documenting-tests
        for more details.

    Args:
        metafunc: the pytest ``Metafunc`` object for the test being parametrized.
        filters: optional list of ProviderFilter objects applied on top of the globals.
        selector: one of ALL / ONE / LATEST / ONE_PER_TYPE / ONE_PER_CATEGORY /
            ONE_PER_VERSION controlling how many matching providers are kept.
        fixture_name: name of the provider fixture being parametrized.

    Returns:
        Tuple of ``(argnames, argvalues, idlist)`` suitable for parametrization.
    """
    filters = filters or []
    argnames = []
    argvalues = []
    idlist = []

    # Obtains the test's flags in form of a ProviderFilter
    meta = getattr(metafunc.function, 'meta', None)
    test_flag_str = getattr(meta, 'kwargs', {}).get('from_docs',
                                                    {}).get('test_flag')
    if test_flag_str:
        test_flags = test_flag_str.split(',')
        flags_filter = ProviderFilter(required_flags=test_flags)
        filters = filters + [flags_filter]

    # available_providers are the ones "available" from the yamls after all of the global and
    # local filters have been applied. It will be a list of crud objects.
    available_providers = list_providers(filters)

    # supported_providers are the ones "supported" in the supportability.yaml file. It will
    # be a list of DataProvider objects and will be filtered based upon what the test has asked for
    holder = metafunc.config.pluginmanager.get_plugin('appliance-holder')
    series = holder.held_appliance.version.series()
    supported_providers = all_required(series, filters)

    def get_valid_providers(provider):
        # We now search through all the available providers looking for ones that match the
        # criteria. If we don't find any, we return an empty list.
        prov_tuples = []
        for a_prov in available_providers:
            try:
                if not a_prov.version:
                    raise ValueError(
                        "provider {p} has no version".format(p=a_prov))
                elif (a_prov.version == provider.version
                      and a_prov.type == provider.type_name
                      and a_prov.category == provider.category):
                    prov_tuples.append((provider, a_prov))
            except (KeyError, ValueError):
                # No usable version info -- fall back to matching on type and
                # category only.
                if (a_prov.type == provider.type_name
                        and a_prov.category == provider.category):
                    prov_tuples.append((provider, a_prov))
        return prov_tuples

    # A small routine to check if we need to supply the idlist a provider type or
    # a real type/version. (Loop variable renamed: the original shadowed the
    # builtin ``filter``. Once a match is found there is no need to keep scanning.)
    need_prov_keys = False
    for prov_filter in filters:
        if isinstance(prov_filter, ProviderFilter) and prov_filter.classes:
            if any(hasattr(klass, 'type_name') for klass in prov_filter.classes):
                need_prov_keys = True
                break

    matching_provs = [
        valid_provider for prov in supported_providers
        for valid_provider in get_valid_providers(prov)
    ]

    # Now we run through the selectors and build up a list of supported providers which match our
    # requirements. This then forms the providers that the test should run against.
    if selector == ONE:
        # Keep at most the first match.
        allowed_providers = matching_provs[:1]
    elif selector == LATEST:
        # Keep at most the highest-versioned match. Slicing instead of indexing
        # guards against an IndexError when nothing matched (the original crashed).
        allowed_providers = sorted(
            matching_provs,
            key=lambda k: LooseVersion(str(k[0].version)),
            reverse=True)[:1]
    elif selector == ONE_PER_TYPE:
        types = set()

        def add_prov(prov):
            types.add(prov[0].type_name)
            return prov

        # First provider seen for each type wins.
        allowed_providers = [
            add_prov(prov) for prov in matching_provs
            if prov[0].type_name not in types
        ]
    elif selector == ONE_PER_CATEGORY:
        categories = set()

        def add_prov(prov):
            categories.add(prov[0].category)
            return prov

        # First provider seen for each category wins.
        allowed_providers = [
            add_prov(prov) for prov in matching_provs
            if prov[0].category not in categories
        ]
    elif selector == ONE_PER_VERSION:
        # This needs to handle versions per type
        versions = defaultdict(set)

        def add_prov(prov):
            versions[prov[0].type_name].add(prov[0].version)
            return prov

        allowed_providers = [
            add_prov(prov) for prov in matching_provs
            if prov[0].version not in versions[prov[0].type_name]
        ]
    else:
        # If there are no selectors, then the allowed providers are whichever are supported
        allowed_providers = matching_provs

    # Now we iterate through the required providers and try to match them to the available ones
    for data_prov, real_prov in allowed_providers:
        data_prov.key = real_prov.key
        argvalues.append(pytest.param(data_prov))

        # Use the provider key for idlist, helps with readable parametrized test output
        if metafunc.config.getoption('legacy_ids'):
            the_id = "{}".format(data_prov.key)
        else:
            # (Removed a redundant ``ver`` temporary that duplicated this check.)
            if data_prov.version:
                the_id = "{}-{}".format(data_prov.type_name, data_prov.version)
            else:
                the_id = "{}".format(data_prov.type_name)

        # Now we modify the id based on what selector we chose
        if metafunc.config.getoption('disable_selectors'):
            idlist.append(the_id)
        else:
            if selector == ONE:
                if need_prov_keys:
                    idlist.append(data_prov.type_name)
                else:
                    idlist.append(data_prov.category)
            elif selector == ONE_PER_CATEGORY:
                idlist.append(data_prov.category)
            elif selector == ONE_PER_TYPE:
                idlist.append(data_prov.type_name)
            else:
                idlist.append(the_id)

        # Add provider to argnames if missing
        if fixture_name in metafunc.fixturenames and fixture_name not in argnames:
            metafunc.function = pytest.mark.uses_testgen()(metafunc.function)
            argnames.append(fixture_name)
        if metafunc.config.getoption('sauce') or selector == ONE:
            break
    return argnames, argvalues, idlist
예제 #19
0
from cfme.infrastructure.provider.kubevirt import KubeVirtProvider
from cfme.infrastructure.provider.rhevm import RHEVMProvider
from cfme.infrastructure.provider.scvmm import SCVMMProvider
from cfme.infrastructure.provider.virtualcenter import VMwareProvider
from cfme.markers.env_markers.provider import providers
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.blockers import BZ
from cfme.utils.conf import cfme_data
from cfme.utils.conf import credentials
from cfme.utils.log import logger
from cfme.utils.providers import ProviderFilter
from cfme.utils.ssh import SSHClient
from cfme.utils.update import update
from cfme.utils.wait import wait_for

# pf1/pf2 are not referenced in pytestmark below -- presumably consumed by
# individual tests later in this module; verify before removing.
pf1 = ProviderFilter(classes=[InfraProvider])
pf2 = ProviderFilter(classes=[SCVMMProvider], inverted=True)

# Provider types for which capacity & utilization alerts are exercised.
CANDU_PROVIDER_TYPES = [VMwareProvider]

# note: RHV provider is not supported for alerts via the Cloudforms support matrix
pytestmark = [
    pytest.mark.long_running,
    pytest.mark.meta(server_roles=["+automate", "+smartproxy", "+notifier"]),
    pytest.mark.uncollectif(
        lambda provider: provider.one_of(RHEVMProvider, KubeVirtProvider)),
    pytest.mark.usefixtures("setup_provider_modscope"),
    pytest.mark.tier(3), test_requirements.alert
]

예제 #20
0
def setup_one_by_class_or_skip(request, prov_class, use_global_filters=True):
    """Set up a single provider of the given class, skipping the test when
    no such provider is available.

    Args:
        request: the pytest request object.
        prov_class: provider class used to filter candidates.
        use_global_filters: whether global provider filters also apply.
    """
    class_filter = ProviderFilter(classes=[prov_class])
    return setup_one_or_skip(
        request, filters=[class_filter], use_global_filters=use_global_filters)
예제 #21
0
def a_provider(request):
    """Set up a single VMware or RHEV provider, or skip the test."""
    virt_filter = ProviderFilter(classes=[VMwareProvider, RHEVMProvider])
    return setup_one_or_skip(request, filters=[virt_filter])
예제 #22
0
def setup_perf_provider(request, use_global_filters=True):
    """Set up one provider tagged 'perf', skipping the test when none exists.

    Args:
        request: the pytest request object.
        use_global_filters: whether global provider filters also apply.
    """
    perf_tagged = ProviderFilter(required_tags=['perf'])
    return setup_one_or_skip(
        request, filters=[perf_tagged], use_global_filters=use_global_filters)
예제 #23
0
import fauxfactory
import pytest
from widgetastic_patternfly import DropdownItemNotFound

from cfme.infrastructure.provider import InfraProvider
from cfme.infrastructure.provider.virtualcenter import VMwareProvider
from cfme.markers.env_markers.provider import providers
from cfme.utils.blockers import BZ
from cfme.utils.log import logger
from cfme.utils.providers import ProviderFilter

# Provisioning fields a provider must define for the cloning tests.
filter_fields = {
    'required_fields': [['provisioning', 'template'], ['provisioning', 'host'],
                        ['provisioning', 'datastore']],
}
infra_filter = ProviderFilter(classes=[InfraProvider], **filter_fields)
# not_vmware is unused in pytestmark below -- presumably applied by individual
# tests later in this module; verify before removing.
not_vmware = ProviderFilter(classes=[VMwareProvider], inverted=True)

pytestmark = [
    pytest.mark.meta(roles="+automate"),
    pytest.mark.provider(gen_func=providers,
                         filters=[infra_filter],
                         scope='module'),
    pytest.mark.usefixtures("setup_provider"),
    pytest.mark.long_running,
]


@pytest.fixture(scope="function")
def clone_vm_name():
    clone_vm_name = 'test_cloning_{}'.format(fauxfactory.gen_alphanumeric())
import pytest
from wait_for import wait_for

from cfme.infrastructure.provider import InfraProvider
from cfme.markers.env_markers.provider import providers
from cfme.utils import ssh
from cfme.utils.conf import credentials
from cfme.utils.generators import random_vm_name
from cfme.utils.log import logger
from cfme.utils.providers import ProviderFilter

# WebMKS console tests require infra providers flagged with 'webmks_console'.
pytestmark = [
    pytest.mark.usefixtures('setup_provider'),
    pytest.mark.provider(gen_func=providers,
                         filters=[
                             ProviderFilter(classes=[InfraProvider],
                                            required_flags=['webmks_console'])
                         ],
                         scope='module'),
]


@pytest.fixture(scope="function")
def vm_obj(appliance, provider, setup_provider, console_template):
    """VM creation/deletion fixture.

    Create a VM on the provider with the given template, and return the vm_obj.

    Clean up VM when test is done.
    """
    vm_obj = appliance.collections.infra_vms.instantiate(
        random_vm_name('webmks'), provider, console_template.name)
예제 #25
0
from cfme import test_requirements
from cfme.containers.provider import ContainersProvider
from cfme.markers.env_markers.provider import providers
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.log import logger
from cfme.utils.path import scripts_path
from cfme.utils.providers import ProviderFilter
from cfme.utils.wait import wait_for

# Metrics tests require container providers flagged for metrics collection.
pytestmark = [
    pytest.mark.usefixtures('setup_provider'),
    pytest.mark.tier(1),
    pytest.mark.provider(gen_func=providers,
                         filters=[
                             ProviderFilter(
                                 classes=[ContainersProvider],
                                 required_flags=['metrics_collection'])
                         ],
                         scope='function'), test_requirements.containers
]

# Thresholds/timeouts used when waiting for metric capture and rollup.
SET_METRICS_CAPTURE_THRESHOLD_IN_MINUTES = 5
WAIT_FOR_METRICS_CAPTURE_THRESHOLD_IN_MINUTES = "15m"
ROLLUP_METRICS_CALC_THRESHOLD_IN_MINUTES = "50m"


@pytest.fixture(scope="module")
def reduce_metrics_collection_threshold(appliance):
    f_name = scripts_path.join(
        'openshift/change_metrics_collection_threshold.rb').strpath
    appliance.ssh_client.put_file(f_name, "/var/www/miq/vmdb")
예제 #26
0
from cfme.cloud.provider.gce import GCEProvider
from cfme.cloud.provider.openstack import OpenStackProvider
from cfme.infrastructure.provider import InfraProvider
from cfme.infrastructure.provider.rhevm import RHEVMProvider
from cfme.infrastructure.provider.scvmm import SCVMMProvider
from cfme.infrastructure.provider.virtualcenter import VMwareProvider
from cfme.infrastructure.pxe import get_template_from_config
from cfme.markers.env_markers.provider import providers
from cfme.utils import ssh
from cfme.utils.blockers import BZ
from cfme.utils.generators import random_vm_name
from cfme.utils.log import logger
from cfme.utils.providers import ProviderFilter
from cfme.utils.wait import wait_for

# Select cloud/infra providers that can provision with cloud-init, and
# explicitly exclude SCVMM, which has no cloud-init support.
pf1 = ProviderFilter(
    classes=[CloudProvider, InfraProvider],
    required_flags=['provision', 'cloud_init'],
)
pf2 = ProviderFilter(classes=[SCVMMProvider], inverted=True)
pytestmark = [
    pytest.mark.meta(server_roles="+automate"),
    pytest.mark.provider(gen_func=providers, filters=[pf1, pf2], scope="module"),
]


def find_global_ipv6(vm):
    """
    Find global IPv6 on a VM if present.

    Args:
예제 #27
0
from cfme.containers.node import Node
from cfme.containers.node import NodeCollection
from cfme.containers.provider import ContainersProvider
from cfme.containers.provider import ContainersTestItem
from cfme.markers.env_markers.provider import providers
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.providers import ProviderFilter
from cfme.utils.version import current_version

# Run only on appliances 5.8+ against containers providers flagged for
# cmqe_logging; a new provider is set up for every test function.
_logging_provider_filter = ProviderFilter(
    classes=[ContainersProvider],
    required_flags=['cmqe_logging'],
)
pytestmark = [
    pytest.mark.uncollectif(lambda provider: current_version() < "5.8"),
    pytest.mark.usefixtures('setup_provider'),
    pytest.mark.tier(1),
    pytest.mark.provider(gen_func=providers,
                         filters=[_logging_provider_filter],
                         scope='function'),
]

# Parametrization data: one test item for the provider summary page itself
# and one for the node collection.
TEST_ITEMS = [
    ContainersTestItem(ContainersProvider,
                       'test_logging_containerprovider',
                       collection_obj=None),
    ContainersTestItem(Node,
                       'test_logging_node',
                       collection_obj=NodeCollection)
]

# Expected number of logging routes present out of the box -- presumably the
# default OpenShift logging deployment; TODO confirm against the environment.
NUM_OF_DEFAULT_LOG_ROUTES = 2
예제 #28
0
from cfme.utils import normalize_text
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.generators import random_vm_name
from cfme.utils.providers import ProviderFilter
from cfme.utils.log import logger
from cfme.utils.update import update
from cfme.utils.wait import wait_for

# Provisioning tests: need automate + notifier server roles and any
# cloud/infra provider flagged as supporting provisioning.
_provision_filter = ProviderFilter(
    classes=[CloudProvider, InfraProvider],
    required_flags=['provision'],
)
pytestmark = [
    pytest.mark.meta(server_roles="+automate +notifier"),
    test_requirements.provision,
    pytest.mark.tier(2),
    pytest.mark.provider(gen_func=providers,
                         filters=[_provision_filter],
                         scope="function"),
    pytest.mark.usefixtures('setup_provider'),
]


@pytest.fixture()
def vm_name():
    """Return a short random VM name for provisioning tests."""
    generated = random_vm_name(context='prov', max_length=12)
    return generated


@pytest.fixture()
def instance_args(request, provider, provisioning, vm_name):
    """ Fixture to prepare instance parameters for provisioning
예제 #29
0
            if match(text_to_match, sa_namespace
                     ) and not provider.mgmt.does_vm_exist(sa_namespace):
                logger.info('removing sa %s from scc %s', sa, scc_name)
                provider.mgmt.remove_sa_from_scc(scc_name=scc_name,
                                                 namespace=sa_namespace,
                                                 sa=sa_name)
            else:
                logger.debug(
                    "skipping sa %s in scc %s because project exists "
                    "or it doesn't match any pattern", sa, scc_name)


if __name__ == "__main__":
    args = parse_cmd_line()
    errors = 0
    pf = ProviderFilter(classes=[OpenshiftProvider],
                        required_fields=[('use_for_sprout', True)])
    with DummyAppliance():
        providers = list_providers(filters=[pf], use_global_filters=False)
    for prov in providers:
        # ping provider
        try:
            prov.mgmt.list_project()
        except Exception as e:
            logger.error('Connection to provider %s cannot be estabilished',
                         prov.key)
            logger.error('Error: %s', e)
            errors += 1
            continue

        # remove all sa records from scc
        if args.cleanup_scc:
예제 #30
0
def setup_one_or_skip(request, filters=None, use_global_filters=True):
    """Set up one of the matching providers or skip the test.

    Args:
        request: pytest request object; needed for logging a potential skip
            correctly in artifactor.
        filters: List of :py:class:`ProviderFilter` or None.
        use_global_filters: Will apply global filters as well if `True`, will
            not otherwise.

    Returns:
        The provider that was set up (or that already existed).
    """
    filters = filters or []
    providers = list_providers(filters=filters,
                               use_global_filters=use_global_filters)

    # All providers filtered out?
    if not providers:
        global_providers = list_providers(
            filters=None, use_global_filters=use_global_filters)
        if not global_providers:
            # This can also mean that there simply are no providers in the yamls!
            pytest.skip("No provider matching global filters found")
        else:
            pytest.skip("No provider matching test-specific filters found")

    # Are all providers marked as problematic?
    if _problematic_providers.issuperset(providers):
        skip_msg = "All providers marked as problematic: {}".format(
            [p.key for p in providers])
        _artifactor_skip_providers(request, providers, skip_msg)

    # If there is a provider already set up matching the user's requirements, reuse it
    for provider in providers:
        if provider.exists:
            return provider

    # If we have more than one provider, we create two separate groups of providers, preferred
    # and not preferred, that we shuffle separately and then join together
    if len(providers) > 1:
        # Inverted filter: keep only providers NOT marked "do_not_prefer".
        only_preferred_filter = ProviderFilter(
            required_fields=[("do_not_prefer", True)], inverted=True)
        preferred_providers = list_providers(
            filters=filters + [only_preferred_filter],
            use_global_filters=use_global_filters)
        not_preferred_providers = [
            p for p in providers if p not in preferred_providers
        ]
        random.shuffle(preferred_providers)
        random.shuffle(not_preferred_providers)
        providers = preferred_providers + not_preferred_providers

    # Try to set up one of matching providers
    non_existing = [prov for prov in providers if not prov.exists]
    for provider in non_existing:
        if _setup_provider_verbose(request, provider):
            return provider

    # BUG FIX: the original assigned a (str, list) tuple here instead of
    # calling .format(), so the skip reason was never rendered; format it.
    skip_msg = "Failed to set up any matching providers: {}".format(
        [p.key for p in providers])
    _artifactor_skip_providers(request, non_existing, skip_msg)