def test_workload_capacity_and_utilization(request, scenario, appliance):
    """Runs through provider based scenarios enabling C&U and running for a set period of time.
    Memory Monitor creates graphs and summary at the end of each scenario.

    Polarion:
        assignee: rhcf3_machine
        initialEstimate: 1/4h
    """
    from_ts = int(time.time() * 1000)
    logger.debug('Scenario: {}'.format(scenario['name']))

    appliance.clean_appliance()

    quantifiers = {}
    scenario_data = {'appliance_ip': appliance.hostname,
        'appliance_name': conf.cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-cap-and-util',
        'test_name': 'Capacity and Utilization',
        'appliance_roles': ','.join(roles_cap_and_util),
        'scenario': scenario}
    monitor_thread = SmemMemoryMonitor(appliance.ssh_client, scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts, quantifiers, scenario_data))

    monitor_thread.start()

    appliance.wait_for_miq_server_workers_started(poll_interval=2)
    appliance.update_server_roles({role: True for role in roles_cap_and_util})
    for provider in scenario['providers']:
        get_crud(provider).create_rest()
    logger.info('Sleeping for Refresh: {}s'.format(scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])
    appliance.set_cap_and_util_all_via_rails()

    # Variable amount of time for C&U collections/processing
    total_time = scenario['total_time']
    starttime = time.time()
    elapsed_time = 0
    while (elapsed_time < total_time):
        elapsed_time = time.time() - starttime
        time_left = total_time - elapsed_time
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))
        if (time_left > 0 and time_left < 300):
            time.sleep(time_left)
        elif time_left > 0:
            time.sleep(300)

    quantifiers['Elapsed_Time'] = round(elapsed_time, 2)
    logger.info('Test Ending...')
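The scenario mapping consumed above comes from the workload configuration; a minimal sketch of only the keys this test reads, with purely illustrative names and values, might look like the following.

# Hypothetical scenario entry; only the keys referenced by the test above are shown.
example_scenario = {
    'name': 'cap-and-util-base',       # used for logging and the Grafana dashboard URLs
    'providers': ['vsphere-small'],    # provider keys passed to get_crud(...).create_rest()
    'refresh_sleep_time': 600,         # seconds to sleep after the providers are added
    'total_time': 3600,                # seconds the C&U collection/processing phase runs
}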
Example No. 2
def test_workload_capacity_and_utilization(request, scenario, appliance):
    """Runs through provider based scenarios enabling C&U and running for a set period of time.
    Memory Monitor creates graphs and summary at the end of each scenario."""
    from_ts = int(time.time() * 1000)
    logger.debug('Scenario: {}'.format(scenario['name']))

    appliance.clean_appliance()

    quantifiers = {}
    scenario_data = {'appliance_ip': appliance.hostname,
        'appliance_name': conf.cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-cap-and-util',
        'test_name': 'Capacity and Utilization',
        'appliance_roles': ','.join(roles_cap_and_util),
        'scenario': scenario}
    monitor_thread = SmemMemoryMonitor(appliance.ssh_client, scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts, quantifiers, scenario_data))

    monitor_thread.start()

    appliance.wait_for_miq_server_workers_started(poll_interval=2)
    appliance.update_server_roles({role: True for role in roles_cap_and_util})
    for provider in scenario['providers']:
        get_crud(provider).create_rest()
    logger.info('Sleeping for Refresh: {}s'.format(scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])
    appliance.set_cap_and_util_all_via_rails()

    # Variable amount of time for C&U collections/processing
    total_time = scenario['total_time']
    starttime = time.time()
    elapsed_time = 0
    while (elapsed_time < total_time):
        elapsed_time = time.time() - starttime
        time_left = total_time - elapsed_time
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))
        if (time_left > 0 and time_left < 300):
            time.sleep(time_left)
        elif time_left > 0:
            time.sleep(300)

    quantifiers['Elapsed_Time'] = round(elapsed_time, 2)
    logger.info('Test Ending...')
Example No. 3
    def from_config(cls, prov_config, prov_key):
        endpoints = {}
        endpoints[RHOSEndpoint.name] = RHOSEndpoint(
            **prov_config['endpoints'][RHOSEndpoint.name])

        endp_name = EventsEndpoint.name
        if prov_config['endpoints'].get(endp_name):
            endpoints[endp_name] = EventsEndpoint(
                **prov_config['endpoints'][endp_name])

        from cfme.utils.providers import get_crud
        infra_prov_key = prov_config.get('infra_provider_key')
        infra_provider = get_crud(infra_prov_key) if infra_prov_key else None

        return cls.appliance.collections.cloud_providers.instantiate(
            prov_class=cls,
            name=prov_config['name'],
            api_port=prov_config['port'],
            api_version=prov_config.get('api_version', 'Keystone v2'),
            endpoints=endpoints,
            zone=prov_config['server_zone'],
            key=prov_key,
            keystone_v3_domain_id=prov_config.get('domain_id'),
            sec_protocol=prov_config.get('sec_protocol', "Non-SSL"),
            tenant_mapping=prov_config.get('tenant_mapping', False),
            infra_provider=infra_provider)
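A hedged sketch of the prov_config mapping this classmethod expects. The endpoint classes' own name attributes are used as dictionary keys so no literal endpoint names are assumed, and the endpoint keyword arguments shown are placeholders rather than the real Endpoint signatures.

# Illustrative only -- assumes RHOSEndpoint and EventsEndpoint are imported as in the code above.
example_prov_config = {
    'name': 'rhos-example',
    'port': 5000,
    'api_version': 'Keystone v3',
    'server_zone': 'default',
    'domain_id': 'default',            # only relevant for Keystone v3
    'sec_protocol': 'Non-SSL',
    'tenant_mapping': False,
    'infra_provider_key': None,        # or the key of a backing infrastructure provider
    'endpoints': {
        RHOSEndpoint.name: {'hostname': '10.0.0.1'},     # placeholder endpoint kwargs
        EventsEndpoint.name: {'event_stream': 'AMQP'},   # 'event_stream' is read in later examples
    },
}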
Example No. 4
    def from_config(cls, prov_config, prov_key):
        endpoints = {
            RHOSEndpoint.name:
            RHOSEndpoint(**prov_config['endpoints'][RHOSEndpoint.name])
        }

        event_endpoint_config = prov_config['endpoints'].get(
            EventsEndpoint.name, {})
        if event_endpoint_config:
            if (event_endpoint_config.get('event_stream') == 'AMQP'
                    and BZ(1618700, forced_streams=["5.9", "5.10", "upstream"
                                                    ]).blocks):
                logger.warning('Skipping AMQP event config due to BZ 1618700')
            else:
                endpoints[EventsEndpoint.name] = EventsEndpoint(
                    **event_endpoint_config)

        from cfme.utils.providers import get_crud
        infra_prov_key = prov_config.get('infra_provider_key')
        infra_provider = get_crud(infra_prov_key) if infra_prov_key else None

        return cls.appliance.collections.cloud_providers.instantiate(
            prov_class=cls,
            name=prov_config['name'],
            api_port=prov_config['port'],
            api_version=prov_config.get('api_version', 'Keystone v2'),
            endpoints=endpoints,
            zone=prov_config['server_zone'],
            key=prov_key,
            keystone_v3_domain_id=prov_config.get('domain_id'),
            sec_protocol=prov_config.get('sec_protocol', "Non-SSL"),
            tenant_mapping=prov_config.get('tenant_mapping', False),
            infra_provider=infra_provider)
Example No. 5
    def from_config(cls, prov_config, prov_key):
        endpoints = {
            RHOSEndpoint.name: RHOSEndpoint(**prov_config['endpoints'][RHOSEndpoint.name])
        }

        event_endpoint_config = prov_config['endpoints'].get(EventsEndpoint.name, {})
        if event_endpoint_config:
            endpoints[EventsEndpoint.name] = EventsEndpoint(**event_endpoint_config)

        rsa_endpoint_config = prov_config['endpoints'].get(SSHEndpoint.name, {})
        if rsa_endpoint_config:
            endpoints[SSHEndpoint.name] = SSHEndpoint(**rsa_endpoint_config)

        from cfme.utils.providers import get_crud
        infra_prov_key = prov_config.get('infra_provider_key')
        infra_provider = get_crud(infra_prov_key) if infra_prov_key else None

        return cls.appliance.collections.cloud_providers.instantiate(
            prov_class=cls,
            name=prov_config['name'],
            api_port=prov_config['port'],
            api_version=prov_config.get('api_version', 'Keystone v2'),
            endpoints=endpoints,
            zone=prov_config['server_zone'],
            key=prov_key,
            keystone_v3_domain_id=prov_config.get('domain_id'),
            sec_protocol=prov_config.get('sec_protocol', "Non-SSL"),
            tenant_mapping=prov_config.get('tenant_mapping', False),
            infra_provider=infra_provider)
Example No. 6
    def from_config(cls, prov_config, prov_key, appliance=None):
        appliance = appliance or cls.appliance
        endpoints = {
            RHOSEndpoint.name: RHOSEndpoint(**prov_config['endpoints'][RHOSEndpoint.name])
        }

        event_endpoint_config = prov_config['endpoints'].get(EventsEndpoint.name, {})
        if event_endpoint_config:
            endpoints[EventsEndpoint.name] = EventsEndpoint(**event_endpoint_config)

        rsa_endpoint_config = prov_config['endpoints'].get(SSHEndpoint.name, {})
        if rsa_endpoint_config:
            endpoints[SSHEndpoint.name] = SSHEndpoint(**rsa_endpoint_config)

        from cfme.utils.providers import get_crud
        infra_prov_key = prov_config.get('infra_provider_key')
        infra_provider = get_crud(infra_prov_key) if infra_prov_key else None

        return appliance.collections.cloud_providers.instantiate(
            prov_class=cls,
            name=prov_config['name'],
            api_port=prov_config['port'],
            api_version=prov_config.get('api_version', 'Keystone v2'),
            endpoints=endpoints,
            zone=prov_config['server_zone'],
            key=prov_key,
            keystone_v3_domain_id=prov_config.get('domain_id'),
            sec_protocol=prov_config.get('sec_protocol', "Non-SSL"),
            tenant_mapping=prov_config.get('tenant_mapping', False),
            infra_provider=infra_provider)
Example No. 7
def pytest_fixture_setup(fixturedef, request):
    # since we use DataProvider at collection time and BaseProvider in fixtures and tests,
    # we need to instantiate BaseProvider and replace DataProvider obj with it right before first
    # provider fixture request.
    # There were several other ways to do that. However, those bumped into different
    # scope mismatch issues.
    if fixturedef.argname == 'provider':
        kwargs = {}
        for argname in fixturedef.argnames:
            fixdef = request._get_active_fixturedef(argname)
            result, arg_cache_key, exc = fixdef.cached_result
            request._check_scope(argname, request.scope, fixdef.scope)
            kwargs[argname] = result

        fixturefunc = fixturedef.func
        if request.instance is not None:
            fixturefunc = getimfunc(fixturedef.func)
            if fixturefunc != fixturedef.func:
                fixturefunc = fixturefunc.__get__(request.instance)
        my_cache_key = request.param_index
        try:
            provider_data = call_fixture_func(fixturefunc, request, kwargs)
        except TEST_OUTCOME:
            fixturedef.cached_result = (None, my_cache_key, sys.exc_info())
            raise
        from cfme.utils.providers import get_crud
        provider = get_crud(provider_data.key)
        fixturedef.cached_result = (provider, my_cache_key, None)
        request.param = provider
        yield provider
    else:
        yield
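After this hook replaces the collected DataProvider with a BaseProvider instance, a test that requests the provider fixture receives the fully instantiated object. A hypothetical test body, assuming only the create_rest() call used elsewhere in these examples:

# Hypothetical test -- 'provider' is the BaseProvider yielded by the hook above,
# not the collection-time DataProvider.
def test_add_provider(provider):
    provider.create_rest()     # REST-based add, as used in the workload tests above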
Example No. 8
    def from_config(cls, prov_config, prov_key, appliance=None):
        endpoints = {}
        endpoints[RHOSEndpoint.name] = RHOSEndpoint(**prov_config['endpoints'][RHOSEndpoint.name])

        endp_name = EventsEndpoint.name
        if prov_config['endpoints'].get(endp_name):
            endpoints[endp_name] = EventsEndpoint(**prov_config['endpoints'][endp_name])

        from cfme.utils.providers import get_crud
        infra_prov_key = prov_config.get('infra_provider_key')
        infra_provider = get_crud(infra_prov_key, appliance=appliance) if infra_prov_key else None
        api_version = prov_config.get('api_version', None)

        if not api_version:
            api_version = 'Keystone v2'

        return cls(name=prov_config['name'],
                   hostname=prov_config['hostname'],
                   ip_address=prov_config['ipaddress'],
                   api_port=prov_config['port'],
                   api_version=api_version,
                   endpoints=endpoints,
                   zone=prov_config['server_zone'],
                   key=prov_key,
                   keystone_v3_domain_id=prov_config.get('domain_id', None),
                   sec_protocol=prov_config.get('sec_protocol', "Non-SSL"),
                   tenant_mapping=prov_config.get('tenant_mapping', False),
                   infra_provider=infra_provider,
                   appliance=appliance)
Example No. 9
def pytest_generate_tests(metafunc):
    types = [VMwareProvider, RHEVMProvider, SCVMMProvider]
    argnames, argvalues, idlist = testgen.providers_by_class(
        metafunc, types)

    argnames = ['providers_for_discover', 'start_ip', 'max_range']
    new_id_list = []

    providers_complete = []
    providers_final = []

    for x in idlist:
        providers_complete.append(get_crud(x))

    provider_combinations = sorted(
        find_neighbour_provider_combinations(providers_complete, len(types)), key=len)
    signatures_seen = set()

    for prov_comb in provider_combinations:
        sig = generate_signature(prov_comb)
        if sig in signatures_seen:
            continue
        signatures_seen.add(sig)
        start_ip, max_range = minmax_ip(prov_comb)
        providers_final.append([prov_comb, start_ip, max_range])
        new_id_list.append(sig)

    testgen.parametrize(metafunc, argnames, providers_final, ids=new_id_list, scope="module")
Example No. 10
    def from_config(cls, prov_config, prov_key):
        endpoints = {
            RHOSEndpoint.name: RHOSEndpoint(**prov_config['endpoints'][RHOSEndpoint.name])
        }

        event_endpoint_config = prov_config['endpoints'].get(EventsEndpoint.name, {})
        if event_endpoint_config:
            if (event_endpoint_config.get('event_stream') == 'AMQP' and
                    BZ(1618700, forced_streams=["5.9", "5.10", "upstream"]).blocks):
                logger.warning('Skipping AMQP event config due to BZ 1618700')
            else:
                endpoints[EventsEndpoint.name] = EventsEndpoint(**event_endpoint_config)

        from cfme.utils.providers import get_crud
        infra_prov_key = prov_config.get('infra_provider_key')
        infra_provider = get_crud(infra_prov_key) if infra_prov_key else None

        return cls.appliance.collections.cloud_providers.instantiate(
            prov_class=cls,
            name=prov_config['name'],
            api_port=prov_config['port'],
            api_version=prov_config.get('api_version', 'Keystone v2'),
            endpoints=endpoints,
            zone=prov_config['server_zone'],
            key=prov_key,
            keystone_v3_domain_id=prov_config.get('domain_id'),
            sec_protocol=prov_config.get('sec_protocol', "Non-SSL"),
            tenant_mapping=prov_config.get('tenant_mapping', False),
            infra_provider=infra_provider)
Example No. 11
def pytest_generate_tests(metafunc):
    types = [VMwareProvider, RHEVMProvider, SCVMMProvider]
    argnames, argvalues, idlist = testgen.providers_by_class(metafunc, types)

    argnames = ['providers_for_discover', 'start_ip', 'max_range']
    new_id_list = []

    providers_complete = []
    providers_final = []

    for x in idlist:
        providers_complete.append(get_crud(x))

    provider_combinations = sorted(find_neighbour_provider_combinations(
        providers_complete, len(types)),
                                   key=len)
    signatures_seen = set()

    for prov_comb in provider_combinations:
        sig = generate_signature(prov_comb)
        if sig in signatures_seen:
            continue
        signatures_seen.add(sig)
        start_ip, max_range = minmax_ip(prov_comb)
        providers_final.append([prov_comb, start_ip, max_range])
        new_id_list.append(sig)

    testgen.parametrize(metafunc,
                        argnames,
                        providers_final,
                        ids=new_id_list,
                        scope="module")
Example No. 12
    def __init__(self, name=None, provider_key=None, appliance=None):
        Navigatable.__init__(self, appliance=appliance)
        self.quad_name = 'resource_pool'
        self.name = name
        if provider_key:
            self.provider = get_crud(provider_key, appliance=appliance)
        else:
            self.provider = None
Example No. 13
def deploy_template(provider_key, vm_name, template_name=None, timeout=900, **deploy_args):
    """
    Args:
        provider_key: Provider key on which the VM is to be created
        vm_name: Name of the VM to be deployed
        template_name: Name of the template that the VM is deployed from
        timeout: the timeout for template deploy
    """
    allow_skip = deploy_args.pop("allow_skip", ())
    if isinstance(allow_skip, dict):
        skip_exceptions = allow_skip.keys()
        callable_mapping = allow_skip
    elif isinstance(allow_skip, six.string_types) and allow_skip.lower() == "default":
        skip_exceptions = DEFAULT_SKIP
        callable_mapping = {}
    else:
        skip_exceptions = allow_skip
        callable_mapping = {}
    provider_crud = get_crud(provider_key)

    deploy_args.update(vm_name=vm_name)

    if template_name is None:
        try:
            deploy_args.update(template=provider_crud.data['templates']['small_template']['name'])
        except KeyError:
            raise KeyError('small_template not defined for Provider {} in cfme_data.yaml'
                .format(provider_key))
    else:
        deploy_args.update(template=template_name)

    deploy_args.update(provider_crud.deployment_helper(deploy_args))

    logger.info("Getting ready to deploy VM/instance %s from template %s on provider %s",
        vm_name, deploy_args['template'], provider_crud.data['name'])
    try:
        try:
            logger.debug("Deploy args: %s", deploy_args)
            vm_name = provider_crud.mgmt.deploy_template(timeout=timeout, **deploy_args)
            logger.info("Provisioned VM/instance %s", vm_name)  # instance ID in case of EC2
        except Exception as e:
            logger.exception('Could not provision VM/instance %s (%s: %s)',
                vm_name, type(e).__name__, str(e))
            try:
                provider_crud.mgmt.delete_vm(vm_name)
            except Exception:
                logger.exception("Unable to clean up vm: %s", vm_name)
            raise
    except skip_exceptions as e:
        e_c = type(e)
        if e_c in callable_mapping and not callable_mapping[e_c](e):
            raise
        # Make it visible also in the log.
        store.write_line(
            "Skipping due to a provider error: {}: {}\n".format(e_c.__name__, str(e)), purple=True)
        logger.exception(e)
        pytest.skip("{}: {}".format(e_c.__name__, str(e)))
    return vm_name
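A usage sketch for the helper above; the provider key, VM name, and timeout are illustrative, and allow_skip="default" opts into the DEFAULT_SKIP exception set handled by the outer except clause.

# Hypothetical call -- skips the calling test instead of failing on known provider-side errors.
vm_name = deploy_template('vsphere-small', 'perf-workload-vm-0001',
                          template_name=None,     # falls back to the provider's small_template
                          timeout=1200,
                          allow_skip="default")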
Example No. 14
def aws_provider(temp_pod_appliance):
    from cfme.utils.providers import get_crud
    prov = get_crud('ec2west')

    registry_data = prov.mgmt.get_registry_data()
    prov.endpoints['smartstate'].credentials.principal = registry_data['username']
    prov.endpoints['smartstate'].credentials.secret = registry_data['password']

    prov.create()
    yield prov
    prov.delete_if_exists()
Example No. 15
def depot_machine_ip():
    """ Deploy vm for depot test

    This fixture uses for deploy vm on provider from yaml and then receive it's ip
    After test run vm deletes from provider
    """
    depot_machine_name = "test_long_log_depot_{}".format(fauxfactory.gen_alphanumeric())
    data = conf.cfme_data.get("log_db_operations", {})
    depot_provider_key = data["log_db_depot_template"]["provider"]
    depot_template_name = data["log_db_depot_template"]["template_name"]
    prov_crud = get_crud(depot_provider_key)
    deploy_template(depot_provider_key,
                    depot_machine_name,
                    template_name=depot_template_name)
    yield prov_crud.mgmt.get_ip_address(depot_machine_name)
    VM.factory(depot_machine_name, prov_crud).cleanup_on_provider()
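This fixture reads its provider and template from cfme_data; a hedged sketch of the expected log_db_operations structure, written as the equivalent Python mapping with placeholder values:

# Illustrative cfme_data['log_db_operations'] content; the real values live in cfme_data.yaml.
log_db_operations = {
    'log_db_depot_template': {
        'provider': 'vsphere-small',        # provider key passed to get_crud / deploy_template
        'template_name': 'depot-template',  # template the depot VM is deployed from
    },
}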
Example No. 17
def depot_machine_ip():
    """ Deploy vm for depot test

    This fixture uses for deploy vm on provider from yaml and then receive it's ip
    After test run vm deletes from provider
    """
    depot_machine_name = "test_long_log_depot_{}".format(
        fauxfactory.gen_alphanumeric())
    data = conf.cfme_data.get("log_db_operations", {})
    depot_provider_key = data["log_db_depot_template"]["provider"]
    depot_template_name = data["log_db_depot_template"]["template_name"]
    prov_crud = get_crud(depot_provider_key)
    deploy_template(depot_provider_key,
                    depot_machine_name,
                    template_name=depot_template_name)
    yield prov_crud.mgmt.get_ip_address(depot_machine_name)
    VM.factory(depot_machine_name, prov_crud).cleanup_on_provider()
Example No. 18
def pytest_fixture_setup(fixturedef, request):
    # since we use DataProvider at collection time and BaseProvider in fixtures and tests,
    # we need to instantiate BaseProvider and replace DataProvider obj with it right before first
    # provider fixture request.
    # There were several other ways to do that. However, those bumped into different
    # scope mismatch issues.

    # As the object may not be the root object and may have a parent, we need to walk up to
    # that object to see if we can find the attribute on it or any of its parents.
    if hasattr(_walk_to_obj_parent(request).function, 'provider'):
        marks = _walk_to_obj_parent(request).function.provider._marks

        for mark in marks:
            if mark.kwargs.get('fixture_name',
                               'provider') == fixturedef.argname:
                kwargs = {}
                for argname in fixturedef.argnames:
                    fixdef = request._get_active_fixturedef(argname)
                    result, arg_cache_key, exc = fixdef.cached_result
                    request._check_scope(argname, request.scope, fixdef.scope)
                    kwargs[argname] = result

                fixturefunc = fixturedef.func
                if request.instance is not None:
                    fixturefunc = getimfunc(fixturedef.func)
                    if fixturefunc != fixturedef.func:
                        fixturefunc = fixturefunc.__get__(request.instance)
                my_cache_key = request.param_index
                try:
                    provider_data = call_fixture_func(fixturefunc, request,
                                                      kwargs)
                except TEST_OUTCOME:
                    fixturedef.cached_result = (None, my_cache_key,
                                                sys.exc_info())
                    raise
                from cfme.utils.providers import get_crud
                provider = get_crud(provider_data.key)
                fixturedef.cached_result = (provider, my_cache_key, None)
                request.param = provider
                yield provider
                break
        else:
            yield
    else:
        yield
Example No. 19
def pytest_fixture_setup(fixturedef, request):
    # since we use DataProvider at collection time and BaseProvider in fixtures and tests,
    # we need to instantiate BaseProvider and replace DataProvider obj with it right before first
    # provider fixture request.
    # There were several other ways to do that. However, those bumped into different
    # scope mismatch issues.

    # As the object may not be the root object and may have a parent, we need to walk up to
    # that object to see if we can find the attribute on it or any of its parents.
    parent = _walk_to_obj_parent(request)
    # node has all the markers from full scope
    # default it to empty dict so loop below shorts and yields at the end
    item_marks = ProviderEnvironmentMarker.get_closest_kwarg_markers(parent.node) or {}

    for fixture_name, mark in item_marks.items():
        if fixture_name == fixturedef.argname:
            kwargs = {}
            for argname in fixturedef.argnames:
                fixdef = request._get_active_fixturedef(argname)
                result, arg_cache_key, exc = fixdef.cached_result
                request._check_scope(argname, request.scope, fixdef.scope)
                kwargs[argname] = result

            fixturefunc = fixturedef.func
            if request.instance is not None:
                fixturefunc = getimfunc(fixturedef.func)
                if fixturefunc != fixturedef.func:
                    fixturefunc = fixturefunc.__get__(request.instance)
            # Use the DataProvider instance as the cache key.
            my_cache_key = request.param
            try:
                provider_data = call_fixture_func(fixturefunc, request, kwargs)
            except TEST_OUTCOME:
                fixturedef.cached_result = (None, my_cache_key, sys.exc_info())
                raise
            from cfme.utils.providers import get_crud
            provider = get_crud(provider_data.key)
            request.param = provider
            yield provider
            # Store the cached_result after we have yielded to other pytest_fixture_setup methods.
            fixturedef.cached_result = (provider, my_cache_key, None)
            break
    else:
        yield
Example No. 20
def pytest_fixture_setup(fixturedef, request):
    """Use DataProvider instances at collection time, and BaseProvider subclass instances in
    fixture / test execution. This method instantiates the BaseProvider object from the provider
    information stored in the DataProvider instance."""

    # As the object may have a parent, walk to that object to see if we can find the attribute.
    parent = _walk_to_obj_parent(request)

    # parent.node has all the markers from the full scope.
    item_marks = ProviderEnvironmentMarker.get_closest_kwarg_markers(
        parent.node) or {}

    for fixture_name in item_marks:
        if fixture_name == fixturedef.argname:
            kwargs = {}
            for argname in fixturedef.argnames:
                fixdef = request._get_active_fixturedef(argname)
                assert fixdef.cached_result is not None
                result, arg_cache_key, exc = fixdef.cached_result
                request._check_scope(argname, request.scope, fixdef.scope)
                kwargs[argname] = result

            fixturefunc = resolve_fixture_function(fixturedef, request)
            # Use the DataProvider instance as the cache key.
            my_cache_key = fixturedef.cache_key(request)
            try:
                provider_data = call_fixture_func(fixturefunc, request, kwargs)
            except TEST_OUTCOME:
                fixturedef.cached_result = (None, my_cache_key, sys.exc_info())
                raise

            # Instantiate BaseProvider subclass here, and store as the fixture result.
            from cfme.utils.providers import get_crud
            result = get_crud(provider_data.key)
            request.param = result

            yield result
            # Store the cached_result after we have yielded to other pytest_fixture_setup methods.
            fixturedef.cached_result = (result, my_cache_key, None)
            break
    else:
        yield
Example No. 21
def pytest_fixture_setup(fixturedef, request):
    # since we use DataProvider at collection time and BaseProvider in fixtures and tests,
    # we need to instantiate BaseProvider and replace DataProvider obj with it right before first
    # provider fixture request.
    # There were several other ways to do that. However, those bumped into different
    # scope mismatch issues.

    # As the object may not be the root object and may have a parent, we need to walk up to
    # that object to see if we can find the attribute on it or any of its parents.
    if hasattr(_walk_to_obj_parent(request).function, 'provider'):
        marks = _walk_to_obj_parent(request).function.provider._marks

        for mark in marks:
            if mark.kwargs.get('fixture_name', 'provider') == fixturedef.argname:
                kwargs = {}
                for argname in fixturedef.argnames:
                    fixdef = request._get_active_fixturedef(argname)
                    result, arg_cache_key, exc = fixdef.cached_result
                    request._check_scope(argname, request.scope, fixdef.scope)
                    kwargs[argname] = result

                fixturefunc = fixturedef.func
                if request.instance is not None:
                    fixturefunc = getimfunc(fixturedef.func)
                    if fixturefunc != fixturedef.func:
                        fixturefunc = fixturefunc.__get__(request.instance)
                my_cache_key = request.param_index
                try:
                    provider_data = call_fixture_func(fixturefunc, request, kwargs)
                except TEST_OUTCOME:
                    fixturedef.cached_result = (None, my_cache_key, sys.exc_info())
                    raise
                from cfme.utils.providers import get_crud
                provider = get_crud(provider_data.key)
                fixturedef.cached_result = (provider, my_cache_key, None)
                request.param = provider
                yield provider
                break
        else:
            yield
    else:
        yield
Example No. 22
def db_depot_machine_ip(request, appliance):
    """ Deploy vm for depot test

    This fixture uses for deploy vm on provider from yaml and then receive it's ip
    After test run vm deletes from provider
    """
    depot_machine_name = "test_db_backup_depot_{}".format(
        fauxfactory.gen_alphanumeric())
    data = conf.cfme_data.get("log_db_operations", {})
    depot_provider_key = data["log_db_depot_template"]["provider"]
    depot_template_name = data["log_db_depot_template"]["template_name"]
    prov_crud = get_crud(depot_provider_key)
    deploy_template(depot_provider_key,
                    depot_machine_name,
                    template_name=depot_template_name)

    yield prov_crud.mgmt.get_ip_address(depot_machine_name)
    collection = appliance.provider_based_collection(prov_crud)
    collection.instantiate(depot_machine_name,
                           prov_crud).delete_from_provider()
Example No. 23
    def from_config(cls, prov_config, prov_key):

        endpoints = {}
        token_creds = cls.process_credential_yaml_key(prov_config['credentials'], cred_type='token')
        for endp in prov_config['endpoints']:
            if VirtualizationEndpoint.name == endp:
                prov_config['endpoints'][endp]['token'] = token_creds.token
                endpoints[endp] = VirtualizationEndpoint(**prov_config['endpoints'][endp])

        parent_provider = get_crud(prov_config['parent_provider'])
        parent_provider.endpoints.update(endpoints)

        # passing virtualization of KubeVirt provider explicitly to ocp provider
        parent_provider.virt_type = prov_config['virt_type']

        return cls.appliance.collections.infra_providers.instantiate(
            prov_class=cls,
            name=prov_config.get('name'),
            key=prov_key,
            endpoints=endpoints,
            provider_data=prov_config,
            parent_provider=parent_provider)
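A hedged sketch of the prov_config this from_config variant consumes; the key names mirror those read above, while the values and the virtualization endpoint kwargs are placeholders.

# Illustrative only -- assumes VirtualizationEndpoint is imported as in the code above.
example_prov_config = {
    'name': 'kubevirt-example',
    'parent_provider': 'ocp-example',       # key of the parent provider resolved via get_crud
    'virt_type': 'KubeVirt',                # passed through to the parent (OCP) provider
    'credentials': 'kubevirt-token-creds',  # resolved by process_credential_yaml_key(cred_type='token')
    'endpoints': {
        VirtualizationEndpoint.name: {'hostname': 'kubevirt.example.com'},  # placeholder kwargs
    },
}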
Example No. 24
    def from_config(cls, prov_config, prov_key):

        endpoints = {}
        token_creds = cls.process_credential_yaml_key(prov_config['credentials'], cred_type='token')
        for endp in prov_config['endpoints']:
            if VirtualizationEndpoint.name == endp:
                prov_config['endpoints'][endp]['token'] = token_creds.token
                endpoints[endp] = VirtualizationEndpoint(**prov_config['endpoints'][endp])

        parent_provider = get_crud(prov_config['parent_provider'])
        parent_provider.endpoints.update(endpoints)

        # passing virtualization of KubeVirt provider explicitly to ocp provider
        parent_provider.virt_type = prov_config['virt_type']

        return cls.appliance.collections.infra_providers.instantiate(
            prov_class=cls,
            name=prov_config.get('name'),
            key=prov_key,
            endpoints=endpoints,
            provider_data=prov_config,
            parent_provider=parent_provider)
Example No. 25
def test_refresh_vms(appliance, request, scenario):
    """Refreshes all vm's then waits for a specific amount of time. Memory Monitor creates
    graphs and summary at the end of the scenario.

    Polarion:
        assignee: rhcf3_machine
        casecomponent: Appliance
        initialEstimate: 1/4h
    """
    from_ts = int(time.time() * 1000)
    logger.debug('Scenario: {}'.format(scenario['name']))

    appliance.clean_appliance()

    quantifiers = {}
    scenario_data = {'appliance_ip': appliance.hostname,
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-refresh-vm',
        'test_name': 'Refresh VMs',
        'appliance_roles': ', '.join(roles_refresh_vms),
        'scenario': scenario}
    monitor_thread = SmemMemoryMonitor(appliance.ssh_client(), scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts, quantifiers, scenario_data))

    monitor_thread.start()

    appliance.wait_for_miq_server_workers_started(poll_interval=2)
    appliance.update_server_roles({role: True for role in roles_refresh_vms})
    for prov in scenario['providers']:
        get_crud(prov).create_rest()
    logger.info('Sleeping for refresh: {}s'.format(scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])

    full_refresh_threshold_set = False
    if 'full_refresh_threshold' in scenario:
        if scenario['full_refresh_threshold'] != FULL_REFRESH_THRESHOLD_DEFAULT:
            appliance.set_full_refresh_threshold(scenario['full_refresh_threshold'])
            full_refresh_threshold_set = True
    if not full_refresh_threshold_set:
        logger.debug('Keeping full_refresh_threshold at default ({}).'.format(
            FULL_REFRESH_THRESHOLD_DEFAULT))

    refresh_size = scenario['refresh_size']

    vms = appliance.rest_api.collections.vms.all
    vms_iter = cycle(vms)
    logger.debug('Number of VM IDs: {}'.format(len(vms)))

    # Variable amount of time for refresh workload
    total_time = scenario['total_time']
    starttime = time.time()
    time_between_refresh = scenario['time_between_refresh']
    total_refreshed_vms = 0

    while ((time.time() - starttime) < total_time):
        start_refresh_time = time.time()
        refresh_list = [next(vms_iter) for x in range(refresh_size)]
        for vm in refresh_list:
            vm.action.reload()
        total_refreshed_vms += len(refresh_list)
        iteration_time = time.time()

        refresh_time = round(iteration_time - start_refresh_time, 2)
        elapsed_time = iteration_time - starttime
        logger.debug('Time to Queue VM Refreshes: {}'.format(refresh_time))
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))

        if refresh_time < time_between_refresh:
            wait_diff = time_between_refresh - refresh_time
            time_remaining = total_time - elapsed_time
            if (time_remaining > 0 and time_remaining < time_between_refresh):
                time.sleep(time_remaining)
            elif time_remaining > 0:
                time.sleep(wait_diff)
        else:
            logger.warn('Time to Queue VM Refreshes ({}) exceeded time between '
                '({})'.format(refresh_time, time_between_refresh))

    quantifiers['Elapsed_Time'] = round(time.time() - starttime, 2)
    quantifiers['Queued_VM_Refreshes'] = total_refreshed_vms
    logger.info('Test Ending...')
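Like the C&U workload, the refresh workload drives itself from a scenario mapping; the keys read above are sketched here with illustrative values.

# Hypothetical refresh-workload scenario entry.
example_scenario = {
    'name': 'refresh-vms-base',
    'providers': ['vsphere-small'],
    'refresh_sleep_time': 600,         # seconds to wait after adding providers
    'full_refresh_threshold': 100,     # optional; applied only when it differs from the default
    'refresh_size': 10,                # number of VMs reloaded per iteration
    'time_between_refresh': 60,        # target seconds between refresh batches
    'total_time': 3600,                # total workload duration in seconds
}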
Example No. 26
def setup_multiregion_env(cfme_version, provider_type, provider, lease,
                          sprout_poolid, desc, remote_nodes, add_prov):
    """Multi-appliance setup with multiple regions and replication from remote to global."""
    lease_time = tot_time(lease)
    provider_type = None if provider else provider_type

    sprout_client = SproutClient.from_config()

    required_app_count = 1  # global app
    required_app_count += remote_nodes

    if sprout_poolid:
        if sprout_client.call_method('pool_exists', sprout_poolid):
            sprout_pool = sprout_client.call_method('request_check',
                                                    sprout_poolid)
            if len(sprout_pool['appliances']) >= required_app_count:
                print("Processing pool...")
                apps = []
                for app in sprout_pool['appliances']:
                    apps.append(IPAppliance(app['ip_address']))
                sprout_client.set_pool_description(sprout_poolid, desc)
            else:
                sys.exit("Pool does not meet the minimum size requirements!")
        else:
            sys.exit("Pool not found!")

    else:
        print("Provisioning appliances")
        apps, request_id = provision_appliances(count=required_app_count,
                                                cfme_version=cfme_version,
                                                provider_type=provider_type,
                                                provider=provider,
                                                lease_time=lease_time)
        print("Appliance pool lease time is {}".format(lease))
        sprout_client.set_pool_description(request_id, desc)
        print("Appliances Provisioned")
    print("Configuring Replicated Environment")
    global_app = apps[0]
    gip = global_app.hostname

    remote_apps = apps[1:]

    print("Global Appliance Configuration")
    app_creds = {
        "username": credentials["database"]["username"],
        "password": credentials["database"]["password"],
        "sshlogin": credentials["ssh"]["username"],
        "sshpass": credentials["ssh"]["password"],
    }

    app_params = dict(region=99,
                      dbhostname='localhost',
                      username=app_creds['username'],
                      password=app_creds['password'],
                      dbname='vmdb_production',
                      dbdisk=global_app.unpartitioned_disks[0])
    global_app.appliance_console_cli.configure_appliance_internal(**app_params)
    global_app.evmserverd.wait_for_running()
    global_app.wait_for_web_ui()

    print("Done: Global @ {}".format(gip))

    for num, app in enumerate(remote_apps):
        region_n = str((num + 1) * 10)
        print("Remote Appliance Configuration")
        app_params = dict(region=region_n,
                          dbhostname='localhost',
                          username=app_creds['username'],
                          password=app_creds['password'],
                          dbname='vmdb_production',
                          dbdisk=app.unpartitioned_disks[0],
                          fetch_key=gip,
                          sshlogin=app_creds['sshlogin'],
                          sshpass=app_creds['sshpass'])

        app.appliance_console_cli.configure_appliance_internal_fetch_key(
            **app_params)
        app.evmserverd.wait_for_running()
        app.wait_for_web_ui()
        print("Done: Remote @ {}, region: {}".format(app.hostname, region_n))

        print("Configuring Replication")
        print("Setup - Replication on remote appliance")
        app.set_pglogical_replication(replication_type=':remote')

    print("Setup - Replication on global appliance")
    global_app.set_pglogical_replication(replication_type=':global')
    for app in remote_apps:
        global_app.add_pglogical_replication_subscription(app.hostname)

    random.shuffle(remote_apps)
    if add_prov:
        for app, prov_id in zip(cycle(remote_apps), add_prov):
            stack.push(app)
            prov = get_crud(prov_id)
            print("Adding provider {} to appliance {}".format(
                prov_id, app.hostname))
            prov.create_rest()
            stack.pop()

    print("Done!")
Example No. 27
def deploy_template(provider_key,
                    vm_name,
                    template_name=None,
                    timeout=900,
                    **deploy_args):
    """
    Args:
        provider_key: Provider key on which the VM is to be created
        vm_name: Name of the VM to be deployed
        template_name: Name of the template that the VM is deployed from
        timeout: the timeout for template deploy
    """
    allow_skip = deploy_args.pop("allow_skip", ())
    if isinstance(allow_skip, dict):
        skip_exceptions = allow_skip.keys()
        callable_mapping = allow_skip
    elif isinstance(allow_skip,
                    six.string_types) and allow_skip.lower() == "default":
        skip_exceptions = DEFAULT_SKIP
        callable_mapping = {}
    else:
        skip_exceptions = allow_skip
        callable_mapping = {}
    provider_crud = get_crud(provider_key)

    deploy_args.update(vm_name=vm_name)

    if template_name is None:
        try:
            deploy_args.update(template=provider_crud.data['templates']
                               ['small_template']['name'])
        except KeyError:
            raise KeyError(
                'small_template not defined for Provider {} in cfme_data.yaml'.
                format(provider_key))
    else:
        deploy_args.update(template=template_name)

    deploy_args.update(provider_crud.deployment_helper(deploy_args))

    logger.info(
        "Getting ready to deploy VM/instance %s from template %s on provider %s",
        vm_name, deploy_args['template'], provider_crud.data['name'])
    try:
        try:
            logger.debug("Deploy args: %s", deploy_args)
            vm_name = provider_crud.mgmt.deploy_template(timeout=timeout,
                                                         **deploy_args)
            logger.info("Provisioned VM/instance %s",
                        vm_name)  # instance ID in case of EC2
        except Exception as e:
            logger.exception('Could not provision VM/instance %s (%s: %s)',
                             vm_name,
                             type(e).__name__, str(e))
            try:
                provider_crud.mgmt.delete_vm(vm_name)
            except Exception:
                logger.exception("Unable to clean up vm: %s", vm_name)
            raise
    except skip_exceptions as e:
        e_c = type(e)
        if e_c in callable_mapping and not callable_mapping[e_c](e):
            raise
        # Make it visible also in the log.
        store.write_line("Skipping due to a provider error: {}: {}\n".format(
            e_c.__name__, str(e)),
                         purple=True)
        logger.exception(e)
        pytest.skip("{}: {}".format(e_c.__name__, str(e)))
    return vm_name
Example No. 28
def test_provisioning(appliance, request, scenario):
    """Runs through provisioning scenarios using the REST API to
    continuously provision a VM for a specified period of time.
    Memory Monitor creates graphs and a summary at the end of each scenario."""

    from_ts = int(time.time() * 1000)
    logger.debug('Scenario: {}'.format(scenario['name']))

    appliance.clean_appliance()

    quantifiers = {}
    scenario_data = {'appliance_ip': appliance.hostname,
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-provisioning',
        'test_name': 'Provisioning',
        'appliance_roles': ', '.join(roles_provisioning),
        'scenario': scenario}
    monitor_thread = SmemMemoryMonitor(appliance.ssh_client(), scenario_data)

    provision_order = []

    def cleanup_workload(scenario, from_ts, vms_to_cleanup, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        appliance.update_server_roles({role: True for role in roles_provisioning_cleanup})
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        final_vm_size = len(vms_to_cleanup)
        appliance.rest_api.collections.vms.action.delete(vms_to_cleanup)
        monitor_thread.join()
        logger.info('{} VMs were left over, and {} VMs were deleted in the finalizer.'
            .format(final_vm_size, final_vm_size - len(vms_to_cleanup)))
        logger.info('The following VMs were left over after the test: {}'
            .format(vms_to_cleanup))
        quantifiers['VMs_To_Delete_In_Finalizer'] = final_vm_size
        quantifiers['VMs_Deleted_In_Finalizer'] = final_vm_size - len(vms_to_cleanup)
        quantifiers['Leftover_VMs'] = vms_to_cleanup
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))

    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts, vm_name, quantifiers,
            scenario_data))

    monitor_thread.start()

    appliance.wait_for_miq_server_workers_started(poll_interval=2)
    appliance.update_server_roles({role: True for role in roles_provisioning})
    prov = get_crud(scenario['providers'][0])
    prov.create_rest()
    logger.info('Sleeping for Refresh: {}s'.format(scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])

    guid_list = prov.get_template_guids(scenario['templates'])
    guid_cycle = cycle(guid_list)
    cleanup_size = scenario['cleanup_size']
    number_of_vms = scenario['number_of_vms']
    total_time = scenario['total_time']
    time_between_provision = scenario['time_between_provision']
    total_provisioned_vms = 0
    total_deleted_vms = 0
    provisioned_vms = 0
    starttime = time.time()

    while ((time.time() - starttime) < total_time):
        start_iteration_time = time.time()
        provision_list = []
        for i in range(number_of_vms):
            total_provisioned_vms += 1
            provisioned_vms += 1
            vm_to_provision = '{}-provision-{}'.format(
                test_ts, str(total_provisioned_vms).zfill(4))
            guid_to_provision, provider_name = next(guid_cycle)
            provision_order.append((vm_to_provision, provider_name))
            provision_list.append((vm_to_provision, guid_to_provision,
                prov.data['provisioning']['vlan']))

        template = prov.data.templates.get('small_template')
        provision_data = get_provision_data(appliance.rest_api, prov, template.name)
        vm_name = provision_data["vm_fields"]["vm_name"]
        response = appliance.rest_api.collections.provision_requests.action.create(**provision_data)
        assert appliance.rest_api.response.status_code == 200
        provision_request = response[0]

        def _finished():
            provision_request.reload()
            if "error" in provision_request.status.lower():
                pytest.fail("Error when provisioning: `{}`".format(provision_request.message))
            return provision_request.request_state.lower() in ("finished", "provisioned")

        wait_for(_finished, num_sec=800, delay=5, message="REST provisioning finishes")

        vm = appliance.rest_api.collections.vms.get(name=vm_name)
        creation_time = time.time()
        provision_time = round(creation_time - start_iteration_time, 2)
        logger.debug('Time to initiate provisioning: {}'.format(provision_time))
        logger.info('{} VMs provisioned so far'.format(total_provisioned_vms))

        if provisioned_vms > cleanup_size * len(scenario['providers']):
            start_remove_time = time.time()
            if appliance.rest_api.collections.vms.action.delete(vm):
                provision_order.pop(0)
                provisioned_vms -= 1
                total_deleted_vms += 1
            deletion_time = round(time.time() - start_remove_time, 2)
            logger.debug('Time to initiate deleting: {}'.format(deletion_time))
            logger.info('{} VMs deleted so far'.format(total_deleted_vms))

        end_iteration_time = time.time()
        iteration_time = round(end_iteration_time - start_iteration_time, 2)
        elapsed_time = end_iteration_time - starttime
        logger.debug('Time to initiate provisioning and deletion: {}'.format(iteration_time))
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))

        if iteration_time < time_between_provision:
            wait_diff = time_between_provision - iteration_time
            time_remaining = total_time - elapsed_time
            if (time_remaining > 0 and time_remaining < time_between_provision):
                time.sleep(time_remaining)
            elif time_remaining > 0:
                time.sleep(wait_diff)
        else:
            logger.warn('Time to initiate provisioning ({}) exceeded time between '
                '({})'.format(iteration_time, time_between_provision))

    quantifiers['Elapsed_Time'] = round(time.time() - starttime, 2)
    quantifiers['Queued_VM_Provisionings'] = total_provisioned_vms
    quantifiers['Deleted_VMs'] = total_deleted_vms
    logger.info('Provisioned {} VMs and deleted {} VMs during the scenario.'
                .format(total_provisioned_vms, total_deleted_vms))
    logger.info('Test Ending...')
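The provisioning workload reads several scenario keys beyond those used by the refresh test; an illustrative sketch:

# Hypothetical provisioning-workload scenario entry.
example_scenario = {
    'name': 'provisioning-base',
    'providers': ['vsphere-small'],    # only the first provider is added via create_rest()
    'refresh_sleep_time': 600,
    'templates': ['small_template'],   # passed to prov.get_template_guids()
    'number_of_vms': 1,                # VMs queued per iteration
    'cleanup_size': 5,                 # backlog size before older VMs start being deleted
    'time_between_provision': 60,      # target seconds between provisioning iterations
    'total_time': 3600,
}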
Example No. 29
def setup_multiregion_env(cfme_version, provider_type, provider, lease,
                          sprout_poolid, desc, remote_nodes, add_prov):
    """Multi-appliance setup with multiple regions and replication from remote to global."""
    lease_time = tot_time(lease)
    provider_type = None if provider else provider_type

    sprout_client = SproutClient.from_config()

    required_app_count = 1  # global app
    required_app_count += remote_nodes

    if sprout_poolid:
        if sprout_client.call_method('pool_exists', sprout_poolid):
            sprout_pool = sprout_client.call_method('request_check',
                                                    sprout_poolid)
            if len(sprout_pool['appliances']) >= required_app_count:
                print("Processing pool...")
                apps = []
                for app in sprout_pool['appliances']:
                    apps.append(IPAppliance(app['ip_address']))
                sprout_client.set_pool_description(sprout_poolid, desc)
            else:
                sys.exit("Pool does not meet the minimum size requirements!")
        else:
            sys.exit("Pool not found!")

    else:
        print("Provisioning appliances")
        apps, request_id = provision_appliances(count=required_app_count,
                                                cfme_version=cfme_version,
                                                provider_type=provider_type,
                                                provider=provider,
                                                lease_time=lease_time)
        print("Appliance pool lease time is {}".format(lease))
        sprout_client.set_pool_description(request_id, desc)
        print("Appliances Provisioned")
    print("Configuring Replicated Environment")
    global_app = apps[0]
    gip = global_app.hostname

    remote_apps = apps[1:]

    print("Global Appliance Configuration")
    command_set0 = ('ap', '', '7', '1', '1', '2', 'n', '99', pwd,
                    TimedCommand(pwd, 360), '')
    global_app.appliance_console.run_commands(command_set0)
    global_app.evmserverd.wait_for_running()
    global_app.wait_for_web_ui()
    print("Done: Global @ {}".format(gip))

    for num, app in enumerate(remote_apps):
        region_n = str((num + 1) * 10)
        print("Remote Appliance Configuration")
        command_set1 = ('ap', '', '7', '2', gip, '', pwd, '', '1', '2', 'n',
                        region_n, pwd, TimedCommand(pwd, 360), '')
        app.appliance_console.run_commands(command_set1)
        app.evmserverd.wait_for_running()
        app.wait_for_web_ui()
        print("Done: Remote @ {}, region: {}".format(app.hostname, region_n))

        print("Configuring Replication")
        print("Setup - Replication on remote appliance")
        app.set_pglogical_replication(replication_type=':remote')

    print("Setup - Replication on global appliance")
    global_app.set_pglogical_replication(replication_type=':global')
    for app in remote_apps:
        global_app.add_pglogical_replication_subscription(app.hostname)

    random.shuffle(remote_apps)
    if add_prov:
        for app, prov_id in zip(cycle(remote_apps), add_prov):
            stack.push(app)
            prov = get_crud(prov_id)
            print("Adding provider {} to appliance {}".format(
                prov_id, app.hostname))
            prov.create()
            stack.pop()

    print("Done!")
Example No. 30
def deploy_template(provider_key, vm_name, template_name=None, timeout=900, **deploy_args):
    """
    Args:
        provider_key: Provider key on which the VM is to be created
        vm_name: Name of the VM to be deployed
        template_name: Name of the template that the VM is deployed from
        timeout: the timeout for template deploy

    Returns:
        wrapanapi.entities.Vm or wrapanapi.entities.Instance object
    """
    allow_skip = deploy_args.pop("allow_skip", ())
    if isinstance(allow_skip, dict):
        skip_exceptions = list(allow_skip.keys())
        callable_mapping = allow_skip
    elif isinstance(allow_skip, str) and allow_skip.lower() == "default":
        skip_exceptions = DEFAULT_SKIP
        callable_mapping = {}
    else:
        skip_exceptions = allow_skip
        callable_mapping = {}
    provider_crud = get_crud(provider_key)

    deploy_args.update(vm_name=vm_name)

    if template_name is None:
        try:
            template_name = provider_crud.data['templates']['small_template']['name']
        except KeyError:
            raise KeyError('small_template not defined for Provider {} in cfme_data.yaml'
                           .format(provider_key))

    deploy_args.update(template=template_name)

    deploy_args.update(provider_crud.deployment_helper(deploy_args))

    logger.info("Getting ready to deploy VM/instance %s from template %s on provider %s",
                vm_name, deploy_args['template'], provider_crud.data['name'])
    try:
        try:
            logger.debug("Deploy args: %s", deploy_args)
            if isinstance(provider_crud.mgmt, AzureSystem):
                template = provider_crud.mgmt.get_template(
                    template_name, container=deploy_args['template_container'])
            else:
                template = provider_crud.mgmt.get_template(template_name)
            vm = template.deploy(timeout=timeout, **deploy_args)
            logger.info("Provisioned VM/instance %r", vm)
        except Exception:
            logger.exception('Could not provision VM/instance %s', vm_name)
            for vm_to_cleanup in provider_crud.mgmt.find_vms(vm_name):
                try:
                    vm_to_cleanup.cleanup()
                except Exception:
                    logger.exception("Unable to clean up vm: %r", vm_to_cleanup.name)
            raise
    except skip_exceptions as e:
        e_c = type(e)
        if e_c in callable_mapping and not callable_mapping[e_c](e):
            raise
        # Make it visible also in the log.
        store.write_line(
            "Skipping due to a provider error: {}: {}\n".format(e_c.__name__, str(e)), purple=True)
        logger.exception(e)
        pytest.skip("{}: {}".format(e_c.__name__, str(e)))
    return vm
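
# A minimal usage sketch (not part of the original suite); the provider key and VM name
# below are hypothetical placeholders, not entries from cfme_data.yaml.
def _example_deploy_small_template():
    # Deploy from the provider's 'small_template' (template_name=None) and turn the
    # provider exceptions listed in DEFAULT_SKIP into a pytest skip instead of a failure.
    return deploy_template('vsphere-hypothetical', 'perf-example-vm-0001',
                           allow_skip="default")
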
def test_workload_smartstate_analysis(appliance, request, scenario):
    """Runs through provider based scenarios initiating smart state analysis against VMs, Hosts,
    and Datastores

    Polarion:
        assignee: rhcf3_machine
        initialEstimate: 1/4h
    """
    from_ts = int(time.time() * 1000)
    logger.debug('Scenario: {}'.format(scenario['name']))
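    # The 'scenario' fixture is expected to supply at least the keys used below; an
    # illustrative (hypothetical) example:
    #   {'name': 'ssa-small', 'providers': ['rhv-perf'], 'refresh_sleep_time': 600,
    #    'total_time': 3600, 'time_between_analyses': 600,
    #    'vms_to_scan': {'rhv-perf': ['ssa-vm-1', 'ssa-vm-2']}}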
    appliance.install_vddk()

    appliance.clean_appliance()

    quantifiers = {}
    scenario_data = {
        'appliance_ip': appliance.hostname,
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-ssa',
        'test_name': 'SmartState Analysis',
        'appliance_roles': ', '.join(roles_smartstate),
        'scenario': scenario
    }
    monitor_thread = SmemMemoryMonitor(appliance.ssh_client(), scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info(
            'Finished cleaning up monitoring thread in {}'.format(timediff))

    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts,
                                                  quantifiers, scenario_data))

    monitor_thread.start()

    appliance.wait_for_miq_server_workers_started(poll_interval=2)
    appliance.update_server_roles({role: True for role in roles_smartstate})
    for provider in scenario['providers']:
        get_crud(provider).create_rest()
    logger.info('Sleeping for Refresh: {}s'.format(
        scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])

    # Add host credentials and set CFME relationship for RHEVM SSA
    for provider in scenario['providers']:
        for api_host in appliance.rest_api.collections.hosts.all:
            host_collection = appliance.collections.hosts
            test_host = host_collection.instantiate(name=api_host.name,
                                                    provider=provider)
            host_data = get_host_data_by_name(get_crud(provider),
                                              api_host.name)
            credentials = host.get_credentials_from_config(
                host_data['credentials'])
            test_host.update_credentials_rest(credentials)
        appliance.set_cfme_server_relationship(
            cfme_performance['appliance']['appliance_name'])

    # Variable amount of time for SmartState Analysis workload
    total_time = scenario['total_time']
    starttime = time.time()
    time_between_analyses = scenario['time_between_analyses']
    total_scanned_vms = 0

    while ((time.time() - starttime) < total_time):
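        # Each pass queues a scan for every VM named in the scenario, then sleeps out the
        # rest of 'time_between_analyses' (or whatever total time remains, if shorter)
        # before queueing the next batch.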
        start_ssa_time = time.time()
        for vm in list(scenario['vms_to_scan'].values())[0]:
            vm_api = appliance.rest_api.collections.vms.get(name=vm)
            vm_api.action.scan()
            total_scanned_vms += 1
        iteration_time = time.time()

        ssa_time = round(iteration_time - start_ssa_time, 2)
        elapsed_time = iteration_time - starttime
        logger.debug('Time to Queue SmartState Analyses: {}'.format(ssa_time))
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2),
                                                 total_time))

        if ssa_time < time_between_analyses:
            wait_diff = time_between_analyses - ssa_time
            time_remaining = total_time - elapsed_time
            if (time_remaining > 0 and time_remaining < time_between_analyses):
                time.sleep(time_remaining)
            elif time_remaining > 0:
                time.sleep(wait_diff)
        else:
            logger.warning(
                'Time to Queue SmartState Analyses ({}) exceeded the time between '
                'analyses ({})'.format(ssa_time, time_between_analyses))

    quantifiers['Elapsed_Time'] = round(time.time() - starttime, 2)
    quantifiers['Queued_VM_Scans'] = total_scanned_vms
    logger.info('Test Ending...')
Ejemplo n.º 32
def test_workload_capacity_and_utilization_rep(appliance, request, scenario,
                                               setup_perf_provider):
    """Runs through provider based scenarios enabling C&U and replication, run for a set period of
    time. Memory Monitor creates graphs and summary at the end of each scenario.

    Polarion:
        assignee: rhcf3_machine
        casecomponent: CandU
        initialEstimate: 1/4h
    """
    from_ts = int(time.time() * 1000)
    ssh_client = appliance.ssh_client()
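    # Expected 'scenario' keys for this workload (values here are hypothetical):
    #   {'name': 'cap-and-util-rep', 'replication': 'pglogical',  # or 'rubyrep'
    #    'replication_master': {'ip_address': '10.0.0.1',
    #                           'ssh': {'username': 'root', 'password': '...'}},
    #    'providers': ['vsphere-perf'], 'refresh_sleep_time': 600, 'total_time': 3600}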

    ssh_master_args = {
        'hostname': scenario['replication_master']['ip_address'],
        'username': scenario['replication_master']['ssh']['username'],
        'password': scenario['replication_master']['ssh']['password']
    }
    master_appliance = IPAppliance(
        hostname=scenario['replication_master']['ip_address'],
        openshift_creds=ssh_master_args)

    ssh_client_master = SSHClient(**ssh_master_args)
    logger.debug('Scenario: {}'.format(scenario['name']))

    is_pglogical = scenario['replication'] == 'pglogical'

    # Turn off master pglogical replication in case a rubyrep scenario follows a pglogical scenario
    appliance.set_pglogical_replication(replication_type=':none')
    # Spawn the tail beforehand to prevent unnecessary waiting on MiqServer starting, since the
    # appliance under test is cleaned first, followed by the master appliance
    sshtail_evm = SSHTail('/var/www/miq/vmdb/log/evm.log')
    sshtail_evm.set_initial_file_end()
    logger.info(f'Clean appliance under test ({ssh_client})')
    appliance.clean_appliance()
    logger.info(f'Clean master appliance ({ssh_client_master})')
    master_appliance.clean_appliance()  # Clean Replication master appliance

    if is_pglogical:
        scenario_data = {
            'appliance_ip': appliance.hostname,
            'appliance_name': cfme_performance['appliance']['appliance_name'],
            'test_dir': 'workload-cap-and-util-rep',
            'test_name': 'Capacity and Utilization Replication (pgLogical)',
            'appliance_roles': ', '.join(roles_cap_and_util_rep),
            'scenario': scenario
        }
    else:
        scenario_data = {
            'appliance_ip': cfme_performance['appliance']['ip_address'],
            'appliance_name': cfme_performance['appliance']['appliance_name'],
            'test_dir': 'workload-cap-and-util-rep',
            'test_name': 'Capacity and Utilization Replication (RubyRep)',
            'appliance_roles': ', '.join(roles_cap_and_util_rep),
            'scenario': scenario
        }
    quantifiers = {}
    monitor_thread = SmemMemoryMonitor(appliance.ssh_client(), scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info(f'Finished cleaning up monitoring thread in {timediff}')

    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts,
                                                  quantifiers, scenario_data))

    monitor_thread.start()

    appliance.wait_for_miq_server_workers_started(evm_tail=sshtail_evm,
                                                  poll_interval=2)
    appliance.update_server_roles(
        {role: True
         for role in roles_cap_and_util_rep})
    for provider in scenario['providers']:
        get_crud(provider).create_rest()
    logger.info('Sleeping for Refresh: {}s'.format(
        scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])
    appliance.set_cap_and_util_all_via_rails()

    # Configure Replication
    if is_pglogical:
        # Setup appliance under test to :remote
        appliance.set_pglogical_replication(replication_type=':remote')
        # Setup master appliance to :global
        master_appliance.set_pglogical_replication(replication_type=':global')
        # Setup master to subscribe:
        master_appliance.add_pglogical_replication_subscription(
            ssh_client_master, appliance.hostname)
    else:
        # Setup local towards Master
        appliance.set_rubyrep_replication(
            scenario['replication_master']['ip_address'])
        # Force uninstall rubyrep for this region from master (Unsure if still needed)
        # ssh_client.run_rake_command('evm:dbsync:uninstall')
        # time.sleep(30)  # Wait to quiesce
        # Turn on DB Sync role
        appliance.update_server_roles(
            {role: True
             for role in roles_cap_and_util_rep})

    # Variable amount of time for C&U collections/processing
    total_time = scenario['total_time']
    starttime = time.time()
    elapsed_time = 0
    while (elapsed_time < total_time):
        elapsed_time = time.time() - starttime
        time_left = total_time - elapsed_time
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2),
                                                 total_time))
        if (time_left > 0 and time_left < 300):
            time.sleep(time_left)
        elif time_left > 0:
            time.sleep(300)

    # Turn off replication:
    if is_pglogical:
        appliance.set_pglogical_replication(replication_type=':none')
    else:
        appliance.update_server_roles(
            {role: True
             for role in roles_cap_and_util_rep})

    quantifiers['Elapsed_Time'] = round(elapsed_time, 2)
    logger.info('Test Ending...')
Ejemplo n.º 33
def test_refresh_providers(appliance, request, scenario):
    """
    Refreshes providers then waits for a specific amount of time.
    Memory Monitor creates graphs and summary at the end of the scenario.

    Polarion:
        assignee: rhcf3_machine
        initialEstimate: 1/4h
    """
    from_ts = int(time.time() * 1000)
    logger.debug('Scenario: {}'.format(scenario['name']))
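    # Expected 'scenario' keys (values here are illustrative):
    #   {'name': 'refresh-providers', 'providers': ['vsphere-perf'],
    #    'total_time': 3600, 'time_between_refresh': 60}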

    appliance.clean_appliance()

    quantifiers = {}
    scenario_data = {
        'appliance_ip': appliance.hostname,
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-refresh-providers',
        'test_name': 'Refresh Providers',
        'appliance_roles': ', '.join(roles_refresh_providers),
        'scenario': scenario
    }
    monitor_thread = SmemMemoryMonitor(appliance.ssh_client(), scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))

    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts,
                                                  quantifiers, scenario_data))
    monitor_thread.start()

    appliance.wait_for_miq_server_workers_started(poll_interval=2)
    appliance.update_server_roles({role: True for role in roles_refresh_providers})
    for prov in scenario['providers']:
        get_crud(prov).create_rest()

    # Variable amount of time for refresh workload
    total_time = scenario['total_time']
    starttime = time.time()
    time_between_refresh = scenario['time_between_refresh']
    total_refreshed_providers = 0

    while ((time.time() - starttime) < total_time):
        start_refresh_time = time.time()
        appliance.rest_api.collections.providers.reload()
        for prov in appliance.rest_api.collections.providers.all:
            prov.action.reload()
            total_refreshed_providers += 1
        iteration_time = time.time()

        refresh_time = round(iteration_time - start_refresh_time, 2)
        elapsed_time = iteration_time - starttime
        logger.debug('Time to Queue Refreshes: {}'.format(refresh_time))
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))

        if refresh_time < time_between_refresh:
            wait_diff = time_between_refresh - refresh_time
            time_remaining = total_time - elapsed_time
            if (time_remaining > 0 and time_remaining < time_between_refresh):
                time.sleep(time_remaining)
            elif time_remaining > 0:
                time.sleep(wait_diff)
        else:
            logger.warning('Time to Queue Refreshes ({}) exceeded the time between '
                           'refreshes ({})'.format(refresh_time, time_between_refresh))

    quantifiers['Elapsed_Time'] = round(time.time() - starttime, 2)
    quantifiers['Queued_Provider_Refreshes'] = total_refreshed_providers
    logger.info('Test Ending...')
def test_workload_capacity_and_utilization_rep(appliance, request, scenario, setup_perf_provider):
    """Runs through provider based scenarios enabling C&U and replication, run for a set period of
    time. Memory Monitor creates graphs and summary at the end of each scenario.

    Polarion:
        assignee: rhcf3_machine
        casecomponent: CandU
        initialEstimate: 1/4h
    """
    from_ts = int(time.time() * 1000)
    ssh_client = appliance.ssh_client()

    ssh_master_args = {
        'hostname': scenario['replication_master']['ip_address'],
        'username': scenario['replication_master']['ssh']['username'],
        'password': scenario['replication_master']['ssh']['password']}
    master_appliance = IPAppliance(hostname=scenario['replication_master']['ip_address'],
                                   openshift_creds=ssh_master_args)

    ssh_client_master = SSHClient(**ssh_master_args)
    logger.debug('Scenario: {}'.format(scenario['name']))

    is_pglogical = scenario['replication'] == 'pglogical'

    # Turn off master pglogical replication in case a rubyrep scenario follows a pglogical scenario
    appliance.set_pglogical_replication(replication_type=':none')
    # Spawn the tail beforehand to prevent unnecessary waiting on MiqServer starting, since the
    # appliance under test is cleaned first, followed by the master appliance
    sshtail_evm = SSHTail('/var/www/miq/vmdb/log/evm.log')
    sshtail_evm.set_initial_file_end()
    logger.info('Clean appliance under test ({})'.format(ssh_client))
    appliance.clean_appliance()
    logger.info('Clean master appliance ({})'.format(ssh_client_master))
    master_appliance.clean_appliance()  # Clean Replication master appliance

    if is_pglogical:
        scenario_data = {'appliance_ip': appliance.hostname,
            'appliance_name': cfme_performance['appliance']['appliance_name'],
            'test_dir': 'workload-cap-and-util-rep',
            'test_name': 'Capacity and Utilization Replication (pgLogical)',
            'appliance_roles': ', '.join(roles_cap_and_util_rep),
            'scenario': scenario}
    else:
        scenario_data = {'appliance_ip': cfme_performance['appliance']['ip_address'],
            'appliance_name': cfme_performance['appliance']['appliance_name'],
            'test_dir': 'workload-cap-and-util-rep',
            'test_name': 'Capacity and Utilization Replication (RubyRep)',
            'appliance_roles': ', '.join(roles_cap_and_util_rep),
            'scenario': scenario}
    quantifiers = {}
    monitor_thread = SmemMemoryMonitor(appliance.ssh_client(), scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts, quantifiers, scenario_data))

    monitor_thread.start()

    appliance.wait_for_miq_server_workers_started(evm_tail=sshtail_evm, poll_interval=2)
    appliance.update_server_roles({role: True for role in roles_cap_and_util_rep})
    for provider in scenario['providers']:
        get_crud(provider).create_rest()
    logger.info('Sleeping for Refresh: {}s'.format(scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])
    appliance.set_cap_and_util_all_via_rails()

    # Configure Replication
    if is_pglogical:
        # Setup appliance under test to :remote
        appliance.set_pglogical_replication(replication_type=':remote')
        # Setup master appliance to :global
        master_appliance.set_pglogical_replication(replication_type=':global')
        # Setup master to subscribe:
        master_appliance.add_pglogical_replication_subscription(ssh_client_master,
            appliance.hostname)
    else:
        # Setup local towards Master
        appliance.set_rubyrep_replication(scenario['replication_master']['ip_address'])
        # Force uninstall rubyrep for this region from master (Unsure if still needed)
        # ssh_client.run_rake_command('evm:dbsync:uninstall')
        # time.sleep(30)  # Wait to quiesce
        # Turn on DB Sync role
        appliance.update_server_roles({role: True for role in roles_cap_and_util_rep})

    # Variable amount of time for C&U collections/processing
    total_time = scenario['total_time']
    starttime = time.time()
    elapsed_time = 0
    while (elapsed_time < total_time):
        elapsed_time = time.time() - starttime
        time_left = total_time - elapsed_time
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))
        if (time_left > 0 and time_left < 300):
            time.sleep(time_left)
        elif time_left > 0:
            time.sleep(300)

    # Turn off replication:
    if is_pglogical:
        appliance.set_pglogical_replication(replication_type=':none')
    else:
        appliance.update_server_roles({role: True for role in roles_cap_and_util_rep})

    quantifiers['Elapsed_Time'] = round(elapsed_time, 2)
    logger.info('Test Ending...')
Ejemplo n.º 35
def test_refresh_vms(appliance, request, scenario):
    """Refreshes all vm's then waits for a specific amount of time. Memory Monitor creates
    graphs and summary at the end of the scenario."""
    from_ts = int(time.time() * 1000)
    logger.debug('Scenario: {}'.format(scenario['name']))
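    # Expected 'scenario' keys (illustrative values; 'full_refresh_threshold' is optional):
    #   {'name': 'refresh-vms', 'providers': ['vsphere-perf'], 'refresh_sleep_time': 600,
    #    'full_refresh_threshold': 100, 'refresh_size': 5,
    #    'total_time': 3600, 'time_between_refresh': 60}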

    appliance.clean_appliance()

    quantifiers = {}
    scenario_data = {
        'appliance_ip': appliance.hostname,
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-refresh-vm',
        'test_name': 'Refresh VMs',
        'appliance_roles': ', '.join(roles_refresh_vms),
        'scenario': scenario
    }
    monitor_thread = SmemMemoryMonitor(appliance.ssh_client(), scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info(
            'Finished cleaning up monitoring thread in {}'.format(timediff))

    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts,
                                                  quantifiers, scenario_data))

    monitor_thread.start()

    appliance.wait_for_miq_server_workers_started(poll_interval=2)
    appliance.update_server_roles({role: True for role in roles_refresh_vms})
    for prov in scenario['providers']:
        get_crud(prov).create_rest()
    logger.info('Sleeping for refresh: {}s'.format(
        scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])

    full_refresh_threshold_set = False
    if 'full_refresh_threshold' in scenario:
        if scenario['full_refresh_threshold'] != FULL_REFRESH_THRESHOLD_DEFAULT:
            appliance.set_full_refresh_threshold(
                scenario['full_refresh_threshold'])
            full_refresh_threshold_set = True
    if not full_refresh_threshold_set:
        logger.debug('Keeping full_refresh_threshold at default ({}).'.format(
            FULL_REFRESH_THRESHOLD_DEFAULT))

    refresh_size = scenario['refresh_size']

    vms = appliance.rest_api.collections.vms.all
    vms_iter = cycle(vms)
    logger.debug('Number of VM IDs: {}'.format(len(vms)))

    # Variable amount of time for refresh workload
    total_time = scenario['total_time']
    starttime = time.time()
    time_between_refresh = scenario['time_between_refresh']
    total_refreshed_vms = 0

    while ((time.time() - starttime) < total_time):
        start_refresh_time = time.time()
        refresh_list = [next(vms_iter) for x in range(refresh_size)]
        for vm in refresh_list:
            vm.action.reload()
        total_refreshed_vms += len(refresh_list)
        iteration_time = time.time()

        refresh_time = round(iteration_time - start_refresh_time, 2)
        elapsed_time = iteration_time - starttime
        logger.debug('Time to Queue VM Refreshes: {}'.format(refresh_time))
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2),
                                                 total_time))

        if refresh_time < time_between_refresh:
            wait_diff = time_between_refresh - refresh_time
            time_remaining = total_time - elapsed_time
            if (time_remaining > 0 and time_remaining < time_between_refresh):
                time.sleep(time_remaining)
            elif time_remaining > 0:
                time.sleep(wait_diff)
        else:
            logger.warning(
                'Time to Queue VM Refreshes ({}) exceeded the time between '
                'refreshes ({})'.format(refresh_time, time_between_refresh))

    quantifiers['Elapsed_Time'] = round(time.time() - starttime, 2)
    quantifiers['Queued_VM_Refreshes'] = total_refreshed_vms
    logger.info('Test Ending...')
Ejemplo n.º 36
def test_refresh_providers(appliance, request, scenario):
    """
    Refreshes providers then waits for a specific amount of time.
    Memory Monitor creates graphs and summary at the end of the scenario.
    """
    from_ts = int(time.time() * 1000)
    logger.debug('Scenario: {}'.format(scenario['name']))

    appliance.clean_appliance()

    quantifiers = {}
    scenario_data = {
        'appliance_ip': appliance.hostname,
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-refresh-providers',
        'test_name': 'Refresh Providers',
        'appliance_roles': ', '.join(roles_refresh_providers),
        'scenario': scenario
    }
    monitor_thread = SmemMemoryMonitor(appliance.ssh_client(), scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info(
            'Finished cleaning up monitoring thread in {}'.format(timediff))

    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts,
                                                  quantifiers, scenario_data))
    monitor_thread.start()

    appliance.wait_for_miq_server_workers_started(poll_interval=2)
    appliance.update_server_roles(
        {role: True
         for role in roles_refresh_providers})
    for prov in scenario['providers']:
        get_crud(prov).create_rest()

    # Variable amount of time for refresh workload
    total_time = scenario['total_time']
    starttime = time.time()
    time_between_refresh = scenario['time_between_refresh']
    total_refreshed_providers = 0

    while ((time.time() - starttime) < total_time):
        start_refresh_time = time.time()
        appliance.rest_api.collections.providers.reload()
        for prov in appliance.rest_api.collections.providers.all:
            prov.action.reload()
            total_refreshed_providers += 1
        iteration_time = time.time()

        refresh_time = round(iteration_time - start_refresh_time, 2)
        elapsed_time = iteration_time - starttime
        logger.debug('Time to Queue Refreshes: {}'.format(refresh_time))
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2),
                                                 total_time))

        if refresh_time < time_between_refresh:
            wait_diff = time_between_refresh - refresh_time
            time_remaining = total_time - elapsed_time
            if (time_remaining > 0 and time_remaining < time_between_refresh):
                time.sleep(time_remaining)
            elif time_remaining > 0:
                time.sleep(wait_diff)
        else:
            logger.warning('Time to Queue Refreshes ({}) exceeded the time between '
                           'refreshes ({})'.format(refresh_time, time_between_refresh))

    quantifiers['Elapsed_Time'] = round(time.time() - starttime, 2)
    quantifiers['Queued_Provider_Refreshes'] = total_refreshed_providers
    logger.info('Test Ending...')
Ejemplo n.º 37
def test_provisioning(appliance, request, scenario):
    """Runs through provisioning scenarios using the REST API to
    continuously provision a VM for a specified period of time.
    Memory Monitor creates graphs and summary at the end of each scenario.

    Polarion:
        assignee: rhcf3_machine
        casecomponent: Provisioning
        initialEstimate: 1/4h
    """

    from_ts = int(time.time() * 1000)
    logger.debug('Scenario: {}'.format(scenario['name']))
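    # Expected 'scenario' keys (values here are illustrative):
    #   {'name': 'provisioning', 'providers': ['vsphere-perf'], 'refresh_sleep_time': 600,
    #    'templates': ['small_template'], 'number_of_vms': 1, 'cleanup_size': 5,
    #    'total_time': 3600, 'time_between_provision': 60}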

    appliance.clean_appliance()

    quantifiers = {}
    scenario_data = {
        'appliance_ip': appliance.hostname,
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-provisioning',
        'test_name': 'Provisioning',
        'appliance_roles': ', '.join(roles_provisioning),
        'scenario': scenario
    }
    monitor_thread = SmemMemoryMonitor(appliance.ssh_client(), scenario_data)

    provision_order = []

    def cleanup_workload(scenario, from_ts, vms_to_cleanup, quantifiers,
                         scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        appliance.update_server_roles(
            {role: True
             for role in roles_provisioning_cleanup})
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        final_vm_size = len(vms_to_cleanup)
        appliance.rest_api.collections.vms.action.delete(vms_to_cleanup)
        monitor_thread.join()
        logger.info(
            '{} VMs were left over, and {} VMs were deleted in the finalizer.'.
            format(final_vm_size, final_vm_size - len(vms_to_cleanup)))
        logger.info(
            'The following VMs were left over after the test: {}'.format(
                vms_to_cleanup))
        quantifiers['VMs_To_Delete_In_Finalizer'] = final_vm_size
        quantifiers['VMs_Deleted_In_Finalizer'] = final_vm_size - len(
            vms_to_cleanup)
        quantifiers['Leftover_VMs'] = vms_to_cleanup
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info(
            'Finished cleaning up monitoring thread in {}'.format(timediff))

    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts, vm_name,
                                                  quantifiers, scenario_data))

    monitor_thread.start()

    appliance.wait_for_miq_server_workers_started(poll_interval=2)
    appliance.update_server_roles({role: True for role in roles_provisioning})
    prov = get_crud(scenario['providers'][0])
    prov.create_rest()
    logger.info('Sleeping for Refresh: {}s'.format(
        scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])

    guid_list = prov.get_template_guids(scenario['templates'])
    guid_cycle = cycle(guid_list)
    cleanup_size = scenario['cleanup_size']
    number_of_vms = scenario['number_of_vms']
    total_time = scenario['total_time']
    time_between_provision = scenario['time_between_provision']
    total_provisioned_vms = 0
    total_deleted_vms = 0
    provisioned_vms = 0
    starttime = time.time()

    while ((time.time() - starttime) < total_time):
        start_iteration_time = time.time()
        provision_list = []
        for i in range(number_of_vms):
            total_provisioned_vms += 1
            provisioned_vms += 1
            vm_to_provision = 'test-{}-prov-{}'.format(
                test_ts,
                str(total_provisioned_vms).zfill(4))
            guid_to_provision, provider_name = next(guid_cycle)
            provision_order.append((vm_to_provision, provider_name))
            provision_list.append((vm_to_provision, guid_to_provision,
                                   prov.data['provisioning']['vlan']))

        template = prov.data.templates.get('small_template')
        provision_data = get_provision_data(appliance.rest_api, prov,
                                            template.name)
        vm_name = provision_data["vm_fields"]["vm_name"]
        response = appliance.rest_api.collections.provision_requests.action.create(
            **provision_data)
        assert_response(appliance)
        provision_request = response[0]

        def _finished():
            provision_request.reload()
            if "error" in provision_request.status.lower():
                pytest.fail("Error when provisioning: `{}`".format(
                    provision_request.message))
            return provision_request.request_state.lower() in ("finished",
                                                               "provisioned")

        wait_for(_finished,
                 num_sec=800,
                 delay=5,
                 message="REST provisioning finishes")

        vm = appliance.rest_api.collections.vms.get(name=vm_name)
        creation_time = time.time()
        provision_time = round(creation_time - start_iteration_time, 2)
        logger.debug(
            'Time to initiate provisioning: {}'.format(provision_time))
        logger.info('{} VMs provisioned so far'.format(total_provisioned_vms))

        if provisioned_vms > cleanup_size * len(scenario['providers']):
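            # Rolling cleanup: once more than 'cleanup_size' VMs per provider have
            # accumulated, delete a VM and drop the oldest tracked name so the number
            # of live VMs stays bounded for the rest of the run.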
            start_remove_time = time.time()
            if appliance.rest_api.collections.vms.action.delete(vm):
                provision_order.pop(0)
                provisioned_vms -= 1
                total_deleted_vms += 1
            deletion_time = round(time.time() - start_remove_time, 2)
            logger.debug('Time to initiate deleting: {}'.format(deletion_time))
            logger.info('{} VMs deleted so far'.format(total_deleted_vms))

        end_iteration_time = time.time()
        iteration_time = round(end_iteration_time - start_iteration_time, 2)
        elapsed_time = end_iteration_time - starttime
        logger.debug('Time to initiate provisioning and deletion: {}'.format(
            iteration_time))
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2),
                                                 total_time))

        if iteration_time < time_between_provision:
            wait_diff = time_between_provision - iteration_time
            time_remaining = total_time - elapsed_time
            if (time_remaining > 0
                    and time_remaining < time_between_provision):
                time.sleep(time_remaining)
            elif time_remaining > 0:
                time.sleep(wait_diff)
        else:
            logger.warning(
                'Time to initiate provisioning ({}) exceeded the time between '
                'provisions ({})'.format(iteration_time, time_between_provision))

    quantifiers['Elapsed_Time'] = round(time.time() - starttime, 2)
    quantifiers['Queued_VM_Provisionings'] = total_provisioned_vms
    quantifiers['Deleted_VMs'] = total_deleted_vms
    logger.info(
        'Provisioned {} VMs and deleted {} VMs during the scenario.'.format(
            total_provisioned_vms, total_deleted_vms))
    logger.info('Test Ending...')
def test_workload_smartstate_analysis(appliance, request, scenario):
    """Runs through provider based scenarios initiating smart state analysis against VMs, Hosts,
    and Datastores"""
    from_ts = int(time.time() * 1000)
    logger.debug('Scenario: {}'.format(scenario['name']))
    appliance.install_vddk()

    appliance.clean_appliance()

    quantifiers = {}
    scenario_data = {'appliance_ip': appliance.hostname,
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-ssa',
        'test_name': 'SmartState Analysis',
        'appliance_roles': ', '.join(roles_smartstate),
        'scenario': scenario}
    monitor_thread = SmemMemoryMonitor(appliance.ssh_client(), scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts, quantifiers, scenario_data))

    monitor_thread.start()

    appliance.wait_for_miq_server_workers_started(poll_interval=2)
    appliance.update_server_roles({role: True for role in roles_smartstate})
    for provider in scenario['providers']:
        get_crud(provider).create_rest()
    logger.info('Sleeping for Refresh: {}s'.format(scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])

    # Add host credentials and set CFME relationship for RHEVM SSA
    for provider in scenario['providers']:
        for api_host in appliance.rest_api.collections.hosts.all:
            host_collection = appliance.collections.hosts
            test_host = host_collection.instantiate(name=api_host.name, provider=provider)
            host_data = get_host_data_by_name(get_crud(provider), api_host.name)
            credentials = host.get_credentials_from_config(host_data['credentials'])
            test_host.update_credentials_rest(credentials)
        appliance.set_cfme_server_relationship(cfme_performance['appliance']['appliance_name'])

    # Variable amount of time for SmartState Analysis workload
    total_time = scenario['total_time']
    starttime = time.time()
    time_between_analyses = scenario['time_between_analyses']
    total_scanned_vms = 0

    while ((time.time() - starttime) < total_time):
        start_ssa_time = time.time()
        for vm in list(scenario['vms_to_scan'].values())[0]:
            vm_api = appliance.rest_api.collections.vms.get(name=vm)
            vm_api.action.scan()
            total_scanned_vms += 1
        iteration_time = time.time()

        ssa_time = round(iteration_time - start_ssa_time, 2)
        elapsed_time = iteration_time - starttime
        logger.debug('Time to Queue SmartState Analyses: {}'.format(ssa_time))
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))

        if ssa_time < time_between_analyses:
            wait_diff = time_between_analyses - ssa_time
            time_remaining = total_time - elapsed_time
            if (time_remaining > 0 and time_remaining < time_between_analyses):
                time.sleep(time_remaining)
            elif time_remaining > 0:
                time.sleep(wait_diff)
        else:
            logger.warning('Time to Queue SmartState Analyses ({}) exceeded the time between '
                           'analyses ({})'.format(ssa_time, time_between_analyses))

    quantifiers['Elapsed_Time'] = round(time.time() - starttime, 2)
    quantifiers['Queued_VM_Scans'] = total_scanned_vms
    logger.info('Test Ending...')