Code Example #1
def test_workload_capacity_and_utilization(request, scenario, appliance):
    """Runs through provider based scenarios enabling C&U and running for a set period of time.
    Memory Monitor creates graphs and summary at the end of each scenario."""
    from_ts = int(time.time() * 1000)
    logger.debug('Scenario: {}'.format(scenario['name']))

    appliance.clean_appliance()

    quantifiers = {}
    scenario_data = {'appliance_ip': appliance.hostname,
        'appliance_name': conf.cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-cap-and-util',
        'test_name': 'Capacity and Utilization',
        'appliance_roles': ','.join(roles_cap_and_util),
        'scenario': scenario}
    monitor_thread = SmemMemoryMonitor(appliance.ssh_client, scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts, quantifiers, scenario_data))

    monitor_thread.start()

    appliance.wait_for_miq_server_workers_started(poll_interval=2)
    appliance.update_server_roles({role: True for role in roles_cap_and_util})
    for provider in scenario['providers']:
        get_crud(provider).create_rest()
    logger.info('Sleeping for Refresh: {}s'.format(scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])
    appliance.set_cap_and_util_all_via_rails()

    # Variable amount of time for C&U collections/processing
    total_time = scenario['total_time']
    starttime = time.time()
    elapsed_time = 0
    while (elapsed_time < total_time):
        elapsed_time = time.time() - starttime
        time_left = total_time - elapsed_time
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))
        if (time_left > 0 and time_left < 300):
            time.sleep(time_left)
        elif time_left > 0:
            time.sleep(300)

    quantifiers['Elapsed_Time'] = round(elapsed_time, 2)
    logger.info('Test Ending...')
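
For context, a minimal scenario mapping covering only the keys this test reads might look like the sketch below; every value is an illustrative assumption, not taken from a real cfme_performance configuration.

# Hypothetical scenario entry; keys mirror what the test above reads,
# values are invented for illustration.
scenario = {
    'name': 'cap-and-util-1-provider',   # used in logging and dashboard URLs
    'providers': ['vsphere55'],          # provider keys passed to get_crud()
    'refresh_sleep_time': 600,           # seconds to wait after provider refresh
    'total_time': 3600,                  # seconds of C&U collection/processing
}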
Code Example #2
 def __init__(self, name=None, provider_key=None, type=None):
     self.name = name
     self.type = type
     if provider_key:
         self.provider = get_crud(provider_key)
     else:
         self.provider = None
Code Example #3
File: test_retirement.py Project: FilipB/cfme_tests
def existing_vm(request):
    """ Fixture will be using for set\unset retirement date for existing vm instead of
    creation a new one
    """
    list_of_existing_providers = providers.existing_providers()
    if list_of_existing_providers:
        test_provider = providers.get_crud(list_of_existing_providers[0])
    else:
        test_provider = providers.setup_a_provider()
    all_vms = test_provider.mgmt.list_vm()
    need_to_create_vm = True
    for virtual_machine in all_vms:
        if test_provider.mgmt.is_vm_running(virtual_machine):
            need_vm = VM.factory(virtual_machine, test_provider)
            need_to_create_vm = False
            break
    if need_to_create_vm:
        machine_name = 'test_retire_prov_{}'.format(fauxfactory.gen_alpha(length=8).lower())
        need_vm = VM.factory(machine_name, test_provider)
        need_vm.create_on_provider(find_in_cfme=True, allow_skip="default")

    @request.addfinalizer
    def _delete_vm():
        if need_to_create_vm:
            test_provider.mgmt.delete_vm(need_vm.name)
    return need_vm
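
A hedged usage sketch for this fixture: a consuming test might look like the lines below. The test body is an assumption for illustration; set_retirement_date is presumed from the surrounding suite rather than shown in this snippet.

import datetime

def test_set_retirement_date(existing_vm):
    # Hypothetical consumer of the fixture above; the date is illustrative.
    tomorrow = datetime.date.today() + datetime.timedelta(days=1)
    existing_vm.set_retirement_date(tomorrow)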
Code Example #4
    def from_config(cls, prov_config, prov_key, appliance=None):
        endpoints = {}
        endpoints[RHOSEndpoint.name] = RHOSEndpoint(
            **prov_config['endpoints'][RHOSEndpoint.name])

        endp_name = EventsEndpoint.name
        if prov_config['endpoints'].get(endp_name):
            endpoints[endp_name] = EventsEndpoint(
                **prov_config['endpoints'][endp_name])

        from utils.providers import get_crud
        infra_prov_key = prov_config.get('infra_provider_key')
        infra_provider = get_crud(
            infra_prov_key, appliance=appliance) if infra_prov_key else None
        return cls(name=prov_config['name'],
                   hostname=prov_config['hostname'],
                   ip_address=prov_config['ipaddress'],
                   api_port=prov_config['port'],
                   endpoints=endpoints,
                   zone=prov_config['server_zone'],
                   key=prov_key,
                   sec_protocol=prov_config.get('sec_protocol', "Non-SSL"),
                   tenant_mapping=prov_config.get('tenant_mapping', False),
                   infra_provider=infra_provider,
                   appliance=appliance)
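
For reference, a prov_config mapping shaped for this classmethod might look roughly like the sketch below. All values are illustrative assumptions, and the endpoint section keys depend on RHOSEndpoint.name and EventsEndpoint.name, which are assumed here to be 'default' and 'events'.

# Hypothetical prov_config; every value is invented for illustration.
prov_config = {
    'name': 'rhos-test',
    'hostname': 'rhos.example.com',
    'ipaddress': '10.0.0.10',
    'port': 5000,
    'server_zone': 'default',
    'sec_protocol': 'Non-SSL',
    'tenant_mapping': False,
    'infra_provider_key': 'vsphere55',  # optional; resolved via get_crud()
    'endpoints': {
        'default': {'hostname': 'rhos.example.com'},  # **kwargs for RHOSEndpoint
        'events': {'hostname': 'rhos.example.com'},   # **kwargs for EventsEndpoint
    },
}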
Code Example #5
def pytest_generate_tests(metafunc):
    types = [VMwareProvider, RHEVMProvider, SCVMMProvider]
    argnames, argvalues, idlist = testgen.providers_by_class(metafunc, types)

    argnames = ['providers_for_discover', 'start_ip', 'max_range']
    new_id_list = []

    providers_complete = []
    providers_final = []

    for x in idlist:
        providers_complete.append(get_crud(x))

    provider_combinations = sorted(find_neighbour_provider_combinations(
        providers_complete, len(types)),
                                   key=len)
    signatures_seen = set()

    for prov_comb in provider_combinations:
        sig = generate_signature(prov_comb)
        if sig in signatures_seen:
            continue
        signatures_seen.add(sig)
        start_ip, max_range = minmax_ip(prov_comb)
        providers_final.append([prov_comb, start_ip, max_range])
        new_id_list.append(sig)

    testgen.parametrize(metafunc,
                        argnames,
                        providers_final,
                        ids=new_id_list,
                        scope="module")
Code Example #6
def setup_provider(provider, original_provider_key):
    original_provider = get_crud(original_provider_key)
    if original_provider.exists:
        # Delete original provider's hosts first
        for host in original_provider.hosts:
            if host.exists:
                host.delete(cancel=False)
        # Get rid of the original provider, it would make a mess.
        original_provider.delete(cancel=False)
        provider.wait_for_delete()
    provider.create()
    provider.refresh_provider_relationships()
    try:
        wait_for(
            lambda: any([
                provider.num_vm() > 0,
                provider.num_template() > 0,
                provider.num_datastore() > 0,
                provider.num_host() > 0,
            ]), num_sec=400, delay=5)
    except:
        provider.delete(cancel=False)
        raise
    yield
    for host in provider.hosts:
        if host.exists:
            host.delete(cancel=False)
    provider.delete(cancel=False)
    provider.wait_for_delete()
Code Example #7
def pytest_generate_tests(metafunc):
    types = [VMwareProvider, RHEVMProvider, SCVMMProvider]
    argnames, argvalues, idlist = testgen.providers_by_class(
        metafunc, types)

    argnames = ['providers_for_discover', 'start_ip', 'max_range']
    new_id_list = []

    providers_complete = []
    providers_final = []

    for x in idlist:
        providers_complete.append(get_crud(x))

    provider_combinations = sorted(
        find_neighbour_provider_combinations(providers_complete, len(types)), key=len)
    signatures_seen = set()

    for prov_comb in provider_combinations:
        sig = generate_signature(prov_comb)
        if sig in signatures_seen:
            continue
        signatures_seen.add(sig)
        start_ip, max_range = minmax_ip(prov_comb)
        providers_final.append([prov_comb, start_ip, max_range])
        new_id_list.append(sig)

    testgen.parametrize(metafunc, argnames, providers_final, ids=new_id_list, scope="module")
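
The helpers find_neighbour_provider_combinations, generate_signature, and minmax_ip are referenced but not shown in these snippets. Purely as a hypothetical sketch of their likely contracts (the names come from the snippets, but the bodies below are assumptions):

def generate_signature(prov_comb):
    # Assumed contract: a stable, order-independent id for a provider combination.
    return '-'.join(sorted(p.key for p in prov_comb))

def minmax_ip(prov_comb):
    # Assumed contract: providers share a /24 subnet; return the lowest address
    # as the discovery start IP and the highest last octet as the range end.
    octets = sorted(int(p.ip_address.rsplit('.', 1)[1]) for p in prov_comb)
    base = prov_comb[0].ip_address.rsplit('.', 1)[0]
    return '{}.{}'.format(base, octets[0]), octets[-1]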
Code Example #8
def pytest_generate_tests(metafunc):
    types = ["virtualcenter", "rhevm", "scvmm"]
    argnames, argvalues, idlist = testgen.provider_by_type(metafunc, types)

    argnames = ["providers_for_discover", "start_ip", "max_range"]
    new_id_list = []

    providers_complete = []
    providers_final = []

    for x in idlist:
        providers_complete.append(get_crud(x))

    provider_combinations = sorted(find_neighbour_provider_combinations(providers_complete, len(types)), key=len)
    signatures_seen = set()

    for prov_comb in provider_combinations:
        sig = generate_signature(prov_comb)
        if sig in signatures_seen:
            continue
        signatures_seen.add(sig)
        start_ip, max_range = minmax_ip(prov_comb)
        providers_final.append([prov_comb, start_ip, max_range])
        new_id_list.append(sig)

    testgen.parametrize(metafunc, argnames, providers_final, ids=new_id_list, scope="module")
Code Example #9
def setup_provider(provider, original_provider_key):
    original_provider = get_crud(original_provider_key)
    if original_provider.exists:
        # Delete original provider's hosts first
        for host in original_provider.hosts:
            if host.exists:
                host.delete(cancel=False)
        # Get rid of the original provider, it would make a mess.
        original_provider.delete(cancel=False)
        provider.wait_for_delete()
    provider.create()
    provider.refresh_provider_relationships()
    try:
        wait_for(
            lambda: any([
                provider.num_vm() > 0,
                provider.num_template() > 0,
                provider.num_datastore() > 0,
                provider.num_host() > 0,
            ]), num_sec=400, delay=5)
    except:
        provider.delete(cancel=False)
        raise
    yield
    for host in provider.hosts:
        if host.exists:
            host.delete(cancel=False)
    provider.delete(cancel=False)
    provider.wait_for_delete()
Code Example #10
File: datastore.py Project: MattLombana/cfme_tests
 def __init__(self, name=None, provider_key=None, type=None):
     self.name = name
     self.type = type
     if provider_key:
         self.provider = get_crud(provider_key)
     else:
         self.provider = None
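
A hedged usage sketch for this constructor, assuming the snippet belongs to the Datastore class in the named file and that 'vsphere55' is a valid provider key in the test configuration:

# Hypothetical usage; the class name and provider key are assumptions.
ds = Datastore(name='iscsi-datastore-1', provider_key='vsphere55', type='iscsi')
print(ds.provider.name if ds.provider else 'no provider attached')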
Code Example #11
 def messagings_in_mgmt(cls, provider=None, server=None):
     if provider is None:
         messagings = []
         for _provider in list_providers('hawkular'):
             messagings.extend(cls._messagings_in_mgmt(get_crud(_provider), server))
         return messagings
     else:
         return cls._messagings_in_mgmt(provider, server)
Code Example #12
 def datasources_in_mgmt(cls, provider=None, server=None):
     if provider is None:
         datasources = []
         for _provider in list_middleware_providers():
             datasources.extend(cls._datasources_in_mgmt(get_crud(_provider), server))
         return datasources
     else:
         return cls._datasources_in_mgmt(provider, server)
Code Example #13
 def domains_in_mgmt(cls, provider=None):
     if provider is None:
         deployments = []
         for _provider in list_providers('hawkular'):
             deployments.extend(cls._domains_in_mgmt(get_crud(_provider)))
         return deployments
     else:
         return cls._domains_in_mgmt(provider)
Code Example #14
 def __init__(self, name=None, provider_key=None, appliance=None):
     Navigatable.__init__(self, appliance=appliance)
     self.quad_name = 'resource_pool'
     self.name = name
     if provider_key:
         self.provider = get_crud(provider_key)
     else:
         self.provider = None
Code Example #15
 def servers_in_mgmt(cls, provider=None):
     if provider is None:
         deployments = []
         for _provider in list_middleware_providers():
             deployments.extend(cls._servers_in_mgmt(get_crud(_provider)))
         return deployments
     else:
         return cls._servers_in_mgmt(provider)
Code Example #16
 def __init__(self, name=None, provider_key=None, appliance=None):
     Navigatable.__init__(self, appliance=appliance)
     self.quad_name = 'resource_pool'
     self.name = name
     if provider_key:
         self.provider = get_crud(provider_key, appliance=appliance)
     else:
         self.provider = None
Code Example #17
File: datasource.py Project: jteehan/cfme_tests
 def datasources_in_mgmt(cls, provider=None, server=None):
     if provider is None:
         datasources = []
         for _provider in list_providers('hawkular'):
             datasources.extend(cls._datasources_in_mgmt(get_crud(_provider), server))
         return datasources
     else:
         return cls._datasources_in_mgmt(provider, server)
Code Example #18
 def deployments_in_mgmt(cls, provider=None, server=None):
     if provider is None:
         deployments = []
         for _provider in list_providers('hawkular'):
             deployments.extend(cls._deployments_in_mgmt(get_crud(_provider), server))
         return deployments
     else:
         return cls._deployments_in_mgmt(provider, server)
Code Example #19
 def servers_in_mgmt(cls, provider=None, server_group=None):
     if provider is None:
         servers = []
         for _provider in list_providers('hawkular'):
             servers.extend(cls._servers_in_mgmt(get_crud(_provider), server_group))
         return servers
     else:
         return cls._servers_in_mgmt(provider, server_group)
Code Example #20
 def deployments_in_mgmt(cls, provider=None, server=None):
     if provider is None:
         deployments = []
         for _provider in list_middleware_providers():
             deployments.extend(cls._deployments_in_mgmt(get_crud(_provider), server))
         return deployments
     else:
         return cls._deployments_in_mgmt(provider, server)
Code Example #21
 def __init__(self, name=None, provider_key=None, type=None, appliance=None):
     Navigatable.__init__(self, appliance)
     self.name = name
     self.type = type
     self.quad_name = 'datastore'
     if provider_key:
         self.provider = get_crud(provider_key)
     else:
         self.provider = None
Code Example #22
 def deployment_in_db(self):
     deployment = _db_select_query(name=self.name, server=self.server,
                                   provider=self.provider).first()
     if deployment:
         _provider = get_crud(get_provider_key(deployment.provider_name))
         _server = MiddlewareServer(name=deployment.server_name, feed=deployment.feed,
                                    provider=_provider)
         return MiddlewareDeployment(nativeid=deployment.nativeid, name=deployment.name,
                                     server=_server, provider=_provider, db_id=deployment.id)
     return None
Code Example #23
 def deployment_in_db(self):
     deployment = _db_select_query(name=self.name, server=self.server,
                                   provider=self.provider).first()
     if deployment:
         _provider = get_crud(get_provider_key(deployment.provider_name))
         _server = MiddlewareServer(name=deployment.server_name, feed=deployment.feed,
                                    provider=_provider)
         return MiddlewareDeployment(nativeid=deployment.nativeid, name=deployment.name,
                                     server=_server, provider=_provider)
     return None
Code Example #24
def deploy_template(provider_key, vm_name, template_name=None, timeout=900,
        **deploy_args):
    """
    Args:
        provider_key: Provider key on which the VM is to be created
        vm_name: Name of the VM to be deployed
        template_name: Name of the template that the VM is deployed from
        timeout: the timeout for template deploy
    """
    allow_skip = deploy_args.pop("allow_skip", ())
    if isinstance(allow_skip, dict):
        skip_exceptions = allow_skip.keys()
        callable_mapping = allow_skip
    elif isinstance(allow_skip, basestring) and allow_skip.lower() == "default":
        skip_exceptions = (OSOverLimit, RHEVRequestError, exceptions.VMInstanceNotCloned, SSLError)
        callable_mapping = {}
    else:
        skip_exceptions = allow_skip
        callable_mapping = {}
    provider_crud = get_crud(provider_key)

    deploy_args.update(vm_name=vm_name)

    if template_name is None:
        try:
            deploy_args.update(template=provider_crud.data['small_template'])
        except KeyError:
            raise ValueError('small_template not defined for Provider {} in cfme_data.yaml'.format(
                provider_key))
    else:
        deploy_args.update(template=template_name)

    deploy_args.update(provider_crud.deployment_helper(deploy_args))

    logger.info("Getting ready to deploy VM/instance %s from template %s on provider %s",
        vm_name, deploy_args['template'], provider_crud.data['name'])
    try:
        try:
            logger.debug("Deploy args: %s", deploy_args)
            vm_name = provider_crud.mgmt.deploy_template(timeout=timeout, **deploy_args)
            logger.info("Provisioned VM/instance %s", vm_name)  # instance ID in case of EC2
        except Exception as e:
            logger.error('Could not provision VM/instance %s (%s: %s)',
                vm_name, type(e).__name__, str(e))
            _vm_cleanup(provider_crud.mgmt, vm_name)
            raise
    except skip_exceptions as e:
        e_c = type(e)
        if e_c in callable_mapping and not callable_mapping[e_c](e):
            raise
        # Make it visible also in the log.
        store.write_line(
            "Skipping due to a provider error: {}: {}\n".format(e_c.__name__, str(e)), purple=True)
        logger.exception(e)
        pytest.skip("{}: {}".format(e_c.__name__, str(e)))
    return vm_name
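
A minimal call sketch, assuming 'vsphere55' is a provider key defined in cfme_data.yaml with a small_template entry:

# Hypothetical invocation; the provider key and VM name are illustrative.
vm_name = deploy_template('vsphere55', 'test-deploy-abc123', allow_skip='default')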
Code Example #25
File: server.py Project: akrzos/cfme_tests
 def servers_in_db(cls, name=None, feed=None, provider=None, strict=True):
     servers = []
     rows = _db_select_query(name=name, feed=feed, provider=provider).all()
     _provider = provider  # fall back to the passed-in provider when strict is False
     for server in rows:
         if strict:
             _provider = get_crud(get_provider_key(server.provider_name))
         servers.append(MiddlewareServer(name=server.name, hostname=server.hostname,
                                         feed=server.feed, product=server.product,
                                         db_id=server.id, provider=_provider,
                                         properties=parse_properties(server.properties)))
     return servers
Code Example #26
 def deployments_in_db(cls, server=None, provider=None, strict=True):
     deployments = []
     rows = _db_select_query(server=server, provider=provider).all()
     _provider = provider
     for deployment in rows:
         if strict:
             _provider = get_crud(get_provider_key(deployment.provider_name))
         _server = MiddlewareServer(name=deployment.server_name, feed=deployment.feed,
                                    provider=provider)
         deployments.append(MiddlewareDeployment(nativeid=deployment.nativeid,
                                                 name=deployment.name,
                                                 server=_server, provider=_provider))
     return deployments
Code Example #27
 def deployments_in_db(cls, server=None, provider=None, strict=True):
     deployments = []
     rows = _db_select_query(server=server, provider=provider).all()
     _provider = provider
     for deployment in rows:
         if strict:
             _provider = get_crud(get_provider_key(deployment.provider_name))
         _server = MiddlewareServer(name=deployment.server_name, feed=deployment.feed,
                                    provider=provider)
         deployments.append(MiddlewareDeployment(nativeid=deployment.nativeid,
                                                 name=deployment.name, db_id=deployment.id,
                                                 server=_server, provider=_provider))
     return deployments
Code Example #28
File: domain.py Project: FilipB/cfme_tests
 def domains_in_db(cls, name=None, feed=None, provider=None, strict=True):
     domains = []
     rows = _db_select_query(name=name, feed=feed, provider=provider).all()
     _provider = provider  # fall back to the passed-in provider when strict is False
     for domain in rows:
         if strict:
             _provider = get_crud(get_provider_key(domain.provider_name))
         domains.append(MiddlewareDomain(
             name=domain.name,
             feed=domain.feed,
             db_id=domain.id,
             provider=_provider,
             properties=parse_properties(domain.properties)))
     return domains
Code Example #29
 def __init__(self,
              name=None,
              provider_key=None,
              type=None,
              appliance=None):
     Navigatable.__init__(self, appliance)
     self.name = name
     self.type = type
     self.quad_name = 'datastore'
     if provider_key:
         self.provider = get_crud(provider_key, appliance=appliance)
     else:
         self.provider = None
Code Example #30
File: server.py Project: akrzos/cfme_tests
 def servers(cls, provider=None, strict=True):
     servers = []
     _get_servers_page(provider=provider)
     if sel.is_displayed(list_tbl):
         _provider = provider
         for _ in paginator.pages():
             for row in list_tbl.rows():
                 if strict:
                     _provider = get_crud(get_provider_key(row.provider.text))
                 servers.append(MiddlewareServer(name=row.server_name.text,
                                                 feed=row.feed.text,
                                                 product=row.product.text,
                                                 provider=_provider))
     return servers
Code Example #31
 def domains(cls, provider=None, strict=True):
     domains = []
     _get_domains_page(provider=provider)
     if sel.is_displayed(list_tbl):
         _provider = provider
         for _ in paginator.pages():
             for row in list_tbl.rows():
                 if strict:
                     _provider = get_crud(get_provider_key(row.provider.text))
                 domains.append(MiddlewareDomain(
                     name=row.domain_name.text,
                     feed=row.feed.text,
                     provider=_provider))
     return domains
Code Example #32
File: datasource.py Project: jistefan/cfme_tests
 def datasources_in_db(cls, server=None, provider=None, strict=True):
     datasources = []
     rows = _db_select_query(server=server, provider=provider).all()
     _provider = provider
     for datasource in rows:
         if strict:
             _provider = get_crud(get_provider_key(datasource.provider_name))
         _server = MiddlewareServer(name=datasource.server_name, feed=datasource.feed,
                                    provider=provider)
         datasources.append(MiddlewareDatasource(nativeid=datasource.nativeid,
                                                 name=datasource.name,
                                                 server=_server, provider=_provider,
                                                 properties=datasource.properties))
     return datasources
Code Example #33
 def datasources_in_db(cls, server=None, provider=None, strict=True):
     datasources = []
     rows = _db_select_query(server=server, provider=provider).all()
     _provider = provider
     for datasource in rows:
         if strict:
             _provider = get_crud(get_provider_key(datasource.provider_name))
         _server = MiddlewareServer(name=datasource.server_name, feed=datasource.feed,
                                    provider=provider)
         datasources.append(MiddlewareDatasource(nativeid=datasource.nativeid,
                                         name=datasource.name,
                                         server=_server, provider=_provider,
                                         properties=parse_properties(datasource.properties)))
     return datasources
Code Example #34
File: cluster.py Project: rrasouli/cfme_tests
    def __init__(self, name=None, provider_key=None):
        self.name = name
        self._short_name = self.name.split('in')[0].strip()

        col = rest_api().collections
        cluster = [cl for cl in col.clusters.all if cl.name == self._short_name][-1]
        self._cluster_id = cluster.id
        provider_id = cluster.ems_id

        # FIXME: remove this part when provider_key becomes a mandatory field
        if not provider_key:
            provider_name = [pr.name for pr in col.providers.all if pr.id == provider_id][-1]
            provider_key = get_provider_key(provider_name)

        self.provider = get_crud(provider_key)
Code Example #35
 def servers_in_db(cls, name=None, feed=None, provider=None, strict=True):
     servers = []
     rows = _db_select_query(name=name, feed=feed, provider=provider).all()
     _provider = provider  # fall back to the passed-in provider when strict is False
     for server in rows:
         if strict:
             _provider = get_crud(get_provider_key(server.provider_name))
         servers.append(
             MiddlewareServer(name=server.name,
                              hostname=server.hostname,
                              feed=server.feed,
                              product=server.product,
                              provider=_provider,
                              properties=parse_properties(
                                  server.properties)))
     return servers
Code Example #36
 def servers(cls, provider=None, strict=True):
     servers = []
     _get_servers_page(provider=provider)
     if sel.is_displayed(list_tbl):
         _provider = provider
         for _ in paginator.pages():
             for row in list_tbl.rows():
                 if strict:
                     _provider = get_crud(
                         get_provider_key(row.provider.text))
                 servers.append(
                     MiddlewareServer(name=row.server_name.text,
                                      feed=row.feed.text,
                                      product=row.product.text,
                                      provider=_provider))
     return servers
Code Example #37
File: test_alerts.py Project: FilipB/cfme_tests
def initialize_provider(provider, setup_provider_modscope):
    # Remove other providers
    for provider_key in existing_providers():
        if provider_key == provider.key:
            continue
        provider_to_delete = get_crud(provider_key)
        if provider_to_delete.exists:
            provider_to_delete.delete(cancel=False)
    # Take care of C&U settings
    if provider.type not in CANDU_PROVIDER_TYPES:
        yield provider
    else:
        try:
            with server_roles_enabled(
                    'ems_metrics_coordinator', 'ems_metrics_collector', 'ems_metrics_processor'):
                candu.enable_all()
                yield provider
        finally:
            candu.disable_all()
Code Example #38
 def messagings_in_db(cls, server=None, provider=None, strict=True):
     messagings = []
     rows = _db_select_query(server=server, provider=provider).all()
     _provider = provider
     for messaging in rows:
         if strict:
             _provider = get_crud(get_provider_key(messaging.provider_name))
         _server = MiddlewareServer(
             name=messaging.server_name,
             feed=messaging.feed,
             provider=provider)
         messagings.append(MiddlewareMessaging(
             nativeid=messaging.nativeid,
             name=messaging.name,
             db_id=messaging.id,
             server=_server,
             provider=_provider,
             messaging_type=messaging.messaging_type,
             properties=parse_properties(messaging.properties)))
     return messagings
Code Example #39
File: test_alerts.py Project: vprusa/cfme_tests
def initialize_provider(provider, setup_provider_modscope):
    # Remove other providers
    for provider_key in existing_providers():
        if provider_key == provider.key:
            continue
        provider_to_delete = get_crud(provider_key)
        if provider_to_delete.exists:
            provider_to_delete.delete(cancel=False)
    # Take care of C&U settings
    if provider.type not in CANDU_PROVIDER_TYPES:
        yield provider
    else:
        try:
            with server_roles_enabled('ems_metrics_coordinator',
                                      'ems_metrics_collector',
                                      'ems_metrics_processor'):
                candu.enable_all()
                yield provider
        finally:
            candu.disable_all()
Code Example #40
File: openstack.py Project: rananda/cfme_tests
 def from_config(cls, prov_config, prov_key):
     from utils.providers import get_crud
     credentials_key = prov_config['credentials']
     credentials = cls.process_credential_yaml_key(credentials_key)
     creds = {'default': credentials}
     if 'amqp_credentials' in prov_config:
         amqp_credentials = cls.process_credential_yaml_key(
             prov_config['amqp_credentials'], cred_type='amqp')
         creds['amqp'] = amqp_credentials
     infra_prov_key = prov_config.get('infra_provider_key')
     infra_provider = get_crud(infra_prov_key) if infra_prov_key else None
     return cls(name=prov_config['name'],
         hostname=prov_config['hostname'],
         ip_address=prov_config['ipaddress'],
         api_port=prov_config['port'],
         credentials=creds,
         zone=prov_config['server_zone'],
         key=prov_key,
         sec_protocol=prov_config.get('sec_protocol', "Non-SSL"),
         tenant_mapping=prov_config.get('tenant_mapping', False),
         infra_provider=infra_provider)
Code Example #41
File: openstack.py Project: dajohnso/cfme_tests
    def from_config(cls, prov_config, prov_key, appliance=None):
        endpoints = {}
        endpoints[RHOSEndpoint.name] = RHOSEndpoint(**prov_config['endpoints'][RHOSEndpoint.name])

        endp_name = EventsEndpoint.name
        if prov_config['endpoints'].get(endp_name):
            endpoints[endp_name] = EventsEndpoint(**prov_config['endpoints'][endp_name])

        from utils.providers import get_crud
        infra_prov_key = prov_config.get('infra_provider_key')
        infra_provider = get_crud(infra_prov_key, appliance=appliance) if infra_prov_key else None
        return cls(name=prov_config['name'],
                   hostname=prov_config['hostname'],
                   ip_address=prov_config['ipaddress'],
                   api_port=prov_config['port'],
                   endpoints=endpoints,
                   zone=prov_config['server_zone'],
                   key=prov_key,
                   sec_protocol=prov_config.get('sec_protocol', "Non-SSL"),
                   tenant_mapping=prov_config.get('tenant_mapping', False),
                   infra_provider=infra_provider,
                   appliance=appliance)
Code Example #42
 def from_config(cls, prov_config, prov_key, appliance=None):
     from utils.providers import get_crud
     credentials_key = prov_config['credentials']
     credentials = cls.process_credential_yaml_key(credentials_key)
     creds = {'default': credentials}
     if 'amqp_credentials' in prov_config:
         amqp_credentials = cls.process_credential_yaml_key(
             prov_config['amqp_credentials'], cred_type='amqp')
         creds['amqp'] = amqp_credentials
     infra_prov_key = prov_config.get('infra_provider_key')
     infra_provider = get_crud(
         infra_prov_key, appliance=appliance) if infra_prov_key else None
     return cls(name=prov_config['name'],
                hostname=prov_config['hostname'],
                ip_address=prov_config['ipaddress'],
                api_port=prov_config['port'],
                credentials=creds,
                zone=prov_config['server_zone'],
                key=prov_key,
                sec_protocol=prov_config.get('sec_protocol', "Non-SSL"),
                tenant_mapping=prov_config.get('tenant_mapping', False),
                infra_provider=infra_provider,
                appliance=appliance)
Code Example #43
def deploy_template(provider_key,
                    vm_name,
                    template_name=None,
                    timeout=900,
                    **deploy_args):
    """
    Args:
        provider_key: Provider key on which the VM is to be created
        vm_name: Name of the VM to be deployed
        template_name: Name of the template that the VM is deployed from
        timeout: the timeout for template deploy
    """
    allow_skip = deploy_args.pop("allow_skip", ())
    if isinstance(allow_skip, dict):
        skip_exceptions = allow_skip.keys()
        callable_mapping = allow_skip
    elif isinstance(allow_skip,
                    basestring) and allow_skip.lower() == "default":
        skip_exceptions = (OSOverLimit, RHEVRequestError,
                           exceptions.VMInstanceNotCloned, SSLError)
        callable_mapping = {}
    else:
        skip_exceptions = allow_skip
        callable_mapping = {}
    provider_crud = get_crud(provider_key)

    deploy_args.update(vm_name=vm_name)

    if template_name is None:
        try:
            deploy_args.update(template=provider_crud.data['small_template'])
        except KeyError:
            raise ValueError(
                'small_template not defined for Provider {} in cfme_data.yaml'.
                format(provider_key))
    else:
        deploy_args.update(template=template_name)

    deploy_args.update(provider_crud.deployment_helper(deploy_args))

    logger.info(
        "Getting ready to deploy VM/instance %s from template %s on provider %s",
        vm_name, deploy_args['template'], provider_crud.data['name'])
    try:
        try:
            logger.debug("Deploy args: %s", deploy_args)
            vm_name = provider_crud.mgmt.deploy_template(timeout=timeout,
                                                         **deploy_args)
            logger.info("Provisioned VM/instance %s",
                        vm_name)  # instance ID in case of EC2
        except Exception as e:
            logger.error('Could not provision VM/instance %s (%s: %s)',
                         vm_name,
                         type(e).__name__, str(e))
            _vm_cleanup(provider_crud.mgmt, vm_name)
            raise
    except skip_exceptions as e:
        e_c = type(e)
        if e_c in callable_mapping and not callable_mapping[e_c](e):
            raise
        # Make it visible also in the log.
        store.write_line("Skipping due to a provider error: {}: {}\n".format(
            e_c.__name__, str(e)),
                         purple=True)
        logger.exception(e)
        pytest.skip("{}: {}".format(e_c.__name__, str(e)))
    return vm_name
Code Example #44
File: testgen.py Project: prachiyadav/cfme_tests
def provider_by_type(metafunc, provider_types, *fields, **options):
    """Get the values of the named field keys from ``cfme_data.get('management_systems', {})``

    Args:
        provider_types: A list of provider types to include. If None, all providers are considered
        *fields: Names of keys in an individual provider dict whose values will be returned when
            used as test function arguments
        **options: Explained below

    The ``**options`` available are defined below:

    * ``require_fields``: when the passed fields are not present, skip the provider (boolean)
    * ``template_location``: specification of where a required template lies in the yaml. If it
      is not found in the provider, a warning is printed and the test is not collected. The spec
      is a tuple or list where each item is a key to the next field (str or int).

    The following test function arguments are special:

        ``provider``
            the provider's CRUD object, either a :py:class:`cfme.cloud.provider.Provider`
            or a :py:class:`cfme.infrastructure.provider.Provider`

    Returns:
        A tuple of ``(argnames, argvalues, idlist)`` for use in a pytest_generate_tests hook, or
        with the :py:func:`parametrize` helper.

    Usage:

        # In the function itself
        def pytest_generate_tests(metafunc):
            argnames, argvalues, idlist = testgen.provider_by_type(
                ['openstack', 'ec2'],
                'type', 'name', 'credentials', 'provider', 'hosts'
            )
        metafunc.parametrize(argnames, argvalues, ids=idlist, scope='module')

        # Using the parametrize wrapper
        pytest_generate_tests = testgen.parametrize(testgen.provider_by_type, ['openstack', 'ec2'],
            'type', 'name', 'credentials', 'provider', 'hosts', scope='module')

    Note:

        Using the default 'function' scope, each test will be run individually for each provider
        before moving on to the next test. To group all tests related to single provider together,
        parametrize tests in the 'module' scope.

    Note:

        testgen for providers now requires the usage of test_flags for collection to work.
        Please visit http://cfme-tests.readthedocs.org/guides/documenting.html#documenting-tests
        for more details.

    """

    metafunc.function = pytest.mark.uses_testgen()(metafunc.function)

    argnames = list(fields)
    argvalues = []
    idlist = []
    template_location = options.pop("template_location", None)

    if 'provider' in metafunc.fixturenames and 'provider' not in argnames:
        argnames.append('provider')

    for provider, data in cfme_data.get('management_systems', {}).iteritems():

        # Check provider hasn't been filtered out with --use-provider
        if provider not in filtered:
            continue

        try:
            prov_obj = get_crud(provider)
        except UnknownProviderType:
            continue

        if not prov_obj:
            logger.debug("Whilst trying to create an object for %s we failed", provider)
            continue

        skip = False
        if provider_types is not None and prov_obj.type not in provider_types:
            # Skip unwanted types
            continue

        restricted_version = data.get('restricted_version', None)
        if restricted_version:
            logger.info('we found a restricted version')
            for op, comparator in _version_operator_map.items():
                # split string by op; if the split works, version won't be empty
                head, op, ver = restricted_version.partition(op)
                if not ver:  # This means that the operator was not found
                    continue
                if not comparator(version.current_version(), ver):
                    skip = True
                break
            else:
                raise Exception('Operator not found in {}'.format(restricted_version))

        # Test to see the test has meta data, if it does and that metadata contains
        # a test_flag kwarg, then check to make sure the provider contains that test_flag
        # if not, do not collect the provider for this particular test.

        # Obtain the tests flags
        meta = getattr(metafunc.function, 'meta', None)

        test_flags = getattr(meta, 'kwargs', {}) \
            .get('from_docs', {}).get('test_flag', '').split(',')
        if test_flags != ['']:
            test_flags = [flag.strip() for flag in test_flags]

            defined_flags = cfme_data.get('test_flags', '').split(',')
            defined_flags = [flag.strip() for flag in defined_flags]

            excluded_flags = data.get('excluded_test_flags', '').split(',')
            excluded_flags = [flag.strip() for flag in excluded_flags]

            allowed_flags = set(defined_flags) - set(excluded_flags)

            if set(test_flags) - allowed_flags:
                logger.info("Skipping Provider %s for test %s in module %s because "
                    "it does not have the right flags, "
                    "%s does not contain %s",
                    provider, metafunc.function.func_name, metafunc.function.__module__,
                    list(allowed_flags), list(set(test_flags) - allowed_flags))
                continue

        try:
            if "since_version" in data:
                # Ignore providers that are not supported in this version yet
                if version.current_version() < data["since_version"]:
                    continue
        except Exception:  # No SSH connection
            continue

        # Get values for the requested fields, filling in with None for undefined fields
        data_values = {field: data.get(field, None) for field in fields}

        # Go through the values and handle the special 'data' name
        # report the undefined fields to the log
        for key in data_values.keys():
            if data_values[key] is None:
                if 'require_fields' not in options:
                    options['require_fields'] = True
                if options['require_fields']:
                    skip = True
                    logger.warning('Field "%s" not defined for provider "%s", skipping' %
                        (key, provider)
                    )
                else:
                    logger.debug('Field "%s" not defined for provider "%s", defaulting to None' %
                        (key, provider)
                    )
        if skip:
            continue

        # Check the template presence if requested
        if template_location is not None:
            o = data
            try:
                for field in template_location:
                    o = o[field]
            except (IndexError, KeyError):
                logger.info("Cannot apply %s to %s in the template specification, ignoring.",
                    repr(field), repr(o))
            else:
                if not isinstance(o, basestring):
                    raise ValueError("{} is not a string! (for template)".format(repr(o)))
                templates = TEMPLATES.get(provider, None)
                if templates is not None:
                    if o not in templates:
                        logger.info(
                            "Wanted template %s on %s but it is not there!\n", o, provider)
                        # Skip collection of this one
                        continue

        values = []
        for arg in argnames:
            if arg == 'provider':
                metafunc.function = pytest.mark.provider_related()(metafunc.function)
                values.append(prov_obj)
            elif arg in data_values:
                values.append(data_values[arg])

        # a provider is skipped above when a required field is missing and
        # options['require_fields'] is True
        argvalues.append(values)

        # Use the provider name for idlist, helps with readable parametrized test output
        idlist.append(provider)

    return argnames, argvalues, idlist
Code Example #45
def test_refresh_providers(appliance, request, scenario):
    """
    Refreshes providers then waits for a specific amount of time.
    Memory Monitor creates graphs and summary at the end of the scenario.
    """
    from_ts = int(time.time() * 1000)
    logger.debug('Scenario: {}'.format(scenario['name']))

    appliance.clean_appliance()

    quantifiers = {}
    scenario_data = {
        'appliance_ip': appliance.hostname,
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-refresh-providers',
        'test_name': 'Refresh Providers',
        'appliance_roles': ', '.join(roles_refresh_providers),
        'scenario': scenario
    }
    monitor_thread = SmemMemoryMonitor(appliance.ssh_client(), scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info(
            'Finished cleaning up monitoring thread in {}'.format(timediff))

    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts,
                                                  quantifiers, scenario_data))
    monitor_thread.start()

    appliance.wait_for_miq_server_workers_started(poll_interval=2)
    appliance.update_server_roles(
        {role: True
         for role in roles_refresh_providers})
    for prov in scenario['providers']:
        get_crud(prov).create_rest()

    # Variable amount of time for refresh workload
    total_time = scenario['total_time']
    starttime = time.time()
    time_between_refresh = scenario['time_between_refresh']
    total_refreshed_providers = 0

    while ((time.time() - starttime) < total_time):
        start_refresh_time = time.time()
        appliance.rest_api.collections.providers.reload()
        for prov in appliance.rest_api.collections.providers.all:
            prov.action.reload()
            total_refreshed_providers += 1
        iteration_time = time.time()

        refresh_time = round(iteration_time - start_refresh_time, 2)
        elapsed_time = iteration_time - starttime
        logger.debug('Time to Queue Refreshes: {}'.format(refresh_time))
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2),
                                                 total_time))

        if refresh_time < time_between_refresh:
            wait_diff = time_between_refresh - refresh_time
            time_remaining = total_time - elapsed_time
            if (time_remaining > 0 and time_remaining < time_between_refresh):
                time.sleep(time_remaining)
            elif time_remaining > 0:
                time.sleep(wait_diff)
        else:
            logger.warn('Time to Queue Refreshes ({}) exceeded time between '
                        'refreshes ({})'.format(refresh_time, time_between_refresh))

    quantifiers['Elapsed_Time'] = round(time.time() - starttime, 2)
    quantifiers['Queued_Provider_Refreshes'] = total_refreshed_providers
    logger.info('Test Ending...')
Code Example #46
def test_workload_capacity_and_utilization_rep(appliance, request, scenario,
                                               setup_perf_provider):
    """Runs through provider based scenarios enabling C&U and replication, run for a set period of
    time. Memory Monitor creates graphs and summary at the end of each scenario."""
    from_ts = int(time.time() * 1000)
    ssh_client = appliance.ssh_client()

    ssh_master_args = {
        'hostname': scenario['replication_master']['ip_address'],
        'username': scenario['replication_master']['ssh']['username'],
        'password': scenario['replication_master']['ssh']['password']
    }
    master_appliance = IPAppliance(
        address=scenario['replication_master']['ip_address'],
        openshift_creds=ssh_master_args)

    ssh_client_master = SSHClient(**ssh_master_args)
    logger.debug('Scenario: {}'.format(scenario['name']))

    is_pglogical = scenario['replication'] == 'pglogical'

    # Turn off master pglogical replication in case a rubyrep scenario follows a pglogical scenario
    appliance.set_pglogical_replication(replication_type=':none')
    # Spawn the tail beforehand to prevent unnecessary waiting on MiqServer starting, since the
    # appliance under test is cleaned first, followed by the master appliance
    sshtail_evm = SSHTail('/var/www/miq/vmdb/log/evm.log')
    sshtail_evm.set_initial_file_end()
    logger.info('Clean appliance under test ({})'.format(ssh_client))
    appliance.clean_appliance()
    logger.info('Clean master appliance ({})'.format(ssh_client_master))
    master_appliance.clean_appliance()  # Clean Replication master appliance

    if is_pglogical:
        scenario_data = {
            'appliance_ip': appliance.hostname,
            'appliance_name': cfme_performance['appliance']['appliance_name'],
            'test_dir': 'workload-cap-and-util-rep',
            'test_name': 'Capacity and Utilization Replication (pgLogical)',
            'appliance_roles': ', '.join(roles_cap_and_util_rep),
            'scenario': scenario
        }
    else:
        scenario_data = {
            'appliance_ip': cfme_performance['appliance']['ip_address'],
            'appliance_name': cfme_performance['appliance']['appliance_name'],
            'test_dir': 'workload-cap-and-util-rep',
            'test_name': 'Capacity and Utilization Replication (RubyRep)',
            'appliance_roles': ', '.join(roles_cap_and_util_rep),
            'scenario': scenario
        }
    quantifiers = {}
    monitor_thread = SmemMemoryMonitor(appliance.ssh_client(), scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info(
            'Finished cleaning up monitoring thread in {}'.format(timediff))

    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts,
                                                  quantifiers, scenario_data))

    monitor_thread.start()

    appliance.wait_for_miq_server_workers_started(evm_tail=sshtail_evm,
                                                  poll_interval=2)
    appliance.update_server_roles(
        {role: True
         for role in roles_cap_and_util_rep})
    for provider in scenario['providers']:
        get_crud(provider).create_rest()
    logger.info('Sleeping for Refresh: {}s'.format(
        scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])
    appliance.set_cap_and_util_all_via_rails()

    # Configure Replication
    if is_pglogical:
        # Setup appliance under test to :remote
        appliance.set_pglogical_replication(replication_type=':remote')
        # Setup master appliance to :global
        master_appliance.set_pglogical_replication(replication_type=':global')
        # Setup master to subscribe:
        master_appliance.add_pglogical_replication_subscription(
            ssh_client_master, appliance.address)
    else:
        # Setup local towards Master
        appliance.set_rubyrep_replication(
            scenario['replication_master']['ip_address'])
        # Force uninstall rubyrep for this region from master (Unsure if still needed)
        # ssh_client.run_rake_command('evm:dbsync:uninstall')
        # time.sleep(30)  # Wait to quiesce
        # Turn on DB Sync role
        appliance.update_server_roles(
            {role: True
             for role in roles_cap_and_util_rep})

    # Variable amount of time for C&U collections/processing
    total_time = scenario['total_time']
    starttime = time.time()
    elapsed_time = 0
    while (elapsed_time < total_time):
        elapsed_time = time.time() - starttime
        time_left = total_time - elapsed_time
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2),
                                                 total_time))
        if (time_left > 0 and time_left < 300):
            time.sleep(time_left)
        elif time_left > 0:
            time.sleep(300)

    # Turn off replication:
    if is_pglogical:
        appliance.set_pglogical_replication(replication_type=':none')
    else:
        appliance.update_server_roles(
            {role: True
             for role in roles_cap_and_util_rep})

    quantifiers['Elapsed_Time'] = round(elapsed_time, 2)
    logger.info('Test Ending...')
Code Example #47
def test_workload_smartstate_analysis(appliance, request, scenario):
    """Runs through provider based scenarios initiating smart state analysis against VMs, Hosts,
    and Datastores"""
    from_ts = int(time.time() * 1000)
    logger.debug('Scenario: {}'.format(scenario['name']))
    appliance.install_vddk()

    appliance.clean_appliance()

    quantifiers = {}
    scenario_data = {'appliance_ip': appliance.hostname,
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-ssa',
        'test_name': 'SmartState Analysis',
        'appliance_roles': ', '.join(roles_smartstate),
        'scenario': scenario}
    monitor_thread = SmemMemoryMonitor(appliance.ssh_client(), scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts, quantifiers, scenario_data))

    monitor_thread.start()

    appliance.wait_for_miq_server_workers_started(poll_interval=2)
    appliance.update_server_roles({role: True for role in roles_smartstate})
    for provider in scenario['providers']:
        get_crud(provider).create_rest()
    logger.info('Sleeping for Refresh: {}s'.format(scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])

    # Add host credentials and set CFME relationship for RHEVM SSA
    for provider in scenario['providers']:
        for api_host in appliance.rest_api.collections.hosts.all:
            test_host = host.Host(name=api_host.name, provider=provider)
            host_data = get_host_data_by_name(get_crud(provider), api_host.name)
            credentials = host.get_credentials_from_config(host_data['credentials'])
            test_host.update_credentials_rest(credentials)
        appliance.set_cfme_server_relationship(cfme_performance['appliance']['appliance_name'])

    # Variable amount of time for SmartState Analysis workload
    total_time = scenario['total_time']
    starttime = time.time()
    time_between_analyses = scenario['time_between_analyses']
    total_scanned_vms = 0

    while ((time.time() - starttime) < total_time):
        start_ssa_time = time.time()
        for vm in scenario['vms_to_scan'].values()[0]:
            vm_api = appliance.rest_api.collections.vms.get(name=vm)
            vm_api.action.scan()
            total_scanned_vms += 1
        iteration_time = time.time()

        ssa_time = round(iteration_time - start_ssa_time, 2)
        elapsed_time = iteration_time - starttime
        logger.debug('Time to Queue SmartState Analyses: {}'.format(ssa_time))
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))

        if ssa_time < time_between_analyses:
            wait_diff = time_between_analyses - ssa_time
            time_remaining = total_time - elapsed_time
            if (time_remaining > 0 and time_remaining < time_between_analyses):
                time.sleep(time_remaining)
            elif time_remaining > 0:
                time.sleep(wait_diff)
        else:
            logger.warn('Time to Queue SmartState Analyses ({}) exceeded time between '
                'analyses ({})'.format(ssa_time, time_between_analyses))

    quantifiers['Elapsed_Time'] = round(time.time() - starttime, 2)
    quantifiers['Queued_VM_Scans'] = total_scanned_vms
    logger.info('Test Ending...')
Code Example #48
def deploy_template(provider_key,
                    vm_name,
                    template_name=None,
                    timeout=900,
                    **deploy_args):
    """
    Args:
        provider_key: Provider key on which the VM is to be created
        vm_name: Name of the VM to be deployed
        template_name: Name of the template that the VM is deployed from
        timeout: the timeout for template deploy
    """
    allow_skip = deploy_args.pop("allow_skip", ())
    if isinstance(allow_skip, dict):
        skip_exceptions = allow_skip.keys()
        callable_mapping = allow_skip
    elif isinstance(allow_skip,
                    basestring) and allow_skip.lower() == "default":
        skip_exceptions = (OSOverLimit, RHEVRequestError,
                           exceptions.VMInstanceNotCloned, SSLError)
        callable_mapping = {}
    else:
        skip_exceptions = allow_skip
        callable_mapping = {}
    provider_crud = get_crud(provider_key)

    mgmt = provider_crud.get_mgmt_system()
    data = provider_crud.get_yaml_data()

    deploy_args.update(vm_name=vm_name)

    if template_name is None:
        try:
            deploy_args.update(template=data['small_template'])
        except KeyError:
            raise ValueError(
                'small_template not defined for Provider {} in cfme_data.yaml'.format(
                    provider_key))
    else:
        deploy_args.update(template=template_name)

    if isinstance(mgmt, RHEVMSystem):
        if 'default_cluster' not in deploy_args:
            deploy_args.update(cluster=data['default_cluster'])
    elif isinstance(mgmt, VMWareSystem):
        if "allowed_datastores" not in deploy_args and "allowed_datastores" in data:
            deploy_args.update(allowed_datastores=data['allowed_datastores'])
    elif isinstance(mgmt, SCVMMSystem):
        if 'host_group' not in deploy_args:
            deploy_args.update(host_group=data.get("host_group", "All Hosts"))
    elif isinstance(mgmt, EC2System):
        pass
    elif isinstance(mgmt, OpenstackSystem):
        if ('network_name' not in deploy_args) and data.get('network'):
            deploy_args.update(network_name=data['network'])
    else:
        raise Exception("Unsupported provider type: %s" %
                        mgmt.__class__.__name__)

    logger.info(
        "Getting ready to deploy VM/instance %s from template %s on provider %s"
        % (vm_name, deploy_args['template'], data['name']))
    try:
        try:
            logger.debug("Deploy args: {}".format(deploy_args))
            vm_name = mgmt.deploy_template(timeout=timeout, **deploy_args)
            logger.info("Provisioned VM/instance %s" %
                        vm_name)  # instance ID in case of EC2
        except Exception as e:
            logger.error(
                'Could not provision VM/instance {} ({}: {})'.format(
                    vm_name,
                    type(e).__name__, str(e)))
            _vm_cleanup(mgmt, vm_name)
            raise
    except skip_exceptions as e:
        e_c = type(e)
        if e_c in callable_mapping and not callable_mapping[e_c](e):
            raise
        # Make it visible also in the log.
        store.write_line("Skipping due to a provider error: {}: {}\n".format(
            e_c.__name__, str(e)),
                         purple=True)
        pytest.skip("{}: {}".format(e_c.__name__, str(e)))
    return vm_name
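
For reference, the three `allow_skip` forms handled at the top of `deploy_template` could be exercised as below. The provider keys and the quota predicate are illustrative only, so the calls are left commented:

# 1. The literal string "default" skips a fixed set of provider errors:
# deploy_template('vsphere55', 'test-vm-01', allow_skip='default')
#
# 2. A tuple of exception classes skips exactly those:
# deploy_template('rhevm36', 'test-vm-02', allow_skip=(SSLError,))
#
# 3. A dict maps exception class -> predicate; the skip happens only when
#    the predicate returns a truthy value for the caught exception:
# deploy_template('openstack10', 'test-vm-03',
#                 allow_skip={OSOverLimit: lambda e: 'quota' in str(e)})
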
コード例 #49
0
def deploy_template(provider_key,
                    vm_name,
                    template_name=None,
                    timeout=900,
                    **deploy_args):
    """
    Args:
        provider_key: Provider key on which the VM is to be created
        vm_name: Name of the VM to be deployed
        template_name: Name of the template that the VM is deployed from
    """
    allow_skip = deploy_args.pop("allow_skip", ())
    if isinstance(allow_skip, dict):
        skip_exceptions = allow_skip.keys()
        callable_mapping = allow_skip
    elif isinstance(allow_skip,
                    basestring) and allow_skip.lower() == "default":
        skip_exceptions = (OSOverLimit, RHEVRequestError, VMInstanceNotCloned,
                           SSLError)
        callable_mapping = {}
    else:
        skip_exceptions = allow_skip
        callable_mapping = {}
    provider_crud = get_crud(provider_key)

    mgmt = provider_crud.get_mgmt_system()
    data = provider_crud.get_yaml_data()

    deploy_args.update(vm_name=vm_name)
    if isinstance(mgmt, RHEVMSystem):
        if 'default_cluster' not in deploy_args:
            deploy_args.update(cluster=data['default_cluster'])
    elif isinstance(mgmt, VMWareSystem):
        if "allowed_datastores" not in deploy_args and "allowed_datastores" in data:
            deploy_args.update(allowed_datastores=data['allowed_datastores'])
    elif isinstance(mgmt, SCVMMSystem):
        if 'host_group' not in deploy_args:
            deploy_args.update(host_group=data.get("host_group", "All Hosts"))
    elif isinstance(mgmt, EC2System):
        pass
    elif isinstance(mgmt, OpenstackSystem):
        if ('network_name' not in deploy_args) and data.get('network'):
            deploy_args.update(network_name=data['network'])
    else:
        raise Exception("Unsupported provider type: %s" %
                        mgmt.__class__.__name__)

    if template_name is None:
        template_name = data['small_template']

    logger.info(
        "Getting ready to deploy VM/instance %s from template %s on provider %s"
        % (vm_name, template_name, data['name']))
    try:
        try:
            logger.debug("Deploy args: " + str(deploy_args))
            vm_name = mgmt.deploy_template(template_name,
                                           timeout=timeout,
                                           **deploy_args)
            logger.info("Provisioned VM/instance %s" %
                        vm_name)  # instance ID in case of EC2
        except Exception as e:
            logger.error('Could not provision VM/instance %s (%s)', vm_name,
                         e)
            try:
                logger.info("VM/Instance status: {}".format(
                    mgmt.vm_status(vm_name)))
            except Exception as f:
                logger.error(
                    "Could not retrieve VM/Instance status: {}: {}".format(
                        type(f).__name__, str(f)))
            logger.info('Attempting cleanup on VM/instance %s', vm_name)
            try:
                if mgmt.does_vm_exist(vm_name):
                    # Stop the vm first
                    logger.warning('Destroying VM/instance %s', vm_name)
                    if mgmt.delete_vm(vm_name):
                        logger.info('VM/instance %s destroyed', vm_name)
                    else:
                        logger.error('Error destroying VM/instance %s',
                                     vm_name)
            except Exception as f:
                logger.error(
                    'Could not destroy VM/instance {} ({}: {})'.format(
                        vm_name,
                        type(f).__name__, str(f)))
            finally:
                raise e
    except skip_exceptions as e:
        e_c = type(e)
        if e_c in callable_mapping and not callable_mapping[e_c](e):
            raise
        # Make it visible also in the log.
        store.write_line("Skipping due to a provider error: {}: {}\n".format(
            e_c.__name__, str(e)),
                         purple=True)
        pytest.skip("{}: {}".format(e_c.__name__, str(e)))
    return vm_name
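
コード例 #48 calls a `_vm_cleanup` helper that is not shown there; a plausible implementation, inferred from the inline cleanup in コード例 #49 (the `does_vm_exist`/`delete_vm` method names come from the examples above, and `logger` is assumed to be the module logger):

def _vm_cleanup(mgmt, vm_name):
    # Best-effort teardown; never let a cleanup error mask the original
    # provisioning failure that the caller is about to re-raise.
    try:
        if mgmt.does_vm_exist(vm_name):
            logger.warning('Destroying VM/instance %s', vm_name)
            if mgmt.delete_vm(vm_name):
                logger.info('VM/instance %s destroyed', vm_name)
            else:
                logger.error('Error destroying VM/instance %s', vm_name)
    except Exception as f:
        logger.error('Could not destroy VM/instance {} ({}: {})'.format(
            vm_name, type(f).__name__, str(f)))
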
コード例 #50
0
def test_workload_capacity_and_utilization_rep(appliance, request, scenario, setup_perf_provider):
    """Runs through provider based scenarios enabling C&U and replication, run for a set period of
    time. Memory Monitor creates graphs and summary at the end of each scenario."""
    from_ts = int(time.time() * 1000)
    ssh_client = appliance.ssh_client()

    ssh_master_args = {
        'hostname': scenario['replication_master']['ip_address'],
        'username': scenario['replication_master']['ssh']['username'],
        'password': scenario['replication_master']['ssh']['password']}
    master_appliance = IPAppliance(address=scenario['replication_master']['ip_address'],
                                   openshift_creds=ssh_master_args)

    ssh_client_master = SSHClient(**ssh_master_args)
    logger.debug('Scenario: {}'.format(scenario['name']))

    is_pglogical = scenario['replication'] == 'pglogical'

    # Turn off master pglogical replication in case a rubyrep scenario follows a pglogical one
    appliance.set_pglogical_replication(replication_type=':none')
    # Spawn tail beforehand to prevent unnecessary waiting on MiqServer starting, since the
    # appliance under test is cleaned first, followed by the master appliance
    sshtail_evm = SSHTail('/var/www/miq/vmdb/log/evm.log')
    sshtail_evm.set_initial_file_end()
    logger.info('Clean appliance under test ({})'.format(ssh_client))
    appliance.clean_appliance()
    logger.info('Clean master appliance ({})'.format(ssh_client_master))
    master_appliance.clean_appliance()  # Clean Replication master appliance

    if is_pglogical:
        scenario_data = {'appliance_ip': appliance.hostname,
            'appliance_name': cfme_performance['appliance']['appliance_name'],
            'test_dir': 'workload-cap-and-util-rep',
            'test_name': 'Capacity and Utilization Replication (pgLogical)',
            'appliance_roles': ', '.join(roles_cap_and_util_rep),
            'scenario': scenario}
    else:
        scenario_data = {'appliance_ip': cfme_performance['appliance']['ip_address'],
            'appliance_name': cfme_performance['appliance']['appliance_name'],
            'test_dir': 'workload-cap-and-util-rep',
            'test_name': 'Capacity and Utilization Replication (RubyRep)',
            'appliance_roles': ', '.join(roles_cap_and_util_rep),
            'scenario': scenario}
    quantifiers = {}
    monitor_thread = SmemMemoryMonitor(appliance.ssh_client(), scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts, quantifiers, scenario_data))

    monitor_thread.start()

    appliance.wait_for_miq_server_workers_started(evm_tail=sshtail_evm, poll_interval=2)
    appliance.update_server_roles({role: True for role in roles_cap_and_util_rep})
    for provider in scenario['providers']:
        get_crud(provider).create_rest()
    logger.info('Sleeping for Refresh: {}s'.format(scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])
    appliance.set_cap_and_util_all_via_rails()

    # Configure Replication
    if is_pglogical:
        # Setup appliance under test to :remote
        appliance.set_pglogical_replication(replication_type=':remote')
        # Setup master appliance to :global
        master_appliance.set_pglogical_replication(replication_type=':global')
        # Setup master to subscribe:
        master_appliance.add_pglogical_replication_subscription(ssh_client_master,
            appliance.address)
    else:
        # Setup local towards Master
        appliance.set_rubyrep_replication(scenario['replication_master']['ip_address'])
        # Force uninstall rubyrep for this region from master (Unsure if still needed)
        # ssh_client.run_rake_command('evm:dbsync:uninstall')
        # time.sleep(30)  # Wait to quiesce
        # Turn on DB Sync role
        appliance.update_server_roles({role: True for role in roles_cap_and_util_rep})

    # Variable amount of time for C&U collections/processing
    total_time = scenario['total_time']
    starttime = time.time()
    elapsed_time = 0
    while (elapsed_time < total_time):
        elapsed_time = time.time() - starttime
        time_left = total_time - elapsed_time
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))
        if (time_left > 0 and time_left < 300):
            time.sleep(time_left)
        elif time_left > 0:
            time.sleep(300)

    # Turn off replication:
    if is_pglogical:
        appliance.set_pglogical_replication(replication_type=':none')
    else:
        appliance.update_server_roles({role: True for role in roles_cap_and_util_rep})

    quantifiers['Elapsed_Time'] = round(elapsed_time, 2)
    logger.info('Test Ending...')
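
The replication test above reads a number of keys from the `scenario` mapping. A guess at its minimal shape, reconstructed purely from the accesses in this test (all values are placeholders):

scenario = {
    'name': 'cap-and-util-rep-pglogical',
    'replication': 'pglogical',            # or 'rubyrep'
    'replication_master': {
        'ip_address': '10.0.0.1',
        'ssh': {'username': 'root', 'password': 'changeme'},
    },
    'providers': ['vsphere55'],
    'refresh_sleep_time': 600,
    'total_time': 3600,
}
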
コード例 #51
0
def provider_by_type(metafunc, provider_types, *fields, **options):
    """Get the values of the named field keys from ``cfme_data.get('management_systems', {})``

    Args:
        provider_types: A list of provider types to include. If None, all providers are considered
        *fields: Names of keys in an individual provider dict whose values will be returned when
            used as test function arguments
        **options: Explained below

    The ``**options`` available are defined below:

    * ``require_fields``: when the fields passed are not present, skip the provider
    * ``choose_random``: choose a single provider from the list
    * ``template_location``: Specification of where a required template lies in the yaml. If it
      is not found in the provider, a warning is printed and the test is not collected. The spec
      is a tuple or list where each item is a key to the next field (str or int).

    The following test function arguments are special:

        ``provider``
            the provider's CRUD object, either a :py:class:`cfme.cloud.provider.Provider`
            or a :py:class:`cfme.infrastructure.provider.Provider`

    Returns:
        A tuple of ``(argnames, argvalues, idlist)`` for use in a pytest_generate_tests hook, or
        with the :py:func:`parametrize` helper.

    Usage:

        # In the function itself
        def pytest_generate_tests(metafunc):
            argnames, argvalues, idlist = testgen.provider_by_type(
                ['openstack', 'ec2'],
                'type', 'name', 'credentials', 'provider', 'hosts'
            )
        metafunc.parametrize(argnames, argvalues, ids=idlist, scope='module')

        # Using the parametrize wrapper
        pytest_generate_tests = testgen.parametrize(testgen.provider_by_type, ['openstack', 'ec2'],
            'type', 'name', 'credentials', 'provider', 'hosts', scope='module')

    Note:

        Using the default 'function' scope, each test will be run individually for each provider
        before moving on to the next test. To group all tests related to single provider together,
        parametrize tests in the 'module' scope.

    Note:

        testgen for providers now requires the usage of test_flags for collection to work.
        Please visit http://cfme-tests.readthedocs.org/guides/documenting.html#documenting-tests
        for more details.

    """

    metafunc.function = pytest.mark.uses_testgen()(metafunc.function)

    argnames = list(fields)
    argvalues = []
    idlist = []
    template_location = options.pop("template_location", None)

    if 'provider' in metafunc.fixturenames and 'provider' not in argnames:
        argnames.append('provider')

    for provider, data in cfme_data.get('management_systems', {}).iteritems():

        # Check provider hasn't been filtered out with --use-provider
        if provider not in filtered:
            continue

        try:
            prov_obj = get_crud(provider)
        except UnknownProviderType:
            continue

        if not prov_obj:
            logger.debug(
                "Whilst trying to create an object for {} we failed".format(
                    provider))
            continue

        skip = False
        if provider_types is not None and prov_obj.type not in provider_types:
            # Skip unwanted types
            continue

        restricted_version = data.get('restricted_version', None)
        if restricted_version:
            logger.info('we found a restricted version')
            for op, comparator in _version_operator_map.items():
                # split string by op; if the split works, version won't be empty
                head, op, ver = restricted_version.partition(op)
                if not ver:  # This means that the operator was not found
                    continue
                if not comparator(version.current_version(), ver):
                    skip = True
                break
            else:
                raise Exception(
                    'Operator not found in {}'.format(restricted_version))

        # Test to see the test has meta data, if it does and that metadata contains
        # a test_flag kwarg, then check to make sure the provider contains that test_flag
        # if not, do not collect the provider for this particular test.

        # Obtain the tests flags
        meta = getattr(metafunc.function, 'meta', None)

        test_flags = getattr(meta, 'kwargs', {}) \
            .get('from_docs', {}).get('test_flag', '').split(',')
        if test_flags != ['']:
            test_flags = [flag.strip() for flag in test_flags]

            defined_flags = cfme_data.get('test_flags', '').split(',')
            defined_flags = [flag.strip() for flag in defined_flags]

            excluded_flags = data.get('excluded_test_flags', '').split(',')
            excluded_flags = [flag.strip() for flag in excluded_flags]

            allowed_flags = set(defined_flags) - set(excluded_flags)

            if set(test_flags) - allowed_flags:
                logger.info(
                    "Skipping Provider {} for test {} in module {} because "
                    "it does not have the right flags, "
                    "{} does not contain {}".format(
                        provider, metafunc.function.func_name,
                        metafunc.function.__module__, list(allowed_flags),
                        list(set(test_flags) - allowed_flags)))
                continue

        try:
            if prov_obj.type == "scvmm" and version.current_version() < "5.3":
                # Ignore SCVMM on 5.2
                continue
            if "since_version" in data:
                # Ignore providers that are not supported in this version yet
                if version.current_version() < data["since_version"]:
                    continue
        except Exception:  # No SSH connection
            continue

        # Get values for the requested fields, filling in with None for undefined fields
        data_values = {field: data.get(field, None) for field in fields}

        # Go through the values and handle the special 'data' name
        # report the undefined fields to the log
        for key in data_values.keys():
            if data_values[key] is None:
                if 'require_fields' not in options:
                    options['require_fields'] = True
                if options['require_fields']:
                    skip = True
                    logger.warning(
                        'Field "%s" not defined for provider "%s", skipping' %
                        (key, provider))
                else:
                    logger.debug(
                        'Field "%s" not defined for provider "%s", defaulting to None'
                        % (key, provider))
        if skip:
            continue

        # Check the template presence if requested
        if template_location is not None:
            o = data
            try:
                for field in template_location:
                    o = o[field]
            except (IndexError, KeyError):
                logger.info(
                    "Cannot apply {} to {} in the template specification, ignoring."
                    .format(repr(field), repr(o)))
            else:
                if not isinstance(o, basestring):
                    raise ValueError(
                        "{} is not a string! (for template)".format(repr(o)))
                templates = TEMPLATES.get(provider, None)
                if templates is not None:
                    if o not in templates:
                        logger.info(
                            "Wanted template {} on {} but it is not there!\n".
                            format(o, provider))
                        # Skip collection of this one
                        continue

        values = []
        for arg in argnames:
            if arg == 'provider':
                metafunc.function = pytest.mark.provider_related()(
                    metafunc.function)
                values.append(prov_obj)
            elif arg in data_values:
                values.append(data_values[arg])

        # skip when a required field is not present and options['require_fields'] is True
        argvalues.append(values)

        # Use the provider name for idlist, helps with readable parametrized test output
        idlist.append(provider)

    # pick a single provider if option['choose_random'] == True
    if 'choose_random' not in options:
        options['choose_random'] = False
    if idlist and options['choose_random']:
        single_index = idlist.index(random.choice(idlist))
        new_idlist = ['random_provider']
        new_argvalues = [argvalues[single_index]]
        logger.debug('Choosing random provider, "%s" selected' % idlist[single_index])
        return argnames, new_argvalues, new_idlist

    return argnames, argvalues, idlist
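
The `restricted_version` handling above leans on `str.partition` and a module-level `_version_operator_map` that is not shown. A self-contained sketch of the same parse; the operator set is a guess, ordered longest-first so '>' cannot shadow '>=', and plain strings stand in for the richer version objects the real code compares:

import operator

# Longest operators first so partition on '>' never shadows '>='
_version_operators = [('>=', operator.ge), ('<=', operator.le),
                      ('==', operator.eq), ('>', operator.gt),
                      ('<', operator.lt)]

def version_allowed(current, restricted):
    for op, comparator in _version_operators:
        head, op, ver = restricted.partition(op)
        if ver:  # operator found; the remainder is the version bound
            return comparator(current, ver.strip())
    raise Exception('Operator not found in {}'.format(restricted))

print(version_allowed('5.5', '>= 5.4'))  # True (string comparison here)
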
コード例 #52
0
def test_refresh_vms(appliance, request, scenario):
    """Refreshes all vm's then waits for a specific amount of time. Memory Monitor creates
    graphs and summary at the end of the scenario."""
    from_ts = int(time.time() * 1000)
    logger.debug('Scenario: {}'.format(scenario['name']))

    appliance.clean_appliance()

    quantifiers = {}
    scenario_data = {
        'appliance_ip': appliance.hostname,
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-refresh-vm',
        'test_name': 'Refresh VMs',
        'appliance_roles': ', '.join(roles_refresh_vms),
        'scenario': scenario
    }
    monitor_thread = SmemMemoryMonitor(appliance.ssh_client(), scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info(
            'Finished cleaning up monitoring thread in {}'.format(timediff))

    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts,
                                                  quantifiers, scenario_data))

    monitor_thread.start()

    appliance.wait_for_miq_server_workers_started(poll_interval=2)
    appliance.update_server_roles({role: True for role in roles_refresh_vms})
    for prov in scenario['providers']:
        get_crud(prov).create_rest()
    logger.info('Sleeping for refresh: {}s'.format(
        scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])

    full_refresh_threshold_set = False
    if 'full_refresh_threshold' in scenario:
        if scenario['full_refresh_threshold'] != FULL_REFRESH_THRESHOLD_DEFAULT:
            appliance.set_full_refresh_threshold(
                scenario['full_refresh_threshold'])
            full_refresh_threshold_set = True
    if not full_refresh_threshold_set:
        logger.debug('Keeping full_refresh_threshold at default ({}).'.format(
            FULL_REFRESH_THRESHOLD_DEFAULT))

    refresh_size = scenario['refresh_size']

    vms = appliance.rest_api.collections.vms.all
    vms_iter = cycle(vms)
    logger.debug('Number of VM IDs: {}'.format(len(vms)))

    # Variable amount of time for refresh workload
    total_time = scenario['total_time']
    starttime = time.time()
    time_between_refresh = scenario['time_between_refresh']
    total_refreshed_vms = 0

    while ((time.time() - starttime) < total_time):
        start_refresh_time = time.time()
        refresh_list = [next(vms_iter) for _ in range(refresh_size)]
        for vm in refresh_list:
            vm.action.reload()
        total_refreshed_vms += len(refresh_list)
        iteration_time = time.time()

        refresh_time = round(iteration_time - start_refresh_time, 2)
        elapsed_time = iteration_time - starttime
        logger.debug('Time to Queue VM Refreshes: {}'.format(refresh_time))
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2),
                                                 total_time))

        if refresh_time < time_between_refresh:
            wait_diff = time_between_refresh - refresh_time
            time_remaining = total_time - elapsed_time
            if (time_remaining > 0 and time_remaining < time_between_refresh):
                time.sleep(time_remaining)
            elif time_remaining > 0:
                time.sleep(wait_diff)
        else:
            logger.warn(
                'Time to Queue VM Refreshes ({}) exceeded time between '
                '({})'.format(refresh_time, time_between_refresh))

    quantifiers['Elapsed_Time'] = round(time.time() - starttime, 2)
    quantifiers['Queued_VM_Refreshes'] = total_refreshed_vms
    logger.info('Test Ending...')
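
The `cycle` idiom that test_refresh_vms uses to batch VMs round-robin is easier to see in isolation; the VM ids below are fake:

from itertools import cycle

vms = ['vm-1', 'vm-2', 'vm-3', 'vm-4', 'vm-5']
vms_iter = cycle(vms)
refresh_size = 2

for _ in range(3):
    batch = [next(vms_iter) for _ in range(refresh_size)]
    print(batch)
# ['vm-1', 'vm-2'], then ['vm-3', 'vm-4'], then ['vm-5', 'vm-1'] -- wraps around
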
コード例 #53
0
ファイル: events.py プロジェクト: MattLombana/cfme_tests
def setup_for_event_testing(ssh_client, db, listener_info, providers):
    domain_name = "EventTesting"
    domain = Domain(name=domain_name, enabled=True)
    if not domain.exists():
        domain.create()

    # FIX THE ENV ERROR IF PRESENT
    if ssh_client.run_command("ruby -v")[0] != 0:
        logger.info("Pathing env to correctly source EVM environment")
        success = ssh_client.run_command("echo 'source /etc/default/evm' >> .bashrc")[0] == 0
        assert success, "Issuing the patch command was unsuccessful"
        # Verify it works
        assert ssh_client.run_command("ruby -v")[0] == 0, "Patch failed"

    # INSTALL REST-CLIENT - REQUIRED FOR THE EVENT DISPATCHER SCRIPT
    if ssh_client.run_rails_command("\"require 'rest-client'\"")[0] != 0:
        # We have to install the gem
        logger.info("Installing rest-client ruby gem that is required by the event dispatcher.")
        success = ssh_client.run_command("gem install rest-client")[0] == 0
        assert success, "Could not install 'rest-client' gem"
        # Verify it works
        assert ssh_client.run_rails_command("\"require 'rest-client'\"")[0] == 0

    # IMPORT AUTOMATE NAMESPACE
    qe_automate_namespace_xml = "qe_event_handler.xml"
    qe_automate_namespace_script = "qe_event_handler.rb"
    local_automate_script = local(__file__)\
        .new(basename="../data/{}".format(qe_automate_namespace_script))\
        .strpath
    local_automate_file = local(__file__)\
        .new(basename="../data/{}".format(qe_automate_namespace_xml))\
        .strpath
    tmp_automate_file = "/tmp/{}".format(qe_automate_namespace_xml)

    # Change the information
    with open(local_automate_file, "r") as input_xml, \
            open(tmp_automate_file, "w") as output_xml:
        tree = etree.parse(input_xml)
        root = tree.getroot()

        def set_text(xpath, text):
            field = root.xpath(xpath)
            assert len(field) == 1
            field[0].text = text
        set_text("//MiqAeSchema/MiqAeField[@name='url']",
                 re.sub(r"^http://([^/]+)/?$", "\\1", listener_info.host))
        set_text("//MiqAeSchema/MiqAeField[@name='port']", str(listener_info.port))

        # Put the custom script from an external file
        with open(local_automate_script, "r") as script:
            set_text("//MiqAeMethod[@name='relay_events']",
                     etree.CDATA(script.read()))

        et = etree.ElementTree(root)
        et.write(output_xml)

    # copy xml file to appliance
    # but before that, let's check whether it's there because we may have already applied this file
    if ssh_client.run_command("ls /root/{}".format(qe_automate_namespace_xml))[0] != 0:
        ssh_client.put_file(tmp_automate_file, '/root/')

        # We have to convert it first for new version
        convert_cmd = version.pick({
            version.LOWEST: None,

            "5.3.0.0":
            "evm:automate:convert DOMAIN={} FILE=/root/{} ZIP_FILE=/root/{}.zip".format(
                domain_name, qe_automate_namespace_xml, qe_automate_namespace_xml),
        })
        if convert_cmd is not None:
            logger.info("Converting namespace for use on newer appliance...")
            return_code, stdout = ssh_client.run_rake_command(convert_cmd)
            if return_code != 0:
                logger.error("Namespace conversion was unsuccessful")
                logger.error(stdout)
                # We didn't successfully do that so remove the file to know
                # that it's needed to do it again when run again
                ssh_client.run_command("rm -f /root/{}*".format(qe_automate_namespace_xml))
                raise AutomateImportError(stdout)

        # run rake cmd on appliance to import automate namespace
        rake_cmd = version.pick({
            version.LOWEST: "evm:automate:import FILE=/root/{}".format(qe_automate_namespace_xml),

            "5.3.0.0":
            "evm:automate:import ZIP_FILE=/root/{}.zip DOMAIN={} OVERWRITE=true "
            "PREVIEW=false".format(qe_automate_namespace_xml, domain_name),
        })
        logger.info("Importing the QE Automation namespace ...")
        return_code, stdout = ssh_client.run_rake_command(rake_cmd)
        if return_code != 0:
            logger.error("Namespace import was unsuccessful")
            logger.error(stdout)
            # We didn't successfully do that so remove the file to know
            # that it's needed to do it again when run again
            ssh_client.run_command("rm -f /root/{}*".format(qe_automate_namespace_xml))
            raise AutomateImportError(stdout)

    # CREATE AUTOMATE INSTANCE HOOK
    if db is None or db.session.query(db['miq_ae_instances'].name)\
            .filter(db['miq_ae_instances'].name == "RelayEvents").count() == 0:
        original_class = Class(
            name=version.pick({
                version.LOWEST: "Automation Requests (Request)",
                "5.3": "Request"
            }),
            namespace=Namespace("System", domain=Domain("ManageIQ (Locked)")))
        copied_class = original_class.copy_to(domain)
        instance = Instance(
            name="RelayEvents",
            display_name="RelayEvents",
            description="relationship hook to link to custom QE events relay namespace",
            values={
                "rel2": {
                    "value": "/QE/Automation/APIMethods/relay_events?event=$evm.object['event']"
                }
            },
            cls=copied_class,
        )
        instance.create()

    # IMPORT POLICIES
    policy_yaml = "profile_relay_events.yaml"
    policy_path = local(__file__).new(basename="../data/{}".format(policy_yaml))
    if not is_imported("Automate event policies"):
        import_file(policy_path.strpath)

    # ASSIGN POLICY PROFILES
    for provider in providers:
        prov_obj = get_crud(provider)
        if not prov_obj.exists:
            prov_obj.create()
        prov_obj.assign_policy_profiles("Automate event policies")
        flash.assert_no_errors()
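
The XML rewriting above is easier to follow against a throwaway document. A tiny sketch of the same `set_text` helper (assumes lxml, which the `etree.CDATA` call above implies; the element names are trimmed-down stand-ins):

from lxml import etree

root = etree.fromstring(
    '<MiqAeSchema><MiqAeField name="url">old-host</MiqAeField></MiqAeSchema>')

def set_text(xpath, text):
    field = root.xpath(xpath)
    assert len(field) == 1, 'expected exactly one node for %s' % xpath
    field[0].text = text

set_text("//MiqAeField[@name='url']", 'listener.example.com')
print(etree.tostring(root))
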
コード例 #54
0
def test_provisioning(appliance, request, scenario):
    """Runs through provisioning scenarios using the REST API to
    continuously provision a VM for a specified period of time.
    Memory Monitor creates graphs and summary at the end of each scenario."""

    from_ts = int(time.time() * 1000)
    logger.debug('Scenario: {}'.format(scenario['name']))

    appliance.clean_appliance()

    quantifiers = {}
    scenario_data = {
        'appliance_ip': appliance.hostname,
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-provisioning',
        'test_name': 'Provisioning',
        'appliance_roles': ', '.join(roles_provisioning),
        'scenario': scenario
    }
    monitor_thread = SmemMemoryMonitor(appliance.ssh_client(), scenario_data)

    provision_order = []

    def cleanup_workload(scenario, from_ts, vms_to_cleanup, quantifiers,
                         scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        appliance.update_server_roles(
            {role: True
             for role in roles_provisioning_cleanup})
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        final_vm_size = len(vms_to_cleanup)
        appliance.rest_api.collections.vms.action.delete(vms_to_cleanup)
        monitor_thread.join()
        logger.info(
            '{} VMs were left over, and {} VMs were deleted in the finalizer.'.
            format(final_vm_size, final_vm_size - len(vms_to_cleanup)))
        logger.info(
            'The following VMs were left over after the test: {}'.format(
                vms_to_cleanup))
        quantifiers['VMs_To_Delete_In_Finalizer'] = final_vm_size
        quantifiers['VMs_Deleted_In_Finalizer'] = final_vm_size - len(
            vms_to_cleanup)
        quantifiers['Leftover_VMs'] = vms_to_cleanup
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info(
            'Finished cleaning up monitoring thread in {}'.format(timediff))

    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts, vm_name,
                                                  quantifiers, scenario_data))

    monitor_thread.start()

    appliance.wait_for_miq_server_workers_started(poll_interval=2)
    appliance.update_server_roles({role: True for role in roles_provisioning})
    prov = get_crud(scenario['providers'][0])
    prov.create_rest()
    logger.info('Sleeping for Refresh: {}s'.format(
        scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])

    guid_list = prov.get_template_guids(scenario['templates'])
    guid_cycle = cycle(guid_list)
    cleanup_size = scenario['cleanup_size']
    number_of_vms = scenario['number_of_vms']
    total_time = scenario['total_time']
    time_between_provision = scenario['time_between_provision']
    total_provisioned_vms = 0
    total_deleted_vms = 0
    provisioned_vms = 0
    starttime = time.time()

    while ((time.time() - starttime) < total_time):
        start_iteration_time = time.time()
        provision_list = []
        for i in range(number_of_vms):
            total_provisioned_vms += 1
            provisioned_vms += 1
            vm_to_provision = '{}-provision-{}'.format(
                test_ts,
                str(total_provisioned_vms).zfill(4))
            guid_to_provision, provider_name = next(guid_cycle)
            provision_order.append((vm_to_provision, provider_name))
            provision_list.append((vm_to_provision, guid_to_provision,
                                   prov.data['provisioning']['vlan']))

        template = prov.data.get('small_template')
        provision_data = get_provision_data(appliance.rest_api, prov, template)
        vm_name = provision_data["vm_fields"]["vm_name"]
        response = appliance.rest_api.collections.provision_requests.action.create(
            **provision_data)
        assert appliance.rest_api.response.status_code == 200
        provision_request = response[0]

        def _finished():
            provision_request.reload()
            if "error" in provision_request.status.lower():
                pytest.fail("Error when provisioning: `{}`".format(
                    provision_request.message))
            return provision_request.request_state.lower() in ("finished",
                                                               "provisioned")

        wait_for(_finished,
                 num_sec=800,
                 delay=5,
                 message="REST provisioning finishes")

        vm = appliance.rest_api.collections.vms.get(name=vm_name)
        creation_time = time.time()
        provision_time = round(creation_time - start_iteration_time, 2)
        logger.debug(
            'Time to initiate provisioning: {}'.format(provision_time))
        logger.info('{} VMs provisioned so far'.format(total_provisioned_vms))

        if provisioned_vms > cleanup_size * len(scenario['providers']):
            start_remove_time = time.time()
            if appliance.rest_api.collections.vms.action.delete(vm):
                provision_order.pop(0)
                provisioned_vms -= 1
                total_deleted_vms += 1
            deletion_time = round(time.time() - start_remove_time, 2)
            logger.debug('Time to initiate deleting: {}'.format(deletion_time))
            logger.info('{} VMs deleted so far'.format(total_deleted_vms))

        end_iteration_time = time.time()
        iteration_time = round(end_iteration_time - start_iteration_time, 2)
        elapsed_time = end_iteration_time - starttime
        logger.debug('Time to initiate provisioning and deletion: {}'.format(
            iteration_time))
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2),
                                                 total_time))

        if iteration_time < time_between_provision:
            wait_diff = time_between_provision - iteration_time
            time_remaining = total_time - elapsed_time
            if (time_remaining > 0
                    and time_remaining < time_between_provision):
                time.sleep(time_remaining)
            elif time_remaining > 0:
                time.sleep(wait_diff)
        else:
            logger.warn(
                'Time to initiate provisioning ({}) exceeded time between '
                '({})'.format(iteration_time, time_between_provision))

    quantifiers['Elapsed_Time'] = round(time.time() - starttime, 2)
    quantifiers['Queued_VM_Provisionings'] = total_provisioned_vms
    quantifiers['Deleted_VMs'] = total_deleted_vms
    logger.info(
        'Provisioned {} VMs and deleted {} VMs during the scenario.'.format(
            total_provisioned_vms, total_deleted_vms))
    logger.info('Test Ending...')
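
The provisioning test polls its request with a `wait_for` utility. A stripped-down stand-in showing the polling contract assumed above (the real utility returns richer results, such as elapsed time):

import time

def wait_for(condition, num_sec=60, delay=5, message=''):
    # Poll condition() until it returns truthy or num_sec elapses.
    start = time.time()
    while time.time() - start < num_sec:
        if condition():
            return
        time.sleep(delay)
    raise RuntimeError('Timed out waiting for: {}'.format(message))

wait_for(lambda: True, num_sec=1, delay=0.1, message='no-op check')
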
コード例 #55
0
ファイル: testgen.py プロジェクト: vnugent/cfme_tests
def provider_by_type(metafunc, provider_types, required_fields=None):
    """Get the values of the named field keys from ``cfme_data.get('management_systems', {})``

    ``required_fields`` is special and can take many forms, it is used to ensure that
    yaml data is present for a particular key, or path of keys, and can even validate
    the values as long as they are not None.


    Args:
        provider_types: A list of provider types to include. If None, all providers are considered

    Returns:
        A tuple of ``(argnames, argvalues, idlist)`` for use in a pytest_generate_tests hook, or
        with the :py:func:`parametrize` helper.

    Usage:

        # In the function itself
        def pytest_generate_tests(metafunc):
            argnames, argvalues, idlist = testgen.provider_by_type(
                ['openstack', 'ec2'], required_fields=['provisioning']
            )
        metafunc.parametrize(argnames, argvalues, ids=idlist, scope='module')

        # Using the parametrize wrapper
        pytest_generate_tests = testgen.parametrize(testgen.provider_by_type, ['openstack', 'ec2'],
            scope='module')

        # Using required_fields

        # Ensures that ``provisioning`` exists as a yaml field
        testgen.provider_by_type(
            ['openstack', 'ec2'], required_fields=['provisioning']
        )

        # Ensures that ``provisioning`` exists as a yaml field and has another field in it called
        # ``host``
        testgen.provider_by_type(
            ['openstack', 'ec2'], required_fields=[['provisioning', 'host']]
        )

        # Ensures that ``powerctl`` exists as a yaml field and has a value 'True'
        testgen.provider_by_type(
            ['openstack', 'ec2'], required_fields=[('powerctl', True)]
        )



    Note:

        Using the default 'function' scope, each test will be run individually for each provider
        before moving on to the next test. To group all tests related to single provider together,
        parametrize tests in the 'module' scope.

    Note:

        testgen for providers now requires the usage of test_flags for collection to work.
        Please visit http://cfme-tests.readthedocs.org/guides/documenting.html#documenting-tests
        for more details.

    """

    metafunc.function = pytest.mark.uses_testgen()(metafunc.function)

    argnames = []
    argvalues = []
    idlist = []

    if 'provider' in metafunc.fixturenames and 'provider' not in argnames:
        argnames.append('provider')

    for provider in cfme_data.get('management_systems', {}):

        # Check provider hasn't been filtered out with --use-provider
        if provider not in filtered:
            continue

        try:
            prov_obj = get_crud(provider)
        except UnknownProviderType:
            continue

        if not prov_obj:
            logger.debug("Whilst trying to create an object for %s we failed", provider)
            continue

        if provider_types is not None and prov_obj.type not in provider_types:
            continue

        # Run through all the testgen skip fns
        skip = False
        skip_fns = [_skip_restricted_version, _check_required_fields, _skip_test_flags,
            _skip_since_version]
        for fn in skip_fns:
            if fn(prov_obj.data, metafunc, required_fields):
                skip = True
                break
        if skip:
            continue

        # required-field and flag checks passed; collect this provider
        argvalues.append([prov_obj])

        # Use the provider name for idlist, helps with readable parametrized test output
        idlist.append(provider)

    return argnames, argvalues, idlist
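
コード例 #55 and #56 delegate field validation to `_check_required_fields`, which is not shown. A hypothetical sketch matching the three `required_fields` forms documented above (`metafunc` is accepted but unused here; the real helper lives elsewhere in testgen.py):

def _check_required_fields(provider_data, metafunc, required_fields):
    # Each entry may be a bare key, a path of keys, or a tuple whose
    # last item is an expected value.
    for field in (required_fields or []):
        if isinstance(field, tuple):
            keys, expected = list(field[:-1]), field[-1]
        elif isinstance(field, list):
            keys, expected = list(field), None
        else:
            keys, expected = [field], None
        node = provider_data
        try:
            for key in keys:
                node = node[key]
        except (KeyError, IndexError, TypeError):
            return True   # missing field -> uncollect this provider
        if expected is not None and node != expected:
            return True   # wrong value -> uncollect this provider
    return False
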
コード例 #56
0
ファイル: testgen.py プロジェクト: pavelzag/cfme_tests
def provider_by_type(metafunc, provider_types, required_fields=None):
    """Get the values of the named field keys from ``cfme_data.get('management_systems', {})``

    ``required_fields`` is special and can take many forms, it is used to ensure that
    yaml data is present for a particular key, or path of keys, and can even validate
    the values as long as they are not None.


    Args:
        provider_types: A list of provider types to include. If None, all providers are considered

    Returns:
        A tuple of ``(argnames, argvalues, idlist)`` for use in a pytest_generate_tests hook, or
        with the :py:func:`parametrize` helper.

    Usage:

        # In the function itself
        def pytest_generate_tests(metafunc):
            argnames, argvalues, idlist = testgen.provider_by_type(
                ['openstack', 'ec2'], required_fields=['provisioning']
            )
        metafunc.parametrize(argnames, argvalues, ids=idlist, scope='module')

        # Using the parametrize wrapper
        pytest_generate_tests = testgen.parametrize(testgen.provider_by_type, ['openstack', 'ec2'],
            scope='module')

        # Using required_fields

        # Ensures that ``provisioning`` exists as a yaml field
        testgen.provider_by_type(
            ['openstack', 'ec2'], required_fields=['provisioning']
        )

        # Ensures that ``provisioning`` exists as a yaml field and has another field in it called
        # ``host``
        testgen.provider_by_type(
            ['openstack', 'ec2'], required_fields=[['provisioning', 'host']]
        )

        # Ensures that ``powerctl`` exists as a yaml field and has a value 'True'
        testgen.provider_by_type(
            ['openstack', 'ec2'], required_fields=[('powerctl', True)]
        )



    Note:

        Using the default 'function' scope, each test will be run individually for each provider
        before moving on to the next test. To group all tests related to single provider together,
        parametrize tests in the 'module' scope.

    Note:

        testgen for providers now requires the usage of test_flags for collection to work.
        Please visit http://cfme-tests.readthedocs.org/guides/documenting.html#documenting-tests
        for more details.

    """

    argnames = []
    argvalues = []
    idlist = []

    for provider in cfme_data.get('management_systems', {}):

        # Check provider hasn't been filtered out with --use-provider
        if provider not in filtered:
            continue

        try:
            prov_obj = get_crud(provider)
        except UnknownProviderType:
            continue

        if not prov_obj:
            logger.debug("Whilst trying to create an object for %s we failed",
                         provider)
            continue

        if provider_types is not None:
            if not (prov_obj.type_tclass in provider_types
                    or prov_obj.type_name in provider_types):
                continue

        # Run through all the testgen uncollect fns
        uncollect = False
        uncollect_fns = [
            _uncollect_restricted_version, _check_required_fields,
            _uncollect_test_flags, _uncollect_since_version
        ]
        for fn in uncollect_fns:
            if fn(prov_obj.data, metafunc, required_fields):
                uncollect = True
                break
        if uncollect:
            continue

        if 'provider' in metafunc.fixturenames and 'provider' not in argnames:
            metafunc.function = pytest.mark.uses_testgen()(metafunc.function)
            argnames.append('provider')

        # required-field and flag checks passed; collect this provider
        argvalues.append([prov_obj])

        # Use the provider name for idlist, helps with readable parametrized test output
        idlist.append(provider)

    return argnames, argvalues, idlist
コード例 #58
0
ファイル: events.py プロジェクト: vprusa/cfme_tests
def setup_for_event_testing(ssh_client, db, listener_info, providers):
    domain_name = "EventTesting"
    domain = Domain(name=domain_name, enabled=True)
    if not domain.exists():
        domain.create()

    # FIX THE ENV ERROR IF PRESENT
    if ssh_client.run_command("ruby -v")[0] != 0:
        logger.info("Pathing env to correctly source EVM environment")
        success = ssh_client.run_command(
            "echo 'source /etc/default/evm' >> .bashrc")[0] == 0
        assert success, "Issuing the patch command was unsuccessful"
        # Verify it works
        assert ssh_client.run_command("ruby -v")[0] == 0, "Patch failed"

    # INSTALL REST-CLIENT - REQUIRED FOR THE EVENT DISPATCHER SCRIPT
    if ssh_client.run_rails_command("\"require 'rest-client'\"")[0] != 0:
        # We have to install the gem
        logger.info(
            "Installing rest-client ruby gem that is required by the event dispatcher."
        )
        success = ssh_client.run_command("gem install rest-client")[0] == 0
        assert success, "Could not install 'rest-client' gem"
        # Verify it works
        assert ssh_client.run_rails_command(
            "\"require 'rest-client'\"")[0] == 0

    # IMPORT AUTOMATE NAMESPACE
    qe_automate_namespace_xml = "qe_event_handler.xml"
    qe_automate_namespace_script = "qe_event_handler.rb"
    local_automate_script = local(__file__)\
        .new(basename="../data/{}".format(qe_automate_namespace_script))\
        .strpath
    local_automate_file = local(__file__)\
        .new(basename="../data/{}".format(qe_automate_namespace_xml))\
        .strpath
    tmp_automate_file = "/tmp/{}".format(qe_automate_namespace_xml)

    # Change the information
    with open(local_automate_file, "r") as input_xml, \
            open(tmp_automate_file, "w") as output_xml:
        tree = etree.parse(input_xml)
        root = tree.getroot()

        def set_text(xpath, text):
            field = root.xpath(xpath)
            assert len(field) == 1
            field[0].text = text

        set_text("//MiqAeSchema/MiqAeField[@name='url']",
                 re.sub(r"^http://([^/]+)/?$", "\\1", listener_info.host))
        set_text("//MiqAeSchema/MiqAeField[@name='port']",
                 str(listener_info.port))

        # Put the custom script from an external file
        with open(local_automate_script, "r") as script:
            set_text("//MiqAeMethod[@name='relay_events']",
                     etree.CDATA(script.read()))

        et = etree.ElementTree(root)
        et.write(output_xml)

    # copy xml file to appliance
    # but before that, let's check whether it's there because we may have already applied this file
    if ssh_client.run_command(
            "ls /root/{}".format(qe_automate_namespace_xml))[0] != 0:
        ssh_client.put_file(tmp_automate_file, '/root/')

        # We have to convert it first for new version
        convert_cmd = version.pick({
            version.LOWEST:
            None,
            "5.3.0.0":
            "evm:automate:convert DOMAIN={} FILE=/root/{} ZIP_FILE=/root/{}.zip"
            .format(domain_name, qe_automate_namespace_xml,
                    qe_automate_namespace_xml),
        })
        if convert_cmd is not None:
            logger.info("Converting namespace for use on newer appliance...")
            return_code, stdout = ssh_client.run_rake_command(convert_cmd)
            if return_code != 0:
                logger.error("Namespace conversion was unsuccessful")
                logger.error(stdout)
                # We didn't successfully do that so remove the file to know
                # that it's needed to do it again when run again
                ssh_client.run_command(
                    "rm -f /root/{}*".format(qe_automate_namespace_xml))
                raise AutomateImportError(stdout)

        # run rake cmd on appliance to import automate namespace
        rake_cmd = version.pick({
            version.LOWEST:
            "evm:automate:import FILE=/root/{}".format(
                qe_automate_namespace_xml),
            "5.3.0.0":
            "evm:automate:import ZIP_FILE=/root/{}.zip DOMAIN={} OVERWRITE=true "
            "PREVIEW=false".format(qe_automate_namespace_xml, domain_name),
        })
        logger.info("Importing the QE Automation namespace ...")
        return_code, stdout = ssh_client.run_rake_command(rake_cmd)
        if return_code != 0:
            logger.error("Namespace import was unsuccessful")
            logger.error(stdout)
            # We didn't successfully do that so remove the file to know
            # that it's needed to do it again when run again
            ssh_client.run_command(
                "rm -f /root/{}*".format(qe_automate_namespace_xml))
            raise AutomateImportError(stdout)

    # CREATE AUTOMATE INSTANCE HOOK
    if db is None or db.session.query(db['miq_ae_instances'].name)\
            .filter(db['miq_ae_instances'].name == "RelayEvents").count() == 0:
        original_class = Class(name=version.pick({
            version.LOWEST: "Automation Requests (Request)",
            "5.3": "Request"
        }),
                               namespace=Namespace(
                                   "System",
                                   domain=Domain("ManageIQ (Locked)")))
        copied_class = original_class.copy_to(domain)
        instance = Instance(
            name="RelayEvents",
            display_name="RelayEvents",
            description=
            "relationship hook to link to custom QE events relay namespace",
            values={
                "rel2": {
                    "value":
                    "/QE/Automation/APIMethods/relay_events?event=$evm.object['event']"
                }
            },
            cls=copied_class,
        )
        instance.create()

    # IMPORT POLICIES
    policy_yaml = "profile_relay_events.yaml"
    policy_path = local(__file__).new(
        basename="../data/{}".format(policy_yaml))
    if not is_imported("Automate event policies"):
        import_file(policy_path.strpath)

    # ASSIGN POLICY PROFILES
    for provider in providers:
        prov_obj = get_crud(provider)
        if not prov_obj.exists:
            prov_obj.create()
        prov_obj.assign_policy_profiles("Automate event policies")
        flash.assert_no_errors()
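
Both events.py variants rely on `version.pick` to choose a rake command by appliance version. A stand-in sketch under the assumptions that `version.LOWEST` sorts like '0' and that keys compare numerically (the real utils.version compares richer version objects):

def _v(s):
    # '5.3.0.0' -> (5, 3, 0, 0); version.LOWEST is modeled as '0'
    return tuple(int(p) for p in s.split('.'))

def pick(mapping, current):
    # Choose the value whose version key is the highest one that is
    # not above the current appliance version.
    eligible = [k for k in mapping if _v(k) <= _v(current)]
    return mapping[max(eligible, key=_v)] if eligible else None

print(pick({'0': 'FILE=...', '5.3.0.0': 'ZIP_FILE=...'}, '5.4.1.2'))
# -> 'ZIP_FILE=...'
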