Example #1
def update_password(self, new_password, verify_password=None, user=None, method=LOGIN_METHODS[1]):
    if not user:
        username = conf.credentials['default']['username']
        password = conf.credentials['default']['password']
        cred = Credential(principal=username, secret=password)
        user = self.appliance.collections.users.instantiate(credential=cred, name='Administrator')

    logged_in_view = self.appliance.browser.create_view(BaseLoggedInPage)

    if not logged_in_view.logged_in_as_user(user):
        if logged_in_view.logged_in:
            logged_in_view.logout()

        from cfme.utils.appliance.implementations.ui import navigate_to
        login_view = navigate_to(self.appliance.server, 'LoginScreen')

        logger.debug('Changing password for user %s', user.credential.principal)

        login_view.update_password(user=user,
                                   new_password=new_password,
                                   verify_password=verify_password,
                                   method=method)

        try:
            assert logged_in_view.is_displayed
        except AssertionError:
            login_view.flash.assert_no_error()

    return logged_in_view
Example #2
def configure_auth(appliance, auth_mode, auth_provider, user_type, request, fix_missing_hostname):
    """Given auth_mode, auth_provider, user_type parametrization, configure auth for login
    testing.

    Saves original auth settings
    Configures external or internal auth modes
    Separate freeipa / openldap config methods and finalizers
    Restores original auth settings after yielding
    """
    original_config = appliance.server.authentication.auth_settings
    logger.debug('Original auth settings before configure_auth fixture: %r', original_config)
    if auth_mode.lower() != 'external':
        appliance.server.authentication.configure(auth_mode=auth_mode,
                                                  auth_provider=auth_provider,
                                                  user_type=user_type)
    elif auth_mode.lower() == 'external':  # extra explicit
        if auth_provider.auth_type == 'freeipa':
            appliance.configure_freeipa(auth_provider)
            request.addfinalizer(appliance.disable_freeipa)
        elif auth_provider.auth_type == 'openldaps':
            appliance.configure_openldap(auth_provider)
            request.addfinalizer(appliance.disable_openldap)

    # Auth reconfigure is super buggy and sensitive to timing
    # Just waiting for sssd to be running, or restarting httpd, isn't sufficient
    sleep(30)
    yield
    # return to original auth config
    appliance.server.authentication.auth_settings = original_config
    appliance.httpd.restart()
    appliance.wait_for_web_ui()
Example #3
def messages_to_statistics_csv(messages, statistics_file_name):
    all_statistics = []
    for msg_id in messages:
        msg = messages[msg_id]

        added = False
        if len(all_statistics) > 0:
            for msg_statistics in all_statistics:
                if msg_statistics.cmd == msg.msg_cmd:

                    if msg.del_time > 0:
                        msg_statistics.delivertimes.append(float(msg.del_time))
                        msg_statistics.gets += 1
                    msg_statistics.dequeuetimes.append(float(msg.deq_time))
                    msg_statistics.totaltimes.append(float(msg.total_time))
                    msg_statistics.puts += 1
                    added = True
                    break

        if not added:
            msg_statistics = MiqMsgLists()
            msg_statistics.cmd = msg.msg_cmd
            if msg.del_time > 0:
                msg_statistics.delivertimes.append(float(msg.del_time))
                msg_statistics.gets = 1
            msg_statistics.dequeuetimes.append(float(msg.deq_time))
            msg_statistics.totaltimes.append(float(msg.total_time))
            msg_statistics.puts = 1
            all_statistics.append(msg_statistics)

    csvdata_path = log_path.join('csv_output', statistics_file_name)
    outputfile = csvdata_path.open('w', ensure=True)

    try:
        csvfile = csv.writer(outputfile)
        metrics = ['samples', 'min', 'avg', 'median', 'max', 'std', '90', '99']
        measurements = ['deq_time', 'del_time', 'total_time']
        headers = ['cmd', 'puts', 'gets']
        for measurement in measurements:
            for metric in metrics:
                headers.append('{}_{}'.format(measurement, metric))

        csvfile.writerow(headers)

        # Contents of CSV
        for msg_statistics in sorted(all_statistics, key=lambda x: x.cmd):
            if msg_statistics.gets > 1:
                logger.debug('Samples/Avg/90th/Std: %s: %s : %s : %s,Cmd: %s',
                    str(len(msg_statistics.totaltimes)).rjust(7),
                    str(round(numpy.average(msg_statistics.totaltimes), 3)).rjust(7),
                    str(round(numpy.percentile(msg_statistics.totaltimes, 90), 3)).rjust(7),
                    str(round(numpy.std(msg_statistics.totaltimes), 3)).rjust(7),
                    msg_statistics.cmd)
            stats = [msg_statistics.cmd, msg_statistics.puts, msg_statistics.gets]
            stats.extend(generate_statistics(msg_statistics.dequeuetimes, 3))
            stats.extend(generate_statistics(msg_statistics.delivertimes, 3))
            stats.extend(generate_statistics(msg_statistics.totaltimes, 3))
            csvfile.writerow(stats)
    finally:
        outputfile.close()
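The generate_statistics helper used above is not shown in this example. A minimal sketch, assuming it returns one rounded value per metric in the same order as the CSV headers (samples, min, avg, median, max, std, 90, 99):

import numpy

def generate_statistics(values, round_digits=3):
    # Hypothetical reconstruction of the helper called above; returns one
    # value per metric, matching the header order built by the CSV writer.
    if not values:
        return [0, 0, 0, 0, 0, 0, 0, 0]
    return [
        len(values),
        round(float(numpy.amin(values)), round_digits),
        round(float(numpy.average(values)), round_digits),
        round(float(numpy.median(values)), round_digits),
        round(float(numpy.amax(values)), round_digits),
        round(float(numpy.std(values)), round_digits),
        round(float(numpy.percentile(values, 90)), round_digits),
        round(float(numpy.percentile(values, 99)), round_digits),
    ]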
Example #4
    def is_displayed(self):
        """Accounts for both Provider and HostCollection contexts"""
        from cfme.modeling.base import BaseEntity, BaseCollection
        expected_title = "{} (All Managed Hosts)"
        obj = self.context['object']
        is_entity = getattr(obj, 'name', False) and isinstance(obj, BaseEntity)
        is_filtered = isinstance(obj, BaseCollection) and obj.filters  # empty dict on not filtered
        filter = obj.filters.get('parent') or obj.filters.get('provider') if is_filtered else None

        # could condense the following logic in a more pythonic way, but would lose the logging
        if is_entity:
            # object has name attribute and is BaseEntity derived, assuming its a provider
            logger.debug('Hosts view context object is assumed to be provider: %r', obj)
            matched_title = self.title.text == expected_title.format(obj.name)
        elif filter and hasattr(filter, 'name'):
            # filtered collection, use filter object's name
            logger.debug(
                'Hosts view context object has filter related to view with name attribute: %r',
                obj.filters
            )
            matched_title = self.title.text == expected_title.format(filter.name)
        else:
            matched_title = False  # not an entity with a name, or a filtered collection

        return (
            matched_title and
            self.navigation.currently_selected == ['Compute', 'Infrastructure', 'Providers']
        )
Example #5
def checkbox(loc, set_to=False):
    """
    Checks or unchecks a given checkbox

    Finds the element given by loc and checks or unchecks it as requested

    Args:
        loc: The locator of the element
        set_to: The value the checkbox should represent as a bool (or None to do nothing)

    Returns: Previous state of the checkbox
    """
    if set_to is not None:
        el = move_to_element(loc)
        if el.tag_name == 'img':
            # Yeah, CFME sometimes uses images for check boxen. *sigh*
            # item_chk0 = unchecked, item_chk1 = checked
            selected = 'item_chk1' in el.get_attribute('src')
        else:
            selected = el.is_selected()

        if selected is not set_to:
            logger.debug("Setting checkbox to {}".format(set_to))
            click(el)
        return selected
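Usage is a single call; the returned previous state lets a caller restore the checkbox afterwards. A sketch with a hypothetical locator:

# Hypothetical locator; set_to=True checks the box and returns its prior state
previous = checkbox('//input[@id="dhcp_enabled"]', set_to=True)
# ... interact with the page while the box is checked ...
checkbox('//input[@id="dhcp_enabled"]', set_to=previous)  # restore prior state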
Example #6
def test_provider_filter_save_and_load(rails_delete_filter, advanced_search_view):
    """
    Polarion:
        assignee: anikifor
        casecomponent: WebUI
        caseimportance: medium
        initialEstimate: 1/10h
    """
    # bind filter_name to the function for fixture cleanup
    test_provider_filter_save_and_load.filter_name = fauxfactory.gen_alphanumeric()
    logger.debug('Set filter_name to: {}'.format(test_provider_filter_save_and_load.filter_name))

    # Save filter
    assert advanced_search_view.entities.search.save_filter(
        "fill_count(Infrastructure Provider.VMs, >, 0)",
        test_provider_filter_save_and_load.filter_name)
    advanced_search_view.flash.assert_no_error()

    # Reset filter
    assert advanced_search_view.entities.search.reset_filter()

    # Load filter
    assert advanced_search_view.entities.search.load_filter(
        test_provider_filter_save_and_load.filter_name
    )
    advanced_search_view.flash.assert_no_error()
Example #7
def delete_stale_sa(provider, text_to_match):
    """ Checks global Security Context Constrains for stale Service Account records created during
        appliance deployment and removes such records if appliance doesn't exist any longer

    Args:
        provider: provider object
        text_to_match: (list) regexes that the SA namespace should match

    Returns: None
    """
    logger.info("Checking scc in provider %s", provider.key)
    for scc_name in global_scc_names:
        scc = provider.mgmt.get_scc(scc_name)
        if not scc.users:
            logger.info("nothing to check. scc %s is empty", scc_name)
            continue
        for sa in scc.users:
            sa_namespace, sa_name = sa.split(':')[-2:]
            if match(text_to_match, sa_namespace) and not provider.mgmt.does_vm_exist(sa_namespace):
                logger.info('removing sa %s from scc %s', sa, scc_name)
                provider.mgmt.remove_sa_from_scc(scc_name=scc_name, namespace=sa_namespace,
                                                 sa=sa_name)
            else:
                logger.debug("skipping sa %s in scc %s because project exists "
                             "or it doesn't match any pattern", sa, scc_name)
Example #8
 def provision_appliances(
         self, count=1, preconfigured=False, version=None, stream=None, provider=None,
         lease_time=120, ram=None, cpu=None):
     # If we specify version, stream is ignored because we will get that specific version
     if version:
         stream = get_stream(version)
     # If we specify stream but not version, sprout will give us latest version of that stream
     elif stream:
         pass
      # If we don't specify either, we get the same version as the current appliance
     else:
         stream = get_stream(current_appliance.version)
         version = current_appliance.version.vstring
     request_id = self.call_method(
         'request_appliances', preconfigured=preconfigured, version=version,
         group=stream, provider=provider, lease_time=lease_time, ram=ram, cpu=cpu, count=count
     )
     wait_for(
         lambda: self.call_method('request_check', str(request_id))['finished'], num_sec=300,
         message='provision {} appliance(s) from sprout'.format(count))
     data = self.call_method('request_check', str(request_id))
     logger.debug(data)
     appliances = []
     for appliance in data['appliances']:
         appliances.append(IPAppliance(hostname=appliance['ip_address']))
     return appliances, request_id
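A usage sketch; the SproutClient.from_config() entry point is an assumption about how the client is constructed:

client = SproutClient.from_config()  # assumed constructor
appliances, request_id = client.provision_appliances(
    count=2, preconfigured=True, lease_time=180)
for app in appliances:
    print(app.hostname)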
Example #9
def set_rails_loglevel(level, validate_against_worker='MiqUiWorker'):
    """Sets the logging level for level_rails and detects when change occured."""
    ui_worker_pid = '#{}'.format(get_worker_pid(validate_against_worker))

    logger.info('Setting log level_rails on appliance to {}'.format(level))
    yaml = store.current_appliance.advanced_settings
    if not str(yaml['log']['level_rails']).lower() == level.lower():
        logger.info('Opening /var/www/miq/vmdb/log/evm.log for tail')
        evm_tail = SSHTail('/var/www/miq/vmdb/log/evm.log')
        evm_tail.set_initial_file_end()

        log_yaml = yaml.get('log', {})
        log_yaml['level_rails'] = level
        store.current_appliance.update_advanced_settings({'log': log_yaml})

        attempts = 0
        detected = False
        while (not detected and attempts < 60):
            logger.debug('Attempting to detect log level_rails change: {}'.format(attempts))
            for line in evm_tail:
                if ui_worker_pid in line:
                    if 'Log level for production.log has been changed to' in line:
                        # Detects a log level change but does not validate the log level
                        logger.info('Detected change to log level for production.log')
                        detected = True
                        break
            time.sleep(1)  # Allow more log lines to accumulate
            attempts += 1
        if not (attempts < 60):
            # Note the error in the logger but continue as the appliance could be slow at logging
            # that the log level changed
            logger.error('Could not detect log level_rails change.')
        evm_tail.close()
    else:
        logger.info('Log level_rails already set to {}'.format(level))
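Typical usage brackets a noisy operation with a raise and a restore; the level names are assumptions:

set_rails_loglevel('debug')   # raise verbosity while reproducing an issue
# ... exercise the appliance ...
set_rails_loglevel('info')    # restore the assumed default level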
Example #10
    def _check_item_visibility(vis_object, vis_expect):
        """
        Args:
            vis_object: the object with a tag to check
            vis_expect: bool, True if tag should be visible

        Returns: None
        """
        if vis_expect:
            vis_object.add_tag(tag=tag)
        else:
            tags = vis_object.get_tags()
            tag_assigned = any(
                object_tags.category.display_name == tag.category.display_name and
                object_tags.display_name == tag.display_name for object_tags in tags
            )
            if tag_assigned:
                vis_object.remove_tag(tag=tag)
        with user_restricted:
            try:
                navigate_to(vis_object, 'Details')
                actual_visibility = True
            except Exception:
                logger.debug('Tagged item is not visible')
                actual_visibility = False

        assert actual_visibility == vis_expect
Example #11
def test_workload_capacity_and_utilization(request, scenario, appliance):
    """Runs through provider based scenarios enabling C&U and running for a set period of time.
    Memory Monitor creates graphs and summary at the end of each scenario.

    Polarion:
        assignee: rhcf3_machine
        initialEstimate: 1/4h
    """
    from_ts = int(time.time() * 1000)
    logger.debug('Scenario: {}'.format(scenario['name']))

    appliance.clean_appliance()

    quantifiers = {}
    scenario_data = {'appliance_ip': appliance.hostname,
        'appliance_name': conf.cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-cap-and-util',
        'test_name': 'Capacity and Utilization',
        'appliance_roles': ','.join(roles_cap_and_util),
        'scenario': scenario}
    monitor_thread = SmemMemoryMonitor(appliance.ssh_client, scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts, quantifiers, scenario_data))

    monitor_thread.start()

    appliance.wait_for_miq_server_workers_started(poll_interval=2)
    appliance.update_server_roles({role: True for role in roles_cap_and_util})
    for provider in scenario['providers']:
        get_crud(provider).create_rest()
    logger.info('Sleeping for Refresh: {}s'.format(scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])
    appliance.set_cap_and_util_all_via_rails()

    # Variable amount of time for C&U collections/processing
    total_time = scenario['total_time']
    starttime = time.time()
    elapsed_time = 0
    while (elapsed_time < total_time):
        elapsed_time = time.time() - starttime
        time_left = total_time - elapsed_time
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))
        if (time_left > 0 and time_left < 300):
            time.sleep(time_left)
        elif time_left > 0:
            time.sleep(300)

    quantifiers['Elapsed_Time'] = round(elapsed_time, 2)
    logger.info('Test Ending...')
Example #12
def test_provider_filter_save_and_cancel_load_with_user_input(
        rails_delete_filter, advanced_search_view):
    """
    Polarion:
        assignee: anikifor
        casecomponent: WebUI
        caseimportance: medium
        initialEstimate: 1/10h
    """
    # bind filter_name to the function for fixture cleanup
    test_provider_filter_save_and_cancel_load_with_user_input.filter_name = \
        fauxfactory.gen_alphanumeric()
    logger.debug('Set filter_name to: {}'.format(
        test_provider_filter_save_and_cancel_load_with_user_input.filter_name))
    # Save filter
    assert advanced_search_view.entities.search.save_filter(
        "fill_count(Infrastructure Provider.VMs, >)",
        test_provider_filter_save_and_cancel_load_with_user_input.filter_name)
    advanced_search_view.flash.assert_no_error()

    # Reset Filter
    assert advanced_search_view.entities.search.reset_filter()
    advanced_search_view.entities.search.load_filter(
        test_provider_filter_save_and_cancel_load_with_user_input.filter_name,
        fill_callback={"COUNT": 0},
        cancel_on_user_filling=True,
        apply_filter=True
    )
    advanced_search_view.flash.assert_no_error()
Example #13
    def find_request(self, cells, partial_check=False):
        """Finds the request and returns the row element
        Args:
            cells: Search data for the requests table.
            partial_check: Whether to use the ``__contains`` operator
        Returns: row
        """
        contains = '' if not partial_check else '__contains'
        column_list = self.table.attributized_headers
        cells = copy(cells)
        for key in cells.keys():
            for column_name, column_text in column_list.items():
                if key == column_text:
                    cells['{}{}'.format(column_name, contains)] = cells.pop(key)
                    break

        for _ in self.paginator.pages():
            rows = list(self.table.rows(**cells))
            if len(rows) == 0:
                # row not on this page, assume it has yet to appear
                # it might be nice to add an option to fail at this point
                continue
            elif len(rows) > 1:
                raise RequestException(
                    'Multiple requests with matching content found - be more specific!')
            else:
                # found the row!
                row = rows[0]
                logger.debug(' Request Message: %s', row.last_message.text)
                return row
        else:
            raise Exception("The requst specified by {} not found!".format(str(cells)))
Example #14
 def provision_appliances(
         self, count=1, preconfigured=False, version=None, stream=None, provider=None,
         provider_type=None, lease_time=120, ram=None, cpu=None, **kwargs):
     # provisioning may take more time than it is expected in some cases
     wait_time = kwargs.get('wait_time', 300)
     # If we specify version, stream is ignored because we will get that specific version
     if version:
         stream = get_stream(version)
     # If we specify stream but not version, sprout will give us latest version of that stream
     elif stream:
         pass
      # If we don't specify either, we get the same version as the current appliance
     else:
         stream = get_stream(current_appliance.version)
         version = current_appliance.version.vstring
     request_id = self.call_method(
         'request_appliances', preconfigured=preconfigured, version=version,
         provider_type=provider_type, group=stream, provider=provider, lease_time=lease_time,
         ram=ram, cpu=cpu, count=count, **kwargs
     )
     wait_for(
         lambda: self.call_method('request_check', str(request_id))['finished'],
         num_sec=wait_time,
         message='provision {} appliance(s) from sprout'.format(count))
     data = self.call_method('request_check', str(request_id))
     logger.debug(data)
     appliances = []
     for appliance in data['appliances']:
         app_args = {'hostname': appliance['ip_address'],
                     'project': appliance['project'],
                     'container': appliance['container'],
                     'db_host': appliance['db_host']}
         appliances.append(IPAppliance(**app_args))
     return appliances, request_id
Example #15
def new_vm(request, provider):
    vm = VM.factory(random_vm_name('timelines', max_length=16), provider)
    logger.debug('Fixture new_vm set up! Name: %r', vm.name)
    logger.info('Will create %r on Provider: %r', vm.name, vm.provider.name)
    vm.create_on_provider(find_in_cfme=False, timeout=500)
    yield vm
    logger.debug('Fixture new_vm teardown! Name: %r Provider: %r', vm.name, vm.provider.name)
    vm.provider.mgmt.delete_vm(vm.name)
Example #16
def new_vm(provider):
    vm = provider.appliance.collections.infra_vms.instantiate(
        random_vm_name('timelines', max_length=16), provider
    )
    vm.create_on_provider(find_in_cfme=True)
    logger.debug('Fixture new_vm set up! Name: %r Provider: %r', vm.name, vm.provider.name)
    yield vm
    vm.cleanup_on_provider()
Example #17
 def refresh_provider_relationships(self, from_list_view=False):
      # from_list_view is ignored; it is included only for compatibility with the UI call.
     logger.debug('Refreshing provider relationships')
     col = self.appliance.rest_api.collections.providers.find_by(name=self.name)
     try:
         col[0].action.refresh()
     except IndexError:
         raise Exception("Provider collection empty")
Example #18
def deploy_template(provider_key, vm_name, template_name=None, timeout=900, **deploy_args):
    """
    Args:
        provider_key: Provider key on which the VM is to be created
        vm_name: Name of the VM to be deployed
        template_name: Name of the template that the VM is deployed from
        timeout: the timeout for template deploy
    """
    allow_skip = deploy_args.pop("allow_skip", ())
    if isinstance(allow_skip, dict):
        skip_exceptions = allow_skip.keys()
        callable_mapping = allow_skip
    elif isinstance(allow_skip, six.string_types) and allow_skip.lower() == "default":
        skip_exceptions = DEFAULT_SKIP
        callable_mapping = {}
    else:
        skip_exceptions = allow_skip
        callable_mapping = {}
    provider_crud = get_crud(provider_key)

    deploy_args.update(vm_name=vm_name)

    if template_name is None:
        try:
            deploy_args.update(template=provider_crud.data['templates']['small_template']['name'])
        except KeyError:
            raise KeyError('small_template not defined for Provider {} in cfme_data.yaml'
                .format(provider_key))
    else:
        deploy_args.update(template=template_name)

    deploy_args.update(provider_crud.deployment_helper(deploy_args))

    logger.info("Getting ready to deploy VM/instance %s from template %s on provider %s",
        vm_name, deploy_args['template'], provider_crud.data['name'])
    try:
        try:
            logger.debug("Deploy args: %s", deploy_args)
            vm_name = provider_crud.mgmt.deploy_template(timeout=timeout, **deploy_args)
            logger.info("Provisioned VM/instance %s", vm_name)  # instance ID in case of EC2
        except Exception as e:
            logger.exception('Could not provision VM/instance %s (%s: %s)',
                vm_name, type(e).__name__, str(e))
            try:
                provider_crud.mgmt.delete_vm(vm_name)
            except Exception:
                logger.exception("Unable to clean up vm:", vm_name)
            raise
    except skip_exceptions as e:
        e_c = type(e)
        if e_c in callable_mapping and not callable_mapping[e_c](e):
            raise
        # Make it visible also in the log.
        store.write_line(
            "Skipping due to a provider error: {}: {}\n".format(e_c.__name__, str(e)), purple=True)
        logger.exception(e)
        pytest.skip("{}: {}".format(e_c.__name__, str(e)))
    return vm_name
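A usage sketch; the provider key and VM name are hypothetical, and extra keyword arguments are passed through to the provider's deploy call:

vm_name = deploy_template(
    'vsphere-nested',       # hypothetical provider key from cfme_data.yaml
    'test-deploy-01',       # hypothetical VM name
    allow_skip='default')   # turn DEFAULT_SKIP provider errors into pytest skips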
Example #19
def new_instance(provider):
    inst = Instance.factory(random_vm_name('cloud-timeline', max_length=20), provider)
    logger.debug('Fixture new_instance set up! Name: %r Provider: %r', inst.name,
                 inst.provider.name)
    inst.create_on_provider(allow_skip="default", find_in_cfme=True)
    yield inst
    logger.debug('Fixture new_instance teardown! Name: %r Provider: %r', inst.name, inst.provider.name)
    if inst.provider.mgmt.does_vm_exist(inst.name):
        inst.provider.mgmt.delete_vm(inst.name)
Example #20
 def checkout(self):
     if self.docker_id is not None:
         return self.docker_id
     checkout = self._get('checkout')
     self.docker_id, self.config = checkout.items()[0]
     self._start_renew_thread()
     log.info('Checked out webdriver container %s', self.docker_id)
     log.debug("%r", checkout)
     return self.docker_id
Example #21
def test_workload_memory_leak(request, scenario, appliance, provider):
    """Runs through provider based scenarios setting one worker instance and maximum threshold and
    running for a set period of time. Memory Monitor creates graphs and summary info.

    Polarion:
        assignee: rhcf3_machine
        casecomponent: CandU
        initialEstimate: 1/4h
    """
    from_ts = int(time.time() * 1000)
    logger.debug('Scenario: {}'.format(scenario['name']))

    appliance.clean_appliance()

    quantifiers = {}
    scenario_data = {'appliance_ip': appliance.hostname,
        'appliance_name': conf.cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-memory-leak',
        'test_name': 'Memory Leak',
        'appliance_roles': ','.join(roles_memory_leak),
        'scenario': scenario}
    monitor_thread = SmemMemoryMonitor(appliance.ssh_client, scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts, quantifiers, scenario_data))

    monitor_thread.start()

    appliance.wait_for_miq_server_workers_started(poll_interval=2)
    appliance.update_server_roles({role: True for role in roles_memory_leak})
    prepare_workers(appliance)
    provider.create()

    total_time = scenario['total_time']
    starttime = time.time()
    elapsed_time = 0
    while (elapsed_time < total_time):
        elapsed_time = time.time() - starttime
        time_left = total_time - elapsed_time
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))
        if (time_left > 0 and time_left < 300):
            time.sleep(time_left)
        elif time_left > 0:
            time.sleep(300)

    quantifiers['Elapsed_Time'] = round(elapsed_time, 2)
    logger.info('Test Ending...')
Example #22
def gen_events(new_instance):
    logger.debug('Starting, stopping VM')
    mgmt = new_instance.provider.mgmt
    mgmt.stop_vm(new_instance.name)
    if new_instance.provider.one_of(EC2Provider):
        ec2_sleep()
    mgmt.start_vm(new_instance.name)
    if new_instance.provider.one_of(EC2Provider):
        ec2_sleep()
Example #23
    def cleanup_on_provider(self):
        """Clean up entity on the provider if it has been created on the provider

        Helper method to avoid NotFoundErrors during test case teardown.
        """
        if self.exists_on_provider:
            self.mgmt.cleanup()
        else:
            logger.debug('cleanup_on_provider: entity "%s" does not exist', self.name)
Example #24
    def update(self, updates, validate_credentials=False):
        """Updates a host in the UI. Better to use utils.update.update context manager than call
        this directly.

        Args:
           updates (dict): fields that are changing.
        """

        view = navigate_to(self, "Edit")
        changed = view.fill({
            "name": updates.get("name"),
            "hostname": updates.get("hostname") or updates.get("ip_address"),
            "custom_ident": updates.get("custom_ident"),
            "ipmi_address": updates.get("ipmi_address"),
            "mac_address": updates.get("mac_address")
        })
        credentials = updates.get("credentials")
        ipmi_credentials = updates.get("ipmi_credentials")
        credentials_changed = False
        ipmi_credentials_changed = False
        if credentials is not None:
            for creds_type in credentials:
                cred_endpoint = getattr(view.endpoints, creds_type)
                if cred_endpoint.change_stored_password.is_displayed:
                    cred_endpoint.change_stored_password.click()
                credentials_changed = cred_endpoint.fill_with(
                    credentials[creds_type].view_value_mapping)
                if validate_credentials:
                    cred_endpoint.validate_button.click()
        if ipmi_credentials is not None:
            if view.endpoints.ipmi.change_stored_password.is_displayed:
                view.endpoints.ipmi.change_stored_password.click()
            ipmi_credentials_changed = view.endpoints.ipmi.fill(
                ipmi_credentials.view_value_mapping)
            if validate_credentials:
                view.endpoints.ipmi.validate_button.click()
        view.flash.assert_no_error()
        changed = any([changed, credentials_changed, ipmi_credentials_changed])
        if changed:
            view.save_button.click()
            logger.debug("Trying to save update for host with id: %s",
                         str(self.get_db_id))
            view = self.create_view(HostDetailsView)
            view.flash.assert_success_message(
                'Host / Node "{}" was saved'.format(
                    updates.get("name", self.name)))
        else:
            view.cancel_button.click()
            view.flash.assert_success_message(
                'Edit of Host / Node "{}" was cancelled by the user'.format(
                    updates.get("name", self.name)))
Example #25
 def get_vm_id(self, vm_name):
     """
     Return the ID associated with the specified VM name
     """
     # TODO: Get Provider object from VMCollection.find, then use VM.id to get the id
     logger.debug('Retrieving the ID for VM: {}'.format(vm_name))
     for vm_id in self.get_all_vm_ids():
         details = self.get_vm_details(vm_id)
         if details['name'] == vm_name:
             return vm_id
Example #26
def new_vm(provider, big_template):
    vm_collection = provider.appliance.provider_based_collection(provider)
    vm = vm_collection.instantiate(random_vm_name(context='ansible'),
                                   provider,
                                   template_name=big_template.name)
    vm.create_on_provider(find_in_cfme=True)
    logger.debug("Fixture new_vm set up! Name: %r Provider: %r", vm.name,
                 vm.provider.name)
    yield vm
    vm.cleanup_on_provider()
Example #27
    def cleanup_on_provider(self):
        """Clean up entity on the provider if it has been created on the provider

        Helper method to avoid NotFoundErrors during test case teardown.
        """
        if self.exists_on_provider:
            self.mgmt.cleanup()
        else:
            logger.debug('cleanup_on_provider: entity "%s" does not exist',
                         self.name)
Example #28
 def get_vm_id(self, vm_name):
     """
     Return the ID associated with the specified VM name
     """
     # TODO: Get Provider object from VMCollection.find, then use VM.id to get the id
     logger.debug('Retrieving the ID for VM: {}'.format(vm_name))
     for vm_id in self.get_all_vm_ids():
         details = self.get_vm_details(vm_id)
         if details['name'] == vm_name:
             return vm_id
Example #29
def get_mgmt(provider_key, providers=None, credentials=None):
    """ Provides a ``wrapanapi`` object, based on the request.

    Args:
        provider_key: The name of a provider, as supplied in the yaml configuration files.
            You can also use the dictionary if you want to pass the provider data directly.
        providers: A set of data in the same format as the ``management_systems`` section in the
            configuration yamls. If ``None`` then the configuration is loaded from the default
            locations. Expects a dict.
        credentials: A set of credentials in the same format as the ``credentials`` yamls files.
            If ``None`` then credentials are loaded from the default locations. Expects a dict.
    Return: A provider instance of the appropriate ``wrapanapi.WrapanapiAPIBase``
        subclass
    """
    if providers is None:
        providers = providers_data
    # provider_key can also be provider_data for some reason
    # TODO rename the parameter; might break things
    if isinstance(provider_key, Mapping):
        provider_data = provider_key
        provider_key = provider_data['name']
    else:
        provider_data = providers[provider_key]

    if credentials is None:
        # We need to handle the in-place credentials

        if provider_data.get('endpoints'):
            credentials = provider_data['endpoints']['default']['credentials']
        else:
            credentials = provider_data['credentials']
        # If it is not a mapping, it most likely points to a credentials yaml (as by default)
        if not isinstance(credentials, Mapping):
            credentials = conf.credentials[credentials]
        # Otherwise it is a mapping and therefore we consider it credentials

    # Munge together provider dict and creds,
    # Let the provider do whatever they need with them
    provider_kwargs = provider_data.copy()
    provider_kwargs.update(credentials)

    if not provider_kwargs.get('username') and provider_kwargs.get('principal'):
        provider_kwargs['username'] = provider_kwargs['principal']
        provider_kwargs['password'] = provider_kwargs['secret']

    if isinstance(provider_key, six.string_types):
        provider_kwargs['provider_key'] = provider_key
    provider_kwargs['logger'] = logger

    if provider_key not in PROVIDER_MGMT_CACHE:
        mgmt_instance = get_class_from_type(provider_data['type']).mgmt_class(**provider_kwargs)
        PROVIDER_MGMT_CACHE[provider_key] = mgmt_instance
    else:
        logger.debug("returning cached mgmt class for '%s'", provider_key)
    return PROVIDER_MGMT_CACHE[provider_key]
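A usage sketch with a hypothetical provider key; the returned wrapanapi object exposes the management calls seen throughout these examples:

mgmt = get_mgmt('vsphere55')  # hypothetical key from the management_systems yaml
if mgmt.does_vm_exist('test-vm-01'):
    mgmt.delete_vm('test-vm-01')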
Example #30
def test_workload_capacity_and_utilization(request, scenario, appliance):
    """Runs through provider based scenarios enabling C&U and running for a set period of time.
    Memory Monitor creates graphs and summary at the end of each scenario."""
    from_ts = int(time.time() * 1000)
    logger.debug('Scenario: {}'.format(scenario['name']))

    appliance.clean_appliance()

    quantifiers = {}
    scenario_data = {'appliance_ip': appliance.hostname,
        'appliance_name': conf.cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-cap-and-util',
        'test_name': 'Capacity and Utilization',
        'appliance_roles': ','.join(roles_cap_and_util),
        'scenario': scenario}
    monitor_thread = SmemMemoryMonitor(appliance.ssh_client, scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts, quantifiers, scenario_data))

    monitor_thread.start()

    appliance.wait_for_miq_server_workers_started(poll_interval=2)
    appliance.update_server_roles({role: True for role in roles_cap_and_util})
    for provider in scenario['providers']:
        get_crud(provider).create_rest()
    logger.info('Sleeping for Refresh: {}s'.format(scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])
    appliance.set_cap_and_util_all_via_rails()

    # Variable amount of time for C&U collections/processing
    total_time = scenario['total_time']
    starttime = time.time()
    elapsed_time = 0
    while (elapsed_time < total_time):
        elapsed_time = time.time() - starttime
        time_left = total_time - elapsed_time
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))
        if (time_left > 0 and time_left < 300):
            time.sleep(time_left)
        elif time_left > 0:
            time.sleep(300)

    quantifiers['Elapsed_Time'] = round(elapsed_time, 2)
    logger.info('Test Ending...')
Example #31
def uncollectif(item):
    """ Evaluates if an item should be uncollected

    Tests markers against a supplied lambda from the markers object to determine
    if the item should be uncollected or not.
    """
    from cfme.utils.appliance import find_appliance

    from cfme.utils.pytest_shortcuts import extract_fixtures_values
    markers = item.get_marker('uncollectif')
    if not markers:
        return False, None
    for mark in markers:
        log_msg = 'Trying to uncollect {}: {}'.format(
            item.name,
            mark.kwargs.get('reason', 'No reason given'))
        logger.debug(log_msg)
        try:
            arg_names = inspect.getargspec(get_uncollect_function(mark)).args
        except TypeError:
            logger.debug(log_msg)
            return not bool(mark.args[0]), mark.kwargs.get('reason', 'No reason given')

        app = find_appliance(item, require=False)
        if app:
            global_vars = {'appliance': app}
        else:
            logger.info("while uncollecting %s - appliance not known", item)
            global_vars = {}

        try:
            values = extract_fixtures_values(item)
            values.update(global_vars)
            # The test has already been uncollected
            if arg_names and not values:
                return True, None
            args = [values[arg] for arg in arg_names]
        except KeyError:
            missing_argnames = list(set(arg_names) - set(item._request.funcargnames))
            func_name = item.name
            if missing_argnames:
                raise Exception("You asked for a fixture which wasn't in the function {} "
                                "prototype {}".format(func_name, missing_argnames))
            else:
                raise Exception("Failed to uncollect {}, best guess a fixture wasn't "
                                "ready".format(func_name))
        retval = mark.args[0](*args)
        if retval:
            # shortcut
            return retval, mark.kwargs.get('reason', "No reason given")
        else:
            return False, None

    else:
        return False, None
Example #32
def get_mgmt(provider_key, providers=None, credentials=None):
    """ Provides a ``wrapanapi`` object, based on the request.

    Args:
        provider_key: The name of a provider, as supplied in the yaml configuration files.
            You can also use the dictionary if you want to pass the provider data directly.
        providers: A set of data in the same format as the ``management_systems`` section in the
            configuration yamls. If ``None`` then the configuration is loaded from the default
            locations. Expects a dict.
        credentials: A set of credentials in the same format as the ``credentials`` yamls files.
            If ``None`` then credentials are loaded from the default locations. Expects a dict.
    Return: A provider instance of the appropriate ``wrapanapi.WrapanapiAPIBase``
        subclass
    """
    if providers is None:
        providers = providers_data
    # provider_key can also be provider_data for some reason
    # TODO rename the parameter; might break things
    if isinstance(provider_key, Mapping):
        provider_data = provider_key
        provider_key = provider_data['name']
    else:
        provider_data = providers[provider_key]

    if credentials is None:
        # We need to handle the in-place credentials

        if provider_data.get('endpoints'):
            credentials = provider_data['endpoints']['default']['credentials']
        else:
            credentials = provider_data['credentials']
        # If it is not a mapping, it most likely points to a credentials yaml (as by default)
        if not isinstance(credentials, Mapping):
            credentials = conf.credentials[credentials]
        # Otherwise it is a mapping and therefore we consider it credentials

    # Munge together provider dict and creds,
    # Let the provider do whatever they need with them
    provider_kwargs = provider_data.copy()
    provider_kwargs.update(credentials)

    if not provider_kwargs.get('username') and provider_kwargs.get('principal'):
        provider_kwargs['username'] = provider_kwargs['principal']
        provider_kwargs['password'] = provider_kwargs['secret']

    if isinstance(provider_key, str):
        provider_kwargs['provider_key'] = provider_key
    provider_kwargs['logger'] = logger

    if provider_key not in PROVIDER_MGMT_CACHE:
        mgmt_instance = get_class_from_type(provider_data['type']).mgmt_class(**provider_kwargs)
        PROVIDER_MGMT_CACHE[provider_key] = mgmt_instance
    else:
        logger.debug("returning cached mgmt class for '%s'", provider_key)
    return PROVIDER_MGMT_CACHE[provider_key]
Example #33
    def check_fullfilled(self):
        try:
            result = self.request_check()
        except SproutException as e:
            # TODO: ensure we only exit this way on sprout usage
            self.destroy_pool()
            log.error("sprout pool could not be fulfilled\n%s", str(e))
            pytest.exit(1)

        log.debug("fulfilled at %f %%", result['progress'])
        return result["fulfilled"]
Example #34
 def _is_alive(self):
     log.debug("alive check")
     try:
         self.browser.current_url
     except UnexpectedAlertPresentException:
         # We shouldn't think that an Unexpected alert means the browser is dead
         return True
     except Exception:
         log.exception("browser in unknown state, considering dead")
         return False
     return True
Example #35
 def _is_alive(self):
     log.debug("alive check")
     try:
         self.browser.current_url
     except UnexpectedAlertPresentException:
         # We shouldn't think that an Unexpected alert means the browser is dead
         return True
     except Exception:
         log.exception("browser in unknown state, considering dead")
         return False
     return True
Example #36
 def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
     starttime = time.time()
     to_ts = int(starttime * 1000)
     g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
     logger.debug('Started cleaning up monitoring thread.')
     monitor_thread.grafana_urls = g_urls
     monitor_thread.signal = False
     monitor_thread.join()
     add_workload_quantifiers(quantifiers, scenario_data)
     timediff = time.time() - starttime
     logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
Example #37
    def update_server_roles_db(self, roles):
        """ Set server roles on Configure / Configuration pages.

        Args:
            roles: Roles specified as in server_roles dict in this module. Set to True or False
        """
        if self.server_roles_db == roles:
            logger.debug(' Roles already match, returning...')
            return
        else:
            self.appliance.server_roles = roles
Example #38
    def check_fullfilled(self):
        try:
            result = self.request_check()
        except SproutException as e:
            # TODO: ensure we only exit this way on sprout usage
            self.destroy_pool()
            log.error("sprout pool could not be fulfilled\n%s", str(e))
            pytest.exit(1)

        log.debug("fulfilled at %f %%", result['progress'])
        return result["finished"]
Example #39
    def update_server_roles_db(self, roles):
        """ Set server roles on Configure / Configuration pages.

        Args:
            roles: Roles specified as in server_roles dict in this module. Set to True or False
        """
        if self.server_roles_db == roles:
            logger.debug(' Roles already match, returning...')
            return
        else:
            self.appliance.server_roles = roles
Example #40
 def cleanup_workload(from_ts, quantifiers, scenario_data):
     starttime = time.time()
     to_ts = int(starttime * 1000)
     g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
     logger.debug('Started cleaning up monitoring thread.')
     monitor_thread.grafana_urls = g_urls
     monitor_thread.signal = False
     monitor_thread.join()
     add_workload_quantifiers(quantifiers, scenario_data)
     timediff = time.time() - starttime
     logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
Example #41
def new_instance(provider):
    inst = Instance.factory(random_vm_name('cloud-timeline', max_length=20),
                            provider)
    logger.debug('Fixture new_instance set up! Name: %r Provider: %r',
                 inst.name, inst.provider.name)
    inst.create_on_provider(allow_skip="default", find_in_cfme=True)
    yield inst
    logger.debug('Fixture new_instance teardown! Name: %r Provider: %r', inst.name,
                 inst.provider.name)
    if inst.provider.mgmt.does_vm_exist(inst.name):
        inst.provider.mgmt.delete_vm(inst.name)
Example #42
def uncollectif(item):
    """ Evaluates if an item should be uncollected

    Tests markers against a supplied lambda from the markers object to determine
    if the item should be uncollected or not.
    """

    from cfme.utils.pytest_shortcuts import extract_fixtures_values
    markers = item.get_marker('uncollectif')
    if not markers:
        return False, None
    for mark in markers:
        log_msg = 'Trying to uncollect {}: {}'.format(
            item.name,
            mark.kwargs.get('reason', 'No reason given'))
        logger.debug(log_msg)
        try:
            arg_names = inspect.getargspec(get_uncollect_function(mark)).args
        except TypeError:
            logger.debug(log_msg)
            return not bool(mark.args[0]), mark.kwargs.get('reason', 'No reason given')

        holder = item.config.pluginmanager.getplugin('appliance-holder')
        if holder:
            global_vars = {'appliance': holder.held_appliance}
        else:
            logger.info("while uncollecting %s - appliance not known", item)
            global_vars = {}

        try:
            values = extract_fixtures_values(item)
            values.update(global_vars)
            # The test has already been uncollected
            if arg_names and not values:
                return True, None
            args = [values[arg] for arg in arg_names]
        except KeyError:
            missing_argnames = list(set(arg_names) - set(item._request.funcargnames))
            func_name = item.name
            if missing_argnames:
                raise Exception("You asked for a fixture which wasn't in the function {} "
                                "prototype {}".format(func_name, missing_argnames))
            else:
                raise Exception("Failed to uncollect {}, best guess a fixture wasn't "
                                "ready".format(func_name))
        retval = mark.args[0](*args)
        if retval:
            # shortcut
            return retval, mark.kwargs.get('reason', "No reason given")
        else:
            return False, None

    else:
        return False, None
Example #43
def ensure_advanced_search_closed():
    """Checks if the advanced search box is open and if it does, closes it."""
    if is_advanced_search_opened():
        logger.debug(
            'search.ensure_advanced_search_closed: search was open, closing')
        sel.click(search_box.close_button)
        wait_for(is_advanced_search_opened,
                 fail_condition=True,
                 num_sec=10,
                 delay=2,
                 fail_func=check_and_click_close,
                 message='Waiting for advanced search to close')
Example #44
 def configure_ipa(self, ipaserver, ipaprincipal, ipapassword, ipadomain=None, iparealm=None):
     cmd_result = self._run(
         '--ipaserver {s} --ipaprincipal {u} --ipapassword {p} {d} {r}'
         .format(s=ipaserver, u=ipaprincipal, p=ipapassword,
                 d='--ipadomain {}'.format(ipadomain) if ipadomain else '',
                 r='--iparealm {}'.format(iparealm) if iparealm else ''), timeout=90)
     logger.debug('IPA configuration output: %s', str(cmd_result))
     assert cmd_result.success
     assert 'ipa-client-install exit code: 1' not in cmd_result.output
     self.appliance.sssd.wait_for_running()
     assert self.appliance.ssh_client.run_command("cat /etc/ipa/default.conf "
                                                  "| grep 'enable_ra = True'")
Example #45
        def _transition():
            if in_desired_state():
                return True
            elif in_state_requiring_prep():
                do_prep()
            elif in_actionable_state():
                do_action()

            logger.debug(
                "Sleeping {}sec... (current state: {}, needed state: {})".
                format(delay, self.provider.mgmt.vm_status(self.name), state))
            return False
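_transition is a nested helper meant to be polled until it returns True; a sketch of the enclosing call, with the timeout as an assumption (self, state and delay come from the enclosing method's scope):

# Poll until the VM reaches the desired state (600s timeout is an assumption)
wait_for(_transition, num_sec=600, delay=delay,
         message='VM {} reaching state {}'.format(self.name, state))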
Example #46
    def _wait_for_vm_running():
        if provider.mgmt.is_vm_running(vm_name):
            return True
        elif provider.mgmt.is_vm_stopped(vm_name) or \
                provider.mgmt.can_suspend and provider.mgmt.is_vm_suspended(vm_name) or \
                provider.mgmt.can_pause and provider.mgmt.is_vm_paused(vm_name):
            provider.mgmt.start_vm(vm_name)

        logger.debug(
            "Sleeping 15secs...(current state: {}, needed state: running)".
            format(provider.mgmt.vm_status(vm_name)))
        return False
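The same polling pattern applies here; delay=15 mirrors the log message above, and the timeout is an assumption:

wait_for(_wait_for_vm_running, num_sec=300, delay=15,
         message='VM {} running'.format(vm_name))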
Example #47
    def process_env_mark(self, metafunc):
        """ Process the provider env marks
        Notes:
            provider markers can be applied at multiple layers (module, class, function)
            provider markers automatically override at lower layers (function overrides all)
            provider markers can supply their own fixture_name, to support multiple providers
        Args:
            metafunc: pytest metafunc object

        Returns:
            Parametrizes metafunc object directly, returns nothing
        """

        # organize by fixture_name kwarg to the marker
        # iter_markers returns most local mark first, maybe don't need override
        marks_by_fixture = self.get_closest_kwarg_markers(metafunc.definition)
        if marks_by_fixture is None:
            return

        # process each mark, defaulting fixture_name
        for fixture_name, mark in marks_by_fixture.items():

            # mark is either the lowest marker (automatic override), or has custom fixture_name
            logger.debug(f'Parametrizing provider env mark {mark}')
            args = mark.args
            kwargs = mark.kwargs.copy()
            if kwargs.pop('override', False):
                logger.warning(
                    'provider marker included override kwarg, this is unnecessary'
                )
            scope = kwargs.pop('scope', 'function')
            indirect = kwargs.pop('indirect', False)
            filter_unused = kwargs.pop('filter_unused', True)
            selector = kwargs.pop('selector', ONE_PER_VERSION)
            gen_func = kwargs.pop('gen_func', providers_by_class)

            # If parametrize doesn't get you what you need, steal this and modify as needed
            kwargs.update({'selector': selector})
            argnames, argvalues, idlist = gen_func(metafunc, *args, **kwargs)
            # Filter out argnames that aren't requested on the metafunc test item,
            # so not all tests need all fixtures to run, and tests not using gen_func's
            # fixtures aren't parametrized.
            if filter_unused:
                argnames, argvalues = fixture_filter(metafunc, argnames,
                                                     argvalues)
                # See if we have to parametrize at all after filtering
            parametrize(metafunc,
                        argnames,
                        argvalues,
                        indirect=indirect,
                        ids=idlist,
                        scope=scope,
                        selector=selector)
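A sketch of a test consuming the processed mark; the provider class import follows the cfme tree layout, but treat the specifics as assumptions:

import pytest
from cfme.infrastructure.provider.virtualcenter import VMwareProvider

@pytest.mark.provider([VMwareProvider], scope='module')
def test_provider_refresh(provider):
    # 'provider' is the default fixture_name that process_env_mark parametrizes
    provider.refresh_provider_relationships()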
Example #48
    def cleanup_on_provider(self, handle_cleanup_exception=True):
        """Clean up entity on the provider if it has been created on the provider

        Helper method to avoid NotFoundErrors during test case teardown.
        """
        if self.exists_on_provider:
            wait_for(lambda: self.mgmt.cleanup,
                     handle_exception=handle_cleanup_exception,
                     timeout=300)
        else:
            logger.debug('cleanup_on_provider: entity "%s" does not exist',
                         self.name)
Example #49
def report():
    # TODO parameterize on path, for now test infrastructure reports
    path = ["Configuration Management", "Hosts", "Virtual Infrastructure Platforms"]
    report = CannedSavedReport.new(path)
    report_time = report.datetime
    logger.debug('Created report for path {} and time {}'.format(path, report_time))
    yield report

    try:
        report.delete()
    except Exception:
        logger.warning('Failed to delete report for path {} and time {}'.format(path, report_time))
Example #50
    def get_all_template_ids(self):
        """Returns an integer list of template ID's via the Rest API"""
        # TODO: Move to TemplateCollection
        logger.debug('Retrieving the list of template ids')

        template_ids = []
        try:
            for template in self.appliance.rest_api.collections.templates.all:
                template_ids.append(template.id)
        except APIException:
            return None
        return template_ids
Example #51
def pytest_unconfigure(config):
    yield  # since hookwrapper, let hookimpl run
    if config.getoption('--collect-logs'):
        logger.info('Starting log collection on appliances')
        log_files = DEFAULT_FILES
        local_dir = DEFAULT_LOCAL
        try:
            log_files = env.log_collector.log_files
        except (AttributeError, KeyError):
            logger.info(
                'No log_collector.log_files in env, using default files: %s',
                log_files)
        try:
            local_dir = log_path.join(env.log_collector.local_dir)
        except (AttributeError, KeyError):
            logger.info(
                'No log_collector.local_dir in env, using default local_dir: %s',
                local_dir)

        # Handle local dir existing
        local_dir.ensure(dir=True)
        from cfme.test_framework.appliance import PLUGIN_KEY
        holder = config.pluginmanager.get_plugin(PLUGIN_KEY)
        if holder is None:
            # No appliances to fetch logs from
            logger.warning('No logs collected, appliance holder is empty')
            return

        written_files = []
        for app in holder.appliances:
            with app.ssh_client as ssh_client:
                tar_file = 'log-collector-{}.tar.gz'.format(app.hostname)
                logger.debug(
                    'Creating tar file on app %s:%s with log files %s', app,
                    tar_file, ' '.join(log_files))
                # wrap the files in ls, redirecting stderr, to ignore files that don't exist
                tar_result = ssh_client.run_command(
                    'tar -czvf {tar} $(ls {files} 2>/dev/null)'.format(
                        tar=tar_file, files=' '.join(log_files)))
                if not tar_result.success:
                    logger.error(
                        'Tar command non-zero RC when collecting logs on %s: %s',
                        app, tar_result.output)
                    continue
                ssh_client.get_file(tar_file, local_dir.strpath)
            written_files.append(tar_file)
        logger.info('Wrote the following files to local log path: %s',
                    written_files)
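For illustration, the remote command the hook assembles, shown with a hypothetical file list (the real contents of DEFAULT_FILES are not shown here). The ls indirection with stderr redirected drops files that do not exist, so tar does not abort on a missing log:

# Hypothetical expansion of the tar command built above; paths are examples.
files = ['/var/www/miq/vmdb/log/evm.log', '/var/www/miq/vmdb/log/production.log']
cmd = 'tar -czvf {tar} $(ls {files} 2>/dev/null)'.format(
    tar='log-collector-host.example.com.tar.gz', files=' '.join(files))
# -> tar -czvf log-collector-host.example.com.tar.gz
#        $(ls /var/www/miq/vmdb/log/evm.log ... 2>/dev/null)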
Exemplo n.º 52
0
def _get_vm_name(request):
    """Helper function to get the VM name from the test's ``requirement`` mark.

    First we try to read the requirement value from the module-level
    ``pytestmark`` list. If it is missing, we fall back to the mark on the
    test function itself, with one restriction: the function mark cannot be
    reached from module-scoped fixtures.
    """
    try:
        req = [
            mark.args[0] for mark in request.module.pytestmark
            if mark.name == "requirement"
        ]
    except AttributeError:
        req = None
        logger.debug("Could not get the requirement from pytestmark")
    if not req and request.scope == "function":
        try:
            req = [
                mark.args for mark in request.function.pytestmark
                if mark.name == 'requirement'
            ][0]
        except AttributeError:
            raise CFMEException("VM name cannot be obtained")

    vm_name = random_vm_name(req[0])

    if not request.config.getoption('--no-assignee-vm-name'):
        if isinstance(request.node, pytest.Function):
            assignee = get_parsed_docstring(
                request.node,
                request.session._docstrings_cache).get('assignee', '')
        else:
            # Fetch the list of tests in the module object
            test_list = [
                item for item in dir(request.module)
                if item.startswith('test_') and item != 'test_requirements'
            ]
            # Find the assignee for each test in test_list
            assignee_list = []
            for test in test_list:
                nodeid = f'{request.node.fspath.strpath}::{test}'
                try:
                    assignee_list.append(
                        request.session._docstrings_cache[nodeid]['assignee'])
                except KeyError:
                    continue
            # If every test has the same assignee use it; otherwise use 'module'
            assignee = (assignee_list[0]
                        if len(set(assignee_list)) == 1 else 'module')
        vm_name = f'{vm_name}-{assignee}'

    return vm_name
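A hedged sketch of the mark this helper consumes; with a module-level mark like the one below, the generated name would look something like 'test-rbac-abc12-jdoe' (the 'rbac' tag and 'jdoe' assignee are made-up values for illustration):

import pytest

# Module-level requirement mark, read via request.module.pytestmark
pytestmark = [pytest.mark.requirement('rbac')]

def test_vm_naming(request):
    vm_name = _get_vm_name(request)  # e.g. 'test-rbac-abc12-jdoe'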
Exemplo n.º 53
0
@pytest.hookimpl(hookwrapper=True)
def pytest_sessionfinish(session, exitstatus):
    """Loop through the appliance stack and close ssh connections"""

    for ssh_client in store.ssh_clients_to_close:
        logger.debug('Closing ssh connection on %r', ssh_client)
        try:
            ssh_client.close()
        except Exception:
            logger.debug('Closing ssh connection on %r failed, but ignoring', ssh_client)
    # Use a distinct name so the `session` hook argument isn't shadowed
    for client_session in ssh._client_session:
        with diaper:
            client_session.close()
    yield
Exemplo n.º 54
0
    def edit_request(self, values, cancel=False):
        """Open the request for editing, then submit or cancel the changes
        depending on ``cancel``.
        """
        view = navigate_to(self, 'Edit')
        if view.form.fill(values):
            if not cancel:
                view.form.submit_button.click()
                self.update()
            else:
                view.cancel_button.click()
        else:
            logger.debug('Nothing was changed in the current request')
        view.flash.assert_no_error()
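A short hedged call-site sketch; the dictionary key depends on the actual request form and is an assumption here:

# Edit the request's description, then back out without saving
miq_request.edit_request({'description': 'updated by automation'}, cancel=True)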
Exemplo n.º 55
0
def test_provider_filter_with_user_input(advanced_search_view):
    """
    Polarion:
        assignee: anikifor
        casecomponent: WebUI
        caseimportance: medium
        initialEstimate: 1/10h
    """
    # Set up and apply the filter
    logger.debug('test_provider_filter_with_user_input: fill and apply')
    advanced_search_view.entities.search.advanced_search(
        "fill_count(Infrastructure Provider.VMs, >=)", {'COUNT': 0})
    advanced_search_view.flash.assert_no_error()
Exemplo n.º 56
0
    def timezone_check(self, timezone):
        channel = self.appliance.ssh_client.invoke_shell()
        channel.settimeout(20)
        channel.send("ap")
        result = ''
        try:
            while True:
                # recv() returns bytes; decode instead of str() to avoid
                # accumulating "b'x'" literals in the output
                result += channel.recv(1).decode(errors='ignore')
                if str(timezone[0]) in result:
                    break
        except socket.timeout:
            pass
        logger.debug(result)
Exemplo n.º 57
0
def test_can_delete_provider_filter(advanced_search_view):
    filter_name = fauxfactory.gen_alphanumeric()
    logger.debug('Set filter_name to: %s', filter_name)
    assert advanced_search_view.entities.search.save_filter(
        "fill_count(Infrastructure Provider.VMs, >, 0)", filter_name)
    advanced_search_view.flash.assert_no_error()
    advanced_search_view.entities.search.reset_filter()
    advanced_search_view.flash.assert_no_error()
    advanced_search_view.entities.search.load_filter(filter_name)
    advanced_search_view.flash.assert_no_error()
    if not advanced_search_view.entities.search.delete_filter():
        # pytest.fail() raises by itself; no need to `raise` its return value
        pytest.fail("Cannot delete filter! Probably the delete button is not present!")
    advanced_search_view.flash.assert_no_error()
Exemplo n.º 58
0
def _fill_multibox_str(multi, string):
    """ Filler function for MultiBoxSelect

    Designed for `string`. Selects item with the name.

    Args:
        multi: :py:class:`MultiBoxSelect` to fill
        string: String to select

    Returns: :py:class:`bool` indicating success.
    """
    logger.debug('  Filling in %s with value %s', multi, string)
    return multi.add(string)
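The bool return lets callers detect a no-op fill, as in this hedged sketch:

# Warn when the value could not be added to the multibox widget
if not _fill_multibox_str(multibox, 'My Company Tags'):
    logger.warning('Multibox fill was a no-op for %s', 'My Company Tags')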
Exemplo n.º 59
0
    def _copy_certificate():
        # Copy the certificate from the provider into the appliance trust store
        try:
            provider_ssh.get_file("/etc/origin/master/ca.crt", "/tmp/ca.crt")
            appliance_ssh.put_file("/tmp/ca.crt",
                                   "/etc/pki/ca-trust/source/anchors/{crt}".format(
                                       crt=cert_name))
        except URLError:
            logger.debug("Failed to deploy certificate from OpenShift to CFME")
            return False
        # Note: returning from a `finally` block would swallow unexpected
        # exceptions, so success/failure is returned directly instead.
        return True
Exemplo n.º 60
0
def new_instance(appliance, provider):
    inst = appliance.collections.cloud_instances.instantiate(
        random_vm_name('cloud-timeline', max_length=20), provider)
    logger.debug('Fixture new_instance set up! Name: %r Provider: %r',
                 inst.name, inst.provider.name)
    inst.create_on_provider(allow_skip="default", find_in_cfme=True)
    yield inst
    logger.debug('Fixture new_instance teardown! Name: %r Provider: %r',
                 inst.name, inst.provider.name)
    try:
        inst.mgmt.cleanup()
    except NotFoundError:
        pass