Example #1
def templatize_vm(mgmt, template_name, cluster, temp_vm_name, provider):
    """Templatizes temporary VM. Result is template with two disks.

    Args:
        mgmt: A ``RHEVMSystem`` instance from wrapanapi.
        template_name: Name of the final template.
        cluster: Cluster to save the final template onto.
    """
    try:
        if mgmt.does_template_exist(template_name):
            logger.info("RHEVM:%r Warning: found finished template with this name (%r).",
                    provider, template_name)
            logger.info("RHEVM:%r Skipping this step, attempting to continue", provider)
            return
        vm = mgmt.get_vm(temp_vm_name)
        template = vm.mark_as_template(
            temporary_name=template_name,
            cluster=cluster,
            delete=False
        )
        # check that the template is really there
        if not template.exists:
            logger.error("RHEVM:%r templatizing temporary VM failed", provider)
            sys.exit(127)
        logger.info("RHEVM:%r successfully templatized the temporary VM", provider)
    except Exception:
        logger.exception("RHEVM:%r templatizing temporary VM failed", provider)
        raise
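A minimal driver sketch for the helper above; the import path and the RHEVMSystem constructor arguments are assumptions to check against your wrapanapi version:

from wrapanapi.systems import RHEVMSystem  # import path may differ by version

# hostname and credentials below are placeholders
mgmt = RHEVMSystem('rhv.example.com', 'admin@internal', 'secret')
templatize_vm(mgmt, template_name='cfme-template', cluster='Default',
              temp_vm_name='cfme-temp-vm', provider='rhv-example')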
Example #2
def upload_ova(hostname, username, password, name, datastore,
               cluster, datacenter, url, provider, proxy,
               ovf_tool_client, default_user, default_pass):

    cmd_args = []
    cmd_args.append('ovftool --noSSLVerify')
    cmd_args.append("--datastore={}".format(datastore))
    cmd_args.append("--name={}".format(name))
    cmd_args.append("--vCloudTemplate=True")
    cmd_args.append("--overwrite")  # require when failures happen and it retries
    if proxy:
        cmd_args.append("--proxy={}".format(proxy))
    cmd_args.append(url)
    cmd_args.append(
        "'vi://{}:{}@{}/{}/host/{}'"
        .format(username, password, hostname, datacenter, cluster)
    )
    logger.info("VSPHERE:%r Running OVFTool", provider)

    command = ' '.join(cmd_args)
    with make_ssh_client(ovf_tool_client, default_user, default_pass) as ssh_client:
        try:
            result = ssh_client.run_command(command)
        except Exception:
            logger.exception("VSPHERE:%r Exception during upload", provider)
            return False

    if "successfully" in result.output:
        logger.info(" VSPHERE:%r Upload completed", provider)
        return True
    else:
        logger.error("VSPHERE:%r Upload failed: %r", provider, result.output)
        return False
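One caveat in the helper above: the arguments are joined with spaces and run through a remote shell, so a password or datastore name containing spaces or shell metacharacters would break the command. A sketch that quotes each token with the stdlib (pipes.quote is the Python 2 spelling of shlex.quote), assuming each list element is a single token (the combined 'ovftool --noSSLVerify' element would need splitting first):

from pipes import quote

def build_ovftool_command(cmd_args):
    # Quote every argument so the joined string survives the remote shell.
    return ' '.join(quote(arg) for arg in cmd_args)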
Example #3
def add_disk_to_vm(api, sdomain, disk_size, disk_format, disk_interface, temp_vm_name,
                   provider):
    """Adds second disk to a temporary VM.

    Args:
        api: API to chosen RHEVM provider.
        sdomain: Storage domain to save new disk onto.
        disk_size: Size of the new disk (in B).
        disk_format: Format of the new disk.
        disk_interface: Interface of the new disk.
    """
    try:
        if len(api.vms.get(temp_vm_name).disks.list()) > 1:
            logger.info("RHEVM:%r Warning: found more than one disk in existing VM (%r).",
                    provider, temp_vm_name)
            logger.info("RHEVM:%r Skipping this step, attempting to continue...", provider)
            return
        actual_sdomain = api.storagedomains.get(sdomain)
        temp_vm = api.vms.get(temp_vm_name)
        storage_id = params.StorageDomains(
            storage_domain=[params.StorageDomain(id=actual_sdomain.get_id())])
        params_disk = params.Disk(storage_domains=storage_id, size=disk_size,
                                  interface=disk_interface, format=disk_format)
        temp_vm.disks.add(params_disk)

        wait_for(check_disks, [api, temp_vm_name], fail_condition=False, delay=5, num_sec=900)

        # check that there are two disks
        if len(api.vms.get(temp_vm_name).disks.list()) < 2:
            logger.error("RHEVM:%r Disk failed to add", provider)
            sys.exit(127)
        logger.info("RHEVM:%r Successfully added disk", provider)
    except Exception:
        logger.exception("RHEVM:%r add_disk_to_temp_vm failed", provider)
        raise
Example #4
    def all(self):
        """returning all backup objects for respective storage manager type"""
        view = navigate_to(self, 'All')
        view.toolbar.view_selector.select("List View")
        backups = []

        try:
            if 'provider' in self.filters:
                for item in view.entities.elements.read():
                    if self.filters.get('provider').name in item['Storage Manager']:
                        backups.append(self.instantiate(name=item['Name'],
                                                        provider=self.filters.get('provider')))
            else:
                for item in view.entities.elements.read():
                    provider_name = item['Storage Manager'].split()[0]
                    provider = get_crud_by_name(provider_name)
                    backups.append(self.instantiate(name=item['Name'], provider=provider))
        except NoSuchElementException:
            if backups:
                # In the middle of reading, that may be bad
                logger.error(
                    'VolumeBackupCollection: NoSuchElementException in the middle of entities read')
                raise
            else:
                # This is probably fine, just warn
                logger.warning('The volume backup table is probably not present (=empty)')
        return backups
Example #5
def templatize_vm(api, template_name, cluster, temp_vm_name, provider):
    """Templatizes temporary VM. Result is template with two disks.

    Args:
        api: API to chosen RHEVM provider.
        template_name: Name of the final template.
        cluster: Cluster to save the final template onto.
    """
    try:
        if api.templates.get(template_name) is not None:
            logger.info("RHEVM:%r Warning: found finished template with this name (%r).",
                    provider, template_name)
            logger.info("RHEVM:%r Skipping this step, attempting to continue", provider)
            return
        temporary_vm = api.vms.get(temp_vm_name)
        actual_cluster = api.clusters.get(cluster)
        new_template = params.Template(name=template_name, vm=temporary_vm, cluster=actual_cluster)
        api.templates.add(new_template)

        wait_for(check_disks, [api, temp_vm_name], fail_condition=False, delay=5, num_sec=900)

        # check that the template is really there
        if not api.templates.get(template_name):
            logger.error("RHEVM:%r templatizing temporary VM failed", provider)
            sys.exit(127)
        logger.info("RHEVM:%r successfully templatized the temporary VM", provider)
    except Exception:
        logger.exception("RHEVM:%r templatizing temporary VM failed", provider)
def make_kwargs(args, **kwargs):
    args_kwargs = dict(args._get_kwargs())

    if not kwargs:
        return args_kwargs

    template_name = kwargs.get('template_name')
    if template_name is None:
        template_name = cfme_data['basic_info']['appliance_template']
        kwargs.update({'template_name': template_name})

    for akey, aval in args_kwargs.items():
        if aval is not None and akey in kwargs and kwargs[akey] != aval:
            kwargs[akey] = aval

    for akey, aval in args_kwargs.items():
        if akey not in kwargs:
            kwargs[akey] = aval

    for key, val in kwargs.items():
        if val is None:
            logger.error("ERROR: please supply required parameter '%r'.", key)
            sys.exit(127)

    return kwargs
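A small demonstration of the merge order, assuming args comes from argparse: non-None command-line values win over keyword defaults, and missing keywords are filled in from args.

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--provider')
parser.add_argument('--stream')
args = parser.parse_args(['--provider', 'rhv-example'])

final = make_kwargs(args, template_name='cfme-template', stream='downstream')
# final == {'template_name': 'cfme-template', 'stream': 'downstream',
#           'provider': 'rhv-example'}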
Example #7
    def upload_template(self):
        cmd_args = [
            "ovftool --noSSLVerify",
            # prefer the datastore from template_upload
            "--datastore={}".format(self.provider_data.provisioning.datastore),  # move later
            "--name={}".format(self.temp_template_name),
            "--vCloudTemplate=True",
            "--overwrite",
            self.raw_image_url,
            "'vi://{}:{}@{}/{}/host/{}/'".format(self.mgmt.username,
                                                 self.mgmt.password,
                                                 self.mgmt.hostname,
                                                 self.template_upload_data.datacenter,
                                                 self.template_upload_data.cluster)
        ]

        if 'proxy' in self.template_upload_data.keys():
            cmd_args.append("--proxy={}".format(self.template_upload_data.proxy))

        command = ' '.join(cmd_args)

        # range(0, 1) ran the command only once, so the "Retrying" warning
        # below could never fire; allow one actual retry
        for _ in range(2):
            # run command against the tool client machine
            upload_result = self.execute_ssh_command(command, client_args=self.tool_client_args)
            if upload_result.success:
                return True
            logger.error('Failure running ovftool: %s', upload_result.output)
            logger.warning('Retrying template upload via ovftool')
        return False
Example #8
    def num_server(self):
        view = navigate_to(self, 'Details')
        try:
            num = view.entities.summary('Relationships').get_text_of('Physical Servers')
        except NoSuchElementException:
            logger.error("Couldn't find number of servers")
            raise
        return int(num)
Example #9
def test_cluster_graph_screen(provider, cluster, host, graph_type, interval, enable_candu):
    """Test Cluster graphs for Hourly and Daily Interval

    prerequisites:
        * C&U enabled appliance

    Steps:
        * Navigate to Cluster
        * Check graph displayed or not
        * Select interval Hourly/Daily
        * Zoom graph to get Table
        * Compare table and graph data

    Polarion:
        assignee: nachandr
        caseimportance: medium
        casecomponent: CandU
        initialEstimate: 1/4h
    """
    host.capture_historical_data()
    cluster.wait_candu_data_available(timeout=1200)

    view = navigate_to(cluster, "Utilization")
    view.options.interval.fill(interval)

    # Check whether the graph is displayed
    try:
        graph = getattr(view, graph_type)
    except AttributeError as e:
        logger.error(e)
    assert graph.is_displayed

    def refresh():
        provider.browser.refresh()
        view.options.interval.fill(interval)

    # wait, since the graph can take some time to load
    wait_for(lambda: len(graph.all_legends) > 0, delay=5, timeout=200, fail_func=refresh)

    # The zoom-in button is not available on normal graphs except Host and VM,
    # so we have to use the vm or host average graph for the zoom-in operation.
    graph_zoom = ["cluster_host", "cluster_vm"]
    avg_graph = graph_type if graph_type in graph_zoom else "{}_vm_host_avg".format(graph_type)
    try:
        avg_graph = getattr(view, avg_graph)
    except AttributeError as e:
        logger.error(e)
    avg_graph.zoom_in()
    view = view.browser.create_view(UtilizationZoomView)

    # wait, since the graph can take some time to load
    wait_for(lambda: len(view.chart.all_legends) > 0, delay=5, timeout=300, fail_func=refresh)
    assert view.chart.is_displayed
    view.flush_widget_cache()
    legends = view.chart.all_legends
    graph_data = view.chart.all_data
    # Clear cache of table widget before read else it will mismatch headers.
    view.table.clear_cache()
    table_data = view.table.read()
    compare_data(table_data=table_data, graph_data=graph_data, legends=legends)
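compare_data is imported from elsewhere in the test suite; a simplified sketch of the kind of check it performs, with the data shapes below being assumptions:

def compare_data_sketch(table_data, graph_data, legends):
    # graph_data is assumed to map each legend to its plotted values and
    # table_data to be a list of row dicts with one column per legend.
    for legend in legends:
        assert legend in graph_data, 'legend {} missing from graph'.format(legend)
        table_values = [row[legend] for row in table_data if legend in row]
        assert table_values, 'column {} missing from table'.format(legend)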
Example #10
    def num_host_ui(self):
        try:
            num = self.get_detail("Relationships", 'Hosts')
        except sel.NoSuchElementException:
            logger.error("Couldn't find number of hosts using key [Hosts], trying Nodes")
            num = self.get_detail("Relationships", 'Nodes')
        return int(num)
Example #11
def add_disk_to_vm(mgmt, sdomain, disk_size, disk_format, disk_interface, temp_vm_name,
                   provider):
    """Adds second disk to a temporary VM.

    Args:
        mgmt: A ``RHEVMSystem`` instance from wrapanapi.
        sdomain: Storage domain to save new disk onto.
        disk_size: Size of the new disk (in B).
        disk_format: Format of the new disk.
        disk_interface: Interface of the new disk.
    """
    try:
        vm = mgmt.get_vm(temp_vm_name)
        if vm.get_disks_count() > 1:
            logger.info("RHEVM:%r Warning: found more than one disk in existing VM (%r).",
                    provider, temp_vm_name)
            logger.info("RHEVM:%r Skipping this step, attempting to continue...", provider)
            return
        vm.add_disk(
            storage_domain=sdomain,
            size=disk_size,
            interface=disk_interface,
            format=disk_format
        )
        # check that there are two disks
        if vm.get_disks_count() < 2:
            logger.error("RHEVM:%r Disk failed to add", provider)
            sys.exit(127)
        logger.info("RHEVM:%r Successfully added disk", provider)
    except Exception:
        logger.exception("RHEVM:%r add_disk_to_temp_vm failed:", provider)
        raise
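disk_size is given in bytes; a hypothetical call creating an 8 GiB second disk (every name below is a placeholder):

add_disk_to_vm(mgmt, sdomain='data-domain', disk_size=8 * 1024 ** 3,
               disk_format='cow', disk_interface='virtio',
               temp_vm_name='cfme-temp-vm', provider='rhv-example')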
Example #12
def set_rails_loglevel(level, validate_against_worker='MiqUiWorker'):
    """Sets the logging level for level_rails and detects when change occured."""
    ui_worker_pid = '#{}'.format(get_worker_pid(validate_against_worker))

    logger.info('Setting log level_rails on appliance to {}'.format(level))
    yaml = store.current_appliance.advanced_settings
    if not str(yaml['log']['level_rails']).lower() == level.lower():
        logger.info('Opening /var/www/miq/vmdb/log/evm.log for tail')
        evm_tail = SSHTail('/var/www/miq/vmdb/log/evm.log')
        evm_tail.set_initial_file_end()

        log_yaml = yaml.get('log', {})
        log_yaml['level_rails'] = level
        store.current_appliance.update_advanced_settings({'log': log_yaml})

        attempts = 0
        detected = False
        while (not detected and attempts < 60):
            logger.debug('Attempting to detect log level_rails change: {}'.format(attempts))
            for line in evm_tail:
                if ui_worker_pid in line:
                    if 'Log level for production.log has been changed to' in line:
                        # Detects a log level change but does not validate the log level
                        logger.info('Detected change to log level for production.log')
                        detected = True
                        break
            time.sleep(1)  # Allow more log lines to accumulate
            attempts += 1
        if attempts >= 60:
            # Note the error in the logger but continue as the appliance could be slow at logging
            # that the log level changed
            logger.error('Could not detect log level_rails change.')
        evm_tail.close()
    else:
        logger.info('Log level_rails already set to {}'.format(level))
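A usage sketch: raise verbosity around a scenario under investigation, then restore it (level names follow the standard Rails levels):

set_rails_loglevel('debug')
try:
    pass  # ... exercise the scenario that needs verbose rails logs ...
finally:
    set_rails_loglevel('info')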
Example #13
def get_datastores_per_host(provider_key):
    print('{} processing to get datastores per host'.format(provider_key))
    try:
        provider = get_mgmt(provider_key)

        vm_registered_files = get_registered_vm_files(provider_key)
        hosts = provider.list_host()
        host_datastore_url = {host: provider.list_host_datastore_url(host) for host in hosts}
        unregistered_files = []

        print("\n*********************UNREGISTERED FILES ON: {}**********************\n".format(
            provider_key))
        print('HOST_NAME\t\tFILE_PATH\t\tTEMPLATE_VM_ISO\t\tNUMBER_OF_FILES\n')
        for host in host_datastore_url:
            try:
                list_orphaned_files_per_host(host, host_datastore_url[host],
                                             provider_key, vm_registered_files,
                                             unregistered_files)
            except Exception as e:
                logger.error(e)
                continue

    except Exception as ex:
        # Print out the error message too because logs in the job get deleted
        print('{} failed ({}: {})'.format(provider_key, type(ex).__name__, str(ex)))
        logger.error('failed to process vms from provider {}'.format(provider_key))
        logger.exception(ex)
Example #14
def pytest_runtest_teardown(item, nextitem):
    name, location = get_test_idents(item)
    holder = item.config.pluginmanager.getplugin('appliance-holder')
    app = holder.held_appliance
    ip = app.hostname
    fire_art_test_hook(
        item, 'finish_test',
        slaveid=store.slaveid, ip=ip, wait_for_task=True)
    fire_art_test_hook(item, 'sanitize', words=words)
    jenkins_data = {
        'build_url': os.environ.get('BUILD_URL'),
        'build_number': os.environ.get('BUILD_NUMBER'),
        'git_commit': os.environ.get('GIT_COMMIT'),
        'job_name': os.environ.get('JOB_NAME')
    }
    try:
        caps = app.browser.widgetastic.selenium.capabilities
        param_dict = {
            'browserName': caps['browserName'],
            'browserPlatform': caps['platform'],
            'browserVersion': caps['version']
        }
    except Exception as e:
        logger.error("Couldn't grab browser env_vars")
        logger.error(e)
        param_dict = None

    fire_art_test_hook(
        item, 'ostriz_send', env_params=param_dict,
        slaveid=store.slaveid, polarion_ids=extract_polarion_ids(item), jenkins=jenkins_data)
Example #15
def fix_missing_hostname(appliance):
    """Fix for hostname missing from the /etc/hosts file

    Note: Affects RHOS-based appliances but can't hurt the others so
          it's applied on all.
    """
    if isinstance(appliance, DummyAppliance) or appliance.is_dev:
        return
    ssh_client = appliance.ssh_client
    logger.info("Checking appliance's /etc/hosts for its own hostname")
    if ssh_client.run_command('grep $(hostname) /etc/hosts').failed:
        logger.info('Setting appliance hostname')
        host_out = appliance.ssh_client.run_command('host {}'.format(appliance.hostname))
        if host_out.success and 'domain name pointer' in host_out.output:
            # resolvable and reverse lookup, hostname property is an IP addr
            fqdn = host_out.output.split(' ')[-1].rstrip('\n').rstrip('.')
        elif host_out.success and 'has address' in host_out.output:
            # resolvable and address returned, hostname property is name
            fqdn = appliance.hostname
        else:
            # not resolvable, just use hostname output through appliance_console_cli to modify
            ret = ssh_client.run_command('hostname')
            logger.warning('Unable to resolve hostname, using output from `hostname`: %s',
                           ret.output)
            fqdn = ret.output.rstrip('\n')
        logger.info('Setting hostname: %s', fqdn)
        appliance.appliance_console_cli.set_hostname(fqdn)
        if ssh_client.run_command('grep $(hostname) /etc/hosts').failed:
            logger.error('Failed to mangle /etc/hosts')
Example #16
def wait_analysis_finished_multiple_tasks(
        task_name, task_type, expected_num_of_tasks, delay=5, timeout='5M'):
    """ Wait until analysis is finished (or timeout exceeded)"""
    row_completed = []
    # get view for reload button
    view = navigate_to(Tasks, 'AllTasks')

    def tasks_finished(output_rows, task_name, task_type, expected_num_of_tasks):

        is_succeed, num_of_succeed_tasks = are_all_tasks_match_status(
            task_name, expected_num_of_tasks, task_type)
        output_rows.append(num_of_succeed_tasks)
        return is_succeed

    try:
        wait_for(tasks_finished,
                 func_kwargs={'output_rows': row_completed,
                              'task_name': task_name,
                              'task_type': task_type,
                              'expected_num_of_tasks': expected_num_of_tasks},
                 delay=delay,
                 timeout=timeout,
                 fail_func=view.reload.click)
        return row_completed[-1]
    except TimedOutError as e:
        logger.error("Only {}  Tasks out of {}, Finished".format(row_completed[-1],
                                                                 expected_num_of_tasks))
        raise TimedOutError('exception {}'.format(e))
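A hypothetical call waiting for four analysis tasks; the task_name and task_type values are placeholders matching whatever are_all_tasks_match_status expects:

num_finished = wait_analysis_finished_multiple_tasks(
    task_name='Container Image Analysis', task_type='container',
    expected_num_of_tasks=4, delay=10, timeout='15M')
logger.info('%s tasks finished successfully', num_finished)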
Example #17
File: ssh.py Project: lcouzens/cfme_tests
    def run_command(self, command, timeout=RUNCMD_TIMEOUT, reraise=False, ensure_host=False,
                    ensure_user=False, container=None):
        """Run a command over SSH.

        Args:
            command: The command. Supports taking dicts as version picking.
            timeout: Timeout after which the command execution fails.
            reraise: Does not muffle the paramiko exceptions in the log.
            ensure_host: Ensure that the command is run on the machine with the IP given, not any
                container or such that we might be using by default.
            ensure_user: Ensure that the command is run as the user we logged in, so in case we are
                not root, setting this to True will prevent from running sudo.
            container: Allows temporarily overriding the default container.
        Returns:
            A :py:class:`SSHResult` instance.
        """
        # paramiko hangs on *_ready calls if destination has become unavailable
        # this is some kind of watchdog to handle this issue
        try:
            with gevent.Timeout(timeout):
                return self._run_command(command, timeout, reraise, ensure_host, ensure_user,
                                         container)
        except gevent.Timeout:
            logger.error("command %s couldn't finish in given timeout %s", command, timeout)
            raise
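A usage sketch, assuming an ssh_client instance of this class; the SSHResult attributes (success/failed/output) match those used throughout this page:

result = ssh_client.run_command('systemctl status evmserverd', ensure_host=True)
if result.failed:
    logger.error('evmserverd status check failed: %s', result.output)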
Example #18
    def update_tags(self):
        result = self.execute_ssh_command(
            'find {} -type f -name "cfme-openshift-*" -exec tail -1 {{}} \;'
            .format(self.destination_directory))

        if result.failed or not result.output:
            logger.error('Unable to find cfme-openshift-* files: %r', result)
            return False

        tags = {}
        for img_url in str(result).split():
            update_img_cmd = 'docker pull {url}'
            logger.info("updating image stream to tag {t}".format(t=img_url))
            result = self.execute_ssh_command(update_img_cmd.format(url=img_url))
            # url ex:
            # brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-httpd:2.4.6-14
            tag_name, tag_value = img_url.split('/')[-1].split(':')
            tag_url = img_url.rpartition(':')[0]
            tags[tag_name] = {'tag': tag_value, 'url': tag_url}
            if result.failed:
                logger.exception('%s: could not update image stream with url: %s',
                                 self.provider_key, img_url)
                return False
        self.tags = tags
        return True
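For the brew URL shown in the comment above, the resulting self.tags entry would be:

# tags['cfme-openshift-httpd'] == {
#     'tag': '2.4.6-14',
#     'url': 'brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888'
#            '/cloudforms46/cfme-openshift-httpd',
# }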
Example #19
    def all(self):
        """returning all Snapshot objects for respective storage manager type"""
        view = navigate_to(self, 'All')
        view.toolbar.view_selector.select("List View")
        snapshots = []

        try:
            if 'provider' in self.filters:
                for item in view.entities.elements.read():
                    if self.filters.get('provider').name in item['Storage Manager']:
                        snapshots.append(self.instantiate(name=item['Name'],
                                                          provider=self.filters.get('provider')))
            else:
                for item in view.entities.elements.read():
                    provider_name = item['Storage Manager'].split()[0]
                    provider = get_crud_by_name(provider_name)
                    snapshots.append(self.instantiate(name=item['Name'], provider=provider))

        except NoSuchElementException:
            if snapshots:
                # In the middle of reading, that may be bad
                logger.error('VolumeSnapshotCollection: '
                             'NoSuchElementException in the middle of entities read')
                raise
            else:
                logger.warning('The snapshot table is probably not present or empty')
        return snapshots
Example #20
File: cli.py Project: hhovsepy/cfme_tests
def fqdn_appliance(appliance, preconfigured):
    sp = SproutClient.from_config()
    available_providers = set(sp.call_method('available_providers'))
    required_providers = set(cfme_data['fqdn_providers'])
    usable_providers = available_providers & required_providers
    version = appliance.version.vstring
    stream = get_stream(appliance.version)
    for provider in usable_providers:
        try:
            apps, pool_id = sp.provision_appliances(
                count=1, preconfigured=preconfigured, version=version, stream=stream,
                provider=provider
            )
            break
        except Exception as e:
            logger.warning("Couldn't provision appliance with following error:")
            logger.warning("{}".format(e))
            continue
    else:
        logger.error("Couldn't provision an appliance at all")
        raise SproutException('No provision available')
    yield apps[0]

    apps[0].ssh_client.close()
    sp.destroy_pool(pool_id)
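fqdn_appliance does its setup before the yield and its teardown after it, which is the shape of a pytest yield-fixture; a hypothetical wiring sketch:

import pytest

@pytest.fixture
def configured_fqdn_appliance(appliance):
    gen = fqdn_appliance(appliance, preconfigured=True)
    app = next(gen)   # runs the provisioning block above
    yield app
    for _ in gen:     # resumes the generator so the teardown executes
        pass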
Example #21
File: image.py Project: apagac/cfme_tests
    def check_compliance_multiple_images(self, image_entities, check_on_entity=True, timeout=240):
        """Initiates compliance check and waits for it to finish on several Images.

        Args:
            image_entities: list of Image entities that need to perform compliance check on them
            check_on_entity (bool): check the compliance status on the entity summary view if True,
                                    only run compliance otherwise.
            timeout (seconds): time for waiting for compliance status
        """

        # Choose Check Compliance of Last Known Configuration
        images_view = navigate_to(self, 'All')
        self.check_image_entities(image_entities)
        wait_for(lambda: images_view.toolbar.policy.is_enabled, num_sec=5,
                 message='Policy drop down menu is disabled after checking some Images')
        images_view.toolbar.policy.item_select('Check Compliance of Last Known Configuration',
                                               handle_alert=True)
        images_view.flash.assert_no_error()

        # Verify Image summary
        if check_on_entity:
            for image_instance in image_entities:
                original_state = 'never verified'
                try:
                    wait_for(
                        lambda: image_instance.compliance_status.lower() != original_state,
                        num_sec=timeout, delay=5,
                        message='compliance state of Image ID, "{}", still matches {}'
                                .format(image_instance.id, original_state)
                    )
                except TimedOutError:
                    logger.error('compliance state of Image ID, "{}", is {}'
                                 .format(image_instance.id, image_instance.compliance_status))
                    raise TimedOutError('Timeout exceeded, waited too long for the'
                                        ' compliance check to finish ({}).'.format(timeout))
Example #22
File: image.py Project: apagac/cfme_tests
    def perform_smartstate_analysis_multiple_images(
            self, image_entities, wait_for_finish=False, timeout='20M'):
        """Performing SmartState Analysis on this Image
        """

        # task_name change from str to regular expression
        # the str compile on tasks module
        image_entities_names = []
        images_view = navigate_to(self, 'All')
        self.check_image_entities(image_entities)

        images_view.toolbar.configuration.item_select(
            'Perform SmartState Analysis', handle_alert=True)
        for image_entity in image_entities:
            image_entities_names.append("Container Image Analysis: '{}'".format(image_entity.name))
            images_view.flash.assert_success_message(
                '"{}": Analysis successfully initiated'.format(image_entity.name), partial=True
            )

        if wait_for_finish:
            try:
                col = self.appliance.collections.tasks.filter({'tab': 'AllTasks'})
                col.wait_for_finished(5, timeout, *image_entities_names)

                # check all task passed successfully with no error
                if col.is_successfully_finished(True, *image_entities_names):
                    return True
                else:
                    logger.error('Some Images SSA tasks finished with error message,'
                                 ' see logger for more details.')
                    return False

            except TimedOutError:
                raise TimedOutError('Timeout exceeded, waited too long for SSA to finish ({}).'
                                    .format(timeout))
Example #23
def _get_template(provider, template_type_name):
    """Get the template name for the given template type
    YAML is expected to have structure with a templates section in the provider:
    provider:
        templates:
            small_template:
                name:
                creds:
            big_template:
                name:
                creds:
    Args:
        provider (obj): Provider object to lookup template on
        template_type_name (str): Template type to lookup (small_template, big_template, etc)
    Returns:
         (dict) template dictionary from the yaml, with name and creds key:value pairs
    """
    try:
        template_type = provider.data.templates.get(template_type_name)
    except (AttributeError, KeyError):
        logger.error("Wanted template %s on %s but it is not there!", template, provider.key)
        pytest.skip('No {} for provider {}'.format(template_type_name, provider.key))
    if not isinstance(template_type, Mapping):
        pytest.skip('Template mapping is incorrect, {} on provider {}'
                    .format(template_type_name, provider.key))
    return template_type
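A usage sketch; per the docstring, the returned mapping carries name and creds entries:

template = _get_template(provider, 'small_template')
logger.info('Deploying from template %s', template['name'])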
Example #24
    def wait_for_collected_metrics(self, timeout="50m", table_name="metrics"):
        """Check the db if gathering collection data

        Args:
            timeout: timeout in minutes
        Return:
            Bool: is collected metrics count is greater than 0
        """

        def is_collected():
            metrics_count = self.get_metrics(table=table_name).count()
            logger.info("Current metrics found count is {count}".format(count=metrics_count))
            return metrics_count > 0

        logger.info("Monitoring DB for metrics collection")

        try:
            wait_for(is_collected, timeout=timeout, delay=30)
        except TimedOutError:
            logger.error(
                "Timeout exceeded, no metrics found in MIQ DB for the provider \"{name}\"".format(
                    name=self.name))
            # a `return` inside a `finally` block would also swallow any
            # unrelated exception, so return normally instead
            return False
        return True
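A hypothetical guard in a test; metric_rollups is named here as an assumption for the rollup table:

import pytest

if not provider.wait_for_collected_metrics(timeout='30m', table_name='metric_rollups'):
    pytest.skip('No metrics collected for {}'.format(provider.name))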
Example #25
def cleanup_empty_dir_on_edomain(path, edomainip, sshname, sshpass, provider_ip, provider):
    """Cleanup all the empty directories on the edomain/edomain_id/master/vms,
    otherwise API calls will result in a 400 error with 'ovf not found'.

    Args:
        path: path of the vms directory on the export domain.
        edomainip: export domain IP to connect to through ssh.
        sshname: export domain ssh username.
        sshpass: export domain ssh password.
        provider_ip: provider IP address.
        provider: provider under execution.
    """
    try:
        edomain_path = edomainip + ':' + path
        temp_path = '~/tmp_filemount'
        command = 'mkdir -p {} &&'.format(temp_path)
        command += 'mount -O tcp {} {} &&'.format(edomain_path, temp_path)
        command += 'cd {}/master/vms &&'.format(temp_path)
        command += 'find . -maxdepth 1 -type d -empty -delete &&'
        command += 'cd ~ && umount {} &&'.format(temp_path)
        command += 'rmdir {}'.format(temp_path)
        logger.info("RHEVM:%r Deleting the empty directories on edomain/vms file...", provider)

        with make_ssh_client(provider_ip, sshname, sshpass) as ssh_client:
            result = ssh_client.run_command(command)
        if result.failed:
            logger.error("RHEVM:%r Error while deleting the empty directories on path: \n %r",
                provider, str(result))
        else:
            logger.info("RHEVM:%r successfully deleted the empty directories on path..", provider)
    except Exception:
        logger.exception('RHEVM:%r Exception cleaning up empty dir on edomain', provider)
        return False
Example #26
def delete_stacks(provider_mgmt, excluded_stacks, stack_template, output):
    stack_list = []
    provider_name = provider_mgmt.kwargs['name']
    try:
        for stack in provider_mgmt.list_stacks():
            if ((excluded_stacks and stack.name in excluded_stacks) or
                    not stack.name.startswith(stack_template)):
                logger.info("  Excluding Stack name: %r", stack.name)
                continue
            else:
                today = datetime.utcnow().replace(tzinfo=None)
                some_date = today - timedelta(days=1)
                if stack.creation_time < some_date:
                    stack_list.append([provider_name, stack.name])
                    try:
                        stack.cleanup()
                    except Exception as e:
                        logger.error(e)
                        continue
        logger.info("  Deleted CloudFormation Stacks: %r", stack_list)
        with open(output, 'a+') as report:
            if stack_list:
                report.write(tabulate(tabular_data=stack_list,
                                      headers=['Provider Key', 'Stack Name'],
                                      tablefmt='orgtbl'))
    except Exception:
        # TODO don't diaper this whole method
        logger.exception('Exception in %r', delete_stacks.__name__)
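A hypothetical invocation from a cleanup script, reusing the get_mgmt helper seen earlier on this page; the provider key, exclusions and output path are placeholders:

provider_mgmt = get_mgmt('ec2-west')
delete_stacks(provider_mgmt, excluded_stacks=['prod-stack'],
              stack_template='test-', output='deleted_stacks.txt')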
Example #27
File: ssui.py Project: lcouzens/cfme_tests
    def do_nav(self, _tries=0, *args, **kwargs):
        """Describes how the navigation should take place."""
        try:
            self.step(*args, **kwargs)
        except Exception as e:
            logger.error(e)
            raise
        self.go(_tries, *args, **kwargs)
Example #28
    def num_host_ui(self):
        view = navigate_to(self, "Details")
        try:
            num = view.entities.summary("Relationships").get_text_of("Hosts")
        except NoSuchElementException:
            logger.error("Couldn't find number of hosts using key [Hosts], trying Nodes")
            num = view.entities.summary("Relationships").get_text_of("Nodes")
        return int(num)
Example #29
    def get_detail(self, label):
        view = navigate_to(self, 'Details')
        try:
            stat = view.entities.summary('Relationships').get_text_of(label)
            logger.info("{}: {}".format(label, stat))
        except NoSuchElementException:
            logger.error("Couldn't find number of {}".format(label))
            raise
        return stat
Example #30
def test_vm_graph_screen(provider, interval, graph_type, enable_candu):
    """Test VM graphs for hourly and Daily

    prerequisites:
        * C&U enabled appliance

    Steps:
        * Navigate to VM (cu-24x7) Utilization Page
        * Check graph displayed or not
        * Zoom graph
        * Compare data of Table and Graph

    Polarion:
        assignee: nachandr
        caseimportance: medium
        initialEstimate: 1/4h
        casecomponent: CandU
    """
    collection = provider.appliance.provider_based_collection(provider)
    vm = collection.instantiate('cu-24x7', provider)

    if not provider.one_of(CloudProvider):
        wait_for(
            vm.capture_historical_data,
            delay=20,
            timeout=1000,
            message="wait for capturing VM historical data"
        )
    vm.wait_candu_data_available(timeout=1200)

    view = navigate_to(vm, 'candu')
    view.options.interval.fill(interval)

    try:
        graph = getattr(view, graph_type)
    except AttributeError as e:
        logger.error(e)
    assert graph.is_displayed

    def refresh():
        provider.browser.refresh()
        view = navigate_to(vm, 'candu')
        view.options.interval.fill(interval)

    # wait, since the graph can take some time to load
    wait_for(lambda: len(graph.all_legends) > 0,
             delay=5, timeout=600, fail_func=refresh)

    graph.zoom_in()
    view = view.browser.create_view(UtilizationZoomView)
    assert view.chart.is_displayed

    graph_data = view.chart.all_data
    # Clear cache of table widget before read else it will mismatch headers.
    view.table.clear_cache()
    table_data = view.table.read()
    legends = view.chart.all_legends
    compare_data(table_data=table_data, graph_data=graph_data, legends=legends)
Example #31
def list_orphaned_files_per_host(host_name, host_datastore_urls, provider_key,
                                 vm_registered_files, unregistered_files):
    try:
        providers_data = cfme_data.get("management_systems", {})
        hosts = providers_data[provider_key]['hosts']
        hostname = [
            host['name'] for host in hosts if host_name in host['name']
        ]
        # check if hostname returned is ipaddress
        if not hostname:
            hostname = re.findall(r'[0-9]+(?:\.[0-9]+){3}', host_name)
        connect_kwargs = {
            'username': credentials['host_default']['username'],
            'password': credentials['host_default']['password'],
            'hostname': hostname[0]
        }
        with SSHClient(**connect_kwargs) as ssh_client:
            for datastore_url in host_datastore_urls:
                datastore_path = re.findall(r'([^ds:`/*].*)',
                                            str(datastore_url))

                command = 'ls ~/{}'.format(datastore_path[0])
                result = ssh_client.run_command(command)
                files_in_datastore = result.output.splitlines() if result.success else []
                for fil in files_in_datastore:
                    if fil not in vm_registered_files:
                        file_type = 'UNKNOWN'
                        number_of_files = 0
                        command = 'test -d ~/{}/{}; echo $?'.format(
                            datastore_path[0], fil)
                        result = ssh_client.run_command(command)
                        file_extension = re.findall(r'.*\.(\w*)', fil)
                        if file_extension:
                            file_type = file_extension[0]
                            number_of_files = 1
                        if int(result.output.strip()) == 0:
                            command = 'ls ~/{}/{} | wc -l'.format(
                                datastore_path[0], fil)
                            result = ssh_client.run_command(command)
                            number_of_files = result.output.strip()
                            command = 'find ~/{}/{} -name "*.vmx" | wc -l'.format(
                                datastore_path[0], fil)
                            vmx_result = ssh_client.run_command(command)
                            command = 'find ~/{}/{} -name "*.vmtx" | wc -l'.format(
                                datastore_path[0], fil)
                            vmtx_result = ssh_client.run_command(command)
                            command = 'find ~/{}/{} -name "*.vmdk" | wc -l'.format(
                                datastore_path[0], fil)
                            vmdk_result = ssh_client.run_command(command)

                            if int(vmx_result.output.strip()) > 0:
                                file_type = 'VirtualMachine'
                            elif int(vmtx_result.output.strip()) > 0:
                                file_type = 'Template'
                            elif int(vmdk_result.output.strip()) > 0:
                                file_type = 'VMDK'
                                # delete_this = '~/' + datastore_path[0] + fil
                                # command = 'rm -rf {}'.format(delete_this)
                                # result = ssh_client.run_command(command)
                                # logger.info(result.output)

                        file_path = '~/' + datastore_path[0] + fil
                        if file_path not in unregistered_files:
                            unregistered_files.append(file_path)
                            print('{}\t\t{}\t\t{}\t\t{}'.format(
                                hostname[0], file_path, file_type,
                                number_of_files))

    except Exception as e:
        logger.error(e)
        return False
Example #32
def test_host_graph_screen(provider, interval, graph_type, host, enable_candu):
    """Test Host graphs for hourly and Daily

    prerequisites:
        * C&U enabled appliance

    Steps:
        * Navigate to Host Utilization Page
        * Check graph displayed or not
        * Select interval(Hourly or Daily)
        * Zoom graph to get Table
        * Compare table and graph data

    Polarion:
        assignee: nachandr
        caseimportance: medium
        initialEstimate: 1/4h
        casecomponent: CandU
    """
    wait_for(
        host.capture_historical_data,
        delay=20,
        timeout=1000,
        message="wait for capturing host historical data")
    host.wait_candu_data_available(timeout=1200)

    view = navigate_to(host, 'candu')
    view.options.interval.fill(interval)

    # Check whether the graph is displayed
    try:
        graph = getattr(view.interval_type, graph_type)
    except AttributeError as e:
        logger.error(e)
    assert graph.is_displayed

    def refresh():
        provider.browser.refresh()
        view.options.interval.fill(interval)

    # wait, since the graph can take some time to load
    wait_for(lambda: len(graph.all_legends) > 0,
             delay=5, timeout=200, fail_func=refresh)

    # The zoom-in button is not available on the normal graph in the Host
    # Utilization page, so we have to use the vm average graph for zoom-in.
    try:
        vm_avg_graph = getattr(view.interval_type, "{}_vm_avg".format(graph_type))
    except AttributeError as e:
        logger.error(e)
    vm_avg_graph.zoom_in()
    view = view.browser.create_view(UtilizationZoomView)

    # wait, since the graph can take some time to load
    wait_for(lambda: len(view.chart.all_legends) > 0,
             delay=5, timeout=300, fail_func=refresh)
    assert view.chart.is_displayed
    view.flush_widget_cache()
    legends = view.chart.all_legends
    graph_data = view.chart.all_data
    # Clear cache of table widget before read else it will mismatch headers.
    view.table.clear_cache()
    table_data = view.table.read()
    compare_data(table_data=table_data, graph_data=graph_data, legends=legends)
Example #34
    def check_for_badness(self, fn, _tries, nav_args, *args, **kwargs):
        if getattr(fn, '_can_skip_badness_test', False):
            # self.log_message('Op is a Nop! ({})'.format(fn.__name__))
            return

        # TODO: Uncomment after resolving the issue in widgetastic. Shouldn't be needed though :)
        # if self.VIEW:
        #     self.view.flush_widget_cache()
        go_kwargs = kwargs.copy()
        go_kwargs.update(nav_args)
        self.appliance.browser.open_browser(
            url_key=self.obj.appliance.server.address())

        # check for MiqQE javascript patch on first try and patch the appliance if necessary
        if self.appliance.is_miqqe_patch_candidate and not self.appliance.miqqe_patch_applied:
            self.appliance.patch_with_miqqe()
            self.appliance.browser.quit_browser()
            _tries -= 1
            self.go(_tries, *args, **go_kwargs)

        br = self.appliance.browser

        try:
            br.widgetastic.execute_script('miqSparkleOff();', silent=True)
        except:  # noqa
            # miqSparkleOff undefined, so it's definitely off.
            # Or maybe it is alerts? Let's only do this when we get an exception.
            self.appliance.browser.widgetastic.dismiss_any_alerts()
            # If we went so far, let's put diapers on one more miqSparkleOff just to be sure
            # It can be spinning in the back
            try:
                br.widgetastic.execute_script('miqSparkleOff();', silent=True)
            except:  # noqa
                pass

        # Check if the page is blocked with blocker_div. If yes, let's headshot the browser right
        # here
        if (br.widgetastic.is_displayed(
                "//div[@id='blocker_div' or @id='notification']")
                or br.widgetastic.is_displayed(".modal-backdrop.fade.in")):
            logger.warning(
                "Page was blocked with blocker div on start of navigation, recycling."
            )
            self.appliance.browser.quit_browser()
            self.go(_tries, *args, **go_kwargs)

        # Check if modal window is displayed
        if (br.widgetastic.is_displayed(
                "//div[contains(@class, 'modal-dialog') and contains(@class, 'modal-lg')]"
        )):
            logger.warning("Modal window was open; closing the window")
            br.widgetastic.click(
                "//button[contains(@class, 'close') and contains(@data-dismiss, 'modal')]"
            )

        # Check if jQuery present
        try:
            br.widgetastic.execute_script("jQuery", silent=True)
        except Exception as e:
            if "jQuery" not in str(e):
                logger.error("Checked for jQuery but got something different.")
                logger.exception(e)
            # Restart some workers
            logger.warning("Restarting UI and VimBroker workers!")
            with self.appliance.ssh_client as ssh:
                # Blow off the Vim brokers and UI workers
                ssh.run_rails_command(
                    "\"(MiqVimBrokerWorker.all + MiqUiWorker.all).each &:kill\""
                )
            logger.info("Waiting for web UI to come back alive.")
            sleep(10)  # Give it some rest
            self.appliance.wait_for_web_ui()
            self.appliance.browser.quit_browser()
            self.appliance.browser.open_browser(
                url_key=self.obj.appliance.server.address())
            self.go(_tries, *args, **go_kwargs)

        # Same with rails errors
        view = br.widgetastic.create_view(ErrorView)
        rails_e = view.get_rails_error()

        if rails_e is not None:
            logger.warning("Page was blocked by rails error, renavigating.")
            logger.error(rails_e)
            # RHEL7 top does not know -M and -a
            logger.debug('Top CPU consumers:')
            logger.debug(
                store.current_appliance.ssh_client.run_command(
                    'top -c -b -n1 | head -30').output)
            logger.debug('Top Memory consumers:')
            logger.debug(
                store.current_appliance.ssh_client.run_command(
                    'top -c -b -n1 -o "%MEM" | head -30').output)  # noqa
            logger.debug('Managed known Providers:')
            logger.debug('%r', [
                prov.key
                for prov in store.current_appliance.managed_known_providers
            ])
            self.appliance.browser.quit_browser()
            self.appliance.browser.open_browser()
            self.go(_tries, *args, **go_kwargs)
            # If there is a rails error past this point, something is really awful

        # Set this to True in the handlers below to trigger a browser restart
        recycle = False

        # Set this to True in handlers to restart evmserverd on the appliance
        # Includes recycling so you don't need to specify recycle = False
        restart_evmserverd = False

        try:
            self.log_message("Invoking {}, with {} and {}".format(
                fn.func_name, args, kwargs),
                             level="debug")
            return fn(*args, **kwargs)
        except (KeyboardInterrupt, ValueError):
            # KeyboardInterrupt: Don't block this while navigating
            raise
        except UnexpectedAlertPresentException:
            if _tries == 1:
                # There was an alert, accept it and try again
                br.widgetastic.handle_alert(wait=0)
                self.go(_tries, *args, **go_kwargs)
            else:
                # There was still an alert when we tried again, shoot the browser in the head
                logger.debug('Unexpected alert, recycling browser')
                recycle = True
        except (ErrorInResponseException, InvalidSwitchToTargetException):
            # Unable to switch to the browser at all, need to recycle
            logger.info('Invalid browser state, recycling browser')
            recycle = True
        except exceptions.CFMEExceptionOccured as e:
            # We hit a Rails exception
            logger.info('CFME Exception occurred')
            logger.exception(e)
            recycle = True
        except exceptions.CannotContinueWithNavigation as e:
            # Some of the navigation steps cannot succeed
            logger.info('Cannot continue with navigation due to: {}; '
                        'Recycling browser'.format(str(e)))
            recycle = True
        except (NoSuchElementException, InvalidElementStateException,
                WebDriverException, StaleElementReferenceException) as e:
            # First check - if jquery is not found, there can be also another
            # reason why this happened so do not put the next branches in elif
            if isinstance(e, WebDriverException) and "jQuery" in str(e):
                # UI failed in some way, try recycling the browser
                logger.exception(
                    "UI failed in some way, jQuery not found, (probably) recycling the browser."
                )
                recycle = True
            # If the page is blocked, then recycle...
            # TODO .modal-backdrop.fade.in catches the 'About' modal resulting in nav loop
            if (br.widgetastic.is_displayed(
                    "//div[@id='blocker_div' or @id='notification']")
                    or br.widgetastic.is_displayed(".modal-backdrop.fade.in")):
                logger.warning("Page was blocked with blocker div, recycling.")
                recycle = True
            elif br.widgetastic.is_displayed("//div[@id='exception_div']"):
                logger.exception(
                    "CFME Exception before force navigate started!: {}".format(
                        br.widgetastic.text(
                            "//div[@id='exception_div']//td[@id='maincol']/div[2]/h3[2]"
                        )))
                recycle = True
            elif br.widgetastic.is_displayed(
                    "//body/h1[normalize-space(.)='Proxy Error']"):
                # 502
                logger.exception(
                    "Proxy error detected. Killing browser and restarting evmserverd."
                )
                req = br.widgetastic.elements("/html/body/p[1]//a")
                req = br.widgetastic.text(
                    req[0]) if req else "No request stated"
                reason = br.widgetastic.elements("/html/body/p[2]/strong")
                reason = br.widgetastic.text(
                    reason[0]) if reason else "No reason stated"
                logger.info("Proxy error: {} / {}".format(req, reason))
                restart_evmserverd = True
            elif br.widgetastic.is_displayed(
                    "//body[./h1 and ./p and ./hr and ./address]"):
                # 503 and similar sort of errors
                title = br.widgetastic.text("//body/h1")
                body = br.widgetastic.text("//body/p")
                logger.exception("Application error {}: {}".format(
                    title, body))
                sleep(5)  # Give it a little bit of rest
                recycle = True
            elif br.widgetastic.is_displayed(
                    "//body/div[@class='dialog' and ./h1 and ./p]"):
                # Rails exception detection
                logger.exception(
                    "Rails exception before force navigate started!: %r:%r at %r",
                    br.widgetastic.text("//body/div[@class='dialog']/h1"),
                    br.widgetastic.text("//body/div[@class='dialog']/p"),
                    getattr(manager.browser, 'current_url',
                            "error://dead-browser"))
                recycle = True
            elif br.widgetastic.elements("//ul[@id='maintab']/li[@class='inactive']") and not\
                    br.widgetastic.elements("//ul[@id='maintab']/li[@class='active']/ul/li"):
                # If upstream and is the bottom part of menu is not displayed
                logger.exception("Detected glitch from BZ#1112574. HEADSHOT!")
                recycle = True
            elif not self.obj.appliance.server.logged_in():
                # Session timeout or whatever like that, login screen appears.
                logger.exception("Looks like we are logged out. Try again.")
                recycle = True
            else:
                logger.error(
                    "Could not determine the reason for failing the navigation. "
                    "Reraising. Exception: {}".format(str(e)))
                logger.debug(
                    store.current_appliance.ssh_client.run_command(
                        'systemctl status evmserverd').output)
                raise

        if restart_evmserverd:
            logger.info("evmserverd restart requested")
            self.appliance.restart_evm_service()
            self.appliance.wait_for_web_ui()
            self.go(_tries, *args, **go_kwargs)

        if recycle or restart_evmserverd:
            self.appliance.browser.quit_browser()
            logger.debug('browser killed on try {}'.format(_tries))
            # If given a "start" nav destination, it won't be valid after quitting the browser
            self.go(_tries, *args, **go_kwargs)
Example #35
    def skip_and_log(self, message="Skipping due to flash message"):
        logger.error("Flash message error: %s", str(self))
        pytest.skip("{}: {}".format(message, str(self)))
Example #36
def check_kwargs(**kwargs):
    for key, val in kwargs.items():
        if val is None:
            logger.error("RHEVM: please supply required parameter '%r'.", key)
            sys.exit(127)
Example #37
def pytest_exception_interact(node, call, report):
    from fixtures.pytest_store import store
    from six.moves.http_client import BadStatusLine
    from socket import error
    import urllib2

    val = safe_string(call.excinfo.value.message).decode('utf-8', 'ignore')

    if isinstance(call.excinfo.value,
                  (urllib2.URLError, BadStatusLine, error)):
        logger.error("internal Exception:\n %s", str(call.excinfo))
        from cfme.utils.browser import manager
        manager.start()  # start will quit first and cycle wharf as well

    last_lines = "\n".join(report.longreprtext.split("\n")[-4:])

    short_tb = '{}\n{}\n{}'.format(last_lines, call.excinfo.type.__name__,
                                   val.encode('ascii', 'xmlcharrefreplace'))
    fire_art_test_hook(node,
                       'filedump',
                       description="Traceback",
                       contents=report.longreprtext,
                       file_type="traceback",
                       display_type="danger",
                       display_glyph="align-justify",
                       group_id="pytest-exception",
                       slaveid=store.slaveid)
    fire_art_test_hook(node,
                       'filedump',
                       description="Short traceback",
                       contents=short_tb,
                       file_type="short_tb",
                       display_type="danger",
                       display_glyph="align-justify",
                       group_id="pytest-exception",
                       slaveid=store.slaveid)
    exception_name = call.excinfo.type.__name__
    exception_lineno = call.excinfo.traceback[-1].lineno
    exception_filename = str(call.excinfo.traceback[-1].path).replace(
        project_path.strpath + "/", '')
    exception_location = "{}:{}".format(exception_filename, exception_lineno)
    fire_art_test_hook(node,
                       'tb_info',
                       exception=exception_name,
                       file_line=exception_location,
                       short_tb=short_tb,
                       slave_id=store.slaveid)

    # base64 encoded to go into a data uri, same for screenshots
    full_tb = report.longreprtext.encode('base64').strip()
    # errors are when exceptions are thrown outside of the test call phase
    report.when = getattr(report, 'when', 'setup')
    is_error = report.when != 'call'

    template_data = {
        'name': node.name,
        'file': node.fspath,
        'is_error': is_error,
        'fail_stage': report.when,
        'short_tb': short_tb,
        'full_tb': full_tb,
    }

    # Before trying to take a screenshot, we used to check if one of the browser_fixtures was
    # in this node's fixturenames, but that was too limited and preventing the capture of
    # screenshots. If removing that conditional now makes this too broad, we should consider
    # an isinstance(val, WebDriverException) check in addition to the browser fixture check that
    # exists here in commit 825ef50fd84a060b58d7e4dc316303a8b61b35d2

    screenshot = take_screenshot()
    template_data['screenshot'] = screenshot.png
    template_data['screenshot_error'] = screenshot.error
    if screenshot.png:
        fire_art_test_hook(node,
                           'filedump',
                           description="Exception screenshot",
                           file_type="screenshot",
                           mode="wb",
                           contents_base64=True,
                           contents=template_data['screenshot'],
                           display_glyph="camera",
                           group_id="pytest-exception",
                           slaveid=store.slaveid)
    if screenshot.error:
        fire_art_test_hook(node,
                           'filedump',
                           description="Screenshot error",
                           mode="w",
                           contents_base64=False,
                           contents=template_data['screenshot_error'],
                           display_type="danger",
                           group_id="pytest-exception",
                           slaveid=store.slaveid)

    failed_test_tracking['tests'].append(template_data)
    if is_error:
        failed_test_tracking['total_errored'] += 1
    else:
        failed_test_tracking['total_failed'] += 1