Example #1
def generate_local_html_folder(piglit_binary):
    """Generate a local HTML folder.

    The aim of this function is to generate a local HTML folder that will
    later be uploaded to linuxgraphics.

    What is the HTML folder for?
    - The HTML folder is generated by piglit from a json file after an
    intel-gpu-tools execution has finished; it contains all the results
    as visual content in HTML web pages.

    :param piglit_binary: the path to the piglit binary.
    """

    cmd = 'python {piglit} summary html {output_folder} {input_folder}'.format(
        piglit=piglit_binary,
        output_folder=os.path.join(OUTPUT_DIR, 'html'),
        input_folder=os.path.join(OUTPUT_DIR, JSON_UNCOMPRESSED_NAME))

    LOGGER.info('generating HTML folder')

    code, stdout = bash.run_command(cmd)

    if code:
        LOGGER.error('could not generate HTML folder')
        LOGGER.error(stdout)
        sys.exit(1)

    LOGGER.info('HTML folder was generated successfully')
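A minimal usage sketch (the chaining with check_piglit from Example #5 is an assumption, not part of the listing):

# hypothetical illustration: locate the piglit entry point, then build the report
piglit_binary = check_piglit()
generate_local_html_folder(piglit_binary)
# the resulting pages end up in os.path.join(OUTPUT_DIR, 'html')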
Example #2
def upload_local_content(local_content, remote_folder):
    """Upload local content to linuxgraphics

	The aim of this function is to upload a local content to linuxgraphics
	in order to get up-to-date the information.

	:param local_content: the local content to be uploaded.
	:param remote_folder: the remote folder to upload the local content.
	"""

    LOGGER.info('uploading : {0}'.format(local_content))

    cmd = 'scp -r {local_content} {user}@{ip}:{remote_folder}'.format(
        local_content=local_content,
        user=LINUXGRAPHICS_USER,
        ip=LINUXGRAPHICS_IP,
        remote_folder=remote_folder)

    code, stdout = bash.run_command(cmd)

    if code:
        LOGGER.error(
            'an error occurred while trying to upload local content : {0}'
            .format(local_content))
        sys.exit(1)

    LOGGER.info(
        'the local content ({local_content}) was successfully uploaded to : {server}'
        .format(local_content=local_content, server=LINUXGRAPHICS_CNAME))
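A minimal usage sketch (the remote path is a placeholder, and the LINUXGRAPHICS_* constants are assumed to be configured at module level):

# hypothetical illustration: push the freshly generated HTML report
upload_local_content(
    local_content=os.path.join(OUTPUT_DIR, 'html'),
    remote_folder='/srv/reports/example')  # placeholder remote folder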
Example #3
def isfile(path, sudo=True):
    """Validates if a file exist in a host.

	:param path: the absolute path of the file to be validated
	:param sudo: this needs to be set to True for files that require
	root permission
	:return: True if the file exists, False otherwise
	"""
    LOGGER.debug('validating if file {file} exists'.format(file=path))
    status, _ = bash.run_command('{prefix}test -f {path}'.format(
        path=path, prefix='sudo ' if sudo else ''))
    exist = not status
    LOGGER.debug('file exists: {exist}'.format(exist=exist))
    return exist
Example #4
def isdir(path, sudo=True):
    """Validates if a directory exist in a host.

	:param path: the path of the directory to be validated
	:param sudo: this needs to be set to True for directories that require
	root permission
	:return: True if the directory exists, False otherwise
	"""
    LOGGER.debug('validating if directory {dir} exists'.format(dir=path))
    status, _ = bash.run_command('{prefix}test -d {path}'.format(
        path=path, prefix='sudo ' if sudo else ''))
    exist = not status
    LOGGER.debug('directory exists: {exist}'.format(exist=exist))
    return exist
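A minimal usage sketch for the two helpers above (the paths are illustrative only):

if isfile('/etc/os-release', sudo=False):
    LOGGER.info('found /etc/os-release')
if isdir('/sys/kernel/debug/dri/0'):
    LOGGER.info('dri debugfs is available')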
Example #5
def check_piglit():
    """Check for piglit repository in the system.

    The aim of this function is to check if a piglit repository exists on the
    current system; otherwise it will clone one into OUTPUT_DIR.
    Piglit is used to generate the HTML folder and it does not need to be
    compiled for that purpose.

    :return: the path to the piglit binary.
    """

    # set a git flag to avoid SSL verification issues while cloning the repository.
    os.environ['GIT_SSL_NO_VERIFY'] = '1'

    piglit_folder = None
    piglit_url = 'https://anongit.freedesktop.org/git/piglit.git'

    # in the following nested loops it is necessary to break out of all of
    # them, because a piglit folder may also be present in the system cache
    # and we only want the piglit repository itself.
    try:
        for root, dirs, files in os.walk('/'):
            for folder in dirs:
                if folder == 'piglit':
                    if 'src' not in os.path.join(root, folder):
                        if os.path.isfile(os.path.join(root, folder,
                                                       'piglit')):
                            piglit_folder = os.path.join(root, folder)
                            raise StopIteration()
    except StopIteration:
        pass

    if not piglit_folder:
        LOGGER.info(
            'no piglit folder was detected in the system, downloading it into : {0}'
            .format(OUTPUT_DIR))
        cmd = 'git -C {output_dir} clone {url}'.format(output_dir=OUTPUT_DIR,
                                                       url=piglit_url)
        code, stdout = bash.run_command(cmd)

        if code:
            LOGGER.error('piglit repository could not be downloaded')
            LOGGER.info(stdout)
            sys.exit(1)
        piglit_folder = os.path.join(OUTPUT_DIR, 'piglit')

    piglit_binary = os.path.join(piglit_folder, 'piglit')

    return piglit_binary
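A small sanity-check sketch around the returned path (the extra validation is an assumption, not part of the listing):

piglit_binary = check_piglit()
# hypothetical guard before invoking piglit
if not os.path.isfile(piglit_binary):
    LOGGER.error('piglit entry point not found: {0}'.format(piglit_binary))
    sys.exit(1)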
Example #6
def clean_workspace():
    """Clean the workspace in order to avoid any kind of issues"""

    LOGGER.info('cleaning the workspace')

    to_delete_list = ['{0}*'.format(JSON_UNCOMPRESSED_NAME), 'html']

    for item in to_delete_list:
        item_to_delete = os.path.join(OUTPUT_DIR, item)

        code, stdout = bash.run_command('rm -rf {0}'.format(item_to_delete))

        if code:
            LOGGER.error('{0} : could not be deleted'.format(item_to_delete))
            sys.exit(1)
        else:
            LOGGER.debug(
                '{0} : was deleted successfully'.format(item_to_delete))
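For reference, a pure-Python sketch of the same cleanup using glob and shutil instead of shelling out to rm (a possible alternative, not the project's code):

import glob
import shutil

def clean_workspace_stdlib():
    """Sketch: remove stale json results and the html folder with stdlib calls."""
    for pattern in ('{0}*'.format(JSON_UNCOMPRESSED_NAME), 'html'):
        for item in glob.glob(os.path.join(OUTPUT_DIR, pattern)):
            if os.path.isdir(item):
                shutil.rmtree(item, ignore_errors=True)
            else:
                os.remove(item)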
Example #7
    def test_run_command_bad_command(self):
        exit_code, output = bash.run_command('bad command')
        self.assertEqual(127, exit_code)
        self.assertEqual('/bin/bash: bad: command not found', output)
Example #8
    def test_run_command(self):
        exit_code, output = bash.run_command('echo "hello"')
        self.assertEqual(0, exit_code)
        self.assertEqual('hello', output)
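The bash helper these tests exercise is not shown in this listing; a minimal sketch of a compatible run_command, written as an assumption rather than the project's actual implementation:

import subprocess

def run_command(cmd):
    """Sketch: run cmd through bash and return (exit_code, stripped combined output)."""
    proc = subprocess.Popen(
        ['/bin/bash', '-c', cmd],
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT)
    output, _ = proc.communicate()
    return proc.returncode, output.decode('utf-8', errors='replace').strip()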
Example #9
def download_json_file_from_linuxgraphics(url):
    """Download a json file from linuxgraphics.

	The aim of this function is to download a json file from linuxgraphics
	from a specific url.

	:param url: the url to linuxgraphics.
	"""

    json_output_dir = os.path.join(OUTPUT_DIR, JSON_COMPRESSED_NAME)
    url_path = url.split('igt-reports')[1]

    json_remote_file = '{base_path}{path}/{json}'.format(
        base_path=LINUXGRAPHICS_REPORTS_PATH,
        path=url_path,
        json=JSON_COMPRESSED_NAME)

    if not remote_client.RemoteClient(
            user=LINUXGRAPHICS_USER,
            host_ip=LINUXGRAPHICS_IP).isfile(json_remote_file):
        LOGGER.error('{json} : does not exist in {cname}'.format(
            json=os.path.join(url, JSON_COMPRESSED_NAME),
            cname=LINUXGRAPHICS_CNAME))

    LOGGER.info('downloading : {0}'.format(JSON_COMPRESSED_NAME))
    params = '--no-check-certificate --directory-prefix={dir}'.format(
        dir=OUTPUT_DIR)

    code, stdout = bash.run_command('wget {params} {url}/{json}'.format(
        params=params, url=url, json=JSON_COMPRESSED_NAME))

    if code:
        LOGGER.error('{url}/{json} : could not be downloaded'.format(
            url=url, json=JSON_COMPRESSED_NAME))
        LOGGER.info('{0}'.format(stdout))
        sys.exit(1)

    json_location_after_unzipping = bash.get_output(
        'tar -tzf {json}'.format(json=json_output_dir))
    LOGGER.info('unzipping : {0}'.format(JSON_COMPRESSED_NAME))

    # decompressing the json file
    cmd = 'tar -C {output_dir} -xvf {json_output_dir}'.format(
        output_dir=OUTPUT_DIR, json_output_dir=json_output_dir)

    code, stdout = bash.run_command(cmd)

    if code:
        LOGGER.error('{0} : could not be decompressed'.format(json_output_dir))
        LOGGER.info(stdout)
        sys.exit(1)

    # moving the json to output_dir
    cmd = 'mv {decompressed_json} {output_dir}'.format(
        decompressed_json=os.path.join(OUTPUT_DIR,
                                       json_location_after_unzipping),
        output_dir=OUTPUT_DIR)

    code, stdout = bash.run_command(cmd)

    if code:
        LOGGER.error(
            'an error occurred while trying to move the decompressed json file to : {0}'
            .format(OUTPUT_DIR))
        sys.exit(1)

    LOGGER.info(
        '{url}/{json} : was downloaded and decompressed into {output_dir}'.format(
            url=url, json=JSON_COMPRESSED_NAME, output_dir=OUTPUT_DIR))
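Putting the helpers from the previous examples together, a hedged sketch of how a report refresh might be chained (the function name, url handling and destination folder are assumptions, not part of the listing):

def refresh_report(report_url):
    """Sketch: rebuild and republish one report end to end."""
    clean_workspace()
    download_json_file_from_linuxgraphics(report_url)
    piglit_binary = check_piglit()
    generate_local_html_folder(piglit_binary)
    upload_local_content(
        local_content=os.path.join(OUTPUT_DIR, 'html'),
        remote_folder=LINUXGRAPHICS_REPORTS_PATH)  # assumed destination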
Example #10
    def get_statistics(self):
        """Collects platform and test execution data.

        This is the main function of the dut_watcher module. Its main purpose
        is to collect platform data that is independent from the test
        execution, and also to gather statistics from the current execution.

        :return: An HTTP response that contains a dictionary with all the data
        from the platform and test execution. Even when for some reason there
        is no data available, it still returns a dictionary with all the keys
        but empty values.
        """

        raw_data = {}

        # Get platform data (status-independent data)
        # -------------------------------------------
        app.logger.info('collecting the platform data (status independent)')

        # Get the DUT's time; this can be useful to troubleshoot issues on the
        # watchdog side
        raw_data['dut_time'] = str(datetime.datetime.now())

        # Platform distribution
        sys_info = platform.platform()
        distro = sys_info.split(
            'with-')[1] if 'with-' in sys_info else sys_info
        raw_data['distro'] = distro
        app.logger.debug('distro: {0}'.format(distro))

        # Platform uptime
        uptime_minutes = bash.get_output("awk '{print $0/60;}' /proc/uptime")
        raw_data['uptime_minutes'] = uptime_minutes
        app.logger.debug('platform uptime: {0}'.format(uptime_minutes))

        # Getting the networking boot time
        net_boot_time = bash.get_output(
            "systemd-analyze blame | grep networking.service").split()
        net_boot_time = net_boot_time[0] if len(net_boot_time) > 0 else 'N/A'
        raw_data['net_boot_time'] = net_boot_time
        app.logger.debug('networking boot time: {0}'.format(net_boot_time))

        # Getting the displays attached to the platform
        displays = bash.get_output(
            "sudo cat /sys/kernel/debug/dri/0/i915_display_info 2> /dev/null | "
            "grep \"^connector\" | grep -we \"connected\" | awk -F \"type \" "
            "'{{print $2}}' | awk '{{print $1}}' | sed 's/,//g'").split()
        raw_data['displays'] = displays
        app.logger.debug('displays: {0}'.format(displays))

        # Getting information related to the i915 Intel driver
        # how this works
        # =====================================================================
        # the third column of the lsmod output is the "Used by" count. When
        # the i915 module is loaded, "check_i915_module" usually contains a
        # value > 0, meaning that other modules are using i915 (i.e. the
        # driver is loaded); if the value is 0, no modules are using i915
        # (i.e. the driver is unloaded).
        check_i915_module = int(
            bash.get_output('lsmod | grep ^i915').split()[2])
        i915_module = bool(check_i915_module)
        raw_data['i915_module'] = i915_module

        # Get test data (status-independent data)
        # ---------------------------------------
        # Getting a list of all the tests
        test_list_file = os.path.join(self.tests_path, 'test-list.txt')
        test_list = bash.get_output(
            "cat {testlist} | sed -e '/TESTLIST/d'".format(
                testlist=test_list_file)).split()

        # Get a list of tests with their sub-tests and store them as test@subtest;
        # if a test has no sub-tests, just store the test itself in the list
        overall_test_list = []
        for test in test_list:
            sub_tests = bash.get_output(
                "{tests_path}/{test} --list-subtests".format(
                    tests_path=self.tests_path, test=test)).split()
            if sub_tests:
                for sub_test in sub_tests:
                    overall_test_list.append(test + '@' + sub_test)
            else:
                overall_test_list.append(test)

        # Get the total number of tests + sub-tests
        total_tests = len(overall_test_list)
        raw_data['total_tests'] = total_tests
        app.logger.debug('tests: {0}'.format(total_tests))

        # Total number of tests to run
        testlist = os.path.join(self.tests_path, 'intel-ci',
                                self.current_testlist)
        with open(testlist) as file_mgr:
            tests_in_scope = sum(1 for _ in file_mgr)
        raw_data['tests_in_scope'] = tests_in_scope
        app.logger.debug('tests in scope: {0}'.format(tests_in_scope))

        # out of scope tests
        tests_out_of_scope = total_tests - tests_in_scope
        raw_data['tests_out_of_scope'] = tests_out_of_scope
        app.logger.debug('tests out of scope: {0}'.format(tests_out_of_scope))

        # Get the overall time taken so far
        overall_time = self.get_overall_time()
        raw_data['overall_time'] = overall_time
        app.logger.info('overall time: {0}'.format(overall_time))

        # Get test execution data (status-dependent data)
        # -----------------------------------------------
        app.logger.info(
            'collecting the test execution data (status dependent)')

        # get information about Piglit
        raw_data['piglit_running'] = bash.is_process_running('piglit')
        piglit_uptime = '0'
        if raw_data['piglit_running']:
            err_code, piglit_process = bash.run_command("pgrep -of piglit")
            if not err_code:
                piglit_uptime = bash.run_command(
                    'ps -o etimes= -p {0}'.format(piglit_process))[1]
        raw_data['piglit_uptime'] = piglit_uptime

        execution_data = self.get_execution_status(tests_in_scope)
        raw_data.update(execution_data)

        # If there is execution data calculate the current execution progress
        if execution_data.get('tests_executed', False):
            current_progress = (round(
                int(execution_data['tests_executed']) / tests_in_scope * 100,
                2))
            raw_data['current_progress'] = current_progress
            app.logger.info(
                'execution progress: {0}%'.format(current_progress))

        # Format the data in a standardized response
        # ------------------------------------------
        formatted_data = self.format_data(raw_data)

        # print and return the formatted data
        print(json.dumps(formatted_data, sort_keys=True))
        return json.dumps(formatted_data, sort_keys=True)
Example #11
def get_firmware_version():
    """Gets the firmware version loaded into the kernel.

    Collects the required GUC, HUC and DMC firmware versions for the host's
    kernel, and their load status.

    :return: a json-encoded dictionary with the values for the GUC, HUC and
    DMC firmwares; if a firmware failed to load or has a different version
    than the expected one, its value is None.
    """
    # initialize values
    guc_version = None
    huc_version = None
    dmc_version = None

    # Make sure the dri data is available
    dri_path = '/sys/kernel/debug/dri/0'
    if utils.isdir(dri_path):

        # get the GUC requirements
        guc_file = os.path.join(dri_path, 'i915_guc_load_status')
        if utils.isfile(guc_file):

            # depending on the kernel version we might have a couple of
            # variations in the content of the file, so we need to consider both
            error_code, output = bash.run_command(
                "sudo cat {guc} | grep 'GuC firmware:'".format(guc=guc_file))

            # if there is no error code, it means the content of the file should
            # contain something similar to this:
            # status: fetch SUCCESS, load SUCCESS\n\t
            # version: wanted 9.39, found 9.39
            # or when not loaded:
            # status: fetch NONE, load NONE\n\t
            # version: wanted 9.39, found 0.0
            if not error_code:
                error_code, output = bash.run_command(
                    "sudo cat {guc} | egrep 'version:|status:'".format(
                        guc=guc_file))
                if not error_code:
                    output = output.split('\n')
                    status = output[0]
                    version = output[1].replace(',', '').split()
                    # grab the firmware version only if the version found
                    # matches the wanted version
                    guc_version = version[4] if version[2] == version[
                        4] else None
                    # finally verify "fetch" and "load" have both SUCCESS
                    # status, if they don't then return None as firmware version
                    guc_version = guc_version if status.count(
                        'SUCCESS') == 2 else None

            # if there is an error code, it means the content of the file should
            # contain something similar to this:
            # fetch: SUCCESS\n\t
            # load: SUCCESS\n\t
            # version wanted: 6.1\n\t
            # version found: 6.1\n\t
            else:
                error_code, output = bash.run_command(
                    "sudo cat {guc} | egrep 'fetch:|load:|version wanted:|version found:'"
                    .format(guc=guc_file))
                if not error_code:
                    output = output.replace('\t', '').split('\n')
                    # both the fetch and the load lines must report SUCCESS
                    loaded = ('SUCCESS' in output[0]
                              and 'SUCCESS' in output[1])
                    version_wanted = output[2].replace('version wanted: ', '')
                    version_found = output[3].replace('version found: ', '')
                    correct_version = version_wanted == version_found
                    guc_version = version_found if correct_version and loaded else None

        # get the HUC requirements
        huc_file = os.path.join(dri_path, 'i915_huc_load_status')
        if utils.isfile(huc_file):

            error_code, output = bash.run_command(
                "sudo cat {huc} | grep 'HuC firmware:'".format(huc=huc_file))

            if not error_code:
                error_code, output = bash.run_command(
                    "sudo cat {huc} | egrep 'version:|status:'".format(
                        huc=huc_file))
                if not error_code:
                    output = output.split('\n')
                    status = output[0]
                    version = output[1].replace(',', '').split()
                    huc_version = version[4] if version[2] == version[
                        4] else None
                    huc_version = huc_version if status.count(
                        'SUCCESS') == 2 else None

            else:
                error_code, output = bash.run_command(
                    "sudo cat {huc} | egrep 'fetch:|load:|version wanted:|version found:'"
                    .format(huc=huc_file))
                if not error_code:
                    output = output.replace('\t', '').split('\n')
                    # both the fetch and the load lines must report SUCCESS
                    loaded = ('SUCCESS' in output[0]
                              and 'SUCCESS' in output[1])
                    version_wanted = output[2].replace('version wanted: ', '')
                    version_found = output[3].replace('version found: ', '')
                    correct_version = version_wanted == version_found
                    huc_version = version_found if correct_version and loaded else None

        # get the DMC requirements
        dmc_file = os.path.join(dri_path, 'i915_dmc_info')
        if utils.isfile(dmc_file):

            # the content of the file should contain something similar to this:
            # fw loaded: yes\nversion: 1.4
            # or when not loaded:
            # fw loaded: no
            error_code, output = bash.run_command(
                "sudo cat {dmc} | egrep 'loaded:|version:'".format(
                    dmc=dmc_file))
            if not error_code:
                output = output.split('\n')
                status = output[0].split()[2]
                version = output[1].split()[1] if len(output) > 1 else None
                dmc_version = version if status == 'yes' else None

    firmwares = {'guc': guc_version, 'huc': huc_version, 'dmc': dmc_version}

    # print and return the formatted data
    print(json.dumps(firmwares))
    return json.dumps(firmwares)
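As a worked illustration of the version parsing above (using the sample line quoted in the comments):

# 'version: wanted 9.39, found 9.39' -> after replace(',', '').split():
# ['version:', 'wanted', '9.39', 'found', '9.39']
# so version[2] is the wanted version, version[4] is the found version, and
# the firmware version is reported only when the two match.
sample = 'version: wanted 9.39, found 9.39'.replace(',', '').split()
assert sample[2] == sample[4] == '9.39'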