Example 1
 def test_is_process_running_with_command(self):
     command = 'sleep 10 &'
     subprocess.Popen(command,
                      stdout=subprocess.PIPE,
                      stderr=subprocess.PIPE,
                      shell=True,
                      executable='/bin/bash')
     self.assertTrue(bash.is_process_running('sleep 10'))
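
Every example on this page exercises bash.is_process_running, whose implementation is not shown here. The sketch below is only an assumption of how a pgrep-based helper with a compatible signature could look; the optional pid_to_exclude parameter mirrors the two-argument call in Example 2 and is likewise an assumption, not code taken from the project:

import subprocess

def is_process_running(pattern, pid_to_exclude=None):
    """Return True if a process whose command line matches `pattern` is running.

    If pid_to_exclude is given, that PID is ignored, so a script can look for
    *other* instances of itself without matching its own process.
    """
    # pgrep -f matches against the full command line and prints one PID per
    # line; it produces no output when nothing matches.
    result = subprocess.run(['pgrep', '-f', pattern],
                            stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
    pids = result.stdout.decode('utf-8').split()
    if pid_to_exclude is not None:
        pids = [pid for pid in pids if pid != str(pid_to_exclude)]
    return bool(pids)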
Example 2
	def check_pid(self):
		"""Check the PID before compile a kernel

		The aim of this function is to check if any other instance of this script
		is running in order to not run due to the following valid reason:
		- when this script is running it will use all the cores in the system,
		and if other instance is running it can crash the system due to overload.

		:return
			- self.kernel_commit_built: this value will contains a kernel commit
			id (7 digits) if this script built a new kernel from drm-tip
			otherwise the default value is None.
		"""
		script_name = os.path.basename(__file__)
		current_pid = os.getpid()

		if bash.is_process_running(script_name, current_pid):
			self.log.error('another instance of {0} is running'.format(script_name))
			sys.exit(1)

		self.check_kernel()

		return self.kernel_commit_built
Example 3
    def run_rendercheck(self):
        """Run rendercheck test suite in the current system.

		The aim of this function is to run automatically rendercheck with
		ezbench tool.
		"""
        ezbench_path = os.path.join('/home', self.dut_user, 'ezbench')
        ezbench_script = os.path.join(ezbench_path, 'ezbench')
        ezbench_campaigns = self.data['suite_conf']['igt_iterations']
        ezbench_folder_results = 'sbench_rendercheck'
        ezbench_folder_results_full_path = os.path.join(
            ezbench_path, 'logs', ezbench_folder_results)
        ezbench_wait_for_results = 20
        ezbench_cmd_setup_environment = (
            '{script} -c HEAD -r {campaigns} '
            '-b x11:rendercheck -p x11-gl {results}'.format(
                script=ezbench_script, campaigns=ezbench_campaigns,
                results=ezbench_folder_results))
        ezbench_cmd_run_tests = '{script} sbench_rendercheck start'.format(
            script=ezbench_script)

        self.log.info('setting the environment for rendercheck')
        output = os.system('{cmd} 2>> {log}'.format(
            cmd=ezbench_cmd_setup_environment, log=self.log_file))

        if output:
            self.log.error('the environment could not be set')
            self.log.info('closing the log')
            sys.exit(1)

        # wait for runner.sh to finish before issuing the second command that
        # runs rendercheck, since that command checks whether runner.sh is
        # still running
        self.log.info('waiting for (runner.sh) to finish')

        while bash.is_process_running('runner.sh'):
            continue

        self.log.info('runner.sh: is not running')

        self.log.info('the environment has been set successfully')
        utils.timer('start')
        self.log.info('running rendercheck')
        output = os.system('{cmd} 2>> {log}'.format(cmd=ezbench_cmd_run_tests,
                                                    log=self.log_file))

        if output:
            self.log.error('an error occurred while running rendercheck')
            self.log.info('closing the log')
            sys.exit(1)

        # check if ezbench generated the folder for results files
        if utils.isdir(ezbench_folder_results_full_path):
            # check if ezbench generated the results files
            if utils.wait_for_file_existence(ezbench_folder_results_full_path,
                                             r'\#', ezbench_wait_for_results):
                self.log.info('rendercheck ran successfully')
                elapsed_time = utils.timer('stop', print_elapsed_time=False)
                self.log.info(elapsed_time)

                # reporting to TestReportCenter
                ezbench_csv_file_path = self.report_to_trc(
                    ezbench_path, ezbench_folder_results)
                # updating the watchdog
                trc_link, pass_test, fail_test, total_test, \
                    pass_rate_of_executed = self.update_watchdog(
                        ezbench_csv_file_path, elapsed_time)
                # sending an email notification
                self.send_email(trc_link, pass_test, fail_test, total_test,
                                pass_rate_of_executed, elapsed_time)
                # unlock the system
                self.unlock_system()

                # creating a control file so that this script does not run again
                self.log.info('creating a control file')
                with open(self.control_file, 'w') as ctl_file:
                    ctl_file.write('rendercheck has finished')

            else:
                self.log.error(
                    'ezbench did not generate the results files in {0} seconds'
                    .format(ezbench_wait_for_results))
                sys.exit(1)
        else:
            self.log.error(
                'ezbench did not generate the results folder: {0}'.format(
                    ezbench_folder_results_full_path))
            sys.exit(1)
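
The wait on runner.sh above spins in a tight loop, which keeps a CPU core busy while waiting. Below is a minimal sketch of a gentler polling wait; the helper name, poll interval, and timeout are assumptions for illustration only, and the bash helper module used throughout this page is assumed to be importable:

import time

def wait_for_process_to_finish(pattern, poll_seconds=5, timeout_seconds=3600):
    """Poll bash.is_process_running until the matching process exits.

    Returns True if the process is gone, False if the timeout was reached.
    """
    deadline = time.time() + timeout_seconds
    while bash.is_process_running(pattern):
        if time.time() > deadline:
            return False
        time.sleep(poll_seconds)  # sleep between checks instead of busy-waiting
    return True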
Example 4
 def test_is_process_running_with_non_running_command(self):
     self.assertFalse(bash.is_process_running('random process name'))
Example 5
 def test_is_process_running_with_pid(self):
     self.assertTrue(bash.is_process_running('1'))
Example 6
    def get_statistics(self):
        """Collects platform and test execution data.

		This is the main function of the dut_watcher module. It's main purpose is
		to collect platform based data that is independent from the test execution
		and also gather statistics from the current execution.
		:return: An HTTP response that contains a dictionary with all the data
		from the platform and test execution. Even in the case when for some reason
		there is no data available, it still returns a dictionary with all the keys
		but empty values.
		"""

        raw_data = {}

        # Get platform data (status-independent data)
        # -------------------------------------------
        app.logger.info('collecting the platform data (status independent)')

        # Get the DUT's time; this can be useful to troubleshoot issues on the
        # watchdog side
        raw_data['dut_time'] = str(datetime.datetime.now())

        # Platform distribution
        sys_info = platform.platform()
        distro = sys_info.split(
            'with-')[1] if 'with-' in sys_info else sys_info
        raw_data['distro'] = distro
        app.logger.debug('distro: {0}'.format(distro))

        # Platform uptime
        uptime_minutes = bash.get_output("awk '{print $0/60;}' /proc/uptime")
        raw_data['uptime_minutes'] = uptime_minutes
        app.logger.debug('platform uptime: {0}'.format(uptime_minutes))

        # Getting the networking boot time
        net_boot_time = bash.get_output(
            "systemd-analyze blame | grep networking.service").split()
        net_boot_time = net_boot_time[0] if len(net_boot_time) > 0 else 'N/A'
        raw_data['net_boot_time'] = net_boot_time
        app.logger.debug('networking boot time: {0}'.format(net_boot_time))

        # Getting the displays attached to the platform
        displays = bash.get_output(
            "sudo cat /sys/kernel/debug/dri/0/i915_display_info 2> /dev/null | "
            "grep \"^connector\" | grep -we \"connected\" | awk -F \"type \" "
            "'{{print $2}}' | awk '{{print $1}}' | sed 's/,//g'").split()
        raw_data['displays'] = displays
        app.logger.debug('displays: {0}'.format(displays))
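        # The shell pipeline above filters i915_display_info for connectors
        # reported as "connected" and keeps only the connector type field, so
        # `displays` ends up as a list of connector type names.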

        # Getting information related to the i915 Intel driver
        # How this works
        # =====================================================================
        # When the i915 module is loaded, "check_i915_module" usually holds a
        # value > 0, which means that X other modules are using i915 (i.e. the
        # driver is loaded and in use); if the value is 0, no modules are using
        # i915 (i.e. the driver is effectively unloaded).
        check_i915_module = int(
            bash.get_output('lsmod | grep ^i915').split()[2])
        i915_module = bool(check_i915_module)
        raw_data['i915_module'] = i915_module
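        # For reference, the lsmod line parsed above has the general form
        # "i915 <size> <used-by count> <modules...>", so split()[2] is the
        # "used by" count that this block checks.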

        # Get test data (status-independent data)
        # ---------------------------------------
        # Getting a list of all the tests
        test_list_file = os.path.join(self.tests_path, 'test-list.txt')
        test_list = bash.get_output(
            "cat {testlist} | sed -e '/TESTLIST/d'".format(
                testlist=test_list_file)).split()

        # Get a list of tests with their sub-tests and store them as
        # test@subtest; if a test has no sub-tests, just store the test name
        overall_test_list = []
        for test in test_list:
            sub_tests = bash.get_output(
                "{tests_path}/{test} --list-subtests".format(
                    tests_path=self.tests_path, test=test)).split()
            if sub_tests:
                for sub_test in sub_tests:
                    overall_test_list.append(test + '@' + sub_test)
            else:
                overall_test_list.append(test)

        # Get the total number of tests + sub-tests
        total_tests = len(overall_test_list)
        raw_data['total_tests'] = total_tests
        app.logger.debug('tests: {0}'.format(total_tests))

        # Total number of tests to run
        testlist = os.path.join(self.tests_path, 'intel-ci',
                                self.current_testlist)
        with open(testlist) as file_mgr:
            tests_in_scope = sum(1 for _ in file_mgr)
        raw_data['tests_in_scope'] = tests_in_scope
        app.logger.debug('tests in scope: {0}'.format(tests_in_scope))

        # out of scope tests
        tests_out_of_scope = total_tests - tests_in_scope
        raw_data['tests_out_of_scope'] = tests_out_of_scope
        app.logger.debug('tests out of scope: {0}'.format(tests_out_of_scope))

        # Get the overall time taken so far
        overall_time = self.get_overall_time()
        raw_data['overall_time'] = overall_time
        app.logger.info('overall time: {0}'.format(overall_time))

        # Get test execution data (status-dependent data)
        # -----------------------------------------------
        app.logger.info(
            'collecting the test execution data (status dependent)')

        # get information about Piglit
        raw_data['piglit_running'] = bash.is_process_running('piglit')
        piglit_uptime = '0'
        if raw_data['piglit_running']:
            err_code, piglit_process = bash.run_command("pgrep -of piglit")
            if not err_code:
                piglit_uptime = bash.run_command(
                    'ps -o etimes= -p {0}'.format(piglit_process))[1]
        raw_data['piglit_uptime'] = piglit_uptime
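        # Note: 'ps -o etimes= -p <pid>' prints the elapsed time of the process
        # in seconds, so piglit_uptime holds piglit's age in whole seconds
        # (as a string).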

        execution_data = self.get_execution_status(tests_in_scope)
        raw_data.update(execution_data)

        # If there is execution data, calculate the current execution progress
        if execution_data.get('tests_executed', False):
            current_progress = (round(
                int(execution_data['tests_executed']) / tests_in_scope * 100,
                2))
            raw_data['current_progress'] = current_progress
            app.logger.info(
                'execution progress: {0}%'.format(current_progress))

        # Format the data in a standardized response
        # ------------------------------------------
        formatted_data = self.format_data(raw_data)

        # serialize once, then print and return the formatted data
        formatted_json = json.dumps(formatted_data, sort_keys=True)
        print(formatted_json)
        return formatted_json
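
get_statistics relies heavily on bash.get_output and bash.run_command, whose implementations are not shown on this page. The sketch below is an assumption of what subprocess-based wrappers with compatible return values could look like (get_output returning a stripped stdout string, run_command returning an (exit code, output) pair); it is not the project's actual code:

import subprocess

def get_output(cmd):
    """Run a shell command and return its stripped stdout, ignoring the exit code."""
    result = subprocess.run(cmd, shell=True, executable='/bin/bash',
                            stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
    return result.stdout.decode('utf-8').strip()

def run_command(cmd):
    """Run a shell command and return an (exit_code, stripped stdout) tuple."""
    result = subprocess.run(cmd, shell=True, executable='/bin/bash',
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    return result.returncode, result.stdout.decode('utf-8').strip()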