def __output_is_set(self, output):
        query_cmd = "%s -q | grep '%s[0-9] connected' -n" % \
            (self.XRANDR_PATH, output)
        start_line = int(
            utils.system_output(
                graphics_utils.xcommand(query_cmd)).split(':')[0])

        # Grab up to 100 lines of trailing context (to be safe) so the next
        # connected output can be located.
        query_cmd = \
            "%s -q | grep '%s[0-9] connected' -n -A 100 | grep connected" % \
                (self.XRANDR_PATH, output)

        try:
            end_line = int(
                utils.system_output(graphics_utils.xcommand(
                    query_cmd)).split('\n')[1].split('-')[0])
        except:
            logging.info("End line not found, assuming last output")
            end_line = -1

        if end_line != -1:
            lines_between = end_line - start_line - 1
        else:
            lines_between = 100
        query_cmd = "%s -q | grep '%s[0-9] connected' -A %d | grep \\*" % \
                (self.XRANDR_PATH, output, lines_between)
        try:
            utils.system(graphics_utils.xcommand(query_cmd))
        except:
            raise error.TestFail("%s not set with monitor_reconfigure" %
                                 output)
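For reference, a minimal standalone sketch of the same check done by parsing a single xrandr -q invocation in Python instead of chaining grep calls and doing line-number arithmetic; the function name and arguments are illustrative, not part of the original test.

import re
import subprocess

def output_is_set(output_name, xrandr_text=None):
    """Return True if an output matching output_name is connected and has an
    active mode (a mode line marked with '*')."""
    if xrandr_text is None:
        xrandr_text = subprocess.check_output(['xrandr', '-q'],
                                              universal_newlines=True)
    in_section = False
    for line in xrandr_text.splitlines():
        if re.match(r'%s[0-9] connected' % re.escape(output_name), line):
            in_section = True
            continue
        # Any new output header (e.g. "DP1 disconnected ...") ends the section.
        if in_section and re.match(r'\S+ (dis)?connected', line):
            return False
        if in_section and '*' in line:
            return True
    return False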
 def __configure_and_check_output(self, output):
     connected = self.__output_connected(output)
     if not connected:
         logging.warning("%s port detected but no connected device" %
                         output)
         return False
     else:
         #TODO([email protected]) - Verify this is synchronous.
         utils.system(graphics_utils.xcommand(self.RECONFIG_PATH))
         self.__output_is_set(output)
         return True
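A hypothetical companion helper, sketched here under the assumption that it sits on the same test class as the methods above; the candidate output names are placeholders rather than values from the original test.

def _configure_all_outputs(self, candidates=('HDMI', 'DP', 'VGA')):
    """Probe each candidate output and return the names that were configured."""
    configured = []
    for name in candidates:
        # Placeholder names: real tests would pass the ports they care about.
        if self.__query_for_output(name) and \
                self.__configure_and_check_output(name):
            configured.append(name)
    return configured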
 def run_once(self):
     cmd = os.path.join(self.srcdir, 'gbmtest')
     cmd = graphics_utils.xcommand(cmd)
     result = utils.run(cmd,
                        stderr_is_expected=False,
                        stdout_tee=utils.TEE_TO_LOGS,
                        stderr_tee=utils.TEE_TO_LOGS,
                        ignore_status=True)
     report = re.findall(r'\[  PASSED  \]', result.stdout)
     if not report:
         raise error.TestFail('Gbm test failed (' + result.stdout + ')')
 def run_v4l2_capture_test(self, fail_okay, options):
     executable = os.path.join(self.bindir, "media_v4l2_test")
     try:
         cmd = "%s %s" % (executable, " ".join(options))
         cmd = graphics_utils.xcommand(cmd)
         logging.info("Running %s" % cmd)
         stdout = utils.system_output(cmd, retain_output=True)
     except:
         if fail_okay:
             stdout = ""
             return (False, stdout)
         else:
             raise
     else:
         return (True, stdout)
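A hypothetical call site for the helper above, again assuming it lives on the same test class; the device option is only a placeholder, not a documented media_v4l2_test flag.

def _try_default_capture(self):
    # fail_okay=True lets the test continue when no usable capture device
    # is present; the option below is an illustrative placeholder only.
    okay, stdout = self.run_v4l2_capture_test(fail_okay=True,
                                              options=['--device=/dev/video0'])
    if not okay:
        logging.info('No usable capture device; skipping capture checks.')
    return okay, stdout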
Example #5
    def run_once(self):
        cmd_gl = os.path.join(self.srcdir, 'SanOGL')
        cmd_gles = os.path.join(self.srcdir, 'SanOGLES')
        cmd_gles_s = os.path.join(self.srcdir, 'SanOGLES_S')
        if os.path.isfile(cmd_gl):
            cmd = cmd_gl
        elif os.path.isfile(cmd_gles):
            cmd = cmd_gles
        elif os.path.isfile(cmd_gles_s):
            cmd = cmd_gles_s
        else:
            raise error.TestFail(
                'Failed: Could not locate SanAngeles executable: '
                '%s, %s or %s.  Test setup error.' %
                (cmd_gl, cmd_gles, cmd_gles_s))

        cmd += ' ' + utils.graphics_platform()
        cmd = graphics_utils.xcommand(cmd)
        result = utils.run(cmd,
                           stderr_is_expected=False,
                           stdout_tee=utils.TEE_TO_LOGS,
                           stderr_tee=utils.TEE_TO_LOGS,
                           ignore_status=True)

        report = re.findall(r'frame_rate = ([0-9.]+)', result.stdout)
        if not report:
            raise error.TestFail(
                'Failed: Could not find frame_rate in stdout (' +
                result.stdout + ') ' + result.stderr)

        frame_rate = float(report[0])
        logging.info('frame_rate = %.1f', frame_rate)
        self.write_perf_keyval({'frames_per_sec_rate_san_angeles': frame_rate})
        self.output_perf_value(description='fps',
                               value=frame_rate,
                               units='fps',
                               higher_is_better=True)
        if 'error' in result.stderr.lower():
            raise error.TestFail('Failed: stderr while running SanAngeles: ' +
                                 result.stderr + ' (' + report[0] + ')')
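A quick illustration of what the frame_rate regex above extracts; the sample line is invented and only shows the shape of output the parser expects.

import re

sample_stdout = 'frame_rate = 59.94'
rates = re.findall(r'frame_rate = ([0-9.]+)', sample_stdout)
assert rates and float(rates[0]) == 59.94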
Example #6
 def __run_x_cmd(self, cmd):
     cmd = graphics_utils.xcommand(cmd)
     result = utils.system_output(cmd,
                                  retain_output=True,
                                  ignore_status=True)
     return result
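A hypothetical caller, assuming it sits on the same test class and that logging is imported as in the surrounding module; xrandr --query is just one example of a command that needs the X environment xcommand() sets up.

def _log_xrandr_state(self):
    # Dump the current display configuration for debugging.
    out = self.__run_x_cmd('xrandr --query')
    logging.debug('xrandr state:\n%s', out)
    return out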
Example #7
    def run_once(self, test='cros-driver.py', args=[]):
        parser = OptionParser()
        parser.add_option('-t',
                          '--test-name',
                          dest='testName',
                          default='',
                          help='Run a specific piglit test.')
        options, args = parser.parse_args(args)
        gpu_family = utils.get_gpu_family()
        logging.info('Detected gpu family %s.', gpu_family)
        # TODO(djkurtz): Delete this once piglit runs on mali/tegra.
        if gpu_family in ['mali', 'tegra']:
            logging.info('Not running any tests, passing by default.')
            return

        # Keep a copy of stdout in piglit-run.log.
        log_path = os.path.join(self.outputdir, 'piglit-run.log')
        # Keep the html results in the cros-driver directory.
        results_path = os.path.join(self.outputdir, 'cros-driver')
        # The location of the piglit executable script.
        run_path = os.path.join(self.piglit_path, 'bin/piglit')
        summary = ''
        if not os.path.exists(run_path):
            raise error.TestError('piglit not found at %s' % self.piglit_path)

        os.chdir(self.piglit_path)
        logging.info('cd %s', os.getcwd())
        # Piglit by default wants to run multiple tests in separate processes
        # concurrently. Strictly serialize this using --no-concurrency.
        # Now --dmesg also implies no concurrency but we want to be explicit.
        flags = 'run -v --dmesg --no-concurrency'
        if options.testName != '':
            flags = flags + ' -t ' + options.testName
        cmd = 'python %s %s %s %s' % (run_path, flags, test, self.outputdir)
        # Pipe stdout and stderr into piglit-run.log for later analysis.
        cmd = cmd + ' | tee ' + log_path
        cmd = graphics_utils.xcommand(cmd)
        logging.info(cmd)
        utils.run(cmd,
                  stderr_is_expected=False,
                  stdout_tee=utils.TEE_TO_LOGS,
                  stderr_tee=utils.TEE_TO_LOGS)

        # Make sure logs get written before continuing.
        utils.run('sync')
        # Convert results.json file to human readable html.
        cmd = ('python %s summary html --overwrite -e all %s %s/results.json' %
               (run_path, results_path, self.outputdir))
        utils.run(cmd,
                  stderr_is_expected=False,
                  stdout_tee=utils.TEE_TO_LOGS,
                  stderr_tee=utils.TEE_TO_LOGS)
        # Make sure logs get written before continuing.
        utils.run('sync')

        # Count number of pass, fail, warn and skip in piglit-run.log (could
        # also use results.json)
        with open(log_path, 'r') as f:
            summary = f.read()
        if not summary:
            raise error.TestError('Test summary was empty')

        # Output counts for future processing.
        keyvals = {}
        for k in ['pass', 'fail', 'crash', 'warn', 'skip']:
            num = len(re.findall(k + ' :: ', summary))
            keyvals['count_subtests_' + k] = num
            logging.info('Piglit: %d %s', num, k)
            self.output_perf_value(description=k,
                                   value=num,
                                   units='count',
                                   higher_is_better=(k == 'pass'))

        self.write_perf_keyval(keyvals)
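An alternative sketch for the counting loop above: scan the log once with a single alternation regex instead of one findall per status. This is only a behaviour-preserving variant of the original logic, not code from the test.

import re
from collections import Counter

def count_piglit_results(summary_text,
                         statuses=('pass', 'fail', 'crash', 'warn', 'skip')):
    """Count 'status :: ' markers in piglit-run.log style text."""
    found = re.findall(r'(%s) :: ' % '|'.join(statuses), summary_text)
    counts = Counter(found)
    return dict(('count_subtests_' + s, counts.get(s, 0)) for s in statuses)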
 def __output_connected(self, output):
     query_cmd = "%s -q | grep '%s[0-9] connected' -c" % \
         (self.XRANDR_PATH, output)
     xrandr_out = utils.system_output(graphics_utils.xcommand(query_cmd),
                                      ignore_status=True)
     return int(xrandr_out) > 0
 def __query_for_output(self, output):
     query_cmd = "%s -q | grep %s -c" % (self.XRANDR_PATH, output)
     xrandr_out = utils.system_output(graphics_utils.xcommand(query_cmd),
                                      ignore_status=True)
     return int(xrandr_out) > 0
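Finally, a standalone sketch that folds the two grep-based checks above into one Python pass over xrandr -q; the function and argument names are illustrative only.

import subprocess

def xrandr_output_counts(output):
    """Return (present, connected) counts for outputs whose names start with
    `output`, mirroring the two grep queries above."""
    text = subprocess.check_output(['xrandr', '-q'], universal_newlines=True)
    present = connected = 0
    for line in text.splitlines():
        if line.startswith(output):
            present += 1
            # ' connected' (with the leading space) does not match 'disconnected'.
            if ' connected' in line:
                connected += 1
    return present, connected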