Example #1
    def run_once(self, arc_mode=None):
        # If we are in arc_mode, do not report failures to perf dashboard.
        if arc_mode:
            self._test_failure_report_enable = False

        self.add_failures('Graphics_Idle')
        # We use kiosk mode to make sure Chrome is idle.
        with chrome.Chrome(logged_in=False,
                           extra_browser_args=['--kiosk'],
                           arc_mode=arc_mode):
            # Try to protect against runaway previous tests.
            if not utils.wait_for_idle_cpu(20.0, 0.1):
                logging.warning('Could not get idle CPU before running tests.')
            self._gpu_type = utils.get_gpu_family()
            self._cpu_type = utils.get_cpu_soc_family()
            self._board = utils.get_board()
            errors = ''
            errors += self.verify_graphics_dvfs()
            errors += self.verify_graphics_fbc()
            errors += self.verify_graphics_psr()
            errors += self.verify_graphics_gem_idle()
            errors += self.verify_graphics_i915_min_clock()
            errors += self.verify_graphics_rc6()
            errors += self.verify_lvds_downclock()
            errors += self.verify_short_blanking()
            if errors:
                raise error.TestFail('Failed: %s' % errors)
        self.remove_failures('Graphics_Idle')
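The add_failures/remove_failures bracket above records a pending failure up
front and only clears it when the whole block succeeds; if error.TestFail is
raised, the failure stays recorded. A minimal sketch of that pattern as a
context manager (hypothetical helper, not part of the original test):

import contextlib

@contextlib.contextmanager
def pending_failure(test, name):
    # Record the failure up front; if the body raises, it stays recorded.
    test.add_failures(name)
    yield
    # Only reached when the body completed without raising.
    test.remove_failures(name)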
Example #2
    def run_once(self):
        gpu_family = utils.get_gpu_family()
        if not self._executables:
            raise error.TestFail('Failed: No executables found on %s' %
                                 gpu_family)

        logging.debug('Running %d executables', len(self._executables))
        for executable in self._executables:
            try:
                cmd = '%s %s' % (executable, _OPTION)
                stdout = arc._android_shell(cmd)
            except Exception:
                logging.error('Exception running %s', cmd)
                raise error.TestFail('Failed: gralloc on %s' % gpu_family)
            # Look for the regular expression indicating failure.
            for line in stdout.splitlines():
                match = re.search(r'\[  FAILED  \]', line)
                if match:
                    self.add_failures(line)
                    logging.error(line)
                else:
                    logging.debug(line)

        if self.get_failures():
            raise error.TestFail('Failed: gralloc on %s in %s.' %
                                 (gpu_family, self.get_failures()))
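The per-line scan above assumes gtest-style output, where each failing test
prints a '[  FAILED  ]' marker. A minimal standalone sketch of that scan (the
helper name failed_lines is hypothetical):

import re

_FAILED_RE = re.compile(r'\[  FAILED  \]')

def failed_lines(stdout):
    # Return only the lines of gtest-style output that report a failure.
    return [line for line in stdout.splitlines() if _FAILED_RE.search(line)]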
Example #3
    def run_once(self):
        failures = []
        # TODO(ihf): shard this test into multiple control files.
        test_names = [
            'alloc_varying_sizes', 'alloc_usage', 'api', 'gralloc_order',
            'uninitialized_handle', 'freed_handle', 'mapping', 'perform',
            'ycbcr', 'async'
        ]

        # Run the tests and capture stdout.
        for test_name in test_names:
            cmd = '%s %s' % (_ANDROID_EXEC, test_name)
            try:
                stdout = arc._android_shell(cmd)
            except Exception:
                logging.error('Exception running %s', cmd)
                failures.append(test_name)
                continue
            # Look for the regular expression indicating success.
            match = re.search(r'\[  PASSED  \]', stdout)
            if not match:
                failures.append(test_name)
                logging.error(stdout)
            else:
                logging.debug(stdout)

        if failures:
            gpu_family = utils.get_gpu_family()
            raise error.TestFail('Failed: gralloc on %s in %s.' %
                                 (gpu_family, failures))
Example #4
    def run_once(self, test_duration_secs=2700, fullscreen=True):
        """Finds a browser with telemetry, and runs the test.

        @param test_duration_secs: The test duration in seconds.
        @param fullscreen: Whether to run the test in fullscreen.
        """
        # To avoid a 0 ms result on fast machines like samus, the workload was
        # increased. Unfortunately, that makes running on slow machines
        # impractical without deviating from upstream too much.
        if utils.get_gpu_family() == 'pinetrail':
            # TODO(ihf): return a TestPass(message) once available.
            logging.warning('Test is too slow to run regularly.')
            return

        self._test_duration_secs = test_duration_secs
        ext_paths = []
        if fullscreen:
            ext_paths.append(
                os.path.join(self.autodir, 'deps', 'graphics',
                             'graphics_test_extension'))

        with chrome.Chrome(logged_in=False,
                           extension_paths=ext_paths,
                           init_network_controller=True) as cr:
            websrc_dir = os.path.join(self.autodir, 'deps', 'webgl_perf', 'src')
            if not cr.browser.platform.SetHTTPServerDirectories(websrc_dir):
                raise error.TestFail('Failed: Unable to start HTTP server')
            test_url = cr.browser.platform.http_server.UrlOf(
                os.path.join(websrc_dir, 'index.html'))
            self.run_performance_test(cr.browser, test_url)
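For context, an autotest control file would invoke this test with keyword
arguments matching the docstring; a hedged sketch (the test name
graphics_WebGLPerf is a placeholder):

# Control-file sketch; 'job' is provided by the autotest harness.
job.run_test('graphics_WebGLPerf',
             test_duration_secs=2700,
             fullscreen=True)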
Example #5
    def should_run(self):
        """Indicate if the test should be run on current configuration."""
        supported_apis = (
            graphics_utils.GraphicsApiHelper().get_supported_apis())
        num_displays = graphics_utils.get_num_outputs_on()
        gpu_type = utils.get_gpu_family()
        soc = utils.get_cpu_soc_family()
        kernel_version = os.uname()[2]
        if num_displays == 0 and self._opts['display_required']:
            # If a test needs a display and we don't have a display,
            # consider it a pass.
            logging.warning('No display connected, skipping test.')
            return False
        if self._opts['vulkan_required'] and 'vk' not in supported_apis:
            # If a test needs vulkan to run and we don't have it,
            # consider it a pass.
            logging.warning('Vulkan is required by test but is not '
                            'available on system. Skipping test.')
            return False
        if self._opts['min_kernel_version']:
            min_kernel_version = self._opts['min_kernel_version']
            if utils.compare_versions(kernel_version, min_kernel_version) < 0:
                logging.warning('Test requires kernel version >= %s, '
                                'have version %s. Skipping test.',
                                min_kernel_version, kernel_version)
                return False
        if self.name == 'atomictest' and gpu_type == 'baytrail':
            logging.warning('Baytrail is on kernel v4.4, but there is no '
                            'intention to enable atomic.')
            return False
        return True
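A minimal usage sketch of how should_run() gates the test body (hypothetical
harness code, not from the original test):

    def run_once(self):
        if not self.should_run():
            # Unsupported configuration; treat as a pass rather than a fail.
            logging.info('Skipping test on this configuration.')
            return
        self.run_actual_test()  # hypothetical test body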
Example #6
    def run_once(self, arc_mode=None):
        # If we are in arc_mode, do not report failures to perf dashboard.
        if arc_mode:
            self._test_failure_report_enable = False

        self.add_failures('graphics_Idle')
        with chrome.Chrome(logged_in=True, arc_mode=arc_mode) as cr:
            # The New Tab Page contains the Google doodle which can cause
            # arbitrary side effects. Hide it by going to a neutral page.
            if not cr.browser.tabs:
                cr.browser.tabs.New()
            tab = cr.browser.tabs[0]
            tab.Navigate('chrome://version')
            # Try to protect against runaway previous tests.
            if not utils.wait_for_idle_cpu(60.0, 0.1):
                logging.warning('Could not get idle CPU before running tests.')
            self._gpu_type = utils.get_gpu_family()
            self._cpu_type = utils.get_cpu_soc_family()
            self._board = utils.get_board()
            errors = ''
            errors += self.verify_graphics_dvfs()
            errors += self.verify_graphics_fbc()
            errors += self.verify_graphics_psr()
            errors += self.verify_graphics_gem_idle()
            errors += self.verify_graphics_i915_min_clock()
            errors += self.verify_graphics_rc6()
            errors += self.verify_lvds_downclock()
            errors += self.verify_short_blanking()
            if errors:
                raise error.TestFail('Failed: %s' % errors)
        self.remove_failures('graphics_Idle')
Example #7
    def initialize(self):
        self._board = utils.get_board()
        self._cpu_type = utils.get_cpu_soc_family()
        self._gpu_type = utils.get_gpu_family()

        # deqp may depend on libraries that are present only on test images.
        # Those libraries are installed in /usr/local.
        self._env = os.environ.copy()
        old_ld_path = self._env.get('LD_LIBRARY_PATH', '')
        if old_ld_path:
            self._env['LD_LIBRARY_PATH'] = ('/usr/local/lib:/usr/local/lib64:'
                                            + old_ld_path)
        else:
            self._env['LD_LIBRARY_PATH'] = '/usr/local/lib:/usr/local/lib64'

        # Determine which executables should be run. Right now we never run egl.
        major, minor = graphics_utils.get_gles_version()
        if major is None or minor is None:
            raise error.TestFail(
                'Failed: Could not get gles version information (%s, %s).' %
                (major, minor))
        logging.info('Found gles%d.%d.', major, minor)
        if major >= 2:
            self._can_run_executables.append('gles2/deqp-gles2')
        if major >= 3:
            self._can_run_executables.append('gles3/deqp-gles3')
            if major > 3 or minor >= 1:
                self._can_run_executables.append('gles31/deqp-gles31')

        # If libvulkan is installed, then assume the board supports vulkan.
        has_libvulkan = False
        for libdir in ('/usr/lib', '/usr/lib64', '/usr/local/lib',
                       '/usr/local/lib64'):
            if os.path.exists(os.path.join(libdir, 'libvulkan.so')):
                has_libvulkan = True

        if (has_libvulkan and os.path.exists(
                '/usr/local/deqp/external/vulkancts/modules/vulkan/deqp-vk')):
            self._can_run_executables.append(
                'external/vulkancts/modules/vulkan/deqp-vk')

        self._services = service_stopper.ServiceStopper(['ui', 'powerd'])
        # Valid choices are fbo and pbuffer. The latter avoids dEQP assumptions.
        self._surface = 'pbuffer'
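The LD_LIBRARY_PATH handling in initialize() above prepends the test-image
library directories. A minimal standalone sketch of the same logic (the helper
name prepend_ld_library_path is hypothetical):

import os

def prepend_ld_library_path(env, paths):
    # Prepend colon-joined paths, preserving any existing LD_LIBRARY_PATH.
    old = env.get('LD_LIBRARY_PATH', '')
    joined = ':'.join(paths)
    env['LD_LIBRARY_PATH'] = (joined + ':' + old) if old else joined

env = os.environ.copy()
prepend_ld_library_path(env, ['/usr/local/lib', '/usr/local/lib64'])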
Example #8
    def run_once(self):
        try:
            cmd = '%s %s' % (_ANDROID_EXEC, _OPTION)
            stdout = arc._android_shell(cmd)
        except Exception:
            logging.error('Exception running %s', cmd)
            raise error.TestFail('Failed: gralloc on %s' %
                                 utils.get_gpu_family())
        # Look for the regular expression indicating failure.
        for line in stdout.splitlines():
            match = re.search(r'\[  FAILED  \]', line)
            if match:
                self.add_failures(line)
                logging.error(line)
            else:
                logging.debug(line)

        if self.get_failures():
            gpu_family = utils.get_gpu_family()
            raise error.TestFail('Failed: gralloc on %s in %s.' %
                                 (gpu_family, self.get_failures()))
Example #9
    def run_once(self):
        # Try to protect against runaway previous tests.
        if not utils.wait_for_idle_cpu(20.0, 0.1):
            logging.warning('Could not get idle CPU before running tests.')
        # We use kiosk mode to make sure Chrome is idle.
        with chrome.Chrome(logged_in=False, extra_browser_args=['--kiosk']):
            self._gpu_type = utils.get_gpu_family()
            self._cpu_type = utils.get_cpu_soc_family()
            self._board = utils.get_board()
            errors = ''
            errors += self.verify_graphics_dvfs()
            errors += self.verify_graphics_fbc()
            errors += self.verify_graphics_psr()
            errors += self.verify_graphics_gem_idle()
            errors += self.verify_graphics_i915_min_clock()
            errors += self.verify_graphics_rc6()
            errors += self.verify_lvds_downclock()
            errors += self.verify_short_blanking()
            if errors:
                raise error.TestFail(errors)
Example #10
    def initialize(self):
        super(graphics_dEQP, self).initialize()
        self._api_helper = graphics_utils.GraphicsApiHelper()
        self._board = utils.get_board()
        self._cpu_type = utils.get_cpu_soc_family()
        self._gpu_type = utils.get_gpu_family()

        # deqp may depend on libraries that are present only on test images.
        # Those libraries are installed in /usr/local.
        self._env = os.environ.copy()
        old_ld_path = self._env.get('LD_LIBRARY_PATH', '')
        if old_ld_path:
            self._env['LD_LIBRARY_PATH'] = ('/usr/local/lib:/usr/local/lib64:'
                                            + old_ld_path)
        else:
            self._env['LD_LIBRARY_PATH'] = '/usr/local/lib:/usr/local/lib64'

        self._services = service_stopper.ServiceStopper(['ui', 'powerd'])
        # Valid choices are fbo and pbuffer. The latter avoids dEQP assumptions.
        self._surface = 'pbuffer'
        self._services.stop_services()
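Stopping 'ui' and 'powerd' in initialize() implies a matching cleanup; a
minimal sketch, assuming autotest's ServiceStopper exposes restore_services()
as the counterpart to stop_services():

    def cleanup(self):
        if self._services:
            # Restart any services that initialize() stopped.
            self._services.restore_services()
        super(graphics_dEQP, self).cleanup()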
Example #11
    def run_once(self, test_slice):
        # TODO(ihf): Remove this once Piglit works on freon.
        if utils.is_freon():
            return

        gpu_family = utils.get_gpu_family()
        family = gpu_family
        logging.info('Detected gpu family %s.', gpu_family)

        # TODO(ihf): Delete this once we have a piglit that runs on ARM.
        if gpu_family in ['mali', 'tegra']:
            logging.info('Not running any tests, passing by default.')
            return

        scripts_dir = os.path.join(self.bindir, self.test_scripts)
        family_dir = os.path.join(scripts_dir, family)
        # We don't want to introduce too many combinations, so fall back.
        if not os.path.isdir(family_dir):
            family = 'other'
            family_dir = os.path.join(scripts_dir, family)
        logging.info('Using scripts for gpu family %s.', family)
        # Mark scripts executable if they are not.
        utils.system('chmod +x ' +
                     os.path.join(scripts_dir, '*', 'graphics_PiglitBVT_*.sh'))

        # Kick off test script.
        cmd = (
            'source ' +
            os.path.join(family_dir, 'graphics_PiglitBVT_%d.sh' % test_slice))
        logging.info('Executing cmd = %s', cmd)
        result = utils.run(cmd,
                           stdout_tee=utils.TEE_TO_LOGS,
                           stderr_tee=utils.TEE_TO_LOGS,
                           ignore_status=True)
        tests_failed = result.exit_status
        if tests_failed:
            reason = '%d tests failed on "%s" in slice %d' % (
                tests_failed, gpu_family, test_slice)
            raise error.TestError(reason)
Example #12
    def initialize(self):
        self._gpu_type = utils.get_gpu_family()
        self._cpu_type = utils.get_cpu_soc_family()
        self._board = utils.get_board()
        self._services = service_stopper.ServiceStopper(['ui', 'powerd'])
Example #13
  def run_once(self, options=''):
    gpu_family = bin_utils.get_gpu_family()
    if gpu_family == 'stoney':
      options = '-s 7 -t 1'
      exefile = 'amdgpu_test'
    else:
      options = ''
      exefile = os.path.join(self.srcdir, 'gpureset')
      if not os.path.isfile(exefile):
        raise error.TestFail('Failed: could not locate gpureset executable (' +
                             exefile + ').')

    cmd = '%s %s' % (exefile, options)

    # If UI is running, we must stop it and restore later.
    status_output = utils.system_output('initctl status ui')
    # If chrome is running, result will be similar to:
    #   ui start/running, process 11895
    logging.info('initctl status ui returns: %s', status_output)
    need_restart_ui = status_output.startswith('ui start')
    summary = ''

    # Run the gpureset test in a loop to stress the recovery.
    for i in range(1, self.loops + 1):
      summary += 'graphics_GpuReset iteration %d of %d\n' % (i, self.loops)
      if need_restart_ui:
        summary += 'initctl stop ui\n'
        utils.system('initctl stop ui', ignore_status=True)
        # TODO(ihf): Remove this code if no improvement for issue 409019.
        logging.info('Make sure chrome is dead before triggering hang.')
        utils.system('killall -9 chrome', ignore_status=True)
        time.sleep(3)
      try:
        summary += utils.system_output(cmd, retain_output=True)
        summary += '\n'
      finally:
        if need_restart_ui:
          summary += 'initctl start ui\n'
          utils.system('initctl start ui')

    # Write a copy of stdout to help debug failures. Use a with-statement so
    # the file is closed even when one of the checks below raises.
    results_path = os.path.join(self.outputdir, 'summary.txt')
    with open(results_path, 'w+') as f:
      f.write('# need ui restart: %s\n' % need_restart_ui)
      f.write('# ---------------------------------------------------\n')
      f.write('# [' + cmd + ']\n')
      f.write(summary)
      f.write('\n# -------------------------------------------------\n')
      f.write('# [graphics_GpuReset.py postprocessing]\n')

    # Analyze the output. Sample:
    # [       OK ] graphics_GpuReset
    # [  FAILED  ] graphics_GpuReset
    results = summary.splitlines()
    if not results:
      raise error.TestFail('Failed: No output from test. Check /tmp/' +
                           'test_that_latest/graphics_GpuReset/summary.txt' +
                           ' for details.')
    # Analyze summary and count number of passes.
    pass_count = 0
    for line in results:
      if gpu_family == 'stoney':
        if "passed" in line:
          pass_count += 1
        if "failed" in line:
          raise error.TestFail('Failed: %s' % line)
      else:
        if line.strip().startswith('[       OK ] graphics_GpuReset'):
          pass_count += 1
        if line.strip().startswith('[  FAILED  ] graphics_GpuReset'):
          msg = line.strip()[30:]
          failed_msg = 'Test failed with %s' % msg
          raise error.TestFail('Failed: %s' % failed_msg)

    # Final chance to fail.
    if pass_count != self.loops:
      failed_msg = 'Test failed with incomplete output. System hung? '
      failed_msg += '(pass_count=%d of %d)' % (pass_count, self.loops)
      raise error.TestFail('Failed: %s' % failed_msg)

    # We need to wait a bit for X to come back after the 'start ui'.
    time.sleep(5)
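The stop-ui/start-ui bracket inside the loop above can be expressed as a
context manager; a minimal sketch, assuming autotest's utils module and
upstart's initctl CLI as used in the test:

import contextlib
from autotest_lib.client.bin import utils

@contextlib.contextmanager
def ui_stopped():
    # Stop the ui job, run the body, and always restart ui afterwards.
    utils.system('initctl stop ui', ignore_status=True)
    try:
        yield
    finally:
        utils.system('initctl start ui')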
Example #14
    def run_once(self, test='cros-driver.py', args=None):
        # Avoid a mutable default argument; None means no extra args.
        args = args or []
        parser = OptionParser()
        parser.add_option('-t',
                          '--test-name',
                          dest='testName',
                          default='',
                          help='Run a specific piglit test.')
        options, args = parser.parse_args(args)
        gpu_family = utils.get_gpu_family()
        logging.info('Detected gpu family %s.', gpu_family)
        # TODO(djkurtz): Delete this once piglit runs on mali/tegra.
        if gpu_family in ['mali', 'tegra']:
            logging.info('Not running any tests, passing by default.')
            return

        # Keep a copy of stdout in piglit-run.log.
        log_path = os.path.join(self.outputdir, 'piglit-run.log')
        # Keep the html results in the cros-driver directory.
        results_path = os.path.join(self.outputdir, 'cros-driver')
        # The location of the piglit executable script.
        run_path = os.path.join(self.piglit_path, 'bin/piglit')
        summary = ''
        if not os.path.exists(run_path):
            raise error.TestError('piglit not found at %s' % self.piglit_path)

        os.chdir(self.piglit_path)
        logging.info('cd %s', os.getcwd())
        # Piglit by default wants to run multiple tests in separate processes
        # concurrently. Strictly serialize this using --no-concurrency.
        # Now --dmesg also implies no concurrency but we want to be explicit.
        flags = 'run -v --dmesg --no-concurrency'
        if options.testName != '':
            flags = flags + ' -t ' + options.testName
        cmd = 'python %s %s %s %s' % (run_path, flags, test, self.outputdir)
        # Pipe stdout and stderr into piglit-run.log for later analysis.
        cmd = cmd + ' | tee ' + log_path
        cmd = graphics_utils.xcommand(cmd)
        logging.info(cmd)
        utils.run(cmd,
                  stderr_is_expected=False,
                  stdout_tee=utils.TEE_TO_LOGS,
                  stderr_tee=utils.TEE_TO_LOGS)

        # Make sure logs get written before continuing.
        utils.run('sync')
        # Convert results.json file to human readable html.
        cmd = ('python %s summary html --overwrite -e all %s %s/results.json' %
               (run_path, results_path, self.outputdir))
        utils.run(cmd,
                  stderr_is_expected=False,
                  stdout_tee=utils.TEE_TO_LOGS,
                  stderr_tee=utils.TEE_TO_LOGS)
        # Make sure logs get written before continuing.
        utils.run('sync')

        # Count number of pass, fail, warn and skip in piglit-run.log (could
        # also use results.json)
        with open(log_path, 'r') as f:
            summary = f.read()
        if not summary:
            raise error.TestError('Test summary was empty')

        # Output counts for future processing.
        keyvals = {}
        for k in ['pass', 'fail', 'crash', 'warn', 'skip']:
            num = len(re.findall(k + ' :: ', summary))
            keyvals['count_subtests_' + k] = num
            logging.info('Piglit: %d %s', num, k)
            self.output_perf_value(description=k,
                                   value=num,
                                   units='count',
                                   higher_is_better=(k == 'pass'))

        self.write_perf_keyval(keyvals)
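A small worked example of the counting logic above, assuming (as the regex
does) that each result line in piglit-run.log contains '<status> :: ':

import re

summary = 'pass :: test_a\nfail :: test_b\npass :: test_c\n'
counts = {k: len(re.findall(k + ' :: ', summary))
          for k in ['pass', 'fail', 'crash', 'warn', 'skip']}
# counts == {'pass': 2, 'fail': 1, 'crash': 0, 'warn': 0, 'skip': 0}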