Example 1
    def _find_all_gtestsuites(self, use_whitelist=False, filter_tests=None):
        """Find all the gTest Suites installed on the DUT.

        @param use_whitelist: Only whitelisted tests found on the system will
                              be used.
        @param filter_tests: Only tests that match these globs will be used.
        """
        list_cmd = LIST_TEST_BINARIES_TEMPLATE % {'path': NATIVE_TESTS_PATH}
        gtest_suites_path = self.host.run_output(list_cmd).splitlines()
        gtest_suites = [
            GtestSuite(os.path.basename(path), path, True, '')
            for path in gtest_suites_path
        ]

        if use_whitelist:
            try:
                whitelisted = self._get_whitelisted_tests(WHITELIST_FILE)
                suites_to_run = []
                for suite in gtest_suites:
                    whitelisted_suite = whitelisted.get(suite.name)
                    if whitelisted_suite:
                        # Get the name and path from the suites on the DUT and
                        # get the other args from the whitelist map.
                        suites_to_run.append(
                            GtestSuite(suite.name, suite.path,
                                       whitelisted_suite.run_as_root,
                                       whitelisted_suite.args))
                gtest_suites = suites_to_run
                if len(suites_to_run) != len(whitelisted):
                    whitelist_test_names = set(whitelisted.keys())
                    found_test_names = set([t.name for t in suites_to_run])
                    diff_tests = list(whitelist_test_names - found_test_names)
                    for t in diff_tests:
                        logging.warning('Could not find %s', t)
                    raise error.TestWarn(
                        'Not all whitelisted tests found on the DUT. '
                        'Expected %i tests but only found %i' %
                        (len(whitelisted), len(suites_to_run)))
            except error.GenericHostRunError:
                logging.error('Failed to read whitelist %s', WHITELIST_FILE)

        if filter_tests:
            gtest_suites = [
                t for t in gtest_suites if any(
                    fnmatch.fnmatch(t.path, n) for n in filter_tests)
            ]
            logging.info('Running tests:\n  %s',
                         '\n  '.join(t.path for t in gtest_suites))

        if not gtest_suites:
            raise error.TestWarn('No test executables found on the DUT')
        logging.debug('Test executables found:\n%s',
                      '\n'.join([str(t) for t in gtest_suites]))
        return gtest_suites
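A minimal, self-contained sketch of the whitelist reconciliation used above, on toy data. It assumes GtestSuite is a namedtuple with the fields (name, path, run_as_root, args) implied by the example; the suite names, paths and args below are invented for illustration.

import collections

# Assumed shape of GtestSuite, based on how it is constructed above.
GtestSuite = collections.namedtuple('GtestSuite',
                                    ['name', 'path', 'run_as_root', 'args'])

found = [GtestSuite('net_test', '/data/nativetest/net_test', True, ''),
         GtestSuite('fs_test', '/data/nativetest/fs_test', True, '')]
whitelisted = {'net_test': GtestSuite('net_test', '', False, '--fast'),
               'audio_test': GtestSuite('audio_test', '', True, '')}

# Name and path come from the DUT; run_as_root and args from the whitelist.
to_run = [GtestSuite(s.name, s.path, whitelisted[s.name].run_as_root,
                     whitelisted[s.name].args)
          for s in found if s.name in whitelisted]
missing = set(whitelisted) - set(s.name for s in to_run)
print(to_run)   # only net_test, now with run_as_root=False and '--fast'
print(missing)  # set(['audio_test']) -> this is what triggers the TestWarn
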
Example 2
 def postprocess_iteration(self):
     if self._suspender.successes:
         keyvals = {'suspend_iterations': len(self._suspender.successes)}
         for key in self._suspender.successes[0]:
             values = [result[key] for result in self._suspender.successes]
             keyvals[key + '_mean'] = numpy.mean(values)
             keyvals[key + '_stddev'] = numpy.std(values)
             keyvals[key + '_min'] = numpy.amin(values)
             keyvals[key + '_max'] = numpy.amax(values)
         self.write_perf_keyval(keyvals)
     if self._suspender.failures:
         total = len(self._suspender.failures)
         iterations = len(self._suspender.successes) + total
         timeout = kernel = firmware = spurious = 0
         for failure in self._suspender.failures:
              if type(failure) is sys_power.SuspendTimeout:
                  timeout += 1
              elif type(failure) is sys_power.KernelError:
                  kernel += 1
              elif type(failure) is sys_power.FirmwareError:
                  firmware += 1
              elif type(failure) is sys_power.SpuriousWakeupError:
                  spurious += 1
         if total == kernel + timeout:
             raise error.TestWarn('%d non-fatal suspend failures in %d '
                     'iterations (%d timeouts, %d kernel warnings)' %
                     (total, iterations, timeout, kernel))
         if total == 1:
             # just throw it as is, makes aggregation on dashboards easier
             raise self._suspender.failures[0]
         raise error.TestFail('%d suspend failures in %d iterations (%d '
                 'timeouts, %d kernel warnings, %d firmware errors, %d '
                 'spurious wakeups)' %
                 (total, iterations, timeout, kernel, firmware, spurious))
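A standalone sketch of the keyval aggregation performed above, using a toy successes list in place of self._suspender.successes; the measurement keys and values are invented for illustration.

import numpy

# Toy stand-in for self._suspender.successes: one dict per iteration.
successes = [{'seconds_system_suspend': 2.1, 'seconds_system_resume': 0.4},
             {'seconds_system_suspend': 2.3, 'seconds_system_resume': 0.5}]

keyvals = {'suspend_iterations': len(successes)}
for key in successes[0]:
    values = [result[key] for result in successes]
    keyvals[key + '_mean'] = numpy.mean(values)
    keyvals[key + '_stddev'] = numpy.std(values)
    keyvals[key + '_min'] = numpy.amin(values)
    keyvals[key + '_max'] = numpy.amax(values)
print(keyvals)  # the dict that write_perf_keyval() would receive
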
Example 3
    def run_telemetry_benchmark(self,
                                benchmark,
                                perf_value_writer=None,
                                *args):
        """Runs a telemetry benchmark on a dut.

        @param benchmark: Benchmark we want to run.
        @param perf_value_writer: Should be an instance with the function
                                  output_perf_value(), if None, no perf value
                                  will be written. Typically this will be the
                                  job object from an autotest test.
        @param args: additional list of arguments to pass to the telemetry
                     execution script.

        @returns A TelemetryResult Instance with the results of this telemetry
                 execution.
        """
        logging.debug('Running telemetry benchmark: %s', benchmark)
        telemetry_script = os.path.join(self._telemetry_path,
                                        TELEMETRY_RUN_BENCHMARKS_SCRIPT)
        result = self._run_telemetry(telemetry_script, benchmark, *args)
        result.parse_benchmark_results()

        if perf_value_writer:
            self._output_perf_value(perf_value_writer, result.perf_data)

        if result.status is WARNING_STATUS:
            raise error.TestWarn('Telemetry Benchmark: %s'
                                 ' exited with Warnings.' % benchmark)
        if result.status is FAILED_STATUS:
            raise error.TestFail('Telemetry Benchmark: %s'
                                 ' failed to run.' % benchmark)

        return result
Example 4
 def run_once(self):
     if utils.get_board() not in ['samus', 'gandof']:
         raise error.TestNAError(
             'Trying to run PSR tests on unsupported board.')
     psr_enabled = self._is_psr_enabled()
     if (not psr_enabled
             and graphics_utils.call_xrandr('--output eDP1 --set psr on')):
          raise error.TestFail('Unable to enable PSR via xrandr.')
     # Start chrome in full screen mode so that there is no blinking cursor
     # or ticking clock on the screen.
     with chrome.Chrome(logged_in=False, extra_browser_args=['--kiosk']):
         # Sample the PSR performance count from debugfs and wait for 20s.
         # At the end of 20s, re-sample the PSR performance count. The time
         # spent in PSR should be close to (20s - <vblankoffdelay>).
         sleep_time_milliseconds = 20 * 1000
         min_occupancy = 0.9 * (sleep_time_milliseconds -
                                self._get_vblank_timeout())
         perf_count_old = self._get_perf_count()
         time.sleep(sleep_time_milliseconds / 1000)
         perf_count_new = self._get_perf_count()
         occupancy_time = perf_count_new - perf_count_old
         if occupancy_time < min_occupancy:
             raise error.TestFail(
                 'PSR occupancy time %dms less than expected.' %
                 occupancy_time)
         # Disable PSR if it was not enabled to begin with.
         if (not psr_enabled and
                 graphics_utils.call_xrandr('--output eDP1 --set psr off')):
             raise error.TestWarn('Unable to disable PSR via xrandr.')
Example 5
 def get_device(self, test):
      if getattr(test, 'device', None):
          device = test.device
      elif self.device:
          device = self.device
      else:
          raise error.TestWarn('No device specified for blktrace')
     return device
Example 6
 def NoDeviceFailure(self, forgive_flaky, message):
     """
      No WiFi device found. Forgivable in some suites, for some boards.
     """
     board = utils.get_board()
     if forgive_flaky and board in self.EXCEPTION_BOARDS:
         return error.TestWarn('Exception (%s): %s' % (board, message))
     else:
         return error.TestFail(message)
Example 7
    def run_once(self):
        """Run the test."""
        errors = ''
        warnings = ''
        funcs = [self.test_cpu, self.test_gpu, self.test_mem]
        for func in funcs:
            error_msg, warning_msg = func()
            errors += error_msg
            warnings += warning_msg

        if errors:
            raise error.TestFail('Failed: %s' % (errors + warnings))
        if warnings:
            raise error.TestWarn('Warning: %s' % warnings)
Example 8
    def finalize(self):
        """
        Analyzes the GPU state and log history, and emits warnings or errors
        if the state changed since initialize. Also makes a note of the Chrome
        version for later usage in the perf-dashboard.
        """
        utils.set_dirty_writeback_centisecs(self.dirty_writeback_centisecs)
        new_gpu_hang = False
        new_gpu_warning = False
        if utils.get_cpu_arch() != 'arm':
            logging.info('Cleanup: Checking for new GPU hangs...')
            with open(self._MESSAGES_FILE, 'r') as messages:
                for line in messages:
                    for hang in self._HANGCHECK:
                        if hang in line and line not in self.existing_hangs:
                            logging.info(line)
                            if any(warn in line
                                   for warn in self._HANGCHECK_WARNING):
                                new_gpu_warning = True
                                logging.warning(
                                    'Saw GPU hang warning during test.')
                            else:
                                new_gpu_hang = True
                                logging.warning(
                                    'Saw GPU hang during test.')

            if not self._run_on_sw_rasterizer and is_sw_rasterizer():
                logging.warning('Finished test on SW rasterizer.')
                raise error.TestFail('Finished test on SW rasterizer.')
            if self._raise_error_on_hang and new_gpu_hang:
                raise error.TestError('Detected GPU hang during test.')
            if new_gpu_hang:
                raise error.TestWarn('Detected GPU hang during test.')
            if new_gpu_warning:
                raise error.TestWarn('Detected GPU warning during test.')
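A standalone sketch of the hang-versus-warning classification above; the pattern lists and log lines below are placeholders, not the real _HANGCHECK / _HANGCHECK_WARNING contents.

HANGCHECK = ['Hangcheck timer elapsed', 'GPU hung']
HANGCHECK_WARNING = ['similar to a previously reported hang']

lines = ['[drm] GPU hung while running the test',
         '[drm] GPU hung, similar to a previously reported hang']

new_gpu_hang = False
new_gpu_warning = False
for line in lines:
    if any(hang in line for hang in HANGCHECK):
        if any(warn in line for warn in HANGCHECK_WARNING):
            new_gpu_warning = True  # known benign pattern: warning only
        else:
            new_gpu_hang = True     # anything else counts as a real hang
print(new_gpu_hang, new_gpu_warning)  # True True
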
Example 9
    def run_telemetry_benchmark(self,
                                benchmark,
                                perf_value_writer=None,
                                *args):
        """Runs a telemetry benchmark on a dut.

        @param benchmark: Benchmark we want to run.
        @param perf_value_writer: Should be an instance with the function
                                  output_perf_value(), if None, no perf value
                                  will be written. Typically this will be the
                                  job object from an autotest test.
        @param args: additional list of arguments to pass to the telemetry
                     execution script.

        @returns A TelemetryResult Instance with the results of this telemetry
                 execution.
        """
        logging.debug('Running telemetry benchmark: %s', benchmark)

        if benchmark not in ON_DUT_WHITE_LIST:
            self._telemetry_on_dut = False

        if self._telemetry_on_dut:
            telemetry_script = os.path.join(DUT_CHROME_ROOT,
                                            TELEMETRY_RUN_BENCHMARKS_SCRIPT)
            self._ensure_deps(self._host, benchmark)
        else:
            telemetry_script = os.path.join(self._telemetry_path,
                                            TELEMETRY_RUN_BENCHMARKS_SCRIPT)

        result = self._run_telemetry(telemetry_script, benchmark, *args)

        if result.status is WARNING_STATUS:
            raise error.TestWarn('Telemetry Benchmark: %s'
                                 ' exited with Warnings.' % benchmark)
        if result.status is FAILED_STATUS:
            raise error.TestFail('Telemetry Benchmark: %s'
                                 ' failed to run.' % benchmark)
        if perf_value_writer:
            self._run_scp(perf_value_writer.resultsdir)
        return result
Example 10
    def _find_all_gtestsuites(self, use_whitelist=False):
        """Find all the gTest Suites installed on the DUT.

        @param use_whitelist: Only whitelisted tests found on the system will
                              be used.
        """
        list_cmd = LIST_TEST_BINARIES_TEMPLATE % {'path': NATIVE_TESTS_PATH}
        gtest_suites_path = self.host.run_output(list_cmd).splitlines()
        gtest_suites = [GtestSuite(path, True) for path in gtest_suites_path]

        if use_whitelist:
            try:
                whitelisted = self._get_whitelisted_tests(WHITELIST_FILE)
                gtest_suites = [
                    t for t in whitelisted if t.path in gtest_suites_path
                ]
            except error.AutoservRunError:
                logging.error('Failed to read whitelist %s', WHITELIST_FILE)

        if not gtest_suites:
            raise error.TestWarn('No test executables found on the DUT')
        logging.debug('Test executables found:\n%s',
                      '\n'.join([str(t) for t in gtest_suites]))
        return gtest_suites
Example 11
    def run_once(self,
                 target_package=None,
                 target_plan=None,
                 target_class=None,
                 target_method=None,
                 needs_push_media=False,
                 max_retry=None,
                 timeout=_CTS_TIMEOUT_SECONDS):
        """Runs the specified CTS once, but with several retries.

        There are four usages:
        1. Test the whole package named |target_package|.
        2. Test with a plan named |target_plan|.
        3. Run all the test cases of class named |target_class|.
        4. Run a specific test method named |target_method| of class
           |target_class|.

        @param target_package: the name of test package to run.
        @param target_plan: the name of the test plan to run.
        @param target_class: the name of the class to be tested.
        @param target_method: the name of the method to be tested.
        @param needs_push_media: need to push test media streams.
        @param max_retry: number of retry steps before reporting results.
        @param timeout: time after which tradefed can be interrupted.
        """
        # Don't download media for tests that don't need it. b/29371037
        # TODO(ihf): This can be removed once the control file generator is
        # aware of this constraint.
        if target_package is not None and target_package.startswith(
                'android.mediastress'):
            needs_push_media = True

        # On dev and beta channels timeouts are sharp, lenient on stable.
        self._timeout = timeout
        if self._get_release_channel() == 'stable':
            self._timeout += 3600
        # Retries depend on channel.
        self._max_retry = (max_retry if max_retry is not None else
                           self._get_channel_retry())
        session_id = 0

        steps = -1  # For historic reasons the first iteration is not counted.
        pushed_media = False
        total_tests = 0
        self.summary = ''
        if target_package is not None:
            test_name = 'package.%s' % target_package
            test_command = self._tradefed_run_command(package=target_package)
        elif target_plan is not None:
            test_name = 'plan.%s' % target_plan
            test_command = self._tradefed_run_command(plan=target_plan)
        elif target_class is not None:
            test_name = 'testcase.%s' % target_class
            if target_method is not None:
                test_name += '.' + target_method
            test_command = self._tradefed_run_command(
                test_class=target_class, test_method=target_method)
        else:
            test_command = self._tradefed_run_command()
            test_name = 'all_CTS'

        # Unconditionally run CTS package until we see some tests executed.
        while steps < self._max_retry and total_tests == 0:
            steps += 1
            with self._login_chrome(dont_override_profile=pushed_media):
                self._ready_arc()

                # Only push media for tests that need it. b/29371037
                if needs_push_media and not pushed_media:
                    self._push_media(_CTS_URI)
                    # copy_media.sh is not lazy, but we try to be.
                    pushed_media = True

                # Start each valid iteration with a clean repository. This
                # allows us to track session_id blindly.
                self._clean_repository()
                if target_plan is not None:
                    self._install_plan(target_plan)
                logging.info('Running %s:', test_name)

                # The list command is not required. It allows the reader to
                # inspect the tradefed state when examining the autotest logs.
                commands = [['list', 'results'], test_command]
                counts = self._run_cts_tradefed(commands)
                tests, passed, failed, notexecuted, waived = counts
                logging.info(
                    'RESULT: tests=%d, passed=%d, failed=%d, '
                    'notexecuted=%d, waived=%d', *counts)
                self.summary += 'run(t=%d, p=%d, f=%d, ne=%d, w=%d)' % counts
                if tests == 0 and target_package in self.notest_packages:
                    logging.info('Package has no tests as expected.')
                    return
                if tests > 0 and target_package in self.notest_packages:
                    # We expected no tests, but the new bundle drop must have
                    # added some for us. Alert us to the situation.
                    raise error.TestFail('Failed: Remove package %s from '
                                         'notest_packages directory!' %
                                         target_package)
                if tests == 0 and target_package not in self.notest_packages:
                    logging.error('Did not find any tests in package. Hoping '
                                  'this is transient. Retry after reboot.')
                # An internal self-check. We really should never hit this.
                if tests != passed + failed + notexecuted:
                    raise error.TestFail('Error: Test count inconsistent. %s' %
                                         self.summary)
                # Keep track of global counts as each continue/retry step below
                # works on local failures.
                total_tests = tests
                total_passed = passed
            # The DUT has rebooted at this point and is in a clean state.
        if total_tests == 0:
            raise error.TestFail('Error: Could not find any tests in package.')

        # If the results were not completed or were failing then continue or
        # retry them iteratively MAX_RETRY times.
        while steps < self._max_retry and (notexecuted > 0 or failed > waived):
            # First retry until there is no test left that was not executed.
            while notexecuted > 0 and steps < self._max_retry:
                steps += 1
                with self._login_chrome(dont_override_profile=pushed_media):
                    self._ready_arc()
                    logging.info('Continuing session %d:', session_id)
                    # 'Continue' reports as passed all passing results in the
                    # current session (including all tests passing before
                    # continue). Hence first subtract the old count before
                    # adding the new count. (Same for failed.)
                    previously_passed = passed
                    previously_failed = failed
                    previously_notexecuted = notexecuted
                    # TODO(ihf): For increased robustness pass in datetime_id of
                    # session we are continuing.
                    counts = self._tradefed_continue(session_id)
                    tests, passed, failed, notexecuted, waived = counts
                    # Unfortunately tradefed sometimes encounters an error
                    # running the tests for instance timing out on downloading
                    # the media files. Check for this condition and give it one
                    # extra chance.
                    if not (tests == previously_notexecuted
                            and tests == passed + failed + notexecuted):
                        logging.warning('Tradefed inconsistency - retrying.')
                        counts = self._tradefed_continue(session_id)
                        tests, passed, failed, notexecuted, waived = counts
                    newly_passed = passed - previously_passed
                    newly_failed = failed - previously_failed
                    total_passed += newly_passed
                    logging.info(
                        'RESULT: total_tests=%d, total_passed=%d, step'
                        '(tests=%d, passed=%d, failed=%d, notexecuted=%d,'
                        ' waived=%d)', total_tests, total_passed, tests,
                        newly_passed, newly_failed, notexecuted, waived)
                    self.summary += ' cont(t=%d, p=%d, f=%d, ne=%d, w=%d)' % (
                        tests, newly_passed, newly_failed, notexecuted, waived)
                    # An internal self-check. We really should never hit this.
                    if not (tests == previously_notexecuted and tests
                            == newly_passed + newly_failed + notexecuted):
                        logging.warning('Test count inconsistent. %s',
                                        self.summary)
                # The DUT has rebooted at this point and is in a clean state.

            if notexecuted > 0:
                # This likely means there were too many crashes/reboots to
                # attempt running all tests. Don't attempt to retry as it is
                # impossible to pass at this stage (and also inconsistent).
                raise error.TestFail(
                    'Failed: Ran out of steps with %d total '
                    'passed and %d remaining not executed tests. %s' %
                    (total_passed, notexecuted, self.summary))

            # Managed to reduce notexecuted to zero. Now create a new test plan
            # to rerun only the failures we did encounter.
            if failed > waived:
                with self._login_chrome(dont_override_profile=pushed_media):
                    steps += 1
                    self._ready_arc()
                    logging.info('Retrying failures of %s with session_id %d:',
                                 test_name, session_id)
                    previously_failed = failed
                    session_id, counts = self._tradefed_retry(
                        test_name, session_id)
                    tests, passed, failed, notexecuted, waived = counts
                    # Unfortunately tradefed sometimes encounters an error
                    # running the tests for instance timing out on downloading
                    # the media files. Check for this condition and give it one
                    # extra chance.
                    if not (tests == previously_failed
                            and tests == passed + failed + notexecuted):
                        logging.warning('Tradefed inconsistency - retrying.')
                        session_id, counts = self._tradefed_retry(
                            test_name, session_id)
                        tests, passed, failed, notexecuted, waived = counts
                    total_passed += passed
                    logging.info(
                        'RESULT: total_tests=%d, total_passed=%d, step'
                        '(tests=%d, passed=%d, failed=%d, notexecuted=%d,'
                        ' waived=%d)', total_tests, total_passed, *counts)
                    self.summary += (' retry(t=%d, p=%d, f=%d, ne=%d, w=%d)' %
                                     counts)
                    # An internal self-check. We really should never hit this.
                    if not (previously_failed == tests
                            and tests == passed + failed + notexecuted):
                        logging.warning('Test count inconsistent. %s',
                                        self.summary)
                # The DUT has rebooted at this point and is in a clean state.

        # Final classification of test results.
        if total_passed + waived == 0 or notexecuted > 0 or failed > waived:
            raise error.TestFail(
                'Failed: after %d retries giving up. '
                'total_passed=%d, failed=%d, notexecuted=%d, waived=%d. %s' %
                (steps, total_passed, failed, notexecuted, waived,
                 self.summary))
        if steps > 0:
            # TODO(ihf): Make this error.TestPass('...') once available.
            raise error.TestWarn(
                'Passed: after %d retries passing %d tests, waived=%d. %s' %
                (steps, total_passed, waived, self.summary))
Example 12
    def _get_debug_log_ts(self, debug_file_path):
        """Parse client side test start and end timestamp from debug log.

        @param debug_file_path: path to client side test debug log.
        @return (start_ts, end_ts)
                start_ts: the start timestamp of the client side test in seconds
                          since epoch or None.
                end_ts: the end timestamp of the client side test in seconds
                        since epoch or None.
        """
        default_test_events = collections.defaultdict(dict)
        custom_test_events = collections.defaultdict(dict)
        default_test_events['start']['str'] = self.DEFAULT_START
        default_test_events['end']['str'] = self.DEFAULT_END
        custom_test_events['start']['str'] = power_telemetry_utils.CUSTOM_START
        custom_test_events['end']['str'] = power_telemetry_utils.CUSTOM_END
        for event in default_test_events:
            default_test_events[event]['re'] = re.compile(
                r'([\d\s\./:]+).+' + default_test_events[event]['str'])
            default_test_events[event]['match'] = False
        for event in custom_test_events:
            custom_test_events[event]['re'] = re.compile(
                r'.*' + custom_test_events[event]['str'] + r'\s+([\d\.]+)')
        events_ts = {
            'start': None,
            'end': None,
        }

        try:
            with open(debug_file_path, 'r') as debug_log:

                for line in debug_log:
                    for event in default_test_events:
                        match = default_test_events[event]['re'].match(line)
                        if match:
                            default_test_events[event]['ts'] = \
                                    ts_processing(match.group(1))
                            default_test_events[event]['match'] = True
                    for event in custom_test_events:
                        match = custom_test_events[event]['re'].match(line)
                        if match:
                            custom_test_events[event]['ts'] = \
                                    float(match.group(1))

            for event in default_test_events:
                if not default_test_events[event]['match']:
                    raise error.TestWarn('Cannot find %s timestamp in client '
                                         'side test debug log.' % event)

            for event in events_ts:
                events_ts[event] = default_test_events[event].get(
                    'ts', events_ts[event])
                events_ts[event] = custom_test_events[event].get(
                    'ts', events_ts[event])

            return (events_ts['start'], events_ts['end'])

        except Exception as exc:
            logging.warning(
                'Client side test debug log %s does not contain '
                'valid start and end timestamp, see exception: %s',
                debug_file_path, exc)
            return (None, None)
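A standalone sketch of the two regex shapes used above, applied to fabricated log lines; the marker strings stand in for DEFAULT_START / CUSTOM_START, whose real values are not shown in this example.

import re

# Placeholder marker strings; the real DEFAULT_START / CUSTOM_START values
# are defined elsewhere in the test.
default_start = 'Test has started'
custom_start = 'Custom start timestamp'

default_re = re.compile(r'([\d\s\./:]+).+' + default_start)
custom_re = re.compile(r'.*' + custom_start + r'\s+([\d\.]+)')

m1 = default_re.match('05/21 13:02:11.123 DEBUG| autotest Test has started')
m2 = custom_re.match('DEBUG| Custom start timestamp 1558447331.5')
print(m1.group(1))  # leading timestamp text, handed to a parsing helper
print(m2.group(1))  # '1558447331.5', already seconds since epoch
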
Example 13
    def run_once(self, opts=None):
        options = dict(
            filter='',
            test_names='',  # e.g., dEQP-GLES3.info.version,
            # dEQP-GLES2.functional,
            # dEQP-GLES3.accuracy.texture, etc.
            test_names_file='',
            timeout=self._timeout,
            subset_to_run='Pass',  # Pass, Fail, Timeout, NotPass...
            hasty='False',
            shard_number='0',
            shard_count='1',
            debug='False')
        if opts is None:
            opts = []
        options.update(utils.args_to_dict(opts))
        logging.info('Test Options: %s', options)

        self._hasty = (options['hasty'] == 'True')
        self._timeout = int(options['timeout'])
        self._test_names_file = options['test_names_file']
        self._test_names = options['test_names']
        self._shard_number = int(options['shard_number'])
        self._shard_count = int(options['shard_count'])
        self._debug = (options['debug'] == 'True')
        if not (self._test_names_file or self._test_names):
            self._filter = options['filter']
            if not self._filter:
                raise error.TestFail('Failed: No dEQP test filter specified')

        # Some information to help postprocess logs into blacklists later.
        logging.info('ChromeOS BOARD = %s', self._board)
        logging.info('ChromeOS CPU family = %s', self._cpu_type)
        logging.info('ChromeOS GPU family = %s', self._gpu_type)

        # Create a place to put detailed test output logs.
        if self._filter:
            logging.info('dEQP test filter = %s', self._filter)
            self._log_path = os.path.join(tempfile.gettempdir(),
                                          '%s-logs' % self._filter)
        else:
            base = os.path.basename(self._test_names_file)
            # TODO(ihf): Clean this up.
            logging.info('dEQP test filter = %s', os.path.splitext(base)[0])
            self._log_path = os.path.join(tempfile.gettempdir(),
                                          '%s-logs' % base)
        shutil.rmtree(self._log_path, ignore_errors=True)
        os.mkdir(self._log_path)

        self._services.stop_services()
        if self._test_names_file:
            with open(os.path.join(self.bindir,
                                   self._test_names_file)) as names_file:
                test_cases = [line.rstrip('\n') for line in names_file]
            test_cases = [
                test for test in test_cases if test and not test.isspace()
            ]
        if self._test_names:
            test_cases = []
            for name in self._test_names.split(','):
                test_cases.extend(self._get_test_cases(name, 'Pass'))
        if self._filter:
            test_cases = self._get_test_cases(self._filter,
                                              options['subset_to_run'])

        if self._debug:
            # LogReader works on /var/log/messages by default.
            self._log_reader = cros_logging.LogReader()
            self._log_reader.set_start_by_current()

        test_results = {}
        if self._hasty:
            logging.info('Running in hasty mode.')
            test_results = self.run_tests_hasty(test_cases)
        else:
            logging.info('Running each test individually.')
            test_results = self.run_tests_individually(test_cases)

        logging.info('Test results:')
        logging.info(test_results)
        self.write_perf_keyval(test_results)

        test_count = 0
        test_failures = 0
        test_passes = 0
        test_skipped = 0
        for result in test_results:
            test_count += test_results[result]
            if result.lower() in ['pass']:
                test_passes += test_results[result]
            if result.lower() not in [
                    'pass', 'notsupported', 'internalerror', 'qualitywarning',
                    'compatibilitywarning', 'skipped'
            ]:
                test_failures += test_results[result]
            if result.lower() in ['skipped']:
                test_skipped += test_results[result]
        # The text "Completed all tests." is used by the process_log.py script
        # and should always appear at the end of a completed test run.
        logging.info(
            'Completed all tests. Saw %d tests, %d passes and %d failures.',
            test_count, test_passes, test_failures)

        if self._filter and test_count == 0 and options[
                'subset_to_run'] != 'NotPass':
            logging.warning('No test cases found for filter: %s!',
                            self._filter)

        if options['subset_to_run'] == 'NotPass':
            if test_passes:
                # TODO(ihf): Make this an annotated TestPass once available.
                raise error.TestWarn(
                    '%d formerly failing tests are passing now.' % test_passes)
        elif test_failures:
            # TODO(ihf): Delete this once hasty expectations have been
            # checked in.
            if self._gpu_type.startswith('tegra'):
                raise error.TestWarn(
                    'Failed: on %s %d/%d tests failed.' %
                    (self._gpu_type, test_failures, test_count))
            raise error.TestFail('Failed: on %s %d/%d tests failed.' %
                                 (self._gpu_type, test_failures, test_count))
        if test_skipped > 0:
            raise error.TestFail('Failed: on %s %d tests skipped, %d passes' %
                                 (self._gpu_type, test_skipped, test_passes))
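A standalone sketch of the result bucketing above, on a toy test_results dict (outcome name mapped to count); the outcome names mirror the non-failure list in the example, while the counts are invented.

test_results = {'Pass': 410, 'Fail': 3, 'NotSupported': 12,
                'QualityWarning': 1, 'Skipped': 2}
non_failures = ['pass', 'notsupported', 'internalerror', 'qualitywarning',
                'compatibilitywarning', 'skipped']

test_count = sum(test_results.values())
test_passes = sum(count for result, count in test_results.items()
                  if result.lower() == 'pass')
test_failures = sum(count for result, count in test_results.items()
                    if result.lower() not in non_failures)
test_skipped = sum(count for result, count in test_results.items()
                   if result.lower() == 'skipped')
print(test_count, test_passes, test_failures, test_skipped)  # 428 410 3 2
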
Example 14
    """
    srcdir = params.get("srcdir", test.srcdir)
    params["srcdir"] = srcdir

    # Flag if an installer minor failure occurred
    minor_failure = False
    minor_failure_reasons = []

    try:
        for name in params.get("installers", "").split():
            installer_obj = installer.make_installer(name, params, test)
            installer_obj.install()
            if installer_obj.minor_failure:
                minor_failure = True
                reason = "%s_%s: %s" % (installer_obj.name,
                                        installer_obj.mode,
                                        installer_obj.minor_failure_reason)
                minor_failure_reasons.append(reason)
            env.register_installer(installer_obj)

    except Exception as e:
        # If the build/install fails, don't allow other tests
        # to get an installer.
        msg = "Virtualization software install failed: %s" % (e)
        env.register_installer(base_installer.FailedInstaller(msg))
        raise

    if minor_failure:
        raise error.TestWarn("Minor (worked around) failures during build "
                             "test: %s" % ", ".join(minor_failure_reasons))
Example 15
    def run_once(self,
                 target_module=None,
                 target_plan=None,
                 target_class=None,
                 target_method=None,
                 needs_push_media=False,
                 max_retry=None,
                 cts_tradefed_args=None,
                 pre_condition_commands=[],
                 warn_on_test_retry=True,
                 timeout=_CTS_TIMEOUT_SECONDS):
        """Runs the specified CTS once, but with several retries.

        There are five usages:
        1. Test the whole module named |target_module|.
        2. Test with a plan named |target_plan|.
        3. Run all the test cases of class named |target_class|.
        4. Run a specific test method named |target_method| of class
           |target_class|.
        5. Run an arbitrary tradefed command.

        @param target_module: the name of test module to run.
        @param target_plan: the name of the test plan to run.
        @param target_class: the name of the class to be tested.
        @param target_method: the name of the method to be tested.
        @param needs_push_media: need to push test media streams.
        @param max_retry: number of retry steps before reporting results.
        @param timeout: time after which tradefed can be interrupted.
        @param pre_condition_commands: a list of scripts to be run on the
        DUT before the test is run; the scripts must already be installed.
        @param warn_on_test_retry: False if you want to skip warning message
        about tradefed retries.
        @param cts_tradefed_args: a list of args to pass to tradefed.
        """

        # On dev and beta channels timeouts are sharp, lenient on stable.
        self._timeout = timeout
        if self._get_release_channel() == 'stable':
            self._timeout += 3600
        # Retries depend on channel.
        self._max_retry = (max_retry if max_retry is not None else
                           self._get_channel_retry())
        logging.info('Maximum number of retry steps %d.', self._max_retry)
        session_id = 0

        self.result_history = {}
        steps = -1  # For historic reasons the first iteration is not counted.
        pushed_media = False
        total_tests = 0
        total_passed = 0
        self.summary = ''
        if target_module is not None:
            test_name = 'module.%s' % target_module
            test_command = self._tradefed_run_command(module=target_module,
                                                      session_id=session_id)
        elif target_plan is not None:
            test_name = 'plan.%s' % target_plan
            test_command = self._tradefed_run_command(plan=target_plan,
                                                      session_id=session_id)
        elif target_class is not None:
            test_name = 'testcase.%s' % target_class
            if target_method is not None:
                test_name += '.' + target_method
            test_command = self._tradefed_run_command(
                test_class=target_class,
                test_method=target_method,
                session_id=session_id)
        elif cts_tradefed_args is not None:
            test_name = 'run tradefed %s' % ' '.join(cts_tradefed_args)
            test_command = cts_tradefed_args

        else:
            test_command = self._tradefed_run_command()
            test_name = 'all_CTS'

        # Unconditionally run CTS module until we see some tests executed.
        while total_tests == 0 and steps < self._max_retry:
            steps += 1
            with self._login_chrome(dont_override_profile=pushed_media):
                self._ready_arc()
                self._run_precondition_scripts(self._host,
                                               pre_condition_commands)

                # Only push media for tests that need it. b/29371037
                if needs_push_media and not pushed_media:
                    self._push_media(_CTS_URI)
                    # copy_media.sh is not lazy, but we try to be.
                    pushed_media = True

                # Start each valid iteration with a clean repository. This
                # allows us to track session_id blindly.
                self._clean_repository()
                if target_plan is not None:
                    self._install_plan(target_plan)
                logging.info('Running %s:', test_name)

                # The list command is not required. It allows the reader to
                # inspect the tradefed state when examining the autotest logs.
                commands = [['list', 'results'], test_command]
                counts = self._run_cts_tradefed(commands)
                tests, passed, failed, notexecuted, waived = counts
                self.result_history[steps] = counts
                msg = 'run(t=%d, p=%d, f=%d, ne=%d, w=%d)' % counts
                logging.info('RESULT: %s', msg)
                self.summary += msg
                if tests == 0 and target_module in self.notest_modules:
                    logging.info('Package has no tests as expected.')
                    return
                if tests > 0 and target_module in self.notest_modules:
                    # We expected no tests, but the new bundle drop must have
                    # added some for us. Alert us to the situation.
                    raise error.TestFail('Failed: Remove module %s from '
                                         'notest_modules directory!' %
                                         target_module)
                if tests == 0 and target_module not in self.notest_modules:
                    logging.error('Did not find any tests in module. Hoping '
                                  'this is transient. Retry after reboot.')
                if not self._consistent(tests, passed, failed, notexecuted):
                    # Try to figure out what happened. Example: b/35605415.
                    self._run_cts_tradefed([['list', 'results']],
                                           collect_results=False)
                    logging.warning('Test count inconsistent. %s',
                                    self.summary)
                # Keep track of global count, we can't trust continue/retry.
                if total_tests == 0:
                    total_tests = tests
                total_passed += passed
            # The DUT has rebooted at this point and is in a clean state.
        if total_tests == 0:
            raise error.TestFail('Error: Could not find any tests in module.')

        retry_inconsistency_error = None
        # If the results were not completed or were failing then continue or
        # retry them iteratively MAX_RETRY times.
        while steps < self._max_retry and failed > waived:
            steps += 1
            with self._login_chrome(dont_override_profile=pushed_media):
                self._ready_arc()
                self._run_precondition_scripts(self._host,
                                               pre_condition_commands)
                logging.info('Retrying failures of %s with session_id %d:',
                             test_name, session_id)
                expected_tests = failed + notexecuted
                session_id, counts = self._tradefed_retry(
                    test_name, session_id)
                tests, passed, failed, notexecuted, waived = counts
                self.result_history[steps] = counts
                # Consistency check, did we really run as many as we thought
                # initially?
                if expected_tests != tests:
                    msg = (
                        'Retry inconsistency - '
                        'initially saw %d failed+notexecuted, ran %d tests. '
                        'passed=%d, failed=%d, notexecuted=%d, waived=%d.' %
                        (expected_tests, tests, passed, failed, notexecuted,
                         waived))
                    logging.warning(msg)
                    if expected_tests > tests:
                        # See b/36523200#comment8. Due to the existence of the
                        # multiple tests having the same ID, more cases may be
                        # run than previous fail count. As a workaround, making
                        # it an error only when the tests run were less than
                        # expected.
                        # TODO(kinaba): Find a way to handle this dup.
                        retry_inconsistency_error = msg
                if not self._consistent(tests, passed, failed, notexecuted):
                    logging.warning('Tradefed inconsistency - retrying.')
                    session_id, counts = self._tradefed_retry(
                        test_name, session_id)
                    tests, passed, failed, notexecuted, waived = counts
                    self.result_history[steps] = counts
                msg = 'retry(t=%d, p=%d, f=%d, ne=%d, w=%d)' % counts
                logging.info('RESULT: %s', msg)
                self.summary += ' ' + msg
                if not self._consistent(tests, passed, failed, notexecuted):
                    logging.warning('Test count inconsistent. %s',
                                    self.summary)
                total_passed += passed
                if tests > expected_tests:
                    total_tests += tests - expected_tests
            # The DUT has rebooted at this point and is in a clean state.

        # Final classification of test results.
        if total_passed + waived == 0 or failed > waived:
            raise error.TestFail(
                'Failed: after %d retries giving up. '
                'passed=%d, failed=%d, notexecuted=%d, waived=%d. %s' %
                (steps, total_passed, failed, notexecuted, waived,
                 self.summary))
        if not self._consistent(total_tests, total_passed, failed,
                                notexecuted):
            raise error.TestFail('Error: Test count inconsistent. %s' %
                                 self.summary)
        if retry_inconsistency_error:
            raise error.TestFail('Error: %s %s' %
                                 (retry_inconsistency_error, self.summary))
        if steps > 0 and warn_on_test_retry:
            # TODO(ihf): Make this error.TestPass('...') once available.
            raise error.TestWarn(
                'Passed: after %d retries passing %d tests, waived=%d. %s' %
                (steps, total_passed, waived, self.summary))
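A standalone sketch of the count bookkeeping used above. The real _consistent() helper is not shown in this example; here it is assumed to check that tests == passed + failed + notexecuted, matching the explicit check in Example 11. The counts are invented.

def consistent(tests, passed, failed, notexecuted):
    """Assumed meaning of self._consistent(): the counts must add up."""
    return tests == passed + failed + notexecuted

counts = (10, 7, 2, 1, 0)  # (tests, passed, failed, notexecuted, waived)
tests, passed, failed, notexecuted, waived = counts
print('run(t=%d, p=%d, f=%d, ne=%d, w=%d)' % counts)
print(consistent(tests, passed, failed, notexecuted))  # True
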
Example 16
    def run_once(self, host, test_mirrored=False):
        factory = remote_facade_factory.RemoteFacadeFactory(host)
        display_facade = factory.create_display_facade()
        chameleon_board = host.chameleon

        chameleon_board.reset()
        finder = chameleon_port_finder.ChameleonVideoInputFinder(
                chameleon_board, display_facade)

        errors = []
        warns = []
        for chameleon_port in finder.iterate_all_ports():
            screen_test = chameleon_screen_test.ChameleonScreenTest(
                    chameleon_port, display_facade, self.outputdir)

            logging.info('See the display on Chameleon: port %d (%s)',
                         chameleon_port.get_connector_id(),
                         chameleon_port.get_connector_type())

            logging.info('Set mirrored: %s', test_mirrored)
            display_facade.set_mirrored(test_mirrored)

            # Keep the original connector name, for later comparison.
            expected_connector = display_facade.get_external_connector_name()
            resolution = display_facade.get_external_resolution()
            logging.info('See the display on DUT: %s %r',
                         expected_connector, resolution)

            for (plugged_before_noise,
                 plugged_after_noise) in self.PLUG_CONFIGS:
                logging.info('TESTING THE CASE: %s > noise > %s',
                             'plug' if plugged_before_noise else 'unplug',
                             'plug' if plugged_after_noise else 'unplug')

                chameleon_port.set_plug(plugged_before_noise)

                if screen_test.check_external_display_connected(
                        expected_connector if plugged_before_noise else False,
                        errors):
                    # Skip the following test if an unexpected display detected.
                    continue

                chameleon_port.fire_mixed_hpd_pulses(
                        self.PULSES_PLUGGED if plugged_after_noise
                                            else self.PULSES_UNPLUGGED)

                if plugged_after_noise:
                    chameleon_port.wait_video_input_stable()
                    if test_mirrored:
                        # Wait for resolution change to make sure the resolution
                        # is stable before moving on. This is to deal with the
                        # case where DUT may respond slowly after the noise.
                        # If the resolution doesn't change, then we are
                        # confident that it is stable. Otherwise, a slow
                        # response is caught.
                        r = display_facade.get_internal_resolution()
                        utils.wait_for_value_changed(
                                display_facade.get_internal_resolution,
                                old_value=r)

                    err = screen_test.check_external_display_connected(
                            expected_connector)

                    if not err:
                        err = screen_test.test_screen_with_image(
                                resolution, test_mirrored)
                    if err:
                        # When something goes wrong after the noise, a normal
                        # user would try to re-plug the cable to recover.
                        # We emulate this behavior below and report error if
                        # the problem persists.
                        logging.warning('Possibly flaky: %s', err)
                        warns.append('Possibly flaky: %s' % err)
                        logging.info('Replug and retry the screen test...')
                        chameleon_port.unplug()
                        time.sleep(self.REPLUG_DELAY_SEC)
                        chameleon_port.plug()
                        chameleon_port.wait_video_input_stable()
                        screen_test.test_screen_with_image(
                                resolution, test_mirrored, errors)
                else:
                    screen_test.check_external_display_connected(False, errors)
                    time.sleep(1)

        if errors:
            raise error.TestFail('; '.join(set(errors)))
        elif warns:
            raise error.TestWarn('; '.join(set(warns)))
Example 17
def check_image(params, root_dir):
    """
    Check an image using the appropriate tools for each virt backend.

    @param params: Dictionary containing the test parameters.
    @param root_dir: Base directory for relative filenames.

    @note: params should contain:
           image_name -- the name of the image file, without extension
           image_format -- the format of the image (qcow2, raw etc)

    @raise VMImageCheckError: In case qemu-img check fails on the image.
    """
    vm_type = params.get("vm_type")
    if vm_type == 'kvm':
        image_filename = get_image_filename(params, root_dir)
        logging.debug("Checking image file %s", image_filename)
        qemu_img_cmd = virt_utils.get_path(
            root_dir, params.get("qemu_img_binary", "qemu-img"))
        image_is_qcow2 = params.get("image_format") == 'qcow2'
        if os.path.exists(image_filename) and image_is_qcow2:
            # Verifying if qemu-img supports 'check'
            q_result = utils.run(qemu_img_cmd, ignore_status=True)
            q_output = q_result.stdout
            check_img = True
            if "check" not in q_output:
                logging.error("qemu-img does not support 'check', "
                              "skipping check")
                check_img = False
            if "info" not in q_output:
                logging.error("qemu-img does not support 'info', "
                              "skipping check")
                check_img = False
            if check_img:
                try:
                    utils.system("%s info %s" % (qemu_img_cmd, image_filename))
                except error.CmdError:
                    logging.error("Error getting info from image %s",
                                  image_filename)

                cmd_result = utils.run("%s check %s" %
                                       (qemu_img_cmd, image_filename),
                                       ignore_status=True)
                # Error check, large chances of a non-fatal problem.
                # There are chances that bad data was skipped though
                if cmd_result.exit_status == 1:
                    for e_line in cmd_result.stdout.splitlines():
                        logging.error("[stdout] %s", e_line)
                    for e_line in cmd_result.stderr.splitlines():
                        logging.error("[stderr] %s", e_line)
                    if params.get("backup_image_on_check_error",
                                  "no") == "yes":
                        backup_image(params, root_dir, 'backup', False)
                    raise error.TestWarn(
                        "qemu-img check error. Some bad data "
                        "in the image may have gone unnoticed")
                # Exit status 2 is data corruption for sure, so fail the test
                elif cmd_result.exit_status == 2:
                    for e_line in cmd_result.stdout.splitlines():
                        logging.error("[stdout] %s", e_line)
                    for e_line in cmd_result.stderr.splitlines():
                        logging.error("[stderr] %s", e_line)
                    if params.get("backup_image_on_check_error",
                                  "no") == "yes":
                        backup_image(params, root_dir, 'backup', False)
                    raise VMImageCheckError(image_filename)
                # Leaked clusters, they are known to be harmless to data
                # integrity
                elif cmd_result.exit_status == 3:
                    raise error.TestWarn("Leaked clusters were noticed during "
                                         "image check. No data integrity "
                                         "problem was found though.")

                # Just handle normal operation
                if params.get("backup_image", "no") == "yes":
                    backup_image(params, root_dir, 'backup', True)

        else:
            if not os.path.exists(image_filename):
                logging.debug("Image file %s not found, skipping check",
                              image_filename)
            elif not image_is_qcow2:
                logging.debug("Image file %s not qcow2, skipping check",
                              image_filename)
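A standalone sketch restating the qemu-img check exit-status handling above; the helper name and return strings are invented for illustration.

def classify_qemu_img_check(exit_status):
    """Map a 'qemu-img check' exit status to the outcome used above."""
    if exit_status == 0:
        return 'ok'
    if exit_status == 1:
        return 'warn: check error, bad data may have gone unnoticed'
    if exit_status == 2:
        return 'fail: data corruption detected'
    if exit_status == 3:
        return 'warn: leaked clusters, no data integrity problem'
    return 'unhandled exit status %d' % exit_status

for status in (0, 1, 2, 3):
    print('%d -> %s' % (status, classify_qemu_img_check(status)))
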
Example 18
    def run_once(self, opts=None):
        options = dict(
            filter='',
            test_names='',  # e.g., dEQP-GLES3.info.version,
            # dEQP-GLES2.functional,
            # dEQP-GLES3.accuracy.texture, etc.
            timeout=self._timeout,
            subset_to_run='Pass',  # Pass, Fail, Timeout, NotPass...
            hasty='False',
            shard_number='0',
            shard_count='1')
        if opts is None:
            opts = []
        options.update(utils.args_to_dict(opts))
        logging.info('Test Options: %s', options)

        self._hasty = (options['hasty'] == 'True')
        self._timeout = int(options['timeout'])
        self._test_names = options['test_names']
        self._shard_number = int(options['shard_number'])
        self._shard_count = int(options['shard_count'])
        if not self._test_names:
            self._filter = options['filter']
            if not self._filter:
                raise error.TestError('No dEQP test filter specified')

        # Some information to help postprocess logs into blacklists later.
        logging.info('ChromeOS BOARD = %s', self._board)
        logging.info('ChromeOS CPU family = %s', self._cpu_type)
        logging.info('ChromeOS GPU family = %s', self._gpu_type)
        logging.info('dEQP test filter = %s', self._filter)

        if self._gpu_type == 'pinetrail':
            raise error.TestNAError('dEQP not implemented on pinetrail. '
                                    'crbug.com/532691')
        if self._gpu_type == 'mali':
            raise error.TestNAError('dEQP not implemented on mali. '
                                    'crbug.com/543372')
        if self._gpu_type == 'tegra':
            raise error.TestNAError('dEQP not implemented on tegra. '
                                    'crbug.com/543373')

        # Determine module from test_names or filter.
        if self._test_names:
            test_prefix = self._test_names.split('.')[0]
            self._filter = '%s.filter_args' % test_prefix
        elif self._filter:
            test_prefix = self._filter.split('.')[0]
        if test_prefix in self.DEQP_MODULES:
            module = self.DEQP_MODULES[test_prefix]
        elif self._test_names:
            raise error.TestError('Invalid test names: %s' % self._test_names)
        else:
            raise error.TestError('Invalid test filter: %s' % self._filter)

        executable_path = os.path.join(self.DEQP_BASEDIR, 'modules', module)
        executable = os.path.join(executable_path, 'deqp-%s' % module)

        self._services.stop_services()

        # Must be in the executable directory when running for it to find its
        # test data files!
        os.chdir(executable_path)
        if self._test_names:
            test_cases = []
            for name in self._test_names.split(','):
                test_cases.extend(
                    self._get_test_cases(executable, name, 'Pass'))
        else:
            test_cases = self._get_test_cases(executable, self._filter,
                                              options['subset_to_run'])

        test_results = {}
        if self._hasty:
            logging.info('Running in hasty mode.')
            test_results = self.run_tests_hasty(executable, test_cases)
        else:
            logging.info('Running each test individually.')
            test_results = self.run_tests_individually(executable, test_cases)

        logging.info('Test results:')
        logging.info(test_results)
        self.write_perf_keyval(test_results)

        test_count = 0
        test_failures = 0
        test_passes = 0
        for result in test_results:
            test_count += test_results[result]
            if result.lower() in ['pass']:
                test_passes += test_results[result]
            if result.lower() not in ['pass', 'notsupported', 'internalerror']:
                test_failures += test_results[result]
        # The text "Completed all tests." is used by the process_log.py script
        # and should always appear at the end of a completed test run.
        logging.info(
            'Completed all tests. Saw %d tests, %d passes and %d failures.',
            test_count, test_passes, test_failures)

        if test_count == 0:
            raise error.TestWarn('No test cases found for filter: %s!' %
                                 self._filter)

        if options['subset_to_run'] == 'NotPass':
            if test_passes:
                raise error.TestWarn(
                    '%d formerly failing tests are passing now.' % test_passes)
        elif test_failures:
            raise error.TestFail('%d/%d tests failed.' %
                                 (test_failures, test_count))
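The options above accept shard_number and shard_count so a long dEQP run can be split across machines. A minimal sketch of one way such sharding can work (shard_test_cases is a hypothetical helper; the test's own splitting lives inside _get_test_cases and is not shown here):

def shard_test_cases(test_cases, shard_number, shard_count):
    """Return the subset of test_cases assigned to this shard.

    Cases are assigned round-robin so shards stay roughly equal in size
    even when the input list is sorted by module.
    """
    if not 0 <= shard_number < shard_count:
        raise ValueError('shard_number must be in [0, shard_count)')
    return [case for index, case in enumerate(test_cases)
            if index % shard_count == shard_number]

# Shard 1 of 3 gets the cases at indices 1, 4, 7, ...
cases = ['dEQP-GLES2.info.version', 'dEQP-GLES2.info.vendor',
         'dEQP-GLES3.info.version', 'dEQP-GLES3.info.vendor']
print(shard_test_cases(cases, shard_number=1, shard_count=3))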
Example n. 19
    def compare_extensions(self):
        """Compare installed extensions to the expected set.

        Find the set of expected IDs.
        Find the set of observed IDs.
        Do set comparison to find the unexpected, and the expected/missing.

        """
        test_fail = False
        combined_baseline = (self._bundled_crx_baseline +
                             self._component_extension_baseline)
        # Filter out any baseline entries that don't apply to this board.
        # If there is no 'boards' limiter on a given record, the record applies.
        # If there IS a 'boards' limiter, check that it applies.
        board = utils.get_current_board()
        combined_baseline = [
            x for x in combined_baseline
            if 'boards' not in x or board in x['boards']
        ]

        observed_extensions = self._get_extensions_info()
        observed_ids = set([x['id'] for x in observed_extensions])
        expected_ids = set([x['id'] for x in combined_baseline])

        missing_ids = expected_ids - observed_ids
        missing_names = [
            '%s (%s)' % (x['name'], x['id']) for x in combined_baseline
            if x['id'] in missing_ids
        ]

        unexpected_ids = observed_ids - expected_ids
        unexpected_names = [
            '%s (%s)' % (x['name'], x['id']) for x in observed_extensions
            if x['id'] in unexpected_ids
        ]

        good_ids = expected_ids.intersection(observed_ids)

        if missing_names:
            logging.error('Missing: %s', '; '.join(missing_names))
            test_fail = True
        if unexpected_names:
            logging.error('Unexpected: %s', '; '.join(unexpected_names))
            test_fail = True

        # For those IDs in both the expected-and-observed sets, i.e., "good":
        #   Compare sets of expected-vs-actual API permissions, report diffs.
        #   Do same for host permissions.
        for good_id in good_ids:
            baseline = [x for x in combined_baseline if x['id'] == good_id][0]
            actual = [x for x in observed_extensions if x['id'] == good_id][0]
            # Check the API permissions.
            baseline_apis = set(baseline['apiPermissions'])
            actual_apis = set(actual['apiPermissions'])
            missing_apis = baseline_apis - actual_apis
            unexpected_apis = actual_apis - baseline_apis
            if missing_apis or unexpected_apis:
                test_fail = True
                self._report_attribute_diffs(missing_apis, unexpected_apis,
                                             actual)
            # Check the host permissions.
            baseline_hosts = set(baseline['effectiveHostPermissions'])
            actual_hosts = set(actual['effectiveHostPermissions'])
            missing_hosts = baseline_hosts - actual_hosts
            unexpected_hosts = actual_hosts - baseline_hosts
            if missing_hosts or unexpected_hosts:
                test_fail = True
                self._report_attribute_diffs(missing_hosts, unexpected_hosts,
                                             actual)
        if test_fail:
            # TODO(jorgelo): make this fail again, see crbug.com/343271.
            raise error.TestWarn('Baseline mismatch, see error log.')
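The docstring above boils the check down to two set differences plus an intersection. A tiny self-contained illustration of that comparison with made-up extension records (the dict shape mirrors the baseline/observed entries used in the example):

baseline = [{'id': 'aaa', 'name': 'Files'},
            {'id': 'bbb', 'name': 'Wallpaper'}]
observed = [{'id': 'aaa', 'name': 'Files'},
            {'id': 'ccc', 'name': 'Mystery extension'}]

expected_ids = set(x['id'] for x in baseline)
observed_ids = set(x['id'] for x in observed)

missing_ids = expected_ids - observed_ids     # expected but not installed
unexpected_ids = observed_ids - expected_ids  # installed but not expected
good_ids = expected_ids & observed_ids        # in both; compare permissions next

print(sorted(missing_ids))     # ['bbb']
print(sorted(unexpected_ids))  # ['ccc']
print(sorted(good_ids))        # ['aaa']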
Example n. 20
    def run_screenshot_comparison_test(self):
        """
        Template method to run screenshot comparison tests for ui pieces.

        1. Set up test dirs.
        2. Create folder name.
        3. Download golden image.
        4. Capture test image.
        5. Compare images locally, if FAIL upload to remote for analysis later.
        6. Clean up test dirs.

        """

        img_comp_conf_path = os.path.join(ui_TestBase.AUTOTEST_CROS_UI_DIR,
                                          ui_TestBase.IMG_COMP_CONF_FILE)

        img_comp_factory = image_comparison_factory.ImageComparisonFactory(
            img_comp_conf_path)

        golden_image_local_dir = os.path.join(ui_TestBase.WORKING_DIR,
                                              'golden_images')

        file_utils.make_leaf_dir(golden_image_local_dir)

        filename = '%s.png' % self.tagged_testname

        golden_image_remote_path = os.path.join(
            ui_TestBase.REMOTE_DIR, 'ui',
            lsbrelease_utils.get_chrome_milestone(), self.folder_name,
            filename)

        golden_image_local_path = os.path.join(golden_image_local_dir,
                                               filename)

        test_image_filepath = os.path.join(ui_TestBase.WORKING_DIR, filename)

        try:
            file_utils.download_file(golden_image_remote_path,
                                     golden_image_local_path)
        except urllib2.HTTPError as e:
            warn = "No screenshot found for {0} on milestone {1}. ".format(
                self.tagged_testname, lsbrelease_utils.get_chrome_milestone())
            warn += e.msg
            raise error.TestWarn(warn)

        self.capture_screenshot(test_image_filepath)

        comparer = img_comp_factory.make_pdiff_comparer()
        comp_res = comparer.compare(golden_image_local_path,
                                    test_image_filepath)

        if comp_res.diff_pixel_count > img_comp_factory.pixel_thres:
            publisher = img_comp_factory.make_imagediff_publisher(
                self.resultsdir)

            # get chrome version
            version_string = utils.system_output(
                constants.CHROME_VERSION_COMMAND, ignore_status=True)
            version_string = utils.parse_chrome_version(version_string)[0]

            # tags for publishing
            tags = {
                'testname': self.tagged_testname,
                'chromeos_version': utils.get_chromeos_release_version(),
                'chrome_version': version_string,
                'board': utils.get_board(),
                'date': datetime.date.today().strftime("%m/%d/%y"),
                'diff_pixels': comp_res.diff_pixel_count
            }

            publisher.publish(golden_image_local_path, test_image_filepath,
                              comp_res.pdiff_image_path, tags)

            raise error.TestFail('Test Failed. Please see image comparison '
                                 'result by opening index.html from the '
                                 'results directory.')

        file_utils.rm_dir_if_exists(ui_TestBase.WORKING_DIR)
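The comparison step above only thresholds diff_pixel_count from the perceptual-diff comparer. A rough standalone sketch of the same idea using Pillow (an illustration only; the real comparer and threshold come from the image comparison config, and the names below are hypothetical):

from PIL import Image, ImageChops

# Hypothetical threshold; in the test it comes from the comparison config file.
PIXEL_THRESHOLD = 100

def count_diff_pixels(golden_path, test_path):
    """Count differing pixels between two same-sized RGB screenshots."""
    golden = Image.open(golden_path).convert('RGB')
    test = Image.open(test_path).convert('RGB')
    # ImageChops.difference requires images of the same size.
    diff = ImageChops.difference(golden, test)
    return sum(1 for pixel in diff.getdata() if pixel != (0, 0, 0))

def images_match(golden_path, test_path):
    """True if the screenshots differ by no more than the threshold."""
    return count_diff_pixels(golden_path, test_path) <= PIXEL_THRESHOLD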
Example n. 21
    def suspend(self, duration=10, ignore_kernel_warns=False):
        """
        Do a single suspend for 'duration' seconds. The time the board takes to
        enter suspend is estimated (see _SUSPEND_DELAY), so the actual RTC
        wakeup delay will be longer than 'duration'. Returns None on errors, or
        raises the exception when _throw is set. Returns a dict of general
        measurements, or a tuple (general_measurements,
        individual_device_times) when _device_times is set.

        @param duration: time in seconds to do a suspend prior to waking.
        @param ignore_kernel_warns: Ignore kernel errors.  Defaults to false.
        """

        if power_utils.get_sleep_state() == 'freeze':
            self._s0ix_residency_stats = power_status.S0ixResidencyStats()

        try:
            iteration = len(self.failures) + len(self.successes) + 1
            # Retry suspend in case we hit a known (whitelisted) bug
            for _ in xrange(10):
                self._reset_logs()
                utils.system('sync')
                board_delay = self._SUSPEND_DELAY.get(self._get_board(),
                        self._DEFAULT_SUSPEND_DELAY)
                try:
                    alarm = self._suspend(duration + board_delay)
                except sys_power.SpuriousWakeupError:
                    # Might be another error; we check for that ourselves below.
                    alarm = self._ALARM_FORCE_EARLY_WAKEUP

                if os.path.exists('/sys/firmware/log'):
                    for msg in re.findall(r'^.*ERROR.*$',
                            utils.read_file('/sys/firmware/log'), re.M):
                        for board, pattern in sys_power.FirmwareError.WHITELIST:
                            if (re.search(board, utils.get_board()) and
                                    re.search(pattern, msg)):
                                logging.info('Whitelisted FW error: %s', msg)
                                break
                        else:
                            firmware_log = os.path.join(self._logdir,
                                    'firmware.log.' + str(iteration))
                            shutil.copy('/sys/firmware/log', firmware_log)
                            logging.info('Saved firmware log: %s', firmware_log)
                            raise sys_power.FirmwareError(msg.strip('\r\n '))

                self._update_logs()
                if not self._check_for_errors(ignore_kernel_warns):
                    hwclock_ts = self._hwclock_ts(alarm)
                    if hwclock_ts:
                        break

            else:
                raise error.TestWarn('Ten tries failed due to whitelisted bug')

            # calculate general measurements
            start_resume = self._ts('start_resume_time')
            kernel_down = (self._ts('end_suspend_time') -
                           self._ts('start_suspend_time'))
            kernel_up = self._ts('end_resume_time') - start_resume
            devices_up = self._device_resume_time()
            total_up = hwclock_ts - alarm
            firmware_up = self._firmware_resume_time()
            board_up = total_up - kernel_up - firmware_up
            try:
                cpu_up = self._ts('cpu_ready_time', 0) - start_resume
            except error.TestError:
                # can be missing on non-SMP machines
                cpu_up = None
            if total_up > self._MAX_RESUME_TIME:
                raise error.TestError('Sanity check failed: missed RTC wakeup.')

            logging.info('Success(%d): %g down, %g up, %g board, %g firmware, '
                         '%g kernel, %g cpu, %g devices',
                         iteration, kernel_down, total_up, board_up,
                         firmware_up, kernel_up, cpu_up, devices_up)

            if hasattr(self, '_s0ix_residency_stats'):
                s0ix_residency_secs = \
                        self._s0ix_residency_stats.\
                                get_accumulated_residency_secs()
                if not s0ix_residency_secs:
                    raise sys_power.S0ixResidencyNotChanged(
                        'S0ix residency did not change.')
                logging.info('S0ix residency : %d secs.', s0ix_residency_secs)

            self.successes.append({
                'seconds_system_suspend': kernel_down,
                'seconds_system_resume': total_up,
                'seconds_system_resume_firmware': firmware_up + board_up,
                'seconds_system_resume_firmware_cpu': firmware_up,
                'seconds_system_resume_firmware_ec': board_up,
                'seconds_system_resume_kernel': kernel_up,
                'seconds_system_resume_kernel_cpu': cpu_up,
                'seconds_system_resume_kernel_dev': devices_up,
                })

            if hasattr(self, 'device_times'):
                self._individual_device_times(start_resume)
                return (self.successes[-1], self.device_times[-1])
            else:
                return self.successes[-1]

        except sys_power.SuspendFailure as ex:
            message = '%s(%d): %s' % (type(ex).__name__, iteration, ex)
            logging.error(message)
            self.failures.append(ex)
            if self._throw:
                if type(ex).__name__ in ['KernelError', 'SuspendTimeout']:
                    raise error.TestWarn(message)
                else:
                    raise error.TestFail(message)
            return None
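The success path above splits the total resume time (total_up) into firmware, board/EC, and kernel portions by simple subtraction. A tiny illustration of that breakdown with invented numbers (the variable names follow the example; the values are made up):

# Invented timings, in seconds, measured from the RTC alarm.
alarm = 0.0
hwclock_ts = 1.50    # wall-clock timestamp of the wakeup seen by userspace
firmware_up = 0.40   # firmware portion of resume
kernel_up = 0.90     # end_resume_time - start_resume_time

total_up = hwclock_ts - alarm                  # 1.50 s after the RTC alarm
board_up = total_up - kernel_up - firmware_up  # 0.20 s of board/EC handoff

print('total=%.2f firmware=%.2f board=%.2f kernel=%.2f'
      % (total_up, firmware_up, board_up, kernel_up))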
    def _run_tradefed_with_retries(self,
                                   target_module,
                                   test_command,
                                   test_name,
                                   target_plan=None,
                                   needs_push_media=False,
                                   cts_uri=None,
                                   login_precondition_commands=[],
                                   precondition_commands=[]):
        """Run CTS/GTS with retry logic.

        We first kick off the specified module, then rerun just the failures in
        up to MAX_RETRY subsequent iterations.
        """
        if self._should_skip_test():
            logging.warning('Skipped test %s', ' '.join(test_command))
            return

        steps = -1  # For historic reasons the first iteration is not counted.
        pushed_media = False
        self.summary = ''
        board = self._get_board_name(self._host)
        session_id = None

        self._setup_result_directories()

        while steps < self._max_retry:
            steps += 1
            self._run_precondition_scripts(self._host,
                                           login_precondition_commands, steps)
            with self._login_chrome(
                    board=board,
                    reboot=self._should_reboot(steps),
                    dont_override_profile=pushed_media) as current_login:
                self._ready_arc()
                self._run_precondition_scripts(self._host,
                                               precondition_commands, steps)

                # Only push media for tests that need it. b/29371037
                if needs_push_media and not pushed_media:
                    self._push_media(cts_uri)
                    # copy_media.sh is not lazy, but we try to be.
                    pushed_media = True

                # Run tradefed.
                if session_id is None:
                    if target_plan is not None:
                        self._install_plan(target_plan)

                    logging.info('Running %s:', test_name)
                    commands = [test_command]
                else:
                    logging.info('Retrying failures of %s with session_id %d:',
                                 test_name, session_id)
                    commands = [test_command + ['--retry', '%d' % session_id]]

                legacy_counts = self._run_and_parse_tradefed(commands)
                result = self._run_tradefed_list_results()
                if not result:
                    logging.error('Did not find any test results. Retry.')
                    current_login.need_reboot()
                    continue

                # TODO(kinaba): stop parsing |legacy_counts| except for waivers,
                # and rely more on |result| for generating the message.
                ltests, lpassed, lfailed, lnotexecuted, lwaived = legacy_counts
                last_session_id, passed, failed, all_done = result

                msg = 'run' if session_id is None else ' retry'
                msg += '(t=%d, p=%d, f=%d, ne=%d, w=%d)' % legacy_counts
                self.summary += msg
                logging.info('RESULT: %s %s', msg, result)

                # Check for no-test modules
                notest = (passed + failed == 0 and all_done)
                if target_module in self._notest_modules:
                    if notest:
                        logging.info('Package has no tests as expected.')
                        return
                    else:
                        # We expected no tests, but the new bundle drop must
                        # have added some for us. Alert us to the situation.
                        raise error.TestFail('Failed: Remove module %s from '
                                             'notest_modules directory!' %
                                             target_module)
                elif notest:
                    logging.error('Did not find any tests in module. Hoping '
                                  'this is transient. Retry after reboot.')
                    current_login.need_reboot()
                    continue

                session_id = last_session_id

                # Check if all the tests passed.
                if failed <= lwaived and all_done:
                    # TODO(ihf): Make this error.TestPass('...') once available.
                    if steps > 0 and self._warn_on_test_retry:
                        raise error.TestWarn(
                            'Passed: after %d retries passing %d tests, waived='
                            '%d. %s' % (steps, passed, lwaived, self.summary))
                    return

        if session_id is None:
            raise error.TestFail('Error: Could not find any tests in module.')
        raise error.TestFail(
            'Failed: after %d retries giving up. '
            'passed=%d, failed=%d, notexecuted=%d, waived=%d. %s' %
            (steps, passed, failed, lnotexecuted, lwaived, self.summary))
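The loop above is tradefed-specific, but its shape is generic: run everything once, then keep rerunning only the failures until they are gone or the retry budget is spent. A stripped-down sketch of that shape (run_tests and its set-of-failures return value are hypothetical stand-ins, not the tradefed interface):

def run_with_retries(run_tests, all_tests, max_retry):
    """Run all_tests, then retry only the failures up to max_retry times.

    run_tests(tests) is assumed to return the set of failing test names.
    Returns whatever is still failing after the retries.
    """
    failures = run_tests(all_tests)  # the initial full run is not counted
    for _ in range(max_retry):
        if not failures:
            break
        failures = run_tests(sorted(failures))
    return failures

# Usage with a fake runner in which 'test_b' always fails:
always_failing = {'test_b'}
still_failing = run_with_retries(lambda tests: always_failing & set(tests),
                                 ['test_a', 'test_b'], max_retry=2)
print(still_failing)  # {'test_b'}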