示例#1
0
    def verify_short_blanking(self):
        """Verify a short blanking mode was added on baytrail systems.

        Only applies to baytrail devices with a monitor attached and the
        known AUO B116XTN02.2 panel; any other configuration is skipped.

        @return: '' on success or skip, otherwise the error string produced
                 by handle_error().
        """
        logging.info('Running verify_short_blanking')
        if self._gpu_type != 'baytrail' or utils.has_no_monitor():
            return ''

        # Open the EDID to find the panel model.
        edid_path = '/sys/class/drm/card0-eDP-1/edid'
        if not os.path.exists(edid_path):
            logging.error('Error: %s not found.', edid_path)
            return self.handle_error(
                'Short blanking not added (no EDID found). ')

        with open(edid_path, 'r') as edid_file:
            # Bytes 8-9 carry the manufacturer ID and bytes 10-11 the
            # product code, each a little-endian 16-bit value.
            edid_file.seek(8)
            manufacturer = int(struct.unpack('<H', edid_file.read(2))[0])
            product_code = int(struct.unpack('<H', edid_file.read(2))[0])
        # This is not the panel we are looking for (AUO B116XTN02.2)
        if manufacturer != 0xaf06 or product_code != 0x225c:
            return ''
        # Get the downclock message from the logs.
        log_reader = cros_logging.LogReader()
        log_reader.set_start_by_reboot(-1)
        if log_reader.can_find(
                'Modified preferred into a short blanking mode'):
            return ''
        return self.handle_error('Short blanking not added. ')
    def _verify_lvds_downclock_mode_added(self):
        """Checks the kernel log for a message that an LVDS downclock mode has
        been added.

        This test is specific to alex/lumpy/parrot/stout since they use the
        i915 driver (which has downclocking ability) and use known LCDs.
        These LCDs are special, in that they support a downclocked refresh
        rate, but don't advertise it in the EDID.

        To counteract this, I added a quirk in drm to add a downclocked mode
        to the panel. Unfortunately, upstream doesn't want this patch, and we
        have to carry it locally. The quirk patch was dropped inadvertently
        from chromeos-3.4, so this test ensures we don't regress again.

        I plan on writing an upstream friendly patch sometime in the near
        future, at which point I'll revert my drm hack and this test.

        Returns:
            0 if no errors, otherwise the number of errors that occurred.
        """
        # grep reads the file directly; the previous 'cat | grep' pipeline
        # was a useless use of cat.
        cmd = 'grep CHROMEOS_RELEASE_BOARD /etc/lsb-release'
        output = utils.system_output(cmd)
        # Skip boards that don't carry the downclock quirk.
        if not any(board in output
                   for board in ('lumpy', 'alex', 'parrot', 'stout')):
            return 0

        # Get the downclock message from the logs
        reader = cros_logging.LogReader()
        reader.set_start_by_reboot(-1)
        if not reader.can_find('Adding LVDS downclock mode'):
            logging.error('Error, LVDS downclock quirk not applied!')
            return 1

        return 0
示例#3
0
    def run_once(self):
        """Simulate an ASAN-detectable memory bug in the browser and verify
        that AddressSanitizer catches and reports it.

        @raises error.TestFail: if the image is not an ASAN build, the test
                extension is missing, no ASAN log file appears, or the log
                lacks the expected AddressSanitizer output.
        """
        # PEP 8 idiom: 'not in' instead of 'not ... in'.
        if 'asan' not in utils.read_file('/etc/ui_use_flags.txt'):
            raise error.TestFail('Current image not built with ASAN')

        extension_path = os.path.join(os.path.dirname(__file__),
                                      'asan_crash_ext')

        with chrome.Chrome(extension_paths=[extension_path]) as cr:
            pid = utils.get_oldest_pid_by_name('chrome')
            asan_log_name = '/var/log/chrome/asan_log.%d' % pid
            logging.info('Browser PID under telemetry control is %d. '
                         'So ASAN log is expected at %s.', pid, asan_log_name)

            logging.info('Initiate simulating memory bug to be caught by ASAN.')
            extension = cr.get_extension(extension_path)
            if not extension:
                raise error.TestFail('Failed to find extension %s'
                                     % extension_path)

            # Catch the exception raised when the browser crashes.
            cr.did_browser_crash(lambda: extension.ExecuteJavaScript(
                    'chrome.autotestPrivate.simulateAsanMemoryBug();'))

            utils.poll_for_condition(
                    lambda: os.path.isfile(asan_log_name),
                    timeout=10,
                    exception=error.TestFail(
                            'Found no asan log file %s during 10s'
                            % asan_log_name))
            ui_log = cros_logging.LogReader(asan_log_name)
            ui_log.read_all_logs()

            # We must wait some time until memory bug is simulated (happens
            # immediately after the return on the call) and caught by ASAN.
            try:
                utils.poll_for_condition(
                        lambda: ui_log.can_find('ERROR: AddressSanitizer'),
                        timeout=10,
                        exception=error.TestFail(
                                'Found no asan log message about '
                                'Address Sanitizer catch'))
                # An example error string is like this
                # 'testarray:228' <== Memory access at offset 52 overflows
                # this variable
                utils.poll_for_condition(
                        lambda: ui_log.can_find("'testarray"),
                        timeout=10,
                        exception=error.TestFail(
                                'ASAN caught bug but did not mention '
                                'the cause in the log'))

            except:
                # Bare except is deliberate: dump the log for *any* failure,
                # then re-raise so nothing is swallowed. Lazy %-args avoid
                # building the message unless debug logging is enabled.
                logging.debug('ASAN log content: %s', ui_log.get_logs())
                raise

            # The cbuildbot logic will look for asan logs and process them.
            # Remove the simulated log file to avoid that.
            os.remove(asan_log_name)
示例#4
0
 def verify_lvds_downclock(self):
     """On systems which support LVDS downclock, checks the kernel log for
     a message that an LVDS downclock mode has been added.

     @return: '' on success or skip, otherwise the error string produced
              by handle_error().
     """
     logging.info('Running verify_lvds_downclock')
     board = utils.get_board()
     # Membership test replaces the chained '==' / 'or' comparisons.
     if board not in ('alex', 'lumpy', 'stout'):
         return ''
     # Get the downclock message from the logs.
     reader = cros_logging.LogReader()
     reader.set_start_by_reboot(-1)
     if not reader.can_find('Adding LVDS downclock mode'):
         return self.handle_error('LVDS downclock quirk not applied. ')
     return ''
    def serve_swf_to_browser(self, browser):
        """
        Tries to serve a sample swf to browser.

        A failure of this function does not imply a problem with Flash.
        @param browser: The Browser object to run the test with.
        @return: True if we managed to send swf to browser, False otherwise.
        """
        # Prepare index.html/Trivial.swf to be served.
        browser.platform.SetHTTPServerDirectories(self.bindir)
        test_url = browser.platform.http_server.UrlOf(
            os.path.join(self.bindir, 'index.html'))
        tab = None
        # BUG(485108): Work around a telemetry timing out after login.
        try:
            logging.info('Getting tab from telemetry...')
            tab = browser.tabs[0]
        except Exception:
            # Narrowed from a bare except so KeyboardInterrupt/SystemExit
            # are not swallowed by this best-effort workaround.
            logging.warning('Unexpected exception getting tab: %s',
                            pprint.pformat(sys.exc_info()[0]))
        if tab is None:
            return False

        logging.info('Initialize reading system logs.')
        self._messages_log_reader = cros_logging.LogReader()
        self._messages_log_reader.set_start_by_current()
        self._ui_log_reader = cros_logging.LogReader('/var/log/ui/ui.LATEST')
        self._ui_log_reader.set_start_by_current()
        logging.info('Done initializing system logs.')

        # Verify that the swf got pulled.
        try:
            tab.Navigate(test_url)
            tab.WaitForDocumentReadyStateToBeComplete()
            return True
        except Exception:
            # Same narrowing as above: log and report failure, but let
            # process-control exceptions propagate.
            logging.warning('Unexpected exception waiting for document: %s',
                            pprint.pformat(sys.exc_info()[0]))
            return False
    def run_once(self):
        """Verify the session manager's browser-hang detector.

        Enables hang detection, SIGSTOPs the browser to fake a hang, waits
        for the session manager to abort the browser, then confirms that the
        browser restarts while the session manager itself does not.

        @raises error.TestError: if the browser cannot be signalled.
        @raises error.TestFail: if session_manager restarted.
        """
        # Create magic file to enable browser liveness checking and
        # bounce the session manager to pick up the flag file.
        cros_ui.stop()
        os.mknod(constants.ENABLE_BROWSER_HANG_DETECTION_FILE)
        cros_ui.start()

        browser_pid = self._get_oldest_pid_by_name(constants.BROWSER)
        sm_pid = self._get_oldest_pid_by_name(constants.SESSION_MANAGER)

        # Reading the log is the best way to watch for the hang detector.
        reader = cros_logging.LogReader()
        reader.set_start_by_current()

        # To simulate a hang, STOP the browser and wait for it to get
        # hit by the session manager.  It won't actually exit until it gets
        # a SIGCONT, though.
        try:
            os.kill(browser_pid, signal.SIGSTOP)  # Simulate hang.
        except OSError as e:
            raise error.TestError('Cannot STOP browser: %r' % e)

        # Watch for hang detection.
        utils.poll_for_condition(
            condition=lambda: reader.can_find('Aborting browser process.'),
            exception=utils.TimeoutError('Waiting for hang detector.'),
            sleep_interval=5,
            timeout=60)

        try:
            os.kill(browser_pid, signal.SIGCONT)  # Allow browser to die.
        except OSError as e:
            raise error.TestError('Cannot CONT browser: %r' % e)

        # Wait for old browser process to be gone.
        # BUG FIX: the condition previously polled pid_is_alive(), which is
        # true immediately while the old browser still exists; we must wait
        # until the pid is NO LONGER alive.
        utils.poll_for_condition(
            condition=lambda: not utils.pid_is_alive(browser_pid),
            exception=utils.TimeoutError(
                'Browser does not seem to have restarted!'),
            timeout=60)

        # Wait for new browser to come up.
        login.wait_for_browser()
        if sm_pid != self._get_oldest_pid_by_name(constants.SESSION_MANAGER):
            raise error.TestFail('session_manager seems to have restarted')
    def run_once(self, ro_reps=1):
        """Exercise RW rollback and RO-only update on eligible USB-PD ports.

        Only downstream-facing ports (DFP) that support GFU are tested.

        @param ro_reps: number of repetitions for the RO-only test.
        """
        fw_dict = self._index_firmware_avail()

        self._usbpd = cros_ec.EC_USBPD()
        self._reader = cros_logging.LogReader()

        for port in self._usbpd.ports:
            # Guard clauses: skip ports that are not DFP or lack GFU.
            if not port.is_dfp():
                continue
            logging.info('Port%d: is a DFP', port.index)

            if not self._is_gfu(port):
                continue
            logging.info('Port%d: supports GFU', port.index)

            self._test_rw_rollback(port, fw_dict)
            self._test_ro_only(port, ro_reps)
示例#8
0
    def run_flash_sanity_test(self, browser, time_to_wait_secs):
        """Run the Flash sanity test.

        Serves a trivial swf, waits for the browser to fetch it, then checks
        that a Pepper (ppapi) Flash process starts and stays alive, without
        crashing the browser.

        @param browser: The Browser object to run the test with.
        @param time_to_wait_secs: wait time for swf file to load.
        @return: True if Flash ran, False on environment problems.
        @raises error.TestFail: if Flash/browser misbehaves.
        """
        tab = None
        # BUG(485108): Work around a telemetry timing out after login.
        try:
            logging.info('Getting tab from telemetry...')
            tab = browser.tabs[0]
        except Exception:
            # Narrowed from a bare except so KeyboardInterrupt/SystemExit
            # are not swallowed by this best-effort workaround.
            logging.warning('Unexpected exception getting tab: %s',
                            pprint.pformat(sys.exc_info()[0]))
        if tab is None:
            return False

        logging.info('Initialize reading system logs.')
        self._messages_log_reader = cros_logging.LogReader()
        self._messages_log_reader.set_start_by_current()
        self._ui_log_reader = cros_logging.LogReader('/var/log/ui/ui.LATEST')
        self._ui_log_reader.set_start_by_current()
        logging.info('Done initializing system logs.')

        # Ensure that the swf got pulled.
        pulled = False
        try:
            latch = self._testServer.add_wait_url('/Trivial.swf')
            tab.Navigate(self._test_url)
            tab.WaitForDocumentReadyStateToBeComplete()
            logging.info('Waiting up to %ds for document.', time_to_wait_secs)
            latch.wait(time_to_wait_secs)
            pulled = True
        except Exception:
            # Typo fixed in the message below ('wating' -> 'waiting').
            logging.warning('Unexpected exception waiting for document: %s',
                            pprint.pformat(sys.exc_info()[0]))
        if not pulled:
            return False

        logging.info('Waiting for Pepper process.')
        # Verify that we see a ppapi process and assume it is Flash.
        ppapi = utils.wait_for_value_changed(
            lambda: (utils.get_process_list('chrome', '--type=ppapi')),
            old_value=[],
            timeout_sec=5)
        logging.info('ppapi process list at start: %s', ', '.join(ppapi))
        if not ppapi:
            msg = 'flash/platform/pepper/pep_'
            if not self._ui_log_reader.can_find(msg):
                raise error.TestFail(
                    'Flash did not start (logs) and no ppapi process found.')
            # There is a chrome bug where the command line of the ppapi and
            # other processes is shown as "type=zygote". Bail out if we see more
            # than 2. Notice, we already did the waiting, so there is no need to
            # do more of it.
            zygote = utils.get_process_list('chrome', '--type=zygote')
            if len(zygote) > 2:
                logging.warning(
                    'Flash probably launched by Chrome as zygote: '
                    '<%s>.', ', '.join(zygote))
                return False

        # We have a ppapi process. Let it run for a little and see if it is
        # still alive.
        logging.info('Running Flash content for a little while.')
        time.sleep(5)
        logging.info('Verifying the Pepper process is still around.')
        ppapi = utils.wait_for_value_changed(
            lambda: (utils.get_process_list('chrome', '--type=ppapi')),
            old_value=[],
            timeout_sec=3)
        # Notice that we are not checking for equality of ppapi on purpose.
        logging.info('PPapi process list found: <%s>', ', '.join(ppapi))

        # Any better pattern matching?
        msg = ' Received crash notification for ' + constants.BROWSER
        if self._messages_log_reader.can_find(msg):
            raise error.TestFail('Browser crashed during test.')

        if not ppapi:
            raise error.TestFail('Pepper process disappeared during test.')

        # At a minimum Flash identifies itself during process start.
        msg = 'flash/platform/pepper/pep_'
        if not self._ui_log_reader.can_find(msg):
            raise error.TestFail('Saw ppapi process but no Flash output.')

        return True
示例#9
0
    def run_once(self, opts=None):
        """Run the dEQP test cases selected by the given options.

        Test cases come from a names file, an explicit comma-separated name
        list, or a filter (in that order of precedence). In hasty mode a
        bounded rerun of failing tests is attempted. Results are written as
        perf keyvals and the test fails if any case failed.

        @param opts: list of 'key=value' strings overriding the defaults
                     below.
        @raises error.TestFail: if no filter is specified or tests failed.
        """
        options = dict(filter='',
                       test_names='',  # e.g., dEQP-GLES3.info.version,
                                       # dEQP-GLES2.functional,
                                       # dEQP-GLES3.accuracy.texture, etc.
                       test_names_file='',
                       timeout=self._timeout,
                       subset_to_run='Pass',  # Pass, Fail, Timeout, NotPass...
                       hasty='False',
                       shard_number='0',
                       shard_count='1',
                       debug='False',
                       perf_failure_description=None)
        if opts is None:
            opts = []
        options.update(utils.args_to_dict(opts))
        logging.info('Test Options: %s', options)

        self._hasty = (options['hasty'] == 'True')
        self._timeout = int(options['timeout'])
        self._test_names_file = options['test_names_file']
        self._test_names = options['test_names']
        self._shard_number = int(options['shard_number'])
        self._shard_count = int(options['shard_count'])
        self._debug = (options['debug'] == 'True')
        if not (self._test_names_file or self._test_names):
            self._filter = options['filter']
            if not self._filter:
                raise error.TestFail('Failed: No dEQP test filter specified')
        if options['perf_failure_description']:
            self._test_failure_description = options['perf_failure_description']
        else:
            # Do not report failure if failure description is not specified.
            self._test_failure_report_enable = False

        # Some information to help post-process logs into blacklists later.
        logging.info('ChromeOS BOARD = %s', self._board)
        logging.info('ChromeOS CPU family = %s', self._cpu_type)
        logging.info('ChromeOS GPU family = %s', self._gpu_type)

        # Create a place to put detailed test output logs.
        filter_name = self._filter or os.path.basename(self._test_names_file)
        logging.info('dEQP test filter = %s', filter_name)
        self._log_path = os.path.join(tempfile.gettempdir(), '%s-logs' %
                                                             filter_name)
        shutil.rmtree(self._log_path, ignore_errors=True)
        os.mkdir(self._log_path)

        # Load either tests specified by test_names_file, test_names or filter.
        test_cases = []
        if self._test_names_file:
            test_cases = self._get_test_cases_from_names_file()
        elif self._test_names:
            test_cases = []
            for name in self._test_names.split(','):
                test_cases.extend(self._get_test_cases(name, 'Pass'))
        elif self._filter:
            test_cases = self._get_test_cases(self._filter,
                                              options['subset_to_run'])

        if self._debug:
            # LogReader works on /var/log/messages by default.
            self._log_reader = cros_logging.LogReader()
            self._log_reader.set_start_by_current()

        # Assume all tests failed at the beginning.
        for test_case in test_cases:
            self.add_failures(test_case)

        test_results, failing_test = self._run_once(test_cases)
        # Rerun the test if we are in hasty mode (truthiness instead of
        # len(...) > 0).
        if self._hasty and failing_test:
            if len(failing_test) < sum(test_results.values()) * RERUN_RATIO:
                logging.info('Because we are in hasty mode, we will rerun the '
                             'failing tests one at a time')
                rerun_results, failing_test = self._run_once(failing_test)
                # Update failing test result from the test_results
                for result in test_results:
                    if result.lower() not in self.TEST_RESULT_FILTER:
                        test_results[result] = 0
                for result in rerun_results:
                    test_results[result] = (test_results.get(result, 0) +
                                            rerun_results[result])
            else:
                logging.info('There are too many failing tests. It would '
                             'take too long to rerun them. Giving up.')

        # Update failing tests to the chrome perf dashboard records.
        for test_case in test_cases:
            if test_case not in failing_test:
                self.remove_failures(test_case)

        logging.info('Test results:')
        logging.info(test_results)
        logging.debug('Test Failed: %s', failing_test)
        self.write_perf_keyval(test_results)

        test_count = 0
        test_failures = 0
        test_passes = 0
        test_skipped = 0
        for result in test_results:
            test_count += test_results[result]
            if result.lower() in ['pass']:
                test_passes += test_results[result]
            if result.lower() not in self.TEST_RESULT_FILTER:
                test_failures += test_results[result]
            if result.lower() in ['skipped']:
                test_skipped += test_results[result]
        # The text "Completed all tests." is used by the process_log.py script
        # and should always appear at the end of a completed test run.
        logging.info(
            'Completed all tests. Saw %d tests, %d passes and %d failures.',
            test_count, test_passes, test_failures)

        if self._filter and test_count == 0 and options[
                'subset_to_run'] != 'NotPass':
            logging.warning('No test cases found for filter: %s!', self._filter)

        if test_failures:
            raise error.TestFail('Failed: on %s %d/%d tests failed.' %
                                 (self._gpu_type, test_failures, test_count))
        if test_skipped > 0:
            # Lazy logging args instead of eager %-formatting of the message.
            logging.info('On %s %d tests skipped, %d passes',
                         self._gpu_type, test_skipped, test_passes)
示例#10
0
    def run_once(self, opts=None):
        """Run the dEQP test cases selected by the given options.

        Test cases come from a names file, an explicit comma-separated name
        list, or a filter. Tests run hasty (batched) or individually; results
        are written as perf keyvals, and the test fails/warns depending on
        the subset run and the GPU type.

        @param opts: list of 'key=value' strings overriding the defaults
                     below.
        @raises error.TestFail: if no filter is specified, tests failed, or
                tests were skipped.
        @raises error.TestWarn: for formerly-failing tests now passing, or
                failures on tegra.
        """
        options = dict(
            filter='',
            test_names='',  # e.g., dEQP-GLES3.info.version,
            # dEQP-GLES2.functional,
            # dEQP-GLES3.accuracy.texture, etc.
            test_names_file='',
            timeout=self._timeout,
            subset_to_run='Pass',  # Pass, Fail, Timeout, NotPass...
            hasty='False',
            shard_number='0',
            shard_count='1',
            debug='False')
        if opts is None:
            opts = []
        options.update(utils.args_to_dict(opts))
        logging.info('Test Options: %s', options)

        self._hasty = (options['hasty'] == 'True')
        self._timeout = int(options['timeout'])
        self._test_names_file = options['test_names_file']
        self._test_names = options['test_names']
        self._shard_number = int(options['shard_number'])
        self._shard_count = int(options['shard_count'])
        self._debug = (options['debug'] == 'True')
        if not (self._test_names_file or self._test_names):
            self._filter = options['filter']
            if not self._filter:
                raise error.TestFail('Failed: No dEQP test filter specified')

        # Some information to help postprocess logs into blacklists later.
        logging.info('ChromeOS BOARD = %s', self._board)
        logging.info('ChromeOS CPU family = %s', self._cpu_type)
        logging.info('ChromeOS GPU family = %s', self._gpu_type)

        # Create a place to put detailed test output logs.
        if self._filter:
            logging.info('dEQP test filter = %s', self._filter)
            self._log_path = os.path.join(tempfile.gettempdir(),
                                          '%s-logs' % self._filter)
        else:
            base = os.path.basename(self._test_names_file)
            # TODO(ihf): Clean this up.
            logging.info('dEQP test filter = %s', os.path.splitext(base)[0])
            self._log_path = os.path.join(tempfile.gettempdir(),
                                          '%s-logs' % base)
        shutil.rmtree(self._log_path, ignore_errors=True)
        os.mkdir(self._log_path)

        self._services.stop_services()
        if self._test_names_file:
            # Use a context manager so the names file is always closed (the
            # previous bare open() leaked the file handle).
            names_path = os.path.join(self.bindir, self._test_names_file)
            with open(names_path) as names_file:
                test_cases = [line.rstrip('\n') for line in names_file]
            test_cases = [
                test for test in test_cases if test and not test.isspace()
            ]
        if self._test_names:
            test_cases = []
            for name in self._test_names.split(','):
                test_cases.extend(self._get_test_cases(name, 'Pass'))
        if self._filter:
            test_cases = self._get_test_cases(self._filter,
                                              options['subset_to_run'])

        if self._debug:
            # LogReader works on /var/log/messages by default.
            self._log_reader = cros_logging.LogReader()
            self._log_reader.set_start_by_current()

        test_results = {}
        if self._hasty:
            logging.info('Running in hasty mode.')
            test_results = self.run_tests_hasty(test_cases)
        else:
            logging.info('Running each test individually.')
            test_results = self.run_tests_individually(test_cases)

        logging.info('Test results:')
        logging.info(test_results)
        self.write_perf_keyval(test_results)

        test_count = 0
        test_failures = 0
        test_passes = 0
        test_skipped = 0
        for result in test_results:
            test_count += test_results[result]
            if result.lower() in ['pass']:
                test_passes += test_results[result]
            if result.lower() not in [
                    'pass', 'notsupported', 'internalerror', 'qualitywarning',
                    'compatibilitywarning', 'skipped'
            ]:
                test_failures += test_results[result]
            if result.lower() in ['skipped']:
                test_skipped += test_results[result]
        # The text "Completed all tests." is used by the process_log.py script
        # and should always appear at the end of a completed test run.
        logging.info(
            'Completed all tests. Saw %d tests, %d passes and %d failures.',
            test_count, test_passes, test_failures)

        if self._filter and test_count == 0 and options[
                'subset_to_run'] != 'NotPass':
            logging.warning('No test cases found for filter: %s!',
                            self._filter)

        if options['subset_to_run'] == 'NotPass':
            if test_passes:
                # TODO(ihf): Make this an annotated TestPass once available.
                raise error.TestWarn(
                    '%d formerly failing tests are passing now.' % test_passes)
        elif test_failures:
            # TODO(ihf): Delete this once hasty expectations have been
            # checked in.
            if self._gpu_type.startswith('tegra'):
                raise error.TestWarn(
                    'Failed: on %s %d/%d tests failed.' %
                    (self._gpu_type, test_failures, test_count))
            raise error.TestFail('Failed: on %s %d/%d tests failed.' %
                                 (self._gpu_type, test_failures, test_count))
        if test_skipped > 0:
            raise error.TestFail('Failed: on %s %d tests skipped, %d passes' %
                                 (self._gpu_type, test_skipped, test_passes))