    def get_filter_option(self, profile, size):
        """Get option of filtering test.

        @param profile: The profile to encode into.
        @param size: The size of test stream in pair format (width, height).
        """

        # Profile IDs used in the blacklist to filter tests for specific
        # profiles.
        H264 = 1
        VP8 = 11
        VP9 = 12

        blacklist = {
                # (board, profile, size): [tests to skip...]

                # "board" supports Unix shell-type wildcards.

                # Use None for "profile" or "size" to indicate no filter on it.

                # It is possible to match multiple keys for board/profile/size
                # in the blacklist, e.g. veyron_minnie could match both
                # "veyron_*" and "veyron_minnie".

                # rk3399 doesn't support HW encode for plane sizes that are
                # not a multiple of the cache line size.
                ('kevin', None, None): ['CacheLineUnalignedInputTest/*'],
                ('bob', None, None): ['CacheLineUnalignedInputTest/*'],

                # Still high failure rate of VP8 EncoderPerf for veyrons,
                # disable it for now. crbug/720386
                ('veyron_*', VP8, None): ['EncoderPerf/*'],

                # Disable mid_stream_bitrate_switch test cases for elm/hana.
                # crbug/725087
                ('elm', None, None): ['MidStreamParamSwitchBitrate/*',
                                      'MultipleEncoders/*'],
                ('hana', None, None): ['MidStreamParamSwitchBitrate/*',
                                       'MultipleEncoders/*'],

                # Around 40% failure on elm and hana 320x180 test stream.
                # crbug/728906
                ('elm', H264, (320, 180)): ['ForceBitrate/*'],
                ('elm', VP8, (320, 180)): ['ForceBitrate/*'],
                ('hana', H264, (320, 180)): ['ForceBitrate/*'],
                ('hana', VP8, (320, 180)): ['ForceBitrate/*'],
                }

        board = utils.get_current_board()

        filter_list = []
        for (board_key, profile_key, size_key), value in blacklist.items():
            if (fnmatch.fnmatch(board, board_key) and
                (profile_key is None or profile == profile_key) and
                (size_key is None or size == size_key)):
                filter_list += value

        if filter_list:
            return '-' + ':'.join(filter_list)

        return ''
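
# A minimal standalone sketch of the matching rule above (example values only;
# the profile constant 11 and the board names are taken from the blacklist for
# illustration): fnmatch gives shell-style wildcard matching on the board key,
# while None acts as a wildcard for the profile/size keys.
import fnmatch

def _matches(key, board, profile, size):
    board_key, profile_key, size_key = key
    return (fnmatch.fnmatch(board, board_key)
            and (profile_key is None or profile == profile_key)
            and (size_key is None or size == size_key))

assert _matches(('veyron_*', 11, None), 'veyron_minnie', 11, (320, 180))
assert _matches(('veyron_minnie', None, None), 'veyron_minnie', 11, None)
assert not _matches(('veyron_*', 11, None), 'kevin', 11, (320, 180))
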
    def run_once(self):
        """Entry point of this test."""

        # The test is flaky on x86-* boards. Mash doesn't target hardware this
        # old, so skip the test. http://crbug.com/679213
        # The test is also flaky on nyan_* boards. Temporarily skip the test
        # until this can be fixed. http://crbug.com/717275
        boards_to_skip = [
            'x86-mario', 'x86-alex', 'x86-alex_he', 'x86-zgb', 'x86-zgb_he',
            'nyan_big', 'nyan_kitty', 'nyan_blaze'
        ]
        if utils.get_current_board() in boards_to_skip:
            logging.warning('Skipping test run on this board.')
            return

        # GPU info collection via devtools SystemInfo.getInfo does not work
        # under mus due to differences in how the GPU process is configured.
        # http://crbug.com/669965
        mus_browser_args = ['--mus', '--gpu-no-complete-info-collection']

        logging.info('Testing Chrome --mus startup.')
        with chrome.Chrome(auto_login=False,
                           extra_browser_args=mus_browser_args):
            logging.info('Chrome --mus started and loaded OOBE.')

        logging.info('Testing Chrome --mus login.')
        with chrome.Chrome(extra_browser_args=mus_browser_args):
            logging.info('Chrome login with --mus succeeded.')
Example #3
    def run_once(self):
        """Runs the test."""
        self.board = utils.get_current_board()
        with chrome.Chrome(extra_browser_args=EXTRA_BROWSER_ARGS,
                           init_network_controller=True) as cr:
            self.start_getusermedia(cr)
            self.print_perf_results()
Example #4
    def is_skipping_test(self, codec, is_switchres):
        """Determine whether this test should skip.

        @param codec: the codec to be tested, ex. 'vp8', 'vp9', 'h264'.
        @param is_switchres: bool, True if using switch resolution video.
        """
        blacklist = [
            # (board, codec, is_switchres); None means don't care.

            # "board" supports Unix shell-type wildcards

            # Disable vp8 switchres for nyan devices temporarily due to:
            # crbug/699260
            ('nyan', 'vp8', True),
            ('nyan_*', 'vp8', True)
        ]

        board = utils.get_current_board()

        for entry in blacklist:
            if ((entry[0] is None or fnmatch.fnmatch(board, entry[0]))
                    and (entry[1] is None or codec == entry[1])
                    and (entry[2] is None or is_switchres == entry[2])):
                return True

        return False
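
# Why the blacklist above lists both 'nyan' and 'nyan_*' (illustration only):
# fnmatch with no wildcard is an exact match, so 'nyan_*' alone would not
# cover a bare 'nyan' board name.
import fnmatch

assert fnmatch.fnmatch('nyan', 'nyan')
assert not fnmatch.fnmatch('nyan', 'nyan_*')
assert fnmatch.fnmatch('nyan_big', 'nyan_*')
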
Example #5
    def run_once(self):
        """Entry point of this test."""

        # Mash requires a connected display to start chrome. Chromebox and
        # Chromebit devices in the lab run without a connected display.
        # Limit this test to devices with a built-in display until we can fix
        # mash. http://crbug.com/673561
        if site_utils.get_board_type() not in ['CHROMEBOOK', 'CHROMEBASE']:
            logging.warning('chrome --mash requires a display, skipping test.')
            return

        # The test is sometimes flaky on these boards. Mash doesn't target
        # hardware this old, so skip the test. http://crbug.com/679213
        boards_to_skip = [
            'x86-mario', 'x86-alex', 'x86-alex_he', 'x86-zgb', 'x86-zgb_he'
        ]
        if utils.get_current_board() in boards_to_skip:
            logging.warning('Skipping test run on this board.')
            return

        # GPU info collection via devtools SystemInfo.getInfo does not work
        # under mash due to differences in how the GPU process is configured.
        # http://crbug.com/669965
        mash_browser_args = ['--mash', '--gpu-no-complete-info-collection']

        logging.info('Testing Chrome --mash startup.')
        with chrome.Chrome(auto_login=False,
                           extra_browser_args=mash_browser_args):
            logging.info('Chrome --mash started and loaded OOBE.')

        logging.info('Testing Chrome --mash login.')
        with chrome.Chrome(extra_browser_args=mash_browser_args):
            logging.info('Chrome login with --mash succeeded.')
Example #6
    def run_once(self):
        """Entry point of this test."""

        # Flaky on nyan_* boards. http://crbug.com/717275
        boards_to_skip = ['nyan_big', 'nyan_kitty', 'nyan_blaze']
        if utils.get_current_board() in boards_to_skip:
            logging.warning('Skipping test run on this board.')
            return

        # GPU info collection via devtools SystemInfo.getInfo does not work
        # under mash due to differences in how the GPU process is configured
        # with mus hosting viz. http://crbug.com/669965
        mash_browser_args = [
            '--enable-features=Mash', '--disable-features=SingleProcessMash',
            '--gpu-no-complete-info-collection'
        ]

        logging.info('Testing Chrome with Mash startup.')
        with chrome.Chrome(auto_login=False,
                           extra_browser_args=mash_browser_args):
            logging.info('Chrome with Mash started and loaded OOBE.')
            self.__screen_visual_sanity_test()

        logging.info('Testing Chrome with Mash login.')
        with chrome.Chrome(extra_browser_args=mash_browser_args):
            logging.info('Chrome login with Mash succeeded.')
Example #7
    def run_once(self):
        """
        Entry point of this test.
        """
        self.job.install_pkg(self.dep, 'dep', self.dep_dir)

        with service_stopper.ServiceStopper([self.adapter_service]):
            cmd = [os.path.join(self.dep_dir, 'bin', self.test_binary)]
            xml_content = utils.system_output(' '.join(
                ['android-sh', '-c', '\"cat',
                 self.media_profiles_path + '\"']))
            root = xml.etree.ElementTree.fromstring(xml_content)
            recording_params = set()
            for camcorder_profiles in root.findall('CamcorderProfiles'):
                for encoder_profile in camcorder_profiles.findall(
                        'EncoderProfile'):
                    video = encoder_profile.find('Video')
                    recording_params.add(
                        '%s:%s:%s:%s' %
                        (camcorder_profiles.get('cameraId'),
                         video.get('width'), video.get('height'),
                         video.get('frameRate')))
            if recording_params:
                cmd.append('--recording_params=' + ','.join(recording_params))
            if utils.get_current_board() in self.tablet_board_list:
                cmd.append('--gtest_filter=-*SensorOrientationTest/*')

            utils.system(' '.join(cmd), timeout=self.timeout)
    def run_once(self):
        """ Run the regression test and collect the results.
        """
        board = utils.get_current_board()
        platform = _PLATFORM_MAPPINGS.get(board, board)

        # find paths for touchpad tests
        root = os.path.join(self.autodir, 'deps', 'touchpad-tests')
        framework_dir = os.path.join(root, 'framework')
        tests_dir = os.path.join(root, 'tests')

        # create test runner
        sys.path.append(framework_dir)
        sys.path.append(root)
        from test_runner import ParallelTestRunner
        runner = ParallelTestRunner(tests_dir)

        # run all tests for this platform and extract results
        results = runner.RunAll('%s*/*' % platform, verbose=True)
        # TODO(dennisjeffrey): Remove all uses of self.test_results below,
        # including the call to self.write_perf_keyval(), once we're ready to
        # switch over completely from perf keyvals to output_perf_value().
        self.test_results = {}
        for key, value in results.items():
            score = value['score']
            # Booleans and non-numeric scores are coerced to 0.0.
            if isinstance(score, bool) or not isinstance(score, (int, float)):
                score = 0.0
            self.test_results[key.replace('/', '-')] = score
            self.output_perf_value(key.replace('/', '-'), score, 'points')

        # write converted test results out
        if self.test_results:
            self.write_perf_keyval(self.test_results)
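
# The score coercion above in isolation (hypothetical scores, for
# illustration): booleans and non-numeric values collapse to 0.0, while ints
# and floats pass through unchanged.
def _coerce_score(score):
    if isinstance(score, bool) or not isinstance(score, (int, float)):
        return 0.0
    return score

assert _coerce_score(True) == 0.0    # bools are rejected despite being ints
assert _coerce_score('bad') == 0.0   # non-numeric scores
assert _coerce_score(7) == 7
assert _coerce_score(0.95) == 0.95
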
    def run_once(self):
        """Tries to create a socket with every possible combination of
        protocol family and socket type.
        Fails if it can create a socket for one or more protocol families
        not in the baseline.
        """

        unexpected_protocol_families = []

        # Protocol families currently go up to 40, but this way we make sure
        # to catch new families that might get added to the kernel.
        for pfn in range(256):
            pf_available = self.is_protocol_family_available(pfn)
            protocol_family = self.pf_name(pfn)

            if pf_available:
                # If PF is in baseline, continue.
                if protocol_family in self.PF_BASELINE:
                    continue

                # Check the board-specific whitelist.
                current_board = utils.get_current_board()
                board_pfs = self.PER_BOARD.get(current_board, None)
                if not board_pfs or protocol_family not in board_pfs:
                    unexpected_protocol_families.append(protocol_family)

        if len(unexpected_protocol_families) > 0:
            failure_string = "Unexpected protocol families available: "
            failure_string += ", ".join(unexpected_protocol_families)
            logging.error(failure_string)
            raise error.TestFail(failure_string)
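
# One plausible shape for the is_protocol_family_available() helper used above
# (an assumption for illustration; the original implementation is not shown in
# this snippet): try to create a socket of any type for the given protocol
# family number.
import socket

def is_protocol_family_available(pfn):
    """Return True if a socket can be created for protocol family number pfn."""
    for sock_type in (socket.SOCK_STREAM, socket.SOCK_DGRAM, socket.SOCK_RAW):
        try:
            socket.socket(pfn, sock_type).close()
            return True
        except (socket.error, ValueError):
            continue
    return False
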
    def run_once(self):
        # TODO(scottz): Remove this when crbug.com/220147 is fixed.
        dut_board = utils.get_current_board()
        if dut_board == 'x86-mario':
            raise error.TestNAError('This test is not available on %s' %
                                    dut_board)
        with chrome.Chrome() as cr:
            self.run_video_tests(cr.browser)
Example #11
    def run_once(self):
        """Runs the video_WebRtcPeerConnectionWithCamera test."""
        self.board = utils.get_current_board()
        with chrome.Chrome(extra_browser_args=EXTRA_BROWSER_ARGS) as cr:
            # Open WebRTC loopback page and start the loopback.
            self.start_loopback(cr)
            if not self.check_loopback_result():
                raise error.TestFail('Failed webrtc camera test')
Example #12
    def run_once(self):
        """Compare expected and actual kernel versions."""
        board = utils.get_current_board()
        actual = self._actual_kernel(board)
        expected = self._expected_kernel(board)
        if not actual.startswith(expected):
            raise error.TestFail('%s: Expected kernel version %s; Found %s' %
                                 (board, expected, actual))
Example #13
def skip_devices_to_test(*boards):
    """Devices to skip due to hardware or test compatibility issues.

    @param boards: the boards to skip testing.
    """
    # TODO(scottz): Remove this when crbug.com/220147 is fixed.
    dut_board = utils.get_current_board()
    if dut_board in boards:
        raise error.TestNAError('This test is not available on %s' % dut_board)
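
# Usage sketch for the helper above (board names taken from other snippets in
# this listing, for illustration): call it at the top of run_once() so the
# test reports TestNA on incompatible hardware.
#
#     def run_once(self):
#         skip_devices_to_test('x86-mario', 'x86-zgb')
#         ...
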
    def run_once(self):
        boards_to_skip = ['x86-mario', 'x86-zgb']
        # TODO(scottz): Remove this when crbug.com/220147 is fixed.
        dut_board = utils.get_current_board()
        if dut_board in boards_to_skip:
            logging.info("Skipping test run on this board.")
            return
        with chrome.Chrome() as cr:
            self.run_video_sanity_test(cr.browser)
Example #15
    def run_once(self):
        # TODO(scottz): Remove this when crbug.com/220147 is fixed.
        dut_board = utils.get_current_board()
        if dut_board == 'x86-mario':
            raise error.TestNAError('This test is not available on %s' %
                                    dut_board)
        with chrome.Chrome(
                extra_browser_args=helper_logger.chrome_vmodule_flag(),
                init_network_controller=True) as cr:
            self.run_video_tests(cr.browser)
    def run_once(self, video_file, arc_mode=False):
        """
        Tests whether the requested video is playable.

        @param video_file: Sample video file to be played in Chrome.

        """
        blacklist = [
            # (board, arc_mode) # None means don't care
            ('x86-mario', None),
            ('x86-zgb', None),
            # The result on elm and oak is flaky in arc mode.
            # TODO(wuchengli): remove them once crbug.com/679864 is fixed.
            ('elm', True),
            ('oak', True)
        ]

        dut_board = utils.get_current_board()
        for (blacklist_board, blacklist_arc_mode) in blacklist:
            if blacklist_board == dut_board:
                if blacklist_arc_mode is None or blacklist_arc_mode == arc_mode:
                    logging.info("Skipping test run on this board.")
                    return
                break

        if arc_mode:
            arc_mode_str = arc_common.ARC_MODE_ENABLED
        else:
            arc_mode_str = arc_common.ARC_MODE_DISABLED
        with chrome.Chrome(
                extra_browser_args=helper_logger.chrome_vmodule_flag(),
                arc_mode=arc_mode_str,
                init_network_controller=True) as cr:
            shutil.copy2(constants.VIDEO_HTML_FILEPATH, self.bindir)
            video_path = os.path.join(constants.CROS_VIDEO_DIR, 'files',
                                      video_file)
            shutil.copy2(video_path, self.bindir)
            cr.browser.platform.SetHTTPServerDirectories(self.bindir)
            tab = cr.browser.tabs.New()
            html_fullpath = os.path.join(self.bindir, 'video.html')
            url = cr.browser.platform.http_server.UrlOf(html_fullpath)

            player = native_html5_player.NativeHtml5Player(
                tab,
                full_url=url,
                video_id='video',
                video_src_path=video_file,
                event_timeout=120)
            player.load_video()
            player.play()
            player.verify_video_can_play(constants.PLAYBACK_TEST_TIME_S)
    def _load_baseline(self, bltype):
        """Load the list of expected files for a given baseline type.

        @param bltype: the baseline to load.
        @returns: a set containing the names of the files in the board's
                  baseline.
        """
        # Baseline common to all boards.
        blname = 'baseline.' + bltype
        blset = self._load_baseline_file(blname)
        # Board-specific baseline.
        board_blname = 'baseline.%s.%s' % (utils.get_current_board(), bltype)
        blset |= self._load_baseline_file(board_blname)
        return blset
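
# Filename composition above in isolation (assumed example bltype and board,
# for illustration): a common baseline plus a board-specific baseline, whose
# contents are unioned with |=.
bltype, board = 'example_bltype', 'kevin'
assert 'baseline.' + bltype == 'baseline.example_bltype'
assert 'baseline.%s.%s' % (board, bltype) == 'baseline.kevin.example_bltype'
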
    def get_filter_option(self):
        """Get option of filtering test
        """

        blacklist = {
            # board: [tests to skip...]

            # Kevin doesn't support HW encode for plane sizes that are not a
            # multiple of the cache line size.
            'kevin': ['CacheLineUnalignedInputTest/*']
        }

        board = utils.get_current_board()
        if board in blacklist:
            return ' --gtest_filter=-' + ':'.join(blacklist[board])

        return ''
def _can_switch_bitrate():
    """Determine whether the board can switch the bitrate.

    The bitrate switch test case is mainly for ARC++. We don't have much control
    over some devices that do not run ARC++. Therefore we whitelist the boards
    that should pass the bitrate switch test. (crbug.com/890125)
    """
    # Most Intel chipsets are able to switch bitrate except these two old
    # chipsets, so we blacklist the devices.
    intel_blacklist = [
        # Rambi (Bay Trail)
        'cranky',
        'banjo',
        'candy',
        'clapper',
        'enguarde',
        'expresso',
        'glimmer',
        'gnawty',
        'heli',
        'hoofer',
        'kip',
        'kip14',
        'ninja',
        'orco',
        'quawks',
        'squawks',
        'sumo',
        'swanky',
        'winky',

        # Haswell
        'falco',
        'leon',
        'mccloud',
        'monroe',
        'panther',
        'peppy',
        'tricky',
        'wolf',
        'zako',
    ]
    return (_run_on_intel_cpu()
            and utils.get_current_board() not in intel_blacklist)
    def run_once(self):
        """Entry point of this test."""

        # Flaky on nyan_* boards. http://crbug.com/717275
        boards_to_skip = ['nyan_big', 'nyan_kitty', 'nyan_blaze']
        if utils.get_current_board() in boards_to_skip:
            logging.warning('Skipping test run on this board.')
            return

        # GPU info collection via devtools SystemInfo.getInfo does not work
        # under mus due to differences in how the GPU process is configured.
        # http://crbug.com/669965
        mus_browser_args = ['--mus', '--gpu-no-complete-info-collection']

        logging.info('Testing Chrome --mus startup.')
        with chrome.Chrome(auto_login=False,
                           extra_browser_args=mus_browser_args):
            logging.info('Chrome --mus started and loaded OOBE.')

        logging.info('Testing Chrome --mus login.')
        with chrome.Chrome(extra_browser_args=mus_browser_args):
            logging.info('Chrome login with --mus succeeded.')
    def run_once(self, capability):
        """Runs the test.

        @param capability: Capability required for executing this test.
        """
        device_capability.DeviceCapability().ensure_capability(capability)

        self.board = utils.get_current_board()
        with chrome.Chrome(extra_browser_args=EXTRA_BROWSER_ARGS +
                           [helper_logger.chrome_vmodule_flag()],
                           init_network_controller=True) as cr:

            # TODO(keiichiw): vivid should be loaded in self.setup() after
            # crbug.com/871185 is fixed
            if utils.is_virtual_machine():
                try:
                    utils.run('sudo modprobe vivid n_devs=1 node_types=0x1')
                except Exception as e:
                    raise error.TestFail('Failed to load vivid', e)

            self.start_getusermedia(cr)
            self.print_perf_results()
def _can_encode_nv12():
    """
    Determine whether the board can encode NV12.

    NV12 is the most common input format that drivers support for video
    encoding. Some devices cannot encode an NV12 input buffer because of a
    Chromium code base issue or a driver issue.
    """
    # Although V4L2VEA supports NV12, some devices cannot encode NV12 probably
    # due to a driver issue.
    nv12_black_list = [
        r'^daisy.*',
        r'^nyan.*',
        r'^peach.*',
        r'^veyron.*',
    ]

    board = utils.get_current_board()
    for p in nv12_black_list:
        if re.match(p, board):
            return False
    return True
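
# The prefix matching above in isolation (example boards for illustration):
# re.match anchors at the start of the string, so a pattern like '^veyron.*'
# covers every veyron variant but nothing else.
import re

assert re.match(r'^veyron.*', 'veyron_minnie')
assert not re.match(r'^veyron.*', 'kevin')
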
Example #23
    def run_once(self):
        # TODO(apronin): crbug.com/618392. This test flakes on these boards.
        boards_to_skip = ['tricky', 'peach_pit', 'falco']
        board = utils.get_current_board()
        if board in boards_to_skip:
            logging.info("Skipping test run on %s.", board)
            return

        listener = session_manager.OwnershipSignalListener(gobject.MainLoop())
        listener.listen_for_new_key_and_policy()
        # Sign in. Sign out happens automatically when cr goes out of scope.
        with chrome.Chrome(clear_enterprise_policy=False) as cr:
            listener.wait_for_signals(desc='Owner settings written to disk.')

        with open(constants.OWNER_KEY_FILE, 'rb') as key:
            key_hash = hashlib.md5(key.read())
        mtime = os.stat(constants.OWNER_KEY_FILE).st_mtime

        # Sign in/sign out as a second user.
        with chrome.Chrome(clear_enterprise_policy=False,
                           username=self._TEST_USER,
                           password=self._TEST_PASS,
                           gaia_id=self._TEST_GAIAID) as cr:
            pass

        # Checking mtime to see if key file was touched during second sign in.
        if os.stat(constants.OWNER_KEY_FILE).st_mtime > mtime:
            raise error.TestFail("Owner key was touched on second login!")

        # Sanity check.
        with open(constants.OWNER_KEY_FILE, 'rb') as key2:
            hash2 = hashlib.md5(key2.read())
        if key_hash.hexdigest() != hash2.hexdigest():
            raise error.TestFail("Owner key was touched on second login!")
Example #24
    def run_once(self):
        boards_to_skip = ['x86-mario']
        dut_board = utils.get_current_board()
        if dut_board in boards_to_skip:
            logging.info("Skipping test run on this board.")
            return
        # Check for existing cras crashes which might occur during UI bring up.
        # TODO: (rohitbm) check if we need to reboot the DUT before the test
        #       start to verify cras crashes during boot.
        existing_crash_reports = self.collect_cras_crash()
        if len(existing_crash_reports) == 0:
            self._check['crashes_on_boot'] = True

        # Capturing cras pid before starting the test.
        cras_pid_1 = utils.get_oldest_pid_by_name('/usr/bin/cras')

        with chrome.Chrome(init_network_controller=True) as self._cr:
            try:
                # This will be used on Chrome PFQ since it's using a more recent
                # version of Chrome. crbug.com/537655.
                self._cr.browser.platform.SetHTTPServerDirectories(self.bindir)
            except:
                # This will be used on ChromeOS CQ since Chrome hasn't uprev'ed
                # yet. crbug.com/538140.
                self._cr.browser.SetHTTPServerDirectories(self.bindir)
            for test_file in self._audio:
                url = _DOWNLOAD_BASE + 'audio_test/' + test_file
                self.push_new_stream(self._cr.browser.tabs.New(), url)

            # Capturing cras pid before opening a new set of audio streams.
            cras_pid_2 = utils.get_oldest_pid_by_name('/usr/bin/cras')
            for test_file in self._video:
                url = _DOWNLOAD_BASE + 'traffic/' + test_file
                self.push_new_stream(self._cr.browser.tabs.New(), url)

            # Play audio for some time to ensure that long playback is good.
            time.sleep(10)

            total_tests = len(self._audio) + len(self._video)
            active_streams = cras_utils.get_active_stream_count()
            logging.debug(
                'Number of active streams after opening all tabs: %d.',
                active_streams)
            if active_streams >= total_tests:
                self._check['stream_activation'] = True

            # Capturing cras pid after opening all audio/video streams.
            cras_pid_3 = utils.get_oldest_pid_by_name('/usr/bin/cras')

            # Close all open audio streams.
            while total_tests > 0:
                self._cr.browser.tabs[total_tests].Close()
                total_tests -= 1
                time.sleep(1)
            active_streams = cras_utils.get_active_stream_count()
            logging.debug(
                'Number of active streams after closing all tabs: %d.',
                active_streams)

            # Capturing cras pid after closing all audio/video streams.
            cras_pid_4 = utils.get_oldest_pid_by_name('/usr/bin/cras')

            if cras_pid_1 == cras_pid_2 == cras_pid_3 == cras_pid_4:
                self._check['cras_status'] = True

        new_crash_reports = self.collect_cras_crash()
        new_reports = list(
            set(new_crash_reports) - set(existing_crash_reports))
        if len(new_reports) == 0:
            self._check['crashes_at_end'] = True

        err_msg = ''
        if self._check.values().count(False) > 0:
            if not self._check['crashes_on_boot']:
                err_msg = ('1. Found cras crashes on boot: %s.\n' %
                           existing_crash_reports)
            if not self._check['stream_activation']:
                err_msg += ('2. CRAS stream count does not match the '
                            'number of streams.\n')
            if not self._check['cras_status']:
                err_msg += ('3. CRAS PID changed during the test. CRAS might '
                            'be crashing while adding/removing streams.\n')
            if not self._check['crashes_at_end']:
                err_msg += ('4. Found cras crashes at the end of the test: '
                            '%s.' % new_reports)
            raise error.TestError(err_msg)
Example #25
    def run_once(self,
                 source_path,
                 codec,
                 resolution,
                 host,
                 args,
                 collect_only=False):

        board = utils.get_current_board()

        file_utils.make_leaf_dir(constants.TEST_DIR)

        with chrome.Chrome(
                extension_paths=[cros_constants.MULTIMEDIA_TEST_EXTENSION],
                autotest_ext=True) as cr:

            cr.browser.platform.SetHTTPServerDirectories(self.bindir)
            html_fullpath = os.path.join(self.bindir, 'video.html')
            player = native_html5_player.NativeHtml5Player(
                tab=cr.browser.tabs[0],
                full_url=cr.browser.platform.http_server.UrlOf(html_fullpath),
                video_id='video',
                video_src_path=source_path,
                event_timeout=120)

            chameleon_board = chameleon.create_chameleon_board(
                host.hostname, args)
            display_facade = local_facade_factory.LocalFacadeFactory(
                cr).create_display_facade()

            finder = chameleon_port_finder.ChameleonVideoInputFinder(
                chameleon_board, display_facade)

            capturer = chameleon_video_capturer.ChameleonVideoCapturer(
                finder.find_port(interface='hdmi'), display_facade)

            with capturer:
                player.load_video()

                player.verify_video_can_play()

                display_facade.move_to_display(
                    display_facade.get_first_external_display_index())
                display_facade.set_fullscreen(True)
                # HACK: Unset and reset fullscreen. There is a bug in Chrome
                # that fails to move the window to a correct position.
                # Resetting fullscreen helps; see http://crbug.com/574284.
                display_facade.set_fullscreen(False)
                display_facade.set_fullscreen(True)
                time.sleep(5)

                box = (0, 0, constants.DESIRED_WIDTH, constants.DESIRED_HEIGHT)

                # TODO(mussa): Revisit once crbug/580736 is fixed.
                for n in xrange(constants.NUM_CAPTURE_TRIES):
                    logging.debug('Trying to capture frames. TRY #%d', n + 1)
                    raw_test_checksums = capturer.capture_only(
                        player, max_frame_count=constants.FCOUNT, box=box)

                    raw_test_checksums = [
                        tuple(checksum) for checksum in raw_test_checksums
                    ]

                    overreach_counts = self.overreach_frame_counts(
                        raw_test_checksums, constants.MAX_FRAME_REPEAT_COUNT)

                    if not overreach_counts:  # no checksums exceeded threshold
                        break

                    player.pause()
                    player.seek_to(datetime.timedelta(seconds=0))

                else:
                    msg = ('Framecount overreach detected even after %d '
                           'tries. Checksums: %s' %
                           (constants.NUM_CAPTURE_TRIES, overreach_counts))
                    raise error.TestFail(msg)

                # Produces unique checksums mapped to their occurrence indices.
                test_checksum_indices = frame_checksum_utils.checksum_indices(
                    raw_test_checksums)

                test_checksums = test_checksum_indices.keys()

                test_indices = test_checksum_indices.values()

                golden_checksums_filepath = os.path.join(
                    constants.TEST_DIR, constants.GOLDEN_CHECKSUMS_FILENAME)

                if collect_only:
                    capturer.write_images(test_indices, constants.TEST_DIR,
                                          constants.IMAGE_FORMAT)

                    logging.debug("Write golden checksum file to %s",
                                  golden_checksums_filepath)

                    with open(golden_checksums_filepath, "w+") as f:
                        for checksum in test_checksums:
                            f.write(' '.join([str(i)
                                              for i in checksum]) + '\n')
                    return

                golden_checksums_remote_filepath = os.path.join(
                    constants.GOLDEN_CHECKSUM_REMOTE_BASE_DIR, board,
                    codec + '_' + resolution,
                    constants.GOLDEN_CHECKSUMS_FILENAME)

                file_utils.download_file(golden_checksums_remote_filepath,
                                         golden_checksums_filepath)

                golden_checksums = self.read_checksum_file(
                    golden_checksums_filepath)

                golden_checksum_count = len(golden_checksums)
                test_checksum_count = len(test_checksums)

                eps = constants.MAX_DIFF_TOTAL_FCOUNT
                if golden_checksum_count - test_checksum_count > eps:
                    msg = ('Expecting about %d checksums, received %d. '
                           'Allowed delta is %d') % (golden_checksum_count,
                                                     test_checksum_count, eps)
                    raise error.TestFail(msg)

                # Some frames might be missing during either golden frame
                # collection or during a test run. Using LCS ensures we
                # ignore a few missing frames while comparing test vs golden.

                lcs_len = sequence_utils.lcs_length(golden_checksums,
                                                    test_checksums)

                missing_frames_count = len(golden_checksums) - lcs_len
                unknown_frames_count = len(test_checksums) - lcs_len

                msg = ('# of matching frames : %d. # of missing frames : %d. '
                       '# of unknown test frames : %d. Max allowed # of '
                       'missing frames : %d. # of golden frames : %d. # of '
                       'test_checksums : %d' %
                       (lcs_len, missing_frames_count,
                        unknown_frames_count, constants.MAX_NONMATCHING_FCOUNT,
                        len(golden_checksums), len(test_checksums)))
                logging.debug(msg)

                if (missing_frames_count + unknown_frames_count >
                        constants.MAX_NONMATCHING_FCOUNT):
                    unknown_frames = set(test_checksums) - set(
                        golden_checksums)

                    store_indices = [
                        test_checksum_indices[c] for c in unknown_frames
                    ]

                    paths = capturer.write_images(store_indices,
                                                  constants.TEST_DIR,
                                                  constants.IMAGE_FORMAT)

                    path_publish = publisher.ImageDiffPublisher(
                        self.resultsdir)
                    path_publish.publish_paths(paths, self.tagged_testname)

                    raise error.TestFail("Too many non-matching frames")
    def get_current_board(self):
        """Returns the current device board name."""
        return utils.get_current_board()
    def run_once(self,
                 cmd_timeout=600,
                 camera_hals=None,
                 options=None,
                 capability=None,
                 test_config=None):
        """
        Entry point of this test.

        @param cmd_timeout: Seconds. Timeout for running the test command.
        @param camera_hals: The camera HALs to be tested. e.g. ['usb.so']
        @param options: Option strings passed to test command. e.g. ['--v=1']
        @param capability: Capability required for executing this test.
        @param test_config: Test config applied via set_test_config() for the
                duration of the test.
        """
        if options is None:
            options = []

        if test_config is None:
            test_config = {}

        if capability:
            device_capability.DeviceCapability().ensure_capability(capability)

        self.job.install_pkg(self.dep, 'dep', self.dep_dir)

        camera_hal_paths = camera_utils.get_camera_hal_paths_for_test()
        if camera_hals is not None:
            name_map = dict(
                (os.path.basename(path), path) for path in camera_hal_paths)
            camera_hal_paths = []
            for name in camera_hals:
                path = name_map.get(name)
                if path is None:
                    msg = 'HAL %r is not available for test' % name
                    raise error.TestNAError(msg)
                camera_hal_paths.append(path)

        binary_path = os.path.join(self.dep_dir, 'bin', self.test_binary)

        with service_stopper.ServiceStopper([self.cros_camera_service]), \
                self.set_test_config(test_config):
            has_facing_option = False
            cmd = [binary_path]
            for option in options:
                if 'gtest_filter' in option:
                    filters = option.split('=')[1]
                    if 'Camera3DeviceTest' in filters.split('-')[0]:
                        if utils.get_current_board() in self.tablet_board_list:
                            option += (':' if '-' in filters else '-')
                            option += '*SensorOrientationTest/*'
                    if any(name in filters.split('-')[0]
                           for name in ('Camera3ModuleFixture',
                                        'Camera3RecordingFixture')):
                        cmd.append(self.get_recording_params())
                elif 'camera_facing' in option:
                    has_facing_option = True
                cmd.append(option)

            if has_facing_option:
                utils.system(cmd, timeout=cmd_timeout)
            else:
                for camera_hal_path in camera_hal_paths:
                    logging.info('Run test with %r', camera_hal_path)
                    cmd.append('--camera_hal_path=%s' % camera_hal_path)
                    utils.system(cmd, timeout=cmd_timeout)
                    cmd.pop()
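
# The gtest_filter augmentation above in isolation (hypothetical filter
# strings, for illustration): if the filter already has a negative section,
# the extra pattern is appended with ':'; otherwise '-' starts one.
def _exclude_sensor_orientation(option):
    filters = option.split('=')[1]
    option += (':' if '-' in filters else '-')
    return option + '*SensorOrientationTest/*'

assert (_exclude_sensor_orientation('--gtest_filter=Camera3DeviceTest.*') ==
        '--gtest_filter=Camera3DeviceTest.*-*SensorOrientationTest/*')
assert (_exclude_sensor_orientation('--gtest_filter=Camera3DeviceTest.*-Foo') ==
        '--gtest_filter=Camera3DeviceTest.*-Foo:*SensorOrientationTest/*')
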
Example #28
    def run_once(self):
        boards_to_skip = ['x86-mario']
        dut_board = utils.get_current_board()
        if dut_board in boards_to_skip:
            logging.info("Skipping test run on this board.")
            return
        # Check for existing cras crashes which might occur during UI bring up.
        # TODO: (rohitbm) check if we need to reboot the DUT before the test
        #       start to verify cras crashes during boot.
        existing_crash_reports = self.collect_cras_crash()
        if len(existing_crash_reports) == 0:
            self._check['crashes_on_boot'] = True

        # Capturing cras pid before starting the test.
        cras_pid_1 = utils.get_oldest_pid_by_name('/usr/bin/cras')

        with chrome.Chrome(init_network_controller=True) as self._cr:
            # Push the 1st stream
            self.push_new_stream(self._cr.browser.tabs.New())

            # Capturing cras pid before opening a new set of audio streams.
            cras_pid_2 = utils.get_oldest_pid_by_name('/usr/bin/cras')

            # Push the 2nd stream
            self.push_new_stream(self._cr.browser.tabs.New())

            # Play audio for some time to ensure that long playback is good.
            time.sleep(10)

            total_tests = 2
            active_streams = cras_utils.get_active_stream_count()
            logging.debug(
                'Number of active streams after opening all tabs: %d.',
                active_streams)
            if active_streams >= total_tests:
                self._check['stream_activation'] = True

            # Capturing cras pid after opening all audio/video streams.
            cras_pid_3 = utils.get_oldest_pid_by_name('/usr/bin/cras')

            # Close all open audio streams.
            while total_tests > 0:
                self._cr.browser.tabs[total_tests].Close()
                total_tests -= 1
                time.sleep(1)
            active_streams = cras_utils.get_active_stream_count()
            logging.debug(
                'Number of active streams after closing all tabs: %d.',
                active_streams)

            # Capturing cras pid after closing all audio/video streams.
            cras_pid_4 = utils.get_oldest_pid_by_name('/usr/bin/cras')

            if cras_pid_1 == cras_pid_2 == cras_pid_3 == cras_pid_4:
                self._check['cras_status'] = True

        new_crash_reports = self.collect_cras_crash()
        new_reports = list(
            set(new_crash_reports) - set(existing_crash_reports))
        if len(new_reports) == 0:
            self._check['crashes_at_end'] = True

        err_msg = ''
        if self._check.values().count(False) > 0:
            if not self._check['crashes_on_boot']:
                err_msg = ('1. Found cras crashes on boot: %s.\n' %
                           existing_crash_reports)
            if not self._check['stream_activation']:
                err_msg += ('2. CRAS stream count does not match the '
                            'number of streams.\n')
            if not self._check['cras_status']:
                err_msg += ('3. CRAS PID changed during the test. CRAS might '
                            'be crashing while adding/removing streams.\n')
            if not self._check['crashes_at_end']:
                err_msg += ('4. Found cras crashes at the end of the test: '
                            '%s.' % new_reports)
            raise error.TestError(err_msg)
Example #29
    def compare_extensions(self):
        """Compare installed extensions to the expected set.

        Find the set of expected IDs.
        Find the set of observed IDs.
        Do set comparison to find the unexpected, and the expected/missing.

        """
        test_fail = False
        combined_baseline = (self._bundled_crx_baseline +
                             self._component_extension_baseline)
        # Filter out any baseline entries that don't apply to this board.
        # If there is no 'boards' limiter on a given record, the record applies.
        # If there IS a 'boards' limiter, check that it applies.
        board = utils.get_current_board()
        combined_baseline = [
            x for x in combined_baseline
            if 'boards' not in x or board in x['boards']
        ]

        observed_extensions = self._get_extensions_info()
        observed_ids = set([x['id'] for x in observed_extensions])
        expected_ids = set([x['id'] for x in combined_baseline])

        missing_ids = expected_ids - observed_ids
        missing_names = [
            '%s (%s)' % (x['name'], x['id']) for x in combined_baseline
            if x['id'] in missing_ids
        ]

        unexpected_ids = observed_ids - expected_ids
        unexpected_names = [
            '%s (%s)' % (x['name'], x['id']) for x in observed_extensions
            if x['id'] in unexpected_ids
        ]

        good_ids = expected_ids.intersection(observed_ids)

        if missing_names:
            logging.error('Missing: %s', '; '.join(missing_names))
            test_fail = True
        if unexpected_names:
            logging.error('Unexpected: %s', '; '.join(unexpected_names))
            test_fail = True

        # For those IDs in both the expected-and-observed, ie, "good":
        #   Compare sets of expected-vs-actual API permissions, report diffs.
        #   Do same for host permissions.
        for good_id in good_ids:
            baseline = [x for x in combined_baseline if x['id'] == good_id][0]
            actual = [x for x in observed_extensions if x['id'] == good_id][0]
            # Check the API permissions.
            baseline_apis = set(baseline['apiPermissions'])
            actual_apis = set(actual['apiPermissions'])
            missing_apis = baseline_apis - actual_apis
            unexpected_apis = actual_apis - baseline_apis
            if missing_apis or unexpected_apis:
                test_fail = True
                self._report_attribute_diffs(missing_apis, unexpected_apis,
                                             actual)
            # Check the host permissions.
            baseline_hosts = set(baseline['effectiveHostPermissions'])
            actual_hosts = set(actual['effectiveHostPermissions'])
            missing_hosts = baseline_hosts - actual_hosts
            unexpected_hosts = actual_hosts - baseline_hosts
            if missing_hosts or unexpected_hosts:
                test_fail = True
                self._report_attribute_diffs(missing_hosts, unexpected_hosts,
                                             actual)
        if test_fail:
            # TODO(jorgelo): make this fail again, see crbug.com/343271.
            raise error.TestWarn('Baseline mismatch, see error log.')
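
# A hypothetical baseline record shape implied by the comparisons above (field
# names taken from the code; all values are invented for illustration):
example_baseline_entry = {
    'id': 'aaaabbbbccccddddeeeeffffgggghhhh',       # invented extension ID
    'name': 'Example Component Extension',          # invented name
    'apiPermissions': ['storage', 'tabs'],
    'effectiveHostPermissions': ['https://example.com/*'],
    # Optional: when 'boards' is absent the record applies to every board.
    'boards': ['kevin'],
}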