Example #1
 def __init__(self, device, tool, shard_index, build_type):
     """
   Args:
     device: Tests will run on the device of this ID.
     shard_index: Index number of the shard on which the test suite will run.
     build_type: 'Release' or 'Debug'.
 """
     self.device = device
     self.adb = android_commands.AndroidCommands(device=device)
     self.tool = CreateTool(tool, self.adb)
     self._http_server = None
     self._forwarder = None
     self._forwarder_device_port = 8000
     self.forwarder_base_url = ('http://localhost:%d' %
                                self._forwarder_device_port)
     self.flags = FlagChanger(self.adb)
     self.shard_index = shard_index
     self.flags.AddFlags(['--disable-fre'])
     self._spawning_server = None
     self._spawner_forwarder = None
     # We will allocate a port for the test server spawner when calling
     # LaunchChromeTestServerSpawner, and a port for the test server when
     # starting it in TestServerThread.
     self.test_server_spawner_port = 0
     self.test_server_port = 0
     self.build_type = build_type
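
The constructor above is only a method body; its class, imports and helpers (android_commands, CreateTool, FlagChanger) are not shown. Below is a minimal usage sketch, assuming the surrounding class is named BaseTestRunner (a hypothetical name) and that an adb-visible device is attached:

# Hypothetical usage; BaseTestRunner and the argument values are
# illustrative assumptions, not part of the example above.
runner = BaseTestRunner(device='emulator-5554',  # adb serial of the target
                        tool='',                 # no Valgrind tool wrapper
                        shard_index=0,
                        build_type='Release')
print(runner.forwarder_base_url)  # -> 'http://localhost:8000'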
Example #2
 def __init__(self, device, tool, shard_index):
     """
   Args:
     device: Tests will run on the device of this ID.
     shard_index: Index number of the shard on which the test suite will run.
 """
     self.device = device
     self.adb = android_commands.AndroidCommands(device=device)
     self.tool = CreateTool(tool, self.adb)
     # Synchronize date/time between the host and the device. Otherwise the
     # same file on the host and the device may have different timestamps,
     # which may cause AndroidCommands.PushIfNeeded to fail, or a test that
     # compares a timestamp from an HTTP HEAD response with the local time
     # may fail.
     self.adb.SynchronizeDateTime()
     self._http_server = None
     self._forwarder = None
     self._forwarder_device_port = 8000
     self.forwarder_base_url = ('http://localhost:%d' %
                                self._forwarder_device_port)
     self.flags = FlagChanger(self.adb)
     self.shard_index = shard_index
     self.flags.AddFlags(['--disable-fre'])
     self._spawning_server = None
     self._spawner_forwarder = None
     # We will allocate a port for the test server spawner when calling
     # LaunchChromeTestServerSpawner, and a port for the test server when
     # starting it in TestServerThread.
     self.test_server_spawner_port = 0
     self.test_server_port = 0
Example #3
 def __init__(self, adb, device, test_suite, timeout, rebaseline,
              performance_test, cleanup_test_files, tool, dump_debug_info):
     self.adb = adb
     self.device = device
     self.test_suite = os.path.splitext(test_suite)[0]
     self.test_suite_basename = os.path.basename(self.test_suite)
     self.test_suite_dirname = os.path.dirname(self.test_suite)
     self.rebaseline = rebaseline
     self.performance_test = performance_test
     self.cleanup_test_files = cleanup_test_files
     self.tool = CreateTool(tool, self.adb)
     if timeout == 0:
         if self.test_suite_basename == 'page_cycler_tests':
             timeout = 900
         else:
             timeout = 60
     self.timeout = timeout * self.tool.GetTimeoutScale()
     self.dump_debug_info = dump_debug_info
Example #4
 def __init__(self, adb, device, test_suite, timeout, rebaseline,
              performance_test, cleanup_test_files, tool, dump_debug_info):
     self.adb = adb
     self.device = device
     self.test_suite = os.path.splitext(test_suite)[0]
     self.test_suite_basename = os.path.basename(self.test_suite)
     self.test_suite_dirname = os.path.dirname(self.test_suite)
     self.rebaseline = rebaseline
     self.performance_test = performance_test
     self.cleanup_test_files = cleanup_test_files
     self.tool = CreateTool(tool, self.adb)
     if timeout == 0:
         if self.test_suite_basename == 'page_cycler_tests':
             timeout = 900
         else:
             timeout = 60
     # On a VM (e.g. chromium buildbots), this timeout is way too small.
     if os.environ.get('BUILDBOT_SLAVENAME'):
         timeout = timeout * 2
     self.timeout = timeout * self.tool.GetTimeoutScale()
     self.dump_debug_info = dump_debug_info
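
Examples #3 and #4 derive the effective per-test timeout from three factors: a suite-dependent default (900 s for page_cycler_tests, otherwise 60 s), a doubling on buildbot VMs, and the tool's timeout scale. A standalone sketch of that arithmetic follows; the function name effective_timeout is hypothetical, and timeout_scale stands in for tool.GetTimeoutScale():

import os

def effective_timeout(timeout, suite_basename, timeout_scale):
    # Hypothetical mirror of the timeout logic in Examples #3/#4.
    if timeout == 0:
        # Page cycler suites get a larger default than other suites.
        timeout = 900 if suite_basename == 'page_cycler_tests' else 60
    if os.environ.get('BUILDBOT_SLAVENAME'):
        # On a VM (e.g. chromium buildbots) the base timeout is doubled.
        timeout *= 2
    return timeout * timeout_scale

# E.g. page_cycler_tests on a buildbot with a tool scale of 1:
# 900 * 2 * 1 = 1800 seconds.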
Example #5
class TestPackage(object):
    """A helper base class for both APK and stand-alone executables.

  Args:
    adb: ADB interface the tests are using.
    device: Device to run the tests.
    test_suite: A specific test suite to run, empty to run all.
    timeout: Timeout for each test.
    rebaseline: Whether or not to run tests in isolation and update the filter.
    performance_test: Whether or not performance test(s).
    cleanup_test_files: Whether or not to cleanup test files on device.
    tool: Name of the Valgrind tool.
    dump_debug_info: A debug_info object.
  """
    def __init__(self, adb, device, test_suite, timeout, rebaseline,
                 performance_test, cleanup_test_files, tool, dump_debug_info):
        self.adb = adb
        self.device = device
        self.test_suite_full = test_suite
        self.test_suite = os.path.splitext(test_suite)[0]
        self.test_suite_basename = self._GetTestSuiteBaseName()
        self.test_suite_dirname = os.path.dirname(self.test_suite)
        self.rebaseline = rebaseline
        self.performance_test = performance_test
        self.cleanup_test_files = cleanup_test_files
        self.tool = CreateTool(tool, self.adb)
        if timeout == 0:
            if self.test_suite_basename == 'page_cycler_tests':
                timeout = 900
            else:
                timeout = 60
        # On a VM (e.g. chromium buildbots), this timeout is way too small.
        if os.environ.get('BUILDBOT_SLAVENAME'):
            timeout = timeout * 2
        self.timeout = timeout * self.tool.GetTimeoutScale()
        self.dump_debug_info = dump_debug_info

    def _BeginGetIOStats(self):
        """Gets I/O statistics before running test.

    Return:
      Tuple of (I/O stats object, flag of ready to continue). When encountering
      error, ready-to-continue flag is False, True otherwise. The I/O stats
      object may be None if the test is not performance test.
    """
        initial_io_stats = None
        # Try to get the disk I/O statistics for all performance tests.
        if self.performance_test and not self.rebaseline:
            initial_io_stats = self.adb.GetIoStats()
            # Get rid of the noise introduced by launching Chrome for page cycler.
            if self.test_suite_basename == 'page_cycler_tests':
                try:
                    chrome_launch_done_re = re.compile(
                        re.escape('Finish waiting for browser launch!'))
                    self.adb.WaitForLogMatch(chrome_launch_done_re)
                    initial_io_stats = self.adb.GetIoStats()
                except pexpect.TIMEOUT:
                    logging.error(
                        'Test terminated because the Chrome launcher had no '
                        'response after 120 seconds.')
                    return (None, False)
                finally:
                    if self.dump_debug_info:
                        self.dump_debug_info.TakeScreenshot('_Launch_Chrome_')
        return (initial_io_stats, True)

    def _EndGetIOStats(self, initial_io_stats):
        """Gets I/O statistics after running test and calcuate the I/O delta.

    Args:
      initial_io_stats: I/O stats object got from _BeginGetIOStats.

    Return:
      String for formated diso I/O statistics.
    """
        disk_io = ''
        if self.performance_test and initial_io_stats:
            final_io_stats = self.adb.GetIoStats()
            for stat in final_io_stats:
                disk_io += '\n' + PrintPerfResult(
                    stat, stat,
                    [final_io_stats[stat] - initial_io_stats[stat]],
                    stat.split('_')[1], True, False)
            logging.info(disk_io)
        return disk_io

    def GetDisabledPrefixes(self):
        return ['DISABLED_', 'FLAKY_', 'FAILS_']

    def _ParseGTestListTests(self, all_tests):
        ret = []
        current = ''
        disabled_prefixes = self.GetDisabledPrefixes()
        for test in all_tests:
            if not test:
                continue
            if test[0] != ' ':
                current = test
                continue
            if 'YOU HAVE' in test:
                break
            test_name = test[2:]
            if not any([test_name.startswith(x) for x in disabled_prefixes]):
                ret += [current + test_name]
        return ret

    def _WatchTestOutput(self, p):
        """Watches the test output.
    Args:
      p: the process generating output as created by pexpect.spawn.
    """
        ok_tests = []
        failed_tests = []
        crashed_tests = []
        timed_out = False
        overall_fail = False
        re_run = re.compile(r'\[ RUN      \] ?(.*)\r\n')
        # APK tests rely on the END tag.
        re_end = re.compile(r'\[ END      \] ?(.*)\r\n')
        # Signal handlers are installed before starting tests
        # to output the CRASHED marker when a crash happens.
        re_crash = re.compile(r'\[ CRASHED      \](.*)\r\n')
        re_fail = re.compile(r'\[  FAILED  \] ?(.*)\r\n')
        re_runner_fail = re.compile(r'\[ RUNNER_FAILED \] ?(.*)\r\n')
        re_ok = re.compile(r'\[       OK \] ?(.*)\r\n')
        (io_stats_before, ready_to_continue) = self._BeginGetIOStats()
        while ready_to_continue:
            found = p.expect([re_run, pexpect.EOF, re_end, re_runner_fail],
                             timeout=self.timeout)
            if found == 1:  # matched pexpect.EOF
                break
            if found == 2:  # matched END.
                break
            if found == 3:  # RUNNER_FAILED
                logging.error('RUNNER_FAILED')
                overall_fail = True
                break
            if self.dump_debug_info:
                self.dump_debug_info.TakeScreenshot('_Test_Start_Run_')
            full_test_name = p.match.group(1)
            found = p.expect(
                [re_ok, re_fail, re_crash, pexpect.EOF, pexpect.TIMEOUT],
                timeout=self.timeout)
            if found == 0:  # re_ok
                ok_tests += [
                    BaseTestResult(full_test_name.replace('\r', ''), p.before)
                ]
                continue
            if found == 2:  # re_crash
                crashed_tests += [
                    BaseTestResult(full_test_name.replace('\r', ''), p.before)
                ]
                overall_fail = True
                break
            # The test failed.
            failed_tests += [
                BaseTestResult(full_test_name.replace('\r', ''), p.before)
            ]
            if found >= 3:
                # The test bailed out (i.e., didn't print OK or FAIL).
                if found == 4:  # pexpect.TIMEOUT
                    logging.error('Test terminated after %d second timeout.',
                                  self.timeout)
                    timed_out = True
                break
        p.close()
        if not self.rebaseline and ready_to_continue:
            # _EndGetIOStats logs the disk I/O delta; its return value is a
            # formatted string, not a test result, so don't append it to
            # ok_tests.
            self._EndGetIOStats(io_stats_before)
            ret_code = self._GetGTestReturnCode()
            if ret_code:
                failed_tests += [
                    BaseTestResult(
                        'gtest exit code: %d' % ret_code, 'pexpect.before: %s'
                        '\npexpect.after: %s' % (p.before, p.after))
                ]
        # Create TestResults and return
        return TestResults.FromRun(ok=ok_tests,
                                   failed=failed_tests,
                                   crashed=crashed_tests,
                                   timed_out=timed_out,
                                   overall_fail=overall_fail)
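
_ParseGTestListTests above consumes the listing printed by gtest's --gtest_list_tests flag: test-case names flush left and ending in a dot, individual test names indented two spaces beneath them, and (when disabled tests exist) a two-space-indented 'YOU HAVE n DISABLED TESTS' reminder that the parser treats as a terminator. An illustrative input with invented names:

# Sample --gtest_list_tests output, split into lines; all names invented.
all_tests = [
    'FooTest.',
    '  Bar',
    '  DISABLED_Baz',              # skipped: carries a disabled prefix
    'QuxTest.',
    '  Quux',
    '  YOU HAVE 1 DISABLED TEST',  # reminder line stops the parse
]
# package._ParseGTestListTests(all_tests)
# -> ['FooTest.Bar', 'QuxTest.Quux']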