class TestServerThread(threading.Thread):
  """A thread to run the test server in a separate process."""

  def __init__(self, ready_event, arguments, adb, tool, build_type):
    """Initializes TestServerThread.

    Args:
      ready_event: event which will be set when the test server is ready.
      arguments: dictionary of arguments to run the test server.
      adb: instance of AndroidCommands.
      tool: instance of runtime error detection tool.
      build_type: 'Release' or 'Debug'.
    """
    threading.Thread.__init__(self)
    self.wait_event = threading.Event()
    self.stop_flag = False
    self.ready_event = ready_event
    self.ready_event.clear()
    self.arguments = arguments
    self.adb = adb
    self.tool = tool
    # Created in run(); Stop() checks it, so it must be initialized here.
    self.process = None
    self.is_ready = False
    self.host_port = self.arguments['port']
    assert isinstance(self.host_port, int)
    self._test_server_forwarder = None
    # The forwarder device port is dynamically allocated.
    self.forwarder_device_port = 0
    # Anonymous pipe used to get the port info from the test server.
    self.pipe_in = None
    self.pipe_out = None
    self.command_line = []
    self.build_type = build_type

  def _WaitToStartAndGetPortFromTestServer(self):
    """Waits for the Python test server to start and gets the port it uses.

    The port information is passed by the Python test server through the pipe
    given by self.pipe_out, and is written to |self.host_port| as a result.

    Returns:
      Whether the port used by the test server was successfully fetched.
    """
    assert self.host_port == 0 and self.pipe_out and self.pipe_in
    (in_fds, _, _) = select.select([self.pipe_in], [], [],
                                   TEST_SERVER_STARTUP_TIMEOUT)
    if len(in_fds) == 0:
      logging.error('Timed out waiting for the Python test server to start.')
      return False
    # First read the data length as an unsigned 4-byte value. This is _not_
    # using network byte order, since the Python test server packs the size
    # in native byte order and all Chromium platforms so far are configured
    # to be little-endian.
    # TODO(jnd): Change the Python test server and local_test_server_*.cc to
    # use a unified byte order (either big-endian or little-endian).
    data_length = os.read(self.pipe_in, struct.calcsize('=L'))
    if data_length:
      (data_length,) = struct.unpack('=L', data_length)
      assert data_length
    if not data_length:
      logging.error('Failed to get length of server data.')
      return False
    port_json = os.read(self.pipe_in, data_length)
    if not port_json:
      logging.error('Failed to get server data.')
      return False
    logging.info('Got port json data: %s', port_json)
    port_json = json.loads(port_json)
    if 'port' in port_json and isinstance(port_json['port'], int):
      self.host_port = port_json['port']
      return _CheckPortStatus(self.host_port, True)
    logging.error('Failed to get port information from the server data.')
    return False

  def _GenerateCommandLineArguments(self):
    """Generates the command line to run the test server.

    Note that all options are processed by following the definitions in
    testserver.py.
    """
    if self.command_line:
      return
    # The following arguments must exist.
    type_cmd = _GetServerTypeCommandLine(self.arguments['server-type'])
    if type_cmd:
      self.command_line.append(type_cmd)
    self.command_line.append('--port=%d' % self.host_port)
    # Use a pipe to get the port picked by the Python test server if the test
    # does not specify one.
    if self.host_port == 0:
      (self.pipe_in, self.pipe_out) = os.pipe()
      self.command_line.append('--startup-pipe=%d' % self.pipe_out)
    self.command_line.append('--host=%s' % self.arguments['host'])
    data_dir = self.arguments['data-dir'] or 'chrome/test/data'
    if not os.path.isabs(data_dir):
      data_dir = os.path.join(constants.CHROME_DIR, data_dir)
    self.command_line.append('--data-dir=%s' % data_dir)
    # The following arguments are optional depending on the individual test.
    if 'log-to-console' in self.arguments:
      self.command_line.append('--log-to-console')
    if 'auth-token' in self.arguments:
      self.command_line.append('--auth-token=%s' %
                               self.arguments['auth-token'])
    if 'https' in self.arguments:
      self.command_line.append('--https')
      if 'cert-and-key-file' in self.arguments:
        self.command_line.append('--cert-and-key-file=%s' % os.path.join(
            constants.CHROME_DIR, self.arguments['cert-and-key-file']))
      if 'ocsp' in self.arguments:
        self.command_line.append('--ocsp=%s' % self.arguments['ocsp'])
      if 'https-record-resume' in self.arguments:
        self.command_line.append('--https-record-resume')
      if 'ssl-client-auth' in self.arguments:
        self.command_line.append('--ssl-client-auth')
      if 'tls-intolerant' in self.arguments:
        self.command_line.append('--tls-intolerant=%s' %
                                 self.arguments['tls-intolerant'])
      if 'ssl-client-ca' in self.arguments:
        for ca in self.arguments['ssl-client-ca']:
          self.command_line.append('--ssl-client-ca=%s' %
                                   os.path.join(constants.CHROME_DIR, ca))
      if 'ssl-bulk-cipher' in self.arguments:
        for bulk_cipher in self.arguments['ssl-bulk-cipher']:
          self.command_line.append('--ssl-bulk-cipher=%s' % bulk_cipher)

  def run(self):
    logging.info('Starting the test server thread.')
    self.wait_event.clear()
    self._GenerateCommandLineArguments()
    command = constants.CHROME_DIR
    if self.arguments['server-type'] == 'sync':
      command = [os.path.join(command, 'sync', 'tools', 'testserver',
                              'sync_testserver.py')] + self.command_line
    else:
      command = [os.path.join(command, 'net', 'tools', 'testserver',
                              'testserver.py')] + self.command_line
    logging.info('Running: %s', command)
    self.process = subprocess.Popen(command)
    if self.process:
      if self.pipe_out:
        self.is_ready = self._WaitToStartAndGetPortFromTestServer()
      else:
        self.is_ready = _CheckPortStatus(self.host_port, True)
    if self.is_ready:
      self._test_server_forwarder = Forwarder(self.adb, self.build_type)
      self._test_server_forwarder.Run([(0, self.host_port)], self.tool,
                                      '127.0.0.1')
      # Check whether the forwarder is ready on the device.
      self.is_ready = False
      device_port = self._test_server_forwarder.DevicePortForHostPort(
          self.host_port)
      if device_port:
        for timeout in range(1, 5):
          if ports.IsDevicePortUsed(self.adb, device_port, 'LISTEN'):
            self.is_ready = True
            self.forwarder_device_port = device_port
            break
          time.sleep(timeout)
    # Wake up the request handler thread.
    self.ready_event.set()
    # Keep the thread running until Stop() gets called.
    while not self.stop_flag:
      time.sleep(1)
    if self.process.poll() is None:
      self.process.kill()
    if self._test_server_forwarder:
      self._test_server_forwarder.Close()
    self.process = None
    self.is_ready = False
    if self.pipe_out:
      os.close(self.pipe_in)
      os.close(self.pipe_out)
      self.pipe_in = None
      self.pipe_out = None
    logging.info('Test server has died.')
    self.wait_event.set()

  def Stop(self):
    """Blocks until the loop has finished.

    Note that this must be called in another thread.
    """
    if not self.process:
      return
    self.stop_flag = True
    self.wait_event.wait()
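
# Note on the startup-pipe handshake consumed by
# TestServerThread._WaitToStartAndGetPortFromTestServer() above: the test
# server writes a 4-byte payload length in *native* byte order, followed by
# that many bytes of JSON containing an integer 'port' entry. A minimal
# sketch of the writing side, for illustration only (the function name is
# hypothetical; see testserver.py for the real implementation):
#
#   import json
#   import os
#   import struct
#
#   def _AnnouncePort(startup_pipe_fd, port):
#     payload = json.dumps({'port': port})
#     os.write(startup_pipe_fd, struct.pack('=L', len(payload)))
#     os.write(startup_pipe_fd, payload)
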

class TestRunner(BaseTestRunner):
  """Responsible for running a series of tests connected to a single device."""

  _DEVICE_DATA_DIR = 'chrome/test/data'
  _EMMA_JAR = os.path.join(os.environ.get('ANDROID_BUILD_TOP', ''),
                           'external/emma/lib/emma.jar')
  _COVERAGE_MERGED_FILENAME = 'unittest_coverage.es'
  _COVERAGE_WEB_ROOT_DIR = os.environ.get('EMMA_WEB_ROOTDIR')
  _COVERAGE_FILENAME = 'coverage.ec'
  _COVERAGE_RESULT_PATH = ('/data/data/com.google.android.apps.chrome/files/' +
                           _COVERAGE_FILENAME)
  _COVERAGE_META_INFO_PATH = os.path.join(
      os.environ.get('ANDROID_BUILD_TOP', ''), 'out/target/common/obj/APPS',
      'Chrome_intermediates/coverage.em')
  _HOSTMACHINE_PERF_OUTPUT_FILE = '/tmp/chrome-profile'
  _DEVICE_PERF_OUTPUT_SEARCH_PREFIX = (constants.DEVICE_PERF_OUTPUT_DIR +
                                       '/chrome-profile*')
  _DEVICE_HAS_TEST_FILES = {}

  def __init__(self, options, device, tests_iter, coverage, shard_index, apks,
               ports_to_forward):
    """Creates a new TestRunner.

    Args:
      options: An options object with the following required attributes:
        - build_type: 'Release' or 'Debug'.
        - install_apk: Re-installs the apk if requested.
        - save_perf_json: Whether or not to save the JSON file from UI perf
          tests.
        - screenshot_failures: Take a screenshot for a test failure.
        - tool: Name of the Valgrind tool.
        - wait_for_debugger: Blocks until the debugger is connected.
        - disable_assertions: Whether to disable java assertions on the
          device.
      device: Attached android device.
      tests_iter: A list of tests to be run.
      coverage: Collects coverage information if enabled.
      shard_index: shard # for this TestRunner, used to create unique port
        numbers.
      apks: A list of ApkInfo objects to be installed. The first element
        should be the tests apk; the rest are any additional apks used by the
        test. The default is ChromeTest.apk.
      ports_to_forward: A list of port numbers for which to set up forwarders.
        Can be optionally requested by a test case.

    Raises:
      FatalTestException: if coverage metadata is not available.
    """
    BaseTestRunner.__init__(self, device, options.tool, shard_index,
                            options.build_type)

    if not apks:
      apks = [apk_info.ApkInfo(options.test_apk_path,
                               options.test_apk_jar_path)]

    self.build_type = options.build_type
    self.install_apk = options.install_apk
    self.test_data = options.test_data
    self.save_perf_json = options.save_perf_json
    self.screenshot_failures = options.screenshot_failures
    self.wait_for_debugger = options.wait_for_debugger
    self.disable_assertions = options.disable_assertions

    self.tests_iter = tests_iter
    self.coverage = coverage
    self.apks = apks
    self.test_apk = apks[0]
    self.instrumentation_class_path = self.test_apk.GetPackageName()
    self.ports_to_forward = ports_to_forward

    self.test_results = TestResults()
    self.forwarder = None

    if self.coverage:
      if os.path.exists(TestRunner._COVERAGE_MERGED_FILENAME):
        os.remove(TestRunner._COVERAGE_MERGED_FILENAME)
      if not os.path.exists(TestRunner._COVERAGE_META_INFO_PATH):
        raise FatalTestException('FATAL ERROR in ' + sys.argv[0] +
                                 ' : Coverage meta info [' +
                                 TestRunner._COVERAGE_META_INFO_PATH +
                                 '] does not exist.')
      if (not TestRunner._COVERAGE_WEB_ROOT_DIR or
          not os.path.exists(TestRunner._COVERAGE_WEB_ROOT_DIR)):
        raise FatalTestException('FATAL ERROR in ' + sys.argv[0] +
                                 ' : Path specified in $EMMA_WEB_ROOTDIR [' +
                                 TestRunner._COVERAGE_WEB_ROOT_DIR +
                                 '] does not exist.')

  def _GetTestsIter(self):
    if not self.tests_iter:
      # multiprocessing.Queue can't be pickled across processes if we have it
      # as a member set during the constructor. Grab one here instead.
      self.tests_iter = BaseTestSharder.tests_container
    assert self.tests_iter
    return self.tests_iter

  def CopyTestFilesOnce(self):
    """Pushes the test data files to the device. Installs apks if requested."""
    if TestRunner._DEVICE_HAS_TEST_FILES.get(self.device, False):
      logging.warning('Already copied test files to device %s, skipping.',
                      self.device)
      return
    for dest_host_pair in self.test_data:
      dst_src = dest_host_pair.split(':', 1)
      dst_layer = dst_src[0]
      host_src = dst_src[1]
      host_test_files_path = constants.CHROME_DIR + '/' + host_src
      if os.path.exists(host_test_files_path):
        self.adb.PushIfNeeded(host_test_files_path,
                              self.adb.GetExternalStorage() + '/' +
                              TestRunner._DEVICE_DATA_DIR + '/' + dst_layer)
    if self.install_apk:
      for apk in self.apks:
        self.adb.ManagedInstall(apk.GetApkPath(),
                                package_name=apk.GetPackageName())
    self.tool.CopyFiles()
    TestRunner._DEVICE_HAS_TEST_FILES[self.device] = True

  def SaveCoverageData(self, test):
    """Saves the Emma coverage data before it's overwritten by the next test.

    Args:
      test: the test whose coverage data is collected.
    """
    if not self.coverage:
      return
    if not self.adb.Adb().Pull(TestRunner._COVERAGE_RESULT_PATH,
                               constants.CHROME_DIR):
      logging.error('ERROR: Unable to find file ' +
                    TestRunner._COVERAGE_RESULT_PATH +
                    ' on the device for test ' + test)
    pulled_coverage_file = os.path.join(constants.CHROME_DIR,
                                        TestRunner._COVERAGE_FILENAME)
    if os.path.exists(TestRunner._COVERAGE_MERGED_FILENAME):
      cmd = ['java', '-classpath', TestRunner._EMMA_JAR, 'emma', 'merge',
             '-in', pulled_coverage_file,
             '-in', TestRunner._COVERAGE_MERGED_FILENAME,
             '-out', TestRunner._COVERAGE_MERGED_FILENAME]
      cmd_helper.RunCmd(cmd)
    else:
      shutil.copy(pulled_coverage_file, TestRunner._COVERAGE_MERGED_FILENAME)
    os.remove(pulled_coverage_file)

  def GenerateCoverageReportIfNeeded(self):
    """Uses Emma to generate a coverage report and an html page."""
    if not self.coverage:
      return
    cmd = ['java', '-classpath', TestRunner._EMMA_JAR, 'emma', 'report',
           '-r', 'html',
           '-in', TestRunner._COVERAGE_MERGED_FILENAME,
           '-in', TestRunner._COVERAGE_META_INFO_PATH]
    cmd_helper.RunCmd(cmd)
    new_dir = os.path.join(TestRunner._COVERAGE_WEB_ROOT_DIR,
                           time.strftime('Coverage_for_%Y_%m_%d_%a_%H:%M'))
    shutil.copytree('coverage', new_dir)

    latest_dir = os.path.join(TestRunner._COVERAGE_WEB_ROOT_DIR,
                              'Latest_Coverage_Run')
    if os.path.exists(latest_dir):
      shutil.rmtree(latest_dir)
    os.mkdir(latest_dir)
    webserver_new_index = os.path.join(new_dir, 'index.html')
    webserver_new_files = os.path.join(new_dir, '_files')
    webserver_latest_index = os.path.join(latest_dir, 'index.html')
    webserver_latest_files = os.path.join(latest_dir, '_files')
    # Set up new symlinks pointing to the latest results.
    os.symlink(webserver_new_index, webserver_latest_index)
    os.symlink(webserver_new_files, webserver_latest_files)
    cmd_helper.RunCmd(['chmod', '755', '-R', latest_dir, new_dir])

  def _GetInstrumentationArgs(self):
    ret = {}
    if self.coverage:
      ret['coverage'] = 'true'
    if self.wait_for_debugger:
      ret['debug'] = 'true'
    return ret

  def _TakeScreenshot(self, test):
    """Takes a screenshot from the device."""
    screenshot_name = os.path.join(constants.SCREENSHOTS_DIR, test + '.png')
    logging.info('Taking screenshot named %s', screenshot_name)
    self.adb.TakeScreenshot(screenshot_name)

  def SetUp(self):
    """Sets up the test harness and device before all tests are run."""
    super(TestRunner, self).SetUp()
    if not self.adb.IsRootEnabled():
      logging.warning('Unable to enable java asserts for %s, non-rooted '
                      'device.', self.device)
    else:
      if self.adb.SetJavaAssertsEnabled(enable=not self.disable_assertions):
        self.adb.Reboot(full_reboot=False)

    # Derive the HTTP server's default port from the shard index, since
    # multiple processes launching lighttpd on the same port at the same time
    # would otherwise race with each other.
    http_server_ports = self.LaunchTestHttpServer(
        os.path.join(constants.CHROME_DIR),
        (constants.LIGHTTPD_RANDOM_PORT_FIRST + self.shard_index))
    if self.ports_to_forward:
      port_pairs = [(port, port) for port in self.ports_to_forward]
      # We need to remember which ports the HTTP server is using, since the
      # forwarder will stomp on them otherwise.
      port_pairs.append(http_server_ports)
      self.forwarder = Forwarder(self.adb, self.build_type)
      self.forwarder.Run(port_pairs, self.tool, '127.0.0.1')
    self.CopyTestFilesOnce()
    self.flags.AddFlags(['--enable-test-intents'])

  def TearDown(self):
    """Cleans up the test harness and saves outstanding data from the run."""
    if self.forwarder:
      self.forwarder.Close()
    self.GenerateCoverageReportIfNeeded()
    super(TestRunner, self).TearDown()

  def TestSetup(self, test):
    """Sets up the test harness for running a particular test.

    Args:
      test: The name of the test that will be run.
    """
    self.SetupPerfMonitoringIfNeeded(test)
    self._SetupIndividualTestTimeoutScale(test)
    self.tool.SetupEnvironment()

    # Make sure the forwarder is still running.
    self.RestartHttpServerForwarderIfNecessary()

  def _IsPerfTest(self, test):
    """Determines whether a test is a performance test.

    Args:
      test: The name of the test to be checked.

    Returns:
      Whether the test is annotated as a performance test.
    """
    return _PERF_TEST_ANNOTATION in self.test_apk.GetTestAnnotations(test)

  def SetupPerfMonitoringIfNeeded(self, test):
    """Sets up performance monitoring if the specified test requires it.

    Args:
      test: The name of the test to be run.
    """
    if not self._IsPerfTest(test):
      return
    self.adb.Adb().SendCommand(
        'shell rm ' + TestRunner._DEVICE_PERF_OUTPUT_SEARCH_PREFIX)
    self.adb.StartMonitoringLogcat()

  def TestTeardown(self, test, test_result):
    """Cleans up the test harness after running a particular test.

    Depending on the options of this TestRunner this might handle coverage
    tracking or performance tracking. This method will only be called if the
    test passed.

    Args:
      test: The name of the test that was just run.
      test_result: result for this test.
    """
    self.tool.CleanUpEnvironment()

    # The logic below relies on the test passing.
    if not test_result or test_result.GetStatusCode():
      return

    self.TearDownPerfMonitoring(test)
    self.SaveCoverageData(test)

  def TearDownPerfMonitoring(self, test):
    """Cleans up performance monitoring if the specified test required it.

    Args:
      test: The name of the test that was just run.

    Raises:
      FatalTestException: if there's anything wrong with the perf data.
    """
    if not self._IsPerfTest(test):
      return
    raw_test_name = test.split('#')[1]

    # Wait for and grab the annotation data so we can figure out which traces
    # to parse.
    regex = self.adb.WaitForLogMatch(
        re.compile(r'\*\*PERFANNOTATION\(' + raw_test_name + r'\):(.*)'),
        None)

    # If the test is set to run on a specific device type only (i.e. only
    # tablet or only phone) and it is being run on the wrong device, the test
    # just quits and does not do anything. The java test harness will still
    # print the appropriate annotation for us, but it adds --NORUN-- so we
    # know to ignore the results.
    # The --NORUN-- tag is managed by MainActivityTestBase.java.
    if regex.group(1) != '--NORUN--':
      # Obtain the relevant perf data. The data is dumped to a
      # JSON formatted file.
      json_string = self.adb.GetFileContents(
          '/data/data/com.google.android.apps.chrome/files/PerfTestData.txt')

      if json_string:
        json_string = '\n'.join(json_string)
      else:
        raise FatalTestException('Perf file does not exist or is empty')

      if self.save_perf_json:
        json_local_file = '/tmp/chromium-android-perf-json-' + raw_test_name
        with open(json_local_file, 'w') as f:
          f.write(json_string)
        logging.info('Saving Perf UI JSON from test ' + test + ' to ' +
                     json_local_file)

      raw_perf_data = regex.group(1).split(';')

      for raw_perf_set in raw_perf_data:
        if raw_perf_set:
          perf_set = raw_perf_set.split(',')
          if len(perf_set) != 3:
            raise FatalTestException('Unexpected number of tokens in perf '
                                     'annotation string: ' + raw_perf_set)

          # Process the performance data.
          result = GetAverageRunInfoFromJSONString(json_string, perf_set[0])
          PrintPerfResult(perf_set[1], perf_set[2], [result['average']],
                          result['units'])

  def _SetupIndividualTestTimeoutScale(self, test):
    timeout_scale = self._GetIndividualTestTimeoutScale(test)
    valgrind_tools.SetChromeTimeoutScale(self.adb, timeout_scale)

  def _GetIndividualTestTimeoutScale(self, test):
    """Returns the timeout scale for the given |test|."""
    annotations = self.apks[0].GetTestAnnotations(test)
    timeout_scale = 1
    if 'TimeoutScale' in annotations:
      for annotation in annotations:
        scale_match = re.match('TimeoutScale:([0-9]+)', annotation)
        if scale_match:
          timeout_scale = int(scale_match.group(1))
    if self.wait_for_debugger:
      timeout_scale *= 100
    return timeout_scale

  def _GetIndividualTestTimeoutSecs(self, test):
    """Returns the timeout in seconds for the given |test|."""
    annotations = self.apks[0].GetTestAnnotations(test)
    if 'Manual' in annotations:
      return 600 * 60
    if 'External' in annotations:
      return 10 * 60
    if 'LargeTest' in annotations or _PERF_TEST_ANNOTATION in annotations:
      return 5 * 60
    if 'MediumTest' in annotations:
      return 3 * 60
    return 1 * 60

  def RunTests(self):
    """Runs the tests, generating the coverage if needed.

    Returns:
      A TestResults object.
    """
    instrumentation_path = (self.instrumentation_class_path +
                            '/android.test.InstrumentationTestRunner')
    instrumentation_args = self._GetInstrumentationArgs()
    for test in self._GetTestsIter():
      test_result = None
      start_date_ms = None
      try:
        self.TestSetup(test)
        start_date_ms = int(time.time()) * 1000
        args_with_filter = dict(instrumentation_args)
        args_with_filter['class'] = test
        # |test_results| is a list that should contain a single TestResult
        # object.
        logging.warn(args_with_filter)
        (test_results, _) = self.adb.Adb().StartInstrumentation(
            instrumentation_path=instrumentation_path,
            instrumentation_args=args_with_filter,
            timeout_time=(self._GetIndividualTestTimeoutSecs(test) *
                          self._GetIndividualTestTimeoutScale(test) *
                          self.tool.GetTimeoutScale()))
        duration_ms = int(time.time()) * 1000 - start_date_ms
        assert len(test_results) == 1
        test_result = test_results[0]
        status_code = test_result.GetStatusCode()
        if status_code:
          log = test_result.GetFailureReason()
          if not log:
            log = 'No information.'
          if self.screenshot_failures or log.find('INJECT_EVENTS perm') >= 0:
            self._TakeScreenshot(test)
          self.test_results.failed += [SingleTestResult(test, start_date_ms,
                                                        duration_ms, log)]
        else:
          result = [SingleTestResult(test, start_date_ms, duration_ms)]
          self.test_results.ok += result
      # Catch exceptions thrown by StartInstrumentation().
      # See ../../third_party/android/testrunner/adb_interface.py
      except (errors.WaitForResponseTimedOutError,
              errors.DeviceUnresponsiveError,
              errors.InstrumentationError) as e:
        if start_date_ms:
          duration_ms = int(time.time()) * 1000 - start_date_ms
        else:
          start_date_ms = int(time.time()) * 1000
          duration_ms = 0
        message = str(e)
        if not message:
          message = 'No information.'
        self.test_results.crashed += [SingleTestResult(test, start_date_ms,
                                                       duration_ms, message)]
        test_result = None
      self.TestTeardown(test, test_result)
    return self.test_results
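
  # Worked example of the instrumentation timeout computed in RunTests()
  # above, assuming a hypothetical test annotated as a 'MediumTest' with a
  # TimeoutScale of 2, run without --wait_for_debugger and with a tool whose
  # GetTimeoutScale() returns 1:
  #
  #   _GetIndividualTestTimeoutSecs(test)   -> 3 * 60 = 180 seconds
  #   _GetIndividualTestTimeoutScale(test)  -> 2
  #   self.tool.GetTimeoutScale()           -> 1
  #   timeout_time                          -> 180 * 2 * 1 = 360 seconds
  #
  # Similarly, the logcat line matched by TearDownPerfMonitoring() above has
  # the form (the values here are illustrative, not real trace names):
  #
  #   **PERFANNOTATION(testFoo):trace_key,graph_name,trace_name;...
  #
  # i.e. semicolon-separated triples whose first token is the key averaged
  # via GetAverageRunInfoFromJSONString() and whose remaining two tokens are
  # passed through to PrintPerfResult().
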
  def _CreateAndRunForwarder(self, adb, port_pairs, tool, host_name,
                             build_type):
    """Creates and runs a forwarder."""
    forwarder = Forwarder(adb, build_type)
    forwarder.Run(port_pairs, tool, host_name)
    return forwarder
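
# A minimal usage sketch for TestServerThread, assuming |adb| (an
# AndroidCommands instance) and |tool| are already available; the argument
# values below are illustrative only:
#
#   ready_event = threading.Event()
#   server_thread = TestServerThread(
#       ready_event,
#       {'port': 0,  # 0 means the server picks a port via the startup pipe.
#        'host': '127.0.0.1',
#        'server-type': 'http',
#        'data-dir': 'chrome/test/data'},
#       adb, tool, 'Debug')
#   server_thread.setDaemon(True)
#   server_thread.start()
#   ready_event.wait()
#   if server_thread.is_ready:
#     device_port = server_thread.forwarder_device_port
#   # ... run the test against device_port ...
#   server_thread.Stop()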