def run_test(self):
  """Runs the test command, retrying on failure, and returns its exit code.

  Traps SIGTERM and kills all child processes of the test when it's caught.
  This will allow us to capture logs from the device if a test hangs and
  gets timeout-killed by swarming. See also:
  https://chromium.googlesource.com/infra/luci/luci-py/+/master/appengine/swarming/doc/Bot.md#graceful-termination_aka-the-sigterm-and-sigkill-dance

  Returns:
    post_run()'s return value if it is not None (useful when the host-side
    Tast bin returns 0 even for failed tests); otherwise the return code of
    the final test attempt.
  """
  test_proc = None

  def _kill_child_procs(trapped_signal, _):
    # Kills only the children of the test process; the test proc itself is
    # left to exit so its return code can still be collected below.
    logging.warning(
        'Received signal %d. Killing child processes of test.',
        trapped_signal)
    if not test_proc or not test_proc.pid:
      # This shouldn't happen?
      logging.error('Test process not running.')
      return
    for child in psutil.Process(test_proc.pid).children():
      logging.warning('Killing process %s', child)
      child.kill()

  signal.signal(signal.SIGTERM, _kill_child_procs)

  for i in xrange(self._retries + 1):
    logging.info('########################################')
    logging.info('Test attempt #%d', i)
    logging.info('########################################')
    test_proc = subprocess42.Popen(
        self._test_cmd, stdout=sys.stdout, stderr=sys.stderr,
        env=self._test_env)
    try:
      test_proc.wait(timeout=self._timeout)
    except subprocess42.TimeoutExpired:
      logging.error('Test timed out. Sending SIGTERM.')
      # SIGTERM the proc and wait 10s for it to close.
      test_proc.terminate()
      try:
        test_proc.wait(timeout=10)
      except subprocess42.TimeoutExpired:
        # If it hasn't closed in 10s, SIGKILL it.
        logging.error('Test did not exit in time. Sending SIGKILL.')
        test_proc.kill()
        test_proc.wait()
    # Fixed typo: "exitted" -> "exited".
    logging.info('Test exited with %d.', test_proc.returncode)
    if test_proc.returncode == 0:
      break

  ret = self.post_run(test_proc.returncode)
  # Allow post_run to override test proc return code. (Useful when the host
  # side Tast bin returns 0 even for failed tests.)
  if ret is not None:
    return ret
  return test_proc.returncode
def _run_test_with_timeout(config, shell, args, apptest, env, seconds=10):
  '''Run the test with a timeout; return the output or raise an exception.'''
  if config.target_os == Config.OS_ANDROID:
    return _run_test_with_timeout_on_android(shell, args, apptest, seconds)

  output = ''
  error = []
  command = _build_command_line(config, args, apptest)
  # NOTE(review): detached=True presumably detaches the child into its own
  # process group so terminate()/kill() below reach the whole tree — confirm
  # against the subprocess42 docs.
  proc = subprocess42.Popen(command, detached=True, stdout=subprocess42.PIPE,
                            stderr=subprocess42.STDOUT, env=env)
  try:
    output = proc.communicate(timeout=seconds)[0] or ''
    # communicate() can return after the deadline without raising; treat an
    # over-long run as a timeout too, by raising into the handler below.
    if proc.duration() > seconds:
      error.append('ERROR: Test timeout with duration: %s.' % proc.duration())
      raise subprocess42.TimeoutExpired(proc.args, seconds, output, None)
  except subprocess42.TimeoutExpired as e:
    # Partial output produced before the timeout rides on the exception.
    output = e.output or ''
    logging.getLogger().debug('Terminating the test for timeout.')
    error.append('ERROR: Test timeout after %d seconds.' % proc.duration())
    # Escalate: SIGTERM with a 30s grace period, then SIGKILL with another
    # 30s; each step appends whatever output trickles in while waiting.
    proc.terminate()
    try:
      output += proc.communicate(timeout=30)[0] or ''
    except subprocess42.TimeoutExpired as e:
      output += e.output or ''
      logging.getLogger().debug('Test termination failed; attempting to kill.')
      proc.kill()
      try:
        output += proc.communicate(timeout=30)[0] or ''
      except subprocess42.TimeoutExpired as e:
        output += e.output or ''
        logging.getLogger().debug('Failed to kill the test process!')
  # returncode is None only if the process is still alive, i.e. even SIGKILL
  # failed to reap it above.
  if proc.returncode:
    error.append('ERROR: Test exited with code: %d.' % proc.returncode)
  elif proc.returncode is None:
    error.append('ERROR: Failed to kill the test process!')
  if not output:
    error.append('ERROR: Test exited with no output.')
  elif output.startswith('This program contains tests'):
    error.append('ERROR: GTest printed help; check the command line.')
  if error:
    raise Exception(output + '\n'.join(error))
  return output
def _execute(self, *args):
  """Runs a raw command. Separate so it's easily mockable."""
  LOGGER.info('Running: %s', args)
  proc = subprocess42.Popen(
      args, stdout=subprocess42.PIPE, stderr=subprocess42.PIPE)
  out, err = proc.communicate()
  exit_code = proc.poll()
  if not exit_code:
    # Success: stderr (if any) is discarded, matching the original contract.
    return out
  # Combine both captured streams into the error payload when both exist;
  # otherwise pass along whichever one has content.
  if out and err:
    combined = 'STDOUT\n%s\nSTDERR\n%s' % (out, err)
  else:
    combined = out or err
  raise subprocess42.CalledProcessError(exit_code, args, combined)
def _run_cmd(self, cmd, timeout, handles, env, cwd):
  """Runs cmd (subprocess-style).

  Args:
    cmd: a subprocess-style command list, with command first then args.
    timeout: seconds to allow the command to run before raising
      subprocess42.TimeoutExpired; falsy means no limit.
    handles: A dictionary from ('stdin', 'stdout', 'stderr'), each value
      being *either* a stream.StreamEngine.Stream or a python file object
      to direct that subprocess's filehandle to.
    env: the full environment to run the command in -- this is passed
      unaltered to subprocess.Popen.
    cwd: the working directory of the command.

  Returns:
    The subprocess's return code.
  """
  fhandles = {}

  # If we are given StreamEngine.Streams, map them to PIPE for subprocess.
  # We will manually forward them to their corresponding stream.
  for key in ('stdout', 'stderr'):
    handle = handles.get(key)
    if isinstance(handle, stream.StreamEngine.Stream):
      fhandles[key] = subprocess42.PIPE
    else:
      fhandles[key] = handle

  # stdin must be a real handle, if it exists
  fhandles['stdin'] = handles.get('stdin')

  with _modify_lookup_path(env.get('PATH')):
    proc = subprocess42.Popen(
        cmd,
        env=env,
        cwd=cwd,
        detached=True,
        universal_newlines=True,
        **fhandles)

  # Safe to close file handles now that subprocess has inherited them.
  # (Python 2 `file` check: StreamEngine.Streams and PIPE sentinels are
  # skipped, only real file objects are closed.)
  for handle in fhandles.itervalues():
    if isinstance(handle, file):
      handle.close()

  outstreams = {}
  linebufs = {}

  for key in ('stdout', 'stderr'):
    handle = handles.get(key)
    if isinstance(handle, stream.StreamEngine.Stream):
      outstreams[key] = handle
      linebufs[key] = _streamingLinebuf()

  if linebufs:
    # manually check the timeout, because we poll
    start_time = time.time()
    # yield_any(timeout=1) wakes at least once a second even with no data,
    # so the deadline check below runs at that cadence.
    for pipe, data in proc.yield_any(timeout=1):
      if timeout and time.time() - start_time > timeout:
        # Don't know the name of the step, so raise this and it'll get caught
        raise subprocess42.TimeoutExpired(cmd, timeout)

      if pipe is None:
        continue
      buf = linebufs.get(pipe)
      if not buf:
        continue
      # Forward complete lines only; partial lines stay buffered until the
      # next chunk arrives on the same pipe.
      buf.ingest(data)
      for line in buf.get_buffered():
        outstreams[pipe].write_line(line)
  else:
    proc.wait(timeout)

  return proc.returncode
def vm_test(args, unknown_args):
  """Builds and runs a cros_run_vm_test invocation for the given test.

  Assembles the full cros_run_vm_test command line (file pushes, log
  collection, result plumbing, the in-VM test command), runs it with
  retry/timeout handling for the sanity test, and optionally writes a
  simple json results file.

  Args:
    args: Parsed arguments for this runner (board, cros_cache, paths, etc.).
    unknown_args: Extra arguments forwarded verbatim to the in-VM test cmd.

  Returns:
    1 on invalid argument combinations, otherwise the return code of the
    cros_run_vm_test invocation.
  """
  is_sanity_test = args.test_exe == 'cros_vm_sanity_test'

  # To keep things easy for us, ensure both types of output locations are
  # the same.
  if args.test_launcher_summary_output and args.vm_logs_dir:
    json_output_dir = os.path.dirname(args.test_launcher_summary_output) or '.'
    if os.path.abspath(json_output_dir) != os.path.abspath(args.vm_logs_dir):
      logging.error(
          '--test-launcher-summary-output and --vm-logs-dir must point to '
          'the same directory.')
      return 1

  cros_run_vm_test_cmd = [
      CROS_RUN_VM_TEST_PATH,
      '--start',
      '--board', args.board,
      '--cache-dir', args.cros_cache,
  ]

  # cros_run_vm_test has trouble with relative paths that go up directories,
  # so cd to src/, which should be the root of all data deps.
  os.chdir(CHROMIUM_SRC_PATH)

  runtime_files = read_runtime_files(
      args.runtime_deps_path, args.path_to_outdir)
  if args.vpython_dir:
    # --vpython-dir is relative to the out dir, but --files expects paths
    # relative to src dir, so fix the path up a bit.
    runtime_files.append(
        os.path.relpath(
            os.path.abspath(
                os.path.join(args.path_to_outdir, args.vpython_dir)),
            CHROMIUM_SRC_PATH))
    runtime_files.append('.vpython')

  # If we're pushing files, we need to set the cwd.
  if runtime_files:
    cros_run_vm_test_cmd.extend(
        ['--cwd', os.path.relpath(args.path_to_outdir, CHROMIUM_SRC_PATH)])
  for f in runtime_files:
    cros_run_vm_test_cmd.extend(['--files', f])

  if args.vm_logs_dir:
    cros_run_vm_test_cmd += [
        '--results-src', '/var/log/',
        '--results-dest-dir', args.vm_logs_dir,
    ]

  if args.test_launcher_summary_output and not is_sanity_test:
    result_dir, result_file = os.path.split(args.test_launcher_summary_output)
    # If args.test_launcher_summary_output is a file in cwd, result_dir will
    # be an empty string, so replace it with '.' when this is the case so
    # cros_run_vm_test can correctly handle it.
    if not result_dir:
      result_dir = '.'
    # The test writes its results inside the VM; pull them back out to the
    # host-side result_dir after the run.
    vm_result_file = '/tmp/%s' % result_file
    cros_run_vm_test_cmd += [
        '--results-src', vm_result_file,
        '--results-dest-dir', result_dir,
    ]

  if is_sanity_test:
    # run_cros_vm_test's default behavior when no cmd is specified is the
    # sanity test that's baked into the VM image. This test smoke-checks the
    # system browser, so deploy our locally-built chrome to the VM before
    # testing.
    cros_run_vm_test_cmd += [
        '--deploy',
        '--build-dir', os.path.relpath(args.path_to_outdir, CHROMIUM_SRC_PATH),
    ]
  else:
    pre_test_cmds = [
        # /home is mounted with "noexec" in the VM, but some of our tools
        # and tests use the home dir as a workspace (eg: vpython downloads
        # python binaries to ~/.vpython-root). /tmp doesn't have this
        # restriction, so change the location of the home dir for the
        # duration of the test.
        'export HOME=/tmp', '\;',
    ]
    if args.vpython_dir:
      vpython_spec_path = os.path.relpath(
          os.path.join(CHROMIUM_SRC_PATH, '.vpython'), args.path_to_outdir)
      pre_test_cmds += [
          # Backslash is needed to prevent $PATH from getting prematurely
          # executed on the host.
          'export PATH=\$PATH:\$PWD/%s' % args.vpython_dir, '\;',
          # Initialize the vpython cache. This can take 10-20s, and some
          # tests can't afford to wait that long on the first invocation.
          'vpython', '-vpython-spec', vpython_spec_path, '-vpython-tool',
          'install', '\;',
      ]
    cros_run_vm_test_cmd += [
        # Some tests fail as root, so run as the less privileged user
        # 'chronos'.
        '--as-chronos',
        '--cmd',
        '--',
        # Wrap the cmd to run in the VM around quotes (") so that the
        # interpreter on the host doesn't stop at any ";" or "&&" tokens in
        # the cmd.
        '"',
    ] + pre_test_cmds + [
        './' + args.test_exe,
        '--test-launcher-shard-index=%d' % args.test_launcher_shard_index,
        '--test-launcher-total-shards=%d' % args.test_launcher_total_shards,
    ] + unknown_args + [
        '"',
    ]

  if args.test_launcher_summary_output and not is_sanity_test:
    cros_run_vm_test_cmd += [
        '--test-launcher-summary-output=%s' % vm_result_file,
    ]

  logging.info('Running the following command:')
  logging.info(' '.join(cros_run_vm_test_cmd))

  # deploy_chrome needs a set of GN args used to build chrome to determine if
  # certain libraries need to be pushed to the VM. It looks for the args via
  # an env var. To trigger the default deploying behavior, give it a dummy
  # set of args.
  # TODO(crbug.com/823996): Make the GN-dependent deps controllable via
  # cmd-line args.
  env_copy = os.environ.copy()
  if not env_copy.get('GN_ARGS'):
    env_copy['GN_ARGS'] = 'is_chromeos = true'
  env_copy['PATH'] = env_copy['PATH'] + ':' + os.path.join(
      CHROMITE_PATH, 'bin')

  # Traps SIGTERM and kills all child processes of cros_run_vm_test when it's
  # caught. This will allow us to capture logs from the VM if a test hangs
  # and gets timeout-killed by swarming. See also:
  # https://chromium.googlesource.com/infra/luci/luci-py/+/master/appengine/swarming/doc/Bot.md#graceful-termination_aka-the-sigterm-and-sigkill-dance
  test_proc = None

  def _kill_child_procs(trapped_signal, _):
    # Kills only the children of the test process; the test proc itself is
    # left to exit so its return code can still be collected below.
    logging.warning('Received signal %d. Killing child processes of test.',
                    trapped_signal)
    if not test_proc or not test_proc.pid:
      # This shouldn't happen?
      logging.error('Test process not running.')
      return
    for child in psutil.Process(test_proc.pid).children():
      logging.warning('Killing process %s', child)
      child.kill()

  # Standard GTests should handle retries and timeouts themselves.
  retries, timeout = 0, None
  if is_sanity_test:
    # 5 min should be enough time for the sanity test to pass.
    retries, timeout = 2, 300
  signal.signal(signal.SIGTERM, _kill_child_procs)

  for i in xrange(retries + 1):
    logging.info('########################################')
    logging.info('Test attempt #%d', i)
    logging.info('########################################')
    test_proc = subprocess42.Popen(
        cros_run_vm_test_cmd, stdout=sys.stdout, stderr=sys.stderr,
        env=env_copy)
    try:
      test_proc.wait(timeout=timeout)
    except subprocess42.TimeoutExpired:
      logging.error('Test timed out. Sending SIGTERM.')
      # SIGTERM the proc and wait 10s for it to close.
      test_proc.terminate()
      try:
        test_proc.wait(timeout=10)
      except subprocess42.TimeoutExpired:
        # If it hasn't closed in 10s, SIGKILL it.
        logging.error('Test did not exit in time. Sending SIGKILL.')
        test_proc.kill()
        test_proc.wait()
    logging.info('Test exitted with %d.', test_proc.returncode)
    if test_proc.returncode == 0:
      break

  rc = test_proc.returncode

  # Create a simple json results file for the sanity test if needed. The
  # results will contain only one test ('cros_vm_sanity_test'), and will
  # either be a PASS or FAIL depending on the return code of cros_run_vm_test
  # above.
  if args.test_launcher_summary_output and is_sanity_test:
    result = (base_test_result.ResultType.FAIL if rc else
              base_test_result.ResultType.PASS)
    sanity_test_result = base_test_result.BaseTestResult(
        'cros_vm_sanity_test', result)
    run_results = base_test_result.TestRunResults()
    run_results.AddResult(sanity_test_result)
    with open(args.test_launcher_summary_output, 'w') as f:
      json.dump(json_results.GenerateResultsDict([run_results]), f)

  return rc