def testTimeout(self):
  """Tests that we can nest Timeout correctly."""
  # Guard: time.sleep must be the real function, not a test mock, or the
  # alarm-based timeouts below would never actually fire.
  self.assertFalse('mock' in str(time.sleep).lower())
  with timeout_util.Timeout(30):
    with timeout_util.Timeout(20):
      with timeout_util.Timeout(1):
        # The innermost 1-second timeout fires during the 10s sleep.
        self.assertRaises(timeout_util.TimeoutError, time.sleep, 10)

      # Should not raise a timeout exception as 20 > 2.
      time.sleep(1)
def testTimeoutNested(self):
  """Tests that we still re-raise an alarm if both are reached."""
  with timeout_util.Timeout(1):
    try:
      with timeout_util.Timeout(2):
        # The outer 1-second alarm fires while we sleep inside the inner
        # (longer) timeout; the inner context must re-raise it on exit.
        self.assertRaises(timeout_util.TimeoutError, time.sleep, 1)

    # Craziness to catch nested timeouts.
    except timeout_util.TimeoutError:
      pass
    else:
      self.fail('Should have thrown an exception')
def Upload(cls, stats, url=None, timeout=None):
  """Upload |stats| to |url|.

  Does nothing if upload conditions aren't met.

  Args:
    stats: A Stats object to upload.
    url: The url to send the request to.
    timeout: A timeout value to set, in seconds.
  """
  target_url = cls.URL if url is None else url
  upload_timeout = cls.UPLOAD_TIMEOUT if timeout is None else timeout

  if not cls._UploadConditionsMet(stats):
    return

  with timeout_util.Timeout(upload_timeout):
    try:
      cls._Upload(stats, target_url)
    # Stats upload errors are silenced, for the sake of user experience.
    except timeout_util.TimeoutError:
      logging.debug(cls.TIMEOUT_ERROR, upload_timeout)
    except urllib2.HTTPError as e:
      # HTTPError has a geturl() method, but it relies on self.url, which
      # is not always set. In looking at source, self.filename equals url.
      logging.debug(cls.HTTPURL_ERROR, e.filename, exc_info=True)
    except EnvironmentError:
      logging.debug(cls.ENVIRONMENT_ERROR, exc_info=True)
def PostForDeduplication(symbols, dedupe_namespace):
  """Send a symbol file to the swarming service

  Notify the isolate service of a successful upload. If the notification
  fails for any reason, we ignore it. We don't care as it just means we'll
  upload it again later on, and the symbol server will handle that
  graciously.

  Args:
    symbols: An iterable of SymbolFiles to be uploaded.
    dedupe_namespace: String id for the comparison namespace.

  Yields:
    Each symbol from symbols, unmodified.
  """
  storage_query = OpenDeduplicateConnection(dedupe_namespace)

  for s in symbols:
    # If we can talk to isolate, and we uploaded this symbol, and we
    # queried for its presence before, upload to isolate now.
    if storage_query and s.status == SymbolFile.UPLOADED and s.dedupe_item:
      s.dedupe_item.prepare(DedupeItem.ALGO)
      try:
        with timeout_util.Timeout(DEDUPE_NOTIFY_TIMEOUT):
          storage_query.push(s.dedupe_item, s.dedupe_push_state,
                             s.dedupe_item.content())
          logging.info('sent %s', s.display_name)
      except Exception:
        # Best-effort: a failed notification only means this symbol may be
        # re-uploaded later, so disable further pushes and carry on.
        logging.warning('posting %s to dedupe server failed',
                        os.path.basename(s.display_path), exc_info=True)
        storage_query = None

    yield s
def PerformStage(self):
  """Sync the Chrome checkout to the tag chosen for this build."""
  chrome_version = self.DetermineChromeVersion()

  logging.PrintBuildbotStepText('tag %s' % chrome_version)

  sync_chrome = os.path.join(
      self._orig_root, 'chromite', 'bin', 'sync_chrome')

  # Branched gclient can use git-cache incompatibly, so use a temp one.
  with osutils.TempDir(prefix='dummy') as git_cache:
    # --reset tells sync_chrome to blow away local changes and to feel
    # free to delete any directories that get in the way of syncing. This
    # is needed for unattended operation.
    # --ignore_locks tells sync_chrome to ignore git-cache locks.
    # --gclient is not specified here, sync_chrome will locate the one
    # on the $PATH.
    cmd = [sync_chrome, '--reset', '--ignore_locks',
           '--tag', chrome_version,
           '--git_cache_dir', git_cache]
    if constants.USE_CHROME_INTERNAL in self._run.config.useflags:
      cmd += ['--internal']
    cmd += [self._run.options.chrome_root]
    with timeout_util.Timeout(self.SYNC_CHROME_TIMEOUT):
      retry_util.RunCommandWithRetries(
          constants.SYNC_RETRIES, cmd, cwd=self._build_root)
def UploadDummyArtifact(self, path, faft_hack=False):
  """Upload artifacts to the dummy build results.

  Args:
    path: Path of the artifact to upload.
    faft_hack: If True, upload under a board-named prefix so the immutable
      FAFT infrastructure can locate the firmware artifact.
  """
  logging.info('UploadDummyArtifact: %s', path)
  with osutils.TempDir(prefix='dummy') as tempdir:
    # Build the path with os.path.join components rather than embedding a
    # '%s/%s' separator inside a single join argument.
    board_dir = os.path.join(tempdir, self._current_board)
    artifact_path = os.path.join(board_dir, os.path.basename(path))
    logging.info('Rename: %s -> %s', path, artifact_path)
    os.mkdir(board_dir)
    shutil.copyfile(path, artifact_path)
    logging.info('Main artifact from: %s', artifact_path)

    if faft_hack:
      # We put the firmware artifact in a directory named by board so that
      # immutable FAFT infrastructure can find it. We should remove this.
      self.UploadArtifact(
          artifact_path, archive=True, prefix=self._current_board)
    else:
      self.UploadArtifact(artifact_path, archive=True)

  gs_context = gs.GSContext(dry_run=self._run.options.debug_forced)
  for url in self.GetDummyArchiveUrls():
    logging.info('Uploading dummy artifact to %s...', url)
    with timeout_util.Timeout(20 * 60):
      logging.info('Dummy artifact from: %s', path)
      # Upload the original file (not the temp copy) straight to GS.
      gs_context.CopyInto(path, url, parallel=True, recursive=True)
def _KillProcsIfNeeded(self):
  """Stop the ui job and kill leftover chrome/session_manager processes."""
  if self._CheckUiJobStarted():
    logging.info('Shutting down Chrome...')
    self.device.RunCommand('stop ui')

  # Developers sometimes run session_manager manually, in which case we'll
  # need to help shut the chrome processes down.
  try:
    with timeout_util.Timeout(self.options.process_timeout):
      while self._ChromeFileInUse():
        logging.warning('The chrome binary on the device is in use.')
        logging.warning('Killing chrome and session_manager processes...\n')
        self.device.RunCommand("pkill 'chrome|session_manager'",
                               error_code_ok=True)
        # Wait for processes to actually terminate
        time.sleep(POST_KILL_WAIT)
        logging.info('Rechecking the chrome binary...')
  except timeout_util.TimeoutError:
    raise DeployFailure(
        'Could not kill processes after %s seconds. Please exit any '
        'running chrome processes and try again.'
        % self.options.process_timeout)
def _optional_timer_context(timeout):
  """Use the timeout_util.Timeout contextmanager if timeout is set.

  Args:
    timeout: Timeout in seconds, or a falsy value for no timeout.

  Yields:
    Control to the wrapped block, under an active Timeout when set.
  """
  # NOTE(review): this generator is only usable in a `with` statement if it
  # is decorated with @contextlib.contextmanager at the definition site —
  # confirm the decorator is present in the full source.
  if timeout:
    with timeout_util.Timeout(timeout):
      yield
  else:
    yield
def PerformStage(self):
  """Run every configured GCE test inside a cgroup, under a timeout."""
  # These directories are used later to archive test artifacts.
  test_results_root = commands.CreateTestRoot(self._build_root)
  test_basename = (self._test_basename or
                   _GCE_TEST_RESULTS % dict(attempt=self._attempt))
  try:
    if not self._gce_tests:
      self._gce_tests = self._run.config.gce_tests
    for gce_test in self._gce_tests:
      logging.info('Running GCE test %s.', gce_test.test_type)
      # Suite-type tests get results keyed by suite name, others by type.
      if gce_test.test_type == constants.GCE_SUITE_TEST_TYPE:
        subdir = gce_test.test_suite
      else:
        subdir = gce_test.test_type
      per_test_results_dir = os.path.join(test_results_root, subdir)

      with cgroups.SimpleContainChildren('GCETest'):
        reason = ' Reached GCETestStage test run timeout.'
        with timeout_util.Timeout(self.TEST_TIMEOUT, reason_message=reason):
          self._RunTest(gce_test, per_test_results_dir)
  except Exception:
    # pylint: disable=logging-not-lazy
    logging.error(
        _ERROR_MSG % dict(test_name='GCETests', test_results=test_basename))
    raise
  finally:
    self._ArchiveTestResults(test_results_root, test_basename)
def _KillAshChromeIfNeeded(self):
  """This method kills ash-chrome on the device, if it's running.

  This method calls 'stop ui', and then also manually pkills both
  ash-chrome and the session manager.
  """
  if self._CheckUiJobStarted():
    logging.info('Shutting down Chrome...')
    self.device.run('stop ui')

  # Developers sometimes run session_manager manually, in which case we'll
  # need to help shut the chrome processes down.
  wait_secs = self.options.process_timeout
  try:
    with timeout_util.Timeout(wait_secs):
      while self._ChromeFileInUse():
        logging.warning('The chrome binary on the device is in use.')
        logging.warning('Killing chrome and session_manager processes...\n')
        self.device.run("pkill 'chrome|session_manager'", check=False)
        # Wait for processes to actually terminate
        time.sleep(POST_KILL_WAIT)
        logging.info('Rechecking the chrome binary...')
  except timeout_util.TimeoutError:
    raise DeployFailure(
        'Could not kill processes after %s seconds. Please exit any '
        'running chrome processes and try again.' % wait_secs)
def SymbolDeduplicatorNotify(dedupe_namespace, dedupe_queue):
  """Send a symbol file to the swarming service

  Notify the swarming service of a successful upload. If the notification
  fails for any reason, we ignore it. We don't care as it just means we'll
  upload it again later on, and the symbol server will handle that
  graciously.

  This func runs in a different process from the main one, so we cannot
  share the storage object. Instead, we create our own. This func stays
  alive for the life of the process, so we only create one here overall.

  Args:
    dedupe_namespace: The isolateserver namespace to dedupe uploaded symbols.
    dedupe_queue: The queue to read SymbolElements from
  """
  if dedupe_queue is None:
    return
  sym_file = ''
  try:
    with timeout_util.Timeout(DEDUPE_TIMEOUT):
      storage = isolateserver.get_storage_api(constants.ISOLATESERVER,
                                              dedupe_namespace)
    # A None sentinel on the queue terminates the loop (and this process).
    for symbol_element in iter(dedupe_queue.get, None):
      if not symbol_element or not symbol_element.symbol_item:
        continue
      symbol_item = symbol_element.symbol_item
      push_state = symbol_element.opaque_push_state
      # Remember the last file name so the except block can report it.
      sym_file = symbol_item.sym_file if symbol_item.sym_file else ''
      if push_state is not None:
        with timeout_util.Timeout(DEDUPE_TIMEOUT):
          logging.debug('sending %s to dedupe server', sym_file)
          symbol_item.prepare(SymbolItem.ALGO)
          storage.push(symbol_item, push_state, symbol_item.content())
          logging.debug('sent %s', sym_file)
    logging.info('dedupe notification finished; exiting')
  except Exception:
    logging.warning('posting %s to dedupe server failed',
                    os.path.basename(sym_file), exc_info=True)

    # Keep draining the queue though so it doesn't fill up.
    while dedupe_queue.get() is not None:
      continue
def testSimple(self):
  """Test we can Wait/Post."""
  # If this fails, we'd just hang :).
  with timeout_util.Timeout(30):
    lock = locking.PipeLock()
    # Post twice, then consume both posts with two Waits.
    for _ in range(2):
      lock.Post()
    for _ in range(2):
      lock.Wait()
    del lock
def run(self, *args, **kwargs):
  """Run the requested test suite.

  If the test suite fails, raise a BackgroundFailure.
  """
  with timeout_util.Timeout(constants.MAX_TIMEOUT_SECONDS):
    result = super(_LessBacktracingTestRunner, self).run(*args, **kwargs)
    if result is None or not result.wasSuccessful():
      raise parallel.BackgroundFailure(
          'Test harness failed. See logs for details.')
def set_signal_level(self, client_context, requested_sig_level,
                     min_sig_level_allowed=-85, tolerance_percent=3,
                     timeout=240):
  """Set wifi signal to desired level by changing attenuation.

  @param client_context: Client context object.
  @param requested_sig_level: Negative int value in dBm for wifi signal
                              level to be set.
  @param min_sig_level_allowed: Minimum signal level allowed; this is to
                                ensure that we don't set a signal that is
                                too weak and DUT can not associate.
  @param tolerance_percent: Percentage to be used to calculate the desired
                            range for the wifi signal level.
  @param timeout: Seconds to keep adjusting attenuation before giving up.
  """
  atten_db = 0
  starting_sig_level = client_context.wifi_signal_level
  if not starting_sig_level:
    raise error.TestError("No signal detected.")
  if not (min_sig_level_allowed <= requested_sig_level <=
          starting_sig_level):
    raise error.TestError("Requested signal level (%d) is either "
                          "higher than current signal level (%r) with "
                          "0db attenuation or lower than minimum "
                          "signal level (%d) allowed." %
                          (requested_sig_level,
                           starting_sig_level,
                           min_sig_level_allowed))
  try:
    with timeout_util.Timeout(timeout):
      while True:
        client_context.reassociate(timeout_seconds=1)
        current_sig_level = client_context.wifi_signal_level
        logging.info("Current signal level %r", current_sig_level)
        if not current_sig_level:
          raise error.TestError("No signal detected.")
        if self.signal_in_range(requested_sig_level, current_sig_level,
                                tolerance_percent):
          logging.info("Signal level set to %r.", current_sig_level)
          break
        if current_sig_level > requested_sig_level:
          # Signal too strong: apply current attenuation, then step up.
          # NOTE(review): attenuation is applied before the increment, so
          # the first pass re-applies 0 dB — confirm this is intended.
          self.set_variable_attenuation(atten_db)
          atten_db += 1
        if current_sig_level < requested_sig_level:
          # Signal too weak: back the attenuation off.
          # NOTE(review): atten_db can go negative here — verify
          # set_variable_attenuation rejects or clamps negative values.
          self.set_variable_attenuation(atten_db)
          atten_db -= 1
  except (timeout_util.TimeoutError, error.TestError,
          error.TestFail) as e:
    raise error.TestError("Not able to set wifi signal to requested "
                          "level. \n%s" % e)
def FindDuplicates(symbols, dedupe_namespace):
  """Mark symbol files we've already uploaded as duplicates.

  Using the swarming service, ask it to tell us which symbol files we've
  already uploaded in previous runs and/or by other bots. If the query
  fails for any reason, we'll just upload all symbols. This is fine as the
  symbol server will do the right thing and this phase is purely an
  optimization.

  Args:
    symbols: An iterable of SymbolFiles to be uploaded.
    dedupe_namespace: String id for the comparison namespace.

  Yields:
    All SymbolFiles from symbols, but duplicates have status updated to
    DUPLICATE.
  """
  storage_query = OpenDeduplicateConnection(dedupe_namespace)

  # We query isolate in batches, to reduce the number of network queries.
  for batch in BatchGenerator(symbols, DEDUPE_LIMIT):
    query_results = None

    if storage_query:
      # Convert SymbolFiles into DedupeItems.
      items = [DedupeItem(x) for x in batch]
      for item in items:
        item.prepare(DedupeItem.ALGO)

      # Look for duplicates.
      try:
        with timeout_util.Timeout(DEDUPE_TIMEOUT):
          query_results = storage_query.contains(items)
      except Exception:
        # Disable dedupe for the remaining batches; everything will just
        # be uploaded.
        logging.warning('talking to dedupe server failed', exc_info=True)
        storage_query = None

    if query_results is not None:
      # Assume everything in the batch is a duplicate, then clear the flag
      # for the items the server reported as missing.
      for b in batch:
        b.status = SymbolFile.DUPLICATE

      # Only the non-duplicates appear in the query_results.
      # (iteritems: this module is Python 2.)
      for item, push_state in query_results.iteritems():
        # Remember the dedupe state, so we can mark the symbol as uploaded
        # later on.
        item.symbol.status = SymbolFile.INITIAL
        item.symbol.dedupe_item = item
        item.symbol.dedupe_push_state = push_state

    # Yield all symbols we haven't shown to be duplicates.
    for b in batch:
      if b.status == SymbolFile.DUPLICATE:
        logging.debug('Found duplicate: %s', b.display_name)
      yield b
def PerformStage(self):
  """Run configured VM tests, forgiving failures of warn_only tests."""
  # These directories are used later to archive test artifacts.
  if not self._run.options.vmtests:
    return
  test_results_root = commands.CreateTestRoot(self._build_root)
  test_basename = _VM_TEST_RESULTS % dict(attempt=self._attempt)
  if self._test_basename:
    test_basename = self._test_basename
  try:
    if not self._vm_tests:
      self._vm_tests = self._run.config.vm_tests

    failed_tests = []
    for vm_test in self._vm_tests:
      logging.info('Running VM test %s.', vm_test.test_type)
      if vm_test.test_type == constants.VM_SUITE_TEST_TYPE:
        per_test_results_dir = os.path.join(test_results_root,
                                            vm_test.test_suite)
      else:
        per_test_results_dir = os.path.join(test_results_root,
                                            vm_test.test_type)
      try:
        with cgroups.SimpleContainChildren('VMTest'):
          r = ' Reached VMTestStage test run timeout.'
          with timeout_util.Timeout(vm_test.timeout, reason_message=r):
            self._RunTest(vm_test, per_test_results_dir)
      except Exception:
        failed_tests.append(vm_test)
        # warn_only tests are forgiven; anything else aborts the stage.
        if vm_test.warn_only:
          logging.warning('Optional test failed. Forgiving the failure.')
        else:
          raise

    if failed_tests:
      # If any of the tests failed but not raise an exception, mark
      # the stage as warning.
      self._stage_exception_handler = self._HandleExceptionAsWarning
      raise failures_lib.TestWarning(
          'VMTestStage succeeded, but some optional tests failed.')
  except Exception as e:
    # TestWarning is the forgiven-failure path; only hard failures get
    # the error log and VM file archive.
    if not isinstance(e, failures_lib.TestWarning):
      # pylint: disable=logging-not-lazy
      logging.error(
          _ERROR_MSG % dict(test_name='VMTests',
                            test_results=test_basename))
      self._ArchiveVMFiles(test_results_root)
    raise
  finally:
    if self._run.config.vm_test_report_to_dashboards:
      self._ReportResultsToDashboards(test_results_root)
    self._ArchiveTestResults(test_results_root, test_basename)
def testFractionTimeout(self):
  """Tests that a sub-second timeout is passed through without rounding."""
  # Capture setitimer arguments.
  mock_setitimer = self.PatchObject(signal, 'setitimer', autospec=True,
                                    return_value=(0, 0))
  with timeout_util.Timeout(0.5):
    pass

  # The timeout should be fraction, rather than rounding up to int seconds.
  # Expected call sequence: presumably clear any existing timer, arm a
  # 0.5s one-shot timer, then clear it on context exit — confirm against
  # timeout_util's implementation.
  self.assertEqual(mock_setitimer.call_args_list,
                   [((signal.ITIMER_REAL, 0), ),
                    ((signal.ITIMER_REAL, 0.5, 0), ),
                    ((signal.ITIMER_REAL, 0), )])
def PerformStage(self):
  """Run unit tests, then exercise au-generator.zip if the image has one."""
  useflags = self._run.config.useflags
  extra_env = {'USE': ' '.join(useflags)} if useflags else {}
  reason = ' Reached UnitTestStage timeout.'
  with timeout_util.Timeout(self.UNIT_TEST_TIMEOUT, reason_message=reason):
    commands.RunUnitTests(self._build_root,
                          self._current_board,
                          blacklist=self._run.config.unittest_blacklist,
                          extra_env=extra_env)

  image_dir = self.GetImageDirSymlink()
  if os.path.exists(os.path.join(image_dir, 'au-generator.zip')):
    commands.TestAuZip(self._build_root, image_dir)
def PerformStage(self):
  """Run unit tests and upload the unit-test tarball artifact."""
  useflags = self._run.config.useflags
  extra_env = {'USE': ' '.join(useflags)} if useflags else {}
  reason = ' Reached UnitTestStage timeout.'
  with timeout_util.Timeout(self.UNIT_TEST_TIMEOUT, reason_message=reason):
    commands.RunUnitTests(
        self._build_root,
        self._current_board,
        blacklist=self._run.config.unittest_blacklist,
        extra_env=extra_env,
        build_stage=self._run.config.build_packages)

  # Package UnitTest binaries.
  tarball = commands.BuildUnitTestTarball(
      self._build_root, self._current_board, self.archive_path)
  self.UploadArtifact(tarball, archive=False)
def PerformStage(self):
  """Run the GCE VM test suite, archiving results no matter the outcome."""
  # These directories are used later to archive test artifacts.
  test_results_dir = commands.CreateTestRoot(self._build_root)
  test_basename = constants.GCE_TEST_RESULTS % dict(attempt=self._attempt)
  try:
    logging.info('Running GCE tests...')
    with cgroups.SimpleContainChildren('GCETest'):
      reason = ' Reached GCETestStage test run timeout.'
      with timeout_util.Timeout(self.TEST_TIMEOUT, reason_message=reason):
        self._RunTest(constants.GCE_VM_TEST_TYPE, test_results_dir)
  except Exception:
    logging.error(
        _GCE_TEST_ERROR_MSG % dict(gce_test_results=test_basename))
    raise
  finally:
    self._ArchiveTestResults(test_results_dir, test_basename)
def _DebugRunCommand(cls, cmd, **kwargs):
  """Swallow any exception RunCommand raises.

  Since these commands are for purely informational purposes, we don't want
  random issues causing the bot to die.

  Args:
    cmd: Command (list) to run.
    **kwargs: Passed through to RunCommand; must include 'debug_level'.

  Returns:
    Stdout on success, '' on failure or timeout.
  """
  log_level = kwargs['debug_level']
  try:
    with timeout_util.Timeout(cls.DEBUG_CMD_TIMEOUT):
      return cros_build_lib.RunCommand(cmd, **kwargs).output
  except (cros_build_lib.RunCommandError, timeout_util.TimeoutError) as e:
    logging.log(log_level, 'Running %s failed: %s', cmd[0], str(e))
    return ''
def GeneratePayload(payload, log_file):
  """Returns the error code from generating an update with the devserver.

  Args:
    payload: Payload description object with target/base/key/for_vm
      attributes.
    log_file: Path to write devserver output to.

  Raises:
    cros_build_lib.RunCommandError, timeout_util.TimeoutError: on failure;
      the devserver log is dumped to the error log before re-raising.
  """
  # Base command.
  command = ['start_devserver', '--pregenerate_update', '--exit']

  # Consolidate the repeated `if payload.base:` / `if payload.key:` checks
  # from the original into one place each; appended flag order (--image,
  # --src_image, --private_key) is unchanged.
  command.append('--image=%s' % path_util.ToChrootPath(payload.target))

  if payload.base:
    command.append('--src_image=%s' % path_util.ToChrootPath(payload.base))

  in_chroot_key = None
  if payload.key:
    in_chroot_key = path_util.ToChrootPath(payload.key)
    command.append('--private_key=%s' % in_chroot_key)

  if payload.base:
    debug_message = 'delta payload from %s to %s' % (payload.base,
                                                     payload.target)
  else:
    debug_message = 'full payload to %s' % payload.target

  if payload.for_vm:
    debug_message += ' and not patching the kernel.'

  if in_chroot_key:
    debug_message = 'Generating a signed %s' % debug_message
  else:
    debug_message = 'Generating an unsigned %s' % debug_message
  logging.info(debug_message)

  try:
    with timeout_util.Timeout(constants.MAX_TIMEOUT_SECONDS):
      cros_build_lib.SudoRunCommand(command, log_stdout_to_file=log_file,
                                    combine_stdout_stderr=True,
                                    enter_chroot=True,
                                    print_cmd=False,
                                    cwd=constants.SOURCE_ROOT)
  except (timeout_util.TimeoutError, cros_build_lib.RunCommandError):
    # Print output first, then re-raise the exception.
    if os.path.isfile(log_file):
      logging.error(osutils.ReadFile(log_file))
    raise
def run(self, call, **dargs):
  """Run |call| with retries, enforcing a timeout when on the main thread."""
  if retry_util is None:
    raise ImportError('Unable to import chromite. Please consider to '
                      'run build_externals to build site packages.')
  # exc_retry: We retry if this exception is raised.
  # blacklist: Exceptions that we raise immediately if caught.
  exc_retry = Exception
  blacklist = (ImportError, error.RPCException, proxy.JSONRPCException,
               timeout_util.TimeoutError)
  backoff = 2
  max_retry = convert_timeout_to_retry(backoff, self.timeout_min,
                                       self.delay_sec)

  def _run(self, call, **dargs):
    # Plain passthrough used as the retried callable on the main thread.
    return super(RetryingAFE, self).run(call, **dargs)

  def handler(exc):
    """Check if exc is an exc_retry or if it's blacklisted.

    @param exc: An exception.

    @return: True if exc is an exc_retry and is not blacklisted.
             False otherwise.
    """
    is_exc_to_check = isinstance(exc, exc_retry)
    is_blacklisted = isinstance(exc, blacklist)
    return is_exc_to_check and not is_blacklisted

  # If the call is not in main thread, signal can't be used to abort the
  # call. In that case, use a basic retry which does not enforce timeout
  # if the process hangs.
  @retry.retry(Exception, timeout_min=self.timeout_min,
               delay_sec=self.delay_sec,
               blacklist=[ImportError, error.RPCException,
                          proxy.ValidationError])
  def _run_in_child_thread(self, call, **dargs):
    return super(RetryingAFE, self).run(call, **dargs)

  # NOTE(review): threading._MainThread is a private CPython class; the
  # public equivalent is comparing against threading.main_thread().
  if isinstance(threading.current_thread(), threading._MainThread):
    # Set the keyword argument for GenericRetry
    dargs['sleep'] = self.delay_sec
    dargs['backoff_factor'] = backoff
    with timeout_util.Timeout(self.timeout_min * 60):
      return retry_util.GenericRetry(handler, max_retry, _run, self, call,
                                     **dargs)
  else:
    return _run_in_child_thread(self, call, **dargs)
def OpenDeduplicateConnection(dedupe_namespace):
  """Open a connection to the isolate server for Dedupe use.

  Args:
    dedupe_namespace: String id for the comparison namespace.

  Returns:
    Connection proxy, or None on failure.
  """
  try:
    with timeout_util.Timeout(DEDUPE_TIMEOUT):
      return isolateserver.get_storage_api(
          constants.ISOLATESERVER, dedupe_namespace)
  except Exception:
    # Dedupe is an optimization only; a dead server just means we upload
    # everything, so swallow the error after logging it.
    logging.warning('initializing isolate server connection failed',
                    exc_info=True)

  return None
def PerformStage(self):
  """Run the image tests, always harvesting perf values afterwards."""
  # CreateTestRoot returns a temp directory inside the chroot; translate
  # it to a path outside the chroot and nest our results dir under it.
  chroot_results_dir = commands.CreateTestRoot(self._build_root)
  test_results_dir = os.path.join(
      self._build_root, chroot_results_dir[1:], 'image_test_results')
  osutils.SafeMakedirs(test_results_dir)
  try:
    with timeout_util.Timeout(self.IMAGE_TEST_TIMEOUT):
      commands.RunTestImage(
          self._build_root,
          self._current_board,
          self.GetImageDirSymlink(),
          test_results_dir,
      )
  finally:
    self.SendPerfValues(test_results_dir)
def PerformStage(self):
  """Run board unit tests, tolerating failures (crbug.com/936123)."""
  useflags = self._run.config.useflags
  extra_env = {'USE': ' '.join(useflags)} if useflags else {}
  reason = ' Reached UnitTestStage timeout.'
  with timeout_util.Timeout(self.UNIT_TEST_TIMEOUT, reason_message=reason):
    try:
      commands.RunUnitTests(
          self._build_root,
          self._current_board,
          blacklist=self._run.config.unittest_blacklist,
          build_stage=self._run.config.build_packages,
          chroot_args=ChrootArgs(self._run.options),
          extra_env=extra_env)
    except failures_lib.BuildScriptFailure:
      logging.PrintBuildbotStepWarnings()
      logging.warning('Unittests failed. Ignored crbug.com/936123.')
def PerformStage(self):
  """Run each configured VM test; archive results whether or not we fail."""
  # These directories are used later to archive test artifacts.
  test_results_dir = commands.CreateTestRoot(self._build_root)
  test_basename = constants.VM_TEST_RESULTS % dict(attempt=self._attempt)
  try:
    for vm_test in self._run.config.vm_tests:
      logging.info('Running VM test %s.', vm_test.test_type)
      with cgroups.SimpleContainChildren('VMTest'):
        reason = ' Reached VMTestStage test run timeout.'
        with timeout_util.Timeout(vm_test.timeout, reason_message=reason):
          self._RunTest(vm_test.test_type, test_results_dir)
  except Exception:
    # Keep the raw VM files around for post-mortem on failure only.
    logging.error(_VM_TEST_ERROR_MSG % dict(vm_test_results=test_basename))
    self._ArchiveVMFiles(test_results_dir)
    raise
  finally:
    self._ArchiveTestResults(test_results_dir, test_basename)
def _PerformStage(self, workdir, results_dir):
  """Actually performs this stage.

  Args:
    workdir: The workspace directory to use for all temporary files.
    results_dir: The directory to use to drop test results into.
  """
  dut_target_image = self._SubDutTargetImage()
  osutils.SafeMakedirsNonRoot(self._Workspace(workdir))
  vms = moblab_vm.MoblabVm(self._Workspace(workdir))
  try:
    r = ' reached %s test run timeout.' % self
    with timeout_util.Timeout(self._PERFORM_TIMEOUT_S, reason_message=r):
      start_time = datetime.datetime.now()
      vms.Create(self.GetImageDirSymlink(), self.GetImageDirSymlink())
      payload_dir = self._GenerateTestArtifactsInMoblabDisk(vms)
      vms.Start()
      # Budget the remaining time for the test run itself, since VM
      # creation/startup already consumed part of _PERFORM_TIMEOUT_S.
      elapsed = (datetime.datetime.now() - start_time).total_seconds()
      RunMoblabTests(
          moblab_board=self._current_board,
          moblab_ip=vms.moblab_ssh_port,
          dut_target_image=dut_target_image,
          results_dir=results_dir,
          local_image_cache=payload_dir,
          timeout_m=(self._PERFORM_TIMEOUT_S - elapsed) // 60,
      )
      vms.Stop()
      ValidateMoblabTestSuccess(results_dir)
  # Bare except is deliberate: archive on any failure, then re-raise.
  except:
    # Ignore errors while archiving images, but re-raise the original error.
    try:
      vms.Stop()
      self._ArchiveMoblabVMWorkspace(self._Workspace(workdir))
    except Exception as e:
      logging.error('Failed to archive VM images after test failure: %s', e)
    raise
  finally:
    vms.Destroy()
def main(argv): options = ParseArguments(argv) # Use process group id as the unique id in track and log files, since # os.setsid is executed before the current process is run. pid = os.getpid() pgid = os.getpgid(pid) # Setting log files for CrOS auto-update process. # Log file: file to record every details of CrOS auto-update process. log_file = cros_update_progress.GetExecuteLogFile(options.host_name, pgid) logging.info('Writing executing logs into file: %s', log_file) logConfig.SetFileHandler(log_file) # Create a progress_tracker for tracking CrOS auto-update progress. progress_tracker = cros_update_progress.AUProgress(options.host_name, pgid) # Create a dir for temporarily storing devserver codes and logs. au_tempdir = cros_update_progress.GetAUTempDirectory(options.host_name, pgid) # Create cros_update instance to run CrOS auto-update. cros_updater_trigger = CrOSUpdateTrigger( options.host_name, options.build_name, options.static_dir, progress_tracker=progress_tracker, log_file=log_file, au_tempdir=au_tempdir, force_update=options.force_update, full_update=options.full_update, original_build=options.original_build, payload_filename=options.payload_filename, clobber_stateful=options.clobber_stateful, quick_provision=options.quick_provision, devserver_url=options.devserver_url, static_url=options.static_url) # Set timeout the cros-update process. try: with timeout_util.Timeout(CROS_UPDATE_TIMEOUT_MIN * 60): cros_updater_trigger.TriggerAU() except timeout_util.TimeoutError as e: error_msg = ('%s. The CrOS auto-update process is timed out, thus will be ' 'terminated' % str(e)) progress_tracker.WriteStatus(CROS_ERROR_TEMPLATE % error_msg)
def PerformStage(self):
  """Run unit tests, upload the tarball, and test au-generator if present."""
  useflags = self._run.config.useflags
  extra_env = {'USE': ' '.join(useflags)} if useflags else {}
  reason = ' Reached UnitTestStage timeout.'
  with timeout_util.Timeout(self.UNIT_TEST_TIMEOUT, reason_message=reason):
    commands.RunUnitTests(self._build_root,
                          self._current_board,
                          blacklist=self._run.config.unittest_blacklist,
                          extra_env=extra_env)

  # Package UnitTest binaries.
  tarball = commands.BuildUnitTestTarball(
      self._build_root, self._current_board, self.archive_path)
  self.UploadArtifact(tarball, archive=False)

  image_dir = self.GetImageDirSymlink()
  if os.path.exists(os.path.join(image_dir, 'au-generator.zip')):
    commands.TestAuZip(self._build_root, image_dir)