def Kill(self, sig, log_level, first=False):
  """Deliver |sig| to the task's process, tolerating an already-dead target.

  Args:
    sig: The signal number to deliver.
    log_level: Level at which diagnostic messages are logged.
    first: True when this is the initial signal in the escalation sequence.
  """
  self._killing.set()
  self._WaitForStartup()

  if logging.getLogger().isEnabledFor(log_level):
    # Emit diagnostics about the process we are about to signal.
    logging.log(log_level, 'Killing %r (sig=%r %s)', self.pid, sig,
                signals.StrSignal(sig))
    if first:
      parent = str(self.pid)
      pgrep_out = self._DebugRunCommand(
          ('pgrep', '-P', parent), debug_level=log_level, print_cmd=False,
          error_code_ok=True, capture_output=True)
      # Dump state for the parent and each of its direct children.
      for child in [parent] + pgrep_out.splitlines():
        self._DumpDebugPid(log_level, child)

  try:
    os.kill(self.pid, sig)
  except OSError as err:
    # ESRCH means the process already exited; anything else is a real error.
    if err.errno != errno.ESRCH:
      raise
def Kill(self, sig, log_level, first=False):
  """Kill the child process with |sig|, ignoring already-exited processes.

  Args:
    sig: Signal to deliver via os.kill().
    log_level: Log level used for any diagnostic output.
    first: Whether this is the first kill attempt for this task.
  """
  self._killing.set()
  self._WaitForStartup()

  if logging.getLogger().isEnabledFor(log_level):
    # Record what we are killing and, on the first attempt, dump debug
    # state for the process tree rooted at self.pid.
    logging.log(log_level, 'Killing %r (sig=%r %s)', self.pid, sig,
                signals.StrSignal(sig))
    if first:
      root_pid = str(self.pid)
      children = self._DebugRunCommand(
          ('pgrep', '-P', root_pid), debug_level=log_level, print_cmd=False,
          error_code_ok=True, capture_output=True).splitlines()
      for debug_pid in [root_pid] + children:
        self._DumpDebugPid(log_level, debug_pid)

  try:
    os.kill(self.pid, sig)
  except OSError as exc:
    if exc.errno == errno.ESRCH:
      return  # Process is already gone; nothing left to do.
    raise
def CreateTunnel(self, to_local=None, to_remote=None, connect_settings=None):
  """Establishes a SSH tunnel to the remote device as a background process.

  Args:
    to_local: A list of PortForwardSpec objects to forward from the local
      machine to the remote machine.
    to_remote: A list of PortForwardSpec to forward from the remote machine
      to the local machine.
    connect_settings: The SSH connect settings to use.

  Returns:
    A Popen object.  Note that it represents an already started background
    process.  Calling poll() on the return value can be used to check that
    the tunnel is still running.  To close the tunnel call terminate().
  """
  cmd = self._GetSSHCmd(connect_settings=connect_settings)

  # Each forward spec turns into a ('-L'/'-R', spec) argument pair.
  for flag, specs in (('-L', to_local), ('-R', to_remote)):
    if specs is None:
      continue
    for spec in specs:
      cmd.extend((flag, spec.command_line_spec))

  cmd.append('-N')
  cmd.append(self.target_ssh_url)
  logging.log(self.debug_level, '%s', cros_build_lib.CmdToStr(cmd))
  return RemoteAccess._mockable_popen(cmd)
def Log(*args, **kwargs):
  """Emit a log message whose level depends on the 'silent' flag.

  Args:
    silent: When True, log at DEBUG; otherwise log at INFO.  Defaults to
      False.  Consumed here and not forwarded to logging.log().
  """
  if kwargs.pop('silent', False):
    level = logging.DEBUG
  else:
    level = logging.INFO
  logging.log(level, *args, **kwargs)
def Log(*args, **kwargs):
  """Conditional logging helper.

  Args:
    silent: If set to True, logs with level DEBUG; logs with level INFO
      otherwise.  Defaults to False.  Popped before forwarding.
  """
  # Pop 'silent' so it is not passed on to logging.log().
  silent = kwargs.pop('silent', False)
  logging.log(logging.DEBUG if silent else logging.INFO, *args, **kwargs)
def _DebugRunCommand(cls, cmd, **kwargs):
  """Run |cmd| for debugging, swallowing any exception RunCommand raises.

  Since these commands are for purely informational purposes, we don't want
  random issues causing the bot to die.

  Args:
    cmd: The command (sequence of arguments) to run.
    **kwargs: Passed through to cros_build_lib.RunCommand.  Must include
      'debug_level', which is also used to log our own failure message.

  Returns:
    The command's stdout on success, or an empty string if the command
    failed or timed out.
  """
  log_level = kwargs['debug_level']
  try:
    # Bound the runtime so a wedged diagnostic command cannot hang us.
    with timeout_util.Timeout(cls.DEBUG_CMD_TIMEOUT):
      return cros_build_lib.RunCommand(cmd, **kwargs).output
  except (cros_build_lib.RunCommandError, timeout_util.TimeoutError) as e:
    logging.log(log_level, 'Running %s failed: %s', cmd[0], str(e))
    return ''
def _DebugRunCommand(cls, cmd, **kwargs):
  """Swallow any exception RunCommand raises.

  Since these commands are for purely informational purposes, we don't want
  random issues causing the bot to die.

  Args:
    cmd: Command (argument sequence) to execute.
    **kwargs: Forwarded to cros_build_lib.RunCommand.  The required
      'debug_level' entry also sets the level of our failure log message.

  Returns:
    Stdout on success; an empty string when the command fails or exceeds
    cls.DEBUG_CMD_TIMEOUT.
  """
  log_level = kwargs['debug_level']
  try:
    # Cap the runtime so informational commands cannot wedge the caller.
    with timeout_util.Timeout(cls.DEBUG_CMD_TIMEOUT):
      return cros_build_lib.RunCommand(cmd, **kwargs).output
  except (cros_build_lib.RunCommandError, timeout_util.TimeoutError) as e:
    logging.log(log_level, 'Running %s failed: %s', cmd[0], str(e))
    return ''
def main(argv):
  """Lint the gyp files given on the command line.

  Args:
    argv: Command-line arguments, excluding the program name.

  Returns:
    0 if all files pass linting, 1 otherwise.
  """
  parser = GetParser()
  opts = parser.parse_args(argv)

  if not opts.files:
    logging.warning('No files provided to lint. Doing nothing.')
    return 0

  extensions = set(opts.extensions.split(','))
  num_files = 0
  for gypfile in FilterPaths(opts.files, extensions):
    # Note each file as we go so verbose runs show progress (matches the
    # behavior of the companion gn linter).
    logging.debug('Checking %s', gypfile)
    results = CheckGypFile(gypfile)
    if results:
      logging.error('**** %s: found %i issue(s)', gypfile, len(results))
      for result in results:
        logging.log(result.type, '%s: %s', result.linter, result.msg)
      num_files += 1

  if num_files:
    logging.error('%i file(s) failed linting', num_files)
  return 1 if num_files else 0
def _KillChildren(cls, bg_tasks, log_level=logging.WARNING):
  """Terminate every task in |bg_tasks|, escalating signals as needed.

  This is needed to prevent hangs in the case where child processes refuse
  to exit.

  Args:
    bg_tasks: A list filled with _BackgroundTask objects.
    log_level: The log level of log messages.
  """
  logging.log(log_level, 'Killing tasks: %r', bg_tasks)

  # Escalation schedule: (signal, how long to wait before escalating).
  # A timeout of None means wait forever.
  escalation = (
      (signal.SIGXCPU, cls.SIGTERM_TIMEOUT),
      (signal.SIGTERM, cls.SIGKILL_TIMEOUT),
      (signal.SIGKILL, None),
  )
  for attempt, (sig, timeout) in enumerate(escalation):
    # Deliver the current signal to every remaining task.
    for task in bg_tasks:
      task.Kill(sig, log_level, attempt == 0)

    if timeout is None:
      # Final stage: block until everything is gone.
      for task in bg_tasks:
        task.join()
        task.Cleanup()
      break

    # Reap finished tasks from the tail until the deadline passes.
    deadline = time.time() + timeout
    while bg_tasks:
      remaining = deadline - time.time()
      if remaining <= 0:
        break
      tail = bg_tasks[-1]
      tail.join(remaining)
      if tail.exitcode is not None:
        tail.Cleanup()
        bg_tasks.pop()
def _KillChildren(cls, bg_tasks, log_level=logging.WARNING):
  """Kill a deque of background tasks: SIGXCPU, then SIGTERM, then SIGKILL.

  This is needed to prevent hangs in the case where child processes refuse
  to exit.

  Args:
    bg_tasks: A list filled with _BackgroundTask objects.
    log_level: The log level of log messages.
  """
  logging.log(log_level, 'Killing tasks: %r', bg_tasks)

  stages = [
      (signal.SIGXCPU, cls.SIGTERM_TIMEOUT),
      (signal.SIGTERM, cls.SIGKILL_TIMEOUT),
      (signal.SIGKILL, None),
  ]
  first_signal = True
  for sig, grace_period in stages:
    for task in bg_tasks:
      task.Kill(sig, log_level, first_signal)
    first_signal = False

    if grace_period is None:
      # Last-resort signal was sent; wait indefinitely for the survivors.
      for task in bg_tasks:
        task.join()
        task.Cleanup()
      break

    # Give tasks until the grace period expires to exit on their own,
    # reaping them from the tail of the list as they finish.
    stop_at = time.time() + grace_period
    while bg_tasks:
      time_left = stop_at - time.time()
      if time_left <= 0:
        break
      task = bg_tasks[-1]
      task.join(time_left)
      if task.exitcode is not None:
        task.Cleanup()
        bg_tasks.pop()
def main(argv):
  """Lint the gn files named on the command line; return 0 iff all pass."""
  opts = GetParser().parse_args(argv)

  if not opts.files:
    logging.warning('No files provided to lint. Doing nothing.')
    return 0

  extensions = set(opts.extensions.split(','))
  failed = 0
  for gnfile in FilterPaths(opts.files, extensions):
    logging.debug('Checking %s', gnfile)
    issues = CheckGnFile(gnfile)
    if not issues:
      continue
    logging.error('**** %s: found %i issue(s)', gnfile, len(issues))
    for issue in issues:
      logging.log(issue.type, '%s: %s', issue.linter, issue.msg)
    failed += 1

  if failed:
    logging.error('%i file(s) failed linting', failed)
  return 1 if failed else 0