def ReportMetrics(self, wait_for_report=False):
  """Reports the collected metrics using a separate async process.

  Args:
    wait_for_report: bool, True to block until the reporting subprocess
      has finished before returning.
  """
  if not self._metrics:
    return
  # delete=False: the file must outlive this process so the child reporter
  # can read it; the reporter is responsible for cleanup.
  temp_metrics_file = tempfile.NamedTemporaryFile(delete=False)
  with temp_metrics_file:
    pickle.dump(self._metrics, temp_metrics_file)
    self._metrics = []

  # TODO(user): make this not depend on the file.
  reporting_script_path = os.path.join(
      os.path.dirname(__file__), 'metrics_reporter.py')
  execution_args = execution_utils.ArgsForPythonTool(
      reporting_script_path, temp_metrics_file.name)

  # Propagate this process's module search path to the child so it can
  # import the same packages.
  exec_env = os.environ.copy()
  exec_env['PYTHONPATH'] = os.pathsep.join(sys.path)

  try:
    p = subprocess.Popen(execution_args, env=exec_env,
                         **self._async_popen_args)
    log.debug('Metrics reporting process started...')
  except OSError:
    # This can happen specifically if the Python executable moves between the
    # start of this process and now.
    log.debug('Metrics reporting process failed to start.')
    # Bug fix: without this return, `p` is unbound below and
    # p.communicate() raises UnboundLocalError when wait_for_report is set.
    return
  if wait_for_report:
    # NOTE: p.wait() can cause a deadlock. p.communicate() is recommended.
    # See python docs for more information.
    p.communicate()
    log.debug('Metrics reporting process finished.')
def RunOpenSSL(self, cmd_args, cmd_input=None):
  """Run an openssl command with optional input and return the output."""
  command = [self.openssl_executable] + list(cmd_args)
  try:
    proc = subprocess.Popen(command,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    output, stderr = proc.communicate(cmd_input)
    log.debug('Ran command "{0}" with standard error of:\n{1}'.format(
        ' '.join(command), stderr))
  except OSError as e:
    # This should be rare. Generally, OSError will show up when openssl
    # doesn't exist or can't be executed. However, in the code, we use
    # "FindExecutableOnPath" which already checks for these things.
    raise OpenSSLException('[{0}] exited with [{1}].'.format(
        command[0], e.strerror))

  if proc.returncode:
    # This will happen whenever there is an openssl failure (e.g. a failure
    # to decrypt a message with the given key).
    raise OpenSSLException(
        '[{0}] exited with return code [{1}]:\n{2}.'.format(
            command[0], proc.returncode, stderr))
  return output
def _GetProcess(docker_args, stdin_file, stdout_file, stderr_file):
  """Construct a docker subprocess with the given arguments and I/O files."""
  command = ['docker'] + docker_args
  return subprocess.Popen(command,
                          stdin=stdin_file,
                          stdout=stdout_file,
                          stderr=stderr_file)
def _RunExecutable(cmd_args, strict_error_checking=True):
  """Run the given command, routing output and translating failures.

  Args:
    cmd_args: list of str, the arguments (including executable path) to run
    strict_error_checking: bool, whether a non-zero, non-255 exit code should
      be considered a failure.

  Returns:
    int, the return code of the command

  Raises:
    SshLikeCmdFailed: if the command failed (based on the command exit code
      and the strict_error_checking flag)
  """
  outfile = SSH_OUTPUT_FILE or os.devnull
  with open(outfile, 'w') as output_file:
    # Show output on the user's terminal only when user output is enabled
    # and no capture file was configured; otherwise send both streams to
    # the output file (which may be os.devnull).
    if log.IsUserOutputEnabled() and not SSH_OUTPUT_FILE:
      stdout = stderr = None
    else:
      stdout = stderr = output_file
    if (platforms.OperatingSystem.IsWindows() and
        not cmd_args[0].endswith('winkeygen.exe')):
      # TODO(user): b/25126583 will drop StrictHostKeyChecking=no and 'y'.
      # PuTTY and friends always prompt on fingerprint mismatch. A 'y'
      # response adds/updates the fingerprint registry entry and proceeds.
      # The prompt will appear once for each new/changed host. Redirecting
      # stdin is not a problem. Even interactive ssh is not a problem because
      # a separate PuTTY term is used and it ignores the calling process
      # stdin.
      stdin = subprocess.PIPE
    else:
      stdin = None
    try:
      child = subprocess.Popen(
          cmd_args, stdin=stdin, stdout=stdout, stderr=stderr)
      if stdin == subprocess.PIPE:
        # Max one prompt per host and there can't be more hosts than args.
        child.communicate('y\n' * len(cmd_args))
      returncode = child.wait()
    except OSError as e:
      raise SshLikeCmdFailed(cmd_args[0], message=e.strerror)
  strict_failure = returncode and strict_error_checking
  if strict_failure or returncode == _SSH_ERROR_EXIT_CODE:
    raise SshLikeCmdFailed(cmd_args[0], return_code=returncode)
  return returncode
def More(contents, out=None, prompt=None, check_pager=True):
  """Run a user specified pager or fall back to the internal pager.

  Args:
    contents: The entire contents of the text lines to page.
    out: The output stream, log.out (effectively) if None.
    prompt: The page break prompt.
    check_pager: Checks the PAGER env var and uses it if True.
  """
  if not IsInteractive(output=True):
    # Not attached to a terminal: write everything without paging.
    if not out:
      out = log.out
    out.write(contents)
    return

  if not out:
    # Rendered help to the log file.
    log.file_only_logger.info(contents)
    # Paging shenanigans to stdout.
    out = sys.stdout

  if check_pager:
    pager_cmd = os.environ.get('PAGER', None)
    if pager_cmd == '-':
      # '-' explicitly selects the fallback internal Pager.
      pager_cmd = None
    elif not pager_cmd:
      # Search for a pager that handles ANSI escapes.
      pager_cmd = next(
          (name for name in ('less', 'pager')
           if files.FindExecutableOnPath(name)),
          None)
    if pager_cmd:
      saved_less = os.environ.get('LESS', None)
      if saved_less is None:
        # -R makes less pass ANSI color escapes through.
        os.environ['LESS'] = '-R'
      pager_process = subprocess.Popen(
          pager_cmd, stdin=subprocess.PIPE, shell=True)
      encoding = console_attr.GetConsoleAttr().GetEncoding()
      pager_process.communicate(input=contents.encode(encoding))
      pager_process.wait()
      if saved_less is None:
        # Undo our temporary LESS setting.
        os.environ.pop('LESS')
      return

  # Fall back to the internal pager.
  console_pager.Pager(contents, out, prompt).Run()
def _GenerateKeyNoPassphraseOnWindows(keygen_args):
  """Generate a passphrase-less key on Windows.

  Windows ssh-keygen does not support arguments for the '-P' flag, so we
  communicate with it to have no passphrase.

  Args:
    keygen_args: list of str, the arguments (including path to ssh-keygen
      executable) for the ssh-keygen command.

  Raises:
    SshLikeCmdFailed: if the ssh-keygen process fails.
  """
  err_msg = ('SSH Key Generation failed. Please run this command again in '
             'interactive mode.')
  keygen_process = subprocess.Popen(keygen_args,
                                    stdin=subprocess.PIPE,
                                    stdout=subprocess.PIPE)
  keygen_output = ''
  # ssh-keygen prompts twice: once for the passphrase and once to confirm
  # it; answer each prompt with an empty line (no passphrase).
  for prompt_keywords in [('enter', 'passphrase'),
                          ('enter', 'passphrase', 'again')]:
    # The prompt does not end with a newline, so readline() would block;
    # read one character at a time until the ': ' prompt terminator.
    chunk = ''
    while not chunk.endswith(': '):
      char = keygen_process.stdout.read(1)
      chunk += char
      if not char:  # Process terminated
        break
    keygen_output += chunk
    if not all([keyword in chunk.lower() for keyword in prompt_keywords]):
      # If we don't get the output we're expecting, we don't know how to
      # generate keys.
      log.error(err_msg)
      raise SshLikeCmdFailed(keygen_args[0], message=keygen_output)
    keygen_process.stdin.write('\n')  # empty passphrase
  # Drain remaining output and wait for exit.
  chunk, _ = keygen_process.communicate()  # stderr is not piped
  keygen_output += chunk
  if keygen_process.returncode != 0:
    log.error(err_msg)
    raise SshLikeCmdFailed(keygen_args[0], message=keygen_output,
                           return_code=keygen_process.returncode)
def Exec(args):
  """Starts subprocess with given args and ensures its termination upon exit.

  This starts a subprocess with the given args. The stdout and stderr of the
  subprocess are piped. The subprocess is terminated upon exit.

  Args:
    args: [str], The arguments to execute. The first argument is the command.

  Returns:
    process, The process handle of the subprocess that has been started.
  """
  child = subprocess.Popen(args,
                           stdout=subprocess.PIPE,
                           stderr=subprocess.STDOUT)

  def _TerminateChild():
    # Only terminate if the child has not already exited.
    if child.poll() is None:
      child.terminate()
      child.wait()

  atexit.register(_TerminateChild)
  return child
def _SubprocessSucceeds(cmd): p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE) return p.wait() == 0
def _RunSubprocess(cmd): p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE) if p.wait() != 0: raise RubyConfigError('Unable to run script: [{0}]'.format(cmd)) return p.stdout.read()
def KillSubprocess(p):
  """Kills a subprocess using an OS specific method when python can't do it.

  This also kills all processes rooted in this process.

  Args:
    p: the Popen or multiprocessing.Process object to kill

  Raises:
    RuntimeError: if it fails to kill the process
  """
  # This allows us to kill a Popen object or a multiprocessing.Process
  # object: Popen exposes returncode, multiprocessing.Process exposes
  # exitcode.
  code = None
  if hasattr(p, 'returncode'):
    code = p.returncode
  elif hasattr(p, 'exitcode'):
    code = p.exitcode

  if code is not None:
    # already dead
    return

  if platforms.OperatingSystem.Current() == platforms.OperatingSystem.WINDOWS:
    # taskkill /T kills the whole process tree; /F forces termination.
    # Consume stdout so it doesn't show in the shell
    taskkill_process = subprocess.Popen(
        ['taskkill', '/F', '/T', '/PID', str(p.pid)],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE)
    (stdout, stderr) = taskkill_process.communicate()
    if taskkill_process.returncode != 0 and _IsTaskKillError(stderr):
      # Sometimes taskkill does things in the wrong order and the processes
      # disappear before it gets a chance to kill it. This is exposed as an
      # error even though it's the outcome we want.
      raise RuntimeError(
          'Failed to call taskkill on pid {0}\nstdout: {1}\nstderr: {2}'
          .format(p.pid, stdout, stderr))
  else:
    # Create a mapping of ppid to pid for all processes, then kill all
    # subprocesses from the main process down
    get_pids_process = subprocess.Popen(
        ['ps', '-e', '-o', 'ppid=', '-o', 'pid='],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE)
    (stdout, stderr) = get_pids_process.communicate()
    if get_pids_process.returncode != 0:
      raise RuntimeError('Failed to get subprocesses of process: {0}'
                         .format(p.pid))

    # Create the process map
    # NOTE(review): assumes every ps output line matches the two-number
    # pattern; a malformed line would make re.match return None and raise.
    pid_map = {}
    for line in stdout.strip().split('\n'):
      (ppid, pid) = re.match(r'\s*(\d+)\s+(\d+)', line).groups()
      ppid = int(ppid)
      pid = int(pid)
      children = pid_map.get(ppid)
      if not children:
        pid_map[ppid] = [pid]
      else:
        children.append(pid)

    # Expand all descendants of the main process
    all_pids = [p.pid]
    to_process = [p.pid]
    while to_process:
      current = to_process.pop()
      children = pid_map.get(current)
      if children:
        to_process.extend(children)
        all_pids.extend(children)

    # Kill all the subprocesses we found
    for pid in all_pids:
      _KillPID(pid)
def Exec(args, env=None, no_exit=False, pipe_output_through_logger=False,
         file_only_logger=False):
  """Emulates the os.exec* set of commands, but uses subprocess.

  This executes the given command, waits for it to finish, and then exits
  this process with the exit code of the child process.

  Args:
    args: [str], The arguments to execute. The first argument is the command.
    env: {str: str}, An optional environment for the child process.
    no_exit: bool, True to just return the exit code of the child instead of
      exiting.
    pipe_output_through_logger: bool, True to feed output from the called
      command through the standard logger instead of raw stdout/stderr.
    file_only_logger: bool, If piping through the logger, log to the file only
      instead of log.out and log.err.

  Returns:
    int, The exit code of the child if no_exit is True, else this method does
    not return.
  """
  log.debug('Executing command: %s', args)
  # We use subprocess instead of execv because windows does not support
  # process replacement. The result of execv on windows is that a new
  # processes is started and the original is killed. When running in a shell,
  # the prompt returns as soon as the parent is killed even though the child
  # is still running. subprocess waits for the new process to finish before
  # returning.
  env = _GetToolEnv(env=env)
  process_holder = _ProcessHolder()
  # Install a SIGTERM handler that can forward termination to the child;
  # save the previous handler so it can be restored in the finally block.
  old_handler = signal.signal(signal.SIGTERM, process_holder.Handler)
  try:
    extra_popen_kwargs = {}
    if pipe_output_through_logger:
      extra_popen_kwargs['stderr'] = subprocess.PIPE
      extra_popen_kwargs['stdout'] = subprocess.PIPE
    p = subprocess.Popen(args, env=env, **extra_popen_kwargs)
    process_holder.process = p
    if pipe_output_through_logger:
      if file_only_logger:
        # Buffer the output so it can be flushed to the file logger below.
        out = cStringIO.StringIO()
        err = cStringIO.StringIO()
      else:
        out = log.out
        err = log.err
      ret_val = None
      # communicate() drains the pipes and waits for the child to exit, so
      # this loop normally completes in a single iteration.
      while ret_val is None:
        stdout, stderr = p.communicate()
        out.write(stdout)
        err.write(stderr)
        ret_val = p.returncode
      if file_only_logger:
        log.file_only_logger.debug(out.getvalue())
        log.file_only_logger.debug(err.getvalue())
    else:
      ret_val = p.wait()
  finally:
    # Restore the original signal handler.
    signal.signal(signal.SIGTERM, old_handler)

  if no_exit:
    return ret_val
  sys.exit(ret_val)
def Exec(args, env=None, no_exit=False, out_func=None, err_func=None,
         **extra_popen_kwargs):
  """Emulates the os.exec* set of commands, but uses subprocess.

  This executes the given command, waits for it to finish, and then exits
  this process with the exit code of the child process.

  Args:
    args: [str], The arguments to execute. The first argument is the command.
    env: {str: str}, An optional environment for the child process.
    no_exit: bool, True to just return the exit code of the child instead of
      exiting.
    out_func: str->None, a function to call with the stdout of the executed
      process. This can be e.g. log.file_only_logger.debug or log.out.write.
    err_func: str->None, a function to call with the stderr of the executed
      process. This can be e.g. log.file_only_logger.debug or log.err.write.
    **extra_popen_kwargs: Any additional kwargs will be passed through
      directly to subprocess.Popen

  Returns:
    int, The exit code of the child if no_exit is True, else this method does
    not return.

  Raises:
    PermissionError: if user does not have execute permission for cloud sdk
      bin files.
    InvalidCommandError: if the command entered cannot be found.
  """
  log.debug('Executing command: %s', args)
  # We use subprocess instead of execv because windows does not support
  # process replacement. The result of execv on windows is that a new
  # processes is started and the original is killed. When running in a shell,
  # the prompt returns as soon as the parent is killed even though the child
  # is still running. subprocess waits for the new process to finish before
  # returning.
  env = _GetToolEnv(env=env)
  process_holder = _ProcessHolder()
  # Forward SIGTERM to the child for the duration of the call; the context
  # manager restores the previous handler on exit.
  with _ReplaceSignal(signal.SIGTERM, process_holder.Handler):
    # Only create pipes for the streams the caller asked to capture.
    if out_func:
      extra_popen_kwargs['stdout'] = subprocess.PIPE
    if err_func:
      extra_popen_kwargs['stderr'] = subprocess.PIPE
    try:
      p = subprocess.Popen(args, env=env, **extra_popen_kwargs)
    except OSError as err:
      # Translate the two common spawn failures into friendlier exceptions;
      # anything unexpected propagates unchanged.
      if err.errno == errno.EACCES:
        raise PermissionError(err.strerror)
      elif err.errno == errno.ENOENT:
        raise InvalidCommandError(args[0])
      raise
    process_holder.process = p
    # communicate() waits for exit and drains any pipes, avoiding the
    # pipe-buffer deadlock that p.wait() alone can cause.
    stdout, stderr = p.communicate()
    if out_func:
      out_func(stdout)
    if err_func:
      err_func(stderr)
    ret_val = p.returncode

  if no_exit:
    return ret_val
  sys.exit(ret_val)
def Run(self, args):
  """Run the helper command.

  Acts as a git credential helper: validates the requested method and host,
  then either emits credentials (GET) or clears the OSX keychain cache
  (STORE).
  """
  if args.method not in GitHelper.METHODS:
    if args.ignore_unknown:
      return
    raise c_exc.ToolException(
        'Unexpected method [{meth}]. One of [{methods}] expected.'.format(
            meth=args.method, methods=', '.join(GitHelper.METHODS)))

  # Parse the key=value pairs git sends on stdin.
  info = self._ParseInput()
  credentialed_domains = [
      'code.google.com',
      'source.developers.google.com'
  ]
  # Users may extend the credentialed domain list via a core property
  # (comma-separated).
  extra = properties.VALUES.core.credentialed_hosted_repo_domains.Get()
  if extra:
    credentialed_domains.extend(extra.split(','))
  if info.get('host') not in credentialed_domains:
    if args.ignore_unknown:
      return
    raise c_exc.ToolException(
        'Unknown host [{host}].'.format(host=info.get('host')))

  if args.method == GitHelper.GET:
    account = properties.VALUES.core.account.Get()
    try:
      # Load, refresh, and re-store the credential so git always gets a
      # valid (non-expired) access token.
      cred = c_store.Load(account)
      c_store.Refresh(cred)
      c_store.Store(cred, account)
    except c_store.Error as e:
      sys.stderr.write(textwrap.dedent("""\
          ERROR: {error}
          Run 'gcloud auth login' to log in.
          """.format(error=str(e))))
      return

    self._CheckNetrc()

    # Emit the credentials in git-credential-helper output format.
    sys.stdout.write(textwrap.dedent("""\
        username={username}
        password={password}
        """).format(username=account, password=cred.access_token))
  elif args.method == GitHelper.STORE:
    # On OSX, there is an additional credential helper that gets called
    # before ours does. When we return a token, it gets cached there. Git
    # continues to get it from there first until it expires. That command
    # then fails, and the token is deleted, but it does not retry the
    # operation. The next command gets a new token from us and it starts
    # working again, for an hour. This erases our credential from the other
    # cache whenever 'store' is called on us. Because they are called first,
    # the token will already be stored there, and so we can successfully
    # erase it to prevent caching.
    if (platforms.OperatingSystem.Current() ==
        platforms.OperatingSystem.MACOSX):
      log.debug('Clearing OSX credential cache.')
      try:
        input_string = 'protocol={protocol}\nhost={host}\n\n'.format(
            protocol=info.get('protocol'), host=info.get('host'))
        log.debug('Calling erase with input:\n%s', input_string)
        p = subprocess.Popen(['git-credential-osxkeychain', 'erase'],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        (out, err) = p.communicate(input_string)
        if p.returncode:
          log.debug(
              'Failed to clear OSX keychain:\nstdout: {%s}\nstderr: {%s}',
              out, err)
      # pylint:disable=broad-except, This can fail and should only be done as
      # best effort.
      except Exception as e:
        log.debug('Failed to clear OSX keychain', exc_info=True)