def _clone(self):
    log.info("Cloning {}", self.clone_url)

    # git clone
    args = ['git', 'clone', '-q', self.clone_url, str(self.directory)]
    process = yield from asyncio.create_subprocess_exec(
        *args, stderr=asyncio.subprocess.PIPE)
    _, stderr = yield from process.communicate()
    return_code = yield from process.wait()
    if return_code != 0:
        print(stderr, file=sys.stderr)
        raise GitError("git clone returned %d" % return_code)

    # git checkout
    if self.checkout is not None:
        args = ['git', '-C', str(self.directory), 'checkout', '-q', self.checkout]
        process = yield from asyncio.create_subprocess_exec(
            *args, stderr=asyncio.subprocess.PIPE)
        _, stderr = yield from process.communicate()
        return_code = yield from process.wait()
        if return_code != 0:
            print(stderr, file=sys.stderr)
            raise GitError("git checkout returned %d" % return_code)

    # git log
    args = ['git', '-C', str(self.directory), 'log', '-n', '1', '--pretty=format:%h %H']
    process = yield from asyncio.create_subprocess_exec(
        *args, stderr=asyncio.subprocess.PIPE, stdout=asyncio.subprocess.PIPE)
    output, _ = yield from process.communicate()
    self.shorthash, self.hash = output.decode().strip().split()
    log.info("Checked out {} from {}", self.shorthash, self.clone_url)
def i3status_reader(self):
    def handle_i3status_payload(line):
        self.update(json.loads(line))
        self.repaint()

    if configure_i3_status:
        # use a custom i3status configuration to ensure we get json output
        cfg_file = tempfile.NamedTemporaryFile(mode='w+b')
        cfg_file.write(I3STATUS_CFG.encode('utf8'))
        cfg_file.flush()
        create = asyncio.create_subprocess_exec('i3status', '-c', cfg_file.name,
                                                stdout=asyncio.subprocess.PIPE)
    else:
        create = asyncio.create_subprocess_exec('i3status',
                                                stdout=asyncio.subprocess.PIPE)
    i3status = yield from create

    # forward first line, version information
    sys.stdout.write((yield from i3status.stdout.readline()).decode('utf8'))

    # forward second line, an opening list bracket (no idea why this exists)
    sys.stdout.write((yield from i3status.stdout.readline()).decode('utf8'))

    # third line is a json payload
    handle_i3status_payload((yield from i3status.stdout.readline()).decode('utf8'))

    while True:
        # all subsequent lines are json payload with a leading comma
        handle_i3status_payload(
            (yield from i3status.stdout.readline()).decode('utf8')[1:])
def _start_vnc(self):
    """
    Start a VNC server for this container
    """
    self._display = self._get_free_display_port()
    if shutil.which("Xvfb") is None or shutil.which("x11vnc") is None:
        raise DockerError("Please install Xvfb and x11vnc before using the VNC support")
    self._xvfb_process = yield from asyncio.create_subprocess_exec(
        "Xvfb", "-nolisten", "tcp", ":{}".format(self._display),
        "-screen", "0", self._console_resolution + "x16")
    # We pass a port for TCPV6 due to a crash in X11VNC if not here:
    # https://github.com/GNS3/gns3-server/issues/569
    self._x11vnc_process = yield from asyncio.create_subprocess_exec(
        "x11vnc", "-forever", "-nopw", "-shared",
        "-geometry", self._console_resolution,
        "-display", "WAIT:{}".format(self._display),
        "-rfbport", str(self.console),
        "-rfbportv6", str(self.console),
        "-noncache", "-listen", self._manager.port_manager.console_host)

    x11_socket = os.path.join("/tmp/.X11-unix/", "X{}".format(self._display))
    yield from wait_for_file_creation(x11_socket)
def store_problems(self, problems: Set[Problem]) -> None:
    with tempfile.NamedTemporaryFile() as problem_file:
        result = sorted([problem.to_json() for problem in problems],
                        key=lambda x: str(x))
        problem_file.write(json.dumps(result).encode())
        problem_file.flush()
        notes_proc = yield from asyncio.create_subprocess_exec(
            'git', 'notes', 'append', '-F', problem_file.name,
            **GIT_SUBPROCESS_KWARGS)
        yield from notes_proc.wait()
    if self.remote:
        push_proc = yield from asyncio.create_subprocess_exec(
            'git', 'push', '-f', '-q', self.remote, NOTES_REF,
            **GIT_SUBPROCESS_KWARGS)
        yield from push_proc.wait()
def test_pause_reading():
    code = '\n'.join((
        'import sys',
        'sys.stdout.write("x" * %s)' % size,
        'sys.stdout.flush()',
    ))

    connect_read_pipe = self.loop.connect_read_pipe

    @asyncio.coroutine
    def connect_read_pipe_mock(*args, **kw):
        transport, protocol = yield from connect_read_pipe(*args, **kw)
        transport.pause_reading = mock.Mock()
        transport.resume_reading = mock.Mock()
        return (transport, protocol)

    self.loop.connect_read_pipe = connect_read_pipe_mock

    proc = yield from asyncio.create_subprocess_exec(
        sys.executable, '-c', code,
        stdin=asyncio.subprocess.PIPE,
        stdout=asyncio.subprocess.PIPE,
        limit=limit,
        loop=self.loop)
    stdout_transport = proc._transport.get_pipe_transport(1)

    stdout, stderr = yield from proc.communicate()

    # The child process produced more than limit bytes of output,
    # the stream reader transport should pause the protocol to not
    # allocate too much memory.
    return (stdout, stdout_transport)
def _execute(self, subcommand, args, timeout=120, log_level=logging.INFO):
    if self.host_type is None:
        yield from self.check_vmware_version()
    vmrun_path = self.vmrun_path
    if not vmrun_path:
        vmrun_path = self.find_vmrun()

    command = [vmrun_path, "-T", self.host_type, subcommand]
    command.extend(args)
    command_string = " ".join([shlex.quote(c) for c in command])
    log.log(log_level, "Executing vmrun with command: {}".format(command_string))
    try:
        process = yield from asyncio.create_subprocess_exec(*command,
                                                            stdout=asyncio.subprocess.PIPE,
                                                            stderr=asyncio.subprocess.PIPE)
    except (OSError, subprocess.SubprocessError) as e:
        raise VMwareError("Could not execute vmrun: {}".format(e))

    try:
        stdout_data, _ = yield from asyncio.wait_for(process.communicate(), timeout=timeout)
    except asyncio.TimeoutError:
        raise VMwareError("vmrun has timed out after {} seconds!\n"
                          "Try to run {} in a terminal to see more information.\n\n"
                          "Make sure GNS3 and VMware run under the same user and "
                          "whitelist vmrun.exe in your antivirus.".format(timeout, command_string))

    if process.returncode:
        # vmrun prints errors on stdout
        vmrun_error = stdout_data.decode("utf-8", errors="ignore")
        raise VMwareError("vmrun has returned an error: {}\n"
                          "Try to run {} in a terminal to see more information.\n"
                          "Make sure GNS3 and VMware run under the same user.".format(vmrun_error, command_string))

    return stdout_data.decode("utf-8", errors="ignore").splitlines()
def expect_ok(cmd, desc="", env=None, stdout=None, stderr="log_on_error", cwd=None):
    if env is None:
        sub_env = None
    else:
        # Only partially override the existing environment
        sub_env = os.environ.copy()
        sub_env.update(env)

    p = yield from asyncio.create_subprocess_exec(
        *cmd,
        stdin=asyncio.subprocess.DEVNULL,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.STDOUT if stderr == "stdout" else asyncio.subprocess.PIPE,
        env=sub_env,
        cwd=cwd,
    )
    stdout_data, stderr_data = yield from p.communicate()
    # stderr_data is None when stderr was merged into stdout above
    stderr_text = stderr_data.decode("utf-8") if stderr_data is not None else ""
    if stderr_text != "" and (stderr == "log" or (stderr == "log_on_error" and p.returncode != 0)):
        logger.error(stderr_text)
    if not p.returncode == 0:
        raise exc_type(
            desc=desc,
            cmd=cmd,
            exit_code=p.returncode,
            stdout=stdout_data.decode("utf-8"),
            stderr=stderr_text,
        )
    else:
        return _convert_bytes(stdout_data, stdout)
def _plugin_job(plugin_context, module_type, module_fields, command, env,
                stdout, stderr):
    global DEBUG_PARALLEL_COUNT, DEBUG_PARALLEL_MAX

    definition = _get_plugin_definition(module_type, module_fields, command,
                                        plugin_context.plugin_paths)
    complete_env = _plugin_env(definition, module_fields)
    complete_env.update({
        'PERU_PLUGIN_CACHE': _plugin_cache_path(
            plugin_context, definition, module_fields)})
    complete_env.update(env)

    # Use a lock to protect the plugin cache. It would be unsafe for two jobs
    # to read/write to the same plugin cache dir at the same time. The lock
    # (and the cache dir) are both keyed off the module's "cache fields" as
    # defined by plugin.yaml. For plugins that don't define cacheable fields,
    # there is no cache dir (it's set to /dev/null) and the cache lock is a
    # no-op.
    cache_lock = _plugin_cache_lock(plugin_context, definition, module_fields)
    with (yield from cache_lock):
        # Use a semaphore to limit the number of jobs that can run in
        # parallel. Most plugin fetches hit the network, and for performance
        # reasons we don't want to fire off too many network requests at
        # once. See DEFAULT_PARALLEL_FETCH_LIMIT. This also lets the user
        # control parallelism with the --jobs flag.
        with (yield from plugin_context.parallelism_semaphore):
            DEBUG_PARALLEL_COUNT += 1
            DEBUG_PARALLEL_MAX = max(DEBUG_PARALLEL_COUNT, DEBUG_PARALLEL_MAX)

            proc = yield from asyncio.create_subprocess_exec(
                definition.executable_path,
                cwd=plugin_context.cwd,
                env=complete_env,
                stdout=stdout,
                stderr=stderr)
            output, _ = yield from proc.communicate()

            DEBUG_PARALLEL_COUNT -= 1

    if output is not None:
        output = output.decode('utf8')
    _throw_if_error(proc, definition.executable_path, output)
    return output
def execute(self, subcommand, args, timeout=60):
    # We use a lock to prevent parallel execution due to strange errors
    # reported by a user and reproduced by us.
    # https://github.com/GNS3/gns3-gui/issues/261
    with (yield from self._execute_lock):
        vboxmanage_path = self.vboxmanage_path
        if not vboxmanage_path:
            vboxmanage_path = self.find_vboxmanage()
        command = [vboxmanage_path, "--nologo", subcommand]
        command.extend(args)
        command_string = " ".join(command)
        log.info("Executing VBoxManage with command: {}".format(command_string))
        try:
            process = yield from asyncio.create_subprocess_exec(*command,
                                                                stdout=asyncio.subprocess.PIPE,
                                                                stderr=asyncio.subprocess.PIPE)
        except (OSError, subprocess.SubprocessError) as e:
            raise VirtualBoxError("Could not execute VBoxManage: {}".format(e))

        try:
            stdout_data, stderr_data = yield from asyncio.wait_for(process.communicate(), timeout=timeout)
        except asyncio.TimeoutError:
            raise VirtualBoxError("VBoxManage has timed out after {} seconds!".format(timeout))

        if process.returncode:
            vboxmanage_error = stderr_data.decode("utf-8", errors="ignore")
            raise VirtualBoxError("VirtualBox has returned an error: {}".format(vboxmanage_error))

        return stdout_data.decode("utf-8", errors="ignore").splitlines()
async def run_sigmac():
    sigmac = asyncio.create_subprocess_exec(
        sigmac_cmd, "-t", "es-qs", "-v", "-I", "-r", "rules/",
        stdout=asyncio.subprocess.PIPE,
    )
    print("* Launching sigmac")
    proc = await sigmac
    print("* sigmac launched with PID {}".format(proc.pid))

    cur_rule = None
    while True:
        line = await proc.stdout.readline()
        if not line:
            print("* sigmac finished")
            await queries.put((None, None))
            break
        else:
            strline = str(line, 'utf-8').rstrip()
            if strline.startswith(sigmac_processing_prefix):
                cur_rule = strline[len(sigmac_processing_prefix):]
            else:
                await queries.put((cur_rule, strline))

    await proc.wait()
    exitcode = proc.returncode
    print("* sigmac returned with exit code {}".format(exitcode))
    return exitcode
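# A consumer sketch for the queue that run_sigmac() above feeds; it assumes
# "queries" is an asyncio.Queue created elsewhere (the snippet itself relies
# on it) and drains (rule, query) pairs until the (None, None) sentinel.
async def drain_queries():
    while True:
        rule, query = await queries.get()
        if rule is None and query is None:
            break
        print("{}: {}".format(rule, query))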
def test_send_signal(self):
    # bpo-31034: Make sure that we get the default signal handler (killing
    # the process). The parent process may have decided to ignore SIGHUP,
    # and signal handlers are inherited.
    old_handler = signal.signal(signal.SIGHUP, signal.SIG_DFL)
    try:
        code = 'import time; print("sleeping", flush=True); time.sleep(3600)'
        args = [sys.executable, '-c', code]
        create = asyncio.create_subprocess_exec(*args,
                                                stdout=subprocess.PIPE,
                                                loop=self.loop)
        proc = self.loop.run_until_complete(create)

        async def send_signal(proc):
            # basic synchronization to wait until the program is sleeping
            line = await proc.stdout.readline()
            self.assertEqual(line, b'sleeping\n')

            proc.send_signal(signal.SIGHUP)
            returncode = await proc.wait()
            return returncode

        returncode = self.loop.run_until_complete(send_signal(proc))
        self.assertEqual(-signal.SIGHUP, returncode)
    finally:
        signal.signal(signal.SIGHUP, old_handler)
def start(self):
    """
    Starts the uBridge hypervisor process.
    """
    yield from self._check_ubridge_version()
    env = os.environ.copy()
    if sys.platform.startswith("win"):
        # add the Npcap directory to $PATH to force uBridge to use the Npcap
        # DLL instead of WinPcap (if installed)
        system_root = os.path.join(os.path.expandvars("%SystemRoot%"), "System32", "Npcap")
        if os.path.isdir(system_root):
            env["PATH"] = system_root + ';' + env["PATH"]
    try:
        command = self._build_command()
        log.info("starting ubridge: {}".format(command))
        self._stdout_file = os.path.join(self._working_dir, "ubridge.log")
        log.info("logging to {}".format(self._stdout_file))
        with open(self._stdout_file, "w", encoding="utf-8") as fd:
            self._process = yield from asyncio.create_subprocess_exec(*command,
                                                                      stdout=fd,
                                                                      stderr=subprocess.STDOUT,
                                                                      cwd=self._working_dir,
                                                                      env=env)
        log.info("ubridge started PID={}".format(self._process.pid))
    except (OSError, subprocess.SubprocessError) as e:
        ubridge_stdout = self.read_stdout()
        log.error("Could not start ubridge: {}\n{}".format(e, ubridge_stdout))
        raise UBridgeHypervisor("Could not start ubridge: {}\n{}".format(e, ubridge_stdout))
def do_capture(url, proxy, loop):
    result = CaptureResult(url)
    if result.status:
        return result

    start = time.monotonic()
    proc = yield from asyncio.create_subprocess_exec(
        *proxy.adjust_command([
            "isolate",
            "ISOL_RL_MEM=unlimited",
            "ISOL_RL_STACK=8388608",
            "PHANTOMJS_DISABLE_CRASH_DUMPS=1",
            "MALLOC_CHECK_=0",
            "phantomjs",
            "--local-url-access=no",
            "--load-images=false",
            pj_trace_redir,
            "--capture",
            result.original_url
        ]),
        stdin=subprocess.DEVNULL,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        loop=loop)
    stdout, stderr = yield from proc.communicate()
    elapsed = time.monotonic() - start
    result.set_result(proc.returncode, stdout, stderr, elapsed)
    return result
def run(self):
    self.log.debug("Started sending thread")
    while True:
        filename = yield from self.queue.get()
        if filename in (QUEUE_FINISHED, QUEUE_ERROR):
            break

        self.log.debug("Sending file {}".format(filename))
        # The tar used for sending data out needs to be as simple and as
        # featureless as possible. It will not be verified before untaring.
        tar_final_cmd = ["tar", "-cO", "--posix", "-C", self.base_dir,
                         filename]
        final_proc = yield from asyncio.create_subprocess_exec(
            *tar_final_cmd, stdout=self.backup_stdout)
        retcode = yield from final_proc.wait()
        if retcode >= 2:
            # handle only exit code 2 (tar fatal error) or greater
            # (call failed?)
            raise qubes.exc.QubesException(
                "ERROR: Failed to write the backup, out of disk space? "
                "Check console output or ~/.xsession-errors for details.")

        # Delete the file as we don't need it anymore
        self.log.debug("Removing file {}".format(filename))
        os.remove(os.path.join(self.base_dir, filename))
    self.log.debug("Finished sending thread")
def get_neterr_details(url, nnsp, *, loop):
    surl = urlsplit(url)
    cmd = ["firejail", "--netns=" + nnsp, "--", "neterr-details"]
    if surl.scheme == "https":
        cmd.extend(["--tls", "--alpn=h2:http/1.1"])
        port = 443
    elif surl.scheme == "http":
        port = 80
    else:
        raise RuntimeError("get_neterr_details: don't know how to handle "
                           "scheme " + surl.scheme)
    if surl.port is not None:
        port = surl.port

    cmd.extend([surl.hostname, str(port)])

    # create_subprocess_exec takes the argv unpacked, not as a list
    proc = yield from asyncio.create_subprocess_exec(
        *cmd, loop=loop,
        stdin=asyncio.subprocess.DEVNULL,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE)

    (stdout_data, stderr_data) = yield from proc.communicate()
    exitcode = yield from proc.wait()
    if exitcode != 0 or stderr_data != b"":
        raise RuntimeError("get_neterr_details: subprocess exit {}, "
                           "errors:\n{}"
                           .format(exitcode, stderr_data.decode("utf-8")))

    details = collections.defaultdict(list)
    for line in stdout_data.splitlines():
        # stdout_data is bytes, so split on a bytes separator
        k, _, v = line.partition(b":")
        details[k].append(v)
    return details
def test_read_all_from_pipe_reader(self):
    # See asyncio issue 168. This test is derived from the example
    # subprocess_attach_read_pipe.py, but we configure the
    # StreamReader's limit so that twice it is less than the size
    # of the data written. Also we must explicitly attach a child
    # watcher to the event loop.

    code = """\
import os, sys
fd = int(sys.argv[1])
os.write(fd, b'data')
os.close(fd)
"""
    rfd, wfd = os.pipe()
    args = [sys.executable, "-c", code, str(wfd)]

    pipe = open(rfd, "rb", 0)
    reader = asyncio.StreamReader(loop=self.loop, limit=1)
    protocol = asyncio.StreamReaderProtocol(reader, loop=self.loop)
    transport, _ = self.loop.run_until_complete(
        self.loop.connect_read_pipe(lambda: protocol, pipe))

    watcher = asyncio.SafeChildWatcher()
    watcher.attach_loop(self.loop)
    try:
        asyncio.set_child_watcher(watcher)
        create = asyncio.create_subprocess_exec(*args,
                                                pass_fds={wfd},
                                                loop=self.loop)
        proc = self.loop.run_until_complete(create)
        self.loop.run_until_complete(proc.wait())
    finally:
        asyncio.set_child_watcher(None)

    os.close(wfd)
    data = self.loop.run_until_complete(reader.read(-1))
    self.assertEqual(data, b"data")
def execute(self, subcommand, args, timeout=120, host_type=None):
    with (yield from self._execute_lock):
        vmrun_path = self.vmrun_path
        if not vmrun_path:
            vmrun_path = self.find_vmrun()
        if host_type is None:
            host_type = self.host_type

        command = [vmrun_path, "-T", host_type, subcommand]
        command.extend(args)
        command_string = " ".join(command)
        log.info("Executing vmrun with command: {}".format(command_string))
        try:
            process = yield from asyncio.create_subprocess_exec(*command,
                                                                stdout=asyncio.subprocess.PIPE,
                                                                stderr=asyncio.subprocess.PIPE)
        except (OSError, subprocess.SubprocessError) as e:
            raise VMwareError("Could not execute vmrun: {}".format(e))

        try:
            stdout_data, _ = yield from asyncio.wait_for(process.communicate(), timeout=timeout)
        except asyncio.TimeoutError:
            raise VMwareError("vmrun has timed out after {} seconds!".format(timeout))

        if process.returncode:
            # vmrun prints errors on stdout
            vmrun_error = stdout_data.decode("utf-8", errors="ignore")
            raise VMwareError("vmrun has returned an error: {}".format(vmrun_error))

        return stdout_data.decode("utf-8", errors="ignore").splitlines()
def Popen(args, **kwargs):
    kwargs.setdefault('encoding', 'utf8')
    shell = kwargs.pop('shell', None)
    if shell:
        return asyncio.create_subprocess_shell(args, **kwargs)
    else:
        return asyncio.create_subprocess_exec(*args, **kwargs)
def execute(self, subcommand, args, timeout=60):
    # We use a lock to prevent parallel execution due to strange errors
    # reported by a user and reproduced by us.
    # https://github.com/GNS3/gns3-gui/issues/261
    with (yield from self._execute_lock):
        vboxmanage_path = self.vboxmanage_path
        if not vboxmanage_path:
            vboxmanage_path = self.find_vboxmanage()
        command = [vboxmanage_path, "--nologo", subcommand]
        command.extend(args)
        log.debug("Executing VBoxManage with command: {}".format(command))
        try:
            vbox_user = self.config.get_section_config("VirtualBox").get("vbox_user")
            if vbox_user:
                # TODO: test & review this part
                # note the trailing space so the user name and the command
                # do not run together
                sudo_command = "sudo -i -u {} ".format(vbox_user) + " ".join(command)
                process = yield from asyncio.create_subprocess_shell(sudo_command,
                                                                     stdout=asyncio.subprocess.PIPE,
                                                                     stderr=asyncio.subprocess.PIPE)
            else:
                process = yield from asyncio.create_subprocess_exec(*command,
                                                                    stdout=asyncio.subprocess.PIPE,
                                                                    stderr=asyncio.subprocess.PIPE)
        except (OSError, subprocess.SubprocessError) as e:
            raise VirtualBoxError("Could not execute VBoxManage: {}".format(e))

        try:
            stdout_data, stderr_data = yield from asyncio.wait_for(process.communicate(), timeout=timeout)
        except asyncio.TimeoutError:
            raise VirtualBoxError("VBoxManage has timed out after {} seconds!".format(timeout))

        if process.returncode:
            # only the first line of the output is useful
            vboxmanage_error = stderr_data.decode("utf-8", errors="ignore")
            raise VirtualBoxError("VirtualBox has returned an error: {}".format(vboxmanage_error))

        return stdout_data.decode("utf-8", errors="ignore").splitlines()
def run_pylint(self, to_analize: analize_sub) -> plugin.Task:
    while True:
        event = yield from to_analize.get()
        action = event.value.get("type", None)
        # resolve the file name up front so every branch can use it
        filename = event.key.key
        if action in ("created", "modified"):
            logger.debug("Analyzing %s", event)
            proc = yield from asyncio.create_subprocess_exec(
                "pylint", filename,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT)
            try:
                stdout, _ = yield from proc.communicate()
            except:
                proc.kill()
                yield from proc.wait()
                raise
            exitcode = yield from proc.wait()
            state = self.new_state(key=filename, exitcode=exitcode,
                                   stdout=stdout.decode("utf-8", "ignore"))
            logger.debug("Putting state %r", state)
            yield from self.hub.put(state)
        elif action == "deleted":
            state = self.new_state(key=filename)
            yield from self.hub.put(state)
        else:
            logger.error("Unknown file action %s", action)
def create_disk(self, qemu_img, path, options):
    """
    Create a qemu disk with qemu-img

    :param qemu_img: qemu-img binary path
    :param path: Image path
    :param options: Disk image creation options
    """
    try:
        img_format = options.pop("format")
        img_size = options.pop("size")

        if not os.path.isabs(path):
            directory = self.get_images_directory()
            os.makedirs(directory, exist_ok=True)
            path = os.path.join(directory, os.path.basename(path))

        if os.path.exists(path):
            raise QemuError("Could not create disk image {}: it already exists".format(path))

        command = [qemu_img, "create", "-f", img_format]
        for option in sorted(options.keys()):
            command.extend(["-o", "{}={}".format(option, options[option])])
        command.append(path)
        command.append("{}M".format(img_size))

        process = yield from asyncio.create_subprocess_exec(*command)
        yield from process.wait()
    except (OSError, subprocess.SubprocessError) as e:
        raise QemuError("Could not create disk image {}: {}".format(path, e))
def test_terminate(self):
    args = self.PROGRAM_BLOCKED
    create = asyncio.create_subprocess_exec(*args, loop=self.loop)
    proc = self.loop.run_until_complete(create)
    proc.terminate()
    returncode = self.loop.run_until_complete(proc.wait())
    self.assertEqual(-signal.SIGTERM, returncode)
def ffmpegd(bitrate):
    global queue
    ffmpeg = asyncio.create_subprocess_exec(
        "ffmpeg", "-v", "repeat+info",
        "-i", "/dev/video0",
        "-s", "720x576",
        "-r", "15",
        "-vcodec", "libx264",
        "-f", "flv",
        "-b:v", "300k",
        "-preset", "fast",
        "rtmp://192.168.2.6:1935/video/1",
        stderr=asyncio.subprocess.PIPE
    )
    proc = yield from ffmpeg
    while True:
        try:
            data = yield from proc.stderr.readline()
            if not data:
                # EOF: ffmpeg exited, stop polling its stderr
                break
            if queue.full():
                yield from queue.get()
            yield from queue.put(data.decode("utf8").rstrip())
            print(">>" + str(data.rstrip()))
        except:
            break
    yield from proc.wait()
def start(self):
    """
    Starts the Dynamips hypervisor process.
    """
    self._command = self._build_command()
    env = os.environ.copy()
    if sys.platform.startswith("win"):
        # add the Npcap directory to $PATH to force Dynamips to use the Npcap
        # DLL instead of WinPcap (if installed)
        system_root = os.path.join(os.path.expandvars("%SystemRoot%"), "System32", "Npcap")
        if os.path.isdir(system_root):
            env["PATH"] = system_root + ';' + env["PATH"]
    try:
        log.info("Starting Dynamips: {}".format(self._command))
        self._stdout_file = os.path.join(self.working_dir, "dynamips_i{}_stdout.txt".format(self._id))
        log.info("Dynamips process logging to {}".format(self._stdout_file))
        with open(self._stdout_file, "w", encoding="utf-8") as fd:
            self._process = yield from asyncio.create_subprocess_exec(*self._command,
                                                                      stdout=fd,
                                                                      stderr=subprocess.STDOUT,
                                                                      cwd=self._working_dir,
                                                                      env=env)
        log.info("Dynamips process started PID={}".format(self._process.pid))
        self._started = True
    except (OSError, subprocess.SubprocessError) as e:
        log.error("Could not start Dynamips: {}".format(e))
        raise DynamipsError("Could not start Dynamips: {}".format(e))
def test_send_signal(self):
    args = PROGRAM_BLOCKED
    create = asyncio.create_subprocess_exec(*args, loop=self.loop)
    proc = self.loop.run_until_complete(create)
    proc.send_signal(signal.SIGHUP)
    returncode = self.loop.run_until_complete(proc.wait())
    self.assertEqual(-signal.SIGHUP, returncode)
def start(self):
    """
    Starts the VPCS process.
    """
    yield from self._check_requirements()
    if not self.is_running():
        if not self._ethernet_adapter.get_nio(0):
            raise VPCSError("This VPCS instance must be connected in order to start")

        command = self._build_command()
        try:
            log.info("Starting VPCS: {}".format(command))
            self._vpcs_stdout_file = os.path.join(self.working_dir, "vpcs.log")
            log.info("Logging to {}".format(self._vpcs_stdout_file))
            flags = 0
            if sys.platform.startswith("win32"):
                flags = subprocess.CREATE_NEW_PROCESS_GROUP
            with open(self._vpcs_stdout_file, "w", encoding="utf-8") as fd:
                self.command_line = ' '.join(command)
                self._process = yield from asyncio.create_subprocess_exec(*command,
                                                                          stdout=fd,
                                                                          stderr=subprocess.STDOUT,
                                                                          cwd=self.working_dir,
                                                                          creationflags=flags)
                monitor_process(self._process, self._termination_callback)
            log.info("VPCS instance {} started PID={}".format(self.name, self._process.pid))
            self._started = True
            self.status = "started"
        except (OSError, subprocess.SubprocessError) as e:
            vpcs_stdout = self.read_vpcs_stdout()
            log.error("Could not start VPCS {}: {}\n{}".format(self.vpcs_path, e, vpcs_stdout))
            raise VPCSError("Could not start VPCS {}: {}\n{}".format(self.vpcs_path, e, vpcs_stdout))
def mycoro():
    process = yield from asyncio.create_subprocess_exec(
        sys.executable or 'python', '-c', 'print(input())',
        stdout=subprocess.PIPE,
        stdin=subprocess.PIPE)
    received_stdout, received_stderr = yield from process.communicate(b'Hello async world!\n')
    yield from process.wait()
    assert process.returncode == 0
    assert received_stdout.strip() == b'Hello async world!'
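# A minimal driver for the generator-style mycoro() above, assuming it is
# decorated with @asyncio.coroutine; on current Python you would rewrite
# mycoro() with async/await and call asyncio.run(mycoro()) instead.
import asyncio

loop = asyncio.get_event_loop()
loop.run_until_complete(mycoro())
loop.close()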
def execute(cls, *args):
    proc = yield from asyncio.create_subprocess_exec(
        *args,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.STDOUT)
    yield from proc.wait()
    data = yield from proc.stdout.read()
    return data.decode('utf-8').strip()
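# A hedged usage sketch for the execute() classmethod above; the owning class
# name "Tools" is an assumption. Because stderr is redirected to STDOUT, the
# returned string carries both streams.
@asyncio.coroutine
def show_git_version():
    version = yield from Tools.execute('git', '--version')
    print(version)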
def execute(command, cwd=None, env=None, log_output=None):
    """Execute an external shell command.

    :param cwd: current working directory
    :param env: environment
    :param log_output:
        * opened log file or path to this file, or
        * None if output should be redirected to stdout/stderr
    """
    env = env or os.environ
    close = False
    if log_output:
        if isinstance(log_output, str):
            close = True
            log_output = open(log_output, 'a', encoding='utf-8')
        log_output.write('\n# command executed on {}'.format(datetime.now().isoformat()))
        log_output.write('\n$ {}\n'.format(command))
        log_output.flush()
    if isinstance(command, str):
        command = split(command)
    log.debug('invoking: %s in %s', command, cwd)
    create = asyncio.create_subprocess_exec(*command,
                                            stdout=log_output,
                                            stderr=log_output,
                                            cwd=cwd, env=env)
    proc = yield from create
    yield from proc.wait()
    if close:
        log_output.close()
    return proc.returncode
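# A hedged usage sketch for the execute() helper above, run from another
# yield-from coroutine; the command and log path are placeholders.
@asyncio.coroutine
def demo_execute():
    returncode = yield from execute('ls -l', cwd='/tmp',
                                    log_output='/tmp/build.log')
    print('exit code:', returncode)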
def mycoro():
    process = yield from asyncio.create_subprocess_exec(
        sys.executable or 'python', '-c', 'print("Hello async world!")',
        stdout=subprocess.PIPE)
    received_stdout = yield from process.stdout.readexactly(len(b'Hello async world!\n'))
    yield from process.wait()
    assert process.returncode == 0
    assert received_stdout.strip() == b'Hello async world!'
def _call_lines(self, args, env, line_callback, call_id):
    self.logger.debug("%s: invoking %s", call_id, args)
    proc = yield from asyncio.create_subprocess_exec(
        *args,
        stderr=subprocess.PIPE,
        stdin=subprocess.DEVNULL,
        env=env,
    )
    self.logger.debug("%s: started", call_id)
    try:
        potential_information = bytearray()
        try:
            while True:
                line = yield from readuntil_either(proc.stderr, b"\r\n")
                self.logger.debug("%s: << %r", call_id, line.strip())
                # detect and raise errors early
                self._raise_common_errors(line)
                potential_information.extend(line)
                line_callback(line.decode())
        except asyncio.streams.IncompleteReadError as exc:
            if LOCK_TIMEOUT_FINGERPRINT_1_0 in exc.partial:
                raise backend.RepositoryLocked() from None
            potential_information.extend(exc.partial)
        yield from proc.wait()
    except:
        self.logger.debug("%s: failure / cancellation", call_id)
        if proc.returncode is None:
            self.logger.debug("%s: terminating child", call_id)
            try:
                proc.terminate()
            except ProcessLookupError:
                pass
            self.logger.debug("%s: waiting for child", call_id)
            yield from proc.wait()
        raise
    self.logger.debug("%s: finished", call_id)
    if proc.returncode != 0:
        self.logger.debug("%s: non-zero exit code", call_id)
        self._raise_common_errors(potential_information)
        raise subprocess.CalledProcessError(proc.returncode, args,
                                            stderr=potential_information)
def coro():
    proc = yield from asyncio.create_subprocess_exec(
        'cat',
        stdout=subprocess.PIPE,
        stdin=subprocess.PIPE,
        stderr=subprocess.PIPE,
        loop=glib_loop)

    proc.stdin.write(b'hey\n')
    yield from proc.stdin.drain()

    proc.stdin.close()
    out = yield from proc.stdout.read()
    assert out == b'hey\n'

    yield from proc.wait()
def test_send_signal(self):
    code = 'import time; print("sleeping", flush=True); time.sleep(3600)'
    args = [sys.executable, b'-W', b'ignore', b'-c', code]
    create = asyncio.create_subprocess_exec(*args, stdout=subprocess.PIPE)
    proc = self.loop.run_until_complete(create)

    async def send_signal(proc):
        # basic synchronization to wait until the program is sleeping
        line = await proc.stdout.readline()
        self.assertEqual(line, b'sleeping\n')

        proc.send_signal(signal.SIGHUP)
        returncode = (await proc.wait())
        return returncode

    returncode = self.loop.run_until_complete(send_signal(proc))
    self.assertEqual(-signal.SIGHUP, returncode)
def run_server(self, readiness, done):
    self.process = yield from asyncio.create_subprocess_exec(
        *SERVER,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE)

    while True:
        l = yield from self.process.stdout.readline()
        if l == b"":
            raise RuntimeError(
                "OSCOAP server process terminated during startup.")
        if l == b'Plugtest server ready.\n':
            break

    readiness.set_result(True)

    out, err = yield from self.process.communicate()
    done.set_result((out, err))
def launch_second_instances():
    temp_dir = sys.argv[1]
    if not os.path.exists(temp_dir):
        os.makedirs(temp_dir)

    job1 = subprocess.Popen([
        sys.executable, __file__,
        os.path.join(temp_dir, "one"),
        os.path.join(temp_dir, "two"),
        os.path.join(temp_dir, "three")
    ])

    loop = asyncio.get_event_loop()
    job2 = asyncio.create_subprocess_exec(
        sys.executable, __file__,
        os.path.join(temp_dir, "four"),
        os.path.join(temp_dir, "five"),
        os.path.join(temp_dir, "six"))
    loop.run_until_complete(job2)

    job1.wait()
def sandbox(self, script, phpbin="php7.0"):
    if not os.path.isfile(script):
        raise Exception("Sample not found: {0}".format(script))
    try:
        cmd = [phpbin, "sandbox.php", script]
        self.proc = yield from asyncio.create_subprocess_exec(*cmd, stdout=PIPE)
        self.stdout_value = b''
        yield from asyncio.wait_for(self.read_process(), timeout=3)
    except Exception as e:
        try:
            self.proc.kill()
        except Exception:
            pass
        print("Error executing the sandbox: {}".format(e))
        # raise e
    return {'stdout': self.stdout_value.decode('utf-8')}
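# read_process() is referenced above but not shown; a minimal method sketch
# under the assumption that it simply accumulates the sandbox's stdout into
# self.stdout_value until EOF.
@asyncio.coroutine
def read_process(self):
    while True:
        chunk = yield from self.proc.stdout.read(1024)
        if not chunk:
            break
        self.stdout_value += chunk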
def test_002_list_keys(self):
    self.genkey()
    p = self.loop.run_until_complete(
        asyncio.create_subprocess_exec(
            'gpg', '--with-colons', '-K', self.key_uid,
            env=self.test_environ,
            stderr=subprocess.PIPE,
            stdout=subprocess.PIPE))
    stdout, stderr = self.loop.run_until_complete(p.communicate())
    if p.returncode:
        self.fail('generated key not found: {}{}'.format(
            stdout.decode(), stderr.decode()))
    self.assertIn(b'sec:u:', stdout)
    self.assertIn(self.key_uid.encode(), stdout)
def test_210_time_sync(self):
    """Test time synchronization mechanism"""
    if self.template.startswith('whonix-'):
        self.skipTest('qvm-sync-clock disabled for Whonix VMs')
    self.loop.run_until_complete(asyncio.wait([
        self.testvm1.start(),
        self.testvm2.start(),
    ]))
    start_time = subprocess.check_output(['date', '-u', '+%s'])
    try:
        self.app.clockvm = self.testvm1
        self.app.save()
        # break vm and dom0 time, to check if qvm-sync-clock would fix it
        subprocess.check_call(
            ['sudo', 'date', '-s', '2001-01-01T12:34:56'],
            stdout=subprocess.DEVNULL)
        self.loop.run_until_complete(
            self.testvm2.run_for_stdio('date -s 2001-01-01T12:34:56',
                                       user='******'))
        self.loop.run_until_complete(
            self.testvm2.run_for_stdio('qvm-sync-clock',
                                       user='******'))
        p = self.loop.run_until_complete(
            asyncio.create_subprocess_exec(
                'sudo', 'qvm-sync-clock',
                stdout=asyncio.subprocess.DEVNULL))
        self.loop.run_until_complete(p.wait())
        self.assertEqual(p.returncode, 0)
        vm_time, _ = self.loop.run_until_complete(
            self.testvm2.run_for_stdio('date -u +%s'))
        self.assertAlmostEqual(int(vm_time), int(start_time), delta=30)
        dom0_time = subprocess.check_output(['date', '-u', '+%s'])
        self.assertAlmostEqual(int(dom0_time), int(start_time), delta=30)
    except:
        # reset time to some approximation of the real time
        subprocess.Popen(
            ["sudo", "date", "-u", "-s", "@" + start_time.decode()])
        raise
    finally:
        self.app.clockvm = None
def start(self):
    log.debug(
        "SubProcess.start(): create_subprocess_exec...",
        extra={"task": self.defname},
    )
    if sys.platform == "win32":
        # To prevent engines opening console window
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
    else:
        startupinfo = None

    create = asyncio.create_subprocess_exec(
        *self.argv,
        stdin=asyncio.subprocess.PIPE,
        stdout=asyncio.subprocess.PIPE,
        startupinfo=startupinfo,
        env=self.env,
        cwd=self.cwd)
    try:
        self.proc = yield from asyncio.wait_for(create, TIME_OUT_SECOND)
        self.pid = self.proc.pid
        # print(self.pid, self.path)
        if self.lowPriority:
            proc = psutil.Process(self.pid)
            if sys.platform == "win32":
                niceness = psutil.BELOW_NORMAL_PRIORITY_CLASS
            else:
                niceness = 15  # The higher, the lower the priority
            if psutil.__version__ >= "2.0.0":
                proc.nice(niceness)
            else:
                proc.set_nice(niceness)
        self.read_stdout_task = create_task(self.read_stdout(self.proc.stdout))
        self.write_task = None
    except asyncio.TimeoutError:
        log.warning("TimeoutError", extra={"task": self.defname})
        raise
    except GLib.GError:
        log.warning("GLib.GError", extra={"task": self.defname})
        raise
    except Exception:
        e = sys.exc_info()[0]
        log.warning("%s" % e, extra={"task": self.defname})
        raise
def test_popen_error(self):
    # Issue #24763: check that the subprocess transport is closed
    # when BaseSubprocessTransport fails
    if sys.platform == 'win32':
        target = 'asyncio.windows_utils.Popen'
    else:
        target = 'subprocess.Popen'
    with mock.patch(target) as popen:
        exc = ZeroDivisionError
        popen.side_effect = exc

        create = asyncio.create_subprocess_exec(sys.executable, '-c',
                                                'pass', loop=self.loop)
        with warnings.catch_warnings(record=True) as warns:
            with self.assertRaises(exc):
                self.loop.run_until_complete(create)
            self.assertEqual(warns, [])
def execute(commandline, print_only=False):
    if print_only:
        print(commandline)
        return

    # To make sure we have no discrepancy between std{out,err} and the
    # logged data from the client process
    create = asyncio.create_subprocess_exec(*commandline.split(" "),
                                            stdout=asyncio.subprocess.PIPE,
                                            stderr=asyncio.subprocess.PIPE)
    proc = yield from create
    tasks = [asyncio.ensure_future(log_stream(proc.stdout)),
             asyncio.ensure_future(log_stream(proc.stderr)),
             asyncio.ensure_future(proc.wait())]
    yield from asyncio.wait(tasks)
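# log_stream() is referenced above but not shown; a minimal sketch under the
# assumption that it forwards each decoded line from the pipe to the logging
# module until EOF.
@asyncio.coroutine
def log_stream(stream):
    while True:
        line = yield from stream.readline()
        if not line:
            break
        logging.info(line.decode().rstrip())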
def __init__(self):
    super().__init__()
    # Always start with a fresh state
    try:
        os.remove("ircd.db")
    except FileNotFoundError:
        pass
    subprocess.run([
        "oragono", "mkcerts",
        "--conf", os.getcwd() + "/../tests/end_to_end/ircd.yaml"
    ])
    self.create = asyncio.create_subprocess_exec(
        "oragono", "run",
        "--conf", os.getcwd() + "/../tests/end_to_end/ircd.yaml",
        stderr=asyncio.subprocess.PIPE)
def test_pipe_to_log(context, event_loop):
    cmd = r""">&2 echo "foo" && echo "bar" && exit 0"""
    proc = event_loop.run_until_complete(
        asyncio.create_subprocess_exec("bash", "-c", cmd,
                                       stdout=PIPE, stderr=PIPE, stdin=None))
    tasks = []
    with swlog.get_log_filehandle(context) as log_fh:
        tasks.append(swlog.pipe_to_log(proc.stderr, filehandles=[log_fh]))
        tasks.append(swlog.pipe_to_log(proc.stdout, filehandles=[log_fh]))
        event_loop.run_until_complete(asyncio.wait(tasks))
        event_loop.run_until_complete(proc.wait())
    log_file = swlog.get_log_filename(context)
    assert read(log_file) in ("foo\nbar\n", "bar\nfoo\n")
def _pref_set(self, name, value, valid=True):
    cmd = ['qvm-prefs']
    if value != '-D':
        cmd.append('--')
    cmd.extend((self.testvm.name, name, value))
    p = yield from asyncio.create_subprocess_exec(
        *cmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE)
    (stdout, stderr) = yield from p.communicate()
    if valid:
        self.assertEqual(p.returncode, 0,
                         "qvm-prefs .. '{}' '{}' failed: {}{}".format(
                             name, value, stdout, stderr))
    else:
        self.assertNotEqual(p.returncode, 0,
                            "qvm-prefs should reject value '{}' for "
                            "property '{}'".format(value, name))
def run_service(self, service, source=None, user=None,
                filter_esc=False, autostart=False, gui=False, **kwargs):
    '''Run service on this VM

    :param str service: service name
    :param qubes.vm.qubesvm.QubesVM source: source domain as presented to
        this VM
    :param str user: username to run service as
    :param bool filter_esc: filter escape sequences to protect terminal
        emulator
    :param bool autostart: if :py:obj:`True`, machine will be started if
        it is not running
    :param bool gui: when autostarting, also start gui daemon
    :rtype: asyncio.subprocess.Process

    .. note::
        User ``root`` is redefined to ``SYSTEM`` in the Windows agent code
    '''
    # pylint: disable=unused-argument
    source = 'dom0' if source is None else self.app.domains[source].name
    if filter_esc:
        raise NotImplementedError(
            'filter_esc=True not supported on calls to dom0')
    if user is None:
        user = '******'
    yield from self.fire_event_async('domain-cmd-pre-run', pre_event=True,
                                     start_guid=gui)
    if user != 'root':
        cmd = ['runuser', '-u', user, '--']
    else:
        cmd = []
    cmd.extend([
        qubes.config.system_path['qrexec_rpc_multiplexer'],
        service,
        source,
        'name', self.name,
    ])
    return (yield from asyncio.create_subprocess_exec(*cmd, **kwargs))
def cancel_wait():
    proc = yield from asyncio.create_subprocess_exec(*PROGRAM_BLOCKED,
                                                     loop=self.loop)

    # Create an internal future waiting on the process exit
    task = self.loop.create_task(proc.wait())
    self.loop.call_soon(task.cancel)
    try:
        yield from task
    except asyncio.CancelledError:
        pass

    # Cancel the future
    task.cancel()

    # Kill the process and wait until it is done
    proc.kill()
    yield from proc.wait()
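# A hedged note on driving cancel_wait() above: the snippet reads like a
# closure inside a CPython test case, so the enclosing test would run it as
#
#     self.loop.run_until_complete(cancel_wait())
#
# with PROGRAM_BLOCKED being a command that never exits on its own, e.g.
# [sys.executable, '-c', 'import time; time.sleep(3600)'] in the CPython
# test suite.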
async def handle_request(self, reader, writer):
    req_host, req_port = writer.get_extra_info('peername')
    peername = f'{req_host}:{req_port}'
    _logger.info(f'Connection from {peername}')
    data = await reader.readline()
    nw, port, worker_loop = data.split()
    num_workers = int(nw) or os.cpu_count()
    _logger.info(f'Starting up {num_workers} processors for {peername}')

    # start processors that will connect back to the remote server
    asyncio.gather(*[asyncio.create_subprocess_exec(
        sys.executable, '-m', 'distex.processor',
        '-H', req_host,
        '-p', port,
        '-l', worker_loop,
        stdout=None, stderr=None,
        loop=self._loop) for _ in range(num_workers)])
    writer.close()
def handle_exists(self, location, is_directory):
    logging.info("Spotted new start script")
    if self.active:
        logging.error("Already started a script")
        return

    log_file = os.path.join(log_directory, 'job.out')
    err_file = os.path.join(log_directory, 'job.err')
    target_directory = os.path.join(output_directory, 'run')
    try:
        os.makedirs(target_directory)
    except FileExistsError:
        pass

    with tarfile.open(location) as tar:
        for name in tar.getnames():
            if not os.path.abspath(os.path.join(
                    target_directory, name)).startswith(target_directory):
                logging.error(
                    "This archive contains unsafe filenames: %s %s" %
                    (os.path.abspath(os.path.join(target_directory, name)),
                     target_directory))
                return
        tar.extractall(path=target_directory)

    location = os.path.join(target_directory, start_script)
    if not os.path.exists(location):
        logging.error("This archive is missing a %s" % start_script)

    try:
        self.process = asyncio.create_subprocess_exec(
            *["/usr/bin/python", location],
            stdout=open(log_file, 'w'),
            stderr=open(err_file, 'w'),
            cwd=target_directory)
        process = yield from self.process
        asyncio.ensure_future(process.wait()).add_done_callback(self._exit)
        self.active = True
    except Exception as e:
        logging.error("Exception raised launching user script: %s" % str(e))
        self._loop.call_soon_threadsafe(partial(self._exit, None))
def build(versions_file, *, loop):
    with open(versions_file) as f:
        config = yaml.load(f.read())
    procs = []
    for version_map in config['versions']:
        args = shlex.split(
            'make docker-build '
            'IMAGE_NAME={image_name} '
            'KAFKA_VERSION={kafka} '
            'SCALA_VERSION={scala}'.format(
                image_name=config['image_name'], **version_map))
        proc = yield from asyncio.create_subprocess_exec(*args, loop=loop)
        procs.append(proc.wait())
    return (yield from asyncio.gather(*procs, loop=loop))
def async_check_ha_config_file(hass):
    """Check if Home Assistant configuration file is valid.

    This method is a coroutine.
    """
    proc = yield from asyncio.create_subprocess_exec(
        sys.executable, '-m', 'homeassistant', '--script',
        'check_config', '--config', hass.config.config_dir,
        stdout=asyncio.subprocess.PIPE, loop=hass.loop)

    # Wait for the subprocess to exit
    stdout_data, dummy = yield from proc.communicate()
    result = yield from proc.wait()

    if not result:
        return None

    return re.sub(r'\033\[[^m]*m', '', str(stdout_data, 'utf-8'))
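# A hedged usage sketch for async_check_ha_config_file() above, awaited from
# another coroutine: a None result means the configuration checked out clean,
# anything else is the stripped error report text.
@asyncio.coroutine
def report_config_errors(hass):
    errors = yield from async_check_ha_config_file(hass)
    if errors is None:
        print('configuration OK')
    else:
        print(errors)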
def _spawn(bot, event, *args):
    """Execute a generic command"""
    config = bot.get_config_suboption(event.conv_id, "spawn")
    cmd_config = config["commands"][event.command_name.lower()]

    home_env = cmd_config.get("home", config.get("home"))
    if home_env:
        os.environ["HOME"] = home_env

    executable = cmd_config.get("command")
    if not executable:
        yield from bot.coro_send_message(event.conv_id, "Not configured")
        return
    if cmd_config.get("allow_args"):
        executable = executable + list(args)
    executable = tuple(executable)

    logger.info("%s executing: %s", event.user.full_name, executable)

    environment = {
        'HANGOUT_USER_CHATID': event.user_id.chat_id,
        'HANGOUT_USER_FULLNAME': event.user.full_name,
        'HANGOUT_CONV_ID': event.conv_id,
        'HANGOUT_CONV_TAGS': ','.join(
            bot.tags.useractive(event.user_id.chat_id, event.conv_id)),
    }
    environment.update(dict(os.environ))

    proc = yield from asyncio.create_subprocess_exec(*executable,
                                                     stdout=PIPE,
                                                     stderr=PIPE,
                                                     env=environment)
    (stdout_data, stderr_data) = yield from proc.communicate()

    stdout_str = stdout_data.decode().rstrip()
    stderr_str = stderr_data.decode().rstrip()

    if len(stderr_str) > 0:
        yield from bot.coro_send_to_user_and_conversation(
            event.user.id_.chat_id, event.conv_id, stderr_str, stdout_str)
    else:
        yield from bot.coro_send_message(event.conv_id, stdout_str)
def _get_events_reader(self, vm=None) -> (asyncio.StreamReader, callable):
    '''Make connection to qubesd and return stream to read events from

    :param vm: Specific VM for which events should be handled, use None
        to handle events from all VMs (and non-VM objects)
    :return: stream to read events from and a cleanup function
        (call it to terminate qubesd connection)'''
    if vm is not None:
        dest = vm.name
    else:
        dest = 'dom0'

    if self.app.qubesd_connection_type == 'socket':
        reader, writer = yield from asyncio.open_unix_connection(
            qubesadmin.config.QUBESD_SOCKET)
        writer.write(b'dom0\0')  # source
        writer.write(self._api_method.encode() + b'\0')  # method
        writer.write(dest.encode('ascii') + b'\0')  # dest
        writer.write(b'\0')  # arg
        writer.write_eof()

        def cleanup_func():
            '''Close connection to qubesd'''
            writer.close()
    elif self.app.qubesd_connection_type == 'qrexec':
        proc = yield from asyncio.create_subprocess_exec(
            'qrexec-client-vm', dest, self._api_method,
            stdin=subprocess.PIPE, stdout=subprocess.PIPE)
        proc.stdin.write_eof()
        reader = proc.stdout

        def cleanup_func():
            '''Close connection to qubesd'''
            try:
                proc.kill()
            except ProcessLookupError:
                pass
    else:
        raise NotImplementedError('Unsupported qubesd connection type: ' +
                                  self.app.qubesd_connection_type)
    return reader, cleanup_func
def start(self):
    """
    Starts the VPCS process.
    """
    yield from self._check_requirements()
    if not self.is_running():
        nio = self._ethernet_adapter.get_nio(0)
        command = self._build_command()
        try:
            log.info("Starting VPCS: {}".format(command))
            self._vpcs_stdout_file = os.path.join(self.working_dir, "vpcs.log")
            log.info("Logging to {}".format(self._vpcs_stdout_file))
            flags = 0
            if sys.platform.startswith("win32"):
                flags = subprocess.CREATE_NEW_PROCESS_GROUP
            with open(self._vpcs_stdout_file, "w", encoding="utf-8") as fd:
                self.command_line = ' '.join(command)
                self._process = yield from asyncio.create_subprocess_exec(*command,
                                                                          stdout=fd,
                                                                          stderr=subprocess.STDOUT,
                                                                          cwd=self.working_dir,
                                                                          creationflags=flags)
                monitor_process(self._process, self._termination_callback)

            yield from self._start_ubridge()
            if nio:
                yield from self.add_ubridge_udp_connection("VPCS-{}".format(self._id),
                                                           self._local_udp_tunnel[1],
                                                           nio)

            yield from self.start_wrap_console()

            log.info("VPCS instance {} started PID={}".format(self.name, self._process.pid))
            self._started = True
            self.status = "started"
        except (OSError, subprocess.SubprocessError) as e:
            vpcs_stdout = self.read_vpcs_stdout()
            log.error("Could not start VPCS {}: {}\n{}".format(self._vpcs_path(), e, vpcs_stdout))
            raise VPCSError("Could not start VPCS {}: {}\n{}".format(self._vpcs_path(), e, vpcs_stdout))
def _ensure_path_and_version(self):
    if self._whatweb_path:
        return

    is_found = False
    for whatweb_path in self._search_path:
        proc = None
        try:
            proc = yield from asyncio.create_subprocess_exec(
                whatweb_path, '--version',
                stdout=asyncio.subprocess.PIPE)
            while True:
                line = yield from proc.stdout.readline()
                line = line.decode('utf8')
                match_info = regex_whatweb_banner.match(line)
                if match_info is None:
                    # give up on this candidate once its output is exhausted
                    if proc.stdout.at_eof():
                        break
                    continue
                is_found = True
                self._whatweb_path = whatweb_path
                versions = match_info.groups()
                if len(versions) == 2:
                    self._version_info = _VersionInfo(
                        major=int(versions[0]),
                        minor=int(versions[1]))
                else:
                    self._version_info = _VersionInfo(
                        major=int(versions[0]),
                        minor=int(versions[1]),
                        micro=int(versions[2]))
                break
        except:
            pass
        else:
            if is_found:
                break
        finally:
            if proc:
                try:
                    proc.terminate()
                except ProcessLookupError:
                    pass
                yield from proc.wait()

    if not is_found:
        raise WhatWebError('whatweb was not found in path')
def run_cmd_return_dict_async(self, cmd, host, namespace, future, stage):
    with make_slave_pty() as slave_pty:
        process = yield from asyncio.create_subprocess_exec(
            *cmd,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
            stdin=slave_pty,
            env={'TERM': 'linux'})
        stdout = b''
        stderr = b''
        try:
            stdout, stderr = yield from asyncio.wait_for(
                process.communicate(), self.process_timeout)
        except asyncio.TimeoutError:
            try:
                process.terminate()
            except ProcessLookupError:
                log.info('process with pid {} not found'.format(process.pid))
            log.error('timeout of {} sec reached. PID {} killed'.format(
                self.process_timeout, process.pid))

    # For each possible line in stderr, match from the beginning of the line
    # for the confusing warning: "Warning: Permanently added ...". If the
    # warning exists, remove it from the string.
    err_arry = stderr.decode().split('\r')
    stderr = bytes('\n'.join([
        line for line in err_arry
        if not line.startswith('Warning: Permanently added')
    ]), 'utf-8')
    process_output = {
        '{}:{}'.format(host.ip, host.port): {
            "cmd": cmd,
            "stdout": stdout.decode().split('\n'),
            "stderr": stderr.decode().split('\n'),
            "returncode": process.returncode,
            "pid": process.pid,
            "stage": stage
        }
    }
    future.set_result((namespace, process_output, host))
    return process_output
def run_cmd_async(cmd, cwd, env=None, fail=True, shell=False, liveupdate=True):
    """
    Run a command asynchronously.
    """
    # pylint: disable=too-many-arguments
    env = env or {}
    cmdstr = cmd
    if not shell:
        cmdstr = ' '.join(cmd)
    logging.info('%s$ %s', cwd, cmdstr)

    logo = LogOutput(liveupdate)
    if shell:
        process = yield from asyncio.create_subprocess_shell(
            cmd,
            env=env,
            cwd=cwd,
            universal_newlines=True,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE)
    else:
        process = yield from asyncio.create_subprocess_exec(
            *cmd,
            cwd=cwd,
            env=env,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE)

    yield from asyncio.wait([
        _read_stream(process.stdout, logo.log_stdout),
        _read_stream(process.stderr, logo.log_stderr)
    ])
    ret = yield from process.wait()

    if ret and fail:
        msg = 'Command "{cwd}$ {cmd}" failed'.format(cwd=cwd, cmd=cmdstr)
        if logo.stderr:
            msg += '\n--- Error summary ---\n'
            for line in logo.stderr:
                msg += line
        logging.error(msg)

    return (ret, ''.join(logo.stdout))
def _streamlink(url, quality=None, **kwargs):
    kwargs = {'--{}'.format(key): value for key, value in kwargs.items()}
    parameters = list(itertools.chain.from_iterable(
        (key, str(value)) if not isinstance(value, bool) else (key,)
        for key, value in kwargs.items()))
    parameters.append(url)
    if quality:
        parameters.append(quality)
    return asyncio.create_subprocess_exec(
        'streamlink', *parameters,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.DEVNULL,
    )
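# A hypothetical usage sketch for _streamlink() above; the URL and quality
# are placeholders. Extra keyword arguments would become "--key value"
# command-line options via the dict comprehension in the helper.
async def dump_stream():
    proc = await _streamlink('https://example.com/stream', quality='best')
    chunk = await proc.stdout.read(4096)
    print('{} bytes received'.format(len(chunk)))
    proc.terminate()
    await proc.wait()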
def create_and_run_worker(loop):
    '''Create the worker subprocess, then send data to it as requested'''
    # Create the worker process
    # TODO: in production code the '--worker' wants to be a constant of some sort
    command_and_args = [sys.executable] + sys.argv + ['--worker']
    worker_process_create = asyncio.create_subprocess_exec(
        *command_and_args,
        stdout=asyncio.subprocess.PIPE,
        stdin=asyncio.subprocess.PIPE)
    worker_process = yield from worker_process_create

    # Send data as requested
    try:
        yield from frame_sender(loop, worker_process)
    except KeyboardInterrupt:
        # We're quite happy to quit on keyboard interrupt
        worker_process.terminate()
def run_process(self, binary, args=[]):
    return asyncio.create_subprocess_exec(
        *([binary] + args),
        # Buffer limit is 64KB by default, but we need a larger buffer:
        limit=1024 * 256,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.STDOUT,
        cwd=self.configuration.build_directory,
        env={
            **os.environ,
            **self.configuration.environment_variables,
            **self.configuration.get("env", {}),
            "ARTIFACT_DIR": str(self.artifact_dir),
            "CMAKE_FLAGS": " ".join(self.configuration.cmake_flags),
        },
    )
def _test_popen_error(self, stdin):
    if sys.platform == 'win32':
        target = 'asyncio.windows_utils.Popen'
    else:
        target = 'subprocess.Popen'
    with mock.patch(target) as popen:
        exc = ZeroDivisionError
        popen.side_effect = exc

        create = asyncio.create_subprocess_exec(sys.executable, '-c', 'pass',
                                                stdin=stdin, loop=self.loop)
        with warnings.catch_warnings(record=True) as warns:
            with self.assertRaises(exc):
                self.loop.run_until_complete(create)
            self.assertEqual(warns, [])