def exec_cmd(cmd, env=None):
    """
    Execute cmd in an external process, collect its output and returncode

    :param cmd: an iterator of strings to be passed as exec(2)'s argv
    :param env: an optional dictionary to be placed as environment variables
                of the external process. If None, the environment of the
                calling process is used.
    :returns: a 3-tuple of the process's
              (returncode, stdout content, stderr content.)

    This is a bare-bones version of `commands.execCmd`. Unlike the latter,
    this function
     * uses Vdsm cpu pinning, and must not be used for long CPU-bound
       processes.
     * does not guarantee to kill underlying process if CPopen.communicate()
       raises. Commands that access shared storage may not use this api.
     * does not hide passwords in logs if they are passed in cmd
    """
    logging.debug(command_log_line(cmd))

    p = CPopen(
        cmd,
        close_fds=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        env=env)

    out, err = p.communicate()

    logging.debug(retcode_log_line(p.returncode, err=err))

    return p.returncode, out, err
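A minimal usage sketch for exec_cmd, assuming the function above is importable as shown; the command and the error handling are illustrative only, since exec_cmd returns errors rather than raising:

    rc, out, err = exec_cmd(["cat", "/proc/mounts"])
    if rc != 0:
        # errors are returned, not raised, so callers must check rc themselves
        raise RuntimeError("cat failed: rc=%d err=%r" % (rc, err))
    mounts = out.splitlines()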
def test_kill(self):
    p = CPopen(["sleep", "1"],
               stdin=None,
               stdout=subprocess.PIPE,
               stderr=subprocess.PIPE)
    p.kill()
    list(cmdutils.receive(p))
    self.assertEqual(p.returncode, -signal.SIGKILL)
def execCmd(command, sudo=False, cwd=None, data=None, raw=False,
            printable=None, env=None, sync=True, nice=None, ioclass=None,
            ioclassdata=None, setsid=False, execCmdLogger=logging.root,
            deathSignal=None, resetCpuAffinity=True):
    """
    Executes an external command, optionally via sudo.

    IMPORTANT NOTE: the new process would receive `deathSignal` when the
    controlling thread dies, which may not be what you intended: if you
    create a temporary thread, spawn a sync=False sub-process, and have the
    thread finish, the new subprocess would die immediately.
    """
    command = cmdutils.wrap_command(command, with_ioclass=ioclass,
                                    ioclassdata=ioclassdata, with_nice=nice,
                                    with_setsid=setsid, with_sudo=sudo,
                                    reset_cpu_affinity=resetCpuAffinity)

    # Unsubscriptable objects (e.g. generators) need conversion
    if not callable(getattr(command, '__getitem__', None)):
        command = tuple(command)

    if not printable:
        printable = command

    execCmdLogger.debug(command_log_line(printable, cwd=cwd))

    extra = {}
    extra['stderr'] = subprocess.PIPE
    extra['stdout'] = subprocess.PIPE

    if deathSignal is not None:
        extra['deathSignal'] = deathSignal

    p = CPopen(command, close_fds=True, cwd=cwd, env=env, **extra)

    if not sync:
        p = AsyncProc(p)
        if data is not None:
            p.stdin.write(data)
            p.stdin.flush()
        return p

    with terminating(p):
        (out, err) = p.communicate(data)

    if out is None:
        # Prevent splitlines() from barfing later on
        out = ""

    execCmdLogger.debug(retcode_log_line(p.returncode, err=err))

    if not raw:
        out = out.splitlines(False)
        err = err.splitlines(False)

    return p.returncode, out, err
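A hedged sketch of calling execCmd in both modes; the commands are illustrative, and the async part assumes only what the code above shows, namely that sync=False returns an AsyncProc wrapping the child:

    # Synchronous: out and err are split into lists of lines unless raw=True.
    rc, out, err = execCmd(["lsblk", "--noheadings"])
    if rc != 0:
        raise RuntimeError("lsblk failed: %s" % err)

    # Asynchronous: keep a reference to the returned AsyncProc and reap it
    # yourself. Mind the deathSignal caveat from the docstring if the calling
    # thread is short-lived.
    proc = execCmd(["dd", "if=/dev/zero", "of=/dev/null", "count=1"],
                   sync=False)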
def exec_sync(cmds):
    logging.debug(cmdutils.command_log_line(cmds))

    p = Popen(
        cmds, close_fds=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = p.communicate()

    logging.debug(cmdutils.retcode_log_line(p.returncode, err=err))
    return p.returncode, out, err
def _child_processes(self):
    proc = CPopen(self.PGREP_CMD,
                  stdin=None,
                  stdout=subprocess.PIPE,
                  stderr=subprocess.PIPE)
    out, err = proc.communicate()
    # EXIT STATUS
    # 0      One or more processes matched the criteria.
    # 1      No processes matched.
    if proc.returncode not in (0, 1):
        raise RuntimeError("Error running pgrep: [%d] %s"
                           % (proc.returncode, err))
    return frozenset(int(pid) for pid in out.splitlines())
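For context, a standalone sketch of the same pgrep convention, with a made-up pattern argument (the real PGREP_CMD is defined elsewhere in that class): exit status 0 means one or more processes matched, 1 means no match, and anything else is treated as an error.

    import subprocess

    def pids_matching(pattern):
        # illustrative only; uses plain subprocess instead of CPopen
        proc = subprocess.Popen(["pgrep", "-f", pattern],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        out, err = proc.communicate()
        if proc.returncode not in (0, 1):
            raise RuntimeError("Error running pgrep: [%d] %s"
                               % (proc.returncode, err))
        return frozenset(int(pid) for pid in out.splitlines())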
def test_without_affinity(self):
    args = [EXT_SLEEP, "3"]
    popen = Popen(args, close_fds=True)
    stats = proc.pidstat(popen.pid)
    pid = int(stats.pid)
    # procName comes in the format of (procname)
    name = stats.comm
    self.assertEqual(pid, popen.pid)
    self.assertEqual(name, args[0])
    popen.kill()
    popen.wait()
def exec_sync_bytes(cmds):
    logging.debug(cmdutils.command_log_line(cmds))

    p = Popen(cmds, close_fds=True, stdout=subprocess.PIPE,
              stderr=subprocess.PIPE)
    out, err = p.communicate()

    logging.debug(cmdutils.retcode_log_line(p.returncode, err=err))
    return p.returncode, out, err
def __init__(self, cmd, cwd=None):
    self._lock = threading.Lock()
    self._aborted = False
    self._progress = 0.0

    self._stdout = bytearray()
    self._stderr = bytearray()

    self.cmd = wrap_command(cmd,
                            with_nice=utils.NICENESS.HIGH,
                            with_ioclass=utils.IOCLASS.IDLE)
    _log.debug(cmdutils.command_log_line(self.cmd, cwd=cwd))
    self._command = CPopen(self.cmd, cwd=cwd, deathSignal=signal.SIGKILL)
    self._stream = utils.CommandStream(self._command,
                                       self._recvstdout,
                                       self._recvstderr)
def test_write(self):
    p = CPopen(["dd", "of=/dev/null", "bs=%d" % self.BUFSIZE],
               stdin=subprocess.PIPE,
               stdout=None,
               stderr=subprocess.PIPE)
    start = monotonic_time()
    total = self.COUNT * self.BUFSIZE
    sent = 0
    with io.open("/dev/zero", "rb") as f:
        while sent < total:
            n = min(total - sent, self.BUFSIZE)
            data = f.read(n)
            if not data:
                raise RuntimeError("/dev/zero closed?!")
            p.stdin.write(data)
            sent += len(data)
    p.stdin.flush()
    p.stdin.close()
    for _, data in cmdutils.receive(p, 10):
        pass
    elapsed = monotonic_time() - start
    sent_gb = sent / float(1024**3)
    print("%.2fg in %.2f seconds (%.2fg/s)"
          % (sent_gb, elapsed, sent_gb / elapsed),
          end=" ")
    self.assertEqual(p.returncode, 0)
def _simple_exec_cmd(command, env=None, nice=None, ioclass=None,
                     stdin=None, stdout=None, stderr=None):
    command = wrap_command(command, with_ioclass=ioclass,
                           ioclassdata=None, with_nice=nice,
                           with_setsid=False, with_sudo=False,
                           reset_cpu_affinity=True)

    logging.debug(cmdutils.command_log_line(command, cwd=None))

    p = CPopen(command, close_fds=True, cwd=None, env=env,
               stdin=stdin, stdout=stdout, stderr=stderr)
    return p
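A minimal caller sketch for _simple_exec_cmd; the command is illustrative, and the only assumption is what the wrapper above shows: it returns the started CPopen object without waiting for it.

    p = _simple_exec_cmd(["uname", "-r"],
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    out, err = p.communicate()
    if p.returncode != 0:
        raise RuntimeError("uname failed: [%d] %r" % (p.returncode, err))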
def test_no_output_error(self):
    p = CPopen(["false"],
               stdin=None,
               stdout=subprocess.PIPE,
               stderr=subprocess.PIPE)
    received = list(cmdutils.receive(p))
    self.assertEqual(received, [])
    self.assertEqual(p.returncode, 1)
def test(self):
    sleepProcs = []
    try:
        for i in range(3):
            popen = Popen([EXT_SLEEP, "3"])
            sleepProcs.append(popen)

        # There is no guarantee which process runs first after forking a
        # child process; make sure all the children are running before we
        # look for them.
        time.sleep(0.5)

        pids = proc.pgrep(EXT_SLEEP)
        for popen in sleepProcs:
            self.assertIn(popen.pid, pids)
    finally:
        for popen in sleepProcs:
            popen.kill()
            popen.wait()
def test_stderr(self):
    p = CPopen(["sh", "-c", "echo error >/dev/stderr"],
               stdin=None,
               stdout=subprocess.PIPE,
               stderr=subprocess.PIPE)
    received = list(cmdutils.receive(p))
    self.assertEqual(received, [(cmdutils.ERR, b"error\n")])
    self.assertEqual(p.returncode, 0)
def test_stdout(self):
    p = CPopen(["echo", "output"],
               stdin=None,
               stdout=subprocess.PIPE,
               stderr=subprocess.PIPE)
    received = list(cmdutils.receive(p))
    self.assertEqual(received, [(cmdutils.OUT, b"output\n")])
    self.assertEqual(p.returncode, 0)
def _start_process(self):
    """
    Starts a dd process performing direct I/O to path, reading the
    process stderr. When stderr has closed, _read_completed will be
    called.
    """
    cmd = [constants.EXT_DD, "if=%s" % self._path, "of=/dev/null",
           "bs=4096", "count=1", "iflag=direct"]
    cmd = cmdutils.wrap_command(cmd)
    self._proc = CPopen(cmd, stdin=None, stdout=None,
                        stderr=subprocess.PIPE)
    self._reader = self._loop.create_dispatcher(
        asyncevent.BufferedReader, self._proc.stderr, self._read_completed)
def test_both_stdout_stderr(self):
    p = CPopen(["sh", "-c", "echo output; echo error >/dev/stderr;"],
               stdin=None,
               stdout=subprocess.PIPE,
               stderr=subprocess.PIPE)
    received = list(cmdutils.receive(p))
    self.assertEqual(sorted(received),
                     sorted([(cmdutils.OUT, b"output\n"),
                             (cmdutils.ERR, b"error\n")]))
    self.assertEqual(p.returncode, 0)
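The tests above pin down the cmdutils.receive() protocol: it yields (source, bytes) pairs tagged cmdutils.OUT or cmdutils.ERR until both pipes close, after which p.returncode is set, and it raises cmdutils.TimeoutExpired when the timeout passes. A hedged sketch of consuming it outside a test; the command and the error handling are illustrative:

    p = CPopen(["ls", "-l", "/tmp"],
               stdin=None,
               stdout=subprocess.PIPE,
               stderr=subprocess.PIPE)
    output = bytearray()
    errors = bytearray()
    # second positional argument is the timeout, as used in the tests above
    for src, data in cmdutils.receive(p, 10):
        if src == cmdutils.OUT:
            output += data
        else:
            errors += data
    if p.returncode != 0:
        raise RuntimeError("ls failed: [%d] %r"
                           % (p.returncode, bytes(errors)))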
def test_no_fds(self):
    p = CPopen(["sleep", "1"],
               stdin=None,
               stdout=None,
               stderr=None)
    try:
        with self.assertRaises(cmdutils.TimeoutExpired):
            for _ in cmdutils.receive(p, 0.5):
                pass
    finally:
        p.kill()
        p.wait()
def test_plain_read(self):
    p = CPopen(["dd", "if=/dev/zero", "bs=%d" % self.BUFSIZE,
                "count=%d" % self.COUNT],
               stdin=None,
               stdout=subprocess.PIPE,
               stderr=subprocess.PIPE)
    start = monotonic_time()
    received = 0
    while True:
        data = os.read(p.stdout.fileno(), self.BUFSIZE)
        if not data:
            break
        received += len(data)
    p.wait()
    elapsed = monotonic_time() - start
    received_gb = received / float(1024**3)
    print("%.2fg in %.2f seconds (%.2fg/s)"
          % (received_gb, elapsed, received_gb / elapsed),
          end=" ")
    self.assertEqual(received, self.COUNT * self.BUFSIZE)
    self.assertEqual(p.returncode, 0)
def test_timeout_with_data(self):
    p = CPopen(["yes"],
               stdin=None,
               stdout=subprocess.PIPE,
               stderr=subprocess.PIPE)
    try:
        with self.assertRaises(cmdutils.TimeoutExpired):
            for _ in cmdutils.receive(p, 0.5):
                pass
    finally:
        p.kill()
        p.wait()
def test_read(self):
    p = CPopen(["dd", "if=/dev/zero", "bs=%d" % self.BUFSIZE,
                "count=%d" % self.COUNT],
               stdin=None,
               stdout=subprocess.PIPE,
               stderr=subprocess.PIPE)
    start = monotonic_time()
    received = 0
    for src, data in cmdutils.receive(p, bufsize=self.BUFSIZE):
        if src == cmdutils.OUT:
            received += len(data)
    elapsed = monotonic_time() - start
    received_gb = received / float(1024**3)
    print("%.2fg in %.2f seconds (%.2fg/s)"
          % (received_gb, elapsed, received_gb / elapsed),
          end=" ")
    self.assertEqual(received, self.COUNT * self.BUFSIZE)
    self.assertEqual(p.returncode, 0)
def test_fds_closed(self):
    cmd = ["python", "-c",
           "import os, time; os.close(1); os.close(2); time.sleep(1)"]
    p = CPopen(cmd,
               stdin=None,
               stdout=subprocess.PIPE,
               stderr=subprocess.PIPE)
    try:
        with self.assertRaises(cmdutils.TimeoutExpired):
            for _ in cmdutils.receive(p, 0.5):
                pass
    finally:
        p.kill()
        p.wait()
class QemuImgOperation(object):
    REGEXPR = re.compile(r'\s*\(([\d.]+)/100%\)\s*')

    def __init__(self, cmd, cwd=None):
        self._lock = threading.Lock()
        self._aborted = False
        self._progress = 0.0

        self._stdout = bytearray()
        self._stderr = bytearray()

        self.cmd = wrap_command(cmd,
                                with_nice=utils.NICENESS.HIGH,
                                with_ioclass=utils.IOCLASS.IDLE)
        _log.debug(cmdutils.command_log_line(self.cmd, cwd=cwd))
        self._command = CPopen(self.cmd, cwd=cwd, deathSignal=signal.SIGKILL)
        self._stream = utils.CommandStream(
            self._command, self._recvstdout, self._recvstderr)

    def _recvstderr(self, buffer):
        self._stderr += buffer

    def _recvstdout(self, buffer):
        self._stdout += buffer

        # Checking the presence of '\r' before splitting will prevent
        # generating the array when it's not needed.
        try:
            idx = self._stdout.rindex('\r')
        except ValueError:
            return

        # qemu-img updates progress by printing \r (0.00/100%) to standard
        # out. The output could end with a partial progress so we must
        # discard everything after the last \r and then try to parse a
        # progress record.
        valid_progress = self._stdout[:idx]
        last_progress = valid_progress.rsplit('\r', 1)[-1]

        # No need to keep old progress information around
        del self._stdout[:idx + 1]

        m = self.REGEXPR.match(last_progress)
        if m is None:
            raise ValueError('Unable to parse: "%r"' % last_progress)

        self._progress = float(m.group(1))

    @property
    def progress(self):
        """
        Returns operation progress as float between 0 and 100.

        This method is threadsafe and may be called from any thread.
        """
        return self._progress

    @property
    def error(self):
        return str(self._stderr)

    @property
    def finished(self):
        return self._command.poll() is not None

    def poll(self, timeout=None):
        self._stream.receive(timeout=timeout)

        if not self._stream.closed:
            return

        self._command.wait()

        if self._aborted:
            raise exception.ActionStopped()

        _log.debug(cmdutils.retcode_log_line(self._command.returncode,
                                             self.error))
        if self._command.returncode != 0:
            raise QImgError(self.cmd, self._command.returncode, "",
                            self.error)

    def wait_for_completion(self):
        timeout = config.getint("irs", "progress_interval")
        while not self.finished:
            self.poll(timeout)
            _log.debug('qemu-img operation progress: %s%%', self.progress)

    def abort(self):
        """
        Aborts the running operation by sending a termination signal to the
        underlying qemu-img process.

        Note: this is an asynchronous operation, returning before the process
        was terminated. You must use wait_for_completion to wait for the
        underlying qemu-img process.

        This method is threadsafe and may be called from any thread.
        """
        with self._lock:
            if self._command is None:
                return
            if self._command.poll() is None:
                self._aborted = True
                self._command.terminate()

    def close(self):
        with self._lock:
            self._stream.close()
            self._command = None
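A usage sketch for QemuImgOperation based only on the interface above; the qemu-img arguments and surrounding error handling are illustrative (the "-p" flag is what makes qemu-img print the progress records that _recvstdout parses):

    op = QemuImgOperation(["/usr/bin/qemu-img", "convert", "-p", "-t", "none",
                           "src.img", "-O", "raw", "dst.img"])
    try:
        # Polls until the underlying process exits, logging progress; raises
        # QImgError on non-zero exit and ActionStopped if abort() was called.
        op.wait_for_completion()
    finally:
        op.close()

    # From another thread, a long-running convert can be cancelled with:
    #     op.abort()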
class DirectioChecker(object):
    """
    Check path availability using direct I/O.

    DirectioChecker is created with a complete callback. Each time a check
    cycle is completed, the complete callback will be invoked with a
    CheckResult instance.

    CheckResult provides a delay() method returning the read delay in
    seconds. If the check failed, the delay() method will raise the
    appropriate exception that can be reported to engine.

    Note that the complete callback must not block as it will block the
    entire event loop thread.

    The checker runs exactly every interval seconds. If a check did not
    complete before the next check is scheduled, the next check will be
    delayed to the next interval.

    Checker is not thread safe. Use EventLoop.call_soon_threadsafe() to start
    or stop a checker. The only thread safe method is wait().

    Usage::

        # Start the event loop thread
        loop = asyncevent.EventLoop()
        concurrent.thread(loop.run_forever).start()

        # The complete callback
        def complete(result):
            try:
                check_delay = result.delay()
            except Exception as e:
                check_error = e
            check_time = time.time()

        # Start a checker on the event loop thread
        checker = DirectioChecker(loop, path, complete)
        loop.call_soon_threadsafe(checker.start)

        ...

        # Stop a checker from another thread
        loop.call_soon_threadsafe(checker.stop)

        # If needed, wait until a checker actually stopped.
        checker.wait(30)

    """

    log = logging.getLogger("storage.directiochecker")

    def __init__(self, loop, path, complete, interval=10.0):
        self._loop = loop
        self._path = path
        self._complete = complete
        self._interval = interval
        self._looper = asyncutils.LoopingCall(loop, self._check)
        self._check_time = None
        self._proc = None
        self._reader = None
        self._reaper = None
        self._err = None
        self._state = IDLE
        self._stopped = threading.Event()

    def start(self):
        """
        Start the checker.

        Raises RuntimeError if the checker is running.
        """
        if self._state is not IDLE:
            raise RuntimeError("Checker is %s" % self._state)
        self._state = RUNNING
        _log.debug("Checker %r started", self._path)
        self._stopped.clear()
        self._looper.start(self._interval)

    def stop(self):
        """
        Stop the checker.

        If the checker is waiting for the next check, the next check will be
        cancelled. If the checker is in the middle of a check, it will stop
        when the check completes.

        If the checker is not running, the call is ignored silently.
        """
        if self._state is not RUNNING:
            return
        _log.debug("Checker %r stopping", self._path)
        self._state = STOPPING
        self._looper.stop()
        if self._proc is None:
            self._stop_completed()

    def wait(self, timeout=None):
        """
        Wait until a checker has stopped.

        Returns True if the checker has stopped, False if the timeout
        expired.
        """
        return self._stopped.wait(timeout)

    def is_running(self):
        return self._state is not IDLE

    def _stop_completed(self):
        self._state = IDLE
        _log.debug("Checker %r stopped", self._path)
        self._stopped.set()

    def _check(self):
        """
        Called when starting the checker, and then every interval seconds
        until the checker is stopped.
        """
        assert self._state is RUNNING
        if self._proc:
            _log.warning("Checker %r is blocked for %.2f seconds",
                         self._path, self._loop.time() - self._check_time)
            return
        self._check_time = self._loop.time()
        _log.debug("START check %r (delay=%.2f)",
                   self._path, self._check_time - self._looper.deadline)
        try:
            self._start_process()
        except Exception as e:
            self._err = "Error starting process: %s" % e
            self._check_completed(EXEC_ERROR)

    def _start_process(self):
        """
        Starts a dd process performing direct I/O to path, reading the
        process stderr. When stderr has closed, _read_completed will be
        called.
        """
        cmd = [constants.EXT_DD, "if=%s" % self._path, "of=/dev/null",
               "bs=4096", "count=1", "iflag=direct"]
        cmd = cmdutils.wrap_command(cmd)
        self._proc = CPopen(cmd, stdin=None, stdout=None,
                            stderr=subprocess.PIPE)
        self._reader = self._loop.create_dispatcher(
            asyncevent.BufferedReader, self._proc.stderr,
            self._read_completed)

    def _read_completed(self, data):
        """
        Called when the dd process has closed stderr. At this point the
        process may still be running.
        """
        assert self._state is not IDLE
        self._reader = None
        self._err = data
        rc = self._proc.poll()
        # In about 95% of runs, the process has terminated at this point. If
        # not, start the reaper to wait for it.
        if rc is None:
            self._reaper = asyncevent.Reaper(self._loop, self._proc,
                                             self._check_completed)
            return
        self._check_completed(rc)

    def _check_completed(self, rc):
        """
        Called when the dd process has exited with exit code rc.
        """
        assert self._state is not IDLE
        now = self._loop.time()
        elapsed = now - self._check_time
        _log.debug("FINISH check %r (rc=%s, elapsed=%.02f)",
                   self._path, rc, elapsed)
        self._reaper = None
        self._proc = None
        if self._state is STOPPING:
            self._stop_completed()
            return
        result = CheckResult(self._path, rc, self._err, self._check_time,
                             elapsed)
        self._complete(result)

    def __repr__(self):
        info = [self.__class__.__name__,
                self._path,
                self._state]
        if self._state is RUNNING:
            info.append("next_check=%.2f" % self._looper.deadline)
        return "<%s at 0x%x>" % (" ".join(info), id(self))