def test_write(self):
    # Benchmark: stream COUNT * BUFSIZE zero bytes into dd's stdin and
    # verify dd exits cleanly, printing the achieved throughput.
    p = Popen(["dd", "of=/dev/null", "bs=%d" % self.BUFSIZE],
              stdin=subprocess.PIPE, stdout=None, stderr=subprocess.PIPE)
    start = monotonic_time()
    total = self.COUNT * self.BUFSIZE
    sent = 0
    with io.open("/dev/zero", "rb") as f:
        while sent < total:
            n = min(total - sent, self.BUFSIZE)
            data = f.read(n)
            if not data:
                raise RuntimeError("/dev/zero closed?!")
            p.stdin.write(data)
            sent += len(data)
    p.stdin.flush()
    p.stdin.close()
    # Drain child output so dd can terminate (second argument to
    # cmdutils.receive is presumably a timeout -- confirm in cmdutils).
    for _, data in cmdutils.receive(p, 10):
        pass
    elapsed = monotonic_time() - start
    sent_gb = sent / float(1024**3)
    print("%.2fg in %.2f seconds (%.2fg/s)"
          % (sent_gb, elapsed, sent_gb / elapsed),
          end=" ")
    self.assertEqual(p.returncode, 0)
def wait(self, timeout=None):
    """
    Wait for all processes to terminate. If timeout is provided, it is
    set as the upper limit for the wait period.

    Note that the timeout granularity is 1 sec, therefore, the actual
    applied timeout may drift by 1 sec.
    """
    if timeout is not None:
        deadline = monotonic_time() + timeout
        # NOTE: we do not want to use Popen's auto-killing timeout
        if self.returncode is not None:
            return True
        while monotonic_time() < deadline:
            time.sleep(1)
            if self.returncode is not None:
                return True
    else:
        # No timeout: block until every child process exits.
        for p in self._procs:
            p.wait()
        return True
    # Deadline passed without all processes terminating.
    return False
def test_write(self):
    # Benchmark: stream COUNT * BUFSIZE zero bytes into dd's stdin and
    # verify dd exits cleanly, printing the achieved throughput.
    p = Popen(["dd", "of=/dev/null", "bs=%d" % self.BUFSIZE],
              stdin=subprocess.PIPE, stdout=None, stderr=subprocess.PIPE)
    start = monotonic_time()
    total = self.COUNT * self.BUFSIZE
    sent = 0
    with io.open("/dev/zero", "rb") as f:
        while sent < total:
            n = min(total - sent, self.BUFSIZE)
            data = f.read(n)
            if not data:
                raise RuntimeError("/dev/zero closed?!")
            p.stdin.write(data)
            sent += len(data)
    p.stdin.flush()
    p.stdin.close()
    # Drain child output so dd can terminate (second argument to
    # cmdutils.receive is presumably a timeout -- confirm in cmdutils).
    for _, data in cmdutils.receive(p, 10):
        pass
    elapsed = monotonic_time() - start
    sent_gb = sent / float(1024**3)
    print("%.2fg in %.2f seconds (%.2fg/s)"
          % (sent_gb, elapsed, sent_gb / elapsed),
          end=" ")
    self.assertEqual(p.returncode, 0)
def assertMaxDuration(self, maxtime):
    # Generator-based context manager (presumably decorated with
    # @contextmanager outside this view -- TODO confirm): fail the test
    # if the code inside the with-block takes longer than maxtime
    # seconds.
    start = time.monotonic_time()
    try:
        yield
    finally:
        # Measure even when the body raised, so slow failures are
        # reported too.
        elapsed = time.monotonic_time() - start
        if maxtime < elapsed:
            self.fail("Operation was too slow %.2fs > %.2fs"
                      % (elapsed, maxtime))
def stopwatch(message, level=logging.DEBUG,
              log=logging.getLogger('vds.stopwatch')):
    # Generator-based context manager (presumably decorated with
    # @contextmanager outside this view -- TODO confirm): log how long
    # the with-block took, but only when the target log level is
    # enabled.
    if log.isEnabledFor(level):
        start = vdsm_time.monotonic_time()
        yield
        elapsed = vdsm_time.monotonic_time() - start
        log.log(level, "%s: %.2f seconds", message, elapsed)
    else:
        # Logging disabled: still yield so the with-block runs,
        # skipping the timing overhead.
        yield
def stopwatch(message, level=logging.DEBUG,
              log=logging.getLogger('vds.stopwatch')):
    # Generator-based context manager (presumably decorated with
    # @contextmanager outside this view -- TODO confirm): log how long
    # the with-block took, but only when the target log level is
    # enabled.
    if log.isEnabledFor(level):
        start = vdsm_time.monotonic_time()
        yield
        elapsed = vdsm_time.monotonic_time() - start
        log.log(level, "%s: %.2f seconds", message, elapsed)
    else:
        # Logging disabled: still yield so the with-block runs,
        # skipping the timing overhead.
        yield
def _work():
    # Test worker closure: records invocation/execution counters and
    # timestamps in lists from the enclosing scope (invocations,
    # executions, done, BLOCK_AT, PERIOD, TIMES).
    invocations[0] += 1
    invocations[1] = monotonic_time()
    if invocations[0] == BLOCK_AT:
        # must be > (PERIOD * TIMES) ~= forever
        time.sleep(10 * PERIOD * TIMES)
    executions[0] += 1
    executions[1] = monotonic_time()
    if invocations[0] == TIMES:
        done.set()
def _work():
    # Test worker closure: records invocation/execution counters and
    # timestamps in lists from the enclosing scope (invocations,
    # executions, done, BLOCK_AT, PERIOD, TIMES).
    invocations[0] += 1
    invocations[1] = monotonic_time()
    if invocations[0] == BLOCK_AT:
        # must be > (PERIOD * TIMES) ~= forever
        time.sleep(10 * PERIOD * TIMES)
    executions[0] += 1
    executions[1] = monotonic_time()
    if invocations[0] == TIMES:
        done.set()
def test_timeout_not_triggered(self):
    # A device event must arrive before the monitor's timeout expires,
    # so the whole run stays within TIMEOUT seconds.
    time_start = monotonic_time()
    with monitor.object_monitor(timeout=self.TIMEOUT) as mon:
        with dummy_device():
            pass
        # Consume the first event only.
        for event in mon:
            break
    assert (monotonic_time() - time_start) <= self.TIMEOUT
    assert mon.is_stopped()
def _wait_for_link_up(devname, timeout):
    """
    Wait for link-up, no longer than the specified timeout period.
    The time waited (in seconds) is returned; 0 when no waiting was
    needed.
    """
    # Nothing to wait for: no time budget, or the link is already up.
    if timeout <= 0 or iface_obj(devname).is_oper_up():
        return 0
    time_start = monotonic_time()
    with waitfor.waitfor_linkup(devname, timeout=timeout):
        pass
    return monotonic_time() - time_start
def _wait_for_link_up(devname, timeout):
    """
    Wait for link-up, no longer than the specified timeout period.
    The time waited (in seconds) is returned; 0 when no waiting was
    needed.
    """
    # Nothing to wait for: no time budget, or the link is already up.
    if timeout <= 0 or iface_obj(devname).is_oper_up():
        return 0
    time_start = monotonic_time()
    with waitfor.waitfor_linkup(devname, timeout=timeout):
        pass
    return monotonic_time() - time_start
def _serveRequest(self, ctx, req):
    # Dispatch one RPC request, log its outcome and duration, and send
    # the response back through the request context.
    start_time = monotonic_time()
    response = self._handle_request(req, ctx)
    error = getattr(response, "error", None)
    if error is None:
        response_log = "succeeded"
    else:
        response_log = "failed (error %s)" % (error.code,)
    self.log.info("RPC call %s %s in %.2f seconds",
                  req.method, response_log,
                  monotonic_time() - start_time)
    # Notifications produce no response object; only reply when there
    # is something to send.
    if response is not None:
        ctx.requestDone(response)
def _serveRequest(self, ctx, req):
    # Dispatch one RPC request, log its outcome and duration, and send
    # the response back through the request context.
    start_time = monotonic_time()
    response = self._handle_request(req, ctx)
    error = getattr(response, "error", None)
    if error is None:
        response_log = "succeeded"
    else:
        response_log = "failed (error %s)" % (error.code,)
    self.log.info("RPC call %s %s in %.2f seconds",
                  req.method, response_log,
                  monotonic_time() - start_time)
    # Notifications produce no response object; only reply when there
    # is something to send.
    if response is not None:
        ctx.requestDone(response)
def test_timeout_not_triggered(self):
    # Creating and removing a dummy device must produce an event before
    # the monitor's timeout expires.
    time_start = monotonic_time()
    with monitor.Monitor(timeout=self.TIMEOUT) as mon:
        dummy = Dummy()
        dummy.create()
        dummy.remove()
        # Consume the first event only.
        for event in mon:
            break
    assert (monotonic_time() - time_start) <= self.TIMEOUT
    assert mon.is_stopped()
def test_timeout_not_triggered(self):
    # Creating and removing a dummy device must produce an event before
    # the monitor's timeout expires.
    time_start = monotonic_time()
    with monitor.Monitor(timeout=self.TIMEOUT) as mon:
        dummy = Dummy()
        dummy.create()
        dummy.remove()
        # Consume the first event only.
        for event in mon:
            break
    self.assertTrue((monotonic_time() - time_start) <= self.TIMEOUT)
    self.assertTrue(mon.is_stopped())
def _wait_for_socket(sock, timeout):
    """
    Poll until the socket path exists, up to timeout seconds.

    Returns True when the socket appeared, False when the timeout
    expired first.
    """
    begin = monotonic_time()
    waited = 0.0
    while waited < timeout:
        if os.path.exists(sock):
            log.debug("Waited for socket %.3f seconds", waited)
            return True
        # The socket is usually available after about 20 milliseconds,
        # so poll with a matching short sleep.
        time.sleep(0.02)
        waited = monotonic_time() - begin
    return False
def _wait_for_socket(sock, timeout):
    """
    Poll until the socket path exists, up to timeout seconds.

    Returns True when the socket appeared, False when the timeout
    expired first.
    """
    begin = monotonic_time()
    waited = 0.0
    while waited < timeout:
        if os.path.exists(sock):
            log.debug("Waited for socket %.3f seconds", waited)
            return True
        # The socket is usually available after about 20 milliseconds,
        # so poll with a matching short sleep.
        time.sleep(0.02)
        waited = monotonic_time() - begin
    return False
def _serveRequest(self, ctx, req):
    # Dispatch one RPC request; log failures always, and log successes
    # only when they exceeded the slow-call threshold.
    start_time = monotonic_time()
    response = self._handle_request(req, ctx)
    duration = monotonic_time() - start_time
    error = getattr(response, "error", None)
    if error is not None:
        self.log.info("RPC call %s failed (error %s) in %.2f seconds",
                      req.method, error.code, duration)
    elif duration > _SLOW_CALL_THRESHOLD:
        self.log.info(
            "RPC call %s took more than %.2f seconds "
            "to succeed: %.2f",
            req.method, _SLOW_CALL_THRESHOLD, duration)
    # Notifications produce no response object; only reply when there
    # is something to send.
    if response is not None:
        ctx.requestDone(response)
def _wait(p, deadline=None):
    """
    Wait until process terminates, or if deadline is specified,
    `common.time.monotonic_time` exceeds deadline.

    Raises:
        `cmdutils.TimeoutExpired` if process did not terminate within
            deadline.
    """
    log.debug("Waiting for process (pid=%d)", p.pid)
    if deadline is None:
        p.wait()
    else:
        # We need to wait until deadline, Popen.wait() does not support
        # timeout. Python 3 is using busy wait in this case with a timeout
        # of 0.0005 seconds. In vdsm we cannot allow such busy loops, and
        # we don't have a need to support very exact wait time. This loop
        # uses exponential backoff to detect termination quickly if the
        # process terminates quickly, and avoid busy loop if the process
        # is stuck for long time. Timeout will double from 0.0078125 to
        # 1.0, and then continue at 1.0 seconds, until deadline is
        # reached.
        timeout = 1.0 / 256
        while p.poll() is None:
            remaining = deadline - monotonic_time()
            if remaining <= 0:
                raise TimeoutExpired(p.pid)
            time.sleep(min(timeout, remaining))
            if timeout < 1.0:
                timeout *= 2
    log.debug("Process (pid=%d) terminated", p.pid)
def tick(self):
    """
    Advance the internal counter by one (wrapping every _interval
    ticks) and return the result computed for the current time.
    Records the time of the last truthy result in _last_time.
    """
    now = monotonic_time()
    result = self._result(now)
    self._counter = (self._counter + 1) % self._interval
    if result:
        self._last_time = now
    return result
def _scan(self):
    # Event loop of the netlink monitor: poll the monitoring socket via
    # epoll, forwarding received messages to the queue until a timeout
    # expires or the pipetrick signals a stop request.
    with closing(select.epoll()) as epoll:
        with _monitoring_socket(self._queue, self._groups, epoll) as sock:
            with _pipetrick(epoll) as self._pipetrick:
                self._scanning_started.set()
                while True:
                    if self._timeout:
                        timeout = self._end_time - monotonic_time()
                        # timeout expired
                        if timeout <= 0:
                            self._scanning_stopped.set()
                            self._queue.put(_TIMEOUT_FLAG)
                            break
                    else:
                        # No deadline: poll indefinitely.
                        timeout = -1
                    events = uninterruptible_poll(epoll.poll,
                                                  timeout=timeout)
                    # poll timeouted
                    if len(events) == 0:
                        self._scanning_stopped.set()
                        self._queue.put(_TIMEOUT_FLAG)
                        break
                    # stopped by pipetrick
                    elif (self._pipetrick[0], select.POLLIN) in events:
                        uninterruptible(os.read, self._pipetrick[0], 1)
                        self._queue.put(_STOP_FLAG)
                        break
                    # Real netlink traffic: let libnl dispatch it.
                    libnl.nl_recvmsgs_default(sock)
def _poller(self):
    # Periodic QEMU guest-agent poll over all VMs: refresh agent
    # capabilities when due, then gather guest info either through
    # specially-handled agent calls or a single batched libvirt
    # guestInfo() query.
    for vm_id, vm_obj in six.viewitems(self._cif.getVMs()):
        now = monotonic_time()
        # Ensure we know guest agent's capabilities
        self._on_boot(vm_obj, now)
        if not self._runnable_on_vm(vm_obj):
            self.log.debug(
                'Skipping vm-id=%s in this run and not querying QEMU-GA',
                vm_id)
            continue
        caps = self.get_caps(vm_id)
        # Update capabilities -- if we just got the caps above then this
        # will fall through
        if (now - self.last_check(vm_id, VDSM_GUEST_INFO) >=
                _QEMU_COMMAND_PERIODS[VDSM_GUEST_INFO]):
            self._qga_capability_check(vm_obj, now)
            caps = self.get_caps(vm_id)
        if caps['version'] is None:
            # If we don't know about the agent there is no reason to
            # proceed any further
            continue
        # Update guest info
        types = 0
        have_disk_mapping = False
        for command in _QEMU_COMMANDS.keys():
            if _QEMU_COMMANDS[command] not in caps['commands']:
                continue
            # Skip commands whose polling period has not elapsed yet.
            if now - self.last_check(vm_id, command) \
                    < _QEMU_COMMAND_PERIODS[command]:
                continue
            # Commands that have special handling go here
            if command == VIR_DOMAIN_GUEST_INFO_FILESYSTEM and \
                    _QEMU_DISKS_COMMAND in caps['commands']:
                disk_info = self._qga_call_get_disks(vm_obj)
                if len(disk_info.get('diskMapping', {})) > 0:
                    self.update_guest_info(vm_id, disk_info)
                    have_disk_mapping = True
            if command == VDSM_GUEST_INFO_DRIVERS:
                self.update_guest_info(
                    vm_id, self._qga_call_get_devices(vm_obj))
                self.set_last_check(vm_id, command, now)
            elif command == VDSM_GUEST_INFO_NETWORK:
                self.update_guest_info(
                    vm_id, self._qga_call_network_interfaces(vm_obj))
                self.set_last_check(vm_id, command, now)
            # Commands handled by libvirt guestInfo() go here
            else:
                types |= command
        info = self._libvirt_get_guest_info(vm_obj, types,
                                            not have_disk_mapping)
        if info is None:
            self.log.debug('Failed to query QEMU-GA for vm=%s', vm_id)
            self.set_failure(vm_id)
        else:
            self.update_guest_info(vm_id, info)
            # Mark every batched command as checked at this timestamp.
            for command in _QEMU_COMMANDS.keys():
                if types & command:
                    self.set_last_check(vm_id, command, now)
    # Remove stale info
    self._cleanup()
def _runnable_on_vm(self, vm):
    """Return True if the guest agent on this VM may be queried now."""
    # Back off for a while after a recent failure.
    since_failure = monotonic_time() - self.last_failure(vm.id)
    if since_failure < _THROTTLING_INTERVAL:
        return False
    # Only a running domain can be queried.
    return vm.isDomainRunning()
def _scan(self):
    # Event loop of the netlink monitor: poll the monitoring socket via
    # epoll, forwarding received messages to the queue until a timeout
    # expires or the pipetrick signals a stop request.
    with closing(select.epoll()) as epoll:
        with _monitoring_socket(self._queue, self._groups, epoll) as sock:
            with _pipetrick(epoll) as self._pipetrick:
                self._scanning_started.set()
                while True:
                    if self._timeout:
                        timeout = self._end_time - monotonic_time()
                        # timeout expired
                        if timeout <= 0:
                            self._scanning_stopped.set()
                            self._queue.put(_TIMEOUT_FLAG)
                            break
                    else:
                        # No deadline: poll indefinitely.
                        timeout = -1
                    events = uninterruptible_poll(epoll.poll,
                                                  timeout=timeout)
                    # poll timeouted
                    if len(events) == 0:
                        self._scanning_stopped.set()
                        self._queue.put(_TIMEOUT_FLAG)
                        break
                    # stopped by pipetrick
                    elif (self._pipetrick[0], select.POLLIN) in events:
                        uninterruptible(os.read, self._pipetrick[0], 1)
                        self._queue.put(_STOP_FLAG)
                        break
                    # Real netlink traffic: let libnl dispatch it.
                    libnl.nl_recvmsgs_default(sock)
def tick(self):
    """
    Advance the internal counter by one (wrapping every _interval
    ticks) and return the result computed for the current time.
    Records the time of the last truthy result in _last_time.
    """
    now = monotonic_time()
    result = self._result(now)
    self._counter = (self._counter + 1) % self._interval
    if result:
        self._last_time = now
    return result
def _attempt_log_stats(self):
    """Count a processed request; report totals once per interval."""
    self._counter += 1
    if monotonic_time() <= self._next_report:
        return
    self.log.info('%s requests processed during %s seconds',
                  self._counter, self._timeout)
    # Schedule the next report and restart the counter.
    self._next_report += self._timeout
    self._counter = 0
def _wait(p, deadline=None):
    """
    Wait until process terminates, or if deadline is specified,
    `common.time.monotonic_time` exceeds deadline.

    Raises:
        `cmdutils.TimeoutExpired` if process did not terminate within
            deadline.
    """
    log.debug("Waiting for process (pid=%d)", p.pid)
    if deadline is None:
        p.wait()
    else:
        # We need to wait until deadline, Popen.wait() does not support
        # timeout. Python 3 is using busy wait in this case with a timeout
        # of 0.0005 seconds. In vdsm we cannot allow such busy loops, and
        # we don't have a need to support very exact wait time. This loop
        # uses exponential backoff to detect termination quickly if the
        # process terminates quickly, and avoid busy loop if the process
        # is stuck for long time. Timeout will double from 0.0078125 to
        # 1.0, and then continue at 1.0 seconds, until deadline is
        # reached.
        timeout = 1.0 / 256
        while p.poll() is None:
            remaining = deadline - monotonic_time()
            if remaining <= 0:
                raise TimeoutExpired(p.pid)
            time.sleep(min(timeout, remaining))
            if timeout < 1.0:
                timeout *= 2
    log.debug("Process (pid=%d) terminated", p.pid)
def _attempt_log_stats(self):
    """Count a processed request; report totals once per interval."""
    self._counter += 1
    if monotonic_time() <= self._next_report:
        return
    self.log.info('%s requests processed during %s seconds',
                  self._counter, self._timeout)
    # Schedule the next report and restart the counter.
    self._next_report += self._timeout
    self._counter = 0
def runnable(self):
    """Return True if the QEMU-GA operation may run for this VM now."""
    if not self._vm.isDomainReadyForCommands():
        return False
    last_failure = self._qga_poller.last_failure(self._vm.id)
    if last_failure is None:
        # Never failed -- nothing to throttle.
        return True
    # Back off for a while after a failure.
    return (monotonic_time() - last_failure) >= _THROTTLING_INTERVAL
def runnable(self):
    """Return True if the QEMU-GA operation may run for this VM now."""
    if not self._vm.isDomainReadyForCommands():
        return False
    last_failure = self._qga_poller.last_failure(self._vm.id)
    if last_failure is None:
        # Never failed -- nothing to throttle.
        return True
    # Back off for a while after a failure.
    return (monotonic_time() - last_failure) >= _THROTTLING_INTERVAL
def test_read(self):
    # Benchmark: read COUNT blocks of BUFSIZE zeros from dd through
    # cmdutils.receive and verify the full amount arrived.
    p = Popen(["dd", "if=/dev/zero", "bs=%d" % self.BUFSIZE,
               "count=%d" % self.COUNT],
              stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    start = monotonic_time()
    received = 0
    for src, data in cmdutils.receive(p, bufsize=self.BUFSIZE):
        # Only count stdout; stderr carries dd's statistics.
        if src == cmdutils.OUT:
            received += len(data)
    elapsed = monotonic_time() - start
    received_gb = received / float(1024**3)
    print("%.2fg in %.2f seconds (%.2fg/s)"
          % (received_gb, elapsed, received_gb / elapsed),
          end=" ")
    self.assertEqual(received, self.COUNT * self.BUFSIZE)
    self.assertEqual(p.returncode, 0)
def __init__(self, bridge, timeout, cif, threadFactory=None):
    self._bridge = bridge
    self._cif = cif
    self._workQueue = queue.Queue()  # pending requests
    self._threadFactory = threadFactory  # optional worker-thread factory
    self._timeout = timeout  # stats reporting interval, in seconds
    # Absolute monotonic time of the next stats report.
    self._next_report = monotonic_time() + self._timeout
    self._counter = 0  # requests processed since the last report
def receive(self, timeout=None):
    """
    Receiving data from the command can raise OSError
    exceptions as described in read(2).
    """
    if timeout is None:
        # Negative value means poll with no time limit.
        poll_remaining = -1
    else:
        endtime = vdsm_time.monotonic_time() + timeout
    while not self.closed:
        if timeout is not None:
            # Recompute how long we may still poll; stop on expiry.
            poll_remaining = endtime - vdsm_time.monotonic_time()
            if poll_remaining <= 0:
                break
        self._poll_timeout(poll_remaining)
def _wait_for_for_all_devices_up(links):
    # Poll (1 second granularity) until all given links report state
    # up, or _ALL_DEVICES_UP_TIMEOUT expires; only log the outcome.
    timeout = monotonic_time() + _ALL_DEVICES_UP_TIMEOUT
    down_links = _get_links_with_state_down(links)
    # TODO: use netlink monitor here might be more elegant (not
    # TODO: available in 3.5)
    while down_links and monotonic_time() < timeout:
        logging.debug("waiting for %s to be up.", down_links)
        time.sleep(1)
        down_links = _get_links_with_state_down(links)
    if down_links:
        logging.warning("Not all devices are up. VDSM might restore them "
                        "although they were not changed since they were "
                        "persisted.")
    else:
        logging.debug("All devices are up.")
def receive(self, timeout=None):
    """
    Receiving data from the command can raise OSError
    exceptions as described in read(2).
    """
    if timeout is None:
        # Negative value means poll with no time limit.
        poll_remaining = -1
    else:
        endtime = vdsm_time.monotonic_time() + timeout
    while not self.closed:
        if timeout is not None:
            # Recompute how long we may still poll; stop on expiry.
            poll_remaining = endtime - vdsm_time.monotonic_time()
            if poll_remaining <= 0:
                break
        self._poll_timeout(poll_remaining)
def __init__(self, bridge, timeout, cif, threadFactory=None):
    self._bridge = bridge
    self._cif = cif
    self._workQueue = queue.Queue()  # pending requests
    self._threadFactory = threadFactory  # optional worker-thread factory
    self._timeout = timeout  # stats reporting interval, in seconds
    # Absolute monotonic time of the next stats report.
    self._next_report = monotonic_time() + self._timeout
    self._counter = 0  # requests processed since the last report
def _wait_for_for_all_devices_up(links):
    # Poll (1 second granularity) until all given links report state
    # up, or _ALL_DEVICES_UP_TIMEOUT expires; only log the outcome.
    timeout = monotonic_time() + _ALL_DEVICES_UP_TIMEOUT
    down_links = _get_links_with_state_down(links)
    # TODO: use netlink monitor here might be more elegant (not
    # TODO: available in 3.5)
    while down_links and monotonic_time() < timeout:
        logging.debug("waiting for %s to be up.", down_links)
        time.sleep(1)
        down_links = _get_links_with_state_down(links)
    if down_links:
        logging.warning("Not all devices are up. VDSM might restore them "
                        "although they were not changed since they were "
                        "persisted.")
    else:
        logging.debug("All devices are up.")
def retry(func, expectedException=Exception, tries=None,
          timeout=None, sleep=1, stopCallback=None):
    """
    Retry a function. Wraps the retry logic so you don't have to
    implement it each time you need it.

    :param func: The callable to run.
    :param expectedException: The exception you expect to receive when
        the function fails.
    :param tries: The number of times to try. None, 0 or -1 means
        infinite.
    :param timeout: The time you want to spend waiting. This **WILL NOT**
        stop the method. It will just not run it if it ended after the
        timeout.
    :param sleep: Time to sleep between calls in seconds.
    :param stopCallback: A function that takes no parameters and causes
        the method to stop retrying when it returns with a positive
        value.
    """
    # Normalize "infinite" markers: -1 never reaches 0 when decremented.
    if tries in [0, None]:
        tries = -1
    if timeout in [0, None]:
        timeout = -1
    startTime = vdsm_time.monotonic_time()
    while True:
        tries -= 1
        try:
            return func()
        except expectedException:
            if tries == 0:
                raise
            if (timeout > 0) and (
                    (vdsm_time.monotonic_time() - startTime) > timeout):
                raise
            if stopCallback is not None and stopCallback():
                raise
            time.sleep(sleep)
def _wait_for_state(self, state, deadline):
    # Block on the condition variable until the barrier reaches the
    # requested state. deadline is an absolute monotonic time, or None
    # to wait forever.
    while self._state != state:
        if deadline is not None:
            now = time.monotonic_time()
            if now >= deadline:
                raise Timeout("Timeout waiting for barrier")
            self._cond.wait(deadline - now)
        else:
            self._cond.wait()
def check_estimate(self, filename, compat):
    # Compare qcow2.estimate_size() with the real converted size: the
    # estimate must never be smaller than the actual size, and must not
    # overshoot by more than 0.1% of the original file size.
    start = time.monotonic_time()
    estimate = qcow2.estimate_size(filename)
    estimate_time = time.monotonic_time() - start
    start = time.monotonic_time()
    actual = converted_size(filename, compat)
    convert_time = time.monotonic_time() - start
    original_size = os.stat(filename).st_size
    error_pct = 100 * float(estimate - actual) / original_size
    print('estimate=%d, '
          'actual=%s, '
          'error_pct=%.2f%%, '
          'estimate_time=%.2f, '
          'convert_time=%.2f'
          % (estimate, actual, error_pct, estimate_time, convert_time),
          end=" ")
    assert estimate >= actual
    assert error_pct <= 0.1, error_pct
def _runnable_on_vm(self, vm):
    """Return True if the guest agent on this VM may be queried now."""
    # Back off for a while after a recent failure.
    since_failure = monotonic_time() - self.last_failure(vm.id)
    if since_failure < _THROTTLING_INTERVAL:
        return False
    # The domain must be running and its agent channel connected.
    return (vm.isDomainRunning()
            and self._channel_state[vm.id] == CHANNEL_CONNECTED)
def _wait_for_state(self, state, deadline):
    # Block on the condition variable until the barrier reaches the
    # requested state. deadline is an absolute monotonic time, or None
    # to wait forever.
    while self._state != state:
        if deadline is not None:
            now = time.monotonic_time()
            if now >= deadline:
                raise Timeout("Timeout waiting for barrier")
            self._cond.wait(deadline - now)
        else:
            self._cond.wait()
def check_estimate(self, filename, compat):
    # Compare qcow2.estimate_size() with the real converted size: the
    # estimate must never be smaller than the actual size, and must not
    # overshoot by more than 0.1% of the original file size.
    start = time.monotonic_time()
    estimate = qcow2.estimate_size(filename)
    estimate_time = time.monotonic_time() - start
    start = time.monotonic_time()
    actual = converted_size(filename, compat)
    convert_time = time.monotonic_time() - start
    original_size = os.stat(filename).st_size
    error_pct = 100 * float(estimate - actual) / original_size
    print('estimate=%d, '
          'actual=%s, '
          'error_pct=%.2f%%, '
          'estimate_time=%.2f, '
          'convert_time=%.2f'
          % (estimate, actual, error_pct, estimate_time, convert_time),
          end=" ")
    self.assertGreaterEqual(estimate, actual)
    self.assertGreaterEqual(0.1, error_pct)
def safe_poll(mp_connection, timeout):
    """
    This is a workaround until we get the PEP-475 fix for EINTR.  It
    ensures that a multiprocessing.connection.poll() will not return
    before the timeout due to an interruption.

    Returns True if there is any data to read from the pipe or if the
    pipe was closed.  Returns False if the timeout expired.
    """
    deadline = time.monotonic_time() + timeout
    remaining = timeout
    # Retry the poll until data arrives or the full timeout elapses,
    # shrinking the wait after each early (interrupted) wakeup.
    while True:
        if mp_connection.poll(remaining):
            return True
        remaining = deadline - time.monotonic_time()
        if remaining <= 0:
            return False
def safe_poll(mp_connection, timeout):
    """
    This is a workaround until we get the PEP-475 fix for EINTR.  It
    ensures that a multiprocessing.connection.poll() will not return
    before the timeout due to an interruption.

    Returns True if there is any data to read from the pipe or if the
    pipe was closed.  Returns False if the timeout expired.
    """
    deadline = time.monotonic_time() + timeout
    remaining = timeout
    # Retry the poll until data arrives or the full timeout elapses,
    # shrinking the wait after each early (interrupted) wakeup.
    while True:
        if mp_connection.poll(remaining):
            return True
        remaining = deadline - time.monotonic_time()
        if remaining <= 0:
            return False
def test_asyncproc_read(self):
    # Benchmark: read dd output through the AsyncProc blocking stdout
    # interface and verify the full amount arrived.
    p = commands.execCmd(["dd", "if=/dev/zero", "bs=%d" % self.BUFSIZE,
                          "count=%d" % self.COUNT],
                         sync=False, raw=True)
    start = monotonic_time()
    p.blocking = True
    received = 0
    while True:
        data = p.stdout.read(self.BUFSIZE)
        if not data:
            break
        received += len(data)
    p.wait()
    elapsed = monotonic_time() - start
    received_gb = received / float(1024**3)
    print("%.2fg in %.2f seconds (%.2fg/s)"
          % (received_gb, elapsed, received_gb / elapsed),
          end=" ")
    self.assertEqual(received, self.COUNT * self.BUFSIZE)
    self.assertEqual(p.returncode, 0)
def retry(func, expectedException=Exception, tries=None,
          timeout=None, sleep=1, stopCallback=None):
    """
    Retry a function. Wraps the retry logic so you don't have to
    implement it each time you need it.

    :param func: The callable to run.
    :param expectedException: The exception you expect to receive when
        the function fails.
    :param tries: The number of times to try. None, 0 or -1 means
        infinite.
    :param timeout: The time you want to spend waiting. This **WILL NOT**
        stop the method. It will just not run it if it ended after the
        timeout.
    :param sleep: Time to sleep between calls in seconds.
    :param stopCallback: A function that takes no parameters and causes
        the method to stop retrying when it returns with a positive
        value.
    """
    # Normalize "infinite" markers: -1 never reaches 0 when decremented.
    if tries in [0, None]:
        tries = -1
    if timeout in [0, None]:
        timeout = -1
    startTime = vdsm_time.monotonic_time()
    while True:
        tries -= 1
        try:
            return func()
        except expectedException:
            if tries == 0:
                raise
            if (timeout > 0) and (
                    (vdsm_time.monotonic_time() - startTime) > timeout):
                raise
            if stopCallback is not None and stopCallback():
                raise
            time.sleep(sleep)
def wait(self, timeout=None):
    """
    Wait for all processes to terminate. If timeout is provided, it is
    set as the upper limit for the wait period.

    Returns True when all processes terminated, False when the timeout
    expired first.
    """
    if timeout is not None:
        deadline = monotonic_time() + timeout
        for p in self._procs:
            # Each wait gets whatever remains of the shared deadline;
            # a non-positive remainder makes wait() expire immediately.
            try:
                p.wait(deadline - monotonic_time())
            except subprocess.TimeoutExpired:
                return False
    else:
        for p in self._procs:
            p.wait()
    return True
def uninterruptible_poll(pollfun, timeout=-1): """ This wrapper is used to handle the interrupt exceptions that might occur during a poll system call. The wrapped function must be defined as poll([timeout]) where the special timeout value 0 is used to return immediately and -1 is used to wait indefinitely. """ # When the timeout < 0 we shouldn't compute a new timeout after an # interruption. endtime = None if timeout < 0 else time.monotonic_time() + timeout while True: try: return pollfun(timeout) except (IOError, select.error) as e: if e.args[0] != errno.EINTR: raise if endtime is not None: timeout = max(0, endtime - time.monotonic_time())
def wait(self, timeout=None):
    """
    Wait for all processes to terminate. If timeout is provided, it is
    set as the upper limit for the wait period.

    Returns True when all processes terminated, False when the timeout
    expired first.
    """
    if timeout is not None:
        deadline = monotonic_time() + timeout
        for p in self._procs:
            # Each wait gets whatever remains of the shared deadline;
            # a non-positive remainder makes wait() expire immediately.
            try:
                p.wait(deadline - monotonic_time())
            except subprocess.TimeoutExpired:
                return False
    else:
        for p in self._procs:
            p.wait()
    return True
def test_plain_read(self):
    # Benchmark: read dd output with raw os.read() calls and verify the
    # full amount arrived.
    p = Popen(["dd", "if=/dev/zero", "bs=%d" % self.BUFSIZE,
               "count=%d" % self.COUNT],
              stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    start = monotonic_time()
    received = 0
    while True:
        data = os.read(p.stdout.fileno(), self.BUFSIZE)
        if not data:
            break
        received += len(data)
    p.wait()
    elapsed = monotonic_time() - start
    received_gb = received / float(1024**3)
    print("%.2fg in %.2f seconds (%.2fg/s)"
          % (received_gb, elapsed, received_gb / elapsed),
          end=" ")
    self.assertEqual(received, self.COUNT * self.BUFSIZE)
    self.assertEqual(p.returncode, 0)
def time(self):
    """
    Return the time according to the event loop's clock.

    This is a float expressed in seconds since an epoch, but the
    epoch, precision, accuracy and drift are unspecified and may
    differ per event loop.

    Changes from Python 3:
    - Use Python 2 compatible monotonic time
    """
    # 'time' here resolves to the module imported at file level
    # (presumably vdsm's common.time), not to this method.
    return time.monotonic_time()
def test_asyncproc_write(self):
    # Benchmark: stream COUNT * BUFSIZE zero bytes into dd's stdin via
    # the AsyncProc interface and verify dd exits cleanly.
    #
    # Fix: dd's block size was "bs=%d" % self.COUNT; it must be
    # self.BUFSIZE so the reader's block size matches the BUFSIZE-sized
    # chunks written below, consistent with test_write().
    p = commands.execCmd(["dd", "of=/dev/null", "bs=%d" % self.BUFSIZE],
                         sync=False, raw=True)
    start = monotonic_time()
    total = self.COUNT * self.BUFSIZE
    sent = 0
    with io.open("/dev/zero", "rb") as f:
        while sent < total:
            n = min(total - sent, self.BUFSIZE)
            data = f.read(n)
            if not data:
                raise RuntimeError("/dev/zero closed?!")
            p.stdin.write(data)
            sent += len(data)
    p.stdin.flush()
    p.stdin.close()
    p.wait()
    elapsed = monotonic_time() - start
    sent_gb = sent / float(1024**3)
    print("%.2fg in %.2f seconds (%.2fg/s)"
          % (sent_gb, elapsed, sent_gb / elapsed),
          end=" ")
    self.assertEqual(p.returncode, 0)
def __init__(
    self,
    sslctx,
    handshake_finished_handler,
    handshake_timeout=SSL_HANDSHAKE_TIMEOUT,
):
    # Absolute monotonic time after which the handshake is abandoned.
    self._give_up_at = monotonic_time() + handshake_timeout
    self._has_been_set_up = False
    self._is_handshaking = True
    # The handshake needs both read and write events until it completes.
    self.want_read = True
    self.want_write = True
    self._sslctx = sslctx
    # Callback invoked once the SSL handshake finishes.
    self._handshake_finished_handler = handshake_finished_handler
def wait(self, timeout=None):
    """
    Wait for all processes to terminate, polling once per second when a
    timeout is given. Returns False when the timeout expired before all
    processes terminated, True otherwise.
    """
    if timeout is not None:
        deadline = monotonic_time() + timeout
    else:
        deadline = None
    for p in self._procs:
        if deadline is not None:
            # NOTE: CPopen doesn't support timeout argument.
            while monotonic_time() < deadline:
                p.poll()
                if p.returncode is not None:
                    break
                time.sleep(1)
        else:
            p.wait()
    if deadline is not None:
        if deadline < monotonic_time() or self.returncode is None:
            # Timed out
            return False
    return True
def _nic_traffic(vm_obj, nic, start_sample, start_index,
                 end_sample, end_index):
    """
    Return per-nic statistics packed into a dictionary
    - macAddr
    - name
    - speed
    - state
    - {rx,tx}Errors
    - {rx,tx}Dropped
    - {rx,tx}Rate
    - {rx,tx}
    - sampleTime
    Produce as many statistics as possible, skipping errors.
    Expect two samplings `start_sample' and `end_sample'
    which must be data in the format of the libvirt bulk stats.
    Expects the indexes of the nic whose statistics needs to be
    produced, for each sampling:
    `start_index' for `start_sample', `end_index' for `end_sample'.
    `vm_obj' is the Vm instance to which the nic belongs.
    `name', `model' and `mac' are the attributes of the said nic.
    Those three value are reported in the output stats.
    Return None on error,  if any needed data is missing or wrong.
    Return the `stats' dictionary on success.
    """
    if_stats = nic_info(nic)
    # Each group is guarded separately so missing counters skip only
    # that group instead of aborting the whole report.
    with _skip_if_missing_stats(vm_obj):
        if_stats['rxErrors'] = str(end_sample['net.%d.rx.errs'
                                              % end_index])
        if_stats['rxDropped'] = str(end_sample['net.%d.rx.drop'
                                               % end_index])
        if_stats['txErrors'] = str(end_sample['net.%d.tx.errs'
                                              % end_index])
        if_stats['txDropped'] = str(end_sample['net.%d.tx.drop'
                                               % end_index])
    with _skip_if_missing_stats(vm_obj):
        if_stats['rx'] = str(end_sample['net.%d.rx.bytes' % end_index])
        if_stats['tx'] = str(end_sample['net.%d.tx.bytes' % end_index])
    if_stats['sampleTime'] = monotonic_time()
    return if_stats