def submit_job(self, times, fn, *args, **kwargs):
    self.sw = timeutils.StopWatch()
    self.sw.start()
    self.times = times
    return [self.submit(measure_job, fn, *args, **kwargs)
            for i in six.moves.range(times)]
def test_pause_resume(self, mock_now):
    mock_now.side_effect = monotonic_iter()
    watch = timeutils.StopWatch()
    watch.start()
    watch.stop()
    elapsed = watch.elapsed()
    self.assertAlmostEqual(elapsed, watch.elapsed())
    watch.resume()
    self.assertNotEqual(elapsed, watch.elapsed())
def _run_until_dead(self, executor, max_dispatches=None):
    total_dispatched = 0
    if max_dispatches is None:
        # NOTE(TheSriram): if max_dispatches is not set,
        # then the conductor will run indefinitely, and not
        # stop after 'n' number of dispatches
        max_dispatches = -1
    dispatch_gen = iter_utils.iter_forever(max_dispatches)
    is_stopped = self._wait_timeout.is_stopped
    try:
        # Don't even do any work in the first place...
        if max_dispatches == 0:
            raise StopIteration
        fresh_period = timeutils.StopWatch(
            duration=self.REFRESH_PERIODICITY)
        fresh_period.start()
        while not is_stopped():
            any_dispatched = False
            if fresh_period.expired():
                ensure_fresh = True
                fresh_period.restart()
            else:
                ensure_fresh = False
            job_it = itertools.takewhile(
                self._can_claim_more_jobs,
                self._jobboard.iterjobs(ensure_fresh=ensure_fresh))
            for job in job_it:
                self._log.debug("Trying to claim job: %s", job)
                try:
                    self._jobboard.claim(job, self._name)
                except (excp.UnclaimableJob, excp.NotFound):
                    self._log.debug("Job already claimed or"
                                    " consumed: %s", job)
                else:
                    try:
                        fut = executor.submit(self._dispatch_job, job)
                    except RuntimeError:
                        with excutils.save_and_reraise_exception():
                            self._log.warn("Job dispatch submitting"
                                           " failed: %s", job)
                            self._try_finish_job(job, False)
                    else:
                        fut.job = job
                        self._dispatched.add(fut)
                        any_dispatched = True
                        fut.add_done_callback(
                            functools.partial(self._on_job_done, job))
                        total_dispatched = next(dispatch_gen)
            if not any_dispatched and not is_stopped():
                self._wait_timeout.wait()
    except StopIteration:
        # This will be raised from 'dispatch_gen' if it reaches its
        # max dispatch number (which implies we should do no more work).
        with excutils.save_and_reraise_exception():
            if max_dispatches >= 0 and total_dispatched >= max_dispatches:
                self._log.info("Maximum dispatch limit of %s reached",
                               max_dispatches)
def pop(self, timeout):
    with timeutils.StopWatch(timeout) as stop_watcher:
        with self._lock:
            while len(self._queue) == 0:
                if stop_watcher.expired() or not self._started:
                    return None
                self._pop_wake_condition.wait(
                    stop_watcher.leftover(return_none=True))
            return self._queue.pop()
def map_job(self, fn, iterable, **kwargs):
    self.sw = timeutils.StopWatch()
    r = []
    self.times = 0
    self.sw.start()
    for item in iterable:
        r.append(self.submit(measure_job, fn, item, **kwargs))
        self.times += 1
    return r
def take_action(self, parsed_args):
    pool = BenchmarkPool(parsed_args.workers)
    LOG.info("Sending measures")
    if parsed_args.timestamp_end <= parsed_args.timestamp_start:
        raise ValueError("End timestamp must be after start timestamp")
    # If batch size is bigger than the number of measures to send, we
    # reduce it to make sure we send something.
    if parsed_args.batch > parsed_args.count:
        parsed_args.batch = parsed_args.count
    start = int(parsed_args.timestamp_start.strftime("%s"))
    end = int(parsed_args.timestamp_end.strftime("%s"))
    count = parsed_args.count
    if (end - start) < count:
        raise ValueError("The specified time range is not large enough "
                         "for the number of points")
    random_values = (random.randint(-2**32, 2**32)
                     for _ in six.moves.range(count))
    all_measures = ({"timestamp": ts, "value": v}
                    for ts, v in six.moves.zip(
                        six.moves.range(start, end,
                                        (end - start) // count),
                        random_values))
    measures = grouper(all_measures, parsed_args.batch)
    futures = pool.map_job(
        functools.partial(self.app.client.metric.add_measures,
                          parsed_args.metric),
        measures, resource_id=parsed_args.resource_id)
    _, runtime, stats = pool.wait_job("push", futures)
    stats['measures per request'] = parsed_args.batch
    stats['measures push speed'] = (
        "%.2f push/s" % (
            parsed_args.batch * pool.statistics.executed / runtime))
    if parsed_args.wait:
        with timeutils.StopWatch() as sw:
            while True:
                status = self.app.client.status.get()
                remaining = int(status['storage']['summary']['measures'])
                if remaining == 0:
                    stats['extra wait to process measures'] = (
                        "%s seconds" % sw.elapsed())
                    break
                else:
                    LOG.info("Remaining measures to be processed: %d"
                             % remaining)
                time.sleep(1)
    return self.dict2columns(stats)
def run(self):
    time.sleep(self.workerId)
    while not self.closeEvent.is_set():
        with timeutils.StopWatch() as timer:
            self.process()
        # Wait out the rest of the interval, accounting for how long
        # process() took (elapsed() takes no cap here; the original
        # passed 0 as a maximum, which always yields 0).
        elapsedTime = self.interval - timer.elapsed()
        timeout = max(0, elapsedTime)
        self.closeEvent.wait(timeout)
    self.closeEvent.set()
def join(self, timeout=None):
    with timeutils.StopWatch(duration=timeout) as w, self._running_cond:
        while self._running:
            self._running_cond.wait(w.leftover(return_none=True))
            # Thread.join() does not raise an exception on timeout. It is
            # the caller's responsibility to check is_alive().
            if w.expired():
                return
def _receiver(self, item_type, item_name, state):
    if state == states.PENDING:
        self._timers[item_type].pop(item_name, None)
    elif state in STARTING_STATES:
        self._timers[item_type][item_name] = timeutils.StopWatch().start()
    elif state in FINISHED_STATES:
        timer = self._timers[item_type].pop(item_name, None)
        if timer is not None:
            timer.stop()
            self._record_ending(timer, item_type, item_name, state)
def _report_state(self):
    try:
        with timeutils.StopWatch() as w:
            self.state_rpc.report_state(self.context, self.agent_state)
        LOG.debug("Reporting state took {:1.3g}s".format(w.elapsed()))
        self.agent_state.pop('start_flag', None)
    except (oslo_messaging.MessagingTimeout, oslo_messaging.RemoteError,
            oslo_messaging.MessageDeliveryFailure):
        LOG.exception(_LE("Failed reporting state!"))
def wait_n(self, n, timeout=1.0):
    """Wait until at least n notifications have been received, and return
    them.

    May return less than n notifications if timeout is reached.
    """
    with timeutils.StopWatch(timeout) as timer:
        with self._cond:
            while len(self._notifications) < n and not timer.expired():
                self._cond.wait(timer.leftover())
    return copy.copy(self._notifications)
def wait(self, timeout=None):
    with timeutils.StopWatch(timeout) as sw:
        while True:
            event = self._event
            with _eventlet.timeout.Timeout(sw.leftover(return_none=True),
                                           False):
                event.wait()
            if event is not self._event:
                continue
            return self.is_set()
def run(self):
    if self._coordinator:
        return
    self._stop.clear()
    self._coordinator = LeaderElectionCoordinator()
    self._coordinator.start()
    self._coordinator.ensure_group(self.election_key)
    self._coordinator.join_group()
    self._coordinator.register_on_start_leading_callback(
        self.callbacks.on_started_leading)
    # Register internal callback to notify being a leader
    self._coordinator.register_on_start_leading_callback(
        self.set_leader_callback)
    while not self._stop.is_set():
        with timeutils.StopWatch() as w:
            LOG.debug("sending heartbeats for leader election")
            wait_until_next_beat = self._coordinator.send_heartbeat()
        ran_for = w.elapsed()
        has_to_sleep_for = wait_until_next_beat - ran_for
        if has_to_sleep_for < 0:
            LOG.warning(
                "Heart beating took too long to execute (it ran for"
                " %0.2f seconds which is %0.2f seconds longer than"
                " the next heartbeat idle time). This may cause"
                " timeouts (in locks, leadership, ...) to"
                " happen (which will not end well).",
                ran_for, ran_for - wait_until_next_beat)
        # Check if coordinator is still a leader
        if self.leader and not self._coordinator.is_still_leader():
            self.on_stopped_leading()
            self.leader = False
            return
        self._coordinator.start_leader_watch()
        if self.leader:
            # Adjust time for leader
            has_to_sleep_for = has_to_sleep_for / 2
        LOG.debug(
            'resting after leader watch as leader=%(leader)s '
            'for heartbeat timeout of %(timeout)s sec',
            {'timeout': has_to_sleep_for, 'leader': self.leader})
        self._stop.wait(has_to_sleep_for)
def __init__(self, context, node_id, shared=False,
             driver_name=None, purpose='unspecified action'):
    """Create a new TaskManager.

    Acquire a lock on a node. The lock can be either shared or
    exclusive. Shared locks may be used for read-only or
    non-disruptive actions only, and must be considerate to what
    other threads may be doing on the same node at the same time.

    :param context: request context
    :param node_id: ID or UUID of node to lock.
    :param shared: Boolean indicating whether to take a shared or exclusive
                   lock. Default: False.
    :param driver_name: The name of the driver to load, if different from
                        the Node's current driver.
    :param purpose: human-readable purpose to put to debug logs.
    :raises: DriverNotFound
    :raises: NodeNotFound
    :raises: NodeLocked
    """
    self._spawn_method = None
    self._on_error_method = None

    self.context = context
    self._node = None
    self.node_id = node_id
    self.shared = shared

    self.fsm = states.machine.copy()
    self._purpose = purpose
    self._debug_timer = timeutils.StopWatch()

    try:
        node = objects.Node.get(context, node_id)
        LOG.debug("Attempting to get %(type)s lock on node %(node)s (for "
                  "%(purpose)s)",
                  {'type': 'shared' if shared else 'exclusive',
                   'node': node.uuid, 'purpose': purpose})
        if not self.shared:
            self._lock()
        else:
            self._debug_timer.restart()
            self.node = node
        self.ports = objects.Port.list_by_node_id(context, self.node.id)
        self.portgroups = objects.Portgroup.list_by_node_id(context,
                                                            self.node.id)
        self.driver = driver_factory.build_driver_for_task(
            self, driver_name=driver_name)
    except Exception:
        with excutils.save_and_reraise_exception():
            self.release_resources()
def test_send_call_message(self):
    message = pika_drv_msg.RpcPikaOutgoingMessage(
        self._pika_engine, self._message, self._context
    )

    expiration = 1
    stopwatch = timeutils.StopWatch(duration=expiration).start()

    result = "it is a result"
    reply_queue_name = "reply_queue_name"

    future = futures.Future()
    future.set_result(result)
    reply_listener = mock.Mock()
    reply_listener.register_reply_waiter.return_value = future
    reply_listener.get_reply_qname.return_value = reply_queue_name

    res = message.send(
        exchange=self._exchange,
        routing_key=self._routing_key,
        reply_listener=reply_listener,
        stopwatch=stopwatch,
        retrier=None
    )

    self.assertEqual(result, res)

    self._pika_engine.connection_with_confirmation_pool.acquire(
    ).__enter__().channel.publish.assert_called_once_with(
        body=mock.ANY,
        exchange=self._exchange, mandatory=True,
        properties=mock.ANY,
        routing_key=self._routing_key
    )

    body = self._pika_engine.connection_with_confirmation_pool.acquire(
    ).__enter__().channel.publish.call_args[1]["body"]
    self.assertEqual(
        b'{"_$_request_id": 555, "_$_token": "it is a token", '
        b'"msg_str": "hello", "msg_type": 1}',
        body
    )

    props = self._pika_engine.connection_with_confirmation_pool.acquire(
    ).__enter__().channel.publish.call_args[1]["properties"]
    self.assertEqual(props.content_encoding, 'utf-8')
    self.assertEqual(props.content_type, 'application/json')
    self.assertEqual(props.delivery_mode, 1)
    self.assertTrue(expiration * 1000 - float(props.expiration) < 100)
    self.assertEqual(props.headers, {'version': '1.0'})
    self.assertEqual(props.correlation_id, message.msg_id)
    self.assertEqual(props.reply_to, reply_queue_name)
    self.assertTrue(props.message_id)
def __init__(self, listener, ctxt, message, unique_id, msg_id, reply_q,
             obsolete_reply_queues):
    super(AMQPIncomingMessage, self).__init__(ctxt, message)
    self.listener = listener

    self.unique_id = unique_id
    self.msg_id = msg_id
    self.reply_q = reply_q
    self._obsolete_reply_queues = obsolete_reply_queues
    self.stopwatch = timeutils.StopWatch()
    self.stopwatch.start()
def run(self):
    self._configure()
    # Delay startup so workers are jittered.
    time.sleep(self.startup_delay)
    while not self._shutdown.is_set():
        with timeutils.StopWatch() as timer:
            self._run_job()
        self._shutdown.wait(
            max(0, self.interval_delay - timer.elapsed()))
    self._shutdown_done.set()
def setUp(self):
    self._pika_engine = mock.MagicMock()
    self._exchange = "it is exchange"
    self._routing_key = "it is routing key"
    self._expiration = 1
    self._stopwatch = (
        timeutils.StopWatch(duration=self._expiration).start())
    self._mandatory = object()
    self._message = {"msg_type": 1, "msg_str": "hello"}
    self._context = {"request_id": 555, "token": "it is a token"}
def _task_receiver(self, state, details):
    task_name = details['task_name']
    if state == states.PENDING:
        self._timers.pop(task_name, None)
    elif state in STARTING_STATES:
        self._timers[task_name] = timeutils.StopWatch().start()
    elif state in FINISHED_STATES:
        timer = self._timers.pop(task_name, None)
        if timer is not None:
            timer.stop()
            self._record_ending(timer, task_name)
def read(id):
    global rwlock
    with timeutils.StopWatch(duration=dur) as w:
        while not w.expired():
            with rwlock.read_lock() as l:
                print("%d reading start:" % id)
                time.sleep(1 + random.random())
                n = cache[-1] if len(cache) else -1
                time.sleep(1 + random.random())
                print("%d reading over : %d" % (id, n))
            time.sleep(1)
def _provisioning_timer(self, timeout):
    # REVISIT(ivc): consider integrating with Retry
    interval = 3
    max_interval = 15
    with timeutils.StopWatch(duration=timeout) as timer:
        while not timer.expired():
            yield timer.leftover()
            interval = interval * 2 * random.gauss(0.8, 0.05)
            interval = min(interval, max_interval)
            interval = min(interval, timer.leftover())
            if interval:
                time.sleep(interval)
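# A minimal sketch of how a backoff generator like _provisioning_timer
# above might be consumed; poll_ready and the 60-second budget are
# hypothetical stand-ins, not part of the original code.
def wait_until_ready(self, poll_ready, timeout=60):
    # Each iteration reports how much of the overall budget is left; the
    # generator itself sleeps with jittered exponential backoff between
    # polls and stops once the StopWatch duration expires.
    for remaining in self._provisioning_timer(timeout):
        if poll_ready():  # hypothetical readiness check
            return True
    return False  # overall timeout expired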
def acquire(self, blocking=True, shared=False):
    if shared:
        raise tooz.NotImplemented
    blocking, timeout = utils.convert_blocking(blocking)
    if timeout is not None:
        watch = timeutils.StopWatch(duration=timeout)
        watch.start()
    else:
        watch = None

    while True:
        if self.acquired:
            # We already acquired the lock. Just go ahead and wait for ever
            # if blocking != False using the last index.
            lastindex = self._node['modifiedIndex']
        else:
            try:
                reply = self.client.put(
                    self._lock_url,
                    make_url=False,
                    timeout=watch.leftover() if watch else None,
                    data={"ttl": self.ttl,
                          "prevExist": "false"})
            except requests.exceptions.RequestException:
                if not watch or watch.leftover() == 0:
                    return False

            # We got the lock!
            if reply.get("errorCode") is None:
                with self._lock:
                    self._node = reply['node']
                    self.coord._acquired_locks.add(self)
                return True

            # No lock, somebody got it, wait for it to be released
            lastindex = reply['index'] + 1

        # We didn't get the lock and we don't want to wait
        if not blocking:
            return False

        # Ok, so let's wait a bit (or forever!)
        try:
            reply = self.client.get(
                self._lock_url + "?wait=true&waitIndex=%d" % lastindex,
                make_url=False,
                timeout=watch.leftover() if watch else None)
        except requests.exceptions.RequestException:
            if not watch or watch.expired():
                return False
def reply(self, reply=None, failure=None):
    """Send back reply to the RPC client

    :param reply: Dictionary, reply. In case of exception should be None
    :param failure: Tuple, should be a sys.exc_info() tuple.
        Should be None if RPC request was successfully processed.

    :return RpcReplyPikaIncomingMessage, message with reply
    """
    if self.reply_q is None:
        return

    reply_outgoing_message = RpcReplyPikaOutgoingMessage(
        self._pika_engine, self.msg_id, reply=reply, failure_info=failure,
        content_type=self._content_type,
    )

    def on_exception(ex):
        if isinstance(ex, pika_drv_exc.ConnectionException):
            LOG.warning(
                "Connectivity related problem during reply sending. %s",
                ex)
            return True
        else:
            return False

    if self._pika_engine.rpc_reply_retry_attempts:
        retrier = tenacity.retry(
            stop=(tenacity.stop_never
                  if self._pika_engine.rpc_reply_retry_attempts == -1
                  else tenacity.stop_after_attempt(
                      self._pika_engine.rpc_reply_retry_attempts)),
            retry=tenacity.retry_if_exception(on_exception),
            wait=tenacity.wait_fixed(
                self._pika_engine.rpc_reply_retry_delay))
    else:
        retrier = None

    try:
        timeout = (None if self.expiration_time is None else
                   max(self.expiration_time - time.time(), 0))
        with timeutils.StopWatch(duration=timeout) as stopwatch:
            reply_outgoing_message.send(reply_q=self.reply_q,
                                        stopwatch=stopwatch,
                                        retrier=retrier)
        LOG.debug("Message [id:'%s'] replied to '%s'.",
                  self.msg_id, self.reply_q)
    except Exception:
        LOG.exception("Message [id:'%s'] wasn't replied to : %s",
                      self.msg_id, self.reply_q)
def run_server(server, duration=None):
    try:
        server.start()
        if duration:
            with timeutils.StopWatch(duration) as stop_watch:
                while not stop_watch.expired():
                    time.sleep(1)
            server.stop()
        server.wait()
    except KeyboardInterrupt:  # caught SIGINT
        LOG.info('Caught SIGINT, terminating')
        time.sleep(1)  # wait for stats collector to process the last second
def wrapper(in_self, timeout=None, prefetch_size=1):
    incomings = []
    watch = timeutils.StopWatch(duration=timeout)
    with watch:
        for __ in compat_range(prefetch_size):
            msg = func(in_self, timeout=watch.leftover(return_none=True))
            if msg is not None:
                incomings.append(msg)
            else:
                # timeout reached or listener stopped
                break
    return incomings
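# A standalone sketch of the same leftover()-based budgeting that the
# wrapper above applies: one StopWatch caps the whole batch and each
# blocking call only receives whatever time remains. fetch_one and
# batch_size are hypothetical names used for illustration only.
def drain(fetch_one, batch_size, timeout=None):
    results = []
    with timeutils.StopWatch(duration=timeout) as watch:
        for _ in range(batch_size):
            # leftover(return_none=True) yields None when no duration was
            # set, which most blocking APIs treat as "wait forever".
            item = fetch_one(timeout=watch.leftover(return_none=True))
            if item is None:  # per-call timeout hit; stop early
                break
            results.append(item)
    return results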
def test_has_started_stopped(self):
    watch = timeutils.StopWatch()
    self.assertFalse(watch.has_started())
    self.assertFalse(watch.has_stopped())
    watch.start()
    self.assertTrue(watch.has_started())
    self.assertFalse(watch.has_stopped())
    watch.stop()
    self.assertTrue(watch.has_stopped())
    self.assertFalse(watch.has_started())
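# For reference, a small standalone sketch (assuming oslo.utils is
# installed) exercising the StopWatch calls the snippets above rely on:
# duration-based expiry, leftover(), and the stop/resume/restart
# transitions. The sleep lengths are arbitrary illustration values.
import time

from oslo_utils import timeutils

w = timeutils.StopWatch(duration=0.5)  # watch with an expiry budget
w.start()
time.sleep(0.1)
print(w.elapsed())    # roughly 0.1, watch still running
print(w.leftover())   # roughly 0.4 seconds remaining
print(w.expired())    # False until the 0.5 second duration passes

w.stop()              # freezes elapsed() at the stop time
print(w.elapsed())
w.resume()            # continues accumulating from the frozen value
w.restart()           # resets and starts counting again from zero

with timeutils.StopWatch() as timer:  # context manager starts/stops it
    time.sleep(0.05)
print(timer.elapsed())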
def _run(self):
    replier = self.message.reply_text
    m = self.message.make_manual_progress_bar()
    with timeutils.StopWatch() as w:
        replier("Spinner initiated.", threaded=True, prefixed=False)
        while not self.dead.is_set():
            self.dead.wait(self.update_frequency)
            if self.dead.is_set():
                break
            else:
                m.update("%0.2f seconds" % w.elapsed())
        replier("Spinner stopped.", threaded=True, prefixed=False)
def poll(self, timeout=None):
    """Main method of this class - consumes message from RabbitMQ

    :param: timeout: float, seconds, timeout for waiting new incoming
        message, None means wait forever
    :return: list of PikaIncomingMessage, RabbitMQ messages
    """
    with timeutils.StopWatch(timeout) as stop_watch:
        while True:
            with self._lock:
                if self._message_queue:
                    return self._message_queue.pop(0)

                if stop_watch.expired():
                    return None

                try:
                    if self._started:
                        if self._channel is None:
                            self._reconnect()
                        # we need some time_limit here, not too small to
                        # avoid a lot of not needed iterations but not too
                        # large to release lock time to time and give a
                        # chance to perform another method waiting this
                        # lock
                        self._connection.process_data_events(
                            time_limit=0.25)
                    else:
                        # consumer is stopped so we don't expect new
                        # messages, just process already sent events
                        if self._channel is not None:
                            self._connection.process_data_events(
                                time_limit=0)
                        # and return if we don't see new messages
                        if not self._message_queue:
                            return None
                except pika_drv_exc.EstablishConnectionException as e:
                    LOG.warning(
                        "Problem during establishing connection for pika "
                        "poller %s", e, exc_info=True)
                    time.sleep(
                        self._pika_engine.host_connection_reconnect_delay)
                except pika_drv_exc.ConnectionException:
                    self._cleanup()
                    raise
                except pika_drv_cmns.PIKA_CONNECTIVITY_ERRORS:
                    self._cleanup()
                    raise
def pop():
    n = 0
    global rwlock
    with timeutils.StopWatch(duration=dur) as w:
        while not w.expired():
            with rwlock.write_lock() as l:
                print("poping start :")
                time.sleep(random.random())
                n = cache.pop() if len(cache) else -1
                time.sleep(random.random())
                print("poping over : %d" % n)
            time.sleep(1)
def run_server(server, duration=None):
    global IS_RUNNING
    SERVERS.append(server)
    server.start()
    if duration:
        with timeutils.StopWatch(duration) as stop_watch:
            while not stop_watch.expired() and IS_RUNNING:
                time.sleep(1)
        server.stop()
        IS_RUNNING = False
    server.wait()
    LOG.info('The server is terminating')
    time.sleep(1)  # wait for stats collector to process the last second