def test_event_api_compat(self, mock_eventlet):
    """Eventlet-backed Event must expose threading.Event's public API."""
    patched = mock.patch('oslo_utils.eventletutils.is_monkey_patched',
                         return_value=True)
    with patched:
        e_event = eventletutils.Event()
    self.assertIsInstance(e_event, eventletutils.EventletEvent)

    t_event = eventletutils.Event()
    t_event_cls = threading.Event if six.PY3 else threading._Event
    self.assertIsInstance(t_event, t_event_cls)

    # Every public callable on the threading event must also exist on
    # the eventlet-backed event.
    for attr in dir(t_event):
        if attr.startswith("_") or not callable(getattr(t_event, attr)):
            continue
        self.assertTrue(hasattr(e_event, attr))

    # Ensure set() allows multiple invocations, same as in
    # threading implementation.
    for _ in range(2):
        e_event.set()
        self.assertTrue(e_event.isSet())
def __init__(self, conf):
    """Set up the Kombu RPC server from the supplied configuration.

    :param conf: configuration object providing ``topic`` and ``host``
    """
    super(KombuRPCServer, self).__init__(conf)

    CONF.register_opts(_pool_opts)
    kombu_base.set_transport_options()

    self._register_mistral_serialization()

    self.topic = conf.topic
    self.server_id = conf.host

    self._hosts = kombu_hosts.KombuHosts(CONF)

    self._executor_threads = CONF.executor_thread_pool_size
    self.exchange = CONF.control_exchange
    # TODO(rakhmerov): We shouldn't rely on any properties related
    # to oslo.messaging. Only "transport_url" should matter.
    self.durable_queue = CONF.oslo_messaging_rabbit.amqp_durable_queues
    self.auto_delete = CONF.oslo_messaging_rabbit.amqp_auto_delete
    self.routing_key = self.topic
    self.channel = None
    self.conn = None
    # Events used to signal when the consume loop is running/stopped.
    self._running = eventletutils.Event()
    self._stopped = eventletutils.Event()
    self.endpoints = []
    self._worker = None
    self._thread = None

    # TODO(ddeja): Those 2 options should be gathered from config.
    self._sleep_time = 1
    self._max_sleep_time = 10
def __init__(self, driver, conn):
    """Create an AMQP listener bound to an existing connection.

    :param driver: rabbit driver that owns this listener (supplies
        ``prefetch_size``)
    :param conn: AMQP connection to consume from
    """
    super(AMQPListener, self).__init__(driver.prefetch_size)
    self.driver = driver
    self.conn = conn
    # Detects duplicate message deliveries by message id.
    self.msg_id_cache = rpc_amqp._MsgIdCache()
    self.incoming = []
    # _shutdown: request to stop polling; _shutoff: polling has ended.
    self._shutdown = eventletutils.Event()
    self._shutoff = eventletutils.Event()
    self._obsolete_reply_queues = ObsoleteReplyQueuesCache()
    self._message_operations_handler = MessageOperationsHandler(
        "AMQPListener")
    self._current_timeout = ACK_REQUEUE_EVERY_SECONDS_MIN
def _do_test_heartbeat_sent(self, fake_ensure_connection,
                            fake_heartbeat_support, fake_heartbeat,
                            fake_logger, heartbeat_side_effect=None,
                            info=None):
    """Common body for the heartbeat tests.

    :param heartbeat_side_effect: optional exception raised from the
        faked heartbeat check to simulate a failing heartbeat
    :param info: expected first argument of the LOG.info call when a
        side effect is injected
    """
    event = eventletutils.Event()

    # Replace the real heartbeat check: signal the test, then
    # optionally fail to exercise the reconnect path.
    def heartbeat_check(rate=2):
        event.set()
        if heartbeat_side_effect:
            raise heartbeat_side_effect

    fake_heartbeat.side_effect = heartbeat_check

    transport = oslo_messaging.get_transport(self.conf,
                                             'kombu+memory:////')
    self.addCleanup(transport.cleanup)
    conn = transport._driver._get_connection()
    conn.ensure(method=lambda: True)
    # Block until the heartbeat thread actually ran once.
    event.wait()
    conn._heartbeat_stop()

    # check heartbeat have been called
    self.assertLess(0, fake_heartbeat.call_count)

    if not heartbeat_side_effect:
        self.assertEqual(1, fake_ensure_connection.call_count)
        self.assertEqual(2, fake_logger.debug.call_count)
        self.assertEqual(0, fake_logger.info.call_count)
    else:
        # A failing heartbeat forces one extra reconnect and an info log.
        self.assertEqual(2, fake_ensure_connection.call_count)
        self.assertEqual(2, fake_logger.debug.call_count)
        self.assertEqual(1, fake_logger.info.call_count)
        self.assertIn(mock.call(info, mock.ANY),
                      fake_logger.info.mock_calls)
def test_timeout_running(self):
    """wait() with a timeout must raise TaskTimeout while another
    thread is still blocked completing the same task.
    """
    # Start then stop the server so an executor exists and wait() has
    # real shutdown work to perform.
    self.server.start()
    self.server.stop()

    entered_shutdown = eventletutils.Event()

    # Replace the executor's shutdown with one that signals us and
    # then blocks for a long time.
    def stalled_shutdown(wait):
        entered_shutdown.set()
        eventlet.sleep(10)

    self.executors[0].shutdown = stalled_shutdown

    # Run wait() in a background thread and pause until it is inside
    # the stalled shutdown.
    background = eventlet.spawn(self.server.wait)
    entered_shutdown.wait()

    # A second wait() with a timeout must give up instead of blocking.
    self.assertRaises(server_module.TaskTimeout,
                      self.server.wait, timeout=1)
    background.kill()
def __init__(self, connections, callback_queue):
    """Round-robin over *connections*, consuming from *callback_queue*."""
    self._callback_queue = callback_queue
    self._thread = None
    self._results = {}
    # Cycle endlessly through the available connections and make the
    # first one the active connection.
    self._connections = itertools.cycle(connections)
    self.connection = six.next(self._connections)
    # Signalled once the consumer is ready to receive replies.
    self.ready = eventletutils.Event()
def _heartbeat_start(self):
    """Start the background heartbeat thread.

    The thread runs ``_heartbeat_thread_job`` until
    ``_heartbeat_exit_event`` is set; it is a daemon thread so it
    never blocks interpreter shutdown.
    """
    self._heartbeat_exit_event = eventletutils.Event()
    self._heartbeat_thread = threading.Thread(
        target=self._heartbeat_thread_job)
    self._heartbeat_thread.daemon = True
    self._heartbeat_thread.start()
    # NOTE: a leftover debug block that acquired self._connection_lock
    # solely to print "heartbeat started" was removed; the sibling
    # implementation of this method does not have it.
def __init__(self, name):
    """Create a named handler with its own background task queue.

    :param name: human-readable name; the object id is appended to
        disambiguate multiple instances
    """
    self.name = "%s (%s)" % (name, hex(id(self)))
    self._tasks = moves.queue.Queue()
    # Set to ask the background thread to stop processing.
    self._shutdown = eventletutils.Event()
    # NOTE(review): despite its name this thread runs
    # _process_in_background; it is created here but not started —
    # presumably a caller starts it later. Confirm.
    self._shutdown_thread = threading.Thread(
        target=self._process_in_background)
    self._shutdown_thread.daemon = True
def test_event_api_compat(self, mock_clear):
    """The eventlet Event wrapper must mirror threading.Event's API."""
    with mock.patch('oslo_utils.eventletutils.is_monkey_patched',
                    return_value=True):
        e_event = eventletutils.Event()
    self.assertIsInstance(e_event, eventletutils._Event)

    t_event = eventletutils.Event()
    if six.PY3:
        expected_cls = threading.Event
    else:
        expected_cls = threading._Event
    self.assertIsInstance(t_event, expected_cls)

    # Every public callable of the threading event must exist on the
    # eventlet-backed event too.
    for name in dir(t_event):
        if name.startswith("_"):
            continue
        if not callable(getattr(t_event, name)):
            continue
        self.assertTrue(hasattr(e_event, name))
def __init__(self, conn):
    """Wrap a Kafka consumer connection and prime topic creation.

    :param conn: Kafka consumer connection to poll
    """
    super(KafkaListener, self).__init__()
    self._stopped = eventletutils.Event()
    self.conn = conn
    self.incoming_queue = []

    # FIXME(sileht): We do a first poll to ensure the topics are
    # created. This is a workaround mainly for functional tests; in
    # real life this is fine if topics are not created synchronously.
    self.poll(5)
def dispatch(self, incoming):
    """Dispatch an RPC message to the appropriate endpoint method.

    :param incoming: incoming message
    :type incoming: IncomingMessage
    :raises: NoSuchMethod, UnsupportedVersion
    """
    message = incoming.message
    ctxt = incoming.ctxt
    method = message.get('method')
    args = message.get('args', {})
    namespace = message.get('namespace')
    version = message.get('version', '1.0')

    # NOTE(danms): This event and watchdog thread are used to send
    # call-monitoring heartbeats for this message while the call
    # is executing if it runs for some time. The thread will wait
    # for the event to be signaled, which we do explicitly below
    # after dispatching the method call.
    completion_event = eventletutils.Event()
    watchdog_thread = threading.Thread(target=self._watchdog,
                                       args=(completion_event, incoming))
    if incoming.client_timeout:
        # NOTE(danms): The client provided a timeout, so we start
        # the watchdog thread. If the client is old or didn't send
        # a timeout, we just never start the watchdog thread.
        watchdog_thread.start()

    # The finally clause must run on *every* exit path (successful
    # dispatch, NoSuchMethod, UnsupportedVersion or an endpoint
    # exception). Previously only the successful-dispatch path set
    # completion_event, so a failed method lookup leaked a watchdog
    # thread that kept sending heartbeats forever.
    try:
        found_compatible = False
        for endpoint in self.endpoints:
            target = getattr(endpoint, 'target', None)
            if not target:
                target = self._default_target

            if not (self._is_namespace(target, namespace) and
                    self._is_compatible(target, version)):
                continue

            if hasattr(endpoint, method):
                if self.access_policy.is_allowed(endpoint, method):
                    return self._do_dispatch(endpoint, method, ctxt, args)
                # A compatible endpoint exists but access was denied;
                # report NoSuchMethod rather than UnsupportedVersion.
                found_compatible = True

        if found_compatible:
            raise NoSuchMethod(method)
        else:
            raise UnsupportedVersion(version, method=method)
    finally:
        # Unblock (and reap) the watchdog regardless of outcome.
        completion_event.set()
        if incoming.client_timeout:
            watchdog_thread.join()
def __init__(self, exchange_manager, targets, pool=None):
    """Fake listener over in-memory exchanges for the given targets."""
    super(FakeListener, self).__init__()
    self._exchange_manager = exchange_manager
    self._targets = targets
    self._pool = pool
    self._stopped = eventletutils.Event()

    # NOTE(sileht): Ensure that all needed queues exist even if the
    # listener has not been polled yet.
    for tgt in self._targets:
        fake_exchange = self._exchange_manager.get_exchange(tgt.exchange)
        fake_exchange.ensure_queue(tgt, pool)
def __init__(self, reply_q, conn, allowed_remote_exmods):
    """Start consuming RPC replies from *reply_q* on a daemon thread.

    :param reply_q: name of the direct reply queue to consume
    :param conn: connection used to declare the consumer and poll
    :param allowed_remote_exmods: exception modules allowed when
        deserializing remote errors
    """
    self.conn = conn
    self.allowed_remote_exmods = allowed_remote_exmods
    self.msg_id_cache = rpc_amqp._MsgIdCache()
    self.waiters = ReplyWaiters()

    self.conn.declare_direct_consumer(reply_q, self)

    # Set to ask the polling thread to exit.
    self._thread_exit_event = eventletutils.Event()
    self._thread = threading.Thread(target=self.poll)
    self._thread.daemon = True
    self._thread.start()
def test_logging_with_timeout(self, mock_log):
    """A wait message is logged after log_after seconds even when an
    absolute timeout is also specified.
    """
    warned = eventletutils.Event()

    def _record_warning(_, __):
        warned.set()

    mock_log.warning.side_effect = _record_warning

    # Call stop without calling start. We should log a wait after
    # 1 second.
    stopper = eventlet.spawn(self.server.stop, log_after=1, timeout=2)
    warned.wait()

    # Redundant given that we already waited, but it's nice to assert.
    self.assertTrue(mock_log.warning.called)
    stopper.kill()
def test_logging_explicit_wait(self, mock_log):
    """Waiting longer than log_after must produce a warning log."""
    warned = eventletutils.Event()
    mock_log.warning.side_effect = lambda msg, arg: warned.set()

    # stop() without a prior start() should log a wait after 1 second.
    waiter = eventlet.spawn(self.server.stop, log_after=1)
    warned.wait()

    # Redundant given that we already waited, but it's nice to assert.
    self.assertTrue(mock_log.warning.called)
    waiter.kill()
def main():
    """Exercise the rabbit driver's heartbeat from the command line."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--eventlet-turned-on', action='store_true',
                        help='turn on eventlet and monkey patch the env')
    # type=int so an explicitly passed value is a number; previously a
    # string was stored whenever the flag was given on the CLI.
    parser.add_argument("--heartbeat-timeout", type=int,
                        help="the heartbeat timeout", default=60)
    args = parser.parse_args()

    if args.eventlet_turned_on:
        print("----------------------------------------------")
        # Backslashes are now escaped: the printed text is unchanged,
        # but the literal no longer relies on an invalid "\ " escape
        # (a SyntaxWarning on modern Python).
        print("/!\\ Running a monkey patched environment /!\\")
        print("----------------------------------------------")
        eventlet.monkey_patch()

    event = eventletutils.Event()

    transport_url = 'rabbit://*****:*****@localhost:5672/'
    transport = oslo_messaging.get_transport(cfg.CONF, transport_url)
    conn = transport._driver._get_connection()
    conn.ensure(method=lambda: True)
    # Nothing ever sets this event: block forever while heartbeats run
    # in the background, until the process is interrupted.
    event.wait()
    conn._heartbeat_stop()
def test_state_wrapping(self):
    # Test that we behave correctly if a thread waits, and the server state
    # has wrapped when it is next scheduled

    # Ensure that if 2 threads wait for the completion of 'start', the
    # first will wait until complete_event is signalled, but the second
    # will continue
    complete_event = eventletutils.Event()
    complete_waiting_callback = eventletutils.Event()

    start_state = self.server._states['start']
    old_wait_for_completion = start_state.wait_for_completion

    waited = [False]

    # Only the first waiter is held on complete_event; later waiters
    # fall straight through to the original implementation.
    def new_wait_for_completion(*args, **kwargs):
        if not waited[0]:
            waited[0] = True
            complete_waiting_callback.set()
            complete_event.wait()
        old_wait_for_completion(*args, **kwargs)

    start_state.wait_for_completion = new_wait_for_completion

    # thread1 will wait for start to complete until we signal it
    thread1 = eventlet.spawn(self.server.stop)
    thread1_finished = eventletutils.Event()
    thread1.link(lambda _: thread1_finished.set())

    self.server.start()
    complete_waiting_callback.wait()

    # The server should have started, but stop should not have been called
    self.assertEqual(1, len(self.executors))
    self.assertEqual([], self.executors[0]._calls)
    self.assertFalse(thread1_finished.is_set())

    self.server.stop()
    self.server.wait()

    # We should have gone through all the states, and thread1 should still
    # be waiting
    self.assertEqual(1, len(self.executors))
    self.assertEqual(['shutdown'], self.executors[0]._calls)
    self.assertFalse(thread1_finished.is_set())

    # Start again
    self.server.start()

    # We should now record 4 executors (2 for each server)
    self.assertEqual(2, len(self.executors))
    self.assertEqual(['shutdown'], self.executors[0]._calls)
    self.assertEqual([], self.executors[1]._calls)
    self.assertFalse(thread1_finished.is_set())

    # Allow thread1 to complete
    complete_event.set()
    thread1_finished.wait()

    # thread1 should now have finished, and stop should not have been
    # called again on either the first or second executor
    self.assertEqual(2, len(self.executors))
    self.assertEqual(['shutdown'], self.executors[0]._calls)
    self.assertEqual([], self.executors[1]._calls)
    self.assertTrue(thread1_finished.is_set())
def test_wait_for_running_task(self):
    # Test that if 2 threads call a method simultaneously, both will wait,
    # but only 1 will call the underlying executor method.
    start_event = eventletutils.Event()
    finish_event = eventletutils.Event()

    running_event = eventletutils.Event()
    done_event = eventletutils.Event()

    _runner = [None]

    class SteppingFakeExecutor(self.server._executor_cls):
        def __init__(self, *args, **kwargs):
            # Tell the test which thread won the race
            _runner[0] = eventlet.getcurrent()
            running_event.set()

            start_event.wait()
            super(SteppingFakeExecutor, self).__init__(*args, **kwargs)
            done_event.set()

            finish_event.wait()

    self.server._executor_cls = SteppingFakeExecutor

    start1 = eventlet.spawn(self.server.start)
    start2 = eventlet.spawn(self.server.start)

    # Wait until one of the threads starts running
    running_event.wait()
    runner = _runner[0]
    # BUG FIX: the waiter is whichever thread did *not* win the race.
    # The else branch previously evaluated to start2 as well, so when
    # start2 won we incorrectly waited on the runner itself.
    waiter = start2 if runner == start1 else start1

    waiter_finished = eventletutils.Event()
    waiter.link(lambda _: waiter_finished.set())

    # At this point, runner is running start(), and waiter() is waiting for
    # it to complete. runner has not yet logged anything.
    self.assertEqual(0, len(self.executors))
    self.assertFalse(waiter_finished.is_set())

    # Let the runner log the call
    start_event.set()
    done_event.wait()

    # We haven't signalled completion yet, so submit shouldn't have run
    self.assertEqual(1, len(self.executors))
    self.assertEqual([], self.executors[0]._calls)
    self.assertFalse(waiter_finished.is_set())

    # Let the runner complete
    finish_event.set()
    waiter.wait()
    runner.wait()

    # Check that both threads have finished, start was only called once,
    # and execute ran
    self.assertTrue(waiter_finished.is_set())
    self.assertEqual(1, len(self.executors))
    self.assertEqual([], self.executors[0]._calls)
def __init__(self):
    """Track shutdown via a single event flag, initially unset."""
    self.stopped = eventletutils.Event()
def __init__(self, server):
    """Daemon helper thread that drives *server* start/stop.

    :param server: the message-handling server this thread runs
    """
    super(ServerThreadHelper, self).__init__()
    self.daemon = True
    self._server = server
    # Events signalled by the controlling test to start/stop the server.
    self._start_event = eventletutils.Event()
    self._stop_event = eventletutils.Event()
def _heartbeat_start(self):
    """Spawn the daemon thread that sends AMQP heartbeats.

    The thread runs ``_heartbeat_thread_job`` until
    ``_heartbeat_exit_event`` is set.
    """
    self._heartbeat_exit_event = eventletutils.Event()
    self._heartbeat_thread = threading.Thread(
        target=self._heartbeat_thread_job)
    # Daemonize so the heartbeat thread never blocks interpreter exit.
    self._heartbeat_thread.daemon = True
    self._heartbeat_thread.start()