def __init__(self, func, *args, **kwargs):
    self.my_sem = Semaphore(0)   # This is held by the thread as it runs.
    self.caller_sem = None
    self.dead = False
    started = Event()
    self.id = 5
    self.ALL.append(self)

    def go():
        self.id = eventlet.corolocal.get_ident()
        started.send(True)
        self.my_sem.acquire(blocking=True, timeout=None)
        try:
            func(*args, **kwargs)
        # except Exception as e:
        #     print("Exception in coroutine! %s" % e)
        finally:
            self.dead = True
            self.caller_sem.release()  # Relinquish control back to caller.
            for i in range(len(self.ALL)):
                if self.ALL[i].id == self.id:
                    del self.ALL[i]
                    break

    true_spawn(go)
    started.wait()

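# Hedged sketch (ours, not from the original source): the `started` handshake
# above relies on eventlet.event.Event semantics -- a one-shot result slot.
# One greenthread blocks in wait() until another calls send(); afterwards
# wait() returns the sent value immediately.
import eventlet
from eventlet.event import Event

def demo_started_handshake():
    started = Event()

    def go():
        # ... per-thread setup would happen here ...
        started.send(True)   # unblock whoever spawned us

    eventlet.spawn(go)
    assert started.wait() is True   # returns once go() has signalled
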
class Handler(base.Handler):
    __doc__ = base.Handler.__doc__ + """
    This Handler subclass is designed for use with eventlet.  It spawns
    a new green thread to handle each incoming request.
    """

    ConnectionClass = Connection

    def __init__(self, *args, **kwds):
        super(Handler, self).__init__(*args, **kwds)
        # We need to count the number of inflight requests, so the
        # main thread can wait for them to complete when shutting down.
        self._num_inflight_requests = 0
        self._all_requests_complete = None

    def handle_request(self, req):
        self._num_inflight_requests += 1
        if self._num_inflight_requests == 1:
            self._all_requests_complete = Event()

        @eventlet.spawn_n
        def do_handle_request():
            try:
                self.process_request(req)
            finally:
                self._num_inflight_requests -= 1
                if self._num_inflight_requests == 0:
                    self._all_requests_complete.send()
                    self._all_requests_complete = None

    def wait_for_completion(self):
        if self._num_inflight_requests > 0:
            self._all_requests_complete.wait()

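# Hedged note: `@eventlet.spawn_n` above is used as a decorator, which calls
# spawn_n(do_handle_request) at definition time -- i.e. it runs the function
# immediately in a new greenthread and rebinds the name to the greenlet.
# Minimal demonstration of the idiom (demo names are ours, not the source's):
import eventlet

def demo_spawn_n_decorator():
    done = []

    @eventlet.spawn_n
    def background():
        done.append(True)

    eventlet.sleep(0)   # yield so the spawned greenthread can run
    assert done == [True]
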
def test_kill_container_with_active_workers(container_factory):
    waiting = Event()
    wait_forever = Event()

    class Service(object):
        name = 'kill-with-active-workers'

        @foobar
        def spam(self):
            waiting.send(None)
            wait_forever.wait()

    container = container_factory(Service, {})
    dep = get_extension(container, Entrypoint)

    # start a worker, which signals `waiting` and then blocks forever
    container.spawn_worker(dep, (), {})
    waiting.wait()

    with patch('nameko.containers._log') as logger:
        container.kill()

    assert logger.warning.call_args_list == [
        call('killing %s active workers(s)', 1),
        call('killing active worker for %s', ANY)
    ]

class Queue(LightQueue):
    '''Create a queue object with a given maximum size.

    If *maxsize* is less than zero or ``None``, the queue size is infinite.

    ``Queue(0)`` is a channel, that is, its :meth:`put` method always blocks
    until the item is delivered. (This is unlike the standard
    :class:`Queue`, where 0 means infinite size).

    In all other respects, this Queue class resembles the standard library
    :class:`Queue`.
    '''

    def __init__(self, maxsize=None):
        LightQueue.__init__(self, maxsize)
        self.unfinished_tasks = 0
        self._cond = Event()

    def _format(self):
        result = LightQueue._format(self)
        if self.unfinished_tasks:
            result += ' tasks=%s _cond=%s' % (self.unfinished_tasks,
                                              self._cond)
        return result

    def _put(self, item):
        LightQueue._put(self, item)
        self._put_bookkeeping()

    def _put_bookkeeping(self):
        self.unfinished_tasks += 1
        if self._cond.ready():
            self._cond.reset()

    def task_done(self):
        '''Indicate that a formerly enqueued task is complete. Used by queue
        consumer threads. For each :meth:`get <Queue.get>` used to fetch a
        task, a subsequent call to :meth:`task_done` tells the queue that the
        processing on the task is complete.

        If a :meth:`join` is currently blocking, it will resume when all
        items have been processed (meaning that a :meth:`task_done` call was
        received for every item that had been :meth:`put <Queue.put>` into
        the queue).

        Raises a :exc:`ValueError` if called more times than there were items
        placed in the queue.
        '''
        if self.unfinished_tasks <= 0:
            raise ValueError('task_done() called too many times')
        self.unfinished_tasks -= 1
        if self.unfinished_tasks == 0:
            self._cond.send(None)

    def join(self):
        '''Block until all items in the queue have been gotten and processed.

        The count of unfinished tasks goes up whenever an item is added to
        the queue. The count goes down whenever a consumer thread calls
        :meth:`task_done` to indicate that the item was retrieved and all
        work on it is complete. When the count of unfinished tasks drops to
        zero, :meth:`join` unblocks.
        '''
        # guard against blocking forever on a fresh (or fully drained) queue,
        # whose Event has never been sent
        if self.unfinished_tasks > 0:
            self._cond.wait()

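# Hedged usage sketch (ours, mirroring the stdlib Queue contract): a consumer
# calls task_done() once per item, so join() unblocks only after every queued
# item has been fully processed.
import eventlet

def demo_queue_join():
    q = Queue()   # the Queue class defined above
    results = []

    def consumer():
        while True:
            item = q.get()
            results.append(item * 2)
            q.task_done()

    eventlet.spawn(consumer)
    for i in range(3):
        q.put(i)
    q.join()   # returns once task_done() has run three times
    assert results == [0, 2, 4]
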
def test_kill_container_with_active_workers(container_factory):
    waiting = Event()
    wait_forever = Event()

    class Service(object):
        name = 'kill-with-active-workers'

        @foobar
        def spam(self):
            waiting.send(None)
            wait_forever.wait()

    container = container_factory(Service, {})
    dep = get_dependency(container, EntrypointProvider)

    # start a worker, which signals `waiting` and then blocks forever
    container.spawn_worker(dep, (), {})
    waiting.wait()

    with patch('nameko.containers._log') as logger:
        container.kill()

    calls = logger.warning.call_args_list
    assert call(
        'killing active thread for %s', 'kill-with-active-workers.spam'
    ) in calls

def test_container_doesnt_exhaust_max_workers(container):
    spam_called = Event()
    spam_continue = Event()

    class Service(object):
        name = 'max-workers'

        @foobar
        def spam(self, a):
            spam_called.send(a)
            spam_continue.wait()

    container = ServiceContainer(Service, config={MAX_WORKERS_CONFIG_KEY: 1})
    dep = get_extension(container, Entrypoint)

    # start the first worker, which should wait for spam_continue
    container.spawn_worker(dep, ['ham'], {})

    # start the next worker in a separate thread,
    # because it should block until the first one completes
    gt = spawn(container.spawn_worker, dep, ['eggs'], {})

    with Timeout(1):
        assert spam_called.wait() == 'ham'
        # if the container had spawned the second worker, we would see
        # an error indicating that spam_called was fired twice, and the
        # greenthread would now be dead.
        assert not gt.dead

        # reset the calls and allow the waiting worker to complete.
        spam_called.reset()
        spam_continue.send(None)

        # the second worker should now run and complete
        assert spam_called.wait() == 'eggs'
        assert gt.dead

def handler(sock, client):
    # socket opened
    # first message is either hammerlib:get_clientid:sessionid
    # or hammerlib:have_clientid:sessionid:clientid
    app, command, payload = parse(sock.recv(2048))  # FIXME: what about framing?
    payload = payload.strip()  # FIXME: dirty hack
    if app != "hammerlib":
        return error(sock, "no handshake")
    if command == "get_clientid":
        clientid, sessionid = get_next_id(), payload
        add_connection(sock, clientid, payload)
    elif command == "have_clientid":
        # assert the proper session for given clientid
        sessionid, clientid = payload.split(":")
        client = clients.get(clientid)
        if client:
            if client["sessionid"] != sessionid:
                return error(sock, "bad sessionid")
        else:
            add_connection(sock, clientid, sessionid)
    else:
        return error(sock, "bad handshake")
    send_message_to_client(clientid, "hammerlib", "connected", clientid)
    send_message_to_app(sock, "hammerlib", "client_connected", "")
    event = Event()
    eventlet.spawn_n(reader, sock, event)
    event.wait()

def _poll(self, sockets, timeout=None):
    # Don't bother trampolining if there's data available immediately.
    # This also avoids calling into the eventlet hub with a timeout of
    # zero, which doesn't work right (it still switches the greenthread).
    (r, _, _) = zmq_poll.select(sockets, [], [], timeout=0)
    if r:
        return r
    if timeout == 0:
        return []
    # Looks like we'll have to block :-(
    ready = []
    threads = []
    res = Event()
    for sock in sockets:
        threads.append(
            eventlet.spawn(self._do_poll, sock, ready, res, timeout))
    self.poll_threads.append((res, threads))
    try:
        res.wait()
    finally:
        self.poll_threads.remove((res, threads))
        for t in threads:
            t.kill()
            try:
                t.wait()
            except GreenletExit:
                pass
    return ready

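# Hedged sketch (ours): the pattern above generalised -- fan out one
# greenthread per source, let the first result fire a shared Event, then
# kill the stragglers. `first_of` is an illustrative name, not part of the
# original API.
import eventlet
from eventlet.event import Event

def first_of(funcs, timeout=None):
    res = Event()

    def runner(f):
        value = f()
        if not res.ready():   # only the first completed call wins
            res.send(value)

    threads = [eventlet.spawn(runner, f) for f in funcs]
    try:
        with eventlet.Timeout(timeout, exception=False):
            return res.wait()
        return None   # reached only if the timeout silently exited the block
    finally:
        for t in threads:
            t.kill()
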
def test_create_shutdown_race(self):
    """ Test the race condition where the pipeline shuts down while
    `create` is still executing.
    """
    created = []
    destroyed = []
    counter = itertools.count()

    creating = Event()

    def create():
        creating.send(True)
        eventlet.sleep()
        obj = next(counter)
        created.append(obj)
        return obj

    def destroy(obj):
        destroyed.append(obj)

    with ResourcePipeline(create, destroy).run():
        creating.wait()
        assert created == []

    assert created == destroyed == list(range(1))

class TimerProvider(EntrypointProvider):
    def __init__(self, interval, config_key):
        self._default_interval = interval
        self.config_key = config_key
        self.should_stop = Event()
        self.gt = None

    def prepare(self):
        interval = self._default_interval
        if self.config_key:
            config = self.container.config
            interval = config.get(self.config_key, interval)
        self.interval = interval

    def start(self):
        _log.debug('starting %s', self)
        self.gt = self.container.spawn_managed_thread(self._run)

    def stop(self):
        _log.debug('stopping %s', self)
        self.should_stop.send(True)
        self.gt.wait()

    def kill(self, exc):
        _log.debug('killing %s', self)
        self.gt.kill()

    def _run(self):
        ''' Runs the interval loop.

        This should not be called directly; use the `start()` method instead.
        '''
        while not self.should_stop.ready():
            start = time.time()
            self.handle_timer_tick()
            elapsed_time = (time.time() - start)
            sleep_time = max(self.interval - elapsed_time, 0)
            self._sleep_or_stop(sleep_time)

    def _sleep_or_stop(self, sleep_time):
        ''' Sleeps for `sleep_time` seconds or until a `should_stop` event
        has been fired, whichever comes first.
        '''
        try:
            with Timeout(sleep_time):
                self.should_stop.wait()
        except Timeout:
            # we use the timeout as a cancellable sleep
            pass

    def handle_timer_tick(self):
        args = tuple()
        kwargs = {}
        self.container.spawn_worker(self, args, kwargs)

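# Hedged sketch (ours): the "Timeout as a cancellable sleep" trick from
# _sleep_or_stop above, in isolation -- it sleeps for up to `sleep_time`
# seconds, but returns early the moment `should_stop` is fired.
from eventlet import Timeout

def sleep_or_stop(should_stop, sleep_time):
    try:
        with Timeout(sleep_time):
            should_stop.wait()   # wakes immediately on should_stop.send()
    except Timeout:
        pass   # timed out: this was just an ordinary sleep
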
def test_send_rpc_multi_message_reply_ignores_all_but_last(get_connection):

    queue_declared = Event()

    def response_greenthread():
        with get_connection() as conn:
            with conn.channel() as chan:
                queue = nova.get_topic_queue(
                    'test_rpc', 'test', channel=chan)
                queue.declare()
                queue_declared.send(True)

                body, msg = ifirst(
                    queue_iterator(queue, no_ack=True, timeout=2))
                msgid, _, _, args = nova.parse_message(body)

                exchange = nova.get_reply_exchange(msgid)
                producer = Producer(chan, exchange=exchange,
                                    routing_key=msgid)

                for _ in range(3):
                    msg = dict(result='should ignore this message',
                               failure=None, ending=False)
                    producer.publish(msg)
                    eventlet.sleep(0.1)

                msg = dict(result=args, failure=None, ending=False)
                producer.publish(msg)

                msg = dict(result=None, failure=None, ending=True)
                producer.publish(msg)

    g = eventlet.spawn_n(response_greenthread)
    eventlet.sleep()

    with get_connection() as conn:
        ctx = context.get_admin_context()

        queue_declared.wait()
        resp = nova.send_rpc(
            conn,
            context=ctx,
            exchange='test_rpc',
            topic='test',
            method='test_method',
            args={'spam': 'shrub'},
            timeout=3)

        assert resp == {'spam': 'shrub'}

    eventlet.sleep()

    def check_greenthread_dead():
        assert not g
    assert_stops_raising(check_greenthread_dead)

def start_branch(env, argv=None):
    env.syncdb(interactive=False)
    from cyme.branch import Branch
    ready_event = Event()
    CYME_INSTANCE_DIR.mkdir()
    instance = Branch("127.0.0.1:%s" % (CYME_PORT, ),
                      numc=1, ready_event=ready_event)
    instance.start()
    ready_event.wait()
    return instance

def handle_message(self, msg):
    try:
        event = Event()
        self.container.spawn_worker(
            self, (msg, ), {},
            handle_result=partial(self.handle_result, event))
        event.wait()
    # pylint: disable=broad-except
    except Exception as e:
        LOGGER.error('Hit exception: %s BackTrace: %s',
                     e, traceback.format_exc())

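# Hedged sketch (assumed contract, not confirmed by the source): the
# `partial(self.handle_result, event)` callbacks used throughout these
# snippets suggest a result handler that fires the Event with the worker's
# outcome, so the caller's event.wait() returns it.
def handle_result(self, event, worker_ctx, result, exc_info):
    event.send((result, exc_info))
    return result, exc_info
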
class Timer(object):
    ''' A timer object, which will call a given method repeatedly at a given
    interval.
    '''

    def __init__(self, interval, func):
        self.interval = interval
        self.func = func
        self.gt = None
        self.should_stop = Event()

    def start(self):
        ''' Starts the timer in a separate green thread.

        Once started it may be stopped using its `stop()` method.
        '''
        self.gt = eventlet.spawn(self._run)
        _log.debug(
            'started timer for %s with %ss interval',
            self.func, self.interval)

    def _run(self):
        ''' Runs the interval loop.

        This should not be called directly; use the `start()` method instead.
        '''
        while not self.should_stop.ready():
            start = time.time()
            try:
                self.func()
            except Exception as e:
                _log.exception('error in timer handler: %s', e)

            sleep_time = max(self.interval - (time.time() - start), 0)
            self._sleep_or_stop(sleep_time)

    def _sleep_or_stop(self, sleep_time):
        ''' Sleeps for `sleep_time` seconds or until a `should_stop` event
        has been fired, whichever comes first.
        '''
        try:
            with Timeout(sleep_time):
                self.should_stop.wait()
        except Timeout:
            # we use the timeout as a cancellable sleep
            pass

    def stop(self):
        ''' Gracefully stops the timer, waiting for its func to complete if
        it is running.
        '''
        self.should_stop.send(True)
        self.gt.wait()

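# Hedged usage sketch (ours): driving the Timer above; `tick` is a throwaway
# callable. Assumes the module provides the `_log`, `Timeout` and `Event`
# names the class relies on.
import time
import eventlet

def demo_timer():
    ticks = []

    def tick():
        ticks.append(time.time())

    t = Timer(interval=0.1, func=tick)
    t.start()
    eventlet.sleep(0.35)
    t.stop()   # waits for any in-flight tick to finish
    assert len(ticks) >= 3
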
def test_send_rpc_errors(get_connection):

    queue_declared = Event()

    def response_greenthread():
        with get_connection() as conn:
            with conn.channel() as chan:
                queue = nova.get_topic_queue(
                    'test_rpc', 'test', channel=chan)
                queue.declare()
                queue_declared.send(True)

                body, msg = ifirst(
                    queue_iterator(queue, no_ack=True, timeout=2))
                msgid, _, _, _ = nova.parse_message(body)

                exchange = nova.get_reply_exchange(msgid)
                producer = Producer(chan, exchange=exchange,
                                    routing_key=msgid)

                exc = Exception('error')
                failure = (type(exc).__name__, str(exc))

                msg = {'result': None, 'failure': failure, 'ending': False}
                producer.publish(msg)
                msg = {'result': None, 'failure': None, 'ending': True}
                producer.publish(msg)

    g = eventlet.spawn_n(response_greenthread)
    eventlet.sleep(0)

    with get_connection() as conn:
        ctx = context.get_admin_context()

        with pytest.raises(RemoteError):
            queue_declared.wait()
            nova.send_rpc(
                conn,
                context=ctx,
                exchange='test_rpc',
                topic='test',
                method='test_method',
                args={'foo': 'bar'},
                timeout=3)

    def check_greenthread_dead():
        assert not g
    assert_stops_raising(check_greenthread_dead)

def handle_request(self, request):
    request.shallow = False
    try:
        context_data = self.server.context_data_from_headers(request)
        args, kwargs = self.get_entrypoint_parameters(request)

        self.check_signature(args, kwargs)
        event = Event()
        self.container.spawn_worker(
            self, args, kwargs,
            context_data=context_data,
            handle_result=partial(self.handle_result, event))
        result = event.wait()

        response = response_from_result(result)

    except Exception as exc:
        if (isinstance(exc, self.expected_exceptions) or
                isinstance(exc, BadRequest)):
            status_code = 400
        else:
            status_code = 500
        error_dict = serialize(exc)
        payload = u'Error: {exc_type}: {value}\n'.format(**error_dict)

        response = Response(
            payload,
            status=status_code,
        )
    return response

class StreamsResource(object):

    def __init__(self):
        self._action_event = Event()
        self._session_events = {}

    def new(self):
        new_id = str(uuid.uuid4())
        self._session_events[new_id] = Event()
        print(self._session_events.keys())
        return new_id

    def sessions(self):
        return self._session_events.keys()

    def send_message(self, id, message):
        if id:
            self._session_events[id].send(message)
            eventlet.sleep()
            self._session_events[id] = Event()
        else:
            self._action_event.send(message)
            eventlet.sleep()  # was `eventlet.sleep` -- missing call parens
            self._action_event = Event()

    def get_message(self, id):
        if id:
            return self._session_events[id].wait()
        else:
            return self._action_event.wait()

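# Hedged usage sketch (ours): the send-then-replace pattern above turns a
# one-shot eventlet Event into a stream -- each send() wakes current waiters,
# then a fresh Event is swapped in for the next message.
import eventlet

def demo_streams():
    streams = StreamsResource()
    sid = streams.new()
    got = []

    def listener():
        got.append(streams.get_message(sid))

    eventlet.spawn(listener)
    eventlet.sleep(0)                  # let the listener block on wait()
    streams.send_message(sid, 'hello')
    eventlet.sleep(0)
    assert got == ['hello']
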
def handle_request(self, request):
    log.info(' ### handle_request: %s' % request)
    request.shallow = False
    try:
        context_data = self.server.context_data_from_headers(request)

        openapi_request = OpenAPIRequest(request, self.operation)
        log.info('openapi_request: %r' % openapi_request)
        openapi_request_result = self.spec_manager.validate_request(
            openapi_request)
        args, kwargs = self.get_entrypoint_parameters(
            openapi_request_result)

        event = Event()
        self.container.spawn_worker(
            self, args, kwargs,
            context_data=context_data,
            handle_result=partial(self.handle_result, event))
        result = event.wait()
        log.info('handle_request: result: %s', result)

        response = self.response_from_result(result, openapi_request)
        log.info('handle_request: %r' % response)
    except Exception as exc:
        # build an error response instead of re-raising; the stray
        # `raise exc` here made the next line unreachable
        response = self.response_from_exception(exc, openapi_request)
    return response

def rengine_side(self, appid, token, uri):
    """Handle rengine (client) GET requests"""
    if not self.rengine_authorization_ok(appid, token):
        LOGGER.info('Rengine content request authorization fails')
        abort(401, 'Authorization failed')

    evt = Event()
    request_id = str(uuid4())
    self.request_id_events[request_id] = evt

    headers = [
        "%s: %s" % (header, val)
        for (header, val) in request.headers.items()
    ]
    packet = ScpPacket.make_sfkcontent(uri, request_id, headers)
    try:
        self._send(packet, appid)
    except Exception as e:
        abort(500, str(e))

    LOGGER.debug("uri %s expected" % uri)

    timeout = Timeout(TIMEOUT)
    try:
        resp = evt.wait()
    except Timeout:
        del self.request_id_events[request_id]
        abort(504, 'Gateway Timeout')
    finally:
        timeout.cancel()

    LOGGER.debug("uri %s got" % uri)
    return resp

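# Hedged sketch (ours): the request/response correlation pattern above in
# isolation -- park the caller on a per-request Event and let whichever
# thread receives the response fire it. All names here are illustrative.
import eventlet
from uuid import uuid4
from eventlet.event import Event

pending = {}

def call_and_wait(send, payload, timeout=5):
    request_id = str(uuid4())
    evt = pending[request_id] = Event()
    send(request_id, payload)          # deliver the request out-of-band
    try:
        with eventlet.Timeout(timeout):
            return evt.wait()
    finally:
        pending.pop(request_id, None)

def on_response(request_id, response):
    evt = pending.get(request_id)
    if evt is not None and not evt.ready():
        evt.send(response)
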
def unregister_provider(self, provider):
    if not self._consumers_ready.ready():
        # we cannot handle the situation where we are starting up and
        # want to remove a consumer at the same time
        # TODO: with the upcoming error handling mechanism, this needs
        # to be thought through again.
        self._last_provider_unregistered.send()
        return

    removed_event = Event()
    # we can only cancel a consumer from within the consumer thread
    self._pending_remove_providers[provider] = removed_event
    # so we will just register the consumer to be cancelled
    removed_event.wait()

    super(QueueConsumer, self).unregister_provider(provider)

class MultiQueueConsumer(object):
    def __init__(self, queues):
        self.cancelled = False
        self.event = Event()
        self.queues = queues

    class Waiter(object):
        def __init__(self, consumer, queue):
            self.consumer = consumer
            self.queue = queue

        @property
        def cancelled(self):
            return self.consumer.cancelled

        def switch(self, item):
            if self.cancelled or self.consumer.event.ready():
                self.queue.queue.appendleft(item)
                self.queue._schedule_unlock()
            else:
                self.consumer.event.send((self.queue, item))

        def kill(self, *exc_info):
            if not self.cancelled and not self.consumer.event.ready():
                self.consumer.event.send(exc=exc_info)

    def wait(self, timeout=None, return_queue=False):
        empty_queues = []
        for q in self.queues:
            try:
                if return_queue:
                    return q, q.get_nowait()
                else:
                    return q.get_nowait()
            except Queue.Empty:
                empty_queues.append(q)
        for q in empty_queues:
            q.getters.add(self.Waiter(self, q))
        self.cancelled = False
        try:
            with eventlet.Timeout(timeout, exception=Queue.Empty):
                if return_queue:
                    return self.event.wait()
                else:
                    return self.event.wait()[1]
        finally:
            self.cancelled = True

def handle_message(self, socket_id, data, context_data):
    self.check_signature((socket_id,), data)
    event = Event()
    self.container.spawn_worker(
        self, (socket_id,), data,
        context_data=context_data,
        handle_result=partial(self.handle_result, event))
    return event.wait()

def test_stop_while_starting(rabbit_config, mock_container):
    started = Event()

    container = mock_container
    container.shared_extensions = {}
    container.config = rabbit_config
    container.max_workers = 3
    container.spawn_managed_thread = spawn_managed_thread

    class BrokenConnConsumer(QueueConsumer):
        def consume(self, *args, **kwargs):
            started.send(None)
            # kombu will retry again and again on broken connections
            # so we have to make sure the event is reset to allow consume
            # to be called again
            started.reset()
            return super(BrokenConnConsumer, self).consume(*args, **kwargs)

    queue_consumer = BrokenConnConsumer().bind(container)
    queue_consumer.setup()

    handler = MessageHandler()
    queue_consumer.register_provider(handler)

    with eventlet.Timeout(TIMEOUT):
        with patch.object(Connection, 'connect', autospec=True) as connect:
            # patch connection to raise an error
            connect.side_effect = TimeoutError('test')
            # try to start the queue consumer
            gt = eventlet.spawn(queue_consumer.start)
            # wait for the queue consumer to begin starting and
            # then immediately stop it
            started.wait()

    with eventlet.Timeout(TIMEOUT):
        queue_consumer.unregister_provider(handler)
        queue_consumer.stop()

    with eventlet.Timeout(TIMEOUT):
        # we expect the queue_consumer.start thread to finish
        # almost immediately and when it does the queue_consumer thread
        # should be dead too
        while not gt.dead:
            eventlet.sleep()

    assert queue_consumer._gt.dead

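# Hedged demo (ours): the send-then-reset re-arming above depends on
# eventlet's Event.reset(), which returns a fired event to its unfired
# state so it can be sent (and waited on) again.
from eventlet.event import Event

def demo_reset():
    evt = Event()
    evt.send(None)
    assert evt.ready()
    evt.reset()               # re-arm: legal only once the event has fired
    assert not evt.ready()
    evt.send('again')
    assert evt.wait() == 'again'
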
def test_send_rpc(get_connection):

    queue_declared = Event()

    def response_greenthread():
        with get_connection() as conn:
            with conn.channel() as chan:
                queue = nova.get_topic_queue(
                    'test_rpc', 'test', channel=chan)
                queue.declare()
                queue_declared.send(True)

                msg = ifirst(queue_iterator(queue, no_ack=True, timeout=2))
                msgid, _, _, args = nova.parse_message(msg.payload)

                exchange = nova.get_reply_exchange(msgid)
                producer = Producer(chan, exchange=exchange,
                                    routing_key=msgid)

                msg = {'result': args, 'failure': None, 'ending': False}
                producer.publish(msg)
                msg = {'result': None, 'failure': None, 'ending': True}
                producer.publish(msg)

    g = eventlet.spawn_n(response_greenthread)
    eventlet.sleep(0)

    with get_connection() as conn:
        ctx = context.get_admin_context()

        queue_declared.wait()
        resp = nova.send_rpc(
            conn,
            context=ctx,
            exchange='test_rpc',
            topic='test',
            method='test_method',
            args={'foo': 'bar'},
            timeout=3)

        assert resp == {'foo': 'bar'}

    def check_greenthread_dead():
        assert not g
    assert_stops_raising(check_greenthread_dead)

def test_two_bogus_waiters(self):
    def do_receive(q, evt):
        eventlet.Timeout(0, RuntimeError())
        try:
            result = q.wait()
            evt.send(result)
        except RuntimeError:
            evt.send('timed out')

    q = coros.queue()
    e1 = Event()
    e2 = Event()
    spawn(do_receive, q, e1)
    spawn(do_receive, q, e2)
    sleep(0)
    q.send('sent')
    self.assertEqual(e1.wait(), 'timed out')
    self.assertEqual(e2.wait(), 'timed out')
    self.assertEqual(q.wait(), 'sent')

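# Hedged demo (ours): `eventlet.Timeout(0, RuntimeError())` above arms a
# zero-second timer that throws RuntimeError into the greenthread at its
# next blocking call -- which is how do_receive simulates a waiter dying
# inside q.wait().
import eventlet
from eventlet.event import Event

def demo_zero_timeout():
    eventlet.Timeout(0, RuntimeError())
    try:
        Event().wait()        # first blocking call raises immediately
    except RuntimeError:
        return 'timed out'
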
def test_rpc_consumer_sharing(container_factory, rabbit_config,
                              rabbit_manager):
    """ Verify that the RpcConsumer unregisters from the queueconsumer when
    the first provider unregisters itself. Otherwise it keeps consuming
    messages for the unregistered provider, raising MethodNotFound.
    """
    container = container_factory(ExampleService, rabbit_config)
    container.start()

    task_a = get_dependency(container, RpcProvider, name="task_a")
    task_a_stop = task_a.stop

    task_b = get_dependency(container, RpcProvider, name="task_b")
    task_b_stop = task_b.stop

    task_a_stopped = Event()

    def patched_task_a_stop():
        task_a_stop()  # stop immediately
        task_a_stopped.send(True)

    def patched_task_b_stop():
        eventlet.sleep(2)  # stop after 2 seconds
        task_b_stop()

    with patch.object(task_b, 'stop', patched_task_b_stop), \
            patch.object(task_a, 'stop', patched_task_a_stop):

        # stop the container and wait for task_a to stop
        # task_b will still be in the process of stopping
        eventlet.spawn(container.stop)
        task_a_stopped.wait()

        # try to call task_a.
        # should timeout, rather than raising MethodNotFound
        with RpcProxy("exampleservice", rabbit_config) as proxy:
            with pytest.raises(eventlet.Timeout):
                with eventlet.Timeout(1):
                    proxy.task_a()

    # kill the container so we don't have to wait for task_b to stop
    container.kill()

def test_zero_max_size(self):
    q = coros.queue(0)

    def sender(evt, q):
        q.send('hi')
        evt.send('done')

    def receiver(evt, q):
        x = q.wait()
        evt.send(x)

    e1 = Event()
    e2 = Event()

    spawn(sender, e1, q)
    sleep(0)
    self.assert_(not e1.ready())
    spawn(receiver, e2, q)
    self.assertEqual(e2.wait(), 'hi')
    self.assertEqual(e1.wait(), 'done')

def test_rpc_consumer_sharing(container_factory, rabbit_config,
                              rabbit_manager):
    """ Verify that the RpcConsumer unregisters from the queueconsumer when
    the first provider unregisters itself. Otherwise it keeps consuming
    messages for the unregistered provider, raising MethodNotFound.
    """
    container = container_factory(ExampleService, rabbit_config)
    container.start()

    task_a = get_extension(container, Rpc, method_name="task_a")
    task_a_stop = task_a.stop

    task_b = get_extension(container, Rpc, method_name="task_b")
    task_b_stop = task_b.stop

    task_a_stopped = Event()

    def patched_task_a_stop():
        task_a_stop()  # stop immediately
        task_a_stopped.send(True)

    def patched_task_b_stop():
        eventlet.sleep(2)  # stop after 2 seconds
        task_b_stop()

    with patch.object(task_b, 'stop', patched_task_b_stop), \
            patch.object(task_a, 'stop', patched_task_a_stop):

        # stop the container and wait for task_a to stop
        # task_b will still be in the process of stopping
        eventlet.spawn(container.stop)
        task_a_stopped.wait()

        # try to call task_a.
        # should timeout, rather than raising MethodNotFound
        with ServiceRpcProxy("exampleservice", rabbit_config) as proxy:
            with pytest.raises(eventlet.Timeout):
                with eventlet.Timeout(1):
                    proxy.task_a()

    # kill the container so we don't have to wait for task_b to stop
    container.kill()

class Timer(Entrypoint):
    def __init__(self, interval, eager=True, **kwargs):
        self.gt = None
        self.eager = eager
        self.interval = interval
        self.stopping_event = Event()
        self.finished_event = Event()
        super(Timer, self).__init__(**kwargs)

    def start(self):
        self.gt = self.container.spawn_manage_thread(self._run)

    def stop(self):
        self.stopping_event.send(True)
        self.gt.wait()

    def kill(self):
        self.gt.kill()

    def _run(self):
        def gen_interval():
            start_time = time.time()
            start = 1 if self.eager else 0
            for n in count(start=start):
                i = max(start_time + n * self.interval - time.time(), 0)
                yield i

        interval = gen_interval()
        to_sleep = next(interval)
        while True:
            with Timeout(to_sleep, exception=False):
                self.stopping_event.wait()
                break
            self.container.spawn_worker_thread(
                self, (), {}, res_handler=self.res_handler)
            self.finished_event.wait()
            self.finished_event.reset()
            to_sleep = next(interval)

    def res_handler(self, context, result, exc_info):
        self.finished_event.send(True)
        return result, exc_info

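# Hedged sketch (ours): the drift-free scheduling used by gen_interval
# above, extracted standalone. Each sleep is computed from the original
# start time, so a slow tick shortens the next sleep instead of pushing
# every subsequent tick later.
import time
from itertools import count

def gen_interval(interval, eager=True):
    start_time = time.time()
    for n in count(start=1 if eager else 0):
        yield max(start_time + n * interval - time.time(), 0)
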
class MessageHandler(object):
    queue = ham_queue

    def __init__(self):
        self.handle_message_called = Event()

    def handle_message(self, body, message):
        self.handle_message_called.send(message)

    def wait(self):
        return self.handle_message_called.wait()

def test_spawned_thread_causes_container_to_kill_other_thread(container):
    killed_by_error_raised = Event()

    def raise_error():
        raise Exception('foobar')

    def wait_forever():
        try:
            Event().wait()
        except:  # bare except: must also catch GreenletExit (a BaseException)
            killed_by_error_raised.send()
            raise

    container.start()

    container.spawn_managed_thread(wait_forever)
    container.spawn_managed_thread(raise_error)

    with Timeout(1):
        killed_by_error_raised.wait()

def _start_consumer(self, channel):
    ready = Event()

    def make_consumer(ctx):
        consumer = EventConsumer(channel, ctx)
        self._consumers[channel] = consumer
        if not ready.has_result():
            ready.send(consumer)
        return consumer

    proc = spawn(consumer_loop, make_consumer, self.context)
    consumer = ready.wait()
    return consumer, proc

def test_client_closing_connection(container, web_config_port):
    ws_app, wait_for_sock = make_virtual_socket('127.0.0.1', web_config_port)

    gt = eventlet.spawn(ws_app.run_forever)
    wait_for_sock()

    wait_for_close = Event()

    def on_close(ws):
        wait_for_close.send(None)

    ws_app.on_close = on_close
    ws_app.send(b'\xff\x00')  # "Close the connection" packet.
    wait_for_close.wait()

    ws_app.close()
    gt.kill()

    assert container.stop() is None

def save_to(self, data):
    event = Event()
    gt = self.container.spawn_managed_thread(lambda: save_to_hbase(data))
    gt.link(lambda res: event.send(res.wait()))
    while True:
        if event.ready():
            is_saved = event.wait()
            return is_saved
        eventlet.sleep()

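# Hedged note (ours): Event.wait() already blocks cooperatively, so the
# polling loop above can collapse to a direct wait -- a behaviourally
# equivalent sketch under the same container API:
def save_to_direct(self, data):
    event = Event()
    gt = self.container.spawn_managed_thread(lambda: save_to_hbase(data))
    gt.link(lambda res: event.send(res.wait()))
    return event.wait()
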
class Pact:
    def __init__(self, threshold=2):
        self.count = 0
        self.event = Event()
        self.threshold = threshold

    def wait(self):
        self.count += 1
        if self.count == self.threshold:
            self.event.send()
        return self.event.wait()

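# Hedged usage sketch (ours): two greenthreads rendezvous on the Pact above;
# neither proceeds past wait() until both have arrived.
import eventlet

def demo_pact():
    pact = Pact(threshold=2)
    order = []

    def party(name):
        order.append(name + ':arrived')
        pact.wait()
        order.append(name + ':released')

    ga = eventlet.spawn(party, 'a')
    gb = eventlet.spawn(party, 'b')
    ga.wait()
    gb.wait()
    # both arrivals happen before either release
    assert order[:2] == ['a:arrived', 'b:arrived']
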
def test_two_waiters_one_dies(self):
    def waiter(q, evt):
        evt.send(q.wait())

    def do_receive(q, evt):
        eventlet.Timeout(0, RuntimeError())
        try:
            result = q.wait()
            evt.send(result)
        except RuntimeError:
            evt.send('timed out')

    q = coros.queue()
    dying_evt = Event()
    waiting_evt = Event()
    spawn(do_receive, q, dying_evt)
    spawn(waiter, q, waiting_evt)
    sleep(0)
    q.send('hi')
    self.assertEqual(dying_evt.wait(), 'timed out')
    self.assertEqual(waiting_evt.wait(), 'hi')