Example #1
    def _invoke_method_implementation(self, method, this, murano_class,
                                      context, params):
        body = method.body
        if not body:
            return None

        current_thread = eventlet.greenthread.getcurrent()
        if not hasattr(current_thread, '_murano_dsl_thread_marker'):
            thread_marker = current_thread._murano_dsl_thread_marker = \
                uuid.uuid4().hex
        else:
            thread_marker = current_thread._murano_dsl_thread_marker

        method_id = id(body)
        this_id = this.object_id

        event, marker = self._locks.get((method_id, this_id), (None, None))
        if event:
            if marker == thread_marker:
                return self._invoke_method_implementation_gt(
                    body, this, params, murano_class, context)
            event.wait()

        event = Event()
        self._locks[(method_id, this_id)] = (event, thread_marker)
        gt = eventlet.spawn(self._invoke_method_implementation_gt, body,
                            this, params, murano_class, context,
                            thread_marker)
        result = gt.wait()
        del self._locks[(method_id, this_id)]
        event.send()
        return result
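
The snippet above deduplicates concurrent invocations: the first green thread to arrive for a given (method, object) pair installs an Event, and later arrivals wait on it instead of re-running the body. A minimal sketch of the same pattern outside Murano (run_once_per_key and its lock table are illustrative names, not part of the original code):

from eventlet.event import Event

_locks = {}

def run_once_per_key(key, func):
    existing = _locks.get(key)
    if existing is not None:
        # another green thread is already running func for this key;
        # block until it finishes and share its outcome
        return existing.wait()
    done = _locks[key] = Event()
    try:
        result = func()
    except Exception as exc:
        del _locks[key]
        done.send(exc=exc)  # waiters re-raise this from wait()
        raise
    del _locks[key]
    done.send(result)
    return result
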
Example #2
def test_container_doesnt_exhaust_max_workers(container):
    spam_called = Event()
    spam_continue = Event()

    class Service(object):
        name = 'max-workers'

        @foobar
        def spam(self, a):
            spam_called.send(a)
            spam_continue.wait()

    container = ServiceContainer(Service, config={MAX_WORKERS_CONFIG_KEY: 1})

    dep = get_extension(container, Entrypoint)

    # start the first worker, which should wait for spam_continue
    container.spawn_worker(dep, ['ham'], {})

    # start the next worker in a separate thread,
    # because it should block until the first one completes
    gt = spawn(container.spawn_worker, dep, ['eggs'], {})

    with Timeout(1):
        assert spam_called.wait() == 'ham'
        # if the container had spawned the second worker, we would see
        # an error indicating that spam_called was fired twice, and the
        # greenthread would now be dead.
        assert not gt.dead
        # reset the calls and allow the waiting worker to complete.
        spam_called.reset()
        spam_continue.send(None)
        # the second worker should now run and complete
        assert spam_called.wait() == 'eggs'
        assert gt.dead
Example #3
def test_debounce_with_repeat(redis_):

    lock = Lock(redis_)

    tracker = Mock()
    release = Event()

    @lock.debounce(repeat=True)
    def func(*args, **kwargs):
        tracker(*args, **kwargs)
        release.wait()
        return tracker

    def coroutine():
        return func("egg", spam="ham")

    thread = eventlet.spawn(coroutine)
    eventlet.sleep(0.1)

    assert b"1" == redis_.get("lock:func(egg)")

    # simulate locking attempt
    redis_.incr("lock:func(egg)")

    release.send()
    eventlet.sleep(0.1)

    assert b"0" == redis_.get("lock:func(egg)")

    assert tracker == thread.wait()

    # must be called twice with the same args
    assert 2 == tracker.call_count
    assert [call("egg", spam="ham"),
            call("egg", spam="ham")] == tracker.call_args_list
Example #4
class Queue(LightQueue):
    '''Create a queue object with a given maximum size.

    If *maxsize* is less than zero or ``None``, the queue size is infinite.

    ``Queue(0)`` is a channel, that is, its :meth:`put` method always blocks
    until the item is delivered. (This is unlike the standard :class:`Queue`,
    where 0 means infinite size).

    In all other respects, this Queue class resembles the standard library
    :class:`Queue`.
    '''
    def __init__(self, maxsize=None):
        LightQueue.__init__(self, maxsize)
        self.unfinished_tasks = 0
        self._cond = Event()

    def _format(self):
        result = LightQueue._format(self)
        if self.unfinished_tasks:
            result += ' tasks=%s _cond=%s' % (self.unfinished_tasks, self._cond)
        return result

    def _put(self, item):
        LightQueue._put(self, item)
        self._put_bookkeeping()

    def _put_bookkeeping(self):
        self.unfinished_tasks += 1
        if self._cond.ready():
            self._cond.reset()

    def task_done(self):
        '''Indicate that a formerly enqueued task is complete. Used by queue consumer threads.
        For each :meth:`get <Queue.get>` used to fetch a task, a subsequent call to :meth:`task_done` tells the queue
        that the processing on the task is complete.

        If a :meth:`join` is currently blocking, it will resume when all items have been processed
        (meaning that a :meth:`task_done` call was received for every item that had been
        :meth:`put <Queue.put>` into the queue).

        Raises a :exc:`ValueError` if called more times than there were items placed in the queue.
        '''

        if self.unfinished_tasks <= 0:
            raise ValueError('task_done() called too many times')
        self.unfinished_tasks -= 1
        if self.unfinished_tasks == 0:
            self._cond.send(None)

    def join(self):
        '''Block until all items in the queue have been gotten and processed.

        The count of unfinished tasks goes up whenever an item is added to the queue.
        The count goes down whenever a consumer thread calls :meth:`task_done` to indicate
        that the item was retrieved and all work on it is complete. When the count of
        unfinished tasks drops to zero, :meth:`join` unblocks.
        '''
        if self.unfinished_tasks > 0:
            self._cond.wait()
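
A short usage sketch of the task accounting above (assuming this Queue is importable as eventlet.queue.Queue, where current eventlet ships it): each get() is paired with a task_done(), and join() returns once the unfinished count drops to zero.

import eventlet
from eventlet.queue import Queue

q = Queue()

def worker():
    while True:
        item = q.get()
        try:
            pass  # process item here
        finally:
            q.task_done()  # exactly one task_done() per get()

for _ in range(3):
    eventlet.spawn_n(worker)
for i in range(10):
    q.put(i)
q.join()  # unblocks once every put() has been matched by a task_done()
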
Example #5
def test_debounce_with_custom_key(redis_):

    lock = Lock(redis_)

    tracker = Mock()
    release = Event()

    @lock.debounce(key=lambda _, spam: "yo:{}".format(spam.upper()))
    def func(*args, **kwargs):
        tracker(*args, **kwargs)
        release.wait()
        return tracker

    def coroutine():
        return func("egg", spam="ham")

    thread = eventlet.spawn(coroutine)
    eventlet.sleep(0.1)

    assert b"1" == redis_.get("lock:yo:HAM")

    release.send()
    eventlet.sleep(0.1)

    assert b"0" == redis_.get("lock:yo:HAM")

    assert tracker == thread.wait()

    assert 1 == tracker.call_count
    assert call("egg", spam="ham") == tracker.call_args
Example #6
def test_debounce(redis_):

    lock = Lock(redis_)

    tracker = Mock()
    release = Event()

    @lock.debounce
    def func(*args, **kwargs):
        release.wait()
        tracker(*args, **kwargs)
        return tracker

    def coroutine():
        return func("egg", spam="ham")

    thread = eventlet.spawn(coroutine)
    eventlet.sleep(0.1)

    assert b"1" == redis_.get("lock:func(egg)")

    another_thread = eventlet.spawn(coroutine)
    assert another_thread.wait() is None

    assert tracker.call_count == 0

    release.send()
    eventlet.sleep(0.1)

    assert b"0" == redis_.get("lock:func(egg)")

    assert tracker == thread.wait()

    assert 1 == tracker.call_count
    assert call("egg", spam="ham") == tracker.call_args
Example #7
File: eventlet.py Project: ged/m2wsgi
class Handler(base.Handler):
    __doc__ = base.Handler.__doc__ + """
    This Handler subclass is designed for use with eventlet.  It spawns a
    new green thread to handle each incoming request.
    """
    ConnectionClass = Connection

    def __init__(self, *args, **kwds):
        super(Handler, self).__init__(*args, **kwds)
        #  We need to count the number of inflight requests, so the
        #  main thread can wait for them to complete when shutting down.
        self._num_inflight_requests = 0
        self._all_requests_complete = None

    def handle_request(self, req):
        self._num_inflight_requests += 1
        if self._num_inflight_requests == 1:
            self._all_requests_complete = Event()
        @eventlet.spawn_n
        def do_handle_request():
            try:
                self.process_request(req)
            finally:
                self._num_inflight_requests -= 1
                if self._num_inflight_requests == 0:
                    self._all_requests_complete.send()
                    self._all_requests_complete = None

    def wait_for_completion(self):
        if self._num_inflight_requests > 0:
            self._all_requests_complete.wait()
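
The inflight counter plus a re-armed Event is a reusable shutdown idiom: the Event is created when the first request of a busy period arrives and fired when the count returns to zero. A standalone sketch of that idiom (InflightTracker is an illustrative name):

import eventlet
from eventlet.event import Event

class InflightTracker(object):
    def __init__(self):
        self._inflight = 0
        self._all_done = None

    def spawn(self, func, *args):
        self._inflight += 1
        if self._inflight == 1:
            self._all_done = Event()  # re-armed for each busy period

        def run():
            try:
                func(*args)
            finally:
                self._inflight -= 1
                if self._inflight == 0:
                    self._all_done.send()
        eventlet.spawn_n(run)

    def wait_for_completion(self):
        # returns immediately if nothing is in flight
        if self._inflight > 0:
            self._all_done.wait()
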
Example #9
File: queue.py Project: macboy80/bitHopper
class Queue(LightQueue):
    '''Create a queue object with a given maximum size.

    If *maxsize* is less than zero or ``None``, the queue size is infinite.

    ``Queue(0)`` is a channel, that is, its :meth:`put` method always blocks
    until the item is delivered. (This is unlike the standard :class:`Queue`,
    where 0 means infinite size).

    In all other respects, this Queue class resembles the standard library
    :class:`Queue`.
    '''
    def __init__(self, maxsize=None):
        LightQueue.__init__(self, maxsize)
        self.unfinished_tasks = 0
        self._cond = Event()

    def _format(self):
        result = LightQueue._format(self)
        if self.unfinished_tasks:
            result += ' tasks=%s _cond=%s' % (self.unfinished_tasks,
                                              self._cond)
        return result

    def _put(self, item):
        LightQueue._put(self, item)
        self._put_bookkeeping()

    def _put_bookkeeping(self):
        self.unfinished_tasks += 1
        if self._cond.ready():
            self._cond.reset()

    def task_done(self):
        '''Indicate that a formerly enqueued task is complete. Used by queue consumer threads.
        For each :meth:`get <Queue.get>` used to fetch a task, a subsequent call to :meth:`task_done` tells the queue
        that the processing on the task is complete.

        If a :meth:`join` is currently blocking, it will resume when all items have been processed
        (meaning that a :meth:`task_done` call was received for every item that had been
        :meth:`put <Queue.put>` into the queue).

        Raises a :exc:`ValueError` if called more times than there were items placed in the queue.
        '''

        if self.unfinished_tasks <= 0:
            raise ValueError('task_done() called too many times')
        self.unfinished_tasks -= 1
        if self.unfinished_tasks == 0:
            self._cond.send(None)

    def join(self):
        '''Block until all items in the queue have been gotten and processed.

        The count of unfinished tasks goes up whenever an item is added to the queue.
        The count goes down whenever a consumer thread calls :meth:`task_done` to indicate
        that the item was retrieved and all work on it is complete. When the count of
        unfinished tasks drops to zero, :meth:`join` unblocks.
        '''
        if self.unfinished_tasks > 0:  # otherwise join() on an idle queue blocks forever
            self._cond.wait()
Example #10
def test_debounce_failing_on_execution(redis_):

    lock = Lock(redis_)

    tracker = Mock()
    release = Event()

    class Whoops(Exception):
        pass

    tracker.side_effect = Whoops("Yo!")

    @lock.debounce()
    def func(*args, **kwargs):
        release.wait()
        tracker(*args, **kwargs)

    def coroutine():
        with pytest.raises(Whoops):
            func("egg", spam="ham")

    thread = eventlet.spawn(coroutine)
    eventlet.sleep(0.1)

    assert b"1" == redis_.get("lock:func(egg)")

    release.send()
    eventlet.sleep(0.1)

    assert b"0" == redis_.get("lock:func(egg)")

    thread.wait()

    assert 1 == tracker.call_count
    assert call("egg", spam="ham") == tracker.call_args
Example #11
class StreamsResource(object):

    def __init__(self):
        self._action_event = Event()
        self._session_events = {}

    def new(self):
        new_id = str(uuid.uuid4())
        self._session_events[new_id] = Event()
        print(self._session_events.keys())
        return new_id

    def sessions(self):
        return self._session_events.keys()
    
    def send_message(self, id, message):
        if id:
            self._session_events[id].send(message)
            eventlet.sleep()
            self._session_events[id] = Event()
        else:
            self._action_event.send(message)
            eventlet.sleep()
            self._action_event = Event()
    
    def get_message(self, id):
        if id:
            return self._session_events[id].wait()
        else:
            return self._action_event.wait()
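
send_message() above re-arms each Event by firing it, yielding so waiters run, and then installing a fresh one. The same idea isolated into a tiny broadcast helper (Broadcast is an illustrative name); swapping in the fresh Event before sending avoids a window where a late caller waits on an already-fired event:

import eventlet
from eventlet.event import Event

class Broadcast(object):
    def __init__(self):
        self._event = Event()

    def send(self, message):
        # install a fresh Event first so late arrivals wait for the
        # *next* message, then wake everyone parked on the old one
        event, self._event = self._event, Event()
        event.send(message)
        eventlet.sleep(0)  # give waiters a chance to run

    def wait(self):
        return self._event.wait()
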
Example #12
File: timer.py Project: pombredanne/nameko
class TimerProvider(EntrypointProvider):
    def __init__(self, interval, config_key):
        self._default_interval = interval
        self.config_key = config_key
        self.should_stop = Event()
        self.gt = None

    def prepare(self):
        interval = self._default_interval

        if self.config_key:
            config = self.container.config
            interval = config.get(self.config_key, interval)

        self.interval = interval

    def start(self):
        _log.debug('starting %s', self)
        self.gt = self.container.spawn_managed_thread(self._run)

    def stop(self):
        _log.debug('stopping %s', self)
        self.should_stop.send(True)
        self.gt.wait()

    def kill(self, exc):
        _log.debug('killing %s', self)
        self.gt.kill()

    def _run(self):
        ''' Runs the interval loop.

        This should not be called directly, rather the `start()` method
        should be used.
        '''
        while not self.should_stop.ready():
            start = time.time()

            self.handle_timer_tick()

            elapsed_time = (time.time() - start)
            sleep_time = max(self.interval - elapsed_time, 0)
            self._sleep_or_stop(sleep_time)

    def _sleep_or_stop(self, sleep_time):
        ''' Sleeps for `sleep_time` seconds or until a `should_stop` event
        has been fired, whichever comes first.
        '''
        try:
            with Timeout(sleep_time):
                self.should_stop.wait()
        except Timeout:
            # we use the timeout as a cancellable sleep
            pass

    def handle_timer_tick(self):
        args = tuple()
        kwargs = {}
        self.container.spawn_worker(self, args, kwargs)
Example #13
class ServerEndpoint(object):
    __slots__ = ('_handlers', '_ctx', '_sock', '_stopevent')
    def __init__(self, bind_addr, ctx=None):
        self._handlers = {}
        self._ctx = ctx or zmq.Context.instance()
        self._sock = self._ctx.socket(zmq.XREP)
        self._sock.bind(bind_addr)
        self._stopevent = Event()

    def register(self, name, what):
        """
        Register an API call with the server, clients will use the `name` to
        call it.

        `what` must be a callable object.
        """
        assert callable(what) is True
        if name in self._handlers:
            raise RuntimeError("Function '%s' already registered" % (name,))
        self._handlers[name] = what

    def stop(self):
        """
        Notify run() to return at the earliest convenient time
        """
        self._stopevent.send(True)

    def run(self):
        """
        Listen on the server socket and process incoming requests
        """
        sock = self._sock
        handlers = self._handlers
        stopevent = self._stopevent

        while not stopevent.ready():
            client_id, raw_msg = sock.recv_multipart()            
            if raw_msg is None:
                continue
            msg = loads(raw_msg)
            if not isinstance(msg, (list, tuple)) or len(msg) != 5:
                continue
            try:
                if not isinstance(msg[0], (list, tuple)) or len(msg[0]) != 3:
                    continue
                name, args, kwargs = msg[0]                
                if name not in handlers:
                    msg = (msg[0], msg[1], 404, True, msg[4])
                elif name[0] == '_':
                    msg = (msg[0], msg[1], 400, True, msg[4])
                else:
                    handler = handlers[name]
                    response = handler(*args, **kwargs)
                    msg = (None, msg[1], response, False, msg[4])
            except:
                msg = (msg[0], msg[1], 500, True, msg[4])
            sock.send_multipart([client_id, dumps(msg)])
        stopevent.wait()
        stopevent.reset()
Example #14
class ClientEndpoint(object):
    __slots__ = ('_ctx', '_sock', '_tracker', '_seq', '_stopevent')
    def __init__(self, server_addr, ctx=None):
        self._seq = 0
        self._ctx = ctx or zmq.Context.instance()
        self._sock = self._ctx.socket(zmq.XREQ)
        if not isinstance(server_addr, (list, tuple)):
            server_addr = [server_addr]
        for addr in server_addr:
            self._sock.connect(addr)
        self._tracker = MessageTracker()  
        self._stopevent = Event()

    def send(self, args):
        assert isinstance(args, (list, tuple))
        self._seq += 1
        msg = (args, 0xFFFFFFFF, None, None, self._seq)
        self._sock.send(dumps(msg))
        return self._tracker.track(msg)                

    def run(self):
        """
        Listen for and process replies as they come in
        """
        stopevent = self._stopevent
        sock = self._sock
        tracker = self._tracker
        eventlet.spawn_n(tracker.run)      
        while not stopevent.ready():
            raw_msg = sock.recv()
            if raw_msg is not None:                
                msg = loads(raw_msg)
                if isinstance(msg, (list, tuple)):
                    tracker.on_reply(msg)
        ret = stopevent.wait()
        stopevent.reset()
        return ret

    def stop(self):
        """
        Stop the run() method at the next convenient time.
        """
        self._stopevent.send(True)
        self._tracker.stop()

    def call(self, name, async_=True):
        """
        >>> client.call("ping")().wait() == "pong"

        >>> client.call("ping", False) == "pong"

        :param name: Name of remote method to call
        :param async_: Should calls be asynchronous
            (renamed from ``async``, which is a reserved word in Python 3)
        :returns: callable object
        """
        if async_:
            return AsyncFunctionProxy(name, self)
        else:
            return SyncFunctionProxy(name, self)
Example #15
def test_prefetch_count(rabbit_manager, rabbit_config, container_factory):
    class NonShared(QueueConsumer):
        @property
        def sharing_key(self):
            return uuid.uuid4()

    messages = []

    class SelfishConsumer1(Consumer):
        queue_consumer = NonShared()

        def handle_message(self, body, message):
            consumer_continue.wait()
            super(SelfishConsumer1, self).handle_message(body, message)

    class SelfishConsumer2(Consumer):
        queue_consumer = NonShared()

        def handle_message(self, body, message):
            messages.append(body)
            super(SelfishConsumer2, self).handle_message(body, message)

    class Service(object):
        name = "service"

        @SelfishConsumer1.decorator(queue=ham_queue)
        @SelfishConsumer2.decorator(queue=ham_queue)
        def handle(self, payload):
            pass

    rabbit_config['max_workers'] = 1
    container = container_factory(Service, rabbit_config)
    container.start()

    consumer_continue = Event()

    # the two handlers would ordinarily take alternating messages, but are
    # limited to holding one un-ACKed message. Since Handler1 never ACKs, it
    # only ever gets one message, and Handler2 gets the others.

    def wait_for_expected(worker_ctx, res, exc_info):
        return {'m3', 'm4', 'm5'}.issubset(set(messages))

    with entrypoint_waiter(container, 'handle', callback=wait_for_expected):
        vhost = rabbit_config['vhost']
        properties = {'content_type': 'application/data'}
        for message in ('m1', 'm2', 'm3', 'm4', 'm5'):
            rabbit_manager.publish(vhost,
                                   'spam',
                                   '',
                                   message,
                                   properties=properties)

    # we don't know which handler picked up the first message,
    # but all the others should've been handled by Handler2
    assert messages[-3:] == ['m3', 'm4', 'm5']

    # release the waiting consumer
    consumer_continue.send(None)
Example #18
class Client(object):
    def __init__(self):
        self.results = []
        self.stop = Event()
        self.no_more_results = Event()
        self.failure = None

    def read(self, path, **kwargs):
        try:
            result = self.results.pop(0)
        except IndexError:
            if not self.no_more_results.ready():
                self.no_more_results.send()
            eventlet.with_timeout(5, self.stop.wait)
            raise NoMoreResults()
        if result.op != READ:
            self.failure = "Unexpected result type for read(): %s" % result.op
            raise UnexpectedResultType()
        if result.exception is not None:
            log.debug("Raise read exception %s",
                      type(result.exception).__name__)
            raise result.exception
        log.debug("Return read result %s", result)
        return result

    def write(self, path, value, **kwargs):
        log.debug("Write of %s to %s", value, path)
        try:
            result = self.results.pop(0)
        except IndexError:
            if not self.no_more_results.ready():
                self.no_more_results.send()
            eventlet.with_timeout(5, self.stop.wait)
            raise NoMoreResults()
        if result.op != WRITE:
            self.failure = "Unexpected result type for write(): %s" % result.op
            raise UnexpectedResultType()
        if result.exception is not None:
            log.debug("Raise write exception %s", result.exception)
            raise result.exception
        log.debug("Return write result")
        return result

    def add_read_exception(self, exception):
        assert (isinstance(exception, Exception))
        self.results.append(EtcdResult(exception=exception))

    def add_read_result(self, **kwargs):
        self.results.append(EtcdResult(**kwargs))

    def add_write_result(self):
        # Write results have no useful content.
        self.results.append(EtcdResult(op=WRITE))

    def add_write_exception(self, exception):
        self.results.append(EtcdResult(op=WRITE, exception=exception))
Example #19
class MessageHandler(object):
    queue = ham_queue

    def __init__(self):
        self.handle_message_called = Event()

    def handle_message(self, body, message):
        self.handle_message_called.send(message)

    def wait(self):
        return self.handle_message_called.wait()
Example #21
    class Pact:
        def __init__(self, threshold=2):
            self.count = 0
            self.event = Event()
            self.threshold = threshold

        def wait(self):
            self.count += 1
            if self.count == self.threshold:
                self.event.send()
            return self.event.wait()
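
Pact is a tiny rendezvous barrier: every participant calls wait(), and the arrival that reaches the threshold fires the Event so all of them resume together. A usage sketch, assuming the Pact class above is in scope:

import eventlet

pact = Pact(threshold=2)

def participant(name):
    pact.wait()  # blocks until both participants have arrived
    return name

a = eventlet.spawn(participant, 'a')
b = eventlet.spawn(participant, 'b')
assert {a.wait(), b.wait()} == {'a', 'b'}
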
Example #22
File: timer.py Project: shauns/nameko
class Timer(object):
    ''' A timer object, which will call a given method repeatedly at a given
    interval.
    '''
    def __init__(self, interval, func):
        self.interval = interval
        self.func = func
        self.gt = None
        self.should_stop = Event()

    def start(self):
        ''' Starts the timer in a separate green thread.

        Once started it may be stopped using its `stop()` method.
        '''
        self.gt = eventlet.spawn(self._run)
        _log.debug(
            'started timer for %s with %ss interval',
            self.func, self.interval)

    def _run(self):
        ''' Runs the interval loop.

        This should not be called directly, rather the `start()` method
        should be used.
        '''
        while not self.should_stop.ready():
            start = time.time()
            try:
                self.func()
            except Exception as e:
                _log.exception('error in timer handler: %s', e)

            sleep_time = max(self.interval - (time.time() - start), 0)
            self._sleep_or_stop(sleep_time)

    def _sleep_or_stop(self, sleep_time):
        ''' Sleeps for `sleep_time` seconds or until a `should_stop` event
        has been fired, whichever comes first.
        '''
        try:
            with Timeout(sleep_time):
                self.should_stop.wait()
        except Timeout:
            # we use the timeout as a cancellable sleep
            pass

    def stop(self):
        ''' Gracefully stops the timer, waiting for its timer_method
        to complete if it is running.
        '''
        self.should_stop.send(True)
        self.gt.wait()
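
The _sleep_or_stop() trick above is the standard eventlet idiom for a cancellable sleep: wait on an Event under a Timeout, so whichever of the stop signal and the timeout comes first ends the wait. In isolation:

import eventlet
from eventlet import Timeout
from eventlet.event import Event

should_stop = Event()

def cancellable_sleep(seconds):
    try:
        with Timeout(seconds):
            should_stop.wait()  # returns early if should_stop fires
    except Timeout:
        pass  # the timeout is just the end of the sleep

# elsewhere: should_stop.send(True) wakes every pending cancellable_sleep()
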
Example #23
class EventletCallback(object):
    def __init__(self):
        self.event = Event()

    def wait(self):
        with eventlet.Timeout(10):
            return self.event.wait()

    def success(self, result):
        self.event.send(result)

    def failure(self, exc):
        self.event.send_exception(exc)
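
EventletCallback adapts a callback-style API to blocking green-thread code: the producer calls success() or failure(), and the consumer blocks in wait(). A usage sketch:

import eventlet

cb = EventletCallback()

def producer():
    eventlet.sleep(0.1)
    cb.success('done')  # or cb.failure(SomeError()) to raise in the waiter

eventlet.spawn_n(producer)
assert cb.wait() == 'done'
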
Example #24
class eventletDrainer(greenletDrainer):
    def spawn(self, func):
        from eventlet import sleep, spawn
        g = spawn(func)
        sleep(0)
        return g

    def _create_drain_complete_event(self):
        from eventlet.event import Event
        self._drain_complete_event = Event()

    def _send_drain_complete_event(self):
        self._drain_complete_event.send()
Example #25
class Receipt(object):
    result = None

    def __init__(self, callback=None):
        self.callback = callback
        self.ready = Event()

    def finished(self, result):
        self.result = result
        if self.callback:
            self.callback(result)
        self.ready.send()

    def wait(self, timeout=None):
        with Timeout(timeout):
            return self.ready.wait()
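
Receipt pairs an optional callback with a waitable Event; note that Timeout(None) never fires, so wait() without a timeout blocks indefinitely. A usage sketch, assuming Receipt and eventlet are in scope:

import eventlet

receipt = Receipt(callback=lambda result: print('callback got', result))
eventlet.spawn_after(0.1, receipt.finished, 42)
receipt.wait(timeout=1)
assert receipt.result == 42
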
Example #26
class GreenBody(GreenPool):
    """
    Special subclass of GreenPool which has a wait() method,
    that will return when any greenthread inside the pool exits.
    """
    def __init__(self, *args, **kwargs):
        super(GreenBody, self).__init__(*args, **kwargs)
        self.one_exited = Event()

    def wait(self):
        return self.one_exited.wait()

    def _spawn_done(self, coro):
        super(GreenBody, self)._spawn_done(coro)
        if not self.one_exited.ready():
            self.one_exited.send(coro.wait())
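
A usage sketch for GreenBody: wait() returns as soon as the first greenthread in the pool exits, which is useful for supervising a set of long-running loops where any exit usually signals a problem.

import eventlet

body = GreenBody(10)
body.spawn(eventlet.sleep, 0.1)  # exits first
body.spawn(eventlet.sleep, 60)
body.wait()  # returns once the 0.1s greenthread has exited
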
Example #27
File: greenbody.py Project: alex/mantrid
class GreenBody(GreenPool):
    """
    Special subclass of GreenPool which has a wait() method,
    that will return when any greenthread inside the pool exits.
    """

    def __init__(self, *args, **kwargs):
        super(GreenBody, self).__init__(*args, **kwargs)
        self.one_exited = Event()

    def wait(self):
        return self.one_exited.wait()
    
    def _spawn_done(self, coro):
        super(GreenBody, self)._spawn_done(coro)
        if not self.one_exited.ready():  # only the first exit sends; re-sending raises
            self.one_exited.send(coro.wait())
Example #28
def test_handlers_do_not_block(SlackClient, container_factory, config,
                               tracker):

    work_1 = Event()
    work_2 = Event()

    class Service:

        name = 'sample'

        @rtm.handle_event
        def handle_1(self, event):
            work_1.wait()
            tracker.handle_1(event)

        @rtm.handle_event
        def handle_2(self, event):
            work_2.wait()
            tracker.handle_2(event)

    events = [{'spam': 'ham'}]

    def rtm_read():
        if events:
            return [events.pop(0)]
        else:
            return []

    SlackClient.return_value.rtm_read.side_effect = rtm_read
    container = container_factory(Service, config)
    container.start()

    try:
        # both handlers are still working
        assert (tracker.handle_1.call_args_list == [])
        assert (tracker.handle_2.call_args_list == [])

        # finish work of the second handler
        work_2.send()
        sleep(0.1)

        # second handler is done
        assert (tracker.handle_1.call_args_list == [])
        assert (tracker.handle_2.call_args_list == [call({'spam': 'ham'})])

        # finish work of the first handler
        work_1.send()
        sleep(0.1)

        # first handler is done
        assert (tracker.handle_1.call_args_list == [call({'spam': 'ham'})])
        assert (tracker.handle_2.call_args_list == [call({'spam': 'ham'})])
    finally:
        if not work_1.ready():
            work_1.send()
        if not work_2.ready():
            work_2.send()
Example #29
def test_wait_for_worker_idle(container_factory, rabbit_config):

    event = Event()

    class Service(object):
        name = "service"

        @rpc
        def wait_for_event(self):
            event.wait()

    container = container_factory(Service, rabbit_config)
    container.start()

    max_workers = DEFAULT_MAX_WORKERS

    # TODO: pytest.warns is not supported until pytest >= 2.8.0, whose
    # `testdir` plugin is not compatible with eventlet on python3 --
    # see https://github.com/mattbennett/eventlet-pytest-bug
    with warnings.catch_warnings(record=True) as ws:
        wait_for_worker_idle(container)
        assert len(ws) == 1
        assert issubclass(ws[-1].category, DeprecationWarning)

    # verify nothing running
    assert container._worker_pool.free() == max_workers
    with eventlet.Timeout(1):
        wait_for_worker_idle(container)

    # spawn a worker
    wait_for_event = get_extension(container, Rpc)
    container.spawn_worker(wait_for_event, [], {})

    # verify that wait_for_worker_idle does not return while worker active
    assert container._worker_pool.free() == max_workers - 1
    gt = eventlet.spawn(wait_for_worker_idle, container)
    assert not gt.dead  # still waiting

    # verify that wait_for_worker_idle raises when it times out
    with pytest.raises(eventlet.Timeout):
        wait_for_worker_idle(container, timeout=0)

    # complete the worker, verify previous wait_for_worker_idle completes
    event.send()
    with eventlet.Timeout(1):
        gt.wait()
    assert container._worker_pool.free() == max_workers
Example #30
    def test_send_exc(self):
        log = []
        e = Event()

        def waiter():
            try:
                result = e.wait()
                log.append(('received', result))
            except Exception as ex:
                log.append(('caught', ex))
        spawn(waiter)
        sleep(0)  # let the waiter block on e.wait()
        obj = Exception()
        e.send(exc=obj)
        sleep(0)
        sleep(0)
        assert log == [('caught', obj)], log
Example #32
def test_wait_for_worker_idle(container_factory, rabbit_config):

    event = Event()

    class Service(object):
        name = "service"

        @rpc
        def wait_for_event(self):
            event.wait()

    container = container_factory(Service, rabbit_config)
    container.start()

    max_workers = DEFAULT_MAX_WORKERS

    with pytest.deprecated_call():
        wait_for_worker_idle(container)

    # verify nothing running
    assert container._worker_pool.free() == max_workers
    with eventlet.Timeout(1):
        wait_for_worker_idle(container)

    # spawn a worker
    wait_for_event = get_extension(container, Rpc)
    container.spawn_worker(wait_for_event, [], {})

    # verify that wait_for_worker_idle does not return while worker active
    assert container._worker_pool.free() == max_workers - 1
    gt = eventlet.spawn(wait_for_worker_idle, container)
    assert not gt.dead  # still waiting

    # verify that wait_for_worker_idle raises when it times out
    with pytest.raises(eventlet.Timeout):
        wait_for_worker_idle(container, timeout=0)

    # complete the worker, verify previous wait_for_worker_idle completes
    event.send()
    with eventlet.Timeout(1):
        gt.wait()
    assert container._worker_pool.free() == max_workers
Example #34
class Timer(Entrypoint):
    def __init__(self, interval, eager=True, **kwargs):
        self.gt = None
        self.eager = eager
        self.interval = interval
        self.stopping_event = Event()
        self.finished_event = Event()
        super(Timer, self).__init__(**kwargs)

    def start(self):
        self.gt = self.container.spawn_managed_thread(self._run)

    def stop(self):
        self.stopping_event.send(True)
        self.gt.wait()

    def kill(self):
        self.gt.kill()

    def _run(self):
        def gen_interval():
            start_time = time.time()
            start = 1 if self.eager else 0
            for n in count(start=start):
                i = max(start_time + n * self.interval - time.time(), 0)
                yield i

        interval = gen_interval()
        to_sleep = next(interval)
        while True:
            with Timeout(to_sleep, exception=False):
                self.stopping_event.wait()
                break
            self.container.spawn_worker_thread(self, (), {},
                                               res_handler=self.res_handler)
            self.finished_event.wait()
            self.finished_event.reset()
            to_sleep = next(interval)

    def res_handler(self, context, result, exc_info):
        self.finished_event.send(True)
        return result, exc_info
Example #35
    def save_to(self, data):

        event = Event()
        gt = self.container.spawn_managed_thread(lambda: save_to_hbase(data))
        gt.link(lambda res: event.send(res.wait()))

        while True:
            if event.ready():
                is_saved = event.wait()
                return is_saved
            eventlet.sleep()
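
The polling loop above can be avoided entirely: Event.wait() blocks only the calling green thread, so linking the worker's result into the Event and waiting on it directly behaves the same without the sleep loop. A hedged sketch reusing the snippet's names (save_to_hbase and spawn_managed_thread come from the surrounding service):

    def save_to(self, data):
        event = Event()
        gt = self.container.spawn_managed_thread(lambda: save_to_hbase(data))
        gt.link(lambda res: event.send(res.wait()))
        return event.wait()  # blocks this green thread only
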
Example #36
    def test_disconnect_and_fail_to_reconnect(self, container_factory,
                                              rabbit_manager, rabbit_config,
                                              toxic_rpc_proxy, toxiproxy):
        block = Event()

        class Service(object):
            name = "service"

            @rpc
            def method(self, arg):
                block.wait()
                return arg

        container = container_factory(Service, rabbit_config)
        container.start()

        # make an async call that will block,
        # wait for the worker to have spawned
        with wait_for_call(container, 'spawn_worker'):
            res = toxic_rpc_proxy.service.method.call_async('msg1')

        try:
            # disconnect
            toxiproxy.disable()

            # rpc proxy should return an error for the request in flight.
            # it will also attempt to reconnect and throw on failure
            # because toxiproxy is still disconnected
            with pytest.raises(socket.error):
                with pytest.raises(RpcConnectionError):
                    res.result()

        finally:
            # reconnect toxiproxy
            block.send(True)
            toxiproxy.enable()

        # proxy will not work afterwards because the queueconsumer connection
        # was not recovered on the second attempt
        with pytest.raises(RuntimeError):
            toxic_rpc_proxy.service.method("msg3")
Example #38
File: extensions.py Project: zmyer/nameko
class ProviderCollector(object):
    def __init__(self, *args, **kwargs):
        self._providers = set()
        self._providers_registered = False
        self._last_provider_unregistered = Event()
        super(ProviderCollector, self).__init__(*args, **kwargs)

    def register_provider(self, provider):
        self._providers_registered = True
        _log.debug('registering provider %s for %s', provider, self)
        self._providers.add(provider)

    def unregister_provider(self, provider):
        providers = self._providers
        if provider not in self._providers:
            return

        _log.debug('unregistering provider %s for %s', provider, self)

        providers.remove(provider)
        if len(providers) == 0:
            _log.debug('last provider unregistered for %s', self)
            self._last_provider_unregistered.send()

    def wait_for_providers(self):
        """ Wait for any providers registered with the collector to have
        unregistered.

        Returns immediately if no providers were ever registered.
        """
        if self._providers_registered:
            _log.debug('waiting for providers to unregister %s', self)
            self._last_provider_unregistered.wait()
            _log.debug('all providers unregistered %s', self)

    def stop(self):
        """ Default `:meth:Extension.stop()` implementation for
        subclasses using `ProviderCollector` as a mixin.
        """
        self.wait_for_providers()
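
A usage sketch for ProviderCollector (assuming a module-level _log, as the snippet does): stop() blocks until the last registered provider has unregistered.

import eventlet

collector = ProviderCollector()
provider = object()
collector.register_provider(provider)

eventlet.spawn_after(0.1, collector.unregister_provider, provider)
collector.stop()  # returns once the last provider has unregistered
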
Example #39
def test_debounce_failing_on_repeat_execution(redis_):

    lock = Lock(redis_)

    tracker = Mock()
    release = Event()

    class Whoops(Exception):
        pass

    tracker.side_effect = [None, Whoops("Yo!")]

    @lock.debounce(repeat=True)
    def func(*args, **kwargs):
        tracker(*args, **kwargs)
        release.wait()

    def coroutine():
        with pytest.raises(Whoops):
            func("egg", spam="ham")

    thread = eventlet.spawn(coroutine)
    eventlet.sleep(0.1)

    assert b"1" == redis_.get("lock:func(egg)")

    # simulate locking attempt
    redis_.incr("lock:func(egg)")

    release.send()
    eventlet.sleep(0.1)

    assert b"0" == redis_.get("lock:func(egg)")

    thread.wait()

    # must be called twice with the same args
    assert 2 == tracker.call_count
    assert [call("egg", spam="ham"),
            call("egg", spam="ham")] == tracker.call_args_list
Example #40
def test_debounce_with_callback(redis_):

    lock = Lock(redis_)

    tracker, callback_tracker = Mock(), Mock()
    release = Event()

    def callback(*args, **kwargs):
        callback_tracker(*args, **kwargs)

    @lock.debounce(callback=callback)
    def func(*args, **kwargs):
        tracker(*args, **kwargs)
        release.wait()
        return tracker

    def coroutine():
        return func("egg", spam="ham")

    thread = eventlet.spawn(coroutine)
    eventlet.sleep(0.1)

    assert b"1" == redis_.get("lock:func(egg)")

    # simulate locking attempt
    redis_.incr("lock:func(egg)")

    release.send()
    eventlet.sleep(0.1)

    assert b"0" == redis_.get("lock:func(egg)")

    assert tracker == thread.wait()

    assert 1 == tracker.call_count
    assert call("egg", spam="ham") == tracker.call_args

    # test callback call
    assert 1 == callback_tracker.call_count
    assert call("egg", spam="ham") == callback_tracker.call_args
Example #41
    def all_data(self, table_name):

        event = Event()
        gt = self.container.spawn_managed_thread(
            lambda: all_data_hbase(table_name))
        gt.link(lambda res: event.send(res.wait()))

        while True:
            if event.ready():
                retFile = event.wait()
                retJson = {"results": retFile}

                return json.dumps(retJson)
            eventlet.sleep()
Example #42
    def send_it(self, data):

        pr_data = process_data(data)
        event = Event()
        gt = self.container.spawn_managed_thread(
            lambda: send_to_kafka(pr_data))
        gt.link(lambda res: event.send(res.wait()))
        eventlet.sleep()

        while True:
            if event.ready():
                is_sent = event.wait()
                return is_sent
            eventlet.sleep()
Example #43
    def test_disconnect_and_fail_to_reconnect(
        self, container_factory, rabbit_manager, rabbit_config,
        toxic_rpc_proxy, toxiproxy
    ):
        block = Event()

        class Service(object):
            name = "service"

            @rpc
            def method(self, arg):
                block.wait()
                return arg

        container = container_factory(Service, rabbit_config)
        container.start()

        # make an async call that will block,
        # wait for the worker to have spawned
        with wait_for_call(container, 'spawn_worker'):
            res = toxic_rpc_proxy.service.method.call_async('msg1')

        # disable toxiproxy to kill connections
        with toxiproxy.disabled():

            # toxiproxy remains disabled when the proxy attempts to reconnect,
            # so we should return an error for the request in flight
            with pytest.raises(socket.error):
                res.result()

        # unblock worker
        block.send(True)

        # proxy will not work afterwards because the queueconsumer connection
        # was not recovered on the second attempt
        with pytest.raises(RuntimeError):
            toxic_rpc_proxy.service.method("msg2")
Example #45
File: producers.py Project: jmoiron/gaspar
class Producer(object):
    """The producer object, a server which takes requests from a TCP socket
    and forwards them to a zmq.PUSH socket that is PULLed from by workers
    that the producer starts.  The port is the TCP port to listen on, but
    the host is used by all sockets.  The consumer should be a Consumer
    object that will run in the worker processes and actually handle requests."""

    def __init__(self, consumer, port, processes=num_cpus, host='127.0.0.1'):
        self.outstanding = {}
        self.port = port
        self.host = host
        self.consumer = consumer
        self.consumer.initialize(self)
        self.init_events()
        self.pool = TokenPool(max_size=processes)
        self.pushpool = TokenPool(max_size=1)
        self.forker = Forker(self, consumer, processes)

    def init_events(self):
        # these events correspond to the server socket
        self.server_start = Event()
        self.server_stop = Event()
        # these events more or less correspond to the completion of the 
        # startup process, including forking
        self.running = Event()
        self.stopped = Event()

    def setup_zmq(self):
        """Set up a PUSH and a PULL socket.  The PUSH socket will push out
        requests to the workers.  The PULL socket will receive responses from
        the workers and reply through the server socket."""
        self.context = zmq.Context()
        self.push = self.context.socket(zmq.PUSH)
        self.push_port = self.push.bind_to_random_port("tcp://%s" % self.host)
        # start a listener for the pull socket
        eventlet.spawn(self.zmq_pull)
        eventlet.sleep(0)

    def zmq_pull(self):
        # bind to the port and wait for the workers to start
        self.pull = self.context.socket(zmq.PULL)
        self.pull_port = self.pull.bind_to_random_port("tcp://%s" % self.host)
        self.running.wait()
        while True:
            try:
                packed = self.pull.recv()
                self.pool.put(None)
                eventlet.spawn(self.response_handler, packed)
            except zmq.ZMQError:
                eventlet.sleep(0.05)
            except:
                import traceback
                traceback.print_exc()
                return

    def serve(self):
        self.server = eventlet.listen((self.host, self.port))
        self.server_addr = self.server.getsockname()
        # finish server listening, fire off event which fires workers and wait
        self.server_start.send()
        self.running.wait()
        while not self.server_stop.ready():
            try:
                conn, addr = self.server.accept()
            except error as e:
                if self.server_stop.ready():
                    return
                logger.error("error accepting connection: %r" % e)
                continue  # no connection to hand off; try accepting again
            eventlet.spawn(self.request_handler, conn, addr)

    def start(self, blocking=True):
        """Start the producer.  This will eventually fire the ``server_start``
        and ``running`` events in sequence, which signify that the incoming
        TCP request socket is running and the workers have been forked,
        respectively.  If ``blocking`` is False, control ."""
        self.setup_zmq()
        if blocking:
            self.serve()
        else:
            eventlet.spawn(self.serve)
            # ensure that self.serve runs now, as calling code will
            # expect start() to have started the server even when non-blocking
            eventlet.sleep(0)

    def stop(self):
        self.push.close(linger=0)
        self.pull.close(linger=0)
        try:
            self.server.shutdown(SHUT_RDWR)
        except error as e:
            if e.errno != 57:
                raise
        self.server.close()
        self.server_stop.send()
        # let event listeners listening to this event run
        eventlet.sleep(0)
Example #46
File: worker.py Project: jab/melkman
class ScheduledMessageService(object):

    MIN_SLEEP_TIME = timedelta(seconds=1)
    MAX_SLEEP_TIME = timedelta(minutes=60)
    MAX_CLAIM_TIME = timedelta(minutes=5)

    def __init__(self, context):
        self.context = context
        self.service_queue = Event()
        self._listener = None
        self._dispatcher = None

    def run(self):
        procs = []
        try:
            with self.context:
                self._listener = self._start_listener()
                self._dispatcher = spawn(self.run_dispatcher)

            procs = [self._listener, self._dispatcher]
            waitall(procs)
        except GreenletExit:
            pass
        finally:
            # procs defaults to [] so cleanup is safe even if we were
            # killed before the workers were spawned
            killall(procs)
            waitall(procs)

    ################################################################
    # The listener consumes messages on the scheduled message queue 
    # and stores the deferred messages in the database.
    ################################################################

    def _start_listener(self):
        @always_ack
        def cb(message_data, message):
            with self.context:
                _handle_scheduler_command(message_data, message, self.context)
                self.wakeup_dispatcher()

        dispatch = MessageDispatch(self.context)
        return dispatch.start_worker(SCHEDULER_COMMAND, cb)


    ##############################################################
    # The dispatcher consumes deferred messages from the database 
    # when their scheduled time arrives and spits them out 
    # to the message broker
    ##############################################################    
    def run_dispatcher(self):
        try:
            # cleanup any mess left over last time...
            with self.context:
                self.cleanup()
                while True:
                    log.info("checking for ready messages...")
                    last_time = self.send_ready_messages()
                    sleep_time = self._calc_sleep(last_time)
                    log.info("sleeping for %s" % sleep_time)
                    sleep_secs = sleep_time.days * 86400 + sleep_time.seconds
                    try:
                        with_timeout(sleep_secs, self.service_queue.wait)
                    except TimeoutError:
                        pass

                    if self.service_queue.ready():
                        self.service_queue.reset()
        except GreenletExit:
            log.debug("ScheduledMessageService dispatcher exiting...")

    def wakeup_dispatcher(self):
        if not self.service_queue.ready():
            self.service_queue.send(True)

    def _calc_sleep(self, after=None):
        next_time = self.find_next_send_time(after=after)
    
        if next_time is None:
            sleep_time = self.MAX_SLEEP_TIME
        else:
            sleep_time = next_time - datetime.utcnow()
            sleep_time += timedelta(seconds=1)
            sleep_time -= timedelta(microseconds=sleep_time.microseconds)

        if sleep_time < self.MIN_SLEEP_TIME:
            sleep_time = self.MIN_SLEEP_TIME
        if sleep_time > self.MAX_SLEEP_TIME:
            sleep_time = self.MAX_SLEEP_TIME        
        
        return sleep_time

    def find_next_send_time(self, after=None):
        if after is None:
            after = datetime.utcnow()
        after_str = DateTimeField()._to_json(after)

        next_query = dict(
            startkey = [False, after_str, {}],
            endkey = [True, None],
            include_docs = False,
            descending = False,
            limit = 1
        )

        next_send = None
        for r in view_deferred_messages_by_timestamp(self.context.db, **next_query):
            next_send = DateTimeField()._to_python(r.key[1])
            break

        return next_send

    def send_ready_messages(self):
        while True:
            now = datetime.utcnow()
            now_str = DateTimeField()._to_json(now)

            query = dict(
                startkey = [False, None],
                endkey = [False, now_str, {}],
                include_docs = True,
                descending = False,
                limit = 100
            )


            vr = view_deferred_messages_by_timestamp(self.context.db, **query)
            batch = []
            for r in vr:
                batch.append(DeferredAMQPMessage.wrap(r.doc))

            if len(batch) == 0:
                break
            
            dispatch_count = 0
            for message in batch:
                try:
                    if self._dispatch_message(message):
                        dispatch_count += 1
                except GreenletExit:
                    # asked to stop, go ahead and quit.
                    raise
                except:
                    log.error("Unexected error dispatching message %s: %s" %
                              (message, traceback.format_exc()))
                    
            log.info("Dispatched %d messages" % dispatch_count)
            
        return now

    def _dispatch_message(self, message):
        if not message.claim(self.context.db):
            # another dispatcher holds the claim; nothing sent by us
            return False
        
        try:
            publisher = Publisher(self.context.broker, exchange=message.options.exchange,
                                  exchange_type=message.options.exchange_type)
            publisher.send(message.message,
                           routing_key = message.options.routing_key,
                           delivery_mode = message.options.delivery_mode,
                           mandatory = message.options.mandatory,
                           priority = message.options.priority)
            publisher.close()
        except:
            log.error("Error dispatching deferred message %s: %s" % (message, traceback.format_exc()))
            self.error_reschedule(message)
            return False
        else:
            log.debug("Dispatched message %s" % message)
            # sent with no problems, done with it.
            self.context.db.delete(message)
            return True

    def error_reschedule(self, message):
        message.error_count += 1
        
        if message.error_count < 10:
            delay = 2**message.error_count
        else:
            delay = 60*10

        resched_time = datetime.utcnow() + timedelta(seconds=delay)
        message.unclaim(self.context.db, resched_time)
        
        log.warn("Rescheduled message %s for %s" % (message.id, resched_time))

    def cleanup(self):
        log.info("Performing cleanup of claimed items...")

        # anything older than this has held the claim for too long
        # and is considered dead.
        cutoff = datetime.utcnow() - self.MAX_CLAIM_TIME
        cutoff_str = DateTimeField()._to_json(cutoff)

        query = dict(
            startkey = [True, cutoff_str, {}],
            endkey = [True],
            limit = 100,
            include_docs = True,
            descending = True
        )

        unclaim_count = 0
        while True:
            vr = view_deferred_messages_by_timestamp(self.context.db, **query)
            batch = [DeferredAMQPMessage.wrap(r.doc) for r in vr]
            if len(batch) == 0:
                break

            for message in batch:
                self.error_reschedule(message)
                unclaim_count += 1

        if unclaim_count > 0:
            log.warn('Cleanup unclaimed %d items' % unclaim_count)
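
Two pieces of arithmetic in this example are easy to sanity-check in
isolation: the manual timedelta-to-seconds conversion in ``run_dispatcher``
(86400 seconds per day) and the capped exponential backoff in
``error_reschedule``. A small standalone sketch:

from datetime import timedelta

def to_seconds(td):
    # same arithmetic as run_dispatcher; on Python 2.7+,
    # td.total_seconds() gives the same value (plus fractional seconds)
    return td.days * 86400 + td.seconds

assert to_seconds(timedelta(minutes=60)) == 3600

# error_reschedule: delay doubles per failure, capped at 10 minutes
for error_count in (1, 5, 9, 10, 20):
    delay = 2 ** error_count if error_count < 10 else 60 * 10
    print("%2d failures -> retry in %4ds" % (error_count, delay))
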
Example #47
0
class QueueConsumer(DependencyProvider, ProviderCollector, ConsumerMixin):
    def __init__(self):
        super(QueueConsumer, self).__init__()
        self._connection = None

        self._consumers = {}

        self._pending_messages = set()
        self._pending_ack_messages = []
        self._pending_requeue_messages = []
        self._pending_remove_providers = {}

        self._gt = None
        self._starting = False

        self._consumers_ready = Event()

    @property
    def _amqp_uri(self):
        return self.container.config[AMQP_URI_CONFIG_KEY]

    @property
    def _prefetch_count(self):
        return self.container.max_workers

    def _handle_thread_exited(self, gt):
        exc = None
        try:
            gt.wait()
        except Exception as e:
            exc = e

        if not self._consumers_ready.ready():
            self._consumers_ready.send_exception(exc)

    def start(self):
        if not self._starting:
            self._starting = True

            _log.debug('starting %s', self)
            self._gt = self.container.spawn_managed_thread(
                self.run, protected=True)
            self._gt.link(self._handle_thread_exited)
        try:
            _log.debug('waiting for consumer ready %s', self)
            self._consumers_ready.wait()
        except QueueConsumerStopped:
            _log.debug('consumer was stopped before it started %s', self)
        except Exception as exc:
            _log.debug('consumer failed to start %s (%s)', self, exc)
        else:
            _log.debug('started %s', self)

    def stop(self):
        """ Stop the queue-consumer gracefully.

        Wait until the last provider has been unregistered and for
        the ConsumerMixin's greenthread to exit (i.e. until all pending
        messages have been acked or requeued and all consumers stopped).
        """
        if not self._consumers_ready.ready():
            _log.debug('stopping while consumer is starting %s', self)

            stop_exc = QueueConsumerStopped()

            # stopping before we have started successfully by brutally
            # killing the consumer thread as we don't have a way to hook
            # into the pre-consumption startup process
            self._gt.kill(stop_exc)

        self.wait_for_providers()

        try:
            _log.debug('waiting for consumer death %s', self)
            self._gt.wait()
        except QueueConsumerStopped:
            pass

        super(QueueConsumer, self).stop()
        _log.debug('stopped %s', self)

    def kill(self):
        """ Kill the queue-consumer.

        Unlike `stop()` any pending message ack or requeue-requests,
        requests to remove providers, etc are lost and the consume thread is
        asked to terminate as soon as possible.
        """
        # greenlet has a magic attribute ``dead`` - pylint: disable=E1101
        if self._gt and not self._gt.dead:
            # we can't just kill the thread because we have to give
            # ConsumerMixin a chance to close the sockets properly.
            self._providers = set()
            self._pending_messages = set()
            self._pending_ack_messages = []
            self._pending_requeue_messages = []
            self._pending_remove_providers = {}
            self.should_stop = True
            self._gt.wait()

            super(QueueConsumer, self).kill()
            _log.debug('killed %s', self)

    def unregister_provider(self, provider):
        if not self._consumers_ready.ready():
            # we cannot handle the situation where we are starting up and
            # want to remove a consumer at the same time
            # TODO: With the upcoming error handling mechanism, this needs
            # TODO: to be thought through again.
            self._last_provider_unregistered.send()
            return

        removed_event = Event()
        # we can only cancel a consumer from within the consumer thread
        self._pending_remove_providers[provider] = removed_event
        # so we will just register the consumer to be canceled
        removed_event.wait()

        super(QueueConsumer, self).unregister_provider(provider)

    def ack_message(self, message):
        _log.debug("stashing message-ack: %s", message)
        self._pending_messages.remove(message)
        self._pending_ack_messages.append(message)

    def requeue_message(self, message):
        _log.debug("stashing message-requeue: %s", message)
        self._pending_messages.remove(message)
        self._pending_requeue_messages.append(message)

    def _on_message(self, body, message):
        _log.debug("received message: %s", message)
        self._pending_messages.add(message)

    def _cancel_consumers_if_requested(self):
        provider_remove_events = self._pending_remove_providers.items()
        self._pending_remove_providers = {}

        for provider, removed_event in provider_remove_events:
            consumer = self._consumers.pop(provider)

            _log.debug('cancelling consumer [%s]: %s', provider, consumer)
            consumer.cancel()
            removed_event.send()

    def _process_pending_message_acks(self):
        messages = self._pending_ack_messages
        if messages:
            _log.debug('ack() %d processed messages', len(messages))
            while messages:
                msg = messages.pop()
                msg.ack()
                eventlet.sleep()

        messages = self._pending_requeue_messages
        if messages:
            _log.debug('requeue() %d processed messages', len(messages))
            while messages:
                msg = messages.pop()
                msg.requeue()
                eventlet.sleep()

    @property
    def connection(self):
        """ Kombu requirement """
        if self._connection is None:
            self._connection = Connection(self._amqp_uri)

        return self._connection

    def get_consumers(self, Consumer, channel):
        """ Kombu callback to set up consumers.

        Called after any (re)connection to the broker.
        """
        _log.debug('setting up consumers %s', self)

        for provider in self._providers:
            callbacks = [self._on_message, provider.handle_message]

            consumer = Consumer(queues=[provider.queue], callbacks=callbacks)
            consumer.qos(prefetch_count=self._prefetch_count)

            self._consumers[provider] = consumer

        return self._consumers.values()

    def on_iteration(self):
        """ Kombu callback for each `drain_events` loop iteration."""
        self._cancel_consumers_if_requested()

        self._process_pending_message_acks()

        num_consumers = len(self._consumers)
        num_pending_messages = len(self._pending_messages)

        if num_consumers + num_pending_messages == 0:
            _log.debug('requesting stop after iteration')
            self.should_stop = True

    def on_connection_error(self, exc, interval):
        _log.warn('broker connection error: {}. '
                  'Retrying in {} seconds.'.format(exc, interval))

    def on_consume_ready(self, connection, channel, consumers, **kwargs):
        """ Kombu callback when consumers are ready to accept messages.

        Called after any (re)connection to the broker.
        """
        if not self._consumers_ready.ready():
            _log.debug('consumer started %s', self)
            self._consumers_ready.send(None)

    def consume(self, limit=None, timeout=None, safety_interval=0.1, **kwargs):
        """ Lifted from Kombu.

        We switch the order of the `break` and `self.on_iteration()` to
        avoid waiting on a drain_events timeout before breaking the loop.
        """
        elapsed = 0
        with self.consumer_context(**kwargs) as (conn, channel, consumers):
            for i in limit and range(limit) or count():
                self.on_iteration()
                if self.should_stop:
                    break
                try:
                    conn.drain_events(timeout=safety_interval)
                except socket.timeout:
                    elapsed += safety_interval
                    # Excluding the following clause from coverage,
                    # as timeout never appears to be set - This method
                    # is a lift from kombu so will leave in place for now.
                    if timeout and elapsed >= timeout:  # pragma: no cover
                        raise
                except socket.error:
                    if not self.should_stop:
                        raise
                else:
                    yield
                    elapsed = 0
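
The ``unregister_provider`` / ``_cancel_consumers_if_requested`` pair is a
general pattern: a consumer may only be cancelled from the thread that owns
it, so requesters enqueue an ``Event`` and block until the owning loop
services the request. The same handshake, stripped down (names are
illustrative):

import eventlet
from eventlet.event import Event

pending_remove = {}

def unregister(key):
    done = Event()
    pending_remove[key] = done
    done.wait()                    # park until the owner has acted
    print("%s removed" % key)

def owner_iteration():
    for key, done in list(pending_remove.items()):
        del pending_remove[key]
        # ... cancel the consumer here, in the owning thread ...
        done.send()                # wake the requester

gt = eventlet.spawn(unregister, "consumer-1")
eventlet.sleep(0)                  # let the request get registered
owner_iteration()
gt.wait()
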
Example #48
0
def test_prefetch_count(rabbit_manager, rabbit_config):
    container = Mock()
    container.config = rabbit_config
    container.max_workers = 1
    container.spawn_managed_thread = spawn_thread

    queue_consumer1 = QueueConsumer()
    queue_consumer1.bind("queue_consumer", container)

    queue_consumer2 = QueueConsumer()
    queue_consumer2.bind("queue_consumer", container)

    consumer_continue = Event()

    class Handler1(object):
        queue = ham_queue

        def handle_message(self, body, message):
            consumer_continue.wait()
            queue_consumer1.ack_message(message)

    messages = []

    class Handler2(object):
        queue = ham_queue

        def handle_message(self, body, message):
            messages.append(body)
            queue_consumer2.ack_message(message)

    handler1 = Handler1()
    handler2 = Handler2()

    queue_consumer1.register_provider(handler1)
    queue_consumer2.register_provider(handler2)

    queue_consumer1.start()
    queue_consumer2.start()

    vhost = rabbit_config['vhost']
    # the first consumer only has a prefetch_count of 1 and will only
    # consume 1 message and wait in handler1()
    rabbit_manager.publish(vhost, 'spam', '', 'ham')
    # the next message will go to handler2() regardless of any prefetch_count
    rabbit_manager.publish(vhost, 'spam', '', 'eggs')
    # the third message also goes to handler2 because the first consumer
    # has a prefetch_count of 1 and thus cannot deal with another message
    # until it has acked the first one
    rabbit_manager.publish(vhost, 'spam', '', 'bacon')

    with eventlet.Timeout(TIMEOUT):
        while len(messages) < 2:
            eventlet.sleep()

    # allow the waiting consumer to ack its message
    consumer_continue.send(None)

    assert messages == ['eggs', 'bacon']

    queue_consumer1.unregister_provider(handler1)
    queue_consumer2.unregister_provider(handler2)

    queue_consumer1.kill()
    queue_consumer2.kill()
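
The behaviour this test exercises comes from AMQP's ``basic.qos``: with
``prefetch_count=1`` the broker will not deliver a second message to a
consumer until the first has been acked. A hedged kombu sketch of just that
knob (the broker URL and queue name are assumptions, and a running broker is
required):

from kombu import Connection, Queue

def on_message(body, message):
    message.ack()                   # only now may the broker send another

with Connection('amqp://guest:guest@localhost//') as conn:
    consumer = conn.Consumer(Queue('ham'), callbacks=[on_message])
    consumer.qos(prefetch_count=1)  # issues AMQP basic.qos
    consumer.consume()
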
Example #49
0
File: rpc.py Project: ayoshi/nameko
class RpcConsumer(SharedExtension, ProviderCollector):

    queue_consumer = QueueConsumer()

    def __init__(self):
        self._unregistering_providers = set()
        self._unregistered_from_queue_consumer = Event()
        self.queue = None
        super(RpcConsumer, self).__init__()

    def setup(self):
        if self.queue is None:

            service_name = self.container.service_name
            queue_name = RPC_QUEUE_TEMPLATE.format(service_name)
            routing_key = '{}.*'.format(service_name)

            exchange = get_rpc_exchange(self.container.config)

            self.queue = Queue(
                queue_name,
                exchange=exchange,
                routing_key=routing_key,
                durable=True)

            self.queue_consumer.register_provider(self)
            self._registered = True

    def stop(self):
        """ Stop the RpcConsumer.

        The RpcConsumer ordinarily unregisters from the QueueConsumer when the
        last Rpc subclass unregisters from it. If no providers were registered,
        we should unregister from the QueueConsumer as soon as we're asked
        to stop.
        """
        if not self._providers_registered:
            self.queue_consumer.unregister_provider(self)
            self._unregistered_from_queue_consumer.send(True)

    def unregister_provider(self, provider):
        """ Unregister a provider.

        Blocks until this RpcConsumer is unregistered from its QueueConsumer,
        which only happens when all providers have asked to unregister.
        """
        self._unregistering_providers.add(provider)
        remaining_providers = self._providers - self._unregistering_providers
        if not remaining_providers:
            _log.debug('unregistering from queueconsumer %s', self)
            self.queue_consumer.unregister_provider(self)
            _log.debug('unregistered from queueconsumer %s', self)
            self._unregistered_from_queue_consumer.send(True)

        _log.debug('waiting for unregister from queue consumer %s', self)
        self._unregistered_from_queue_consumer.wait()
        super(RpcConsumer, self).unregister_provider(provider)

    def get_provider_for_method(self, routing_key):
        service_name = self.container.service_name

        for provider in self._providers:
            key = '{}.{}'.format(service_name, provider.method_name)
            if key == routing_key:
                return provider
        else:
            method_name = routing_key.split(".")[-1]
            raise MethodNotFound(method_name)

    def handle_message(self, body, message):
        routing_key = message.delivery_info['routing_key']
        try:
            provider = self.get_provider_for_method(routing_key)
            provider.handle_message(body, message)
        except Exception:
            exc_info = sys.exc_info()
            self.handle_result(message, None, exc_info)

    def handle_result(self, message, result, exc_info):
        responder = Responder(self.container.config, message)
        result, exc_info = responder.send_response(result, exc_info)

        self.queue_consumer.ack_message(message)
        return result, exc_info

    def requeue_message(self, message):
        self.queue_consumer.requeue_message(message)
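
The routing convention here is simply ``'<service>.<method>'``, and provider
lookup is a linear string match that falls back to ``MethodNotFound``. The
same logic in isolation, with a plain dict standing in for the registered
provider set:

def get_provider_for_method(providers, service_name, routing_key):
    for method_name, provider in providers.items():
        if '{}.{}'.format(service_name, method_name) == routing_key:
            return provider
    # stand-in for raising MethodNotFound
    raise LookupError(routing_key.rsplit('.', 1)[-1])

providers = {'get_user': object()}
assert get_provider_for_method(
    providers, 'users', 'users.get_user') is providers['get_user']
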
Example #50
0
File: timer.py Project: ahmb/nameko
class TimerProvider(EntrypointProvider):
    def __init__(self, interval, config_key):
        self._default_interval = interval
        self.config_key = config_key
        self.should_stop = Event()
        self.gt = None

    def prepare(self):
        interval = self._default_interval

        if self.config_key:
            config = self.container.config
            interval = config.get(self.config_key, interval)

        self.interval = interval

    def start(self):
        _log.debug('starting %s', self)
        self.gt = self.container.spawn_managed_thread(self._run)

    def stop(self):
        _log.debug('stopping %s', self)
        self.should_stop.send(True)
        self.gt.wait()

    def kill(self):
        _log.debug('killing %s', self)
        self.gt.kill()

    def _run(self):
        ''' Runs the interval loop.

        This should not be called directly, rather the `start()` method
        should be used.
        '''
        while not self.should_stop.ready():
            start = time.time()

            self.handle_timer_tick()

            elapsed_time = (time.time() - start)
            sleep_time = max(self.interval - elapsed_time, 0)
            self._sleep_or_stop(sleep_time)

    def _sleep_or_stop(self, sleep_time):
        ''' Sleeps for `sleep_time` seconds or until a `should_stop` event
        has been fired, whichever comes first.
        '''
        try:
            with Timeout(sleep_time):
                self.should_stop.wait()
        except Timeout:
            # we use the timeout as a cancellable sleep
            pass

    def handle_timer_tick(self):
        args = ()
        kwargs = {}

        # Note that we don't catch ContainerBeingKilled here. If that's raised,
        # there is nothing for us to do anyway. The exception bubbles, and is
        # caught by :meth:`Container._handle_thread_exited`, though the
        # triggered `kill` is a no-op, since the container is already
        # `_being_killed`.
        self.container.spawn_worker(self, args, kwargs)
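
The sleep calculation in ``_run`` compensates for how long the tick itself
took, so ticks stay roughly on the interval grid and an overdue tick fires
immediately. A few worked values, assuming ``interval = 5``:

interval = 5.0
for elapsed_time in (0.2, 4.0, 7.5):
    sleep_time = max(interval - elapsed_time, 0)
    print("tick took %.1fs -> sleep %.1fs" % (elapsed_time, sleep_time))
# tick took 0.2s -> sleep 4.8s
# tick took 4.0s -> sleep 1.0s
# tick took 7.5s -> sleep 0.0s
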
Example #51
0
def test_prefetch_count(rabbit_manager, rabbit_config, mock_container):
    container = mock_container
    container.shared_extensions = {}
    container.config = rabbit_config
    container.max_workers = 1
    container.spawn_managed_thread = spawn_managed_thread
    content_type = "application/data"
    container.accept = [content_type]

    class NonShared(QueueConsumer):
        @property
        def sharing_key(self):
            return uuid.uuid4()

    queue_consumer1 = NonShared().bind(container)
    queue_consumer1.setup()
    queue_consumer2 = NonShared().bind(container)
    queue_consumer2.setup()

    consumer_continue = Event()

    class Handler1(object):
        queue = ham_queue

        def handle_message(self, body, message):
            consumer_continue.wait()
            queue_consumer1.ack_message(message)

    messages = []

    class Handler2(object):
        queue = ham_queue

        def handle_message(self, body, message):
            messages.append(body)
            queue_consumer2.ack_message(message)

    handler1 = Handler1()
    handler2 = Handler2()

    queue_consumer1.register_provider(handler1)
    queue_consumer2.register_provider(handler2)

    queue_consumer1.start()
    queue_consumer2.start()

    vhost = rabbit_config["vhost"]
    # the first consumer only has a prefetch_count of 1 and will only
    # consume 1 message and wait in handler1()
    rabbit_manager.publish(vhost, "spam", "", "ham", properties=dict(content_type=content_type))
    # the next message will go to handler2() regardless of any prefetch_count
    rabbit_manager.publish(vhost, "spam", "", "eggs", properties=dict(content_type=content_type))
    # the third message also goes to handler2 because the first consumer
    # has a prefetch_count of 1 and thus cannot deal with another message
    # until it has acked the first one
    rabbit_manager.publish(vhost, "spam", "", "bacon", properties=dict(content_type=content_type))

    # allow the waiting consumer to ack its message
    consumer_continue.send(None)

    assert messages == ["eggs", "bacon"]

    queue_consumer1.unregister_provider(handler1)
    queue_consumer2.unregister_provider(handler2)

    queue_consumer1.kill()
    queue_consumer2.kill()
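
The ``NonShared`` subclass works because nameko deduplicates
``SharedExtension`` instances by their ``sharing_key``; a key that is a fresh
``uuid4()`` on every access can never match, so each bind gets its own
QueueConsumer. The mechanism, reduced to a dict cache (illustrative only, not
nameko's actual internals):

import uuid

shared_extensions = {}

def bind(sharing_key):
    # one instance per distinct key, as with SharedExtension
    return shared_extensions.setdefault(sharing_key, object())

assert bind('amqp://broker//') is bind('amqp://broker//')   # shared
assert bind(uuid.uuid4()) is not bind(uuid.uuid4())         # never shared
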
Example #52
0
class QueueConsumer(SharedExtension, ProviderCollector, ConsumerMixin):

    def __init__(self):

        self._consumers = {}
        self._pending_remove_providers = {}

        self._gt = None
        self._starting = False

        self._consumers_ready = Event()
        super(QueueConsumer, self).__init__()

    @property
    def amqp_uri(self):
        return self.container.config[AMQP_URI_CONFIG_KEY]

    @property
    def prefetch_count(self):
        return self.container.max_workers

    @property
    def accept(self):
        return self.container.accept

    def _handle_thread_exited(self, gt):
        exc = None
        try:
            gt.wait()
        except Exception as e:
            exc = e

        if not self._consumers_ready.ready():
            self._consumers_ready.send_exception(exc)

    def setup(self):
        ssl = self.container.config.get(AMQP_SSL_CONFIG_KEY)
        verify_amqp_uri(self.amqp_uri, ssl=ssl)

    def start(self):
        if not self._starting:
            self._starting = True

            _log.debug('starting %s', self)
            self._gt = self.container.spawn_managed_thread(self.run)
            self._gt.link(self._handle_thread_exited)
        try:
            _log.debug('waiting for consumer ready %s', self)
            self._consumers_ready.wait()
        except QueueConsumerStopped:
            _log.debug('consumer was stopped before it started %s', self)
        except Exception as exc:
            _log.debug('consumer failed to start %s (%s)', self, exc)
        else:
            _log.debug('started %s', self)

    def stop(self):
        """ Stop the queue-consumer gracefully.

        Wait until the last provider has been unregistered and for
        the ConsumerMixin's greenthread to exit (i.e. until all pending
        messages have been acked or requeued and all consumers stopped).
        """
        if not self._consumers_ready.ready():
            _log.debug('stopping while consumer is starting %s', self)

            stop_exc = QueueConsumerStopped()

            # stopping before we have started successfully by brutally
            # killing the consumer thread as we don't have a way to hook
            # into the pre-consumption startup process
            self._gt.kill(stop_exc)

        self.wait_for_providers()

        try:
            _log.debug('waiting for consumer death %s', self)
            self._gt.wait()
        except QueueConsumerStopped:
            pass

        super(QueueConsumer, self).stop()
        _log.debug('stopped %s', self)

    def kill(self):
        """ Kill the queue-consumer.

        Unlike `stop()` any pending message ack or requeue-requests,
        requests to remove providers, etc are lost and the consume thread is
        asked to terminate as soon as possible.
        """
        # greenlet has a magic attribute ``dead`` - pylint: disable=E1101
        if self._gt is not None and not self._gt.dead:
            # we can't just kill the thread because we have to give
            # ConsumerMixin a chance to close the sockets properly.
            self._providers = set()
            self._pending_remove_providers = {}
            self.should_stop = True
            try:
                self._gt.wait()
            except Exception as exc:
                # discard the exception since we're already being killed
                _log.warn(
                    'QueueConsumer %s raised `%s` during kill', self, exc)

            super(QueueConsumer, self).kill()
            _log.debug('killed %s', self)

    def unregister_provider(self, provider):
        if not self._consumers_ready.ready():
            # we cannot handle the situation where we are starting up and
            # want to remove a consumer at the same time
            # TODO: With the upcoming error handling mechanism, this needs
            # TODO: to be thought through again.
            self._last_provider_unregistered.send()
            return

        removed_event = Event()
        # we can only cancel a consumer from within the consumer thread
        self._pending_remove_providers[provider] = removed_event
        # so we will just register the consumer to be canceled
        removed_event.wait()

        super(QueueConsumer, self).unregister_provider(provider)

    def ack_message(self, message):
        # only attempt to ack if the message connection is alive;
        # otherwise the message will already have been reclaimed by the broker
        if message.channel.connection:
            try:
                message.ack()
            except ConnectionError:  # pragma: no cover
                pass  # ignore connection closing inside conditional

    def requeue_message(self, message):
        # only attempt to requeue if the message connection is alive;
        # otherwise the message will already have been reclaimed by the broker
        if message.channel.connection:
            try:
                message.requeue()
            except ConnectionError:  # pragma: no cover
                pass  # ignore connection closing inside conditional

    def _cancel_consumers_if_requested(self):
        provider_remove_events = self._pending_remove_providers.items()
        self._pending_remove_providers = {}

        for provider, removed_event in provider_remove_events:
            consumer = self._consumers.pop(provider)

            _log.debug('cancelling consumer [%s]: %s', provider, consumer)
            consumer.cancel()
            removed_event.send()

    @property
    def connection(self):
        """ Provide the connection parameters for kombu's ConsumerMixin.

        The `Connection` object is a declaration of connection parameters
        that is lazily evaluated. It doesn't represent an established
        connection to the broker at this point.
        """
        heartbeat = self.container.config.get(
            HEARTBEAT_CONFIG_KEY, DEFAULT_HEARTBEAT
        )
        ssl = self.container.config.get(AMQP_SSL_CONFIG_KEY)
        return Connection(self.amqp_uri, heartbeat=heartbeat, ssl=ssl)

    def handle_message(self, provider, body, message):
        ident = u"{}.handle_message[{}]".format(
            type(provider).__name__, message.delivery_info['routing_key']
        )
        self.container.spawn_managed_thread(
            partial(provider.handle_message, body, message), identifier=ident
        )

    def get_consumers(self, consumer_cls, channel):
        """ Kombu callback to set up consumers.

        Called after any (re)connection to the broker.
        """
        _log.debug('setting up consumers %s', self)

        for provider in self._providers:
            callbacks = [partial(self.handle_message, provider)]

            consumer = consumer_cls(
                queues=[provider.queue],
                callbacks=callbacks,
                accept=self.accept
            )
            consumer.qos(prefetch_count=self.prefetch_count)

            self._consumers[provider] = consumer

        return self._consumers.values()

    def on_iteration(self):
        """ Kombu callback for each `drain_events` loop iteration."""
        self._cancel_consumers_if_requested()

        if len(self._consumers) == 0:
            _log.debug('requesting stop after iteration')
            self.should_stop = True

    def on_connection_error(self, exc, interval):
        _log.warning(
            "Error connecting to broker at {} ({}).\n"
            "Retrying in {} seconds.".format(self.amqp_uri, exc, interval))

    def on_consume_ready(self, connection, channel, consumers, **kwargs):
        """ Kombu callback when consumers are ready to accept messages.

        Called after any (re)connection to the broker.
        """
        if not self._consumers_ready.ready():
            _log.debug('consumer started %s', self)
            self._consumers_ready.send(None)
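
As the ``connection`` property's docstring notes, a kombu ``Connection`` is
only a declaration of parameters. A sketch demonstrating that constructing
one performs no network I/O (URL and heartbeat are illustrative):

from kombu import Connection

conn = Connection('amqp://guest:guest@localhost//', heartbeat=60)
# no socket has been opened yet; one is created on demand,
# e.g. by conn.connect() or by requesting the first channel
print("%s heartbeat=%s" % (conn.hostname, conn.heartbeat))
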
Example #53
0
class ServiceContainer(object):

    def __init__(self, service_cls, worker_ctx_cls, config):

        self.service_cls = service_cls
        self.worker_ctx_cls = worker_ctx_cls

        self.service_name = get_service_name(service_cls)

        self.config = config
        self.max_workers = config.get(MAX_WORKERS_KEY) or DEFAULT_MAX_WORKERS

        self.dependencies = DependencySet()
        for dep in prepare_dependencies(self):
            self.dependencies.add(dep)

        self.started = False
        self._worker_pool = GreenPool(size=self.max_workers)

        self._active_threads = set()
        self._protected_threads = set()
        self._being_killed = False
        self._died = Event()

    @property
    def entrypoints(self):
        return filter(is_entrypoint_provider, self.dependencies)

    @property
    def injections(self):
        return filter(is_injection_provider, self.dependencies)

    def start(self):
        """ Start a container by starting all the dependency providers.
        """
        _log.debug('starting %s', self)
        self.started = True

        with log_time(_log.debug, 'started %s in %0.3f sec', self):
            self.dependencies.all.prepare()
            self.dependencies.all.start()

    def stop(self):
        """ Stop the container gracefully.

        First all entrypoints are asked to ``stop()``.
        This ensures that no new worker threads are started.

        It is the providers' responsibility to gracefully shut down when
        ``stop()`` is called on them and only return when they have stopped.

        After all entrypoints have stopped the container waits for any
        active workers to complete.

        After all active workers have stopped the container stops all
        injections.

        At this point there should be no more managed threads. In case there
        are any managed threads, they are killed by the container.
        """
        if self._died.ready():
            _log.debug('already stopped %s', self)
            return

        _log.debug('stopping %s', self)

        with log_time(_log.debug, 'stopped %s in %0.3f sec', self):
            dependencies = self.dependencies

            # entrypoint deps have to be stopped before injection deps
            # to ensure that running workers can successfully complete
            dependencies.entrypoints.all.stop()

            # there might still be some running workers, which we have to
            # wait for to complete before we can stop injection dependencies
            self._worker_pool.waitall()

            # it should be safe now to stop any injection as there is no
            # active worker which could be using it
            dependencies.injections.all.stop()

            # finally, stop nested dependencies
            dependencies.nested.all.stop()

            # just in case there was a provider not taking care of its workers,
            # or a dependency not taking care of its protected threads
            self._kill_active_threads()
            self._kill_protected_threads()

            self.started = False
            self._died.send(None)

    def kill(self, exc):
        """ Kill the container in a semi-graceful way.

        All non-protected managed threads are killed first. This includes
        all active workers generated by :meth:`ServiceContainer.spawn_worker`.
        Next, dependencies are killed. Finally, any remaining protected threads
        are killed.

        The container dies with the given ``exc``.
        """
        if self._being_killed:
            # this happens if a managed thread exits with an exception
            # while the container is being killed or another caller
            # behaves in a similar manner
            _log.debug('already killing %s ... waiting for death', self)
            self._died.wait()

        self._being_killed = True

        if self._died.ready():
            _log.debug('already stopped %s', self)
            return

        _log.info('killing %s due to "%s"', self, exc)

        self.dependencies.entrypoints.all.kill(exc)
        self._kill_active_threads()
        self.dependencies.all.kill(exc)
        self._kill_protected_threads()

        self.started = False
        self._died.send_exception(exc)

    def wait(self):
        """ Block until the container has been stopped.

        If the container was stopped using ``kill(exc)``,
        ``wait()`` raises ``exc``.
        Any unhandled exception raised in a managed thread or in the
        life-cycle management code also causes the container to be
        ``kill()``ed, which causes an exception to be raised from ``wait()``.
        """
        return self._died.wait()

    def spawn_worker(self, provider, args, kwargs,
                     context_data=None, handle_result=None):
        """ Spawn a worker thread for running the service method decorated
        with an entrypoint ``provider``.

        ``args`` and ``kwargs`` are used as arguments for the service
        method.

        ``context_data`` is used to initialize a ``WorkerContext``.

        ``handle_result`` is an optional callback which may be passed
        in by the calling entrypoint provider. It is called with the
        result returned or error raised by the service method.
        """
        service = self.service_cls()
        worker_ctx = self.worker_ctx_cls(
            self, service, provider.name, args, kwargs, data=context_data)

        _log.debug('spawning %s', worker_ctx,
                   extra=worker_ctx.extra_for_logging)
        gt = self._worker_pool.spawn(self._run_worker, worker_ctx,
                                     handle_result)
        self._active_threads.add(gt)
        gt.link(self._handle_thread_exited)
        return worker_ctx

    def spawn_managed_thread(self, run_method, protected=False):
        """ Spawn a managed thread to run ``run_method``.

        Threads can be marked as ``protected``, which means the container will
        not forcibly kill them until after all dependencies have been killed.
        Dependencies that require a managed thread to complete their kill
        procedure should ensure to mark them as ``protected``.

        Any uncaught errors inside ``run_method`` cause the container to be
        killed.

        It is the caller's responsibility to terminate their spawned threads.
        Threads are killed automatically if they are still running after
        all dependencies are stopped during :meth:`ServiceContainer.stop`.

        Entrypoints may only create separate threads using this method,
        to ensure they are life-cycle managed.
        """
        gt = eventlet.spawn(run_method)
        if not protected:
            self._active_threads.add(gt)
        else:
            self._protected_threads.add(gt)
        gt.link(self._handle_thread_exited)
        return gt

    def _run_worker(self, worker_ctx, handle_result):
        _log.debug('setting up %s', worker_ctx,
                   extra=worker_ctx.extra_for_logging)

        if not worker_ctx.parent_call_stack:
            _log.debug('starting call chain',
                       extra=worker_ctx.extra_for_logging)
        _log.debug('call stack for %s: %s',
                   worker_ctx, '->'.join(worker_ctx.call_id_stack),
                   extra=worker_ctx.extra_for_logging)

        with log_time(_log.debug, 'ran worker %s in %0.3fsec', worker_ctx):

            self.dependencies.injections.all.inject(worker_ctx)
            self.dependencies.all.worker_setup(worker_ctx)

            result = exc = None
            try:
                _log.debug('calling handler for %s', worker_ctx,
                           extra=worker_ctx.extra_for_logging)

                method = getattr(worker_ctx.service, worker_ctx.method_name)

                with log_time(_log.debug, 'ran handler for %s in %0.3fsec',
                              worker_ctx):
                    result = method(*worker_ctx.args, **worker_ctx.kwargs)
            except Exception as e:
                log_worker_exception(worker_ctx, e)
                exc = e

            with log_time(_log.debug, 'tore down worker %s in %0.3fsec',
                          worker_ctx):

                _log.debug('signalling result for %s', worker_ctx,
                           extra=worker_ctx.extra_for_logging)
                self.dependencies.injections.all.worker_result(
                    worker_ctx, result, exc)

                _log.debug('tearing down %s', worker_ctx,
                           extra=worker_ctx.extra_for_logging)
                self.dependencies.all.worker_teardown(worker_ctx)
                self.dependencies.injections.all.release(worker_ctx)

            if handle_result is not None:
                _log.debug('handling result for %s', worker_ctx,
                           extra=worker_ctx.extra_for_logging)

                with log_time(_log.debug, 'handled result for %s in %0.3fsec',
                              worker_ctx):
                    handle_result(worker_ctx, result, exc)

    def _kill_active_threads(self):
        """ Kill all managed threads that were not marked as "protected" when
        they were spawned.

        This set will include all worker threads generated by
        :meth:`ServiceContainer.spawn_worker`.

        See :meth:`ServiceContainer.spawn_managed_thread`
        """
        num_active_threads = len(self._active_threads)

        if num_active_threads:
            _log.warning('killing %s active thread(s)', num_active_threads)
            for gt in list(self._active_threads):
                gt.kill()

    def _kill_protected_threads(self):
        """ Kill any managed threads marked as protected when they were
        spawned.

        See :meth:`ServiceContainer.spawn_managed_thread`
        """
        num_protected_threads = len(self._protected_threads)

        if num_protected_threads:
            _log.warning('killing %s protected thread(s)',
                         num_protected_threads)
            for gt in list(self._protected_threads):
                gt.kill()

    def _handle_thread_exited(self, gt):
        self._active_threads.discard(gt)
        self._protected_threads.discard(gt)

        try:
            gt.wait()

        except greenlet.GreenletExit:
            # we don't care much about threads killed by the container
            # this can happen in stop() and kill() if providers
            # don't properly take care of their threads
            _log.warning('%s thread killed by container', self)

        except Exception as exc:
            _log.error('%s thread exited with error', self,
                       exc_info=True)
            # any error raised inside an active thread is unexpected behavior
            # and probably a bug in the providers or container
            # to be safe we kill the container
            self.kill(exc)

    def __str__(self):
        return '<ServiceContainer [{}] at 0x{:x}>'.format(
            self.service_name, id(self))
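
The worker bound falls out of ``GreenPool(size=self.max_workers)``:
``GreenPool.spawn`` blocks the caller once ``size`` greenthreads are live,
which is what caps concurrent workers. A minimal demonstration:

from eventlet.greenpool import GreenPool

def work(n):
    return n * n

pool = GreenPool(size=2)            # at most two concurrent workers
threads = [pool.spawn(work, n) for n in range(5)]  # blocks when full
pool.waitall()
print([gt.wait() for gt in threads])  # [0, 1, 4, 9, 16]
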
Example #54
0
File: rpc.py Project: pombredanne/nameko
class RpcConsumer(DependencyProvider, ProviderCollector):

    queue_consumer = queue_consumer(shared=CONTAINER_SHARED)

    def __init__(self):
        super(RpcConsumer, self).__init__()
        self._unregistering_providers = set()
        self._unregistered_from_queue_consumer = Event()
        self.queue = None

    def prepare(self):
        if self.queue is None:

            container = self.container
            service_name = container.service_name
            queue_name = RPC_QUEUE_TEMPLATE.format(service_name)
            routing_key = '{}.*'.format(service_name)
            exchange = get_rpc_exchange(container)

            self.queue = Queue(
                queue_name,
                exchange=exchange,
                routing_key=routing_key,
                durable=True)

            self.queue_consumer.register_provider(self)
            self._registered = True

    def stop(self):
        """ Stop the RpcConsumer.

        The RpcConsumer ordinarily unregisters from the QueueConsumer when the
        last RpcProvider unregisters from it. If no providers were registered,
        we should unregister ourself from the QueueConsumer as soon as we're
        asked to stop.
        """
        if not self._providers_registered:
            self.queue_consumer.unregister_provider(self)
            self._unregistered_from_queue_consumer.send(True)

    def unregister_provider(self, provider):
        """ Unregister a provider.

        Blocks until this RpcConsumer is unregistered from its QueueConsumer,
        which only happens when all providers have asked to unregister.
        """
        self._unregistering_providers.add(provider)
        remaining_providers = self._providers - self._unregistering_providers
        if not remaining_providers:
            _log.debug('unregistering from queueconsumer %s', self)
            self.queue_consumer.unregister_provider(self)
            _log.debug('unregistered from queueconsumer %s', self)
            self._unregistered_from_queue_consumer.send(True)

        _log.debug('waiting for unregister from queue consumer %s', self)
        self._unregistered_from_queue_consumer.wait()
        super(RpcConsumer, self).unregister_provider(provider)

    def get_provider_for_method(self, routing_key):
        service_name = self.container.service_name

        for provider in self._providers:
            key = '{}.{}'.format(service_name, provider.name)
            if key == routing_key:
                return provider
        else:
            method_name = routing_key.split(".")[-1]
            raise MethodNotFound(method_name)

    def handle_message(self, body, message):
        routing_key = message.delivery_info['routing_key']
        try:
            provider = self.get_provider_for_method(routing_key)
            provider.handle_message(body, message)
        except MethodNotFound as exc:
            self.handle_result(message, self.container, None, exc)

    def handle_result(self, message, container, result, exc):
        error = None
        if exc is not None:
            # TODO: this is helpful for debug, but shouldn't be used in
            # production (since it exposes the callee's internals).
            # Replace this when we can correlate exceptions properly.
            error = RemoteErrorWrapper(exc)

        responder = Responder(message)
        responder.send_response(container, result, error)

        self.queue_consumer.ack_message(message)
Example #55
0
class Timer(Entrypoint):
    def __init__(self, interval):
        """
        Timer entrypoint implementation. Fires every :attr:`self.interval`
        seconds.

        The implementation sleeps first, i.e. does not fire at time 0.

        Example::

            timer = Timer.decorator

            class Service(object):
                name = "service"

                @timer(interval=5)
                def tick(self):
                    pass

        """
        self.interval = interval
        self.should_stop = Event()
        self.gt = None

    def start(self):
        _log.debug('starting %s', self)
        self.gt = self.container.spawn_managed_thread(self._run)

    def stop(self):
        _log.debug('stopping %s', self)
        self.should_stop.send(True)
        self.gt.wait()

    def kill(self):
        _log.debug('killing %s', self)
        self.gt.kill()

    def _run(self):
        """ Runs the interval loop. """

        sleep_time = self.interval

        while True:
            # sleep for `sleep_time`, unless `should_stop` fires, in which
            # case we leave the while loop and stop entirely
            with Timeout(sleep_time, exception=False):
                self.should_stop.wait()
                break

            start = time.time()

            self.handle_timer_tick()

            elapsed_time = (time.time() - start)

            # next time, sleep however long is left of our interval, taking
            # off the time we took to run
            sleep_time = max(self.interval - elapsed_time, 0)

    def handle_timer_tick(self):
        args = ()
        kwargs = {}

        # Note that we don't catch ContainerBeingKilled here. If that's raised,
        # there is nothing for us to do anyway. The exception bubbles, and is
        # caught by :meth:`Container._handle_thread_exited`, though the
        # triggered `kill` is a no-op, since the container is already
        # `_being_killed`.
        self.container.spawn_worker(self, args, kwargs)
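
The ``Timeout(sleep_time, exception=False)`` block is the cancellable sleep:
if ``should_stop`` fires, the ``break`` leaves the loop; otherwise the
timeout expires silently and the next tick runs. The idiom in isolation:

from eventlet import Timeout
from eventlet.event import Event

should_stop = Event()
with Timeout(0.1, exception=False):
    should_stop.wait()             # nobody calls send(), so we time out
    print("stop requested")        # not reached in this run
print("timeout expired; proceed with the next tick")
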
Example #56
0
class Benchmarker(object):

    def __init__(self, rate, auth, total_ops=None, data_size=1024*1024, container='nacho', base='item-', queue_size=None):
        self.rate = rate
        self.dist = NegExp(self.rate)
        self.data_size = data_size
        self.ops = 0
        self.errors = 0
        self.container = container
        self.base = base
        self.outstanding = 0
        self.total_ops = total_ops
        self.running = False
        if queue_size:
            self.pile = GreenPile(queue_size)
        else:
            self.pile = GreenPile()
        self._done = Event()
        # self.client = Connection(authurl='http://localhost:8080/auth/v1.0', user='******', key='testing')
        self.storage_url, self.token = get_auth(auth, 'test:tester', 'testing')
        self.data = "x" * data_size
        LOG.info("Object-size=%s" % (len(self.data)))
        LOG.info("Object-base=%s" % base)
        if total_ops:
            LOG.info("This benchmark will take aprox %.0f seconds" % (total_ops / rate))

    def __iter__(self):
        return self.pile

    def next(self):
        return self.pile.next()

    def _work(self, op):
        running_ops = self.outstanding
        self.outstanding += 1
        t1 = time()
        try:
            # client = Connection(authurl='http://192.168.16.12:8080/auth/v1.0', user='******', key='testing')
            # client.put_object(self.container,'%s-%s' % (self.base, op), self.data)
            get_object(self.storage_url, token=self.token, container=self.container, name='%s-%s' % (self.base, op))
            t2 = time()
            elapsed = (t2-t1) * 1000
            self.outstanding -= 1
            LOG.info("Operation #%d took %.2f ms (%.2f MB/s, %d ops outstanding on arrival)" % (op, elapsed, (self.data_size / (1024.*1024)) / (t2 - t1) , running_ops))
            entry = { "Operation": op,
                      "Arrival-time": t1,
                      "Completion-time": t2,
                      "Elapsed": elapsed,
                      "Outstanding-on-arrival": running_ops,
                      "Outstanding-on-completion": self.outstanding }
            print '%d, %f, %f, %.2f, %d, %d' % (op, t1, t2, elapsed, running_ops, self.outstanding)
            return entry
        except KeyboardInterrupt:
            self.outstanding -= 1
            self.running = False
            return None
        except Exception:
            self.errors += 1
            raise

    def _run(self):
        print '# date-time = %s' % datetime.now()
        print '# object-size = %s' % self.data_size
        print '# rate = %s' % self.rate
        print "Operation, Arrival-time, Completion-time, Elapsed, Outstanding-on-arrival, Outstanding-on-completion"
        self.running = True
        while self.running:
            sleep(self.dist.next())
            if self.running:
                self.ops += 1
                self.pile.spawn(self._work, self.ops)
            # stop looping if we have an operations limit
            if self.running and self.total_ops:
                self.running = self.ops < self.total_ops
        self._done.send()

    def start(self):
        spawn_n(self._run)

    def wait(self):
        # wait for pending jobs
        #while self.outstanding > 0: sleep(.1)
        self._done.wait()
        self.pile.pool.waitall()

    def stop(self):
        self.running = False
        self.wait()
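
``NegExp`` is not shown in this example; a minimal stand-in consistent with
its use above (negative-exponential inter-arrival gaps, i.e. Poisson arrivals
at ``rate`` operations per second) could look like this:

import random

class NegExp(object):
    def __init__(self, rate):
        self.rate = float(rate)

    def next(self):
        # exponentially distributed gap with mean 1/rate seconds
        return random.expovariate(self.rate)

dist = NegExp(10.0)
mean_gap = sum(dist.next() for _ in range(10000)) / 10000.0
print("mean inter-arrival ~ %.3fs (expected 0.100s)" % mean_gap)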