Example #1
File: queue.py Project: macboy80/bitHopper
class Queue(LightQueue):
    '''Create a queue object with a given maximum size.

    If *maxsize* is less than zero or ``None``, the queue size is infinite.

    ``Queue(0)`` is a channel, that is, its :meth:`put` method always blocks 
    until the item is delivered. (This is unlike the standard :class:`Queue`, 
    where 0 means infinite size).
    
    In all other respects, this Queue class resembles the standard library
    :class:`Queue`.
    '''
    def __init__(self, maxsize=None):
        LightQueue.__init__(self, maxsize)
        self.unfinished_tasks = 0
        self._cond = Event()

    def _format(self):
        result = LightQueue._format(self)
        if self.unfinished_tasks:
            result += ' tasks=%s _cond=%s' % (self.unfinished_tasks,
                                              self._cond)
        return result

    def _put(self, item):
        LightQueue._put(self, item)
        self._put_bookkeeping()

    def _put_bookkeeping(self):
        self.unfinished_tasks += 1
        if self._cond.ready():
            self._cond.reset()

    def task_done(self):
        '''Indicate that a formerly enqueued task is complete. Used by queue consumer threads.
        For each :meth:`get <Queue.get>` used to fetch a task, a subsequent call to :meth:`task_done` tells the queue
        that the processing on the task is complete.

        If a :meth:`join` is currently blocking, it will resume when all items have been processed
        (meaning that a :meth:`task_done` call was received for every item that had been
        :meth:`put <Queue.put>` into the queue).

        Raises a :exc:`ValueError` if called more times than there were items placed in the queue.
        '''

        if self.unfinished_tasks <= 0:
            raise ValueError('task_done() called too many times')
        self.unfinished_tasks -= 1
        if self.unfinished_tasks == 0:
            self._cond.send(None)

    def join(self):
        '''Block until all items in the queue have been gotten and processed.

        The count of unfinished tasks goes up whenever an item is added to the queue.
        The count goes down whenever a consumer thread calls :meth:`task_done` to indicate
        that the item was retrieved and all work on it is complete. When the count of
        unfinished tasks drops to zero, :meth:`join` unblocks.
        '''
        self._cond.wait()
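
The docstring above describes the same task-accounting protocol as the standard library Queue: every get() must eventually be paired with a task_done(), and join() blocks until the counts balance. A minimal usage sketch, assuming eventlet is installed (this example subclasses eventlet's LightQueue, and eventlet ships an equivalent public Queue):

import eventlet
from eventlet.queue import Queue

q = Queue()

def worker():
    while True:
        item = q.get()
        print('processed', item)
        q.task_done()  # one task_done() per get(), or join() never unblocks

eventlet.spawn(worker)
for i in range(3):
    q.put(i)
q.join()  # returns once all three items have been processed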
Example #2
class Queue(LightQueue):
    '''Create a queue object with a given maximum size.

    If *maxsize* is less than zero or ``None``, the queue size is infinite.

    ``Queue(0)`` is a channel, that is, its :meth:`put` method always blocks
    until the item is delivered. (This is unlike the standard :class:`Queue`,
    where 0 means infinite size).

    In all other respects, this Queue class resembles the standard library
    :class:`Queue`.
    '''
    def __init__(self, maxsize=None):
        LightQueue.__init__(self, maxsize)
        self.unfinished_tasks = 0
        self._cond = Event()

    def _format(self):
        result = LightQueue._format(self)
        if self.unfinished_tasks:
            result += ' tasks=%s _cond=%s' % (self.unfinished_tasks, self._cond)
        return result

    def _put(self, item):
        LightQueue._put(self, item)
        self._put_bookkeeping()

    def _put_bookkeeping(self):
        self.unfinished_tasks += 1
        if self._cond.ready():
            self._cond.reset()

    def task_done(self):
        '''Indicate that a formerly enqueued task is complete. Used by queue consumer threads.
        For each :meth:`get <Queue.get>` used to fetch a task, a subsequent call to :meth:`task_done` tells the queue
        that the processing on the task is complete.

        If a :meth:`join` is currently blocking, it will resume when all items have been processed
        (meaning that a :meth:`task_done` call was received for every item that had been
        :meth:`put <Queue.put>` into the queue).

        Raises a :exc:`ValueError` if called more times than there were items placed in the queue.
        '''

        if self.unfinished_tasks <= 0:
            raise ValueError('task_done() called too many times')
        self.unfinished_tasks -= 1
        if self.unfinished_tasks == 0:
            self._cond.send(None)

    def join(self):
        '''Block until all items in the queue have been gotten and processed.

        The count of unfinished tasks goes up whenever an item is added to the queue.
        The count goes down whenever a consumer thread calls :meth:`task_done` to indicate
        that the item was retrieved and all work on it is complete. When the count of
        unfinished tasks drops to zero, :meth:`join` unblocks.
        '''
        if self.unfinished_tasks > 0:
            self._cond.wait()
Example #3
def test_container_doesnt_exhaust_max_workers(container):
    spam_called = Event()
    spam_continue = Event()

    class Service(object):
        name = 'max-workers'

        @foobar
        def spam(self, a):
            spam_called.send(a)
            spam_continue.wait()

    container = ServiceContainer(Service, config={MAX_WORKERS_CONFIG_KEY: 1})

    dep = get_extension(container, Entrypoint)

    # start the first worker, which should wait for spam_continue
    container.spawn_worker(dep, ['ham'], {})

    # start the next worker in a separate thread,
    # because it should block until the first one completes
    gt = spawn(container.spawn_worker, dep, ['eggs'], {})

    with Timeout(1):
        assert spam_called.wait() == 'ham'
        # if the container had spawned the second worker, we would see
        # an error indicating that spam_called was fired twice, and the
        # greenthread would now be dead.
        assert not gt.dead
        # reset the calls and allow the waiting worker to complete.
        spam_called.reset()
        spam_continue.send(None)
        # the second worker should now run and complete
        assert spam_called.wait() == 'eggs'
        assert gt.dead
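
The test leans on the first-class Event handshake used throughout these examples: wait() blocks until a matching send() delivers a value, and reset() re-arms the event so it can fire again. A minimal sketch of that handshake, assuming eventlet's Event (which matches the send/wait/reset calls above):

import eventlet
from eventlet.event import Event

evt = Event()
eventlet.spawn(evt.send, 'ham')   # fire from another greenthread
assert evt.wait() == 'ham'        # blocks until send() delivers

evt.reset()                       # re-arm, as the test does with spam_called
eventlet.spawn(evt.send, 'eggs')
assert evt.wait() == 'eggs'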
Example #4
class Timer(Entrypoint):
    def __init__(self, interval, eager=False, **kwargs):
        self.gt = None
        self.eager = eager
        self.interval = interval
        self.stopping_event = Event()
        self.finished_event = Event()
        super(Timer, self).__init__(**kwargs)

    def start(self):
        self.gt = self.container.spawn_manage_thread(self._run)

    def stop(self):
        self.stopping_event.send(True)
        self.gt.wait()

    def kill(self):
        self.gt.kill()

    def _run(self):
        def gen_interval():
            start_time = time.time()
            start = 0 if self.eager else 1  # eager fires immediately: the first deadline is start_time itself
            for n in count(start=start):
                i = max(start_time + n * self.interval - time.time(), 0)
                yield i

        interval = gen_interval()
        to_sleep = next(interval)
        while True:
            with Timeout(to_sleep, exception=False):
                self.stopping_event.wait()
                break
            self.container.spawn_worker_thread(self, (), {},
                                               res_handler=self.res_handler)
            self.finished_event.wait()
            self.finished_event.reset()
            to_sleep = next(interval)

    def res_handler(self, context, result, exc_info):
        self.finished_event.send(True)
        return result, exc_info
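
gen_interval schedules each tick against an absolute deadline (start_time + n * interval) rather than sleeping a fixed amount, so a slow worker delays only the current tick instead of shifting every later one. A standalone sketch of the same arithmetic:

import time
from itertools import count

def gen_interval(interval, eager=False):
    # deadlines are absolute: the n-th tick is due at start_time + n * interval
    start_time = time.time()
    start = 0 if eager else 1  # eager: the first deadline is start_time itself
    for n in count(start=start):
        yield max(start_time + n * interval - time.time(), 0)

ticks = gen_interval(5.0)
print(next(ticks))  # ~5.0 now, shrinking if work delays the next call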
Example #5
File: timer.py Project: yijxiang/nameko
class Timer(Entrypoint):
    def __init__(self, interval, eager=False, **kwargs):
        """
        Timer entrypoint. Fires every `interval` seconds or as soon as
        the previous worker completes if that took longer.

        The default behaviour is to wait `interval` seconds
        before firing for the first time. If you want the entrypoint
        to fire as soon as the service starts, pass `eager=True`.

        Example::

            timer = Timer.decorator

            class Service(object):
                name = "service"

                @timer(interval=5)
                def tick(self):
                    pass

        """
        self.interval = interval
        self.eager = eager
        self.should_stop = Event()
        self.worker_complete = Event()
        self.gt = None
        super(Timer, self).__init__(**kwargs)

    def start(self):
        _log.debug('starting %s', self)
        self.gt = self.container.spawn_managed_thread(self._run)

    def stop(self):
        _log.debug('stopping %s', self)
        self.should_stop.send(True)
        self.gt.wait()

    def kill(self):
        _log.debug('killing %s', self)
        self.gt.kill()

    def _run(self):
        """ Runs the interval loop. """
        sleep_time = 0 if self.eager else self.interval

        while True:
            # sleep for `sleep_time`, unless `should_stop` fires, in which
            # case we leave the while loop and stop entirely
            with Timeout(sleep_time, exception=False):
                self.should_stop.wait()
                break

            start = time.time()

            self.handle_timer_tick()

            self.worker_complete.wait()
            self.worker_complete.reset()

            elapsed_time = (time.time() - start)

            # next time, sleep however long is left of our interval, taking
            # off the time we took to run
            sleep_time = max(self.interval - elapsed_time, 0)

    def handle_timer_tick(self):
        args = ()
        kwargs = {}

        # Note that we don't catch ContainerBeingKilled here. If that's raised,
        # there is nothing for us to do anyway. The exception bubbles, and is
        # caught by :meth:`Container._handle_thread_exited`, though the
        # triggered `kill` is a no-op, since the container is already
        # `_being_killed`.
        self.container.spawn_worker(
            self, args, kwargs, handle_result=self.handle_result)

    def handle_result(self, worker_ctx, result, exc_info):
        self.worker_complete.send()
        return result, exc_info
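
The arithmetic at the bottom of _run is what delivers the behaviour promised in the docstring. A quick numeric check: with a 5-second interval, a worker that takes 2 seconds leaves a 3-second sleep, while one that takes 7 seconds leaves no sleep at all, so the timer fires again immediately.

interval = 5.0
for elapsed_time in (2.0, 7.0):
    print(max(interval - elapsed_time, 0))  # 3.0, then 0 (fire immediately)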
Example #6
File: worker.py Project: jab/melkman
class ScheduledMessageService(object):

    MIN_SLEEP_TIME = timedelta(seconds=1)
    MAX_SLEEP_TIME = timedelta(minutes=60)
    MAX_CLAIM_TIME = timedelta(minutes=5)

    def __init__(self, context):
        self.context = context
        self.service_queue = Event()
        self._listener = None
        self._dispatcher = None

    def run(self):
        procs = []  # defined up front so the finally clause can't hit a NameError
        try:
            with self.context:
                self._listener = self._start_listener()
                self._dispatcher = spawn(self.run_dispatcher)

            procs = [self._listener, self._dispatcher]
            waitall(procs)
        except GreenletExit:
            pass
        finally:
            killall(procs)
            waitall(procs)

    ################################################################
    # The listener consumes messages on the scheduled message queue 
    # and stores the deferred messages in the database.
    ################################################################

    def _start_listener(self):
        @always_ack
        def cb(message_data, message):
            with self.context:
                _handle_scheduler_command(message_data, message, self.context)
                self.wakeup_dispatcher()

        dispatch = MessageDispatch(self.context)
        return dispatch.start_worker(SCHEDULER_COMMAND, cb)


    ##############################################################
    # The dispatcher consumes deferred messages from the database 
    # when their scheduled time arrives and spits them out 
    # to the message broker
    ##############################################################    
    def run_dispatcher(self):
        try:
            # cleanup any mess left over last time...
            with self.context:
                self.cleanup()
                while True:
                    log.info("checking for ready messages...")
                    last_time = self.send_ready_messages()
                    sleep_time = self._calc_sleep(last_time)
                    log.info("sleeping for %s" % sleep_time)
                    sleep_secs = sleep_time.days * 86400 + sleep_time.seconds  # 86400 seconds per day
                    try:
                        with_timeout(sleep_secs, self.service_queue.wait)
                    except TimeoutError:
                        pass

                    if self.service_queue.ready():
                        self.service_queue.reset()
        except GreenletExit:
            log.debug("ScheduledMessageService dispatcher exiting...")

    def wakeup_dispatcher(self):
        if not self.service_queue.ready():
            self.service_queue.send(True)

    def _calc_sleep(self, after=None):
        next_time = self.find_next_send_time(after=after)
    
        if next_time is None:
            sleep_time = self.MAX_SLEEP_TIME
        else:
            sleep_time = next_time - datetime.utcnow()
            sleep_time += timedelta(seconds=1)
            sleep_time -= timedelta(microseconds=sleep_time.microseconds)

        if sleep_time < self.MIN_SLEEP_TIME:
            sleep_time = self.MIN_SLEEP_TIME
        if sleep_time > self.MAX_SLEEP_TIME:
            sleep_time = self.MAX_SLEEP_TIME        
        
        return sleep_time

    def find_next_send_time(self, after=None):
        if after is None:
            after = datetime.utcnow()
        after_str = DateTimeField()._to_json(after)

        next_query = dict(
            startkey = [False, after_str, {}],
            endkey = [True, None],
            include_docs = False,
            descending = False,
            limit = 1
        )

        next_send = None
        for r in view_deferred_messages_by_timestamp(self.context.db, **next_query):
            next_send = DateTimeField()._to_python(r.key[1])
            break

        return next_send

    def send_ready_messages(self):
        while True:
            now = datetime.utcnow()
            now_str = DateTimeField()._to_json(now)

            query = dict(
                startkey = [False, None],
                endkey = [False, now_str, {}],
                include_docs = True,
                descending = False,
                limit = 100
            )


            vr = view_deferred_messages_by_timestamp(self.context.db, **query)
            batch = []
            for r in vr:
                batch.append(DeferredAMQPMessage.wrap(r.doc))

            if len(batch) == 0:
                break
            
            dispatch_count = 0
            for message in batch:
                try:
                    if self._dispatch_message(message):
                        dispatch_count += 1
                except GreenletExit:
                    # asked to stop, go ahead and quit.
                    raise
                except Exception:
                    log.error("Unexpected error dispatching message %s: %s" %
                              (message, traceback.format_exc()))
                    
            log.info("Dispatched %d messages" % dispatch_count)
            
        return now

    def _dispatch_message(self, message):
        if not message.claim(self.context.db):
            return
        
        try:
            publisher = Publisher(self.context.broker, exchange=message.options.exchange,
                                  exchange_type=message.options.exchange_type)
            publisher.send(message.message,
                           routing_key = message.options.routing_key,
                           delivery_mode = message.options.delivery_mode,
                           mandatory = message.options.mandatory,
                           priority = message.options.priority)
            publisher.close()
        except Exception:
            log.error("Error dispatching deferred message %s: %s" % (message, traceback.format_exc()))
            self.error_reschedule(message)
            return False
        else:
            log.debug("Dispatched message %s" % message)
            # sent with no problems, done with it.
            self.context.db.delete(message)
            return True

    def error_reschedule(self, message):
        message.error_count += 1
        
        if message.error_count < 10:
            delay = 2**message.error_count
        else:
            delay = 60*10

        resched_time = datetime.utcnow() + timedelta(seconds=delay)
        message.unclaim(self.context.db, resched_time)
        
        log.warn("Rescheduled message %s for %s" % (message.id, resched_time))

    def cleanup(self):
        log.info("Performing cleanup of claimed items...")

        # anything older than this has held the claim for too long
        # and is considered dead.
        cutoff = datetime.utcnow() - self.MAX_CLAIM_TIME
        cutoff_str = DateTimeField()._to_json(cutoff)

        query = dict(
            startkey = [True, cutoff_str, {}],
            endkey = [True],
            limit = 100,
            include_docs = True,
            descending = True
        )

        unclaim_count = 0
        while True:
            vr = view_deferred_messages_by_timestamp(self.context.db, **query)
            batch = [DeferredAMQPMessage.wrap(r.doc) for r in vr]
            if len(batch) == 0:
                break

            for message in batch:
                self.error_reschedule(message)
                unclaim_count += 1

        if unclaim_count > 0:
            log.warn('Cleanup unclaimed %d items' % unclaim_count)
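
error_reschedule backs off exponentially, doubling the delay with each failure until the tenth error, after which it settles at a flat ten minutes. The resulting schedule, sketched standalone:

def backoff_delay(error_count):
    # mirrors error_reschedule: 2**n seconds below 10 errors, then a 10-minute cap
    return 2 ** error_count if error_count < 10 else 60 * 10

for n in (1, 4, 9, 10, 20):
    print(n, backoff_delay(n))  # 2, 16, 512, 600, 600 seconds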
Example #7
class Service(ConsumerMixin):
    def __init__(self, controllercls,
            connection, exchange, topic,
            pool=None, poolsize=1000):
        self.nodeid = UIDGEN()

        if pool is None:
            self.procpool = GreenPool(size=poolsize)
        else:
            self.procpool = pool

        self.connection = connection
        self.controller = controllercls()
        self.topic = topic
        self.greenlet = None
        self.messagesem = Semaphore()
        self.consume_ready = Event()

        node_topic = "{}.{}".format(self.topic, self.nodeid)
        self.queues = [entities.get_topic_queue(exchange, topic),
                       entities.get_topic_queue(exchange, node_topic),
                       entities.get_fanout_queue(topic), ]
        self._channel = None
        self._consumers = None

    def start(self):
        # self.connection = newrpc.create_connection()
        if self.greenlet is not None and not self.greenlet.dead:
            raise RuntimeError()
        self.greenlet = eventlet.spawn(self.run)

    def get_consumers(self, Consumer, channel):
        return [Consumer(self.queues, callbacks=[self.on_message, ]), ]

    def on_consume_ready(self, connection, channel, consumers, **kwargs):
        self._consumers = consumers
        self._channel = channel
        self.consume_ready.send(None)

    def on_consume_end(self, connection, channel):
        self.consume_ready.reset()

    def on_message(self, body, message):
        # need a semaphore to stop killing between message ack()
        # and spawning process.
        with self.messagesem:
            self.procpool.spawn(self.handle_request, body)
            message.ack()

    def handle_request(self, body):
        newrpc.process_message(self.connection, self.controller, body)

    def wait(self):
        try:
            self.greenlet.wait()
        except greenlet.GreenletExit:
            pass
        return self.procpool.waitall()

    def kill(self):
        if self.greenlet is not None and not self.greenlet.dead:
            self.should_stop = True
            #with self.messagesem:
                #self.greenlet.kill()
            self.greenlet.wait()
        if self._consumers:
            for c in self._consumers:
                c.cancel()
        if self._channel is not None:
            self._channel.close()

    def link(self, *args, **kwargs):
        return self.greenlet.link(*args, **kwargs)

    def kill_processes(self):
        for g in self.procpool.coroutines_running:
            g.kill()
Example #8
class AbstractChannel(object):
    """
    Superclass for both the Connection, which is treated
    as channel 0, and other user-created Channel objects.

    The subclasses must have a _METHOD_MAP class property, mapping
    between AMQP method signatures and Python methods.

    """
    def __init__(self, connection, channel_id):
        self.connection = connection
        self.channel_id = channel_id
        connection.channels[channel_id] = self
        self.method_queue = [] # Higher level queue for methods
        self.new_method = Event()
        self.auto_decode = False
    

    def __enter__(self):
        """
        Support for Python >= 2.5 'with' statements.

        """
        return self


    def __exit__(self, type, value, traceback):
        """
        Support for Python >= 2.5 'with' statements.

        """
        self.close()


    def _send_method(self, method_sig, args='', content=None):
        """
        Send a method for our channel.

        """
        if isinstance(args, AMQPWriter):
            args = args.getvalue()

        self.connection.method_writer.write_method(self.channel_id,
            method_sig, args, content)


    def close(self):
        """
        Close this Channel or Connection

        """
        raise NotImplementedError('Must be overridden in subclass')

    def received_method(self, method_sig, args, content=None):
        meth = (method_sig, args, content)
        self.method_queue.append(meth)
        self.new_method.send(meth)
        self.new_method.reset()

    def _wait_method(self, allowed_methods):
        """
        Wait for a method from the server destined for
        this channel.
        """
        #
        # Check the channel's deferred methods
        #
        while True:
            for queued_method in self.method_queue:
                method_sig = queued_method[0]
                if (allowed_methods is None) \
                or (method_sig in allowed_methods) \
                or (method_sig == (20, 40)):
                    self.method_queue.remove(queued_method)
                    return queued_method
            
            # nope, wait for it...
            self.new_method.wait()

    def wait(self, allowed_methods=None):
        """
        Wait for a method that matches our allowed_methods parameter (the
        default value of None means match any method), and dispatch to it.

        """
        method_sig, args, content = self._wait_method(allowed_methods)

        if content \
        and self.auto_decode \
        and hasattr(content, 'content_encoding'):
            try:
                content.body = content.body.decode(content.content_encoding)
            except Exception:
                pass

        amqp_method = self._METHOD_MAP.get(method_sig, None)

        if amqp_method is None:
            raise Exception('Unknown AMQP method (%d, %d)' % method_sig)

        if content is None:
            return amqp_method(self, args)
        else:
            return amqp_method(self, args, content)


    #
    # Placeholder, the concrete implementations will have to
    # supply their own versions of _METHOD_MAP
    #
    _METHOD_MAP = {}
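
As the class docstring says, concrete channels supply their own _METHOD_MAP from AMQP method signatures to handlers, and wait() dispatches to whichever method _wait_method returns. A toy sketch of that contract; (60, 60) is basic.deliver in AMQP 0-9-1, and the rest of this subclass is hypothetical:

class DemoChannel(AbstractChannel):
    def close(self):
        pass  # concrete channels must override close()

    def _basic_deliver(self, args, content=None):
        print('delivered:', args)

    _METHOD_MAP = {
        (60, 60): _basic_deliver,  # basic.deliver -> handler
    }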
Example #9
File: timer.py Project: onefinestay/nameko
class Timer(Entrypoint):
    def __init__(self, interval, eager=False, **kwargs):
        """
        Timer entrypoint. Fires every `interval` seconds or as soon as
        the previous worker completes if that took longer.

        The default behaviour is to wait `interval` seconds
        before firing for the first time. If you want the entrypoint
        to fire as soon as the service starts, pass `eager=True`.

        Example::

            timer = Timer.decorator

            class Service(object):
                name = "service"

                @timer(interval=5)
                def tick(self):
                    pass

        """
        self.interval = interval
        self.eager = eager
        self.should_stop = Event()
        self.worker_complete = Event()
        self.gt = None
        super(Timer, self).__init__(**kwargs)

    def start(self):
        _log.debug('starting %s', self)
        self.gt = self.container.spawn_managed_thread(self._run)

    def stop(self):
        _log.debug('stopping %s', self)
        self.should_stop.send(True)
        self.gt.wait()

    def kill(self):
        _log.debug('killing %s', self)
        self.gt.kill()

    def _run(self):
        """ Runs the interval loop. """

        def get_next_interval():
            start_time = time.time()
            start = 0 if self.eager else 1
            for count in itertools.count(start=start):
                yield max(start_time + count * self.interval - time.time(), 0)
        interval = get_next_interval()
        sleep_time = next(interval)
        while True:
            # sleep for `sleep_time`, unless `should_stop` fires, in which
            # case we leave the while loop and stop entirely
            with Timeout(sleep_time, exception=False):
                self.should_stop.wait()
                break

            self.handle_timer_tick()

            self.worker_complete.wait()
            self.worker_complete.reset()

            sleep_time = next(interval)

    def handle_timer_tick(self):
        args = ()
        kwargs = {}

        # Note that we don't catch ContainerBeingKilled here. If that's raised,
        # there is nothing for us to do anyway. The exception bubbles, and is
        # caught by :meth:`Container._handle_thread_exited`, though the
        # triggered `kill` is a no-op, since the container is already
        # `_being_killed`.
        self.container.spawn_worker(
            self, args, kwargs, handle_result=self.handle_result)

    def handle_result(self, worker_ctx, result, exc_info):
        self.worker_complete.send()
        return result, exc_info
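
The stop/sleep trick at the top of the loop (shared by the Timer and Cron examples here) merges two waits into one statement: Timeout(sleep_time, exception=False) silently unblocks should_stop.wait() once the interval elapses, giving an interruptible sleep. A minimal sketch, assuming eventlet:

import eventlet
from eventlet import Timeout
from eventlet.event import Event

should_stop = Event()

def interruptible_sleep(seconds):
    # True means should_stop fired; False means we merely timed out
    with Timeout(seconds, exception=False):
        should_stop.wait()
        return True
    return False

print(interruptible_sleep(0.1))  # False: timed out, keep looping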
Example #10
File: service.py Project: shauns/nameko
class Service(ConsumerMixin):
    def __init__(
            self, controllercls, connection_factory, exchange, topic,
            pool=None, poolsize=1000):
        self.nodeid = UIDGEN()

        self.max_workers = poolsize
        if pool is None:
            self.procpool = GreenPool(size=poolsize)
        else:
            self.procpool = pool

        self.controller = controllercls()
        self.service = self.controller
        self.topic = topic
        self.greenlet = None
        self.consume_ready = Event()

        node_topic = "{}.{}".format(self.topic, self.nodeid)
        self.nova_queues = [
            entities.get_topic_queue(exchange, topic),
            entities.get_topic_queue(exchange, node_topic),
            entities.get_fanout_queue(topic), ]

        self._channel = None
        self._consumers = None

        self.connection = connection_factory()
        self.connection_factory = connection_factory

        inject_dependencies(self.controller, self)

        self._connection_pool = Pool(
            max_size=self.procpool.size,
            create=connection_factory
        )

        self.workers = set()
        self._pending_ack_messages = []
        self._pending_requeue_messages = []
        self._do_cancel_consumers = False
        self._consumers_cancelled = Event()

        self._timers = list(get_timers(self.controller))

    def start(self):
        self.start_timers()
        # greenlet has a magic attribute ``dead`` - pylint: disable=E1101
        if self.greenlet is not None and not self.greenlet.dead:
            raise RuntimeError()
        self.greenlet = eventlet.spawn(self.run)

    def start_timers(self):
        for timer in self._timers:
            timer.start()

    def get_consumers(self, Consumer, channel):
        nova_consumer = Consumer(
            self.nova_queues, callbacks=[self.on_nova_message, ])

        consume_consumers = get_consumers(
            Consumer, self, self.on_consume_message)

        consumers = [nova_consumer] + list(consume_consumers)

        prefetch_count = self.procpool.size
        for consumer in consumers:
            consumer.qos(prefetch_count=prefetch_count)

        return consumers

    def on_consume_ready(self, connection, channel, consumers, **kwargs):
        self._consumers = consumers
        self._channel = channel
        self.consume_ready.send(None)

    def on_consume_end(self, connection, channel):
        self.consume_ready.reset()

    def on_nova_message(self, body, message):
        _log.debug('spawning RPC worker (%d free)', self.procpool.free())

        gt = self.procpool.spawn(self.handle_rpc_message, body)

        gt.link(self.handle_rpc_message_processed, message)
        self.workers.add(gt)

    def on_consume_message(self, consumer_method_config, body, message):
        _log.debug('spawning consume worker (%d free)', self.procpool.free())

        gt = self.procpool.spawn(
            self.handle_consume_message, consumer_method_config, body, message)

        gt.link(self.handle_consume_message_processed)
        self.workers.add(gt)

    def handle_rpc_message(self, body):
        # item is patched on for python with ``with``, pylint can't find it
        # pylint: disable=E1102
        with self._connection_pool.item() as connection:
            process_rpc_message(connection, self.controller, body)

    def handle_rpc_message_processed(self, gt, message):
        self.workers.discard(gt)
        self._pending_ack_messages.append(message)

    def handle_consume_message(self, consumer_method_config, body, message):
        with log_time(_log.debug, 'processed consume message in %0.3fsec'):
            consumer_method, consumer_config = consumer_method_config

            try:
                consumer_method(body)
            except Exception as e:
                if consumer_config.requeue_on_error:
                    _log.exception(
                        'failed to consume message, requeueing message: '
                        '%s(): %s', consumer_method, e)
                    self._pending_requeue_messages.append(message)
                else:
                    _log.exception(
                        'failed to consume message, ignoring message: '
                        '%s(): %s', consumer_method, e)
                    self._pending_ack_messages.append(message)
            else:
                self._pending_ack_messages.append(message)

    def handle_consume_message_processed(self, gt):
        self.workers.discard(gt)

    def on_iteration(self):
        self.process_consumer_cancellation()
        # we need to make sure we process any pending messages before shutdown
        self.process_pending_message_acks()
        self.process_shutdown()

    def process_consumer_cancellation(self):
        if self._do_cancel_consumers:
            self._do_cancel_consumers = False
            if self._consumers:
                _log.debug('cancelling consumers')
                for consumer in self._consumers:
                    consumer.cancel()
            self._consumers_cancelled.send(True)

    def process_pending_message_acks(self):
        messages = self._pending_ack_messages
        if messages:
            _log.debug('ack() %d processed messages', len(messages))
            while messages:
                msg = messages.pop()
                msg.ack()
                eventlet.sleep()

        messages = self._pending_requeue_messages
        if messages:
            _log.debug('requeue() %d processed messages', len(messages))
            while messages:
                msg = messages.pop()
                msg.requeue()
                eventlet.sleep()

    def consume(self, limit=None, timeout=None, safety_interval=0.1, **kwargs):
        """ Lifted from kombu so we are able to break the loop immediately
            after a shutdown is triggered rather than waiting for the timeout.
        """
        elapsed = 0
        with self.Consumer() as (connection, channel, consumers):
            with self.extra_context(connection, channel):
                self.on_consume_ready(connection, channel, consumers, **kwargs)
                for i in limit and xrange(limit) or count():
                    # moved from after the following `should_stop` condition to
                    # avoid waiting on a drain_events timeout before breaking
                    # the loop.
                    self.on_iteration()
                    if self.should_stop:
                        break

                    try:
                        connection.drain_events(timeout=safety_interval)
                    except socket.timeout:
                        elapsed += safety_interval
                        # Excluding the following clause from coverage,
                        # as timeout never appears to be set - This method
                        # is a lift from kombu so will leave in place for now.
                        if timeout and elapsed >= timeout:  # pragma: no cover
                            raise socket.timeout()
                    except socket.error:
                        if not self.should_stop:
                            raise
                    else:
                        yield
                        elapsed = 0

    def process_shutdown(self):
        consumers_cancelled = self._consumers_cancelled.ready()

        no_active_timers = (len(self._timers) == 0)

        no_active_workers = (self.procpool.running() < 1)

        no_pending_message_acks = not (
            self._pending_ack_messages or
            self._pending_requeue_messages
        )

        ready_to_stop = (
            consumers_cancelled and
            no_active_timers and
            no_active_workers and
            no_pending_message_acks
        )

        if ready_to_stop:
            _log.debug('notifying service to stop')
            self.should_stop = True

    def cancel_consumers(self):
        # greenlet has a magic attribute ``dead`` - pylint: disable=E1101
        if self.greenlet is not None and not self.greenlet.dead:
            # since consumers were started in a separate thread,
            # we will just notify the thread to avoid getting
            # "Second simultaneous read" errors
            _log.debug('notifying consumers to be cancelled')
            self._do_cancel_consumers = True
            self._consumers_cancelled.wait()
        else:
            _log.debug('consumer thread already dead')

    def cancel_timers(self):
        if self._timers:
            _log.debug('stopping %d timers', len(self._timers))
            while self._timers:
                self._timers.pop().stop()

    def kill_workers(self):
        _log.debug('force killing %d workers', len(self.workers))
        while self.workers:
            self.workers.pop().kill()

    def wait_for_workers(self):
        pool = self.procpool
        _log.debug('waiting for %d workers to complete', pool.running())
        pool.waitall()

    def shut_down(self):
        # greenlet has a magic attribute ``dead`` - pylint: disable=E1101
        if self.greenlet is not None and not self.greenlet.dead:
            _log.debug('stopping service')
            self.greenlet.wait()

        # TODO: when is this ever not None?
        if self._channel is not None:
            _log.debug('closing channel')
            self._channel.close()

    def kill(self, force=False):
        _log.debug('killing service')

        self.cancel_consumers()

        self.cancel_timers()

        if force:
            self.kill_workers()
        else:
            self.wait_for_workers()

        self.shut_down()

    def link(self, *args, **kwargs):
        return self.greenlet.link(*args, **kwargs)
Example #11
class Cron(Entrypoint):
    def __init__(self,
                 schedule: str,
                 tz: str = None,
                 concurrency: str = ConcurrencyPolicy.WAIT,
                 **kwargs):
        """
        Cron entrypoint. Fires according to a (possibly timezone-aware)
        cron schedule. If no timezone info is passed, the default is UTC.
        Set ``concurrency`` to ``ConcurrencyPolicy.ALLOW`` to allow multiple workers
        to run simultaneously. Set ``concurrency`` to ``ConcurrencyPolicy.SKIP`` to
        skip lapsed scheduled runs. The default behavior (``ConcurrencyPolicy.WAIT``)
        is to wait until the running worker completes and immediately spawn another
        if the schedule has lapsed.

        Example::

            class Service(object):
                name = "service"

                @cron(schedule='0 12 * * *', tz='America/Chicago')
                def ping(self):
                    # method executes every day at noon America/Chicago time
                    print("pong")

        """
        self.schedule = schedule
        self.tz = tz
        self.concurrency = concurrency
        self.should_stop = Event()
        self.worker_complete = Event()
        self.gt = None
        super().__init__(**kwargs)

    def start(self):
        _log.debug('starting %s', self)
        self.gt = self.container.spawn_managed_thread(self._run)

    def stop(self):
        _log.debug('stopping %s', self)
        self.should_stop.send(True)
        self.gt.wait()

    def kill(self):
        _log.debug('killing %s', self)
        self.gt.kill()

    def _get_next_interval(self):
        now_utc = datetime.datetime.now(tz=pytz.UTC)
        if self.tz:
            tz = pytz.timezone(self.tz)
            base = now_utc.astimezone(tz)
        else:
            base = now_utc
        cron_schedule = croniter(self.schedule, base)
        while True:
            yield max(cron_schedule.get_next() - time.time(), 0)

    def _run(self):
        """ Runs the schedule loop. """
        interval = self._get_next_interval()
        sleep_time = next(interval)
        while True:
            # sleep for `sleep_time`, unless `should_stop` fires, in which
            # case we leave the while loop and stop entirely
            with Timeout(sleep_time, exception=False):
                self.should_stop.wait()
                break

            self.handle_timer_tick()

            if self.concurrency != ConcurrencyPolicy.ALLOW:
                self.worker_complete.wait()
                self.worker_complete.reset()

            sleep_time = next(interval)

            # a sleep time of zero means the next scheduled start time has
            # already passed; if the policy is SKIP, advance the interval
            # again so the lapsed run is dropped rather than fired late.
            if self.concurrency == ConcurrencyPolicy.SKIP and sleep_time == 0:
                sleep_time = next(interval)

    def handle_timer_tick(self):
        args = ()
        kwargs = {}

        # Note that we don't catch ContainerBeingKilled here. If that's raised,
        # there is nothing for us to do anyway. The exception bubbles, and is
        # caught by :meth:`Container._handle_thread_exited`, though the
        # triggered `kill` is a no-op, since the container is already
        # `_being_killed`.
        self.container.spawn_worker(self,
                                    args,
                                    kwargs,
                                    handle_result=self.handle_result)

    def handle_result(self, worker_ctx, result, exc_info):
        # we only care about the worker completion if we're going to be waiting for it.
        if self.concurrency != ConcurrencyPolicy.ALLOW:
            self.worker_complete.send()
        return result, exc_info
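
A hedged sketch of the schedule arithmetic in _get_next_interval, assuming croniter and pytz are installed: croniter.get_next() returns an epoch float by default, so subtracting time.time() gives the seconds to sleep until the next scheduled fire.

import datetime
import time

import pytz
from croniter import croniter

base = datetime.datetime.now(tz=pytz.UTC).astimezone(pytz.timezone('America/Chicago'))
schedule = croniter('0 12 * * *', base)
sleep_time = max(schedule.get_next() - time.time(), 0)
print(sleep_time)  # seconds until the next noon, America/Chicago time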