Example #1
0
def run_feed_indexer(context):
    """
    Run the feed indexer loop: consume INDEX_FEED_COMMAND messages and
    handle each one on a greenlet worker pool until killed.
    """
    # Pre-bind so the finally clause cannot raise NameError/UnboundLocalError
    # if setup fails before these are assigned (e.g. Pool() or
    # start_worker() throwing) -- previously that masked the real error.
    worker_pool = None
    proc = None
    try:
        worker_pool = Pool()

        @pooled(worker_pool)
        @always_ack
        def cb(message_data, message):
            try:
                with context:
                    return handle_message(message_data, message, context)
            except GreenletExit:
                pass
            except:  # NOTE(review): bare except kept deliberately -- top-level boundary that must log everything (incl. non-Exception raises)
                log.error("Unexpected error handling feed indexer message: %s" % traceback.format_exc())

        with context:
            dispatch = MessageDispatch(context)
            proc = dispatch.start_worker(INDEX_FEED_COMMAND, cb)

        proc.wait()
    except GreenletExit:
        pass
    except:
        log.error("Unexpected error running feed indexer: %s" % traceback.format_exc())
    finally:
        # stop accepting work
        if proc is not None:
            proc.kill()
            proc.wait()
        # stop working on existing work
        if worker_pool is not None:
            worker_pool.killall()
            worker_pool.waitall()
Example #2
0
def _notify_subscribers(message_data, message, context):
    """
    helper handler called to notify subscribers to a bucket
    of an update.
    """
    try:
        bucket_id = message_data.get('bucket_id', None)
        # guard: malformed messages are acked and dropped, not retried
        if bucket_id is None:
            log.warn("Ignoring malformed bucket_modified message... (no bucket_id)")
            message.ack()
            return

        notification = deepcopy(message_data)
        notification['command'] = 'update_subscription'

        dispatcher = MessageDispatch(context)
        # iterate subscribed composites in batches of 100.
        view_args = dict(startkey=bucket_id, endkey=bucket_id, include_docs=False)
        # send a message for each subscribed composite that indicates the
        # need to update from the changed bucket.
        for row in batched_view_iter(context.db, view_composites_by_subscription, 100, **view_args):
            log.debug("notify %s of update to %s" % (row.id, notification['bucket_id']))
            notification['composite_id'] = row.id
            dispatcher.send(notification, UPDATE_SUBSCRIPTION)
    except:
        log.error("Error dispatching composite updates: %s" % traceback.format_exc())
        raise
Example #3
0
def test_dispatch_one_receiver(ctx):
    from eventlet import sleep
    from eventlet.event import Event
    from melkman.messaging import MessageDispatch, always_ack

    dispatch = MessageDispatch(ctx)
    message_type = "test_dispatch_one_receiver"

    work_result = Event()

    got_events = {"count": 0}

    @always_ack
    def handler(job, message):
        got_events["count"] += 1

    workers = [dispatch.start_worker(message_type, handler) for _ in range(2)]
    try:
        dispatch.send({}, message_type)
        sleep(2)

        # exactly one of the two competing workers should consume the message
        assert got_events["count"] == 1
    finally:
        for worker in workers:
            worker.kill()
            worker.wait()
Example #4
0
File: api.py Project: jab/melkman
def defer_amqp_message(send_time, message, routing_key, exchange, context, **kw):
    """
    Lower level version of defer which allows specification of
    the exact amqp exchange and routing_key.

    send_time: datetime representing when to send
    message: the message to send
    exchange: the exchange to send to
    routing_key: the routing key to use when sending
    context: current melkman context

    optional kwargs:
    message_id
    mandatory
    delivery_mode
    priority
    """
    # wrap the caller's message in a scheduler command envelope
    envelope = {
        'command': DEFER_MESSAGE_COMMAND,
        'timestamp': DateTimeField()._to_json(send_time),
        'exchange': exchange,
        'routing_key': routing_key,
        'message': message,
    }
    envelope.update(kw)

    with context:
        MessageDispatch(context).send(envelope, SCHEDULER_COMMAND)
Example #5
0
def test_defer_message_dispatch(ctx):
    from datetime import datetime, timedelta
    from eventlet import sleep, spawn, with_timeout
    from eventlet.event import Event
    from melkman.messaging import MessageDispatch, always_ack
    from melkman.scheduler import defer_message
    from melkman.scheduler.worker import ScheduledMessageService

    scheduler_service = ScheduledMessageService(ctx)
    scheduler = spawn(scheduler_service.run)
    dispatch = MessageDispatch(ctx)

    message_type = 'test_dispatch_send_recv'

    work_result = Event()

    @always_ack
    def handler(job, message):
        work_result.send(sum(job['values']))

    worker = dispatch.start_worker(message_type, handler)

    try:
        # schedule delivery two seconds out, then give the scheduler
        # time to deliver it.
        send_at = datetime.utcnow() + timedelta(seconds=2)
        defer_message(send_at, {'values': [1, 2]}, message_type, ctx)
        sleep(3)

        assert with_timeout(2, work_result.wait) == 3
    finally:
        worker.kill()
        worker.wait()
        scheduler.kill()
        scheduler.wait()
Example #6
0
File: api.py Project: jab/melkman
def cancel_deferred(message_id, context):
    """Ask the scheduler to drop the deferred message with the given id."""
    cancellation = {
        'command': CANCEL_MESSAGE_COMMAND,
        'message_id': message_id
    }
    with context:
        MessageDispatch(context).send(cancellation, SCHEDULER_COMMAND)
Example #7
0
File: api.py Project: jab/melkman
    def bootstrap(self, context, purge=False):
        """
        Declare the feed indexing queue; when purge is true, also clear
        any pending messages from it.
        """
        log.info("Setting up feed indexing queues...")
        c = MessageDispatch(context)
        c.declare(INDEX_FEED_COMMAND)

        # plain truth test instead of `== True` comparison (PEP 8)
        if purge:
            log.info("Clearing feed indexing queues...")
            c.clear(INDEX_FEED_COMMAND)
Example #8
0
File: api.py Project: jab/melkman
def request_feed_index(url, context, skip_reschedule=False):
    """
    request that the url specified be fetched and indexed.
    """
    job = {'url': url}
    # only carry the flag when set; consumers treat absence as False
    if skip_reschedule:
        job['skip_reschedule'] = True
    MessageDispatch(context).send(job, INDEX_FEED_COMMAND)
Example #9
0
    def _start_listener(self):
        """Spawn and return a worker consuming SCHEDULER_COMMAND messages."""
        @always_ack
        def on_command(message_data, message):
            with self.context:
                _handle_scheduler_command(message_data, message, self.context)
                # nudge the dispatcher so it picks up the change promptly
                self.wakeup_dispatcher()

        return MessageDispatch(self.context).start_worker(SCHEDULER_COMMAND, on_command)
Example #10
0
File: api.py Project: jab/melkman
    def bootstrap(self, context, purge=False):
        """
        Declare the aggregator queues; when purge is true, also clear
        any pending messages from each.
        """
        types = (BUCKET_MODIFIED, UPDATE_SUBSCRIPTION)

        log.info("Setting up aggregator queues...")
        dispatch = MessageDispatch(context)
        for t in types:
            dispatch.declare(t)
            # plain truth test instead of `== True` comparison (PEP 8)
            if purge:
                dispatch.clear(t)
Example #11
0
File: api.py Project: jab/melkman
def push_feed_index(url, content, context, **kw):
    """
    immediately index the content given (identified by its url)
    """
    job = {'url': url, 'content': content}
    # extra keyword arguments ride along in the message
    job.update(kw)
    MessageDispatch(context).send(job, INDEX_FEED_COMMAND)
Example #12
0
File: api.py Project: jab/melkman
    def bootstrap(self, context, purge=False):
        """
        Sync the deferred-message views and declare the scheduler queue;
        when purge is true, also clear pending queue messages and destroy
        stored deferred messages.
        """
        with context:
            log.info("Syncing deferred message database views...")
            view_deferred_messages_by_timestamp.sync(context.db)

            log.info("Setting up scheduler queues...")
            dispatch = MessageDispatch(context)
            dispatch.declare(SCHEDULER_COMMAND)
            # plain truth test instead of `== True` comparison (PEP 8)
            if purge:
                log.info("Clearing scheduler queues...")
                dispatch.clear(SCHEDULER_COMMAND)
                log.info("Destroying existing deferred messages...")
                delete_all_in_view(context.db, view_deferred_messages_by_timestamp)
Example #13
0
def run_aggregator(context):
    """
    Run the aggregator loop: consume BUCKET_MODIFIED and
    UPDATE_SUBSCRIPTION messages, handling each on a greenlet worker
    pool, until killed.
    """
    # Pre-bind so the finally clause cannot raise NameError/UnboundLocalError
    # if setup fails before these are assigned (e.g. Pool() throwing) --
    # previously that masked the real error.
    worker_pool = None
    procs = []
    try:
        worker_pool = Pool()

        @pooled(worker_pool)
        @always_ack
        def bucket_modified_handler(message_data, message):
            try:
                with context:
                    _handle_bucket_modified(message_data, message, context)
            except GreenletExit:
                pass
            except:  # NOTE(review): bare except kept deliberately -- boundary handler that must log everything
                # typo fixed: "bucking" -> "bucket"
                log.error("Unexpected error handling bucket modified message: %s" % traceback.format_exc())

        @pooled(worker_pool)
        @always_ack
        def update_subscription_handler(message_data, message):
            try:
                with context:
                    _handle_update_subscription(message_data, message, context)
            except GreenletExit:
                pass
            except:
                log.error("Unexpected error handling update subscription message: %s" % traceback.format_exc())

        with context:
            dispatcher = MessageDispatch(context)
            procs.append(dispatcher.start_worker(BUCKET_MODIFIED, bucket_modified_handler))
            procs.append(dispatcher.start_worker(UPDATE_SUBSCRIPTION, update_subscription_handler))

        waitall(procs)
    except GreenletExit:
        pass
    except:
        log.error("Unexpected error running aggregator process: %s" % traceback.format_exc())
    finally:
        # stop accepting work (procs is [] if setup failed early, so this is safe)
        killall(procs)
        waitall(procs)
        # stop working on existing work
        if worker_pool is not None:
            worker_pool.killall()
            worker_pool.waitall()
Example #14
0
def test_dispatch_send_recv(ctx):
    from eventlet import with_timeout
    from eventlet.event import Event
    from melkman.messaging import MessageDispatch, always_ack

    dispatch = MessageDispatch(ctx)
    message_type = "test_dispatch_send_recv"

    work_result = Event()

    @always_ack
    def handler(job, message):
        work_result.send(sum(job["values"]))

    worker = dispatch.start_worker(message_type, handler)
    dispatch.send({"values": [1, 2]}, message_type)

    try:
        # the handler should deliver 1 + 2 within the timeout
        assert with_timeout(2, work_result.wait) == 3
    finally:
        worker.kill()
        worker.wait()