def defer_amqp_message(send_time, message, routing_key, exchange, context, **kw):
    """
    This is a lower level version of defer which allows
    specification of the exact amqp exchange and routing_key

    send_time: datetime representing when to send
    message: the message to send
    routing_key: the routing key to use when sending
    exchange: the exchange to send to
    context: current melkman context

    optional kwargs:
    message_id
    mandatory
    delivery_mode
    priority
    """
    # Wrap the caller's message in a scheduler command envelope; keeping
    # a separate name avoids shadowing the `message` parameter, which the
    # envelope itself needs to carry under the 'message' key.
    envelope = {
        'command': DEFER_MESSAGE_COMMAND,
        'timestamp': DateTimeField()._to_json(send_time),
        'exchange': exchange,
        'routing_key': routing_key,
        'message': message,
    }
    # merge optional kwargs; plain update(kw) matches the style used by
    # push_feed_index (behavior-identical to update(**kw) here).
    envelope.update(kw)

    with context:
        publisher = MessageDispatch(context)
        publisher.send(envelope, SCHEDULER_COMMAND)
def _notify_subscribers(message_data, message, context):
    """
    Helper handler called to notify subscribers to a bucket of an update.

    message_data: decoded bucket_modified payload; expected to carry a
                  'bucket_id' key identifying the changed bucket.
    message: the raw broker message (acked when the payload is malformed).
    context: current melkman context (provides .db for the view query).
    """
    try:
        bucket_id = message_data.get('bucket_id', None)
        if bucket_id is None:
            # Malformed payload: ack so the broker does not redeliver, then bail.
            # warning() is the non-deprecated spelling of warn().
            log.warning("Ignoring malformed bucket_modified message... (no bucket_id)")
            message.ack()
            return

        # Re-dispatch the payload to each subscriber as an update command.
        out_message = deepcopy(message_data)
        out_message['command'] = 'update_subscription'

        publisher = MessageDispatch(context)

        # iterate subscribed composites.
        query = {
            'startkey': bucket_id,
            'endkey': bucket_id,
            'include_docs': False
        }
        # send a message for each subscribed composite that indicates the
        # need to update from the changed bucket.
        for r in batched_view_iter(context.db, view_composites_by_subscription, 100, **query):
            log.debug("notify %s of update to %s" % (r.id, out_message['bucket_id']))
            out_message['composite_id'] = r.id
            publisher.send(out_message, UPDATE_SUBSCRIPTION)
    except Exception:
        # Narrowed from a bare except so BaseExceptions (KeyboardInterrupt,
        # greenlet kills) propagate untouched; errors are logged and re-raised.
        log.error("Error dispatching composite updates: %s" % traceback.format_exc())
        raise
def test_dispatch_one_receiver(ctx):
    # Removed the unused `work_result = Event()` local (and its import):
    # this test only counts deliveries, it never waits on an Event.
    from eventlet import sleep
    from melkman.messaging import MessageDispatch, always_ack

    w = MessageDispatch(ctx)
    message_type = "test_dispatch_one_receiver"

    # Shared counter mutated by the handler; with two competing workers on
    # the same queue, exactly one should receive the single message.
    got_events = {"count": 0}

    @always_ack
    def handler(job, message):
        got_events["count"] += 1

    worker1 = w.start_worker(message_type, handler)
    worker2 = w.start_worker(message_type, handler)
    try:
        w.send({}, message_type)
        sleep(2)
        assert got_events["count"] == 1
    finally:
        worker1.kill()
        worker1.wait()
        worker2.kill()
        worker2.wait()
def cancel_deferred(message_id, context):
    """Ask the scheduler to drop the deferred message with this message_id."""
    cancel_command = {
        'command': CANCEL_MESSAGE_COMMAND,
        'message_id': message_id,
    }
    with context:
        dispatcher = MessageDispatch(context)
        dispatcher.send(cancel_command, SCHEDULER_COMMAND)
def request_feed_index(url, context, skip_reschedule=False):
    """
    request that the url specified be fetched and indexed.
    """
    # The flag is only included when set, preserving the message shape.
    command = {'url': url}
    if skip_reschedule:
        command['skip_reschedule'] = True

    dispatcher = MessageDispatch(context)
    dispatcher.send(command, INDEX_FEED_COMMAND)
def push_feed_index(url, content, context, **kw):
    """
    immediately index the content given (identified by its url)
    """
    # Extra kwargs are merged after url/content, so they may override
    # those keys — same precedence as the original update(kw) call.
    command = {'url': url, 'content': content}
    command.update(kw)
    MessageDispatch(context).send(command, INDEX_FEED_COMMAND)
def test_dispatch_send_recv(ctx):
    from eventlet import with_timeout
    from eventlet.event import Event
    from melkman.messaging import MessageDispatch, always_ack

    dispatcher = MessageDispatch(ctx)
    message_type = "test_dispatch_send_recv"

    # The handler publishes its computed sum into this event so the test
    # body can wait on it with a timeout.
    work_result = Event()

    @always_ack
    def handler(job, message):
        work_result.send(sum(job["values"]))

    worker = dispatcher.start_worker(message_type, handler)
    dispatcher.send({"values": [1, 2]}, message_type)
    try:
        assert with_timeout(2, work_result.wait) == 3
    finally:
        worker.kill()
        worker.wait()