Example #1
def amqp_subscribe(exchange, callback, queue='',
                   ex_type='fanout', routing_keys=None):
    def json_parse_dec(func):
        @functools.wraps(func)
        def wrapped(msg):
            try:
                msg.body = json.loads(msg.body)
            except:
                pass
            return func(msg)
        return wrapped

    connection = Connection(config.AMQP_URI)
    channel = connection.channel()
    channel.exchange_declare(exchange=exchange, type=ex_type, auto_delete=True)
    resp = channel.queue_declare(queue, exclusive=True)
    if not routing_keys:
        channel.queue_bind(resp.queue, exchange)
    else:
        for routing_key in routing_keys:
            channel.queue_bind(resp.queue, exchange, routing_key=routing_key)
    channel.basic_consume(queue=queue,
                          callback=json_parse_dec(callback),
                          no_ack=True)
    try:
        while True:
            channel.wait()
    except BaseException as exc:
        # catch BaseException so that it catches KeyboardInterrupt
        channel.close()
        connection.close()
        amqp_log("SUBSCRIPTION ENDED: %s %s %r" % (exchange, queue, exc))
Example #2
def amqp_subscribe(exchange, queue, callback):
    def json_parse_dec(func):
        @functools.wraps(func)
        def wrapped(msg):
            try:
                msg.body = json.loads(msg.body)
            except:
                pass
            return func(msg)
        return wrapped

    if not queue:
        queue = "mist-tmp_%d" % random.randrange(2 ** 20)

    connection = Connection()
    channel = connection.channel()
    channel.exchange_declare(exchange=exchange, type='fanout')
    channel.queue_declare(queue, exclusive=True)
    channel.queue_bind(queue, exchange)
    channel.basic_consume(queue=queue,
                          callback=json_parse_dec(callback),
                          no_ack=True)
    try:
        while True:
            channel.wait()
    except BaseException as exc:
        # catch BaseException so that it catches KeyboardInterrupt
        channel.close()
        connection.close()
        amqp_log("SUBSCRIPTION ENDED: %s %s %r" % (exchange, queue, exc))
Example #3
File: helpers.py Project: gabeo8/mist.io
def amqp_publish(exchange, routing_key, data):
    connection = Connection()
    channel = connection.channel()
    msg = Message(json.dumps(data))
    channel.basic_publish(msg, exchange=exchange, routing_key=routing_key)
    channel.close()
    connection.close()
Example #4
def amqp_subscribe(exchange, callback, queue='',
                   ex_type='fanout', routing_keys=None):
    def json_parse_dec(func):
        @functools.wraps(func)
        def wrapped(msg):
            try:
                msg.body = json.loads(msg.body)
            except:
                pass
            return func(msg)
        return wrapped

    connection = Connection()
    channel = connection.channel()
    channel.exchange_declare(exchange=exchange, type=ex_type, auto_delete=True)
    resp = channel.queue_declare(queue, exclusive=True)
    if not routing_keys:
        channel.queue_bind(resp.queue, exchange)
    else:
        for routing_key in routing_keys:
            channel.queue_bind(resp.queue, exchange, routing_key=routing_key)
    channel.basic_consume(queue=queue,
                          callback=json_parse_dec(callback),
                          no_ack=True)
    try:
        while True:
            channel.wait()
    except BaseException as exc:
        # catch BaseException so that it catches KeyboardInterrupt
        channel.close()
        connection.close()
        amqp_log("SUBSCRIPTION ENDED: %s %s %r" % (exchange, queue, exc))
Example #5
File: helpers.py Project: gabeo8/mist.io
def amqp_subscribe(exchange, queue, callback):
    def json_parse_dec(func):
        @functools.wraps(func)
        def wrapped(msg):
            try:
                msg.body = json.loads(msg.body)
            except:
                pass
            return func(msg)

        return wrapped

    if not queue:
        queue = "mist-tmp_%d" % random.randrange(2**20)

    connection = Connection()
    channel = connection.channel()
    channel.exchange_declare(exchange=exchange, type='fanout')
    channel.queue_declare(queue, exclusive=True)
    channel.queue_bind(queue, exchange)
    channel.basic_consume(queue=queue,
                          callback=json_parse_dec(callback),
                          no_ack=True)
    try:
        while True:
            channel.wait()
    except BaseException as exc:
        # catch BaseException so that it catches KeyboardInterrupt
        channel.close()
        connection.close()
        amqp_log("SUBSCRIPTION ENDED: %s %s %r" % (exchange, queue, exc))
Example #6
def amqp_publish(exchange, routing_key, data):
    connection = Connection()
    channel = connection.channel()
    msg = Message(json.dumps(data))
    channel.basic_publish(msg, exchange=exchange, routing_key=routing_key)
    channel.close()
    connection.close()
Example #7
    def setup_conn(self):
        self.connection = Connection(**self.connection_meta)
        self.channel = Channel(self.connection)

        # Since amqp v2.0, you should explicitly call Connection.connect() and Channel.open()
        if VERSION[0] >= 2:
            self.connection.connect()
            self.channel.open()
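The version guard matters because py-amqp 1.x connected implicitly when Connection was constructed, whereas 2.x made both steps explicit. The same guard in a standalone sketch (assuming py-amqp, whose package exposes the VERSION tuple used above):

from amqp import Connection, VERSION

connection = Connection(host='localhost:5672')
if VERSION[0] >= 2:
    # py-amqp >= 2.0 no longer connects during __init__
    connection.connect()
channel = connection.channel()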
Example #8
def amqp_publish(exchange, routing_key, data, ex_type="fanout", ex_declare=False):
    connection = Connection(config.AMQP_URI)
    channel = connection.channel()
    if ex_declare:
        channel.exchange_declare(exchange=exchange, type=ex_type)
    msg = Message(json.dumps(data))
    channel.basic_publish(msg, exchange=exchange, routing_key=routing_key)
    channel.close()
    connection.close()
Example #9
def amqp_user_listening(user):
    connection = Connection(config.AMQP_URI)
    channel = connection.channel()
    try:
        channel.exchange_declare(exchange=_amqp_user_exchange(user), type="fanout", passive=True)
    except AmqpNotFound:
        return False
    else:
        return True
    finally:
        channel.close()
        connection.close()
Example #10
def amqp_owner_listening(owner):
    connection = Connection(config.AMQP_URI)
    channel = connection.channel()
    try:
        channel.exchange_declare(exchange=_amqp_owner_exchange(owner),
                                 type='fanout', passive=True)
    except AmqpNotFound:
        return False
    else:
        return True
    finally:
        channel.close()
        connection.close()
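Both helpers use passive=True, which asks the broker to verify that the exchange exists without creating it; a missing exchange comes back as a channel-level NOT_FOUND error, caught here as AmqpNotFound. A standalone sketch of the same probe using py-amqp's own exception type (exchange name hypothetical, implicit 1.x-style connect as in the examples above):

from amqp import Connection
from amqp.exceptions import NotFound

connection = Connection()
channel = connection.channel()
try:
    # passive=True never creates anything; it only checks existence.
    channel.exchange_declare('some-owner-exchange', 'fanout', passive=True)
except NotFound:
    print('exchange missing: nobody is listening')
finally:
    connection.close()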
Example #11
def amqp_publish(exchange,
                 routing_key,
                 data,
                 ex_type='fanout',
                 ex_declare=False):
    connection = Connection()
    channel = connection.channel()
    if ex_declare:
        channel.exchange_declare(exchange=exchange, type=ex_type)
    msg = Message(json.dumps(data))
    channel.basic_publish(msg, exchange=exchange, routing_key=routing_key)
    channel.close()
    connection.close()
Example #12
def amqp_publish(exchange, routing_key, data,
                 ex_type='fanout', ex_declare=False, auto_delete=True,
                 connection=None):
    close = False
    if connection is None:
        connection = Connection(config.AMQP_URI)
        close = True
    channel = connection.channel()
    if ex_declare:
        channel.exchange_declare(exchange=exchange, type=ex_type,
                                 auto_delete=auto_delete)
    msg = Message(json.dumps(data))
    channel.basic_publish(msg, exchange=exchange, routing_key=routing_key)
    channel.close()
    if close:
        connection.close()
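Compared to the earlier variants, this one accepts an optional shared connection and only closes what it opened, so callers can batch many publishes over a single broker connection. A usage sketch (exchange name and payloads hypothetical):

connection = Connection(config.AMQP_URI)
for i in range(3):
    # The helper opens and closes a channel per call but reuses the connection.
    amqp_publish('stats', routing_key='', data={'tick': i},
                 ex_declare=(i == 0), connection=connection)
connection.close()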
Example #13
    def setupConnectivity(self):

        if self.kwargs.heartbeat > 0:
            self.logging.info("Sending heartbeat every %s seconds." % (self.kwargs.heartbeat))
            self.sendToBackground(self.heartbeat)

        while self.loop():
            self.connect.wait()
            self.logging.debug("Connecting to %s" % (self.kwargs.host))
            try:
                self.connection = Connection(
                    host=self.kwargs.host,
                    heartbeat=self.kwargs.heartbeat,
                    virtual_host=self.kwargs.vhost,
                    userid=self.kwargs.user,
                    password=self.kwargs.password,
                    ssl=self.kwargs.ssl,
                    connect_timeout=5
                )
                self.connection.connect()
                self.channel = self.connection.channel()

                if self.kwargs.exchange != "":
                    self.channel.exchange_declare(
                        self.kwargs.exchange,
                        self.kwargs.exchange_type,
                        durable=self.kwargs.exchange_durable,
                        auto_delete=self.kwargs.exchange_auto_delete,
                        passive=self.kwargs.exchange_passive,
                        arguments=self._exchange_arguments
                    )
                    self.logging.debug("Declared exchange %s." % (self.kwargs.exchange))

                if self.kwargs.queue_declare:
                    self.channel.queue_declare(
                        self.kwargs.queue,
                        durable=self.kwargs.queue_durable,
                        exclusive=self.kwargs.queue_exclusive,
                        auto_delete=self.kwargs.queue_auto_delete,
                        arguments=self._queue_arguments
                    )
                    self.logging.debug("Declared queue %s." % (self.kwargs.queue))

                if self.kwargs.exchange != "":
                    self.channel.queue_bind(
                        self.kwargs.queue,
                        self.kwargs.exchange,
                        routing_key=self.kwargs.routing_key
                    )
                    self.logging.debug("Bound queue %s to exchange %s." % (self.kwargs.queue, self.kwargs.exchange))

                self.logging.info("Connected to broker %s." % (self.kwargs.host))
            except Exception as err:
                self.logging.error("Failed to connect to broker.  Reason %s " % (err))
                sleep(1)
            else:
                self.do_consume.set()
                self.connect.clear()
                self.connected = True
Example #14
    def __init__(self):
        self.response = None

        self.connection = Connection()
        self.channel = Channel(self.connection)

        (self.queue, _, _) = self.channel.queue_declare(exclusive=True)
        self.channel.queue_bind(self.queue, exchange='django_amqp_example')
Example #15
    def _create_resource(conn, res_id, user_id, project_id, source_id, rmeta):
        # TODO(gordc): implement lru_cache to improve performance
        try:
            res = models.Resource.__table__
            m_hash = hashlib.md5(jsonutils.dumps(rmeta,
                                                 sort_keys=True)).hexdigest()
            trans = conn.begin_nested()
            if conn.dialect.name == 'sqlite':
                trans = conn.begin()
            with trans:
                res_row = conn.execute(
                    sa.select([res.c.internal_id]).where(
                        sa.and_(res.c.resource_id == res_id,
                                res.c.user_id == user_id,
                                res.c.project_id == project_id,
                                res.c.source_id == source_id,
                                res.c.metadata_hash == m_hash))).first()
                internal_id = res_row[0] if res_row else None
                if internal_id is None:
                    result = conn.execute(res.insert(),
                                          resource_id=res_id,
                                          user_id=user_id,
                                          project_id=project_id,
                                          source_id=source_id,
                                          resource_metadata=rmeta,
                                          metadata_hash=m_hash)
                    internal_id = result.inserted_primary_key[0]
                    if rmeta and isinstance(rmeta, dict):
                        meta_map = {}
                        for key, v in utils.dict_to_keyval(rmeta):
                            try:
                                _model = sql_utils.META_TYPE_MAP[type(v)]
                                if meta_map.get(_model) is None:
                                    meta_map[_model] = []
                                meta_map[_model].append({
                                    'id': internal_id,
                                    'meta_key': key,
                                    'value': v
                                })
                            except KeyError:
                                LOG.warn(
                                    _("Unknown metadata type. Key (%s) "
                                      "will not be queryable."), key)
                        for _model in meta_map.keys():
                            conn.execute(_model.__table__.insert(),
                                         meta_map[_model])

        except dbexc.DBDuplicateEntry:
            # retry function to pick up duplicate committed object
            internal_id = Connection._create_resource(conn, res_id, user_id,
                                                      project_id, source_id,
                                                      rmeta)

        return internal_id
Example #16
class RpcClient(object):
    def __init__(self):
        self.response = None

        self.connection = Connection()
        self.channel = Channel(self.connection)

        (self.queue, _, _) = self.channel.queue_declare(exclusive=True)
        self.channel.queue_bind(self.queue, exchange='django_amqp_example')

    def request(self, body):
        message = Message(
            body=json.dumps(body),
            reply_to=self.queue,
            content_type='application/json')
        
        self.channel.basic_publish(
            message,
            exchange='django_amqp_example',
            routing_key='task_queue')

        print "Task submitted:", json.dumps(body)

        def callback(msg):
            self.response = json.loads(msg.body)

        self.channel.basic_consume(
            callback=callback,
            queue=self.queue,
            no_ack=True)

        while True:
            self.connection.drain_events(timeout=60)
            if self.response is not None: 
                break

        self.connection.close()
        return self.response
Example #17
    def list_networks(self, persist=True):
        """Return list of networks for cloud

        A list of networks is fetched from libcloud, data is processed, stored
        on network models, and a list of network models is returned.

        Subclasses SHOULD NOT override or extend this method.

        This method wraps `_list_networks` which contains the core
        implementation.

        """
        task_key = 'cloud:list_networks:%s' % self.cloud.id
        task = PeriodicTaskInfo.get_or_add(task_key)
        with task.task_runner(persist=persist):
            cached_networks = {
                '%s' % n.id: n.as_dict()
                for n in self.list_cached_networks()
            }

            networks = self._list_networks()

        # Initialize AMQP connection to reuse for multiple messages.
        amqp_conn = Connection(config.AMQP_URI)
        if amqp_owner_listening(self.cloud.owner.id):
            networks_dict = [n.as_dict() for n in networks]
            if cached_networks and networks_dict:
                # Publish patches to rabbitmq.
                new_networks = {'%s' % n['id']: n for n in networks_dict}
                patch = jsonpatch.JsonPatch.from_diff(cached_networks,
                                                      new_networks).patch
                if patch:
                    amqp_publish_user(self.cloud.owner.id,
                                      routing_key='patch_networks',
                                      connection=amqp_conn,
                                      data={
                                          'cloud_id': self.cloud.id,
                                          'patch': patch
                                      })
            else:
                # TODO: remove this block, once patches
                # are implemented in the UI
                amqp_publish_user(self.cloud.owner.id,
                                  routing_key='list_networks',
                                  connection=amqp_conn,
                                  data={
                                      'cloud_id': self.cloud.id,
                                      'networks': networks_dict
                                  })
        return networks
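Publishing a JSON Patch instead of the full listing keeps messages small when little has changed between polls. The jsonpatch call this relies on, in isolation:

import jsonpatch

old = {'net-1': {'name': 'default'}}
new = {'net-1': {'name': 'default'}, 'net-2': {'name': 'private'}}
patch = jsonpatch.JsonPatch.from_diff(old, new).patch
# -> [{'op': 'add', 'path': '/net-2', 'value': {'name': 'private'}}]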
Example #18
    def _create_resource(conn, res_id, user_id, project_id, source_id,
                         rmeta):
        # TODO(gordc): implement lru_cache to improve performance
        try:
            res = models.Resource.__table__
            m_hash = hashlib.md5(jsonutils.dumps(rmeta,
                                                 sort_keys=True)).hexdigest()
            trans = conn.begin_nested()
            if conn.dialect.name == 'sqlite':
                trans = conn.begin()
            with trans:
                res_row = conn.execute(
                    sa.select([res.c.internal_id])
                    .where(sa.and_(res.c.resource_id == res_id,
                                   res.c.user_id == user_id,
                                   res.c.project_id == project_id,
                                   res.c.source_id == source_id,
                                   res.c.metadata_hash == m_hash))).first()
                internal_id = res_row[0] if res_row else None
                if internal_id is None:
                    result = conn.execute(res.insert(), resource_id=res_id,
                                          user_id=user_id,
                                          project_id=project_id,
                                          source_id=source_id,
                                          resource_metadata=rmeta,
                                          metadata_hash=m_hash)
                    internal_id = result.inserted_primary_key[0]
                    if rmeta and isinstance(rmeta, dict):
                        meta_map = {}
                        for key, v in utils.dict_to_keyval(rmeta):
                            try:
                                _model = sql_utils.META_TYPE_MAP[type(v)]
                                if meta_map.get(_model) is None:
                                    meta_map[_model] = []
                                meta_map[_model].append(
                                    {'id': internal_id, 'meta_key': key,
                                     'value': v})
                            except KeyError:
                                LOG.warn(_("Unknown metadata type. Key (%s) "
                                         "will not be queryable."), key)
                        for _model in meta_map.keys():
                            conn.execute(_model.__table__.insert(),
                                         meta_map[_model])

        except dbexc.DBDuplicateEntry:
            # retry function to pick up duplicate committed object
            internal_id = Connection._create_resource(
                conn, res_id, user_id, project_id, source_id, rmeta)

        return internal_id
Example #19
    def _stats_result_to_model(result, period, period_start,
                               period_end, groupby, aggregate):
        stats_args = Connection._stats_result_aggregates(result, aggregate)
        stats_args['unit'] = result.unit
        duration = (timeutils.delta_seconds(result.tsmin, result.tsmax)
                    if result.tsmin is not None and result.tsmax is not None
                    else None)
        stats_args['duration'] = duration
        stats_args['duration_start'] = result.tsmin
        stats_args['duration_end'] = result.tsmax
        stats_args['period'] = period
        stats_args['period_start'] = period_start
        stats_args['period_end'] = period_end
        stats_args['groupby'] = (dict(
            (g, getattr(result, g)) for g in groupby) if groupby else None)
        return api_models.Statistics(**stats_args)
Example #20
    def _stats_result_to_model(result, period, period_start, period_end,
                               groupby, aggregate):
        stats_args = Connection._stats_result_aggregates(result, aggregate)
        stats_args['unit'] = result.unit
        duration = (timeutils.delta_seconds(result.tsmin, result.tsmax)
                    if result.tsmin is not None and result.tsmax is not None
                    else None)
        stats_args['duration'] = duration
        stats_args['duration_start'] = result.tsmin
        stats_args['duration_end'] = result.tsmax
        stats_args['period'] = period
        stats_args['period_start'] = period_start
        stats_args['period_end'] = period_end
        stats_args['groupby'] = (dict(
            (g, getattr(result, g)) for g in groupby) if groupby else None)
        return api_models.Statistics(**stats_args)
Example #21
File: q4rmq.py Project: smihica/py-q4rmq
    @contextmanager
    def open(self, channel=None):
        if channel:
            saved_channel = self.channel
            self.channel = channel
            yield self.channel
            self.channel = saved_channel
        elif self.channel:
            yield self.channel
        else:
            try:
                args = {}
                if self.host:   args['host']   = self.host
                if self.userid: args['userid'] = self.userid
                self.connection = Connection(**args)
                self.channel    = self.connection.channel()
                yield self.channel
            finally:
                self.close()
        return
Example #22
    def _create_meter(conn, name, type, unit):
        # TODO(gordc): implement lru_cache to improve performance
        try:
            meter = models.Meter.__table__
            trans = conn.begin_nested()
            if conn.dialect.name == 'sqlite':
                trans = conn.begin()
            with trans:
                meter_row = conn.execute(
                    sa.select([meter.c.id])
                    .where(sa.and_(meter.c.name == name,
                                   meter.c.type == type,
                                   meter.c.unit == unit))).first()
                meter_id = meter_row[0] if meter_row else None
                if meter_id is None:
                    result = conn.execute(meter.insert(), name=name,
                                          type=type, unit=unit)
                    meter_id = result.inserted_primary_key[0]
        except dbexc.DBDuplicateEntry:
            # retry function to pick up duplicate committed object
            meter_id = Connection._create_meter(conn, name, type, unit)

        return meter_id
Example #23
    def _create_meter(conn, name, type, unit):
        # TODO(gordc): implement lru_cache to improve performance
        try:
            meter = models.Meter.__table__
            trans = conn.begin_nested()
            if conn.dialect.name == 'sqlite':
                trans = conn.begin()
            with trans:
                meter_row = conn.execute(
                    sa.select([meter.c.id]).where(
                        sa.and_(meter.c.name == name, meter.c.type == type,
                                meter.c.unit == unit))).first()
                meter_id = meter_row[0] if meter_row else None
                if meter_id is None:
                    result = conn.execute(meter.insert(),
                                          name=name,
                                          type=type,
                                          unit=unit)
                    meter_id = result.inserted_primary_key[0]
        except dbexc.DBDuplicateEntry:
            # retry function to pick up duplicate committed object
            meter_id = Connection._create_meter(conn, name, type, unit)

        return meter_id
Example #24
def test_module_amqp_submit_message():

    actor_config = ActorConfig('amqp', 100, 1, {}, "", disable_exception_handling=True)
    amqp = AMQPIn(actor_config, exchange="wishbone")

    amqp.pool.queue.outbox.disableFallThrough()
    amqp.start()

    sleep(1)
    conn = Connection()
    conn.connect()
    channel = conn.channel()
    channel.basic_publish(basic_message.Message("test"), exchange="wishbone")
    channel.close()
    conn.close()
    sleep(1)
    event = getter(amqp.pool.queue.outbox)
    assert event.get() == "test"
    amqp.stop()
Example #25
def test_module_amqp_submit_message():

    actor_config = ActorConfig('amqp', 100, 1, {}, "", disable_exception_handling=True)
    amqp = AMQPOut(actor_config, exchange="wishbone_submit_message", queue="wishbone_submit_message", exchange_durable=False, queue_durable=False)

    amqp.pool.queue.inbox.disableFallThrough()
    amqp.start()

    event = Event("test")
    amqp.submit(event, "inbox")

    sleep(1)
    conn = Connection()
    conn.connect()
    channel = conn.channel()
    message = channel.basic_get("wishbone_submit_message")
    channel.close()
    conn.close()
    sleep(1)
    amqp.stop()
    assert message.body == "test"
Example #26
def test_module_amqp_submit_message_encode():

    c = ComponentManager()
    protocol = c.getComponentByName("wishbone.protocol.encode.json")()

    actor_config = ActorConfig('amqp', 100, 1, {}, "", protocol=protocol, disable_exception_handling=True)
    amqp = AMQPOut(actor_config, exchange="wishbone_submit_encode", queue="wishbone_submit_encode", exchange_durable=False, queue_durable=False)

    amqp.pool.queue.inbox.disableFallThrough()
    amqp.start()

    event = Event({"one": 1})
    amqp.submit(event, "inbox")

    sleep(1)
    conn = Connection()
    conn.connect()
    channel = conn.channel()
    message = channel.basic_get("wishbone_submit_encode")
    channel.close()
    conn.close()
    sleep(1)
    amqp.stop()
    assert message.body == '{"one": 1}'
Example #27
class AMQPOut(OutputModule):
    '''
    Submits messages to AMQP.

    Submits messages to an AMQP message broker.

    If <exchange> is not provided, no exchange will be created during initialisation.
    If <queue> is not provided, no queue will be created during initialisation.

    If <exchange> and <queue> are provided, they will both be created and
    bound during initialisation.

    Parameters::

        - delivery_mode(int)(1)
           |  Sets the delivery mode of the messages.

        - exchange(str)("")
           |  The exchange to declare.

        - exchange_type(str)("direct")
           |  The exchange type to create. (direct, topic, fanout)

        - exchange_durable(bool)(false)
           |  Declare a durable exchange.

        - exchange_auto_delete(bool)(true)
           |  If set, the exchange is deleted when all queues have finished using it.

        - exchange_passive(bool)(false)
           |  If set, the server will not create the exchange. The client can use
           |  this to check whether an exchange exists without modifying the server state.

        - exchange_arguments(dict)({})
           |  Additional arguments for exchange declaration.

        - heartbeat(int)(0)
            | Enable AMQP heartbeat. The value is the interval in seconds.
            | 0 disables heartbeat support.

        - host(str)("localhost:5672")
           |  The host broker to connect to.

        - native_events(bool)(False)
           |  Outgoing events should be native Wishbone events

        - parallel_streams(int)(1)
           |  The number of outgoing parallel data streams.

        - password(str)("guest")
           |  The password to authenticate.

        - payload(str)(None)
           |  The string to submit.
           |  If defined takes precedence over `selection`.

        - queue(str)("wishbone")
           |  The queue to declare and bind to <exchange>. This will also be
           |  the destination queue of the submitted messages unless
           |  <routing_key> is set to another value and <exchange_type> is
           |  "topic".

        - queue_arguments(dict)({})
           |  Additional arguments for queue declaration.

        - queue_auto_delete(bool)(true)
           |  Whether to autodelete the queue.

        - queue_declare(bool)(true)
           |  Whether to actually declare the queue.

        - queue_durable(bool)(false)
           |  Declare a durable queue.

        - queue_exclusive(bool)(false)
           |  Declare an exclusive queue.

        - routing_key(str)("")
           |  The routing key to use when submitting messages.

        - selection(str)("data")
           |  The part of the event to submit externally.

        - ssl(bool)(False)
           |  If True expects SSL

        - user(str)("guest")
           |  The username to authenticate.

        - vhost(str)("/")
           |  The virtual host to connect to.


    Queues::

        - inbox
           | Messages going to the defined broker.
    '''

    def __init__(self, actor_config, native_events=False, selection="data", payload=None, parallel_streams=1,
                 host="localhost:5672", vhost="/", user="******", password="******", ssl=False, heartbeat=0,
                 exchange="wishbone", exchange_type="direct", exchange_durable=False, exchange_auto_delete=True, exchange_passive=False,
                 exchange_arguments={},
                 queue="wishbone", queue_durable=False, queue_exclusive=False, queue_auto_delete=True, queue_declare=True,
                 queue_arguments={},
                 routing_key="", delivery_mode=1):

        OutputModule.__init__(self, actor_config)

        self.pool.createQueue("inbox")
        self.registerConsumer(self.consume, "inbox")

        self.connect = Event()
        self.connect.set()

        self.do_consume = Event()
        self.do_consume.clear()

        self.channel = None

        self.connected = False

    def heartbeat(self):

        while self.loop():
            sleep(self.kwargs.heartbeat)
            try:
                if self.connected:
                    self.connection.send_heartbeat()
            except Exception as err:
                self.logging.error("Failed to send heartbeat. Reason: %s" % (err))

    def preHook(self):
        self._queue_arguments = dict(self.kwargs.queue_arguments)
        self._exchange_arguments = dict(self.kwargs.exchange_arguments)
        self.sendToBackground(self.setupConnectivity)

    def consume(self, event):

        self.do_consume.wait()
        if self.channel is None:
            self.logging.error("Failed to submit message. Initial connection not established yet.")
        else:
            data = self.getDataToSubmit(event)
            data = self.encode(data)
            message = basic_message.Message(
                body=data,
                delivery_mode=self.kwargs.delivery_mode
            )

            try:
                self.channel.basic_publish(
                    message,
                    exchange=self.kwargs.exchange,
                    routing_key=self.kwargs.routing_key
                )
            except Exception as err:
                self.logging.error("Failed to submit event to broker. Reason: %s" % (err))
                self.connected = False
                self.connect.set()

    def setupConnectivity(self):

        if self.kwargs.heartbeat > 0:
            self.logging.info("Sending heartbeat every %s seconds." % (self.kwargs.heartbeat))
            self.sendToBackground(self.heartbeat)

        while self.loop():
            self.connect.wait()
            self.logging.debug("Connecting to %s" % (self.kwargs.host))
            try:
                self.connection = Connection(
                    host=self.kwargs.host,
                    heartbeat=self.kwargs.heartbeat,
                    virtual_host=self.kwargs.vhost,
                    userid=self.kwargs.user,
                    password=self.kwargs.password,
                    ssl=self.kwargs.ssl,
                    connect_timeout=5
                )
                self.connection.connect()
                self.channel = self.connection.channel()

                if self.kwargs.exchange != "":
                    self.channel.exchange_declare(
                        self.kwargs.exchange,
                        self.kwargs.exchange_type,
                        durable=self.kwargs.exchange_durable,
                        auto_delete=self.kwargs.exchange_auto_delete,
                        passive=self.kwargs.exchange_passive,
                        arguments=self._exchange_arguments
                    )
                    self.logging.debug("Declared exchange %s." % (self.kwargs.exchange))

                if self.kwargs.queue_declare:
                    self.channel.queue_declare(
                        self.kwargs.queue,
                        durable=self.kwargs.queue_durable,
                        exclusive=self.kwargs.queue_exclusive,
                        auto_delete=self.kwargs.queue_auto_delete,
                        arguments=self._queue_arguments
                    )
                    self.logging.debug("Declared queue %s." % (self.kwargs.queue))

                if self.kwargs.exchange != "":
                    self.channel.queue_bind(
                        self.kwargs.queue,
                        self.kwargs.exchange,
                        routing_key=self.kwargs.routing_key
                    )
                    self.logging.debug("Bound queue %s to exchange %s." % (self.kwargs.queue, self.kwargs.exchange))

                self.logging.info("Connected to broker %s." % (self.kwargs.host))
            except Exception as err:
                self.logging.error("Failed to connect to broker.  Reason %s " % (err))
                sleep(1)
            else:
                self.do_consume.set()
                self.connect.clear()
                self.connected = True

    def postHook(self):
        try:
            self.channel.close()
        except Exception as err:
            del(err)
        try:
            self.connection.close()
        except Exception as err:
            del(err)
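The two Event flags form a small reconnect handshake: consume() blocks on do_consume until a connection exists, and a failed publish sets connect to wake setupConnectivity, which rebuilds the connection and flips the flags back. The handshake in miniature, with threading.Event standing in for the framework's event primitive (all names here are illustrative):

import threading

connect = threading.Event()
connect.set()                    # request the initial connection
do_consume = threading.Event()   # set once publishing is safe

def reconnect_loop(establish):
    while True:
        connect.wait()           # sleep until a (re)connect is requested
        establish()              # build connection/channel, declare, bind
        do_consume.set()         # unblock publishers
        connect.clear()          # park until the next failure

def publish(send):
    do_consume.wait()            # block until connected
    try:
        send()
    except Exception:
        connect.set()            # wake the reconnect loop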
Example #28
    def send(self, users=None, dismiss=False):
        # FIXME Imported here due to circular dependency issues.
        from mist.api.notifications.models import InAppNotification
        from mist.api.notifications.models import UserNotificationPolicy

        # Get the list of `InAppNotifications`s in the current context before
        # any update takes place.
        owner_old_ntfs = list(InAppNotification.objects(owner=self.ntf.owner))

        if not users:
            users = self.ntf.owner.members
        elif not isinstance(users, list):
            users = [users]

        # Save/update/dismiss notifications. Compute the dismissed-by sets
        # up front, since both are referenced below even when not dismissing.
        dismissed_by = set(self.ntf.dismissed_by)
        old_dismissed_by = list(dismissed_by)
        if dismiss:
            dismissed_by |= set(user.id for user in users)
            self.ntf.dismissed_by = list(dismissed_by)

        # Is anyone listening?
        if not amqp_owner_listening(self.ntf.owner.id):
            return

        # Initialize AMQP connection to reuse for multiple messages.
        amqp_conn = Connection(config.AMQP_URI)

        # Re-fetch all notifications in order to calculate the diff between
        # the two lists.
        owner_new_ntfs = list(InAppNotification.objects(owner=self.ntf.owner))

        # Apply each user's notification policy on the above lists to get rid
        # of notifications users are not interested in.
        for user in users:
            user_old_ntfs, user_new_ntfs = [], []
            try:
                np = UserNotificationPolicy.objects.get(user_id=user.id)
            except UserNotificationPolicy.DoesNotExist:
                log.debug('No UserNotificationPolicy found for %s', user)
                user_old_ntfs = [
                    ntf.as_dict() for ntf in owner_old_ntfs if not (
                        self.ntf.id == ntf.id and user.id in old_dismissed_by)
                ]
                user_new_ntfs = [
                    ntf.as_dict() for ntf in owner_new_ntfs
                    if not (self.ntf.id == ntf.id and user.id in dismissed_by)
                ]
            else:
                user_old_ntfs = [
                    ntf.as_dict() for ntf in owner_old_ntfs
                    if not np.has_blocked(ntf) and not (
                        self.ntf.id == ntf.id and user.id in old_dismissed_by)
                ]
                user_new_ntfs = [
                    ntf.as_dict() for ntf in owner_new_ntfs
                    if not np.has_blocked(ntf)
                    and not (self.ntf.id == ntf.id and user.id in dismissed_by)
                ]
            # Now we can save the dismissed notification
            self.ntf.save()

            # Calculate diff.
            patch = jsonpatch.JsonPatch.from_diff(user_old_ntfs,
                                                  user_new_ntfs).patch

            if patch:
                amqp_publish_user(self.ntf.owner.id,
                                  routing_key='patch_notifications',
                                  connection=amqp_conn,
                                  data={
                                      'user': user.id,
                                      'patch': patch
                                  })

        # Finally, try to close the AMQP connection.
        try:
            amqp_conn.close()
        except Exception as exc:
            log.exception(repr(exc))
Example #29
#!/usr/bin/env python

from amqp.connection import Connection
from amqp.channel import Channel
from amqp.basic_message import Message
import json

connection = Connection()
channel = Channel(connection)

channel.exchange_declare('django_amqp_example', 'topic', auto_delete=False)
channel.queue_declare(queue='task_queue', durable=True)
channel.queue_bind('task_queue', 'django_amqp_example', 'task_queue')

def callback(msg):
    print "Received request:", msg.body
    content = json.loads(msg.body)['content']
    response = {
        'rot13': content.encode('rot13')
    }

    response_msg = Message(
        body=json.dumps(response),
        exchange='django_amqp_example')

    print "Sending response:", json.dumps(response)
    channel.basic_publish(
        response_msg,
        routing_key=msg.reply_to)

channel.basic_consume(callback=callback, queue='task_queue')
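# NOTE: as written the worker registers its consumer and falls off the end of
# the script; a drain loop (py-amqp's Connection.drain_events(), as used by the
# RpcClient example above) is needed to actually process requests:
while True:
    connection.drain_events()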
Example #30
File: test01.py Project: smihica/py-q4rmq
def queue():
    c = Connection()
    ch = c.channel()
    return QueueManager(channel=ch)
Example #31
File: q4rmq.py Project: smihica/py-q4rmq
class QueueManager(object):

    TTL_MAX     = 86400000 # one day (604800000 = one week, 2147483647 = max)
    TIME_FORMAT = '%Y%m%d%H%M%S.%f'
    TAGS = {
        'HEADER'   : 'PQ4R',
        'READY'    : 'R',
        'SCHEDULE' : 'S',
        'EXCHANGE' : 'O',
    }
    SCHEDULING_ACCURACY = 1000 # milliseconds, i.e. one-second granularity

    def setup_tag(self, tag):
        rtag = self.get_tag(tag, 'READY')
        if not self.tags_tbl.get(rtag):
            stag = self.get_tag(tag, 'SCHEDULE')
            otag = self.get_tag(tag, 'EXCHANGE')
            self.channel.queue_declare(queue=rtag, durable=True, auto_delete=False)
            self.channel.exchange_declare(exchange=otag, type='topic', durable=True)
            self.channel.queue_bind(exchange=otag, routing_key=stag+'.*', queue=rtag)
            self.tags_tbl[rtag] = True

    def __init__(self, host=None, userid=None, on_dead=lambda m: m, channel=None, error_times_to_ignore = 3):
        self.host         = host
        self.userid       = userid
        self.connection   = None
        self.channel      = channel
        self.serializer   = lambda d: json.dumps(d, separators=(',',':'))
        self.deserializer = lambda d: json.loads(d)
        self.on_dead      = on_dead
        self.listenning   = False
        self.consumer     = None
        self.error_times_to_ignore = error_times_to_ignore
        self.tags_tbl     = {}

    @contextmanager
    def open(self, channel=None):
        if channel:
            saved_channel = self.channel
            self.channel = channel
            yield self.channel
            self.channel = saved_channel
        elif self.channel:
            yield self.channel
        else:
            try:
                args = {}
                if self.host:   args['host']   = self.host
                if self.userid: args['userid'] = self.userid
                self.connection = Connection(**args)
                self.channel    = self.connection.channel()
                yield self.channel
            finally:
                self.close()
        return

    def close(self):
        if self.channel:
            self.channel.close()
            self.channel = None
        if self.connection:
            self.connection.close()
            self.connection = None

    def compose_tag(self, tags):
        if isinstance(tags, str):
            tags = [tags]
        for tag in tags:
            if not re.match(r'^[A-Za-z0-9_$\-]+$', tag):
                raise Exception('validation error: tag must match /^[A-Za-z0-9_$\\-]+$/')
        return ':'.join(tags)

    def get_tag(self, tags, typ):
        return ('%s.%s.%s' % (self.TAGS['HEADER'], self.TAGS[typ], self.compose_tag(tags)))

    def count(self, tag=None):
        # This function is really slow.
        proc = subprocess.Popen("sudo rabbitmqctl list_queues", shell=True, stdout=subprocess.PIPE)
        stdout_value = proc.communicate()[0]
        rt = stdout_value.decode('utf-8')
        tag_count_pairs = map(lambda p: p.split('\t'), rt.rstrip().split('\n')[1:-1])
        rtag = self.get_tag(tag, 'READY')
        stag = self.get_tag(tag, 'SCHEDULE')
        rt = { 'READY': 0, 'SCHEDULED': 0 }
        for tag, count in tag_count_pairs:
            if tag == rtag:
                rt['READY'] = int(count)
            elif tag[0:len(stag)] == stag:
                rt['SCHEDULED'] += int(count)
            else:
                pass
        return rt

    def gensym(self, l=5):
        src = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'
        h = ''
        for i in range(l): h = h + random.choice(src)
        return h + '_' + datetime.now().strftime('%H%M%S%f')

    def enqueue(self, tag, data, schedule = None, error_times=0, channel=None):
        with self.open(channel):
            self.setup_tag(tag)
            msg = Message()
            msg.properties['delivery_mode'] = 2
            body = {'i': self.gensym(), 'c': data, 'e': error_times}
            id = None
            if schedule:
                ttl = round_by_accuracy(get_millis_to_the_date(schedule), self.SCHEDULING_ACCURACY)
                ttl1 = min(ttl, self.TTL_MAX)
                stag = ('%s.%s' % (self.get_tag(tag, 'SCHEDULE'), str(ttl1), ))
                self.channel.queue_declare(
                    queue=stag, durable=True, auto_delete=False,
                    arguments = { 'x-message-ttl' : ttl1, "x-dead-letter-exchange" : self.get_tag(tag, 'EXCHANGE') }
                )
                body['s'] = schedule.strftime(self.TIME_FORMAT)
                msg.body = self.serializer(body)
                id = self.channel.basic_publish(msg, routing_key=stag)
                log(" [%s] Sent scheduled -> ttl: %s(%s) schedule: %s" % (body['i'], ttl1, ttl, body['s'], ))
            else:
                rtag = self.get_tag(tag, 'READY')
                msg.body = self.serializer(body)
                id = self.channel.basic_publish(msg, routing_key=rtag)
                log(" [%s] Sent immediate -> tag: %s" % (body['i'], rtag))
            return body['i']

    def notify_dead(self, msg):
        self.on_dead(msg)
        return

#    @contextmanager
#    def dequeue_item(self, tag, channel=None):
#        with self.open(channel):
#            ctag = self.compose_tag(tag)
#            receiver = QueueReceiver.get(ctag, self)
#            res = receiver.fetchone()
#            if res:
#                if ((0 < self.error_times_to_ignore) and
#                    (self.error_times_to_ignore <= res['e'])):
#                    try:
#                        self.notify_dead(res)
#                    except:
#                        pass
#                    yield None
#                else:
#                    try:
#                        yield res
#                    except:
#                        self.enqueue(tag, res['c'], error_times = res['e'] + 1)
#                        raise
#            else:
#                yield None
#            return
# 
#    @contextmanager
#    def dequeue(self, tag, channel=None):
#        with self.dequeue_item(tag, channel) as res:
#            if res:
#                yield res['c']
#            else:
#                yield res
#            return

    def listen_item(self, tag, on_recv, channel=None):
        #
        def listen_item_iter(res):
            nonlocal on_recv
            if res:
                if ((0 < self.error_times_to_ignore) and
                    (self.error_times_to_ignore <= res['e'])):
                    try:
                        self.notify_dead(res)
                    except:
                        pass
                else:
                    on_recv(res)
        #
        self.stop_listen()
        self.listenning = True
        while True:
            if self.listenning:
                with self.open(channel):
                    self.setup_tag(tag)
                    self.consumer = QueueConsumer(tag, self, listen_item_iter)
                    self.consumer.start()
                    try:
                        while self.consumer.wait1():
                            pass
                    finally:
                        self.consumer.stop()
            else:
                return None

    def stop_listen(self):
        if self.listenning:
            if self.consumer:
                self.consumer.stop()
            self.listenning = False

    def listen(self, tag, on_recv, channel=None):
        wrap = lambda d: on_recv(d['c'] if d is not None else None)
        self.listen_item(tag, wrap, channel)

    def dequeue_item_immediate(self, tag, channel=None):
        rtag = self.get_tag(tag, 'READY')
        with self.open(channel):
            self.setup_tag(tag)
            msg = self.channel.basic_get(rtag)
            if msg:
                headers = msg.properties.get('application_headers')
                death = headers.get('x-death') if headers else None
                reason = death[0]['reason'] if death else None
                expired = (reason == 'expired')
                body = self.deserializer(msg.body)
                id = body['i']
                schedule = body.get('s')
                #
                self.channel.basic_ack(msg.delivery_tag)
                #
                if schedule and expired:
                    ready_time = datetime.strptime(schedule, self.TIME_FORMAT)
                    n = datetime.now()
                    if ready_time < n:
                        log(" [%s] Done Scheduled missed %sms" % (id, timedelta_to_millis(n - ready_time), ))
                        return body
                    else:
                        self.enqueue(tag, body['c'], ready_time)
                        log(" [%s] Re-Published -> %s" % (id, schedule, ))
                elif not schedule and not expired:
                    log(" [%s] Done Immediately" % id)
                    return body
                else:
                    log(" [%s] Not-Done because of (%s)" % (id, reason))
            return None

    def dequeue_immediate(self, tag, channel=None):
        res = self.dequeue_item_immediate(tag, channel)
        return res['c'] if res else None
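enqueue builds delayed delivery out of plain RabbitMQ primitives: a scheduled message is published to a per-TTL queue whose x-message-ttl expires it into the queue's x-dead-letter-exchange, which in turn routes it onto the ready queue bound with the stag.* key. The declaration at the heart of the trick, in isolation (queue and exchange names follow the TAGS scheme above but are hypothetical):

channel.queue_declare(
    queue='PQ4R.S.mytag.60000', durable=True, auto_delete=False,
    arguments={
        'x-message-ttl': 60000,                    # hold messages for 60s
        'x-dead-letter-exchange': 'PQ4R.O.mytag',  # then re-route them here
    })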
Example #32
File: base.py Project: hb407033/mist.api
    def list_machines(self, persist=True):
        """Return list of machines for cloud

        A list of nodes is fetched from libcloud, the data is processed, stored
        on machine models, and a list of machine models is returned.

        Subclasses SHOULD NOT override or extend this method.

        This method wraps `_list_machines` which contains the core
        implementation.

        """

        task_key = 'cloud:list_machines:%s' % self.cloud.id
        task = PeriodicTaskInfo.get_or_add(task_key)
        try:
            with task.task_runner(persist=persist):
                old_machines = {
                    '%s-%s' % (m.id, m.machine_id): m.as_dict()
                    for m in self.list_cached_machines()
                }
                machines = self._list_machines()
        except PeriodicTaskThresholdExceeded:
            self.cloud.disable()
            raise

        # Initialize AMQP connection to reuse for multiple messages.
        amqp_conn = Connection(config.AMQP_URI)

        if amqp_owner_listening(self.cloud.owner.id):
            if not config.MACHINE_PATCHES:
                amqp_publish_user(
                    self.cloud.owner.id,
                    routing_key='list_machines',
                    connection=amqp_conn,
                    data={
                        'cloud_id': self.cloud.id,
                        'machines':
                        [machine.as_dict() for machine in machines]
                    })
            else:
                # Publish patches to rabbitmq.
                new_machines = {
                    '%s-%s' % (m.id, m.machine_id): m.as_dict()
                    for m in machines
                }
                # Exclude last seen and probe fields from patch.
                for md in old_machines, new_machines:
                    for m in md.values():
                        m.pop('last_seen')
                        m.pop('probe')
                patch = jsonpatch.JsonPatch.from_diff(old_machines,
                                                      new_machines).patch
                if patch:
                    amqp_publish_user(self.cloud.owner.id,
                                      routing_key='patch_machines',
                                      connection=amqp_conn,
                                      data={
                                          'cloud_id': self.cloud.id,
                                          'patch': patch
                                      })

        # Push historic information for inventory and cost reporting.
        for machine in machines:
            data = {
                'owner_id': self.cloud.owner.id,
                'machine_id': machine.id,
                'cost_per_month': machine.cost.monthly
            }
            amqp_publish(exchange='machines_inventory',
                         routing_key='',
                         auto_delete=False,
                         data=data,
                         connection=amqp_conn)

        return machines
Example #33
class App(object):
    handlers = []

    @classmethod  # register single or multiple handlers to the application
    def register_handlers(cls, *handlers):
        cls.handlers = list(handlers)

    def __init__(self, host, userid, password, virtual_host):
        self.connection_meta = {}
        if host:
            self.connection_meta['host'] = host
        if userid:
            self.connection_meta['userid'] = userid
        if password:
            self.connection_meta['password'] = password
        if virtual_host:
            self.connection_meta['virtual_host'] = virtual_host

        self.terminated = False
        self.connection = None
        self.channel = None
        self.setup_conn()

    def setup_conn(self):
        self.connection = Connection(**self.connection_meta)
        self.channel = Channel(self.connection)

        # Since amqp v2.0, you should explicitly call Connection.connect() and Channel.open()
        if VERSION[0] >= 2:
            self.connection.connect()
            self.channel.open()

    @staticmethod
    def welcome():
        UserInterface.output(
            banner.lstrip() + "Connected to the channel.\n"
            "Type `help` to see the help document.\n"
            "Type `exit` or `Ctrl+C` whenever to exit this shell.")

    @staticmethod
    def help():
        for handler in App.handlers:
            command = handler.group + "." + handler.name
            for meta_argument in handler.meta_arguments:
                command += ' ' + str(meta_argument)
            UserInterface.output(command)

    def terminate(self):
        UserInterface.output("Oops! Please don't go... /(ㄒoㄒ)/~~")
        self.terminated = True

    def event_loop(self):
        self.terminated = False

        try:
            # event_loop starts an event loop to "read - parse - handle - output"
            while not self.terminated:
                cmd = UserInterface.read()
                try:
                    if len(cmd) == 0:
                        UserInterface.output(
                            "Nothing entered, please type any command")
                        continue

                    if cmd.split()[0] == "exit":
                        self.terminate()
                        continue

                    if cmd.split()[0] == "help":
                        App.help()
                        continue

                    self.dispatch(cmd)
                except UnsupportedCommandError:
                    UserInterface.output("Unsupported Command: {}".format(cmd))
                except InvalidArgumentValueError as e:
                    UserInterface.output(e.message)
        except KeyboardInterrupt:
            self.terminate()

    def dispatch(self, cmd):
        # dispatch forwards the cmd and its arguments to corresponding handler
        cmd, arguments = cmd.split()[0], cmd.split()[1:]
        for handler in App.handlers:
            if cmd == handler.group + "." + handler.name:
                return handler(channel=self.channel,
                               arguments=arguments).perform()
        raise UnsupportedCommandError
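A minimal driver for this shell, under obvious assumptions about the handler classes (every name other than App is hypothetical):

App.register_handlers(ExchangeDeclareHandler, QueueDeclareHandler)
app = App(host='localhost:5672', userid='guest',
          password='guest', virtual_host='/')
App.welcome()
app.event_loop()  # read-parse-handle-output until `exit` or Ctrl+C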