# Imports shared by the snippets below (the py-amqp package). Helpers such as
# config, amqp_log, amqp_publish_user and log come from the surrounding
# codebase and are not shown here.
import functools
import json
import random

from amqp import Connection
from amqp import Message


def amqp_publish(exchange, routing_key, data):
    connection = Connection()
    channel = connection.channel()
    msg = Message(json.dumps(data))
    channel.basic_publish(msg, exchange=exchange, routing_key=routing_key)
    channel.close()
    connection.close()
def amqp_subscribe(exchange, queue, callback):
    def json_parse_dec(func):
        @functools.wraps(func)
        def wrapped(msg):
            try:
                msg.body = json.loads(msg.body)
            except (TypeError, ValueError):
                pass  # Leave the body as-is if it isn't valid JSON.
            return func(msg)
        return wrapped

    if not queue:
        queue = "mist-tmp_%d" % random.randrange(2 ** 20)
    connection = Connection()
    channel = connection.channel()
    channel.exchange_declare(exchange=exchange, type='fanout')
    channel.queue_declare(queue, exclusive=True)
    channel.queue_bind(queue, exchange)
    channel.basic_consume(queue=queue, callback=json_parse_dec(callback),
                          no_ack=True)
    try:
        while True:
            channel.wait()
    except BaseException as exc:
        # Catch BaseException so that KeyboardInterrupt is handled too.
        channel.close()
        connection.close()
        amqp_log("SUBSCRIPTION ENDED: %s %s %r" % (exchange, queue, exc))
def amqp_subscribe(exchange, callback, queue='', ex_type='fanout',
                   routing_keys=None):
    def json_parse_dec(func):
        @functools.wraps(func)
        def wrapped(msg):
            try:
                msg.body = json.loads(msg.body)
            except (TypeError, ValueError):
                pass  # Leave the body as-is if it isn't valid JSON.
            return func(msg)
        return wrapped

    connection = Connection(config.AMQP_URI)
    channel = connection.channel()
    channel.exchange_declare(exchange=exchange, type=ex_type, auto_delete=True)
    resp = channel.queue_declare(queue, exclusive=True)
    if not routing_keys:
        channel.queue_bind(resp.queue, exchange)
    else:
        for routing_key in routing_keys:
            channel.queue_bind(resp.queue, exchange, routing_key=routing_key)
    # Consume from resp.queue, so that the server-assigned name is used when
    # an empty queue name was passed in.
    channel.basic_consume(queue=resp.queue, callback=json_parse_dec(callback),
                          no_ack=True)
    try:
        while True:
            channel.wait()
    except BaseException as exc:
        # Catch BaseException so that KeyboardInterrupt is handled too.
        channel.close()
        connection.close()
        amqp_log("SUBSCRIPTION ENDED: %s %s %r" % (exchange, queue, exc))
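# A minimal usage sketch for the subscribe helper above, assuming a RabbitMQ
# broker reachable at config.AMQP_URI. The exchange name 'demo_events' and
# the routing key 'machines.created' are hypothetical placeholders.

def on_event(msg):
    # json_parse_dec has already decoded msg.body from JSON where possible.
    print('received:', msg.body)

# Blocks in channel.wait() until interrupted; KeyboardInterrupt is caught
# inside amqp_subscribe, which closes the channel and connection on exit.
amqp_subscribe(exchange='demo_events', callback=on_event,
               ex_type='direct', routing_keys=['machines.created'])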
def setup_conn(self):
    self.connection = Connection(**self.connection_meta)
    self.channel = Channel(self.connection)
    # Since amqp v2.0, you should explicitly call Connection.connect()
    # and Channel.open()
    if VERSION[0] >= 2:
        self.connection.connect()
        self.channel.open()
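# The same py-amqp version guard outside a class, as a standalone sketch:
# 'localhost:5672' is a placeholder broker address, and importing Channel and
# VERSION from the top-level amqp package is an assumption about py-amqp's
# public API. VERSION is a (major, minor, ...) tuple, so VERSION[0] >= 2
# detects amqp>=2.0.
from amqp import Channel, Connection, VERSION

conn = Connection('localhost:5672')
chan = Channel(conn)
if VERSION[0] >= 2:
    # amqp>=2.0 no longer connects/opens implicitly on instantiation.
    conn.connect()
    chan.open()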
def amqp_publish(exchange, routing_key, data, ex_type='fanout',
                 ex_declare=False):
    connection = Connection()
    channel = connection.channel()
    if ex_declare:
        channel.exchange_declare(exchange=exchange, type=ex_type)
    msg = Message(json.dumps(data))
    channel.basic_publish(msg, exchange=exchange, routing_key=routing_key)
    channel.close()
    connection.close()
def amqp_owner_listening(owner):
    connection = Connection(config.AMQP_URI)
    channel = connection.channel()
    try:
        # A passive declare doesn't create the exchange; it just raises
        # NotFound if the exchange doesn't already exist.
        channel.exchange_declare(exchange=_amqp_owner_exchange(owner),
                                 type='fanout', passive=True)
    except AmqpNotFound:
        return False
    else:
        return True
    finally:
        channel.close()
        connection.close()
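# Hypothetical guard built on the helper above: compute and publish a payload
# only when at least one client is bound to the owner's exchange. The owner
# id, exchange name and routing key are placeholders.
owner_id = 'some-owner-id'
if amqp_owner_listening(owner_id):
    amqp_publish(exchange='demo_events', routing_key='update',
                 data={'owner': owner_id, 'status': 'ok'}, ex_declare=True)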
def list_networks(self, persist=True):
    """Return list of networks for cloud

    A list of networks is fetched from libcloud, the data is processed,
    stored on network models, and a list of network models is returned.

    Subclasses SHOULD NOT override or extend this method. This method
    wraps `_list_networks` which contains the core implementation.

    """
    task_key = 'cloud:list_networks:%s' % self.cloud.id
    task = PeriodicTaskInfo.get_or_add(task_key)
    with task.task_runner(persist=persist):
        cached_networks = {'%s' % n.id: n.as_dict()
                           for n in self.list_cached_networks()}
        networks = self._list_networks()

    # Initialize AMQP connection to reuse for multiple messages.
    amqp_conn = Connection(config.AMQP_URI)
    if amqp_owner_listening(self.cloud.owner.id):
        networks_dict = [n.as_dict() for n in networks]
        if cached_networks and networks_dict:
            # Publish patches to rabbitmq.
            new_networks = {'%s' % n['id']: n for n in networks_dict}
            patch = jsonpatch.JsonPatch.from_diff(cached_networks,
                                                  new_networks).patch
            if patch:
                amqp_publish_user(self.cloud.owner.id,
                                  routing_key='patch_networks',
                                  connection=amqp_conn,
                                  data={'cloud_id': self.cloud.id,
                                        'patch': patch})
        else:
            # TODO: remove this block, once patches
            # are implemented in the UI
            amqp_publish_user(self.cloud.owner.id,
                              routing_key='list_networks',
                              connection=amqp_conn,
                              data={'cloud_id': self.cloud.id,
                                    'networks': networks_dict})
    return networks
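# What JsonPatch.from_diff yields in isolation: a list of RFC 6902 operations
# that transform the cached snapshot into the fresh one. Standalone example
# with made-up network data.
import jsonpatch

cached = {'n1': {'name': 'default', 'cidr': '10.0.0.0/24'}}
fresh = {'n1': {'name': 'default', 'cidr': '10.0.1.0/24'},
         'n2': {'name': 'private', 'cidr': '172.16.0.0/16'}}
patch = jsonpatch.JsonPatch.from_diff(cached, fresh).patch
# patch now holds something like (operation order may vary):
# [{'op': 'replace', 'path': '/n1/cidr', 'value': '10.0.1.0/24'},
#  {'op': 'add', 'path': '/n2',
#   'value': {'name': 'private', 'cidr': '172.16.0.0/16'}}]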
def amqp_publish(exchange, routing_key, data, ex_type='fanout',
                 ex_declare=False, auto_delete=True, connection=None):
    close = False
    if connection is None:
        connection = Connection(config.AMQP_URI)
        close = True
    channel = connection.channel()
    if ex_declare:
        channel.exchange_declare(exchange=exchange, type=ex_type,
                                 auto_delete=auto_delete)
    msg = Message(json.dumps(data))
    channel.basic_publish(msg, exchange=exchange, routing_key=routing_key)
    channel.close()
    if close:
        connection.close()
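# Reusing a single connection for several publishes, as the list_* methods in
# this section do; 'demo_events' is a placeholder exchange. amqp_publish opens
# and closes a fresh channel per call but leaves a caller-supplied connection
# open, so the caller closes it once at the end.
conn = Connection(config.AMQP_URI)
for seq in range(3):
    amqp_publish(exchange='demo_events', routing_key='tick',
                 data={'seq': seq}, ex_declare=(seq == 0), connection=conn)
conn.close()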
def send(self, users=None, dismiss=False):
    # FIXME Imported here due to circular dependency issues.
    from mist.api.notifications.models import InAppNotification
    from mist.api.notifications.models import UserNotificationPolicy

    # Get the list of `InAppNotification`s in the current context before
    # any update takes place.
    owner_old_ntfs = list(InAppNotification.objects(owner=self.ntf.owner))

    if not users:
        users = self.ntf.owner.members
    elif not isinstance(users, list):
        users = [users]

    # Save/update/dismiss notifications. Initializing the dismissal sets up
    # front also avoids a NameError further down when dismiss is False.
    dismissed_by = set(self.ntf.dismissed_by)
    old_dismissed_by = list(dismissed_by)
    if dismiss:
        dismissed_by |= set(user.id for user in users)
        self.ntf.dismissed_by = list(dismissed_by)

    # Is anyone listening?
    if not amqp_owner_listening(self.ntf.owner.id):
        return

    # Initialize AMQP connection to reuse for multiple messages.
    amqp_conn = Connection(config.AMQP_URI)

    # Re-fetch all notifications in order to calculate the diff between
    # the two lists.
    owner_new_ntfs = list(InAppNotification.objects(owner=self.ntf.owner))

    # Apply each user's notification policy on the above lists to get rid
    # of notifications users are not interested in.
    for user in users:
        user_old_ntfs, user_new_ntfs = [], []
        try:
            np = UserNotificationPolicy.objects.get(user_id=user.id)
        except UserNotificationPolicy.DoesNotExist:
            log.debug('No UserNotificationPolicy found for %s', user)
            user_old_ntfs = [ntf.as_dict() for ntf in owner_old_ntfs
                             if not (self.ntf.id == ntf.id and
                                     user.id in old_dismissed_by)]
            user_new_ntfs = [ntf.as_dict() for ntf in owner_new_ntfs
                             if not (self.ntf.id == ntf.id and
                                     user.id in dismissed_by)]
        else:
            user_old_ntfs = [ntf.as_dict() for ntf in owner_old_ntfs
                             if not np.has_blocked(ntf) and
                             not (self.ntf.id == ntf.id and
                                  user.id in old_dismissed_by)]
            user_new_ntfs = [ntf.as_dict() for ntf in owner_new_ntfs
                             if not np.has_blocked(ntf) and
                             not (self.ntf.id == ntf.id and
                                  user.id in dismissed_by)]

        # Now we can save the dismissed notification.
        self.ntf.save()

        # Calculate diff.
        patch = jsonpatch.JsonPatch.from_diff(user_old_ntfs,
                                              user_new_ntfs).patch
        if patch:
            amqp_publish_user(self.ntf.owner.id,
                              routing_key='patch_notifications',
                              connection=amqp_conn,
                              data={'user': user.id, 'patch': patch})

    # Finally, try to close the AMQP connection.
    try:
        amqp_conn.close()
    except Exception as exc:
        log.exception(repr(exc))
def list_machines(self, persist=True):
    """Return list of machines for cloud

    A list of nodes is fetched from libcloud, the data is processed,
    stored on machine models, and a list of machine models is returned.

    Subclasses SHOULD NOT override or extend this method. This method
    wraps `_list_machines` which contains the core implementation.

    """
    task_key = 'cloud:list_machines:%s' % self.cloud.id
    task = PeriodicTaskInfo.get_or_add(task_key)
    try:
        with task.task_runner(persist=persist):
            old_machines = {'%s-%s' % (m.id, m.machine_id): m.as_dict()
                            for m in self.list_cached_machines()}
            machines = self._list_machines()
    except PeriodicTaskThresholdExceeded:
        self.cloud.disable()
        raise

    # Initialize AMQP connection to reuse for multiple messages.
    amqp_conn = Connection(config.AMQP_URI)
    if amqp_owner_listening(self.cloud.owner.id):
        if not config.MACHINE_PATCHES:
            amqp_publish_user(self.cloud.owner.id,
                              routing_key='list_machines',
                              connection=amqp_conn,
                              data={'cloud_id': self.cloud.id,
                                    'machines': [machine.as_dict()
                                                 for machine in machines]})
        else:
            # Publish patches to rabbitmq.
            new_machines = {'%s-%s' % (m.id, m.machine_id): m.as_dict()
                            for m in machines}
            # Exclude last seen and probe fields from patch.
            for md in old_machines, new_machines:
                for m in md.values():
                    m.pop('last_seen', None)
                    m.pop('probe', None)
            patch = jsonpatch.JsonPatch.from_diff(old_machines,
                                                  new_machines).patch
            if patch:
                amqp_publish_user(self.cloud.owner.id,
                                  routing_key='patch_machines',
                                  connection=amqp_conn,
                                  data={'cloud_id': self.cloud.id,
                                        'patch': patch})

    # Push historic information for inventory and cost reporting.
    for machine in machines:
        data = {'owner_id': self.cloud.owner.id,
                'machine_id': machine.id,
                'cost_per_month': machine.cost.monthly}
        amqp_publish(exchange='machines_inventory', routing_key='',
                     auto_delete=False, data=data, connection=amqp_conn)

    return machines