Example #1
    def test_filters(self):
        notification_filter = oslo_messaging.NotificationFilter(
            **self.filter_rule)
        endpoint = mock.Mock(spec=['info'], filter_rule=notification_filter)

        dispatcher = notify_dispatcher.NotificationDispatcher([endpoint],
                                                              serializer=None)
        message = {
            'payload': {
                'state': 'active',
                'virtual_size': None
            },
            'priority': 'info',
            'publisher_id': self.publisher_id,
            'event_type': self.event_type,
            'timestamp': '2014-03-03 18:21:04.369234',
            'message_id': '99863dda-97f0-443a-a0c1-6ed317b7fd45'
        }
        incoming = mock.Mock(ctxt=self.context, message=message)
        dispatcher.dispatch(incoming)

        if self.match:
            self.assertEqual(1, endpoint.info.call_count)
        else:
            self.assertEqual(0, endpoint.info.call_count)
Example #2
    def __init__(self, manager):
        super(NotificationBase, self).__init__()
        # NOTE(gordc): this is filter rule used by oslo.messaging to dispatch
        # messages to an endpoint.
        self.filter_rule = oslo_messaging.NotificationFilter(
            event_type='|'.join(self.event_types))
        self.manager = manager
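
A recurring pattern in these examples is joining a list of event-type strings with '|' into a single alternation regex, which oslo.messaging then matches against each notification's event_type. A minimal illustration of the resulting pattern (the event-type names and the direct use of re here are for demonstration only; oslo.messaging performs the matching internally):

import re

# Hypothetical event types an endpoint might subscribe to.
event_types = ['compute.instance.create.end', 'compute.instance.delete.end']

# '|'.join(...) yields one alternation pattern:
# 'compute.instance.create.end|compute.instance.delete.end'
pattern = '|'.join(event_types)

print(bool(re.match(pattern, 'compute.instance.delete.end')))  # True
print(bool(re.match(pattern, 'compute.instance.resize.end')))  # False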
Example #3
class InstanceOfflineNotificationEndpoint(BaseInstanceEndpoint):
    filter_rule = oslo_messaging.NotificationFilter(
        event_type=pci_utils.get_event_type_regexp(
            pci_utils.OFFLINE_EVENT_TYPES))

    def __init__(self, payload_decoder):
        super(InstanceOfflineNotificationEndpoint,
              self).__init__(payload_decoder)

    def info(self, ctxt, publisher_id, event_type, payload, metadata):
        instance_host = self.payload_decoder.decode_instance_host(payload)
        current_host = os.getenv("COMPUTE_HOSTNAME",
                                 default=socket.gethostname())
        if instance_host is not None and instance_host != current_host:
            LOG.debug(
                "Requeue notification: instance_host=%s != current_host=%s" %
                (instance_host, current_host))
            return oslo_messaging.NotificationResult.REQUEUE

        instance_uuid = self.payload_decoder.decode_instance_uuid(payload)
        if instance_uuid:
            LOG.info(
                "Instance offline: uuid=%s, instance_host=%s, event_type=%s" %
                (instance_uuid, instance_host, event_type))
            affinity.pci_irq_affinity.reset_irq_affinity(instance_uuid)
Example #4
    def test_filters(self):
        notification_filter = oslo_messaging.NotificationFilter(
            **self.filter_rule)
        endpoint = mock.Mock(spec=['info'], filter_rule=notification_filter)

        targets = [oslo_messaging.Target(topic='notifications')]
        dispatcher = notify_dispatcher.NotificationDispatcher(
            targets, [endpoint], serializer=None, allow_requeue=True)
        message = {
            'payload': {
                'state': 'active'
            },
            'priority': 'info',
            'publisher_id': self.publisher_id,
            'event_type': self.event_type,
            'timestamp': '2014-03-03 18:21:04.369234',
            'message_id': '99863dda-97f0-443a-a0c1-6ed317b7fd45'
        }
        incoming = mock.Mock(ctxt=self.context, message=message)
        callback = dispatcher(incoming)
        callback.run()
        callback.done()

        if self.match:
            self.assertEqual(1, endpoint.info.call_count)
        else:
            self.assertEqual(0, endpoint.info.call_count)
Example #5
    def __init__(self, project_id, cluster_id):
        self.filter_rule = messaging.NotificationFilter(
            publisher_id='^compute.*',
            event_type='^compute\.instance\..*',
            context={'project_id': '^%s$' % project_id})
        self.project_id = project_id
        self.cluster_id = cluster_id
        self.rpc = rpc_client.EngineClient()
Example #6
    def __init__(self, conf, publisher):
        super(NotificationEndpoint, self).__init__()
        # NOTE(gordc): this is filter rule used by oslo.messaging to dispatch
        # messages to an endpoint.
        if self.event_types:
            self.filter_rule = oslo_messaging.NotificationFilter(
                event_type='|'.join(self.event_types))
        self.conf = conf
        self.publisher = publisher
Example #7
class HogeEndpoint(object):
    filter_rule = oslo_messaging.NotificationFilter(event_type='event-hoge')

    def info(self, ctxt, publisher_id, event_type, payload, metadata):
        print("[HogeEndpoint] ctxt: %s" % (ctxt))
        print("[HogeEndpoint] publisher_id: %s" % (publisher_id))
        print("[HogeEndpoint] event_type: %s" % (event_type))
        print("[HogeEndpoint] payload: %s" % (payload))
        print("[HogeEndpoint] metadata: %s" % (metadata))
Example #8
    def __init__(self, project_id, cluster_id, recover_action):
        self.filter_rule = messaging.NotificationFilter(
            publisher_id='^orchestration.*',
            event_type='^orchestration\.stack\..*',
            context={'project_id': '^%s$' % project_id})
        self.project_id = project_id
        self.cluster_id = cluster_id
        self.rpc = rpc_client.EngineClient()
        self.recover_action = recover_action
Example #9
    def __init__(self, transporter):
        super(NotificationBase, self).__init__()
        # NOTE(gordc): this is filter rule used by oslo.messaging to dispatch
        # messages to an endpoint.
        self.filter_rule = oslo_messaging.NotificationFilter(
            event_type='|'.join(self.event_types))
        self.transporter = transporter
        # NOTE(gordc): if no publisher, this isn't a PipelineManager and
        # data should be requeued.
        self.requeue = False if hasattr(transporter, 'publisher') else True
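
The second NOTE above records a dispatch decision: when the endpoint has no publisher to hand data to, it asks oslo.messaging to requeue the message rather than acknowledge it. A hedged sketch of how such a flag is typically consumed in a handler (the class below is illustrative only; just NotificationResult.HANDLED and NotificationResult.REQUEUE come from oslo.messaging):

import oslo_messaging


class RequeueAwareEndpoint(object):
    """Illustrative sketch, not the project's actual endpoint."""

    def __init__(self, transporter):
        self.transporter = transporter
        # Requeue when there is nothing to publish the data with.
        self.requeue = not hasattr(transporter, 'publisher')

    def info(self, ctxt, publisher_id, event_type, payload, metadata):
        if self.requeue:
            # Ask oslo.messaging to put the message back on the queue;
            # this only takes effect if the listener was created with
            # allow_requeue=True (compare Example #4).
            return oslo_messaging.NotificationResult.REQUEUE
        # ... hand the payload to the transporter's publisher here ...
        return oslo_messaging.NotificationResult.HANDLED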
Example #10
class ImageDeleteEndpoint(object):
    filter_rule = oslo_messaging.NotificationFilter(
        publisher_id='^image.*', event_type='^image.delete$')

    def __init__(self, sp_name):
        self.sp_name = sp_name

    def info(self, ctxt, publisher_id, event_type, payload, metadata):
        LOG.info('Deleting image mapping %s -> %s at %s' %
                 (payload['id'], payload['owner'], self.sp_name))
        delete(ResourceMapping.find("images", payload['id']))
Example #11
class NotificationEndPoint():
    filter_rule = oslo_messaging.NotificationFilter(publisher_id='oslo_test')

    def info(self, ctxt, publisher_id, event_type, payload, metadata):
        """
        Method is used for notification client.
        """
        self.action(payload)

    def action(self, data):
        LOG.info(_LI(json.dumps(data)))
Example #12
class SnapshotDeleteEndpoint(object):
    filter_rule = oslo_messaging.NotificationFilter(
        publisher_id='^snapshot.*', event_type='^snapshot.delete.end$')

    def __init__(self, sp_name):
        self.sp_name = sp_name

    def info(self, ctxt, publisher_id, event_type, payload, metadata):
        LOG.info('Deleting snapshot mapping %s -> %s at %s' %
                 (payload['snapshot_id'], payload['tenant_id'], self.sp_name))
        delete(ResourceMapping.find("snapshots", payload['snapshot_id']))
Example #13
class InstDelNotificationEp(object):
    filter_rule = oslo_messaging.NotificationFilter(
        event_type=EventType.DELETE)

    def info(self, ctxt, publisher_id, event_type, payload, metadata):
        uuid = payload.get('instance_id', None)
        self.instance_delete_handler(uuid)

    def instance_delete_handler(self, instance_uuid):
        if instance_uuid is not None:
            LOG.info("instance_deleted: uuid=%s." % instance_uuid)
            pciIrqAffinity.reset_irq_affinity(instance_uuid)
Example #14
    def __init__(self, project_id, engine_id, recover_action):
        self.filter_rule = messaging.NotificationFilter(
            publisher_id='^orchestration.*',
            event_type='^orchestration\.stack\..*',
            context={'project_id': '^%s$' % project_id})
        self.project_id = project_id
        self.engine_id = engine_id
        self.rpc = rpc_client.EngineClient()
        self.recover_action = recover_action
        self.exchange = cfg.CONF.health_manager.heat_control_exchange
        self.target = messaging.Target(topic='notifications',
                                       exchange=self.exchange)
Example #15
    def __init__(self, project_id, cluster_id, recover_action):
        super(NovaNotificationEndpoint, self).__init__(project_id, cluster_id,
                                                       recover_action)
        self.filter_rule = messaging.NotificationFilter(
            publisher_id='^compute.*',
            event_type='^compute\.instance\..*',
            context={'project_id': '^%s$' % project_id})
        self.rpc = rpc_client.EngineClient()
        self.target = messaging.Target(
            topic=cfg.CONF.health_manager.nova_notification_topic,
            exchange=cfg.CONF.health_manager.nova_control_exchange,
        )
Example #16
class VolumeCreateEndpoint(object):
    filter_rule = oslo_messaging.NotificationFilter(
        publisher_id='^volume.*', event_type='^volume.create.start$')

    def __init__(self, sp_name):
        self.sp_name = sp_name

    def info(self, ctxt, publisher_id, event_type, payload, metadata):
        LOG.info('Creating volume mapping %s -> %s at %s' %
                 (payload['volume_id'], payload['tenant_id'], self.sp_name))
        insert(
            ResourceMapping("volumes", payload['volume_id'],
                            payload['tenant_id'], self.sp_name))
Example #17
class InstResizeNotificationEp(object):
    filter_rule = oslo_messaging.NotificationFilter(
        event_type=EventType.RESIZE)

    def info(self, ctxt, publisher_id, event_type, payload, metadata):
        uuid = payload.get('instance_id', None)
        self.instance_resize_handler(uuid)

    def instance_resize_handler(self, instance_uuid):
        if instance_uuid is not None:
            LOG.info("instance_resized: uuid=%s." % instance_uuid)
            eventlet.spawn(get_inst, instance_uuid,
                           query_instance_callback).wait()
Example #18
    def __init__(self, project_id, cluster_id, recover_action):
        super(HeatNotificationEndpoint, self).__init__(
            project_id, cluster_id, recover_action
        )
        self.filter_rule = messaging.NotificationFilter(
            publisher_id='^orchestration.*',
            event_type='^orchestration\.stack\..*',
            context={'project_id': '^%s$' % project_id})
        self.rpc = rpc_client.get_engine_client()
        self.target = messaging.Target(
            topic=cfg.CONF.health_manager.heat_notification_topic,
            exchange=cfg.CONF.health_manager.heat_control_exchange,
        )
Example #19
class VolumeTransferEndpoint(object):
    filter_rule = oslo_messaging.NotificationFilter(
        publisher_id='^volume.*', event_type='^volume.transfer.accept.end$')

    def __init__(self, sp_name):
        self.sp_name = sp_name

    def info(self, ctxt, publisher_id, event_type, payload, metadata):
        LOG.info('Moving volume mapping %s -> %s at %s' %
                 (payload['volume_id'], payload['tenant_id'], self.sp_name))
        mapping = ResourceMapping.find("volumes", payload['volume_id'])
        # Since we're manually updating a field, we have to sanitize the UUID
        # ourselves.
        mapping.tenant_id = payload['tenant_id'].replace("-", "")
Example #20
class NotificationEndpoint(object):
    """Task which exposes the API for consuming priority based notifications.

    The Oslo notification framework delivers notifications based on priority to
    matching callback APIs as defined in its notification listener endpoint
    list.

    Currently, from the Keystone perspective, the `info` API is sufficient, as
    Keystone sends notifications at `info` priority ONLY. Other priority-level
    APIs (warn, error, critical, audit, debug) are not needed here.
    """
    filter_rule = oslo_messaging.NotificationFilter(
        event_type='identity.user.updated')

    def info(self, ctxt, publisher_id, event_type, payload, metadata):
        """Receives notification at info level."""
        global kube_app
        kube_app.audit_local_registry_secrets()
        return oslo_messaging.NotificationResult.HANDLED
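
The docstring above summarizes the dispatch model: oslo.messaging delivers each notification to the endpoint method named after its priority (info, warn, error, ...), provided the endpoint's filter_rule matches. None of the snippets on this page show the listener wiring itself, so here is a minimal, self-contained sketch of how such an endpoint is typically attached to a notification listener; the DemoEndpoint class, transport URL, and topic are placeholders, while the oslo_messaging calls are the library's public API:

import time

import oslo_messaging
from oslo_config import cfg


class DemoEndpoint(object):
    # Only notifications whose event_type matches this regex are dispatched.
    filter_rule = oslo_messaging.NotificationFilter(
        event_type='identity.user.updated')

    def info(self, ctxt, publisher_id, event_type, payload, metadata):
        print('matched %s from %s' % (event_type, publisher_id))
        return oslo_messaging.NotificationResult.HANDLED


# Placeholder transport URL; a real service reads this from its config files.
transport = oslo_messaging.get_notification_transport(
    cfg.CONF, url='rabbit://guest:guest@localhost:5672/')
targets = [oslo_messaging.Target(topic='notifications')]

listener = oslo_messaging.get_notification_listener(
    transport, targets, [DemoEndpoint()], executor='threading')
listener.start()
try:
    while True:
        time.sleep(1)
except KeyboardInterrupt:
    listener.stop()
    listener.wait()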
Example #21
class HashRingMemberManagerNotificationEndpoint(object):
    """Class variables members and hashring is shared by all instances"""

    filter_rule = oslo_messaging.NotificationFilter(
        publisher_id='^ironic-neutron-agent.*')

    members = []
    hashring = hashring.HashRing([])

    def info(self, ctxt, publisher_id, event_type, payload, metadata):

        timestamp = timeutils.utcnow_ts()
        # Add members or update timestamp for existing members
        if payload['id'] not in [x['id'] for x in self.members]:
            try:
                LOG.info('Adding member id %s on host %s to hashring.',
                         payload['id'], payload['host'])
                self.hashring.add_node(payload['id'])
                self.members.append(payload)
            except Exception:
                LOG.exception('Failed to add member %s to hash ring!',
                              payload['id'])
        else:
            for member in self.members:
                if payload['id'] == member['id']:
                    member['timestamp'] = payload['timestamp']

        # Remove members that have not checked in for a while
        for member in self.members:
            if (timestamp -
                    member['timestamp']) > (CONF.AGENT.report_interval * 3):
                try:
                    LOG.info('Removing member %s on host %s from hashring.',
                             member['id'], member['host'])
                    self.hashring.remove_node(member['id'])
                    self.members.remove(member)
                except Exception:
                    LOG.exception('Failed to remove member %s from hash ring!',
                                  member['id'])

        return oslo_messaging.NotificationResult.HANDLED
Example #22
class NotificationEndPoint():

    # custom event type you want to receive
    VM_FAILURE_EVENTS = {
        'compute.instance.delete.end': 'DELETE',
        'compute.instance.pause.end': 'PAUSE',
        'compute.instance.power_off.end': 'POWER_OFF',
        'compute.instance.rebuild.error': 'REBUILD',
        'compute.instance.shutdown.end': 'SHUTDOWN',
        'compute.instance.soft_delete.end': 'SOFT_DELETE',
    }
    filter_rule = oslo_messaging.NotificationFilter(
            publisher_id='^compute.*')

    def info(self, ctxt, publisher_id, event_type, payload, metadata):
        status = payload.get('state_description')
        if status != "" and event_type in VM_FAILURE_EVENTS:
            self.action(payload)
            # throw event to dmaap
            producer = Producer()
            producer.produce_without_auth(json.dumps(payload))

    def action(self, data):
        LOG.info(_LI(json.dumps(data)))
Example #23
class NotificationEndpoint(object):
    filter_rule = oslo_messaging.NotificationFilter(
        publisher_id='^identity.*|^compute.*',
        event_type='^identity.project.(created|deleted)|'
        '^identity.user.deleted|'
        '^identity.role_assignment.deleted|'
        '^compute.instance.delete.end')

    #TODO(pino): what about user removal from a project? (rather than deletion)

    def __init__(self, engine):
        self.Session = scoped_session(sessionmaker(engine))

    def info(self, ctxt, publisher_id, event_type, payload, metadata):
        LOG.debug('notification:')
        LOG.debug(jsonutils.dumps(payload, indent=4))

        LOG.debug("publisher: %s, event: %s, metadata: %s", publisher_id,
                  event_type, metadata)

        se = self.Session()
        if event_type == 'identity.project.created':
            proj_id = canonical_uuid_string(payload.get('resource_info'))
            name = ks_utils.getProjectNameForID(proj_id)
            _createAuthority(self.Session, proj_id, name)
        elif event_type == 'identity.project.deleted':
            # Assume all the users and instances must have been removed.
            proj_id = canonical_uuid_string(payload.get('resource_info'))
            _deleteAuthority(self.Session,
                             db.getAuthority(self.Session(), proj_id))
        elif event_type == 'identity.role_assignment.deleted':
            users = []
            if 'user' in payload:
                users = [payload['user']]
            else:
                users = ks_utils.getUserIdsByGroupId(payload['group'])
            # TODO: look for domain if project isn't available
            proj_id = payload['project']
            for user_id in users:
                roles = ks_utils.getProjectRoleNamesForUser(proj_id, user_id)
                try:
                    se = self.Session()
                    db.revokeUserCertsForRoleChange(se, user_id, proj_id,
                                                    roles)
                except Exception as e:
                    LOG.error(
                        "Failed to revoke user {} certificates in project {} "
                        "after role {} was removed, due to exception {}".
                        format(user_id, proj_id, payload['role'], e))
                    import traceback
                    traceback.print_exc()
                    se.rollback()
                    self.Session.remove()
        elif event_type == 'identity.user.deleted':
            user_id = payload.get('resource_info')
            LOG.debug("User with ID {} deleted " "in Keystone".format(user_id))
            try:
                db.revokeUserCerts(se, user_id)
                # TODO(pino): also prevent generating new certs for this user?
            except Exception as e:
                LOG.error(
                    "Failed to revoke all certs for deleted user with ID {} "
                    "due to exception {}".format(user_id, e))
                se.rollback()
                self.Session.remove()
        elif event_type == 'compute.instance.delete.end':
            instance_id = canonical_uuid_string(payload.get('instance_id'))
            host = db.getHost(se, instance_id)
            if host is not None:
                _deleteHost(self.Session, host)
            # TODO(Pino): record the deletion to prevent new certs generation?
            pass
        else:
            LOG.error("Unknown update.")
Example #24
    def __init__(self, event_engine):
        self.event_engine = event_engine

        self.filter_rule = oslo_messaging.NotificationFilter(
            event_type='|'.join(self.event_types))
Example #25
class NotificationEndpoint(object):
    filter_rule = oslo_messaging.NotificationFilter(publisher_id='^computer.*')

    def warn(self, ctxt, publisher_id, event_type, payload, metadata):
        do_something(payload)
Example #26
    def __init__(self, monitor, filter_rules={}):
        self.monitor = monitor
        self.filter_rule = oslo_messaging.NotificationFilter(**filter_rules)
Example #27
class ErrorEndpoint(object):
    filter_rule = oslo_messaging.NotificationFilter(
        event_type='^instance\..*\.start$',
        context={'ctxt_key': 'regexp'})

    def error(self, ctxt, publisher_id, event_type, payload, metadata):
        do_something(payload)
Example #28
    def __init__(self, pipeline):
        self.filter_rule = oslo_messaging.NotificationFilter(
            publisher_id=pipeline.name)
        self.publish_context = PublishContext([pipeline])
        self.conf = pipeline.conf
Example #29
    def __init__(self):
        super(NotificationEndpoint, self).__init__()
        if self.event_types:
            self.filter_rule = oslo_messaging.NotificationFilter(
                event_type='|'.join(self.event_types))