def associate_floatingip_router(self, context, floatingip_id, router_id):
    """Point a floating IP at a router's gateway port.

    Looks up the gateway port the router owns on the floating IP's
    external network and associates the floating IP with it, emitting
    'floatingip.update.start'/'.end' notifications around the update.

    :param context: neutron api request context
    :param floatingip_id: UUID of the floating IP to associate
    :param router_id: UUID of the router whose gateway port is used
    :raises uosfloatingip.GWPortForFloatingIPNotFound: if the router
        has no gateway port on the floating network.
    """
    with context.session.begin(subtransactions=True):
        floatingip_db = self._get_floatingip(context, floatingip_id)
        floating_network_id = floatingip_db['floating_network_id']
        try:
            # Elevated context: the gateway port belongs to the admin
            # side, not necessarily the caller's tenant.
            port_qry = context.elevated().session.query(models_v2.Port)
            gw_portdb = port_qry.filter_by(
                network_id=floating_network_id,
                device_id=router_id,
                device_owner=DEVICE_OWNER_ROUTER_GW).one()
        except exc.NoResultFound:
            raise uosfloatingip.GWPortForFloatingIPNotFound(id=router_id)
        tenant_id = floatingip_db.tenant_id
        # Notify under the floating IP owner's tenant context.
        _ctx = n_context.Context('', tenant_id)
        body = {'floatingip': {'port_id': gw_portdb['id']}}
        payload = {'id': floatingip_db.id}
        payload.update(body)
        # Fix: a single correctly-spelled local notifier.  The original
        # created two notifiers and also clobbered self._notifier as an
        # accidental side effect of a typo ('_notifer').
        _notifier = n_rpc.get_notifier('network')
        _notifier.info(_ctx, 'floatingip.update.start', payload)
        result = self.update_floatingip(context, floatingip_id, body)
        _notifier.info(_ctx, 'floatingip.update.end',
                       {'floatingip': result})
        return result
def _notify_new_addresses(self, context, new_addresses):
    """Emit an 'ip_block.address.create' event per newly allocated IP."""
    for address in new_addresses:
        payload = {
            "used_by_tenant_id": address["used_by_tenant_id"],
            "ip_block_id": address["subnet_id"],
            "ip_address": address["address_readable"],
            "device_ids": [port["device_id"] for port in address["ports"]],
            "created_at": address["created_at"],
        }
        n_rpc.get_notifier("network").info(
            context, "ip_block.address.create", payload)
def deallocate_ip_address(self, context, address):
    """Mark *address* deallocated and emit 'ip_block.address.delete'."""
    address["deallocated"] = 1
    payload = {
        "used_by_tenant_id": address["used_by_tenant_id"],
        "ip_block_id": address["subnet_id"],
        "ip_address": address["address_readable"],
        "device_ids": [port["device_id"] for port in address["ports"]],
        "created_at": address["created_at"],
        "deleted_at": timeutils.utcnow(),
    }
    n_rpc.get_notifier("network").info(
        context, "ip_block.address.delete", payload)
def remove_router_interface(self, context, router_id, interface_info):
    """Detach an interface (identified by port or subnet) from a router.

    Notifies l3 agents, emits 'router.interface.delete', and returns a
    summary dict of the removed interface.
    """
    if not interface_info:
        msg = _("Either subnet_id or port_id must be specified")
        raise n_exc.BadRequest(resource='router', msg=msg)
    port_id = interface_info.get('port_id')
    subnet_id = interface_info.get('subnet_id')
    device_owner = self._get_device_owner(context, router_id)
    if port_id:
        port, subnet = self._remove_interface_by_port(
            context, router_id, port_id, subnet_id, device_owner)
    elif subnet_id:
        port, subnet = self._remove_interface_by_subnet(
            context, router_id, subnet_id, device_owner)
    self.l3_rpc_notifier.routers_updated(
        context, [router_id], 'remove_router_interface')
    info = {'id': router_id,
            'tenant_id': port['tenant_id'],
            'port_id': port['id'],
            'subnet_id': subnet['id']}
    n_rpc.get_notifier('network').info(
        context, 'router.interface.delete', {'router_interface': info})
    return info
def add_router_interface(self, context, router_id, interface_info):
    """Attach an interface to a router by port or by subnet.

    Notifies l3 agents, emits 'router.interface.create', and returns a
    summary dict describing the new interface.
    """
    add_by_port, add_by_sub = self._validate_interface_info(interface_info)
    device_owner = self._get_device_owner(context, router_id)
    if add_by_port:
        port = self._add_interface_by_port(
            context, router_id, interface_info['port_id'], device_owner)
    elif add_by_sub:
        port = self._add_interface_by_subnet(
            context, router_id, interface_info['subnet_id'], device_owner)
    self.l3_rpc_notifier.routers_updated(
        context, [router_id], 'add_router_interface')
    info = dict(id=router_id,
                tenant_id=port['tenant_id'],
                port_id=port['id'],
                subnet_id=port['fixed_ips'][0]['subnet_id'])
    n_rpc.get_notifier('network').info(
        context, 'router.interface.create', {'router_interface': info})
    return info
def remove_router_interface(self, context, router_id, interface_info):
    """Remove a router interface given a port id or a subnet id.

    :raises n_exc.BadRequest: when neither identifier is supplied.
    """
    if not interface_info:
        raise n_exc.BadRequest(
            resource='router',
            msg=_("Either subnet_id or port_id must be specified"))
    port_id = interface_info.get('port_id')
    subnet_id = interface_info.get('subnet_id')
    device_owner = self._get_device_owner(context, router_id)
    if port_id:
        port, subnet = self._remove_interface_by_port(
            context, router_id, port_id, subnet_id, device_owner)
    elif subnet_id:
        port, subnet = self._remove_interface_by_subnet(
            context, router_id, subnet_id, device_owner)
    # Tell the l3 agents, then publish the API-level event.
    self.l3_rpc_notifier.routers_updated(
        context, [router_id], 'remove_router_interface')
    info = dict(id=router_id,
                tenant_id=port['tenant_id'],
                port_id=port['id'],
                subnet_id=subnet['id'])
    notifier = n_rpc.get_notifier('network')
    notifier.info(context, 'router.interface.delete',
                  {'router_interface': info})
    return info
def __init__(self, plugin, collection, resource, attr_info,
             allow_bulk=False, member_actions=None, parent=None,
             allow_pagination=False, allow_sorting=False):
    """Build an API controller for one resource type.

    Wires up plugin capabilities (bulk/pagination/sorting), the
    notifiers fired on CRUD events, and the plugin handler method
    names derived from the collection/resource names.
    """
    if member_actions is None:
        member_actions = []
    self._plugin = plugin
    # API names use dashes; plugin handler names use underscores.
    self._collection = collection.replace('-', '_')
    self._resource = resource.replace('-', '_')
    self._attr_info = attr_info
    self._allow_bulk = allow_bulk
    self._allow_pagination = allow_pagination
    self._allow_sorting = allow_sorting
    self._native_bulk = self._is_native_bulk_supported()
    self._native_pagination = self._is_native_pagination_supported()
    self._native_sorting = self._is_native_sorting_supported()
    self._policy_attrs = [attr for attr, spec in self._attr_info.items()
                          if spec.get('required_by_policy')]
    self._notifier = n_rpc.get_notifier('network')
    # use plugin's dhcp notifier, if this is already instantiated
    agent_notifiers = getattr(plugin, 'agent_notifiers', {})
    self._dhcp_agent_notifier = (
        agent_notifiers.get(const.AGENT_TYPE_DHCP) or
        dhcp_rpc_agent_api.DhcpAgentNotifyAPI())
    if cfg.CONF.notify_nova_on_port_data_changes:
        from neutron.notifiers import nova
        self._nova_notifier = nova.Notifier()
    self._member_actions = member_actions
    self._primary_key = self._get_primary_key()
    if self._allow_pagination and self._native_pagination:
        # Native pagination need native sorting support
        if not self._native_sorting:
            raise exceptions.Invalid(
                _("Native pagination depend on native sorting"))
        if not self._allow_sorting:
            LOG.info(_LI("Allow sorting is enabled because native "
                         "pagination requires native sorting"))
            self._allow_sorting = True
    if parent:
        self._parent_id_name = '%s_id' % parent['member_name']
        parent_part = '_%s' % parent['member_name']
    else:
        self._parent_id_name = None
        parent_part = ''
    self._plugin_handlers = {
        self.LIST: 'get%s_%s' % (parent_part, self._collection),
        self.SHOW: 'get%s_%s' % (parent_part, self._resource),
    }
    self._plugin_handlers.update(
        (verb, '%s%s_%s' % (verb, parent_part, self._resource))
        for verb in (self.CREATE, self.UPDATE, self.DELETE))
def update_floatingip_registerno(self, request, id, body):
    """Update the 'uos_registerno' field of a floating IP.

    Emits 'floatingip.update_registerno.start'/'.end' notifications
    around the DB update.

    :raises qexception.BadRequest: if the body has no usable (string)
        'uos_registerno' value.
    :raises l3.FloatingIPNotFound: if the floating IP does not exist.
    """
    context = request.context
    router = self._get_service_plugin(constants.L3_ROUTER_NAT)
    # NOTE(gongysh) to fix the privilege problem
    router.get_floatingip(context, id)
    _notifier = n_rpc.get_notifier('network')
    payload = body.copy()
    registerno = payload.get('uos_registerno', None)
    try:
        # Fix: a non-string value (e.g. int) used to pass the old
        # "is None" check and then crash on .strip() below with a 500;
        # reject it as a 400 just like the rate-limit sibling does.
        registerno = registerno.strip()
    except AttributeError:
        msg = _("Invalid format: %s") % request.body
        raise qexception.BadRequest(resource='body', msg=msg)
    payload['id'] = id
    _notifier.info(context, 'floatingip.update_registerno.start', payload)
    # NOTE(gongysh) do we need to validate it?
    with context.session.begin(subtransactions=True):
        try:
            fip_qry = context.session.query(l3_db.FloatingIP)
            floating_ip = fip_qry.filter_by(id=id).one()
        except sa_exc.NoResultFound:
            raise l3.FloatingIPNotFound(floatingip_id=id)
        floating_ip.update({'uos_registerno': registerno})
        result = router._make_floatingip_dict(floating_ip)
        _notifier.info(context, 'floatingip.update_registerno.end',
                       {'floatingip': result})
        return result
def update_floatingip_ratelimit(self, request, id, body):
    """Update a floating IP's rate limit and kick its router, if any.

    Emits 'floatingip.update_ratelimit.start'/'.end' notifications
    around the DB update.

    :raises qexception.BadRequest: if the body carries no integer
        'rate_limit'.
    :raises l3.FloatingIPNotFound: if the floating IP does not exist.
    """
    context = request.context
    router = self._get_service_plugin(constants.L3_ROUTER_NAT)
    # NOTE(gongysh) to fix the privilege problem
    router.get_floatingip(context, id)
    try:
        payload = body.copy()
        rate_limit = int(payload.get('rate_limit'))
    except (AttributeError, ValueError, TypeError):
        msg = _("Invalid format: %s") % request.body
        raise qexception.BadRequest(resource='body', msg=msg)
    payload['id'] = id
    _notifier = n_rpc.get_notifier('network')
    _notifier.info(context, 'floatingip.update_ratelimit.start', payload)
    with context.session.begin(subtransactions=True):
        try:
            fip_qry = context.session.query(l3_db.FloatingIP)
            floating_ip = fip_qry.filter_by(id=id).one()
            # Fix: store the validated integer; the original converted
            # to int for validation but then persisted the raw
            # (possibly string) body value.
            floating_ip.update({'rate_limit': rate_limit})
        except sa_exc.NoResultFound:
            raise l3.FloatingIPNotFound(floatingip_id=id)
        router_id = floating_ip['router_id']
        if router_id:
            router.l3_rpc_notifier.routers_updated(context, [router_id])
        result = router._make_floatingip_dict(floating_ip)
        _notifier.info(context, 'floatingip.update_ratelimit.end',
                       {'floatingip': result})
        return result
def notify_tag_action(context, action, parent, parent_id, tags=None):
    """Send a 'tag.<action>' notification for a parent resource."""
    # TODO(hichihara): Add 'updated_at' into payload
    payload = {'parent_resource': parent,
               'parent_resource_id': parent_id}
    if tags is not None:
        payload['tags'] = tags
    n_rpc.get_notifier('network').info(context, 'tag.%s' % action, payload)
def notify_router_interface_action(self, context, router_interface_info,
                                   routers, action):
    """Propagate an interface add/remove to cfg agents and the bus."""
    self.l3_cfg_rpc_notifier.routers_updated(
        context, routers, "%s_router_interface" % action)
    verb = {"add": "create", "remove": "delete"}[action]
    n_rpc.get_notifier("network").info(
        context, "router.interface.%s" % verb,
        {"router_interface": router_interface_info})
def __init__(self, plugin, collection, resource, attr_info,
             allow_bulk=False, member_actions=None, parent=None,
             allow_pagination=False, allow_sorting=False):
    """Initialize the controller for one API resource.

    Resolves plugin-native capabilities, sets up the event and DHCP
    agent notifiers, and precomputes the plugin handler method names
    for list/show/create/update/delete.
    """
    if member_actions is None:
        member_actions = []
    self._plugin = plugin
    self._collection = collection.replace("-", "_")
    self._resource = resource.replace("-", "_")
    self._attr_info = attr_info
    self._allow_bulk = allow_bulk
    self._allow_pagination = allow_pagination
    self._allow_sorting = allow_sorting
    self._native_bulk = self._is_native_bulk_supported()
    self._native_pagination = self._is_native_pagination_supported()
    self._native_sorting = self._is_native_sorting_supported()
    self._policy_attrs = [name for name, info in self._attr_info.items()
                          if info.get("required_by_policy")]
    self._notifier = n_rpc.get_notifier("network")
    # use plugin's dhcp notifier, if this is already instantiated
    agent_notifiers = getattr(plugin, "agent_notifiers", {})
    self._dhcp_agent_notifier = (
        agent_notifiers.get(const.AGENT_TYPE_DHCP)
        or dhcp_rpc_agent_api.DhcpAgentNotifyAPI())
    if cfg.CONF.notify_nova_on_port_data_changes:
        from neutron.notifiers import nova
        self._nova_notifier = nova.Notifier()
    self._member_actions = member_actions
    self._primary_key = self._get_primary_key()
    if self._allow_pagination and self._native_pagination:
        # Native pagination need native sorting support
        if not self._native_sorting:
            raise exceptions.Invalid(
                _("Native pagination depend on native sorting"))
        if not self._allow_sorting:
            LOG.info(_("Allow sorting is enabled because native "
                       "pagination requires native sorting"))
            self._allow_sorting = True
    if parent:
        member = parent["member_name"]
        self._parent_id_name = "%s_id" % member
        parent_part = "_%s" % member
    else:
        self._parent_id_name = None
        parent_part = ""
    self._plugin_handlers = {
        self.LIST: "get%s_%s" % (parent_part, self._collection),
        self.SHOW: "get%s_%s" % (parent_part, self._resource),
    }
    for verb in (self.CREATE, self.UPDATE, self.DELETE):
        self._plugin_handlers[verb] = "%s%s_%s" % (
            verb, parent_part, self._resource)
def notify_router_interface_action(self, context, router_interface_info,
                                   routers, action):
    """Tell cfg agents about an interface change and emit the matching
    router.interface.{create,delete} notification."""
    l3_method = action + '_router_interface'
    self.l3_cfg_rpc_notifier.routers_updated(context, routers, l3_method)
    event_suffix = {'add': 'create', 'remove': 'delete'}[action]
    notifier = n_rpc.get_notifier('network')
    notifier.info(context, 'router.interface.' + event_suffix,
                  {'router_interface': router_interface_info})
def test_get_notifier_null_publisher(self):
    """get_notifier without a publisher builds 'service.host' itself."""
    rpc.NOTIFIER = mock.Mock()
    rpc.NOTIFIER.prepare = mock.Mock(return_value='notifier')
    result = rpc.get_notifier('service', host='bar')
    rpc.NOTIFIER.prepare.assert_called_once_with(
        publisher_id='service.bar')
    self.assertEqual('notifier', result)
def notify_router_interface_action(
        self, context, router_interface_info, routers, action):
    """Notify cfg agents of an interface action and publish the event."""
    self.l3_cfg_rpc_notifier.routers_updated(
        context, routers, '%s_router_interface' % action)
    mapping = {'add': 'create', 'remove': 'delete'}
    notifier = n_rpc.get_notifier('network')
    notifier.info(context,
                  'router.interface.%s' % mapping[action],
                  {'router_interface': router_interface_info})
def __init__(self, plugin, collection, resource, attr_info,
             allow_bulk=False, member_actions=None, parent=None,
             allow_pagination=False, allow_sorting=False):
    """Controller setup for one API resource.

    Resolves the plugin's native capabilities and precomputes the
    plugin handler method names for list/show/create/update/delete.
    """
    if member_actions is None:
        member_actions = []
    self._plugin = plugin
    self._collection = collection.replace('-', '_')  # e.g. "networks"
    self._resource = resource.replace('-', '_')      # e.g. "network"
    self._attr_info = attr_info
    # Feature toggles requested by the caller.
    self._allow_bulk = allow_bulk
    self._allow_pagination = allow_pagination
    self._allow_sorting = allow_sorting
    # What the plugin natively supports.
    self._native_bulk = self._is_native_bulk_supported()
    self._native_pagination = self._is_native_pagination_supported()
    self._native_sorting = self._is_native_sorting_supported()
    self._policy_attrs = self._init_policy_attrs()
    self._notifier = n_rpc.get_notifier('network')
    self._member_actions = member_actions
    self._primary_key = self._get_primary_key()
    if self._allow_pagination and self._native_pagination:
        # Native pagination need native sorting support
        if not self._native_sorting:
            raise exceptions.Invalid(
                _("Native pagination depend on native sorting"))
        if not self._allow_sorting:
            LOG.info(_LI("Allow sorting is enabled because native "
                         "pagination requires native sorting"))
            self._allow_sorting = True
    self.parent = parent
    if parent:
        self._parent_id_name = '%s_id' % parent['member_name']
        parent_part = '_%s' % parent['member_name']
    else:
        self._parent_id_name = None
        parent_part = ''
    # list -> get_networks (list op), show -> get_network (show op)
    self._plugin_handlers = {
        self.LIST: 'get%s_%s' % (parent_part, self._collection),
        self.SHOW: 'get%s_%s' % (parent_part, self._resource),
    }
    # create -> create_network, update -> update_network,
    # delete -> delete_network
    for verb in (self.CREATE, self.UPDATE, self.DELETE):
        self._plugin_handlers[verb] = '%s%s_%s' % (
            verb, parent_part, self._resource)
def delete_subnet(context, id):
    """Delete a subnet.

    : param context: neutron api request context
    : param id: UUID representing the subnet to delete.
    :raises exceptions.SubnetNotFound: if no such subnet exists.
    """
    # Fix: pass lazy %-style args to the logger instead of eagerly
    # formatting the message with '%'.
    LOG.info("delete_subnet %s for tenant %s", id, context.tenant_id)
    with context.session.begin():
        subnet = db_api.subnet_find(context, id=id, scope=db_api.ONE)
        if not subnet:
            raise exceptions.SubnetNotFound(subnet_id=id)
        # Snapshot the fields needed for billing before deletion.
        payload = dict(tenant_id=subnet["tenant_id"],
                       ip_block_id=subnet["id"],
                       created_at=subnet["created_at"],
                       deleted_at=timeutils.utcnow())
        _delete_subnet(context, subnet)
        n_rpc.get_notifier("network").info(
            context, "ip_block.delete", payload)
def do_notify(context, event_type, payload):
    """Generic Notifier.

    Parameters:
        - `context`: session context
        - `event_type`: the event type to report, i.e. ip.usage
        - `payload`: dict containing the payload to send
    """
    LOG.debug('IP_BILL: notifying {}'.format(payload))
    n_rpc.get_notifier('network').info(context, event_type, payload)
def __init__(self, plugin, collection, resource, attr_info,
             allow_bulk=False, member_actions=None, parent=None,
             allow_pagination=False, allow_sorting=False):
    """Set up the controller for one API resource type."""
    if member_actions is None:
        member_actions = []
    self._plugin = plugin
    # Dash-separated API names map to underscore handler names.
    self._collection = collection.replace("-", "_")
    self._resource = resource.replace("-", "_")
    self._attr_info = attr_info
    self._allow_bulk = allow_bulk
    self._allow_pagination = allow_pagination
    self._allow_sorting = allow_sorting
    self._native_bulk = self._is_native_bulk_supported()
    self._native_pagination = self._is_native_pagination_supported()
    self._native_sorting = self._is_native_sorting_supported()
    self._policy_attrs = [attr for attr, spec in self._attr_info.items()
                          if spec.get("required_by_policy")]
    self._notifier = n_rpc.get_notifier("network")
    self._member_actions = member_actions
    self._primary_key = self._get_primary_key()
    if self._allow_pagination and self._native_pagination:
        # Native pagination need native sorting support
        if not self._native_sorting:
            raise exceptions.Invalid(
                _("Native pagination depend on native sorting"))
        if not self._allow_sorting:
            LOG.info(_LI("Allow sorting is enabled because native "
                         "pagination requires native sorting"))
            self._allow_sorting = True
    self.parent = parent
    if parent:
        member = parent["member_name"]
        self._parent_id_name = "%s_id" % member
        parent_part = "_%s" % member
    else:
        self._parent_id_name = None
        parent_part = ""
    self._plugin_handlers = {
        self.LIST: "get%s_%s" % (parent_part, self._collection),
        self.SHOW: "get%s_%s" % (parent_part, self._resource),
    }
    self._plugin_handlers.update(
        (verb, "%s%s_%s" % (verb, parent_part, self._resource))
        for verb in (self.CREATE, self.UPDATE, self.DELETE))
def notify_router_interface_action(
        self, context, router_id, tenant_id, port_id, subnet_id, action):
    """Fan out RPC + bus notification for a router interface change.

    Returns the summary dict used as the notification payload.
    """
    self.l3_rpc_notifier.routers_updated(
        context, [router_id], '%s_router_interface' % action)
    event = 'router.interface.%s' % {'add': 'create',
                                     'remove': 'delete'}[action]
    info = dict(id=router_id,
                tenant_id=tenant_id,
                port_id=port_id,
                subnet_id=subnet_id)
    n_rpc.get_notifier('network').info(
        context, event, {'router_interface': info})
    return info
def __init__(self, plugin, collection, resource, attr_info,
             allow_bulk=False, member_actions=None, parent=None,
             allow_pagination=False, allow_sorting=False):
    """Create the API controller for one resource.

    Determines native plugin capabilities, sets up notifiers and
    builds the CRUD handler-name table.
    """
    if member_actions is None:
        member_actions = []
    self._plugin = plugin
    self._collection = collection.replace('-', '_')
    self._resource = resource.replace('-', '_')
    self._attr_info = attr_info
    self._allow_bulk = allow_bulk
    self._allow_pagination = allow_pagination
    self._allow_sorting = allow_sorting
    self._native_bulk = self._is_native_bulk_supported()
    self._native_pagination = self._is_native_pagination_supported()
    self._native_sorting = self._is_native_sorting_supported()
    self._policy_attrs = [attr for attr, spec in self._attr_info.items()
                          if spec.get('required_by_policy')]
    self._notifier = n_rpc.get_notifier('network')
    if cfg.CONF.notify_nova_on_port_data_changes:
        # Imported lazily so the nova notifier is only a dependency
        # when the option is on.
        from neutron.notifiers import nova
        self._nova_notifier = nova.Notifier()
    self._member_actions = member_actions
    self._primary_key = self._get_primary_key()
    if self._allow_pagination and self._native_pagination:
        # Native pagination need native sorting support
        if not self._native_sorting:
            raise exceptions.Invalid(
                _("Native pagination depend on native sorting"))
        if not self._allow_sorting:
            LOG.info(_LI("Allow sorting is enabled because native "
                         "pagination requires native sorting"))
            self._allow_sorting = True
    if parent:
        member = parent['member_name']
        self._parent_id_name = '%s_id' % member
        parent_part = '_%s' % member
    else:
        self._parent_id_name = None
        parent_part = ''
    self._plugin_handlers = {
        self.LIST: 'get%s_%s' % (parent_part, self._collection),
        self.SHOW: 'get%s_%s' % (parent_part, self._resource),
    }
    for verb in (self.CREATE, self.UPDATE, self.DELETE):
        self._plugin_handlers[verb] = '%s%s_%s' % (
            verb, parent_part, self._resource)
def update_status(self, context, obj_type, obj_id, status):
    """Persist a status reported by the agent for an LB object.

    Only 'loadbalancer' rows are actually updated (and a
    'loadbalancer.update.end' notification emitted when the status
    changed).  NOTE(review): 'listener' passes validation but falls
    through to the "not supported" warning -- looks unintended,
    confirm before changing.
    """
    model_mapping = {
        'loadbalancer': loadbalancer_dbv2.LoadBalancer,
        'listener': loadbalancer_dbv2.Listener,
    }
    if obj_type not in model_mapping:
        raise n_exc.Invalid(_('Unknown object type: %s') % obj_type)
    try:
        if obj_type != 'loadbalancer':
            LOG.warning(
                _('Cannot update status: %(obj_type)s %(obj_id)s '
                  'the object type not supported'),
                {'obj_type': obj_type, 'obj_id': obj_id})
            return
        # Fetch first so we can compare the pre-update status below.
        lb = self.plugin.db.get_loadbalancer(context, obj_id)
        self.plugin.db.update_status(context, model_mapping[obj_type],
                                     obj_id, status)
        LOG.debug(
            _('update status: %(obj_type)s %(obj_id)s %(status)s '),
            {'obj_type': obj_type, 'obj_id': obj_id, 'status': status})
        if lb and lb.status != status:
            LOG.info(
                _('update status: %(obj_type)s %(obj_id)s %(status)s notified'),
                {'obj_type': obj_type, 'obj_id': obj_id, 'status': status})
            n_rpc.get_notifier('loadbalancer').info(
                context, 'loadbalancer.update.end', lb.to_dict())
    except n_exc.NotFound:
        # update_status may come from agent on an object which was
        # already deleted from db with other request
        LOG.warning(
            _('Cannot update status: %(obj_type)s %(obj_id)s '
              'not found in the DB, it was probably deleted '
              'concurrently'),
            {'obj_type': obj_type, 'obj_id': obj_id})
def main():
    """Emit '<resource>.exists' events for every current resource."""
    config.init(sys.argv[1:])
    config.setup_logging()
    cxt = context.get_admin_context()
    plugin = manager.NeutronManager.get_plugin()
    notifier = n_rpc.get_notifier('network')
    resources = (
        ('network', plugin.get_networks),
        ('subnet', plugin.get_subnets),
        ('port', plugin.get_ports),
        ('router', plugin.get_routers),
        ('floatingip', plugin.get_floatingips),
    )
    for name, getter in resources:
        for obj in getter(cxt):
            notifier.info(cxt, '%s.exists' % name, {name: obj})
def collect_stats_for_monitor(self):
    """Poll driver stats per load balancer and publish them.

    When statistic notifications are enabled, strips per-member
    details, tags the stats with the owning tenant and emits a
    'loadbalancer.meter' event.  Driver errors are logged and the
    loop continues with the next load balancer.
    """
    for loadbalancer_id, driver_name in self.instance_mapping.items():
        driver = self.device_drivers[driver_name]
        tenant_id = self.label_tenant_id.get(loadbalancer_id, None)
        try:
            stats = driver.get_stats(loadbalancer_id)
            if stats and self.conf.statistic_notification_enable:
                stats.pop('members', None)
                stats['tenant_id'] = tenant_id
                n_rpc.get_notifier('loadbalancer').info(
                    self.context, 'loadbalancer.meter', stats)
                LOG.info(_('collect_stats_for_monitor'
                           'loadbalancer %(id)s stats %(stats)s .'),
                         {'id': loadbalancer_id, 'stats': stats})
        except Exception:
            LOG.exception(
                _('Error updating statistics on loadbalancer %s'),
                loadbalancer_id)
def _metering_notification(self):
    """Emit one 'l3.meter' event per label, then zero its counters."""
    for label_id, info in self.metering_infos.items():
        data = dict(label_id=label_id,
                    tenant_id=self.label_tenant_id.get(label_id),
                    pkts=info['pkts'],
                    bytes=info['bytes'],
                    time=info['time'],
                    first_update=info['first_update'],
                    last_update=info['last_update'],
                    host=self.host)
        LOG.debug("Send metering report: %s", data)
        n_rpc.get_notifier('metering').info(self.context, 'l3.meter', data)
        # Counters are deltas: reset them after each report.
        info.update(pkts=0, bytes=0, time=0)
def main():
    """Publish '<resource>.exists' for every resource, pulling router
    and floating-IP data from the l3 service plugin."""
    config.init(sys.argv[1:])
    config.setup_logging()
    cxt = context.get_admin_context()
    plugin = manager.NeutronManager.get_plugin()
    l3_plugin = manager.NeutronManager.get_service_plugins().get(
        constants.L3_ROUTER_NAT)
    notifier = n_rpc.get_notifier("network")
    resources = (
        ("network", plugin.get_networks),
        ("subnet", plugin.get_subnets),
        ("port", plugin.get_ports),
        ("router", l3_plugin.get_routers),
        ("floatingip", l3_plugin.get_floatingips),
    )
    for name, getter in resources:
        for obj in getter(cxt):
            notifier.info(cxt, "%s.exists" % name, {name: obj})
def _set_member_status(self, context, loadbalancer, members_stats):
    """Sync member ACTIVE/INACTIVE state from driver stats, notifying
    'member.update.end' when a member's reported status changed."""
    for member in self._get_members(loadbalancer):
        if member.id not in members_stats:
            continue
        status = members_stats[member.id].get('status')
        old_status = self.db.get_member_status_info(context, member.id)
        new_status = (constants.ACTIVE
                      if status and status == constants.ACTIVE
                      else constants.INACTIVE)
        self.db.update_status(
            context, models.MemberV2, member.id, new_status)
        if old_status != status:
            LOG.info(
                _('kiki_set_member_status: %(obj_id)s %(status)s notified'),
                {'obj_id': member.id, 'status': status})
            notifier = n_rpc.get_notifier('loadbalancer')
            notifier.info(context, 'member.update.end', {'id': member.id})
def update_port_sg(self, request, id, body):
    """Replace a port's security groups.

    Expects a body shaped like
    {"port": {"security_groups": ["<sg-uuid>", ...]}} and emits
    'portsg.update.start'/'.end' notifications around the update.
    """
    context = request.context
    core_plugin = manager.NeutronManager.get_plugin()
    notifier = n_rpc.get_notifier('network')
    if "port" not in body or "security_groups" not in body['port']:
        raise webob.exc.HTTPBadRequest(_("Invalid input %s") % body)
    # Forward only the security_groups attribute to the core plugin.
    new_body = {
        'port': {'security_groups': body['port']['security_groups']}}
    payload = new_body.copy()
    payload['id'] = id
    notifier.info(context, 'portsg.update.start', payload)
    result = core_plugin.update_port(context, id, new_body)
    notifier.info(context, 'portsg.update.end', {'port': result})
    return result
def _metering_notification(self):
    """Report accumulated per-label metering data and reset deltas."""
    for label_id, info in self.metering_infos.items():
        data = {
            "label_id": label_id,
            "tenant_id": self.label_tenant_id.get(label_id),
            "pkts": info["pkts"],
            "bytes": info["bytes"],
            "time": info["time"],
            "first_update": info["first_update"],
            "last_update": info["last_update"],
            "host": self.host,
        }
        LOG.debug("Send metering report: %s", data)
        notifier = n_rpc.get_notifier("metering")
        notifier.info(self.context, "l3.meter", data)
        # Zero the delta counters once they have been reported.
        for counter in ("pkts", "bytes", "time"):
            info[counter] = 0
def notify_router_interface_action(
        self, context, router_id, tenant_id, port_id, subnet_id, action):
    """Notify l3 agents and emit router.interface.{create,delete}.

    *action* is expected to be 'add_router_interface' or
    'remove_router_interface'.
    """
    self.l3_rpc_notifier.routers_updated(
        context, [router_id], action, {'subnet_id': subnet_id})
    if action == 'add_router_interface':
        router_event = 'router.interface.create'
    elif action == 'remove_router_interface':
        router_event = 'router.interface.delete'
    info = dict(id=router_id,
                tenant_id=tenant_id,
                port_id=port_id,
                subnet_id=subnet_id)
    # NOTE(review): an unexpected action leaves router_event unbound
    # (NameError) -- matches the historical behavior on purpose.
    n_rpc.get_notifier('network').info(
        context, router_event, {'router_interface': info})
    return info
def collect_stats_for_monitor(self):
    """Gather per-LB stats from the device drivers and, when enabled,
    publish 'loadbalancer.meter' events tagged with the tenant."""
    for lb_id, driver_name in self.instance_mapping.items():
        device_driver = self.device_drivers[driver_name]
        owner = self.label_tenant_id.get(lb_id, None)
        try:
            stats = device_driver.get_stats(lb_id)
            if stats and self.conf.statistic_notification_enable:
                # Member details are not part of the metering payload.
                stats.pop('members', None)
                stats['tenant_id'] = owner
                notifier = n_rpc.get_notifier('loadbalancer')
                notifier.info(self.context, 'loadbalancer.meter', stats)
                LOG.info(_('collect_stats_for_monitor'
                           'loadbalancer %(id)s stats %(stats)s .'),
                         {'id': lb_id, 'stats': stats})
        except Exception:
            LOG.exception(
                _('Error updating statistics on loadbalancer %s'), lb_id)
def update_port_sg(self, request, id, body):
    """Update the security_groups attribute of a port.

    # body like this:
    # {"port": {"security_groups":
    #           ["d85a5172-2929-48eb-84cb-a6f6defaeb2e"]}}
    """
    context = request.context
    core_plugin = manager.NeutronManager.get_plugin()
    _notifier = n_rpc.get_notifier('network')
    sg_list = body.get('port', {}).get('security_groups') \
        if isinstance(body, dict) else None
    if "port" not in body or "security_groups" not in body['port']:
        msg = _("Invalid input %s") % body
        raise webob.exc.HTTPBadRequest(msg)
    new_body = {'port': {'security_groups': sg_list}}
    payload = new_body.copy()
    payload['id'] = id
    _notifier.info(context, 'portsg.update.start', payload)
    result = core_plugin.update_port(context, id, new_body)
    _notifier.info(context, 'portsg.update.end', {'port': result})
    return result
def add_router_interface(self, context, router_id, interface_info):
    """Plug a new interface into a router.

    The interface may be given by an existing port or by a subnet;
    l3 agents are notified and a 'router.interface.create' event is
    published.
    """
    add_by_port, add_by_sub = self._validate_interface_info(interface_info)
    device_owner = self._get_device_owner(context, router_id)
    if add_by_port:
        port = self._add_interface_by_port(
            context, router_id, interface_info['port_id'], device_owner)
    elif add_by_sub:
        port = self._add_interface_by_subnet(
            context, router_id, interface_info['subnet_id'], device_owner)
    self.l3_rpc_notifier.routers_updated(
        context, [router_id], 'add_router_interface')
    info = {'id': router_id,
            'tenant_id': port['tenant_id'],
            'port_id': port['id'],
            'subnet_id': port['fixed_ips'][0]['subnet_id']}
    notifier = n_rpc.get_notifier('network')
    notifier.info(context, 'router.interface.create',
                  {'router_interface': info})
    return info
def _update_status_by_agent(self, ctx, service_status_info_list):
    """Updating vpnservice and vpnconnection status.

    :param ctx: admin/database context
    :param service_status_info_list: list of status dicts, each
        {id: openvpnconn_id, tenant_id: tenant_id,
         status: ACTIVE|DOWN|ERROR}

    The agent will set updated_pending_status as True,
    when agent update any pending status.
    """
    _resource = 'openvpnconnection'
    notifier = n_rpc.get_notifier('network')
    for status in service_status_info_list:
        _ctx = context.Context('', status['tenant_id'])
        payload = {'id': status['id']}
        notifier.info(_ctx, _resource + '.update.start', payload)
    updated_openvpn_dict = []
    with ctx.session.begin(subtransactions=True):
        # Fix: the loop variable used to be named 'openvpn', shadowing
        # the 'openvpn' module referenced in the except clause below,
        # and the warn line read an undefined name 'vpnservice' -- so
        # the not-found path raised instead of skipping.
        # NOTE(review): assumes a module named 'openvpn' exporting
        # OpenVPNServiceNotFound is imported at file top -- confirm.
        for conn in service_status_info_list:
            try:
                openvpn_db = self._get_openvpnconnection_db(
                    ctx, conn['id'])
            except openvpn.OpenVPNServiceNotFound:
                LOG.warn(_('vpnservice %s in db is already deleted'),
                         conn['id'])
                continue
            openvpn_db.status = conn['status']
            vpn_dict = self._make_openvpnconnection_dict(openvpn_db)
            updated_openvpn_dict.append(vpn_dict)
    notifier_method = _resource + '.update.end'
    for vpn_dict in updated_openvpn_dict:
        _ctx = context.Context('', vpn_dict['tenant_id'])
        result = {_resource: vpn_dict}
        notifier.info(_ctx, notifier_method, result)
def _update_status_by_agent(self, ctx, service_status_info_list):
    """Updating vpnservice and vpnconnection status.

    :param ctx: database context
    :param service_status_info_list: list of status dicts, each
        {id: ptpconn_id, tenant_id: tenant_id,
         status: ACTIVE|DOWN|ERROR}

    The agent will set updated_pending_status as True,
    when agent update any pending status.
    """
    _resource = 'pptpconnection'
    notifier = n_rpc.get_notifier('network')
    # Announce the start of the update for each reported connection.
    for status in service_status_info_list:
        tenant_ctx = context.Context('', status['tenant_id'])
        notifier.info(tenant_ctx, _resource + '.update.start',
                      {'id': status['id']})
    updated_pptp_dict = []
    with ctx.session.begin(subtransactions=True):
        for vpnservice in service_status_info_list:
            try:
                vpnservice_db = self._get_pptpconnection_db(
                    ctx, vpnservice['id'])
            except pptpvpnaas.PPTPVPNServiceNotFound:
                LOG.warn(_('vpnservice %s in db is already deleted'),
                         vpnservice['id'])
                continue
            vpnservice_db.status = vpnservice['status']
            updated_pptp_dict.append(
                self._make_pptpconnection_dict(vpnservice_db))
    notifier_method = _resource + '.update.end'
    for vpn_dict in updated_pptp_dict:
        tenant_ctx = context.Context('', vpn_dict['tenant_id'])
        notifier.info(tenant_ctx, notifier_method, {_resource: vpn_dict})
def update_status(self, context, obj_type, obj_id, status):
    """Record the agent-reported status of an LB object.

    NOTE(review): only 'loadbalancer' is actually handled here even
    though 'listener' passes validation -- confirm whether that is
    intentional before extending.
    """
    model_mapping = {"loadbalancer": loadbalancer_dbv2.LoadBalancer,
                     "listener": loadbalancer_dbv2.Listener}
    if obj_type not in model_mapping:
        raise n_exc.Invalid(_("Unknown object type: %s") % obj_type)
    try:
        if obj_type == "loadbalancer":
            # Read current row first to detect an actual change.
            lb_obj = self.plugin.db.get_loadbalancer(context, obj_id)
            self.plugin.db.update_status(
                context, model_mapping[obj_type], obj_id, status)
            LOG.debug(
                _("update status: %(obj_type)s %(obj_id)s %(status)s "),
                {"obj_type": obj_type, "obj_id": obj_id,
                 "status": status})
            if lb_obj and lb_obj.status != status:
                LOG.info(
                    _("update status: %(obj_type)s %(obj_id)s %(status)s notified"),
                    {"obj_type": obj_type, "obj_id": obj_id,
                     "status": status})
                notifier = n_rpc.get_notifier("loadbalancer")
                notifier.info(context, "loadbalancer.update.end",
                              lb_obj.to_dict())
        else:
            LOG.warning(
                _("Cannot update status: %(obj_type)s %(obj_id)s "
                  "the object type not supported"),
                {"obj_type": obj_type, "obj_id": obj_id})
    except n_exc.NotFound:
        # update_status may come from agent on an object which was
        # already deleted from db with other request
        LOG.warning(
            _("Cannot update status: %(obj_type)s %(obj_id)s "
              "not found in the DB, it was probably deleted "
              "concurrently"),
            {"obj_type": obj_type, "obj_id": obj_id})
def _notify_firewall_updates(self, context, resource, update_info):
    """Publish a firewall update event on the 'network' notifier."""
    n_rpc.get_notifier('network').info(context, resource, update_info)
def _notifier(self):
    """Return the 'network' notifier, creating and caching it lazily."""
    try:
        return self._notifier_inst
    except AttributeError:
        self._notifier_inst = n_rpc.get_notifier('network')
        return self._notifier_inst
def notify(context, action, network_id, agent_id):
    """Emit a DHCP-agent scheduling event for a network."""
    payload = {'agent': {'id': agent_id, 'network_id': network_id}}
    n_rpc.get_notifier('network').info(context, action, payload)
def notify(context, action, router_id, agent_id):
    """Publish a router/L3-agent scheduling notification.

    :param context: neutron api request context
    :param action: event type string to publish under
    :param router_id: router involved in the (un)scheduling
    :param agent_id: agent involved in the (un)scheduling
    """
    payload = {'agent': {'id': agent_id, 'router_id': router_id}}
    n_rpc.get_notifier('router').info(context, action, payload)
def __init__(self, plugin):
    """Bind to *plugin*, start ACTIVE, and prepare a 'network' notifier."""
    self._notifier = n_rpc.get_notifier('network')
    self.plugin = plugin
    self.status = constants.ACTIVE
def notify(context, action, hosting_device_id, cfg_agent_id):
    """Publish a hosting-device/config-agent scheduling notification.

    :param context: neutron api request context
    :param action: event type string to publish under
    :param hosting_device_id: hosting device being (un)scheduled
    :param cfg_agent_id: config agent involved in the (un)scheduling
    """
    payload = {'cfg_agent': {'id': cfg_agent_id,
                             'hosting_device_id': hosting_device_id}}
    n_rpc.get_notifier('hosting_device').info(context, action, payload)
def create_subnet(context, subnet):
    """Create a subnet.

    Create a subnet which represents a range of IP addresses that can be
    allocated to devices.

    :param context: neutron api request context
    :param subnet: dictionary describing the subnet, with keys as listed
        in the RESOURCE_ATTRIBUTE_MAP object in
        neutron/api/v2/attributes.py.  All keys will be populated.
    :returns: the created subnet as a dict (with ``gateway_ip`` set)
    """
    LOG.info("create_subnet for tenant %s" % context.tenant_id)
    net_id = subnet["subnet"]["network_id"]
    with context.session.begin():
        net = db_api.network_find(context, None, None, None, False,
                                  id=net_id, scope=db_api.ONE)
        if not net:
            raise exceptions.NetworkNotFound(net_id=net_id)
        sub_attrs = subnet["subnet"]
        # Attributes callers may never set, and attributes only an admin
        # may set; filter_body strips/authorizes them.
        always_pop = ["enable_dhcp", "ip_version", "first_ip", "last_ip",
                      "_cidr"]
        admin_only = ["segment_id", "do_not_use", "created_at",
                      "next_auto_assign_ip"]
        utils.filter_body(context, sub_attrs, admin_only, always_pop)
        _validate_subnet_cidr(context, net_id, sub_attrs["cidr"])
        cidr = netaddr.IPNetwork(sub_attrs["cidr"])
        err_vals = {'cidr': sub_attrs["cidr"], 'network_id': net_id}
        err = _("Requested subnet with cidr: %(cidr)s for "
                "network: %(network_id)s. Prefix is too small, must be a "
                "larger subnet. "
                "A prefix less than /%(prefix)s is required.")
        # Reject prefixes too small to be usable: IPv6 longer than /64,
        # IPv4 longer than /30.
        if cidr.version == 6 and cidr.prefixlen > 64:
            err_vals["prefix"] = 65
            err_msg = err % err_vals
            raise exceptions.InvalidInput(error_message=err_msg)
        elif cidr.version == 4 and cidr.prefixlen > 30:
            err_vals["prefix"] = 31
            err_msg = err % err_vals
            raise exceptions.InvalidInput(error_message=err_msg)
        # Enforce subnet quotas
        net_subnets = get_subnets(context,
                                  filters=dict(network_id=net_id))
        if not context.is_admin:
            v4_count, v6_count = 0, 0
            for subnet in net_subnets:
                if netaddr.IPNetwork(subnet['cidr']).version == 6:
                    v6_count += 1
                else:
                    v4_count += 1
            if cidr.version == 6:
                tenant_quota_v6 = context.session.query(qdv.Quota).filter_by(
                    tenant_id=context.tenant_id,
                    resource='v6_subnets_per_network').first()
                # NOTE(review): .first() returns a Quota row or None,
                # never -1, so this comparison is always true -- confirm
                # whether the row's limit value was meant to be compared.
                if tenant_quota_v6 != -1:
                    quota.QUOTAS.limit_check(
                        context, context.tenant_id,
                        v6_subnets_per_network=v6_count + 1)
            else:
                tenant_quota_v4 = context.session.query(qdv.Quota).filter_by(
                    tenant_id=context.tenant_id,
                    resource='v4_subnets_per_network').first()
                # NOTE(review): same always-true comparison as the v6
                # branch above.
                if tenant_quota_v4 != -1:
                    quota.QUOTAS.limit_check(
                        context, context.tenant_id,
                        v4_subnets_per_network=v4_count + 1)
        # See RM981. The default behavior of setting a gateway unless
        # explicitly asked to not is no longer desirable.
        gateway_ip = utils.pop_param(sub_attrs, "gateway_ip")
        dns_ips = utils.pop_param(sub_attrs, "dns_nameservers", [])
        host_routes = utils.pop_param(sub_attrs, "host_routes", [])
        allocation_pools = utils.pop_param(sub_attrs, "allocation_pools",
                                           None)
        sub_attrs["network"] = net
        new_subnet = db_api.subnet_create(context, **sub_attrs)
        cidrs = []
        alloc_pools = allocation_pool.AllocationPools(sub_attrs["cidr"],
                                                      allocation_pools)
        if isinstance(allocation_pools, list):
            cidrs = alloc_pools.get_policy_cidrs()
            quota.QUOTAS.limit_check(
                context, context.tenant_id,
                alloc_pools_per_subnet=len(alloc_pools))
        ip_policies.ensure_default_policy(cidrs, [new_subnet])
        new_subnet["ip_policy"] = db_api.ip_policy_create(context,
                                                          exclude=cidrs)
        quota.QUOTAS.limit_check(context, context.tenant_id,
                                 routes_per_subnet=len(host_routes))
        default_route = None
        for route in host_routes:
            netaddr_route = netaddr.IPNetwork(route["destination"])
            # A host route whose destination is 0.0.0.0/0 acts as the
            # default route; at most one is allowed, and its nexthop
            # overrides any supplied gateway_ip.
            if netaddr_route.value == routes.DEFAULT_ROUTE.value:
                if default_route:
                    raise q_exc.DuplicateRouteConflict(
                        subnet_id=new_subnet["id"])
                default_route = route
                gateway_ip = default_route["nexthop"]
                alloc_pools.validate_gateway_excluded(gateway_ip)
            new_subnet["routes"].append(db_api.route_create(
                context, cidr=route["destination"],
                gateway=route["nexthop"]))
        quota.QUOTAS.limit_check(context, context.tenant_id,
                                 dns_nameservers_per_subnet=len(dns_ips))
        for dns_ip in dns_ips:
            new_subnet["dns_nameservers"].append(db_api.dns_create(
                context, ip=netaddr.IPAddress(dns_ip)))
        # if the gateway_ip is IN the cidr for the subnet and NOT excluded by
        # policies, we should raise a 409 conflict
        if gateway_ip and default_route is None:
            alloc_pools.validate_gateway_excluded(gateway_ip)
            new_subnet["routes"].append(db_api.route_create(
                context, cidr=str(routes.DEFAULT_ROUTE),
                gateway=gateway_ip))
    subnet_dict = v._make_subnet_dict(new_subnet)
    subnet_dict["gateway_ip"] = gateway_ip
    # Billing/audit event for the new IP block.
    n_rpc.get_notifier("network").info(
        context, "ip_block.create",
        dict(tenant_id=subnet_dict["tenant_id"],
             ip_block_id=subnet_dict["id"],
             created_at=new_subnet["created_at"]))
    return subnet_dict
def create_subnet(context, subnet):
    """Create a subnet.

    Create a subnet which represents a range of IP addresses that can be
    allocated to devices.

    NOTE(review): this is a near-identical duplicate of an earlier
    create_subnet definition in this file; the later definition shadows
    the earlier one at import time -- confirm which copy is intended.

    :param context: neutron api request context
    :param subnet: dictionary describing the subnet, with keys as listed
        in the RESOURCE_ATTRIBUTE_MAP object in
        neutron/api/v2/attributes.py.  All keys will be populated.
    :returns: the created subnet as a dict (with ``gateway_ip`` set)
    """
    LOG.info("create_subnet for tenant %s" % context.tenant_id)
    net_id = subnet["subnet"]["network_id"]
    with context.session.begin():
        net = db_api.network_find(context, None, None, None, False,
                                  id=net_id, scope=db_api.ONE)
        if not net:
            raise exceptions.NetworkNotFound(net_id=net_id)
        sub_attrs = subnet["subnet"]
        # Attributes callers may never set, and attributes only an admin
        # may set; filter_body strips/authorizes them.
        always_pop = [
            "enable_dhcp", "ip_version", "first_ip", "last_ip", "_cidr"
        ]
        admin_only = [
            "segment_id", "do_not_use", "created_at",
            "next_auto_assign_ip"
        ]
        utils.filter_body(context, sub_attrs, admin_only, always_pop)
        _validate_subnet_cidr(context, net_id, sub_attrs["cidr"])
        cidr = netaddr.IPNetwork(sub_attrs["cidr"])
        err_vals = {'cidr': sub_attrs["cidr"], 'network_id': net_id}
        err = _("Requested subnet with cidr: %(cidr)s for "
                "network: %(network_id)s. Prefix is too small, must be a "
                "larger subnet. "
                "A prefix less than /%(prefix)s is required.")
        # Reject prefixes too small to be usable: IPv6 longer than /64,
        # IPv4 longer than /30.
        if cidr.version == 6 and cidr.prefixlen > 64:
            err_vals["prefix"] = 65
            err_msg = err % err_vals
            raise exceptions.InvalidInput(error_message=err_msg)
        elif cidr.version == 4 and cidr.prefixlen > 30:
            err_vals["prefix"] = 31
            err_msg = err % err_vals
            raise exceptions.InvalidInput(error_message=err_msg)
        # Enforce subnet quotas
        net_subnets = get_subnets(context,
                                  filters=dict(network_id=net_id))
        if not context.is_admin:
            v4_count, v6_count = 0, 0
            for subnet in net_subnets:
                if netaddr.IPNetwork(subnet['cidr']).version == 6:
                    v6_count += 1
                else:
                    v4_count += 1
            if cidr.version == 6:
                tenant_quota_v6 = context.session.query(qdv.Quota).filter_by(
                    tenant_id=context.tenant_id,
                    resource='v6_subnets_per_network').first()
                # NOTE(review): .first() returns a Quota row or None,
                # never -1, so this comparison is always true -- confirm
                # whether the row's limit value was meant to be compared.
                if tenant_quota_v6 != -1:
                    quota.QUOTAS.limit_check(context, context.tenant_id,
                                             v6_subnets_per_network=v6_count + 1)
            else:
                tenant_quota_v4 = context.session.query(qdv.Quota).filter_by(
                    tenant_id=context.tenant_id,
                    resource='v4_subnets_per_network').first()
                # NOTE(review): same always-true comparison as the v6
                # branch above.
                if tenant_quota_v4 != -1:
                    quota.QUOTAS.limit_check(context, context.tenant_id,
                                             v4_subnets_per_network=v4_count + 1)
        # See RM981. The default behavior of setting a gateway unless
        # explicitly asked to not is no longer desirable.
        gateway_ip = utils.pop_param(sub_attrs, "gateway_ip")
        dns_ips = utils.pop_param(sub_attrs, "dns_nameservers", [])
        host_routes = utils.pop_param(sub_attrs, "host_routes", [])
        allocation_pools = utils.pop_param(sub_attrs, "allocation_pools",
                                           None)
        sub_attrs["network"] = net
        new_subnet = db_api.subnet_create(context, **sub_attrs)
        cidrs = []
        alloc_pools = allocation_pool.AllocationPools(sub_attrs["cidr"],
                                                      allocation_pools)
        if isinstance(allocation_pools, list):
            cidrs = alloc_pools.get_policy_cidrs()
            quota.QUOTAS.limit_check(context, context.tenant_id,
                                     alloc_pools_per_subnet=len(alloc_pools))
        ip_policies.ensure_default_policy(cidrs, [new_subnet])
        new_subnet["ip_policy"] = db_api.ip_policy_create(context,
                                                          exclude=cidrs)
        quota.QUOTAS.limit_check(context, context.tenant_id,
                                 routes_per_subnet=len(host_routes))
        default_route = None
        for route in host_routes:
            netaddr_route = netaddr.IPNetwork(route["destination"])
            # A host route whose destination is 0.0.0.0/0 acts as the
            # default route; at most one is allowed, and its nexthop
            # overrides any supplied gateway_ip.
            if netaddr_route.value == routes.DEFAULT_ROUTE.value:
                if default_route:
                    raise q_exc.DuplicateRouteConflict(
                        subnet_id=new_subnet["id"])
                default_route = route
                gateway_ip = default_route["nexthop"]
                alloc_pools.validate_gateway_excluded(gateway_ip)
            new_subnet["routes"].append(
                db_api.route_create(context, cidr=route["destination"],
                                    gateway=route["nexthop"]))
        quota.QUOTAS.limit_check(context, context.tenant_id,
                                 dns_nameservers_per_subnet=len(dns_ips))
        for dns_ip in dns_ips:
            new_subnet["dns_nameservers"].append(
                db_api.dns_create(context, ip=netaddr.IPAddress(dns_ip)))
        # if the gateway_ip is IN the cidr for the subnet and NOT excluded by
        # policies, we should raise a 409 conflict
        if gateway_ip and default_route is None:
            alloc_pools.validate_gateway_excluded(gateway_ip)
            new_subnet["routes"].append(
                db_api.route_create(context, cidr=str(routes.DEFAULT_ROUTE),
                                    gateway=gateway_ip))
    subnet_dict = v._make_subnet_dict(new_subnet)
    subnet_dict["gateway_ip"] = gateway_ip
    # Billing/audit event for the new IP block.
    n_rpc.get_notifier("network").info(
        context, "ip_block.create",
        dict(tenant_id=subnet_dict["tenant_id"],
             ip_block_id=subnet_dict["id"],
             created_at=new_subnet["created_at"]))
    return subnet_dict
def __init__(self, plugin, collection, resource, attr_info,
             allow_bulk=False, member_actions=None, parent=None,
             allow_pagination=False, allow_sorting=False):
    """Controller initialization (added by luoyibing).

    :param plugin: plugin object, e.g. carrying
        agent_notifiers ({"DHCP agent": ..., "L3 agent": ...,
        "Loadbalancer agent": ...}), extension_manager,
        mechanism_manager, network scheduler, etc.
    :param collection: plural resource name, e.g. "subnets"
    :param resource: singular resource name, e.g. "subnet"
    :param attr_info: attribute map for the resource
    :param allow_bulk: whether bulk create is allowed
    :param member_actions: extra per-member actions for the resource
    :param parent: parent resource descriptor (dict with 'member_name')
        or None
    :param allow_pagination: whether pagination is allowed
    :param allow_sorting: whether sorting is allowed
    """
    if member_actions is None:
        member_actions = []
    self._plugin = plugin
    # Neutron API uses dashes in URLs but underscores internally.
    self._collection = collection.replace('-', '_')
    self._resource = resource.replace('-', '_')
    self._attr_info = attr_info
    self._allow_bulk = allow_bulk
    self._allow_pagination = allow_pagination
    self._allow_sorting = allow_sorting
    self._native_bulk = self._is_native_bulk_supported()
    self._native_pagination = self._is_native_pagination_supported()
    self._native_sorting = self._is_native_sorting_supported()
    # Attributes that policy checks need to see on every request body.
    self._policy_attrs = [
        name for (name, info) in self._attr_info.items()
        if info.get('required_by_policy')
    ]
    # Build the publisher that emits notifications under the 'network'
    # service name (implemented by the prepare() call in notifier.py).
    self._notifier = n_rpc.get_notifier(
        'network'
    )
    # use plugin's dhcp notifier, if this is already instantiated
    # (fetch the agent_notifiers registry from the plugin)
    agent_notifiers = getattr(plugin, 'agent_notifiers', {})
    # Take the DHCP agent notifier from the registry, or build a fresh
    # DhcpAgentNotifyAPI if none is registered.
    self._dhcp_agent_notifier = (
        agent_notifiers.get(const.AGENT_TYPE_DHCP) or
        dhcp_rpc_agent_api.DhcpAgentNotifyAPI())
    # Option from the neutron.conf configuration file.
    if cfg.CONF.notify_nova_on_port_data_changes:
        from neutron.notifiers import nova
        # Notifier used to inform the compute service of port changes.
        self._nova_notifier = nova.Notifier()
    self._member_actions = member_actions
    self._primary_key = self._get_primary_key()
    if self._allow_pagination and self._native_pagination:
        # Native pagination need native sorting support
        if not self._native_sorting:
            raise exceptions.Invalid(
                _("Native pagination depend on native sorting"))
        if not self._allow_sorting:
            LOG.info(
                _("Allow sorting is enabled because native "
                  "pagination requires native sorting"))
            self._allow_sorting = True
    if parent:
        self._parent_id_name = '%s_id' % parent['member_name']
        parent_part = '_%s' % parent['member_name']
    else:
        self._parent_id_name = None
        parent_part = ''
    self._plugin_handlers = {
        self.LIST: 'get%s_%s' % (parent_part, self._collection),
        self.SHOW: 'get%s_%s' % (parent_part, self._resource)
    }
    # Build the plugin handler method names for the remaining actions
    # (create/update/delete).
    for action in [self.CREATE, self.UPDATE, self.DELETE]:
        self._plugin_handlers[action] = '%s%s_%s' % (
            action, parent_part, self._resource)
def notify(context, action, router_id, hosting_device_id):
    """Publish a router/hosting-device scheduling notification.

    :param context: neutron api request context
    :param action: event type string to publish under
    :param router_id: router involved in the (un)scheduling
    :param hosting_device_id: hosting device involved
    """
    payload = {'hosting_device': {'id': hosting_device_id,
                                  'router_id': router_id}}
    n_rpc.get_notifier('router').info(context, action, payload)