def sync_routers(self, context, **kwargs):
    """Sync routers according to filters to a specific agent.

    @param context: contain user information
    @param kwargs: host, router_ids
    @return: a list of routers with their interfaces and floating_ips
    """
    router_ids = kwargs.get('router_ids')
    host = kwargs.get('host')
    # Replace the caller's context with an admin one so routers of all
    # tenants are visible to the sync.
    context = neutron_context.get_admin_context()
    if not self.l3plugin:
        routers = {}
        LOG.error(_('No plugin for L3 routing registered! Will reply '
                    'to l3 agent with empty router dictionary.'))
    elif utils.is_extension_supported(
            self.l3plugin, constants.L3_AGENT_SCHEDULER_EXT_ALIAS):
        # Optionally auto-schedule unhosted routers onto this agent,
        # then return only routers actively bound to it.
        if cfg.CONF.router_auto_schedule:
            self.l3plugin.auto_schedule_routers(context, host, router_ids)
        routers = (
            self.l3plugin.list_active_sync_routers_on_active_l3_agent(
                context, host, router_ids))
    else:
        routers = self.l3plugin.get_sync_data(context, router_ids)
    if utils.is_extension_supported(
            self.plugin, constants.PORT_BINDING_EXT_ALIAS):
        # Presumably marks the router ports as bound to the requesting
        # host so L2 wiring can proceed — verify in _ensure_host_set_on_ports.
        self._ensure_host_set_on_ports(context, host, routers)
    LOG.debug(_("Routers returned to l3 agent:\n %s"),
              jsonutils.dumps(routers, indent=5))
    return routers
def sync_routers(self, context, **kwargs):
    """Sync routers according to filters to a specific agent.

    @param context: contain user information
    @param kwargs: host, router_ids
    @return: a list of routers with their interfaces and floating_ips
    """
    router_ids = kwargs.get('router_ids')
    host = kwargs.get('host')
    # Elevate to admin so routers of all tenants are visible.
    context = neutron_context.get_admin_context()
    if utils.is_extension_supported(
            self.l3plugin, constants.L3_AGENT_SCHEDULER_EXT_ALIAS):
        routers = (
            self.l3plugin.list_active_sync_routers_on_active_l3_agent(
                context, host, router_ids))
    else:
        routers = self.l3plugin.get_sync_data(context, router_ids)
    if utils.is_extension_supported(
            self.plugin, constants.PORT_BINDING_EXT_ALIAS):
        self._ensure_host_set_on_ports(context, host, routers)
    # DelayedStringRenderer defers the expensive JSON dump until the
    # message is actually emitted at DEBUG level.
    LOG.debug("Routers returned to l3 agent:\n %s",
              utils.DelayedStringRenderer(jsonutils.dumps,
                                          routers, indent=5))
    return routers
def _add_az_to_response(router_res, router_db):
    """Attach availability zone info to a router response dict.

    Does nothing when the L3 plugin lacks the
    'router_availability_zone' extension.
    """
    plugin = directory.get_plugin(constants.L3)
    if utils.is_extension_supported(plugin, 'router_availability_zone'):
        router_res['availability_zones'] = (
            plugin.get_router_availability_zones(router_db))
def update_host(self, context, id, host):
    """Update a host record and (de)schedule its agents accordingly.

    :param context: request context
    :param id: host identifier
    :param host: dict of host fields to update
    :returns: the updated host
    """
    LOG.warning(("update_host id={}, host={}").format(id, host))
    data = super(HostSchedulerDbMixin, self).update_host(
        context, id, host)
    agents = self.get_agents_by_hosts(context, [data['name']])
    # Check if the l3 agent needs to be deleted
    sdn_enabled = False
    if tsconfig.sdn_enabled.lower() == 'yes':
        sdn_enabled = True
    l3plugin = directory.get_plugin(plugin_constants.L3)
    if (l3plugin and sdn_enabled and not utils.is_extension_supported(
            l3plugin, lib_constants.L3_AGENT_SCHEDULER_EXT_ALIAS)):
        # Iterate over a copy since agents are removed inside the loop.
        for agent in agents[:]:
            if agent['agent_type'] == lib_constants.AGENT_TYPE_L3:
                # agent should not be configured since l3plugin router is
                # not enabled
                LOG.info("Deleting agent {}".format(agent['id']))
                self.delete_agent(context, agent['id'])
                agents.remove(agent)
    if data.get('availability', constants.HOST_DOWN) == constants.HOST_UP:
        # Host came up: schedule work onto it and refresh agent state.
        LOG.debug("enabling {} agents on {}".format(len(agents), id))
        self._auto_schedule_host(context, data)
        for agent in agents:
            self._update_agent(context, agent['agent_type'],
                               data['name'], agent['admin_state_up'])
    else:
        # Host is down: move its agents' workloads elsewhere.
        LOG.debug("disabling {} agents on {}".format(len(agents), id))
        for agent in agents:
            self._relocate_agent(context, agent)
    return self.get_host(context, id)
def _notify_agents(self, context, method, payload, network_id): """Notify all the agents that are hosting the network.""" # fanout is required as we do not know who is "listening" no_agents = not utils.is_extension_supported( self.plugin, constants.DHCP_AGENT_SCHEDULER_EXT_ALIAS) fanout_required = method == 'network_delete_end' or no_agents # we do nothing on network creation because we want to give the # admin the chance to associate an agent to the network manually cast_required = method != 'network_create_end' if fanout_required: self._fanout_message(context, method, payload) elif cast_required: admin_ctx = (context if context.is_admin else context.elevated()) network = self.plugin.get_network(admin_ctx, network_id) agents = self.plugin.get_dhcp_agents_hosting_networks( context, [network_id]) # schedule the network first, if needed schedule_required = ( method == 'port_create_end' and not self._is_reserved_dhcp_port(payload['port'])) if schedule_required: agents = self._schedule_network(admin_ctx, network, agents) enabled_agents = self._get_enabled_agents( context, network, agents, method, payload) for agent in enabled_agents: self._cast_message( context, method, payload, agent.host, agent.topic)
def add_dvr_arp_rule(self, context, port_id, action):
    """Apply *action* to the DVR remote-port ARP table for *port_id*."""
    service_plugins = manager.NeutronManager.get_service_plugins()
    l3plugin = service_plugins.get(service_constants.L3_ROUTER_NAT)
    # Only act when an L3 plugin with DVR support is registered.
    if not l3plugin or not utils.is_extension_supported(
            l3plugin, q_const.L3_DISTRIBUTED_EXT_ALIAS):
        return
    try:
        l3plugin.dvr_remote_port_vmarp_table_update(context, port_id,
                                                    action)
    except exceptions.PortNotFound:
        # The port may have been deleted concurrently; nothing to do.
        LOG.debug("Port %s not found during ARP update", port_id)
def _notify_agents(self, context, method, payload, network_id): """Notify all the agents that are hosting the network.""" # fanout is required as we do not know who is "listening" no_agents = not utils.is_extension_supported( self.plugin, constants.DHCP_AGENT_SCHEDULER_EXT_ALIAS) fanout_required = method == 'network_delete_end' or no_agents # we do nothing on network creation because we want to give the # admin the chance to associate an agent to the network manually cast_required = method != 'network_create_end' if fanout_required: self._fanout_message(context, method, payload) elif cast_required: admin_ctx = (context if context.is_admin else context.elevated()) network = self.plugin.get_network(admin_ctx, network_id) agents = self.plugin.get_dhcp_agents_hosting_networks( context, [network_id]) # schedule the network first, if needed schedule_required = ( method == 'port_create_end' and not self._is_reserved_dhcp_port(payload['port'])) if schedule_required: agents = self._schedule_network(admin_ctx, network, agents) enabled_agents = self._get_enabled_agents( context, network, agents, method, payload) for agent in enabled_agents: self._cast_message( context, method, payload, agent.host, agent.topic)
def auto_schedule_routers(self, plugin, context, host, router_ids):
    """Schedule non-hosted routers to L3 Agent running on host.

    If router_ids is given, each router in router_ids is scheduled
    if it is not scheduled yet. Otherwise all unscheduled routers
    are scheduled.
    Do not schedule the routers which are hosted already
    by active l3 agents.

    :returns: True if routers have been successfully assigned to host
    """
    l3_agent = plugin.get_enabled_agent_on_host(
        context, lib_const.AGENT_TYPE_L3, host)
    if not l3_agent:
        return False
    unscheduled_routers = self._get_routers_to_schedule(
        context, plugin, router_ids)
    if not unscheduled_routers:
        if utils.is_extension_supported(
                plugin, lib_const.L3_HA_MODE_EXT_ALIAS):
            # HA routers may still need an additional standby instance
            # even when every router already has a binding.
            return self._schedule_ha_routers_to_additional_agent(
                plugin, context, l3_agent)
        # NOTE(review): without HA support we fall through with an empty
        # list; _get_routers_can_schedule then yields nothing and the
        # warning below is logged — presumably intentional, confirm.
    target_routers = self._get_routers_can_schedule(
        context, plugin, unscheduled_routers, l3_agent)
    if not target_routers:
        LOG.warning(_LW('No routers compatible with L3 agent '
                        'configuration on host %s'), host)
        return False
    self._bind_routers(context, plugin, target_routers, l3_agent)
    return True
def list_active_sync_routers_on_active_l3_agent(
        self, context, host, router_ids):
    """Return sync data for routers bound to the l3 agent on *host*.

    Returns [] when the agent is administratively down or hosts none
    of the (optionally filtered) routers.
    """
    agent = self._get_agent_by_type_and_host(
        context, constants.AGENT_TYPE_L3, host)
    if not agent.admin_state_up:
        return []
    # Collect router ids bound to this agent, optionally restricted to
    # the caller-supplied subset.
    query = context.session.query(RouterL3AgentBinding.router_id)
    query = query.filter(
        RouterL3AgentBinding.l3_agent_id == agent.id)
    if router_ids:
        query = query.filter(
            RouterL3AgentBinding.router_id.in_(router_ids))
    router_ids = [item[0] for item in query]
    if router_ids:
        if n_utils.is_extension_supported(self,
                                          constants.L3_HA_MODE_EXT_ALIAS):
            # HA-aware path includes per-host HA details in the reply.
            return self.get_ha_sync_data_for_host(context, host,
                                                  router_ids=router_ids,
                                                  active=True)
        else:
            return self.get_sync_data(context, router_ids=router_ids,
                                      active=True)
    else:
        return []
def mgmt_sec_grp_id(cls):
    """Returns id of security group used by the management network."""
    if not utils.is_extension_supported(
            manager.NeutronManager.get_plugin(), "security-group"):
        return
    if cls._mgmt_sec_grp_id is None:
        # Get the id for the _mgmt_security_group_id
        tenant_id = cls.l3_tenant_id()
        res = manager.NeutronManager.get_plugin().get_security_groups(
            neutron_context.get_admin_context(),
            {'tenant_id': [tenant_id],
             'name': [cfg.CONF.general.default_security_group]},
            ['id'])
        if len(res) == 1:
            # Cache on the class so later calls skip the DB lookup.
            cls._mgmt_sec_grp_id = res[0].get('id')
        elif len(res) > 1:
            # the mgmt sec group must be unique.
            LOG.error(_LE('The security group for the virtual management '
                          'network does not have unique name. Please ensure '
                          'that it is.'))
        else:
            # CSR Mgmt security group is not present.
            LOG.error(_LE('There is no security group for the virtual '
                          'management network. Please create one.'))
    return cls._mgmt_sec_grp_id
def _agent_notification_bulk(self, context, method, routers,
                             hosting_device, operation):
    """Notify the Cisco cfg agent handling a particular hosting_device.

    A single notification can contain multiple routers.
    (NOTE(review): *operation* is not used in this method — confirm
    whether callers rely on it elsewhere.)
    """
    admin_context = context.is_admin and context or context.elevated()
    dmplugin = manager.NeutronManager.get_service_plugins().get(
        cisco_constants.DEVICE_MANAGER)
    if (hosting_device is not None and utils.is_extension_supported(
            dmplugin, CFGAGENT_SCHED)):
        # schedule=True lets the device manager bind a cfg agent to the
        # hosting device if one is not bound yet.
        agents = dmplugin.get_cfg_agents_for_hosting_devices(
            admin_context, [hosting_device['id']],
            admin_state_up=True, schedule=True)
        if agents:
            # Only the first returned agent is notified for the device.
            agent = agents[0]
            LOG.debug('Notify %(agent_type)s at %(topic)s.%(host)s the '
                      'message %(method)s [BULK]',
                      {'agent_type': agent.agent_type,
                       'topic': CFG_AGENT_L3_ROUTING,
                       'host': agent.host,
                       'method': method})
            cctxt = self.client.prepare(server=agent.host, version='1.1')
            cctxt.cast(context, method, routers=routers)
def _extend_router_dict_ha(self, router_res, router_db):
    """Add HA attributes and redundancy details to a router dict."""
    if utils.is_extension_supported(self, ha.HA_ALIAS):
        ha_s = router_db.ha_settings
        rr_b = router_db.redundancy_binding
        if rr_b and rr_b.user_router:
            # include static routes from user visible router
            temp = {}
            self._extend_router_dict_extraroute(temp, rr_b.user_router)
            if temp['routes']:
                router_res['routes'].extend(temp['routes'])
        router_res[ha.ENABLED] = False if ha_s is None else True
        if router_res[ha.ENABLED]:
            ha_details = {ha.TYPE: ha_s.ha_type,
                          ha.PRIORITY: ha_s.priority,
                          ha.STATE: ha_s.state,
                          ha.REDUNDANCY_LEVEL: ha_s.redundancy_level,
                          ha.PROBE_CONNECTIVITY: ha_s.probe_connectivity}
            if ha_details[ha.PROBE_CONNECTIVITY]:
                # Probe target/interval only meaningful when probing is
                # enabled.
                ha_details.update({ha.PROBE_TARGET: ha_s.probe_target,
                                   ha.PROBE_INTERVAL: ha_s.probe_interval})
            ha_details[ha.REDUNDANCY_ROUTERS] = (
                [{'id': b.redundancy_router_id, ha.PRIORITY: b.priority,
                  ha.STATE: b.state}
                 for b in router_db.redundancy_bindings])
            router_res[ha.DETAILS] = ha_details
        else:
            # ensure any router details are removed
            router_res.pop(ha.DETAILS, None)
def update_all_ha_network_port_statuses(self, context, host):
    """Set HA network port to DOWN for HA routers hosted on <host>

    This will update HA network port status to down for all HA routers
    hosted on <host>. This is needed to avoid l3 agent spawning keepalived
    when l2 agent not yet wired the port. This can happen after a system
    reboot that has wiped out flows, etc and the L2 agent hasn't started up
    yet. The port will still be ACTIVE in the data model and the L3 agent
    will use that info to mistakenly think that L2 network is ready.
    By forcing into DOWN, we will require the L2 agent to essentially
    ack that the port is indeed ACTIVE by reacting to the port update
    and calling update_device_up.
    """
    if not utils.is_extension_supported(
            self.plugin, constants.PORT_BINDING_EXT_ALIAS):
        # Without port bindings we cannot tell which ports live on host.
        return
    device_filter = {
        'device_owner': [constants.DEVICE_OWNER_ROUTER_HA_INTF],
        'status': [constants.PORT_STATUS_ACTIVE]}
    ports = self.plugin.get_ports(context, filters=device_filter)
    # Restrict to HA ports bound to the requesting host.
    ha_ports = [p['id'] for p in ports
                if p.get(portbindings.HOST_ID) == host]
    if not ha_ports:
        return
    LOG.debug("L3 agent on host %(host)s requested for fullsync, so "
              "setting HA network ports %(ha_ports)s status to DOWN.",
              {"host": host, "ha_ports": ha_ports})
    for p in ha_ports:
        self.plugin.update_port(
            context, p, {'port': {'status': constants.PORT_STATUS_DOWN}})
def get_sync_data_metering(self, context, **kwargs):
    """Return metering sync data, optionally filtered to one host.

    :param kwargs: may contain 'host'; when given and the L3 plugin
        supports agent scheduling, only routers hosted by that host's
        l3 agent are returned.
    """
    l3_plugin = directory.get_plugin(consts.L3)
    if not l3_plugin:
        return
    metering_data = self.meter_plugin.get_sync_data_metering(context)
    host = kwargs.get('host')
    if not utils.is_extension_supported(
            l3_plugin, consts.L3_AGENT_SCHEDULER_EXT_ALIAS) or not host:
        return metering_data
    else:
        agents = l3_plugin.get_l3_agents(context,
                                         filters={'host': [host]})
        if not agents:
            # NOTE(review): returns None (not []) on an unknown host —
            # callers presumably treat both as "nothing"; confirm.
            LOG.error(_LE('Unable to find agent %s.'), host)
            return
        routers = l3_plugin.list_routers_on_l3_agent(context,
                                                     agents[0].id)
        router_ids = [router['id'] for router in routers['routers']]
        if not router_ids:
            return
        else:
            # Keep only the metering entries for routers on this agent.
            return [
                router for router in metering_data
                if router['id'] in router_ids
            ]
def cfg_sync_routers(self, context, host, router_ids=None,
                     hosting_device_ids=None):
    """Sync routers according to filters to a specific Cisco cfg agent.

    @param context: contains user information
    @param host - originator of callback
    @param router_ids - list of router ids to return information about
    @param hosting_device_ids - list of hosting device ids to get
                                routers for.
    @return: a list of routers with their hosting devices, interfaces
             and floating_ips
    """
    # Elevate to admin so all routers are readable for the sync.
    context = neutron_context.get_admin_context()
    try:
        routers = (
            self._l3plugin.list_active_sync_routers_on_hosting_devices(
                context, host, router_ids, hosting_device_ids))
    except AttributeError:
        # The L3 plugin does not support hosting-device based sync.
        routers = []
    if routers and utils.is_extension_supported(
            self._core_plugin, constants.PORT_BINDING_EXT_ALIAS):
        self._ensure_host_set_on_ports(context, host, routers)
    LOG.debug('Routers returned to Cisco cfg agent@%(agt)s:\n %(routers)s',
              {'agt': host, 'routers': jsonutils.dumps(routers, indent=5)})
    return routers
def update_all_ha_network_port_statuses(self, context, host):
    """Set HA network port to DOWN for HA routers hosted on <host>

    This will update HA network port status to down for all HA routers
    hosted on <host>. This is needed to avoid l3 agent spawning keepalived
    when l2 agent not yet wired the port. This can happen after a system
    reboot that has wiped out flows, etc and the L2 agent hasn't started up
    yet. The port will still be ACTIVE in the data model and the L3 agent
    will use that info to mistakenly think that L2 network is ready.
    By forcing into DOWN, we will require the L2 agent to essentially
    ack that the port is indeed ACTIVE by reacting to the port update
    and calling update_device_up.
    """
    if not utils.is_extension_supported(
            self.plugin, constants.PORT_BINDING_EXT_ALIAS):
        # Port bindings are required to attribute ports to a host.
        return
    device_filter = {
        'device_owner': [constants.DEVICE_OWNER_ROUTER_HA_INTF],
        'status': [constants.PORT_STATUS_ACTIVE]}
    ports = self.plugin.get_ports(context, filters=device_filter)
    # Only ports whose binding host matches the requesting host.
    ha_ports = [p['id'] for p in ports
                if p.get(portbindings.HOST_ID) == host]
    if not ha_ports:
        return
    LOG.debug("L3 agent on host %(host)s requested for fullsync, so "
              "setting HA network ports %(ha_ports)s status to DOWN.",
              {"host": host, "ha_ports": ha_ports})
    for p in ha_ports:
        self.plugin.update_port(
            context, p, {'port': {'status': constants.PORT_STATUS_DOWN}})
def _notification(self, context, method, payload, network_id):
    """Notify all the agents that are hosting the network."""
    plugin = manager.NeutronManager.get_plugin()
    if (method != 'network_delete_end' and utils.is_extension_supported(
            plugin, constants.DHCP_AGENT_SCHEDULER_EXT_ALIAS)):
        if method == 'port_create_end':
            # we don't schedule when we create network
            # because we want to give admin a chance to
            # schedule network manually by API
            adminContext = (context if context.is_admin
                            else context.elevated())
            network = plugin.get_network(adminContext, network_id)
            chosen_agents = plugin.schedule_network(adminContext, network)
            if chosen_agents:
                # Tell each newly chosen agent about the network so it
                # can start serving it.
                for agent in chosen_agents:
                    self._notification_host(
                        context, 'network_create_end',
                        {'network': {'id': network_id}},
                        agent['host'])
        for (host, topic) in self._get_dhcp_agents(context, network_id):
            self.cast(
                context, self.make_msg(method, payload=payload),
                topic='%s.%s' % (topic, host))
    else:
        # besides the non-agentscheduler plugin,
        # There is no way to query who is hosting the network
        # when the network is deleted, so we need to fanout
        self._notification_fanout(context, method, payload)
def _prepare_subports(self, context):
    """Update subports segmentation details if INHERIT is requested."""
    # Map port_id -> index into self.subports for subports asking to
    # inherit segmentation details from their network.
    port_ids = {
        s['port_id']: i
        for i, s in enumerate(self.subports)
        if s.get('segmentation_type') == constants.INHERIT
    }
    core_plugin = directory.get_plugin()
    if not port_ids:
        return
    elif not n_utils.is_extension_supported(core_plugin, provider.ALIAS):
        # INHERIT needs provider network attributes to read from.
        msg = _("Cannot accept segmentation type %s") % constants.INHERIT
        raise n_exc.InvalidInput(error_message=msg)
    ports = core_plugin.get_ports(context, filters={'id': port_ids})
    # this assumes a user does not try to trunk the same network
    # more than once.
    network_port_map = {
        x['network_id']: {'port_id': x['id']}
        for x in ports
    }
    networks = core_plugin.get_networks(
        context.elevated(), filters={'id': network_port_map})
    # Replace each INHERIT subport with the segmentation details taken
    # from its port's network.
    for net in networks:
        port = network_port_map[net['id']]
        port.update({'segmentation_id': net[provider.SEGMENTATION_ID],
                     'segmentation_type': net[provider.NETWORK_TYPE]})
        self.subports[port_ids[port['port_id']]] = port
def auto_schedule_routers(self, plugin, context, host, router_ids):
    """Schedule non-hosted routers to L3 Agent running on host.

    If router_ids is given, each router in router_ids is scheduled
    if it is not scheduled yet. Otherwise all unscheduled routers
    are scheduled.
    Do not schedule the routers which are hosted already
    by active l3 agents.

    :returns: True if routers have been successfully assigned to host
    """
    l3_agent = plugin.get_enabled_agent_on_host(context,
                                                constants.AGENT_TYPE_L3,
                                                host)
    if not l3_agent:
        return False
    # NOTE(armando-migliaccio): DVR routers should not be auto
    # scheduled because auto-scheduling may interfere with the
    # placement rules for IR and SNAT namespaces.
    unscheduled_routers = self._get_routers_to_schedule(
        context, plugin, router_ids, exclude_distributed=True)
    if not unscheduled_routers:
        if utils.is_extension_supported(plugin,
                                        constants.L3_HA_MODE_EXT_ALIAS):
            # HA routers may still need an additional standby instance
            # even when all routers already have a binding.
            return self._schedule_ha_routers_to_additional_agent(
                plugin, context, l3_agent)
    target_routers = self._get_routers_can_schedule(
        context, plugin, unscheduled_routers, l3_agent)
    if not target_routers:
        # LOG.warn is deprecated; use LOG.warning instead.
        LOG.warning(_LW("No routers compatible with L3 agent configuration "
                        "on host %s"), host)
        return False
    self._bind_routers(context, plugin, target_routers, l3_agent)
    return True
def update_device_up(self, rpc_context, **kwargs):
    """Device is up on agent.

    Marks the corresponding port ACTIVE and, for DVR deployments,
    triggers an ARP table update for the port.
    """
    agent_id = kwargs.get('agent_id')
    device = kwargs.get('device')
    host = kwargs.get('host')
    LOG.debug(
        _("Device %(device)s up at agent %(agent_id)s"),
        {'device': device, 'agent_id': agent_id})
    plugin = manager.NeutronManager.get_plugin()
    port_id = plugin._device_to_port_id(device)
    if (host and not plugin.port_bound_to_host(rpc_context,
                                               port_id, host)):
        # Stale notification from a host the port is no longer bound
        # to; ignore it.
        LOG.debug(
            _("Device %(device)s not bound to the"
              " agent host %(host)s"),
            {'device': device, 'host': host})
        return
    port_id = plugin.update_port_status(rpc_context, port_id,
                                        q_const.PORT_STATUS_ACTIVE,
                                        host)
    l3plugin = manager.NeutronManager.get_service_plugins().get(
        service_constants.L3_ROUTER_NAT)
    if (l3plugin and utils.is_extension_supported(
            l3plugin, q_const.L3_DISTRIBUTED_EXT_ALIAS)):
        try:
            port = plugin._get_port(rpc_context, port_id)
            l3plugin.dvr_vmarp_table_update(rpc_context, port, "add")
        except exceptions.PortNotFound:
            # Port deleted concurrently; nothing to update.
            LOG.debug('Port %s not found during ARP update', port_id)
def _add_az_to_response(router_res, router_db):
    """Add availability zone info to a router response dict.

    No-op when the L3 plugin does not support the
    'router_availability_zone' extension.
    """
    l3_plugin = directory.get_plugin(constants.L3)
    if not utils.is_extension_supported(l3_plugin,
                                        'router_availability_zone'):
        return
    router_res['availability_zones'] = (
        l3_plugin.get_router_availability_zones(router_db))
def sync_routers(self, context, **kwargs):
    """Sync routers according to filters to a specific agent.

    @param context: contain user information
    @param kwargs: host, router_ids
    @return: a list of routers with their interfaces and floating_ips
    """
    begin_time = datetime.datetime.now()
    router_ids = kwargs.get('router_ids')
    host = kwargs.get('host')
    # Lazy %-style args avoid formatting the message when DEBUG is off.
    LOG.debug("router_ids: %r,host %s", router_ids, host)
    # Elevate to admin so routers of all tenants are visible.
    context = neutron_context.get_admin_context()
    if not self.l3plugin:
        routers = {}
        LOG.error(
            _('No plugin for L3 routing registered! Will reply '
              'to l3 agent with empty router dictionary.'))
    elif utils.is_extension_supported(
            self.l3plugin, constants.L3_AGENT_SCHEDULER_EXT_ALIAS):
        if cfg.CONF.router_auto_schedule:
            self.l3plugin.auto_schedule_routers(context, host, router_ids)
        routers = (
            self.l3plugin.list_active_sync_routers_on_active_l3_agent(
                context, host, router_ids))
    else:
        routers = self.l3plugin.get_sync_data(context, router_ids)
    if self.gcloud_router_qos_plugin:
        # get router qos
        routers = self.gcloud_router_qos_plugin.get_sync_router_all_qos(
            context, routers)
    if utils.is_extension_supported(self.plugin,
                                    constants.PORT_BINDING_EXT_ALIAS):
        self._ensure_host_set_on_ports(context, host, routers)
    end_time = datetime.datetime.now()
    # NOTE: timedelta.seconds drops whole days; assumed fine for an RPC
    # call that completes well within a day.
    time_sec = (end_time - begin_time).seconds
    LOG.debug(
        "Routers returned to l3 agent,host: %s, router number: %d,"
        "str len:%d ,time seconds: %s",
        host, len(routers), len(str(routers)), time_sec)
    return routers
def _get_active_l3_agent_routers_sync_data(self, context, host, agent,
                                           router_ids):
    """Fetch router sync data for the active l3 agent on *host*.

    Uses the HA-aware path when the HA extension is supported,
    otherwise the DVR sync data query.
    """
    ha_supported = n_utils.is_extension_supported(
        self, n_const.L3_HA_MODE_EXT_ALIAS)
    if not ha_supported:
        return self._get_dvr_sync_data(context, host, agent,
                                       router_ids=router_ids,
                                       active=True)
    return self.get_ha_sync_data_for_host(context, host,
                                          router_ids=router_ids,
                                          active=True)
def _auto_schedule_host(self, context, host):
    """Schedule unhosted networks and routers onto the given host."""
    host_name = host['name']
    core_plugin = directory.get_plugin()
    core_plugin.auto_schedule_networks(context, host_name)
    l3_plugin = directory.get_plugin(plugin_constants.L3)
    if utils.is_extension_supported(
            l3_plugin, lib_constants.L3_AGENT_SCHEDULER_EXT_ALIAS):
        l3_plugin.auto_schedule_routers(context, host_name, None)
def get_ha_sync_data_for_host(self, context, host=None, router_ids=None,
                              active=None):
    """Return HA-processed router sync data for *host*.

    DVR deployments need the host's l3 agent to resolve per-host data;
    otherwise the plain sync data is used.
    """
    if n_utils.is_extension_supported(
            self, constants.L3_DISTRIBUTED_EXT_ALIAS):
        # DVR has to be handled differently
        agent = self._get_agent_by_type_and_host(
            context, constants.AGENT_TYPE_L3, host)
        sync_data = self._get_dvr_sync_data(context, host, agent,
                                            router_ids, active)
    else:
        sync_data = super(L3_HA_NAT_db_mixin, self).get_sync_data(
            context, router_ids, active)
    return self._process_sync_ha_data(context, sync_data, host)
def are_agent_types_available_on_host(context, agent_types, host):
    """Return true if agent types are present on the host."""
    core_plugin = manager.NeutronManager.get_plugin()
    if not utils.is_extension_supported(core_plugin, 'agent'):
        return False
    filters = {'host': [host], 'agent_type': agent_types}
    matching = core_plugin.get_agents(context.elevated(), filters=filters)
    return bool(matching)
def _get_active_l3_agent_routers_sync_data(self, context, host, agent,
                                           router_ids):
    """Return router sync data for the active l3 agent on *host*."""
    if not n_utils.is_extension_supported(self,
                                          n_const.L3_HA_MODE_EXT_ALIAS):
        return self._get_dvr_sync_data(context, host, agent,
                                       router_ids=router_ids,
                                       active=True)
    # HA extension available: include HA-specific per-host details.
    return self.get_ha_sync_data_for_host(context, host, agent,
                                          router_ids=router_ids,
                                          active=True)
def _routers_to_sync(self, context, router_ids, host=None):
    """Return router sync data, scoped to *host* when the L3 plugin
    supports agent scheduling."""
    plugin = self.l3plugin
    if utils.is_extension_supported(
            plugin, constants.L3_AGENT_SCHEDULER_EXT_ALIAS):
        return plugin.list_active_sync_routers_on_active_l3_agent(
            context, host, router_ids)
    return plugin.get_sync_data(context, router_ids)
def auto_schedule_networks(self, context, **kwargs):
    """Auto-schedule unhosted networks to the DHCP agent on *host*.

    :returns: True (always; scheduling outcomes are not reported)
    """
    host = kwargs.get('host')
    LOG.debug(_('auto_schedule_networks from %s'), host)
    plugin = manager.NeutronManager.get_plugin()
    if utils.is_extension_supported(
            plugin, constants.DHCP_AGENT_SCHEDULER_EXT_ALIAS):
        # Only schedule when the operator enabled auto-scheduling.
        if cfg.CONF.network_auto_schedule:
            plugin.auto_schedule_networks(context, host)
    return True
def _relocate_agent(self, context, agent):
    """Move workloads off *agent* (DHCP networks or L3 routers)."""
    topic = agent['topic']
    if topic == topics.DHCP_AGENT:
        directory.get_plugin().relocate_networks(context, agent)
    elif topic == topics.L3_AGENT:
        l3_plugin = directory.get_plugin(plugin_constants.L3)
        supports_scheduling = utils.is_extension_supported(
            l3_plugin, lib_constants.L3_AGENT_SCHEDULER_EXT_ALIAS)
        if supports_scheduling:
            l3_plugin.relocate_routers(context, agent['id'])
def _notification(self, context, method, routers):
    """Notify all the agents that are hosting the routers."""
    l3_plugin = directory.get_plugin(plugin_constants.L3)
    scheduler_aware = utils.is_extension_supported(
        l3_plugin, constants.L3_AGENT_SCHEDULER_EXT_ALIAS)
    if not scheduler_aware:
        # Hosting agents are unknown without a scheduler: fan out.
        self.client.prepare(fanout=True).cast(
            context, method, routers=routers)
        return
    self._agent_notification(context, method, routers)
def get_agent_types_by_host(context, host):
    """Return the agent types registered on the host."""
    core_plugin = directory.get_plugin()
    if not utils.is_extension_supported(core_plugin, 'agent'):
        return []
    agents = core_plugin.get_agents(
        context.elevated(), filters={'host': [host]})
    return [agent['agent_type'] for agent in agents]
def get_agent_types_by_host(context, host):
    """Return the agent types registered on the host."""
    agent_types = []
    core_plugin = manager.NeutronManager.get_plugin()
    if utils.is_extension_supported(core_plugin, 'agent'):
        # Elevated context: agent listing requires admin rights.
        agents = core_plugin.get_agents(
            context.elevated(), filters={'host': [host]})
        agent_types = [a['agent_type'] for a in agents]
    return agent_types
def _notification(self, context, method, routers):
    """Notify all the agents that are hosting the routers."""
    service_plugins = manager.NeutronManager.get_service_plugins()
    l3_plugin = service_plugins.get(service_constants.L3_ROUTER_NAT)
    if not utils.is_extension_supported(
            l3_plugin, constants.L3_AGENT_SCHEDULER_EXT_ALIAS):
        # No scheduler: broadcast since hosting agents are unknown.
        self.fanout_cast(context, self.make_msg(method, routers=routers))
    else:
        self._agent_notification(context, method, routers)
def get_ha_sync_data_for_host(self, context, host, agent,
                              router_ids=None, active=None):
    """Return HA-processed sync data; DVR-aware when the agent is DVR."""
    agent_mode = self._get_agent_mode(agent)
    # Only dvr/dvr_snat agents need the DVR-specific sync path.
    dvr_agent_mode = agent_mode in [constants.L3_AGENT_MODE_DVR_SNAT,
                                    constants.L3_AGENT_MODE_DVR]
    if dvr_agent_mode and n_utils.is_extension_supported(
            self, constants.L3_DISTRIBUTED_EXT_ALIAS):
        # DVR has to be handled differently
        sync_data = self._get_dvr_sync_data(context, host, agent,
                                            router_ids, active)
    else:
        sync_data = super(L3_HA_NAT_db_mixin, self).get_sync_data(
            context, router_ids, active)
    return self._process_sync_ha_data(context, sync_data, host)
def _notification(self, context, method, routers):
    """Notify all the agents that are hosting the routers."""
    # Resolve the L3 service plugin to check its capabilities.
    plugin = manager.NeutronManager.get_service_plugins().get(
        service_constants.L3_ROUTER_NAT)
    if utils.is_extension_supported(
            plugin, constants.L3_AGENT_SCHEDULER_EXT_ALIAS):
        # Targeted notification to the agents hosting each router.
        self._agent_notification(context, method, routers)
    else:
        # Hosting agents are unknown: broadcast to all l3 agents.
        self.fanout_cast(context, self.make_msg(method, routers=routers))
def _extend_extra_router_dict(self, router_res, router_db):
    """Decode AZ hints and attach availability zones to the response."""
    parent = super(RouterAvailabilityZoneMixin, self)
    parent._extend_extra_router_dict(router_res, router_db)
    if utils.is_extension_supported(self, 'router_availability_zone'):
        hints = az_ext.convert_az_string_to_list(
            router_res[az_ext.AZ_HINTS])
        router_res[az_ext.AZ_HINTS] = hints
        router_res['availability_zones'] = (
            self.get_router_availability_zones(router_db))
def _extend_extra_router_dict(self, router_res, router_db):
    """Add AZ hints (as a list) and availability zones to the response."""
    super(RouterAvailabilityZoneMixin, self)._extend_extra_router_dict(
        router_res, router_db)
    if not utils.is_extension_supported(self, 'router_availability_zone'):
        return
    # AZ hints are stored as a string; expose them as a list.
    router_res[az_ext.AZ_HINTS] = az_ext.convert_az_string_to_list(
        router_res[az_ext.AZ_HINTS])
    router_res['availability_zones'] = (
        self.get_router_availability_zones(router_db))
def sync_routers(self, context, **kwargs):
    """Sync routers according to filters to a specific agent.

    @param context: contain user information
    @param kwargs: host, router_ids
    @return: a list of routers with their interfaces and floating_ips
    """
    begin_time = datetime.datetime.now()
    router_ids = kwargs.get('router_ids')
    host = kwargs.get('host')
    # Lazy %-style args avoid formatting the message when DEBUG is off.
    LOG.debug("router_ids: %r,host %s", router_ids, host)
    # Elevate to admin so routers of all tenants are visible.
    context = neutron_context.get_admin_context()
    if not self.l3plugin:
        routers = {}
        LOG.error(_('No plugin for L3 routing registered! Will reply '
                    'to l3 agent with empty router dictionary.'))
    elif utils.is_extension_supported(
            self.l3plugin, constants.L3_AGENT_SCHEDULER_EXT_ALIAS):
        if cfg.CONF.router_auto_schedule:
            self.l3plugin.auto_schedule_routers(context, host, router_ids)
        routers = (
            self.l3plugin.list_active_sync_routers_on_active_l3_agent(
                context, host, router_ids))
    else:
        routers = self.l3plugin.get_sync_data(context, router_ids)
    if self.gcloud_router_qos_plugin:
        # get router qos
        routers = self.gcloud_router_qos_plugin.get_sync_router_all_qos(
            context, routers)
    if utils.is_extension_supported(
            self.plugin, constants.PORT_BINDING_EXT_ALIAS):
        self._ensure_host_set_on_ports(context, host, routers)
    end_time = datetime.datetime.now()
    # NOTE: timedelta.seconds drops whole days; assumed fine for an RPC
    # call that completes well within a day.
    time_sec = (end_time - begin_time).seconds
    LOG.debug(
        "Routers returned to l3 agent,host: %s, router number: %d,"
        "str len:%d ,time seconds: %s",
        host, len(routers), len(str(routers)), time_sec)
    return routers
def add_dvr_arp_rule(self, context, port_id, action):
    """Apply *action* to the DVR remote-port ARP table for *port_id*.

    No-op unless an L3 plugin supporting DVR is registered.
    """
    l3plugin = manager.NeutronManager.get_service_plugins().get(
        service_constants.L3_ROUTER_NAT)
    if (l3plugin and utils.is_extension_supported(
            l3plugin, q_const.L3_DISTRIBUTED_EXT_ALIAS)):
        try:
            l3plugin.dvr_remote_port_vmarp_table_update(
                context, port_id, action)
        except exceptions.PortNotFound:
            # Port removed concurrently; nothing to update.
            LOG.debug('Port %s not found during ARP update', port_id)
def _get_active_networks(self, context, **kwargs):
    """Retrieve and return a list of the active networks."""
    host = kwargs.get('host')
    plugin = directory.get_plugin()
    if utils.is_extension_supported(plugin, n_const.HOST_EXT_ALIAS):
        # Disabled hosts get no networks at all.
        if not plugin.is_host_available(context, host):
            LOG.debug("host {} disabled; not returning any "
                      "networks to agent".format(host))
            return []
    if utils.is_extension_supported(
            plugin, constants.DHCP_AGENT_SCHEDULER_EXT_ALIAS):
        if cfg.CONF.network_auto_schedule:
            plugin.auto_schedule_networks(context, host)
        nets = plugin.list_active_networks_on_active_dhcp_agent(
            context, host)
    else:
        # No scheduler support: return every admin-up network.
        filters = dict(admin_state_up=[True])
        nets = plugin.get_networks(context, filters=filters)
    return nets
def _get_active_l3_agent_routers_sync_data(self, context, host, agent,
                                           router_ids):
    """Return sync data for routers on *host*, excluding routers that
    are still allocating or otherwise missing."""
    supports_ha = n_utils.is_extension_supported(
        self, constants.L3_HA_MODE_EXT_ALIAS)
    if supports_ha:
        sync_routers = self.get_ha_sync_data_for_host(
            context, host, agent, router_ids=router_ids, active=True)
    else:
        sync_routers = self.get_sync_data(
            context, router_ids=router_ids, active=True)
    return self.filter_allocating_and_missing_routers(
        context, sync_routers)
def get_router_ids(self, context, host):
    """Returns IDs of routers scheduled to l3 agent on <host>

    This will autoschedule unhosted routers to l3 agent on <host> and
    then return all ids of routers scheduled to it.
    """
    plugin = self.l3plugin
    scheduler_supported = utils.is_extension_supported(
        plugin, constants.L3_AGENT_SCHEDULER_EXT_ALIAS)
    if scheduler_supported and cfg.CONF.router_auto_schedule:
        plugin.auto_schedule_routers(context, host)
    return plugin.list_router_ids_on_host(context, host)
def _fill_agent_info_in_profile(self, context, port_id, host,
                                profile_dict):
    """Fill agent information in the binding profile

    Called inside self.update_port function. When local plugin handles
    port update request, it checks if host is in the body, if so, local
    plugin will send a port update request to central Neutron to tell
    central plugin that the port has been bound to a host. The
    information of the agent in the host is inserted in the update body
    by calling this function. So after central Neutron receives the
    request, it can save the agent information in the Tricircle shadow
    agent table.

    :param context: neutron object
    :param port_id: port uuid
    :param host: host the port is bound to
    :param profile_dict: binding profile dict in the port update body
    :return: None
    """
    if not utils.is_extension_supported(self.core_plugin, 'agent'):
        return
    if cfg.CONF.client.cross_pod_vxlan_mode == t_constants.NM_NOOP:
        return
    port = self.core_plugin.get_port(context, port_id)
    net = self.core_plugin.get_network(context, port['network_id'])
    if net[provider_net.NETWORK_TYPE] != t_constants.NT_VxLAN:
        # Only VxLAN networks need agent/tunnel info exchanged.
        return
    vif_type = port[portbindings.VIF_TYPE]
    agent_type = helper.NetworkHelper.get_agent_type_by_vif(vif_type)
    if not agent_type:
        return
    agents = self.core_plugin.get_agents(
        context, filters={'agent_type': [agent_type], 'host': [host]})
    if not agents:
        return
    if cfg.CONF.client.cross_pod_vxlan_mode == t_constants.NM_P2P:
        helper.NetworkHelper.fill_agent_data(agent_type, host, agents[0],
                                             profile_dict)
    elif cfg.CONF.client.cross_pod_vxlan_mode == t_constants.NM_L2GW:
        if not cfg.CONF.tricircle.l2gw_tunnel_ip:
            LOG.error('Cross-pod VxLAN networking mode is set to l2gw '
                      'but L2 gateway tunnel ip is not configured')
            return
        l2gw_tunnel_ip = cfg.CONF.tricircle.l2gw_tunnel_ip
        helper.NetworkHelper.fill_agent_data(agent_type, host, agents[0],
                                             profile_dict,
                                             tunnel_ip=l2gw_tunnel_ip)
def _process_dns_floatingip_delete(self, context, floatingip_data):
    """Remove the floating IP's published record from external DNS.

    No-op when the DNS extension is not loaded or the floating IP has
    no associated DNS data.
    """
    if not utils.is_extension_supported(self._core_plugin,
                                        dns_apidef.ALIAS):
        return
    fip_dns = fip_obj.FloatingIPDNS.get_object(
        context, floatingip_id=floatingip_data['id'])
    if not fip_dns:
        return
    self._delete_floatingip_from_external_dns_service(
        context,
        fip_dns['published_dns_domain'],
        fip_dns['published_dns_name'],
        [floatingip_data['floating_ip_address']])
def _get_active_l3_agent_routers_sync_data(self, context, host, agent,
                                           router_ids):
    """Return active-router sync data for an l3 agent.

    Uses the HA-aware sync path when the HA extension is loaded, and
    filters out routers that are still allocating or missing.
    """
    ha_supported = n_utils.is_extension_supported(
        self, constants.L3_HA_MODE_EXT_ALIAS)
    if ha_supported:
        active_routers = self.get_ha_sync_data_for_host(
            context, host, agent, router_ids=router_ids, active=True)
    else:
        active_routers = self.get_sync_data(
            context, router_ids=router_ids, active=True)
    return self.filter_allocating_and_missing_routers(context,
                                                      active_routers)
def _process_dns_floatingip_delete(self, context, floatingip_data):
    """Delete the floating IP's record from the external DNS service.

    Does nothing when the DNS extension is unsupported or no DNS row
    exists for this floating IP.
    """
    if not utils.is_extension_supported(self._core_plugin,
                                        dns.Dns.get_alias()):
        return
    dns_row = (context.session.query(FloatingIPDNS)
               .filter_by(floatingip_id=floatingip_data['id'])
               .one_or_none())
    if not dns_row:
        return
    self._delete_floatingip_from_external_dns_service(
        context,
        dns_row['published_dns_domain'],
        dns_row['published_dns_name'],
        [floatingip_data['floating_ip_address']])
def _process_dns_floatingip_delete(self, context, floatingip_data):
    """Unpublish the floating IP from the external DNS service.

    Skips silently when the DNS extension is not supported or the
    floating IP has no stored DNS record.
    """
    if not utils.is_extension_supported(self._core_plugin,
                                        dns.Dns.get_alias()):
        return
    record = fip_obj.FloatingIPDNS.get_object(
        context, floatingip_id=floatingip_data['id'])
    if record:
        self._delete_floatingip_from_external_dns_service(
            context,
            record['published_dns_domain'],
            record['published_dns_name'],
            [floatingip_data['floating_ip_address']])
def _process_dns_floatingip_delete(self, context, floatingip_data):
    """Remove the external DNS entry published for this floating IP.

    A no-op if the DNS extension is absent or the floating IP was never
    published to the external DNS service.
    """
    dns_supported = utils.is_extension_supported(self._core_plugin,
                                                 dns.Dns.get_alias())
    if not dns_supported:
        return
    fip_record = context.session.query(FloatingIPDNS).filter_by(
        floatingip_id=floatingip_data['id']).one_or_none()
    if fip_record:
        self._delete_floatingip_from_external_dns_service(
            context,
            fip_record['published_dns_domain'],
            fip_record['published_dns_name'],
            [floatingip_data['floating_ip_address']])
def _notification(self, context, method, routers, operation,
                  shuffle_agents):
    """Notify all or individual Cisco cfg agents.

    When the agent scheduler extension is available, routers are first
    scheduled to a hosting device, then targeted agents are notified;
    otherwise the notification is fanned out to every l3 agent.
    """
    if utils.is_extension_supported(self._l3plugin, L3AGENT_SCHED):
        # Use a conditional expression instead of the legacy
        # `cond and a or b` idiom, which silently picks the wrong
        # branch whenever `a` is falsy.
        adm_context = context if context.is_admin else context.elevated()
        # This is where hosting device gets scheduled to Cisco cfg agent
        self._l3plugin.schedule_routers(adm_context, routers)
        self._agent_notification(context, method, routers, operation,
                                 shuffle_agents)
    else:
        cctxt = self.client.prepare(topics=topics.L3_AGENT, fanout=True)
        cctxt.cast(context, method, routers=[r['id'] for r in routers])
def _notification(self, context, method, router_ids, operation, data):
    """Notify all the agents that are hosting the routers.

    Schedules the routers first when the l3 agent scheduler extension
    is available; otherwise fans the message out to all l3 agents.
    """
    plugin = manager.NeutronManager.get_plugin()
    if utils.is_extension_supported(
            plugin, constants.L3_AGENT_SCHEDULER_EXT_ALIAS):
        # Conditional expression replaces the fragile `cond and a or b`
        # idiom (wrong result if `a` evaluates falsy); local renamed to
        # snake_case per PEP 8.
        admin_context = (context if context.is_admin
                         else context.elevated())
        plugin.schedule_routers(admin_context, router_ids)
        self._agent_notification(context, method, router_ids,
                                 operation, data)
    else:
        self.fanout_cast(context,
                         self.make_msg(method, routers=router_ids),
                         topic=topics.L3_AGENT)
def sync_routers(self, context, **kwargs):
    """Sync routers according to filters to a specific agent.

    @param context: contain user information
    @param kwargs: host, router_ids
    @return: a list of routers with their interfaces and floating_ips
    """
    host = kwargs.get('host')
    router_ids = kwargs.get('router_ids')
    # Always operate with elevated privileges, regardless of caller.
    context = neutron_context.get_admin_context()
    scheduler_supported = utils.is_extension_supported(
        self.l3plugin, constants.L3_AGENT_SCHEDULER_EXT_ALIAS)
    if scheduler_supported:
        routers = self.l3plugin.list_active_sync_routers_on_active_l3_agent(
            context, host, router_ids)
    else:
        routers = self.l3plugin.get_sync_data(context, router_ids)
    if utils.is_extension_supported(self.plugin,
                                    constants.PORT_BINDING_EXT_ALIAS):
        self._ensure_host_set_on_ports(context, host, routers)
    return routers
def _get_hosts_to_notify(self, context, router_ids):
    """Returns all hosts to send notification about firewall update"""
    l3_plugin = directory.get_plugin(nl_constants.L3)
    can_target_hosts = (
        n_utils.is_extension_supported(
            l3_plugin, nl_constants.L3_AGENT_SCHEDULER_EXT_ALIAS) and
        getattr(l3_plugin, 'get_l3_agents_hosting_routers', False))
    if not can_target_hosts:
        # NOTE(blallau): default: FirewallAgentAPI performs RPC broadcast
        return [None]
    hosting_agents = l3_plugin.get_l3_agents_hosting_routers(
        context, router_ids, admin_state_up=True, active=True)
    return [agent.host for agent in hosting_agents]
def _notification(self, context, method, payload, network_id):
    """Notify all the agents that are hosting the network.

    :param context: request context (elevated internally when needed)
    :param method: DHCP RPC method name, e.g. 'port_create_end'
    :param payload: dict payload forwarded to the agents
    :param network_id: id of the network the event relates to
    """
    plugin = manager.NeutronManager.get_plugin()
    # Deleted networks cannot be resolved to hosting agents, so that
    # case falls through to the fanout branch below.
    if (method != 'network_delete_end' and utils.is_extension_supported(
            plugin, constants.DHCP_AGENT_SCHEDULER_EXT_ALIAS)):
        if method == 'port_create_end':
            # we don't schedule when we create network
            # because we want to give admin a chance to
            # schedule network manually by API
            adminContext = (context if context.is_admin
                            else context.elevated())
            network = plugin.get_network(adminContext, network_id)
            chosen_agents = plugin.schedule_network(adminContext, network)
            if chosen_agents:
                # Newly scheduled agents first get a synthetic
                # network_create_end so they start serving the network.
                for agent in chosen_agents:
                    self._notification_host(
                        context, 'network_create_end',
                        {'network': {'id': network_id}},
                        agent['host'])
        agents = self._get_enabled_dhcp_agents(context, network_id)
        if not agents:
            LOG.error(_("No DHCP agents are associated with network "
                        "'%(net_id)s'. Unable to send notification "
                        "for '%(method)s' with payload: %(payload)s"),
                      {
                          'net_id': network_id,
                          'method': method,
                          'payload': payload,
                      })
            return
        active_agents = [x for x in agents if x.is_active]
        # Notifications are still sent to inactive agents below; this
        # only warns the operator about the mismatch.
        if active_agents != agents:
            LOG.warning(_("Only %(active)d of %(total)d DHCP agents "
                          "associated with network '%(net_id)s' are "
                          "marked as active, so notifications may "
                          "be sent to inactive agents."),
                        {
                            'active': len(active_agents),
                            'total': len(agents),
                            'net_id': network_id,
                        })
        for agent in agents:
            self.cast(
                context, self.make_msg(method, payload=payload),
                topic='%s.%s' % (agent.topic, agent.host))
    else:
        # besides the non-agentscheduler plugin,
        # There is no way to query who is hosting the network
        # when the network is deleted, so we need to fanout
        self._notification_fanout(context, method, payload)
def get_ha_sync_data_for_host(self, context, host=None, router_ids=None,
                              active=None):
    """Return HA-processed router sync data for the given host."""
    dvr_supported = n_utils.is_extension_supported(
        self, constants.L3_DISTRIBUTED_EXT_ALIAS)
    if dvr_supported:
        # DVR has to be handled differently
        l3_agent = self._get_agent_by_type_and_host(
            context, constants.AGENT_TYPE_L3, host)
        sync_data = self._get_dvr_sync_data(context, host, l3_agent,
                                            router_ids, active)
    else:
        sync_data = super(L3_HA_NAT_db_mixin, self).get_sync_data(
            context, router_ids, active)
    return self._process_sync_ha_data(context, sync_data, host)
def _notify_agents(self, context, method, payload, network_id):
    """Notify all the agents that are hosting the network.

    :param context: request context (elevated internally when needed)
    :param method: DHCP RPC method name, e.g. 'subnet_create_end'
    :param payload: dict payload forwarded to the agents; a 'priority'
        key is injected from METHOD_PRIORITY_MAP
    :param network_id: id of the network the event relates to
    """
    payload['priority'] = METHOD_PRIORITY_MAP.get(method)
    # fanout is required as we do not know who is "listening"
    no_agents = not utils.is_extension_supported(
        self.plugin, constants.DHCP_AGENT_SCHEDULER_EXT_ALIAS)
    fanout_required = method == 'network_delete_end' or no_agents

    # we do nothing on network creation because we want to give the
    # admin the chance to associate an agent to the network manually
    cast_required = method != 'network_create_end'

    if fanout_required:
        self._fanout_message(context, method, payload)
    elif cast_required:
        admin_ctx = (context if context.is_admin else context.elevated())
        network = self.plugin.get_network(admin_ctx, network_id)
        if 'subnet' in payload and payload['subnet'].get('segment_id'):
            # if segment_id exists then the segment service plugin
            # must be loaded
            segment_plugin = directory.get_plugin('segments')
            segment = segment_plugin.get_segment(
                context, payload['subnet']['segment_id'])
            # restrict candidate hosts to those on the subnet's segment
            network['candidate_hosts'] = segment['hosts']

        agents = self.plugin.get_dhcp_agents_hosting_networks(
            context, [network_id], hosts=network.get('candidate_hosts'))
        # schedule the network first, if needed
        schedule_required = (
            method == 'subnet_create_end' or
            method == 'port_create_end' and
            not self._is_reserved_dhcp_port(payload['port']))
        if schedule_required:
            agents = self._schedule_network(admin_ctx, network, agents)
        if not agents:
            LOG.debug("Network %s is not hosted by any dhcp agent",
                      network_id)
            return
        enabled_agents = self._get_enabled_agents(
            context, network, agents, method, payload)
        if method == 'port_create_end':
            # pick one agent at random to receive the message at high
            # priority; remaining agents get the normal-priority copy
            high_agent = enabled_agents.pop(
                random.randint(0, len(enabled_agents) - 1))
            self._notify_high_priority_agent(
                context, copy.deepcopy(payload), high_agent)
        for agent in enabled_agents:
            self._cast_message(
                context, method, payload, agent.host, agent.topic)
def _get_active_networks(self, context, **kwargs):
    """Retrieve and return a list of the active networks."""
    host = kwargs.get('host')
    plugin = manager.NeutronManager.get_plugin()
    if not utils.is_extension_supported(
            plugin, constants.DHCP_AGENT_SCHEDULER_EXT_ALIAS):
        # No scheduler support: every admin-enabled network is active.
        return plugin.get_networks(context,
                                   filters=dict(admin_state_up=[True]))
    if cfg.CONF.network_auto_schedule:
        plugin.auto_schedule_networks(context, host)
    return plugin.list_active_networks_on_active_dhcp_agent(context, host)
def get_ha_sync_data_for_host(self, context, host, agent,
                              router_ids=None, active=None):
    """Return HA-processed router sync data for an agent on <host>."""
    mode = self._get_agent_mode(agent)
    is_dvr_agent = mode in (constants.L3_AGENT_MODE_DVR_SNAT,
                            constants.L3_AGENT_MODE_DVR)
    if is_dvr_agent and n_utils.is_extension_supported(
            self, constants.L3_DISTRIBUTED_EXT_ALIAS):
        # DVR has to be handled differently
        sync_data = self._get_dvr_sync_data(context, host, agent,
                                            router_ids, active)
    else:
        sync_data = super(L3_HA_NAT_db_mixin, self).get_sync_data(
            context, router_ids, active)
    return self._process_sync_ha_data(context, sync_data, host)