def delete_router(self, context, id):
    """Delete router ``id`` after verifying that it is unused.

    :raises l3.RouterInUse: if floating IPs or router-interface ports
        still reference the router, or an advanced service (VPN) vetoes
        the deletion.
    """
    with context.session.begin(subtransactions=True):
        router = self._get_router(context, id)

        # Ensure that the router is not used
        fips = self.get_floatingips_count(context.elevated(),
                                          filters={'router_id': [id]})
        if fips:
            raise l3.RouterInUse(router_id=id)

        device_filter = {'device_id': [id],
                         'device_owner': [DEVICE_OWNER_ROUTER_INTF]}
        ports = self._core_plugin.get_ports_count(context.elevated(),
                                                  filters=device_filter)
        if ports:
            raise l3.RouterInUse(router_id=id)

        #TODO(nati) Refactor here when we have router insertion model
        # Let the VPN service plugin veto deletion of routers it uses.
        vpnservice = manager.NeutronManager.get_service_plugins().get(
            constants.VPN)
        if vpnservice:
            vpnservice.check_router_in_use(context, id)

        # delete any gw port
        device_filter = {'device_id': [id],
                         'device_owner': [DEVICE_OWNER_ROUTER_GW]}
        ports = self._core_plugin.get_ports(context.elevated(),
                                            filters=device_filter)
        if ports:
            self._core_plugin._delete_port(context.elevated(),
                                           ports[0]['id'])

        context.session.delete(router)
    # Notify l3 agents only after the DB transaction commits.
    self.l3_rpc_notifier.router_deleted(context, id)
def delete_router(self, context, id):
    """Delete router ``id`` once it has no floating IPs or interfaces.

    :raises l3.RouterInUse: if the router is still referenced by
        floating IPs or router-interface ports.
    """
    with context.session.begin(subtransactions=True):
        router = self._get_router(context, id)

        # Ensure that the router is not used
        fips = self.get_floatingips_count(context.elevated(),
                                          filters={'router_id': [id]})
        if fips:
            raise l3.RouterInUse(router_id=id)

        device_filter = {'device_id': [id],
                         'device_owner': [DEVICE_OWNER_ROUTER_INTF]}
        ports = self.get_ports_count(context.elevated(),
                                     filters=device_filter)
        if ports:
            raise l3.RouterInUse(router_id=id)

        # delete any gw port
        device_filter = {'device_id': [id],
                         'device_owner': [DEVICE_OWNER_ROUTER_GW]}
        ports = self.get_ports(context.elevated(), filters=device_filter)
        if ports:
            self._delete_port(context.elevated(), ports[0]['id'])

        context.session.delete(router)
    # Notify l3 agents only after the DB transaction commits.
    l3_rpc_agent_api.L3AgentNotify.router_deleted(context, id)
def delete_router(self, context, router_id):
    """Delete a router locally and on the Big Switch controller.

    :raises l3.RouterInUse: if floating IPs or router-interface ports
        still reference the router.
    :returns: the parent plugin's delete_router result.
    """
    LOG.debug(_("NeutronRestProxyV2: delete_router() called"))
    with context.session.begin(subtransactions=True):
        orig_router = self._get_router(context, router_id)
        tenant_id = orig_router["tenant_id"]

        # Ensure that the router is not used
        router_filter = {'router_id': [router_id]}
        fips = self.get_floatingips_count(context.elevated(),
                                          filters=router_filter)
        if fips:
            raise l3.RouterInUse(router_id=router_id)

        device_owner = l3_db.DEVICE_OWNER_ROUTER_INTF
        device_filter = {'device_id': [router_id],
                         'device_owner': [device_owner]}
        ports = self.get_ports_count(context.elevated(),
                                     filters=device_filter)
        if ports:
            raise l3.RouterInUse(router_id=router_id)

        ret_val = super(NeutronRestProxyV2,
                        self).delete_router(context, router_id)

        # delete from network ctrl
        self.servers.rest_delete_router(tenant_id, router_id)
        return ret_val
def delete_router(self, context, id): """Deletes the DVA with the specific router id.""" # Copy of the parent validation code, shouldn't the base modules # provide functions for validating operations? device_owner_router_intf = l3_constants.DEVICE_OWNER_ROUTER_INTF fips = self.get_floatingips_count(context.elevated(), filters={"router_id": [id]}) if fips: raise l3.RouterInUse(router_id=id) device_filter = { "device_id": [id], "device_owner": [device_owner_router_intf] } ports = self.get_ports_count(context.elevated(), filters=device_filter) if ports: raise l3.RouterInUse(router_id=id) neutron_router = self._get_router(context, id) state_change = operation.Operation(self._set_db_router_state, args=(context, neutron_router, p_con.Status.DELETING)) self._dispatcher.dispatch_l3(d_context=embrane_ctx.DispatcherContext( p_con.Events.DELETE_ROUTER, neutron_router, context, state_change), args=()) LOG.debug(_("Deleting router=%s"), neutron_router) return neutron_router
def _check_router_in_use(self, context, router_id):
    """Raise RouterInUse if the router still has floating IPs or interfaces."""
    admin_context = context.elevated()
    with context.session.begin(subtransactions=True):
        # A router with associated floating IPs may not be removed.
        fip_count = self.get_floatingips_count(
            admin_context, filters={'router_id': [router_id]})
        if fip_count:
            raise l3.RouterInUse(router_id=router_id)
        # Neither may one that still owns internal interface ports.
        port_count = self.get_ports_count(
            admin_context,
            filters={'device_id': [router_id],
                     'device_owner': [l3_db.DEVICE_OWNER_ROUTER_INTF]})
        if port_count:
            raise l3.RouterInUse(router_id=router_id)
def delete_router(self, context, id):
    """Delete a router on both Neutron and the Nuage VSD backend."""
    neutron_router = self.get_router(context, id)
    session = context.session
    ent_rtr_mapping = nuagedb.get_ent_rtr_mapping_by_rtrid(session, id)
    if ent_rtr_mapping:
        # A router mapped to a Nuage domain may not be deleted while it
        # still owns interface ports.
        filters = {'device_id': [id],
                   'device_owner': [os_constants.DEVICE_OWNER_ROUTER_INTF]}
        ports = self.get_ports(context, filters)
        if ports:
            raise l3.RouterInUse(router_id=id)
        nuage_domain_id = ent_rtr_mapping['nuage_router_id']
        self.nuageclient.delete_router(nuage_domain_id)
    super(NuagePlugin, self).delete_router(context, id)
    # Clean up the tenant's VSD user/group once no subnets remain.
    nuage_zone = self.nuageclient.get_zone_by_routerid(id)
    if nuage_zone and not self._check_router_subnet_for_tenant(
            context, neutron_router['tenant_id']):
        user_id, group_id = self.nuageclient.get_usergroup(
            neutron_router['tenant_id'],
            ent_rtr_mapping['net_partition_id'])
        self.nuageclient.delete_user(user_id)
        self.nuageclient.delete_group(group_id)
def _validate_router_migration(self, context, router_db, router_res):
    """Allow transition only when admin_state_up=False

    :returns: True when the 'distributed' flag actually changes (and the
        change is permitted), False when nothing changes.
    :raises n_exc.BadRequest: if the router is administratively up.
    :raises l3.RouterInUse: if multiple registry callbacks veto the change.
    """
    original_distributed_state = router_db.extra_attributes.distributed
    requested_distributed_state = router_res.get('distributed', None)

    distributed_changed = (
        requested_distributed_state is not None and
        requested_distributed_state != original_distributed_state)
    if not distributed_changed:
        return False
    if router_db.admin_state_up:
        msg = _("Cannot change the 'distributed' attribute of active "
                "routers. Please set router admin_state_up to False "
                "prior to upgrade")
        raise n_exc.BadRequest(resource='router', msg=msg)

    # Notify advanced services of the imminent state transition
    # for the router.
    try:
        kwargs = {'context': context, 'router': router_db}
        registry.notify(
            resources.ROUTER, events.BEFORE_UPDATE, self, **kwargs)
    except exceptions.CallbackFailure as e:
        # NOTE(armax): preserve old check's behavior
        if len(e.errors) == 1:
            raise e.errors[0].error
        raise l3.RouterInUse(router_id=router_db['id'], reason=e)
    return True
def _validate_router_migration(self, context, router_db, router_res):
    """Allow centralized -> distributed state transition only."""
    if (router_db.extra_attributes.distributed and
            router_res.get('distributed') is False):
        # Downgrading a distributed router is not supported.
        LOG.info(_LI("Centralizing distributed router %s "
                     "is not supported"), router_db['id'])
        raise n_exc.BadRequest(
            resource='router',
            msg=_("Migration from distributed router to centralized is "
                  "not supported"))
    elif (not router_db.extra_attributes.distributed and
          router_res.get('distributed')):
        # router should be disabled in order for upgrade
        if router_db.admin_state_up:
            msg = _('Cannot upgrade active router to distributed. Please '
                    'set router admin_state_up to False prior to upgrade.')
            raise n_exc.BadRequest(resource='router', msg=msg)

        # Notify advanced services of the imminent state transition
        # for the router.
        try:
            kwargs = {'context': context, 'router': router_db}
            registry.notify(resources.ROUTER, events.BEFORE_UPDATE,
                            self, **kwargs)
        except exceptions.CallbackFailure as e:
            with excutils.save_and_reraise_exception():
                # NOTE(armax): preserve old check's behavior
                if len(e.errors) == 1:
                    raise e.errors[0].error
                raise l3.RouterInUse(router_id=router_db['id'], reason=e)
def _ensure_router_not_in_use(self, context, router_id):
    """Fetch the router, raising RouterInUse if it is still referenced.

    Floating IPs are checked first, then ports whose device_owner
    matches the router.  Returns the router DB object on success.
    """
    admin_ctx = context.elevated()
    router = self._get_router(context, router_id)
    fip_filters = {'router_id': [router_id]}
    if self.get_floatingips_count(admin_ctx, filters=fip_filters):
        raise l3.RouterInUse(router_id=router_id)
    owner = self._get_device_owner(context, router)
    port_filters = {'device_id': [router_id], 'device_owner': [owner]}
    if self._core_plugin.get_ports_count(admin_ctx,
                                         filters=port_filters):
        raise l3.RouterInUse(router_id=router_id)
    return router
def test__validate_router_migration_fail_with_fwservice(self):
    '''Test to check router migration with firewall.'''
    setup_result = self._router_migration_with_services_setup(
        test_side_effect_vpn=None,
        test_side_effect_fw=l3.RouterInUse(router_id='fake_id'),
        no_vpn=True,
        no_fw=False)
    router_db, mock_firewall, mock_vpnaas = setup_result
    # The firewall check must have been consulted exactly once.
    mock_firewall.assert_called_once_with(self.ctx, router_db)
def migration_callback(resource, event, trigger, **kwargs):
    """Veto router migration while the tenant still has firewalls."""
    context = kwargs['context']
    router = kwargs['router']
    fw_plugin = directory.get_plugin(fwaas_constants.FIREWALL)
    if not fw_plugin:
        return
    firewalls = fw_plugin.get_firewalls(
        context, filters={'tenant_id': [router['tenant_id']]})
    if firewalls:
        raise l3.RouterInUse(router_id=router['id'])
def migration_callback(resource, event, trigger, **kwargs):
    """Veto router migration while the tenant still has optimizers."""
    context = kwargs['context']
    router = kwargs['router']
    opt_plugin = manager.NeutronManager.get_service_plugins().get(
        p_const.OPTIMIZER)
    if not opt_plugin:
        return
    optimizers = opt_plugin.get_optimizers(
        context, filters={'tenant_id': [router['tenant_id']]})
    if optimizers:
        raise l3.RouterInUse(router_id=router['id'])
def check_router_has_no_firewall(self, context, router_db):
    """Check if FWaaS is associated with the legacy router.

    Returns True when no tenant firewall is bound; raises RouterInUse
    otherwise.
    """
    service_plugins = manager.NeutronManager.get_service_plugins()
    fwaas_plugin = service_plugins.get(constants.FIREWALL)
    if fwaas_plugin:
        firewalls = fwaas_plugin.get_firewalls(
            context, filters={'tenant_id': [router_db['tenant_id']]})
        if firewalls:
            raise l3.RouterInUse(router_id=router_db['id'])
    return True
def migration_callback(resource, event, trigger, **kwargs):
    """Veto router migration while the tenant still has firewalls."""
    context = kwargs['context']
    router = kwargs['router']
    fw_plugin = manager.NeutronManager.get_service_plugins().get(
        p_const.FIREWALL)
    if not fw_plugin:
        return
    firewalls = fw_plugin.get_firewalls(
        context, filters={'tenant_id': [router['tenant_id']]})
    if firewalls:
        raise l3.RouterInUse(router_id=router['id'])
def _check_router_not_in_use(self, context, router_id):
    """Notify BEFORE_DELETE subscribers so they may veto router deletion.

    A single callback error is re-raised as-is (preserving legacy
    behavior); multiple errors are wrapped into RouterInUse.
    """
    try:
        kwargs = {'context': context, 'router_id': router_id}
        registry.notify(
            resources.ROUTER, events.BEFORE_DELETE, self, **kwargs)
    except exceptions.CallbackFailure as e:
        with excutils.save_and_reraise_exception():
            if len(e.errors) == 1:
                raise e.errors[0].error
            raise l3.RouterInUse(router_id=router_id, reason=e)
def check_router_in_use(self, context, router_id):
    """Raise RouterInUse when any VPN service references the router."""
    vpnservices = self.get_vpnservices(
        context, filters={'router_id': [router_id]})
    if not vpnservices:
        return
    service_ids = ",".join(v['id'] for v in vpnservices)
    plural = "s" if len(vpnservices) > 1 else ""
    raise l3_exception.RouterInUse(
        router_id=router_id,
        reason="is currently used by VPN service%(plural)s "
               "(%(services)s)" % {'plural': plural,
                                   'services': service_ids})
def delete_router(self, context, router_id):
    """Delete a router locally and on the Big Cloud Fabric controller.

    After deletion, default-route policies may move to another tenant
    router; in that case BCF is updated with the surviving router.

    :raises l3.RouterInUse: if floating IPs or router-interface ports
        still reference the router.
    """
    with db.context_manager.writer.using(context):
        orig_router = self._get_router(context, router_id)
        tenant_id = orig_router["tenant_id"]

        # Ensure that the router is not used
        router_filter = {'router_id': [router_id]}
        fips = self.get_floatingips_count(context.elevated(),
                                          filters=router_filter)
        if fips:
            raise l3.RouterInUse(router_id=router_id)

        device_owner = lib_constants.DEVICE_OWNER_ROUTER_INTF
        device_filter = {'device_id': [router_id],
                         'device_owner': [device_owner]}
        ports = self.get_ports_count(context.elevated(),
                                     filters=device_filter)
        if ports:
            raise l3.RouterInUse(router_id=router_id)

        # TODO(wolverineav): hack until fixed at right place
        setattr(context, 'GUARD_TRANSACTION', False)
        super(L3RestProxy, self).delete_router(context, router_id)

        # added check to update router policy for another router for
        # default routes
        updated_router = (super(L3RestProxy,
                                self).update_policies_post_delete(
            context, tenant_id))
        # delete from network controller
        self.servers.rest_delete_router(tenant_id, router_id)
        if updated_router:
            # update BCF after removing the router first
            LOG.debug('Default policies now part of router: %s',
                      updated_router)
            router = self._update_ext_gateway_info(context,
                                                   updated_router)
            self.servers.rest_update_router(tenant_id, router,
                                            router['id'])
def create_ipsec_site_connection(self, context, ipsec_site_connection):
    """Create an IPSec site connection and plug it into the router netns.

    Only one IPSec site connection per VPN service is supported: when
    the Nuage dummy resources are already claimed, the freshly created
    DB record is removed and RouterInUse is raised.
    """
    l3_plugin = self._get_l3_plugin()
    vpnservice = self.service_plugin.get_vpnservice(
        context, ipsec_site_connection['vpnservice_id'])
    ns_name = 'vpn-' + vpnservice['router_id']
    try:
        res = self._get_vpn_serv_nuage_resources(context, vpnservice)
        res['p_dummy']['gw'] = res['s_dummy']['gateway_ip']
        ns_ports = (res['p_dummy'], res['p_openswan'])
    except Exception:
        # case when the user is trying to create an IPSec Site connection
        # when already there is one associated with the current VPN Svc
        self.service_plugin.delete_ipsec_site_connection(
            context, ipsec_site_connection['id'])
        raise l3.RouterInUse(
            router_id=vpnservice['router_id'],
            reason="is currently associated with IPSec Site Connection."
                   " One IPSec Site Connection per VPN service")
    else:
        self.agent_rpc.tracking(context, vpnservice['router_id'])
        # Plug each dummy port into OVS inside the VPN namespace.
        for prt in ns_ports:
            device_name = 'vm-' + "".join(prt['mac_address'].split(':'))
            l3_plugin.update_port(
                context, prt['id'],
                {'port': {'device_id': prt['id']}})
            cidr = self._get_cidr_list(
                context,
                prt['fixed_ips'][0]['ip_address'],
                prt['fixed_ips'][0]['subnet_id'])
            # Only the dummy port carrying a gateway gets gw_ip set.
            gw_ip = [prt.get('gw')] if prt.get('gw') else []
            self.agent_rpc.plug_to_ovs(context, vpnservice['router_id'],
                                       device_name=device_name,
                                       ns_name=ns_name,
                                       cidr=cidr,
                                       gw_ip=gw_ip,
                                       network_id=prt['network_id'],
                                       port_id=prt['id'],
                                       mac=prt['mac_address'])
            l3_plugin.update_port(
                context, prt['id'],
                {'port': {'device_owner': 'compute:None'}})
        l3_plugin.add_rules_vpn_ping(
            context, vpnservice['router_id'],
            ipsec_site_connection['peer_cidrs'][0],
            res['p_openswan'])
    super(NuageIPsecVPNDriver,
          self).create_ipsec_site_connection(context,
                                             ipsec_site_connection)
def delete_router(self, context, router_id, router):
    """Queue deletion of a router on the HDN backend.

    Runs the standard in-use checks, marks the router PENDING_DELETE
    and notifies the HDN operators asynchronously.
    """
    admin_context = context.elevated()
    # Pre-delete checks
    # Ensure that the router is not used
    if self.get_floatingips_count(admin_context,
                                  filters={'router_id': [router_id]}):
        raise l3.RouterInUse(router_id=router_id)
    port_filter = {'device_id': [router_id],
                   'device_owner': [l3_db.DEVICE_OWNER_ROUTER_INTF]}
    if self._core_plugin.get_ports_count(admin_context,
                                         filters=port_filter):
        raise l3.RouterInUse(router_id=router_id)
    # Put the router in PENDING_DELETE status
    with context.session.begin(subtransactions=True):
        router_db = self._get_router(context, router_id)
        router_db.status = STATUS_PENDING_DELETE
    # Notify HDN operators
    hdnlib.notify_router_delete({'id': router_id,
                                 'tenant_id': context.tenant_id})
    LOG.debug(_("Queued request to delete router: %s"), router_id)
def _validate_nuage_vpn_svc_create(self, context, vpnservice_dict):
    """Enforce one VPN service per router, on the default net-partition.

    On any violation the freshly created DB record is removed before
    the exception is raised.
    """
    router_id = vpnservice_dict['router_id']
    existing_services = self.get_vpn_services_using(context, router_id)
    if len(existing_services) > 1:
        self._delete_from_db(context, vpnservice_dict['id'])
        raise l3.RouterInUse(
            router_id=router_id,
            reason="is currently used by VPN service."
                   " One VPN service per router")
    if not self._get_l3_plugin().rtr_in_def_ent(context, router_id):
        self._delete_from_db(context, vpnservice_dict['id'])
        msg = _('router %s is not associated with '
                'default net-partition') % router_id
        raise n_exc.BadRequest(resource='vpn-service', msg=msg)
def delete_router(self, context, router_id):
    """Delete a router locally and on the Big Cloud Fabric controller.

    After deletion, default-route policies may move to another tenant
    router; in that case BCF is updated with the surviving router.

    :raises l3.RouterInUse: if floating IPs or router-interface ports
        still reference the router.
    """
    with context.session.begin(subtransactions=True):
        orig_router = self._get_router(context, router_id)
        tenant_id = orig_router["tenant_id"]

        # Ensure that the router is not used
        router_filter = {'router_id': [router_id]}
        fips = self.get_floatingips_count(context.elevated(),
                                          filters=router_filter)
        if fips:
            raise l3.RouterInUse(router_id=router_id)

        device_owner = lib_constants.DEVICE_OWNER_ROUTER_INTF
        device_filter = {'device_id': [router_id],
                         'device_owner': [device_owner]}
        ports = self.get_ports_count(context.elevated(),
                                     filters=device_filter)
        if ports:
            raise l3.RouterInUse(router_id=router_id)

        super(L3RestProxy, self).delete_router(context, router_id)

        # added check to update router policy for another router for
        # default routes
        updated_router = (super(L3RestProxy,
                                self).apply_default_post_delete(
            context, tenant_id))
        # delete from network controller
        self.servers.rest_delete_router(tenant_id, router_id)
        if updated_router:
            # update BCF after removing the router first.
            # Use lazy %-args so formatting is skipped when debug is off.
            LOG.debug('Default policies now part of router: %s',
                      updated_router)
            # BUG FIX: was `self.server.rest_update_router(...)` --
            # `self.server` does not exist; the controller handle used
            # throughout this method (and the delete call above) is
            # `self.servers`.
            self.servers.rest_update_router(tenant_id, updated_router,
                                            updated_router['id'])
def delete_router(self, context, id):
    """Delete a router on both Neutron and the Nuage backend."""
    session = context.session
    ent_rtr_mapping = nuagedb.get_ent_rtr_mapping_by_rtrid(session, id)
    if ent_rtr_mapping:
        # A router mapped to a Nuage domain may not be deleted while it
        # still owns interface ports.
        filters = {'device_id': [id],
                   'device_owner': [os_constants.DEVICE_OWNER_ROUTER_INTF]}
        ports = self.get_ports(context, filters)
        if ports:
            raise l3.RouterInUse(router_id=id)
        nuage_router_id = ent_rtr_mapping['nuage_router_id']
        self.nuageclient.delete_router(nuage_router_id)
    # Capture the zone mapping before the DB row disappears.
    router_zone = nuagedb.get_rtr_zone_mapping(session, id)
    super(NuagePlugin, self).delete_router(context, id)
    # Remove the backend user/group once the tenant has no subnets left.
    if router_zone and not self._check_router_subnet_for_tenant(context):
        self.nuageclient.delete_user(router_zone['nuage_user_id'])
        self.nuageclient.delete_group(router_zone['nuage_group_id'])
def _validate_router_migration(self, context, router_db, router_res):
    """Allow centralized -> distributed state transition only."""
    if (router_db.extra_attributes.distributed and
            router_res.get('distributed') is False):
        # Downgrading a distributed router is not supported.
        LOG.info(_LI("Centralizing distributed router %s "
                     "is not supported"), router_db['id'])
        raise NotImplementedError()
    elif (not router_db.extra_attributes.distributed and
          router_res.get('distributed')):
        # Notify advanced services of the imminent state transition
        # for the router.
        try:
            kwargs = {'context': context, 'router': router_db}
            registry.notify(resources.ROUTER, events.BEFORE_UPDATE,
                            self, **kwargs)
        except exceptions.CallbackFailure as e:
            with excutils.save_and_reraise_exception():
                # NOTE(armax): preserve old check's behavior
                if len(e.errors) == 1:
                    raise e.errors[0].error
                raise l3.RouterInUse(router_id=router_db['id'], reason=e)