def post_update_port(context, id, port):
    """Associate additional fixed IPs with an existing port.

    :param context: neutron api request context
    :param id: UUID of the port to update
    :param port: request body; must contain a "port" dict, optionally
        carrying a "fixed_ips" list whose entries hold either "ip_id"
        or "ip_address".
    :returns: the updated port as a view dict
    :raises exceptions.BadRequest: when the "port" body is missing
    :raises exceptions.PortNotFound: when no port matches ``id``
    """
    LOG.info("post_update_port %s for tenant %s" % (id, context.tenant_id))
    if not port.get("port"):
        raise exceptions.BadRequest(resource="ports",
                                    msg="Port body required")
    port_db = db_api.port_find(context, id=id, scope=db_api.ONE)
    if not port_db:
        raise exceptions.PortNotFound(port_id=id, net_id="")
    port = port["port"]
    if "fixed_ips" in port and port["fixed_ips"]:
        # The IPAM strategy depends only on the port's network, so
        # resolve it once instead of once per fixed_ip (it was
        # recomputed inside the loop before).
        ipam_driver = ipam.IPAM_REGISTRY.get_strategy(
            port_db["network"]["ipam_strategy"])
        for ip in port["fixed_ips"]:
            address = None
            if ip:
                if "ip_id" in ip:
                    ip_id = ip["ip_id"]
                    address = db_api.ip_address_find(
                        context, id=ip_id, tenant_id=context.tenant_id,
                        scope=db_api.ONE)
                elif "ip_address" in ip:
                    ip_address = ip["ip_address"]
                    net_address = netaddr.IPAddress(ip_address)
                    address = db_api.ip_address_find(
                        context, ip_address=net_address,
                        network_id=port_db["network_id"],
                        tenant_id=context.tenant_id, scope=db_api.ONE)
                    if not address:
                        # Unknown address: allocate it explicitly.
                        address = ipam_driver.allocate_ip_address(
                            context, port_db["network_id"], id,
                            CONF.QUARK.ipam_reuse_after,
                            ip_address=ip_address)
            else:
                # Empty fixed_ip entry: let IPAM pick any address.
                address = ipam_driver.allocate_ip_address(
                    context, port_db["network_id"], id,
                    CONF.QUARK.ipam_reuse_after)
            # Reactivate the address in case it was pending reclaim.
            address["deallocated"] = 0
            # Only append addresses the port is not already bound to.
            already_contained = any(
                address["id"] == port_address["id"]
                for port_address in port_db["ip_addresses"])
            if not already_contained:
                port_db["ip_addresses"].append(address)
    return v._make_port_dict(port_db)
def _update_router_db(self, context, router_id, data, gw_info):
    """Update a router row, handling transitions of the HA attribute.

    Rejects combinations of HA and distributed (DVR), delegates the
    plain update to the parent mixin, and — when the HA flag actually
    changes — reallocates/releases the VRRP id, rebinds agents, and
    creates or removes the HA interfaces.

    :raises l3_ha.DistributedHARouterNotSupported: HA+DVR requested
    :raises n_exc.BadRequest: HA changed while router admin_state_up
    """
    router_db = self._get_router(context, router_id)
    original_distributed_state = router_db.extra_attributes.distributed
    original_ha_state = router_db.extra_attributes.ha
    # 'ha' is popped so the parent update never sees it; 'distributed'
    # is only inspected here and left in place for the parent.
    requested_ha_state = data.pop('ha', None)
    requested_distributed_state = data.get('distributed', None)
    # HA and distributed are mutually exclusive, whether the
    # conflicting attribute is pre-existing or requested now.
    if ((original_ha_state and requested_distributed_state) or
            (requested_ha_state and original_distributed_state) or
            (requested_ha_state and requested_distributed_state)):
        raise l3_ha.DistributedHARouterNotSupported()
    with context.session.begin(subtransactions=True):
        router_db = super(L3_HA_NAT_db_mixin, self)._update_router_db(
            context, router_id, data, gw_info)
        ha_not_changed = (requested_ha_state is None or
                          requested_ha_state == original_ha_state)
        if ha_not_changed:
            # Nothing HA-related to do; plain update is complete.
            return router_db
        if router_db.admin_state_up:
            msg = _('Cannot change HA attribute of active routers. Please '
                    'set router admin_state_up to False prior to upgrade.')
            raise n_exc.BadRequest(resource='router', msg=msg)
        ha_network = self.get_ha_network(context,
                                         router_db.tenant_id)
        router_db.extra_attributes.ha = requested_ha_state
        if not requested_ha_state:
            # Downgrading from HA: release the VRRP id allocation
            # while we still know it.
            self._delete_vr_id_allocation(
                context, ha_network, router_db.extra_attributes.ha_vr_id)
            router_db.extra_attributes.ha_vr_id = None
    # The HA attribute has changed. First unbind the router from agents
    # to force a proper re-scheduling to agents.
    # TODO(jschwarz): This will have to be more selective to get HA + DVR
    # working (Only unbind from dvr_snat nodes).
    self._unbind_ha_router(context, router_id)
    if requested_ha_state:
        # Upgrading to HA: ensure the tenant HA network exists, then
        # wire up the VRRP id and HA interfaces.
        if not ha_network:
            ha_network = self._create_ha_network(context,
                                                 router_db.tenant_id)
        self._set_vr_id(context, router_db, ha_network)
        self._create_ha_interfaces(context, router_db, ha_network)
        self._notify_ha_interfaces_updated(context, router_db.id)
    else:
        self._delete_ha_interfaces(context, router_db.id)
        self._notify_ha_interfaces_updated(context, router_db.id)
    return router_db
def create_pool(self, context, pool):
    """Create a load-balancer pool bound to its subnet's router.

    Validates that the pool subnet is private and routed, that the
    router does not already carry a pool, then creates the pool in the
    DB, records the backend task, binds the router id, and pushes the
    pool to the MidoNet API before marking it ACTIVE.

    :raises n_exc.BadRequest: on an external subnet, an unrouted
        subnet, or a router that already has a pool
    """
    LOG.debug("MidonetMixin.create_pool called: %(pool)r", {'pool': pool})
    subnet = db_util.get_subnet(context, pool['pool']['subnet_id'])
    if db_util.is_subnet_external(context, subnet):
        msg = _("pool subnet must not be public")
        raise n_exc.BadRequest(resource='subnet', msg=msg)
    router_id = db_util.get_router_from_subnet(context, subnet)
    if not router_id:
        msg = _("pool subnet must be associated with router")
        raise n_exc.BadRequest(resource='router', msg=msg)
    pool['pool'].update({'router_id': router_id})
    # One pool per router: reject if a binding already exists.
    if self._get_resource_router_id_binding(context, loadbalancer_db.Pool,
                                            router_id=router_id):
        msg = _("A pool is already associated with the router")
        raise n_exc.BadRequest(resource='router', msg=msg)
    with context.session.begin(subtransactions=True):
        p = super(MidonetMixin, self).create_pool(context, pool)
        # Record the backend-sync task alongside the DB row.
        task.create_task(context, task.CREATE, data_type=task.POOL,
                         resource_id=p['id'], data=p)
        res = {'id': p['id'], rsi.ROUTER_ID: router_id}
        self._process_create_resource_router_id(context, res,
                                                loadbalancer_db.Pool)
        p[rsi.ROUTER_ID] = router_id
        # Push to the MidoNet backend inside the transaction so a
        # backend failure rolls the DB changes back.
        self.api_cli.create_pool(p)
        p['status'] = constants.ACTIVE
        self.update_status(context, loadbalancer_db.Pool, p['id'],
                           p['status'])
    LOG.debug("MidonetMixin.create_pool exiting: %(pool)r", {'pool': p})
    return p
def create_nuage_external_security_group(self, context,
                                         nuage_external_security_group):
    """Create an external security group on the VSD.

    The request must name either a subnet_id (resolved to a VSD L2
    domain) or a router_id (resolved to a VSD L3 domain) so the group
    can be anchored to a domain.
    """
    ext_sg_data = nuage_external_security_group[
        'nuage_external_security_group']
    subnet_id = ext_sg_data.get('subnet_id')
    router_id = ext_sg_data.get('router_id')
    if not (subnet_id or router_id):
        raise n_exc.BadRequest(
            resource='nuage_external_security_group',
            msg=_("Either router_id or subnet_id must be specified"))

    l2dom_id = None
    l3dom_id = None
    if subnet_id:
        # Resolve the subnet to its backing VSD L2 domain.
        mapping = nuagedb.get_subnet_l2dom_by_id(context.session,
                                                 subnet_id)
        if mapping and mapping['nuage_l2dom_tmplt_id']:
            l2dom_id = mapping['nuage_subnet_id']
        if not l2dom_id:
            raise n_exc.BadRequest(
                resource='nuage_external_security_group',
                msg=_("No subnet mapping found for subnet %s") % subnet_id)
    elif router_id:
        # Resolve the router to its backing VSD L3 domain.
        vsd_router = self.nuageclient.get_router_by_external(router_id)
        if vsd_router:
            l3dom_id = vsd_router['ID']
        if not l3dom_id:
            raise n_exc.BadRequest(
                resource='nuage_external_security_group',
                msg=_("VSD domain not found for router %s") % router_id)

    create_params = {
        'l2dom_id': l2dom_id,
        'l3dom_id': l3dom_id,
        'name': ext_sg_data.get('name'),
        'description': ext_sg_data.get('description'),
        'extended_community': ext_sg_data.get('extended_community_id'),
    }
    ext_sg_resp = (
        self.nuageclient.create_nuage_external_security_group(
            create_params))
    return self._make_external_security_group_dict(ext_sg_resp[0],
                                                   context=context)
def update(self, request, id, body=None, **kwargs):
    """Updates the specified entity's attributes."""
    try:
        notification_payload = body.copy()
    except AttributeError:
        # A non-dict body cannot be copied; reject the request.
        raise exceptions.BadRequest(
            resource='body', msg=_("Invalid format: %s") % request.body)
    notification_payload['id'] = id
    event_type = self._resource + '.update.start'
    self._notifier.info(request.context, event_type, notification_payload)
    return self._update(request, id, body, **kwargs)
def _get_template(self):
    """Lazily fetch and cache the loadbalancer service template.

    Reads the Contrail service-template by its well-known fully
    qualified name and stores it on the instance; a no-op once cached.

    :raises n_exc.BadRequest: if the service template does not exist.
    """
    if self._lb_template is not None:
        return
    try:
        tmpl = self._api.service_template_read(
            fq_name=LOADBALANCER_SERVICE_TEMPLATE)
    except NoIdError:
        # Bug fix: the previous message interpolated ``pool_id``, a name
        # that does not exist in this scope, so the error path itself
        # crashed with NameError. Report the missing template instead.
        msg = ('Loadbalancer service-template %s not found' %
               (LOADBALANCER_SERVICE_TEMPLATE,))
        raise n_exc.BadRequest(resource='pool', msg=msg)
    self._lb_template = tmpl
def _validate_router(self, context, router_assoc):
    """Check a router association and return the referenced router.

    Rejects associations that do not name a router, then loads the
    router via the L3 service plugin and validates its network
    associations against the core plugin.
    """
    if not router_assoc or 'router_id' not in router_assoc:
        raise n_exc.BadRequest(resource='bgpvpn',
                               msg='no router specified')
    service_plugins = manager.NeutronManager.get_service_plugins()
    l3_plugin = service_plugins.get(plugin_constants.L3_ROUTER_NAT)
    router = l3_plugin.get_router(context, router_assoc['router_id'])
    core_plugin = manager.NeutronManager.get_plugin()
    self._validate_router_has_net_assocs(context, router, core_plugin)
    return router
def delete_net_partition(self, context, id):
    """Delete a net-partition from the VSD and the Neutron DB.

    Refuses the delete while any router is still attached to the
    partition, and verifies the partition actually exists first.
    """
    if nuagedb.get_ent_rtr_mapping_by_entid(context.session, id):
        msg = (_("One or more router still attached to "
                 "net_partition %s.") % id)
        raise n_exc.BadRequest(resource='net_partition', msg=msg)
    net_partition = nuagedb.get_net_partition_by_id(context.session, id)
    if not net_partition:
        msg = (_("NetPartition with %s does not exist") % id)
        raise n_exc.BadRequest(resource='net_partition', msg=msg)
    # Remove the backing VSD templates first, then drop the DB record.
    self.nuageclient.delete_net_partition(
        net_partition['id'],
        l3dom_id=net_partition['l3dom_tmplt_id'],
        l2dom_id=net_partition['l2dom_tmplt_id'])
    with context.session.begin(subtransactions=True):
        nuagedb.delete_net_partition(context.session, net_partition)
def _validate_pools_with_subnetpool(self, subnet):
    """Verifies that allocation pools are set correctly

    Allocation pools can be set for specific subnet request only
    """
    pools_set = attributes.is_attr_set(subnet['allocation_pools'])
    cidr_set = attributes.is_attr_set(subnet['cidr'])
    # An "any" subnetpool request carries no CIDR, so explicit
    # allocation pools cannot be honoured there.
    if pools_set and not cidr_set:
        reason = _("allocation_pools allowed only "
                   "for specific subnet requests.")
        raise n_exc.BadRequest(resource='subnets', msg=reason)
def _validate_cidrs_fit_into_subnets(cidrs, subnets):
    """Ensure each CIDR fits inside every same-version subnet CIDR.

    :param cidrs: iterable of CIDR strings to validate
    :param subnets: iterable of subnet dicts, each with a "cidr" key
    :raises exceptions.BadRequest: when a CIDR falls outside a subnet
        CIDR of the same IP version
    """
    # Lazy %-style logging args: the message is only rendered when the
    # INFO level is actually emitted.
    LOG.info("validate_cidrs_all_fit_into_subnets with CIDRs (%s) "
             "and subnets (%s)", cidrs, subnets)
    for cidr in cidrs:
        cidr = netaddr.IPNetwork(cidr)
        for subnet in subnets:
            subnet_cidr = netaddr.IPNetwork(subnet["cidr"])
            # Only compare same-version CIDRs; an IPv6 policy CIDR can
            # never be "in" an IPv4 subnet and vice versa.
            if (cidr.version == subnet_cidr.version and
                    cidr not in subnet_cidr):
                raise exceptions.BadRequest(
                    resource="ip_policy",
                    msg="CIDR %s not in subnet CIDR %s"
                        % (cidr, subnet_cidr))
def add_router_interface(self, context, router_id, interface_info):
    """Add interface to a router."""
    if not interface_info:
        raise exc.BadRequest(
            resource='router',
            msg=_("Either subnet_id or port_id must be specified"))
    # Exactly one of port_id / subnet_id may identify the interface.
    if 'port_id' in interface_info and 'subnet_id' in interface_info:
        raise exc.BadRequest(
            resource='router',
            msg=_("Cannot specify both subnet-id and port-id"))
    res_dict = self._encode_resource(resource_id=router_id,
                                     resource=interface_info)
    status_code, res_info = self._request_backend(
        context, res_dict, 'router', 'ADDINTERFACE')
    if status_code != requests.codes.ok:
        self._raise_contrail_error(status_code, info=res_info,
                                   obj_name='add_router_interface')
    return res_info
def _get_net_driver(network, port=None):
    """Resolve the network driver for a network, optionally per-port.

    A port may override the network's plugin through its own
    "network_plugin" attribute.
    """
    port_driver = None
    if port:
        # Falsy values fall back to None, same as no override.
        port_driver = port.get("network_plugin") or None
    try:
        return registry.DRIVER_REGISTRY.get_driver(
            network["network_plugin"], port_driver=port_driver)
    except Exception as e:
        # Deliberately broad: any registry failure surfaces as a 400.
        raise exceptions.BadRequest(
            resource="ports", msg="invalid network_plugin: %s" % e)
def update(self, request, id, body=None, **kwargs):
    """Updates the specified entity's attributes.

    Emits .update.start/.update.end notifications around the plugin
    call, enforces policy against the merged original object, and
    sends a DHCP notification with the result.

    :raises exceptions.BadRequest: when the body is not a dict
    :raises webob.exc.HTTPNotFound: when policy denies the update
        (masked as not-found to avoid leaking existence)
    """
    parent_id = kwargs.get(self._parent_id_name)
    try:
        payload = body.copy()
    except AttributeError:
        msg = _("Invalid format: %s") % request.body
        raise exceptions.BadRequest(resource='body', msg=msg)
    payload['id'] = id
    notifier_api.notify(request.context,
                        self._publisher_id,
                        self._resource + '.update.start',
                        notifier_api.CONF.default_notification_level,
                        payload)
    body = Controller.prepare_request_body(request.context, body, False,
                                           self._resource, self._attr_info,
                                           allow_bulk=self._allow_bulk)
    action = self._plugin_handlers[self.UPDATE]
    # Load object to check authz
    # but pass only attributes in the original body and required
    # by the policy engine to the policy 'brain'
    # NOTE: dict.items() (not the Python-2-only iteritems()) keeps this
    # portable across Python versions.
    field_list = [name for (name, value) in self._attr_info.items()
                  if (value.get('required_by_policy') or
                      value.get('primary_key') or
                      'default' not in value)]
    orig_obj = self._item(request, id, field_list=field_list,
                          parent_id=parent_id)
    orig_obj.update(body[self._resource])
    try:
        policy.enforce(request.context, action, orig_obj)
    except exceptions.PolicyNotAuthorized:
        # To avoid giving away information, pretend that it
        # doesn't exist
        msg = _('The resource could not be found.')
        raise webob.exc.HTTPNotFound(msg)
    obj_updater = getattr(self._plugin, action)
    kwargs = {self._resource: body}
    if parent_id:
        kwargs[self._parent_id_name] = parent_id
    obj = obj_updater(request.context, id, **kwargs)
    result = {self._resource: self._view(request.context, obj)}
    notifier_method = self._resource + '.update.end'
    notifier_api.notify(request.context,
                        self._publisher_id,
                        notifier_method,
                        notifier_api.CONF.default_notification_level,
                        result)
    self._send_dhcp_notification(request.context,
                                 result,
                                 notifier_method)
    return result
def update_port(context, id, port):
    """Update values of a port.

    : param context: neutron api request context
    : param id: UUID representing the port to update.
    : param port: dictionary with keys indicating fields to update.
        valid keys are those that have a value of True for 'allow_put'
        as listed in the RESOURCE_ATTRIBUTE_MAP object in
        neutron/api/v2/attributes.py.
    """
    LOG.info("update_port %s for tenant %s" % (id, context.tenant_id))
    port_db = db_api.port_find(context, id=id, scope=db_api.ONE)
    if not port_db:
        raise exceptions.PortNotFound(port_id=id)
    address_pairs = []
    fixed_ips = port["port"].pop("fixed_ips", None)
    if fixed_ips:
        # Re-IP the port: release every current address, then allocate
        # one address per requested fixed_ip.
        ipam_driver.deallocate_ip_address(
            context, port_db, ipam_reuse_after=CONF.QUARK.ipam_reuse_after)
        addresses = []
        for fixed_ip in fixed_ips:
            subnet_id = fixed_ip.get("subnet_id")
            ip_address = fixed_ip.get("ip_address")
            if not (subnet_id and ip_address):
                raise exceptions.BadRequest(
                    resource="fixed_ips",
                    msg="subnet_id and ip_address required")
            # Note: we don't allow overlapping subnets, thus subnet_id is
            # ignored.
            addresses.append(
                ipam_driver.allocate_ip_address(
                    context, port_db["network_id"], id,
                    CONF.QUARK.ipam_reuse_after, ip_address=ip_address))
        port["port"]["addresses"] = addresses
        # Build MAC/IP allowed-address pairs for the backend driver.
        mac_address_string = str(netaddr.EUI(port_db.mac_address,
                                             dialect=netaddr.mac_unix))
        address_pairs = [{'mac_address': mac_address_string,
                          'ip_address':
                              address.get('address_readable', '')}
                         for address in addresses]
    group_ids, security_groups = v.make_security_group_list(
        context, port["port"].pop("security_groups", None))
    # Sync security groups and allowed pairs to the network backend
    # before persisting the Neutron-side update.
    net_driver.update_port(context, port_id=port_db.backend_key,
                           security_groups=group_ids,
                           allowed_pairs=address_pairs)
    port["port"]["security_groups"] = security_groups
    port = db_api.port_update(context, port_db, **port["port"])
    return v._make_port_dict(port)
def set_all(self, model, **tags):
    """Validate and set all known tags on a port."""
    for tag_name, tag in self.tags.items():
        if tag_name not in tags:
            continue
        value = tags.pop(tag_name)
        if not value:
            # Skip empty/falsy values rather than setting them.
            continue
        try:
            tag.set(model, value)
        except TagValidationError as e:
            raise exceptions.BadRequest(resource="tags",
                                        msg="%s" % (e.message))
def add_router_interface(self, context, router_id, interface_info):
    """Add interface to a router."""
    if not interface_info:
        raise exc.BadRequest(
            resource='router',
            msg="Either subnet_id or port_id must be specified")
    # The interface must be identified by exactly one of the two keys.
    if 'port_id' in interface_info and 'subnet_id' in interface_info:
        raise exc.BadRequest(
            resource='router',
            msg="Cannot specify both subnet-id and port-id")
    self._set_user_auth_token()
    iface_handler = rtr_handler.LogicalRouterInterfaceHandler(
        self._vnc_lib)
    return iface_handler.add_router_interface(
        self._get_context_dict(context), router_id,
        port_id=interface_info.get('port_id'),
        subnet_id=interface_info.get('subnet_id'))
def _validate_ip_version_with_subnetpool(self, subnet, subnetpool):
    """Validates ip version for subnet_pool and requested subnet"""
    requested_version = subnet.get('ip_version')
    if not attributes.is_attr_set(requested_version):
        # No version requested: nothing to check against the pool.
        return
    if requested_version == subnetpool.ip_version:
        return
    reason = _("Cannot allocate IPv%(req_ver)s subnet from "
               "IPv%(pool_ver)s subnet pool") % {
        'req_ver': str(subnet['ip_version']),
        'pool_ver': str(subnetpool.ip_version)}
    raise n_exc.BadRequest(resource='subnets', msg=reason)
def _get_router_for_floatingip(self, context, internal_port,
                               internal_subnet_id,
                               external_network_id):
    """We need to over-load this function so that we only return the
    user visible router and never its redundancy routers (as they never
    have floatingips associated with them).
    """
    subnet = self._core_plugin._get_subnet(context, internal_subnet_id)
    if not subnet['gateway_ip']:
        msg = (_('Cannot add floating IP to port on subnet %s '
                 'which has no gateway_ip') % internal_subnet_id)
        raise n_exc.BadRequest(resource='floatingip', msg=msg)

    gw_port = orm.aliased(models_v2.Port, name="gw_port")
    # Router interfaces on the port's network whose IP allocation is on
    # the internal subnet, joined to routers whose gateway port sits on
    # the requested external network.
    routerport_qry = context.session.query(
        l3_db.RouterPort.router_id,
        models_v2.IPAllocation.ip_address).join(
        models_v2.Port, models_v2.IPAllocation).filter(
        models_v2.Port.network_id == internal_port['network_id'],
        l3_db.RouterPort.port_type.in_(
            l3_constants.ROUTER_INTERFACE_OWNERS),
        models_v2.IPAllocation.subnet_id == internal_subnet_id
    ).join(gw_port,
           gw_port.device_id == l3_db.RouterPort.router_id).filter(
        gw_port.network_id == external_network_id).distinct()

    # Ensure that redundancy routers (in a ha group) are not returned,
    # since only the user visible router should have floatingips.
    # This can be done by checking that the id of routers does not
    # appear in the 'redundancy_router_id' column in the
    # 'cisco_router_redundancy_bindings' table.
    routerport_qry = routerport_qry.outerjoin(
        RouterRedundancyBinding,
        RouterRedundancyBinding.redundancy_router_id ==
        l3_db.RouterPort.router_id)
    routerport_qry = routerport_qry.filter(
        RouterRedundancyBinding.redundancy_router_id == expr.null())

    # Prefer the router whose interface IP is the subnet's gateway;
    # otherwise fall back to the first eligible router found.
    first_router_id = None
    for router_id, interface_ip in routerport_qry:
        if interface_ip == subnet['gateway_ip']:
            return router_id
        if not first_router_id:
            first_router_id = router_id
    if first_router_id:
        return first_router_id

    raise l3.ExternalGatewayForFloatingIPNotFound(
        subnet_id=internal_subnet_id,
        external_network_id=external_network_id,
        port_id=internal_port['id'])
def create_floatingip(
        self, context, floatingip,
        initial_status=l3_constants.FLOATINGIP_STATUS_ACTIVE):
    """Create a floating IP backed by a hidden external-network port.

    Allocates the floating address by creating an internal port on the
    external network, records the FloatingIP row, associates it with
    the internal port if requested, and notifies the serving router.

    :raises n_exc.BadRequest: when the network is not external
    :raises n_exc.ExternalIpAddressExhausted: when no IP could be
        allocated on the external network
    """
    fip = floatingip['floatingip']
    tenant_id = self._get_tenant_id_for_create(context, fip)
    fip_id = uuidutils.generate_uuid()
    f_net_id = fip['floating_network_id']
    if not self._core_plugin._network_is_external(context, f_net_id):
        msg = _("Network %s is not a valid external network") % f_net_id
        raise n_exc.BadRequest(resource='floatingip', msg=msg)
    with context.session.begin(subtransactions=True):
        # This external port is never exposed to the tenant.
        # it is used purely for internal system and admin use when
        # managing floating IPs.
        external_port = self._core_plugin.create_port(context.elevated(), {
            'port': {'tenant_id': '',  # tenant intentionally not set
                     'network_id': f_net_id,
                     'mac_address': attributes.ATTR_NOT_SPECIFIED,
                     'fixed_ips': attributes.ATTR_NOT_SPECIFIED,
                     'admin_state_up': True,
                     'device_id': fip_id,
                     'device_owner': DEVICE_OWNER_FLOATINGIP,
                     'name': ''}})
        # Ensure IP addresses are allocated on external port
        if not external_port['fixed_ips']:
            raise n_exc.ExternalIpAddressExhausted(net_id=f_net_id)
        floating_fixed_ip = external_port['fixed_ips'][0]
        floating_ip_address = floating_fixed_ip['ip_address']
        floatingip_db = FloatingIP(
            id=fip_id,
            tenant_id=tenant_id,
            status=initial_status,
            floating_network_id=fip['floating_network_id'],
            floating_ip_address=floating_ip_address,
            floating_port_id=external_port['id'])
        fip['tenant_id'] = tenant_id
        # Update association with internal port
        # and define external IP address
        self._update_fip_assoc(context, fip,
                               floatingip_db, external_port)
        context.session.add(floatingip_db)
    router_id = floatingip_db['router_id']
    if router_id:
        # Tell the L3 agents serving this router about the new FIP.
        self.l3_rpc_notifier.routers_updated(
            context, [router_id], 'create_floatingip')
    return self._make_floatingip_dict(floatingip_db)
def _validate_nuage_vpn_svc_create(self, context, vpnservice_dict):
    """Ensure the router can host the freshly created VPN service.

    A router may carry at most one VPN service and must live in the
    default net-partition; on violation the new DB row is removed
    before raising.
    """
    rtr_id = vpnservice_dict['router_id']
    if len(self.get_vpn_services_using(context, rtr_id)) > 1:
        self._delete_from_db(context, vpnservice_dict['id'])
        raise l3.RouterInUse(
            router_id=rtr_id,
            reason="is currently used by VPN service."
                   " One VPN service per router")
    if not self._get_l3_plugin().rtr_in_def_ent(context, rtr_id):
        self._delete_from_db(context, vpnservice_dict['id'])
        msg = _('router %s is not associated with '
                'default net-partition') % rtr_id
        raise n_exc.BadRequest(resource='vpn-service', msg=msg)
def _validate_nuage_staticroutes(self, old_routes, added, removed):
    """Reject added static routes that overlap a surviving route.

    Collects the destination CIDRs that remain after the update, then
    checks each newly added destination against them, growing the set
    as additions are accepted.
    """
    cidrs = [netaddr.IPNetwork(route['destination'])
             for route in old_routes if route not in removed]
    for new_route in added:
        dest = netaddr.IPNetwork(new_route['destination'])
        if netaddr.all_matching_cidrs(dest.ip, cidrs):
            msg = _('for same subnet, multiple static routes not allowed')
            raise n_exc.BadRequest(resource='router', msg=msg)
        cidrs.append(dest)
def _get_net_partition_for_subnet(self, context, subnet):
    """Resolve the net-partition a subnet belongs to.

    Falls back to the configured default partition when the subnet
    does not name one explicitly.
    """
    if subnet.get('net_partition', None):
        net_partition = self._resource_finder(context, 'subnet',
                                              'net_partition', subnet)
    else:
        default_name = cfg.CONF.RESTPROXY.default_net_partition_name
        net_partition = nuagedb.get_net_partition_by_name(
            context.session, default_name)
    if not net_partition:
        msg = _('Either net_partition is not provided with subnet OR '
                'default net_partition is not created at the start')
        raise n_exc.BadRequest(resource='subnet', msg=msg)
    return net_partition
def _get_net_partition_for_router(self, context, rtr):
    """Resolve the net-partition a router belongs to.

    Falls back to the configured default partition when the router
    does not name one explicitly.
    """
    if rtr.get('net_partition', None):
        net_partition = self._resource_finder(context, 'router',
                                              'net_partition', rtr)
    else:
        default_name = cfg.CONF.RESTPROXY.default_net_partition_name
        net_partition = nuagedb.get_net_partition_by_name(
            context.session, default_name)
    if not net_partition:
        msg = _("Either net_partition is not provided with router OR "
                "default net_partition is not created at the start")
        raise n_exc.BadRequest(resource='router', msg=msg)
    return net_partition
def create_subnet(self, context, subnet):
    """Create a subnet from an explicit CIDR or from a subnet pool.

    Exactly one of cidr/prefixlen may drive the request; an explicit
    CIDR is normalized to its network address, and the creation is
    routed either through the implicit (null) pool or the referenced
    subnet pool.

    :raises n_exc.BadRequest: when both cidr and prefixlen are given,
        or when neither a cidr nor a subnet pool is available
    """
    s = subnet['subnet']
    cidr = s.get('cidr', attributes.ATTR_NOT_SPECIFIED)
    prefixlen = s.get('prefixlen', attributes.ATTR_NOT_SPECIFIED)
    has_cidr = attributes.is_attr_set(cidr)
    has_prefixlen = attributes.is_attr_set(prefixlen)
    if has_cidr and has_prefixlen:
        msg = _('cidr and prefixlen must not be supplied together')
        raise n_exc.BadRequest(resource='subnets', msg=msg)
    if has_cidr:
        # turn the CIDR into a proper subnet
        net = netaddr.IPNetwork(s['cidr'])
        subnet['subnet']['cidr'] = '%s/%s' % (net.network, net.prefixlen)
    s['tenant_id'] = self._get_tenant_id_for_create(context, s)
    subnetpool_id = self._get_subnetpool_id(s)
    if not subnetpool_id:
        if not has_cidr:
            msg = _('A cidr must be specified in the absence of a '
                    'subnet pool')
            raise n_exc.BadRequest(resource='subnets', msg=msg)
        # Create subnet from the implicit(AKA null) pool
        created_subnet = self._create_subnet_from_implicit_pool(context,
                                                               subnet)
    else:
        created_subnet = self._create_subnet_from_pool(context, subnet,
                                                       subnetpool_id)
    # If this subnet supports auto-addressing, then update any
    # internal ports on the network with addresses for this subnet.
    if ipv6_utils.is_auto_address_subnet(created_subnet):
        self._add_auto_addrs_on_network_ports(context, created_subnet)
    return created_subnet
def create_nuage_redirect_target_vip(self, context,
                                     nuage_redirect_target_vip):
    """Create a virtual IP for an existing redirect target.

    Reserves the VIP as a hidden Neutron port on the target's subnet,
    creates the corresponding virtual IP on the VSD, then ties the
    port to the redirect target via its device_id.

    :raises n_exc.BadRequest: when no IP could be allocated for the VIP
    """
    redirect_target = nuage_redirect_target_vip[
        'nuage_redirect_target_vip']
    nuage_redirect_target = self.get_nuage_redirect_target(
        context, redirect_target['redirect_target_id'])
    subnet_id = redirect_target.get('subnet_id')
    subnet_mapping = nuagedb.get_subnet_l2dom_by_id(context.session,
                                                    subnet_id)
    vip = redirect_target.get('virtual_ip_address')
    self._validate_create_redirect_target_vip(
        context, nuage_redirect_target, subnet_mapping, vip)
    with context.session.begin(subtransactions=True):
        # Port has no 'tenant-id', as it is hidden from user
        subnet = self.core_plugin.get_subnet(context, subnet_id)
        network_id = subnet['network_id']
        fixed_ips = {'ip_address': vip}
        vip_port = self.core_plugin.create_port(
            context,
            {'port': {
                'tenant_id': redirect_target['tenant_id'],
                'network_id': network_id,
                'mac_address': attributes.ATTR_NOT_SPECIFIED,
                'fixed_ips': [fixed_ips],
                'device_id': '',
                'device_owner': constants.DEVICE_OWNER_VIP_NUAGE,
                'admin_state_up': True,
                'name': ''
            }})
        if not vip_port['fixed_ips']:
            # Roll back the reservation port when no IP was allocated.
            self.core_plugin.delete_port(context, vip_port['id'])
            msg = ('No IPs available for VIP %s') % network_id
            raise n_exc.BadRequest(resource='nuage-redirect-target',
                                   msg=msg)
        vip_resp = self.nuageclient.create_virtual_ip(
            redirect_target['redirect_target_id'],
            redirect_target['virtual_ip_address'])
        # Bind the reserved port to the redirect target.
        self.core_plugin.update_port(
            context, vip_port['id'],
            {'port': {
                'device_id': redirect_target['redirect_target_id']
            }})
    return self._make_redirect_target_vip_dict(vip_resp[3][0],
                                               context=context)
def remove_router_interface(self, context, router_id, interface_info):
    """Delete interface from a router."""
    if not interface_info:
        raise exc.BadRequest(
            resource='router',
            msg=_("Either subnet_id or port_id must be specified"))
    res_dict = self._encode_resource(resource_id=router_id,
                                     resource=interface_info)
    status_code, res_info = self._request_backend(
        context, res_dict, 'router', 'DELINTERFACE')
    if status_code != requests.codes.ok:
        self._raise_contrail_error(status_code, info=res_info,
                                   obj_name='remove_router_interface')
    return res_info
def _validate_vip_subnet(self, context, subnet_id, pool_id):
    """Ensure an external VIP subnet's router has its gateway set."""
    vip_subnet = self._get_subnet(context, subnet_id)
    if not db_util.is_subnet_external(context, vip_subnet):
        # Private VIP subnets need no gateway check.
        return
    router_id = db_util.get_router_from_pool(context, pool_id)
    # router_id should never be None because it was already validated
    # when we created the pool
    assert router_id is not None
    router = self._get_router(context, router_id)
    if router.get('gw_port_id') is None:
        msg = _("The router must have its gateway set if the "
                "VIP subnet is external")
        raise n_exc.BadRequest(resource='router', msg=msg)
def _get_router_for_floatingip(self, context, internal_port,
                               internal_subnet_id,
                               external_network_id):
    """We need to over-load this function so that we only return the
    user visible router and never its redundancy routers (as they never
    have floatingips associated with them).
    """
    subnet_db = self._core_plugin._get_subnet(context, internal_subnet_id)
    if not subnet_db['gateway_ip']:
        msg = (_('Cannot add floating IP to port on subnet %s '
                 'which has no gateway_ip') % internal_subnet_id)
        raise n_exc.BadRequest(resource='floatingip', msg=msg)

    router_intf_ports = self._get_interface_ports_for_network(
        context, internal_port['network_id'])

    # This joins on port_id so is not a cross-join
    routerport_qry = router_intf_ports.join(models_v2.IPAllocation)
    routerport_qry = routerport_qry.filter(
        models_v2.IPAllocation.subnet_id == internal_subnet_id
    )

    # Ensure that redundancy routers (in a ha group) are not returned,
    # since only the user visible router should have floatingips.
    # This can be done by checking that the id of routers does not
    # appear in the 'redundancy_router_id' column in the
    # 'cisco_router_redundancy_bindings' table.
    routerport_qry = routerport_qry.outerjoin(
        RouterRedundancyBinding,
        RouterRedundancyBinding.redundancy_router_id ==
        l3_db.RouterPort.router_id)
    routerport_qry = routerport_qry.filter(
        RouterRedundancyBinding.redundancy_router_id == expr.null())

    # Return the first eligible router that owns a gateway port on the
    # requested external network.
    for router_port in routerport_qry:
        router_id = router_port.router.id
        router_gw_qry = context.session.query(models_v2.Port)
        has_gw_port = router_gw_qry.filter_by(
            network_id=external_network_id,
            device_id=router_id,
            device_owner=DEVICE_OWNER_ROUTER_GW).count()
        if has_gw_port:
            return router_id

    raise l3.ExternalGatewayForFloatingIPNotFound(
        subnet_id=internal_subnet_id,
        external_network_id=external_network_id,
        port_id=internal_port['id'])
def remove_router_interface(self, context, router_id, interface_info):
    """Delete interface from a router."""
    if not interface_info:
        raise exc.BadRequest(
            resource='router',
            msg="Either subnet_id or port_id must be specified")
    self._set_user_auth_token()
    iface_handler = rtr_handler.LogicalRouterInterfaceHandler(
        self._vnc_lib)
    return iface_handler.remove_router_interface(
        self._get_context_dict(context), router_id,
        port_id=interface_info.get('port_id'),
        subnet_id=interface_info.get('subnet_id'))
def _update_router_gw_info(self, context, router_id, info, router=None):
    """Set, change, or clear a router's external gateway.

    Validates the requested network is external, refuses to change
    the gateway while floating IPs depend on it, deletes the old
    gateway port when the network changes, and creates the new
    gateway port after duplicate-subnet checks.

    :raises q_exc.BadRequest: network is not a valid external network
    :raises l3.RouterExternalGatewayInUseByFloatingIp: gateway change
        blocked by existing floating IPs
    """
    LOG.debug(_("update_router_gw_info called in l3_db.py context: %s"),
              context)
    LOG.debug(_("update_router_gw_info called in l3_db.py router_id: %s"),
              router_id)
    LOG.debug(_("update_router_gw_info called in l3_db.py info: %s"),
              info)
    LOG.debug(_("update_router_gw_info called in l3_db.py router: %s"),
              router)
    # TODO(salvatore-orlando): guarantee atomic behavior also across
    # operations that span beyond the model classes handled by this
    # class (e.g.: delete_port)
    router = router or self._get_router(context, router_id)
    gw_port = router.gw_port
    # network_id attribute is required by API, so it must be present
    network_id = info['network_id'] if info else None
    if network_id:
        network_db = self._core_plugin._get_network(context, network_id)
        if not network_db.external:
            msg = _("Network %s is not a valid external "
                    "network") % network_id
            raise q_exc.BadRequest(resource='router', msg=msg)
    # figure out if we need to delete existing port
    if gw_port and gw_port['network_id'] != network_id:
        fip_count = self.get_floatingips_count(context.elevated(),
                                               {'router_id': [router_id]})
        if fip_count:
            raise l3.RouterExternalGatewayInUseByFloatingIp(
                router_id=router_id, net_id=gw_port['network_id'])
    if gw_port and gw_port['network_id'] != network_id:
        # Detach the old gateway port from the router before deleting
        # it; l3_port_check=False bypasses the router-port in-use guard.
        with context.session.begin(subtransactions=True):
            router.gw_port = None
            context.session.add(router)
            self._core_plugin.delete_port(context.elevated(),
                                          gw_port['id'],
                                          l3_port_check=False)
    if network_id is not None and (gw_port is None or
                                   gw_port['network_id'] != network_id):
        # Guard against subnets already connected to this router, then
        # create the new gateway port.
        subnets = self._core_plugin._get_subnets_by_network(context,
                                                            network_id)
        for subnet in subnets:
            self._check_for_dup_router_subnet(context, router_id,
                                              network_id, subnet['id'],
                                              subnet['cidr'])
        self._create_router_gw_port(context, router, network_id)