Example #1
def _delete_segments_for_network(resource, event, trigger, context,
                                 network_id):
    admin_ctx = common_utils.get_elevated_context(context)
    global segments_plugin
    if not segments_plugin:
        segments_plugin = manager.NeutronManager.load_class_for_provider(
            'neutron.service_plugins', 'segments')()
    segments = segments_plugin.get_segments(
        admin_ctx, filters={'network_id': [network_id]})
    for segment in segments:
        segments_plugin.delete_segment(admin_ctx,
                                       segment['id'],
                                       for_net_delete=True)
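
All of the snippets in this collection hinge on the same helper. As a minimal sketch, assuming common_utils.get_elevated_context simply delegates to the elevated() method that neutron_lib request contexts provide, it would look roughly like this:

def get_elevated_context(context):
    # Hypothetical sketch, not the actual Neutron implementation. The call
    # sites in these examples only rely on getting back an admin-capable
    # copy of the request context; neutron_lib's ContextBase.elevated()
    # returns a copy with is_admin=True and the 'admin' role added.
    return context.elevated()

The call sites then pass the elevated copy, never the original request context, into DB-facing plugin and object calls.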
Example #2
    def _update_network_policy(self, context, network, network_changes):
        old_policy = policy_object.QosPolicy.get_network_policy(
            common_utils.get_elevated_context(context), network['id'])
        if old_policy:
            self._check_policy_change_permission(context, old_policy)
            old_policy.detach_network(network['id'])

        qos_policy_id = network_changes.get(qos_consts.QOS_POLICY_ID)
        if qos_policy_id is not None:
            policy = policy_object.QosPolicy.get_policy_obj(
                context, qos_policy_id)
            policy.attach_network(network['id'])
        network[qos_consts.QOS_POLICY_ID] = qos_policy_id
Example #3
    def _notify_agents(self, context, method, payload, network_id):
        """Notify all the agents that are hosting the network."""
        payload['priority'] = METHOD_PRIORITY_MAP.get(method)
        # fanout is required as we do not know who is "listening"
        no_agents = not extensions.is_extension_supported(
            self.plugin, constants.DHCP_AGENT_SCHEDULER_EXT_ALIAS)
        fanout_required = method == 'network_delete_end' or no_agents

        # we do nothing on network creation because we want to give the
        # admin the chance to associate an agent to the network manually
        cast_required = method != 'network_create_end'

        if fanout_required:
            self._fanout_message(context, method, payload)
        elif cast_required:
            admin_ctx = (context if context.is_admin else
                         common_utils.get_elevated_context(context))
            network = self.plugin.get_network(admin_ctx, network_id)
            if 'subnet' in payload and payload['subnet'].get('segment_id'):
                # if segment_id exists then the segment service plugin
                # must be loaded
                segment_plugin = directory.get_plugin('segments')
                segment = segment_plugin.get_segment(
                    context, payload['subnet']['segment_id'])
                network['candidate_hosts'] = segment['hosts']

            agents = self.plugin.get_dhcp_agents_hosting_networks(
                context, [network_id], hosts=network.get('candidate_hosts'))
            # schedule the network first, if needed
            schedule_required = (
                method == 'subnet_create_end' or method == 'port_create_end'
                and not self._is_reserved_dhcp_port(payload['port']))
            if schedule_required:
                agents = self._schedule_network(admin_ctx, network, agents)
            if not agents:
                LOG.debug("Network %s is not hosted by any dhcp agent",
                          network_id)
                return
            enabled_agents = self._get_enabled_agents(context, network, agents,
                                                      method, payload)

            if method == 'port_create_end' and enabled_agents:
                high_agent = enabled_agents.pop(
                    random.randint(0,
                                   len(enabled_agents) - 1))
                self._notify_high_priority_agent(context,
                                                 copy.deepcopy(payload),
                                                 high_agent)
            for agent in enabled_agents:
                self._cast_message(context, method, payload, agent.host,
                                   agent.topic)
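
Example #3 reads its notification priority from a METHOD_PRIORITY_MAP defined elsewhere in the module. An illustrative shape for that map, assuming one priority constant per RPC method name (the constant names and values here are hypothetical), would be:

PRIORITY_NETWORK_CREATE = 0  # hypothetical constants, for illustration only
PRIORITY_NETWORK_DELETE = 1
PRIORITY_SUBNET_CREATE = 2
PRIORITY_PORT_CREATE = 3

METHOD_PRIORITY_MAP = {
    'network_create_end': PRIORITY_NETWORK_CREATE,
    'network_delete_end': PRIORITY_NETWORK_DELETE,
    'subnet_create_end': PRIORITY_SUBNET_CREATE,
    'port_create_end': PRIORITY_PORT_CREATE,
}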
Example #4
def _unbind_dvr_port_before_delete(self, context, router_id, port_host):
    filter_rtr = {
        'device_id': [router_id],
        'device_owner': [n_const.DEVICE_OWNER_DVR_INTERFACE]
    }
    int_ports = self._core_plugin.get_ports(
        n_utils.get_elevated_context(context), filters=filter_rtr)
    for port in int_ports:
        dvr_binding = (ml2_db.get_distributed_port_binding_by_host(
            context, port['id'], port_host))
        if dvr_binding:
            # unbind this port from router
            dvr_binding['router_id'] = None
            dvr_binding.update(dvr_binding)
Example #5
    def _get_gateway_port_host(self, context, router, gw_ports):
        if not router.get('ha'):
            return super(L3_HA_NAT_db_mixin,
                         self)._get_gateway_port_host(context, router,
                                                      gw_ports)

        gw_port_id = router['gw_port_id']
        gateway_port = gw_ports.get(gw_port_id)
        if not gw_port_id or not gateway_port:
            return
        gateway_port_status = gateway_port['status']
        gateway_port_binding_host = gateway_port[portbindings.HOST_ID]

        admin_ctx = n_utils.get_elevated_context(context)
        router_id = router['id']
        ha_bindings = self.get_l3_bindings_hosting_router_with_ha_states(
            admin_ctx, router_id)
        LOG.debug(
            "HA router %(router_id)s gateway port %(gw_port_id)s "
            "binding host: %(host)s, status: %(status)s", {
                "router_id": router_id,
                "gw_port_id": gateway_port['id'],
                "host": gateway_port_binding_host,
                "status": gateway_port_status
            })
        for ha_binding_agent, ha_binding_state in ha_bindings:
            if ha_binding_state != constants.HA_ROUTER_STATE_ACTIVE:
                continue
            # When creating the router gateway, the gateway port may not be
            # ACTIVE yet, so we return the 'primary' host directly.
            if gateway_port_status != constants.PORT_STATUS_ACTIVE:
                return ha_binding_agent.host
            # Do not let the original 'primary' host (which is currently a
            # backup) override the gateway port binding host.
            if (gateway_port_status == constants.PORT_STATUS_ACTIVE
                    and ha_binding_agent.host == gateway_port_binding_host):
                return ha_binding_agent.host

        LOG.debug(
            "No gateway port host retrieved. HA router %(router_id)s "
            "gateway port %(gw_port_id)s "
            "binding host: %(host)s, status: %(status)s, "
            "router HA bindings: %(ha_bindings)s", {
                "router_id": router_id,
                "gw_port_id": gateway_port['id'],
                "host": gateway_port_binding_host,
                "status": gateway_port_status,
                "ha_bindings": ha_bindings
            })
Example #6
def _get_other_dvr_router_ids_connected_router(self, context, router_id):
    # TODO(slaweq): move this method to RouterPort OVO object
    subnet_ids = self.get_subnet_ids_on_router(context, router_id)
    RouterPort = l3_models.RouterPort
    query = n_utils.get_elevated_context(context).session.query(
        RouterPort.router_id)
    query = query.join(models_v2.Port)
    query = query.join(
        models_v2.Subnet,
        models_v2.Subnet.network_id == models_v2.Port.network_id)
    query = query.filter(
        models_v2.Subnet.id.in_(subnet_ids), RouterPort.port_type ==
        n_const.DEVICE_OWNER_DVR_INTERFACE).distinct()
    query = query.filter(RouterPort.router_id != router_id)
    return [item[0] for item in query]
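
For readability, the chained SQLAlchemy query in Example #6 corresponds to roughly the following SQL (an approximate, hypothetical rendering; the exact statement depends on the SQLAlchemy version, dialect and model metadata):

# SELECT DISTINCT routerports.router_id
# FROM routerports
#     JOIN ports ON ports.id = routerports.port_id
#     JOIN subnets ON subnets.network_id = ports.network_id
# WHERE subnets.id IN (:subnet_ids)
#     AND routerports.port_type = 'network:router_interface_distributed'
#     AND routerports.router_id != :router_id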
Example #7
def _load_shared(self, db_obj=None):
    if db_obj:
        # NOTE(korzen) db_obj is passed when Subnet object is loaded
        # from DB
        rbac_entries = db_obj.get('rbac_entries') or {}
        shared = (rbac_db.RbacNeutronDbObjectMixin.is_network_shared(
            self.obj_context, rbac_entries))
    else:
        # NOTE(korzen) this case is used when the Subnet object was
        # instantiated without DB interaction (get_object(s), update,
        # create); it should be a rare case to load 'shared' via this
        # method
        shared = (rbac_db.RbacNeutronDbObjectMixin.get_shared_with_tenant(
            common_utils.get_elevated_context(self.obj_context),
            network.NetworkRBAC, self.network_id, self.project_id))
    setattr(self, 'shared', shared)
    self.obj_reset_changes(['shared'])
Example #8
    def get_hosts_to_notify(self, context, router_id):
        """Returns all hosts to send notification about router update"""
        hosts = super(L3_DVRsch_db_mixin,
                      self).get_hosts_to_notify(context, router_id)
        router = self.get_router(n_utils.get_elevated_context(context),
                                 router_id)
        if router.get('distributed', False):
            dvr_hosts = self._get_dvr_hosts_for_router(context, router_id)
            dvr_hosts = set(dvr_hosts) - set(hosts)
            dvr_hosts |= self._get_other_dvr_hosts(context, router_id)
            state = agentschedulers_db.get_admin_state_up_filter()
            agents = self.get_l3_agents(context,
                                        active=state,
                                        filters={'host': dvr_hosts})
            hosts += [a.host for a in agents]

        return hosts
Example #9
    def _validate_update_port_callback(self,
                                       resource,
                                       event,
                                       trigger,
                                       payload=None):
        context = payload.context
        original_policy_id = payload.states[0].get(qos_consts.QOS_POLICY_ID)
        policy_id = payload.desired_state.get(qos_consts.QOS_POLICY_ID)

        if policy_id is None or policy_id == original_policy_id:
            return

        updated_port = ports_object.Port.get_object(
            context, id=payload.desired_state['id'])
        policy = policy_object.QosPolicy.get_object(
            common_utils.get_elevated_context(context), id=policy_id)

        self.validate_policy_for_port(context, policy, updated_port)
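
The payload consumed in Example #9 is a neutron_lib callbacks payload, with states[0] holding the original port and desired_state the requested update. A hypothetical construction, e.g. from a unit test (original_port and updated_port standing in for plain port dicts), might look like:

from neutron_lib.callbacks import events

payload = events.DBEventPayload(
    context,
    states=(original_port,),
    desired_state=updated_port)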
Example #10
    def _load_shared(self, db_obj=None):
        # Do not override 'shared' attribute on create() or update()
        if 'shared' in self.obj_get_changes():
            return

        if db_obj:
            # NOTE(korzen) db_obj is passed when object is loaded from DB
            rbac_entries = db_obj.get('rbac_entries') or {}
            shared = self.is_network_shared(self.obj_context, rbac_entries)
        else:
            # NOTE(korzen) this case is used when the object was instantiated
            # without DB interaction (get_object(s), update, create); it
            # should be a rare case to load 'shared' via this method
            shared = self.get_shared_with_tenant(
                utils.get_elevated_context(self.obj_context), self.rbac_db_cls,
                self.id, self.project_id)
        setattr(self, 'shared', shared)
        self.obj_reset_changes(['shared'])
Example #11
    def _check_port_has_port_forwarding(self, resource, event,
                                        trigger, payload=None):
        port_id = payload.request_body['floatingip'].get('port_id')
        if not port_id:
            return

        pf_objs = pf.PortForwarding.get_objects(
            payload.context, internal_port_id=port_id)
        if not pf_objs:
            return
        # The port may not be bound to a host yet, or it may migrate from a
        # dvr_no_external host to a dvr host. So we simply do not allow a
        # DVR router's floating IP to be bound to a port which already has
        # port forwarding.
        router = self.l3_plugin.get_router(
            utils.get_elevated_context(payload.context),
            pf_objs[0].router_id)
        if l3_dvr_db.is_distributed_router(router):
            raise pf_exc.PortHasPortForwarding(port_id=port_id)
Example #12
    def update_subport_bindings(self, context, subports):
        """Update subport bindings to match trunk host binding."""
        el = common_utils.get_elevated_context(context)
        ports_by_trunk_id = collections.defaultdict(list)
        updated_ports = collections.defaultdict(list)

        for s in subports:
            ports_by_trunk_id[s['trunk_id']].append(s['port_id'])
        for trunk_id, subport_ids in ports_by_trunk_id.items():
            trunk = trunk_objects.Trunk.get_object(el, id=trunk_id)
            if not trunk:
                LOG.debug("Trunk not found. id: %s", trunk_id)
                continue

            trunk_updated_ports = self._process_trunk_subport_bindings(
                el, trunk, subport_ids)
            updated_ports[trunk.id].extend(trunk_updated_ports)

        return updated_ports
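
The subports argument in Example #12 is a list of dicts carrying at least 'trunk_id' and 'port_id'. A hypothetical invocation (the UUIDs and the rpc_backend name are placeholders):

subports = [
    {'trunk_id': 'trunk-uuid-1', 'port_id': 'port-uuid-a'},
    {'trunk_id': 'trunk-uuid-1', 'port_id': 'port-uuid-b'},
]
updated = rpc_backend.update_subport_bindings(context, subports)
# updated maps each trunk id to the list of subports that were updated for
# it, e.g. {'trunk-uuid-1': [...]}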
Example #13
    def _validate_routes(self, context,
                         router_id, routes):
        if len(routes) > cfg.CONF.max_routes:
            raise xroute_exc.RoutesExhausted(
                router_id=router_id,
                quota=cfg.CONF.max_routes)

        context = common_utils.get_elevated_context(context)
        filters = {'device_id': [router_id]}
        ports = self._core_plugin.get_ports(context, filters)
        cidrs = []
        ips = []
        for port in ports:
            for ip in port['fixed_ips']:
                cidrs.append(self._core_plugin.get_subnet(
                    context, ip['subnet_id'])['cidr'])
                ips.append(ip['ip_address'])
        for route in routes:
            self._validate_routes_nexthop(
                cidrs, ips, routes, route['nexthop'])
Example #14
    def update_shared(self, is_shared_new, obj_id):
        admin_context = utils.get_elevated_context(self.obj_context)
        shared_prev = obj_db_api.get_object(self.rbac_db_cls,
                                            admin_context,
                                            object_id=obj_id,
                                            target_tenant='*',
                                            action=models.ACCESS_SHARED)
        is_shared_prev = bool(shared_prev)
        if is_shared_prev == is_shared_new:
            return

        # 'shared' goes False -> True
        if not is_shared_prev and is_shared_new:
            self.attach_rbac(obj_id, self.obj_context.tenant_id)
            return

        # 'shared' going True -> False is actually an attempt to delete the
        # rbac rule sharing obj_id with target_tenant = '*'
        self._validate_rbac_policy_delete(self.obj_context, obj_id, '*')
        return self.obj_context.session.delete(shared_prev)
Example #15
    def validate_rbac_policy_change(cls,
                                    resource,
                                    event,
                                    trigger,
                                    payload=None):
        """Callback to validate  changes.

        This is the dispatching function for create, update and delete
        callbacks. On creation and update, verify that the creator is an admin
        or owns the resource being shared.
        """
        object_type = payload.metadata.get('object_type')
        context = payload.context
        policy = (payload.request_body
                  if event == events.BEFORE_CREATE else payload.latest_state)

        # TODO(hdaniel): As this code was shamelessly stolen from
        # NeutronDbPluginV2.validate_network_rbac_policy_change(), those pieces
        # should be kept in sync (and will share the same bugs) until Network
        # RBAC logic is (hopefully) melded with this one.
        if object_type != cls.rbac_db_cls.db_model.object_type:
            return
        db_obj = obj_db_api.get_object(cls,
                                       utils.get_elevated_context(context),
                                       id=policy['object_id'])
        if event in (events.BEFORE_CREATE, events.BEFORE_UPDATE):
            if (not context.is_admin
                    and db_obj['tenant_id'] != context.tenant_id):
                msg = _("Only admins can manipulate policies on objects "
                        "they do not own")
                raise exceptions.InvalidInput(error_message=msg)
        callback_map = {
            events.BEFORE_CREATE: cls.validate_rbac_policy_create,
            events.BEFORE_UPDATE: cls.validate_rbac_policy_update,
            events.BEFORE_DELETE: cls.validate_rbac_policy_delete
        }
        if event in callback_map:
            return callback_map[event](resource,
                                       event,
                                       trigger,
                                       payload=payload)
Example #16
def delete_address_group(self, context, id):
    if sg_obj.SecurityGroupRule.get_objects(
            common_utils.get_elevated_context(context),
            remote_address_group_id=id):
        raise ag_exc.AddressGroupInUse(address_group_id=id)
    ag = self._get_address_group(context, id)
    ag.delete()
    kwargs = {
        'address_group_id': id,
        'name': ag['name'],
        'description': ag['description']
    }
    # TODO(mlavalle) this notification should be updated to publish() once
    # the callback handler handle_event (class _ObjectChangeHandler in
    # neutron.plugins.ml2.ovo_rpc) is updated to receive notifications with
    # new-style payload objects as argument.
    registry.notify(resources.ADDRESS_GROUP,
                    events.AFTER_DELETE,
                    self,
                    context=context,
                    **kwargs)
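
Once the TODO above is resolved, the legacy registry.notify() call would presumably become a registry.publish() with a new-style payload object, along these lines (a sketch of the intent, not merged Neutron code):

registry.publish(
    resources.ADDRESS_GROUP, events.AFTER_DELETE, self,
    payload=events.DBEventPayload(
        context, resource_id=id, states=(ag,)))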
Example #17
def _get_enabled_agents(self, context, network_id, network, agents, method,
                        payload):
    """Get the list of agents who can provide services."""
    if not agents:
        return []
    enabled_agents = agents
    if not cfg.CONF.enable_services_on_agents_with_admin_state_down:
        enabled_agents = [x for x in agents if x.admin_state_up]
    active_agents = [x for x in agents if x.is_active]
    len_enabled_agents = len(enabled_agents)
    len_active_agents = len(active_agents)
    if len_active_agents < len_enabled_agents:
        LOG.warning(
            "Only %(active)d of %(total)d DHCP agents "
            "associated with network '%(net_id)s' "
            "are marked as active, so notifications "
            "may be sent to inactive agents.", {
                'active': len_active_agents,
                'total': len_enabled_agents,
                'net_id': network_id
            })
    if not enabled_agents:
        num_ports = self.plugin.get_ports_count(
            context, {'network_id': [network_id]})
        if not network:
            admin_ctx = (context if context.is_admin else
                         common_utils.get_elevated_context(context))
            network = self.plugin.get_network(admin_ctx, network_id)
        notification_required = (num_ports > 0
                                 and len(network['subnets']) >= 1)
        if notification_required:
            LOG.error(
                "Will not send event %(method)s for network "
                "%(net_id)s: no agent available. Payload: "
                "%(payload)s", {
                    'method': method,
                    'net_id': network_id,
                    'payload': payload
                })
    return enabled_agents
Example #18
def _agent_notification(self, context, method, router_ids, operation,
                        shuffle_agents):
    """Notify the hosting l3 agents about changed routers."""
    adminContext = (context if context.is_admin else
                    common_utils.get_elevated_context(context))
    plugin = directory.get_plugin(plugin_constants.L3)
    for router_id in router_ids:
        hosts = plugin.get_hosts_to_notify(adminContext, router_id)
        if shuffle_agents:
            random.shuffle(hosts)
        for host in hosts:
            LOG.debug(
                'Notify agent at %(topic)s.%(host)s the message '
                '%(method)s', {
                    'topic': topics.L3_AGENT,
                    'host': host,
                    'method': method
                })
            cctxt = self.client.prepare(topic=topics.L3_AGENT,
                                        server=host,
                                        version='1.1')
            cctxt.cast(context, method, routers=[router_id])
Example #19
    def validate_rbac_policy_delete(cls,
                                    resource,
                                    event,
                                    trigger,
                                    payload=None):
        """Callback to handle RBAC_POLICY, BEFORE_DELETE callback.

        :raises: RbacPolicyInUse -- in case the policy is in use.
        """
        context = payload.context
        policy = payload.latest_state

        if policy['action'] != models.ACCESS_SHARED:
            return
        target_tenant = policy['target_tenant']
        db_obj = obj_db_api.get_object(cls,
                                       utils.get_elevated_context(context),
                                       id=policy['object_id'])
        if db_obj.tenant_id == target_tenant:
            return
        cls._validate_rbac_policy_delete(context=context,
                                         obj_id=policy['object_id'],
                                         target_tenant=target_tenant)
Example #20
def _delete_ha_network(self, context, net):
    admin_ctx = n_utils.get_elevated_context(context)
    self._core_plugin.delete_network(admin_ctx, net.network_id)
Example #21
    def _process_port_request(self, resource, event, trigger, context,
                              **kwargs):
        # Deleting a floating IP triggers a PRECOMMIT_DELETE event for its
        # port resource, so just return here; the request is checked in the
        # _check_floatingip_request callback instead.
        if kwargs['port']['device_owner'].startswith(
                lib_consts.DEVICE_OWNER_FLOATINGIP):
            return

        # This block checks whether any fixed IPs were updated. For both the
        # AFTER_UPDATE and PRECOMMIT_DELETE events we build update_ip_set:
        # for the port's AFTER_UPDATE event it tells us whether associated
        # port forwarding resources need to be deleted, and for the port's
        # PRECOMMIT_DELETE event it collects all affected IP addresses.
        port_id = kwargs['port']['id']
        update_fixed_ips = kwargs['port']['fixed_ips']
        update_ip_set = set()
        for update_fixed_ip in update_fixed_ips:
            if (netaddr.IPNetwork(update_fixed_ip.get('ip_address')).version ==
                    lib_consts.IP_VERSION_4):
                update_ip_set.add(update_fixed_ip.get('ip_address'))
        if not update_ip_set:
            return

        # If the port owner wants to update or delete the port, we must
        # elevate the context to check whether the floating IP or port
        # forwarding resources are owned by other tenants.
        if not context.is_admin:
            context = utils.get_elevated_context(context)
        # Reaching this point means update_ip_set is non-empty, so fetch all
        # port forwarding resources for the requested port_id in preparation
        # for the next step, such as deleting them.
        pf_resources = pf.PortForwarding.get_objects(
            context, internal_port_id=port_id)
        if not pf_resources:
            return

        # Reaching this point means pf_resources is non-empty as well. Collect
        # all IP addresses used by port forwarding resources into used_ip_set,
        # and default remove_ip_set to used_ip_set, which means all port
        # forwarding resources are deleted when the event is PRECOMMIT_DELETE.
        # When the event is AFTER_UPDATE, only the set difference is removed.
        used_ip_set = set()
        for pf_resource in pf_resources:
            used_ip_set.add(str(pf_resource.internal_ip_address))
        remove_ip_set = used_ip_set
        if event == events.AFTER_UPDATE:
            remove_ip_set = used_ip_set - update_ip_set
            if not remove_ip_set:
                return

        # With remove_ip_set in hand, the following block deletes the port
        # forwarding resources it covers. Note that for an AFTER_UPDATE event
        # with an empty remove_ip_set, we have already returned above and the
        # block is never reached.
        remove_port_forwarding_list = []
        with db_api.CONTEXT_WRITER.using(context):
            for pf_resource in pf_resources:
                if str(pf_resource.internal_ip_address) in remove_ip_set:
                    pf_objs = pf.PortForwarding.get_objects(
                        context, floatingip_id=pf_resource.floatingip_id)
                    if len(pf_objs) == 1 and pf_objs[0].id == pf_resource.id:
                        fip_obj = l3_obj.FloatingIP.get_object(
                            context, id=pf_resource.floatingip_id)
                        fip_obj.update_fields({'router_id': None})
                        fip_obj.update()
                    pf_resource.delete()
                    remove_port_forwarding_list.append(pf_resource)

        if self._rpc_notifications_required:
            self.push_api.push(context, remove_port_forwarding_list,
                               rpc_events.DELETED)
        registry_notify_payload = [
            callbacks.PortForwardingPayload(context, original_pf=pf_obj) for
            pf_obj in remove_port_forwarding_list]
        registry.notify(pf_consts.PORT_FORWARDING, events.AFTER_DELETE, self,
                        payload=registry_notify_payload)
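
The AFTER_UPDATE branch of Example #21 reduces to plain set arithmetic; a worked example with made-up addresses:

used_ip_set = {'10.0.0.5', '10.0.0.6'}       # IPs referenced by port forwardings
update_ip_set = {'10.0.0.5'}                 # IPv4 fixed IPs still on the port
remove_ip_set = used_ip_set - update_ip_set  # {'10.0.0.6'}: forwardings to drop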
Example #22
    def get_dvr_routers_to_remove(self,
                                  context,
                                  deleted_port,
                                  get_related_hosts_info=True):
        """Returns info about which routers should be removed

        In case dvr serviceable port was deleted we need to check
        if any dvr routers should be removed from l3 agent on port's host
        """
        if not n_utils.is_dvr_serviced(deleted_port['device_owner']):
            return []

        admin_context = n_utils.get_elevated_context(context)
        port_host = deleted_port[portbindings.HOST_ID]
        subnet_ids = [ip['subnet_id'] for ip in deleted_port['fixed_ips']]
        router_ids = self.get_dvr_routers_by_subnet_ids(
            admin_context, subnet_ids)
        if not router_ids:
            LOG.debug(
                'No DVR routers for this DVR port %(port)s '
                'on host %(host)s', {
                    'port': deleted_port['id'],
                    'host': port_host
                })
            return []
        agent = self._get_agent_by_type_and_host(context,
                                                 n_const.AGENT_TYPE_L3,
                                                 port_host)
        removed_router_info = []
        # NOTE(Swami): If the host has any serviceable ports, we should not
        # remove the router namespace for the port, nor the connected
        # routers' namespaces. Only after all serviceable ports on the host
        # for the connected routers are deleted can we remove the router
        # namespace.
        host_has_serviceable_port = False
        for router_id in router_ids:
            if rb_obj.RouterL3AgentBinding.objects_exist(context,
                                                         router_id=router_id,
                                                         l3_agent_id=agent.id):
                # not removing from the agent hosting SNAT for the router
                continue
            if self._check_for_rtr_serviceable_ports(admin_context, router_id,
                                                     port_host):
                # once we found a serviceable port there is no need to
                # check further
                host_has_serviceable_port = True
                break
            self._unbind_dvr_port_before_delete(context, router_id, port_host)
            info = {
                'router_id': router_id,
                'host': port_host,
                'agent_id': str(agent.id)
            }
            removed_router_info.append(info)
        # Now collect the connected router info as well to remove
        # it from the agent, only if there is not a serviceable port.
        if not host_has_serviceable_port:
            related_router_ids = set()
            for router_id in router_ids:
                connected_dvr_router_ids = set(
                    self._get_other_dvr_router_ids_connected_router(
                        context, router_id))
                related_router_ids |= connected_dvr_router_ids
            related_router_ids = [
                r_id for r_id in related_router_ids
                if r_id not in list(router_ids)
            ]
            for router_id in related_router_ids:
                if self._check_for_rtr_serviceable_ports(
                        admin_context, router_id, port_host):
                    # once we found a serviceable port there is no need to
                    # check further
                    host_has_serviceable_port = True
                    break
                self._unbind_dvr_port_before_delete(context, router_id,
                                                    port_host)
                info = {
                    'router_id': router_id,
                    'host': port_host,
                    'agent_id': str(agent.id)
                }
                removed_router_info.append(info)
        LOG.debug("Router info to be deleted: %s", removed_router_info)
        return removed_router_info
Example #23
def is_shared_with_tenant(cls, context, obj_id, tenant_id):
    ctx = utils.get_elevated_context(context)
    with cls.db_context_reader(ctx):
        return cls.get_shared_with_tenant(ctx, cls.rbac_db_cls, obj_id,
                                          tenant_id)
Example #24
    def create_ha_port_and_bind(self,
                                plugin,
                                context,
                                router_id,
                                tenant_id,
                                agent,
                                is_manual_scheduling=False):
        """Creates and binds a new HA port for this agent."""
        ctxt = utils.get_elevated_context(context)
        router_db = plugin._get_router(ctxt, router_id)
        creator = functools.partial(self._add_port_from_net_and_ensure_vr_id,
                                    plugin, ctxt, router_db, tenant_id)
        dep_getter = functools.partial(plugin.get_ha_network, ctxt, tenant_id)
        dep_creator = functools.partial(plugin._create_ha_network, ctxt,
                                        tenant_id)
        dep_deleter = functools.partial(plugin._delete_ha_network, ctxt)
        dep_id_attr = 'network_id'

        # This might fail in case of concurrent calls, which is good for us
        # as we can skip the rest of this function.
        binding = self.bind_router(plugin,
                                   context,
                                   router_id,
                                   agent['id'],
                                   is_manual_scheduling=is_manual_scheduling,
                                   is_ha=True)
        if not binding:
            return

        try:
            port_binding = utils.create_object_with_dependency(
                creator, dep_getter, dep_creator, dep_id_attr, dep_deleter)[0]
            with lib_db_api.CONTEXT_WRITER.using(context):
                port_binding = (
                    l3_hamode_obj.L3HARouterAgentPortBinding.get_object(
                        context, port_id=port_binding['port_id']))
                port_binding.l3_agent_id = agent['id']
                port_binding.update()
        except db_exc.DBDuplicateEntry:
            LOG.debug(
                "Router %(router)s already scheduled for agent "
                "%(agent)s", {
                    'router': router_id,
                    'agent': agent['id']
                })
            port_id = port_binding.port_id
            # The call below will also delete the corresponding entries from
            # the L3HARouterAgentPortBinding and RouterPort tables.
            plugin._core_plugin.delete_port(context,
                                            port_id,
                                            l3_port_check=False)
        except l3_exc.RouterNotFound:
            LOG.debug(
                'Router %s has already been removed '
                'by concurrent operation', router_id)
            # we try to clear the HA network here in case the port we created
            # blocked the concurrent router delete operation from getting rid
            # of the HA network
            ha_net = plugin.get_ha_network(ctxt, tenant_id)
            if ha_net:
                plugin.safe_delete_ha_network(ctxt, ha_net, tenant_id)
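
Example #24 uses functools.partial to freeze the plugin, context, router and tenant arguments into creator/getter/deleter callables, so that create_object_with_dependency can call and retry them without knowing their signatures. The underlying pattern in isolation (all names here are illustrative):

import functools

def make_port(plugin, ctxt, router_db, tenant_id, network_id):
    ...  # stand-in for _add_port_from_net_and_ensure_vr_id

creator = functools.partial(make_port, plugin, ctxt, router_db, tenant_id)
# creator(network_id) is now equivalent to
# make_port(plugin, ctxt, router_db, tenant_id, network_id).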
Example #25
 def test_get_policy_obj_not_found(self):
     context = common_utils.get_elevated_context(self.context)
     self.assertRaises(qos_exc.QosPolicyNotFound,
                       policy.QosPolicy.get_policy_obj,
                       context, "fake_id")