Example #1
class Netmtu_db_mixin(object):
    """Mixin class to add network MTU support to db_base_plugin_v2."""
    def _extend_network_dict_mtu(self, network_res, network_db):
        # don't use network_db argument since MTU is not persisted in database
        network_res[netmtu.MTU] = utils.get_deployment_physnet_mtu()
        return network_res

    resource_extend.register_funcs(attributes.NETWORKS,
                                   ['_extend_network_dict_mtu'])
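
All of these examples hinge on the same dict-extend registry pattern. The sketch below is a minimal, self-contained approximation of it; the names _EXTEND_FUNCS and apply_funcs are illustrative stand-ins, not Neutron's actual internals (the real _resource_extend module, referenced in Examples #13 and #15, also resolves string entries to methods on the registering class).

import collections

# resource name -> list of functions that post-process API dicts
_EXTEND_FUNCS = collections.defaultdict(list)

def register_funcs(resource, funcs):
    """Queue extend functions to run whenever a resource dict is built."""
    _EXTEND_FUNCS[resource].extend(funcs)

def apply_funcs(resource, response, db_object):
    """Run every registered extender over a freshly built API dict."""
    for func in _EXTEND_FUNCS[resource]:
        func(response, db_object)

# Usage, mirroring Example #1: an extender that adds an 'mtu' key.
register_funcs('networks', [lambda res, db: res.update(mtu=1500)])
net = {'id': 'net-1'}
apply_funcs('networks', net, db_object=None)
assert net['mtu'] == 1500
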
Example #2
class NetworkAvailabilityZoneMixin(net_az.NetworkAvailabilityZonePluginBase):
    """Mixin class to enable network's availability zone attributes."""
    def _extend_availability_zone(self, net_res, net_db):
        net_res[az_ext.AZ_HINTS] = az_ext.convert_az_string_to_list(
            net_db[az_ext.AZ_HINTS])
        net_res[az_ext.AVAILABILITY_ZONES] = (
            self.get_network_availability_zones(net_db))

    resource_extend.register_funcs(attributes.NETWORKS,
                                   ['_extend_availability_zone'])
Example #3
class SubnetServiceTypeMixin(object):
    """Mixin class to extend subnet with service type attribute"""
    def _extend_subnet_service_types(self, subnet_res, subnet_db):
        subnet_res['service_types'] = [
            service_type['service_type']
            for service_type in subnet_db.service_types
        ]

    resource_extend.register_funcs(attributes.SUBNETS,
                                   ['_extend_subnet_service_types'])
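
Examples here register extenders in two forms: quoted method names (as above, resolved against the mixin instance later) and bare callables (as in Examples #5 and #10). A hypothetical helper showing how name resolution can work at apply time; apply_named_funcs is illustrative only, not Neutron's API:

def apply_named_funcs(instance, names, response, db_object):
    # Resolve each registered name to a bound method and invoke it.
    for name in names:
        getattr(instance, name)(response, db_object)
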
Example #4
class PortSecurityDbMixin(portsecurity_db_common.PortSecurityDbCommon):
    resource_extend.register_funcs(attrs.NETWORKS,
                                   ['_extend_port_security_dict'])
    resource_extend.register_funcs(attrs.PORTS, ['_extend_port_security_dict'])

    def _extend_port_security_dict(self, response_data, db_data):
        if ('port-security' in getattr(self, 'supported_extension_aliases',
                                       [])):
            super(PortSecurityDbMixin,
                  self)._extend_port_security_dict(response_data, db_data)

    def _determine_port_security_and_has_ip(self, context, port):
        """Returns a tuple of booleans (port_security_enabled, has_ip).

        Port_security is the value associated with the port if one is present;
        otherwise the value associated with the network is returned. has_ip
        indicates whether the port is associated with an IP or not.
        """
        has_ip = self._ip_on_port(port)
        # we don't apply security groups for dhcp, router
        if port.get('device_owner') and utils.is_port_trusted(port):
            return (False, has_ip)

        if validators.is_attr_set(port.get(psec.PORTSECURITY)):
            port_security_enabled = port[psec.PORTSECURITY]

        # If the port has an IP and security_groups are passed in,
        # conveniently set port_security_enabled to True; this way the
        # user doesn't also have to pass in port_security_enabled=True
        # when creating ports.
        elif has_ip and validators.is_attr_set(port.get('security_groups')):
            port_security_enabled = True
        else:
            port_security_enabled = self._get_network_security_binding(
                context, port['network_id'])

        return (port_security_enabled, has_ip)

    def _ip_on_port(self, port):
        return bool(port.get('fixed_ips'))
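
The precedence in _determine_port_security_and_has_ip is easy to misread, so here it is restated as a pure function (all names illustrative): trusted ports are always False, an explicit value wins next, security groups plus an IP imply True, and the network binding is the final fallback.

def port_security(trusted, explicit, has_ip, has_sgs, network_default):
    if trusted:
        return False                  # dhcp/router ports: never apply
    if explicit is not None:
        return explicit               # caller said so explicitly
    if has_ip and has_sgs:
        return True                   # implied by passing security groups
    return network_default            # fall back to the network setting

assert port_security(False, None, True, True, False) is True
assert port_security(True, True, True, True, True) is False
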
Example #5
    def __init__(self):
        resource_extend.register_funcs(
            attributes.NETWORKS, [_extend_network_dict_binding])
        resource_extend.register_funcs(
            attributes.SUBNETS, [_extend_subnet_dict_binding])
        resource_extend.register_funcs(
            attributes.PORTS, [_extend_port_dict_binding])
        self.nova_updater = NovaSegmentNotifier()
Example #6
class L3_NAT_dbonly_mixin(l3_db.L3_NAT_dbonly_mixin):
    """Mixin class to add configurable gateway modes."""

    resource_extend.register_funcs(l3.ROUTERS, ['_extend_router_dict_gw_mode'])

    def _extend_router_dict_gw_mode(self, router_res, router_db):
        if router_db.gw_port_id:
            nw_id = router_db.gw_port['network_id']
            router_res[EXTERNAL_GW_INFO] = {
                'network_id': nw_id,
                'enable_snat': router_db.enable_snat,
                'external_fixed_ips': [
                    {'subnet_id': ip['subnet_id'],
                     'ip_address': ip['ip_address']}
                    for ip in router_db.gw_port['fixed_ips']]
            }

    def _update_router_gw_info(self, context, router_id, info, router=None):
        # Load the router only if necessary
        if not router:
            router = self._get_router(context, router_id)
        with context.session.begin(subtransactions=True):
            router.enable_snat = self._get_enable_snat(info)

        # Call superclass, passing the router db object to avoid re-loading
        super(L3_NAT_dbonly_mixin, self)._update_router_gw_info(context,
                                                                router_id,
                                                                info,
                                                                router=router)
        # Returning the router may prove useful if this
        # method is overridden in child classes
        return router

    @staticmethod
    def _get_enable_snat(info):
        if info and 'enable_snat' in info:
            return info['enable_snat']
        # if enable_snat is not specified then use the default value
        return cfg.CONF.enable_snat_by_default

    def _build_routers_list(self, context, routers, gw_ports):
        for rtr in routers:
            gw_port_id = rtr['gw_port_id']
            # Collect gw ports only if available
            if gw_port_id and gw_ports.get(gw_port_id):
                rtr['gw_port'] = gw_ports[gw_port_id]
                # Add enable_snat key
                rtr['enable_snat'] = rtr[EXTERNAL_GW_INFO]['enable_snat']
        return routers
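
The enable_snat lookup in _get_enable_snat is a three-way precedence: an explicit value in the request wins, and anything else (a missing key, or no info at all) falls through to the configured default. A standalone restatement, with the default parameterized for illustration:

def get_enable_snat(info, default=True):
    if info and 'enable_snat' in info:
        return info['enable_snat']
    return default

assert get_enable_snat({'enable_snat': False}) is False
assert get_enable_snat({}) is True      # key absent: use the default
assert get_enable_snat(None) is True    # no info at all: same
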
Example #7
    def __init__(self):
        resource_extend.register_funcs(attributes.NETWORKS,
                                       [_extend_network_dict_binding])
        resource_extend.register_funcs(attributes.SUBNETS,
                                       [_extend_subnet_dict_binding])
        resource_extend.register_funcs(attributes.PORTS,
                                       [_extend_port_dict_binding])
        self.nova_updater = NovaSegmentNotifier()

        registry.subscribe(self._prevent_segment_delete_with_subnet_associated,
                           resources.SEGMENT, events.BEFORE_DELETE)
Example #8
class DataPlaneStatusExtensionTestPlugin(db_base_plugin_v2.NeutronDbPluginV2,
                                         dps_db.DataPlaneStatusMixin):

    supported_extension_aliases = ["data-plane-status"]

    def update_port(self, context, id, port):
        with context.session.begin(subtransactions=True):
            ret_port = super(DataPlaneStatusExtensionTestPlugin,
                             self).update_port(context, id, port)
            if dps_lib.DATA_PLANE_STATUS in port['port']:
                self._process_update_port_data_plane_status(context,
                                                            port['port'],
                                                            ret_port)
        return ret_port

    resource_extend.register_funcs(attrs.PORTS,
                                   ['_extend_port_data_plane_status'])
Example #9
    def __init__(self):
        super(RevisionPlugin, self).__init__()
        for resource in standard_attr.get_standard_attr_resource_model_map():
            resource_extend.register_funcs(
                resource, [self.extend_resource_dict_revision])
        db_api.sqla_listen(se.Session, 'before_flush', self.bump_revisions)
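
db_api.sqla_listen here wraps SQLAlchemy's event API. A minimal standalone illustration of hooking before_flush with plain SQLAlchemy (the revision bump itself is simplified; the real bump logic is more involved):

from sqlalchemy import event
from sqlalchemy.orm import Session

def bump_revisions(session, flush_context, instances):
    # Increment a revision counter on every dirty object that has one.
    for obj in session.dirty:
        if hasattr(obj, 'revision_number'):
            obj.revision_number += 1

event.listen(Session, 'before_flush', bump_revisions)
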
Example #10
    def get_plugin_description(self):
        """returns string description of the plugin."""
        return ("L3 Router Service Plugin for basic L3 forwarding"
                " between (L2) Neutron networks and access to external"
                " networks via a NAT gateway.")

    def router_supports_scheduling(self, context, router_id):
        return self.l3_driver_controller.uses_scheduler(context, router_id)

    def create_floatingip(self, context, floatingip):
        """Create floating IP.

        :param context: Neutron request context
        :param floatingip: data for the floating IP being created
        :returns: A floating IP object on success

        As the l3 router plugin asynchronously creates floating IPs
        leveraging the l3 agent, the initial status for the floating
        IP object will be DOWN.
        """
        return super(L3RouterPlugin, self).create_floatingip(
            context, floatingip, initial_status=n_const.FLOATINGIP_STATUS_DOWN)


def add_flavor_id(plugin, router_res, router_db):
    router_res['flavor_id'] = router_db['flavor_id']


resource_extend.register_funcs(l3.ROUTERS, [add_flavor_id])
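
Unlike the quoted-name registrations elsewhere, add_flavor_id is a module-level callable, so it receives the plugin itself as the first argument and can be registered once at import time. A hypothetical extender of the same shape (the 'description' field is just an example column, not taken from the source):

def add_description(plugin, router_res, router_db):
    # Copy a column from the DB row straight into the API response.
    router_res['description'] = router_db['description']

resource_extend.register_funcs(l3.ROUTERS, [add_description])
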
Example #11
class ExtraDhcpOptMixin(object):
    """Mixin class to add extra options to the DHCP opts file
    and associate them to a port.
    """
    def _is_valid_opt_value(self, opt_name, opt_value):
        # If the dhcp opt is blank-able, it shouldn't be saved to the DB
        # when the value is None
        if opt_name in edo_ext.VALID_BLANK_EXTRA_DHCP_OPTS:
            return opt_value is not None

        # Otherwise, it shouldn't be saved to the DB when the value is
        # None or empty
        return bool(opt_value)

    def _process_port_create_extra_dhcp_opts(self, context, port,
                                             extra_dhcp_opts):
        if not extra_dhcp_opts:
            return port
        with context.session.begin(subtransactions=True):
            for dopt in extra_dhcp_opts:
                if self._is_valid_opt_value(dopt['opt_name'],
                                            dopt['opt_value']):
                    ip_version = dopt.get('ip_version', 4)
                    extra_dhcp_obj = obj_extra_dhcp.ExtraDhcpOpt(
                        context,
                        port_id=port['id'],
                        opt_name=dopt['opt_name'],
                        opt_value=dopt['opt_value'],
                        ip_version=ip_version)
                    extra_dhcp_obj.create()
        return self._extend_port_extra_dhcp_opts_dict(context, port)

    def _extend_port_extra_dhcp_opts_dict(self, context, port):
        port[edo_ext.EXTRADHCPOPTS] = self._get_port_extra_dhcp_opts_binding(
            context, port['id'])

    def _get_port_extra_dhcp_opts_binding(self, context, port_id):
        opts = obj_extra_dhcp.ExtraDhcpOpt.get_objects(context,
                                                       port_id=port_id)
        # TODO(mhickey): When port serialization is available then
        # the object list should be returned instead
        return [{
            'opt_name': r.opt_name,
            'opt_value': r.opt_value,
            'ip_version': r.ip_version
        } for r in opts]

    def _update_extra_dhcp_opts_on_port(self,
                                        context,
                                        id,
                                        port,
                                        updated_port=None):
        # It is not necessary to update in a transaction, because
        # it's called from within one from ovs_neutron_plugin.
        dopts = port['port'].get(edo_ext.EXTRADHCPOPTS)

        if dopts:
            opts = obj_extra_dhcp.ExtraDhcpOpt.get_objects(context, port_id=id)
            # if there are currently no dhcp_options associated with
            # this port, then just insert the new ones and be done.
            with context.session.begin(subtransactions=True):
                for upd_rec in dopts:
                    for opt in opts:
                        if (opt['opt_name'] == upd_rec['opt_name']
                                and opt['ip_version'] == upd_rec.get(
                                    'ip_version', 4)):
                            # to handle deleting of an opt from the port.
                            if upd_rec['opt_value'] is None:
                                opt.delete()
                            else:
                                if (self._is_valid_opt_value(
                                        opt['opt_name'], upd_rec['opt_value'])
                                        and opt['opt_value'] !=
                                        upd_rec['opt_value']):
                                    opt['opt_value'] = upd_rec['opt_value']
                                    opt.update()
                            break
                    else:
                        if self._is_valid_opt_value(upd_rec['opt_name'],
                                                    upd_rec['opt_value']):
                            ip_version = upd_rec.get('ip_version', 4)
                            extra_dhcp_obj = obj_extra_dhcp.ExtraDhcpOpt(
                                context,
                                port_id=id,
                                opt_name=upd_rec['opt_name'],
                                opt_value=upd_rec['opt_value'],
                                ip_version=ip_version)
                            extra_dhcp_obj.create()

            if updated_port:
                edolist = self._get_port_extra_dhcp_opts_binding(context, id)
                updated_port[edo_ext.EXTRADHCPOPTS] = edolist

        return bool(dopts)

    def _extend_port_dict_extra_dhcp_opt(self, res, port):
        res[edo_ext.EXTRADHCPOPTS] = [{
            'opt_name': dho.opt_name,
            'opt_value': dho.opt_value,
            'ip_version': dho.ip_version
        } for dho in port.dhcp_opts]
        return res

    resource_extend.register_funcs(attributes.PORTS,
                                   ['_extend_port_dict_extra_dhcp_opt'])
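
The update path in _update_extra_dhcp_opts_on_port leans on Python's for/else: the else branch runs only when the inner loop finishes without a break, i.e. when no existing option matched and a new record must be inserted. The same merge pattern in isolation:

existing = [{'opt_name': 'dns-server', 'opt_value': '8.8.8.8'}]
updates = [{'opt_name': 'mtu', 'opt_value': '1450'}]
for upd in updates:
    for opt in existing:
        if opt['opt_name'] == upd['opt_name']:
            opt['opt_value'] = upd['opt_value']   # match: update in place
            break
    else:
        existing.append(upd)                      # no match: insert
assert {'opt_name': 'mtu', 'opt_value': '1450'} in existing
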
Example #12
class AutoAllocatedTopologyMixin(common_db_mixin.CommonDbMixin):

    resource_extend.register_funcs(attributes.NETWORKS,
                                   [_extend_external_network_default])

    def __new__(cls, *args, **kwargs):
        # NOTE(kevinbenton): we subscribe on object construction because
        # the tests blow away the callback manager for each run
        new = super(AutoAllocatedTopologyMixin,
                    cls).__new__(cls, *args, **kwargs)
        registry.subscribe(_ensure_external_network_default_value_callback,
                           resources.EXTERNAL_NETWORK, events.PRECOMMIT_CREATE)
        registry.subscribe(_ensure_external_network_default_value_callback,
                           resources.EXTERNAL_NETWORK, events.BEFORE_UPDATE)
        return new

    # TODO(armax): if a tenant modifies auto allocated resources under
    # the hood the behavior of the get_auto_allocated_topology API is
    # undetermined. Consider adding callbacks to deal with the following
    # situations:
    # - insert subnet -> plug router interface
    # - delete router -> remove the entire topology
    # - update subnet -> prevent operation
    # - update router gateway -> prevent operation
    # - ...

    @property
    def core_plugin(self):
        if not getattr(self, '_core_plugin', None):
            self._core_plugin = directory.get_plugin()
        return self._core_plugin

    @property
    def l3_plugin(self):
        if not getattr(self, '_l3_plugin', None):
            self._l3_plugin = directory.get_plugin(constants.L3)
        return self._l3_plugin

    def get_auto_allocated_topology(self, context, tenant_id, fields=None):
        """Return tenant's network associated to auto-allocated topology.

        The topology will be provisioned upon return, if the network
        is missing.
        """
        fields = fields or []
        tenant_id = self._validate(context, tenant_id)
        if CHECK_REQUIREMENTS in fields:
            # for dry-run requests, simply validates that subsequent
            # requests can be fulfilled based on a set of requirements
            # such as existence of default networks, pools, etc.
            return self._check_requirements(context, tenant_id)
        elif fields:
            raise n_exc.BadRequest(resource='auto_allocate',
                                   msg=_("Unrecognized field"))

        # Check for an existent topology
        network_id = self._get_auto_allocated_network(context, tenant_id)
        if network_id:
            return self._response(network_id, tenant_id, fields=fields)
        # See if we indeed have an external network to connect to, otherwise
        # we will fail fast
        default_external_network = self._get_default_external_network(context)

        # If we reach this point, then we got some work to do!
        network_id = self._build_topology(context, tenant_id,
                                          default_external_network)
        return self._response(network_id, tenant_id, fields=fields)

    def delete_auto_allocated_topology(self, context, tenant_id):
        tenant_id = self._validate(context, tenant_id)
        topology = self._get_auto_allocated_topology(context, tenant_id)
        if topology:
            subnets = self.core_plugin.get_subnets(
                context, filters={'network_id': [topology['network_id']]})
            self._cleanup(context,
                          network_id=topology['network_id'],
                          router_id=topology['router_id'],
                          subnets=subnets)

    def _build_topology(self, context, tenant_id, default_external_network):
        """Build the network topology and returns its network UUID."""
        try:
            subnets = self._provision_tenant_private_network(
                context, tenant_id)
            network_id = subnets[0]['network_id']
            router = self._provision_external_connectivity(
                context, default_external_network, subnets, tenant_id)
            network_id = self._save(context, tenant_id, network_id,
                                    router['id'], subnets)
            return network_id
        except exceptions.UnknownProvisioningError as e:
            # Clean partially provisioned topologies, and reraise the
            # error. If it can be retried, so be it.
            LOG.error(
                _LE("Unknown error while provisioning topology for "
                    "tenant %(tenant_id)s. Reason: %(reason)s"), {
                        'tenant_id': tenant_id,
                        'reason': e
                    })
            self._cleanup(context,
                          network_id=e.network_id,
                          router_id=e.router_id,
                          subnets=e.subnets)
            raise e.error

    def _check_requirements(self, context, tenant_id):
        """Raise if requirements are not met."""
        self._get_default_external_network(context)
        try:
            self._get_supported_subnetpools(context)
        except n_exc.NotFound:
            raise exceptions.AutoAllocationFailure(
                reason=_("No default subnetpools defined"))
        return {'id': 'dry-run=pass', 'tenant_id': tenant_id}

    def _validate(self, context, tenant_id):
        """Validate and return the tenant to be associated to the topology."""
        if tenant_id == 'None':
            # NOTE(HenryG): the client might be sending us astray by
            # passing no tenant; this is really meant to be the tenant
            # issuing the request, therefore let's get it from the context
            tenant_id = context.tenant_id

        if not context.is_admin and tenant_id != context.tenant_id:
            raise n_exc.NotAuthorized()

        return tenant_id

    def _get_auto_allocated_topology(self, context, tenant_id):
        """Return the auto allocated topology record if present or None."""
        return auto_allocate_obj.AutoAllocatedTopology.get_object(
            context, project_id=tenant_id)

    def _get_auto_allocated_network(self, context, tenant_id):
        """Get the auto allocated network for the tenant."""
        network = self._get_auto_allocated_topology(context, tenant_id)
        if network:
            return network['network_id']

    @staticmethod
    def _response(network_id, tenant_id, fields=None):
        """Build response for auto-allocated network."""
        res = {'id': network_id, 'tenant_id': tenant_id}
        return db_utils.resource_fields(res, fields)

    def _get_default_external_network(self, context):
        """Get the default external network for the deployment."""

        default_external_networks = net_obj.ExternalNetwork.get_objects(
            context, is_default=True)

        if not default_external_networks:
            LOG.error(
                _LE("Unable to find default external network "
                    "for deployment, please create/assign one to "
                    "allow auto-allocation to work correctly."))
            raise exceptions.AutoAllocationFailure(
                reason=_("No default router:external network"))
        if len(default_external_networks) > 1:
            LOG.error(
                _LE("Multiple external default networks detected. "
                    "Network %s is true 'default'."),
                default_external_networks[0]['network_id'])
        return default_external_networks[0].network_id

    def _get_supported_subnetpools(self, context):
        """Return the default subnet pools available."""
        default_subnet_pools = [
            self.core_plugin.get_default_subnetpool(context, ver)
            for ver in (4, 6)
        ]
        available_pools = [s for s in default_subnet_pools if s]
        if not available_pools:
            LOG.error(_LE("No default pools available"))
            raise n_exc.NotFound()

        return available_pools

    def _provision_tenant_private_network(self, context, tenant_id):
        """Create a tenant private network/subnets."""
        network = None
        try:
            network_args = {
                'name': 'auto_allocated_network',
                'admin_state_up': False,
                'tenant_id': tenant_id,
                'shared': False
            }
            network = p_utils.create_network(self.core_plugin, context,
                                             {'network': network_args})
            subnets = []
            for pool in self._get_supported_subnetpools(context):
                subnet_args = {
                    'name': 'auto_allocated_subnet_v%s' % pool['ip_version'],
                    'network_id': network['id'],
                    'tenant_id': tenant_id,
                    'ip_version': pool['ip_version'],
                    'subnetpool_id': pool['id'],
                }
                subnets.append(
                    p_utils.create_subnet(self.core_plugin, context,
                                          {'subnet': subnet_args}))
            return subnets
        except (c_exc.SubnetAllocationError, ValueError, n_exc.BadRequest,
                n_exc.NotFound) as e:
            LOG.error(
                _LE("Unable to auto allocate topology for tenant "
                    "%(tenant_id)s due to missing or unmet "
                    "requirements. Reason: %(reason)s"), {
                        'tenant_id': tenant_id,
                        'reason': e
                    })
            if network:
                self._cleanup(context, network['id'])
            raise exceptions.AutoAllocationFailure(
                reason=_("Unable to provide tenant private network"))
        except Exception as e:
            network_id = network['id'] if network else None
            raise exceptions.UnknownProvisioningError(e, network_id=network_id)

    def _provision_external_connectivity(self, context,
                                         default_external_network, subnets,
                                         tenant_id):
        """Uplink tenant subnet(s) to external network."""
        router_args = {
            'name': 'auto_allocated_router',
            l3.EXTERNAL_GW_INFO: {
                'network_id': default_external_network
            },
            'tenant_id': tenant_id,
            'admin_state_up': True
        }
        router = None
        attached_subnets = []
        try:
            router = self.l3_plugin.create_router(context,
                                                  {'router': router_args})
            for subnet in subnets:
                self.l3_plugin.add_router_interface(
                    context, router['id'], {'subnet_id': subnet['id']})
                attached_subnets.append(subnet)
            return router
        except n_exc.BadRequest as e:
            LOG.error(
                _LE("Unable to auto allocate topology for tenant "
                    "%(tenant_id)s because of router errors. "
                    "Reason: %(reason)s"), {
                        'tenant_id': tenant_id,
                        'reason': e
                    })
            router_id = router['id'] if router else None
            self._cleanup(context,
                          network_id=subnets[0]['network_id'],
                          router_id=router_id,
                          subnets=attached_subnets)
            raise exceptions.AutoAllocationFailure(
                reason=_("Unable to provide external connectivity"))
        except Exception as e:
            router_id = router['id'] if router else None
            raise exceptions.UnknownProvisioningError(
                e,
                network_id=subnets[0]['network_id'],
                router_id=router_id,
                subnets=subnets)

    def _save(self, context, tenant_id, network_id, router_id, subnets):
        """Save auto-allocated topology, or revert in case of DB errors."""
        try:
            auto_allocate_obj.AutoAllocatedTopology(
                context,
                project_id=tenant_id,
                network_id=network_id,
                router_id=router_id).create()
            self.core_plugin.update_network(
                context, network_id, {'network': {
                    'admin_state_up': True
                }})
        except obj_exc.NeutronDbObjectDuplicateEntry:
            LOG.debug(
                "Multiple auto-allocated networks detected for "
                "tenant %s. Attempting clean up for network %s "
                "and router %s.", tenant_id, network_id, router_id)
            self._cleanup(context,
                          network_id=network_id,
                          router_id=router_id,
                          subnets=subnets)
            network_id = self._get_auto_allocated_network(context, tenant_id)
        except Exception as e:
            raise exceptions.UnknownProvisioningError(e,
                                                      network_id=network_id,
                                                      router_id=router_id,
                                                      subnets=subnets)
        return network_id

    def _cleanup(self, context, network_id=None, router_id=None, subnets=None):
        """Clean up auto allocated resources."""
        # Concurrent attempts to delete the topology may interleave and
        # cause some operations to fail with NotFound exceptions. Rather
        # than fail partially, the exceptions should be ignored and the
        # cleanup should proceed uninterrupted.
        if router_id:
            for subnet in subnets or []:
                ignore_notfound(self.l3_plugin.remove_router_interface,
                                context, router_id,
                                {'subnet_id': subnet['id']})
            ignore_notfound(self.l3_plugin.delete_router, context, router_id)

        if network_id:
            ignore_notfound(self.core_plugin.delete_network, context,
                            network_id)
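
_cleanup depends on an ignore_notfound helper defined elsewhere in the module. A plausible sketch of it, assuming neutron_lib's NotFound exception; the real helper may differ in detail:

from neutron_lib import exceptions as n_exc

def ignore_notfound(func, *args, **kwargs):
    try:
        return func(*args, **kwargs)
    except n_exc.NotFound:
        pass  # already gone: a concurrent cleanup won the race
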
Example #13
def register_dict_extend_funcs(resource, funcs):
    _resource_extend.register_funcs(resource, funcs)
Example #14
    def __new__(cls, *args, **kwargs):
        for resource in standard_attr.get_standard_attr_resource_model_map():
            resource_extend.register_funcs(
                resource, ['_extend_standard_attr_description'])
        return super(StandardAttrDescriptionMixin, cls).__new__(
            cls, *args, **kwargs)
Example #15
def register_dict_extend_funcs(resource, funcs):
    _resource_extend.register_funcs(resource, funcs)
Example #16
class AddressScopeDbMixin(ext_address_scope.AddressScopePluginBase):
    """Mixin class to add address scope to db_base_plugin_v2."""

    __native_bulk_support = True

    @staticmethod
    def _make_address_scope_dict(address_scope, fields=None):
        res = {
            'id': address_scope['id'],
            'name': address_scope['name'],
            'tenant_id': address_scope['tenant_id'],
            'shared': address_scope['shared'],
            'ip_version': address_scope['ip_version']
        }
        return db_utils.resource_fields(res, fields)

    def _get_address_scope(self, context, id):
        obj = obj_addr_scope.AddressScope.get_object(context, id=id)
        if obj is None:
            raise ext_address_scope.AddressScopeNotFound(address_scope_id=id)
        return obj

    def is_address_scope_owned_by_tenant(self, context, id):
        """Check if address scope id is owned by the tenant or not.

        AddressScopeNotFound is raised if the
          - address scope id doesn't exist or
          - if the (unshared) address scope id is not owned by this tenant.

        @return Returns true if the user is admin or the tenant is the
                owner. Returns false if the address scope id is shared
                and not owned by the tenant.
        """
        address_scope = self._get_address_scope(context, id)
        return context.is_admin or (address_scope.tenant_id
                                    == context.tenant_id)

    def get_ip_version_for_address_scope(self, context, id):
        address_scope = self._get_address_scope(context, id)
        return address_scope.ip_version

    def create_address_scope(self, context, address_scope):
        """Create an address scope."""
        a_s = address_scope['address_scope']
        address_scope_id = a_s.get('id') or uuidutils.generate_uuid()
        pool_args = {
            'project_id': a_s['tenant_id'],
            'id': address_scope_id,
            'name': a_s['name'],
            'shared': a_s['shared'],
            'ip_version': a_s['ip_version']
        }
        address_scope = obj_addr_scope.AddressScope(context, **pool_args)
        address_scope.create()
        return self._make_address_scope_dict(address_scope)

    def update_address_scope(self, context, id, address_scope):
        a_s = address_scope['address_scope']
        address_scope = self._get_address_scope(context, id)
        if address_scope.shared and not a_s.get('shared', True):
            reason = _("Shared address scope can't be unshared")
            raise ext_address_scope.AddressScopeUpdateError(
                address_scope_id=id, reason=reason)

        address_scope.update_fields(a_s)
        address_scope.update()
        return self._make_address_scope_dict(address_scope)

    def get_address_scope(self, context, id, fields=None):
        address_scope = self._get_address_scope(context, id)
        return self._make_address_scope_dict(address_scope, fields)

    def get_address_scopes(self,
                           context,
                           filters=None,
                           fields=None,
                           sorts=None,
                           limit=None,
                           marker=None,
                           page_reverse=False):
        pager = base_obj.Pager(sorts, limit, page_reverse, marker)
        address_scopes = obj_addr_scope.AddressScope.get_objects(context,
                                                                 _pager=pager,
                                                                 **filters)

        return [
            self._make_address_scope_dict(addr_scope, fields)
            for addr_scope in address_scopes
        ]

    def get_address_scopes_count(self, context, filters=None):
        return obj_addr_scope.AddressScope.count(context, **filters)

    def delete_address_scope(self, context, id):
        with db_api.context_manager.writer.using(context):
            if subnetpool_obj.SubnetPool.get_objects(context,
                                                     address_scope_id=id):
                raise ext_address_scope.AddressScopeInUse(address_scope_id=id)
            address_scope = self._get_address_scope(context, id)
            address_scope.delete()

    def _extend_network_dict_address_scope(self, network_res, network_db):
        network_res[ext_address_scope.IPV4_ADDRESS_SCOPE] = None
        network_res[ext_address_scope.IPV6_ADDRESS_SCOPE] = None
        subnetpools = {
            subnet.subnetpool
            for subnet in network_db.subnets if subnet.subnetpool
        }
        for subnetpool in subnetpools:
            # A network will be constrained to only one subnetpool per address
            # family. Retrieve the address scope of subnetpools as the address
            # scopes of network.
            as_id = subnetpool[ext_address_scope.ADDRESS_SCOPE_ID]
            if subnetpool['ip_version'] == constants.IP_VERSION_4:
                network_res[ext_address_scope.IPV4_ADDRESS_SCOPE] = as_id
            if subnetpool['ip_version'] == constants.IP_VERSION_6:
                network_res[ext_address_scope.IPV6_ADDRESS_SCOPE] = as_id
        return network_res

    resource_extend.register_funcs(attr.NETWORKS,
                                   ['_extend_network_dict_address_scope'])
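
Several of these mixins funnel their responses through db_utils.resource_fields to honor the caller's fields argument. An illustrative stand-in approximating its core behavior (the real helper does slightly more normalization):

def resource_fields(resource, fields):
    if not fields:
        return resource
    return {key: value for key, value in resource.items() if key in fields}

assert resource_fields({'id': 'x', 'shared': False}, ['id']) == {'id': 'x'}
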
Example #17
            else:
                host = bind_port.host if bind_port else None
        self._extend_port_dict_binding_host(port, host)

    def get_port_host(self, context, port_id):
        with context.session.begin(subtransactions=True):
            bind_port = (
                context.session.query(pmodels.PortBindingPort).
                filter_by(port_id=port_id).
                first()
            )
            return bind_port.host if bind_port else None

    def _extend_port_dict_binding_host(self, port_res, host):
        super(PortBindingMixin, self).extend_port_dict_binding(
            port_res, None)
        port_res[portbindings.HOST_ID] = host

    def extend_port_dict_binding(self, port_res, port_db):
        host = port_db.portbinding.host if port_db.portbinding else None
        self._extend_port_dict_binding_host(port_res, host)


def _extend_port_dict_binding(plugin, port_res, port_db):
    if not isinstance(plugin, PortBindingMixin):
        return
    plugin.extend_port_dict_binding(port_res, port_db)


resource_extend.register_funcs(attributes.PORTS, [_extend_port_dict_binding])
Example #18
class External_net_db_mixin(object):
    """Mixin class to add external network methods to db_base_plugin_v2."""
    @staticmethod
    def _network_filter_hook(context, original_model, conditions):
        if conditions is not None and not hasattr(conditions, '__iter__'):
            conditions = (conditions, )
        # Apply the external network filter only in non-admin and non-advsvc
        # context
        if db_utils.model_query_scope_is_project(context, original_model):
            # the table will already be joined to the rbac entries for the
            # shared check, so we don't need to ensure the join here
            rbac_model = original_model.rbac_entries.property.mapper.class_
            tenant_allowed = ((rbac_model.action == 'access_as_external') &
                              (rbac_model.target_tenant == context.tenant_id) |
                              (rbac_model.target_tenant == '*'))
            conditions = expr.or_(tenant_allowed, *conditions)
        return conditions

    def _network_result_filter_hook(self, query, filters):
        vals = filters and filters.get(external_net.EXTERNAL, [])
        if not vals:
            return query
        if vals[0]:
            return query.filter(models_v2.Network.external.has())
        return query.filter(~models_v2.Network.external.has())

    model_query.register_hook(models_v2.Network, "external_net", None,
                              '_network_filter_hook',
                              '_network_result_filter_hook')

    def _network_is_external(self, context, net_id):
        return net_obj.ExternalNetwork.objects_exist(context,
                                                     network_id=net_id)

    def _extend_network_dict_l3(self, network_res, network_db):
        # Compare with None to convert the UUID into a bool
        network_res[external_net.EXTERNAL] = network_db.external is not None
        return network_res

    resource_extend.register_funcs(attributes.NETWORKS,
                                   ['_extend_network_dict_l3'])

    def _process_l3_create(self, context, net_data, req_data):
        external = req_data.get(external_net.EXTERNAL)
        external_set = validators.is_attr_set(external)

        if not external_set:
            return

        if external:
            net_obj.ExternalNetwork(context,
                                    network_id=net_data['id']).create()
            context.session.add(
                rbac_db.NetworkRBAC(object_id=net_data['id'],
                                    action='access_as_external',
                                    target_tenant='*',
                                    tenant_id=net_data['tenant_id']))
            try:
                registry.notify(resources.EXTERNAL_NETWORK,
                                events.PRECOMMIT_CREATE,
                                self,
                                context=context,
                                request=req_data,
                                network=net_data)
            except c_exc.CallbackFailure as e:
                # raise the underlying exception
                raise e.errors[0].error
        net_data[external_net.EXTERNAL] = external

    def _process_l3_update(self, context, net_data, req_data, allow_all=True):
        try:
            registry.notify(resources.EXTERNAL_NETWORK,
                            events.BEFORE_UPDATE,
                            self,
                            context=context,
                            request=req_data,
                            network=net_data)
        except c_exc.CallbackFailure as e:
            # raise the underlying exception
            raise e.errors[0].error

        new_value = req_data.get(external_net.EXTERNAL)
        net_id = net_data['id']
        if not validators.is_attr_set(new_value):
            return

        if net_data.get(external_net.EXTERNAL) == new_value:
            return

        if new_value:
            net_obj.ExternalNetwork(context, network_id=net_id).create()
            net_data[external_net.EXTERNAL] = True
            if allow_all:
                context.session.add(
                    rbac_db.NetworkRBAC(object_id=net_id,
                                        action='access_as_external',
                                        target_tenant='*',
                                        tenant_id=net_data['tenant_id']))
        else:
            # must make sure we do not have any external gateway ports
            # (and thus, possible floating IPs) on this network before
            # allowing it to be updated to external=False
            port = context.session.query(
                models_v2.Port).filter_by(device_owner=DEVICE_OWNER_ROUTER_GW,
                                          network_id=net_data['id']).first()
            if port:
                raise external_net.ExternalNetworkInUse(net_id=net_id)

            net_obj.ExternalNetwork.delete_objects(context, network_id=net_id)
            for rbdb in (context.session.query(rbac_db.NetworkRBAC).filter_by(
                    object_id=net_id, action='access_as_external')):
                context.session.delete(rbdb)
            net_data[external_net.EXTERNAL] = False

    def _process_l3_delete(self, context, network_id):
        l3plugin = directory.get_plugin(constants.L3)
        if l3plugin:
            l3plugin.delete_disassociated_floatingips(context, network_id)

    def get_external_network_id(self, context):
        nets = self.get_networks(context, {external_net.EXTERNAL: [True]})
        if len(nets) > 1:
            raise n_exc.TooManyExternalNetworks()
        else:
            return nets[0]['id'] if nets else None

    @registry.receives('rbac-policy', [events.BEFORE_CREATE])
    def _process_ext_policy_create(self, resource, event, trigger, context,
                                   object_type, policy, **kwargs):
        if (object_type != 'network'
                or policy['action'] != 'access_as_external'):
            return
        net = self.get_network(context, policy['object_id'])
        if not context.is_admin and net['tenant_id'] != context.tenant_id:
            msg = _("Only admins can manipulate policies on networks they "
                    "do not own")
            raise n_exc.InvalidInput(error_message=msg)
        if not self._network_is_external(context, policy['object_id']):
            # we automatically convert the network into an external network
            self._process_l3_update(context,
                                    net, {external_net.EXTERNAL: True},
                                    allow_all=False)

    @registry.receives('rbac-policy',
                       (events.BEFORE_UPDATE, events.BEFORE_DELETE))
    def _validate_ext_not_in_use_by_tenant(self, resource, event, trigger,
                                           context, object_type, policy,
                                           **kwargs):
        if (object_type != 'network'
                or policy['action'] != 'access_as_external'):
            return
        new_tenant = None
        if event == events.BEFORE_UPDATE:
            new_tenant = kwargs['policy_update']['target_tenant']
            if new_tenant == policy['target_tenant']:
                # nothing to validate if the tenant didn't change
                return
        ports = context.session.query(models_v2.Port.id).filter_by(
            device_owner=DEVICE_OWNER_ROUTER_GW,
            network_id=policy['object_id'])
        router = context.session.query(l3_models.Router).filter(
            l3_models.Router.gw_port_id.in_(ports))
        rbac = rbac_db.NetworkRBAC
        if policy['target_tenant'] != '*':
            router = router.filter(
                l3_models.Router.tenant_id == policy['target_tenant'])
            # if there is a wildcard entry we can safely proceed without the
            # router lookup because they will have access either way
            if context.session.query(rbac_db.NetworkRBAC).filter(
                    rbac.object_id == policy['object_id'],
                    rbac.action == 'access_as_external',
                    rbac.target_tenant == '*').count():
                return
        else:
            # deleting the wildcard is okay as long as the tenants with
            # attached routers have their own entries and the network is
            # not the default external network.
            if net_obj.ExternalNetwork.objects_exist(
                    context, network_id=policy['object_id'], is_default=True):
                msg = _("Default external networks must be shared to "
                        "everyone.")
                raise rbac_ext.RbacPolicyInUse(object_id=policy['object_id'],
                                               details=msg)
            tenants_with_entries = (context.session.query(
                rbac.target_tenant).filter(
                    rbac.object_id == policy['object_id'],
                    rbac.action == 'access_as_external',
                    rbac.target_tenant != '*'))
            router = router.filter(
                ~l3_models.Router.tenant_id.in_(tenants_with_entries))
            if new_tenant:
                # if this is an update we also need to ignore any router
                # interfaces that belong to the new target.
                router = router.filter(
                    l3_models.Router.tenant_id != new_tenant)
        if router.count():
            msg = _("There are routers attached to this network that "
                    "depend on this policy for access.")
            raise rbac_ext.RbacPolicyInUse(object_id=policy['object_id'],
                                           details=msg)

    @registry.receives(resources.NETWORK, [events.BEFORE_DELETE])
    def _before_network_delete_handler(self, resource, event, trigger, context,
                                       network_id, **kwargs):
        self._process_l3_delete(context, network_id)
Example #19
class DNSDbMixin(object):
    """Mixin class to add DNS methods to db_base_plugin_v2."""

    _dns_driver = None

    @property
    def dns_driver(self):
        if self._dns_driver:
            return self._dns_driver
        if not cfg.CONF.external_dns_driver:
            return
        try:
            self._dns_driver = driver.ExternalDNSService.get_instance()
            LOG.debug("External DNS driver loaded: %s",
                      cfg.CONF.external_dns_driver)
            return self._dns_driver
        except ImportError:
            LOG.exception(_LE("ImportError exception occurred while loading "
                              "the external DNS service driver"))
            raise dns.ExternalDNSDriverNotFound(
                driver=cfg.CONF.external_dns_driver)

    def _extend_floatingip_dict_dns(self, floatingip_res, floatingip_db):
        floatingip_res['dns_domain'] = ''
        floatingip_res['dns_name'] = ''
        if floatingip_db.dns:
            floatingip_res['dns_domain'] = floatingip_db.dns['dns_domain']
            floatingip_res['dns_name'] = floatingip_db.dns['dns_name']
        return floatingip_res

    resource_extend.register_funcs(
        l3.FLOATINGIPS, ['_extend_floatingip_dict_dns'])

    def _process_dns_floatingip_create_precommit(self, context,
                                                 floatingip_data, req_data):
        # expects to be called within a plugin's session
        dns_domain = req_data.get(dns.DNSDOMAIN)
        if not validators.is_attr_set(dns_domain):
            return
        if not self.dns_driver:
            return

        dns_name = req_data[dns.DNSNAME]
        self._validate_floatingip_dns(dns_name, dns_domain)

        current_dns_name, current_dns_domain = (
            self._get_requested_state_for_external_dns_service_create(
                context, floatingip_data, req_data))
        dns_actions_data = None
        if current_dns_name and current_dns_domain:
            fip_obj.FloatingIPDNS(context,
                floatingip_id=floatingip_data['id'],
                dns_name=req_data[dns.DNSNAME],
                dns_domain=req_data[dns.DNSDOMAIN],
                published_dns_name=current_dns_name,
                published_dns_domain=current_dns_domain).create()
            dns_actions_data = DNSActionsData(
                current_dns_name=current_dns_name,
                current_dns_domain=current_dns_domain)
        floatingip_data['dns_name'] = dns_name
        floatingip_data['dns_domain'] = dns_domain
        return dns_actions_data

    def _process_dns_floatingip_create_postcommit(self, context,
                                                  floatingip_data,
                                                  dns_actions_data):
        if not dns_actions_data:
            return
        self._add_ips_to_external_dns_service(
            context, dns_actions_data.current_dns_domain,
            dns_actions_data.current_dns_name,
            [floatingip_data['floating_ip_address']])

    def _process_dns_floatingip_update_precommit(self, context,
                                                 floatingip_data):
        # expects to be called within a plugin's session
        if not utils.is_extension_supported(self._core_plugin,
                                            dns.Dns.get_alias()):
            return
        if not self.dns_driver:
            return
        dns_data_db = fip_obj.FloatingIPDNS.get_object(
            context, floatingip_id=floatingip_data['id'])
        if dns_data_db and dns_data_db['dns_name']:
            # dns_name and dns_domain are assigned to the floating IP. It
            # doesn't matter whether they are defined for the internal port
            return
        current_dns_name, current_dns_domain = (
            self._get_requested_state_for_external_dns_service_update(
                context, floatingip_data))
        if dns_data_db:
            if (dns_data_db['published_dns_name'] != current_dns_name or
                dns_data_db['published_dns_domain'] != current_dns_domain):
                dns_actions_data = DNSActionsData(
                    previous_dns_name=dns_data_db['published_dns_name'],
                    previous_dns_domain=dns_data_db['published_dns_domain'])
                if current_dns_name and current_dns_domain:
                    dns_data_db['published_dns_name'] = current_dns_name
                    dns_data_db['published_dns_domain'] = current_dns_domain
                    dns_actions_data.current_dns_name = current_dns_name
                    dns_actions_data.current_dns_domain = current_dns_domain
                else:
                    dns_data_db.delete()
                return dns_actions_data
            else:
                return
        if current_dns_name and current_dns_domain:
            fip_obj.FloatingIPDNS(context,
                floatingip_id=floatingip_data['id'],
                dns_name='',
                dns_domain='',
                published_dns_name=current_dns_name,
                published_dns_domain=current_dns_domain).create()
            return DNSActionsData(current_dns_name=current_dns_name,
                                  current_dns_domain=current_dns_domain)

    def _process_dns_floatingip_update_postcommit(self, context,
                                                  floatingip_data,
                                                  dns_actions_data):
        if not dns_actions_data:
            return
        if dns_actions_data.previous_dns_name:
            self._delete_floatingip_from_external_dns_service(
                context, dns_actions_data.previous_dns_domain,
                dns_actions_data.previous_dns_name,
                [floatingip_data['floating_ip_address']])
        if dns_actions_data.current_dns_name:
            self._add_ips_to_external_dns_service(
                context, dns_actions_data.current_dns_domain,
                dns_actions_data.current_dns_name,
                [floatingip_data['floating_ip_address']])

    def _process_dns_floatingip_delete(self, context, floatingip_data):
        if not utils.is_extension_supported(self._core_plugin,
                                            dns.Dns.get_alias()):
            return
        dns_data_db = fip_obj.FloatingIPDNS.get_object(context,
                floatingip_id=floatingip_data['id'])
        if dns_data_db:
            self._delete_floatingip_from_external_dns_service(
                context, dns_data_db['published_dns_domain'],
                dns_data_db['published_dns_name'],
                [floatingip_data['floating_ip_address']])

    def _validate_floatingip_dns(self, dns_name, dns_domain):
        if dns_domain and not dns_name:
            msg = _("dns_domain cannot be specified without a dns_name")
            raise n_exc.BadRequest(resource='floatingip', msg=msg)
        if dns_name and not dns_domain:
            msg = _("dns_name cannot be specified without a dns_domain")
            raise n_exc.BadRequest(resource='floatingip', msg=msg)

    def _get_internal_port_dns_data(self, context, floatingip_data):
        port_dns = port_obj.PortDNS.get_object(
            context, port_id=floatingip_data['port_id'])
        if not (port_dns and port_dns['dns_name']):
            return None, None
        net_dns = network.NetworkDNSDomain.get_net_dns_from_port(
            context=context, port_id=floatingip_data['port_id'])
        if not net_dns:
            return port_dns['dns_name'], None
        return port_dns['dns_name'], net_dns['dns_domain']

    def _delete_floatingip_from_external_dns_service(self, context, dns_domain,
                                                     dns_name, records):
        try:
            self.dns_driver.delete_record_set(context, dns_domain, dns_name,
                                              records)
        except (dns.DNSDomainNotFound, dns.DuplicateRecordSet) as e:
            LOG.exception(_LE("Error deleting Floating IP data from external "
                              "DNS service. Name: '%(name)s'. Domain: "
                              "'%(domain)s'. IP addresses '%(ips)s'. DNS "
                              "service driver message '%(message)s'"),
                          {"name": dns_name,
                           "domain": dns_domain,
                           "message": e.msg,
                           "ips": ', '.join(records)})

    def _get_requested_state_for_external_dns_service_create(self, context,
                                                             floatingip_data,
                                                             req_data):
        fip_dns_name = req_data[dns.DNSNAME]
        if fip_dns_name:
            return fip_dns_name, req_data[dns.DNSDOMAIN]
        if floatingip_data['port_id']:
            return self._get_internal_port_dns_data(context, floatingip_data)
        return None, None

    def _get_requested_state_for_external_dns_service_update(self, context,
                                                             floatingip_data):
        if floatingip_data['port_id']:
            return self._get_internal_port_dns_data(context, floatingip_data)
        return None, None

    def _add_ips_to_external_dns_service(self, context, dns_domain, dns_name,
                                         records):
        try:
            self.dns_driver.create_record_set(context, dns_domain, dns_name,
                                              records)
        except (dns.DNSDomainNotFound, dns.DuplicateRecordSet) as e:
            LOG.exception(_LE("Error publishing floating IP data in external "
                              "DNS service. Name: '%(name)s'. Domain: "
                              "'%(domain)s'. DNS service driver message "
                              "'%(message)s'"),
                          {"name": dns_name,
                           "domain": dns_domain,
                           "message": e.msg})
Example #20
class SecurityGroupDbMixin(ext_sg.SecurityGroupPluginBase):
    """Mixin class to add security group to db_base_plugin_v2."""

    __native_bulk_support = True

    def create_security_group_bulk(self, context, security_groups):
        return self._create_bulk('security_group', context,
                                 security_groups)

    def _registry_notify(self, res, event, id=None, exc_cls=None, **kwargs):
        # NOTE(armax): a callback exception here will prevent the request
        # from being processed. This is a hook point for backend's validation;
        # we raise to propagate the reason for the failure.
        try:
            registry.notify(res, event, self, **kwargs)
        except exceptions.CallbackFailure as e:
            if exc_cls:
                reason = (_('cannot perform %(event)s due to %(reason)s') %
                          {'event': event, 'reason': e})
                raise exc_cls(reason=reason, id=id)

    @db_api.retry_if_session_inactive()
    def create_security_group(self, context, security_group, default_sg=False):
        """Create security group.

        If default_sg is true, this creates the default security group
        for the given tenant if it does not already exist.
        """
        s = security_group['security_group']
        kwargs = {
            'context': context,
            'security_group': s,
            'is_default': default_sg,
        }

        self._registry_notify(resources.SECURITY_GROUP, events.BEFORE_CREATE,
                              exc_cls=ext_sg.SecurityGroupConflict, **kwargs)

        tenant_id = s['tenant_id']

        if not default_sg:
            self._ensure_default_security_group(context, tenant_id)
        else:
            existing_def_sg_id = self._get_default_sg_id(context, tenant_id)
            if existing_def_sg_id is not None:
                # default already exists, return it
                return self.get_security_group(context, existing_def_sg_id)

        with db_api.context_manager.writer.using(context):
            security_group_db = sg_models.SecurityGroup(id=s.get('id') or (
                                              uuidutils.generate_uuid()),
                                              description=s['description'],
                                              tenant_id=tenant_id,
                                              name=s['name'])
            context.session.add(security_group_db)
            if default_sg:
                context.session.add(sg_models.DefaultSecurityGroup(
                    security_group=security_group_db,
                    tenant_id=security_group_db['tenant_id']))
            for ethertype in ext_sg.sg_supported_ethertypes:
                if default_sg:
                    # Allow intercommunication
                    ingress_rule = sg_models.SecurityGroupRule(
                        id=uuidutils.generate_uuid(), tenant_id=tenant_id,
                        security_group=security_group_db,
                        direction='ingress',
                        ethertype=ethertype,
                        source_group=security_group_db)
                    context.session.add(ingress_rule)

                egress_rule = sg_models.SecurityGroupRule(
                    id=uuidutils.generate_uuid(), tenant_id=tenant_id,
                    security_group=security_group_db,
                    direction='egress',
                    ethertype=ethertype)
                context.session.add(egress_rule)

            self._registry_notify(resources.SECURITY_GROUP,
                                  events.PRECOMMIT_CREATE,
                                  exc_cls=ext_sg.SecurityGroupConflict,
                                  **kwargs)

        secgroup_dict = self._make_security_group_dict(security_group_db)

        kwargs['security_group'] = secgroup_dict
        registry.notify(resources.SECURITY_GROUP, events.AFTER_CREATE, self,
                        **kwargs)
        return secgroup_dict

    @db_api.retry_if_session_inactive()
    def get_security_groups(self, context, filters=None, fields=None,
                            sorts=None, limit=None,
                            marker=None, page_reverse=False, default_sg=False):

        # If default_sg is True, do not call _ensure_default_security_group()
        # so this method can be called recursively. context.tenant_id is
        # checked because the unit tests do not explicitly set the context
        # on GETs. TODO(arosen): context handling can probably be improved.
        if not default_sg and context.tenant_id:
            tenant_id = filters.get('tenant_id')
            if tenant_id:
                tenant_id = tenant_id[0]
            else:
                tenant_id = context.tenant_id
            self._ensure_default_security_group(context, tenant_id)
        marker_obj = self._get_marker_obj(context, 'security_group', limit,
                                          marker)
        return self._get_collection(context,
                                    sg_models.SecurityGroup,
                                    self._make_security_group_dict,
                                    filters=filters, fields=fields,
                                    sorts=sorts,
                                    limit=limit, marker_obj=marker_obj,
                                    page_reverse=page_reverse)

    @db_api.retry_if_session_inactive()
    def get_security_groups_count(self, context, filters=None):
        return self._get_collection_count(context, sg_models.SecurityGroup,
                                          filters=filters)

    @db_api.retry_if_session_inactive()
    def get_security_group(self, context, id, fields=None, tenant_id=None):
        """Tenant id is given to handle the case when creating a security
        group rule on behalf of another use.
        """

        if tenant_id:
            tmp_context_tenant_id = context.tenant_id
            context.tenant_id = tenant_id

        try:
            with db_api.context_manager.reader.using(context):
                ret = self._make_security_group_dict(self._get_security_group(
                                                     context, id), fields)
                ret['security_group_rules'] = self.get_security_group_rules(
                    context, {'security_group_id': [id]})
        finally:
            if tenant_id:
                context.tenant_id = tmp_context_tenant_id
        return ret

    def _get_security_group(self, context, id):
        try:
            query = self._model_query(context, sg_models.SecurityGroup)
            sg = query.filter(sg_models.SecurityGroup.id == id).one()

        except exc.NoResultFound:
            raise ext_sg.SecurityGroupNotFound(id=id)
        return sg

    @db_api.retry_if_session_inactive()
    def delete_security_group(self, context, id):
        filters = {'security_group_id': [id]}
        with db_api.context_manager.reader.using(context):
            ports = self._get_port_security_group_bindings(context, filters)
            if ports:
                raise ext_sg.SecurityGroupInUse(id=id)
            # confirm security group exists
            sg = self._get_security_group(context, id)

            if sg['name'] == 'default' and not context.is_admin:
                raise ext_sg.SecurityGroupCannotRemoveDefault()
        kwargs = {
            'context': context,
            'security_group_id': id,
            'security_group': sg,
        }
        self._registry_notify(resources.SECURITY_GROUP,
                              events.BEFORE_DELETE,
                              exc_cls=ext_sg.SecurityGroupInUse, id=id,
                              **kwargs)

        with db_api.context_manager.writer.using(context):
            # Pass security_group_rule_ids to ensure consistency with the
            # deleted rules. Fetch the port bindings and the security group
            # one more time so that they are attached to the session in
            # which the security group will be deleted.
            ports = self._get_port_security_group_bindings(context, filters)
            sg = self._get_security_group(context, id)
            kwargs['security_group_rule_ids'] = [r['id'] for r in sg.rules]
            self._registry_notify(resources.SECURITY_GROUP,
                                  events.PRECOMMIT_DELETE,
                                  exc_cls=ext_sg.SecurityGroupInUse, id=id,
                                  **kwargs)
            context.session.delete(sg)

        kwargs.pop('security_group')
        registry.notify(resources.SECURITY_GROUP, events.AFTER_DELETE,
                        self, **kwargs)

    @db_api.retry_if_session_inactive()
    def update_security_group(self, context, id, security_group):
        s = security_group['security_group']

        kwargs = {
            'context': context,
            'security_group_id': id,
            'security_group': s,
        }
        self._registry_notify(resources.SECURITY_GROUP, events.BEFORE_UPDATE,
                              exc_cls=ext_sg.SecurityGroupConflict, **kwargs)

        with db_api.context_manager.writer.using(context):
            sg = self._get_security_group(context, id)
            if sg['name'] == 'default' and 'name' in s:
                raise ext_sg.SecurityGroupCannotUpdateDefault()
            self._registry_notify(
                    resources.SECURITY_GROUP,
                    events.PRECOMMIT_UPDATE,
                    exc_cls=ext_sg.SecurityGroupConflict, **kwargs)
            sg.update(s)
        sg_dict = self._make_security_group_dict(sg)

        kwargs['security_group'] = sg_dict
        registry.notify(resources.SECURITY_GROUP, events.AFTER_UPDATE, self,
                        **kwargs)
        return sg_dict

    def _make_security_group_dict(self, security_group, fields=None):
        res = {'id': security_group['id'],
               'name': security_group['name'],
               'tenant_id': security_group['tenant_id'],
               'description': security_group['description']}
        res['security_group_rules'] = [self._make_security_group_rule_dict(r)
                                       for r in security_group.rules]
        self._apply_dict_extend_functions(ext_sg.SECURITYGROUPS, res,
                                          security_group)
        return db_utils.resource_fields(res, fields)

    @staticmethod
    def _make_security_group_binding_dict(security_group, fields=None):
        res = {'port_id': security_group['port_id'],
               'security_group_id': security_group['security_group_id']}
        return db_utils.resource_fields(res, fields)

    @db_api.retry_if_session_inactive()
    def _create_port_security_group_binding(self, context, port_id,
                                            security_group_id):
        with db_api.context_manager.writer.using(context):
            db = sg_models.SecurityGroupPortBinding(port_id=port_id,
                                          security_group_id=security_group_id)
            context.session.add(db)

    def _get_port_security_group_bindings(self, context,
                                          filters=None, fields=None):
        return self._get_collection(context,
                                    sg_models.SecurityGroupPortBinding,
                                    self._make_security_group_binding_dict,
                                    filters=filters, fields=fields)

    @db_api.retry_if_session_inactive()
    def _delete_port_security_group_bindings(self, context, port_id):
        with db_api.context_manager.writer.using(context):
            query = self._model_query(context,
                                      sg_models.SecurityGroupPortBinding)
            bindings = query.filter(
                sg_models.SecurityGroupPortBinding.port_id == port_id)
            for binding in bindings:
                context.session.delete(binding)

    @db_api.retry_if_session_inactive()
    def create_security_group_rule_bulk(self, context, security_group_rules):
        return self._create_bulk('security_group_rule', context,
                                 security_group_rules)

    @db_api.retry_if_session_inactive()
    def create_security_group_rule_bulk_native(self, context,
                                               security_group_rules):
        rules = security_group_rules['security_group_rules']
        security_group_id = self._validate_security_group_rules(
            context, security_group_rules)
        with db_api.context_manager.writer.using(context):
            if not self.get_security_group(context, security_group_id):
                raise ext_sg.SecurityGroupNotFound(id=security_group_id)

            self._check_for_duplicate_rules(context, rules)
            ret = []
            for rule_dict in rules:
                res_rule_dict = self._create_security_group_rule(
                    context, rule_dict, validate=False)
                ret.append(res_rule_dict)
        for rdict in ret:
            registry.notify(
                resources.SECURITY_GROUP_RULE, events.AFTER_CREATE, self,
                context=context, security_group_rule=rdict)
        return ret

    @db_api.retry_if_session_inactive()
    def create_security_group_rule(self, context, security_group_rule):
        res = self._create_security_group_rule(context, security_group_rule)
        registry.notify(
            resources.SECURITY_GROUP_RULE, events.AFTER_CREATE, self,
            context=context, security_group_rule=res)
        return res

    def _create_security_group_rule(self, context, security_group_rule,
                                    validate=True):
        if validate:
            self._validate_security_group_rule(context, security_group_rule)
        rule_dict = security_group_rule['security_group_rule']
        kwargs = {
            'context': context,
            'security_group_rule': rule_dict
        }
        self._registry_notify(resources.SECURITY_GROUP_RULE,
                              events.BEFORE_CREATE,
                              exc_cls=ext_sg.SecurityGroupConflict, **kwargs)

        with db_api.context_manager.writer.using(context):
            if validate:
                self._check_for_duplicate_rules_in_db(context,
                                                      security_group_rule)
            db = sg_models.SecurityGroupRule(
                id=(rule_dict.get('id') or uuidutils.generate_uuid()),
                tenant_id=rule_dict['tenant_id'],
                security_group_id=rule_dict['security_group_id'],
                direction=rule_dict['direction'],
                remote_group_id=rule_dict.get('remote_group_id'),
                ethertype=rule_dict['ethertype'],
                protocol=rule_dict['protocol'],
                port_range_min=rule_dict['port_range_min'],
                port_range_max=rule_dict['port_range_max'],
                remote_ip_prefix=rule_dict.get('remote_ip_prefix'),
                description=rule_dict.get('description')
            )
            context.session.add(db)
            self._registry_notify(resources.SECURITY_GROUP_RULE,
                              events.PRECOMMIT_CREATE,
                              exc_cls=ext_sg.SecurityGroupConflict, **kwargs)
        return self._make_security_group_rule_dict(db)

    def _get_ip_proto_number(self, protocol):
        if protocol is None:
            return
        # According to bug 1381379, protocol is always stored as a string to
        # avoid problems comparing int and string in PostgreSQL. Here the
        # string is converted back to an int so it can be used as before.
        if protocol in n_const.IP_PROTOCOL_NAME_ALIASES:
            protocol = n_const.IP_PROTOCOL_NAME_ALIASES[protocol]
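        # e.g. 'tcp' -> 6, 'udp' -> 17, '132' -> 132; legacy aliases such
        # as 'icmpv6' are first normalized via IP_PROTOCOL_NAME_ALIASES.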
        return int(constants.IP_PROTOCOL_MAP.get(protocol, protocol))

    def _get_ip_proto_name_and_num(self, protocol):
        if protocol is None:
            return
        protocol = str(protocol)
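        # e.g. 'tcp' -> ['tcp', '6'] and '6' -> ['tcp', '6'], so a rule can
        # be matched whether the protocol was given by name or by number;
        # unrecognized values pass through, e.g. '94' -> ['94', '94'].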
        if protocol in constants.IP_PROTOCOL_MAP:
            return [protocol, str(constants.IP_PROTOCOL_MAP.get(protocol))]
        elif protocol in n_const.IP_PROTOCOL_NUM_TO_NAME_MAP:
            return [n_const.IP_PROTOCOL_NUM_TO_NAME_MAP.get(protocol),
                    protocol]
        return [protocol, protocol]

    def _validate_port_range(self, rule):
        """Check that port_range is valid."""
        if (rule['port_range_min'] is None and
            rule['port_range_max'] is None):
            return
        if not rule['protocol']:
            raise ext_sg.SecurityGroupProtocolRequiredWithPorts()
        ip_proto = self._get_ip_proto_number(rule['protocol'])
        if ip_proto in [constants.PROTO_NUM_TCP, constants.PROTO_NUM_UDP]:
            if rule['port_range_min'] == 0 or rule['port_range_max'] == 0:
                raise ext_sg.SecurityGroupInvalidPortValue(port=0)
            elif (rule['port_range_min'] is not None and
                rule['port_range_max'] is not None and
                rule['port_range_min'] <= rule['port_range_max']):
                pass
            else:
                raise ext_sg.SecurityGroupInvalidPortRange()
        elif ip_proto in [constants.PROTO_NUM_ICMP,
                          constants.PROTO_NUM_IPV6_ICMP]:
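            # For ICMP(v6), port_range_min carries the ICMP type and
            # port_range_max carries the ICMP code; both must be in 0..255.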
            for attr, field in [('port_range_min', 'type'),
                                ('port_range_max', 'code')]:
                if rule[attr] is not None and not (0 <= rule[attr] <= 255):
                    raise ext_sg.SecurityGroupInvalidIcmpValue(
                        field=field, attr=attr, value=rule[attr])
            if (rule['port_range_min'] is None and
                    rule['port_range_max'] is not None):
                raise ext_sg.SecurityGroupMissingIcmpType(
                    value=rule['port_range_max'])

    def _validate_ethertype_and_protocol(self, rule):
        """Check if given ethertype and  protocol are valid or not"""
        if rule['protocol'] in [constants.PROTO_NAME_IPV6_ENCAP,
                                constants.PROTO_NAME_IPV6_FRAG,
                                constants.PROTO_NAME_IPV6_ICMP,
                                constants.PROTO_NAME_IPV6_ICMP_LEGACY,
                                constants.PROTO_NAME_IPV6_NONXT,
                                constants.PROTO_NAME_IPV6_OPTS,
                                constants.PROTO_NAME_IPV6_ROUTE]:
            if rule['ethertype'] == constants.IPv4:
                raise ext_sg.SecurityGroupEthertypeConflictWithProtocol(
                        ethertype=rule['ethertype'], protocol=rule['protocol'])

    def _validate_single_tenant_and_group(self, security_group_rules):
        """Check that all rules belong to the same security group and tenant
        """
        sg_groups = set()
        tenants = set()
        for rule_dict in security_group_rules['security_group_rules']:
            rule = rule_dict['security_group_rule']
            sg_groups.add(rule['security_group_id'])
            if len(sg_groups) > 1:
                raise ext_sg.SecurityGroupNotSingleGroupRules()

            tenants.add(rule['tenant_id'])
            if len(tenants) > 1:
                raise ext_sg.SecurityGroupRulesNotSingleTenant()
        return sg_groups.pop()

    def _validate_security_group_rule(self, context, security_group_rule):
        rule = security_group_rule['security_group_rule']
        self._validate_port_range(rule)
        self._validate_ip_prefix(rule)
        self._validate_ethertype_and_protocol(rule)

        if rule['remote_ip_prefix'] and rule['remote_group_id']:
            raise ext_sg.SecurityGroupRemoteGroupAndRemoteIpPrefix()

        remote_group_id = rule['remote_group_id']
        # Check that remote_group_id exists for tenant
        if remote_group_id:
            self.get_security_group(context, remote_group_id,
                                    tenant_id=rule['tenant_id'])

        security_group_id = rule['security_group_id']

        # Confirm that the tenant has permission
        # to add rules to this security group.
        self.get_security_group(context, security_group_id,
                                tenant_id=rule['tenant_id'])
        return security_group_id

    def _validate_security_group_rules(self, context, security_group_rules):
        sg_id = self._validate_single_tenant_and_group(security_group_rules)
        for rule in security_group_rules['security_group_rules']:
            self._validate_security_group_rule(context, rule)
        return sg_id

    def _make_security_group_rule_dict(self, security_group_rule, fields=None):
        res = {'id': security_group_rule['id'],
               'tenant_id': security_group_rule['tenant_id'],
               'security_group_id': security_group_rule['security_group_id'],
               'ethertype': security_group_rule['ethertype'],
               'direction': security_group_rule['direction'],
               'protocol': security_group_rule['protocol'],
               'port_range_min': security_group_rule['port_range_min'],
               'port_range_max': security_group_rule['port_range_max'],
               'remote_ip_prefix': security_group_rule['remote_ip_prefix'],
               'remote_group_id': security_group_rule['remote_group_id']}

        self._apply_dict_extend_functions(ext_sg.SECURITYGROUPRULES, res,
                                          security_group_rule)
        return db_utils.resource_fields(res, fields)

    def _make_security_group_rule_filter_dict(self, security_group_rule):
        sgr = security_group_rule['security_group_rule']
        res = {'tenant_id': [sgr['tenant_id']],
               'security_group_id': [sgr['security_group_id']],
               'direction': [sgr['direction']]}

        include_if_present = ['protocol', 'port_range_max', 'port_range_min',
                              'ethertype', 'remote_group_id']
        for key in include_if_present:
            value = sgr.get(key)
            if value:
                res[key] = [value]
        # protocol field will get corresponding name and number
        value = sgr.get('protocol')
        if value:
            res['protocol'] = self._get_ip_proto_name_and_num(value)
        return res

    def _rules_equal(self, rule1, rule2):
        """Determines if two rules are equal ignoring id field."""
        rule1_copy = rule1.copy()
        rule2_copy = rule2.copy()
        rule1_copy.pop('id', None)
        rule2_copy.pop('id', None)
        return rule1_copy == rule2_copy

    def _check_for_duplicate_rules(self, context, security_group_rules):
        for i in security_group_rules:
            found_self = False
            for j in security_group_rules:
                if self._rules_equal(i['security_group_rule'],
                                     j['security_group_rule']):
                    if found_self:
                        raise ext_sg.DuplicateSecurityGroupRuleInPost(rule=i)
                    found_self = True

            self._check_for_duplicate_rules_in_db(context, i)

    def _check_for_duplicate_rules_in_db(self, context, security_group_rule):
        # Check in database if rule exists
        filters = self._make_security_group_rule_filter_dict(
            security_group_rule)
        rule_dict = security_group_rule['security_group_rule'].copy()
        rule_dict.pop('description', None)
        keys = rule_dict.keys()
        fields = list(keys) + ['id']
        if 'remote_ip_prefix' not in fields:
            fields += ['remote_ip_prefix']
        db_rules = self.get_security_group_rules(context, filters,
                                                 fields=fields)
        # NOTE(arosen): the call to get_security_group_rules wildcards
        # values in the filter that have a value of [None]. For example,
        # filters = {'remote_group_id': [None]} will return all security
        # group rules regardless of their remote_group_id. It is therefore
        # not possible to express this query directly, since changing the
        # behavior of _get_collection() is not an option as other methods
        # already rely on it. Instead, the filtering below handles these
        # corner cases.
        rule_dict.pop('id', None)
        sg_protocol = rule_dict.pop('protocol', None)
        remote_ip_prefix = rule_dict.pop('remote_ip_prefix', None)
        for db_rule in db_rules:
            rule_id = db_rule.pop('id', None)
            # remove protocol and match separately for number and type
            db_protocol = db_rule.pop('protocol', None)
            is_protocol_matching = (
                self._get_ip_proto_name_and_num(db_protocol) ==
                self._get_ip_proto_name_and_num(sg_protocol))
            db_remote_ip_prefix = db_rule.pop('remote_ip_prefix', None)
            duplicate_ip_prefix = self._validate_duplicate_ip_prefix(
                                    remote_ip_prefix, db_remote_ip_prefix)
            if (is_protocol_matching and duplicate_ip_prefix and
                    rule_dict == db_rule):
                raise ext_sg.SecurityGroupRuleExists(rule_id=rule_id)

    def _validate_duplicate_ip_prefix(self, ip_prefix, other_ip_prefix):
        all_address = ['0.0.0.0/0', '::/0', None]
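        # e.g. ('10.0.0.0/24', '10.0.0.0/24') -> True, and
        # ('0.0.0.0/0', None) -> True since both mean "match any address".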
        if ip_prefix == other_ip_prefix:
            return True
        elif ip_prefix in all_address and other_ip_prefix in all_address:
            return True
        return False

    def _validate_ip_prefix(self, rule):
        """Check that a valid cidr was specified as remote_ip_prefix

        No need to check that it is in fact an IP address as this is already
        validated by attribute validators.
        Check that rule ethertype is consistent with remote_ip_prefix ip type.
        Add mask to ip_prefix if absent (192.168.1.10 -> 192.168.1.10/32).
        """
        input_prefix = rule['remote_ip_prefix']
        if input_prefix:
            addr = netaddr.IPNetwork(input_prefix)
            # set input_prefix to always include the netmask:
            rule['remote_ip_prefix'] = str(addr)
            # check consistency of ethertype with addr version
            if rule['ethertype'] != "IPv%d" % (addr.version):
                raise ext_sg.SecurityGroupRuleParameterConflict(
                    ethertype=rule['ethertype'], cidr=input_prefix)

    @db_api.retry_if_session_inactive()
    def get_security_group_rules(self, context, filters=None, fields=None,
                                 sorts=None, limit=None, marker=None,
                                 page_reverse=False):
        marker_obj = self._get_marker_obj(context, 'security_group_rule',
                                          limit, marker)
        return self._get_collection(context,
                                    sg_models.SecurityGroupRule,
                                    self._make_security_group_rule_dict,
                                    filters=filters, fields=fields,
                                    sorts=sorts,
                                    limit=limit, marker_obj=marker_obj,
                                    page_reverse=page_reverse)

    @db_api.retry_if_session_inactive()
    def get_security_group_rules_count(self, context, filters=None):
        return self._get_collection_count(context, sg_models.SecurityGroupRule,
                                          filters=filters)

    @db_api.retry_if_session_inactive()
    def get_security_group_rule(self, context, id, fields=None):
        security_group_rule = self._get_security_group_rule(context, id)
        return self._make_security_group_rule_dict(security_group_rule, fields)

    def _get_security_group_rule(self, context, id):
        try:
            query = self._model_query(context, sg_models.SecurityGroupRule)
            sgr = query.filter(sg_models.SecurityGroupRule.id == id).one()
        except exc.NoResultFound:
            raise ext_sg.SecurityGroupRuleNotFound(id=id)
        return sgr

    @db_api.retry_if_session_inactive()
    def delete_security_group_rule(self, context, id):
        kwargs = {
            'context': context,
            'security_group_rule_id': id
        }
        self._registry_notify(resources.SECURITY_GROUP_RULE,
                              events.BEFORE_DELETE, id=id,
                              exc_cls=ext_sg.SecurityGroupRuleInUse, **kwargs)

        with db_api.context_manager.writer.using(context):
            query = self._model_query(context,
                                      sg_models.SecurityGroupRule).filter(
                sg_models.SecurityGroupRule.id == id)

            self._registry_notify(resources.SECURITY_GROUP_RULE,
                                  events.PRECOMMIT_DELETE,
                                  exc_cls=ext_sg.SecurityGroupRuleInUse, id=id,
                                  **kwargs)

            try:
                sg_rule = query.one()
                # As there is a filter on a primary key it is not possible for
                # MultipleResultsFound to be raised
                context.session.delete(sg_rule)
            except exc.NoResultFound:
                raise ext_sg.SecurityGroupRuleNotFound(id=id)

            kwargs['security_group_id'] = sg_rule['security_group_id']

        registry.notify(
            resources.SECURITY_GROUP_RULE, events.AFTER_DELETE, self,
            **kwargs)

    def _extend_port_dict_security_group(self, port_res, port_db):
        # Security group bindings will be retrieved from the SQLAlchemy
        # model. As they're loaded eagerly with ports because of the
        # joined load they will not cause an extra query.
        security_group_ids = [sec_group_mapping['security_group_id'] for
                              sec_group_mapping in port_db.security_groups]
        port_res[ext_sg.SECURITYGROUPS] = security_group_ids
        return port_res

    resource_extend.register_funcs(
        attributes.PORTS, ['_extend_port_dict_security_group'])

    def _process_port_create_security_group(self, context, port,
                                            security_group_ids):
        if validators.is_attr_set(security_group_ids):
            for security_group_id in security_group_ids:
                self._create_port_security_group_binding(context, port['id'],
                                                         security_group_id)
        # Convert to list as a set might be passed here and
        # this has to be serialized
        port[ext_sg.SECURITYGROUPS] = (security_group_ids and
                                       list(security_group_ids) or [])

    def _get_default_sg_id(self, context, tenant_id):
        try:
            query = self._model_query(context, sg_models.DefaultSecurityGroup)
            default_group = query.filter_by(tenant_id=tenant_id).one()
            return default_group['security_group_id']
        except exc.NoResultFound:
            pass

    @registry.receives(resources.PORT, [events.BEFORE_CREATE])
    @registry.receives(resources.NETWORK, [events.BEFORE_CREATE])
    def _ensure_default_security_group_handler(self, resource, event, trigger,
                                               context, **kwargs):
        tenant_id = kwargs[resource]['tenant_id']
        self._ensure_default_security_group(context, tenant_id)

    def _ensure_default_security_group(self, context, tenant_id):
        """Create a default security group if one doesn't exist.

        :returns: the default security group id for given tenant.
        """
        existing = self._get_default_sg_id(context, tenant_id)
        if existing is not None:
            return existing
        security_group = {
            'security_group':
                {'name': 'default',
                 'tenant_id': tenant_id,
                 'description': _('Default security group')}
        }
        return self.create_security_group(context, security_group,
                                          default_sg=True)['id']

    def _get_security_groups_on_port(self, context, port):
        """Check that all security groups on port belong to tenant.

        :returns: all security groups IDs on port belonging to tenant.
        """
        port = port['port']
        if not validators.is_attr_set(port.get(ext_sg.SECURITYGROUPS)):
            return
        if port.get('device_owner') and utils.is_port_trusted(port):
            return

        port_sg = port.get(ext_sg.SECURITYGROUPS, [])
        filters = {'id': port_sg}
        tenant_id = port.get('tenant_id')
        if tenant_id:
            filters['tenant_id'] = [tenant_id]
        valid_groups = set(g['id'] for g in
                           self.get_security_groups(context, fields=['id'],
                                                    filters=filters))

        requested_groups = set(port_sg)
        port_sg_missing = requested_groups - valid_groups
        if port_sg_missing:
            raise ext_sg.SecurityGroupNotFound(id=', '.join(port_sg_missing))

        return requested_groups

    def _ensure_default_security_group_on_port(self, context, port):
        # we don't apply security groups for dhcp, router
        port = port['port']
        if port.get('device_owner') and utils.is_port_trusted(port):
            return
        default_sg = self._ensure_default_security_group(context,
                                                         port['tenant_id'])
        if not validators.is_attr_set(port.get(ext_sg.SECURITYGROUPS)):
            port[ext_sg.SECURITYGROUPS] = [default_sg]

    def _check_update_deletes_security_groups(self, port):
        """Return True if port has as a security group and it's value
        is either [] or not is_attr_set, otherwise return False
        """
        if (ext_sg.SECURITYGROUPS in port['port'] and
            not (validators.is_attr_set(port['port'][ext_sg.SECURITYGROUPS])
                 and port['port'][ext_sg.SECURITYGROUPS] != [])):
            return True
        return False

    def _check_update_has_security_groups(self, port):
        """Return True if port has security_groups attribute set and
        its not empty, or False otherwise.
        This method is called both for port create and port update.
        """
        if (ext_sg.SECURITYGROUPS in port['port'] and
            (validators.is_attr_set(port['port'][ext_sg.SECURITYGROUPS]) and
             port['port'][ext_sg.SECURITYGROUPS] != [])):
            return True
        return False

    def update_security_group_on_port(self, context, id, port,
                                      original_port, updated_port):
        """Update security groups on port.

        This method returns a flag indicating whether a notification is
        required; it does not perform the notification itself, because
        other changes to the port may also require notification.
        """
        need_notify = False
        port_updates = port['port']
        if (ext_sg.SECURITYGROUPS in port_updates and
            not helpers.compare_elements(
                original_port.get(ext_sg.SECURITYGROUPS),
                port_updates[ext_sg.SECURITYGROUPS])):
            # delete the port binding and read it with the new rules
            port_updates[ext_sg.SECURITYGROUPS] = (
                self._get_security_groups_on_port(context, port))
            self._delete_port_security_group_bindings(context, id)
            self._process_port_create_security_group(
                context,
                updated_port,
                port_updates[ext_sg.SECURITYGROUPS])
            need_notify = True
        else:
            updated_port[ext_sg.SECURITYGROUPS] = (
                original_port[ext_sg.SECURITYGROUPS])
        return need_notify
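A minimal, self-contained sketch of the duplicate-rule check implemented by
_rules_equal() and _check_for_duplicate_rules() above; the helper names and
rule dicts are illustrative, not part of the mixin:

def rules_equal(rule1, rule2):
    # Compare two rule dicts, ignoring the 'id' field.
    r1, r2 = rule1.copy(), rule2.copy()
    r1.pop('id', None)
    r2.pop('id', None)
    return r1 == r2


def has_duplicate_rules(rules):
    # True if any two distinct rules in the request are equal modulo 'id'.
    for i, rule in enumerate(rules):
        for other in rules[i + 1:]:
            if rules_equal(rule, other):
                return True
    return False


rules = [
    {'id': '1', 'direction': 'ingress', 'protocol': 'tcp',
     'port_range_min': 22, 'port_range_max': 22},
    {'id': '2', 'direction': 'ingress', 'protocol': 'tcp',
     'port_range_min': 22, 'port_range_max': 22},
]
assert has_duplicate_rules(rules)  # same rule apart from 'id'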
Example No. 21
class AllowedAddressPairsMixin(object):
    """Mixin class for allowed address pairs."""

    def _process_create_allowed_address_pairs(self, context, port,
                                              allowed_address_pairs):
        if not validators.is_attr_set(allowed_address_pairs):
            return []
        try:
            with context.session.begin(subtransactions=True):
                for address_pair in allowed_address_pairs:
                    # use port.mac_address if no mac address in address pair
                    if 'mac_address' not in address_pair:
                        address_pair['mac_address'] = port['mac_address']
                    # retain string format as passed through API
                    mac_address = utils.AuthenticEUI(
                        address_pair['mac_address'])
                    ip_address = utils.AuthenticIPNetwork(
                        address_pair['ip_address'])
                    pair_obj = obj_addr_pair.AllowedAddressPair(
                        context,
                        port_id=port['id'],
                        mac_address=mac_address,
                        ip_address=ip_address)
                    pair_obj.create()
        except exceptions.NeutronDbObjectDuplicateEntry:
            raise addr_pair.DuplicateAddressPairInRequest(
                mac_address=address_pair['mac_address'],
                ip_address=address_pair['ip_address'])

        return allowed_address_pairs

    def get_allowed_address_pairs(self, context, port_id):
        pairs = obj_addr_pair.AllowedAddressPair.get_objects(
            context, port_id=port_id)
        return [self._make_allowed_address_pairs_dict(pair.db_obj)
                for pair in pairs]

    def _extend_port_dict_allowed_address_pairs(self, port_res, port_db):
        # If port_db is provided, allowed address pairs will be accessed via
        # sqlalchemy models. As they're loaded together with ports this
        # will not cause an extra query.
        allowed_address_pairs = [
            self._make_allowed_address_pairs_dict(address_pair) for
            address_pair in port_db.allowed_address_pairs]
        port_res[addr_pair.ADDRESS_PAIRS] = allowed_address_pairs
        return port_res

    resource_extend.register_funcs(
        attr.PORTS, ['_extend_port_dict_allowed_address_pairs'])

    def _delete_allowed_address_pairs(self, context, id):
        obj_addr_pair.AllowedAddressPair.delete_objects(
            context, port_id=id)

    @staticmethod
    def _make_allowed_address_pairs_dict(allowed_address_pairs,
                                         fields=None):
        res = {'mac_address': allowed_address_pairs['mac_address'],
               'ip_address': allowed_address_pairs['ip_address']}
        return db_utils.resource_fields(res, fields)

    def _has_address_pairs(self, port):
        return (validators.is_attr_set(port['port'][addr_pair.ADDRESS_PAIRS])
                and port['port'][addr_pair.ADDRESS_PAIRS] != [])

    def _check_update_has_allowed_address_pairs(self, port):
        """Determine if request has an allowed address pair.

        Return True if the port parameter has a non-empty
        'allowed_address_pairs' attribute. Otherwise returns False.
        """
        return (addr_pair.ADDRESS_PAIRS in port['port'] and
                self._has_address_pairs(port))

    def _check_update_deletes_allowed_address_pairs(self, port):
        """Determine if request deletes address pair.

        Return True if the port has an allowed_address_pairs attribute whose
        value is either [] or not is_attr_set; otherwise return False.
        """
        return (addr_pair.ADDRESS_PAIRS in port['port'] and
                not self._has_address_pairs(port))

    def is_address_pairs_attribute_updated(self, port, update_attrs):
        """Check if the address pairs attribute is being updated.

        Returns True if there is an update. This can be used to decide
        if a port update notification should be sent to agents or third
        party controllers.
        """

        new_pairs = update_attrs.get(addr_pair.ADDRESS_PAIRS)
        if new_pairs is None:
            return False
        old_pairs = port.get(addr_pair.ADDRESS_PAIRS)

        # Missing or unchanged address pairs in attributes mean no update
        return new_pairs != old_pairs

    def update_address_pairs_on_port(self, context, port_id, port,
                                     original_port, updated_port):
        """Update allowed address pairs on port.

        Returns True if an update notification is required. Notification
        is not done here because other changes on the port may need
        notification. This method is expected to be called within
        a transaction.
        """
        new_pairs = port['port'].get(addr_pair.ADDRESS_PAIRS)

        if self.is_address_pairs_attribute_updated(original_port,
                                                   port['port']):
            updated_port[addr_pair.ADDRESS_PAIRS] = new_pairs
            self._delete_allowed_address_pairs(context, port_id)
            self._process_create_allowed_address_pairs(
                context, updated_port, new_pairs)
            return True

        return False
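A minimal, self-contained sketch of the update-detection logic in
is_address_pairs_attribute_updated(); the attribute key and the port dicts
below are illustrative:

ADDRESS_PAIRS = 'allowed_address_pairs'


def address_pairs_updated(original_port, update_attrs):
    new_pairs = update_attrs.get(ADDRESS_PAIRS)
    if new_pairs is None:
        return False  # attribute absent from the request: no update
    return new_pairs != original_port.get(ADDRESS_PAIRS)


port = {ADDRESS_PAIRS: [{'ip_address': '10.0.0.5',
                         'mac_address': 'fa:16:3e:00:00:01'}]}
assert not address_pairs_updated(port, {})              # attribute untouched
assert address_pairs_updated(port, {ADDRESS_PAIRS: []})  # pairs cleared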
Example No. 22
class ExtraRoute_dbonly_mixin(l3_db.L3_NAT_dbonly_mixin):
    """Mixin class to support extra route configuration on router."""
    def _extend_router_dict_extraroute(self, router_res, router_db):
        router_res['routes'] = (ExtraRoute_dbonly_mixin._make_extra_route_list(
            router_db['route_list']))

    resource_extend.register_funcs(l3.ROUTERS,
                                   ['_extend_router_dict_extraroute'])

    def update_router(self, context, id, router):
        r = router['router']
        with context.session.begin(subtransactions=True):
            # check that the router exists and we have access to it
            router_db = self._get_router(context, id)
            if 'routes' in r:
                self._update_extra_routes(context, router_db, r['routes'])
            routes = self._get_extra_routes_by_router_id(context, id)
        router_updated = super(ExtraRoute_dbonly_mixin,
                               self).update_router(context, id, router)
        router_updated['routes'] = routes

        return router_updated

    def _get_subnets_by_cidr(self, context, cidr):
        query_subnets = context.session.query(models_v2.Subnet)
        return query_subnets.filter_by(cidr=cidr).all()

    def _validate_routes_nexthop(self, cidrs, ips, routes, nexthop):
        # NOTE(nati): the nexthop must be reachable from the router, so
        # check that it belongs to one of the CIDRs of the router's ports
        if not netaddr.all_matching_cidrs(nexthop, cidrs):
            raise extraroute.InvalidRoutes(
                routes=routes,
                reason=_('the nexthop is not connected with router'))
        # NOTE(nati): the nexthop must not be one of the router's own
        # fixed IPs
        if nexthop in ips:
            raise extraroute.InvalidRoutes(
                routes=routes, reason=_('the nexthop is used by router'))

    def _validate_routes(self, context, router_id, routes):
        if len(routes) > cfg.CONF.max_routes:
            raise extraroute.RoutesExhausted(router_id=router_id,
                                             quota=cfg.CONF.max_routes)

        context = context.elevated()
        filters = {'device_id': [router_id]}
        ports = self._core_plugin.get_ports(context, filters)
        cidrs = []
        ips = []
        for port in ports:
            for ip in port['fixed_ips']:
                cidrs.append(
                    self._core_plugin.get_subnet(context,
                                                 ip['subnet_id'])['cidr'])
                ips.append(ip['ip_address'])
        for route in routes:
            self._validate_routes_nexthop(cidrs, ips, routes, route['nexthop'])

    def _update_extra_routes(self, context, router, routes):
        self._validate_routes(context, router['id'], routes)
        old_routes, routes_dict = self._get_extra_routes_dict_by_router_id(
            context, router['id'])
        added, removed = helpers.diff_list_of_dict(old_routes, routes)
        LOG.debug('Added routes are %s', added)
        for route in added:
            l3_obj.RouterRoute(
                context,
                router_id=router['id'],
                destination=utils.AuthenticIPNetwork(route['destination']),
                nexthop=netaddr.IPAddress(route['nexthop'])).create()

        LOG.debug('Removed routes are %s', removed)
        for route in removed:
            l3_obj.RouterRoute.get_object(context,
                                          router_id=router['id'],
                                          destination=route['destination'],
                                          nexthop=route['nexthop']).delete()

    @staticmethod
    def _make_extra_route_list(extra_routes):
        # NOTE(yamamoto): the extra_routes argument is either object or db row
        return [{
            'destination': str(route['destination']),
            'nexthop': str(route['nexthop'])
        } for route in extra_routes]

    def _get_extra_routes_by_router_id(self, context, id):
        router_objs = l3_obj.RouterRoute.get_objects(context, router_id=id)
        return self._make_extra_route_list(router_objs)

    def _get_extra_routes_dict_by_router_id(self, context, id):
        router_objs = l3_obj.RouterRoute.get_objects(context, router_id=id)
        routes = []
        routes_dict = {}
        for route in router_objs:
            routes.append({
                'destination': route.destination,
                'nexthop': route.nexthop
            })
            routes_dict[(route.destination, route.nexthop)] = route
        return routes, routes_dict

    def _confirm_router_interface_not_in_use(self, context, router_id,
                                             subnet_id):
        super(ExtraRoute_dbonly_mixin,
              self)._confirm_router_interface_not_in_use(
                  context, router_id, subnet_id)
        subnet = self._core_plugin.get_subnet(context, subnet_id)
        subnet_cidr = netaddr.IPNetwork(subnet['cidr'])
        extra_routes = self._get_extra_routes_by_router_id(context, router_id)
        for route in extra_routes:
            if netaddr.all_matching_cidrs(route['nexthop'], [subnet_cidr]):
                raise extraroute.RouterInterfaceInUseByRoute(
                    router_id=router_id, subnet_id=subnet_id)
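A small, runnable sketch of the nexthop checks performed by
_validate_routes_nexthop(), using netaddr directly; the CIDRs and addresses
are illustrative:

import netaddr

# Hypothetical attached-subnet CIDRs and router interface addresses.
cidrs = ['10.0.0.0/24', '192.168.1.0/24']
router_ips = ['10.0.0.1', '192.168.1.1']


def nexthop_is_valid(nexthop):
    # The nexthop must sit on one of the router's attached subnets...
    if not netaddr.all_matching_cidrs(nexthop, cidrs):
        return False
    # ...and must not be one of the router's own interface addresses.
    return nexthop not in router_ips


assert nexthop_is_valid('10.0.0.42')
assert not nexthop_is_valid('172.16.0.1')   # not on an attached subnet
assert not nexthop_is_valid('192.168.1.1')  # the router's own address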