Example #1
    def update_port_status(self, context, port_id, status, host=None):
        """
        Returns port_id (non-truncated uuid) if the port exists.
        Otherwise returns None.
        """
        updated = False
        session = context.session
        # REVISIT: Serialize this operation with a semaphore to
        # prevent deadlock waiting to acquire a DB lock held by
        # another thread in the same process, leading to 'lock wait
        # timeout' errors.
        with contextlib.nested(lockutils.lock('db-access'),
                               session.begin(subtransactions=True)):
            port = db.get_port(session, port_id)
            if not port:
                LOG.warning(_("Port %(port)s updated up by agent not found"),
                            {'port': port_id})
                return None
            if port.status != status:
                original_port = self._make_port_dict(port)
                port.status = status
                updated_port = self._make_port_dict(port)
                network = self.get_network(context,
                                           original_port['network_id'])
                mech_context = driver_context.PortContext(
                    self, context, updated_port, network, port.port_binding,
                    original_port=original_port)
                self.mechanism_manager.update_port_precommit(mech_context)
                updated = True

        if updated:
            self.mechanism_manager.update_port_postcommit(mech_context)

        return port['id']
Example #2
    def update_port_status(self, context, port_id, status):
        updated = False
        session = context.session
        with session.begin(subtransactions=True):
            port = db.get_port(session, port_id)
            if not port:
                LOG.warning(_("Port %(port)s updated up by agent not found"),
                            {'port': port_id})
                return False
            if port.status != status:
                original_port = self._make_port_dict(port)
                port.status = status
                updated_port = self._make_port_dict(port)
                network = self.get_network(context,
                                           original_port['network_id'])
                mech_context = driver_context.PortContext(
                    self, context, updated_port, network,
                    original_port=original_port)
                self.mechanism_manager.update_port_precommit(mech_context)
                updated = True

        if updated:
            self.mechanism_manager.update_port_postcommit(mech_context)

        return True
Example #3
 def update_device_up(self, rpc_context, **kwargs):
     """Device is up on agent."""
     agent_id = kwargs.get('agent_id')
     device = kwargs.get('device')
     host = kwargs.get('host')
     LOG.debug("Device %(device)s up at agent %(agent_id)s",
               {'device': device, 'agent_id': agent_id})
     plugin = manager.NeutronManager.get_plugin()
     port_id = plugin._device_to_port_id(rpc_context, device)
     port = plugin.port_bound_to_host(rpc_context, port_id, host)
     if host and not port:
         LOG.debug("Device %(device)s not bound to the"
                   " agent host %(host)s",
                   {'device': device, 'host': host})
         return
     if port and port['device_owner'] == n_const.DEVICE_OWNER_DVR_INTERFACE:
         # NOTE(kevinbenton): we have to special case DVR ports because of
         # the special multi-binding status update logic they have that
         # depends on the host
         plugin.update_port_status(rpc_context, port_id,
                                   n_const.PORT_STATUS_ACTIVE, host)
     else:
         # _device_to_port_id may have returned a truncated UUID if the
         # agent did not provide a full one (e.g. Linux Bridge case). We
         # need to look up the full one before calling provisioning_complete
         if not port:
             port = ml2_db.get_port(rpc_context.session, port_id)
         if not port:
             # port doesn't exist, no need to add a provisioning block
             return
         provisioning_blocks.provisioning_complete(
             rpc_context, port['id'], resources.PORT,
             provisioning_blocks.L2_AGENT_ENTITY)
Example #4
 def update_device_up(self, rpc_context, **kwargs):
     """Device is up on agent."""
     agent_id, host, device = self._get_request_details(kwargs)
     LOG.debug("Device %(device)s up at agent %(agent_id)s",
               {'device': device, 'agent_id': agent_id})
     plugin = directory.get_plugin()
     port_id = plugin._device_to_port_id(rpc_context, device)
     port = plugin.port_bound_to_host(rpc_context, port_id, host)
     if host and not port:
         LOG.debug("Device %(device)s not bound to the"
                   " agent host %(host)s",
                   {'device': device, 'host': host})
         # this might mean that a VM is in the process of live migration
         # and vif was plugged on the destination compute node;
         # need to notify nova explicitly
         port = ml2_db.get_port(rpc_context, port_id)
         # _device_to_port_id may have returned a truncated UUID if the
         # agent did not provide a full one (e.g. Linux Bridge case).
         if not port:
             LOG.debug("Port %s not found, will not notify nova.", port_id)
             return
         else:
             if port.device_owner.startswith(
                     n_const.DEVICE_OWNER_COMPUTE_PREFIX):
                 plugin.nova_notifier.notify_port_active_direct(port)
                 return
     else:
         self.update_port_status_to_active(port, rpc_context, port_id, host)
     self.notify_l2pop_port_wiring(port_id, rpc_context,
                                   n_const.PORT_STATUS_ACTIVE, host)
Example #5
    def update_port_status(self, context, port_id, status):
        updated = False
        session = context.session
        # REVISIT: Serialize this operation with a semaphore to prevent
        # undesired eventlet yields leading to 'lock wait timeout' errors
        with contextlib.nested(lockutils.lock('db-access'),
                               session.begin(subtransactions=True)):
            port = db.get_port(session, port_id)
            if not port:
                LOG.warning(_("Port %(port)s updated up by agent not found"),
                            {'port': port_id})
                return False
            if port.status != status:
                original_port = self._make_port_dict(port)
                port.status = status
                updated_port = self._make_port_dict(port)
                network = self.get_network(context,
                                           original_port['network_id'])
                mech_context = driver_context.PortContext(
                    self, context, updated_port, network,
                    original_port=original_port)
                self.mechanism_manager.update_port_precommit(mech_context)
                updated = True

        if updated:
            self.mechanism_manager.update_port_postcommit(mech_context)

        return True
Example #6
    def add_router_interface(self, context, router_id, interface_info):
        """creates vlnk on the fortinet device."""
        LOG.debug("FortinetL3ServicePlugin.add_router_interface: "
                  "router_id=%(router_id)s "
                  "interface_info=%(interface_info)r",
                  {'router_id': router_id, 'interface_info': interface_info})
        with context.session.begin(subtransactions=True):
            info = super(FortinetL3ServicePlugin, self).add_router_interface(
                context, router_id, interface_info)
            port = db.get_port(context.session, info['port_id'])
            port['admin_state_up'] = True
            port['port'] = port
            LOG.debug("FortinetL3ServicePlugin: "
                  "context=%(context)s"
                  "port=%(port)s "
                  "info=%(info)r",
                  {'context': context, 'port': port, 'info': info})

            #self._core_plugin.update_port(context, info["port_id"], port)

            interface_info = info
            subnet = self._core_plugin._get_subnet(context,
                                                   interface_info['subnet_id'])
            network_id = subnet['network_id']
            tenant_id = port['tenant_id']
            port_filters = {'network_id': [network_id],
                            'device_owner': [DEVICE_OWNER_ROUTER_INTF]}
            port_count = self._core_plugin.get_ports_count(context,
                                                           port_filters)
            # port count is checked against 2 since the current port is already
            # added to db
            if port_count == 2:
                # This subnet is already part of some router
                LOG.error(_("FortinetL3ServicePlugin: adding redundant router "
                            "interface is not supported"))
                raise Exception(_("FortinetL3ServicePlugin:adding redundant "
                                  "router interface is not supported"))
            try:
                db_namespace = fortinet_db.query_record(context,
                                        fortinet_db.Fortinet_ML2_Namespace,
                                        tenant_id=tenant_id)
                vlan_inf = utils.get_intf(context, network_id)
                int_intf, ext_intf = utils.get_vlink_intf(self, context,
                                               vdom=db_namespace.vdom)
                utils.add_fwpolicy(self, context,
                                   vdom=db_namespace.vdom,
                                   srcintf=vlan_inf,
                                   dstintf=int_intf,
                                   nat='enable')

            except Exception as e:
                LOG.error(_("Failed to create Fortinet resources to add router "
                            "interface. info=%(info)s, router_id=%(router_id)s"),
                          {"info": info, "router_id": router_id})
                resources.Exinfo(e)
                with excutils.save_and_reraise_exception():
                    self.remove_router_interface(context, router_id,
                                                     interface_info)
        utils.update_status(self, context, t_consts.TaskStatus.COMPLETED)
        return info
Example #7
    def test_get_port(self):
        network_id = 'foo-network-id'
        port_id = 'foo-port-id'
        self._setup_neutron_network(network_id)
        self._setup_neutron_port(network_id, port_id)

        port = ml2_db.get_port(self.ctx.session, port_id)
        self.assertEqual(port_id, port.id)
Example #8
    def test_get_port(self):
        network_id = uuidutils.generate_uuid()
        port_id = uuidutils.generate_uuid()
        self._setup_neutron_network(network_id)
        self._setup_neutron_port(network_id, port_id)

        port = ml2_db.get_port(self.ctx, port_id)
        self.assertEqual(port_id, port.id)
Example #9
    def test_get_port_multiple_results_found(self):
        network_id = 'foo-network-id'
        port_id = 'foo-port-id'
        port_id_one = 'foo-port-id-one'
        port_id_two = 'foo-port-id-two'
        self._setup_neutron_network(network_id)
        self._setup_neutron_port(network_id, port_id_one)
        self._setup_neutron_port(network_id, port_id_two)

        port = ml2_db.get_port(self.ctx.session, port_id)
        self.assertIsNone(port)
Example #10
 def update_device_up(self, rpc_context, **kwargs):
     """Device is up on agent."""
     refresh_tunnels = kwargs.pop('refresh_tunnels', False)
     if not refresh_tunnels:
         # For backward compatibility with older agents
         refresh_tunnels = kwargs.pop('agent_restarted', False)
     agent_id, host, device = self._get_request_details(kwargs)
     LOG.debug("Device %(device)s up at agent %(agent_id)s", {
         'device': device,
         'agent_id': agent_id
     })
     plugin = directory.get_plugin()
     mac_or_device, pci_slot = self._device_to_mac_pci_slot(device)
     port_id = plugin._device_to_port_id(rpc_context,
                                         mac_or_device,
                                         pci_slot=pci_slot)
     port = plugin.port_bound_to_host(rpc_context, port_id, host)
     if host and not port:
         LOG.debug(
             "Device %(device)s not bound to the"
             " agent host %(host)s", {
                 'device': mac_or_device,
                 'host': host
             })
         # this might mean that a VM is in the process of live migration
         # and vif was plugged on the destination compute node;
         # need to notify nova explicitly
         port = ml2_db.get_port(rpc_context, port_id)
         # _device_to_port_id may have returned a truncated UUID if the
         # agent did not provide a full one (e.g. Linux Bridge case).
         if not port:
             LOG.debug("Port %s not found, will not notify nova.", port_id)
             return
         else:
             if port.device_owner.startswith(
                     n_const.DEVICE_OWNER_COMPUTE_PREFIX):
                 # NOTE(haleyb): It is possible for a test to override a
                 # config option after the plugin has been initialized so
                 # the nova_notifier attribute is not set on the plugin.
                 if (cfg.CONF.notify_nova_on_port_status_changes
                         and hasattr(plugin, 'nova_notifier')):
                     plugin.nova_notifier.notify_port_active_direct(port)
                 return
     else:
         self.update_port_status_to_active(port, rpc_context, port_id, host)
     self.notify_l2pop_port_wiring(port_id, rpc_context,
                                   n_const.PORT_STATUS_ACTIVE, host,
                                   refresh_tunnels)
Example #11
    def add_router_interface(self, context, router_id, interface_info):
        """creates interface on the tn device."""
        LOG.debug("TNL3ServicePlugin.add_router_interface: "
                  "router_id=%(router_id)s "
                  "interface_info=%(interface_info)r",
                  {'router_id': router_id, 'interface_info': interface_info})

        info = super(TNL3ServicePlugin, self).add_router_interface(
            context, router_id, interface_info)

        port = db.get_port(context, info['port_id'])
        port['admin_state_up'] = True
        port['port'] = port
        LOG.debug("TNL3ServicePlugin: "
                  "context=%(context)s"
                  "port=%(port)s "
                  "info=%(info)r",
                  {'context': context, 'port': port, 'info': info})
        interface_info = info
        subnet = self._core_plugin._get_subnet(context, interface_info['subnet_id'])
        network_id = subnet['network_id']
        port_filters = {'network_id': [network_id],
                        'device_owner': [DEVICE_OWNER_ROUTER_INTF]}
        port_count = self._core_plugin.get_ports_count(context,
                                                       port_filters)
        # port count is checked against 2 since the current port is already
        # added to db
        if port_count == 2:
            # This subnet is already part of some router
            LOG.error(_LE("TNL3ServicePlugin: adding redundant "
                          "router interface is not supported"))
            raise Exception(_("TNL3ServicePlugin:adding redundant "
                              "router interface is not supported"))

        #with context.session.begin(subtransactions=True):
        try:
            self._add_tn_router_interface(context, router_id, port, subnet['gateway_ip'])
        except Exception as e:
            LOG.error(_LE("Failed to create TN resources to add "
                        "router interface. info=%(info)s, "
                        "router_id=%(router_id)s"),
                      {"info": info, "router_id": router_id})


            self.remove_router_interface(context, router_id, interface_info)

        return info
Example #12
    def notify_l2pop_port_wiring(self,
                                 port_id,
                                 rpc_context,
                                 status,
                                 host,
                                 agent_restarted=False):
        """Notify the L2pop driver that a port has been wired/unwired.

        The L2pop driver uses this notification to broadcast forwarding
        entries to other agents on the same network as the port for port_id.
        """
        plugin = directory.get_plugin()
        l2pop_driver = plugin.mechanism_manager.mech_drivers.get(
            'l2population')
        if not l2pop_driver:
            return
        port = ml2_db.get_port(rpc_context, port_id)
        if not port:
            return
        port_context = plugin.get_bound_port_context(rpc_context, port_id,
                                                     host)
        if not port_context:
            # port deleted
            return
        # NOTE: DVR ports are already handled and updated through l2pop
        # and so we don't need to update them again here. But l2pop did not
        # handle DVR ports while the neutron-*-agent was restarting, so we
        # need to handle that case here.
        if (port['device_owner'] == n_const.DEVICE_OWNER_DVR_INTERFACE
                and not agent_restarted):
            return
        port = port_context.current
        if (port['device_owner'] != n_const.DEVICE_OWNER_DVR_INTERFACE
                and status == n_const.PORT_STATUS_ACTIVE
                and port[portbindings.HOST_ID] != host
                and not l3_hamode_db.is_ha_router_port(
                    rpc_context, port['device_owner'], port['device_id'])):
            # don't setup ACTIVE forwarding entries unless bound to this
            # host or if it's an HA or DVR port (which is special-cased in
            # the mech driver)
            return
        port_context.current['status'] = status
        port_context.current[portbindings.HOST_ID] = host
        if status == n_const.PORT_STATUS_ACTIVE:
            l2pop_driver.obj.update_port_up(port_context, agent_restarted)
        else:
            l2pop_driver.obj.update_port_down(port_context)
Example #13
    def notify_l2pop_port_wiring(self, port_id, rpc_context,
                                 status, host, agent_restarted=None):
        """Notify the L2pop driver that a port has been wired/unwired.

        The L2pop driver uses this notification to broadcast forwarding
        entries to other agents on the same network as the port for port_id.
        """
        plugin = directory.get_plugin()
        l2pop_driver = plugin.mechanism_manager.mech_drivers.get(
                'l2population')
        if not l2pop_driver:
            return
        port = ml2_db.get_port(rpc_context, port_id)
        if not port:
            return
        port_context = plugin.get_bound_port_context(
                rpc_context, port_id, host)
        if not port_context:
            # port deleted
            return
        # NOTE: DVR ports are already handled and updated through l2pop
        # and so we don't need to update them again here. But l2pop did not
        # handle DVR ports while the neutron-*-agent was restarting, so we
        # need to handle that case here.
        if agent_restarted is None:
            agent_restarted = l2pop_driver.obj.agent_restarted(port_context)
        if (port['device_owner'] == n_const.DEVICE_OWNER_DVR_INTERFACE and
                not agent_restarted):
            return
        port = port_context.current
        if (port['device_owner'] != n_const.DEVICE_OWNER_DVR_INTERFACE and
                status == n_const.PORT_STATUS_ACTIVE and
                port[portbindings.HOST_ID] != host and
                not l3_hamode_db.is_ha_router_port(rpc_context,
                                                   port['device_owner'],
                                                   port['device_id'])):
            # don't setup ACTIVE forwarding entries unless bound to this
            # host or if it's an HA or DVR port (which is special-cased in
            # the mech driver)
            return
        port_context.current['status'] = status
        port_context.current[portbindings.HOST_ID] = host
        if status == n_const.PORT_STATUS_ACTIVE:
            l2pop_driver.obj.update_port_up(port_context, agent_restarted)
        else:
            l2pop_driver.obj.update_port_down(port_context)
Example #14
    def update_device_up(self, rpc_context, **kwargs):
        """Device is up on agent."""
        agent_id = kwargs.get('agent_id')
        device = kwargs.get('device')
        LOG.debug(_("Device %(device)s up at agent %(agent_id)s"),
                  {'device': device, 'agent_id': agent_id})
        port_id = self._device_to_port_id(device)

        session = db_api.get_session()
        with session.begin(subtransactions=True):
            port = db.get_port(session, port_id)
            if not port:
                LOG.warning(_("Device %(device)s updated up by agent "
                              "%(agent_id)s not found in database"),
                            {'device': device, 'agent_id': agent_id})
                return
            if port.status != q_const.PORT_STATUS_ACTIVE:
                port.status = q_const.PORT_STATUS_ACTIVE
Example #15
 def update_device_up(self, rpc_context, **kwargs):
     """Device is up on agent."""
     agent_id = kwargs.get('agent_id')
     device = kwargs.get('device')
     host = kwargs.get('host')
     LOG.debug("Device %(device)s up at agent %(agent_id)s",
               {'device': device, 'agent_id': agent_id})
     plugin = manager.NeutronManager.get_plugin()
     port_id = plugin._device_to_port_id(rpc_context, device)
     port = plugin.port_bound_to_host(rpc_context, port_id, host)
     if host and not port:
         LOG.debug("Device %(device)s not bound to the"
                   " agent host %(host)s",
                   {'device': device, 'host': host})
         # this might mean that a VM is in the process of live migration
         # and vif was plugged on the destination compute node;
         # need to notify nova explicitly
         try:
             port = plugin._get_port(rpc_context, port_id)
         except exceptions.PortNotFound:
             LOG.debug("Port %s not found, will not notify nova.", port_id)
         else:
             if port.device_owner.startswith(
                     n_const.DEVICE_OWNER_COMPUTE_PREFIX):
                 plugin.nova_notifier.notify_port_active_direct(port)
         return
     if port and port['device_owner'] == n_const.DEVICE_OWNER_DVR_INTERFACE:
         # NOTE(kevinbenton): we have to special case DVR ports because of
         # the special multi-binding status update logic they have that
         # depends on the host
         plugin.update_port_status(rpc_context, port_id,
                                   n_const.PORT_STATUS_ACTIVE, host)
     else:
         # _device_to_port_id may have returned a truncated UUID if the
         # agent did not provide a full one (e.g. Linux Bridge case). We
         # need to look up the full one before calling provisioning_complete
         if not port:
             port = ml2_db.get_port(rpc_context.session, port_id)
         if not port:
             # port doesn't exist, no need to add a provisioning block
             return
         provisioning_blocks.provisioning_complete(
             rpc_context, port['id'], resources.PORT,
             provisioning_blocks.L2_AGENT_ENTITY)
Example #16
    def set_port_status_up(self, port_id):
        # Port provisioning is complete now that OVN has reported that the
        # port is up. Any provisioning block (possibly added during port
        # creation or when OVN reports that the port is down) must be removed.
        LOG.info("OVN reports status up for port: %s", port_id)

        self._update_dnat_entry_if_needed(port_id)
        self._wait_for_metadata_provisioned_if_needed(port_id)

        # If this port is a subport, we need to update the host_id and set it
        # to its parent's. Otherwise, Neutron won't even try to bind it and
        # it will not transition from DOWN to ACTIVE.
        self._update_subport_host_if_needed(port_id)

        provisioning_blocks.provisioning_complete(
            n_context.get_admin_context(),
            port_id,
            resources.PORT,
            provisioning_blocks.L2_AGENT_ENTITY)

        admin_context = n_context.get_admin_context()
        try:
            # NOTE(lucasagomes): Router ports in OVN are never bound
            # to a host given their decentralized nature. Calling
            # provisioning_complete() - as above - does not work for them
            # because the router ports are unbound, so for OVN we force
            # the status here. Maybe it's something that we can
            # change in core Neutron in the future.
            db_port = ml2_db.get_port(admin_context, port_id)
            if not db_port:
                return

            if db_port.device_owner in (const.DEVICE_OWNER_ROUTER_INTF,
                                        const.DEVICE_OWNER_DVR_INTERFACE,
                                        const.DEVICE_OWNER_ROUTER_HA_INTF):
                self._plugin.update_port_status(admin_context, port_id,
                                                const.PORT_STATUS_ACTIVE)
            elif db_port.device_owner.startswith(
                    const.DEVICE_OWNER_COMPUTE_PREFIX):
                self._plugin.nova_notifier.notify_port_active_direct(db_port)
        except (os_db_exc.DBReferenceError, n_exc.PortNotFound):
            LOG.debug('Port not found during OVN status up report: %s',
                      port_id)
Example #17
 def notify_ha_port_status(self, port_id, rpc_context, status, host):
     plugin = manager.NeutronManager.get_plugin()
     l2pop_driver = plugin.mechanism_manager.mech_drivers.get(
         'l2population')
     if not l2pop_driver:
         return
     port = ml2_db.get_port(rpc_context.session, port_id)
     if not port:
         return
     is_ha_port = l3_hamode_db.is_ha_router_port(port['device_owner'],
                                                 port['device_id'])
     if is_ha_port:
         port_context = plugin.get_bound_port_context(rpc_context, port_id)
         port_context.current['status'] = status
         port_context.current[portbindings.HOST_ID] = host
         if status == n_const.PORT_STATUS_ACTIVE:
             l2pop_driver.obj.update_port_up(port_context)
         else:
             l2pop_driver.obj.update_port_down(port_context)
Example #18
 def update_port_status_to_active(self, port, rpc_context, port_id, host):
     plugin = directory.get_plugin()
     if port and port['device_owner'] == n_const.DEVICE_OWNER_DVR_INTERFACE:
         # NOTE(kevinbenton): we have to special case DVR ports because of
         # the special multi-binding status update logic they have that
         # depends on the host
         plugin.update_port_status(rpc_context, port_id,
                                   n_const.PORT_STATUS_ACTIVE, host)
     else:
         # _device_to_port_id may have returned a truncated UUID if the
         # agent did not provide a full one (e.g. Linux Bridge case). We
         # need to look up the full one before calling provisioning_complete
         if not port:
             port = ml2_db.get_port(rpc_context, port_id)
         if not port:
             # port doesn't exist, no need to add a provisioning block
             return
         provisioning_blocks.provisioning_complete(
             rpc_context, port['id'], resources.PORT,
             provisioning_blocks.L2_AGENT_ENTITY)
Example #19
 def update_port_status_to_active(self, port, rpc_context, port_id, host):
     plugin = directory.get_plugin()
     if port and port['device_owner'] == n_const.DEVICE_OWNER_DVR_INTERFACE:
         # NOTE(kevinbenton): we have to special case DVR ports because of
         # the special multi-binding status update logic they have that
         # depends on the host
         plugin.update_port_status(rpc_context, port_id,
                                   n_const.PORT_STATUS_ACTIVE, host)
     else:
         # _device_to_port_id may have returned a truncated UUID if the
         # agent did not provide a full one (e.g. Linux Bridge case). We
         # need to look up the full one before calling provisioning_complete
         if not port:
             port = ml2_db.get_port(rpc_context, port_id)
         if not port:
             # port doesn't exist, no need to add a provisioning block
             return
         provisioning_blocks.provisioning_complete(
             rpc_context, port['id'], resources.PORT,
             provisioning_blocks.L2_AGENT_ENTITY)
Example #20
    def get_device_details(self, rpc_context, **kwargs):
        """Agent requests device details."""
        agent_id = kwargs.get('agent_id')
        device = kwargs.get('device')
        LOG.debug(_("Device %(device)s details requested by agent "
                    "%(agent_id)s"),
                  {'device': device, 'agent_id': agent_id})
        port_id = self._device_to_port_id(device)

        session = db_api.get_session()
        with session.begin(subtransactions=True):
            port = db.get_port(session, port_id)
            if not port:
                LOG.warning(_("Device %(device)s requested by agent "
                              "%(agent_id)s not found in database"),
                            {'device': device, 'agent_id': agent_id})
                return {'device': device}
            segments = db.get_network_segments(session, port.network_id)
            if not segments:
                LOG.warning(_("Device %(device)s requested by agent "
                              "%(agent_id)s has network %(network_id) with "
                              "no segments"),
                            {'device': device,
                             'agent_id': agent_id,
                             'network_id': port.network_id})
                return {'device': device}
            #TODO(rkukura): Use/create port binding
            segment = segments[0]
            new_status = (q_const.PORT_STATUS_ACTIVE if port.admin_state_up
                          else q_const.PORT_STATUS_DOWN)
            if port.status != new_status:
                port.status = new_status
            entry = {'device': device,
                     'network_id': port.network_id,
                     'port_id': port.id,
                     'admin_state_up': port.admin_state_up,
                     'network_type': segment[api.NETWORK_TYPE],
                     'segmentation_id': segment[api.SEGMENTATION_ID],
                     'physical_network': segment[api.PHYSICAL_NETWORK]}
            LOG.debug(_("Returning: %s"), entry)
            return entry
Example #21
    def update_device_up(self, rpc_context, **kwargs):
        """Device is up on agent."""
        agent_id = kwargs.get('agent_id')
        device = kwargs.get('device')
        LOG.debug(_("Device %(device)s up at agent %(agent_id)s"), {
            'device': device,
            'agent_id': agent_id
        })
        port_id = self._device_to_port_id(device)

        session = db_api.get_session()
        with session.begin(subtransactions=True):
            port = db.get_port(session, port_id)
            if not port:
                LOG.warning(
                    _("Device %(device)s updated up by agent "
                      "%(agent_id)s not found in database"), {
                          'device': device,
                          'agent_id': agent_id
                      })
                return
            if port.status != q_const.PORT_STATUS_ACTIVE:
                port.status = q_const.PORT_STATUS_ACTIVE
Example #22
 def update_device_up(self, rpc_context, **kwargs):
     """Device is up on agent."""
     agent_id = kwargs.get('agent_id')
     device = kwargs.get('device')
     host = kwargs.get('host')
     agent_restarted = kwargs.pop('agent_restarted', None)
     LOG.debug("Device %(device)s up at agent %(agent_id)s", {
         'device': device,
         'agent_id': agent_id
     })
     plugin = directory.get_plugin()
     port_id = plugin._device_to_port_id(rpc_context, device)
     port = plugin.port_bound_to_host(rpc_context, port_id, host)
     if host and not port:
         LOG.debug(
             "Device %(device)s not bound to the"
             " agent host %(host)s", {
                 'device': device,
                 'host': host
             })
         # this might mean that a VM is in the process of live migration
         # and vif was plugged on the destination compute node;
         # need to notify nova explicitly
         port = ml2_db.get_port(rpc_context, port_id)
         # _device_to_port_id may have returned a truncated UUID if the
         # agent did not provide a full one (e.g. Linux Bridge case).
         if not port:
             LOG.debug("Port %s not found, will not notify nova.", port_id)
             return
         else:
             if port.device_owner.startswith(
                     n_const.DEVICE_OWNER_COMPUTE_PREFIX):
                 plugin.nova_notifier.notify_port_active_direct(port)
                 return
     else:
         self.update_port_status_to_active(port, rpc_context, port_id, host)
     self.notify_l2pop_port_wiring(port_id, rpc_context,
                                   n_const.PORT_STATUS_ACTIVE, host,
                                   agent_restarted)
Example #23
 def notify_ha_port_status(self, port_id, rpc_context,
                           status, host, port=None):
     plugin = manager.NeutronManager.get_plugin()
     l2pop_driver = plugin.mechanism_manager.mech_drivers.get(
             'l2population')
     if not l2pop_driver:
         return
     if not port:
         port = ml2_db.get_port(rpc_context.session, port_id)
         if not port:
             return
     is_ha_port = l3_hamode_db.is_ha_router_port(port['device_owner'],
                                                 port['device_id'])
     if is_ha_port:
         port_context = plugin.get_bound_port_context(
                 rpc_context, port_id)
         port_context.current['status'] = status
         port_context.current[portbindings.HOST_ID] = host
         if status == n_const.PORT_STATUS_ACTIVE:
             l2pop_driver.obj.update_port_up(port_context)
         else:
             l2pop_driver.obj.update_port_down(port_context)
Example #24
    def update_device_down(self, rpc_context, **kwargs):
        """Device no longer exists on agent."""
        # TODO(garyk) - live migration and port status
        agent_id = kwargs.get('agent_id')
        device = kwargs.get('device')
        LOG.debug(_("Device %(device)s no longer exists at agent "
                    "%(agent_id)s"),
                  {'device': device, 'agent_id': agent_id})
        port_id = self._device_to_port_id(device)

        session = db_api.get_session()
        with session.begin(subtransactions=True):
            port = db.get_port(session, port_id)
            if not port:
                LOG.warning(_("Device %(device)s updated down by agent "
                              "%(agent_id)s not found in database"),
                            {'device': device, 'agent_id': agent_id})
                return {'device': device,
                        'exists': False}
            if port.status != q_const.PORT_STATUS_DOWN:
                port.status = q_const.PORT_STATUS_DOWN
            return {'device': device,
                    'exists': True}
Example #25
 def test_get_port_result_not_found(self):
     port_id = uuidutils.generate_uuid()
     port = ml2_db.get_port(self.ctx.session, port_id)
     self.assertIsNone(port)
Example #26
    def _process_port_binding(self, mech_context, attrs):
        binding = mech_context._binding
        port = mech_context.current
        self._update_port_dict_binding(port, binding)

        host = attrs and attrs.get(portbindings.HOST_ID)
        host_set = attributes.is_attr_set(host)

        vnic_type = attrs and attrs.get(portbindings.VNIC_TYPE)
        vnic_type_set = attributes.is_attr_set(vnic_type)

        # CLI can't send {}, so treat None as {}
        profile = attrs and attrs.get(portbindings.PROFILE)
        profile_set = profile is not attributes.ATTR_NOT_SPECIFIED
        if profile_set and not profile:
            profile = {}

        if binding.vif_type != portbindings.VIF_TYPE_UNBOUND:
            if (not host_set and not vnic_type_set and not profile_set and
                binding.segment):
                return False
            self._delete_port_binding(mech_context)

        # Return True only if an agent notification is needed.
        # This will happen if a new host, vnic_type, or profile was specified
        # that differs from the current one. Note that host_set is True
        # even if the host is an empty string
        ret_value = ((host_set and binding.get('host') != host) or
                     (vnic_type_set and
                      binding.get('vnic_type') != vnic_type) or
                     (profile_set and self._get_profile(binding) != profile))

        if host_set:
            binding.host = host
            port[portbindings.HOST_ID] = host

        if vnic_type_set:
            binding.vnic_type = vnic_type
            port[portbindings.VNIC_TYPE] = vnic_type

        if profile_set:
            binding.profile = jsonutils.dumps(profile)
            if len(binding.profile) > models.BINDING_PROFILE_LEN:
                msg = _("binding:profile value too large")
                raise exc.InvalidInput(error_message=msg)
            port[portbindings.PROFILE] = profile

        # To try to [re]bind if host is non-empty.
        if binding.host:
            self.mechanism_manager.bind_port(mech_context)
            self._update_port_dict_binding(port, binding)

            # Update the port status if requested by the bound driver.
            if binding.segment and mech_context._new_port_status:
                # REVISIT(rkukura): This function is currently called
                # inside a transaction with the port either newly
                # created or locked for update. After the fix for bug
                # 1276391 is merged, this will no longer be true, and
                # the port status update will need to be handled in
                # the transaction that commits the new binding.
                port_db = db.get_port(mech_context._plugin_context.session,
                                      port['id'])
                port_db.status = mech_context._new_port_status
                port['status'] = mech_context._new_port_status

        return ret_value
Example #27
 def test_get_port_result_not_found(self):
     port_id = uuidutils.generate_uuid()
     port = ml2_db.get_port(self.ctx.session, port_id)
     self.assertIsNone(port)
Example #28
    def get_device_details(self, rpc_context, **kwargs):
        """Agent requests device details."""
        agent_id = kwargs.get('agent_id')
        device = kwargs.get('device')
        LOG.debug(_("Device %(device)s details requested by agent "
                    "%(agent_id)s"),
                  {'device': device, 'agent_id': agent_id})
        port_id = self._device_to_port_id(device)

        session = db_api.get_session()
        with session.begin(subtransactions=True):
            port = db.get_port(session, port_id)
            if not port:
                LOG.warning(_("Device %(device)s requested by agent "
                              "%(agent_id)s not found in database"),
                            {'device': device, 'agent_id': agent_id})
                return {'device': device}

            segments = db.get_network_segments(session, port.network_id)
            if not segments:
                LOG.warning(_("Device %(device)s requested by agent "
                              "%(agent_id)s has network %(network_id)s with "
                              "no segments"),
                            {'device': device,
                             'agent_id': agent_id,
                             'network_id': port.network_id})
                return {'device': device}

            binding = db.ensure_port_binding(session, port.id)
            # Fake segment for virl LXC/Other ports, remain unbound officially
            binding_segment = binding.segment
            if (port.device_id or '').startswith('virl-'):
                binding_segment = binding_segment or segments[0][api.ID]
            if not binding_segment:
                LOG.warning(_("Device %(device)s requested by agent "
                              "%(agent_id)s on network %(network_id)s not "
                              "bound, vif_type: %(vif_type)s"),
                            {'device': device,
                             'agent_id': agent_id,
                             'network_id': port.network_id,
                             'vif_type': binding.vif_type})
                return {'device': device}

            segment = self._find_segment(segments, binding_segment)
            if not segment:
                LOG.warning(_("Device %(device)s requested by agent "
                              "%(agent_id)s on network %(network_id)s "
                              "invalid segment, vif_type: %(vif_type)s"),
                            {'device': device,
                             'agent_id': agent_id,
                             'network_id': port.network_id,
                             'vif_type': binding.vif_type})
                return {'device': device}

            new_status = (q_const.PORT_STATUS_BUILD if port.admin_state_up
                          else q_const.PORT_STATUS_DOWN)
            if port.status != new_status:
                plugin = manager.NeutronManager.get_plugin()
                plugin.update_port_status(rpc_context,
                                          port_id,
                                          new_status)
                port.status = new_status
            entry = {'device': device,
                     'network_id': port.network_id,
                     'port_id': port.id,
                     'device_id': port.device_id,
                     'device_owner': port.device_owner,
                     'mac_address': port.mac_address,
                     'admin_state_up': port.admin_state_up,
                     'network_type': segment[api.NETWORK_TYPE],
                     'segmentation_id': segment[api.SEGMENTATION_ID],
                     'physical_network': segment[api.PHYSICAL_NETWORK]}
            LOG.debug(_("Returning: %s"), entry)
            return entry
Example #29
    def add_router_interface(self, context, router_id, interface_info):
        """creates svi on NOS device and assigns ip addres to SVI."""
        LOG.debug("BrocadeSVIPlugin.add_router_interface on VDX: "
                  "router_id=%(router_id)s "
                  "interface_info=%(interface_info)r",
                  {'router_id': router_id, 'interface_info': interface_info})

        with context.session.begin(subtransactions=True):

            info = super(BrocadeSVIPlugin, self).add_router_interface(
                context, router_id, interface_info)

            port = db.get_port(context.session, info["port_id"])

            # shutting down neutron port to allow NOS to do Arp/Routing
            port['admin_state_up'] = False
            port['port'] = port
            self._core_plugin.update_port(context, info["port_id"], port)

            interface_info = info
            subnet = self._core_plugin._get_subnet(context,
                                                   interface_info["subnet_id"])
            cidr = subnet["cidr"]
            net_addr, net_len = self.net_addr(cidr)
            gateway_ip = subnet["gateway_ip"]
            network_id = subnet['network_id']
            bnet = brocade_db.get_network(context, network_id)
            vlan_id = bnet['vlan']
            gateway_ip_cidr = gateway_ip + '/' + str(net_len)
            LOG.debug("Allocated cidr %(cidr)s from the pool, "
                      "network_id %(net_id)s "
                      "bnet %(bnet)s "
                      "vlan %(vlan_id)d " % ({'cidr': gateway_ip_cidr,
                                              'net_id': network_id,
                                              'bnet': bnet,
                                              'vlan_id': int(vlan_id)}))
            port_filters = {'network_id': [network_id],
                            'device_owner': [DEVICE_OWNER_ROUTER_INTF]}
            port_count = self._core_plugin.get_ports_count(context,
                                                           port_filters)
            LOG.info(_("BrocadeSVIPlugin.add_router_interface ports_count %d"),
                     port_count)

            # port count is checked against 2 since the current port is already
            # added to db
            if port_count == 2:
                # This subnet is already part of some router
                # (this is not supported in this version of brocade svi plugin)
                LOG.error(_("BrocadeSVIPlugin: adding redundant router "
                            "interface is not supported"))
                raise Exception(_("BrocadeSVIPlugin:adding redundant router "
                                  "interface is not supported"))

        try:
            switch = self._switch
            self._driver.create_svi(switch['address'],
                                    switch['username'],
                                    switch['password'],
                                    switch['rbridge_id'],
                                    vlan_id,
                                    gateway_ip_cidr,
                                    str(router_id))
        except Exception:
            LOG.error(_("Failed to create Brocade resources to add router "
                        "interface. info=%(info)s, router_id=%(router_id)s"),
                      {"info": info, "router_id": router_id})
            with excutils.save_and_reraise_exception():
                with context.session.begin(subtransactions=True):
                    self.remove_router_interface(context, router_id,
                                                 interface_info)
        return info
Example #30
    def _process_port_binding(self, mech_context, attrs):
        binding = mech_context._binding
        port = mech_context.current
        self._update_port_dict_binding(port, binding)

        host = attrs and attrs.get(portbindings.HOST_ID)
        host_set = attributes.is_attr_set(host)

        vnic_type = attrs and attrs.get(portbindings.VNIC_TYPE)
        vnic_type_set = attributes.is_attr_set(vnic_type)

        # CLI can't send {}, so treat None as {}
        profile = attrs and attrs.get(portbindings.PROFILE)
        profile_set = profile is not attributes.ATTR_NOT_SPECIFIED
        if profile_set and not profile:
            profile = {}

        if binding.vif_type != portbindings.VIF_TYPE_UNBOUND:
            if (not host_set and not vnic_type_set and not profile_set
                    and binding.segment):
                return False
            self._delete_port_binding(mech_context)

        # Return True only if an agent notification is needed.
        # This will happen if a new host, vnic_type, or profile was specified
        # that differs from the current one. Note that host_set is True
        # even if the host is an empty string
        ret_value = ((host_set and binding.get('host') != host) or
                     (vnic_type_set and binding.get('vnic_type') != vnic_type)
                     or
                     (profile_set and self._get_profile(binding) != profile))

        if host_set:
            binding.host = host
            port[portbindings.HOST_ID] = host

        if vnic_type_set:
            binding.vnic_type = vnic_type
            port[portbindings.VNIC_TYPE] = vnic_type

        if profile_set:
            binding.profile = jsonutils.dumps(profile)
            if len(binding.profile) > models.BINDING_PROFILE_LEN:
                msg = _("binding:profile value too large")
                raise exc.InvalidInput(error_message=msg)
            port[portbindings.PROFILE] = profile

        # To try to [re]bind if host is non-empty.
        if binding.host:
            self.mechanism_manager.bind_port(mech_context)
            self._update_port_dict_binding(port, binding)

            # Update the port status if requested by the bound driver.
            if binding.segment and mech_context._new_port_status:
                # REVISIT(rkukura): This function is currently called
                # inside a transaction with the port either newly
                # created or locked for update. After the fix for bug
                # 1276391 is merged, this will no longer be true, and
                # the port status update will need to be handled in
                # the transaction that commits the new binding.
                port_db = db.get_port(mech_context._plugin_context.session,
                                      port['id'])
                port_db.status = mech_context._new_port_status
                port['status'] = mech_context._new_port_status

        return ret_value
Example #31
 def test_get_port_multiple_results_found(self):
     with mock.patch(
             'sqlalchemy.orm.query.Query.one',
             side_effect=exc.MultipleResultsFound):
         port = ml2_db.get_port(self.ctx, 'unused')
     self.assertIsNone(port)
Example #32
    def get_device_details(self, rpc_context, **kwargs):
        """Agent requests device details."""
        agent_id = kwargs.get('agent_id')
        device = kwargs.get('device')
        LOG.debug(
            _("Device %(device)s details requested by agent "
              "%(agent_id)s"), {
                  'device': device,
                  'agent_id': agent_id
              })
        port_id = self._device_to_port_id(device)

        session = db_api.get_session()
        with session.begin(subtransactions=True):
            port = db.get_port(session, port_id)
            if not port:
                LOG.warning(
                    _("Device %(device)s requested by agent "
                      "%(agent_id)s not found in database"), {
                          'device': device,
                          'agent_id': agent_id
                      })
                return {'device': device}

            segments = db.get_network_segments(session, port.network_id)
            if not segments:
                LOG.warning(
                    _("Device %(device)s requested by agent "
                      "%(agent_id)s has network %(network_id)s with "
                      "no segments"), {
                          'device': device,
                          'agent_id': agent_id,
                          'network_id': port.network_id
                      })
                return {'device': device}

            binding = db.ensure_port_binding(session, port.id)
            if not binding.segment:
                LOG.warning(
                    _("Device %(device)s requested by agent "
                      "%(agent_id)s on network %(network_id)s not "
                      "bound, vif_type: %(vif_type)s"), {
                          'device': device,
                          'agent_id': agent_id,
                          'network_id': port.network_id,
                          'vif_type': binding.vif_type
                      })
                return {'device': device}

            segment = self._find_segment(segments, binding.segment)
            if not segment:
                LOG.warning(
                    _("Device %(device)s requested by agent "
                      "%(agent_id)s on network %(network_id)s "
                      "invalid segment, vif_type: %(vif_type)s"), {
                          'device': device,
                          'agent_id': agent_id,
                          'network_id': port.network_id,
                          'vif_type': binding.vif_type
                      })
                return {'device': device}

            new_status = (q_const.PORT_STATUS_BUILD
                          if port.admin_state_up else q_const.PORT_STATUS_DOWN)
            if port.status != new_status:
                plugin = manager.NeutronManager.get_plugin()
                plugin.update_port_status(rpc_context, port_id, new_status)
                port.status = new_status
            entry = {
                'device': device,
                'network_id': port.network_id,
                'port_id': port.id,
                'admin_state_up': port.admin_state_up,
                'network_type': segment[api.NETWORK_TYPE],
                'segmentation_id': segment[api.SEGMENTATION_ID],
                'physical_network': segment[api.PHYSICAL_NETWORK]
            }
            LOG.debug(_("Returning: %s"), entry)
            return entry
Example #33
    def add_router_interface(self, context, router_id, interface_info):
        """creates svi on NOS device and assigns ip addres to SVI."""
        LOG.debug(
            "BrocadeSVIPlugin.add_router_interface on VDX: "
            "router_id=%(router_id)s "
            "interface_info=%(interface_info)r", {
                'router_id': router_id,
                'interface_info': interface_info
            })

        with context.session.begin(subtransactions=True):

            info = super(BrocadeSVIPlugin,
                         self).add_router_interface(context, router_id,
                                                    interface_info)

            port = db.get_port(context.session, info["port_id"])

            # shutting down neutron port to allow NOS to do Arp/Routing
            port['admin_state_up'] = False
            port['port'] = port
            self._core_plugin.update_port(context, info["port_id"], port)

            interface_info = info
            subnet = self._core_plugin._get_subnet(context,
                                                   interface_info["subnet_id"])
            cidr = subnet["cidr"]
            net_addr, net_len = self.net_addr(cidr)
            gateway_ip = subnet["gateway_ip"]
            network_id = subnet['network_id']
            bnet = brocade_db.get_network(context, network_id)
            vlan_id = bnet['vlan']
            gateway_ip_cidr = gateway_ip + '/' + str(net_len)
            LOG.debug("Allocated cidr %(cidr)s from the pool, "
                      "network_id %(net_id)s "
                      "bnet %(bnet)s "
                      "vlan %(vlan_id)d " % ({
                          'cidr': gateway_ip_cidr,
                          'net_id': network_id,
                          'bnet': bnet,
                          'vlan_id': int(vlan_id)
                      }))
            port_filters = {
                'network_id': [network_id],
                'device_owner': [DEVICE_OWNER_ROUTER_INTF]
            }
            port_count = self._core_plugin.get_ports_count(
                context, port_filters)
            LOG.info(_("BrocadeSVIPlugin.add_router_interface ports_count %d"),
                     port_count)

            # port count is checked against 2 since the current port is already
            # added to db
            if port_count == 2:
                # This subnet is already part of some router
                # (this is not supported in this version of brocade svi plugin)
                LOG.error(
                    _("BrocadeSVIPlugin: adding redundant router "
                      "interface is not supported"))
                raise Exception(
                    _("BrocadeSVIPlugin:adding redundant router "
                      "interface is not supported"))

        try:
            switch = self._switch
            self._driver.create_svi(switch['address'], switch['username'],
                                    switch['password'], switch['rbridge_id'],
                                    vlan_id, gateway_ip_cidr, str(router_id))
        except Exception:
            LOG.error(
                _("Failed to create Brocade resources to add router "
                  "interface. info=%(info)s, router_id=%(router_id)s"), {
                      "info": info,
                      "router_id": router_id
                  })
            with excutils.save_and_reraise_exception():
                with context.session.begin(subtransactions=True):
                    self.remove_router_interface(context, router_id,
                                                 interface_info)
        return info
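
The add_router_interface snippet above splits the subnet CIDR with a small net_addr helper before building gateway_ip_cidr. A minimal sketch of such a helper, assuming it only needs to separate the network address from the prefix length (the real plugin may implement it differently, for example with netaddr):

    @staticmethod
    def net_addr(cidr):
        """Illustrative sketch only: '10.0.1.0/24' -> ('10.0.1.0', 24)."""
        addr, prefix_len = cidr.split('/')
        return addr, int(prefix_len)
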
Example #34
0
    def test_get_port_multiple_results_found(self):
        with mock.patch('sqlalchemy.orm.query.Query.one',
                        side_effect=exc.MultipleResultsFound):
            port = ml2_db.get_port(self.ctx, 'unused')
        self.assertIsNone(port)
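
The test above patches Query.one to raise MultipleResultsFound and expects get_port to swallow the error and return None. A minimal sketch of a helper with that behaviour (an illustration under that assumption, not necessarily the exact ml2_db implementation; note the test passes a context while other snippets in this document pass a session):

    import logging

    from neutron.db import models_v2
    from sqlalchemy.orm import exc as sa_exc

    LOG = logging.getLogger(__name__)

    def get_port(session, port_id):
        # Return the port whose id starts with port_id, or None when the
        # prefix matches no port or several ports.
        try:
            return (session.query(models_v2.Port).
                    filter(models_v2.Port.id.startswith(port_id)).
                    one())
        except sa_exc.NoResultFound:
            return None
        except sa_exc.MultipleResultsFound:
            LOG.error("Multiple ports have an id starting with %s", port_id)
            return None
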
Example #35
0
    def update_port_status(self, context, port_id, status, host=None):
        updated = False
        session = context.session
        # REVISIT: Serialize this operation with a semaphore to prevent
        # undesired eventlet yields leading to 'lock wait timeout' errors
        with contextlib.nested(lockutils.lock('db-access'),
                               session.begin(subtransactions=True)):
            port = db.get_port(session, port_id)
            if not port:
                LOG.warning(_("Port %(port)s updated up by agent not found"),
                            {'port': port_id})
                return False
            if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
                binding = db.get_dvr_port_binding_by_host(port_id=port['id'],
                                                          host=host,
                                                          session=session)
                if not binding:
                    LOG.error(_("Binding info for port %s not found"),
                              port_id)
                    return False
                binding['status'] = status
                binding.update(binding)

        # binding already updated
        with contextlib.nested(lockutils.lock('db-access'),
                               session.begin(subtransactions=True)):
            port = db.get_port(session, port_id)
            if not port:
                LOG.warning(_("Port %(port)s updated up by agent not found"),
                            {'port': port_id})
                return False
            if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
                original_port = self._make_port_dict(port)
                network = self.get_network(context,
                                           original_port['network_id'])
                port.status = self._generate_dvr_port_status(session,
                                                             port['id'])
                updated_port = self._make_port_dict(port)
                mech_context = (driver_context.PortContext(
                    self, context, updated_port, network,
                    original_port=original_port,
                    binding=binding))
                self.mechanism_manager.update_port_precommit(mech_context)
                updated = True
            elif port.status != status:
                original_port = self._make_port_dict(port)
                port.status = status
                updated_port = self._make_port_dict(port)
                network = self.get_network(context,
                                           original_port['network_id'])
                mech_context = driver_context.PortContext(
                    self, context, updated_port, network,
                    original_port=original_port)
                self.mechanism_manager.update_port_precommit(mech_context)
                updated = True

        if updated:
            self.mechanism_manager.update_port_postcommit(mech_context)

        if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
            self._check_and_delete_dvr_port_binding(mech_context, context)

        return True
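
In the DVR branch above, the parent port status is derived from the per-host bindings via _generate_dvr_port_status. A hedged sketch of how such an aggregation could work, assuming a models.DVRPortBinding table keyed by port_id (model and constant names are inferred from context, not copied from the actual tree):

    def _generate_dvr_port_status(self, session, port_id):
        # Sketch: ACTIVE wins if any host reports it, otherwise DOWN if
        # any host reports DOWN, otherwise BUILD.
        final_status = const.PORT_STATUS_BUILD
        bindings = session.query(models.DVRPortBinding).filter_by(
            port_id=port_id)
        for binding in bindings:
            if binding.status == const.PORT_STATUS_ACTIVE:
                return const.PORT_STATUS_ACTIVE
            if binding.status == const.PORT_STATUS_DOWN:
                final_status = const.PORT_STATUS_DOWN
        return final_status
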
Example #36
0
    def get_device_details(self, rpc_context, **kwargs):
        """Agent requests device details."""
        agent_id = kwargs.get('agent_id')
        device = kwargs.get('device')
        LOG.debug(_("Device %(device)s details requested by agent "
                    "%(agent_id)s"),
                  {'device': device, 'agent_id': agent_id})
        port_id = self._device_to_port_id(device)

        session = db_api.get_session()
        with session.begin(subtransactions=True):
            port = db.get_port(session, port_id)
            if not port:
                LOG.warning(_("Device %(device)s requested by agent "
                              "%(agent_id)s not found in database"),
                            {'device': device, 'agent_id': agent_id})
                return {'device': device}

            segments = db.get_network_segments(session, port.network_id)
            if not segments:
                LOG.warning(_("Device %(device)s requested by agent "
                              "%(agent_id)s has network %(network_id)s with "
                              "no segments"),
                            {'device': device,
                             'agent_id': agent_id,
                             'network_id': port.network_id})
                return {'device': device}

            binding = db.ensure_port_binding(session, port.id)
            if not binding.segment:
                LOG.warning(_("Device %(device)s requested by agent "
                              "%(agent_id)s on network %(network_id)s not "
                              "bound, vif_type: %(vif_type)s"),
                            {'device': device,
                             'agent_id': agent_id,
                             'network_id': port.network_id,
                             'vif_type': binding.vif_type})
                return {'device': device}

            segment = self._find_segment(segments, binding.segment)
            if not segment:
                LOG.warning(_("Device %(device)s requested by agent "
                              "%(agent_id)s on network %(network_id)s "
                              "invalid segment, vif_type: %(vif_type)s"),
                            {'device': device,
                             'agent_id': agent_id,
                             'network_id': port.network_id,
                             'vif_type': binding.vif_type})
                return {'device': device}

            new_status = (q_const.PORT_STATUS_BUILD if port.admin_state_up
                          else q_const.PORT_STATUS_DOWN)
            if port.status != new_status:
                port.status = new_status
            entry = {'device': device,
                     'network_id': port.network_id,
                     'port_id': port.id,
                     'admin_state_up': port.admin_state_up,
                     'network_type': segment[api.NETWORK_TYPE],
                     'segmentation_id': segment[api.SEGMENTATION_ID],
                     'physical_network': segment[api.PHYSICAL_NETWORK]}
            LOG.debug(_("Returning: %s"), entry)
            return entry
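
get_device_details above resolves the bound segment through _find_segment. A minimal sketch under the assumption that it simply matches the binding's segment id against the segments reported for the network:

    def _find_segment(self, segments, segment_id):
        # Return the segment dict whose api.ID matches the bound segment
        # id, or None when nothing matches (illustrative sketch only).
        for segment in segments:
            if segment[api.ID] == segment_id:
                return segment
        return None
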
Example #37
0
    def add_router_interface(self, context, router_id, interface_info):
        """creates vlnk on the fortinet device."""
        LOG.debug(
            "FortinetL3ServicePlugin.add_router_interface: "
            "router_id=%(router_id)s "
            "interface_info=%(interface_info)r", {
                'router_id': router_id,
                'interface_info': interface_info
            })
        with context.session.begin(subtransactions=True):
            info = super(FortinetL3ServicePlugin,
                         self).add_router_interface(context, router_id,
                                                    interface_info)
            port = db.get_port(context, info['port_id'])
            port['admin_state_up'] = True
            port['port'] = port
            LOG.debug(
                "FortinetL3ServicePlugin: "
                "context=%(context)s"
                "port=%(port)s "
                "info=%(info)r", {
                    'context': context,
                    'port': port,
                    'info': info
                })
            interface_info = info
            subnet = self._core_plugin._get_subnet(context,
                                                   interface_info['subnet_id'])
            network_id = subnet['network_id']
            tenant_id = port['tenant_id']
            port_filters = {
                'network_id': [network_id],
                'device_owner': [DEVICE_OWNER_ROUTER_INTF]
            }
            port_count = self._core_plugin.get_ports_count(
                context, port_filters)
            # port count is checked against 2 since the current port is already
            # added to db
            if port_count == 2:
                # This subnet is already part of some router
                LOG.error(
                    _LE("FortinetL3ServicePlugin: adding redundant "
                        "router interface is not supported"))
                raise Exception(
                    _("FortinetL3ServicePlugin: adding redundant "
                      "router interface is not supported"))
            try:
                db_namespace = fortinet_db.query_record(
                    context,
                    fortinet_db.Fortinet_ML2_Namespace,
                    tenant_id=tenant_id)
                vlan_inf = utils.get_intf(context, network_id)
                int_intf, ext_intf = utils.get_vlink_intf(
                    self, context, vdom=db_namespace.vdom)
                utils.add_fwpolicy(self,
                                   context,
                                   vdom=db_namespace.vdom,
                                   srcintf=vlan_inf,
                                   dstintf=int_intf,
                                   nat='enable')

            except Exception as e:
                LOG.error(
                    _LE("Failed to create Fortinet resources to add "
                        "router interface. info=%(info)s, "
                        "router_id=%(router_id)s"), {
                            "info": info,
                            "router_id": router_id
                        })
                utils._rollback_on_err(self, context, e)
                with excutils.save_and_reraise_exception():
                    self.remove_router_interface(context, router_id,
                                                 interface_info)
        utils.update_status(self, context, t_consts.TaskStatus.COMPLETED)
        return info