Example #1
    def create_port_precommit(self, context):
        # TODO: mac_learning

        port_data = context.current

        if port_data['device_owner'] == n_const.DEVICE_OWNER_FLOATINGIP:
            return  # no need to process further for fip

        nsx_switch = self._find_lswitch(
            context,
            port_data['network_id']
        )

        nsx_sec_profile_ids = self._convert_to_nsx_secgroup_ids(
            context,
            port_data.get('security_groups') or []
        )

        nsx_port = switchlib.create_lport(
            self.cluster,
            nsx_switch['uuid'],
            port_data['tenant_id'],
            port_data['id'],
            port_data['name'],
            port_data['device_id'],
            port_data['admin_state_up'],
            port_data['mac_address'],
            port_data['fixed_ips'],
            port_security_enabled=port_data['port_security_enabled'],
            security_profiles=nsx_sec_profile_ids,
            mac_learning_enabled=None,  # TODO
            allowed_address_pairs=port_data['allowed_address_pairs']
        )

        nsx_db.add_neutron_nsx_port_mapping(
            context._plugin_context.session,
            port_data['id'],
            nsx_switch['uuid'],
            nsx_port['uuid']
        )

        if port_data['device_owner']:
            switchlib.plug_vif_interface(
                self.cluster,
                nsx_switch['uuid'],
                nsx_port['uuid'],
                "VifAttachment",
                port_data['id']
            )

        LOG.debug("port created on NSX backend for tenant "
                  "%(tenant_id)s: (%(id)s)", port_data)
Example #2
def get_nsx_switch_and_port_id(session, cluster, neutron_port_id):
    """Return the NSX switch and port uuids for a given neutron port.

    First, look up the mapping in the Neutron database. If it is not
    found, query the NSX platform, as the mapping might be missing
    because the port was created before upgrading to Grizzly.

    This routine also retrieves the identifier of the logical switch in
    the backend where the port is plugged. Prior to Icehouse this
    information was not available in the Neutron database. To deal
    with pre-existing records, this routine queries the backend to
    retrieve the correct switch identifier.

    As of the Icehouse release it is no longer possible to assume
    that the backend logical switch identifier is equal to the neutron
    network identifier.
    """
    nsx_switch_id, nsx_port_id = nsx_db.get_nsx_switch_and_port_id(
        session, neutron_port_id)
    if not nsx_switch_id:
        # Find logical switch for port from backend
        # This is a rather expensive query, but it won't be executed
        # more than once for each port in Neutron's lifetime
        nsx_ports = switchlib.query_lswitch_lports(
            cluster,
            '*',
            relations='LogicalSwitchConfig',
            filters={
                'tag': neutron_port_id,
                'tag_scope': 'q_port_id'
            })
        # Only one result expected
        # NOTE(salv-orlando): Not handling the case where more than one
        # port is found with the same neutron port tag
        if not nsx_ports:
            LOG.warning(_("Unable to find NSX port for Neutron port %s"),
                        neutron_port_id)
            # This method is supposed to return a tuple
            return None, None
        nsx_port = nsx_ports[0]
        nsx_switch_id = (nsx_port['_relations']
                         ['LogicalSwitchConfig']['uuid'])
        if nsx_port_id:
            # Mapping already exists. Delete before recreating
            nsx_db.delete_neutron_nsx_port_mapping(session, neutron_port_id)
        else:
            nsx_port_id = nsx_port['uuid']
        # (re)Create DB mapping
        nsx_db.add_neutron_nsx_port_mapping(session, neutron_port_id,
                                            nsx_switch_id, nsx_port_id)
    return nsx_switch_id, nsx_port_id
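
For context, a minimal sketch of what the nsx_db.get_nsx_switch_and_port_id lookup used above could look like. The NeutronNsxPortMapping model and its neutron_id / nsx_port_id columns appear in the test examples further down this page; the nsx_switch_id column name and the nsx_models import are assumptions, and the real nsx_db module may differ.

def get_nsx_switch_and_port_id(session, neutron_port_id):
    # Sketch only: return the recorded (switch, port) pair for a Neutron
    # port, or (None, None) when no mapping row exists yet.
    mapping = (session.query(nsx_models.NeutronNsxPortMapping).
               filter_by(neutron_id=neutron_port_id).first())
    if mapping is None:
        return None, None
    return mapping.nsx_switch_id, mapping.nsx_port_id
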
Example #3
def get_nsx_switch_and_port_id(session, cluster, neutron_port_id):
    """Return the NSX switch and port uuids for a given neutron port.

    First, look up the mapping in the Neutron database. If it is not
    found, query the NSX platform, as the mapping might be missing
    because the port was created before upgrading to Grizzly.

    This routine also retrieves the identifier of the logical switch in
    the backend where the port is plugged. Prior to Icehouse this
    information was not available in the Neutron database. To deal
    with pre-existing records, this routine queries the backend to
    retrieve the correct switch identifier.

    As of the Icehouse release it is no longer possible to assume
    that the backend logical switch identifier is equal to the neutron
    network identifier.
    """
    nsx_switch_id, nsx_port_id = nsx_db.get_nsx_switch_and_port_id(
        session, neutron_port_id)
    if not nsx_switch_id:
        # Find logical switch for port from backend
        # This is a rather expensive query, but it won't be executed
        # more than once for each port in Neutron's lifetime
        nsx_ports = switchlib.query_lswitch_lports(
            cluster, '*', relations='LogicalSwitchConfig',
            filters={'tag': neutron_port_id,
                     'tag_scope': 'q_port_id'})
        # Only one result expected
        # NOTE(salv-orlando): Not handling the case where more than one
        # port is found with the same neutron port tag
        if not nsx_ports:
            LOG.warning(_("Unable to find NSX port for Neutron port %s"),
                        neutron_port_id)
            # This method is supposed to return a tuple
            return None, None
        nsx_port = nsx_ports[0]
        nsx_switch_id = (nsx_port['_relations']
                         ['LogicalSwitchConfig']['uuid'])
        if nsx_port_id:
            # Mapping already exists. Delete before recreating
            nsx_db.delete_neutron_nsx_port_mapping(
                session, neutron_port_id)
        else:
            nsx_port_id = nsx_port['uuid']
        # (re)Create DB mapping
        nsx_db.add_neutron_nsx_port_mapping(
            session, neutron_port_id,
            nsx_switch_id, nsx_port_id)
    return nsx_switch_id, nsx_port_id
Example #4
    def test_add_neutron_nsx_port_mapping_raise_on_duplicate_constraint(self):
        neutron_net_id = 'foo_neutron_network_id'
        neutron_port_id = 'foo_neutron_port_id'
        nsx_port_id_1 = 'foo_nsx_port_id_1'
        nsx_port_id_2 = 'foo_nsx_port_id_2'
        nsx_switch_id = 'foo_nsx_switch_id'
        self._setup_neutron_network_and_port(neutron_net_id, neutron_port_id)

        nsx_db.add_neutron_nsx_port_mapping(
            self.ctx.session, neutron_port_id, nsx_switch_id, nsx_port_id_1)
        # Call the method twice to trigger a db duplicate constraint error,
        # this time with a different nsx port id!
        self.assertRaises(d_exc.DBDuplicateEntry,
                          nsx_db.add_neutron_nsx_port_mapping,
                          self.ctx.session, neutron_port_id,
                          nsx_switch_id, nsx_port_id_2)
Example #5
    def test_add_neutron_nsx_port_mapping_handle_duplicate_constraint(self):
        neutron_net_id = 'foo_neutron_network_id'
        neutron_port_id = 'foo_neutron_port_id'
        nsx_port_id = 'foo_nsx_port_id'
        nsx_switch_id = 'foo_nsx_switch_id'
        self._setup_neutron_network_and_port(neutron_net_id, neutron_port_id)

        nsx_db.add_neutron_nsx_port_mapping(
            self.ctx.session, neutron_port_id, nsx_switch_id, nsx_port_id)
        # Call the method a second time with the same nsx port id; the
        # resulting duplicate constraint error should be handled internally
        nsx_db.add_neutron_nsx_port_mapping(
            self.ctx.session, neutron_port_id, nsx_switch_id, nsx_port_id)
        result = (self.ctx.session.query(nsx_models.NeutronNsxPortMapping).
                  filter_by(neutron_id=neutron_port_id).one())
        self.assertEqual(nsx_port_id, result.nsx_port_id)
        self.assertEqual(neutron_port_id, result.neutron_id)
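
Examples #4 and #5 above pin down the duplicate-handling contract of add_neutron_nsx_port_mapping: re-adding the exact same mapping is tolerated, while mapping the same Neutron port to a different NSX port surfaces DBDuplicateEntry. A minimal sketch of that contract follows; it is not the vmware-nsx implementation (which relies on the unique constraint plus oslo.db exception translation rather than a read-before-write), and the nsx_switch_id column name is an assumption.

from oslo_db import exception as d_exc


def add_neutron_nsx_port_mapping(session, neutron_id,
                                 nsx_switch_id, nsx_port_id):
    # Sketch only: tolerate re-adding an identical mapping, raise
    # DBDuplicateEntry when a different NSX port is already mapped.
    existing = (session.query(nsx_models.NeutronNsxPortMapping).
                filter_by(neutron_id=neutron_id).first())
    if existing is not None:
        if existing.nsx_port_id == nsx_port_id:
            # Same mapping already recorded: nothing to do.
            return existing
        # A different NSX port is already mapped to this Neutron port.
        raise d_exc.DBDuplicateEntry(columns=['neutron_id'])
    mapping = nsx_models.NeutronNsxPortMapping(
        neutron_id=neutron_id,
        nsx_switch_id=nsx_switch_id,
        nsx_port_id=nsx_port_id)
    session.add(mapping)
    return mapping
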
Example #6
    def test_add_neutron_nsx_port_mapping_raise_on_duplicate_constraint(self):
        neutron_net_id = 'foo_neutron_network_id'
        neutron_port_id = 'foo_neutron_port_id'
        nsx_port_id_1 = 'foo_nsx_port_id_1'
        nsx_port_id_2 = 'foo_nsx_port_id_2'
        nsx_switch_id = 'foo_nsx_switch_id'
        self._setup_neutron_network_and_port(neutron_net_id, neutron_port_id)

        nsx_db.add_neutron_nsx_port_mapping(self.ctx.session, neutron_port_id,
                                            nsx_switch_id, nsx_port_id_1)
        # Call the method twice to trigger a db duplicate constraint error,
        # this time with a different nsx port id!
        self.assertRaises(d_exc.DBDuplicateEntry,
                          nsx_db.add_neutron_nsx_port_mapping,
                          self.ctx.session, neutron_port_id, nsx_switch_id,
                          nsx_port_id_2)
Example #7
    def test_add_neutron_nsx_port_mapping_handle_duplicate_constraint(self):
        neutron_net_id = 'foo_neutron_network_id'
        neutron_port_id = 'foo_neutron_port_id'
        nsx_port_id = 'foo_nsx_port_id'
        nsx_switch_id = 'foo_nsx_switch_id'
        self._setup_neutron_network_and_port(neutron_net_id, neutron_port_id)

        nsx_db.add_neutron_nsx_port_mapping(
            self.ctx.session, neutron_port_id, nsx_switch_id, nsx_port_id)
        # Call the method a second time with the same nsx port id; the
        # resulting duplicate constraint error should be handled internally
        nsx_db.add_neutron_nsx_port_mapping(
            self.ctx.session, neutron_port_id, nsx_switch_id, nsx_port_id)
        result = (self.ctx.session.query(models.NeutronNsxPortMapping).
                  filter_by(neutron_id=neutron_port_id).one())
        self.assertEqual(nsx_port_id, result.nsx_port_id)
        self.assertEqual(neutron_port_id, result.neutron_id)
Example #8
    def _nsx_create_port(self, context, port_data):
        """Driver for creating a logical switch port on NSX platform."""
        # FIXME(salvatore-orlando): On the NSX platform we do not really have
        # external networks. So if a user tries to create a "regular" VIF
        # port on an external network we are unable to actually create it.
        # However, in order not to break unit tests, we still need to create
        # the DB object and return success.

        # NOTE(rods): Reporting Mark's comment on the Havana version of this
        # patch. Akanda does want ports on external networks, so this method
        # is basically the same with the external-network check removed and
        # router ports auto-plugged.

        # ---------------------------------------------------------------------
        # Note(rods): Remove the check on the external network
        #
        # Original code:
        # if self._network_is_external(context, port_data['network_id']):
        #     LOG.info(_("NSX plugin does not support regular VIF ports on "
        #                "external networks. Port %s will be down."),
        #              port_data['network_id'])
        #     # No need to actually update the DB state - the default is down
        #     return port_data
        # ---------------------------------------------------------------------
        lport = None
        selected_lswitch = None
        try:
            selected_lswitch = self._nsx_find_lswitch_for_port(context,
                                                               port_data)
            lport = self._nsx_create_port_helper(context.session,
                                                 selected_lswitch['uuid'],
                                                 port_data,
                                                 True)
            nsx_db.add_neutron_nsx_port_mapping(
                context.session, port_data['id'],
                selected_lswitch['uuid'], lport['uuid'])
            # -----------------------------------------------------------------
            # Note(rods): Auto plug router ports
            #
            # Original code:
            # if port_data['device_owner'] not in self.port_special_owners:
            #     switchlib.plug_vif_interface(
            #         self.cluster, selected_lswitch['uuid'],
            #         lport['uuid'], "VifAttachment", port_data['id'])

            switchlib.plug_vif_interface(
                self.cluster, selected_lswitch['uuid'],
                lport['uuid'], "VifAttachment", port_data['id'])
            # -----------------------------------------------------------------

            LOG.debug(_("_nsx_create_port completed for port %(name)s "
                        "on network %(network_id)s. The new port id is "
                        "%(id)s."), port_data)
        except (api_exc.NsxApiException, n_exc.NeutronException):
            self._handle_create_port_exception(
                context, port_data['id'],
                selected_lswitch and selected_lswitch['uuid'],
                lport and lport['uuid'])
        except db_exc.DBError as e:
            if (port_data['device_owner'] == constants.DEVICE_OWNER_DHCP and
                    isinstance(e.inner_exception, sql_exc.IntegrityError)):
                msg = (_("Concurrent network deletion detected; Back-end Port "
                         "%(nsx_id)s creation to be rolled back for Neutron "
                         "port: %(neutron_id)s")
                       % {'nsx_id': lport['uuid'],
                          'neutron_id': port_data['id']})
                LOG.warning(msg)
                if selected_lswitch and lport:
                    try:
                        switchlib.delete_port(self.cluster,
                                              selected_lswitch['uuid'],
                                              lport['uuid'])
                    except n_exc.NotFound:
                        LOG.debug(_("NSX Port %s already gone"), lport['uuid'])
Example #9
    def _nsx_create_port(self, context, port_data):
        """Driver for creating a logical switch port on NSX platform."""
        # FIXME(salvatore-orlando): On the NSX platform we do not really have
        # external networks. So if a user tries to create a "regular" VIF
        # port on an external network we are unable to actually create it.
        # However, in order not to break unit tests, we still need to create
        # the DB object and return success.

        # NOTE(rods): Reporting Mark's comment on the Havana version of this
        # patch. Akanda does want ports on external networks, so this method
        # is basically the same with the external-network check removed and
        # router ports auto-plugged.

        # ---------------------------------------------------------------------
        # Note(rods): Remove the check on the external network
        #
        # Original code:
        # if self._network_is_external(context, port_data['network_id']):
        #     LOG.info(_("NSX plugin does not support regular VIF ports on "
        #                "external networks. Port %s will be down."),
        #              port_data['network_id'])
        #     # No need to actually update the DB state - the default is down
        #     return port_data
        # ---------------------------------------------------------------------
        lport = None
        selected_lswitch = None
        try:
            selected_lswitch = self._nsx_find_lswitch_for_port(
                context, port_data)
            lport = self._nsx_create_port_helper(context.session,
                                                 selected_lswitch['uuid'],
                                                 port_data, True)
            nsx_db.add_neutron_nsx_port_mapping(context.session,
                                                port_data['id'],
                                                selected_lswitch['uuid'],
                                                lport['uuid'])
            # -----------------------------------------------------------------
            # Note(rods): Auto plug router ports
            #
            # Original code:
            # if port_data['device_owner'] not in self.port_special_owners:
            #     switchlib.plug_vif_interface(
            #         self.cluster, selected_lswitch['uuid'],
            #         lport['uuid'], "VifAttachment", port_data['id'])

            switchlib.plug_vif_interface(self.cluster,
                                         selected_lswitch['uuid'],
                                         lport['uuid'], "VifAttachment",
                                         port_data['id'])
            # -----------------------------------------------------------------

            LOG.debug(
                _("_nsx_create_port completed for port %(name)s "
                  "on network %(network_id)s. The new port id is "
                  "%(id)s."), port_data)
        except (api_exc.NsxApiException, n_exc.NeutronException):
            self._handle_create_port_exception(
                context, port_data['id'], selected_lswitch
                and selected_lswitch['uuid'], lport and lport['uuid'])
        except db_exc.DBError as e:
            if (port_data['device_owner'] == constants.DEVICE_OWNER_DHCP
                    and isinstance(e.inner_exception, sql_exc.IntegrityError)):
                msg = (_("Concurrent network deletion detected; Back-end Port "
                         "%(nsx_id)s creation to be rolled back for Neutron "
                         "port: %(neutron_id)s") % {
                             'nsx_id': lport['uuid'],
                             'neutron_id': port_data['id']
                         })
                LOG.warning(msg)
                if selected_lswitch and lport:
                    try:
                        switchlib.delete_port(self.cluster,
                                              selected_lswitch['uuid'],
                                              lport['uuid'])
                    except n_exc.NotFound:
                        LOG.debug(_("NSX Port %s already gone"), lport['uuid'])