Example #1
def test_query_lswitch_ports(self):
    lswitch, lport = self._create_switch_and_port()
    # Add two more ports to the switch created by the helper above
    switch_port_uuids = [
        switchlib.create_lport(
            self.fake_cluster, lswitch['uuid'], 'pippo', 'qportid-%s' % k,
            'port-%s' % k, 'deviceid-%s' % k, True)['uuid']
        for k in range(2)]
    switch_port_uuids.append(lport['uuid'])
    # The query should return all three ports plugged into the switch
    ports = switchlib.query_lswitch_lports(
        self.fake_cluster, lswitch['uuid'])
    self.assertEqual(len(ports), 3)
    for res_port in ports:
        self.assertIn(res_port['uuid'], switch_port_uuids)
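
A tighter variant of the final assertion (a sketch, not part of the original test, assuming the same fixtures) compares the two UUID collections directly, which checks the count and the membership in one step:

    # Hypothetical alternative: order-insensitive comparison of the
    # expected and returned port UUIDs.
    self.assertEqual(sorted(switch_port_uuids),
                     sorted(port['uuid'] for port in ports))
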
Example #2
def get_nsx_switch_and_port_id(session, cluster, neutron_port_id):
    """Return the NSX switch and port uuids for a given neutron port.

    First, look up the Neutron database. If not found, execute
    a query on NSX platform as the mapping might be missing because
    the port was created before upgrading to grizzly.

    This routine also retrieves the identifier of the logical switch in
    the backend where the port is plugged. Prior to Icehouse this
    information was not available in the Neutron Database. For dealing
    with pre-existing records, this routine will query the backend
    for retrieving the correct switch identifier.

    As of Icehouse release it is not indeed anymore possible to assume
    the backend logical switch identifier is equal to the neutron
    network identifier.
    """
    nsx_switch_id, nsx_port_id = nsx_db.get_nsx_switch_and_port_id(
        session, neutron_port_id)
    if not nsx_switch_id:
        # Find logical switch for port from backend
        # This is a rather expensive query, but it won't be executed
        # more than once for each port in Neutron's lifetime
        nsx_ports = switchlib.query_lswitch_lports(
            cluster,
            '*',
            relations='LogicalSwitchConfig',
            filters={
                'tag': neutron_port_id,
                'tag_scope': 'q_port_id'
            })
        # Only one result expected
        # NOTE(salv-orlando): Not handling the case where more than one
        # port is found with the same neutron port tag
        if not nsx_ports:
            LOG.warn(_("Unable to find NSX port for Neutron port %s"),
                     neutron_port_id)
            # This method is supposed to return a tuple
            return None, None
        nsx_port = nsx_ports[0]
        nsx_switch_id = (nsx_port['_relations']
                         ['LogicalSwitchConfig']['uuid'])
        if nsx_port_id:
            # Mapping already exists. Delete before recreating
            nsx_db.delete_neutron_nsx_port_mapping(session, neutron_port_id)
        else:
            nsx_port_id = nsx_port['uuid']
        # (re)Create DB mapping
        nsx_db.add_neutron_nsx_port_mapping(session, neutron_port_id,
                                            nsx_switch_id, nsx_port_id)
    return nsx_switch_id, nsx_port_id
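
A minimal usage sketch for the routine above (hypothetical caller; the names session, cluster, and port_id are assumed to come from the plugin context and are illustrative only):

    # Resolve the backend identifiers, falling back to the NSX query
    # performed inside the routine when the DB mapping is missing.
    nsx_switch_id, nsx_port_id = get_nsx_switch_and_port_id(
        session, cluster, port_id)
    if nsx_switch_id is None:
        # The routine returned (None, None): no NSX counterpart exists
        # for this Neutron port, so skip any backend operation.
        pass
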