Example #1
def nsx_update_metadata_proxy(resource, event, trigger, **kwargs):
    """Update Metadata proxy for NSXv3 CrossHairs."""

    nsx_version = utils.get_connected_nsxlib().get_version()
    if not nsx_utils.is_nsx_version_1_1_0(nsx_version):
        LOG.info(_LI("This utility is not available for NSX version %s"),
                 nsx_version)
        return

    metadata_proxy_uuid = None
    if kwargs.get('property'):
        properties = admin_utils.parse_multi_keyval_opt(kwargs['property'])
        metadata_proxy_uuid = properties.get('metadata_proxy_uuid')
    if not metadata_proxy_uuid:
        LOG.error(_LE("metadata_proxy_uuid is not defined"))
        return

    cfg.CONF.set_override('dhcp_agent_notification', False)
    cfg.CONF.set_override('native_dhcp_metadata', True, 'nsx_v3')
    cfg.CONF.set_override('metadata_proxy_uuid', metadata_proxy_uuid, 'nsx_v3')

    plugin = utils.NsxV3PluginWrapper()
    nsx_client = utils.get_nsxv3_client()
    port_resource = resources.LogicalPort(nsx_client)

    # For each Neutron network, check if it is an internal metadata network.
    # If yes, delete the network and associated router interface.
    # Otherwise, create a logical switch port with MD-Proxy attachment.
    for network in neutron_client.get_networks():
        if _is_metadata_network(network):
            # It is a metadata network, find the attached router,
            # remove the router interface and the network.
            filters = {'device_owner': const.ROUTER_INTERFACE_OWNERS,
                       'fixed_ips': {
                           'subnet_id': [network['subnets'][0]],
                           'ip_address': [nsx_rpc.METADATA_GATEWAY_IP]}}
            ports = neutron_client.get_ports(filters=filters)
            if not ports:
                continue
            router_id = ports[0]['device_id']
            interface = {'subnet_id': network['subnets'][0]}
            plugin.remove_router_interface(router_id, interface)
            LOG.info(_LI("Removed metadata interface on router %s"), router_id)
            plugin.delete_network(network['id'])
            LOG.info(_LI("Removed metadata network %s"), network['id'])
        else:
            lswitch_id = neutron_client.net_id_to_lswitch_id(network['id'])
            if not lswitch_id:
                continue
            tags = nsx_utils.build_v3_tags_payload(
                network, resource_type='os-neutron-net-id',
                project_name='admin')
            name = nsx_utils.get_name_and_uuid('%s-%s' % (
                'mdproxy', network['name'] or 'network'), network['id'])
            port_resource.create(
                lswitch_id, metadata_proxy_uuid, tags=tags, name=name,
                attachment_type=nsx_constants.ATTACHMENT_MDPROXY)
            LOG.info(_LI("Enabled native metadata proxy for network %s"),
                     network['id'])
Example #2
 def test_build_v3_tags_max_length_payload(self):
     result = utils.build_v3_tags_payload(
         {'id': 'X' * 255,
          'tenant_id': 'X' * 255},
         resource_type='os-neutron-net-id',
         project_name='X' * 255)
     expected = [{'scope': 'os-neutron-net-id', 'tag': 'X' * 40},
                 {'scope': 'os-project-id', 'tag': 'X' * 40},
                 {'scope': 'os-project-name', 'tag': 'X' * 40},
                 {'scope': 'os-api-version',
                  'tag': version.version_info.release_string()}]
     self.assertEqual(expected, result)
Example #3
 def test_build_v3_tags_payload_internal(self):
     result = utils.build_v3_tags_payload(
         {'id': 'fake_id',
          'tenant_id': 'fake_tenant_id'},
         resource_type='os-neutron-net-id',
         project_name=None)
     expected = [{'scope': 'os-neutron-net-id', 'tag': 'fake_id'},
                 {'scope': 'os-project-id', 'tag': 'fake_tenant_id'},
                 {'scope': 'os-project-name', 'tag': 'NSX Neutron plugin'},
                 {'scope': 'os-api-version',
                  'tag': version.version_info.release_string()}]
     self.assertEqual(expected, result)
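Taken together, Examples #2 and #3 pin down the helper's contract: the resource id, project id, and project name are each truncated to NSX's tag length limit, and a missing project name falls back to a plugin default. Below is a minimal sketch that reproduces only what these two tests assert; the 40-character limit and the fallback string are lifted from the expected values above, and the api_version parameter stands in for version.version_info.release_string(), which the tests use. It is not the plugin's actual implementation.

# Sketch only: mirrors the behavior asserted by Examples #2 and #3,
# not the plugin's real code.
MAX_TAG_LEN = 40  # the tests expect tag values truncated to 40 characters

def build_v3_tags_payload_sketch(resource, resource_type, project_name,
                                 api_version='0.0.0'):
    """Return NSX tag dicts for a Neutron resource (illustrative only)."""
    if project_name is None:
        # Example #3 shows this fallback for internally created resources.
        project_name = 'NSX Neutron plugin'
    return [
        {'scope': resource_type, 'tag': resource['id'][:MAX_TAG_LEN]},
        {'scope': 'os-project-id', 'tag': resource['tenant_id'][:MAX_TAG_LEN]},
        {'scope': 'os-project-name', 'tag': project_name[:MAX_TAG_LEN]},
        # The real helper tags the plugin release string here.
        {'scope': 'os-api-version', 'tag': api_version},
    ]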
Example #4
    def test_is_internal_resource(self):
        project_tag = utils.build_v3_tags_payload(
            {'id': 'fake_id',
             'tenant_id': 'fake_tenant_id'},
            resource_type='os-neutron-net-id',
            project_name=None)
        internal_tag = utils.build_v3_api_version_tag()

        expect_false = utils.is_internal_resource({'tags': project_tag})
        self.assertFalse(expect_false)

        expect_true = utils.is_internal_resource({'tags': internal_tag})
        self.assertTrue(expect_true)
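Example #4 only distinguishes the two tag payloads; it does not show how is_internal_resource() decides. One reading consistent with the test is sketched below: a resource whose tags are exactly the single os-api-version tag produced by build_v3_api_version_tag() is treated as plugin-internal. The function name and the comparison are assumptions for illustration, not the plugin's verified code.

# Sketch under the assumption implied by Example #4: internally created NSX
# resources carry only the API-version tag, while Neutron-owned resources
# carry the full payload from build_v3_tags_payload().
def is_internal_resource_sketch(nsx_resource):
    tags = nsx_resource.get('tags', [])
    return (len(tags) == 1 and
            tags[0].get('scope') == 'os-api-version')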
Example #5
def build_dhcp_server_config(network, subnet, port, project_name):
    # Prepare the configuration for a new logical DHCP server.
    server_ip = "%s/%u" % (port['fixed_ips'][0]['ip_address'],
                           netaddr.IPNetwork(subnet['cidr']).prefixlen)
    dns_servers = subnet['dns_nameservers']
    if not dns_servers or not validators.is_attr_set(dns_servers):
        dns_servers = cfg.CONF.nsx_v3.nameservers
    gateway_ip = subnet['gateway_ip']
    if not validators.is_attr_set(gateway_ip):
        gateway_ip = None

    # The following code is based on _generate_opts_per_subnet() in
    # neutron/agent/linux/dhcp.py. It prepares DHCP options for a subnet.

    # Add route for directly connected network.
    host_routes = [{'network': subnet['cidr'], 'next_hop': '0.0.0.0'}]
    # Copy routes from subnet host_routes attribute.
    for hr in subnet['host_routes']:
        if hr['destination'] == constants.IPv4_ANY:
            if not gateway_ip:
                gateway_ip = hr['nexthop']
        else:
            host_routes.append({
                'network': hr['destination'],
                'next_hop': hr['nexthop']
            })
    # If gateway_ip is defined, add default route via this gateway.
    if gateway_ip:
        host_routes.append({
            'network': constants.IPv4_ANY,
            'next_hop': gateway_ip
        })

    options = {'option121': {'static_routes': host_routes}}
    name = utils.get_name_and_uuid(network['name'] or 'dhcpserver',
                                   network['id'])
    tags = utils.build_v3_tags_payload(network,
                                       resource_type='os-neutron-net-id',
                                       project_name=project_name)
    return {
        'name': name,
        'dhcp_profile_id': cfg.CONF.nsx_v3.dhcp_profile_uuid,
        'server_ip': server_ip,
        'dns_servers': dns_servers,
        'domain_name': cfg.CONF.nsx_v3.dns_domain,
        'gateway_ip': gateway_ip,
        'options': options,
        'tags': tags
    }
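To make the option121 handling above concrete, here is the same route-building logic run on a hypothetical subnet (the addresses are illustrative and not taken from the plugin's tests); it shows the directly connected route, the copied host route, and the default route appended last.

# Illustrative input only: a /24 subnet with a gateway and one extra host route.
IPv4_ANY = '0.0.0.0/0'  # stands in for constants.IPv4_ANY
subnet = {
    'cidr': '10.0.0.0/24',
    'gateway_ip': '10.0.0.1',
    'host_routes': [{'destination': '192.168.10.0/24',
                     'nexthop': '10.0.0.254'}],
}

# Same steps as build_dhcp_server_config() above.
gateway_ip = subnet['gateway_ip']
host_routes = [{'network': subnet['cidr'], 'next_hop': '0.0.0.0'}]
for hr in subnet['host_routes']:
    if hr['destination'] == IPv4_ANY:
        if not gateway_ip:
            gateway_ip = hr['nexthop']
    else:
        host_routes.append({'network': hr['destination'],
                            'next_hop': hr['nexthop']})
if gateway_ip:
    host_routes.append({'network': IPv4_ANY, 'next_hop': gateway_ip})

# host_routes is now:
# [{'network': '10.0.0.0/24', 'next_hop': '0.0.0.0'},
#  {'network': '192.168.10.0/24', 'next_hop': '10.0.0.254'},
#  {'network': '0.0.0.0/0', 'next_hop': '10.0.0.1'}]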
Example #6
 def create_policy(self, context, policy):
     tags = utils.build_v3_tags_payload(
         policy['policy'], resource_type='os-neutron-qos-id',
         project_name=context.tenant_name)
     result = nsxlib.create_qos_switching_profile(
                  tags=tags, name=policy['policy'].get("name"),
                  description=policy['policy'].get("description"))
     policy['policy']['id'] = result['id']
     try:
         policy = policy_object.QosPolicy(context, **policy['policy'])
         policy.create()
         return policy
     except Exception:
         with excutils.save_and_reraise_exception():
             # Undo creation on the backend
             LOG.exception(_LE('Failed to create qos-policy'))
             nsxlib.delete_qos_switching_profile(result['id'])
Example #7
    def test_policy_create_profile(self, fake_db_add, fake_rbac_create):
        # test the switch profile creation when a QoS policy is created
        with mock.patch.object(nsxlib, 'create_qos_switching_profile',
            return_value=self.fake_profile) as create_profile:
            with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
                return_value=self.policy):
                with mock.patch('neutron.objects.qos.policy.QosPolicy.create'):
                    policy = self.qos_plugin.create_policy(self.ctxt,
                                                           self.policy_data)
                    expected_tags = utils.build_v3_tags_payload(
                        policy,
                        resource_type='os-neutron-qos-id',
                        project_name=self.ctxt.tenant_name)

                    create_profile.assert_called_once_with(
                        description=self.policy_data["policy"]["description"],
                        name=self.policy_data["policy"]["name"],
                        tags=expected_tags)
                    # verify that the policy->profile mapping entry was added
                    self.assertTrue(fake_db_add.called)
Example #8
    def test_policy_create_profile(self, fake_db_add, fake_rbac_create):
        # test the switch profile creation when a QoS policy is created
        with mock.patch(
                'vmware_nsx.nsxlib.v3.NsxLib.create_qos_switching_profile',
                return_value=self.fake_profile) as create_profile:
            with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
                            return_value=self.policy):
                with mock.patch('neutron.objects.qos.policy.QosPolicy.create'):
                    policy = self.qos_plugin.create_policy(
                        self.ctxt, self.policy_data)
                    expected_tags = utils.build_v3_tags_payload(
                        policy,
                        resource_type='os-neutron-qos-id',
                        project_name=self.ctxt.tenant_name)

                    create_profile.assert_called_once_with(
                        description=self.policy_data["policy"]["description"],
                        name=self.policy_data["policy"]["name"],
                        tags=expected_tags)
                    # verify that the policy->profile mapping entry was added
                    self.assertTrue(fake_db_add.called)
Example #9
 def create_tap_flow_postcommit(self, context):
     """Create tap flow and port mirror session on NSX backend."""
     tf = context.tap_flow
     # Retrieve tap service.
     ts = self._get_tap_service(context._plugin_context,
                                tf.get('tap_service_id'))
     src_port_id = tf.get('source_port')
     dest_port_id = ts.get('port_id')
     tags = nsx_utils.build_v3_tags_payload(
         tf,
         resource_type='os-neutron-mirror-id',
         project_name=context._plugin_context.tenant_name)
     nsx_direction = self._convert_to_backend_direction(tf.get('direction'))
     # Create a port mirroring session object if local SPAN. Otherwise
     # create a port mirroring switching profile for L3SPAN.
     if self._is_local_span(context, src_port_id, dest_port_id):
         self._create_local_span(context, src_port_id, dest_port_id,
                                 nsx_direction, tags)
     else:
         self._create_l3span(context, src_port_id, dest_port_id,
                             nsx_direction, tags)
Example #10
 def test_metadata_proxy_with_create_network(self):
     # Test if native metadata proxy is enabled on a network when it is
     # created.
     with mock.patch.object(nsx_resources.LogicalPort,
                            'create') as create_logical_port:
         with self.network() as network:
             nsx_net_id = self.plugin._get_network_nsx_id(
                 context.get_admin_context(), network['network']['id'])
             tags = utils.build_v3_tags_payload(
                 network['network'],
                 resource_type='os-neutron-net-id',
                 project_name=None)
             name = utils.get_name_and_uuid(
                 '%s-%s' %
                 ('mdproxy', network['network']['name'] or 'network'),
                 network['network']['id'])
             create_logical_port.assert_called_once_with(
                 nsx_net_id,
                 cfg.CONF.nsx_v3.metadata_proxy_uuid,
                 tags=tags,
                 name=name,
                 attachment_type=nsx_constants.ATTACHMENT_MDPROXY)
Example #11
    def test_policy_update_profile(self, *mocks):
        # test the switch profile update when a QoS policy is updated
        fields = base_object.get_updatable_fields(policy_object.QosPolicy,
                                                  self.policy_data['policy'])
        with mock.patch.object(
                nsxlib, 'update_qos_switching_profile') as update_profile:
            with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
                            return_value=self.policy):
                with mock.patch('neutron.objects.qos.policy.QosPolicy.update'):
                    self.qos_plugin.update_policy(self.ctxt, self.policy.id,
                                                  {'policy': fields})
                    # verify that the profile was updated with the correct data
                    self.policy_data["policy"]["id"] = self.policy.id
                    expected_tags = utils.build_v3_tags_payload(
                        self.policy_data["policy"],
                        resource_type='os-neutron-qos-id',
                        project_name=self.ctxt.tenant_name)

                    update_profile.assert_called_once_with(
                        self.fake_profile_id,
                        description=self.policy_data["policy"]["description"],
                        name=self.policy_data["policy"]["name"],
                        tags=expected_tags)
Example #12
    def test_policy_update_profile(self, *mocks):
        # test the switch profile update when a QoS policy is updated
        fields = base_object.get_updatable_fields(
            policy_object.QosPolicy, self.policy_data['policy'])
        with mock.patch.object(nsxlib,
            'update_qos_switching_profile') as update_profile:
            with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
                return_value=self.policy):
                with mock.patch('neutron.objects.qos.policy.QosPolicy.update'):
                    self.qos_plugin.update_policy(
                        self.ctxt, self.policy.id, {'policy': fields})
                    # verify that the profile was updated with the correct data
                    self.policy_data["policy"]["id"] = self.policy.id
                    expected_tags = utils.build_v3_tags_payload(
                        self.policy_data["policy"],
                        resource_type='os-neutron-qos-id',
                        project_name=self.ctxt.tenant_name)

                    update_profile.assert_called_once_with(
                        self.fake_profile_id,
                        description=self.policy_data["policy"]["description"],
                        name=self.policy_data["policy"]["name"],
                        tags=expected_tags
                    )
Example #13
 def create_l2_gateway_connection(self, context, l2_gateway_connection):
     """Create a L2 gateway connection."""
     #TODO(abhiraut): Move backend logic in a separate method
     gw_connection = l2_gateway_connection.get(l2gw_const.
                                               CONNECTION_RESOURCE_NAME)
     network_id = gw_connection.get(l2gw_const.NETWORK_ID)
     self._validate_network(context, network_id)
     l2gw_connection = super(
         NsxV3Driver, self).create_l2_gateway_connection(
             context, l2_gateway_connection)
     l2gw_id = gw_connection.get(l2gw_const.L2GATEWAY_ID)
     devices = self._get_l2_gateway_devices(context, l2gw_id)
     # In NSXv3, there will be only one device configured per L2 gateway.
     # The name of the device shall carry the backend bridge cluster's UUID.
     device_name = devices[0].get('device_name')
     # The seg-id will be provided either during gateway create or gateway
     # connection create. l2gateway_db_mixin makes sure that it is
     # configured one way or the other.
     seg_id = gw_connection.get(l2gw_const.SEG_ID)
     if not seg_id:
         # Seg-id was not passed as part of connection-create. Retrieve
         # seg-id from L2 gateway's interface.
         interface = self._get_l2_gw_interfaces(context, devices[0]['id'])
         seg_id = interface[0].get(l2gw_const.SEG_ID)
     self._validate_segment_id(seg_id)
     try:
         tags = nsx_utils.build_v3_tags_payload(
             gw_connection, resource_type='os-neutron-l2gw-id',
             project_name=context.tenant_name)
         bridge_endpoint = nsxlib.create_bridge_endpoint(
             device_name=device_name,
             seg_id=seg_id,
             tags=tags)
     except nsx_exc.ManagerError:
         LOG.exception(_LE("Unable to update NSX backend, rolling back "
                           "changes on neutron"))
         with excutils.save_and_reraise_exception():
             super(NsxV3Driver,
                   self).delete_l2_gateway_connection(context,
                                                      l2gw_connection['id'])
     # Create a logical port and connect it to the bridge endpoint.
     tenant_id = gw_connection['tenant_id']
     if context.is_admin and not tenant_id:
         tenant_id = context.tenant_id
     #TODO(abhiraut): Consider specifying the name of the port
     port_dict = {'port': {
                     'tenant_id': tenant_id,
                     'network_id': network_id,
                     'mac_address': constants.ATTR_NOT_SPECIFIED,
                     'admin_state_up': True,
                     'fixed_ips': [],
                     'device_id': bridge_endpoint['id'],
                     'device_owner': nsx_constants.BRIDGE_ENDPOINT,
                     'name': '', }}
     try:
         #TODO(abhiraut): Consider adding UT for port check once UTs are
         #                refactored
         port = self._core_plugin.create_port(context, port_dict,
                                              l2gw_port_check=True)
         # Deallocate IP address from the port.
         for fixed_ip in port.get('fixed_ips', []):
             self._core_plugin._delete_ip_allocation(context, network_id,
                                                     fixed_ip['subnet_id'],
                                                     fixed_ip['ip_address'])
         LOG.debug("IP addresses deallocated on port %s", port['id'])
     except (nsx_exc.ManagerError,
             n_exc.NeutronException):
         with excutils.save_and_reraise_exception():
             LOG.exception(_LE("Unable to create L2 gateway port, "
                               "rolling back changes on neutron"))
             nsxlib.delete_bridge_endpoint(bridge_endpoint['id'])
             super(NsxV3Driver,
                   self).delete_l2_gateway_connection(context,
                                                      l2gw_connection['id'])
     try:
         # Update neutron's database with the mappings.
         nsx_db.add_l2gw_connection_mapping(
             session=context.session,
             connection_id=l2gw_connection['id'],
             bridge_endpoint_id=bridge_endpoint['id'],
             port_id=port['id'])
     except db_exc.DBError:
         with excutils.save_and_reraise_exception():
             LOG.exception(_LE("Unable to add L2 gateway connection "
                               "mappings, rolling back changes on neutron"))
             nsxlib.delete_bridge_endpoint(bridge_endpoint['id'])
             super(NsxV3Driver,
                   self).delete_l2_gateway_connection(context,
                                                      l2gw_connection['id'])
     return l2gw_connection
Example #14
 def _get_tags(self, context, policy):
     policy_dict = {'id': policy.id, 'tenant_id': policy.tenant_id}
     return utils.build_v3_tags_payload(
         policy_dict, resource_type='os-neutron-qos-id',
         project_name=context.tenant_name)