def _build_gateway_device_body(tenant_id, display_name, neutron_id,
                               connector_type, connector_ip,
                               client_certificate, tz_uuid):
    """Assemble the NSX request body describing a network gateway device."""
    # Map Neutron transport types onto NSX connector type identifiers.
    type_map = {
        utils.NetworkTypes.STT: "STTConnector",
        utils.NetworkTypes.GRE: "GREConnector",
        utils.NetworkTypes.BRIDGE: "BridgeConnector",
        "ipsec%s" % utils.NetworkTypes.STT: "IPsecSTT",
        "ipsec%s" % utils.NetworkTypes.GRE: "IPsecGRE",
    }
    body = {
        "display_name": utils.check_and_truncate(display_name),
        "tags": utils.get_tags(os_tid=tenant_id, q_gw_dev_id=neutron_id),
        "admin_status_enabled": True,
    }
    nsx_type = type_map.get(connector_type)
    # Only attach a transport connector when both pieces are available.
    if connector_ip and nsx_type:
        body["transport_connectors"] = [{"transport_zone_uuid": tz_uuid,
                                         "ip_address": connector_ip,
                                         "type": nsx_type}]
    if client_certificate:
        body["credential"] = {
            "client_certificate": {"pem_encoded": client_certificate},
            "type": "SecurityCertificateCredential",
        }
    return body
def create_lport(cluster, lswitch_uuid, tenant_id, neutron_port_id,
                 display_name, device_id, admin_status_enabled,
                 mac_address=None, fixed_ips=None, port_security_enabled=None,
                 security_profiles=None, queue_id=None,
                 mac_learning_enabled=None, allowed_address_pairs=None):
    """Creates a logical port on the assigned logical switch."""
    lport_obj = {
        "admin_status_enabled": admin_status_enabled,
        "display_name": utils.check_and_truncate(display_name),
        "tags": utils.get_tags(os_tid=tenant_id,
                               q_port_id=neutron_port_id,
                               vm_id=utils.device_id_to_vm_id(device_id)),
    }
    # Fold optional port features (security, QoS, MAC learning, ...) in.
    _configure_extensions(lport_obj, mac_address, fixed_ips,
                          port_security_enabled, security_profiles,
                          queue_id, mac_learning_enabled,
                          allowed_address_pairs)
    path = nsxlib._build_uri_path(LSWITCHPORT_RESOURCE,
                                  parent_resource_id=lswitch_uuid)
    result = nsxlib.do_request(HTTP_POST, path, jsonutils.dumps(lport_obj),
                               cluster=cluster)
    LOG.debug("Created logical port %(result)s on logical switch %(uuid)s",
              {'result': result['uuid'], 'uuid': lswitch_uuid})
    return result
def _build_gateway_device_body(tenant_id, display_name, neutron_id,
                               connector_type, connector_ip,
                               client_certificate, tz_uuid):
    """Build the NSX gateway-device request body from Neutron attributes."""
    connector_type_mappings = {
        utils.NetworkTypes.STT: "STTConnector",
        utils.NetworkTypes.GRE: "GREConnector",
        utils.NetworkTypes.BRIDGE: "BridgeConnector",
        'ipsec%s' % utils.NetworkTypes.STT: "IPsecSTT",
        'ipsec%s' % utils.NetworkTypes.GRE: "IPsecGRE",
    }
    nsx_connector_type = connector_type_mappings.get(connector_type)
    body = {"display_name": utils.check_and_truncate(display_name),
            "tags": utils.get_tags(os_tid=tenant_id,
                                   q_gw_dev_id=neutron_id),
            "admin_status_enabled": True}
    # A transport connector needs both an IP and a recognized type.
    if connector_ip and nsx_connector_type:
        connector = {"transport_zone_uuid": tz_uuid,
                     "ip_address": connector_ip,
                     "type": nsx_connector_type}
        body["transport_connectors"] = [connector]
    if client_certificate:
        credential = {"client_certificate":
                      {"pem_encoded": client_certificate},
                      "type": "SecurityCertificateCredential"}
        body["credential"] = credential
    return body
def create_l2_gw_service(cluster, tenant_id, display_name, devices):
    """Create a NSX Layer-2 Network Gateway Service.

    :param cluster: The target NSX cluster
    :param tenant_id: Identifier of the Openstack tenant for which
    the gateway service.
    :param display_name: Descriptive name of this gateway service
    :param devices: List of transport node uuids (and network
    interfaces on them) to use for the network gateway service
    :raise NsxApiException: if there is a problem while communicating
    with the NSX controller
    """
    # NOTE(salvatore-orlando): This is a little confusing, but device_id in
    # NSX is actually the identifier a physical interface on the gateway
    # device, which in the Neutron API is referred as interface_name
    gateways = []
    for device in devices:
        gateways.append({"transport_node_uuid": device['id'],
                         "device_id": device['interface_name'],
                         "type": "L2Gateway"})
    gwservice_obj = {
        "display_name": utils.check_and_truncate(display_name),
        "tags": utils.get_tags(os_tid=tenant_id),
        "gateways": gateways,
        "type": "L2GatewayServiceConfig",
    }
    return nsxlib.do_request(
        HTTP_POST,
        nsxlib._build_uri_path(GWSERVICE_RESOURCE),
        jsonutils.dumps(gwservice_obj),
        cluster=cluster)
def create_l2_gw_service(cluster, tenant_id, display_name, devices):
    """Create a NSX Layer-2 Network Gateway Service.

    :param cluster: The target NSX cluster
    :param tenant_id: Identifier of the Openstack tenant for which
    the gateway service.
    :param display_name: Descriptive name of this gateway service
    :param devices: List of transport node uuids (and network
    interfaces on them) to use for the network gateway service
    :raise NsxApiException: if there is a problem while communicating
    with the NSX controller
    """
    # NOTE(salvatore-orlando): This is a little confusing, but device_id in
    # NSX is actually the identifier a physical interface on the gateway
    # device, which in the Neutron API is referred as interface_name
    gateways = [
        {"transport_node_uuid": device['id'],
         "device_id": device['interface_name'],
         "type": "L2Gateway"}
        for device in devices
    ]
    gwservice_obj = {"display_name": utils.check_and_truncate(display_name),
                     "tags": utils.get_tags(os_tid=tenant_id),
                     "gateways": gateways,
                     "type": "L2GatewayServiceConfig"}
    uri = _build_uri_path(GWSERVICE_RESOURCE)
    return do_request("POST", uri, json.dumps(gwservice_obj),
                      cluster=cluster)
def update_port(cluster, lswitch_uuid, lport_uuid, neutron_port_id, tenant_id,
                display_name, device_id, admin_status_enabled,
                mac_address=None, fixed_ips=None, port_security_enabled=None,
                security_profiles=None, queue_id=None,
                mac_learning_enabled=None, allowed_address_pairs=None):
    """Update an existing logical port on the NSX backend."""
    lport_obj = {
        'admin_status_enabled': admin_status_enabled,
        'display_name': utils.check_and_truncate(display_name),
        'tags': utils.get_tags(os_tid=tenant_id,
                               q_port_id=neutron_port_id,
                               vm_id=utils.device_id_to_vm_id(device_id)),
    }
    # Fold optional port features (security, QoS, MAC learning, ...) in.
    _configure_extensions(lport_obj, mac_address, fixed_ips,
                          port_security_enabled, security_profiles,
                          queue_id, mac_learning_enabled,
                          allowed_address_pairs)
    path = "/ws.v1/lswitch/" + lswitch_uuid + "/lport/" + lport_uuid
    try:
        result = nsxlib.do_request(HTTP_PUT, path,
                                   jsonutils.dumps(lport_obj),
                                   cluster=cluster)
    except exception.NotFound as e:
        LOG.error(_LE("Port or Network not found, Error: %s"), str(e))
        raise exception.PortNotFoundOnNetwork(
            port_id=lport_uuid, net_id=lswitch_uuid)
    LOG.debug("Updated logical port %(result)s "
              "on logical switch %(uuid)s",
              {'result': result['uuid'], 'uuid': lswitch_uuid})
    return result
def update_security_profile(cluster, spid, name):
    """Rename an existing NSX security profile."""
    uri = _build_uri_path(SECPROF_RESOURCE, resource_id=spid)
    body = json.dumps({"display_name": utils.check_and_truncate(name)})
    return do_request(HTTP_PUT, uri, body, cluster=cluster)
def update_port(cluster, lswitch_uuid, lport_uuid, neutron_port_id, tenant_id,
                display_name, device_id, admin_status_enabled,
                mac_address=None, fixed_ips=None, port_security_enabled=None,
                security_profiles=None, queue_id=None,
                mac_learning_enabled=None, allowed_address_pairs=None):
    """Update an existing logical port on the NSX backend."""
    lport_obj = {
        'admin_status_enabled': admin_status_enabled,
        'display_name': utils.check_and_truncate(display_name),
        'tags': utils.get_tags(os_tid=tenant_id,
                               q_port_id=neutron_port_id,
                               vm_id=utils.device_id_to_vm_id(device_id)),
    }
    # Fold optional port features (security, QoS, MAC learning, ...) in.
    _configure_extensions(lport_obj, mac_address, fixed_ips,
                          port_security_enabled, security_profiles,
                          queue_id, mac_learning_enabled,
                          allowed_address_pairs)
    path = "/ws.v1/lswitch/" + lswitch_uuid + "/lport/" + lport_uuid
    try:
        result = do_request(HTTP_PUT, path, json.dumps(lport_obj),
                            cluster=cluster)
    except exception.NotFound as e:
        LOG.error(_("Port or Network not found, Error: %s"), str(e))
        raise exception.PortNotFoundOnNetwork(
            port_id=lport_uuid, net_id=lswitch_uuid)
    LOG.debug(_("Updated logical port %(result)s "
                "on logical switch %(uuid)s"),
              {'result': result['uuid'], 'uuid': lswitch_uuid})
    return result
def create_lqueue(cluster, queue_data):
    """Create a QoS queue on the NSX backend.

    :param cluster: the target NSX cluster
    :param queue_data: dictionary of Neutron queue attributes
    :returns: the uuid of the queue created on the backend
    :raises exception.NeutronException: if the backend request fails
    """
    # Translate Neutron API attribute names into NSX field names.
    params = {
        'name': 'display_name',
        'qos_marking': 'qos_marking',
        'min': 'min_bandwidth_rate',
        'max': 'max_bandwidth_rate',
        'dscp': 'dscp',
    }
    # Fix: use items() instead of the Python 2-only iteritems() so the
    # code also runs on Python 3; behavior is identical.
    queue_obj = dict(
        (nsx_name, queue_data.get(api_name))
        for api_name, nsx_name in params.items()
        if attr.is_attr_set(queue_data.get(api_name))
    )
    if 'display_name' in queue_obj:
        queue_obj['display_name'] = utils.check_and_truncate(
            queue_obj['display_name'])
    queue_obj['tags'] = utils.get_tags()
    try:
        return do_request(HTTP_POST,
                          _build_uri_path(LQUEUE_RESOURCE),
                          jsonutils.dumps(queue_obj),
                          cluster=cluster)['uuid']
    except api_exc.NsxApiException:
        # FIXME(salv-orlando): This should not raise NeutronException
        with excutils.save_and_reraise_exception():
            raise exception.NeutronException()
def create_lswitch(cluster, neutron_net_id, tenant_id, display_name,
                   transport_zones_config, shared=None, **kwargs):
    """Create a logical switch on the NSX backend for a Neutron network."""
    # The tag scope adopts a slightly different naming convention for
    # historical reasons
    tags = utils.get_tags(os_tid=tenant_id, quantum_net_id=neutron_net_id)
    # TODO(salv-orlando): Now that we have async status synchronization
    # this tag is perhaps not needed anymore
    if shared:
        tags.append({"tag": "true", "scope": "shared"})
    if "tags" in kwargs:
        tags.extend(kwargs["tags"])
    lswitch_obj = {"display_name": utils.check_and_truncate(display_name),
                   "transport_zones": transport_zones_config,
                   "tags": tags}
    lswitch = do_request(HTTP_POST,
                         _build_uri_path(LSWITCH_RESOURCE),
                         json.dumps(lswitch_obj),
                         cluster=cluster)
    LOG.debug(_("Created logical switch: %s"), lswitch['uuid'])
    return lswitch
def create_lport(cluster, lswitch_uuid, tenant_id, neutron_port_id,
                 display_name, device_id, admin_status_enabled,
                 mac_address=None, fixed_ips=None, port_security_enabled=None,
                 security_profiles=None, queue_id=None,
                 mac_learning_enabled=None, allowed_address_pairs=None):
    """Creates a logical port on the assigned logical switch."""
    lport_obj = {
        "admin_status_enabled": admin_status_enabled,
        "display_name": utils.check_and_truncate(display_name),
        "tags": utils.get_tags(os_tid=tenant_id,
                               q_port_id=neutron_port_id,
                               vm_id=utils.device_id_to_vm_id(device_id)),
    }
    # Fold optional port features (security, QoS, MAC learning, ...) in.
    _configure_extensions(lport_obj, mac_address, fixed_ips,
                          port_security_enabled, security_profiles,
                          queue_id, mac_learning_enabled,
                          allowed_address_pairs)
    path = _build_uri_path(LSWITCHPORT_RESOURCE,
                           parent_resource_id=lswitch_uuid)
    result = do_request(HTTP_POST, path, json.dumps(lport_obj),
                        cluster=cluster)
    LOG.debug(_("Created logical port %(result)s on logical switch %(uuid)s"),
              {'result': result['uuid'], 'uuid': lswitch_uuid})
    return result
def create_lqueue(cluster, queue_data):
    """Create a QoS queue on the NSX backend.

    :param cluster: the target NSX cluster
    :param queue_data: dictionary of Neutron queue attributes
    :returns: the uuid of the queue created on the backend
    :raises exception.NeutronException: if the backend request fails
    """
    # Translate Neutron API attribute names into NSX field names.
    params = {
        'name': 'display_name',
        'qos_marking': 'qos_marking',
        'min': 'min_bandwidth_rate',
        'max': 'max_bandwidth_rate',
        'dscp': 'dscp',
    }
    # Fix: use items() instead of the Python 2-only iteritems() so the
    # code also runs on Python 3; behavior is identical.
    queue_obj = dict((nsx_name, queue_data.get(api_name))
                     for api_name, nsx_name in params.items()
                     if attr.is_attr_set(queue_data.get(api_name)))
    if 'display_name' in queue_obj:
        queue_obj['display_name'] = utils.check_and_truncate(
            queue_obj['display_name'])
    queue_obj['tags'] = utils.get_tags()
    try:
        return nsxlib.do_request(HTTP_POST,
                                 nsxlib._build_uri_path(LQUEUE_RESOURCE),
                                 jsonutils.dumps(queue_obj),
                                 cluster=cluster)['uuid']
    except api_exc.NsxApiException:
        # FIXME(salv-orlando): This should not raise NeutronException
        with excutils.save_and_reraise_exception():
            raise exception.NeutronException()
def create_security_profile(cluster, tenant_id, neutron_id, security_profile):
    """Create a security profile on the NSX backend.

    :param cluster: a NSX cluster object reference
    :param tenant_id: identifier of the Neutron tenant
    :param neutron_id: neutron security group identifier
    :param security_profile: dictionary with data for
    configuring the NSX security profile.
    """
    path = "/ws.v1/security-profile"
    # Allow all dhcp responses and all ingress traffic
    dhcp_egress = {'ethertype': 'IPv4',
                   'protocol': constants.PROTO_NUM_UDP,
                   'port_range_min': constants.DHCP_RESPONSE_PORT,
                   'port_range_max': constants.DHCP_RESPONSE_PORT,
                   'ip_prefix': '0.0.0.0/0'}
    hidden_rules = {'logical_port_egress_rules': [dhcp_egress],
                    'logical_port_ingress_rules': [{'ethertype': 'IPv4'},
                                                   {'ethertype': 'IPv6'}]}
    # NOTE(salv-orlando): neutron-id tags are prepended with 'q' for
    # historical reasons
    body = mk_body(
        tags=utils.get_tags(os_tid=tenant_id, q_sec_group_id=neutron_id),
        display_name=utils.check_and_truncate(security_profile.get('name')),
        logical_port_ingress_rules=(
            hidden_rules['logical_port_ingress_rules']),
        logical_port_egress_rules=hidden_rules['logical_port_egress_rules'])
    rsp = nsxlib.do_request(HTTP_POST, path, body, cluster=cluster)
    if security_profile.get('name') == 'default':
        # If security group is default allow ip traffic between
        # members of the same security profile is allowed and ingress traffic
        # from the switch
        egress = [{'ethertype': 'IPv4', 'profile_uuid': rsp['uuid']},
                  {'ethertype': 'IPv6', 'profile_uuid': rsp['uuid']}]
        ingress = [{'ethertype': 'IPv4'}, {'ethertype': 'IPv6'}]
        update_security_group_rules(cluster, rsp['uuid'],
                                    {'logical_port_egress_rules': egress,
                                     'logical_port_ingress_rules': ingress})
    LOG.debug(_("Created Security Profile: %s"), rsp)
    return rsp
def update_l2_gw_service(cluster, gateway_id, display_name):
    """Update the display name of an L2 gateway service."""
    # TODO(salvatore-orlando): Allow updates for gateways too
    gwservice_obj = get_l2_gw_service(cluster, gateway_id)
    if not display_name:
        # Nothing to update
        return gwservice_obj
    gwservice_obj["display_name"] = utils.check_and_truncate(display_name)
    uri = _build_uri_path(GWSERVICE_RESOURCE, resource_id=gateway_id)
    return do_request("PUT", uri, json.dumps(gwservice_obj),
                      cluster=cluster)
def update_lswitch(cluster, lswitch_id, display_name, tenant_id=None,
                   **kwargs):
    """Rename a logical switch and refresh its tags on the NSX backend."""
    uri = _build_uri_path(LSWITCH_RESOURCE, resource_id=lswitch_id)
    lswitch_obj = {"display_name": utils.check_and_truncate(display_name),
                   "tags": utils.get_tags(os_tid=tenant_id)}
    extra_tags = kwargs.get("tags")
    if "tags" in kwargs:
        lswitch_obj["tags"].extend(extra_tags)
    try:
        return do_request(HTTP_PUT, uri, json.dumps(lswitch_obj),
                          cluster=cluster)
    except exception.NotFound as e:
        LOG.error(_("Network not found, Error: %s"), str(e))
        raise exception.NetworkNotFound(net_id=lswitch_id)
def update_lswitch(cluster, lswitch_id, display_name, tenant_id=None,
                   **kwargs):
    """Rename a logical switch and refresh its tags on the NSX backend."""
    lswitch_obj = {"display_name": utils.check_and_truncate(display_name),
                   "tags": utils.get_tags(os_tid=tenant_id)}
    if "tags" in kwargs:
        lswitch_obj["tags"].extend(kwargs["tags"])
    uri = nsxlib._build_uri_path(LSWITCH_RESOURCE, resource_id=lswitch_id)
    try:
        return nsxlib.do_request(HTTP_PUT, uri, json.dumps(lswitch_obj),
                                 cluster=cluster)
    except exception.NotFound as e:
        LOG.error(_("Network not found, Error: %s"), str(e))
        raise exception.NetworkNotFound(net_id=lswitch_id)
def update_implicit_routing_lrouter(cluster, r_id, display_name, nexthop):
    """Update name and/or default next hop of an implicit-routing lrouter."""
    lrouter_obj = get_lrouter(cluster, r_id)
    if not display_name and not nexthop:
        # Nothing to update
        return lrouter_obj
    # Keep the existing name when the truncated new name is empty.
    new_name = utils.check_and_truncate(display_name)
    if new_name:
        lrouter_obj["display_name"] = new_name
    if nexthop:
        nh_element = lrouter_obj["routing_config"].get(
            "default_route_next_hop")
        if nh_element:
            nh_element["gateway_ip_address"] = nexthop
    uri = _build_uri_path(LROUTER_RESOURCE, resource_id=r_id)
    return do_request(HTTP_PUT, uri, jsonutils.dumps(lrouter_obj),
                      cluster=cluster)
def _prepare_lrouter_body(name, neutron_router_id, tenant_id,
                          router_type, distributed=None, **kwargs):
    """Build the request body for creating/updating an NSX logical router."""
    # Extra keyword arguments are folded into the routing configuration.
    routing_config = {"type": router_type}
    routing_config.update(kwargs)
    body = {
        "display_name": utils.check_and_truncate(name),
        "tags": utils.get_tags(os_tid=tenant_id,
                               q_router_id=neutron_router_id),
        "routing_config": routing_config,
        "type": "LogicalRouterConfig",
        "replication_mode": cfg.CONF.NSX.replication_mode,
    }
    # add the distributed key only if not None (ie: True or False)
    if distributed is not None:
        body["distributed"] = distributed
    return body
def create_lswitch(self, name, tz_config, tags=None,
                   port_isolation=False, replication_mode="service"):
    """Create a logical switch through the VCNS client.

    :param name: display name (truncated to the NSX limit)
    :param tz_config: transport zone bindings for the switch
    :param tags: optional list of tag dicts to attach
    :param port_isolation: whether to enable port isolation on the switch
    :param replication_mode: NSX replication mode; skipped when falsy
    :returns: the body of the VCNS create_lswitch response
    """
    lsconfig = {
        "display_name": utils.check_and_truncate(name),
        "tags": tags or [],
        "type": "LogicalSwitchConfig",
        "_schema": "/ws.v1/schema/LogicalSwitchConfig",
        "transport_zones": tz_config,
    }
    # Bug fix: the original test was "port_isolation is bool", which
    # compares the value with the *type* object and is never true for
    # True/False, so the flag was silently dropped. isinstance() sends
    # the flag whenever a boolean is supplied (including the default).
    if isinstance(port_isolation, bool):
        lsconfig["port_isolation_enabled"] = port_isolation
    if replication_mode:
        lsconfig["replication_mode"] = replication_mode
    response = self.vcns.create_lswitch(lsconfig)[1]
    return response
def create_lswitch(self, name, tz_config, tags=None,
                   port_isolation=False, replication_mode="service"):
    """Create a logical switch through the VCNS client.

    :param name: display name (truncated to the NSX limit)
    :param tz_config: transport zone bindings for the switch
    :param tags: optional list of tag dicts to attach
    :param port_isolation: whether to enable port isolation on the switch
    :param replication_mode: NSX replication mode; skipped when falsy
    :returns: the body of the VCNS create_lswitch response
    """
    lsconfig = {
        'display_name': utils.check_and_truncate(name),
        "tags": tags or [],
        "type": "LogicalSwitchConfig",
        "_schema": "/ws.v1/schema/LogicalSwitchConfig",
        "transport_zones": tz_config,
    }
    # Bug fix: the original test was "port_isolation is bool", which
    # compares the value with the *type* object and is never true for
    # True/False, so the flag was silently dropped. isinstance() sends
    # the flag whenever a boolean is supplied (including the default).
    if isinstance(port_isolation, bool):
        lsconfig["port_isolation_enabled"] = port_isolation
    if replication_mode:
        lsconfig["replication_mode"] = replication_mode
    response = self.vcns.create_lswitch(lsconfig)[1]
    return response
def update_implicit_routing_lrouter(cluster, r_id, display_name, nexthop):
    """Update name and/or default next hop of an implicit-routing lrouter."""
    lrouter_obj = get_lrouter(cluster, r_id)
    if not display_name and not nexthop:
        # Nothing to update
        return lrouter_obj
    # It seems that this is faster than the doing an if on display_name
    lrouter_obj["display_name"] = (utils.check_and_truncate(display_name)
                                   or lrouter_obj["display_name"])
    if nexthop:
        nh_element = lrouter_obj["routing_config"].get(
            "default_route_next_hop")
        if nh_element:
            nh_element["gateway_ip_address"] = nexthop
    return do_request(
        HTTP_PUT,
        _build_uri_path(LROUTER_RESOURCE, resource_id=r_id),
        jsonutils.dumps(lrouter_obj),
        cluster=cluster)
def create_security_profile(cluster, tenant_id, neutron_id, security_profile):
    """Create a security profile on the NSX backend.

    :param cluster: a NSX cluster object reference
    :param tenant_id: identifier of the Neutron tenant
    :param neutron_id: neutron security group identifier
    :param security_profile: dictionary with data for
    configuring the NSX security profile.
    """
    path = "/ws.v1/security-profile"
    # Allow all dhcp responses and all ingress traffic
    hidden_rules = {
        'logical_port_egress_rules': [
            {'ethertype': 'IPv4',
             'protocol': constants.PROTO_NUM_UDP,
             'port_range_min': constants.DHCP_RESPONSE_PORT,
             'port_range_max': constants.DHCP_RESPONSE_PORT,
             'ip_prefix': '0.0.0.0/0'}],
        'logical_port_ingress_rules': [
            {'ethertype': 'IPv4'},
            {'ethertype': 'IPv6'}],
    }
    display_name = utils.check_and_truncate(security_profile.get('name'))
    # NOTE(salv-orlando): neutron-id tags are prepended with 'q' for
    # historical reasons
    body = mk_body(
        tags=utils.get_tags(os_tid=tenant_id, q_sec_group_id=neutron_id),
        display_name=display_name,
        logical_port_ingress_rules=(
            hidden_rules['logical_port_ingress_rules']),
        logical_port_egress_rules=hidden_rules['logical_port_egress_rules'])
    rsp = nsxlib.do_request(HTTP_POST, path, body, cluster=cluster)
    if security_profile.get('name') == 'default':
        # If security group is default allow ip traffic between
        # members of the same security profile is allowed and ingress traffic
        # from the switch
        rules = {
            'logical_port_egress_rules': [
                {'ethertype': 'IPv4', 'profile_uuid': rsp['uuid']},
                {'ethertype': 'IPv6', 'profile_uuid': rsp['uuid']}],
            'logical_port_ingress_rules': [
                {'ethertype': 'IPv4'},
                {'ethertype': 'IPv6'}],
        }
        update_security_group_rules(cluster, rsp['uuid'], rules)
    LOG.debug(_("Created Security Profile: %s"), rsp)
    return rsp
def _prepare_lrouter_body(name, neutron_router_id, tenant_id,
                          router_type, distributed=None, **kwargs):
    """Build the request body for creating/updating an NSX logical router."""
    body = {
        "display_name": utils.check_and_truncate(name),
        "tags": utils.get_tags(os_tid=tenant_id,
                               q_router_id=neutron_router_id),
        "routing_config": {"type": router_type},
        "type": "LogicalRouterConfig",
        "replication_mode": cfg.CONF.NSX.replication_mode,
    }
    # add the distributed key only if not None (ie: True or False)
    if distributed is not None:
        body['distributed'] = distributed
    # Extra keyword arguments are folded into the routing configuration.
    if kwargs:
        body["routing_config"].update(kwargs)
    return body
def update_lswitch(cluster, lswitch_id, display_name, tenant_id=None,
                   **kwargs):
    """Rename a logical switch, optionally replacing its tags."""
    uri = nsxlib._build_uri_path(LSWITCH_RESOURCE, resource_id=lswitch_id)
    lswitch_obj = {"display_name": utils.check_and_truncate(display_name)}
    # NOTE: tag update will not 'merge' existing tags with new ones.
    tags = utils.get_tags(os_tid=tenant_id) if tenant_id else []
    # The 'tags' kwarg might be present yet set to None.
    tags.extend(kwargs.get('tags') or [])
    if tags:
        lswitch_obj['tags'] = tags
    try:
        return nsxlib.do_request(HTTP_PUT, uri, jsonutils.dumps(lswitch_obj),
                                 cluster=cluster)
    except exception.NotFound as e:
        LOG.error(_LE("Network not found, Error: %s"), str(e))
        raise exception.NetworkNotFound(net_id=lswitch_id)
def create_lswitch(cluster, neutron_net_id, tenant_id, display_name,
                   transport_zones_config, shared=None, **kwargs):
    """Create a logical switch on the NSX backend for a Neutron network."""
    # The tag scope adopts a slightly different naming convention for
    # historical reasons
    lswitch_obj = {
        "display_name": utils.check_and_truncate(display_name),
        "transport_zones": transport_zones_config,
        "tags": utils.get_tags(os_tid=tenant_id,
                               quantum_net_id=neutron_net_id),
    }
    # TODO(salv-orlando): Now that we have async status synchronization
    # this tag is perhaps not needed anymore
    if shared:
        lswitch_obj["tags"].append({"tag": "true", "scope": "shared"})
    if "tags" in kwargs:
        lswitch_obj["tags"].extend(kwargs["tags"])
    lswitch = do_request(HTTP_POST,
                         _build_uri_path(LSWITCH_RESOURCE),
                         json.dumps(lswitch_obj),
                         cluster=cluster)
    LOG.debug(_("Created logical switch: %s"), lswitch['uuid'])
    return lswitch
def test_check_and_truncate_name_long_name(self):
    # A name longer than the cap must come back exactly
    # MAX_DISPLAY_NAME_LEN characters long.
    long_name = 'this_is_a_port_whose_name_is_longer_than_40_chars'
    truncated = utils.check_and_truncate(long_name)
    self.assertEqual(len(truncated), utils.MAX_DISPLAY_NAME_LEN)
def test_check_and_truncate_name_with_short_name(self):
    # Names within the length limit must be returned unchanged.
    short_name = 'foo_port_name'
    self.assertEqual(short_name, utils.check_and_truncate(short_name))
def test_check_and_truncate_name_with_none(self):
    # A None name must be normalized to the empty string.
    self.assertEqual('', utils.check_and_truncate(None))