def query_lrouters(cluster, fields=None, filters=None):
    return get_all_query_pages(
        _build_uri_path(LROUTER_RESOURCE,
                        fields=fields,
                        relations='LogicalRouterStatus',
                        filters=filters),
        cluster)


def lookup_switches_by_tag(cluster, neutron_net_id):
    # Fetch extra logical switches tagged with the given Neutron network id.
    # The cluster handle and network id are passed in explicitly so the
    # helper is self-contained.
    lswitch_query_path = _build_uri_path(
        LSWITCH_RESOURCE,
        fields="uuid,display_name,tags,lport_count",
        relations="LogicalSwitchStatus",
        filters={'tag': neutron_net_id,
                 'tag_scope': 'quantum_net_id'})
    return get_all_query_pages(lswitch_query_path, cluster)


def get_l2_gw_services(cluster, tenant_id=None,
                       fields=None, filters=None):
    actual_filters = dict(filters or {})
    if tenant_id:
        actual_filters['tag'] = tenant_id
        actual_filters['tag_scope'] = 'os_tid'
    return get_all_query_pages(
        _build_uri_path(GWSERVICE_RESOURCE,
                        filters=actual_filters),
        cluster)


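# Illustrative usage (not part of the original module): fetch the gateway
# services tagged for a single tenant. `cluster` is assumed to be an
# initialized NSX cluster handle, as expected by the other query helpers
# in this module; the tenant id is a placeholder.
#
#   gw_services = get_l2_gw_services(cluster, tenant_id='some-tenant-id')

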
def query_nat_rules(cluster, router_id, fields="*", filters=None):
    uri = _build_uri_path(LROUTERNAT_RESOURCE,
                          parent_resource_id=router_id,
                          fields=fields,
                          filters=filters)
    return get_all_query_pages(uri, cluster)


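# Illustrative usage (not part of the original module): list every NAT rule
# configured on a logical router; the router uuid shown is a placeholder.
#
#   nat_rules = query_nat_rules(cluster, 'lrouter-uuid')

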
def get_ports(cluster, networks=None, devices=None, tenants=None):
    vm_filter_obsolete = ""
    vm_filter = ""
    tenant_filter = ""
    # This is used when calling delete_network. Neutron checks to see if
    # the network has any ports.
    if networks:
        # FIXME (Aaron) If we get more than one network_id this won't work
        lswitch = networks[0]
    else:
        lswitch = "*"
    if devices:
        for device_id in devices:
            vm_filter_obsolete = '&'.join(
                ["tag_scope=vm_id",
                 "tag=%s" % utils.device_id_to_vm_id(device_id,
                                                     obfuscate=True),
                 vm_filter_obsolete])
            vm_filter = '&'.join(
                ["tag_scope=vm_id",
                 "tag=%s" % utils.device_id_to_vm_id(device_id),
                 vm_filter])
    if tenants:
        for tenant in tenants:
            tenant_filter = '&'.join(
                ["tag_scope=os_tid",
                 "tag=%s" % tenant,
                 tenant_filter])

    nsx_lports = {}
    lport_fields_str = ("tags,admin_status_enabled,display_name,"
                        "fabric_status_up")
    try:
        lport_query_path_obsolete = (
            "/ws.v1/lswitch/%s/lport?fields=%s&%s%stag_scope=q_port_id"
            "&relations=LogicalPortStatus" %
            (lswitch, lport_fields_str, vm_filter_obsolete, tenant_filter))
        lport_query_path = (
            "/ws.v1/lswitch/%s/lport?fields=%s&%s%stag_scope=q_port_id"
            "&relations=LogicalPortStatus" %
            (lswitch, lport_fields_str, vm_filter, tenant_filter))
        try:
            # NOTE(armando-migliaccio): by querying with obsolete tag first
            # current deployments won't take the performance hit of a double
            # call. In release L-** or M-**, we might want to swap the calls
            # as it's likely that ports with the new tag would outnumber the
            # ones with the old tag
            ports = get_all_query_pages(lport_query_path_obsolete, cluster)
            if not ports:
                ports = get_all_query_pages(lport_query_path, cluster)
        except exception.NotFound:
            LOG.warn(_("Lswitch %s not found in NSX"), lswitch)
            ports = None

        if ports:
            for port in ports:
                for tag in port["tags"]:
                    if tag["scope"] == "q_port_id":
                        nsx_lports[tag["tag"]] = port
    except Exception:
        err_msg = _("Unable to get ports")
        LOG.exception(err_msg)
        raise nsx_exc.NsxPluginException(err_msg=err_msg)
    return nsx_lports


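# Illustrative usage (not part of the original module): retrieve the ports
# on one logical switch that belong to a given device, keyed by their
# Neutron port id tag; both ids shown are placeholders.
#
#   lports = get_ports(cluster,
#                      networks=['lswitch-uuid'],
#                      devices=['neutron-device-id'])

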
def query_security_profiles(cluster, fields=None, filters=None):
    return get_all_query_pages(
        _build_uri_path(SECPROF_RESOURCE,
                        fields=fields,
                        filters=filters),
        cluster)