def discover_vnfr_using_ip(ip_address=None):
    """Discover the VNF record based on the IP assigned to its VM.

    Args:
        ip_address (str): The assigned IP in VM

    Returns:
        dict: The VNF record

    Raises:
        VduWithIpNotFound: in case that there is no VNF/VDU having the given IP
    """
    # Bug fix: the second credential must be the password (was the username twice),
    # consistent with every other bearer_token() call in this module.
    token = bearer_token(OSM_ADMIN_CREDENTIALS.get('username'),
                         OSM_ADMIN_CREDENTIALS.get('password'))
    vnfr = Vnf(token)
    vnfrs = vnfr.get_list()
    vnf_records = vnfrs.json()
    for vnf_record in vnf_records:
        # todo: improve it in case of multiple VDUs per VNF
        vnf_ip_address = vnf_record.get('ip-address', None)
        if vnf_ip_address is not None and vnf_ip_address == ip_address:
            return vnf_record
    raise VduWithIpNotFound(
        "Not found VDU with given IP: {}".format(ip_address))
def main():
    """Publish the VNFs related metrics in the KAFKA_OPENNEBULA_TOPIC.

    For each VM that OSM instantiated on the OpenNebula VIM, the monitoring
    info is fetched over XML-RPC, parsed into per-metric payloads and
    published to Kafka.

    Returns:
        None
    """
    producer = KafkaProducer(
        bootstrap_servers=KAFKA_SERVER,
        api_version=KAFKA_API_VERSION,
        value_serializer=lambda v: json.dumps(v).encode('utf-8'))
    try:
        # Get the UUID of the OpenNebula VIM
        token = identity.bearer_token(OSM_ADMIN_CREDENTIALS.get('username'),
                                      OSM_ADMIN_CREDENTIALS.get('password'))
        one_vim_uuid = get_opennebula_vim_uuid(token)

        # Get the list of VM ids of the OpenNebula NSs
        vm_ids = get_opennebula_vm_ids(token, one_vim_uuid)
        logger.info(
            'The list of VMs {} have been detected given the VIM uuid `{}``'.
            format(vm_ids, one_vim_uuid))

        # Get the metrics for each running VM in OpenNebula instantiated due to the OSM
        for vm_id in vm_ids:
            # Get the info of the VM by given session and VM id
            one_vm_info = OneVMInfo()
            response = one_vm_info.get(XML_RPC_SERVER, XML_RPC_SESSION, vm_id)
            raw_response = response.text

            # Parse the response and keep the monitoring metrics as a dict
            monitoring_info, last_poll = export_data_from_one_vm_info(raw_response)
            if last_poll is None:
                # Bug fix: skip only this VM rather than aborting the whole run
                # (the original `return` also left the producer unclosed).
                logger.warning("The last poll is {}".format(last_poll))
                continue

            # Convert the unix time in UCT iso8601 format
            timestamp = convert_unix_timestamp_to_datetime_str(float(last_poll))

            for metric, value in monitoring_info.items():
                metric_type = metric.lower()
                payload = {
                    "vdu_uuid": vm_id,
                    "type": metric_type,
                    "value": value,
                    "unit": get_unit_by_metric(metric_type),
                    "timestamp": timestamp
                }
                # Publish the metric
                request = producer.send(KAFKA_OPENNEBULA_TOPIC, payload)
                try:
                    # set timeout in 5 sec
                    request.get(timeout=5)
                except KafkaError as ke:
                    logger.error(ke)
    finally:
        # Always release the producer, even if an unexpected error occurs.
        producer.close()
def __init__(self, ns_uuid):
    """Constructor

    Args:
        ns_uuid (str): The id of the running NS
    """
    self.ns_uuid = ns_uuid
    # Bug fix: the second credential must be the password (was the username twice).
    self.__token = basic_token(OSM_ADMIN_CREDENTIALS.get('username'),
                               OSM_ADMIN_CREDENTIALS.get('password'))
def get_ns_name(ns_uuid):
    """Return the name of the NS identified by the given uuid.

    Args:
        ns_uuid (str): The NS record uuid

    Returns:
        str: The NS name as reported by the OSM NBI
    """
    token = bearer_token(OSM_ADMIN_CREDENTIALS.get('username'),
                         OSM_ADMIN_CREDENTIALS.get('password'))
    network_service = NetworkService(token)
    ns_details = network_service.get(ns_uuid=ns_uuid).json()
    return ns_details['name']
def __init__(self, nsd_name, ns_name, vim_account_name):
    """Constructor

    Args:
        nsd_name (str): The name of the NS descriptor
        ns_name (str): The name of the NS to be instantiated
        vim_account_name (str): The name of the VIM
    """
    self.nsd_name = nsd_name
    self.ns_name = ns_name
    self.vim_account_name = vim_account_name
    # Bug fix: the second credential must be the password (was the username twice).
    self.__token = basic_token(OSM_ADMIN_CREDENTIALS.get('username'),
                               OSM_ADMIN_CREDENTIALS.get('password'))
def count_running_ns():
    """Find the number of the instantiated NSs in OSM r4.

    Returns:
        int: How many NS records report operational-status "running"
    """
    # Bug fix: the second credential must be the password (was the username twice).
    token = bearer_token(OSM_ADMIN_CREDENTIALS.get('username'),
                         OSM_ADMIN_CREDENTIALS.get('password'))
    ns = Ns(token)
    request = ns.get_list()
    ns_list = request.json()
    # Count the running NS instances
    return sum(1 for entry in ns_list
               if entry.get("operational-status") == "running")
def configure_vcdn_ns_after_termination(message):
    """ Configure the vCDN NS after its termination, especially the vDNS

    Args:
        message (dict): The message of termination event in ns topic
    """
    event_state = message.get('operationState', None)
    # Proceed only while the operation is still PROCESSING.
    # NOTE(review): the original comment said "only if it is completed", which
    # contradicts this check — presumably the vDNS must be cleaned up before
    # the termination finishes; confirm the intended state.
    if event_state != "PROCESSING":
        return
    logger.info('A running vCDN service is terminating. Status: {}'.format(event_state))
    ns_uuid = message.get('nsInstanceId', None)
    logger.info('vCDN service uuid is {}'.format(ns_uuid))
    token = bearer_token(OSM_ADMIN_CREDENTIALS.get('username'),
                         OSM_ADMIN_CREDENTIALS.get('password'))
    # check nsd: only vCDN network services are handled
    nsd_ref_name = get_nsd_ref_name(token, ns_uuid)
    if not (nsd_ref_name and nsd_ref_name.startswith(vCDN_NSD_PREFIX)):
        return
    try:
        # Check if event is `terminate`
        operation_uuid = message.get('id', None)
        logger.info("The operation uuid is {}".format(operation_uuid))
        event = get_event(token, operation_uuid)
        if not event or event != "terminate":
            return
        # update the vDNS properly
        clean_vdns_from_regular_vnfs(ns_uuid)
        # fetch the number of spawned faas vnfs from db and update the vDNS
        # configuration properly
        last_operation = get_last_operation(ns_uuid)
        instances_number = last_operation.get('instance_number', 0)
        if instances_number > 0:
            # update the vDNS configuration by deleting the vDNS configuration
            # related to the faas edge vCaches
            clean_vdns_from_faas_vnfs(ns_uuid, instances_number)
    except Exception as ex:
        # Best-effort cleanup: log and swallow any failure
        logger.exception(ex)
def discover_vnf_uuid_by_vnfd_name_index(vnfd_name_index):
    """ Discover the VNF uuid by given the vnfd name and index

    Args:
        vnfd_name_index (str): The VNFd name & index ("<vnfd-ref>.<index>")

    Returns:
        str: the vnf uuid, or None if no running VNF matches
    """
    # Bug fix: the second credential must be the password (was the username twice).
    token = bearer_token(OSM_ADMIN_CREDENTIALS.get('username'),
                         OSM_ADMIN_CREDENTIALS.get('password'))
    # Get the UUIDs of the running NSs
    ns = Ns(token)
    request = ns.get_list()
    nss_response = request.json()
    # Fix: loop variable renamed so it no longer shadows the `ns` client above.
    ns_uuids = [ns_entry.get('id') for ns_entry in nss_response]
    # TODO: what if more than one NSs are running
    # if len(ns_uuids):
    #     raise Exception("More that one NSs are running...")
    vnf_uuid = None
    for ns_uuid in ns_uuids:
        vnf = Vnf(token)
        request = vnf.get_list_by_ns(ns_uuid=ns_uuid)
        vnfs = request.json()
        for i in vnfs:
            # Compose "<vnfd-ref>.<member-vnf-index-ref>" for an exact match
            cur_vnfd_name_index = "{}.{}".format(
                i.get("vnfd-ref"),
                i.get("member-vnf-index-ref"),
            )
            if vnfd_name_index == cur_vnfd_name_index:
                return i.get("id")
    return vnf_uuid
def discover_vdu_uuid_by_vnf_index(vnf_index):
    """ Discover the VDU uuid by given the vnf index

    Args:
        vnf_index (str): The VNF index

    Returns:
        str: the vdu uuid, or None if no running VNF matches
    """
    # Bug fix: the second credential must be the password (was the username twice).
    token = bearer_token(OSM_ADMIN_CREDENTIALS.get('username'),
                         OSM_ADMIN_CREDENTIALS.get('password'))
    # Get the UUIDs of the running NSs
    ns = Ns(token)
    request = ns.get_list()
    nss_response = request.json()
    # Fix: loop variable renamed so it no longer shadows the `ns` client above.
    ns_uuids = [ns_entry.get('id') for ns_entry in nss_response]
    # TODO: what if more than one NSs are running
    # if len(ns_uuids):
    #     raise Exception("More that one NSs are running...")
    vdu_uuid = None
    for ns_uuid in ns_uuids:
        vnf = Vnf(token)
        request = vnf.get_list_by_ns(ns_uuid=ns_uuid)
        vnfs = request.json()
        for i in vnfs:
            # NOTE(review): substring match kept for backward compatibility,
            # but index "1" also matches "11" — confirm whether an exact
            # equality check was intended.
            if vnf_index in i.get("member-vnf-index-ref"):
                # Keep the vim-id of the last VDU of the matched VNF
                for vdur in i.get("vdur"):
                    vdu_uuid = vdur.get("vim-id")
                return vdu_uuid
    return vdu_uuid
def get_vim_info(vim_uuid=None):
    """Get the VIM name, type, url by given VIM uuid

    Args:
        vim_uuid (str): The VIM uuid

    Returns:
        dict: the VIM uuid, name, type and url (all None but uuid when
            vim_uuid is not provided)
    """
    if vim_uuid is None:
        return {"uuid": vim_uuid, "name": None, "type": None, "url": None}
    # Bug fix: the second credential must be the password (was the username twice).
    token = bearer_token(OSM_ADMIN_CREDENTIALS.get('username'),
                         OSM_ADMIN_CREDENTIALS.get('password'))
    vim = Vim(token)
    response = vim.get(vim_uuid=vim_uuid)
    data = response.json()
    vim_info = {
        "uuid": vim_uuid,
        "name": data.get('name', None),
        "type": data.get('vim_type', None),
        "url": data.get('vim_url', None)
    }
    return vim_info
def get_vnf_details(vnf_uuid, record, source="vtranscoder3d"):
    """ Append MANO (OSM) details (ns, vnf and vdu) by given VNF uuid

    Args:
        vnf_uuid (str): The uuid of the VNF
        record (dict): The original metric as it is sent from monitoring metrics generator
        source (str): The NFVI or application ref. It could be "vtranscoder3d" etc..

    Returns:
        dict: osm info for the given vdu (empty dict if the lookup fails)
    """
    mano = {}
    token = bearer_token(OSM_ADMIN_CREDENTIALS.get('username'),
                         OSM_ADMIN_CREDENTIALS.get('password'))
    vnfr = Vnf(token)
    vnf_response = vnfr.get(vnf_uuid=vnf_uuid)
    vnf_record = vnf_response.json()
    # Get NS details
    ns_id = vnf_record.get("nsr-id-ref", None)
    ns = Ns(token)
    ns_response = ns.get(ns_uuid=ns_id)
    nsr = ns_response.json()
    # VNFd info (NOTE(review): this local is never used afterwards)
    vnfd = vnf_record.get("vnfd-id", None)
    # VDUs info
    vdu_records = vnf_record.get('vdur', [])
    # Number of VDU per VNFd
    vdus_count = len(vdu_records)
    if vdus_count > 1:
        logger.critical("{} VDUs were found for the VNF with uuid: {}".format(
            vdus_count, vnf_uuid))
    # Only the first VDU is considered (see the TODO in the finally block)
    vdu_record = vdu_records[0]
    try:
        # Find the VDU info
        vdu_metadata = record.get("resource_metadata", {})
        # If the data coming from VNFs, discover in different way the VDU info
        if source in [
                "telegraf", "vtranscoder3d", "vce", "kubernetes", "opennebula",
                "vtranscoder3d_spectators"
        ]:
            vdu_metadata["instance_id"] = vdu_record.get('vim-id', None)
            vdu_metadata["flavor"] = {}
            if vdus_count:
                # TODO: what about the IPs if exists VNF with multiple VDUs?
                vdu_metadata["state"] = vdu_record.get('status', "")
                vdu_metadata['name'] = vdu_record.get('name', "")
            # Flavor details are not provided by these sources
            vdu_metadata["flavor"]["ram"] = None
            vdu_metadata["flavor"]["vcpus"] = None
            vdu_metadata["flavor"]["disk"] = None
        elif source == "openstack":
            """By default, OpenStack (ceilometer) provides us the following info:
            vcpus, ram, ephemeral, swap, disk, name, id
            """
            pass
        # Get IP per VDU
        vdu_metadata['ip_address'] = vdu_record.get("ip-address", None)
        # Get the VIM account Info
        vim_account = VimAccount(token)
        vim_response = vim_account.get(
            vim_account_uuid=nsr.get('datacenter', None))
        vimr = vim_response.json()
        # Get the NSd uuid (top-level field first, instantiate_params fallback)
        nsd_id = nsr.get('nsdId', None)
        if nsd_id is None:
            nsd_id = nsr.get('instantiate_params', {}).get('nsdId', None)
        mano = {
            "ns": {
                "id": ns_id,
                "name": nsr.get('name-ref', None),
                "nsd_id": nsd_id,
                "nsd_name": nsr.get('nsd-name-ref', None)
            },
            "vnf": {
                "id": vnf_record.get("id", None),
                "name": vnf_record.get("name", None),
                # not in osm r5: it could be <vnfd_name>_<index>
                "index": vnf_record.get("member-vnf-index-ref", 0),
                "short_name": vnf_record.get("short-name", None),  # not in osm r5
                "vnfd_id": vnf_record.get("vnfd-id", None),
                "vnfd_name": vnf_record.get('vnfd-ref', None)
            },
            "vdu": {
                "id": vdu_metadata.get("instance_id", None),
                "name": vdu_metadata.get("name", None),
                "image_id": vdu_metadata.get("image", {}).get("id", None),
                "flavor": vdu_metadata.get("flavor", {}),
                "status": vdu_metadata.get("state", None),
                "ip_address": vdu_metadata['ip_address'],
                "mgmt-interface": None  # future usage
            },
            "vim": {
                "uuid": vimr.get('_id', None),
                "name": vimr.get('name', None),
                "type": vimr.get('vim_type', None),
                "url": vimr.get('vim_url', None),
                "tag": source
            }
        }
        logger.debug(mano)
    except Exception as ex:
        logger.exception(ex)
    finally:
        # TODO: Since we don't know the VDU uuid, the 1st VDU will be used since 1 VDU is used for the VNF (UC1).
        # NOTE(review): `return` inside `finally` silently swallows any
        # in-flight exception and always returns `mano` (possibly empty).
        return mano
def get_faas_vdu_details(vdu_uuid, ro_ns_uuid, vnf_name):
    """Build MANO (ns, vnf, vdu, vim) details for a FaaS VDU.

    Args:
        vdu_uuid (str): The uuid of the FaaS VDU (e.g. the container id)
        ro_ns_uuid (str): The RO-level NS uuid used to locate the NS record
        vnf_name (str): "<vnfd_name>.<vnf_index>" identifying the VNF

    Returns:
        dict: MANO details for the given VDU, or an empty dict on any failure
    """
    mano = {}
    nsr = {}
    vnfd = {}
    token = bearer_token(OSM_ADMIN_CREDENTIALS.get('username'),
                         OSM_ADMIN_CREDENTIALS.get('password'))
    try:
        if ro_ns_uuid is None or vnf_name is None:
            raise Exception('Empty input')
        # Split "<vnfd_name>.<vnf_index>" into its two parts
        vnf_name = vnf_name.split('.')
        vnfd_name = vnf_name[0]
        vnf_index = vnf_name[1]
        # search for ro_ns_uuid in NSs list
        ns = Ns(token)
        ns_response = ns.get_list()
        ns_list = ns_response.json()
        for instance in ns_list:
            # Ensure that RO data are available
            openmano_deployment = instance['_admin'].get('deployed', {}).get('RO', {})
            if len(openmano_deployment.keys()) == 0:
                continue
            # Compare the container id with the current NS record uuid
            nsr_id = openmano_deployment.get('nsr_id', None)
            if nsr_id is None or nsr_id != ro_ns_uuid:
                continue
            nsr = instance
            break
        # Get the NSd uuid (top-level field first, instantiate_params fallback)
        nsd_id = nsr.get('nsdId', None)
        if nsd_id is None:
            nsd_id = nsr.get('instantiate_params', {}).get('nsdId', None)
        # Get the VIM account Info
        datacenter = nsr.get('datacenter', None)
        vim_account = VimAccount(token)
        vim_response = vim_account.get(vim_account_uuid=datacenter)
        vimr = vim_response.json()
        # Get vnfd info: find the descriptor whose id matches the vnfd name
        vnf_descriptor = Vnfd(token)
        vnfd_req = vnf_descriptor.get_list()
        vnfd_list = vnfd_req.json()
        for descriptor in vnfd_list:
            if 'id' in descriptor.keys() and descriptor['id'] == vnfd_name:
                vnfd = descriptor
                break
        # NOTE(review): nsr['id'] / vnfd['_id'] raise KeyError when no match
        # was found above; the broad except turns that into an empty result.
        mano = {
            "ns": {
                "id": nsr['id'],
                "name": nsr.get('name-ref', None),
                "nsd_id": nsd_id,
                "nsd_name": nsr.get('nsd-name-ref', None)
            },
            "vnf": {
                "id": '{}-{}-{}'.format(vdu_uuid, vnfd_name, vnf_index),
                "name": '{}.{}'.format(vnfd_name, vnf_index),
                "index": vnf_index,
                "short_name": None,
                "vnfd_id": vnfd['_id'],
                "vnfd_name": vnfd_name
            },
            "vdu": {
                "id": vdu_uuid,
                "name": vnfd_name,
                "image_id": vnfd_name,
                "flavor": {},
                "status": 'running',
                "ip_address": '0.0.0.0',
                "mgmt-interface": None  # future usage
            },
            "vim": {
                "uuid": vimr.get('_id', None),
                "name": vimr.get('name', None),
                "type": vimr.get('vim_type', None),
                "url": vimr.get('vim_url', None),
                "tag": 'kubernetes'
            }
        }
    except Exception as ex:
        logger.exception(ex)
    finally:
        # NOTE(review): `return` inside `finally` swallows any in-flight
        # exception and always returns `mano` (possibly empty).
        return mano
def get_vcdn_net_interfaces(ns_uuid, search_for_mid_cache="vCache-mid-vdu",
                            search_for_edge_cache="vCache-edge-vdu"):
    """ Get the network interfaces of scaled VNF as well as the current count-index

    Args:
        ns_uuid (str): The NS uuid, in which the scaled VNF belongs to
        search_for_mid_cache (str): Search for the Mid vCache by given explicit name
        search_for_edge_cache (str): Search for scaled Edge vCache by given explicit name

    Returns:
        tuple(dict, int): The details of the VNF interfaces including the VDU index in the VNF
        (
          {
            "edge": {
              "user": {"mac-address": "fa:16:3e:0c:94:7f", "ip-address": "192.168.252.12",
                       "name": "ens6", "ns-vld-id": "user"},
              "cache": {"mac-address": "fa:16:3e:4d:b9:64", "ip-address": "192.168.253.9",
                        "name": "ens7", "ns-vld-id": "cache"},
              "management": {"mgmt-vnf": "true", "mac-address": "fa:16:3e:99:33:43",
                             "ip-address": "192.168.111.29", "name": "ens3",
                             "ns-vld-id": "management"}
            },
            "mid": {
              "management": {"ip-address": "192.168.111.13", "ns-vld-id": "management",
                             "name": "ens3", "mac-address": "fa:16:3e:02:f5:1c",
                             "mgmt-vnf": true},
              "cache": {"ip-address": "192.168.253.12", "name": "ens6",
                        "ns-vld-id": "cache", "mac-address": "fa:16:3e:60:5d:9d"},
              "origin": {"ip-address": "192.168.254.5", "name": "ens7",
                         "ns-vld-id": "origin", "mac-address": "fa:16:3e:0d:64:97"}
            }
          },
          <int|1>
        )
    """
    vdus_list = []
    interfaces = {"mid": None, "edge": None}
    edges_interfaces_all = {}
    count_index = None
    # Fetch the VNFs by given NS instance
    token = bearer_token(OSM_ADMIN_CREDENTIALS.get('username'),
                         OSM_ADMIN_CREDENTIALS.get('password'))
    vnf = Vnf(token)
    response = vnf.get_list_by_ns(ns_uuid=ns_uuid)
    vnfs_list = response.json()
    # Keep the VDUs details
    for vnf_instance in vnfs_list:
        vdus_list += vnf_instance.get("vdur", [])
    # Discover the interfaces of the proper scaled Edge VNF and Mid vCache
    for vdu in vdus_list:
        # Get Mid vCache net details (only the first instance, count-index 0)
        if vdu.get('vdu-id-ref', None) is not None and \
                vdu['vdu-id-ref'] == search_for_mid_cache and \
                vdu.get('count-index', None) == 0:
            interfaces['mid'] = format_vdu_interfaces(vdu.get(
                'interfaces', []))
        # Get Edge vCache net details (collect every instance by count-index)
        if vdu.get('vdu-id-ref', None) is not None and \
                vdu['vdu-id-ref'] == search_for_edge_cache and \
                vdu.get('count-index', None) >= 0:
            edges_interfaces_all[str(
                vdu['count-index'])] = format_vdu_interfaces(
                    vdu.get('interfaces', []))
    # Keep the VDU with the greatest count-index (i.e. the newest Edge vCache)
    # NOTE(review): raises ValueError if no Edge vCache VDU matched
    # (edges_interfaces_all empty) — confirm callers cannot hit that case.
    latest_vdu_index = max([int(k) for k in list(edges_interfaces_all.keys())])
    count_index = latest_vdu_index
    interfaces['edge'] = edges_interfaces_all[str(latest_vdu_index)]
    return interfaces, count_index
def configure_vcdn_ns_after_instantiation(message):
    """ Configure the vCDN NS after its instantiation

    Args:
        message (dict): The message of instantiation event in ns topic
    """
    event_state = message.get('operationState', None)
    # Consider this action only if it is completed
    if event_state != "COMPLETED":
        return
    logger.info('A new vCDN service just instantiated. Status: {}'.format(event_state))
    try:
        token = bearer_token(OSM_ADMIN_CREDENTIALS.get('username'),
                             OSM_ADMIN_CREDENTIALS.get('password'))
        # check nsd: only vCDN network services are handled
        ns_uuid = message.get('nsr_id', None)
        nsd_ref_name = get_nsd_ref_name(token, ns_uuid)
        if not (nsd_ref_name and nsd_ref_name.startswith(vCDN_NSD_PREFIX)):
            return
        logger.info('vCDN service uuid is {}'.format(ns_uuid))
        # Check if event is `instantiate`
        operation_uuid = message.get('nslcmop_id', None)
        event = get_event(token, operation_uuid)
        if not event or event != "instantiate":
            return
        # Wait 10 seconds: ensure that the new vCache is up and running. Also, be sure
        # that the vnf record includes the IPv4 of the new vCache.
        time.sleep(10)
        # Discover the vcache_incremental_counter <N> & the net IFs for UC3
        net_interfaces, current_vdu_index = get_vcdn_net_interfaces(
            ns_uuid, search_for_mid_cache="vCache_mid_vdu",
            search_for_edge_cache="vCache_edge_vdu")
        edge_net_interfaces = net_interfaces.get('edge', {})
        mid_net_interfaces = net_interfaces.get('mid', {})
        vcache_incremental_counter = int(current_vdu_index) + 1
        # discover the CACHE_NET_IP for UC3
        mid_vcache_ip_cache_net = mid_net_interfaces.get('5GMEDIA-CACHE-NET',
                                                         {}).get('ip-address', None)
        # discover the MGMT_NET_IP for UC3
        edge_vcache_ip_mgmt_net = edge_net_interfaces.get('5GMEDIA_MGMT_NET', {}).get(
            'ip-address', None)
        # discover the CACHE_USER_IP for UC3
        edge_vcache_ip_user_net = edge_net_interfaces.get('5GMEDIA-USER-NET',
                                                          {}).get('ip-address', None)
        # Check edge vCache net availability using ping
        ping_edge_vcache_ip(edge_vcache_ip_mgmt_net)
        # Set day-1,2... vCache configuration - retry every 10 seconds, up to
        # 9 attempts (NOTE(review): the original comment said "every 18
        # seconds - totally 3 minutes", which does not match this loop).
        for i in range(1, 10):
            logger.info("vCache VNF configuration: Attempt #{}".format(i))
            if configure_edge_vcache(edge_vcache_ip_mgmt_net,
                                     mid_vcache_ip_cache_net,
                                     vcache_incremental_counter):
                break
            time.sleep(10)
        # Update the vDNS
        configure_vdns(edge_vcache_ip_user_net, vcache_incremental_counter)
    except Exception as ex:
        logger.exception(ex)
def get_vdus_info(ns_uuid=None):
    """Get information about NS, VNF(s) and VDU(s) by given NS uuid

    Args:
        ns_uuid (str): The NS uuid

    Returns:
        list: one dict with ns, vnf, vdu and vim info per VDU
            (empty list when ns_uuid is not provided)
    """
    vdus_info = []
    if ns_uuid is None:
        return vdus_info
    # Bug fix: the second credential must be the password (was the username twice).
    token = bearer_token(OSM_ADMIN_CREDENTIALS.get('username'),
                         OSM_ADMIN_CREDENTIALS.get('password'))
    ns = Ns(token)
    ns_response = ns.get(ns_uuid=ns_uuid)
    nsr = ns_response.json()
    # Get Vim
    vim_uuid = nsr.get('datacenter', None)
    vim_info = get_vim_info(vim_uuid=vim_uuid)
    # Get the Vnf UUIDs, members of the NS
    vnf_uuids = nsr.get('constituent-vnfr-ref', [])
    vnfr = Vnf(token)
    for vnf_uuid in vnf_uuids:
        vnf_response = vnfr.get(vnf_uuid=vnf_uuid)
        vnf_record = vnf_response.json()
        # VDUs info
        vdu_records = vnf_record.get('vdur', [])
        for vdu_record in vdu_records:
            mano = {
                "vim": vim_info,
                "ns": {
                    "id": ns_uuid,
                    "name": nsr.get('name-ref', None),
                    "nsd_id": nsr.get('nsdId', None),
                    "nsd_name": nsr.get('nsd-name-ref', None)
                },
                "vnf": {
                    "id": vnf_record.get("id", None),
                    "name": None,  # not provided in osm r4
                    "short_name": None,  # not provided in osm r4
                    "vnfd_id": vnf_record.get("vnfd-id", None),
                    "vnfd_name": None  # not provided in osm r4
                },
                "vdu": {
                    "id": vdu_record.get("vim-id", None),  # NFVI-based uuid
                    "image_id": None,
                    "flavor": {},
                    "status": vdu_record.get("status", None),
                    "ip_address": vdu_record.get("ip-address", None),
                    "mgmt-interface": None  # future usage
                }
            }
            vdus_info.append(mano)
    return vdus_info
def configure_vcdn_ns_after_scale_out(message):
    """ Configure the vCDN NS after scaling out operation in a regular edge vCache VNF

    Args:
        message (dict): The message of scaled event in ns topic
    """
    event_state = message.get('operationState', None)
    # Consider this action only if it is completed
    if event_state != "COMPLETED":
        return
    try:
        token = bearer_token(OSM_ADMIN_CREDENTIALS.get('username'),
                             OSM_ADMIN_CREDENTIALS.get('password'))
        # check nsd: only vCDN network services are handled
        ns_uuid = message.get('nsr_id', None)
        nsd_ref_name = get_nsd_ref_name(token, ns_uuid)
        if not (nsd_ref_name and nsd_ref_name.startswith(vCDN_NSD_PREFIX)):
            return
        # Detect the event: SCALE_IN, SCALE_OUT or something else
        operation_uuid = message.get('nslcmop_id', None)
        event = get_scale_event(token, operation_uuid)
        # Configure the vCache & vDNS only if SCALE_OUT event
        if not event or event != "SCALE_OUT":
            return
        # Wait 10 seconds: ensure that the new vCache is up and running. Also, be sure
        # that the vnf record includes the IPv4 of the new vCache.
        time.sleep(10)
        # Discover the vcache_incremental_counter <N> & the net IFs for UC3
        net_interfaces, current_vdu_index = get_vcdn_net_interfaces(
            ns_uuid, search_for_mid_cache="vCache_mid_vdu",
            search_for_edge_cache="vCache_edge_vdu")
        edge_net_interfaces = net_interfaces.get('edge', {})
        mid_net_interfaces = net_interfaces.get('mid', {})
        vcache_incremental_counter = int(current_vdu_index) + 1
        # discover the CACHE_NET_IP for UC3
        mid_vcache_ip_cache_net = mid_net_interfaces.get('5GMEDIA-CACHE-NET',
                                                         {}).get('ip-address', None)
        # discover the MGMT_NET_IP for UC3
        edge_vcache_ip_mgmt_net = edge_net_interfaces.get('5GMEDIA_MGMT_NET', {}).get(
            'ip-address', None)
        # discover the CACHE_USER_IP for UC3
        edge_vcache_ip_user_net = edge_net_interfaces.get('5GMEDIA-USER-NET',
                                                          {}).get('ip-address', None)
        # Check edge vCache net availability using ping
        ping_edge_vcache_ip(edge_vcache_ip_mgmt_net)
        # Set day-1,2... vCache configuration - Try every 18 seconds - totally 3 minutes
        for i in range(1, 11):
            logger.debug("vCache configuration: Attempt #{}".format(i))
            if configure_edge_vcache(edge_vcache_ip_mgmt_net,
                                     mid_vcache_ip_cache_net,
                                     vcache_incremental_counter):
                break
            time.sleep(18)
        # Update the vDNS
        configure_vdns(edge_vcache_ip_user_net, vcache_incremental_counter)
    except (VnfdUnexpectedStatusCode, VnfScaleNotCompleted,
            vCacheConfigurationFailed, VdnsConfigurationFailed) as ex:
        # Known orchestration failures: log without traceback
        logger.error(ex)
    except Exception as ex:
        logger.exception(ex)
def get_faas_vcdn_net_interfaces(ns_uuid,
                                 search_for_mid_cache="vCache_mid_vdu",
                                 search_for_edge_cache="vCache_edge_vdu"):
    """ Get the network interfaces of the VNF

    Args:
        ns_uuid (str): The NS uuid, in which the scaled VNF belongs to
        search_for_mid_cache (str): Search for the Mid vCache by given explicit name
        search_for_edge_cache (str): Search for scaled Edge vCache by given explicit name

    Returns:
        dict: The details of the VNF interfaces
        (
          {
            "edge": None,
            "mid": {
              "management": {"ip-address": "192.168.111.13", "ns-vld-id": "management",
                             "name": "ens3", "mac-address": "fa:16:3e:02:f5:1c",
                             "mgmt-vnf": true},
              "cache": {"ip-address": "192.168.253.12", "name": "ens6",
                        "ns-vld-id": "cache", "mac-address": "fa:16:3e:60:5d:9d"},
              "origin": {"ip-address": "192.168.254.5", "name": "ens7",
                         "ns-vld-id": "origin", "mac-address": "fa:16:3e:0d:64:97"}
            }
          }
        )
    """
    vdus_list = []
    interfaces = {"mid": None, "edge": None}
    edges_interfaces_all = {}
    # Fix: removed the unused local `count_index` (this function, unlike
    # get_vcdn_net_interfaces, does not return an index).
    # Fetch the VNFs by given NS instance
    token = bearer_token(OSM_ADMIN_CREDENTIALS.get('username'),
                         OSM_ADMIN_CREDENTIALS.get('password'))
    vnf = Vnf(token)
    response = vnf.get_list_by_ns(ns_uuid=ns_uuid)
    vnfs_list = response.json()
    # Keep the VDUs details
    for vnf_instance in vnfs_list:
        vdus_list += vnf_instance.get("vdur", [])
    # Discover the interfaces of the proper scaled Edge VNF and Mid vCache
    for vdu in vdus_list:
        # Get Mid vCache net details (only the first instance, count-index 0)
        if vdu.get('vdu-id-ref', None) is not None and \
                vdu['vdu-id-ref'] == search_for_mid_cache and \
                vdu.get('count-index', None) == 0:
            interfaces['mid'] = format_vdu_interfaces(vdu.get(
                'interfaces', []))
        # Get Edge vCache net details (the new one)
        if vdu.get('vdu-id-ref', None) is not None and \
                vdu['vdu-id-ref'] == search_for_edge_cache and \
                vdu.get('count-index', None) > 0:
            edges_interfaces_all[str(
                vdu['count-index'])] = format_vdu_interfaces(
                    vdu.get('interfaces', []))
    # NOTE(review): edges_interfaces_all is populated but never merged into
    # the result, so interfaces['edge'] is always None (as documented).
    # Confirm whether the edge collection loop is intentionally vestigial.
    return interfaces