def vnf_scaling_in_handler(ns): """Handles scaling-in of NS and closes related sessions on the Billing Services. Args: ns (Instance): The NS under VNF-Scaling In """ # Get auth token from NBI token = bearer_token(settings.OSM_ADMIN_CREDENTIALS.get('username'), settings.OSM_ADMIN_CREDENTIALS.get('password')) # Get VNF UUIDs of NS & Tenant nslcm = NsLcm(token) vnfs = nslcm.get_vnf_list_by_ns(ns.uuid).json() for vnf in vnfs: vnf_object = Vnf.objects.get(uuid=vnf['id']) vdu_object_ids = [vdu.uuid for vdu in vnf_object.vdus.all()] vdu_vdur_ids = [vdur['vim-id'] for vdur in vnf['vdur']] vdu_scaled_in_ids = [ x for x in vdu_object_ids if x not in vdu_vdur_ids ] if len(vdu_scaled_in_ids) == 0: continue else: vdu_scaled_in_id = vdu_scaled_in_ids[0] vdus = Vdu.objects.select_related( 'tenant', 'instance', 'vnf').filter(uuid=vdu_scaled_in_id) vdus.update(state='deleted') accounting_client.close_session(vdus[0].vdu_session_id, 'vdu') logger.info('VDU with UUID {} was deleted'.format(vdus[0].uuid)) break
def discover_vnfr_using_ip(ip_address=None):
    """Discover the VNF based on the assigned IP

    Args:
        ip_address (str): The assigned IP in VM

    Returns:
        dict: The VNF record whose 'ip-address' equals ip_address

    Raises:
        VduWithIpNotFound: in case that there is no VNF/VDU having the
            given IP
    """
    # Bug fix: the second credential must be the password — the username was
    # previously sent twice, so NBI authentication would fail.
    token = bearer_token(OSM_ADMIN_CREDENTIALS.get('username'),
                         OSM_ADMIN_CREDENTIALS.get('password'))
    vnfr = Vnf(token)
    vnfrs = vnfr.get_list()
    vnf_records = vnfrs.json()
    for vnf_record in vnf_records:
        # TODO: improve it in case of multiple VDUs per VNF
        vnf_ip_address = vnf_record.get('ip-address', None)
        if vnf_ip_address is not None and vnf_ip_address == ip_address:
            return vnf_record
    raise VduWithIpNotFound(
        "Not found VDU with given IP: {}".format(ip_address))
def get_ns_name(ns_uuid):
    """Return the NS name for the given NS uuid."""
    auth_token = bearer_token(OSM_ADMIN_CREDENTIALS.get('username'),
                              OSM_ADMIN_CREDENTIALS.get('password'))
    service = NetworkService(auth_token)
    ns_record = service.get(ns_uuid=ns_uuid).json()
    return ns_record['name']
def vnf_scaling_out_handler(ns): """Handlers the scaling-out of a VNF and opens related sessions on the Billing Services. Args: ns (Instance): The NS under VNF-Scaling """ # Get auth token from NBI token = bearer_token(settings.OSM_ADMIN_CREDENTIALS.get('username'), settings.OSM_ADMIN_CREDENTIALS.get('password')) # Get VNFs of NS & Tenant nslcm, vnfpkgm = NsLcm(token), VnfPkgM(token) vnfs = nslcm.get_vnf_list_by_ns(ns.uuid).json() tn = Tenant.objects.get(uuid=ns.tenant.uuid) for vnf in vnfs: # Get VNF v = Vnf.objects.get(uuid=vnf['id']) # Get VM Flavor # TODO: Fix if VNFs include more than one VDU vm_flavor = vnfpkgm.get_vnfd( vnf['vnfd-id']).json()['vdu'][0]['vm-flavor'] # Check if VDU of scaled VNF is found vdu_is_created = False for vdur in vnf['vdur']: vdu_object = Vdu.objects.filter(uuid=vdur['vim-id']) if vdu_object.exists(): continue vdu = Vdu.objects.create(tenant=tn, instance=ns, vnf=v, uuid=vdur['vim-id'], nfvipop_id=ns.nfvipop_id, state='active', project_name=ns.mano_project, vcpu=vm_flavor['vcpu-count'], vram=vm_flavor['memory-mb'], vdisk=vm_flavor['storage-gb'], vim_type=ns.vim_type, flavor='{}_{}_{}'.format( vm_flavor['vcpu-count'], vm_flavor['memory-mb'], vm_flavor['storage-gb'])) vdu.vdu_session_id = accounting_client.open_vdu_session( v.vnf_session_id, vdu) vdu.save() vdu_is_created = True logger.info('New VDU object: {}, VNF: {}, NS: {}'.format( vdu.uuid, v.uuid, ns.uuid)) if vdu_is_created: break
def main():
    """Collect resource consumption across OpenStack VIMs and recommend
    flavors."""
    # Authenticate against the OSM NBI.
    osm_token = bearer_token(OSM_ADMIN_CREDENTIALS['username'],
                             OSM_ADMIN_CREDENTIALS['password'])
    # Discover the configured VIM accounts.
    vims = VimAccount(token=osm_token).get_list().json()
    # Derive per-VDU consumption and feed it to the recommender.
    recommend_flavor(osm_token, get_resources_consumption(vims))
def __init__(self, ns_uuid, vnfd_uuid):
    """Constructor

    Args:
        ns_uuid (str): The uuid of the ns record
        vnfd_uuid (str): The uuid of the VNFd record
    """
    self.ns_uuid = ns_uuid
    self.vnfd_uuid = vnfd_uuid
    # Authenticate once against the OSM NBI; the token is name-mangled
    # (private) to this instance.
    self.__token = bearer_token(OSM_ADMIN_CREDENTIALS['username'],
                                OSM_ADMIN_CREDENTIALS['password'])
    # May be populated by get_scaling_group_if_any() below.
    self.scaling_group_name = None
    self.get_scaling_group_if_any()
def count_running_ns():
    """Find the number of the instantiated NSs in OSM r4

    Returns:
        int: count of NS records whose operational-status is "running"
    """
    running_ns = 0
    # Bug fix: authenticate with username/password — the username was
    # previously sent as both credentials.
    token = bearer_token(OSM_ADMIN_CREDENTIALS.get('username'),
                         OSM_ADMIN_CREDENTIALS.get('password'))
    ns = Ns(token)
    request = ns.get_list()
    ns_list = request.json()
    for record in ns_list:
        if record.get("operational-status") == "running":
            running_ns += 1
    return running_ns
def configure_vcdn_ns_after_termination(message):
    """ Configure the vCDN NS after its termination, especially the vDNS

    Args:
        message (dict): The message of termination event in ns topic
    """
    event_state = message.get('operationState', None)
    # Proceed only while the termination is still in PROCESSING state.
    if event_state != "PROCESSING":
        return
    logger.info('A running vCDN service is terminating. Status: {}'.format(event_state))
    ns_uuid = message.get('nsInstanceId', None)
    logger.info('vCDN service uuid is {}'.format(ns_uuid))
    token = bearer_token(OSM_ADMIN_CREDENTIALS.get('username'),
                         OSM_ADMIN_CREDENTIALS.get('password'))
    # Only act on NSDs belonging to the vCDN service (name prefix check).
    nsd_ref_name = get_nsd_ref_name(token, ns_uuid)
    if not (nsd_ref_name and nsd_ref_name.startswith(vCDN_NSD_PREFIX)):
        return
    try:
        # Check if event is `terminate`
        operation_uuid = message.get('id', None)
        logger.info("The operation uuid is {}".format(operation_uuid))
        event = get_event(token, operation_uuid)
        if not event or event != "terminate":
            return
        # update the vDNS properly
        clean_vdns_from_regular_vnfs(ns_uuid)
        # fetch the number of spawned faas vnfs from db and update the vDNS
        # configuration properly
        last_operation = get_last_operation(ns_uuid)
        instances_number = last_operation.get('instance_number', 0)
        if instances_number > 0:
            # update the vDNS configuration by deleting the vDNS configuration
            # related to the faas edge vCaches
            clean_vdns_from_faas_vnfs(ns_uuid, instances_number)
    except Exception as ex:
        # Best-effort cleanup: log and swallow so the consumer keeps running.
        logger.exception(ex)
def discover_vnf_uuid_by_vnfd_name_index(vnfd_name_index):
    """ Discover the VNF uuid by given the vnfd name and index

    Args:
        vnfd_name_index (str): The VNFd name & index ("<vnfd-ref>.<index>")

    Returns:
        str: the vnf uuid, or None if no running VNF matches
    """
    # Bug fix: the second credential must be the password, not the username.
    token = bearer_token(OSM_ADMIN_CREDENTIALS.get('username'),
                         OSM_ADMIN_CREDENTIALS.get('password'))
    # Get the UUIDs of the running NSs
    ns = Ns(token)
    request = ns.get_list()
    nss_response = request.json()
    ns_uuids = [entry.get('id') for entry in nss_response]
    # TODO: what if more than one NSs are running
    # if len(ns_uuids):
    #     raise Exception("More that one NSs are running...")
    vnf_uuid = None
    for ns_uuid in ns_uuids:
        vnf = Vnf(token)
        request = vnf.get_list_by_ns(ns_uuid=ns_uuid)
        vnfs = request.json()
        for record in vnfs:
            # Compose "<vnfd-ref>.<member-vnf-index-ref>" and compare.
            cur_vnfd_name_index = "{}.{}".format(
                record.get("vnfd-ref"),
                record.get("member-vnf-index-ref"),
            )
            if vnfd_name_index == cur_vnfd_name_index:
                return record.get("id")
    return vnf_uuid
def discover_vdu_uuid_by_vnf_index(vnf_index):
    """ Discover the VDU uuid by given the vnf index

    Args:
        vnf_index (str): The VNF index

    Returns:
        str: the vdu uuid, or None if no running VNF matches
    """
    # Bug fix: the second credential must be the password, not the username.
    token = bearer_token(OSM_ADMIN_CREDENTIALS.get('username'),
                         OSM_ADMIN_CREDENTIALS.get('password'))
    # Get the UUIDs of the running NSs
    ns = Ns(token)
    request = ns.get_list()
    nss_response = request.json()
    ns_uuids = [entry.get('id') for entry in nss_response]
    # TODO: what if more than one NSs are running
    # if len(ns_uuids):
    #     raise Exception("More that one NSs are running...")
    vdu_uuid = None
    for ns_uuid in ns_uuids:
        vnf = Vnf(token)
        request = vnf.get_list_by_ns(ns_uuid=ns_uuid)
        vnfs = request.json()
        for record in vnfs:
            if vnf_index in record.get("member-vnf-index-ref"):
                # Keep the vim-id of the last VDU record in the VNF.
                for vdur in record.get("vdur"):
                    vdu_uuid = vdur.get("vim-id")
                return vdu_uuid
    return vdu_uuid
def get_vim_info(vim_uuid=None):
    """Get the VIM name, type, url by given VIM uuid

    Args:
        vim_uuid (str): The VIM uuid

    Returns:
        dict: the VIM uuid, name, type and url (all-None fields when
            vim_uuid is None)
    """
    if vim_uuid is None:
        return {"uuid": vim_uuid, "name": None, "type": None, "url": None}
    # Bug fix: the second credential must be the password, not the username.
    token = bearer_token(OSM_ADMIN_CREDENTIALS.get('username'),
                         OSM_ADMIN_CREDENTIALS.get('password'))
    vim = Vim(token)
    response = vim.get(vim_uuid=vim_uuid)
    data = response.json()
    vim_info = {
        "uuid": vim_uuid,
        "name": data.get('name', None),
        "type": data.get('vim_type', None),
        "url": data.get('vim_url', None)
    }
    return vim_info
def configure_vcdn_ns_after_scale_out(message):
    """ Configure the vCDN NS after scaling out operation in a regular edge
    vCache VNF

    Args:
        message (dict): The message of scaled event in ns topic
    """
    event_state = message.get('operationState', None)
    # Consider this action only if it is completed
    if event_state != "COMPLETED":
        return
    try:
        token = bearer_token(OSM_ADMIN_CREDENTIALS.get('username'),
                             OSM_ADMIN_CREDENTIALS.get('password'))
        # Only act on NSDs belonging to the vCDN service (name prefix check).
        ns_uuid = message.get('nsr_id', None)
        nsd_ref_name = get_nsd_ref_name(token, ns_uuid)
        if not (nsd_ref_name and nsd_ref_name.startswith(vCDN_NSD_PREFIX)):
            return
        # Detect the event: SCALE_IN, SCALE_OUT or something else
        operation_uuid = message.get('nslcmop_id', None)
        event = get_scale_event(token, operation_uuid)
        # Configure the vCache & vDNS only if SCALE_OUT event
        if not event or event != "SCALE_OUT":
            return
        # Wait 10 seconds: ensure that the new vCache is up and running. Also,
        # be sure that the vnf record includes the IPv4 of the new vCache.
        time.sleep(10)
        # Discover the vcache_incremental_counter <N> & the net IFs for UC3
        net_interfaces, current_vdu_index = get_vcdn_net_interfaces(
            ns_uuid, search_for_mid_cache="vCache_mid_vdu",
            search_for_edge_cache="vCache_edge_vdu")
        edge_net_interfaces = net_interfaces.get('edge', {})
        mid_net_interfaces = net_interfaces.get('mid', {})
        vcache_incremental_counter = int(current_vdu_index) + 1
        # discover the CACHE_NET_IP for UC3
        mid_vcache_ip_cache_net = mid_net_interfaces.get(
            '5GMEDIA-CACHE-NET', {}).get('ip-address', None)
        # discover the MGMT_NET_IP for UC3
        edge_vcache_ip_mgmt_net = edge_net_interfaces.get(
            '5GMEDIA_MGMT_NET', {}).get('ip-address', None)
        # discover the CACHE_USER_IP for UC3
        edge_vcache_ip_user_net = edge_net_interfaces.get(
            '5GMEDIA-USER-NET', {}).get('ip-address', None)
        # Check edge vCache net availability using ping
        ping_edge_vcache_ip(edge_vcache_ip_mgmt_net)
        # Set day-1,2... vCache configuration - Try every 18 seconds -
        # totally 3 minutes
        for i in range(1, 11):
            logger.debug("vCache configuration: Attempt #{}".format(i))
            if configure_edge_vcache(edge_vcache_ip_mgmt_net,
                                     mid_vcache_ip_cache_net,
                                     vcache_incremental_counter):
                break
            time.sleep(18)
        # Update the vDNS
        configure_vdns(edge_vcache_ip_user_net, vcache_incremental_counter)
    except (VnfdUnexpectedStatusCode, VnfScaleNotCompleted,
            vCacheConfigurationFailed, VdnsConfigurationFailed) as ex:
        # Known configuration failures: log as error without traceback.
        logger.error(ex)
    except Exception as ex:
        logger.exception(ex)
def ns_instantiation_handler(ns): """Handles instantiation of NS when deployment of VDUs completes. Args: ns (obj): The NS object under instantiation """ # Get auth token from NBI token = bearer_token(settings.OSM_ADMIN_CREDENTIALS.get('username'), settings.OSM_ADMIN_CREDENTIALS.get('password')) # Get RO id for current NS nslcm, osm_admin, vnfpkgm = NsLcm(token), OsmAdmin(token), VnfPkgM(token) ns_response = nslcm.get_ns(ns.uuid) if ns_response.status_code != HTTP_200_OK: logger.info( 'NS with UUID {} no longer exists. Aborting instantiation'.format( ns.uuid)) ns_object = Instance.objects.filter(uuid=ns.uuid) ns_object.update(state='deleted') return ns_info = ns_response.json() nsr_id = ns_info['_admin']['deployed']['RO']['nsr_id'] # Find tenant to whom this NS belongs tenants = OsmTenant().get_list().json()['tenants'] for tenant in tenants: # Check if tenant exists as an object tn = Tenant.objects.filter(uuid=tenant['uuid']) if not tn.exists(): Tenant.objects.create(created_at=tenant['created_at'], description=tenant['description'], uuid=tenant['uuid'], name=tenant['name']) logger.info('Created new tenant object with uuid {}'.format( tenant['uuid'])) tn = Tenant.objects.get(uuid=tenant['uuid']) # Check if NS belongs to current tenant ns_current = OsmInstance().get(tenant['uuid'], nsr_id) if ns_current.status_code != HTTP_200_OK: continue # Get VIM Information vim_data = osm_admin.get_vim(ns.nfvipop_id).json() # Update NS Information ns.tenant = tn ns.mano_user = tn.name ns.mano_project = ns_info['_admin']['projects_read'][0] # TODO: Change accordingly in case of central OSM ns.nfvipop_id = NFVIPOP_ID_DEFAULT ns.vim_type = vim_data['vim_type'] ns.ns_session_id = accounting_client.open_ns_session(ns) ns.save() logger.info('New NS instance object: {}, Tenant: {}'.format( ns.uuid, tn.uuid)) # Get VNFs of NS vnfs = nslcm.get_vnf_list_by_ns(ns.uuid).json() for vnf in vnfs: # VNF Name vnf_name = '{}.{}'.format(vnf['vnfd-ref'], vnf['member-vnf-index-ref']) # Get VM Flavor 
# TODO: Fix if VNFs include more than one VDU vm_flavor = vnfpkgm.get_vnfd( vnf['vnfd-id']).json()['vdu'][0]['vm-flavor'] # Create and open VNF session v = Vnf.objects.create(tenant=tn, instance=ns, uuid=vnf['id'], name=vnf_name, state='active', vim_type=ns.vim_type) v.vnf_session_id = accounting_client.open_vnf_session( ns.ns_session_id, v.uuid, v.name) v.save() logger.info('New VNF object: {}, NS instance: {}'.format( v.uuid, ns.uuid)) for vdur in vnf['vdur']: vdu = Vdu.objects.create(tenant=tn, instance=ns, vnf=v, uuid=vdur['vim-id'], nfvipop_id=ns.nfvipop_id, state='active', project_name=ns.mano_project, vcpu=vm_flavor['vcpu-count'], vram=vm_flavor['memory-mb'], vdisk=vm_flavor['storage-gb'], vim_type=ns.vim_type, flavor='{}_{}_{}'.format( vm_flavor['vcpu-count'], vm_flavor['memory-mb'], vm_flavor['storage-gb'])) vdu.vdu_session_id = accounting_client.open_vdu_session( v.vnf_session_id, vdu) vdu.save() logger.info('New VDU object: {}, VNF: {}, NS: {}'.format( vdu.uuid, v.uuid, ns.uuid)) break
def get_vdus_info(ns_uuid=None):
    """Get information about NS, VNF(s) and VDU(s) by given NS uuid

    Args:
        ns_uuid (str): The NS uuid

    Returns:
        list: one dict of ns, vnf and vdu info per VDU record (empty list
            when ns_uuid is None)
    """
    vdus_info = []
    if ns_uuid is None:
        return vdus_info
    # Bug fix: the second credential must be the password, not the username.
    token = bearer_token(OSM_ADMIN_CREDENTIALS.get('username'),
                         OSM_ADMIN_CREDENTIALS.get('password'))
    ns = Ns(token)
    ns_response = ns.get(ns_uuid=ns_uuid)
    nsr = ns_response.json()
    # Get Vim
    vim_uuid = nsr.get('datacenter', None)
    vim_info = get_vim_info(vim_uuid=vim_uuid)
    # Get the Vnf UUIDs, members of the NS
    vnf_uuids = nsr.get('constituent-vnfr-ref', [])
    vnfr = Vnf(token)
    for vnf_uuid in vnf_uuids:
        vnf_response = vnfr.get(vnf_uuid=vnf_uuid)
        vnf_record = vnf_response.json()
        # VDUs info
        vdu_records = vnf_record.get('vdur', [])
        for vdu_record in vdu_records:
            mano = {
                "vim": vim_info,
                "ns": {
                    "id": ns_uuid,
                    "name": nsr.get('name-ref', None),
                    "nsd_id": nsr.get('nsdId', None),
                    "nsd_name": nsr.get('nsd-name-ref', None)
                },
                "vnf": {
                    "id": vnf_record.get("id", None),
                    "name": None,  # not provided in osm r4
                    "short_name": None,  # not provided in osm r4
                    "vnfd_id": vnf_record.get("vnfd-id", None),
                    "vnfd_name": None  # not provided in osm r4
                },
                "vdu": {
                    "id": vdu_record.get("vim-id", None),  # NFVI-based uuid
                    "image_id": None,
                    "flavor": {},
                    "status": vdu_record.get("status", None),
                    "ip_address": vdu_record.get("ip-address", None),
                    "mgmt-interface": None  # future usage
                }
            }
            vdus_info.append(mano)
    return vdus_info
def get_vnf_details(vnf_uuid, record, source="vtranscoder3d"):
    """ Append MANO (OSM) details (ns, vnf and vdu) by given VNF uuid

    Args:
        vnf_uuid (str): The uuid of the VNF
        record (dict): The original metric as it is sent from monitoring
            metrics generator
        source (str): The NFVI or application ref. It could be
            "vtranscoder3d" etc..

    Returns:
        dict: osm info for the given vdu (empty dict on failure)
    """
    mano = {}
    token = bearer_token(OSM_ADMIN_CREDENTIALS.get('username'),
                         OSM_ADMIN_CREDENTIALS.get('password'))
    vnfr = Vnf(token)
    vnf_response = vnfr.get(vnf_uuid=vnf_uuid)
    vnf_record = vnf_response.json()
    # Get NS details
    ns_id = vnf_record.get("nsr-id-ref", None)
    ns = Ns(token)
    ns_response = ns.get(ns_uuid=ns_id)
    nsr = ns_response.json()
    # VNFd info
    vnfd = vnf_record.get("vnfd-id", None)
    # VDUs info
    vdu_records = vnf_record.get('vdur', [])
    # Number of VDU per VNFd
    vdus_count = len(vdu_records)
    if vdus_count > 1:
        logger.critical("{} VDUs were found for the VNF with uuid: {}".format(
            vdus_count, vnf_uuid))
    # NOTE(review): only the first VDU record is used below; raises IndexError
    # if the VNF has no VDUs — confirm upstream guarantees at least one.
    vdu_record = vdu_records[0]
    try:
        # Find the VDU info
        vdu_metadata = record.get("resource_metadata", {})
        # If the data coming from VNFs, discover in different way the VDU info
        if source in [
                "telegraf", "vtranscoder3d", "vce", "kubernetes",
                "opennebula", "vtranscoder3d_spectators"
        ]:
            vdu_metadata["instance_id"] = vdu_record.get('vim-id', None)
            vdu_metadata["flavor"] = {}
            if vdus_count:
                # TODO: what about the IPs if exists VNF with multiple VDUs?
                vdu_metadata["state"] = vdu_record.get('status', "")
                vdu_metadata['name'] = vdu_record.get('name', "")
                vdu_metadata["flavor"]["ram"] = None
                vdu_metadata["flavor"]["vcpus"] = None
                vdu_metadata["flavor"]["disk"] = None
        elif source == "openstack":
            """By default, OpenStack (ceilometer) provides us the following info: vcpus, ram, ephemeral, swap, disk, name, id """
            pass
        # Get IP per VDU
        vdu_metadata['ip_address'] = vdu_record.get("ip-address", None)
        # Get the VIM account Info
        vim_account = VimAccount(token)
        vim_response = vim_account.get(
            vim_account_uuid=nsr.get('datacenter', None))
        vimr = vim_response.json()
        # Get the NSd uuid
        nsd_id = nsr.get('nsdId', None)
        if nsd_id is None:
            nsd_id = nsr.get('instantiate_params', {}).get('nsdId', None)
        mano = {
            "ns": {
                "id": ns_id,
                "name": nsr.get('name-ref', None),
                "nsd_id": nsd_id,
                "nsd_name": nsr.get('nsd-name-ref', None)
            },
            "vnf": {
                "id": vnf_record.get("id", None),
                "name": vnf_record.get("name", None),
                # not in osm r5: it could be <vnfd_name>_<index>
                "index": vnf_record.get("member-vnf-index-ref", 0),
                "short_name": vnf_record.get("short-name", None),  # not in osm r5
                "vnfd_id": vnf_record.get("vnfd-id", None),
                "vnfd_name": vnf_record.get('vnfd-ref', None)
            },
            "vdu": {
                "id": vdu_metadata.get("instance_id", None),
                "name": vdu_metadata.get("name", None),
                "image_id": vdu_metadata.get("image", {}).get("id", None),
                "flavor": vdu_metadata.get("flavor", {}),
                "status": vdu_metadata.get("state", None),
                "ip_address": vdu_metadata['ip_address'],
                "mgmt-interface": None  # future usage
            },
            "vim": {
                "uuid": vimr.get('_id', None),
                "name": vimr.get('name', None),
                "type": vimr.get('vim_type', None),
                "url": vimr.get('vim_url', None),
                "tag": source
            }
        }
        logger.debug(mano)
    except Exception as ex:
        logger.exception(ex)
    finally:
        # TODO: Since we don't know the VDU uuid, the 1st VDU will be used
        # since 1 VDU is used for the VNF (UC1).
        return mano
def get_faas_vdu_details(vdu_uuid, ro_ns_uuid, vnf_name):
    """Compose MANO (ns/vnf/vdu/vim) details for a FaaS VDU.

    Args:
        vdu_uuid (str): The FaaS VDU uuid
        ro_ns_uuid (str): The RO-level NS uuid, matched against the deployed
            RO data of each NS record
        vnf_name (str): "<vnfd_name>.<vnf_index>" formatted name

    Returns:
        dict: the composed mano details, or {} if any step fails
    """
    mano = {}
    nsr = {}
    vnfd = {}
    token = bearer_token(OSM_ADMIN_CREDENTIALS.get('username'),
                         OSM_ADMIN_CREDENTIALS.get('password'))
    try:
        if ro_ns_uuid is None or vnf_name is None:
            raise Exception('Empty input')
        # Split "<vnfd_name>.<vnf_index>" into its parts.
        vnf_name = vnf_name.split('.')
        vnfd_name = vnf_name[0]
        vnf_index = vnf_name[1]
        # search for ro_ns_uuid in NSs list
        ns = Ns(token)
        ns_response = ns.get_list()
        ns_list = ns_response.json()
        for instance in ns_list:
            # Ensure that RO data are available
            openmano_deployment = instance['_admin'].get('deployed',
                                                         {}).get('RO', {})
            if len(openmano_deployment.keys()) == 0:
                continue
            # Compare the container id with the current NS record uuid
            nsr_id = openmano_deployment.get('nsr_id', None)
            if nsr_id is None or nsr_id != ro_ns_uuid:
                continue
            nsr = instance
            break
        # Get the NSd uuid
        nsd_id = nsr.get('nsdId', None)
        if nsd_id is None:
            nsd_id = nsr.get('instantiate_params', {}).get('nsdId', None)
        # Get the VIM account Info
        datacenter = nsr.get('datacenter', None)
        vim_account = VimAccount(token)
        vim_response = vim_account.get(vim_account_uuid=datacenter)
        vimr = vim_response.json()
        # Get vnfd info
        vnf_descriptor = Vnfd(token)
        vnfd_req = vnf_descriptor.get_list()
        vnfd_list = vnfd_req.json()
        for descriptor in vnfd_list:
            if 'id' in descriptor.keys() and descriptor['id'] == vnfd_name:
                vnfd = descriptor
                break
        mano = {
            "ns": {
                "id": nsr['id'],
                "name": nsr.get('name-ref', None),
                "nsd_id": nsd_id,
                "nsd_name": nsr.get('nsd-name-ref', None)
            },
            "vnf": {
                # Synthetic id: the FaaS VNF has no OSM record of its own.
                "id": '{}-{}-{}'.format(vdu_uuid, vnfd_name, vnf_index),
                "name": '{}.{}'.format(vnfd_name, vnf_index),
                "index": vnf_index,
                "short_name": None,
                "vnfd_id": vnfd['_id'],
                "vnfd_name": vnfd_name
            },
            "vdu": {
                "id": vdu_uuid,
                "name": vnfd_name,
                "image_id": vnfd_name,
                "flavor": {},
                "status": 'running',
                "ip_address": '0.0.0.0',
                "mgmt-interface": None  # future usage
            },
            "vim": {
                "uuid": vimr.get('_id', None),
                "name": vimr.get('name', None),
                "type": vimr.get('vim_type', None),
                "url": vimr.get('vim_url', None),
                "tag": 'kubernetes'
            }
        }
    except Exception as ex:
        logger.exception(ex)
    finally:
        # Any failure above is logged and swallowed; {} is returned.
        return mano
def get_vcdn_net_interfaces(ns_uuid, search_for_mid_cache="vCache-mid-vdu",
                            search_for_edge_cache="vCache-edge-vdu"):
    """ Get the network interfaces of scaled VNF as well as the current
    count-index

    Args:
        ns_uuid (str): The NS uuid, in which the scaled VNF belongs to
        search_for_mid_cache (str): Search for the Mid vCache by given
            explicit name
        search_for_edge_cache (str): Search for scaled Edge vCache by given
            explicit name

    Returns:
        tuple(dict, int): The details of the VNF interfaces including the
            VDU index in the VNF, e.g.:
            (
                {
                    "edge": {
                        "user": {"mac-address": "...", "ip-address": "...",
                                 "name": "ens6", "ns-vld-id": "user"},
                        "cache": {...},
                        "management": {...}
                    },
                    "mid": {
                        "management": {...},
                        "cache": {...},
                        "origin": {...}
                    }
                },
                1
            )
    """
    vdus_list = []
    interfaces = {"mid": None, "edge": None}
    edges_interfaces_all = {}
    count_index = None
    # Fetch the VNFs by given NS instance
    token = bearer_token(OSM_ADMIN_CREDENTIALS.get('username'),
                         OSM_ADMIN_CREDENTIALS.get('password'))
    vnf = Vnf(token)
    response = vnf.get_list_by_ns(ns_uuid=ns_uuid)
    vnfs_list = response.json()
    # Keep the VDUs details
    for vnf_instance in vnfs_list:
        vdus_list += vnf_instance.get("vdur", [])
    # Discover the interfaces of the proper scaled Edge VNF and Mid vCache
    for vdu in vdus_list:
        # Get Mid vCache net details (only the count-index 0 instance)
        if vdu.get('vdu-id-ref', None) is not None and \
                vdu['vdu-id-ref'] == search_for_mid_cache and \
                vdu.get('count-index', None) == 0:
            interfaces['mid'] = format_vdu_interfaces(vdu.get(
                'interfaces', []))
        # Get Edge vCache net details (the new one): collect all, keyed by
        # stringified count-index
        if vdu.get('vdu-id-ref', None) is not None and \
                vdu['vdu-id-ref'] == search_for_edge_cache and \
                vdu.get('count-index', None) >= 0:
            edges_interfaces_all[str(
                vdu['count-index'])] = format_vdu_interfaces(
                    vdu.get('interfaces', []))
    # Keep the VDU with the greatest count-index
    # NOTE(review): max() raises ValueError when no edge vCache matched —
    # confirm callers only invoke this after a successful scale event.
    latest_vdu_index = max([int(k) for k in list(edges_interfaces_all.keys())])
    count_index = latest_vdu_index
    interfaces['edge'] = edges_interfaces_all[str(latest_vdu_index)]
    return interfaces, count_index
def configure_vcdn_ns_after_instantiation(message):
    """ Configure the vCDN NS after its instantiation

    Args:
        message (dict): The message of instantiation event in ns topic
    """
    event_state = message.get('operationState', None)
    # Consider this action only if it is completed
    if event_state != "COMPLETED":
        return
    logger.info('A new vCDN service just instantiated. Status: {}'.format(event_state))
    try:
        token = bearer_token(OSM_ADMIN_CREDENTIALS.get('username'),
                             OSM_ADMIN_CREDENTIALS.get('password'))
        # Only act on NSDs belonging to the vCDN service (name prefix check).
        ns_uuid = message.get('nsr_id', None)
        nsd_ref_name = get_nsd_ref_name(token, ns_uuid)
        if not (nsd_ref_name and nsd_ref_name.startswith(vCDN_NSD_PREFIX)):
            return
        logger.info('vCDN service uuid is {}'.format(ns_uuid))
        # Check if event is `instantiate`
        operation_uuid = message.get('nslcmop_id', None)
        event = get_event(token, operation_uuid)
        if not event or event != "instantiate":
            return
        # Wait 10 seconds: ensure that the new vCache is up and running. Also,
        # be sure that the vnf record includes the IPv4 of the new vCache.
        time.sleep(10)
        # Discover the vcache_incremental_counter <N> & the net IFs for UC3
        net_interfaces, current_vdu_index = get_vcdn_net_interfaces(
            ns_uuid, search_for_mid_cache="vCache_mid_vdu",
            search_for_edge_cache="vCache_edge_vdu")
        edge_net_interfaces = net_interfaces.get('edge', {})
        mid_net_interfaces = net_interfaces.get('mid', {})
        vcache_incremental_counter = int(current_vdu_index) + 1
        # discover the CACHE_NET_IP for UC3
        mid_vcache_ip_cache_net = mid_net_interfaces.get(
            '5GMEDIA-CACHE-NET', {}).get('ip-address', None)
        # discover the MGMT_NET_IP for UC3
        edge_vcache_ip_mgmt_net = edge_net_interfaces.get(
            '5GMEDIA_MGMT_NET', {}).get('ip-address', None)
        # discover the CACHE_USER_IP for UC3
        edge_vcache_ip_user_net = edge_net_interfaces.get(
            '5GMEDIA-USER-NET', {}).get('ip-address', None)
        # Check edge vCache net availability using ping
        ping_edge_vcache_ip(edge_vcache_ip_mgmt_net)
        # Set day-1,2... vCache configuration - retry roughly every
        # 10 seconds, up to 9 attempts
        for i in range(1, 10):
            logger.info("vCache VNF configuration: Attempt #{}".format(i))
            if configure_edge_vcache(edge_vcache_ip_mgmt_net,
                                     mid_vcache_ip_cache_net,
                                     vcache_incremental_counter):
                break
            time.sleep(10)
        # Update the vDNS
        configure_vdns(edge_vcache_ip_user_net, vcache_incremental_counter)
    except Exception as ex:
        # Best-effort configuration: log and swallow so the consumer
        # keeps running.
        logger.exception(ex)
def main():
    """ Detect the failed scale out operations and check it is triggered from
    the limitation of the maximum allowed VNF instances.
    """
    kafka_consumer = init_consumer()
    kafka_consumer.subscribe(pattern=OSM_KAFKA_NS_TOPIC)
    # Run any incoming message in the intra-OSM kafka bus, topic `NS`
    for message in kafka_consumer:
        action = message.key.decode('utf-8', 'ignore')
        payload = yaml.safe_load(message.value.decode('utf-8', 'ignore'))
        if action != "scaled":
            continue
        event_state = payload.get('operationState', None)
        if event_state != "FAILED":
            continue
        ns_uuid = payload.get('nsr_id', None)
        operation_uuid = payload.get('nslcmop_id', None)
        if operation_uuid is None:
            continue
        # Bug fix: the format string had two placeholders for three
        # arguments, so the event state was silently dropped from the log.
        logger.info(
            "A new event `{}` for NS_UUID `{}` in state `{}` was detected.".format(
                action, ns_uuid, event_state))
        # Bug fix: authenticate with the admin password — the username was
        # previously sent as both credentials.
        osm_token = bearer_token(OSM_ADMIN_CREDENTIALS['username'],
                                 OSM_ADMIN_CREDENTIALS['password'])
        # Detect the event: SCALE_IN, SCALE_OUT or something else
        ns_operation = NsLcmOperation(osm_token)
        request = ns_operation.get(operation_uuid=operation_uuid)
        response = request.json()
        event = response.get('operationParams', {}).get('scaleVnfData', {}).get(
            'scaleVnfType', None)
        # Skip it if not a scale out operation
        if not event or event != "SCALE_OUT":
            continue
        # Get the VNF index that was scaled
        vnf_index = response.get('operationParams', {}).get('scaleVnfData', {}).get(
            'scaleByStepData', {}).get('member-vnf-index', 0)
        if vnf_index == 0:
            continue
        # Get the list of involved VNFs in the NS
        vnf = Vnf(osm_token)
        vnfs_request = vnf.get_list_by_ns(ns_uuid=ns_uuid)
        vnfs_list = vnfs_request.json()
        # Detect the VNFr and VNFd that probably scaled
        scaled_vnfr = None
        for vnfr in vnfs_list:
            if int(vnfr['member-vnf-index-ref']) == int(vnf_index):
                vnf_request = vnf.get(vnf_uuid=vnfr['_id'])
                scaled_vnfr = vnf_request.json()
                break
        if scaled_vnfr is None:
            continue
        # Get the Vim details that host this NS/VNF instances
        vim_acc = VimAccount(token=osm_token)
        vim_req = vim_acc.get(scaled_vnfr['vim-account-id'])
        vim_info = vim_req.json()
        # Get VNFD info
        vnfd = Vnfd(token=osm_token)
        vnfd_req = vnfd.get(scaled_vnfr['vnfd-id'])
        vnfd_info = vnfd_req.json()
        # Compose the recommendation message
        recommend_vnfd_scaling_group(vim_info, vnfd_info, scaled_vnfr)
def get_faas_vcdn_net_interfaces(ns_uuid, search_for_mid_cache="vCache_mid_vdu",
                                 search_for_edge_cache="vCache_edge_vdu"):
    """ Get the network interfaces of the VNF

    Args:
        ns_uuid (str): The NS uuid, in which the scaled VNF belongs to
        search_for_mid_cache (str): Search for the Mid vCache by given
            explicit name
        search_for_edge_cache (str): Search for scaled Edge vCache by given
            explicit name

    Returns:
        dict: The details of the VNF interfaces, e.g.:
            {
                "edge": None,
                "mid": {
                    "management": {"ip-address": "...", "ns-vld-id":
                        "management", "name": "ens3", "mac-address": "...",
                        "mgmt-vnf": true},
                    "cache": {...},
                    "origin": {...}
                }
            }
    """
    vdus_list = []
    interfaces = {"mid": None, "edge": None}
    edges_interfaces_all = {}
    # Fetch the VNFs by given NS instance
    token = bearer_token(OSM_ADMIN_CREDENTIALS.get('username'),
                         OSM_ADMIN_CREDENTIALS.get('password'))
    vnf = Vnf(token)
    response = vnf.get_list_by_ns(ns_uuid=ns_uuid)
    vnfs_list = response.json()
    # Keep the VDUs details
    for vnf_instance in vnfs_list:
        vdus_list += vnf_instance.get("vdur", [])
    # Discover the interfaces of the proper scaled Edge VNF and Mid vCache
    for vdu in vdus_list:
        # Get Mid vCache net details (only the count-index 0 instance)
        if vdu.get('vdu-id-ref', None) is not None and \
                vdu['vdu-id-ref'] == search_for_mid_cache and \
                vdu.get('count-index', None) == 0:
            interfaces['mid'] = format_vdu_interfaces(vdu.get(
                'interfaces', []))
        # Get Edge vCache net details (the new one)
        # NOTE(review): the collected edge interfaces are never copied into
        # the returned dict — 'edge' stays None as the docstring shows.
        # Confirm whether that is intended before removing this collection.
        if vdu.get('vdu-id-ref', None) is not None and \
                vdu['vdu-id-ref'] == search_for_edge_cache and \
                vdu.get('count-index', None) > 0:
            edges_interfaces_all[str(
                vdu['count-index'])] = format_vdu_interfaces(
                    vdu.get('interfaces', []))
    return interfaces