def instantiate_ns_process(nsId, body, nestedInfo=None):
    """
    Instantiate the network service identified by nsId with the requested
    deployment flavour / instantiation level, then configure monitoring
    exporters and alerts for it.

    Parameters
    ----------
    nsId: string
        Identifier of the service
    body: struct
        Object having the deployment flavour and the instantiation level.
    nestedInfo: dict (optional)
        Info of the nested service to be instantiated, only available when
        composing; its first key is the nested NSD identifier.

    Returns
    -------
    None
    """
    log_queue.put(["INFO", "*****Time measure: SOEc SOEc instantiating a NS"])
    log_queue.put([
        "INFO",
        "SOEc instantiate_ns_process with nsId %s, body %s" % (nsId, body)
    ])
    # get the nsdId that corresponds to nsId
    nsdId = ns_db.get_nsdId(nsId)
    if nestedInfo:
        # when composing, instantiate the nested descriptor instead
        nsdId = next(iter(nestedInfo))
    # first get the ns and vnfs descriptors
    nsd_json = nsd_db.get_nsd_json(nsdId, None)
    # for each vnf in the NSD, get its json descriptor
    # (the original initialized vnfds_json twice; once is enough)
    vnfds_json = {}
    for vnfdId in nsd_json["nsd"]["vnfdId"]:
        log_queue.put(["DEBUG", vnfdId])
        vnfds_json[vnfdId] = vnfd_db.get_vnfd_json(vnfdId, None)
    # request RO
    rooe.instantiate_ns(nsId, nsd_json, vnfds_json, body, nestedInfo)
    log_queue.put(
        ["INFO", "*****Time measure: SOEc updated databases instantiating"])
    sap_info = ns_db.get_ns_sap_info(nsId)
    if len(sap_info) > 0:
        log_queue.put(["INFO", "sapInfo: %s" % (sap_info)])
        monitoring.configure_ns_monitoring(nsId, nsd_json, vnfds_json,
                                           sap_info)
        log_queue.put([
            "INFO",
            "instantiate_ns monitoring exporters created for nsId %s" % (nsId)
        ])
        # initiate alerts
        alert_configure.configure_ns_alerts(nsId, nsdId, nsd_json, vnfds_json,
                                            sap_info)
    log_queue.put([
        "INFO",
        "*****Time measure: SOEc created monitoring exporters and alerts"
    ])
    log_queue.put([
        "INFO",
        "*****Time measure: SOEc instantiate_ns_process finished for nsId %s" %
        (nsId)
    ])
def query_vnfd(vnfdId, version):
    """
    Retrieve the IFA json descriptor of a VNFD.

    Parameters
    ----------
    vnfdId: string
        Identifier of the virtual network function descriptor.
    version: string
        Version of the virtual network function descriptor.

    Returns
    -------
    dict
        the VNFD json descriptor, or 404 when no such descriptor exists
    """
    descriptor = vnfd_db.get_vnfd_json(vnfdId, version)
    # a missing descriptor is signalled with an HTTP-style 404 status code
    return 404 if descriptor is None else descriptor
def scale_ns_process(nsId, body):
    """
    Performs the scaling of the service identified by "nsId" according to
    the info at body, then refreshes its monitoring jobs.

    Parameters
    ----------
    nsId: string
        Identifier of the Network Service Instance.
    body: request body including scaling operation

    Returns
    -------
    None
    """
    log_queue.put(
        ["INFO", "scale_ns_process with nsId %s, body %s" % (nsId, body)])
    # get the nsdId that corresponds to nsId
    nsdId = ns_db.get_nsdId(nsId)
    # get current instantiation level
    current_df = ns_db.get_ns_df(nsId)
    current_il = ns_db.get_ns_il(nsId)
    # first get the ns and vnfs descriptors
    nsd_json = nsd_db.get_nsd_json(nsdId, None)
    # for each vnf in the NSD, get its json descriptor
    vnfds_json = {}
    for vnfdId in nsd_json["nsd"]["vnfdId"]:
        vnfds_json[vnfdId] = vnfd_db.get_vnfd_json(vnfdId, None)
    # request RO
    # (an unused pre-scaling SAP-info snapshot was removed here; the DB
    # getter had no other effect)
    rooe.scale_ns(nsId, nsd_json, vnfds_json, body, current_df, current_il)
    # maybe we have to update the monitoring jobs: we assume that new
    # performance monitoring jobs will be similar to one already present
    sap_info = ns_db.get_ns_sap_info(nsId)
    log_queue.put(["INFO", "new sapInfo after scaling: %s" % (sap_info)])
    monitoring.update_ns_monitoring(nsId, nsd_json, vnfds_json, sap_info)
    log_queue.put([
        "DEBUG",
        "monitoring exporters updated after scaling for nsId %s" % (nsId)
    ])
    # update alerts: it is not needed
    log_queue.put(["INFO", "scale_ns_process finished for nsId %s" % (nsId)])
def instantiate_ns_process(nsId, body):
    """
    Instantiate the network service identified by nsId, according to the
    deployment flavour and instantiation level given in body.

    Parameters
    ----------
    nsId: string
        Identifier of the service
    body: struct
        Object having the deployment flavour and the instantiation level.

    Returns
    -------
    None
    """
    log_queue.put([
        "DEBUG",
        "instantiate_ns_process with nsId %s, body %s" % (nsId, body)
    ])
    # get the nsdId that corresponds to nsId
    nsdId = ns_db.get_nsdId(nsId)
    # first get the ns and vnfs descriptors
    # NOTE(review): version is hard-coded to "0.2" here, unlike the sibling
    # variants that pass None — confirm this is intentional
    nsd_json = nsd_db.get_nsd_json(nsdId, "0.2")
    log_queue.put([
        "DEBUG",
        "instantiate_ns_process with nsId %s, body %s" % (nsId, body)
    ])
    log_queue.put(["DEBUG", "NSD:"])
    log_queue.put(["DEBUG", dumps(nsd_json, indent=4)])
    # for each vnf in the NSD, get its json descriptor
    # (the original initialized vnfds_json twice; once is enough)
    vnfds_json = {}
    for vnfdId in nsd_json["nsd"]["vnfdId"]:
        log_queue.put(["DEBUG", vnfdId])
        vnfds_json[vnfdId] = vnfd_db.get_vnfd_json(vnfdId, None)
    log_queue.put(["DEBUG", "VNFDs:"])
    log_queue.put(["DEBUG", dumps(vnfds_json, indent=4)])
    # request RO
    rooe.instantiate_ns(nsId, nsd_json, vnfds_json, body)
def descriptor_viewer():
    """
    This function just responds to the browser Url: it converts an IFA
    descriptor (pasted text, uploaded file, or one echoed back by the page
    itself) into its OSM representation and, on request, onboards it in the
    SO database.

    :return: the rendered template 'descriptor.html', or a redirect to
        'home' when the request fails
    """
    # local import: ast is only needed by this view, and the file's import
    # header is managed elsewhere
    import ast
    if request.method == 'POST':
        try:
            already_onboarded_in_so = False
            # retrieving the IFA descriptor
            # print(request.form, request.files)
            if 'convert_text' in request.form:
                ifa_json = json.loads(request.form['convert_text'])
            elif 'file_to_convert' in request.files:
                f = request.files['file_to_convert']
                response = f.read()
                ifa_json = json.loads(response.decode('utf-8'))
            elif 'show_json' in request.form:
                # SECURITY: the value is the repr() of a descriptor this page
                # rendered earlier, i.e. pure Python literals. Parse it with
                # ast.literal_eval instead of eval(), which would execute
                # arbitrary code arriving in the HTTP request.
                ifa_json = ast.literal_eval(request.form['show_json'])
                already_onboarded_in_so = True
            elif 'onboard_json' in request.form:
                # SECURITY: same as above — literals only, never eval()
                ifa_json = ast.literal_eval(request.form['onboard_json'])
                record = {}
                if 'nsd' in ifa_json:
                    # nsd case
                    if 'vnfdId' in ifa_json['nsd']:
                        record = {
                            "nsdId": ifa_json["nsd"]["nsdIdentifier"],
                            "nsdCloudifyId": {},
                            "version": ifa_json["nsd"]["version"],
                            "nsdName": ifa_json["nsd"]["nsdName"],
                            "nsdJson": ifa_json,
                            "shareable": True,
                            "domain": "local"
                        }
                        if nsd_db.get_nsd_json(nsdId=record['nsdId']) is None:
                            nsd_db.insert_nsd(record)
                            message = {
                                "Success":
                                'nsdId : {} onboarded on SO with success!'.
                                format(record['nsdId'])
                            }
                        else:
                            log_queue.put(
                                ["DEBUG", 'nsdId already in the SO DB'])
                            raise ValueError('nsdId already in the SO DB')
                    # nsd-composite case
                    else:
                        record = {
                            "nsdId": ifa_json["nsd"]["nsdIdentifier"],
                            "nsdCloudifyId": {},
                            "version": ifa_json["nsd"]["version"],
                            "nsdName": ifa_json["nsd"]["nsdName"],
                            "nsdJson": ifa_json,
                            "shareable": False,
                            "domain": "Composite"
                        }
                        if nsd_db.get_nsd_json(nsdId=record['nsdId']) is None:
                            nsd_db.insert_nsd(record)
                            message = {
                                "Success":
                                'nsdId : {} onboarded on SO with success!'.
                                format(record['nsdId'])
                            }
                        else:
                            log_queue.put(
                                ["DEBUG", 'nsdId already in the SO DB'])
                            raise ValueError('nsdId already in the SO DB')
                # vnfd case
                else:
                    record = {
                        "vnfdId": ifa_json["vnfdId"],
                        "vnfdVersion": ifa_json["vnfdVersion"],
                        "vnfdName": ifa_json["vnfProductName"],
                        "vnfdJson": ifa_json
                    }
                    if vnfd_db.get_vnfd_json(
                            vnfdId=ifa_json["vnfdId"]) is None:
                        vnfd_db.insert_vnfd(record)
                        message = {
                            'Success':
                            'vnfdId : {} onboarded on SO with success!'.format(
                                record['vnfdId'])
                        }
                    else:
                        log_queue.put(["DEBUG", 'vnfdId already in the SO DB'])
                        raise ValueError('vnfdId already in the SO DB')
                log_queue.put(["INFO", message["Success"]])
                flash(message['Success'], 'success')
                already_onboarded_in_so = True
            else:
                raise ValueError('No text/file valid')
            if 'nsd' in ifa_json:
                if 'vnfdId' in ifa_json['nsd']:
                    # convert a NSD
                    list_osm_json, default_index = gui_utils.ifa014_conversion(
                        ifa_json)
                    default_osm_json = list_osm_json[default_index]
                    osm_json_network = []
                    for level in list_osm_json:
                        osm_json_network.append(
                            json_graph.node_link_data(
                                gui_utils.json_network_nsd(level)))
                    descriptor_type = 'nsd'
                else:
                    # convert a composite NSD
                    list_osm_json, default_index = \
                        gui_utils.composite_desc_conversion(ifa_json)
                    default_osm_json = list_osm_json[default_index]
                    osm_json_network = []
                    for level in list_osm_json:
                        osm_json_network.append(
                            json_graph.node_link_data(
                                gui_utils.json_network_composite_nsd(level)))
                    descriptor_type = 'nsd-composite'
            else:
                # convert a VNFD
                list_osm_json = [gui_utils.ifa011_conversion(ifa_json)]
                # done in case of possible list of ifa vnfd conversion
                default_osm_json = list_osm_json[0]
                osm_json_network = [
                    json_graph.node_link_data(
                        gui_utils.json_network_vnfd(default_osm_json))
                ]
                descriptor_type = 'vnfd'
            yaml_descriptor_list = []
            for osm_json in list_osm_json:
                yaml_descriptor_list.append(
                    yaml.safe_dump(osm_json, default_flow_style=False))
            yaml_ifa_descriptor = yaml.safe_dump(ifa_json,
                                                 default_flow_style=False)
            return render_template(
                'descriptor.html',
                html_title='Descriptor Viewer',
                descriptor_type=descriptor_type,
                yaml_network=osm_json_network,
                list_osm_json=list_osm_json,
                yaml_osm_descriptor=yaml_descriptor_list,
                yaml_ifa_descriptor=yaml_ifa_descriptor,
                ifa_json=ifa_json,
                already_onboarded_in_so=already_onboarded_in_so)
        except (TypeError, KeyError, ValueError) as error:
            message = {'Error': 'Error: {}'.format(error)}
            log_queue.put(["ERROR", message['Error']])
            flash(message['Error'], 'danger')
            return redirect(url_for('home'))
def instantiate_ns_process(nsId, body, nestedInfo=None):
    """
    Instantiate the network service identified by nsId with the requested
    deployment flavour / instantiation level, then configure monitoring,
    threshold-based alerts and AIML scaling work for it, and record an
    "INSTANTIATED" notification.

    Parameters
    ----------
    nsId: string
        Identifier of the service
    body: struct
        Object having the deployment flavour and the instantiation level.
    nestedInfo: dict (optional)
        Info of the nested service to be instantiated, only available when
        composing; its first key is the nested NSD identifier.

    Returns
    -------
    None
    """
    log_queue.put([
        "INFO",
        "*****Time measure for nsId: %s: SOEc SOEc instantiating a NS" %
        (nsId)
    ])
    log_queue.put([
        "INFO",
        "SOEc instantiate_ns_process with nsId %s, body %s" % (nsId, body)
    ])
    # get the nsdId that corresponds to nsId
    nsdId = ns_db.get_nsdId(nsId)
    if nestedInfo:
        # when composing, instantiate the nested descriptor instead
        nsdId = next(iter(nestedInfo))
    # first get the ns and vnfs descriptors
    nsd_json = nsd_db.get_nsd_json(nsdId, None)
    # for each vnf in the NSD, get its json descriptor
    # (the original initialized vnfds_json twice; once is enough)
    vnfds_json = {}
    for vnfdId in nsd_json["nsd"]["vnfdId"]:
        log_queue.put(["DEBUG", vnfdId])
        vnfds_json[vnfdId] = vnfd_db.get_vnfd_json(vnfdId, None)
    # request RO
    log_queue.put([
        "INFO",
        "*****Time measure for nsId: %s: SOEc SOEc retreiving descriptors for a NS"
        % (nsId)
    ])
    rooe.instantiate_ns(nsId, nsd_json, vnfds_json, body, nestedInfo)
    log_queue.put([
        "INFO",
        "*****Time measure for nsId: %s: SOEc SOEc-ROE updated DBs instantiating a NS"
        % (nsId)
    ])
    sap_info = ns_db.get_ns_sap_info(nsId)
    if len(sap_info) > 0:
        log_queue.put(["INFO", "sapInfo: %s" % (sap_info)])
        monitoring.configure_ns_monitoring(nsId, nsd_json, vnfds_json,
                                           sap_info)
        log_queue.put([
            "INFO",
            "*****Time measure for nsId: %s: SOEc SOEc finished configuring monitoring/dashboard"
            % (nsId)
        ])
        log_queue.put([
            "INFO",
            "instantiate_ns monitoring exporters created for nsId %s" % (nsId)
        ])
        # initiate alerts
        alert_configure.configure_ns_alerts(nsId, nsdId, nsd_json, vnfds_json,
                                            sap_info)
        log_queue.put([
            "INFO",
            "*****Time measure for nsId: %s: SOEc SOEc finished configuring Threshold-based alerts"
            % (nsId)
        ])
        # initiate aiml work for "scaling" problem
        alert_configure.configure_ns_aiml_scale_work(nsId, nsdId, nsd_json,
                                                     vnfds_json, sap_info)
        log_queue.put([
            "INFO",
            "*****Time measure for nsId: %s: SOEc SOEc finished configuring AIML alerts"
            % (nsId)
        ])
    log_queue.put([
        "INFO",
        "*****Time measure for nsId: %s: SOEc SOEc finished instantiating a NS"
        % (nsId)
    ])
    notification_db.create_notification_record({
        "nsId": nsId,
        "type": "fa-send",
        "text": nsId + " INSTANTIATED",
        "time": datetime.now().strftime("%d/%m/%Y %H:%M:%S.%f")
    })
def scale_ns_process(nsId, body, nestedInfo=None):
    """
    Scale the network service instance "nsId" as requested in body, then
    refresh its monitoring jobs and AIML scaling work according to the
    outcome, and record a "SCALED" notification.

    Parameters
    ----------
    nsId: string
        Identifier of the Network Service Instance.
    body: request body including scaling operation
    nestedInfo: dict (optional)
        Present when scaling a nested service: maps the nested nsdId to its
        [deployment flavour, instantiation level].

    Returns
    -------
    None
    """
    log_queue.put([
        "INFO",
        "*****Time measure for nsId: %s: SOEc SOEc scaling a nested/regular NS"
        % nsId
    ])
    log_queue.put(
        ["INFO", "scale_ns_process with nsId %s, body %s" % (nsId, body)])
    # determine the descriptor id and the current df/il, either from the
    # nested-service info or from the instance database
    if nestedInfo:
        nsdId = next(iter(nestedInfo))
        current_df = nestedInfo[nsdId][0]
        current_il = nestedInfo[nsdId][1]
    else:
        nsdId = ns_db.get_nsdId(nsId)
        current_df = ns_db.get_ns_df(nsId)
        current_il = ns_db.get_ns_il(nsId)
    # load the service descriptor and every referenced VNF descriptor
    nsd_json = nsd_db.get_nsd_json(nsdId, None)
    vnfds_json = {
        vnfd_id: vnfd_db.get_vnfd_json(vnfd_id, None)
        for vnfd_id in nsd_json["nsd"]["vnfdId"]
    }
    # snapshot of the SAP info taken before asking the RO to scale
    pre_scaling_sap_info = ns_db.get_ns_sap_info(nsId)
    log_queue.put([
        "INFO",
        "*****Time measure for nsId: %s: SOEc SOEc-ROE prepared info for scaling"
        % (nsId)
    ])
    rooe.scale_ns(nsId, nsd_json, vnfds_json, body, current_df, current_il,
                  nestedInfo)
    log_queue.put([
        "INFO",
        "*****Time measure for nsId: %s: SOEc SOEc-ROE updated DBs scaling a NS"
        % (nsId)
    ])
    # check the scaling outcome: it may not have happened, e.g. for lack of
    # resources
    operationId = operation_db.get_operationId(nsId, "INSTANTIATION")
    scaling_succeeded = (
        operation_db.get_operation_status(operationId) == "SUCCESSFULLY_DONE"
        and ns_db.get_ns_status(nsId) == "INSTANTIATED")
    if scaling_succeeded:
        # refresh the monitoring jobs: new performance monitoring jobs are
        # assumed to be similar to ones already present
        sap_info = ns_db.get_ns_sap_info(nsId)
        log_queue.put(["INFO", "new sapInfo after scaling: %s" % (sap_info)])
        monitoring.update_ns_monitoring(nsId, nsd_json, vnfds_json, sap_info)
        log_queue.put([
            "INFO",
            "*****Time measure for nsId: %s: SOEc SOEc updated monitoring info"
            % nsId
        ])
        log_queue.put([
            "DEBUG",
            "monitoring exporters updated after scaling for nsId %s" % (nsId)
        ])
        # threshold alerts need no update; the AIML spark job is restarted
        # below when applicable
    else:
        if ns_db.get_ns_status(nsId) == "INSTANTIATED":
            log_queue.put(
                ["DEBUG", "Scaling operation failed due to lack of resources"])
        elif ns_db.get_ns_status(nsId) == "FAILED":
            log_queue.put(
                ["DEBUG", "Scaling operation failed at the MANO platform"])
    # restart the AIML spark job whenever the service uses AIML-driven
    # scaling and is still instantiated
    aiml_scaling_info = ns_db.get_aiml_info(nsId, "scaling")
    if aiml_scaling_info and ns_db.get_ns_status(nsId) == "INSTANTIATED":
        log_queue.put(
            ["DEBUG", "The AIML platform is triggering the scaling operation"])
        alert_configure.update_ns_aiml_scale_work(nsId, aiml_scaling_info)
        log_queue.put([
            "INFO",
            "*****Time measure for nsId: %s: SOEc SOEc updated AIML alert job"
            % nsId
        ])
    log_queue.put(["INFO", "scale_ns_process finished for nsId %s" % (nsId)])
    log_queue.put([
        "INFO",
        "*****Time measure for nsId: %s: SOEc SOEc finished scaling a nested/regular NS"
        % (nsId)
    ])
    notification_db.create_notification_record({
        "nsId": nsId,
        "type": "fa-gears",
        "text": nsId + " SCALED",
        "time": datetime.now().strftime("%d/%m/%Y %H:%M:%S.%f")
    })