def ns_view(ns_id):
    """
    Render the topology page for the NS instance *ns_id*.

    Looks up the NSD backing the instance, converts it to the OSM-style
    representation and builds the d3 graph for either the 'local' or the
    'Composite' descriptor domain.  Any other domain flashes an error and
    redirects back to the referring page.
    """
    # get the nsdId that corresponds to ns_id
    descriptor_id = ns_db.get_nsdId(ns_id)
    descriptor_domain = nsd_db.get_nsd_domain(descriptor_id)
    descriptor_json = nsd_db.get_nsd_json(descriptor_id)
    flavour = ns_db.get_ns_flavour_id(ns_id)
    inst_level = ns_db.get_ns_instantiation_level_id(ns_id)
    # converted descriptors are keyed by "<nsdId>_<df>_<il>"
    target_level = "{}_{}_{}".format(descriptor_id, flavour, inst_level)
    graph = {}
    if descriptor_domain == 'local':
        template_name = 'ns_view.html'
        placement = nsir_db.get_placement_info(ns_id)
        converted, _ = gui_utils.ifa014_conversion(descriptor_json)
        for candidate in converted:
            if candidate['nsd:nsd-catalog']['nsd'][0]['id'] == target_level:
                graph = gui_utils.json_network_nsd(candidate, placement)
    elif descriptor_domain == 'Composite':
        template_name = 'ns_composite_view.html'
        converted, _ = gui_utils.composite_desc_conversion(descriptor_json)
        for candidate in converted:
            if candidate['nsd:nsd-catalog']['nsd-composite'][0]['id'] == target_level:
                graph = gui_utils.json_network_composite_ns(candidate, ns_id)
    else:
        error_text = 'Error: Something Wrong with domain of Descriptor'
        log_queue.put(["ERROR", error_text])
        flash(error_text, 'danger')
        return redirect(request.referrer)
    return render_template(template_name,
                           html_title=ns_db.get_ns_name(ns_id),
                           d3_network=json_graph.node_link_data(graph))
def instantiate_ns_process(nsId, body, nestedInfo=None):
    """
    Instantiate the Network Service instance "nsId".

    Collects the NS and VNF descriptors, hands the deployment over to the
    resource orchestration engine (rooe) and, when the service exposes
    SAPs, sets up monitoring exporters and alerts for it.

    Parameters
    ----------
    nsId: string
        Identifier of the service.
    body: struct
        Object having the deployment flavour and the instantiation level.
    nestedInfo: dict, optional
        Info of the nested service to be instantiated; only available
        when composing services.

    Returns
    -------
    None
    """
    log_queue.put(["INFO", "*****Time measure: SOEc SOEc instantiating a NS"])
    log_queue.put([
        "INFO",
        "SOEc instantiate_ns_process with nsId %s, body %s" % (nsId, body)
    ])
    # resolve the descriptor backing this instance; when composing, the
    # nested descriptor id takes precedence
    nsdId = ns_db.get_nsdId(nsId)
    if nestedInfo:
        nsdId = next(iter(nestedInfo))
    nsd_json = nsd_db.get_nsd_json(nsdId, None)
    # one VNF descriptor per vnfdId referenced by the NSD
    vnfds_json = {}
    for vnf_id in nsd_json["nsd"]["vnfdId"]:
        log_queue.put(["DEBUG", vnf_id])
        vnfds_json[vnf_id] = vnfd_db.get_vnfd_json(vnf_id, None)
    # delegate the actual deployment to the RO engine
    rooe.instantiate_ns(nsId, nsd_json, vnfds_json, body, nestedInfo)
    log_queue.put(
        ["INFO", "*****Time measure: SOEc updated databases instantiating"])
    sap_info = ns_db.get_ns_sap_info(nsId)
    if sap_info:
        log_queue.put(["INFO", "sapInfo: %s" % (sap_info)])
        monitoring.configure_ns_monitoring(nsId, nsd_json, vnfds_json, sap_info)
        log_queue.put([
            "INFO",
            "instantiate_ns monitoring exporters created for nsId %s" % (nsId)
        ])
        # initiate alerts
        alert_configure.configure_ns_alerts(nsId, nsdId, nsd_json, vnfds_json,
                                            sap_info)
    log_queue.put([
        "INFO",
        "*****Time measure: SOEc created monitoring exporters and alerts"
    ])
    log_queue.put([
        "INFO",
        "*****Time measure: SOEc instantiate_ns_process finished for nsId %s" % (nsId)
    ])
def query_ns(nsId):
    """
    Return the information of the Network Service Instance identified by nsId.

    Parameters
    ----------
    nsId: string
        Identifier of the NS instance to query.

    Returns
    -------
    dict
        Information of the Network Service Instance, or the integer 404
        when no instance with that identifier exists.
    """
    if not ns_db.exists_nsId(nsId):
        # TODO create errors
        return 404
    # TODO: lines below are a stub
    # map the internal status onto the externally visible state
    status = ns_db.get_ns_status(nsId)
    vs_status = "FAILED"
    if status in ["TERMINATED", "INSTANTIATING", "TERMINATING"]:
        vs_status = "NOT_INSTANTIATED"
    elif status == "INSTANTIATED":
        vs_status = "INSTANTIATED"
    nsd_id = ns_db.get_nsdId(nsId)
    nsd_json = nsd_db.get_nsd_json(nsd_id)
    info = {
        "nsInstanceId": nsId,
        "nsName": ns_db.get_ns_name(nsId),
        "description": ns_db.get_ns_description(nsId),
        "nsdId": nsd_id,
        "flavourId": ns_db.get_ns_flavour_id(nsId),
        "nsState": vs_status,
    }
    # a non-instantiated service exposes no SAPs or monitoring info
    # (fix: removed a leftover debug log line ["INFO", "hola"] here)
    if vs_status == "NOT_INSTANTIATED":
        info["sapInfo"] = []
        return info
    if "sapd" in nsd_json["nsd"]:
        total_sap_info = get_ns_sap_info(nsId, nsd_json["nsd"]["sapd"])
        if total_sap_info is not None:
            info["sapInfo"] = total_sap_info
    dashboard_info = ns_db.get_dashboard_info(nsId)
    if "dashboardUrl" in dashboard_info:
        info["monitoringDashboardUrl"] = dashboard_info["dashboardUrl"]
    log_queue.put(
        ["INFO", "query_result: %s" % dumps(info, indent=4, sort_keys=True)])
    return info
def get_blueprint_id(self, nsi_id, body):
    """
    Return the Cloudify blueprint id associated with an NS instance.

    NOTE(review): the previous docstring ("Checks if a deployment has
    pending workflows ... Returns Boolean") was a copy-paste error; the
    method actually maps the instance to its NSD and then looks up the
    blueprint id registered for that NSD.

    Parameters
    ----------
    nsi_id: string
        Identifier of the network service instance used as deployment id.
    body:
        Unused by this method (kept for interface compatibility).

    Returns
    -------
    bp_id
        The blueprint identifier stored for the instance's NSD
        (presumably a string — confirm against get_nsd_cloudify_id).
    """
    nsd_id = get_nsdId(nsi_id)
    bp_id = get_nsd_cloudify_id(nsd_id)
    log_queue.put(
        ["DEBUG", "CLOUDIFY_WRAPPER: get_blueprint_id nsi_id:%s nsd_id:%s bp_id:%s"
            % (nsi_id, nsd_id, bp_id)])
    return bp_id
def scale_ns_process(nsId, body):
    """
    Scale the Network Service instance "nsId" as requested in *body*.

    Parameters
    ----------
    nsId: string
        Identifier of the Network Service Instance.
    body: struct
        Request body describing the scaling operation.

    Returns
    -------
    None
    """
    log_queue.put(
        ["INFO", "scale_ns_process with nsId %s, body %s" % (nsId, body)])
    # resolve the descriptor and the currently deployed flavour/level
    descriptor_id = ns_db.get_nsdId(nsId)
    deployed_df = ns_db.get_ns_df(nsId)
    deployed_il = ns_db.get_ns_il(nsId)
    nsd_json = nsd_db.get_nsd_json(descriptor_id, None)
    # fetch every VNF descriptor referenced by the NSD
    vnfds_json = {
        vnf_id: vnfd_db.get_vnfd_json(vnf_id, None)
        for vnf_id in nsd_json["nsd"]["vnfdId"]
    }
    # snapshot of the SAPs before delegating the scaling to the RO
    sap_info_pre_scaling = ns_db.get_ns_sap_info(nsId)
    rooe.scale_ns(nsId, nsd_json, vnfds_json, body, deployed_df, deployed_il)
    # monitoring jobs may need refreshing: new performance monitoring jobs
    # are assumed to be similar to the ones already present
    sap_info = ns_db.get_ns_sap_info(nsId)
    log_queue.put(["INFO", "new sapInfo after scaling: %s" % (sap_info)])
    monitoring.update_ns_monitoring(nsId, nsd_json, vnfds_json, sap_info)
    log_queue.put([
        "DEBUG",
        "monitoring exporters updated after scaling for nsId %s" % (nsId)
    ])
    # update alerts: it is not needed
    log_queue.put(["INFO", "scale_ns_process finished for nsId %s" % (nsId)])
def query_ns(nsId):
    """
    Return the information of the Network Service Instance identified by nsId.

    Parameters
    ----------
    nsId: string
        Identifier of the NS instance to query.

    Returns
    -------
    dict
        {"queryNsResult": [info]} with the instance information, or the
        integer 404 when no instance with that identifier exists.
    """
    # unknown instance: keep the legacy integer error code
    if not ns_db.exists_nsId(nsId):
        return 404
    descriptor_id = ns_db.get_nsdId(nsId)
    descriptor_json = nsd_db.get_nsd_json(descriptor_id)
    info = {
        "nsInstanceId": nsId,
        "nsName": ns_db.get_ns_name(nsId),
        "description": ns_db.get_ns_description(nsId),
        "nsdId": descriptor_id,
        "flavourId": ns_db.get_ns_flavour_id(nsId),
        "nsState": ns_db.get_ns_status(nsId),
    }
    # SAP information is only present when the NSD declares SAP descriptors
    if "sapd" in descriptor_json["nsd"]:
        info["sapInfo"] = get_ns_sap_info(nsId, descriptor_json["nsd"]["sapd"])
    query_result = {"queryNsResult": [info]}
    log_queue.put([
        "DEBUG",
        "query_result: %s" % dumps(query_result, indent=4, sort_keys=True)
    ])
    return query_result
def instantiate_ns_process(nsId, body):
    """
    Instantiate the Network Service instance "nsId".

    Retrieves the NS descriptor (version "0.2") and the descriptors of
    every constituent VNF, then delegates the actual instantiation to the
    resource orchestration engine (rooe).

    Parameters
    ----------
    nsId: string
        Identifier of the Network Service Instance.
    body: struct
        Object having the deployment flavour and the instantiation level.

    Returns
    -------
    None
    """
    # fix: the original logged this same DEBUG line twice and initialized
    # vnfds_json twice; both redundancies removed
    log_queue.put([
        "DEBUG",
        "instantiate_ns_process with nsId %s, body %s" % (nsId, body)
    ])
    # get the nsdId that corresponds to nsId
    nsdId = ns_db.get_nsdId(nsId)
    # first get the ns and vnfs descriptors
    nsd_json = nsd_db.get_nsd_json(nsdId, "0.2")
    log_queue.put(["DEBUG", "NSD:"])
    log_queue.put(["DEBUG", dumps(nsd_json, indent=4)])
    # for each vnf in the NSD, get its json descriptor
    vnfds_json = {}
    for vnfdId in nsd_json["nsd"]["vnfdId"]:
        log_queue.put(["DEBUG", vnfdId])
        vnfds_json[vnfdId] = vnfd_db.get_vnfd_json(vnfdId, None)
    log_queue.put(["DEBUG", "VNFDs:"])
    log_queue.put(["DEBUG", dumps(vnfds_json, indent=4)])
    # request RO
    rooe.instantiate_ns(nsId, nsd_json, vnfds_json, body)
def json_network_composite_ns(descriptor, ns_id=None):
    """
    Return the node-link graph of a composite NS instance as an nx.Graph.

    :param descriptor: yaml object representing the NS composite descriptor
    :param ns_id: ns_id (useful to visualize the NSD instance)
    :return: nx.Graph; the caller serializes it (e.g. json_graph.node_link_data)
    """
    # retrieve the list of nested ns id for placement info
    list_of_nested_ns_id = []
    ns_table_entry = ns_db.get_ns_record(ns_id)
    if 'nestedNsId' in ns_table_entry:
        # ASSUMPTION: ONLY ONE NSID IN THIS COLUMN
        list_of_nested_ns_id.append({
            "nsd_id": ns_db.get_nsdId(ns_table_entry['nestedNsId']),
            "ns_id": ns_table_entry['nestedNsId'],
            # ASSUMPTION: instantiate a composite service from a "local" nested service
            "federated_domain": "local"
        })
    if 'nested_service_info' in ns_table_entry:
        for nested_id in ns_table_entry['nested_service_info']:
            list_of_nested_ns_id.append({
                "nsd_id": nested_id['nested_id'],
                "ns_id": nested_id['nested_instance_id'],
                "federated_domain": nested_id['domain']
            })
    else:
        # TODO intrinsic issue with composite, review the dbs
        pass
    nsd_response = descriptor['nsd:nsd-catalog']['nsd-composite'][0]
    # create a nx.Graph
    g = nx.Graph()
    # dealing with the 'constituent-nsd'
    nsd_array = nsd_response['constituent-nsd']
    # for each constituent-nsd of composite remapping of vl names (if necessary) and create the graph
    for nsd_nested_item in nsd_array:
        ns_ir_net_map = nsir_db.get_network_mapping(ns_id)
        ns_ir_rename_map = nsir_db.get_renaming_network_mapping(ns_id)
        # network remapping part: build old-name -> new-name for this nested NSD's VLs
        mapping = {}
        if bool(ns_ir_net_map):
            # case of composite NOT from scratch: translate through the rename map
            if bool(ns_ir_rename_map):
                for vl in ns_ir_net_map['nestedVirtualLinkConnectivity'][
                        nsd_nested_item['nested-nsd-id']]:
                    for key_vl, value_vl in vl.items():
                        for key_rename, value_rename in ns_ir_rename_map.items():
                            if value_vl == value_rename:
                                mapping[key_vl] = key_rename
            # case of composite from scratch: use the connectivity map directly
            else:
                for vl in ns_ir_net_map['nestedVirtualLinkConnectivity'][
                        nsd_nested_item['nested-nsd-id']]:
                    for key_vl, value_vl in vl.items():
                        mapping[key_vl] = value_vl
        else:
            # TODO evaluate this possibility, maybe an intrinsic issue of dbs
            # NOTE(review): leftover debug print below — consider removing or
            # routing through log_queue
            print("TODO")
            pass
        nsd_json = nsd_db.get_nsd_json(nsd_nested_item['nested-nsd-id'])
        # domain hosting this nested NS, looked up in the list built above
        domain_federation = next(
            item['federated_domain'] for item in list_of_nested_ns_id
            if item.get("nsd_id") == nsd_nested_item['nested-nsd-id'])
        if isinstance(domain_federation, dict):
            domain_federation = next(iter(domain_federation))
        # find df and instantiation level of composite nsd; converted
        # descriptors are keyed by "<nsdId>_<df>_<il>"
        level = "{}_{}_{}".format(nsd_nested_item['nested-nsd-id'],
                                  nsd_nested_item['nested-ns-df-id'],
                                  nsd_nested_item['nested-ns-inst-level-id'])
        # TODO verify that level should be the same in below case
        # level = nsd_nested_item['nsd-id-ref']
        ns_network = {}
        # retrieve the json of composite nsd (with correct df and instantiation level)
        list_osm_json, default_index = ifa014_conversion(nsd_json)
        for element in list_osm_json:
            if element['nsd:nsd-catalog']['nsd'][0]['id'] == level:
                ns_network = element
        # creating graph of nested NS
        nsd_response = ns_network['nsd:nsd-catalog']['nsd'][0]
        # renaming involved networks according to the mapping built above
        for vld_item in nsd_response['vld']:
            vld_name = vld_item['name']
            if vld_item['name'] in mapping:
                vld_item['short-name'] = mapping[vld_name]
                vld_item['id'] = mapping[vld_name]
                vld_item['name'] = mapping[vld_name]
        # dealing with the 'constituent-vnfd': one graph node per VNFD
        vnfd_array = nsd_response['constituent-vnfd']
        for nsd_item in vnfd_array:
            # TODO verify the correct behaviour among the two following lines code
            current_node_id = g.number_of_nodes()
            # current_node_id = selected_node(g, nsd_item['vnfd-id-ref'])
            # add a node in the nx.Graph for every vnfd
            g.add_node(current_node_id,
                       features="VNFD: {}".format(nsd_item['vnfd-id-ref']),
                       ref_node_id=nsd_item['member-vnf-index'],
                       name=nsd_item['vnfd-id-ref'],
                       type="VNFD",
                       shape="circle",
                       nested=[nsd_nested_item['nested-nsd-id']],
                       federation=[str(domain_federation)],
                       group=1)
        # dealing with the 'vld': one graph node per VLD, linked to its VNFDs
        vld_array = nsd_response['vld']
        for vld_item in vld_array:
            vld_connections_array = vld_item['vnfd-connection-point-ref']
            # current_node_id = g.number_of_nodes()
            # reuse the node when a VLD with this name was already added
            # (VLs shared between nested services)
            current_node_id = selected_node(g, vld_item['name'])
            # add a node in the nx.Graph for every vld
            g.add_node(
                current_node_id,
                features="VLD: {}".format(vld_item['name']),
                name=vld_item['name'],
                type="VLD",
                shape="rect",
                # nested=[nsd_nested_item['nested-nsd-id']],
                # federation=str(domain_federation),
                group=2)
            # nested parameter is a list of shared components
            if 'nested' in g.nodes[current_node_id]:
                g.nodes[current_node_id]['nested'].append(
                    nsd_nested_item['nested-nsd-id'])
            else:
                g.nodes[current_node_id]['nested'] = [
                    nsd_nested_item['nested-nsd-id']
                ]
            # federation parameter is a list of shared components
            if 'federation' in g.nodes[current_node_id]:
                g.nodes[current_node_id]['federation'].append(
                    str(domain_federation))
            else:
                g.nodes[current_node_id]['federation'] = [
                    str(domain_federation)
                ]
            list_ids = list()
            # dealing with the corresponding links between different elements
            for vld_connection_item in vld_connections_array:
                list_ids.append((vld_connection_item['member-vnf-index-ref'],
                                 vld_connection_item['vnfd-id-ref']))
            # connect this VLD node to every VNFD node it references
            for element in list_ids:
                for nodes in g:
                    if g.nodes[nodes]['group'] == 1:
                        if element[0] == g.nodes[nodes][
                                'ref_node_id'] and element[1] == g.nodes[
                                    nodes]['name']:
                            # add a link
                            g.add_edge(nodes,
                                       selected_node(g, vld_item['name']))
        # adding placement info for the nested instance backing this NSD
        id_for_placement = ''
        for nested_ns_id in list_of_nested_ns_id:
            if nested_ns_id['nsd_id'] == nsd_nested_item['nested-nsd-id']:
                id_for_placement = nested_ns_id['ns_id']
        if nsir_db.exists_nsir(id_for_placement):
            placement_info = nsir_db.get_placement_info(id_for_placement)
            # update the name of vl in common for composite for placement algorithm
            for used_vls in placement_info['usedVLs']:
                for i, mapped_vl in enumerate(used_vls['mappedVLs']):
                    if mapped_vl in mapping:
                        used_vls['mappedVLs'][i] = mapping[mapped_vl]
            if placement_info:
                g = add_placement(g, placement_info)
    # d = json_graph.node_link_data(g)  # node-link format to serialize
    # print(d)
    # return d
    return g
def instantiate_ns_process(nsId, body, nestedInfo=None):
    """
    Instantiate the Network Service instance "nsId".

    Parameters
    ----------
    nsId: string
        Identifier of the service.
    body: struct
        Object having the deployment flavour and the instantiation level.
    nestedInfo: dict, optional
        Info of the nested service to be instantiated, only available
        when composing services; its first key is the nested NSD id.

    Returns
    -------
    None
    """
    log_queue.put([
        "INFO",
        "*****Time measure for nsId: %s: SOEc SOEc instantiating a NS" % (nsId)
    ])
    log_queue.put([
        "INFO",
        "SOEc instantiate_ns_process with nsId %s, body %s" % (nsId, body)
    ])
    # get the nsdId that corresponds to nsId
    nsdId = ns_db.get_nsdId(nsId)
    if nestedInfo:
        # when composing, the nested descriptor id overrides the DB one
        nsdId = next(iter(nestedInfo))
    # first get the ns and vnfs descriptors
    nsd_json = nsd_db.get_nsd_json(nsdId, None)
    vnfds_json = {}
    # for each vnf in the NSD, get its json descriptor
    vnfdIds = nsd_json["nsd"]["vnfdId"]
    vnfds_json = {}
    for vnfdId in vnfdIds:
        log_queue.put(["DEBUG", vnfdId])
        vnfds_json[vnfdId] = vnfd_db.get_vnfd_json(vnfdId, None)
    # request RO
    log_queue.put([
        "INFO",
        "*****Time measure for nsId: %s: SOEc SOEc retreiving descriptors for a NS" % (nsId)
    ])
    rooe.instantiate_ns(nsId, nsd_json, vnfds_json, body, nestedInfo)
    log_queue.put([
        "INFO",
        "*****Time measure for nsId: %s: SOEc SOEc-ROE updated DBs instantiating a NS" % (nsId)
    ])
    # monitoring/alerts are only configured when the service exposes SAPs
    sap_info = ns_db.get_ns_sap_info(nsId)
    if (len(sap_info) > 0):
        log_queue.put(["INFO", "sapInfo: %s" % (sap_info)])
        monitoring.configure_ns_monitoring(nsId, nsd_json, vnfds_json, sap_info)
        log_queue.put([
            "INFO",
            "*****Time measure for nsId: %s: SOEc SOEc finished configuring monitoring/dashboard" % (nsId)
        ])
        log_queue.put([
            "INFO",
            "instantiate_ns monitoring exporters created for nsId %s" % (nsId)
        ])
        # initiate alerts
        alert_configure.configure_ns_alerts(nsId, nsdId, nsd_json, vnfds_json,
                                            sap_info)
        log_queue.put([
            "INFO",
            "*****Time measure for nsId: %s: SOEc SOEc finished configuring Threshold-based alerts" % (nsId)
        ])
    # initiate aiml work for "scaling" problem
    # NOTE(review): block nesting reconstructed from collapsed source —
    # confirm whether this call belongs inside the sap_info guard
    alert_configure.configure_ns_aiml_scale_work(nsId, nsdId, nsd_json,
                                                 vnfds_json, sap_info)
    log_queue.put([
        "INFO",
        "*****Time measure for nsId: %s: SOEc SOEc finished configuring AIML alerts" % (nsId)
    ])
    log_queue.put([
        "INFO",
        "*****Time measure for nsId: %s: SOEc SOEc finished instantiating a NS" % (nsId)
    ])
    # record a GUI notification for the completed instantiation
    notification_db.create_notification_record({
        "nsId": nsId,
        "type": "fa-send",
        "text": nsId + " INSTANTIATED",
        "time": datetime.now().strftime("%d/%m/%Y %H:%M:%S.%f")
    })
def scale_ns_process(nsId, body, nestedInfo=None):
    """
    Performs the scaling of the service identified by "nsId" according to
    the info at body.

    Parameters
    ----------
    nsId: string
        Identifier of the Network Service Instance.
    body: struct
        Request body including the scaling operation.
    nestedInfo: dict, optional
        Nested-service info when scaling within a composite; maps the
        nested NSD id to its [deployment flavour, instantiation level].

    Returns
    -------
    None
    """
    log_queue.put([
        "INFO",
        "*****Time measure for nsId: %s: SOEc SOEc scaling a nested/regular NS"
        % nsId
    ])
    log_queue.put(
        ["INFO", "scale_ns_process with nsId %s, body %s" % (nsId, body)])
    # get the nsdId that corresponds to nsId
    if nestedInfo:
        # composite case: df/il come from the nested info, not the DB
        nsdId = next(iter(nestedInfo))
        current_df = nestedInfo[nsdId][0]
        current_il = nestedInfo[nsdId][1]
    else:
        nsdId = ns_db.get_nsdId(nsId)
        # get current instantiation level
        current_df = ns_db.get_ns_df(nsId)
        current_il = ns_db.get_ns_il(nsId)
    # first get the ns and vnfs descriptors
    nsd_json = nsd_db.get_nsd_json(nsdId, None)
    # for each vnf in the NSD, get its json descriptor
    vnfdIds = nsd_json["nsd"]["vnfdId"]
    vnfds_json = {}
    for vnfdId in vnfdIds:
        vnfds_json[vnfdId] = vnfd_db.get_vnfd_json(vnfdId, None)
    # request RO
    sap_info_pre_scaling = ns_db.get_ns_sap_info(nsId)
    log_queue.put([
        "INFO",
        "*****Time measure for nsId: %s: SOEc SOEc-ROE prepared info for scaling"
        % (nsId)
    ])
    rooe.scale_ns(nsId, nsd_json, vnfds_json, body, current_df, current_il,
                  nestedInfo)
    log_queue.put([
        "INFO",
        "*****Time measure for nsId: %s: SOEc SOEc-ROE updated DBs scaling a NS"
        % (nsId)
    ])
    # checks the result of scaling, maybe it has not be done due to lack of resources
    operationId = operation_db.get_operationId(nsId, "INSTANTIATION")
    if ((operation_db.get_operation_status(operationId) == "SUCCESSFULLY_DONE")
            and ns_db.get_ns_status(nsId) == "INSTANTIATED"):
        # maybe we have to update the monitoring jobs: we assume that new
        # performance monitoring jobs will be similar to one already present
        sap_info = ns_db.get_ns_sap_info(nsId)
        log_queue.put(["INFO", "new sapInfo after scaling: %s" % (sap_info)])
        monitoring.update_ns_monitoring(nsId, nsd_json, vnfds_json, sap_info)
        log_queue.put([
            "INFO",
            "*****Time measure for nsId: %s: SOEc SOEc updated monitoring info"
            % nsId
        ])
        log_queue.put([
            "DEBUG",
            "monitoring exporters updated after scaling for nsId %s" % (nsId)
        ])
        # update alerts: it is not needed
        # however, in the case of aiml_scaling it is needed, to restart the spark job
    else:
        # scaling did not complete: distinguish lack of resources from a
        # MANO-platform failure via the instance status
        if ns_db.get_ns_status(nsId) == "INSTANTIATED":
            log_queue.put(
                ["DEBUG", "Scaling operation failed due to lack of resources"])
        elif ns_db.get_ns_status(nsId) == "FAILED":
            log_queue.put(
                ["DEBUG", "Scaling operation failed at the MANO platform"])
    # restart the AIML scaling job when one is configured and the service
    # is still instantiated
    aiml_scaling_info = ns_db.get_aiml_info(nsId, "scaling")
    if (aiml_scaling_info and (ns_db.get_ns_status(nsId) == "INSTANTIATED")):
        log_queue.put(
            ["DEBUG", "The AIML platform is triggering the scaling operation"])
        alert_configure.update_ns_aiml_scale_work(nsId, aiml_scaling_info)
        log_queue.put([
            "INFO",
            "*****Time measure for nsId: %s: SOEc SOEc updated AIML alert job"
            % nsId
        ])
    log_queue.put(["INFO", "scale_ns_process finished for nsId %s" % (nsId)])
    log_queue.put([
        "INFO",
        "*****Time measure for nsId: %s: SOEc SOEc finished scaling a nested/regular NS"
        % (nsId)
    ])
    # record a GUI notification for the completed scaling
    notification_db.create_notification_record({
        "nsId": nsId,
        "type": "fa-gears",
        "text": nsId + " SCALED",
        "time": datetime.now().strftime("%d/%m/%Y %H:%M:%S.%f")
    })