def delete(self, uuid):
    """
    Delete a specific wim.
    used by: `katana wim rm [uuid]`
    """
    wim = mongoUtils.get("wim", uuid)
    if not wim:
        # if uuid is not found, return error
        return "Error: No such wim: {}".format(uuid), 404
    if wim["slices"]:
        return "Cannot delete wim {} - In use".format(uuid), 400
    # Remove both the WIM record and its pickled object
    mongoUtils.delete("wim_obj", uuid)
    mongoUtils.delete("wim", uuid)
    try:
        monitoring_url = wim["monitoring-url"]
    except KeyError:
        pass
    else:
        # Drop the WIM entry from the Prometheus targets file; a missing
        # entry (ValueError from list.remove) is silently ignored.
        try:
            with open("/targets/wim_targets.json", mode="r") as prom_file:
                prom = json.load(prom_file)
                prom.remove({"targets": [monitoring_url], "labels": {"wim_id": wim["id"]}})
            with open("/targets/wim_targets.json", mode="w") as prom_file:
                json.dump(prom, prom_file)
        except ValueError:
            pass
    return "Deleted WIM {}".format(uuid), 200
def put(self, uuid):
    """
    Add or update a new supported network function.
    The request must provide the service details.
    used by: `katana function update -f [yaml file]`

    Returns a (message, status) tuple: 200 on update, 201 on create,
    400 on validation/in-use errors.
    """
    data = request.json
    data['_id'] = uuid
    old_data = mongoUtils.get("func", uuid)
    if old_data:
        # BUG FIX: the original reset data["tenants"] to [] and then checked
        # len(data["tenants"]) > 0, which could never trigger. The in-use
        # check must look at the stored record's tenants, and the error must
        # carry an HTTP status like the other handlers.
        if len(old_data["tenants"]) > 0:
            return f"Error: Func is used by slices {old_data['tenants']}", 400
        data["created_at"] = old_data["created_at"]
        data["tenants"] = []
        mongoUtils.update("func", uuid, data)
        return f"Modified {uuid}", 200
    else:
        new_uuid = uuid
        data = request.json
        data['_id'] = new_uuid
        data['created_at'] = time.time()  # unix epoch
        data["tenants"] = []
        # Reject the request if any required field is missing
        for field in self.req_fields:
            try:
                _ = data[field]
            except KeyError:
                return f"Error: Required fields: {self.req_fields}", 400
        try:
            new_uuid = mongoUtils.add('func', data)
        except pymongo.errors.DuplicateKeyError:
            return f"Function with id {data['id']} already exists", 400
        return f"Created {new_uuid}", 201
def delete(self, uuid):
    """
    Delete a specific slice.
    used by: `katana slice rm [uuid]`
    """
    # Check if slice uuid exists
    delete_json = mongoUtils.get("slice", uuid)
    # Honour only an explicit ?force=true; anything else means no force
    requested_force = request.args.get("force")
    force = requested_force if requested_force == "true" else None
    if not delete_json:
        return "Error: No such slice: {}".format(uuid), 404
    # Send the message to katana-mngr
    producer = kafkaUtils.create_producer()
    slice_message = {"action": "delete", "message": uuid, "force": force}
    producer.send("slice", value=slice_message)
    return "Deleting {0}".format(uuid), 200
def put(self, uuid):
    """
    Update a registered platform location
    used by: `katana location update [uuid] -f [file]`

    Returns (message, status): 200 on update, 201 on create, 400 on error.
    """
    # Validate all required fields first; the original returned without an
    # HTTP status code and re-lowercased the id on every loop iteration.
    for ifield in self.req_fields:
        if not request.json.get(ifield, None):
            return f"Field {ifield} is missing", 400
    # Lowercase the location id once, after validation
    request.json["id"] = request.json["id"].lower()
    data = request.json
    data["_id"] = uuid
    old_data = mongoUtils.get("location", uuid)
    if old_data:
        # Refuse to touch a location that other components still reference
        if old_data["vims"] or old_data["functions"]:
            return (
                f"Location {data['_id']} is in use by another component, cannot update it",
                400,
            )
        data["created_at"] = old_data["created_at"]
        data["vims"] = []
        data["functions"] = []
        mongoUtils.update("location", uuid, data)
        return f"Modified location {data['id']}", 200
    else:
        data["created_at"] = time.time()  # unix epoch
        data["vims"] = []
        data["functions"] = []
        new_uuid = mongoUtils.add("location", request.json)
        return new_uuid, 201
def show_time(self, uuid):
    """
    Returns deployment time of a slice
    """
    islice = mongoUtils.get("slice", uuid)
    if not islice:
        return "Not Found", 404
    return dumps(islice["deployment_time"]), 200
def neat(self, slice_id):
    """
    Send the slice parameters to the neat UE Policy System
    """
    slice_parameters = mongoUtils.get("slice", slice_id)
    if not slice_parameters:
        return f"Slice with id {slice_id} was not found", 404
    return slice_parameters, 200
def get(self, uuid):
    """
    Returns the details of specific policy management system,
    used by: `katana policy inspect [uuid]`
    """
    # Look the record up and 404 when it is unknown
    data = mongoUtils.get("policy", uuid)
    if not data:
        return "Not Found", 404
    return dumps(data), 200
def get(self, uuid):
    """
    Returns the details of specific EMS,
    used by: `katana ems inspect [uuid]`
    """
    data = mongoUtils.get("ems", uuid)
    if not data:
        return "Not Found", 404
    return dumps(data), 200
def get(self, uuid):
    """
    Returns the details of a specific platform location
    used by: `katana location inspect [uuid]`
    """
    data = mongoUtils.get("location", uuid)
    if not data:
        return f"Location {uuid} not found", 404
    return dumps(data), 200
def get(self, uuid):
    """
    Returns the details of specific Slice Descriptor,
    used by: `katana slice_des inspect [uuid]`
    """
    data = mongoUtils.get("base_slice_des_ref", uuid)
    if not data:
        return "Not Found", 404
    return dumps(data), 200
def get(self, uuid):
    """
    Returns the details of specific slice,
    used by: `katana slice inspect [uuid]`
    """
    data = mongoUtils.get("slice", uuid)
    if not data:
        return "Not Found", 404
    return dumps(data), 200
def put(self, uuid):
    """
    Update the details of a specific wim.
    used by: `katana wim update -f [yaml file] [uuid]`

    If a WIM with this uuid exists, only non-required fields may change
    (required fields are immutable). Otherwise a new WIM is created:
    its driver object is pickled into the "wim_obj" collection and,
    when a "monitoring-url" is supplied, the WIM is registered in the
    Prometheus targets file.
    Returns (message, status): 200 update, 201 create, 400 error.
    """
    data = request.json
    data["_id"] = uuid
    old_data = mongoUtils.get("wim", uuid)
    if old_data:
        # Preserve creation time and current slice assignments
        data["created_at"] = old_data["created_at"]
        data["slices"] = old_data["slices"]
        try:
            # Required fields are immutable once the WIM exists
            for entry in self.req_fields:
                if data[entry] != old_data[entry]:
                    return "Cannot update field: " + entry, 400
        except KeyError:
            return f"Error: Required fields: {self.req_fields}", 400
        else:
            mongoUtils.update("wim", uuid, data)
        return f"Modified {uuid}", 200
    else:
        new_uuid = uuid
        data = request.json
        data["_id"] = new_uuid
        data["created_at"] = time.time()  # unix epoch
        try:
            wim_id = request.json["id"]
            # Instantiate the driver matching the declared WIM type
            if request.json["type"] == "odl-wim":
                wim = odl_wimUtils.Wim(request.json["url"])
            elif request.json["type"] == "test-wim":
                wim = test_wimUtils.Wim(request.json["url"])
            else:
                return "Error: Not supported WIM type", 400
        except KeyError:
            return f"Error: Required fields: {self.req_fields}", 400
        # Pickle the driver object so it can be stored alongside the record
        thebytes = pickle.dumps(wim)
        obj_json = {"_id": new_uuid, "id": data["id"], "obj": Binary(thebytes)}
        data["slices"] = {}
        try:
            new_uuid = mongoUtils.add("wim", data)
        except pymongo.errors.DuplicateKeyError:
            return f"WIM with id {wim_id} already exists", 400
        try:
            monitoring_url = request.json["monitoring-url"]
        except KeyError:
            pass
        else:
            # Append the WIM to the Prometheus scrape-targets file
            with open("/targets/wim_targets.json", mode="r") as prom_file:
                prom = json.load(prom_file)
                prom.append({"targets": [monitoring_url], "labels": {"wim_id": wim_id}})
            with open("/targets/wim_targets.json", mode="w") as prom_file:
                json.dump(prom, prom_file)
        mongoUtils.add("wim_obj", obj_json)
        return f"Created {new_uuid}", 201
def put(self, uuid):
    """
    Update the details of a specific policy engine system.
    used by: `katana policy update [uuid] -f [yaml file]`

    Returns (message, status): 200 update, 201 create, 400 error.
    """
    data = request.json
    data["_id"] = uuid
    old_data = mongoUtils.get("policy", uuid)
    if old_data:
        data["created_at"] = old_data["created_at"]
        try:
            # Required fields are immutable once the record exists
            for entry in self.req_fields:
                if data[entry] != old_data[entry]:
                    return "Cannot update field: " + entry, 400
        except KeyError:
            return f"Error: Required fields: {self.req_fields}", 400
        else:
            mongoUtils.update("policy", uuid, data)
        return f"Modified {uuid}", 200
    else:
        # Create the object and store it in the object collection
        try:
            if request.json["type"] == "test-policy":
                policy = test_policyUtils.Policy(id=request.json["id"], url=request.json["url"])
            elif request.json["type"] == "neat":
                policy = neatUtils.Policy(id=request.json["id"], url=request.json["url"])
            else:
                return "Error: Not supported Policy system type", 400
        except KeyError:
            return f"Error: Required fields: {self.req_fields}", 400
        # BUG FIX: the parameter `uuid` shadows the uuid module, so the
        # original `str(uuid.uuid4())` raised AttributeError. Use the
        # URL-supplied uuid as the new id, as the sibling PUT handlers do.
        new_uuid = uuid
        request.json["_id"] = new_uuid
        request.json["created_at"] = time.time()  # unix epoch
        try:
            new_uuid = mongoUtils.add("policy", request.json)
        except pymongo.errors.DuplicateKeyError:
            return (
                "Policy management system with id {0} already exists".format(request.json["id"]),
                400,
            )
        # Store the policy object to the mongo db
        thebytes = pickle.dumps(policy)
        obj_json = {"_id": new_uuid, "id": request.json["id"], "obj": Binary(thebytes)}
        mongoUtils.add("policy_obj", obj_json)
        return f"Created {new_uuid}", 201
def vim_update():
    """
    Gets the resources of the stored VIMs.

    For each registered openstack VIM, unpickles its stored driver object,
    queries the current resources and writes them back to the "vim" record.
    Non-openstack VIMs are skipped (the original assigned an unused local
    `resources = "N/A"` there — dead code, removed).
    """
    for vim in mongoUtils.find_all("vim"):
        if vim["type"] == "openstack":
            vim_obj = pickle.loads(mongoUtils.get("vim_obj", vim["_id"])["obj"])
            resources = vim_obj.get_resources()
            vim["resources"] = resources
            mongoUtils.update("vim", vim["_id"], vim)
def delete(self, uuid):
    """
    Delete a specific network function.
    used by: `katana function rm [uuid]`

    Returns (message, status): 200 on delete, 400 if in use, 404 if unknown.
    """
    result = mongoUtils.get("func", uuid)
    if result:
        if len(result["tenants"]) > 0:
            # BUG FIX: the original returned this error without an HTTP
            # status code; sibling handlers return 400 for "in use".
            return f"Error: Function is used by slices {result['tenants']}", 400
        mongoUtils.delete("func", uuid)
        return "Deleted Network Function {}".format(uuid), 200
    else:
        # if uuid is not found, return error
        return "Error: No such Network Function: {}".format(uuid), 404
def put(self, uuid):
    """
    Update the details of a specific EMS.
    used by: `katana ems update -f [yaml file] [uuid]`

    Returns (message, status): 200 update, 201 create, 400 error.
    """
    data = request.json
    data["_id"] = uuid
    old_data = mongoUtils.get("ems", uuid)
    if old_data:
        data["created_at"] = old_data["created_at"]
        try:
            # Required fields are immutable once the record exists
            for entry in self.req_fields:
                if data[entry] != old_data[entry]:
                    return "Cannot update field: " + entry, 400
        except KeyError:
            return f"Error: Required fields: {self.req_fields}", 400
        else:
            mongoUtils.update("ems", uuid, data)
        return f"Modified {uuid}", 200
    else:
        # BUG FIX: the original then did `new_uuid = str(uuid.uuid4())`,
        # but `uuid` is the string parameter (shadows the uuid module) and
        # would raise AttributeError; it also made obj_json's _id disagree
        # with data's _id. Use the URL-supplied uuid throughout.
        new_uuid = uuid
        data = request.json
        data["_id"] = new_uuid
        data["created_at"] = time.time()  # unix epoch
        # Create the object and store it in the object collection
        try:
            ems_id = request.json["id"]
            if request.json["type"] == "amarisoft-ems":
                ems = amar_emsUtils.Ems(request.json["url"])
            elif request.json["type"] == "test-ems":
                ems = test_emsUtils.Ems(request.json["url"])
            else:
                return "Error: Not supported EMS type", 400
        except KeyError:
            return f"Error: Required fields: {self.req_fields}", 400
        thebytes = pickle.dumps(ems)
        obj_json = {"_id": new_uuid, "id": data["id"], "obj": Binary(thebytes)}
        try:
            # BUG FIX: the original wrote `mongoUtils.add("ems", data), 201`,
            # binding a (id, 201) tuple to new_uuid and corrupting the
            # "Created ..." response body.
            new_uuid = mongoUtils.add("ems", data)
        except pymongo.errors.DuplicateKeyError:
            return f"EMS with id {ems_id} already exists", 400
        mongoUtils.add("ems_obj", obj_json)
        return f"Created {new_uuid}", 201
def put(self, uuid):
    """
    Update the details of a specific wim.
    used by: `katana wim update -f [yaml file] [uuid]`

    Existing WIM: only non-required fields may change. New WIM: the
    matching driver object is pickled into the "wim_obj" collection.
    Returns (message, status): 200 update, 201 create, 400 error.
    """
    data = request.json
    data['_id'] = uuid
    old_data = mongoUtils.get("wim", uuid)
    if old_data:
        # Preserve creation time and current slice assignments
        data["created_at"] = old_data["created_at"]
        data["slices"] = old_data["slices"]
        try:
            # Required fields are immutable once the WIM exists
            for entry in self.req_fields:
                if data[entry] != old_data[entry]:
                    return "Cannot update field: " + entry, 400
        except KeyError:
            return f"Error: Required fields: {self.req_fields}", 400
        else:
            mongoUtils.update("wim", uuid, data)
        return f"Modified {uuid}", 200
    else:
        new_uuid = uuid
        data = request.json
        data['_id'] = new_uuid
        data['created_at'] = time.time()  # unix epoch
        try:
            wim_id = request.json["id"]
            # Instantiate the driver matching the declared WIM type
            if request.json["type"] == "odl-wim":
                wim = odl_wimUtils.Wim(request.json['url'])
            elif request.json["type"] == "test-wim":
                wim = test_wimUtils.Wim(request.json['url'])
            else:
                return "Error: Not supported WIM type", 400
        except KeyError:
            return f"Error: Required fields: {self.req_fields}", 400
        # Pickle the driver object for storage in the object collection
        thebytes = pickle.dumps(wim)
        obj_json = {
            "_id": new_uuid,
            "id": data["id"],
            "obj": Binary(thebytes)
        }
        data['slices'] = {}
        try:
            new_uuid = mongoUtils.add('wim', data)
        except pymongo.errors.DuplicateKeyError:
            return f"WIM with id {wim_id} already exists", 400
        mongoUtils.add('wim_obj', obj_json)
        return f"Created {new_uuid}", 201
def delete(self, uuid):
    """
    Delete a specific wim.
    used by: `katana wim rm [uuid]`
    """
    wim = mongoUtils.get("wim", uuid)
    if not wim:
        # if uuid is not found, return error
        return "Error: No such wim: {}".format(uuid), 404
    if wim["slices"]:
        return "Cannot delete wim {} - In use".format(uuid), 400
    # Remove both the pickled driver object and the record itself
    mongoUtils.delete("wim_obj", uuid)
    mongoUtils.delete("wim", uuid)
    return "Deleted WIM {}".format(uuid), 200
def delete(self, uuid):
    """
    Delete a specific vim.
    used by: `katana vim rm [uuid]`
    """
    vim = mongoUtils.get("vim", uuid)
    if not vim:
        # if uuid is not found, return error
        return "Error: No such vim: {}".format(uuid), 404
    if vim["tenants"]:
        return "Cannot delete vim {} - In use".format(uuid), 400
    # Remove both the pickled driver object and the record itself
    mongoUtils.delete("vim_obj", uuid)
    mongoUtils.delete("vim", uuid)
    return "Deleted VIM {}".format(uuid), 200
def delete(self, uuid):
    """
    Delete a specific nfvo.
    used by: `katana nfvo rm [uuid]`
    """
    del_nfvo = mongoUtils.get("nfvo", uuid)
    if not del_nfvo:
        # if uuid is not found, return error
        return "Error: No such nfvo: {}".format(uuid), 404
    if del_nfvo["tenants"]:
        return "Cannot delete nfvo {} - In use".format(uuid), 400
    mongoUtils.delete("nfvo_obj", uuid)
    # Also drop every descriptor harvested from this NFVO
    mongoUtils.delete_all("nsd", {"nfvo_id": del_nfvo["id"]})
    mongoUtils.delete_all("vnfd", {"nfvoid": del_nfvo["id"]})
    mongoUtils.delete("nfvo", uuid)
    return "Deleted NFVO {}".format(uuid), 200
def delete(self, uuid):
    """
    Delete a specific slice.
    used by: `katana slice rm [uuid]`
    """
    # Check if slice uuid exists
    delete_json = mongoUtils.get("slice", uuid)
    if not delete_json:
        return "Error: No such slice: {}".format(uuid), 404
    # Send the message to katana-mngr
    producer = kafkaUtils.create_producer()
    slice_message = {"action": "delete", "message": delete_json}
    producer.send("slice", value=slice_message)
    return "Deleting {0}".format(uuid), 200
def delete(self, uuid):
    """
    Delete a registered platform location
    used by: `katana location rm [uuid]

    Returns (message, status): 200 delete, 400 in use / core, 404 unknown.
    """
    del_location = mongoUtils.get("location", uuid)
    # BUG FIX: the original subscripted del_location["id"] before checking
    # for existence, so an unknown uuid raised TypeError instead of 404.
    if not del_location:
        return f"Error: No such location {uuid}", 404
    if del_location["id"] == "core":
        return "You cannot delete core location", 400
    if del_location["vims"] or del_location["functions"]:
        # BUG FIX: message said "cannot update it" in a delete handler
        return (
            f"Location {uuid} is in use by another component, cannot delete it",
            400,
        )
    mongoUtils.delete("location", uuid)
    return f"Deleted location {uuid}", 200
def put(self, uuid):
    """
    Add or update a new base slice descriptor.
    The request must provide the service details.
    used by: `katana slice_des update -f [file]`
    """
    data = request.json
    data["_id"] = uuid
    # Update in place when the descriptor already exists
    if mongoUtils.get("base_slice_des_ref", uuid):
        mongoUtils.update("base_slice_des_ref", uuid, data)
        return f"Modified {uuid}", 200
    # Otherwise create it under the provided uuid
    return str(mongoUtils.add("base_slice_des_ref", data)), 201
def delete(self, uuid):
    """
    Delete a specific vim.
    used by: `katana vim rm [uuid]`

    Also removes the VIM from its location's "vims" list.
    Returns (message, status): 200 delete, 400 in use, 404 unknown.
    """
    vim = mongoUtils.get("vim", uuid)
    if vim:
        if vim["tenants"]:
            return "Cannot delete vim {} - In use".format(uuid), 400
        mongoUtils.delete("vim_obj", uuid)
        mongoUtils.delete("vim", uuid)
        # Update the location removing the VIM
        location = mongoUtils.find("location", {"id": vim["location"].lower()})
        if location:
            # ROBUSTNESS: guard against the id being absent from the list —
            # the original let list.remove raise ValueError and abort the
            # whole request after the VIM had already been deleted.
            try:
                location["vims"].remove(vim["id"])
            except ValueError:
                pass
            mongoUtils.update("location", location["_id"], location)
        return "Deleted VIM {}".format(uuid), 200
    else:
        # if uuid is not found, return error
        return "Error: No such vim: {}".format(uuid), 404
def nest_mapping(req):
    """
    Function that maps nest to the underlying network functions.

    Builds a NEST dict from the incoming GST request `req`: fills default
    slice/service/test descriptor fields, resolves a referenced base slice
    descriptor, selects core (EPC) and radio (eNB) functions per coverage
    location, and maintains the shared-function "sharing_lists" bookkeeping.
    Returns (nest, 0) on success or (error_message, 400) on failure.
    """
    # Store the gst in DB
    mongoUtils.add("gst", req)
    nest = {"_id": req["_id"]}
    # Recreate the nest req
    # Check if the base_slice_des_ref or the required fields are set
    base_slice_des_ref = req["base_slice_descriptor"].get("base_slice_des_ref", None)
    if not base_slice_des_ref:
        for req_key in REQ_FIELDS:
            if req_key not in req["base_slice_descriptor"]:
                logger.error("Required field base_slice_descriptor.{} is missing".format(req_key))
                return (
                    "Error: Required field base_slice_descriptor.{} is missing".format(req_key),
                    400,
                )
    for field in NEST_FIELDS:
        req[field] = req.get(field, None)
    # ****** STEP 1: Slice Descriptor ******
    if not req["base_slice_descriptor"]:
        logger.error("No Base Slice Descriptor given - Exit")
        return "NEST Error: No Base Slice Descriptor given", 400
    req_slice_des = req["base_slice_descriptor"]
    # *** Recreate the NEST ***
    for req_key in SLICE_DES_OBJ:
        req_slice_des[req_key] = req_slice_des.get(req_key, None)
    for req_key in SLICE_DES_LIST:
        req_slice_des[req_key] = req_slice_des.get(req_key, [])
    # *** Check if there are references for slice ***
    if req_slice_des["base_slice_des_ref"]:
        ref_slice = mongoUtils.find(
            "base_slice_des_ref", {"base_slice_des_id": req_slice_des["base_slice_des_ref"]}
        )
        if ref_slice:
            # Copy values from the referenced descriptor only where the
            # request left the field empty (None or [])
            for key, value in req_slice_des.items():
                try:
                    if value is None or value == []:
                        req_slice_des[key] = ref_slice[key]
                except KeyError:
                    continue
        else:
            logger.error(
                "slice_descriptor {} not found".format(req_slice_des["base_slice_des_ref"])
            )
            return "Error: referenced slice_descriptor not found", 400
    # Create the shared value
    nest["shared"] = {
        "isolation": req_slice_des["isolation"],
        "simultaneous_nsi": req_slice_des["simultaneous_nsi"],
    }
    # Check that the location in coverage field is registered
    not_supp_loc = []
    for location_id in req_slice_des["coverage"]:
        if not mongoUtils.find("location", {"id": location_id.lower()}):
            not_supp_loc.append(location_id)
            logger.warning(f"Location {location_id} is not registered")
    for location_id in not_supp_loc:
        req_slice_des["coverage"].remove(location_id)
    # *************************** Start the mapping ***************************
    # Currently supports:
    # 1) If delay_tolerance --> EMBB else --> URLLC
    #    If EMBB --> EPC Placement=@Core. If URLLC --> EPC Placement=@Edge
    # 2) If network throughput > 100 Mbps --> Type=5G
    # *************************************************************************
    functions_list = []
    if req_slice_des["network_DL_throughput"]["guaranteed"] > 100000:
        gen = 5
    else:
        gen = 4
    # *** Calculate the type of the slice (sst) ***
    if req_slice_des["delay_tolerance"]:
        # EMBB
        nest["sst"] = 1
        # Find the registered function for Core Function
        epc = mongoUtils.find("func", calc_find_data(gen, "core", 0))
        if not epc:
            return "Error: Not available Core Network Functions", 400
        # Check if the nest allows shareable functions, if the function is shareable
        # (isolation values 1 and 3 forbid sharing the core function)
        if (
            req_slice_des["isolation"] != 1
            and req_slice_des["isolation"] != 3
            and epc["shared"]["availability"]
        ):
            found_list_key = None
            # max_shared == 0 means "no limit" (see the `or not max_len` test)
            max_len = epc["shared"].get("max_shared", 0)
            for grouped_nest_key, grouped_nest_list in epc["shared"]["sharing_list"].items():
                if len(grouped_nest_list) < max_len or not max_len:
                    found_list_key = grouped_nest_key
                    grouped_nest_list.append(nest["_id"])
                    sharing_list = mongoUtils.get("sharing_lists", found_list_key)
                    sharing_list["nest_list"].append(nest["_id"])
                    mongoUtils.update("sharing_lists", found_list_key, sharing_list)
                    break
            if not found_list_key:
                # No group had room — open a new sharing list
                found_list_key = str(uuid.uuid4())
                epc["shared"]["sharing_list"][found_list_key] = [nest["_id"]]
                data = {
                    "_id": found_list_key,
                    "nest_list": [nest["_id"]],
                    "ns_list": {},
                }
                mongoUtils.add("sharing_lists", data)
            nest["shared"]["core"] = {epc["_id"]: found_list_key}
        connections = []
        not_supp_loc = []
        for location in req_slice_des["coverage"]:
            enb = mongoUtils.find("func", calc_find_data(gen, location.lower(), 1))
            if not enb:
                not_supp_loc.append(location)
            else:
                # Check if the nest allows shareable functions, if the function is shareable
                if req_slice_des["isolation"] < 2 and enb["shared"]["availability"]:
                    found_list_key = None
                    max_len = enb["shared"].get("max_shared", 0)
                    for grouped_nest_key, grouped_nest_list in enb["shared"][
                        "sharing_list"
                    ].items():
                        if len(grouped_nest_list) < max_len or not max_len:
                            found_list_key = grouped_nest_key
                            grouped_nest_list.append(nest["_id"])
                            sharing_list = mongoUtils.get("sharing_lists", found_list_key)
                            sharing_list["nest_list"].append(nest["_id"])
                            mongoUtils.update("sharing_lists", found_list_key, sharing_list)
                            break
                    if not found_list_key:
                        found_list_key = str(uuid.uuid4())
                        enb["shared"]["sharing_list"][found_list_key] = [nest["_id"]]
                        data = {
                            "_id": found_list_key,
                            "nest_list": [nest["_id"]],
                            "ns_list": {},
                        }
                        mongoUtils.add("sharing_lists", data)
                    nest["shared"]["radio"] = nest["shared"].get("radio", {})
                    nest["shared"]["radio"][enb["_id"]] = found_list_key
                connections.append({"core": epc, "radio": enb})
                enb["tenants"].append(nest["_id"])
                mongoUtils.update("func", enb["_id"], enb)
                functions_list.append(enb["_id"])
        if not epc or not connections:
            return "Error: Not available Network Functions", 400
        epc["tenants"].append(nest["_id"])
        mongoUtils.update("func", epc["_id"], epc)
        functions_list.append(epc["_id"])
        for location in not_supp_loc:
            logger.warning(f"Location {location} not supported")
            req_slice_des["coverage"].remove(location)
    else:
        # URLLC: core and radio functions must both exist at every edge location
        nest["sst"] = 2
        connections = []
        not_supp_loc = []
        for location in req_slice_des["coverage"]:
            epc = mongoUtils.find("func", calc_find_data(gen, location.lower(), 0))
            enb = mongoUtils.find("func", calc_find_data(gen, location.lower(), 1))
            if not epc or not enb:
                not_supp_loc.append(location)
            else:
                # Check if the nest allows shareable functions, if the function is shareable
                # For the Core function
                if (
                    req_slice_des["isolation"] != 1
                    and req_slice_des["isolation"] != 3
                    and epc["shared"]["availability"]
                ):
                    found_list_key = None
                    max_len = epc["shared"].get("max_shared", 0)
                    for grouped_nest_key, grouped_nest_list in epc["shared"][
                        "sharing_list"
                    ].items():
                        if len(grouped_nest_list) < max_len or not max_len:
                            found_list_key = grouped_nest_key
                            grouped_nest_list.append(nest["_id"])
                            sharing_list = mongoUtils.get("sharing_lists", found_list_key)
                            sharing_list["nest_list"].append(nest["_id"])
                            mongoUtils.update("sharing_lists", found_list_key, sharing_list)
                            break
                    if not found_list_key:
                        found_list_key = str(uuid.uuid4())
                        epc["shared"]["sharing_list"][found_list_key] = [nest["_id"]]
                        data = {
                            "_id": found_list_key,
                            "nest_list": [nest["_id"]],
                            "ns_list": {},
                        }
                        mongoUtils.add("sharing_lists", data)
                    nest["shared"]["core"] = nest["shared"].get("core", {})
                    nest["shared"]["core"][epc["_id"]] = found_list_key
                # For the RAN function
                if req_slice_des["isolation"] < 2 and enb["shared"]["availability"]:
                    found_list_key = None
                    max_len = enb["shared"].get("max_shared", 0)
                    for grouped_nest_key, grouped_nest_list in enb["shared"][
                        "sharing_list"
                    ].items():
                        if len(grouped_nest_list) < max_len or not max_len:
                            found_list_key = grouped_nest_key
                            grouped_nest_list.append(nest["_id"])
                            sharing_list = mongoUtils.get("sharing_lists", found_list_key)
                            sharing_list["nest_list"].append(nest["_id"])
                            mongoUtils.update("sharing_lists", found_list_key, sharing_list)
                            break
                    if not found_list_key:
                        found_list_key = str(uuid.uuid4())
                        enb["shared"]["sharing_list"][found_list_key] = [nest["_id"]]
                        data = {
                            "_id": found_list_key,
                            "nest_list": [nest["_id"]],
                            "ns_list": {},
                        }
                        mongoUtils.add("sharing_lists", data)
                    nest["shared"]["radio"] = nest["shared"].get("radio", {})
                    nest["shared"]["radio"][enb["_id"]] = found_list_key
                connections.append({"core": epc, "radio": enb})
                epc["tenants"].append(nest["_id"])
                enb["tenants"].append(nest["_id"])
                mongoUtils.update("func", enb["_id"], enb)
                mongoUtils.update("func", epc["_id"], epc)
                functions_list.extend([epc["_id"], enb["_id"]])
        if not connections:
            return "Error: Not available Network Functions", 400
        for location in not_supp_loc:
            logger.warning(f"Location {location} not supported")
            req_slice_des["coverage"].remove(location)
    nest["connections"] = connections
    nest["functions"] = functions_list
    # Values to be copied to NEST
    KEYS_TO_BE_COPIED = (
        "network_DL_throughput",
        "ue_DL_throughput",
        "network_UL_throughput",
        "ue_UL_throughput",
        "group_communication_support",
        "mtu",
        "number_of_terminals",
        "positional_support",
        "radio_spectrum",
        "device_velocity",
        "terminal_density",
        "coverage",
    )
    for key in KEYS_TO_BE_COPIED:
        nest[key] = req_slice_des[key]
    # Add slice_name to NEST based on the base_slice_des id
    nest["slice_name"] = req_slice_des["base_slice_des_id"]
    # ****** STEP 2: Service Descriptor ******
    if req["service_descriptor"]:
        req_service_des = req["service_descriptor"]
        # *** Recreate the NEST ***
        for req_key in SERVICE_DES_OBJ:
            req_service_des[req_key] = req_service_des.get(req_key, None)
        for req_key in SERVICE_DES_LIST:
            req_service_des[req_key] = req_service_des.get(req_key, [])
        # Create the NS field on Nest
        nest["ns_list"] = req_service_des["ns_list"]
        # # Replace Placement with location in each NS
        # for ns in nest["ns_list"]:
        #     ns["placement"] = (
        #         lambda x: {"location": ["Core"]} if not x else
        #         {"location": req_slice_des["coverage"]})(ns["placement"])
    # ****** STEP 3: Test Descriptor ******
    if req["test_descriptor"]:
        req_test_des = req["test_descriptor"]
        # *** Recreate the NEST ***
        for req_key in TEST_DES_OBJ:
            req_test_des[req_key] = req_test_des.get(req_key, None)
        for req_key in TEST_DES_LIST:
            req_test_des[req_key] = req_test_des.get(req_key, [])
        # Create the Probe field on Nest
        nest["probe_list"] = req_test_des["probe_list"]
    # Persist the base slice descriptor for future reference if it is new
    if (
        not mongoUtils.find(
            "base_slice_des_ref",
            {"base_slice_des_id": req["base_slice_descriptor"]["base_slice_des_id"]},
        )
        and req["base_slice_descriptor"]["base_slice_des_id"]
    ):
        new_uuid = str(uuid.uuid4())
        req["base_slice_descriptor"]["_id"] = new_uuid
        mongoUtils.add("base_slice_des_ref", req["base_slice_descriptor"])
    return nest, 0
def delete_slice(slice_id, force=False):
    """
    Deletes the given network slice.

    Teardown proceeds in order: (1) EMS radio de-configuration,
    (2) WAN/WIM slice removal, (3) NS termination and VIM/NFVO tenant
    cleanup. Errors are accumulated into slice_json["error"]; the slice
    record itself is removed only when no error occurred or when `force`
    is True. Finally the slice id is removed from each function's tenants.
    """
    # Update the slice status in mongo db
    slice_json = mongoUtils.get("slice", slice_id)
    slice_json["status"] = "Terminating"
    mongoUtils.update("slice", slice_json["_id"], slice_json)
    logger.info(f"{slice_json['_id']} Status: Terminating")
    # *** Step-1: Radio Slice Configuration ***
    if slice_json["conf_comp"]["ems"]:
        ems_messages = slice_json.get("ems_data", None)
        if ems_messages:
            for ems_id, ems_message in ems_messages.items():
                # Find the EMS
                target_ems = mongoUtils.find("ems", {"id": ems_id})
                if not target_ems or ems_id not in slice_json["conf_comp"]["ems"]:
                    # Error handling: There is no such EMS
                    logger.error("EMS {} not found - No configuration".format(ems_id))
                    continue
                # Unpickle the stored EMS driver and undo the slice config
                target_ems_obj = pickle.loads(mongoUtils.find("ems_obj", {"id": ems_id})["obj"])
                target_ems_obj.del_slice(ems_message)
    else:
        logger.info("There was not EMS configuration")
    # *** Step-2: WAN Slice ***
    wim_data = slice_json.get("wim_data", None)
    if wim_data:
        # Select WIM - Assume that there is only one registered
        wim_list = list(mongoUtils.index("wim"))
        if wim_list:
            target_wim = wim_list[0]
            target_wim_id = target_wim["id"]
            target_wim_obj = pickle.loads(mongoUtils.find("wim_obj", {"id": target_wim_id})["obj"])
            target_wim_obj.del_slice(slice_id)
            try:
                del target_wim["slices"][slice_json["_id"]]
            except KeyError:
                logger.warning(f"Slice {slice_id} not in WIM {target_wim_id}")
            else:
                mongoUtils.update("wim", target_wim["_id"], target_wim)
        else:
            err = "Cannot find WIM - WAN Slice will not be deleted"
            logger.warning(err)
            slice_json["status"] = "Error"
            slice_json["error"] = slice_json.get("error", "") + err
            mongoUtils.update("slice", slice_json["_id"], slice_json)
    else:
        logger.info("There was no WIM configuration")
    # *** Step-3: Cloud ***
    vim_error_list = []
    try:
        total_ns_list = slice_json["total_ns_list"]
        ns_inst_info = slice_json["ns_inst_info"]
        for ns in total_ns_list:
            # Skip NSs that never came up successfully
            if ns["nsd-id"] not in slice_json["conf_comp"]["nf"]:
                logger.error(f"{ns['nsd-id']} was not instantiated successfully")
                continue
            # Get the NFVO
            nfvo_id = ns["nfvo-id"]
            target_nfvo = mongoUtils.find("nfvo", {"id": ns["nfvo-id"]})
            if not target_nfvo:
                logger.warning(
                    "NFVO with id {} was not found - NSs won't terminate".format(nfvo_id)
                )
                # Remember the affected VIMs so their tenants are not removed
                vim_error_list += ns["vims"]
                continue
            target_nfvo_obj = pickle.loads(
                mongoUtils.find("nfvo_obj", {"id": ns["nfvo-id"]})["obj"]
            )
            # Stop the NS
            nfvo_inst_ns = ns_inst_info[ns["ns-id"]][ns["placement_loc"]["location"]][
                "nfvo_inst_ns"
            ]
            target_nfvo_obj.deleteNs(nfvo_inst_ns)
            # Poll until the NFVO reports the NS is gone
            while True:
                if target_nfvo_obj.checkNsLife(nfvo_inst_ns):
                    break
                time.sleep(5)
    except KeyError as e:
        err = f"Error, not all NSs started or terminated correctly {e}"
        logger.warning(err)
        slice_json["status"] = "Error"
        slice_json["error"] = slice_json.get("error", "") + err
        mongoUtils.update("slice", slice_json["_id"], slice_json)
    vim_dict = slice_json["vim_list"]
    for vim, vim_info in vim_dict.items():
        try:
            # Delete the new tenants from the NFVO
            for nfvo, vim_account in vim_info["nfvo_vim_account"].items():
                # Get the NFVO
                target_nfvo = mongoUtils.find("nfvo", {"id": nfvo})
                target_nfvo_obj = pickle.loads(mongoUtils.find("nfvo_obj", {"id": nfvo})["obj"])
                # Delete the VIM and update nfvo db
                target_nfvo_obj.deleteVim(vim_account)
                target_nfvo["tenants"][slice_json["_id"]].remove(vim_account)
                if len(target_nfvo["tenants"][slice_json["_id"]]) == 0:
                    try:
                        del target_nfvo["tenants"][slice_json["_id"]]
                    except KeyError:
                        logger.warning(f"Slice {slice_id} not in NFO {nfvo}")
                    else:
                        mongoUtils.update("nfvo", target_nfvo["_id"], target_nfvo)
            # Delete the tenants from every vim
            if vim not in vim_error_list:
                # Get the VIM
                target_vim = mongoUtils.find("vim", {"id": vim})
                if not target_vim:
                    logger.warning("VIM id {} was not found - Tenant won't be deleted".format(vim))
                    continue
                target_vim_obj = pickle.loads(mongoUtils.find("vim_obj", {"id": vim})["obj"])
                target_vim_obj.delete_proj_user(target_vim["tenants"][slice_json["_id"]])
                try:
                    del target_vim["tenants"][slice_json["_id"]]
                except KeyError:
                    logger.warning(f"Slice {slice_id} not in VIM {vim}")
                else:
                    mongoUtils.update("vim", target_vim["_id"], target_vim)
        except KeyError as e:
            err = f"Error, not all tenants created or removed correctly {e}"
            logger.warning(err)
            slice_json["status"] = "Error"
            slice_json["error"] = slice_json.get("error", "") + err
            mongoUtils.update("slice", slice_json["_id"], slice_json)
    # Drop the slice record unless errors occurred (or force was requested)
    if "error" not in slice_json:
        mongoUtils.delete("slice", slice_json["_id"])
    elif "error" in slice_json and force:
        mongoUtils.delete("slice", slice_json["_id"])
    # Remove Slice from the tenants list on functions
    for func_id in slice_json["functions"]:
        ifunc = mongoUtils.get("func", func_id)
        try:
            ifunc["tenants"].remove(slice_json["_id"])
        except (KeyError, ValueError):
            logger.warning(f"Slice {slice_id} not in function {func_id}")
        else:
            mongoUtils.update("func", func_id, ifunc)
def put(self, uuid):
    """
    Update the details of a specific nfvo.
    used by: `katana nfvo update -f [yaml file] [uuid]`

    Existing NFVO: only non-required fields may change. New NFVO (type
    "OSM" only): an Osm driver is built, a token fetch validates
    connectivity, the pickled driver is stored in "nfvo_obj" and the NFVO
    is bootstrapped (NSDs/VNFDs harvested).
    Returns (message, status): 200 update, 201 create, 400 error.
    """
    data = request.json
    data["_id"] = uuid
    old_data = mongoUtils.get("nfvo", uuid)
    if old_data:
        # Preserve creation time and current tenant assignments
        data["created_at"] = old_data["created_at"]
        data["tenants"] = old_data["tenants"]
        try:
            # Required fields are immutable once the NFVO exists
            for entry in self.req_fields:
                if data[entry] != old_data[entry]:
                    return "Cannot update field: " + entry, 400
        except KeyError:
            return f"Error: Required fields: {self.req_fields}", 400
        else:
            mongoUtils.update("nfvo", uuid, data)
        return f"Modified {uuid}", 200
    else:
        new_uuid = uuid
        data = request.json
        data["_id"] = new_uuid
        data["created_at"] = time.time()  # unix epoch
        data["tenants"] = {}
        if request.json["type"] == "OSM":
            # Create the NFVO object
            try:
                osm_username = request.json["nfvousername"]
                osm_password = request.json["nfvopassword"]
                osm_ip = request.json["nfvoip"]
                osm_project_name = request.json["tenantname"]
                nfvo_id = request.json["id"]
            except KeyError:
                return f"Error: Required fields: {self.req_fields}", 400
            else:
                osm = osmUtils.Osm(nfvo_id, osm_ip, osm_username, osm_password, osm_project_name)
                try:
                    # Token fetch doubles as a connectivity check
                    osm.getToken()
                except ConnectTimeout as e:
                    logger.exception("Connection Timeout: {}".format(e))
                    response = dumps({"error": "Unable to connect to NFVO"})
                    return (response, 400)
                except ConnectionError as e:
                    logger.exception("Connection Error: {}".format(e))
                    response = dumps({"error": "Unable to connect to NFVO"})
                    return (response, 400)
                else:
                    # Store the osm object to the mongo db
                    thebytes = pickle.dumps(osm)
                    obj_json = {
                        "_id": new_uuid,
                        "id": data["id"],
                        "obj": Binary(thebytes)
                    }
                    try:
                        new_uuid = mongoUtils.add("nfvo", data)
                    except pymongo.errors.DuplicateKeyError:
                        return f"NFVO with id {nfvo_id} already exists", 400
                    mongoUtils.add("nfvo_obj", obj_json)
                    # Get information regarding VNFDs and NSDs
                    osmUtils.bootstrapNfvo(osm)
        else:
            response = dumps({"error": "This type nfvo is not supported"})
            return response, 400
        return f"Created {new_uuid}", 201
def delete_slice(slice_id, force=False):
    """
    Deletes the given network slice.

    Tears the slice down in reverse order of its creation:
    Step-1 radio (EMS) configuration, Step-2 WAN (WIM) slice, Step-3 cloud
    resources (Network Services on the NFVOs, tenants/projects on the VIMs).
    It then removes the slice record itself, the slice's entry in each
    network function's tenant list, the slice's Grafana dashboard (when
    monitoring is enabled) and any shared-NSSI bookkeeping.

    slice_id: the "_id" of the slice document in the mongo db
    force: when truthy, delete the slice record even if errors occurred
    """
    # Update the slice status in mongo db
    slice_json = mongoUtils.get("slice", slice_id)
    slice_json["status"] = "Terminating"
    mongoUtils.update("slice", slice_json["_id"], slice_json)
    logger.info(f"{slice_json['_id']} Status: Terminating")
    # Check if slice monitoring has been enabled
    monitoring = os.getenv("KATANA_MONITORING", None)
    slice_monitoring = slice_json.get("slice_monitoring", None)
    mon_producer = None
    if monitoring:
        # Create the Kafka producer and notify the monitoring module
        mon_producer = create_producer()
        mon_producer.send(
            "nfv_mon",
            value={
                "action": "katana_mon",
                "slice_info": {
                    "slice_id": slice_id,
                    "status": "terminating"
                },
            },
        )
    # *** Step-1: Radio Slice Configuration ***
    # Only undo configuration on EMSs that were actually configured during
    # slice creation (tracked in conf_comp["ems"])
    if slice_json["conf_comp"]["ems"]:
        ems_messages = slice_json.get("ems_data", None)
        if ems_messages:
            for ems_id, ems_message in ems_messages.items():
                # Find the EMS
                target_ems = mongoUtils.find("ems", {"id": ems_id})
                if not target_ems or ems_id not in slice_json["conf_comp"]["ems"]:
                    # Error handling: There is no such EMS
                    logger.error(
                        "EMS {} not found - No configuration".format(ems_id))
                    continue
                # NOTE(review): pickle.loads on data read from mongo -
                # assumes the db contents are trusted; confirm
                target_ems_obj = pickle.loads(
                    mongoUtils.find("ems_obj", {"id": ems_id})["obj"])
                target_ems_obj.del_slice(ems_message)
    else:
        logger.info("There was not EMS configuration")
    # *** Step-2: WAN Slice ***
    wim_data = slice_json.get("wim_data", None)
    if wim_data:
        # Select WIM - Assume that there is only one registered
        wim_list = list(mongoUtils.index("wim"))
        if wim_list:
            target_wim = wim_list[0]
            target_wim_id = target_wim["id"]
            target_wim_obj = pickle.loads(
                mongoUtils.find("wim_obj", {"id": target_wim_id})["obj"])
            target_wim_obj.del_slice(slice_id)
            # Drop the slice from the WIM's slice registry; only persist the
            # WIM record when the entry was actually present
            try:
                del target_wim["slices"][slice_json["_id"]]
            except KeyError:
                logger.warning(f"Slice {slice_id} not in WIM {target_wim_id}")
            else:
                mongoUtils.update("wim", target_wim["_id"], target_wim)
        else:
            # Best effort: record the error on the slice and keep going
            err = "Cannot find WIM - WAN Slice will not be deleted"
            logger.warning(err)
            slice_json["status"] = "Error"
            if monitoring:
                mon_producer.send(
                    "nfv_mon",
                    value={
                        "action": "katana_mon",
                        "slice_info": {
                            "slice_id": slice_id,
                            "status": "error"
                        },
                    },
                )
            slice_json["error"] = slice_json.get("error", "") + err
            mongoUtils.update("slice", slice_json["_id"], slice_json)
    else:
        logger.info("There was no WIM configuration")
    # *** Step-3: Cloud ***
    vim_error_list = []
    try:
        total_ns_list = slice_json["total_ns_list"]
        ns_inst_info = slice_json["ns_inst_info"]
        for ns in total_ns_list:
            # Check if the NS is shared. If it is, check if there is another running slice on this
            # sharing list
            if ns["shared_function"]:
                # Find the shared list
                shared_list = mongoUtils.get("sharing_lists",
                                             ns["shared_slice_key"])
                # If there is another running slice, don't terminate the NS
                if len(shared_list["nest_list"]) > 1:
                    continue
            if ns["nsd-id"] not in slice_json["conf_comp"]["nf"]:
                logger.error(
                    f"{ns['nsd-id']} was not instantiated successfully")
                continue
            # Get the NFVO
            nfvo_id = ns["nfvo-id"]
            target_nfvo = mongoUtils.find("nfvo", {"id": ns["nfvo-id"]})
            if not target_nfvo:
                logger.warning(
                    "NFVO with id {} was not found - NSs won't terminate".
                    format(nfvo_id))
                # Remember these VIMs so their tenants are not touched later
                vim_error_list += ns["vims"]
                continue
            target_nfvo_obj = pickle.loads(
                mongoUtils.find("nfvo_obj", {"id": ns["nfvo-id"]})["obj"])
            # Stop the NS and poll until the NFVO reports it gone
            nfvo_inst_ns = ns_inst_info[ns["ns-id"]][
                ns["placement_loc"]["location"]]["nfvo_inst_ns"]
            target_nfvo_obj.deleteNs(nfvo_inst_ns)
            while True:
                if target_nfvo_obj.checkNsLife(nfvo_inst_ns):
                    break
                time.sleep(5)
    except KeyError as e:
        err = f"Error, not all NSs started or terminated correctly {e}"
        logger.warning(err)
        slice_json["status"] = "Error"
        if monitoring:
            mon_producer.send(
                "nfv_mon",
                value={
                    "action": "katana_mon",
                    "slice_info": {
                        "slice_id": slice_id,
                        "status": "error"
                    },
                },
            )
        slice_json["error"] = slice_json.get("error", "") + err
        mongoUtils.update("slice", slice_json["_id"], slice_json)
    vim_dict = slice_json.get("vim_list", {})
    for vim, vim_info in vim_dict.items():
        # Check if the VIM is shared with other slices. If yes, skip the deletion
        tenant_name = slice_id
        if vim_info["shared"]:
            shared_list = mongoUtils.get("sharing_lists",
                                         vim_info["shared_slice_list_key"])
            tenant_name = vim_info["shared_slice_list_key"]
            # Shared VIM keys carry a 2-char suffix; strip it to get the id
            vim_id = vim[:-2]
            # If there is another running slice, don't delete the VIM account
            if len(shared_list["nest_list"]) > 1:
                continue
        else:
            vim_id = vim
        try:
            # Delete the new tenants from the NFVO
            for nfvo, vim_account in vim_info["nfvo_vim_account"].items():
                # Get the NFVO
                target_nfvo = mongoUtils.find("nfvo", {"id": nfvo})
                target_nfvo_obj = pickle.loads(
                    mongoUtils.find("nfvo_obj", {"id": nfvo})["obj"])
                # Delete the VIM and update nfvo db
                target_nfvo_obj.deleteVim(vim_account)
                target_nfvo["tenants"][tenant_name].remove(vim_account)
                if len(target_nfvo["tenants"][tenant_name]) == 0:
                    try:
                        del target_nfvo["tenants"][tenant_name]
                    except KeyError:
                        logger.warning(
                            f"Slice {tenant_name} not in NFO {nfvo}")
                    else:
                        mongoUtils.update("nfvo", target_nfvo["_id"],
                                          target_nfvo)
                # NOTE(review): the nfvo record is persisted only when the
                # tenant entry was fully deleted; removing one vim_account
                # while others remain is not saved - confirm this is intended
            # Delete the tenants from every vim
            if vim not in vim_error_list:
                # Get the VIM
                target_vim = mongoUtils.find("vim", {"id": vim_id})
                if not target_vim:
                    logger.warning(
                        "VIM id {} was not found - Tenant won't be deleted".
                        format(vim))
                    continue
                target_vim_obj = pickle.loads(
                    mongoUtils.find("vim_obj", {"id": vim_id})["obj"])
                if vim_info["shared"]:
                    tenant_name = vim_info["shared_slice_list_key"]
                else:
                    tenant_name = slice_json["_id"]
                target_vim_obj.delete_proj_user(
                    target_vim["tenants"][tenant_name])
                try:
                    del target_vim["tenants"][tenant_name]
                except KeyError:
                    logger.warning(f"Slice {slice_id} not in VIM {vim}")
                else:
                    mongoUtils.update("vim", target_vim["_id"], target_vim)
        except KeyError as e:
            err = f"Error, not all tenants created or removed correctly {e}"
            logger.warning(err)
            slice_json["status"] = "Error"
            if monitoring:
                mon_producer.send(
                    "nfv_mon",
                    value={
                        "action": "katana_mon",
                        "slice_info": {
                            "slice_id": slice_id,
                            "status": "error"
                        },
                    },
                )
            slice_json["error"] = slice_json.get("error", "") + err
            mongoUtils.update("slice", slice_json["_id"], slice_json)
    # Remove the slice record: unconditionally on a clean teardown, or only
    # when force is set if errors were recorded along the way
    if "error" not in slice_json:
        mongoUtils.delete("slice", slice_json["_id"])
        if monitoring:
            mon_producer.send(
                "nfv_mon",
                value={
                    "action": "katana_mon",
                    "slice_info": {
                        "slice_id": slice_id,
                        "status": "deleted"
                    },
                },
            )
    elif "error" in slice_json and force:
        mongoUtils.delete("slice", slice_json["_id"])
        if monitoring:
            mon_producer.send(
                "nfv_mon",
                value={
                    "action": "katana_mon",
                    "slice_info": {
                        "slice_id": slice_id,
                        "status": "deleted"
                    },
                },
            )
    # Remove Slice from the tenants list on functions
    for func_id in slice_json["functions"]:
        ifunc = mongoUtils.get("func", func_id)
        try:
            ifunc["tenants"].remove(slice_json["_id"])
        except (KeyError, ValueError):
            logger.warning(f"Slice {slice_id} not in function {func_id}")
        else:
            mongoUtils.update("func", func_id, ifunc)
    # Remove Slice dashboard
    if monitoring and slice_monitoring:
        # Use the Grafana API in order to delete the new dashboard for the new slice
        grafana_url = f"http://katana-grafana:3000/api/dashboards/uid/{slice_id}"
        headers = {
            "accept": "application/json",
            "content-type": "application/json"
        }
        grafana_user = os.getenv("GF_SECURITY_ADMIN_USER", "admin")
        grafana_passwd = os.getenv("GF_SECURITY_ADMIN_PASSWORD", "admin")
        r = requests.delete(
            url=grafana_url,
            headers=headers,
            auth=(grafana_user, grafana_passwd),
        )
        logger.info(f"Deleted Grafana dashboard for slice {slice_id}")
        # Stop the threads monitoring NS status of the slice
        ns_inst_info = slice_json["ns_inst_info"]
        mon_producer.send(topic="nfv_mon",
                          value={
                              "action": "delete",
                              "ns_list": ns_inst_info
                          })
    # Check if the NSI has shared NSSIs and remove them
    # (outer try/except KeyError covers slices without a "shared" section)
    try:
        for func_key, shared_list_key in slice_json["shared"]["core"].items():
            # Remove the slice from the shared list
            try:
                shared_list = mongoUtils.get("sharing_lists", shared_list_key)
                func = mongoUtils.get("func", func_key)
                if len(shared_list["nest_list"]) > 1:
                    # Remove the Slice from the list
                    shared_list["nest_list"].remove(slice_id)
                    mongoUtils.update("sharing_lists", shared_list_key,
                                      shared_list)
                    func["shared"]["sharing_list"][shared_list_key].remove(
                        slice_id)
                else:
                    # Last slice on the list - remove the shared list itself
                    mongoUtils.delete("sharing_lists", shared_list_key)
                    del func["shared"]["sharing_list"][shared_list_key]
                mongoUtils.update("func", func_key, func)
            except ValueError as e:
                pass
    except KeyError as e:
        pass
    try:
        for func_key, shared_list_key in slice_json["shared"]["radio"].items():
            # Remove the slice from the shared list
            try:
                shared_list = mongoUtils.get("sharing_lists", shared_list_key)
                func = mongoUtils.get("func", func_key)
                if len(shared_list["nest_list"]) > 1:
                    # Remove the Slice from the list
                    shared_list["nest_list"].remove(slice_id)
                    mongoUtils.update("sharing_lists", shared_list_key,
                                      shared_list)
                    func["shared"]["sharing_list"][shared_list_key].remove(
                        slice_id)
                else:
                    # Last slice on the list - remove the shared list itself
                    mongoUtils.delete("sharing_lists", shared_list_key)
                    del func["shared"]["sharing_list"][shared_list_key]
                mongoUtils.update("func", func_key, func)
            except ValueError as e:
                pass
    except KeyError as e:
        pass
def add_slice(nest_req):
    """
    Creates the network slice described by the given NEST.

    Pipeline (each phase updates the slice status in mongo and, when
    KATANA_MONITORING is set, publishes the status on the "nfv_mon" topic):
      STEP-1 Placement: resolve NS placement per connection/coverage area.
      STEP-2 Provisioning: create VIM tenants/projects, register them on the
             NFVOs, and set up the WAN (WIM) slice.
      STEP-3 Activation: instantiate the NSs, wait until they run, collect
             VNF IPs, and push the radio configuration to the EMSs.
      STEP-4 Finalize: build the Grafana dashboard and mark the slice Running.

    Returns None; failures set the slice status to "Failed - ..." and return
    early.
    """
    # Recreate the NEST with None options where missing
    nest = {
        "_id": nest_req["_id"],
        "status": "Init",
        "created_at": time.time(),  # unix epoch
        "deployment_time": {
            "Placement_Time": None,
            "Provisioning_Time": None,
            "WAN_Deployment_Time": None,
            "NS_Deployment_Time": None,
            "Radio_Configuration_Time": None,
            "Slice_Deployment_Time": None,
        },
    }
    # Store the skeleton record first; it is enriched and re-saved below
    mongoUtils.add("slice", nest)
    for nest_key in NEST_KEYS_OBJ:
        nest[nest_key] = nest_req.get(nest_key, None)
    for nest_key in NEST_KEYS_LIST:
        nest[nest_key] = nest_req.get(nest_key, [])
    # Check if slice monitoring has been enabled
    monitoring = os.getenv("KATANA_MONITORING", None)
    wim_monitoring = {}
    mon_producer = None
    if monitoring:
        # Create the Kafka producer
        mon_producer = create_producer()
        nest["slice_monitoring"] = {}
    # **** STEP-1: Placement ****
    nest["status"] = "Placement"
    if monitoring:
        mon_producer.send(
            "nfv_mon",
            value={
                "action": "katana_mon",
                "slice_info": {
                    "slice_id": nest["_id"],
                    "status": "placement"
                },
            },
        )
    # conf_comp tracks which NFs/EMSs were actually configured, so that
    # delete_slice only undoes what was done
    nest["conf_comp"] = {"nf": [], "ems": []}
    mongoUtils.update("slice", nest["_id"], nest)
    logger.info(f"{nest['_id']} Status: Placement")
    placement_start_time = time.time()
    # Initiate the lists
    vim_dict = {}
    total_ns_list = []
    ems_messages = {}
    # Get Details for the Network Services
    # i) The NS part of the core slice
    inst_functions = {}
    for connection in nest["connections"]:
        for key in connection:
            # Check if the function has been instantiated from another connection
            if connection[key]["_id"] in inst_functions:
                connection[key] = inst_functions[connection[key]["_id"]]
                continue
            # Check if the function is shared with another slice
            # shared_check values: 0: No shared, 1: First shared, 2: Shared
            shared_check = 0
            shared_slice_list_key = None
            try:
                shared_slice_list_key = nest["shared"][key][connection[key]
                                                            ["_id"]]
                shared_slice_list = connection[key]["shared"]["sharing_list"][
                    shared_slice_list_key]
                if len(shared_slice_list) > 1:
                    shared_check = 2
                else:
                    shared_check = 1
            except KeyError:
                pass
            try:
                err, pop_list = ns_details(
                    connection[key]["ns_list"],
                    connection[key]["location"],
                    vim_dict,
                    total_ns_list,
                    shared_check,
                    shared_slice_list_key,
                )
                if pop_list:
                    connection[key]["ns_list"] = [
                        x for x in connection[key]["ns_list"]
                        if x not in pop_list
                    ]
                if err:
                    # Placement failed - mark the slice and abort
                    nest["status"] = f"Failed - {err}"
                    nest["ns_inst_info"] = {}
                    nest["total_ns_list"] = []
                    mongoUtils.update("slice", nest["_id"], nest)
                    return
                inst_functions[connection[key]["_id"]] = connection[key]
            except KeyError:
                continue
    # ii) The extra NS of the slice
    for location in nest["coverage"]:
        err, _ = ns_details(nest["ns_list"], location, vim_dict,
                            total_ns_list)
        if err:
            nest["status"] = f"Failed - {err}"
            nest["ns_inst_info"] = {}
            nest["total_ns_list"] = []
            mongoUtils.update("slice", nest["_id"], nest)
            return
    nest["vim_list"] = vim_dict
    nest["total_ns_list"] = total_ns_list
    nest["deployment_time"]["Placement_Time"] = format(
        time.time() - placement_start_time, ".4f")
    # **** STEP-2: Resource Provisioning ****
    nest["status"] = "Provisioning"
    if monitoring:
        mon_producer.send(
            "nfv_mon",
            value={
                "action": "katana_mon",
                "slice_info": {
                    "slice_id": nest["_id"],
                    "status": "provisioning"
                },
            },
        )
    mongoUtils.update("slice", nest["_id"], nest)
    logger.info(f"{nest['_id']} Status: Provisioning")
    prov_start_time = time.time()
    # *** STEP-2a: Cloud ***
    # *** STEP-2a-i: Create the new tenant/project on the VIM ***
    for num, (vim, vim_info) in enumerate(vim_dict.items()):
        if vim_info["shared"]:
            # Shared VIM keys carry a 2-char suffix; strip it to get the id
            vim_id = vim[:-2]
        else:
            vim_id = vim
        target_vim = mongoUtils.find("vim", {"id": vim_id})
        target_vim_obj = pickle.loads(
            mongoUtils.find("vim_obj", {"id": vim_id})["obj"])
        # Define project parameters
        # shared == 1: first slice on a sharing list, 0: not shared,
        # otherwise: reuse the account created by an earlier shared slice
        if vim_info["shared"] == 1:
            name = "vim_{0}_katana_{1}_shared".format(
                num, vim_info["shared_slice_list_key"])
            tenant_name = vim_info["shared_slice_list_key"]
        elif vim_info["shared"] == 0:
            name = "vim_{0}_katana_{1}".format(num, nest["_id"])
            tenant_name = nest["_id"]
        else:
            # Find the shared list
            sharing_lists = mongoUtils.get("sharing_lists",
                                           vim_info["shared_slice_list_key"])
            vim_dict[vim] = sharing_lists["vims"][target_vim["id"]]
            mongoUtils.update("slice", nest["_id"], nest)
            continue
        tenant_project_name = name
        tenant_project_description = name
        tenant_project_user = name
        tenant_project_password = "******"
        # If the vim is Openstack type, set quotas
        quotas = (vim_info["resources"]
                  if target_vim["type"] == "openstack"
                  or target_vim["type"] == "Openstack" else None)
        ids = target_vim_obj.create_slice_prerequisites(
            tenant_project_name,
            tenant_project_description,
            tenant_project_user,
            tenant_project_password,
            nest["_id"],
            quotas=quotas,
        )
        # Register the tenant to the mongo db
        target_vim["tenants"][tenant_name] = name
        mongoUtils.update("vim", target_vim["_id"], target_vim)
        # STEP-2a-ii: Add the new VIM tenant to NFVO
        if target_vim["type"] == "openstack":
            # Update the config parameter for the tenant
            config_param = dict(security_groups=ids["secGroupName"])
        elif target_vim["type"] == "opennebula":
            config_param = target_vim["config"]
        else:
            config_param = {}
        for nfvo_id in vim_info["nfvo_list"]:
            target_nfvo = mongoUtils.find("nfvo", {"id": nfvo_id})
            target_nfvo_obj = pickle.loads(
                mongoUtils.find("nfvo_obj", {"id": nfvo_id})["obj"])
            # NOTE(review): vim_id is re-bound here to the NFVO's vim-account
            # id, shadowing the VIM-db id used above - confirm intended
            vim_id = target_nfvo_obj.addVim(
                tenant_project_name,
                target_vim["password"],
                target_vim["type"],
                target_vim["auth_url"],
                target_vim["username"],
                config_param,
            )
            vim_info["nfvo_vim_account"] = vim_info.get("nfvo_vim_account",
                                                        {})
            vim_info["nfvo_vim_account"][nfvo_id] = vim_id
            # Register the tenant to the mongo db
            target_nfvo["tenants"][tenant_name] = target_nfvo["tenants"].get(
                nest["_id"], [])
            target_nfvo["tenants"][tenant_name].append(vim_id)
            mongoUtils.update("nfvo", target_nfvo["_id"], target_nfvo)
        if vim_info["shared"] == 1:
            # First shared slice: publish this VIM account on the sharing list
            sharing_lists = mongoUtils.get("sharing_lists",
                                           vim_info["shared_slice_list_key"])
            sharing_lists["vims"] = sharing_lists.get("vims", {})
            sharing_lists["vims"][target_vim["id"]] = vim_info
            mongoUtils.update("sharing_lists",
                              vim_info["shared_slice_list_key"],
                              sharing_lists)
        mongoUtils.update("slice", nest["_id"], nest)
    # *** STEP-2b: WAN ***
    if mongoUtils.count("wim") <= 0:
        logger.warning("There is no registered WIM")
    else:
        wan_start_time = time.time()
        # Create the data for the WIM
        wim_data = {
            "_id": nest["_id"],
            "core_connections": [],
            "extra_ns": [],
            "slice_sla": {}
        }
        # i) Create the slice_sla data for the WIM
        wim_data["slice_sla"] = {
            "network_DL_throughput": nest["network_DL_throughput"],
            "network_UL_throughput": nest["network_UL_throughput"],
            "mtu": nest["mtu"],
        }
        # ii) Add the connections
        for connection in nest["connections"]:
            data = {}
            for key in connection:
                key_data = {}
                try:
                    ns_l = connection[key]["ns_list"]
                except KeyError:
                    pass
                else:
                    key_data["ns"] = []
                    for ns in ns_l:
                        if ns["placement_loc"] not in key_data["ns"]:
                            key_data["ns"].append(ns["placement_loc"])
                try:
                    pnf_l = connection[key]["pnf_list"]
                except KeyError:
                    pass
                else:
                    key_data["pnf"] = pnf_l
                if key_data:
                    data[key] = key_data
            if data:
                wim_data["core_connections"].append(data)
        # iii) Add the extra Network Services
        for ns in nest["ns_list"]:
            if ns["placement_loc"] not in wim_data["extra_ns"]:
                wim_data["extra_ns"].append(ns["placement_loc"])
        # iV) Add the probes
        wim_data["probes"] = nest["probe_list"]
        # Select WIM - Assume that there is only one registered
        wim_list = list(mongoUtils.index("wim"))
        target_wim = wim_list[0]
        target_wim_id = target_wim["id"]
        target_wim_obj = pickle.loads(
            mongoUtils.find("wim_obj", {"id": target_wim_id})["obj"])
        target_wim_obj.create_slice(wim_data)
        nest["wim_data"] = wim_data
        target_wim["slices"][nest["_id"]] = nest["_id"]
        mongoUtils.update("slice", nest["_id"], nest)
        mongoUtils.update("wim", target_wim["_id"], target_wim)
        # Add monitoring from WIM in nest
        try:
            wim_monitoring = target_wim["monitoring-url"]
            nest["slice_monitoring"]["WIM"] = wim_monitoring
        except KeyError:
            pass
        nest["deployment_time"]["WAN_Deployment_Time"] = format(
            time.time() - wan_start_time, ".4f")
    nest["deployment_time"]["Provisioning_Time"] = format(
        time.time() - prov_start_time, ".4f")
    # **** STEP-3: Slice Activation Phase****
    nest["status"] = "Activation"
    if monitoring:
        mon_producer.send(
            "nfv_mon",
            value={
                "action": "katana_mon",
                "slice_info": {
                    "slice_id": nest["_id"],
                    "status": "activation"
                },
            },
        )
    mongoUtils.update("slice", nest["_id"], nest)
    logger.info(f"{nest['_id']} Status: Activation")
    # *** STEP-3a: Cloud ***
    # Instantiate NS
    # Store info about instantiated NSs
    ns_inst_info = {}
    nest["deployment_time"]["NS_Deployment_Time"] = {}
    for ns in total_ns_list:
        ns["start_time"] = time.time()
        if ns["shared_function"] == 2:
            # The ns is already instantiated and there is no need to instantiate again
            # Find the sharing list
            shared_list = mongoUtils.get("sharing_lists",
                                         ns["shared_slice_key"])
            ns_inst_info[ns["ns-id"]] = shared_list["ns_list"][ns["nsd-id"]]
            nest["conf_comp"]["nf"].append(ns["nsd-id"])
            continue
        ns_inst_info[ns["ns-id"]] = {}
        target_nfvo = mongoUtils.find("nfvo", {"id": ns["nfvo-id"]})
        target_nfvo_obj = pickle.loads(
            mongoUtils.find("nfvo_obj", {"id": ns["nfvo-id"]})["obj"])
        selected_vim = ns["placement_loc"]["vim"]
        nfvo_vim_account = vim_dict[selected_vim]["nfvo_vim_account"][
            ns["nfvo-id"]]
        nfvo_inst_ns = target_nfvo_obj.instantiateNs(ns["ns-name"],
                                                     ns["nsd-id"],
                                                     nfvo_vim_account)
        ns_inst_info[ns["ns-id"]][ns["placement_loc"]["location"]] = {
            "nfvo_inst_ns": nfvo_inst_ns,
            "nfvo-id": ns["nfvo-id"],
            "ns-name": ns["ns-name"],
            "slice_id": nest["_id"],
            "vim": selected_vim,
        }
        # Check if this the first slice of a sharing list
        if ns["shared_function"] == 1:
            shared_list = mongoUtils.get("sharing_lists",
                                         ns["shared_slice_key"])
            ns_inst_info[ns["ns-id"]][ns["placement_loc"]
                                      ["location"]]["shared"] = True
            ns_inst_info[ns["ns-id"]][ns["placement_loc"]["location"]][
                "sharing_list"] = ns["shared_slice_key"]
            shared_list["ns_list"][ns["nsd-id"]] = ns_inst_info[ns["ns-id"]]
            mongoUtils.update("sharing_lists", ns["shared_slice_key"],
                              shared_list)
        nest["conf_comp"]["nf"].append(ns["nsd-id"])
        # Pacing delay between instantiation requests
        time.sleep(4)
    time.sleep(2)
    # Get the nsr for each service and wait for the activation
    for ns in total_ns_list:
        target_nfvo = mongoUtils.find("nfvo", {"id": ns["nfvo-id"]})
        target_nfvo_obj = pickle.loads(
            mongoUtils.find("nfvo_obj", {"id": ns["nfvo-id"]})["obj"])
        site = ns["placement_loc"]
        nfvo_inst_ns_id = ns_inst_info[ns["ns-id"]][
            site["location"]]["nfvo_inst_ns"]
        insr = target_nfvo_obj.getNsr(nfvo_inst_ns_id)
        # Poll every 10s until the NS is running+configured; abort on failure
        while insr["operational-status"] != "running" or insr[
                "config-status"] != "configured":
            if insr["operational-status"] == "failed":
                error_message = (
                    f"Network Service {ns['nsd-id']} failed to start on NFVO {ns['nfvo-id']}."
                )
                logger.error(error_message)
                nest["ns_inst_info"] = ns_inst_info
                nest["status"] = f"Failed - {error_message}"
                mongoUtils.update("slice", nest["_id"], nest)
                return
            time.sleep(10)
            insr = target_nfvo_obj.getNsr(nfvo_inst_ns_id)
        nest["deployment_time"]["NS_Deployment_Time"][
            ns["ns-name"]] = format(time.time() - ns["start_time"], ".4f")
        # Get the IPs of the instantiated NS
        vnf_list = []
        vnfr_id_list = target_nfvo_obj.getVnfrId(insr)
        for ivnfr_id in vnfr_id_list:
            vnfr = target_nfvo_obj.getVnfr(ivnfr_id)
            vnf_list.append(target_nfvo_obj.getIPs(vnfr))
        ns_inst_info[ns["ns-id"]][site["location"]]["vnfr"] = vnf_list
    nest["ns_inst_info"] = ns_inst_info
    mongoUtils.update("slice", nest["_id"], nest)
    # If monitoring parameter is set, send the ns_list to nfv_mon module
    if monitoring and mon_producer:
        mon_producer.send(topic="nfv_mon",
                          value={
                              "action": "create",
                              "ns_list": ns_inst_info
                          })
        nest["slice_monitoring"]["nfv_ns_status_monitoring"] = True
    # *** STEP-3b: Radio Slice Configuration ***
    if mongoUtils.count("ems") <= 0:
        logger.warning("There is no registered EMS")
    else:
        # Add the management IPs for the NS sent ems in ems_messages:
        ems_radio_data = {
            "ue_DL_throughput": nest["ue_DL_throughput"],
            "ue_UL_throughput": nest["ue_UL_throughput"],
            "group_communication_support":
            nest["group_communication_support"],
            "number_of_terminals": nest["number_of_terminals"],
            "positional_support": nest["positional_support"],
            "radio_spectrum": nest["radio_spectrum"],
            "device_velocity": nest["device_velocity"],
            "terminal_density": nest["terminal_density"],
        }
        radio_start_time = time.time()
        # NOTE(review): iii is incremented but never read - confirm leftover
        iii = 0
        for connection in nest["connections"]:
            iii += 1
            data = {}
            ems_id_list = []
            for key in connection:
                # Check if the connection is shared
                # NOTE(review): shared_check set here is unused in this loop
                try:
                    shared_slice_list_key = nest["shared"][key][
                        connection[key]["_id"]]
                    shared_slice_list = connection[key]["shared"][
                        "sharing_list"][shared_slice_list_key]
                    shared = True
                    if len(shared_slice_list) > 1:
                        shared_check = 2
                    else:
                        shared_check = 1
                except KeyError:
                    shared_slice_list_key = None
                    shared = False
                key_data = {}
                try:
                    ems_id = connection[key]["ems-id"]
                except KeyError:
                    # No EMS for this end of the connection - skip it
                    continue
                else:
                    if ems_id not in ems_id_list:
                        ems_id_list.append(ems_id)
                    try:
                        ns_l = connection[key]["ns_list"]
                    except KeyError:
                        pass
                    else:
                        key_data["ns"] = []
                        for ns in ns_l:
                            try:
                                ns_info = ns_inst_info[ns["ns-id"]][
                                    connection[key]["location"]]
                            except KeyError:
                                ns_info = ns_inst_info[ns["ns-id"]]["Core"]
                            ns_data = {
                                "name": ns["ns-name"],
                                "location": ns["placement_loc"]["location"],
                                "vnf_list": ns_info["vnfr"],
                            }
                            # Add the shared information for the ns, if any
                            if shared:
                                ns_data["shared"] = ns_inst_info[
                                    ns["ns-id"]][connection[key]
                                                 ["location"]]["shared"]
                                ns_data["sharing_list"] = ns_inst_info[
                                    ns["ns-id"]][connection[key]
                                                 ["location"]]["sharing_list"]
                            else:
                                ns_data["shared"] = False
                            key_data["ns"].append(ns_data)
                    try:
                        key_data["pnf"] = connection[key]["pnf_list"]
                    except KeyError:
                        pass
                    else:
                        if shared:
                            for ipnf in connection[key]["pnf_list"]:
                                ipnf["shared"] = True
                                ipnf["sharing_list"] = shared_slice_list_key
                if key_data:
                    data[key] = key_data
            if data:
                data["slice_sla"] = ems_radio_data
                data["slice_id"] = nest["_id"]
                for ems_id in ems_id_list:
                    messages = ems_messages.get(ems_id, [])
                    messages.append(data)
                    ems_messages[ems_id] = messages
        for ems_id, ems_message in ems_messages.items():
            # Find the EMS
            target_ems = mongoUtils.find("ems", {"id": ems_id})
            if not target_ems:
                # Error handling: There is no such EMS
                logger.error(
                    "EMS {} not found - No configuration".format(ems_id))
                continue
            target_ems_obj = pickle.loads(
                mongoUtils.find("ems_obj", {"id": ems_id})["obj"])
            # Send the message
            for imessage in ems_message:
                target_ems_obj.conf_radio(imessage)
            nest["conf_comp"]["ems"].append(ems_id)
        nest["ems_data"] = ems_messages
        nest["deployment_time"]["Radio_Configuration_Time"] = format(
            time.time() - radio_start_time, ".4f")
    # *** STEP-4: Finalize ***
    # Create Grafana Dashboard for monitoring
    # Create the NS status panel
    if monitoring:
        # Open the Grafana Dashboard template
        monitoring_slice_id = "slice_" + nest["_id"].replace("-", "_")
        with open("/katana-grafana/templates/new_dashboard.json",
                  mode="r") as dashboard_file:
            new_dashboard = json.load(dashboard_file)
            new_dashboard["dashboard"]["title"] = monitoring_slice_id
            new_dashboard["dashboard"]["uid"] = nest["_id"]
        # Add the dashboard panels
        # Add the NS Status panels
        expr = "ns_status" + '{slice_id="' + nest["_id"] + '"}'
        targets = [{
            "expr": expr,
            "legendFormat": "",
            "interval": "",
            "format": "table",
            "instant": True
        }]
        infra_targets = {}
        for ns in ns_inst_info.values():
            for key, value in ns.items():
                # Check if the VIM supports infrastructure monitoring
                search_vim_id = value["vim"]
                if value.get("shared", False):
                    search_vim_id = search_vim_id[:-2]
                selected_vim = mongoUtils.find("vim", {"id": search_vim_id})
                # NOTE(review): if the vim lookup returns None the subscript
                # raises TypeError, which this KeyError handler does not
                # catch - confirm lookups always succeed here
                try:
                    vim_monitoring = selected_vim["type"]
                    vim_monitoring_list = infra_targets.get(vim_monitoring,
                                                            [])
                    for ivnf in value["vnfr"]:
                        vim_monitoring_list += ivnf["vm_list"]
                    infra_targets[vim_monitoring] = vim_monitoring_list
                except KeyError:
                    pass
        # Create the VM Monitoring panels
        PANELS = [
            "vm_state",
            "vm_cpu_cpu_time",
            "vm_cpu_overall_cpu_usage",
            "vm_memory_actual",
            "vm_memory_available",
            "vm_memory_usage",
            "vm_disk_read_bytes",
            "vm_disk_write_bytes",
            "vm_disk_errors",
        ]
        with open("/katana-grafana/templates/new_vm_monitoring_panel.json",
                  mode="r") as panel_file:
            vm_panel_template = json.load(panel_file)
            for i, panel in enumerate(PANELS):
                vm_panel = copy.deepcopy(vm_panel_template)
                vm_panel["title"] = panel
                vm_panel["gridPos"] = {"h": 8, "w": 12, "x": 13, "y": i * 9}
                vm_panel["id"] = 10 + i
                vm_targets = []
                for vim_type, vm_list in infra_targets.items():
                    for vm in vm_list:
                        expr = (vim_type + "_" + panel + '{project=~".*' +
                                nest["_id"] + '",vm_name="' + vm + '"}')
                        vm_targets.append({
                            "expr": expr,
                            "interval": "",
                            "legendFormat": ""
                        })
                vm_panel["targets"] = vm_targets
                new_dashboard["dashboard"]["panels"].append(vm_panel)
        # Read and fill the NS Status panel template
        with open("/katana-grafana/templates/new_ns_status_panel.json",
                  mode="r") as panel_file:
            ns_panel = json.load(panel_file)
            ns_panel["targets"] = targets
            new_dashboard["dashboard"]["panels"].append(ns_panel)
        # Add the WIM Monitoring panel
        if wim_monitoring:
            # Read and fill the panel template
            with open("/katana-grafana/templates/new_wim_panel.json",
                      mode="r") as panel_file:
                wim_panel = json.load(panel_file)
                wim_panel["targets"].append({
                    "expr": f"rate({monitoring_slice_id}_flows[1m])",
                    "interval": "",
                    "legendFormat": "",
                    "refId": "A",
                })
                new_dashboard["dashboard"]["panels"].append(wim_panel)
        mon_producer.send(
            "nfv_mon",
            value={
                "action": "katana_mon",
                "slice_info": {
                    "slice_id": nest["_id"],
                    "status": "running"
                },
            },
        )
        # Use the Grafana API in order to create the new dashboard for the new slice
        grafana_url = "http://katana-grafana:3000/api/dashboards/db"
        headers = {
            "accept": "application/json",
            "content-type": "application/json"
        }
        grafana_user = os.getenv("GF_SECURITY_ADMIN_USER", "admin")
        grafana_passwd = os.getenv("GF_SECURITY_ADMIN_PASSWORD", "admin")
        r = requests.post(
            url=grafana_url,
            headers=headers,
            auth=(grafana_user, grafana_passwd),
            data=json.dumps(new_dashboard),
        )
        logger.info(f"Created new Grafana dashboard for slice {nest['_id']}")
    logger.info(f"{nest['_id']} Status: Running")
    nest["status"] = "Running"
    nest["deployment_time"]["Slice_Deployment_Time"] = format(
        time.time() - nest["created_at"], ".4f")
    mongoUtils.update("slice", nest["_id"], nest)
def get(self, uuid):
    """
    Returns the details of specific function,
    used by: `katana function inspect [uuid]`
    """
    # Fetch the stored function document and serialize it for the response
    function_entry = mongoUtils.get("func", uuid)
    return dumps(function_entry), 200