Example #1
def get_ns_sap_info(nsi_id, nsd_saps):
    sap_list = []
    nsi_sap_info = ns_db.get_ns_sap_info(nsi_id)
    for current_sap in nsd_saps:
        if current_sap["cpdId"] in nsi_sap_info:
            sap_address = "test for future"
            user_access_info = []
            for address in nsi_sap_info[current_sap["cpdId"]]:
                # each address entry maps a vnfdId to the address exposed at this SAP
                for key in address.keys():
                    user_info_dict = {
                        'address': address[key],
                        'sapdId': current_sap["cpdId"],
                        'vnfdId': key
                    }
                    user_access_info.append(user_info_dict)
            # build the SAP entry once all user access info has been collected
            new_sap = {
                "sapInstanceId": "0",
                "sapdId": current_sap["cpdId"],
                "sapName": current_sap["cpdId"],
                "description": current_sap["description"],
                "address": sap_address,
                "userAccessInfo": user_access_info
            }
            sap_list.append(new_sap)
    return sap_list
Example #2
def get_ns_sap_info(nsi_id, nsd_saps):
    log_queue.put(
        ["DEBUG",
         "get_ns_sap_info nsi_id:%s nsd_sap:%s" % (nsi_id, nsd_saps)])
    sap_list = []
    nsi_sap_info = ns_db.get_ns_sap_info(nsi_id)
    if nsi_sap_info is None:
        return None
    log_queue.put([
        "DEBUG",
        "get_ns_sap_info nsi_id:%s nsi_sap:%s" % (nsi_id, nsi_sap_info)
    ])
    for current_sap in nsd_saps:
        if current_sap["cpdId"] in nsi_sap_info:
            sap_address = nsi_sap_info[current_sap["cpdId"]]
            user_access_info = {
                "sapdId": current_sap["cpdId"],
                "address": sap_address
            }

            new_sap = {
                "sapInstanceId": "0",
                "sapdId": current_sap["cpdId"],
                "sapName": current_sap["cpdId"],
                "description": current_sap["description"],
                "address": sap_address,
                "userAccessInfo": [user_access_info]
            }
            sap_list.append(new_sap)
    log_queue.put([
        "DEBUG",
        "get_ns_sap_info output nsi_id:%s nsi_sap:%s" % (nsi_id, sap_list)
    ])
    return sap_list
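For reference, here is a minimal sketch of how the Example #2 variant of get_ns_sap_info could be exercised in isolation. The ns_db stub and the log_queue below are assumptions standing in for the project's own modules (they must be visible as module-level names where the function is defined); the field names simply mirror the code above.

# Hypothetical stand-ins for the project's ns_db module and logging queue.
import queue
from types import SimpleNamespace

log_queue = queue.Queue()
ns_db = SimpleNamespace(
    get_ns_sap_info=lambda nsi_id: {"mgtSap": "10.0.0.10"})

nsd_saps = [{"cpdId": "mgtSap", "description": "management SAP"}]
print(get_ns_sap_info("nsi-1", nsd_saps))
# -> [{'sapInstanceId': '0', 'sapdId': 'mgtSap', 'sapName': 'mgtSap',
#      'description': 'management SAP', 'address': '10.0.0.10',
#      'userAccessInfo': [{'sapdId': 'mgtSap', 'address': '10.0.0.10'}]}]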
Example #3
def instantiate_ns_process(nsId, body, nestedInfo=None):
    """
    Function description
    Parameters
    ----------
    nsId: string
        Identifier of the service
    body: struct
        Object having the deployment flavour and the instantiation level.
    nsdIdc: string
        Identifier of the nested service to be instantiated, only available when composing
    Returns
    -------
    name: type
        return description
    """
    log_queue.put(["INFO", "*****Time measure: SOEc SOEc instantiating a NS"])
    log_queue.put([
        "INFO",
        "SOEc instantiate_ns_process with nsId %s, body %s" % (nsId, body)
    ])
    # get the nsdId that corresponds to nsId
    nsdId = ns_db.get_nsdId(nsId)
    if nestedInfo:
        nsdId = next(iter(nestedInfo))
    # first get the ns and vnfs descriptors
    nsd_json = nsd_db.get_nsd_json(nsdId, None)
    # for each vnf in the NSD, get its json descriptor
    vnfdIds = nsd_json["nsd"]["vnfdId"]
    vnfds_json = {}
    for vnfdId in vnfdIds:
        log_queue.put(["DEBUG", vnfdId])
        vnfds_json[vnfdId] = vnfd_db.get_vnfd_json(vnfdId, None)
    # request RO
    rooe.instantiate_ns(nsId, nsd_json, vnfds_json, body, nestedInfo)
    log_queue.put(
        ["INFO", "*****Time measure: SOEc updated databases instantiating"])
    sap_info = ns_db.get_ns_sap_info(nsId)
    if (len(sap_info) > 0):
        log_queue.put(["INFO", "sapInfo: %s" % (sap_info)])
        monitoring.configure_ns_monitoring(nsId, nsd_json, vnfds_json,
                                           sap_info)
        log_queue.put([
            "INFO",
            "instantiate_ns monitoring exporters created for nsId %s" % (nsId)
        ])
        # initiate alerts
        alert_configure.configure_ns_alerts(nsId, nsdId, nsd_json, vnfds_json,
                                            sap_info)
        log_queue.put([
            "INFO",
            "*****Time measure: SOEc created monitoring exporters and alerts"
        ])
    log_queue.put([
        "INFO",
        "*****Time measure: SOEc instantiate_ns_process finished for nsId %s" %
        (nsId)
    ])
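The structure of the instantiation body is not visible in this example; it is only forwarded to rooe.instantiate_ns. Per the docstring it carries a deployment flavour and an instantiation level, so purely as an illustration it might look like the sketch below; the field names are an assumption loosely modeled on ETSI NFV SOL005 and may differ from the real northbound API.

# Hypothetical instantiation request body (field names are assumed, not
# confirmed by the code above).
body = {
    "flavourId": "df_default",
    "nsInstantiationLevelId": "il_default"
}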
Example #4
def scale_ns_process(nsId, body):
    """
    Performs the scaling of the service identified by "nsId" according to the info at body 
    Parameters
    ----------
    nsId: string
        Identifier of the Network Service Instance.
    body: request body including scaling operation
    Returns
    -------
    """
    log_queue.put(
        ["INFO",
         "scale_ns_process with nsId %s, body %s" % (nsId, body)])
    # get the nsdId that corresponds to nsId
    nsdId = ns_db.get_nsdId(nsId)
    # get current instantiation level
    current_df = ns_db.get_ns_df(nsId)
    current_il = ns_db.get_ns_il(nsId)
    # first get the ns and vnfs descriptors
    nsd_json = nsd_db.get_nsd_json(nsdId, None)
    # for each vnf in the NSD, get its json descriptor
    vnfdIds = nsd_json["nsd"]["vnfdId"]
    vnfds_json = {}
    for vnfdId in vnfdIds:
        vnfds_json[vnfdId] = vnfd_db.get_vnfd_json(vnfdId, None)
    #request RO
    sap_info_pre_scaling = ns_db.get_ns_sap_info(nsId)
    rooe.scale_ns(nsId, nsd_json, vnfds_json, body, current_df, current_il)
    # maybe we have to update the monitoring jobs: we assume that new performance monitoring jobs
    # will be similar to one already present
    sap_info = ns_db.get_ns_sap_info(nsId)
    log_queue.put(["INFO", "new sapInfo after scaling: %s" % (sap_info)])
    monitoring.update_ns_monitoring(nsId, nsd_json, vnfds_json, sap_info)
    log_queue.put([
        "DEBUG",
        "monitoring exporters updated after scaling for nsId %s" % (nsId)
    ])
    # update alerts: it is not needed
    log_queue.put(["INFO", "scale_ns_process finished for nsId %s" % (nsId)])
Example #5
def instantiate_ns_process(nsId, body, nestedInfo=None):
    """
    Function description
    Parameters
    ----------
    nsId: string
        Identifier of the service
    body: struct
        Object having the deployment flavour and the instantiation level.
    nsdIdc: string
        Identifier of the nested service to be instantiated, only available when composing
    Returns
    -------
    name: type
        return description
    """
    log_queue.put([
        "INFO",
        "*****Time measure for nsId: %s: SOEc SOEc instantiating a NS" % (nsId)
    ])
    log_queue.put([
        "INFO",
        "SOEc instantiate_ns_process with nsId %s, body %s" % (nsId, body)
    ])
    # get the nsdId that corresponds to nsId
    nsdId = ns_db.get_nsdId(nsId)
    if nestedInfo:
        nsdId = next(iter(nestedInfo))
    # first get the ns and vnfs descriptors
    nsd_json = nsd_db.get_nsd_json(nsdId, None)
    # for each vnf in the NSD, get its json descriptor
    vnfdIds = nsd_json["nsd"]["vnfdId"]
    vnfds_json = {}
    for vnfdId in vnfdIds:
        log_queue.put(["DEBUG", vnfdId])
        vnfds_json[vnfdId] = vnfd_db.get_vnfd_json(vnfdId, None)
    # request RO
    log_queue.put([
        "INFO",
        "*****Time measure for nsId: %s: SOEc SOEc retrieving descriptors for a NS"
        % (nsId)
    ])
    rooe.instantiate_ns(nsId, nsd_json, vnfds_json, body, nestedInfo)
    log_queue.put([
        "INFO",
        "*****Time measure for nsId: %s: SOEc SOEc-ROE updated DBs instantiating a NS"
        % (nsId)
    ])
    sap_info = ns_db.get_ns_sap_info(nsId)
    if (len(sap_info) > 0):
        log_queue.put(["INFO", "sapInfo: %s" % (sap_info)])
        monitoring.configure_ns_monitoring(nsId, nsd_json, vnfds_json,
                                           sap_info)
        log_queue.put([
            "INFO",
            "*****Time measure for nsId: %s: SOEc SOEc finished configuring monitoring/dashboard"
            % (nsId)
        ])
        log_queue.put([
            "INFO",
            "instantiate_ns monitoring exporters created for nsId %s" % (nsId)
        ])
        # initiate alerts
        alert_configure.configure_ns_alerts(nsId, nsdId, nsd_json, vnfds_json,
                                            sap_info)
        log_queue.put([
            "INFO",
            "*****Time measure for nsId: %s: SOEc SOEc finished configuring Threshold-based alerts"
            % (nsId)
        ])
        # initiate aiml work for "scaling" problem
        alert_configure.configure_ns_aiml_scale_work(nsId, nsdId, nsd_json,
                                                     vnfds_json, sap_info)
        log_queue.put([
            "INFO",
            "*****Time measure for nsId: %s: SOEc SOEc finished configuring AIML alerts"
            % (nsId)
        ])
    log_queue.put([
        "INFO",
        "*****Time measure for nsId: %s: SOEc SOEc finished instantiating a NS"
        % (nsId)
    ])
    notification_db.create_notification_record({
        "nsId": nsId,
        "type": "fa-send",
        "text": nsId + " INSTANTIATED",
        "time": datetime.now().strftime("%d/%m/%Y %H:%M:%S.%f")
    })
Example #6
def scale_ns_process(nsId, body, nestedInfo=None):
    """
    Performs the scaling of the service identified by "nsId" according to the info at body 
    Parameters
    ----------
    nsId: string
        Identifier of the Network Service Instance.
    body: request body including scaling operation
    Returns
    -------
    """
    log_queue.put([
        "INFO",
        "*****Time measure for nsId: %s: SOEc SOEc scaling a nested/regular NS"
        % nsId
    ])
    log_queue.put(
        ["INFO",
         "scale_ns_process with nsId %s, body %s" % (nsId, body)])
    # get the nsdId that corresponds to nsId
    if nestedInfo:
        nsdId = next(iter(nestedInfo))
        current_df = nestedInfo[nsdId][0]
        current_il = nestedInfo[nsdId][1]
    else:
        nsdId = ns_db.get_nsdId(nsId)
        # get current instantiation level
        current_df = ns_db.get_ns_df(nsId)
        current_il = ns_db.get_ns_il(nsId)
    # first get the ns and vnfs descriptors
    nsd_json = nsd_db.get_nsd_json(nsdId, None)
    # for each vnf in the NSD, get its json descriptor
    vnfdIds = nsd_json["nsd"]["vnfdId"]
    vnfds_json = {}
    for vnfdId in vnfdIds:
        vnfds_json[vnfdId] = vnfd_db.get_vnfd_json(vnfdId, None)
    #request RO
    sap_info_pre_scaling = ns_db.get_ns_sap_info(nsId)
    log_queue.put([
        "INFO",
        "*****Time measure for nsId: %s: SOEc SOEc-ROE prepared info for scaling"
        % (nsId)
    ])
    rooe.scale_ns(nsId, nsd_json, vnfds_json, body, current_df, current_il,
                  nestedInfo)
    log_queue.put([
        "INFO",
        "*****Time measure for nsId: %s: SOEc SOEc-ROE updated DBs scaling a NS"
        % (nsId)
    ])
    # check the result of the scaling; it may not have been done due to lack of resources
    operationId = operation_db.get_operationId(nsId, "INSTANTIATION")
    if ((operation_db.get_operation_status(operationId) == "SUCCESSFULLY_DONE")
            and ns_db.get_ns_status(nsId) == "INSTANTIATED"):
        # maybe we have to update the monitoring jobs: we assume that new performance monitoring jobs
        # will be similar to one already present
        sap_info = ns_db.get_ns_sap_info(nsId)
        log_queue.put(["INFO", "new sapInfo after scaling: %s" % (sap_info)])
        monitoring.update_ns_monitoring(nsId, nsd_json, vnfds_json, sap_info)
        log_queue.put([
            "INFO",
            "*****Time measure for nsId: %s: SOEc SOEc updated monitoring info"
            % nsId
        ])
        log_queue.put([
            "DEBUG",
            "monitoring exporters updated after scaling for nsId %s" % (nsId)
        ])
        # update alerts: it is not needed
    # however, in the case of aiml_scaling it is needed, to restart the spark job
    else:
        if ns_db.get_ns_status(nsId) == "INSTANTIATED":
            log_queue.put(
                ["DEBUG", "Scaling operation failed due to lack of resources"])
        elif ns_db.get_ns_status(nsId) == "FAILED":
            log_queue.put(
                ["DEBUG", "Scaling operation failed at the MANO platform"])
    aiml_scaling_info = ns_db.get_aiml_info(nsId, "scaling")
    if (aiml_scaling_info and (ns_db.get_ns_status(nsId) == "INSTANTIATED")):
        log_queue.put(
            ["DEBUG", "The AIML platform is triggering the scaling operation"])
        alert_configure.update_ns_aiml_scale_work(nsId, aiml_scaling_info)
        log_queue.put([
            "INFO",
            "*****Time measure for nsId: %s: SOEc SOEc updated AIML alert job"
            % nsId
        ])
    log_queue.put(["INFO", "scale_ns_process finished for nsId %s" % (nsId)])
    log_queue.put([
        "INFO",
        "*****Time measure for nsId: %s: SOEc SOEc finished scaling a nested/regular NS"
        % (nsId)
    ])
    notification_db.create_notification_record({
        "nsId": nsId,
        "type": "fa-gears",
        "text": nsId + " SCALED",
        "time": datetime.now().strftime("%d/%m/%Y %H:%M:%S.%f")
    })
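All of these functions report progress by putting [level, message] lists on a shared log_queue rather than calling a logger directly. The consumer below is only a sketch of that pattern, assuming the module defining these functions exposes its log_queue; the real project may drain the queue from a dedicated logging process instead.

# Minimal, assumed log_queue consumer: maps the [level, message] entries
# produced above onto the standard logging module.
import logging
import queue
import threading

logging.basicConfig(level=logging.DEBUG)
log_queue = queue.Queue()

def log_consumer(q):
    while True:
        level, message = q.get()  # each entry is a [level, message] list
        logging.log(getattr(logging, level, logging.INFO), message)
        q.task_done()

threading.Thread(target=log_consumer, args=(log_queue,), daemon=True).start()
log_queue.put(["INFO", "log_queue consumer started"])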