def initialize(self):
  """
  Set up the Resource Orchestration Sublayer and its optional REST APIs.

  .. seealso::
    :func:`AbstractAPI.initialize() <escape.util.api.AbstractAPI.initialize>`
  """
  log.debug("Initializing Resource Orchestration Sublayer...")
  self.resource_orchestrator = ResourceOrchestrator(self)
  if self._nffg_file:
    try:
      raw_request = self._read_data_from_file(self._nffg_file)
      parsed_request = NFFG.parse(raw_request)
      self.__proceed_instantiation(nffg=parsed_request)
    except (ValueError, IOError, TypeError) as e:
      log.error("Can't load service request from file because of: " + str(e))
    else:
      log.info("Graph representation is loaded successfully!")
  # Start the ROS REST-API in agent mode or when explicitly requested
  if self._agent or self._rosapi:
    self._initiate_ros_api()
  # Start the Cf-Or REST-API only when configured
  if self._cfor:
    self._initiate_cfor_api()
  log.info("Resource Orchestration Sublayer has been initialized!")
  if self._agent:
    log.warning(
      "In AGENT mode Service Layer is not going to be initialized!")
def __init_from_file (self, path, format=DEFAULT_NFFG_FORMAT):
  """
  Build a pre-defined topology from an NFFG stored in a file.

  The file path is searched in CONFIG with the name ``TOPO``.

  :param path: file path
  :type path: str
  :param format: NF-FG storing format (default: internal NFFG representation)
  :type format: str
  :raise TopologyBuilderException: on missing file, parse error or
    unsupported format
  :return: None
  """
  if path is None:
    log.error("Missing file path of Topology description")
    return
  try:
    with open(path, 'r') as f:
      log.info("Load topology from file: %s" % path)
      # Only the internal NFFG representation is accepted here
      if format != self.DEFAULT_NFFG_FORMAT:
        raise TopologyBuilderException(
          "Unsupported file format: %s!" % format)
      log.info("Using file format: %s" % format)
      self.__init_from_NFFG(nffg=NFFG.parse(f.read()))
  except IOError:
    log.warning("Additional topology file not found: %s" % path)
    raise TopologyBuilderException("Missing topology file!")
  except ValueError as e:
    log.error(
      "An error occurred when load topology from file: %s" % e.message)
    raise TopologyBuilderException("File parsing error!")
def main(substrate, loops=False, vnf_sharing=0.0, seed=0, multiple_scs=False,
         use_saps_once=False, max_sc_count=2, chain_maxlen=8, max_cpu=4,
         max_mem=1600, max_storage=3, max_bw=7, max_e2e_lat_multiplier=20,
         min_e2e_lat_multiplier=1.1):
  """
  Generate a service request NFFG for the substrate topology stored in the
  given file.

  Reads the substrate NFFG, derives the supported NF types and SAP id lists,
  computes the average delay-weighted shortest path length of the bare
  topology (without deployed NFs) and delegates request generation to
  :func:`generateRequestForCarrierTopo`.

  :param substrate: path of the substrate NFFG file
  :type substrate: str
  :param seed: random seed for reproducible generation
  :type seed: int
  :return: generated request
  :rtype: :class:`NFFG`
  """
  rnd.seed(seed)
  with open(substrate, "r") as f:
    substrate_nffg = NFFG.parse(f.read())
  # Deduplicated set of NF types supported by any infra node
  # (fix: replaces the dead `request = None` / list-extend-then-set pattern)
  nf_types = list({t for infra in substrate_nffg.infras
                   for t in infra.supported})
  all_saps_ending = [s.id for s in substrate_nffg.saps]
  all_saps_beginning = [s.id for s in substrate_nffg.saps]
  # Path lengths must be computed on the topology without deployed NFs
  bare_substrate_nffg = copy.deepcopy(substrate_nffg)
  for n in substrate_nffg.nfs:
    bare_substrate_nffg.del_node(n)
  path_calc_graph = nx.MultiDiGraph()
  for l in bare_substrate_nffg.links:
    path_calc_graph.add_edge(l.src.node.id, l.dst.node.id, l.id,
                             delay=l.delay)
  avg_shp_len = nx.average_shortest_path_length(path_calc_graph,
                                                weight='delay')
  return generateRequestForCarrierTopo(
    all_saps_ending, all_saps_beginning, avg_shp_len, nf_types, loops=loops,
    use_saps_once=use_saps_once, vnf_sharing_probabilty=vnf_sharing,
    multiSC=multiple_scs, max_sc_count=max_sc_count,
    chain_maxlen=chain_maxlen, max_cpu=max_cpu, max_mem=max_mem,
    max_storage=max_storage, max_bw=max_bw,
    max_e2e_lat_multiplier=max_e2e_lat_multiplier,
    min_e2e_lat_multiplier=min_e2e_lat_multiplier)
def initialize(self):
  """
  Initialize the Service Layer: set layer id, element manager, orchestrator
  and process the optional service-graph input file.

  .. seealso::
    :func:`AbstractAPI.initialize() <escape.util.api.AbstractAPI.initialize>`
  """
  log.debug("Initializing Service Layer...")
  self.__sid = CONFIG.get_service_layer_id()
  if self.__sid is not None:
    log.debug("Setup ID for Service Layer: %s" % self.__sid)
  else:
    # Fall back to the class-level default id when config has no entry
    self.__sid = self.LAYER_ID
    log.error(
      "Missing ID of Service Layer from config. Using default value: %s"
      % self.__sid)
  # Set element manager
  self.elementManager = ClickManager()
  # Init central object of Service layer
  self.service_orchestrator = ServiceOrchestrator(self)
  # Read input from file if it's given and initiate SG
  if self._sg_file:
    try:
      stats.init_request_measurement(request_id=self._sg_file)
      service_request = self._read_data_from_file(self._sg_file)
      log.info("Graph representation is loaded successfully!")
      # Sniff the payload format from its first character:
      # '{' -> JSON/NFFG, '<' -> XML/Virtualizer
      if service_request.startswith('{'):
        log.debug(
          "Detected format: JSON - Parsing from NFFG format...")
        nffg = NFFG.parse(raw_data=service_request)
      elif service_request.startswith('<'):
        log.debug(
          "Detected format: XML - Parsing from Virtualizer format...")
        converter = NFFGConverter(domain="INTERNAL", logger=log)
        nffg = converter.parse_from_Virtualizer(vdata=service_request)
      else:
        log.warning("Detected unexpected format...")
        return
      if nffg.mode is not None:
        log.info('Detected mapping mode in NFFG: %s' % nffg.mode)
      else:
        # No explicit mode in the request -> default to ADD
        nffg.mode = NFFG.MODE_ADD
        log.info("No mapping mode has been detected in NFFG! "
                 "Set default mode: %s" % nffg.mode)
      log.info("Schedule service request delayed by %d seconds..."
               % SCHEDULED_SERVICE_REQUEST_DELAY)
      stats.set_request_id(request_id=nffg.id)
      self.api_sas_sg_request_delayed(service_nffg=nffg)
    except (ValueError, IOError, TypeError) as e:
      log.error("Can't load service request from file because of: "
                + str(e))
      # A broken input file is fatal for the whole process
      quit_with_error(msg=str(e), logger=log)
  else:
    # Init REST-API if no input file is given
    self._initiate_rest_api()
  # Init GUI
  if self._gui:
    self._initiate_gui()
  log.info("Service Layer has been initialized!")
def __init__(self, mgr, global_res=None):
  """
  Init.

  :param mgr: global domain resource manager
  :type mgr: :any:`GlobalResourceManager`
  :param global_res: initial global resource (optional)
  :type global_res: :any:`NFFG`
  :return: None
  """
  super(DomainVirtualizer, self).__init__(type=self.DOMAIN_VIRTUALIZER)
  log.debug("Init DomainVirtualizer with name: %s - initial resource: %s"
            % (DoV, global_res))
  # Keep only a weak proxy so the manager stays garbage-collector safe
  self._mgr = weakref.proxy(mgr)
  # Start with an empty placeholder NFFG until a global view is provided
  self.__global_nffg = NFFG(id=DoV, name=DoV + "-uninitialized")
  if global_res is not None:
    self.set_domain_as_global_view(domain=NFFG.DEFAULT_DOMAIN,
                                   nffg=global_res)
def _onlySAPsRequest():
  """
  Build a minimal request NFFG: two SAPs joined by a single SG link.

  :return: request NFFG
  :rtype: :class:`NFFG`
  """
  nffg = NFFG(id="BME-req-001")
  src_sap = nffg.add_sap(name="SAP1", id="sap1")
  dst_sap = nffg.add_sap(name="SAP2", id="sap2")
  nffg.add_sglink(src_sap.add_port(0), dst_sap.add_port(0))
  # nffg.add_sglink(sap1.add_port(1), sap2.add_port(1))
  # NOTE(review): the identical requirement is added twice below — looks like
  # a leftover from the commented-out second SG link; confirm intent
  nffg.add_req(src_sap.ports[0], dst_sap.ports[0], bandwidth=1000, delay=24)
  nffg.add_req(src_sap.ports[0], dst_sap.ports[0], bandwidth=1000, delay=24)
  return nffg
def __init__ (self, standalone=False, **kwargs):
  """
  .. seealso::
    :func:`AbstractAPI.__init__() <escape.util.api.AbstractAPI.__init__>`
  """
  log.info("Starting Service Layer...")
  # Cache for the last processed service graph
  self.last_sg = NFFG(id=0, name='empty')
  # Placeholders - real setup is deferred to initialize()
  self.__sid = None
  self.service_orchestrator = None
  self.elementManager = None
  self.gui_proc = None
  # Mandatory super() call
  super(ServiceLayerAPI, self).__init__(standalone, **kwargs)
def initialize (self):
  """
  Set up the Controller Adaptation Sublayer and process the optional
  pre-mapped NFFG input file.

  .. seealso::
    :func:`AbstractAPI.initialize() <escape.util.api.AbstractAPI.initialize>`
  """
  log.debug("Initializing Controller Adaptation Sublayer...")
  self.controller_adapter = ControllerAdapter(self,
                                              with_infr=self._with_infr)
  if self._mapped_nffg:
    try:
      raw_mapped = self._read_data_from_file(self._mapped_nffg)
      self.__proceed_installation(mapped_nffg=NFFG.parse(raw_mapped))
    except (ValueError, IOError, TypeError) as e:
      log.error("Can't load service request from file because of: " + str(e))
    else:
      log.debug("Graph representation is loaded successfully!")
  log.info("Controller Adaptation Sublayer has been initialized!")
def __init__(self, standalone=False, **kwargs):
  """
  .. seealso::
    :func:`AbstractAPI.__init__() <escape.util.api.AbstractAPI.__init__>`
  """
  log.info("Starting Service Layer...")
  # Cache for the last processed service graph
  self.last_sg = NFFG(id=0, name='empty')
  # Placeholders - real setup is deferred to initialize()
  self.__sid = None
  self.elementManager = None
  # type: ServiceOrchestrator
  self.service_orchestrator = None
  self.gui_proc = None
  self.api_mgr = RESTAPIManager(unique_bb_id=False,
                                unique_nf_id=CONFIG.ensure_unique_vnf_id(),
                                logger=log)
  # Mandatory super() call
  super(ServiceLayerAPI, self).__init__(standalone, **kwargs)
def sg (self):
  """
  Main API function for Service Graph initiation.

  Same functionality as "get-config" in UNIFY interface.

  Bounded to POST HTTP verb.

  :return: None
  """
  self.log.debug("Called REST-API function: sg")
  # Millisecond timestamp for request-arrival measurement
  self.log.info(int(round(time.time() * 1000)))
  # Obtain NFFG from request body
  log.debug("Detected response format: %s"
            % self.headers.get("Content-Type"))
  body = self._get_body()
  # log.getChild("REST-API").debug("Request body:\n%s" % body)
  if body is None or not body:
    log.warning("Received data is empty!")
    self.send_error(400, "Missing body!")
    return
  # Expect XML format --> need to convert first
  if self.virtualizer_format_enabled:
    # Reject non-XML payloads when the UNIFY (Virtualizer) interface is on
    if self.headers.get("Content-Type") != "application/xml" or \
       not body.startswith("<?xml version="):
      log.error("Received data is not in XML format despite of the UNIFY "
                "interface is enabled!")
      self.send_error(415)
      return
    # Convert response's body to NFFG
    nffg = NFFGConverter(domain="INTERNAL",
                         logger=log).parse_from_Virtualizer(vdata=body)
  else:
    try:
      # Initialize NFFG from JSON representation
      nffg = NFFG.parse(body)
    except Exception as e:
      self.log.error(
        "Abort request! Received exception during payload parsing: %s" % e)
      return
  self.log.debug("Parsed service request: %s" % nffg)
  # Hand the parsed request over to the layer API for processing
  self._proceed_API_call('api_sas_sg_request', nffg)
  self.send_acknowledge()
  self.log.debug("%s function: get-config ended!" % self.LOGGER_NAME)
def getPicoTopo():
  """
  Not carrier style topo. Few nodes with big resources.

  Builds a central switch with a ring of four infra nodes, each with an
  attached SAP.

  :return: generated topology
  :rtype: :class:`NFFG`
  """
  random.seed(0)
  nffg = NFFG(id="SmallExampleTopo")
  switch = {'cpu': 0, 'mem': 0, 'storage': 0, 'delay': 0.5,
            'bandwidth': 1000, 'infra_type': NFFG.TYPE_INFRA_SDN_SW}
  core_sw = nffg.add_infra(id=getName("sw"), **switch)
  infra = {'cpu': 400, 'mem': 320000, 'storage': 1500, 'delay': 1.0,
           'bandwidth': 10000, 'infra_type': NFFG.TYPE_INFRA_EE}
  linkres = {'bandwidth': 1000, 'delay': 0.5}
  prev_infra = nffg.add_infra(id=getName("infra"), **infra)
  first_infra = prev_infra
  prev_infra.add_supported_type(list(string.ascii_uppercase)[:10])
  for i in range(0, 4):
    if i == 3:
      # close the ring by connecting back to the first infra node
      curr_infra = first_infra
    else:
      curr_infra = nffg.add_infra(id=getName("infra"), **infra)
      curr_infra.add_supported_type(list(string.ascii_uppercase)[:10])
    sap_name = getName("sap")
    sap = nffg.add_sap(id=sap_name, name=sap_name)
    # wire the node to the central switch, the previous infra and its SAP
    nffg.add_undirected_link(core_sw.add_port(), curr_infra.add_port(),
                             **linkres)
    nffg.add_undirected_link(prev_infra.add_port(), curr_infra.add_port(),
                             **linkres)
    nffg.add_undirected_link(curr_infra.add_port(), sap.add_port(),
                             **linkres)
    prev_infra = curr_infra
  return nffg
def getCarrierTopo(params, increment_port_ids=False):
  """
  Construct the core network and add PoPs with their parameters.

  params is a list of dictionaries with PoP data:
    'Retail': (BNAS, RCpb, RCT)
    'Business': (PE, BCpb, BCT)
    'CloudNFV': (CL,CH,SE,SAN_bw,SAN_sto,NF_types,SE_cores,SE_mem,SE_sto,
    CL_bw, CH_links)

  WARNING: using this function with increment_port_ids=True this function
  is not thread safe, because it uses global variable then!
  """
  # This initializes the random generator always to the same value, so the
  # returned index sequence, and thus the network parameters will be generated
  # always the same (we want a fixed network environment)
  # The generated identifiers are still different between genereations, but
  # those does not influence the mapping process
  random.seed(0)
  popcnt = 0
  nffg = NFFG(id="CarrierTopo")
  p = increment_port_ids
  # Backbone switches have no compute resources
  backbone_res = {'cpu': 0, 'mem': 0, 'storage': 0, 'delay': 0.5,
                  'bandwidth': 1000, 'infra_type': NFFG.TYPE_INFRA_SDN_SW}
  bn0 = nffg.add_infra(id=getName("bn"), **backbone_res)
  bn1 = nffg.add_infra(id=getName("bn"), **backbone_res)
  bn2 = nffg.add_infra(id=getName("bn"), **backbone_res)
  bn3 = nffg.add_infra(id=getName("bn"), **backbone_res)
  # Connect the four backbone nodes into a ring
  nffg.add_undirected_link(add_port(bn0, p), add_port(bn1, p),
                           bandwidth=1000, delay=10)
  nffg.add_undirected_link(add_port(bn1, p), add_port(bn2, p),
                           bandwidth=1000, delay=10)
  nffg.add_undirected_link(add_port(bn2, p), add_port(bn3, p),
                           bandwidth=1000, delay=10)
  nffg.add_undirected_link(add_port(bn3, p), add_port(bn0, p),
                           bandwidth=1000, delay=10)
  backbones = (bn0, bn1, bn2, bn3)
  bnlen = len(backbones)
  # Each PoP is attached to two neighboring backbone nodes (round-robin)
  for popdata in params:
    tmp = []
    tmp.extend(popdata['Retail'])
    tmp.extend(popdata['Business'])
    tmp.extend(popdata['CloudNFV'])
    addPoP(nffg, popcnt, backbones[popcnt % bnlen],
           backbones[(popcnt + 1) % bnlen], p, *tmp)
    popcnt += 1
  """
  #           BNAS,RCpb, RCT, PE,BCpb, BCT, CL,CH,SE, SAN_bw,
  addPoP(nffg, bn2, bn3, 2, 10000, 0.2, 2, 4000, 0.2, 2, 8, 8, 160000,
  #      SAN_sto,NF_types, SE_cores, SE_mem, SE_sto, CL_bw, CH_links
         100000, ['A','B'], [8,12,16], [32000,64000], [150], 40000, 4)
  #           BNAS, RCpb, RCT, PE,BCpb, BCT, CL,CH, SE, SAN_bw,
  addPoP(nffg, bn1, bn2, 10, 40000, 0.2, 8, 4000, 0.2, 4, 40, 8, 160000,
  #      SAN_sto,NF_types, SE_cores, SE_mem, SE_sto,
         100000, ['A','B','C','D','E'],[8,12,16], [32000,64000], [150,200],
  #      CL_bw, CH_links
         80000, 8)
  """
  log.debug("Carrier topology construction finished!")
  return nffg
class DomainVirtualizer(AbstractVirtualizer):
  """
  Specific Virtualizer class for global domain virtualization.

  Implement the same interface as
  :class:`AbstractVirtualizer
  <escape.orchest.virtualization_mgmt.AbstractVirtualizer>`

  Use :class:`NFFG` format to store the global infrastructure info.
  """
  # Events raised by this class
  _eventMixin_events = {DoVChangedEvent}
  """Events raised by this class"""
  TYPE = 'DOV'
  # Reentrant lock to synchronize the access to the DoV
  __DoV_lock = threading.RLock()

  # noinspection PyUnusedLocal
  def __init__(self, mgr, global_res=None, **kwargs):
    """
    Init.

    :param mgr: global domain resource manager
    :type mgr: :any:`GlobalResourceManager`
    :param global_res: initial global resource (optional)
    :type global_res: :class:`NFFG`
    :param kwargs: optional parameters for Virtualizer
    :type kwargs: dict
    :return: None
    """
    super(DomainVirtualizer, self).__init__()
    log.debug(
      "Init DomainVirtualizer with name: %s - initial resource: %s" %
      (DoV, global_res))
    # Garbage-collector safe
    self._mgr = weakref.proxy(mgr)
    # Define DoV as an empty NFFG by default
    self.__global_nffg = NFFG(id=DoV, name=DoV + "-uninitialized")
    if global_res is not None:
      self.set_domain_as_global_view(domain=NFFG.DEFAULT_DOMAIN,
                                     nffg=global_res)

  @property
  def name(self):
    """
    Return with the name of the View.

    :return: name of the view
    :rtype: str
    """
    if self.__global_nffg is not None and hasattr(self.__global_nffg,
                                                  'name'):
      return self.__global_nffg.name
    else:
      return DoV + "-uninitialized"

  def __str__(self):
    """
    Return with specific string representation.

    :return: string representation
    :rtype: str
    """
    return "DomainVirtualizer(name=%s)" % self.name

  def __repr__(self):
    """
    Return with specific representation.

    :return: spec representation
    :rtype: str
    """
    return super(DomainVirtualizer, self).__repr__()

  @synchronized(__DoV_lock)
  def is_empty(self):
    """
    Return True if the stored topology is empty.

    :return: topology is empty or not
    :rtype: bool
    """
    # If Dov has not been set yet
    if self.__global_nffg is None:
      return True
    # If Dov does not contain any Node
    elif self.__global_nffg.is_empty():
      return True
    else:
      return False

  @synchronized(__DoV_lock)
  def get_resource_info(self):
    """
    Return the copy of the global resource info represented this class.

    :return: global resource info
    :rtype: :class:`NFFG`
    """
    # Return a copy so callers cannot mutate the stored DoV
    return self.__global_nffg.copy()

  @synchronized(__DoV_lock)
  def set_domain_as_global_view(self, domain, nffg):
    """
    Set the copy of given NFFG as the global view of DoV.

    Add the specific :attr:`DoV` id and generated name to the global view.

    :param nffg: NFFG instance intended to use as the global view
    :type nffg: :class:`NFFG`
    :param domain: name of the merging domain
    :type domain: str
    :return: updated Dov
    :rtype: :class:`NFFG`
    """
    log.debug("Set domain: %s as the global view!" % domain)
    if not self.__global_nffg.is_empty():
      log.warning(
        "Global view is not empty! Current state will be lost!")
    self.__global_nffg = nffg.copy()
    self.__global_nffg.id = DoV
    self.__global_nffg.name = DoV
    log.debug("DoV stat:\n%s" % self.__global_nffg.get_stat())
    # Raise event for observing Virtualizers about topology change
    self.raiseEventNoErrors(DoVChangedEvent,
                            cause=DoVChangedEvent.TYPE.UPDATE)
    return self.__global_nffg

  @synchronized(__DoV_lock)
  def update_full_global_view(self, nffg):
    """
    Update the merged Global view with the given probably modified global
    view.

    Reserve id, name values of the global view.

    :param nffg: updated global view which replace the stored one
    :type nffg: :class:`NFFG`
    :return: updated Dov
    :rtype: :class:`NFFG`
    """
    # Preserve the DoV identity over the replacement
    dov_id = self.__global_nffg.id
    dov_name = self.__global_nffg.name
    self.__global_nffg = nffg.copy()
    self.__global_nffg.id, self.__global_nffg.name = dov_id, dov_name
    log.debug("DoV stat:\n%s" % self.__global_nffg.get_stat())
    # Raise event for observing Virtualizers about topology change
    self.raiseEventNoErrors(DoVChangedEvent,
                            cause=DoVChangedEvent.TYPE.UPDATE)
    return self.__global_nffg

  @synchronized(__DoV_lock)
  def merge_new_domain_into_dov(self, nffg):
    """
    Add a newly detected domain to DoV.

    Based on the feature: escape.util.nffg.NFFGToolBox#merge_domains

    :param nffg: NFFG object need to be merged into DoV
    :type nffg: :class:`NFFG`
    :return: updated Dov
    :rtype: :class:`NFFG`
    """
    # Using general merging function from NFFGToolBox and return the updated
    # NFFG
    NFFGToolBox.merge_new_domain(base=self.__global_nffg, nffg=nffg,
                                 log=log)
    # Raise event for observing Virtualizers about topology change
    log.debug("DoV stat:\n%s" % self.__global_nffg.get_stat())
    log.log(VERBOSE, "Merged Dov:\n%s" % self.__global_nffg.dump())
    self.raiseEventNoErrors(DoVChangedEvent,
                            cause=DoVChangedEvent.TYPE.EXTEND)
    return self.__global_nffg

  @synchronized(__DoV_lock)
  def remerge_domain_in_dov(self, domain, nffg):
    """
    Update the existing domain in the merged Global view with explicit domain
    remove and re-add.

    :param nffg: changed infrastructure info
    :type nffg: :class:`NFFG`
    :param domain: name of the merging domain
    :type domain: str
    :return: updated Dov
    :rtype: :class:`NFFG`
    """
    NFFGToolBox.remove_domain(base=self.__global_nffg, domain=domain,
                              log=log)
    # log.log(VERBOSE, "Reduced Dov:\n%s" % self.__global_nffg.dump())
    NFFGToolBox.merge_new_domain(base=self.__global_nffg, nffg=nffg,
                                 log=log)
    log.debug("DoV stat:\n%s" % self.__global_nffg.get_stat())
    log.log(VERBOSE, "Re-merged DoV:\n%s" % self.__global_nffg.dump())
    if self.__global_nffg.is_empty():
      log.warning(
        "No Node had been remained after updating the domain part: "
        "%s! DoV is empty!" % domain)
    # Raise event for observing Virtualizers about topology change
    self.raiseEventNoErrors(DoVChangedEvent,
                            cause=DoVChangedEvent.TYPE.CHANGE)
    return self.__global_nffg

  @synchronized(__DoV_lock)
  def update_domain_in_dov(self, domain, nffg):
    """
    Update the existing domain in the merged Global view.

    :param nffg: changed infrastructure info
    :type nffg: :class:`NFFG`
    :param domain: name of the merging domain
    :type domain: str
    :return: updated Dov
    :rtype: :class:`NFFG`
    """
    NFFGToolBox.update_domain(base=self.__global_nffg, updated=nffg,
                              log=log)
    if self.__global_nffg.is_empty():
      log.warning(
        "No Node had been remained after updating the domain part: "
        "%s! DoV is empty!" % domain)
    log.debug("DoV stat:\n%s" % self.__global_nffg.get_stat())
    log.log(VERBOSE, "Updated DoV:\n%s" % self.__global_nffg.dump())
    # Raise event for observing Virtualizers about topology change
    self.raiseEventNoErrors(DoVChangedEvent,
                            cause=DoVChangedEvent.TYPE.CHANGE)
    return self.__global_nffg

  @synchronized(__DoV_lock)
  def remove_domain_from_dov(self, domain):
    """
    Remove the nodes and edges with the given from Global view.

    :param domain: domain name
    :type domain: str
    :return: updated Dov
    :rtype: :class:`NFFG`
    """
    NFFGToolBox.remove_domain(base=self.__global_nffg, domain=domain,
                              log=log)
    if self.__global_nffg.is_empty():
      log.warning(
        "No Node had been remained after updating the domain part: "
        "%s! DoV is empty!" % domain)
    log.debug("DoV stat:\n%s" % self.__global_nffg.get_stat())
    log.log(VERBOSE, "Reduced Dov:\n%s" % self.__global_nffg.dump())
    # Raise event for observing Virtualizers about topology change
    self.raiseEventNoErrors(DoVChangedEvent,
                            cause=DoVChangedEvent.TYPE.REDUCE)
    return self.__global_nffg

  @synchronized(__DoV_lock)
  def clean_domain_from_dov(self, domain):
    """
    Clean domain by removing initiated NFs and flowrules related to BiSBiS
    nodes of the given domain

    :param domain: domain name
    :type domain: str
    :return: updated Dov
    :rtype: :class:`NFFG`
    """
    if self.__global_nffg.is_empty():
      log.debug("DoV is empty! Skip cleanup domain: %s" % domain)
      return self.__global_nffg
    if self.__global_nffg.is_bare():
      log.debug("No initiated service has been detected in DoV! "
                "Skip cleanup domain: %s" % domain)
      return self.__global_nffg
    NFFGToolBox.clear_domain(base=self.__global_nffg, domain=domain,
                             log=log)
    log.debug("DoV stat:\n%s" % self.__global_nffg.get_stat())
    log.log(VERBOSE, "Cleaned Dov:\n%s" % self.__global_nffg.dump())
    self.raiseEventNoErrors(DoVChangedEvent,
                            cause=DoVChangedEvent.TYPE.CHANGE)
    return self.__global_nffg

  @synchronized(__DoV_lock)
  def update_domain_status_in_dov(self, domain, nffg):
    """
    Set status of initiated NFs and flowrules related to BiSBiS nodes of the
    given domain.

    :param domain: domain name
    :type domain: str
    :param nffg: changed infrastructure info
    :type nffg: :class:`NFFG`
    :return: updated Dov
    :rtype: :class:`NFFG`
    """
    if self.__global_nffg.is_empty():
      log.debug("DoV is empty! Skip cleanup domain: %s" % domain)
      return self.__global_nffg
    NFFGToolBox.update_status_info(nffg=nffg, status=NFFG.STATUS_DEPLOY)
    NFFGToolBox.update_nffg_by_status(base=self.__global_nffg, updated=nffg,
                                      log=log)
    log.log(VERBOSE, "Updated Dov:\n%s" % self.__global_nffg.dump())
    self.raiseEventNoErrors(DoVChangedEvent,
                            cause=DoVChangedEvent.TYPE.CHANGE)
    return self.__global_nffg

  @synchronized(__DoV_lock)
  def remove_deployed_elements(self):
    """
    Remove all the NFs, flowrules and dynamic ports from DoV.

    :return: updated Dov
    :rtype: :class:`NFFG`
    """
    if self.__global_nffg.is_empty():
      log.debug("DoV is empty! Skip DoV cleanup")
      return self.__global_nffg
    NFFGToolBox.remove_deployed_services(nffg=self.__global_nffg, log=log)
    log.debug("DoV stat:\n%s" % self.__global_nffg.get_stat())
    log.log(VERBOSE, "Cleared Dov:\n%s" % self.__global_nffg.dump())
    self.raiseEventNoErrors(DoVChangedEvent,
                            cause=DoVChangedEvent.TYPE.CHANGE)
    return self.__global_nffg
def generateRequestForCarrierTopo(all_saps_ending, all_saps_beginning,
                                  avg_shp_len, nf_types,
                                  max_e2e_lat_multiplier=20,
                                  min_e2e_lat_multiplier=1.1, loops=False,
                                  use_saps_once=True,
                                  vnf_sharing_probabilty=0.0, multiSC=False,
                                  max_sc_count=2, chain_maxlen=8, max_cpu=4,
                                  max_mem=1600, max_storage=3, max_bw=7):
  """
  By default generates VNF-disjoint SC-s starting/ending only once in each
  SAP. With the 'loops' option, only loop SC-s are generated.
  'vnf_sharing_probabilty' determines the ratio of
  #(VNF-s used by at least two SC-s)/#(not shared VNF-s).

  :return: generated request NFFG or None if there were not enough free SAPs
  """
  sc_count = 1
  gen = NameGenerator()
  # maximal possible bandwidth for chains
  if multiSC:
    sc_count = rnd.randint(2, max_sc_count)
  # One request is built per iteration; needs more free SAPs than chains
  while len(all_saps_ending) > sc_count and len(
     all_saps_beginning) > sc_count:
    nffg = NFFG(id="E2e_req_test_nffg")
    nffg.mode = NFFG.MODE_ADD
    # newly added NF-s of one request
    current_nfs = []
    for scid in xrange(0, sc_count):
      # find two SAP-s for chain ends.
      nfs_this_sc = []
      sapid = all_saps_beginning.pop() if use_saps_once else \
        rnd.choice(all_saps_beginning)
      if sapid not in nffg:
        sap1 = nffg.add_sap(id=sapid)
      else:
        sap1 = nffg.network.node[sapid]
      sap2 = None
      if loops:
        # Loop chain: start and end in the same SAP
        sap2 = sap1
      else:
        # Draw end-SAPs until one differs from the start SAP
        tmpid = all_saps_ending.pop() if use_saps_once else \
          rnd.choice(all_saps_ending)
        while True:
          if tmpid != sap1.id:
            if tmpid not in nffg:
              sap2 = nffg.add_sap(id=tmpid)
            else:
              sap2 = nffg.network.node[tmpid]
            break
          else:
            tmpid = all_saps_ending.pop() if use_saps_once else \
              rnd.choice(all_saps_ending)
      sg_path = []
      # Reuse the first existing port of sap1, or create one
      if len(sap1.ports) > 0:
        for sap1port in sap1.ports:
          break
      else:
        sap1port = sap1.add_port(id=gen.get_name("port"))
      last_req_port = sap1port
      # generate some VNF-s connecting the two SAP-s
      vnf_cnt = next(gen_seq()) % chain_maxlen + 1
      for vnf in xrange(0, vnf_cnt):
        # in the first case p is used to determine which previous chain
        # should be used to share the VNF, in the latter case it is used to
        # determine whether we should share now.
        p = rnd.random()
        if multiSC and \
           p < vnf_sharing_probabilty and len(current_nfs) > 0:
          # this influences the given VNF sharing probability...
          if reduce(lambda a, b: a and b,
                    [v in nfs_this_sc for v in current_nfs]):
            log.warn("All shareable VNF-s are already added to this chain! "
                     "Skipping VNF sharing...")
            continue
          else:
            # Pick a previously added NF not yet part of this chain
            nf = rnd.choice(current_nfs)
            while nf in nfs_this_sc:
              nf = rnd.choice(current_nfs)
            # the VNF is already in the subchain, we just need to add the
            # links
            # vnf_added = True
        else:
          nf = nffg.add_nf(id="-".join(("SC", str(scid), "VNF", str(vnf))),
                           func_type=rnd.choice(nf_types),
                           cpu=rnd.random() * max_cpu,
                           mem=rnd.random() * max_mem,
                           storage=rnd.random() * max_storage)
        nfs_this_sc.append(nf)
        newport = nf.add_port(id=gen.get_name("port"))
        sglink = nffg.add_sglink(last_req_port, newport,
                                 id=gen.get_name("link"))
        sg_path.append(sglink.id)
        last_req_port = nf.add_port(id=gen.get_name("port"))
      # Reuse the first existing port of sap2, or create one
      if len(sap2.ports) > 0:
        for sap2port in sap2.ports:
          break
      else:
        sap2port = sap2.add_port(id=gen.get_name("port"))
      sglink = nffg.add_sglink(last_req_port, sap2port,
                               id=gen.get_name("link"))
      sg_path.append(sglink.id)
      # WARNING: this is completly a wild guess! Failing due to this doesn't
      # necessarily mean algorithm failure
      # Bandwidth maximal random value should be min(SAP1acces_bw,
      # SAP2access_bw)
      # MAYBE: each SAP can only be once in the reqgraph? - this is the case
      # now.
      minlat = avg_shp_len * min_e2e_lat_multiplier
      maxlat = avg_shp_len * max_e2e_lat_multiplier
      nffg.add_req(sap1port, sap2port, delay=rnd.uniform(minlat, maxlat),
                   bandwidth=rnd.random() * max_bw, sg_path=sg_path,
                   id=gen.get_name("req"))
      # log.debug(
      #   "Service Chain on NF-s added: %s" % [nf.id for nf in nfs_this_sc])
      # this prevents loops in the chains and makes new and old NF-s equally
      # preferable in total for NF sharing
      new_nfs = [vnf for vnf in nfs_this_sc if vnf not in current_nfs]
      for tmp in xrange(0, scid + 1):
        current_nfs.extend(new_nfs)
      if not multiSC:
        return nffg
    if multiSC:
      return nffg
  return None
def _constructExampleRequest():
  """
  Build an example request NFFG with two SAPs, five NFs, SG links,
  E2E requirements and two Infra nodes (for testing Infra removal).

  :return: example request
  :rtype: :class:`NFFG`
  """
  nffg = NFFG(id="BME-req-001")
  sap0 = nffg.add_sap(name="SAP0", id="sap0")
  sap1 = nffg.add_sap(name="SAP1", id="sap1")
  # add NF requirements.
  # Note: storage is used now for the first time, it comes in with the
  # NodeResource class
  # Note: internal latency is only forwarded to lower layer
  # Note: internal bw is untested yet, even before the NFFG support
  nf0 = nffg.add_nf(id="NF0", name="NetFunc0", func_type='A', cpu=2, mem=2,
                    storage=2, bandwidth=100)
  nf1 = nffg.add_nf(id="NF1", name="NetFunc1", func_type='B', cpu=1.5,
                    mem=1.5, storage=1.5, delay=50)
  nf2 = nffg.add_nf(id="NF2", name="NetFunc2", func_type='C', cpu=3, mem=3,
                    storage=3, bandwidth=500)
  nf3 = nffg.add_nf(id="NF3", name="NetFunc3", func_type='A', cpu=2, mem=2,
                    storage=2, bandwidth=100, delay=50)
  nf4 = nffg.add_nf(id="NF4", name="NetFunc4", func_type='C', cpu=0, mem=0,
                    storage=0, bandwidth=500)
  # directed SG links
  # flowclass default: None, meaning: match all traffic
  # some agreement on flowclass format is required.
  nffg.add_sglink(sap0.add_port(0), nf0.add_port(0))
  nffg.add_sglink(nf0.add_port(1), nf1.add_port(0), flowclass="HTTP")
  nffg.add_sglink(nf1.add_port(1), nf2.add_port(0), flowclass="HTTP")
  nffg.add_sglink(nf2.add_port(1), sap1.add_port(1))
  nffg.add_sglink(nf0.add_port(2), nf3.add_port(0), flowclass="non-HTTP")
  nffg.add_sglink(nf3.add_port(1), nf2.add_port(2), flowclass="non-HTTP")
  nffg.add_sglink(nf1.add_port(2), nf4.add_port(0), flowclass="index.com")
  nffg.add_sglink(nf4.add_port(1), nf2.add_port(3), flowclass="index.com")
  # add EdgeReqs
  nffg.add_req(sap0.ports[0], sap1.ports[1], delay=40, bandwidth=1500)
  nffg.add_req(nf1.ports[1], nf2.ports[0], delay=3.5)
  nffg.add_req(nf3.ports[1], nf2.ports[2], bandwidth=500)
  nffg.add_req(sap0.ports[0], nf0.ports[0], delay=3.0)
  # force collocation of NF0 and NF3
  # nffg.add_req(nf0.ports[2], nf3.ports[0], delay=1.0)
  # not SAP-to-SAP requests are not taken into account yet, these are ignored
  nffg.add_req(nf0.ports[1], nf2.ports[0], delay=1.0)
  # test Infra node removal from the request NFFG
  infra1 = nffg.add_infra(id="BiS-BiS1")
  infra2 = nffg.add_infra(id="BiS-BiS2")
  nffg.add_undirected_link(infra1.add_port(0), nf0.add_port(3), dynamic=True)
  nffg.add_undirected_link(infra1.add_port(1), nf0.add_port(4), dynamic=True)
  nffg.add_undirected_link(infra1.add_port(2), nf1.add_port(3), dynamic=True)
  nffg.add_undirected_link(infra2.add_port(0), nf2.add_port(4), dynamic=True)
  nffg.add_undirected_link(infra2.add_port(1), nf3.add_port(2), dynamic=True)
  nffg.add_undirected_link(infra1.add_port(3), infra2.add_port(2),
                           bandwidth=31241242)
  return nffg
def get_topo_desc ():
  """
  Build the dynamic fallback topology description: two NETCONF-capable
  Execution Environments, two inter-EE SDN switches and two SAPs.

  :return: topology description
  :rtype: :class:`NFFG`
  """
  topo = NFFG(id="DYNAMIC-FALLBACK-TOPO", name="fallback-dynamic")
  # NETCONF capable containers a.k.a. Execution Environments
  ee_params = dict(domain="INTERNAL", infra_type=NFFG.TYPE_INFRA_EE, cpu=5,
                   mem=5, storage=5, delay=0.9, bandwidth=5000)
  ee1 = topo.add_infra(id="nc1", name="NC1", **ee_params)
  ee2 = topo.add_infra(id="nc2", name="NC2", **ee_params)
  ee1.add_supported_type(['A', 'B'])
  ee2.add_supported_type(['A', 'C'])
  # Inter-EE switches
  sw_params = dict(domain="INTERNAL", infra_type=NFFG.TYPE_INFRA_SDN_SW,
                   delay=0.2, bandwidth=10000)
  switch3 = topo.add_infra(id="sw3", name="SW3", **sw_params)
  switch4 = topo.add_infra(id="sw4", name="SW4", **sw_params)
  # Service Access Points
  sap_a = topo.add_sap(id="sap1", name="SAP1")
  sap_b = topo.add_sap(id="sap2", name="SAP2")
  # Static links
  linkres = {'delay': 1.5, 'bandwidth': 2000}
  topo.add_link(ee1.add_port(1), switch3.add_port(1), id="l1", **linkres)
  topo.add_link(ee2.add_port(1), switch4.add_port(1), id="l2", **linkres)
  topo.add_link(switch3.add_port(2), switch4.add_port(2), id="l3", **linkres)
  topo.add_link(switch3.add_port(3), sap_a.add_port(1), id="l4", **linkres)
  topo.add_link(switch4.add_port(3), sap_b.add_port(1), id="l5", **linkres)
  # Duplicate one-way static links to become undirected in order to fit to
  # the orchestration algorithm
  # No need for that, ESCAPENetworkBridge do this later
  # nffg.duplicate_static_links()
  return topo
def main(argv):
  """
  Process `test_lvl-*.nffg.tgz` archives found under the directory given by
  ``-l``: for each test level, load the mapped NFFG, compute node/link
  resource-utilization statistics and, depending on the CLI switches, print
  CSV rows (averages, deviations, min/max, CDF points) and/or render
  histogram / CDF plots into the ``plots/`` directory.

  :param argv: raw command line arguments (without the program name)
  :type argv: list
  :return: None
  """
  try:
    opts, args = getopt.getopt(argv, "hl:s:", [
      "hist=", "add_hist_values", "hist_format=", "starting_lvl=", "one",
      "cdf_format=", "cdf", "print_devs", "print_avgs", "print_cdf_data=",
      "print_minmax", "no_cdf_interpolation", "consider_seeds",
      "plot_aspect="])
  except getopt.GetoptError as goe:
    print helpmsg
    raise
  # option defaults
  loc_tgz = ""
  draw_hist = False
  # node resource types tracked in the statistics
  reskeys = ['cpu', 'mem', 'storage', 'bandwidth']
  add_hist_values = False
  hist_format = "png"
  starting_lvl = 0
  process_only_one = False
  draw_cdf = False
  cdf_format = "png"
  print_avgs = False
  print_devs = False
  print_cdf_data = False
  res_cdf_to_print = None
  # NOTE(review): default is True, so the --no_cdf_interpolation flag below is
  # a no-op — presumably the default should be False; confirm intended behavior.
  no_cdf_interpolation = True
  print_minmax = False
  seednum = None
  plot_aspect = 1
  consider_seeds = False
  for opt, arg in opts:
    if opt == "-h":
      print helpmsg
      sys.exit()
    elif opt == "-l":
      loc_tgz = arg
    elif opt == "-s":
      seednum = int(arg)
    elif opt == "--hist":
      # the argument is the aggregation bucket width of the histogram
      draw_hist = True
      hist_aggr_size = float(arg)
      hist = {}
      for res in reskeys + ['link_bw']:
        # bucket upper bounds -> counters, keys rounded to 4 decimals
        hist[res] = OrderedDict()
        for aggr in np.arange(hist_aggr_size, 1.0, hist_aggr_size):
          hist[res][float("%.4f" % aggr)] = 0
        hist[res][1.0] = 0
    elif opt == "--add_hist_values":
      add_hist_values = True
    elif opt == "--hist_format":
      hist_format = arg
    elif opt == "--starting_lvl":
      starting_lvl = int(arg)
    elif opt == "--one":
      process_only_one = True
    elif opt == "--cdf":
      draw_cdf = True
      cdf = {}
      for res in reskeys + ['link_bw']:
        cdf[res] = []
    elif opt == "--cdf_format":
      cdf_format = arg
    elif opt == "--print_devs":
      print_devs = True
    elif opt == "--print_avgs":
      print_avgs = True
    elif opt == "--print_cdf_data":
      print_cdf_data = True
      res_cdf_to_print = arg
    elif opt == "--no_cdf_interpolation":
      no_cdf_interpolation = True
    elif opt == "--print_minmax":
      print_minmax = True
    elif opt == "--consider_seeds":
      consider_seeds = True
    elif opt == "--plot_aspect":
      plot_aspect = float(arg)
  # collect the test levels from the archive file names: test_lvl-<N>.nffg.tgz
  nffg_num_list = []
  bashCommand = "ls -x " + loc_tgz
  process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
  tgz_files = process.communicate()[0]
  for filen in tgz_files.replace("\n", " ").split(" "):
    if 'test_lvl' in filen:
      nffg_num_list.append(int(filen.split('-')[1].split('.')[0]))
  nffg_num_list = sorted(filter(lambda x: x >= starting_lvl, nffg_num_list))
  # print CSV headers before the per-level rows
  if print_avgs:
    print "test_lvl, avg(link_bw), ", ", ".join(
      ["".join(["avg(", noderes, ")"]) for noderes in reskeys])
  if print_devs:
    # NOTE(review): iterates 'cdf', which only exists if --cdf was also given;
    # --print_devs without --cdf raises NameError — confirm intended coupling.
    print "test_lvl, ", ", ".join(
      ["".join(["dev(", noderes, ")"]) for noderes in cdf])
  if print_minmax:
    print "test_lvl, ", ", ".join(
      ["min(%s), max(%s)" % (res, res) for res in reskeys + ['link_bw']])
  # keep pristine copies so the structs can be reset for every test level
  if draw_hist:
    empty_hist = copy.deepcopy(hist)
  if draw_cdf:
    empty_cdf = copy.deepcopy(cdf)
  for test_lvl in nffg_num_list:
    filename = "test_lvl-%s.nffg.tgz" % test_lvl
    # decompress; after decompression nffg-s end up two folders deep
    os.system("".join(["tar -xf ", loc_tgz, "/", filename]))
    if consider_seeds:
      nffg_prefix = "nffgs-seed%s-batch_tests/" % seednum + \
                    loc_tgz.split("/")[-1] + "/"
    else:
      nffg_prefix = "nffgs-batch_tests/" + loc_tgz.split("/")[-1] + "/"
    with open("".join([nffg_prefix, "test_lvl-", str(test_lvl), ".nffg"]),
              "r") as f:
      nffg = NFFG.parse(f.read())
    # recompute the free capacities before measuring utilization
    nffg.calculate_available_node_res()
    nffg.calculate_available_link_res([])
    # calculate avg. res utils by resource types.
    avgs = {}
    cnts = {}
    mins = {}
    maxs = {}
    if draw_hist:
      hist = copy.deepcopy(empty_hist)
    if draw_cdf:
      cdf = copy.deepcopy(empty_cdf)
    for noderes in reskeys:
      avgs[noderes] = 0.0
      cnts[noderes] = 0
      for i in nffg.infras:
        # only count nodes which had these resources initially
        if i.resources[noderes] > 1e-10:
          # utilization = used / total capacity
          util = float(i.resources[noderes] - i.availres[noderes]) / \
                 i.resources[noderes]
          avgs[noderes] += util
          cnts[noderes] += 1
          # maintain max/min struct
          if noderes in mins:
            if mins[noderes] > util:
              mins[noderes] = util
          else:
            mins[noderes] = util
          if noderes in maxs:
            if maxs[noderes] < util:
              maxs[noderes] = util
          else:
            maxs[noderes] = util
          if draw_hist:
            increment_util_counter(hist[noderes], util, hist_aggr_size)
          if draw_cdf:
            cdf[noderes].append(util)
      avgs[noderes] /= cnts[noderes]
    # same statistics for static link bandwidth
    avg_linkutil = 0.0
    linkcnt = 0
    for l in nffg.links:
      if l.type == 'STATIC':
        link_util = float(l.bandwidth - l.availbandwidth) / l.bandwidth
        avg_linkutil += link_util
        linkcnt += 1
        # maintain max/min struct
        if 'link_bw' in mins:
          if mins['link_bw'] > link_util:
            mins['link_bw'] = link_util
        else:
          mins['link_bw'] = link_util
        if 'link_bw' in maxs:
          if maxs['link_bw'] < link_util:
            maxs['link_bw'] = link_util
        else:
          maxs['link_bw'] = link_util
        if draw_hist:
          increment_util_counter(hist['link_bw'], link_util, hist_aggr_size)
        if draw_cdf:
          cdf['link_bw'].append(link_util)
    avg_linkutil /= linkcnt
    if print_avgs:
      to_print = [test_lvl, avg_linkutil]
      to_print.extend([avgs[res] for res in reskeys])
      print ",".join(map(str, to_print))
    if print_devs:
      # sample standard deviation (n-1 denominator) over the CDF samples
      avgs['link_bw'] = avg_linkutil
      devs = {}
      for res in cdf:
        devs[res] = math.sqrt(sum([(avgs[res] - u) ** 2 for u in cdf[res]]) /
                              (len(cdf[res]) - 1))
      to_print = [test_lvl]
      to_print.extend([devs[res] for res in cdf])
      print ",".join(map(str, to_print))
    if print_minmax:
      to_print = [test_lvl]
      for res in reskeys + ['link_bw']:
        to_print.append(mins[res])
        to_print.append(maxs[res])
      print ",".join(map(str, to_print))
    # delete the NFFG and its parent folders
    os.system("rm -rf nffgs-batch_tests/")
    # we can only know the number of CDF points after the first processing.
    # this number should stay the same for all consequential NFFG-s.
    if print_cdf_data and test_lvl == nffg_num_list[0]:
      print ",".join(["test_lvl"] +
                     [res_cdf_to_print + "_cdf_point" + str(i)
                      for i in range(0, len(cdf[res_cdf_to_print]) + 2)])
    if draw_hist:
      # normalize the histogram to [0,1], so the resource types could be
      # plotted on the same bar chart
      for res in hist:
        sum_util_cnt = sum([hist[res][util_range] for util_range in hist[res]])
        for util_range in hist[res]:
          hist[res][util_range] = float(hist[res][util_range]) / sum_util_cnt
      # print "test_lvl", test_lvl, pformat(hist),"\n"
      # plot the histograms.
      fig, ax = plt.subplots()
      ax.set_ylim((0.00, 1.10))
      # bucket boundaries rescaled to integer positions on the x axis
      range_seq = np.array([float("%.4f" % (aggr / hist_aggr_size))
                            for aggr in
                            np.arange(hist_aggr_size, 1.0, hist_aggr_size)])
      range_seq = np.append(range_seq, [1.0 / hist_aggr_size])
      width = range_seq[-1] / (len(hist) + 2) / len(range_seq)
      colors = iter(['r', 'g', 'b', 'c', 'y'])
      i = 0
      rects = []
      for res in hist:
        rect = ax.bar((range_seq - 1) * (len(hist) + 2) * width +
                      (i + 4.5) * width,
                      [hist[res][util_range] for util_range in hist[res]],
                      width, color=next(colors))
        rects.append((res, rect))
        i += 1
        if add_hist_values:
          autolabel(rect, ax)
      ax.set_ylabel("Ratio of network element counts to total count")
      ax.set_xlabel("Resource utilization intervals [%]")
      ax.set_xticks(range_seq * (len(hist) + 2) * width)
      ax.set_xticklabels(
        [str(int(100 * util_range)) for util_range in hist['cpu']])
      ax.set_aspect(plot_aspect)
      ax.legend([r[0] for r in zip(*rects)[1]], zip(*rects)[0], ncol=5,
                loc='upper left', fontsize=8, bbox_to_anchor=(0, 1))
      plt.savefig('plots/hist-test_lvl-%s.%s' % (test_lvl, hist_format),
                  bbox_inches='tight')
      plt.close(fig)
    if draw_cdf:
      # sort util values incrementing in each resource type
      for res in cdf:
        cdf[res] = sorted(cdf[res])
      fig, ax = plt.subplots()
      ax.set_xlim((-0.05, 1.05))
      ax.set_ylim((-0.05, 1.19))
      colors = iter(['r', 'g', 'b', 'c', 'y'])
      styles = iter([[8, 4, 2, 4, 2, 4], [4, 2], [8, 4, 4, 2], [8, 4, 2, 4],
                     []])
      markers = iter(['o', 'v', '+', 's', ''])
      for res in cdf:
        last_point = (0, 0)
        vertical_step = 1.0 / len(cdf[res])
        rescolor = next(colors)
        resline = next(styles)
        resmarker = next(markers)
        # label only the first segment of each resource's curve
        reslab = res
        if print_cdf_data and res == res_cdf_to_print:
          cdf_plot_data = [last_point]
        for point in zip(cdf[res],
                         np.append(np.arange(vertical_step, 1.0,
                                             vertical_step), [1.0])):
          if no_cdf_interpolation:
            # draw the CDF as a step function (horizontal + vertical segment)
            plt.plot((last_point[0], point[0]),
                     (last_point[1], last_point[1]), color=rescolor, lw=1.5,
                     label=reslab, dashes=resline, marker=resmarker)
            plt.plot((point[0], point[0]), (last_point[1], point[1]),
                     color=rescolor, lw=1.5, dashes=resline, marker=resmarker)
          else:
            # draw the CDF with linear interpolation between sample points
            plt.plot((last_point[0], point[0]), (last_point[1], point[1]),
                     color=rescolor, lw=1.5, dashes=resline, label=reslab,
                     marker=resmarker)
          reslab = None
          if print_cdf_data and res == res_cdf_to_print:
            cdf_plot_data.append(point)
          last_point = point
        # extend the curve horizontally to utilization 1.0
        plt.plot((last_point[0], 1.0), (last_point[1], 1.0), color=rescolor,
                 lw=1.5, dashes=resline, label=reslab, marker=resmarker)
        if print_cdf_data and res == res_cdf_to_print:
          cdf_plot_data.append((1.0, 1.0))
          print test_lvl, ",", ",".join(
            map(lambda t: "(%.6f; %.6f)" % (t[0], t[1]), cdf_plot_data))
      ax.set_ylabel("CDF")
      ax.set_xlabel("Resource utilization [%]")
      ax.set_aspect(plot_aspect)
      ax.set_xticks([float(i) / 100 for i in xrange(0, 101, 20)])
      ax.set_xticklabels([str(i) for i in xrange(0, 101, 20)])
      ax.legend(bbox_to_anchor=(0, 1), loc='upper left', ncol=5, fontsize=12,
                columnspacing=0.9)
      plt.savefig('plots/cdf-test_lvl-%s.%s' % (test_lvl, cdf_format),
                  bbox_inches='tight')
      plt.close(fig)
    # maybe finish after one iteration
    if process_only_one:
      break
def convert_mip_solution_to_nffg (reqs, net, file_inputs=False,
                                  full_remap=False):
  """
  Run the MILP-based mapping on the merged request NFFG-s and convert the MIP
  solution into an output NFFG by reusing the CoreAlgorithm's mapping manager
  and output construction.

  :param reqs: request NFFG objects, or file paths if ``file_inputs``
  :param net: substrate NFFG, or its file path if ``file_inputs``
  :param file_inputs: treat ``reqs``/``net`` as file paths to parse
  :type file_inputs: bool
  :param full_remap: passed through to :any:`CoreAlgorithm`
  :type full_remap: bool
  :return: the mapped NFFG, or None if the MILP found no embedding for a req
  """
  if file_inputs:
    request_seq = []
    for reqfile in reqs:
      with open(reqfile, "r") as f:
        req = NFFG.parse(f.read())
        request_seq.append(req)
    with open(net, "r") as g:
      net = NFFG.parse(g.read())
  else:
    request_seq = reqs
  # all input NFFG-s are obtained somehow
  ######################################################################
  #####  This is taken from the MappingAlgorithms.MAP() function    ####
  ######################################################################
  request = request_seq[0]
  # batch together all nffgs
  for r in request_seq[1:]:
    request = NFFGToolBox.merge_nffgs (request, r)
  chainlist = []
  cid = 1
  edgereqlist = []
  # detach EdgeReqs from the request graph; they are turned into chain
  # constraints (or per-SGHop attributes) below
  for req in request.reqs:
    edgereqlist.append(req)
    request.del_edge(req.src, req.dst, req.id)
  # construct chains from EdgeReqs
  for req in edgereqlist:
    if len(req.sg_path) == 1:
      # then add it as linklocal req instead of E2E req
      log.info("Interpreting one SGHop long EdgeReq (id: %s) as link "
               "requirement on SGHop: %s." % (req.id, req.sg_path[0]))
      reqlink = None
      for sg_link in request.sg_hops:
        if sg_link.id == req.sg_path[0]:
          reqlink = sg_link
          break
      if reqlink is None:
        # NOTE(review): this message contains a %s placeholder but no format
        # argument is applied, so "%s" is logged literally — presumably
        # "% req.sg_path[0]" is missing; confirm and fix upstream.
        log.warn("EdgeSGLink object not found for EdgeSGLink ID %s! "
                 "(maybe ID-s stored in EdgeReq.sg_path are not the "
                 "same type as EdgeSGLink ID-s?)")
      if req.delay is not None:
        setattr(reqlink, 'delay', req.delay)
      if req.bandwidth is not None:
        setattr(reqlink, 'bandwidth', req.bandwidth)
    elif len(req.sg_path) == 0:
      raise uet.BadInputException(
        "If EdgeReq is given, it should specify which SGHop path does it "
        "apply to", "Empty SGHop path was given to %s EdgeReq!" % req.id)
    else:
      # multi-hop EdgeReq: becomes a chain constraint for the algorithm
      try:
        chain = {'id': cid, 'link_ids': req.sg_path,
                 'bandwidth': req.bandwidth if req.bandwidth is not None
                 else 0,
                 'delay': req.delay if req.delay is not None
                 else float("inf")}
      except AttributeError:
        raise uet.BadInputException(
          "EdgeReq attributes are: sg_path, bandwidth, delay",
          "Missing attribute of EdgeReq")
      # reconstruct NF path from EdgeSGLink path
      nf_chain = []
      for reqlinkid in req.sg_path:
        # find EdgeSGLink object of 'reqlinkid'
        reqlink = None
        for sg_link in request.sg_hops:
          if sg_link.id == reqlinkid:
            reqlink = sg_link
            break
        else:
          # for-else: no SG hop matched the id
          raise uet.BadInputException(
            "Elements of EdgeReq.sg_path should be EdgeSGLink.id-s.",
            "SG link %s couldn't be found in input request NFFG" % reqlinkid)
        # add the source node id of the EdgeSGLink to NF path
        nf_chain.append(reqlink.src.node.id)
        # add the destination node id of the last EdgeSGLink to NF path
        if reqlinkid == req.sg_path[-1]:
          if reqlink.dst.node.id != req.dst.node.id:
            raise uet.BadInputException(
              "EdgeReq.sg_path should select a path between its two ends",
              "Last NF (%s) of EdgeReq.sg_path and destination of EdgeReq ("
              "%s) are not the same!" % (reqlink.dst.node.id,
                                         req.dst.node.id))
          nf_chain.append(reqlink.dst.node.id)
        # validate EdgeReq ends.
        if reqlinkid == req.sg_path[0] and \
           reqlink.src.node.id != req.src.node.id:
          raise uet.BadInputException(
            "EdgeReq.sg_path should select a path between its two ends",
            "First NF (%s) of EdgeReq.sg_path and source of EdgeReq (%s) are "
            "not the same!" % (reqlink.src.node.id, req.src.node.id))
      chain['chain'] = nf_chain
      cid += 1
      chainlist.append(chain)
  # if some resource value is not set (is None) then be permissive and set it
  # to a comportable value.
  for respar in ('cpu', 'mem', 'storage', 'delay', 'bandwidth'):
    for n in net.infras:
      if n.resources[respar] is None:
        if respar == 'delay':
          log.warn("Resource parameter %s is not given in %s, "
                   "substituting with 0!" % (respar, n.id))
          n.resources[respar] = 0
        else:
          log.warn("Resource parameter %s is not given in %s, "
                   "substituting with infinity!" % (respar, n.id))
          n.resources[respar] = float("inf")
  # If link res is None or doesn't exist, replace it with a neutral value.
  for i, j, d in net.network.edges_iter(data=True):
    if d.type == 'STATIC':
      if getattr(d, 'delay', None) is None:
        # warning is suppressed for SAP-adjacent links
        if d.src.node.type != 'SAP' and d.dst.node.type != 'SAP':
          log.warn("Resource parameter delay is not given in link %s "
                   "substituting with zero!" % d.id)
        setattr(d, 'delay', 0)
      if getattr(d, 'bandwidth', None) is None:
        if d.src.node.type != 'SAP' and d.dst.node.type != 'SAP':
          log.warn("Resource parameter bandwidth is not given in link %s "
                   "substituting with infinity!" % d.id)
        setattr(d, 'bandwidth', float("inf"))
  # create the class of the algorithm
  # unnecessary preprocessing is executed
  ############################################################################
  # HACK: We only want to use the algorithm class to generate an NFFG, we will
  # fill the mapping struct with the one found by MIP
  alg = CoreAlgorithm(net, request, chainlist, full_remap, False)
  # move 'availres' and 'availbandwidth' values of the network to maxres,
  # because the MIP solution takes them as availabel resource.
  net = alg.bare_infrastucture_nffg
  for n in net.infras:
    n.resources = n.availres
  for d in net.links:
    # there shouldn't be any Dynamic links by now.
    d.bandwidth = d.availbandwidth
  mapping_of_reqs = get_MIP_solution(request_seq, net)
  mappedNFFG = NFFG(id="MILP-mapped")
  for transformed_req in mapping_of_reqs:
    if mapping_of_reqs[transformed_req].is_embedded:
      # replay the MIP node/link mapping into the CoreAlgorithm's manager
      alg.manager.vnf_mapping = []
      alg.manager.link_mapping = nx.MultiDiGraph()
      for n, vlist in mapping_of_reqs[transformed_req].\
          snode_to_hosted_vnodes.items():
        for v in vlist:
          alg.manager.vnf_mapping.append((v, n))
      trans_link_mapping = mapping_of_reqs[transformed_req].vedge_to_spath
      for trans_sghop in trans_link_mapping:
        vnf1 = trans_sghop[0]
        vnf2 = trans_sghop[3]
        reqlid = get_edge_id(alg.req, vnf1, trans_sghop[1], trans_sghop[2],
                             vnf2)
        mapped_path = []
        path_link_ids = []
        for trans_link in trans_link_mapping[trans_sghop]:
          n1 = trans_link[0]
          n2 = trans_link[3]
          lid = get_edge_id(alg.net, n1, trans_link[1], trans_link[2], n2)
          mapped_path.append(n1)
          path_link_ids.append(lid)
        if len(trans_link_mapping[trans_sghop]) == 0:
          # collocated ends: the path is the single hosting node
          mapped_path.append(alg.manager.getIdOfChainEnd_fromNetwork(vnf1))
        else:
          mapped_path.append(n2)
        alg.manager.link_mapping.add_edge(vnf1, vnf2, key=reqlid,
                                          mapped_to=mapped_path,
                                          path_link_ids=path_link_ids)
      oneNFFG = alg.constructOutputNFFG()
      mappedNFFG = NFFGToolBox.merge_nffgs(mappedNFFG, oneNFFG)
    else:
      print "MILP didn't produce a mapping for request %s" % transformed_req
      return None
  # replace Infinity values
  MappingAlgorithms._purgeNFFGFromInfinityValues(mappedNFFG)
  # print mappedNFFG.dump()
  return mappedNFFG
def _testRequestForBacktrack ():
  """
  Assemble the small request used by the backtrack test: the chain
  SAP1 -> a -> b -> c -> SAP2 with one delay requirement per inner SG hop
  plus an end-to-end delay/bandwidth requirement.

  :return: the request graph
  :rtype: :any:`NFFG`
  """
  req_graph = NFFG(id="backtracktest-req", name="btreq")
  src_sap = req_graph.add_sap(name="SAP1", id="sap1req")
  dst_sap = req_graph.add_sap(name="SAP2", id="sap2req")
  # three identical type-A network functions forming the chain
  nf_a, nf_b, nf_c = [
    req_graph.add_nf(id=nf_id, name="NetFunc%s" % idx, func_type='A', cpu=3,
                     mem=3, storage=3)
    for idx, nf_id in enumerate(("a", "b", "c"))]
  # SG hops in chain order
  req_graph.add_sglink(src_sap.add_port(0), nf_a.add_port(0), id="sa")
  req_graph.add_sglink(nf_a.add_port(1), nf_b.add_port(0), id="ab")
  req_graph.add_sglink(nf_b.add_port(1), nf_c.add_port(0), id="bc")
  req_graph.add_sglink(nf_c.add_port(1), dst_sap.add_port(0), id="cs")
  # per-hop delay requirements
  req_graph.add_req(nf_a.ports[0], nf_b.ports[1], delay=1.0, sg_path=["ab"])
  req_graph.add_req(nf_b.ports[0], nf_c.ports[1], delay=1.0, sg_path=["bc"])
  req_graph.add_req(nf_c.ports[0], dst_sap.ports[0], delay=1.0,
                    sg_path=["cs"])
  # end-to-end requirement over the whole chain
  req_graph.add_req(src_sap.ports[0], dst_sap.ports[0], delay=50,
                    bandwidth=10, sg_path=["sa", "ab", "bc", "cs"])
  return req_graph
def _testNetworkForBacktrack ():
  """
  Build the substrate for the backtrack test: two type-A capable infra nodes
  joined through a switch, where each infra<->switch direction has one slow
  (high delay) and one fast (low delay) directed link.

  :return: the substrate network
  :rtype: :any:`NFFG`
  """
  net = NFFG(id="backtracktest", name="backtrack")
  sap1 = net.add_sap(name="SAP1", id="sap1")
  sap2 = net.add_sap(name="SAP2", id="sap2")
  node0 = net.add_infra(id="node0", name="INFRA0", cpu=5, mem=5, storage=5,
                        delay=0.4, bandwidth=5500)
  node1 = net.add_infra(id="node1", name="INFRA1", cpu=9, mem=9, storage=9,
                        delay=0.4, bandwidth=5500)
  # pure forwarding element: no compute resources
  sw = net.add_infra(id="sw", name="sw1", cpu=0, mem=0, storage=0, delay=0.0,
                     bandwidth=10000)
  for capable_node in (node0, node1):
    capable_node.add_supported_type(['A'])
  # SAP access links
  access_link = {'delay': 0.0, 'bandwidth': 2000}
  net.add_undirected_link(sap1.add_port(0), node0.add_port(0), **access_link)
  net.add_undirected_link(sap2.add_port(0), node1.add_port(0), **access_link)
  # directed core links: "slow" one way, "fast" the other way
  slow_link = {'delay': 10.0, 'bandwidth': 2000}
  fast_link = {'delay': 0.01, 'bandwidth': 5000}
  net.add_link(node0.add_port(1), sw.add_port(0), id="n0sw", **slow_link)
  net.add_link(sw.add_port(1), node1.add_port(1), id="swn1", **slow_link)
  net.add_link(sw.ports[0], node0.ports[1], id="swn0", **fast_link)
  net.add_link(node1.ports[1], sw.ports[1], id="n1sw", **fast_link)
  return net
def _example_request_for_fallback ():
  """
  Build the example request for the fallback topology: a branching service
  graph SAP1 -> NF0 -> NF1 -> {NF2, NF3} -> SAP2 with an end-to-end
  requirement and two NF-to-NF requirements.

  :return: the request graph
  :rtype: :any:`NFFG`
  """
  req_graph = NFFG(id="FALLBACK-REQ", name="fallback-req")
  sap1 = req_graph.add_sap(name="SAP1", id="sap1")
  sap2 = req_graph.add_sap(name="SAP2", id="sap2")
  # add NF requirements.
  nf0 = req_graph.add_nf(id="NF0", name="NetFunc0", func_type='B', cpu=2,
                         mem=2, storage=2, bandwidth=100)
  nf1 = req_graph.add_nf(id="NF1", name="NetFunc1", func_type='A', cpu=1.5,
                         mem=1.5, storage=1.5, delay=50)
  nf2 = req_graph.add_nf(id="NF2", name="NetFunc2", func_type='C', cpu=3,
                         mem=3, storage=3, bandwidth=500)
  nf3 = req_graph.add_nf(id="NF3", name="NetFunc3", func_type='A', cpu=2,
                         mem=2, storage=2, bandwidth=100, delay=50)
  # add SG hops
  req_graph.add_sglink(sap1.add_port(0), nf0.add_port(0), id="s1n0")
  req_graph.add_sglink(nf0.add_port(1), nf1.add_port(0), id="n0n1")
  req_graph.add_sglink(nf1.add_port(1), nf2.add_port(0), id="n1n2")
  req_graph.add_sglink(nf1.add_port(2), nf3.add_port(0), id="n1n3")
  req_graph.add_sglink(nf2.add_port(1), sap2.add_port(0), id="n2s2")
  req_graph.add_sglink(nf3.add_port(1), sap2.add_port(1), id="n3s2")
  # add EdgeReqs
  # port number on SAP2 doesn`t count
  req_graph.add_req(sap1.ports[0], sap2.ports[1], bandwidth=1000, delay=24)
  req_graph.add_req(nf0.ports[1], nf1.ports[0], bandwidth=200)
  req_graph.add_req(nf0.ports[1], nf1.ports[0], delay=3)
  # set placement criteria. Should be used to enforce the placement decision
  # of the upper orchestration layer. Placement criteria can contain multiple
  # InfraNode id-s, if the BiS-BiS is decomposed to multiple InfraNodes in
  # this layer.
  # setattr(nf1, 'placement_criteria', ['nc2'])
  return req_graph
def _constructExampleNetwork ():
  """
  Build a small example substrate: three infra nodes with progressively
  blanked-out (None) resources, a fast switch, two SAPs and a pre-mapped VNF
  (NF4) for testing VNF mapping removal and resource updates in the substrate
  NFFG. The None resource/link values exercise permissive substitution.

  :return: the substrate network
  :rtype: :any:`NFFG`
  """
  net = NFFG(id="BME-net-001")
  node_res = {'cpu': 5, 'mem': 5, 'storage': 5, 'delay': 0.9,
              'bandwidth': 5500}
  infra0 = net.add_infra(id="node0", name="INFRA0", **node_res)
  # later nodes get progressively more unset (None) resources
  node_res['cpu'] = None
  infra1 = net.add_infra(id="node1", name="INFRA1", **node_res)
  node_res['mem'] = None
  infra2 = net.add_infra(id="node2", name="INFRA2", **node_res)
  node_res['storage'] = None
  switch = net.add_infra(id="sw0", name="FastSwitcher", delay=0.01,
                         bandwidth=10000)
  infra0.add_supported_type('A')
  infra1.add_supported_type(['B', 'C'])
  infra2.add_supported_type(['A', 'B', 'C'])
  sap0 = net.add_sap(name="SAP0", id="sap0innet")
  sap1 = net.add_sap(name="SAP1", id="sap1innet")
  # Infra links are undirected according to the current NFFG model
  # (full duplex).
  link_res = {'delay': 1.5, 'bandwidth': 2000}
  net.add_undirected_link(sap0.add_port(0), infra0.add_port(0), **link_res)
  net.add_undirected_link(sap1.add_port(0), infra1.add_port(0), **link_res)
  net.add_undirected_link(infra1.add_port(1), infra0.add_port(2), **link_res)
  # links towards infra2 have unset bandwidth
  link_res['bandwidth'] = None
  net.add_undirected_link(infra0.add_port(1), infra2.add_port(0), **link_res)
  net.add_undirected_link(infra1.add_port(2), infra2.add_port(1), **link_res)
  # switch links: fast and wide; two of them with unset delay
  link_res['delay'] = 0.2
  link_res['bandwidth'] = 5000
  net.add_undirected_link(switch.add_port(0), infra0.add_port(3), **link_res)
  link_res['delay'] = None
  net.add_undirected_link(switch.add_port(1), infra1.add_port(3), **link_res)
  net.add_undirected_link(switch.add_port(2), infra2.add_port(2), **link_res)
  # test VNF mapping removal, and resource update in the substrate NFFG
  nf4 = net.add_nf(id="NF4inNet", name="NetFunc4", func_type='B', cpu=1,
                   mem=1, storage=1, bandwidth=100, delay=50)
  net.add_undirected_link(infra1.add_port(3), nf4.add_port(0), dynamic=True)
  net.add_undirected_link(infra1.add_port(4), nf4.add_port(1), dynamic=True)
  return net
# distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.nse # along with POX. If not, see <http://www.gnu.org/licenses/>. """ Reads an input NFFG object into memory and starts an interactive Python interpreter to edit the NFFG object. """ import sys try: from escape.nffg_lib.nffg import NFFG, NFFGToolBox except ImportError: import os sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "../escape/escape/nffg_lib/"))) from nffg import NFFG, NFFGToolBox if __name__ == '__main__': argv = sys.argv[1:] if "-h" in argv or "--help" in argv: print """ Reads an input NFFG object into memory and starts an interactive Python interpreter to edit the NFFG object. The object can be accessed by 'nffg' name. """ with open(argv[0], "r") as f: nffg = NFFG.parse(f.read())
def getSNDlib_dfn_gwin (save_to_file = False):
  """
  Topology taken from SNDlib, dfn-gwin.

  Reads the core graph from ``dfn-gwin.gml``, models each core node as an SDN
  switch, then attaches cloud (EE) nodes to 6 randomly chosen core nodes and
  access switches with 3-4 SAPs each to another 6. Uses a fixed random seed,
  so the result is deterministic.

  :param save_to_file: also dump the augmented graph to
    ``augmented-dfn-gwin.gml``
  :type save_to_file: bool
  :return: the constructed substrate
  :rtype: :any:`NFFG`
  """
  # fixed seed so node/type sampling below is reproducible
  random.seed(0)
  gwin = nx.read_gml("dfn-gwin.gml")
  nffg = NFFG(id="dfn-gwin")
  # NF types 'A'..'J'
  nf_types = list(string.ascii_uppercase)[:10]
  switch = {'cpu': 0, 'mem': 0, 'storage': 0, 'delay': 0.5,
            'bandwidth': 40000, 'infra_type': NFFG.TYPE_INFRA_SDN_SW}
  infrares = {'cpu': 400, 'mem': 320000, 'storage': 1500, 'delay': 1.0,
              'bandwidth': 40000, 'infra_type': NFFG.TYPE_INFRA_EE}
  corelinkres = {'bandwidth': 10000, 'delay': 1.0}
  aggrlinkres = {'bandwidth': 1000, 'delay': 5.0}
  acclinkres = {'bandwidth': 100, 'delay': 1.0}
  # GML node names may carry a trailing dot; strip it for NFFG ids
  gwinnodes = []
  for n in gwin.nodes_iter():
    gwinnodes.append(n.rstrip('.'))
  # get topology from dfn-gwin
  for n in gwinnodes:
    nffg.add_infra(id=n, **switch)
  for i,j in gwin.edges_iter():
    nffg.add_undirected_link(nffg.network.node[i.rstrip('.')].add_port(),
                             nffg.network.node[j.rstrip('.')].add_port(),
                             **corelinkres)
  # NOTE(review): two independent sample(…, 3) calls may pick the same node
  # twice — presumably intended to be 6 distinct nodes; confirm.
  nodeset1 = random.sample(gwinnodes, 3)
  nodeset1.extend(random.sample(gwinnodes, 3))
  # add cloud nodes to 6 random nodes.
  for n in nodeset1:
    infra = nffg.add_infra(id=getName(n+"Host"), **infrares)
    infra.add_supported_type(random.sample(nf_types, 6))
    nffg.add_undirected_link(nffg.network.node[n].add_port(),
                             infra.add_port(), **corelinkres)
  nodeset2 = random.sample(gwinnodes, 3)
  nodeset2.extend(random.sample(gwinnodes, 3))
  # add access switches to 6 random nodes, each with 3-4 SAPs
  for n in nodeset2:
    sw = nffg.add_infra(id=getName(n+"Sw"), **switch)
    nffg.add_undirected_link(nffg.network.node[n].add_port(), sw.add_port(),
                             **aggrlinkres)
    for i in xrange(0,random.randint(3,4)):
      nameid = getName(n+"SAP")
      sap = nffg.add_sap(id=nameid, name=nameid)
      nffg.add_undirected_link(sap.add_port(), sw.add_port(), **acclinkres)
  # save it to file
  if save_to_file:
    augmented_gwin = nx.MultiDiGraph()
    augmented_gwin.add_nodes_from(nffg.network.nodes_iter())
    augmented_gwin.add_edges_from(nffg.network.edges_iter())
    nx.write_gml(augmented_gwin, "augmented-dfn-gwin.gml")
  return nffg
def edit_config(self):
    """
    Receive configuration (service request) in the HTTP request body and
    initiate orchestration.

    Handles two formats depending on ``virtualizer_format_enabled``:
    XML/Virtualizer (optionally as a diff that is patched onto the cached
    full topology) or plain JSON/NFFG.

    :return: None
    """
    self.log.info("Call %s function: edit-config" % self.LOGGER_NAME)
    # Obtain NFFG from request body
    self.log.debug("Detected response format: %s" %
                   self.headers.get("Content-Type"))
    raw_body = self._get_body()
    # log.getChild("REST-API").debug("Request body:\n%s" % body)
    if raw_body is None or not raw_body:
      log.warning("Received data is empty!")
      self.send_error(400, "Missing body!")
      return
    # Expect XML format --> need to convert first
    if self.virtualizer_format_enabled:
      if self.headers.get("Content-Type") != "application/xml" and \
         not raw_body.startswith("<?xml version="):
        self.log.error("Received data is not in XML format despite of the "
                       "UNIFY interface is enabled!")
        self.send_error(415)
        return
      # Get received Virtualizer
      received_cfg = Virtualizer.parse_from_text(text=raw_body)
      self.log.log(VERBOSE,
                   "Received request for 'edit-config':\n%s" % raw_body)
      # If there was not get-config request so far
      if self.DEFAULT_DIFF:
        if self.server.last_response is None:
          self.log.info("Missing cached Virtualizer! "
                        "Acquiring topology now...")
          config = self._proceed_API_call(self.API_CALL_GET_CONFIG)
          if config is None:
            self.log.error("Requested resource info is missing!")
            self.send_error(404, message="Resource info is missing!")
            return
          elif config is False:
            self.log.warning("Requested info is unchanged but has not found!")
            self.send_error(404, message="Resource info is missing!")
            # FIX: without this return the handler fell through and tried to
            # patch the diff onto a missing cached topology
            return
          else:
            # Convert required NFFG if needed
            if self.virtualizer_format_enabled:
              self.log.debug("Convert internal NFFG to Virtualizer...")
              converter = NFFGConverter(domain=None, logger=log)
              v_topology = converter.dump_to_Virtualizer(nffg=config)
              # Cache converted data for edit-config patching
              self.log.debug("Cache converted topology...")
              self.server.last_response = v_topology
            else:
              self.log.debug("Cache acquired topology...")
              self.server.last_response = config
        # Perform patching: apply the received diff on the cached topology
        full_cfg = self.__recreate_full_request(diff=received_cfg)
      else:
        # the received Virtualizer is already a full configuration
        full_cfg = received_cfg
      self.log.log(VERBOSE, "Generated request:\n%s" % full_cfg.xml())
      # Convert request body to NFFG
      self.log.info("Converting full request data...")
      converter = NFFGConverter(domain="REMOTE", logger=log)
      nffg = converter.parse_from_Virtualizer(vdata=full_cfg)
    else:
      if self.headers.get("Content-Type") != "application/json":
        self.log.error("Received data is not in JSON format despite of the "
                       "UNIFY interface is disabled!")
        self.send_error(415)
        return
      # Initialize NFFG from JSON representation
      self.log.info("Parsing request into internal NFFG format...")
      nffg = NFFG.parse(raw_body)
    self.log.debug("Parsed NFFG install request: %s" % nffg)
    # Kick off the orchestration with the parsed NFFG and acknowledge
    self._proceed_API_call(self.API_CALL_EDIT_CONFIG, nffg)
    self.send_acknowledge()
    self.log.debug("%s function: edit-config ended!" % self.LOGGER_NAME)
def generateRequestForCarrierTopo(test_lvl, all_saps_beginning,
                                  all_saps_ending, running_nfs, loops=False,
                                  use_saps_once=True,
                                  vnf_sharing_probabilty=0.0,
                                  vnf_sharing_same_sg=0.0,
                                  shareable_sg_count=9999999999999999,
                                  multiSC=False, max_sc_count=2):
  """
  By default generates VNF-disjoint SC-s starting/ending only once in each
  SAP. With the 'loops' option, only loop SC-s are generated.
  'vnf_sharing_probabilty' determines the ratio of
  #(VNF-s used by at least two SC-s)/#(not shared VNF-s).

  Returns a tuple of (request NFFG or None, remaining beginning SAPs,
  remaining ending SAPs).

  NOTE: some kind of periodicity is included to make the effect of batching
  visible. But it is (and must be) independent of the batch_length.

  WARNING!! batch_length meaning is changed if --poisson is set!

  Generate exponential arrival time for VNF-s to make Batching more
  reasonable. Inter arrival time is Exp(1) so if we are batching for 4 time
  units, the expected SG count is 4, because the sum of 4 Exp(1) is Exp(4).
  BUT we wait for 1 SG at least, but if by that time 4 units has already
  passed, map the SG alone (unbatched).
  """
  chain_maxlen = 8
  sc_count = 1
  # maximal possible bandwidth for chains
  max_bw = 7.0
  if multiSC:
    sc_count = random.randint(2, max_sc_count)
  # only build a request if there are enough unused SAPs left on both sides
  while len(all_saps_ending) > sc_count and \
     len(all_saps_beginning) > sc_count:
    nffg = NFFG(id="Benchmark-Req-" + str(test_lvl) + "-Piece")
    # newly added NF-s of one request
    current_nfs = []
    for scid in xrange(0, sc_count):
      # find two SAP-s for chain ends.
      nfs_this_sc = []
      sap1 = nffg.add_sap(id = all_saps_beginning.pop() if use_saps_once else \
                          random.choice(all_saps_beginning))
      sap2 = None
      if loops:
        # loop chain: source and destination SAP coincide
        sap2 = sap1
      else:
        # draw an ending SAP different from sap1
        tmpid = all_saps_ending.pop() if use_saps_once else \
                random.choice(all_saps_ending)
        while True:
          if tmpid != sap1.id:
            sap2 = nffg.add_sap(id=tmpid)
            break
          else:
            tmpid = all_saps_ending.pop() if use_saps_once else \
                    random.choice(all_saps_ending)
      sg_path = []
      sap1port = sap1.add_port()
      last_req_port = sap1port
      # generate some VNF-s connecting the two SAP-s
      vnf_cnt = next(gen_seq()) % chain_maxlen + 1
      for vnf in xrange(0, vnf_cnt):
        # in the first case p is used to determine which previous chain
        # should be used to share the VNF, in the latter case it is used to
        # determine whether we should share now.
        vnf_added = False
        p = random.random()
        if random.random() < vnf_sharing_probabilty and \
           len(running_nfs) > 0 and not multiSC:
          # single-SC mode: share a VNF from an earlier service graph
          vnf_added, nf = _shareVNFFromEarlierSG(nffg, running_nfs,
                                                 nfs_this_sc, p)
        elif multiSC and \
             p < vnf_sharing_probabilty and len(current_nfs) > 0 \
             and len(running_nfs) > 0:
          # this influences the given VNF sharing probability...
          if reduce(lambda a, b: a and b,
                    [v in nfs_this_sc for v in current_nfs]):
            log.warn("All shareable VNF-s are already added to this chain! "
                     "Skipping VNF sharing...")
          elif random.random() < vnf_sharing_same_sg:
            # share a VNF from another chain of the SAME request
            nf = random.choice(current_nfs)
            while nf in nfs_this_sc:
              nf = random.choice(current_nfs)
            # the VNF is already in the subchain, we just need to add the
            # links
            # vnf_added = True
          else:
            # this happens when VNF sharing is needed but not with the
            # actual SG
            vnf_added, nf = _shareVNFFromEarlierSG(nffg, running_nfs,
                                                   nfs_this_sc, p)
        else:
          # create a brand new VNF; resource demand is periodic in test_lvl%4
          # to make the batching effect visible
          nf = nffg.add_nf(id="-".join(("Test",str(test_lvl),"SC",str(scid),
                                        "VNF",str(vnf))),
                           func_type=random.choice(['A','B','C']),
                           cpu=random.randint(1 + (2 if test_lvl%4 == 3
                                                   else 0),
                                              4 + (6 if test_lvl%4 == 3
                                                   else 0)),
                           mem=random.random()*1000 + \
                               (1000 if test_lvl%4 > 1 else 0),
                           storage=random.random()*3 + \
                                   (6 if test_lvl%4 > 1 else 0),
                           delay=1 + random.random()*10,
                           bandwidth=random.random())
          vnf_added = True
        if vnf_added:
          # add only the newly added VNF-s, not the shared ones.
          nfs_this_sc.append(nf)
          newport = nf.add_port()
          sglink = nffg.add_sglink(last_req_port, newport)
          sg_path.append(sglink.id)
          last_req_port = nf.add_port()
      # close the chain towards the ending SAP
      sap2port = sap2.add_port()
      sglink = nffg.add_sglink(last_req_port, sap2port)
      sg_path.append(sglink.id)
      # WARNING: this is completly a wild guess! Failing due to this doesn't
      # necessarily mean algorithm failure
      # Bandwidth maximal random value should be
      # min(SAP1acces_bw, SAP2access_bw)
      # MAYBE: each SAP can only be once in the reqgraph? - this is the case
      # now.
      if multiSC:
        minlat = 5.0 * (len(nfs_this_sc) + 2)
        maxlat = 13.0 * (len(nfs_this_sc) + 2)
      else:
        # nfcnt = len([i for i in nffg.nfs])
        minlat = 45.0 - 10.0 * (test_lvl % 4)
        maxlat = 60.0 - 12.25 * (test_lvl % 4)
      nffg.add_req(sap1port, sap2port, delay=random.uniform(minlat, maxlat),
                   bandwidth=random.random() * (max_bw + test_lvl % 4),
                   sg_path=sg_path)
      log.info("Service Chain on NF-s added: %s" %
               [nf.id for nf in nfs_this_sc])
      # this prevents loops in the chains and makes new and old NF-s equally
      # preferable in total for NF sharing
      new_nfs = [vnf for vnf in nfs_this_sc if vnf not in current_nfs]
      for tmp in xrange(0, scid + 1):
        current_nfs.extend(new_nfs)
      if not multiSC:
        return nffg, all_saps_beginning, all_saps_ending
    if multiSC:
      return nffg, all_saps_beginning, all_saps_ending
  # not enough SAPs remained to build a request
  return None, all_saps_beginning, all_saps_ending
def get_topo_desc ():
  """
  Build the static fallback topology: four SDN switches (sw1-sw4) in the
  INTERNAL domain and two SAPs, connected by one-way static links.

  :return: the assembled topology
  :rtype: :any:`NFFG`
  """
  topo = NFFG(id="STATIC-FALLBACK-TOPO", name="fallback-static")
  # four plain SDN switches
  switches = {}
  for idx in (1, 2, 3, 4):
    switches[idx] = topo.add_infra(id="sw%s" % idx, name="SW%s" % idx,
                                   domain="INTERNAL",
                                   infra_type=NFFG.TYPE_INFRA_SDN_SW)
  # Service Access Points
  sap1 = topo.add_sap(id="sap1", name="SAP1")
  sap2 = topo.add_sap(id="sap2", name="SAP2")
  # static links without explicit resources
  topo.add_link(switches[1].add_port(1), switches[3].add_port(1), id="l1")
  topo.add_link(switches[2].add_port(1), switches[4].add_port(1), id="l2")
  topo.add_link(switches[3].add_port(2), switches[4].add_port(2), id="l3")
  topo.add_link(switches[3].add_port(3), sap1.add_port(1), id="l4")
  topo.add_link(switches[4].add_port(3), sap2.add_port(1), id="l5")
  # One-way static links are intentionally NOT duplicated here
  # (nffg.duplicate_static_links); the network bridge handles that later for
  # the orchestration algorithm.
  return topo
def StressTestCore(seed, loops, use_saps_once, vnf_sharing, multiple_scs,
                   max_sc_count, vnf_sharing_same_sg, fullremap, batch_length,
                   shareable_sg_count, sliding_share, poisson, topo_name,
                   bw_factor, res_factor, lat_factor, bt_limit, bt_br_factor,
                   outputfile, queue=None, shortest_paths_precalc=None,
                   filehandler=None):
  """
  Run the mapping stress test: repeatedly generate service graph requests,
  batch them together and map them onto the chosen carrier topology until a
  mapping fails without backtrack possibility, request generation runs out,
  or the test level cap is reached.

  If queue is given, the result will be put in that Queue object too. Meanwhile
  if shortest_paths_precalc is not given, it means the caller needs the
  shortest_paths, so we send it back. In this case the resulting test_lvl will
  be sent by the queue.
  NOTE: outputfile is only used inside the function if an exception is thrown
  and than it is logged there.

  :param seed: offset into the pseudo-random stream (fed to random.jumpahead)
  :param topo_name: substrate selector: "picotopo" or "gwin"
    (NOTE(review): any other value leaves ``network`` as None and the
    subsequent ``network.saps`` access raises AttributeError — confirm callers
    only pass these two values)
  :param batch_length: number of requests merged into one mapping call
  :param shareable_sg_count: size of the FIFO window of service graphs whose
    NF-s may be shared by newly generated requests
  :param poisson: if True, batch progress advances by expovariate increments
    instead of 1 per request
  :param queue: optional Queue-like object to publish the result on
  :param shortest_paths_precalc: previously calculated shortest path dict;
    if None, the computed dict is returned instead of the test level
  :param filehandler: optional logging handler attached to the module logger
  :return: reached test level minus one, or the shortest paths dict (see above)
  """
  total_vnf_count = 0
  mapped_vnf_count = 0
  network = None
  # Build the substrate network according to the requested topology name.
  if topo_name == "picotopo":
    network = CarrierTopoBuilder.getPicoTopo()
  elif topo_name == "gwin":
    network = CarrierTopoBuilder.getSNDlib_dfn_gwin(save_to_file=True)
  max_test_lvl = 50000
  test_lvl = 1
  all_saps_ending = [s.id for s in network.saps]
  all_saps_beginning = [s.id for s in network.saps]
  running_nfs = OrderedDict()
  # Deterministic randomness: fixed seed, then jump ahead by the caller's
  # seed so distinct seeds yield distinct but reproducible streams
  # (random.jumpahead is Python 2 only).
  random.seed(0)
  random.jumpahead(seed)
  random.shuffle(all_saps_beginning)
  random.shuffle(all_saps_ending)
  shortest_paths = shortest_paths_precalc
  ppid_pid = ""
  # log.addHandler(logging.StreamHandler())
  log.setLevel(logging.WARN)
  if filehandler is not None:
    log.addHandler(filehandler)
  # Guard against a wrong precalculated-paths argument; propagate the error
  # through the queue as well so a parent process can notice it.
  if shortest_paths is not None and type(shortest_paths) != dict:
    excp = Exception("StressTest received something else other than shortest_"
                     "paths dictionary: %s" % type(shortest_paths))
    if queue is not None:
      queue.put(excp)
    raise excp
  if queue is not None:
    # Prefix log lines with "parentpid.pid:" when running as a worker process.
    ppid_pid = "%s.%s:" % (os.getppid(), os.getpid())
  try:
    try:
      batch_count = 0
      batched_request = NFFG(id="Benchmark-Req-" + str(test_lvl))
      # built-in libs can change the state of random module during mapping.
      random_state = None
      # Main loop: each iteration either aborts, generates+batches one more
      # request, or maps the accumulated batch.
      while batched_request is not None:
        if test_lvl > max_test_lvl:
          break
        if (len(all_saps_ending) < batch_length or \
            len(all_saps_beginning) < batch_length) and use_saps_once:
          log.warn("Can't start batching because all SAPs should only be used"
                   " once for SC origin and destination and there are not "
                   "enough SAPs!")
          batched_request = None
        # NOTE(review): on the very first iteration `request` is not yet
        # assigned — this relies on `or` short-circuiting (batch_count == 0
        # < batch_length); if batch_length could be 0 this would NameError.
        elif batch_count < batch_length or len(
              [nf for nf in request.nfs]) == 0:
          # Generate one more service graph request and merge it into the
          # batch under construction.
          request, all_saps_beginning, all_saps_ending = \
            generateRequestForCarrierTopo(test_lvl, all_saps_beginning,
                                          all_saps_ending, running_nfs,
                                          loops=loops,
                                          use_saps_once=use_saps_once,
                                          vnf_sharing_probabilty=vnf_sharing,
                                          vnf_sharing_same_sg=
                                          vnf_sharing_same_sg,
                                          multiSC=multiple_scs,
                                          max_sc_count=max_sc_count)
          if request is None:
            break
          else:
            # In Poisson mode the "batch clock" advances by random
            # exponential increments instead of one unit per request.
            batch_count += (random.expovariate(1.0) if poisson else 1)
            if poisson:
              log.debug("Time passed since last batched mapping: %s"
                        % batch_count)
            # Remember only the NF-s generated for this test level (their id
            # encodes the level in the second dash-separated field).
            running_nfs[test_lvl] = [nf for nf in request.nfs
                                     if nf.id.split("-")[1] == str(test_lvl)]
            # using merge to create the union of the NFFG-s!
            batched_request = NFFGToolBox.merge_nffgs(batched_request,
                                                      request)
            if len(running_nfs) > shareable_sg_count:
              # make the ordered dict function as FIFO
              running_nfs.popitem(last=False)
            test_lvl += 1
            if not sliding_share and test_lvl % shareable_sg_count == 0:
              # Non-sliding mode: drop the whole sharing window periodically.
              running_nfs = OrderedDict()
            log.debug("Batching Service Graph number %s..." % batch_count)
        else:
          # Batch is full: map it, then start a fresh batch.
          batch_count = 0
          total_vnf_count += len([nf for nf in batched_request.nfs])
          # Save/restore the random state around MAP because libraries used
          # during mapping may consume from the global random stream.
          random_state = random.getstate()
          network, shortest_paths = MappingAlgorithms.MAP(
            batched_request, network, full_remap=fullremap,
            enable_shortest_path_cache=True, bw_factor=bw_factor,
            res_factor=res_factor, lat_factor=lat_factor,
            shortest_paths=shortest_paths, return_dist=True,
            bt_limit=bt_limit, bt_branching_factor=bt_br_factor)
          log.debug(ppid_pid + "Mapping successful on test level %s with batch"
                    " length %s!" % (test_lvl, batch_length))
          random.setstate(random_state)
          mapped_vnf_count += len([nf for nf in batched_request.nfs])
          batched_request = NFFG(id="Benchmark-Req-" + str(test_lvl))
    except uet.MappingException as me:
      # A failed mapping ends the test; report peak statistics if no
      # backtracking is possible anymore.
      log.info(ppid_pid + "Mapping failed: %s" % me.msg)
      if not me.backtrack_possible:
        # NOTE: peak SC count is only corret to add to test_lvl if SC-s are
        # disjoint on VNFs.
        if poisson:
          log.warn("Peak mapped VNF count is %s in the last run, test level: "
                   "UNKNOWN because of Poisson" % me.peak_mapped_vnf_count)
        else:
          log.warn("Peak mapped VNF count is %s in the last run, test level: "
                   "%s" % (me.peak_mapped_vnf_count,
                           test_lvl - batch_length + \
                           (me.peak_sc_cnt if me.peak_sc_cnt is not None
                            else 0)))
        mapped_vnf_count += me.peak_mapped_vnf_count
        log.warn("All-time peak mapped VNF count: %s, All-time total VNF "
                 "count %s, Acceptance ratio: %s" %
                 (mapped_vnf_count, total_vnf_count,
                  float(mapped_vnf_count) / total_vnf_count))
        # break
      if request is None or batched_request is None:
        log.warn(ppid_pid + "Request generation reached its end!")
        # break
  except uet.UnifyException as ue:
    # Log mapping-library errors to the output file and report the exception
    # class through the queue before returning the level reached so far.
    log.error(ppid_pid + ue.msg)
    log.error(ppid_pid + traceback.format_exc())
    with open(outputfile, "a") as f:
      f.write("\n".join(("UnifyException cought during StressTest: ",
                         ue.msg, traceback.format_exc())))
    if queue is not None:
      queue.put(str(ue.__class__))
    return test_lvl - 1
  except Exception as e:
    # Same handling for any unexpected error.
    log.error(ppid_pid + traceback.format_exc())
    with open(outputfile, "a") as f:
      f.write("\n".join(("Exception cought during StressTest: ",
                         traceback.format_exc())))
    if queue is not None:
      queue.put(str(e.__class__))
    return test_lvl - 1
  # put the result to the queue
  if queue is not None:
    log.info(ppid_pid + "Putting %s to communication queue" % (test_lvl - 1))
    queue.put(test_lvl - 1)
  if shortest_paths_precalc is None:
    # Caller did not supply shortest paths, so hand the computed dict back.
    log.info(ppid_pid + "Returning shortest_paths!")
    return shortest_paths
  # if returned_test_lvl is 0, we failed at the very fist mapping!
  return test_lvl - 1
def getDecomps(self, nffg):
    """
    Get all decompositions for a given NFFG.

    Works as a FIFO (breadth-first style) expansion: every NF of the input
    NFFG is looked up in the graph database, then each 'DECOMPOSED'
    relationship replaces the abstract NF with its decomposition subgraph;
    NFFGs whose nodes can no longer be decomposed are collected as results.

    :param nffg: the NFFG for which the decompositions should be returned
    :type nffg: :any:`NFFG`
    :return: all the decompositions for the given NFFG keyed as 'D0', 'D1',
      ... or None if some NF is missing from the database
    :rtype: dict
    """
    decompositions = {}
    nodes_list = []
    index = 0
    # Resolve every NF of the request in the DB; abort if any is unknown.
    for n in nffg.nfs:
      node = list(self.graph_db.find('NF', 'node_id', n.id))
      if len(node) != 0:
        nodes_list.append(node[0])
      else:
        log.debug("NF %s does not exist in the DB" % n.id)
        return None
    # Two queues kept in lockstep: DB nodes still to expand and the
    # corresponding partially-decomposed NFFG.
    queue = deque([nodes_list])
    queue_nffg = deque([nffg])
    while len(queue) > 0:
      nodes = queue.popleft()
      nffg_init = queue_nffg.popleft()
      # indicator == 1 iff at least one node in this NFFG was decomposable.
      indicator = 0
      for node in nodes:
        rels_DECOMPOSED = list(
          self.graph_db.match(start_node=node, rel_type='DECOMPOSED'))
        for rel in rels_DECOMPOSED:
          indicator = 1
          nffg_temp = NFFG()
          # Fetch the decomposition subgraph for this rule.
          graph, rels = self.getSingleDecomp(
            rel.end_node.properties['node_id'])
          # Re-create the decomposition's NF/SAP nodes in a fresh NFFG.
          for n in graph.nodes():
            if graph.node[n]['properties']['label'] == 'NF':
              nffg_temp.add_nf(id=n,
                               dep_type=graph.node[n]['properties']['type'],
                               cpu=graph.node[n]['properties']['cpu'],
                               mem=graph.node[n]['properties']['mem'],
                               storage=graph.node[n]['properties']['storage'])
            elif graph.node[n]['properties']['label'] == 'SAP':
              nffg_temp.add_sap(id=n)
          counter = 0
          # Re-create the decomposition's internal SG links.
          # NOTE(review): counter is never incremented, so every added link
          # gets id 'hop0' — confirm whether unique ids were intended.
          for edge in graph.edges():
            for nf in nffg_temp.nfs:
              if nf.id == edge[0]:
                node0 = nf
              if nf.id == edge[1]:
                node1 = nf
            for sap in nffg_temp.saps:
              if sap.id == edge[0]:
                node0 = sap
              if sap.id == edge[1]:
                node1 = sap
            # FIXME - czentye --> There is a chance node0, node1 variables
            # not defined yet until here and add_port will be raise an
            # exception
            nffg_temp.add_sglink(
              node0.add_port(graph.edge[edge[0]][edge[1]]
                             ['properties']['src_port']),
              node1.add_port(graph.edge[edge[0]][edge[1]]
                             ['properties']['dst_port']),
              id='hop' + str(counter))
          # Copy the current NFFG's nodes and edges next to the
          # decomposition content.
          for n in nffg_init.nfs:
            nffg_temp.add_node(n)
          for n in nffg_init.saps:
            nffg_temp.add_node(n)
          for n in nffg_init.infras:
            nffg_temp.add_node(n)
          for l in nffg_init.links:
            nffg_temp.add_edge(l.src.node, l.dst.node, l)
          for l in nffg_init.sg_hops:
            nffg_temp.add_edge(l.src.node, l.dst.node, l)
          for l in nffg_init.reqs:
            nffg_temp.add_edge(l.src.node, l.dst.node, l)
          extra_nodes = []
          # Splice the decomposition in place of the decomposed NF: reconnect
          # SG hops that started/ended at the NF to the decomposition's
          # border (SAP) ports, remembering the SAP placeholders for removal.
          for l in nffg_temp.sg_hops:
            if node.properties['node_id'] == l.src.node.id:
              src_port = l.src
              dst_port = l.dst
              for edge in graph.edges():
                if graph.node[edge[1]]['properties']['label'] == 'SAP':
                  if str(src_port.id) == str(
                        graph.edge[edge[0]][edge[1]]
                        ['properties']['dst_port']):
                    for e in nffg_temp.sg_hops:
                      if e.src.node.id == edge[0] and \
                            e.dst.node.id == edge[1]:
                        nffg_temp.add_sglink(e.src, dst_port)
                        extra_nodes.append(edge[1])
            if node.properties['node_id'] == l.dst.node.id:
              dst_port = l.dst
              src_port = l.src
              for edge in graph.edges():
                if graph.node[edge[0]]['properties']['label'] == 'SAP':
                  if str(dst_port.id) == str(
                        graph.edge[edge[0]][edge[1]]
                        ['properties']['src_port']):
                    for e in nffg_temp.sg_hops:
                      if e.src.node.id == edge[0] and \
                            e.dst.node.id == edge[1]:
                        nffg_temp.add_sglink(src_port, e.dst)
                        extra_nodes.append(edge[0])
          # Remove the decomposed NF and the placeholder SAPs.
          nffg_temp.del_node(node.properties['node_id'])
          for extra in extra_nodes:
            nffg_temp.del_node(extra)
          queue_nffg.append(nffg_temp)
          # Continue expansion with the remaining nodes plus the
          # decomposition's target nodes.
          nodes_copy = list(nodes)
          new_nodes = map(lambda x: x.end_node, rels)
          nodes_copy.remove(node)
          queue.append(nodes_copy + new_nodes)
        if indicator == 1:
          break
      if indicator == 0:
        # Fully decomposed NFFG: record it as the next result.
        decompositions['D' + str(index)] = nffg_init
        index += 1
    return decompositions
  # Tail of the example-request builder started above (its beginning is not
  # visible in this chunk): attach the end-to-end requirement between the two
  # SAP ports over the previously created SG path.
  nffg.add_req(sap1.ports[0], sap2.ports[0], delay=50, bandwidth=10,
               sg_path=["sa", "ab", "bc", "cs"])
  return nffg


if __name__ == '__main__':
  # Manual test driver: load a sample request and topology from the examples
  # directory and run the mapping algorithm on them.
  try:
    # req = _constructExampleRequest()
    # net = _constructExampleNetwork()
    # req = _example_request_for_fallback()
    # print req.dump()
    # req = _onlySAPsRequest()
    # print net.dump()
    # req = _testRequestForBacktrack()
    # net = _testNetworkForBacktrack()
    with open('../examples/escape-mn-req.nffg', "r") as f:
      req = NFFG.parse(f.read())
    with open('../examples/escape-mn-topo.nffg', "r") as g:
      net = NFFG.parse(g.read())
      # Static links are stored one-way; duplicate them so the topology
      # behaves as undirected for the mapping algorithm.
      net.duplicate_static_links()
    mapped = MAP(req, net, full_remap=False)
    print mapped.dump()
  except uet.UnifyException as ue:
    print ue, ue.msg
    print traceback.format_exc()