def get_topo_desc ():
  """
  Build the static fallback topology: four internal SDN switches wired as
  sw1-sw3, sw2-sw4, sw3-sw4, with one SAP hanging off sw3 and one off sw4.

  :return: the assembled static topology
  :rtype: :any:`NFFG`
  """
  nffg = NFFG(id="STATIC-FALLBACK-TOPO", name="fallback-static")
  # Create the four internal SDN switches (explicit ids sw1..sw4)
  switches = {}
  for num in (1, 2, 3, 4):
    switches[num] = nffg.add_infra(id="sw%s" % num, name="SW%s" % num,
                                   domain="INTERNAL",
                                   infra_type=NFFG.TYPE_INFRA_SDN_SW)
  # Attach the two Service Access Points
  sap1 = nffg.add_sap(id="sap1", name="SAP1")
  sap2 = nffg.add_sap(id="sap2", name="SAP2")
  # Wire up the static links; port numbers are assigned explicitly
  nffg.add_link(switches[1].add_port(1), switches[3].add_port(1), id="l1")
  nffg.add_link(switches[2].add_port(1), switches[4].add_port(1), id="l2")
  nffg.add_link(switches[3].add_port(2), switches[4].add_port(2), id="l3")
  nffg.add_link(switches[3].add_port(3), sap1.add_port(1), id="l4")
  nffg.add_link(switches[4].add_port(3), sap2.add_port(1), id="l5")
  # Duplicate one-way static links to become undirected in order to fit to
  # the orchestration algorithm
  # nffg.duplicate_static_links()
  return nffg
def getPicoTopo():
  """
  Not carrier style topo. Few nodes with big resources.

  Builds a ring of 4 EE infra nodes around one central resource-less switch:
  every EE is linked to the switch, to the previous EE and to its own SAP.
  Uses a fixed random seed so the topology parameters are reproducible.

  :return: the assembled topology
  :rtype: :any:`NFFG`
  """
  random.seed(0)
  nffg = NFFG(id="SmallExampleTopo")
  # Resource-less central switch
  switch = {'cpu': 0, 'mem': 0, 'storage': 0, 'delay': 0.5,
            'bandwidth': 1000, 'infra_type': NFFG.TYPE_INFRA_SDN_SW}
  sw = nffg.add_infra(id = getName("sw"), **switch)
  # Big-resource Execution Environment template
  infra = {'cpu': 400, 'mem': 320000, 'storage': 1500, 'delay': 1.0,
           'bandwidth': 10000, 'infra_type': NFFG.TYPE_INFRA_EE}
  linkres = {'bandwidth': 1000, 'delay': 0.5}
  inf1 = nffg.add_infra(id = getName("infra"), **infra)
  # remember the first EE so the ring can be closed in the last iteration
  inf0 = inf1
  # first 10 uppercase letters as supported NF types
  inf1.add_supported_type(list(string.ascii_uppercase)[:10])
  for i in range(0,4):
    if i == 3:
      # close the ring: reuse the first EE instead of creating a new one
      inf2 = inf0
    else:
      inf2 = nffg.add_infra(id = getName("infra"), **infra)
      inf2.add_supported_type(list(string.ascii_uppercase)[:10])
    nameid = getName("sap")
    sap = nffg.add_sap(id = nameid, name = nameid)
    # add links (ports are auto-generated, so call order matters)
    nffg.add_undirected_link(sw.add_port(), inf2.add_port(), **linkres)
    nffg.add_undirected_link(inf1.add_port(), inf2.add_port(), **linkres)
    nffg.add_undirected_link(inf2.add_port(), sap.add_port(), **linkres)
    # advance the ring: current EE becomes the "previous" one
    inf1 = inf2
  return nffg
def get_topo_desc ():
  """
  Build the dynamic fallback topology: two NETCONF-capable Execution
  Environments (nc1/nc2) behind two inter-EE switches (sw3/sw4), with one
  SAP attached to each switch.

  :return: the assembled dynamic topology
  :rtype: :any:`NFFG`
  """
  nffg = NFFG(id="DYNAMIC-FALLBACK-TOPO", name="fallback-dynamic")
  # Shared resource set of the NETCONF-capable containers (EEs)
  ee_res = dict(domain="INTERNAL", infra_type=NFFG.TYPE_INFRA_EE, cpu=5,
                mem=5, storage=5, delay=0.9, bandwidth=5000)
  nc1 = nffg.add_infra(id="nc1", name="NC1", **ee_res)
  nc2 = nffg.add_infra(id="nc2", name="NC2", **ee_res)
  nc1.add_supported_type(['A', 'B'])
  nc2.add_supported_type(['A', 'C'])
  # Shared resource set of the inter-EE SDN switches
  sw_res = dict(domain="INTERNAL", infra_type=NFFG.TYPE_INFRA_SDN_SW,
                delay=0.2, bandwidth=10000)
  sw3 = nffg.add_infra(id="sw3", name="SW3", **sw_res)
  sw4 = nffg.add_infra(id="sw4", name="SW4", **sw_res)
  # Service Access Points
  sap1 = nffg.add_sap(id="sap1", name="SAP1")
  sap2 = nffg.add_sap(id="sap2", name="SAP2")
  # All static links carry identical resource values
  linkres = {'delay': 1.5, 'bandwidth': 2000}
  nffg.add_link(nc1.add_port(1), sw3.add_port(1), id="l1", **linkres)
  nffg.add_link(nc2.add_port(1), sw4.add_port(1), id="l2", **linkres)
  nffg.add_link(sw3.add_port(2), sw4.add_port(2), id="l3", **linkres)
  nffg.add_link(sw3.add_port(3), sap1.add_port(1), id="l4", **linkres)
  nffg.add_link(sw4.add_port(3), sap2.add_port(1), id="l5", **linkres)
  # Duplicate one-way static links to become undirected in order to fit to
  # the orchestration algorithm
  # No need for that, ESCAPENetworkBridge do this later
  # nffg.duplicate_static_links()
  return nffg
def getCarrierTopo(params, increment_port_ids=False):
  """
  Construct the core network and add PoPs with their parameters.

  params is a list of dictionaries with PoP data:
    'Retail': (BNAS, RCpb, RCT)
    'Business': (PE, BCpb, BCT)
    'CloudNFV': (CL,CH,SE,SAN_bw,SAN_sto,NF_types,SE_cores,SE_mem,SE_sto,
                 CL_bw, CH_links)

  WARNING: using this function with increment_port_ids=True this function
  is not thread safe, because it uses global variable then!

  :param params: per-PoP parameter dictionaries (structure above)
  :type params: list
  :param increment_port_ids: use globally incremented port ids
  :type increment_port_ids: bool
  :return: the carrier topology
  :rtype: :any:`NFFG`
  """
  # This initializes the random generator always to the same value, so the
  # returned index sequence, and thus the network parameters will be generated
  # always the same (we want a fixed network environment)
  # The generated identifiers are still different between generations, but
  # those do not influence the mapping process
  random.seed(0)
  popcnt = 0
  nffg = NFFG(id="CarrierTopo")
  p = increment_port_ids
  # Resource-less SDN switches make up the backbone ring
  backbone_res = {'cpu': 0, 'mem': 0, 'storage': 0, 'delay': 0.5,
                  'bandwidth': 1000, 'infra_type': NFFG.TYPE_INFRA_SDN_SW}
  bn0 = nffg.add_infra(id=getName("bn"), **backbone_res)
  bn1 = nffg.add_infra(id=getName("bn"), **backbone_res)
  bn2 = nffg.add_infra(id=getName("bn"), **backbone_res)
  bn3 = nffg.add_infra(id=getName("bn"), **backbone_res)
  # Backbone ring: bn0 - bn1 - bn2 - bn3 - bn0
  nffg.add_undirected_link(add_port(bn0, p), add_port(bn1, p),
                           bandwidth=1000, delay=10)
  nffg.add_undirected_link(add_port(bn1, p), add_port(bn2, p),
                           bandwidth=1000, delay=10)
  nffg.add_undirected_link(add_port(bn2, p), add_port(bn3, p),
                           bandwidth=1000, delay=10)
  nffg.add_undirected_link(add_port(bn3, p), add_port(bn0, p),
                           bandwidth=1000, delay=10)
  backbones = (bn0, bn1, bn2, bn3)
  bnlen = len(backbones)
  # Attach one PoP per parameter dict between consecutive backbone nodes
  # (wrapping around the ring via the modulo indices)
  for popdata in params:
    tmp = []
    tmp.extend(popdata['Retail'])
    tmp.extend(popdata['Business'])
    tmp.extend(popdata['CloudNFV'])
    addPoP(nffg, popcnt, backbones[popcnt%bnlen], backbones[(popcnt+1)%bnlen],
           p, *tmp)
    popcnt += 1
  # Sample of the flattened addPoP parameter order (kept for reference):
  """
  #            BNAS, RCpb, RCT, PE, BCpb, BCT, CL, CH, SE, SAN_bw,
  addPoP(nffg, bn2, bn3, 2, 10000, 0.2, 2, 4000, 0.2, 2, 8, 8, 160000,
  #      SAN_sto, NF_types,  SE_cores,  SE_mem,        SE_sto, CL_bw, CH_links
         100000, ['A','B'], [8,12,16], [32000,64000], [150],  40000, 4)
  #            BNAS, RCpb, RCT, PE, BCpb, BCT, CL, CH, SE, SAN_bw,
  addPoP(nffg, bn1, bn2, 10, 40000, 0.2, 8, 4000, 0.2, 4, 40, 8, 160000,
  #      SAN_sto, NF_types,              SE_cores,  SE_mem,        SE_sto,
         100000, ['A','B','C','D','E'], [8,12,16], [32000,64000], [150,200],
  #      CL_bw, CH_links
         80000, 8)
  """
  log.debug("Carrier topology construction finished!")
  return nffg
def _example_request_for_fallback():
  """
  Build an example service request for the fallback topologies: two SAPs and
  four NFs in a chain that branches after NF1 and rejoins at SAP2.

  :return: the request graph
  :rtype: :any:`NFFG`
  """
  nffg = NFFG(id="FALLBACK-REQ", name="fallback-req")
  sap1 = nffg.add_sap(name="SAP1", id="sap1")
  sap2 = nffg.add_sap(name="SAP2", id="sap2")
  # add NF requirements.
  nf0 = nffg.add_nf(id="NF0", name="NetFunc0", func_type='B', cpu=2, mem=2,
                    storage=2, bandwidth=100)
  nf1 = nffg.add_nf(id="NF1", name="NetFunc1", func_type='A', cpu=1.5,
                    mem=1.5, storage=1.5, delay=50)
  nf2 = nffg.add_nf(id="NF2", name="NetFunc2", func_type='C', cpu=3, mem=3,
                    storage=3, bandwidth=500)
  nf3 = nffg.add_nf(id="NF3", name="NetFunc3", func_type='A', cpu=2, mem=2,
                    storage=2, bandwidth=100, delay=50)
  # add SG hops: sap1 -> NF0 -> NF1 -> {NF2 | NF3} -> sap2
  nffg.add_sglink(sap1.add_port(0), nf0.add_port(0), id="s1n0")
  nffg.add_sglink(nf0.add_port(1), nf1.add_port(0), id="n0n1")
  nffg.add_sglink(nf1.add_port(1), nf2.add_port(0), id="n1n2")
  nffg.add_sglink(nf1.add_port(2), nf3.add_port(0), id="n1n3")
  nffg.add_sglink(nf2.add_port(1), sap2.add_port(0), id="n2s2")
  nffg.add_sglink(nf3.add_port(1), sap2.add_port(1), id="n3s2")
  # add EdgeReqs
  # port number on SAP2 doesn`t count
  nffg.add_req(sap1.ports[0], sap2.ports[1], bandwidth=1000, delay=24)
  nffg.add_req(nf0.ports[1], nf1.ports[0], bandwidth=200)
  # NOTE(review): a second req is added on the same port pair (bandwidth-only
  # above, delay-only below) -- presumably deliberate to exercise separate
  # requirement edges; confirm before merging them into a single add_req.
  nffg.add_req(nf0.ports[1], nf1.ports[0], delay=3)
  # set placement criteria. Should be used to enforce the placement decision
  # of the upper orchestration layer. Placement criteria can contain multiple
  # InfraNode id-s, if the BiS-BiS is decomposed to multiple InfraNodes in
  # this layer.
  # setattr(nf1, 'placement_criteria', ['nc2'])
  return nffg
def getSNDlib_dfn_gwin(save_to_file = False):
  """
  Topology taken from SNDlib, dfn-gwin.

  Reads the core from "dfn-gwin.gml", then (with a fixed random seed) adds
  cloud hosts to 6 randomly sampled core nodes and access switches with
  3-4 SAPs each to 6 other randomly sampled core nodes.
  NOTE: uses networkx 1.x iterator API (nodes_iter/edges_iter) and Py2 xrange.

  :param save_to_file: also dump the augmented graph to GML
  :type save_to_file: bool
  :return: the assembled topology
  :rtype: :any:`NFFG`
  """
  random.seed(0)
  gwin = nx.read_gml("dfn-gwin.gml")
  nffg = NFFG(id="dfn-gwin")
  # first 10 uppercase letters serve as NF types
  nf_types = list(string.ascii_uppercase)[:10]
  switch = {'cpu': 0, 'mem': 0, 'storage': 0, 'delay': 0.5,
            'bandwidth': 40000, 'infra_type': NFFG.TYPE_INFRA_SDN_SW}
  infrares = {'cpu': 400, 'mem': 320000, 'storage': 1500, 'delay': 1.0,
              'bandwidth': 40000, 'infra_type': NFFG.TYPE_INFRA_EE}
  corelinkres = {'bandwidth': 10000, 'delay': 1.0}
  aggrlinkres = {'bandwidth': 1000, 'delay': 5.0}
  acclinkres = {'bandwidth': 100, 'delay': 1.0}
  # GML node names may carry a trailing dot -- strip it for use as ids
  gwinnodes = []
  for n in gwin.nodes_iter():
    gwinnodes.append(n.rstrip('.'))
  # get topology from dfn-gwin
  for n in gwinnodes:
    nffg.add_infra(id=n, **switch)
  for i,j in gwin.edges_iter():
    nffg.add_undirected_link(nffg.network.node[i.rstrip('.')].add_port(),
                             nffg.network.node[j.rstrip('.')].add_port(),
                             **corelinkres)
  # two independent samples of 3 -- duplicates across the samples possible
  nodeset1 = random.sample(gwinnodes, 3)
  nodeset1.extend(random.sample(gwinnodes, 3))
  # add cloud nodes to 6 random nodes.
  for n in nodeset1:
    infra = nffg.add_infra(id=getName(n+"Host"), **infrares)
    infra.add_supported_type(random.sample(nf_types, 6))
    nffg.add_undirected_link(nffg.network.node[n].add_port(),
                             infra.add_port(), **corelinkres)
  nodeset2 = random.sample(gwinnodes, 3)
  nodeset2.extend(random.sample(gwinnodes, 3))
  # add access switches to 6 random nodes
  for n in nodeset2:
    sw = nffg.add_infra(id=getName(n+"Sw"), **switch)
    nffg.add_undirected_link(nffg.network.node[n].add_port(),
                             sw.add_port(), **aggrlinkres)
    # 3 or 4 SAPs hang off each access switch
    for i in xrange(0,random.randint(3,4)):
      nameid = getName(n+"SAP")
      sap = nffg.add_sap(id=nameid, name=nameid)
      nffg.add_undirected_link(sap.add_port(), sw.add_port(), **acclinkres)
  # save it to file
  if save_to_file:
    augmented_gwin = nx.MultiDiGraph()
    augmented_gwin.add_nodes_from(nffg.network.nodes_iter())
    augmented_gwin.add_edges_from(nffg.network.edges_iter())
    nx.write_gml(augmented_gwin, "augmented-dfn-gwin.gml")
  return nffg
def _onlySAPsRequest():
  """
  Build a minimal request graph: one SG hop between two SAPs plus two
  identical end-to-end requirement links on the same port pair.

  :return: the request graph
  :rtype: :any:`NFFG`
  """
  nffg = NFFG(id="BME-req-001")
  endpoint_a = nffg.add_sap(name="SAP1", id="sap1")
  endpoint_b = nffg.add_sap(name="SAP2", id="sap2")
  nffg.add_sglink(endpoint_a.add_port(0), endpoint_b.add_port(0))
  # nffg.add_sglink(sap1.add_port(1), sap2.add_port(1))
  # Two identical requirements are added on purpose (duplicate-req case)
  for _ in range(2):
    nffg.add_req(endpoint_a.ports[0], endpoint_b.ports[0], bandwidth=1000,
                 delay=24)
  return nffg
def __init__ (self, standalone=False, **kwargs):
  """
  .. seealso::
    :func:`AbstractAPI.__init__() <escape.util.api.AbstractAPI.__init__>`
  """
  log.info("Starting Service Layer...")
  # Session id and collaborating components are created later
  self.__sid = None
  self.elementManager = None
  self.service_orchestrator = None
  # Handle of the optional GUI subprocess
  self.gui_proc = None
  # Cache of the last received service graph -- starts out empty
  self.last_sg = NFFG(id=0, name='empty')
  # Mandatory super() call
  super(ServiceLayerAPI, self).__init__(standalone, **kwargs)
def _testNetworkForBacktrack():
  """
  Build a tiny substrate for backtracking tests: two hosting nodes joined
  through a resource-less switch by directed links with asymmetric delays,
  and one SAP attached to each hosting node.

  :return: the substrate network
  :rtype: :any:`NFFG`
  """
  nffg = NFFG(id="backtracktest", name="backtrack")
  sap1 = nffg.add_sap(name="SAP1", id="sap1")
  sap2 = nffg.add_sap(name="SAP2", id="sap2")
  # Two hosting nodes with different capacities
  small_res = {'cpu': 5, 'mem': 5, 'storage': 5, 'delay': 0.4,
               'bandwidth': 5500}
  infra0 = nffg.add_infra(id="node0", name="INFRA0", **small_res)
  big_res = {'cpu': 9, 'mem': 9, 'storage': 9, 'delay': 0.4,
             'bandwidth': 5500}
  infra1 = nffg.add_infra(id="node1", name="INFRA1", **big_res)
  # Resource-less forwarding element between the two hosts
  sw_res = {'cpu': 0, 'mem': 0, 'storage': 0, 'delay': 0.0,
            'bandwidth': 10000}
  sw = nffg.add_infra(id="sw", name="sw1", **sw_res)
  for host in (infra0, infra1):
    host.add_supported_type(['A'])
  # SAP attachments (undirected)
  access_link = {'delay': 0.0, 'bandwidth': 2000}
  nffg.add_undirected_link(sap1.add_port(0), infra0.add_port(0),
                           **access_link)
  nffg.add_undirected_link(sap2.add_port(0), infra1.add_port(0),
                           **access_link)
  # Directed core links: high delay one way, tiny delay on the way back
  slow_dir = {'delay': 10.0, 'bandwidth': 2000}
  fast_dir = {'delay': 0.01, 'bandwidth': 5000}
  nffg.add_link(infra0.add_port(1), sw.add_port(0), id="n0sw", **slow_dir)
  nffg.add_link(sw.add_port(1), infra1.add_port(1), id="swn1", **slow_dir)
  nffg.add_link(sw.ports[0], infra0.ports[1], id="swn0", **fast_dir)
  nffg.add_link(infra1.ports[1], sw.ports[1], id="n1sw", **fast_dir)
  return nffg
def __init__(self, standalone=False, **kwargs):
  """
  Initialize the Service Layer API.

  :param standalone: run in standalone mode (default: False)
  :type standalone: bool
  :return: None

  .. seealso::
    :func:`AbstractAPI.__init__() <escape.util.api.AbstractAPI.__init__>`
  """
  log.info("Starting Service Layer...")
  # Cache of the last received service graph -- starts out empty
  self.last_sg = NFFG(id=0, name='empty')
  # Set element manager
  self.__sid = None
  self.elementManager = None
  self.service_orchestrator = None
  """:type ServiceOrchestrator"""
  # Handle of the optional GUI subprocess
  self.gui_proc = None
  # REST-API manager for the northbound interface
  self.api_mgr = RESTAPIManager(
    unique_bb_id=False,
    unique_nf_id=CONFIG.ensure_unique_vnf_id(),
    logger=log)
  # Mandatory super() call
  super(ServiceLayerAPI, self).__init__(standalone, **kwargs)
def __init__(self, mgr, global_res=None):
  """
  Init.

  :param mgr: global domain resource manager
  :type mgr: :any:`GlobalResourceManager`
  :param global_res: initial global resource (optional)
  :type global_res: :any:`NFFG`
  :return: None
  """
  super(DomainVirtualizer, self).__init__(type=self.DOMAIN_VIRTUALIZER)
  log.debug(
    "Init DomainVirtualizer with name: %s - initial resource: %s" % (
      DoV, global_res))
  # Garbage-collector safe: keep only a weak proxy to the manager to avoid
  # a reference cycle with it
  self._mgr = weakref.proxy(mgr)
  # Define DoV as an empty NFFG by default
  self.__global_nffg = NFFG(id=DoV, name=DoV + "-uninitialized")
  # If an initial resource view was given, install it as the global view
  if global_res is not None:
    self.set_domain_as_global_view(domain=NFFG.DEFAULT_DOMAIN,
                                   nffg=global_res)
def _testRequestForBacktrack():
  """
  Build the request used by the backtracking test: a linear chain of three
  identical NFs between two SAPs, with per-hop delay requirements and one
  end-to-end requirement covering the whole chain.

  :return: the request graph
  :rtype: :any:`NFFG`
  """
  nffg = NFFG(id="backtracktest-req", name="btreq")
  sap1 = nffg.add_sap(name="SAP1", id="sap1req")
  sap2 = nffg.add_sap(name="SAP2", id="sap2req")
  # Three identical NFs forming the chain: sap1 -> a -> b -> c -> sap2
  a, b, c = [nffg.add_nf(id=nf_id, name=nf_name, func_type='A', cpu=3,
                         mem=3, storage=3)
             for nf_id, nf_name in (("a", "NetFunc0"), ("b", "NetFunc1"),
                                    ("c", "NetFunc2"))]
  nffg.add_sglink(sap1.add_port(0), a.add_port(0), id="sa")
  nffg.add_sglink(a.add_port(1), b.add_port(0), id="ab")
  nffg.add_sglink(b.add_port(1), c.add_port(0), id="bc")
  nffg.add_sglink(c.add_port(1), sap2.add_port(0), id="cs")
  # Per-hop delay requirements
  nffg.add_req(a.ports[0], b.ports[1], delay=1.0, sg_path=["ab"])
  nffg.add_req(b.ports[0], c.ports[1], delay=1.0, sg_path=["bc"])
  nffg.add_req(c.ports[0], sap2.ports[0], delay=1.0, sg_path=["cs"])
  # End-to-end requirement over the full chain
  nffg.add_req(sap1.ports[0], sap2.ports[0], delay=50, bandwidth=10,
               sg_path=["sa", "ab", "bc", "cs"])
  return nffg
def _constructExampleRequest():
  """
  Build an elaborate example request: two SAPs, five NFs with branching SG
  hops distinguished by flowclass, several EdgeReqs, and two Infra nodes
  attached with dynamic links (to test Infra removal from request NFFGs).

  :return: the request graph
  :rtype: :any:`NFFG`
  """
  nffg = NFFG(id="BME-req-001")
  sap0 = nffg.add_sap(name="SAP0", id="sap0")
  sap1 = nffg.add_sap(name="SAP1", id="sap1")
  # add NF requirements.
  # Note: storage is used now for the first time, it comes in with the
  # NodeResource class
  # Note: internal latency is only forwarded to lower layer
  # Note: internal bw is untested yet, even before the NFFG support
  nf0 = nffg.add_nf(id="NF0", name="NetFunc0", func_type='A', cpu=2, mem=2,
                    storage=2, bandwidth=100)
  nf1 = nffg.add_nf(id="NF1", name="NetFunc1", func_type='B', cpu=1.5,
                    mem=1.5, storage=1.5, delay=50)
  nf2 = nffg.add_nf(id="NF2", name="NetFunc2", func_type='C', cpu=3, mem=3,
                    storage=3, bandwidth=500)
  nf3 = nffg.add_nf(id="NF3", name="NetFunc3", func_type='A', cpu=2, mem=2,
                    storage=2, bandwidth=100, delay=50)
  nf4 = nffg.add_nf(id="NF4", name="NetFunc4", func_type='C', cpu=0, mem=0,
                    storage=0, bandwidth=500)
  # directed SG links
  # flowclass default: None, meaning: match all traffic
  # some agreement on flowclass format is required.
  nffg.add_sglink(sap0.add_port(0), nf0.add_port(0))
  nffg.add_sglink(nf0.add_port(1), nf1.add_port(0), flowclass="HTTP")
  nffg.add_sglink(nf1.add_port(1), nf2.add_port(0), flowclass="HTTP")
  nffg.add_sglink(nf2.add_port(1), sap1.add_port(1))
  nffg.add_sglink(nf0.add_port(2), nf3.add_port(0), flowclass="non-HTTP")
  nffg.add_sglink(nf3.add_port(1), nf2.add_port(2), flowclass="non-HTTP")
  nffg.add_sglink(nf1.add_port(2), nf4.add_port(0), flowclass="index.com")
  nffg.add_sglink(nf4.add_port(1), nf2.add_port(3), flowclass="index.com")
  # add EdgeReqs
  nffg.add_req(sap0.ports[0], sap1.ports[1], delay=40, bandwidth=1500)
  nffg.add_req(nf1.ports[1], nf2.ports[0], delay=3.5)
  nffg.add_req(nf3.ports[1], nf2.ports[2], bandwidth=500)
  nffg.add_req(sap0.ports[0], nf0.ports[0], delay=3.0)
  # force collocation of NF0 and NF3
  # nffg.add_req(nf0.ports[2], nf3.ports[0], delay=1.0)
  # not SAP-to-SAP requests are not taken into account yet, these are ignored
  nffg.add_req(nf0.ports[1], nf2.ports[0], delay=1.0)
  # test Infra node removal from the request NFFG
  infra1 = nffg.add_infra(id="BiS-BiS1")
  infra2 = nffg.add_infra(id="BiS-BiS2")
  nffg.add_undirected_link(infra1.add_port(0), nf0.add_port(3), dynamic=True)
  nffg.add_undirected_link(infra1.add_port(1), nf0.add_port(4), dynamic=True)
  nffg.add_undirected_link(infra1.add_port(2), nf1.add_port(3), dynamic=True)
  nffg.add_undirected_link(infra2.add_port(0), nf2.add_port(4), dynamic=True)
  nffg.add_undirected_link(infra2.add_port(1), nf3.add_port(2), dynamic=True)
  nffg.add_undirected_link(infra1.add_port(3), infra2.add_port(2),
                           bandwidth=31241242)
  return nffg
def generateRequestForCarrierTopo(all_saps_ending, all_saps_beginning,
                                  avg_shp_len, nf_types,
                                  max_e2e_lat_multiplier=20,
                                  min_e2e_lat_multiplier=1.1,
                                  loops=False, use_saps_once=True,
                                  vnf_sharing_probabilty=0.0,
                                  multiSC=False, max_sc_count=2,
                                  chain_maxlen=8, max_cpu=4, max_mem=1600,
                                  max_storage=3, max_bw=7):
  """
  By default generates VNF-disjoint SC-s starting/ending only once in each
  SAP. With the 'loops' option, only loop SC-s are generated.
  'vnf_sharing_probabilty' determines the ratio of
     #(VNF-s used by at least two SC-s)/#(not shared VNF-s).

  :param all_saps_ending: SAP id pool for chain destinations (popped when
    use_saps_once is set)
  :param all_saps_beginning: SAP id pool for chain origins
  :param avg_shp_len: average shortest path length, scales the E2E latency req
  :param nf_types: list of supported NF types to draw from
  :return: the generated request NFFG, or None if the SAP pools are exhausted
  :rtype: :any:`NFFG`
  """
  sc_count = 1
  gen = NameGenerator()
  if multiSC:
    sc_count = rnd.randint(2, max_sc_count)
  # the loop body always returns; iterating only guards against exhausted
  # SAP pools (in which case None is returned below)
  while len(all_saps_ending) > sc_count and len(
     all_saps_beginning) > sc_count:
    nffg = NFFG(id="E2e_req_test_nffg")
    nffg.mode = NFFG.MODE_ADD
    # newly added NF-s of one request
    current_nfs = []
    for scid in xrange(0, sc_count):
      # find two SAP-s for chain ends.
      nfs_this_sc = []
      sapid = all_saps_beginning.pop() if use_saps_once else \
        rnd.choice(all_saps_beginning)
      if sapid not in nffg:
        sap1 = nffg.add_sap(id=sapid)
      else:
        sap1 = nffg.network.node[sapid]
      sap2 = None
      if loops:
        sap2 = sap1
      else:
        # draw destination SAPs until one differs from the origin
        tmpid = all_saps_ending.pop() if use_saps_once else \
          rnd.choice(all_saps_ending)
        while True:
          if tmpid != sap1.id:
            if tmpid not in nffg:
              sap2 = nffg.add_sap(id=tmpid)
            else:
              sap2 = nffg.network.node[tmpid]
            break
          else:
            tmpid = all_saps_ending.pop() if use_saps_once else \
              rnd.choice(all_saps_ending)
      sg_path = []
      # reuse the first existing port of the SAP, or create one
      if len(sap1.ports) > 0:
        for sap1port in sap1.ports:
          break
      else:
        sap1port = sap1.add_port(id=gen.get_name("port"))
      last_req_port = sap1port
      # generate some VNF-s connecting the two SAP-s
      vnf_cnt = next(gen_seq()) % chain_maxlen + 1
      for vnf in xrange(0, vnf_cnt):
        # in the first case p is used to determine which previous chain
        # should be used to share the VNF, in the latter case it is used to
        # determine whether we should share now.
        p = rnd.random()
        if multiSC and \
           p < vnf_sharing_probabilty and len(current_nfs) > 0:
          # this influences the given VNF sharing probability...
          if reduce(lambda a, b: a and b,
                    [v in nfs_this_sc for v in current_nfs]):
            log.warn("All shareable VNF-s are already added to this chain! "
                     "Skipping VNF sharing...")
            continue
          else:
            nf = rnd.choice(current_nfs)
            while nf in nfs_this_sc:
              nf = rnd.choice(current_nfs)
            # the VNF is already in the subchain, we just need to add the
            # links
            # vnf_added = True
        else:
          nf = nffg.add_nf(id="-".join(("SC", str(scid), "VNF", str(vnf))),
                           func_type=rnd.choice(nf_types),
                           cpu=rnd.random() * max_cpu,
                           mem=rnd.random() * max_mem,
                           storage=rnd.random() * max_storage)
        nfs_this_sc.append(nf)
        newport = nf.add_port(id=gen.get_name("port"))
        sglink = nffg.add_sglink(last_req_port, newport,
                                 id=gen.get_name("link"))
        sg_path.append(sglink.id)
        last_req_port = nf.add_port(id=gen.get_name("port"))
      # close the chain towards the destination SAP
      if len(sap2.ports) > 0:
        for sap2port in sap2.ports:
          break
      else:
        sap2port = sap2.add_port(id=gen.get_name("port"))
      sglink = nffg.add_sglink(last_req_port, sap2port,
                               id=gen.get_name("link"))
      sg_path.append(sglink.id)
      # WARNING: this is completely a wild guess! Failing due to this doesn't
      # necessarily mean algorithm failure
      # Bandwidth maximal random value should be min(SAP1acces_bw,
      # SAP2access_bw)
      # MAYBE: each SAP can only be once in the reqgraph? - this is the case
      # now.
      minlat = avg_shp_len * min_e2e_lat_multiplier
      maxlat = avg_shp_len * max_e2e_lat_multiplier
      nffg.add_req(sap1port, sap2port, delay=rnd.uniform(minlat, maxlat),
                   bandwidth=rnd.random() * max_bw, sg_path=sg_path,
                   id=gen.get_name("req"))
      # log.debug(
      #   "Service Chain on NF-s added: %s" % [nf.id for nf in nfs_this_sc])
      # this prevents loops in the chains and makes new and old NF-s equally
      # preferable in total for NF sharing
      new_nfs = [vnf for vnf in nfs_this_sc if vnf not in current_nfs]
      for tmp in xrange(0, scid + 1):
        current_nfs.extend(new_nfs)
    # FIX: the original ended with "if not multiSC: return nffg" immediately
    # followed by "if multiSC: return nffg" -- both branches returned the
    # same value, so the duplicated dead branch is collapsed here.
    return nffg
  return None
def StressTestCore(seed, loops, use_saps_once, vnf_sharing, multiple_scs,
                   max_sc_count, vnf_sharing_same_sg, fullremap, batch_length,
                   shareable_sg_count, sliding_share, poisson, topo_name,
                   bw_factor, res_factor, lat_factor, bt_limit, bt_br_factor,
                   outputfile, queue=None, shortest_paths_precalc=None,
                   filehandler=None):
  """
  If queue is given, the result will be put in that Queue object too.
  Meanwhile if shortest_paths_precalc is not given, it means the caller
  needs the shortest_paths, so we send it back. In this case the resulting
  test_lvl will be sent by the queue.
  NOTE: outputfile is only used inside the function if an exception is
  thrown and then it is logged there.

  Main loop: generate request pieces, batch (merge) them until batch_length
  is reached, map the batch onto the network with MappingAlgorithms.MAP, and
  repeat until generation ends, max_test_lvl is hit, or mapping fails.

  :return: test_lvl - 1 (number of successfully processed levels), or the
    shortest-paths dict when shortest_paths_precalc was None
  """
  total_vnf_count = 0
  mapped_vnf_count = 0
  network = None
  # select the substrate topology by name
  if topo_name == "picotopo":
    network = CarrierTopoBuilder.getPicoTopo()
  elif topo_name == "gwin":
    network = CarrierTopoBuilder.getSNDlib_dfn_gwin(save_to_file=True)
  max_test_lvl = 50000
  test_lvl = 1
  all_saps_ending = [s.id for s in network.saps]
  all_saps_beginning = [s.id for s in network.saps]
  running_nfs = OrderedDict()
  # deterministic base sequence, differentiated per-run by jumpahead(seed)
  # (Python 2 only API)
  random.seed(0)
  random.jumpahead(seed)
  random.shuffle(all_saps_beginning)
  random.shuffle(all_saps_ending)
  shortest_paths = shortest_paths_precalc
  ppid_pid = ""
  # log.addHandler(logging.StreamHandler())
  log.setLevel(logging.WARN)
  if filehandler is not None:
    log.addHandler(filehandler)
  # defensive check: precalculated shortest paths must be a dict
  if shortest_paths is not None and type(shortest_paths) != dict:
    excp = Exception("StressTest received something else other than shortest_"
                     "paths dictionary: %s" % type(shortest_paths))
    if queue is not None:
      queue.put(excp)
    raise excp
  if queue is not None:
    # prefix log lines with parent/child PID when run as a worker
    ppid_pid = "%s.%s:" % (os.getppid(), os.getpid())
  try:
    try:
      batch_count = 0
      batched_request = NFFG(id="Benchmark-Req-" + str(test_lvl))
      # built-in libs can change the state of random module during mapping.
      random_state = None
      while batched_request is not None:
        if test_lvl > max_test_lvl:
          break
        if (len(all_saps_ending) < batch_length or \
            len(all_saps_beginning) < batch_length) and use_saps_once:
          log.warn("Can't start batching because all SAPs should only be used"
                   " once for SC origin and destination and there are not "
                   "enough SAPs!")
          batched_request = None
        # NOTE(review): on the first iteration 'request' is not yet bound;
        # the short-circuit of "batch_count < batch_length" protects the
        # "request.nfs" access -- confirm batch_length is always >= 1.
        elif batch_count < batch_length or len(
           [nf for nf in request.nfs]) == 0:
          # generate the next request piece
          request, all_saps_beginning, all_saps_ending = \
            generateRequestForCarrierTopo(test_lvl, all_saps_beginning,
                                          all_saps_ending, running_nfs,
                                          loops=loops,
                                          use_saps_once=use_saps_once,
                                          vnf_sharing_probabilty=vnf_sharing,
                                          vnf_sharing_same_sg=
                                          vnf_sharing_same_sg,
                                          multiSC=multiple_scs,
                                          max_sc_count=max_sc_count)
          if request is None:
            break
          else:
            # with --poisson the batch "length" is an accumulated
            # exponential inter-arrival time instead of a piece count
            batch_count += (random.expovariate(1.0) if poisson else 1)
            if poisson:
              log.debug("Time passed since last batched mapping: %s" %
                        batch_count)
            # remember the NF-s belonging to this test level for sharing
            running_nfs[test_lvl] = [nf for nf in request.nfs
                                     if nf.id.split("-")[1] == str(test_lvl)]
            # using merge to create the union of the NFFG-s!
            batched_request = NFFGToolBox.merge_nffgs(batched_request,
                                                      request)
            if len(running_nfs) > shareable_sg_count:
              # make the ordered dict function as FIFO
              running_nfs.popitem(last=False)
            test_lvl += 1
            if not sliding_share and test_lvl % shareable_sg_count == 0:
              # drop the sharing window in non-sliding mode
              running_nfs = OrderedDict()
            log.debug("Batching Service Graph number %s..." % batch_count)
        else:
          # batch is full: map it onto the network
          batch_count = 0
          total_vnf_count += len([nf for nf in batched_request.nfs])
          # save/restore random state around MAP, which may use random too
          random_state = random.getstate()
          network, shortest_paths = MappingAlgorithms.MAP(
            batched_request, network, full_remap=fullremap,
            enable_shortest_path_cache=True, bw_factor=bw_factor,
            res_factor=res_factor, lat_factor=lat_factor,
            shortest_paths=shortest_paths, return_dist=True,
            bt_limit=bt_limit, bt_branching_factor=bt_br_factor)
          log.debug(ppid_pid + "Mapping successful on test level %s with batch"
                    " length %s!" % (test_lvl, batch_length))
          random.setstate(random_state)
          mapped_vnf_count += len([nf for nf in batched_request.nfs])
          # start collecting the next batch
          batched_request = NFFG(id="Benchmark-Req-" + str(test_lvl))
    except uet.MappingException as me:
      log.info(ppid_pid + "Mapping failed: %s" % me.msg)
      if not me.backtrack_possible:
        # NOTE: peak SC count is only corret to add to test_lvl if SC-s are
        # disjoint on VNFs.
        if poisson:
          log.warn("Peak mapped VNF count is %s in the last run, test level: "
                   "UNKNOWN because of Poisson" % me.peak_mapped_vnf_count)
        else:
          log.warn("Peak mapped VNF count is %s in the last run, test level: %s"%
                   (me.peak_mapped_vnf_count, test_lvl - batch_length + \
                    (me.peak_sc_cnt if me.peak_sc_cnt is not None else 0)))
        mapped_vnf_count += me.peak_mapped_vnf_count
        log.warn("All-time peak mapped VNF count: %s, All-time total VNF "
                 "count %s, Acceptance ratio: %s" %
                 (mapped_vnf_count, total_vnf_count,
                  float(mapped_vnf_count) / total_vnf_count))
      # break
      # NOTE(review): 'request' may be unbound here if the exception was
      # raised before the first generation -- confirm intended.
      if request is None or batched_request is None:
        log.warn(ppid_pid + "Request generation reached its end!")
        # break
  except uet.UnifyException as ue:
    log.error(ppid_pid + ue.msg)
    log.error(ppid_pid + traceback.format_exc())
    with open(outputfile, "a") as f:
      f.write("\n".join(("UnifyException cought during StressTest: ",
                         ue.msg, traceback.format_exc())))
    if queue is not None:
      queue.put(str(ue.__class__))
    return test_lvl - 1
  except Exception as e:
    log.error(ppid_pid + traceback.format_exc())
    with open(outputfile, "a") as f:
      f.write("\n".join(("Exception cought during StressTest: ",
                         traceback.format_exc())))
    if queue is not None:
      queue.put(str(e.__class__))
    return test_lvl - 1
  # put the result to the queue
  if queue is not None:
    log.info(ppid_pid + "Putting %s to communication queue" % (test_lvl - 1))
    queue.put(test_lvl - 1)
    if shortest_paths_precalc is None:
      log.info(ppid_pid + "Returning shortest_paths!")
      return shortest_paths
  # if returned_test_lvl is 0, we failed at the very fist mapping!
  return test_lvl - 1
def generateRequestForCarrierTopo(test_lvl, all_saps_beginning,
                                  all_saps_ending, running_nfs, loops=False,
                                  use_saps_once=True,
                                  vnf_sharing_probabilty=0.0,
                                  vnf_sharing_same_sg=0.0,
                                  shareable_sg_count=9999999999999999,
                                  multiSC=False, max_sc_count=2):
  """
  By default generates VNF-disjoint SC-s starting/ending only once in each
  SAP. With the 'loops' option, only loop SC-s are generated.
  'vnf_sharing_probabilty' determines the ratio of
     #(VNF-s used by at least two SC-s)/#(not shared VNF-s).
  NOTE: some kind of periodicity is included to make the effect of batching
  visible. But it is (and must be) independent of the batch_length.

  WARNING!! batch_length meaning is changed if --poisson is set!

  Generate exponential arrival time for VNF-s to make Batching more
  reasonable. Inter arrival time is Exp(1) so if we are batching for 4 time
  units, the expected SG count is 4, because the sum of 4 Exp(1) is Exp(4).
  BUT we wait for 1 SG at least, but if by that time 4 units has already
  passed, map the SG alone (unbatched).

  :return: (request NFFG or None, remaining origin SAPs, remaining dest SAPs)
  """
  chain_maxlen = 8
  sc_count = 1
  # maximal possible bandwidth for chains
  max_bw = 7.0
  if multiSC:
    sc_count = random.randint(2, max_sc_count)
  # loop body returns on its first pass; the condition only guards against
  # exhausted SAP pools
  while len(all_saps_ending) > sc_count and len(
     all_saps_beginning) > sc_count:
    nffg = NFFG(id="Benchmark-Req-" + str(test_lvl) + "-Piece")
    # newly added NF-s of one request
    current_nfs = []
    for scid in xrange(0, sc_count):
      # find two SAP-s for chain ends.
      nfs_this_sc = []
      sap1 = nffg.add_sap(id = all_saps_beginning.pop() if use_saps_once else \
                          random.choice(all_saps_beginning))
      sap2 = None
      if loops:
        sap2 = sap1
      else:
        # draw destination SAPs until one differs from the origin
        tmpid = all_saps_ending.pop() if use_saps_once else \
          random.choice(all_saps_ending)
        while True:
          if tmpid != sap1.id:
            sap2 = nffg.add_sap(id=tmpid)
            break
          else:
            tmpid = all_saps_ending.pop() if use_saps_once else \
              random.choice(all_saps_ending)
      sg_path = []
      sap1port = sap1.add_port()
      last_req_port = sap1port
      # generate some VNF-s connecting the two SAP-s
      vnf_cnt = next(gen_seq()) % chain_maxlen + 1
      for vnf in xrange(0, vnf_cnt):
        # in the first case p is used to determine which previous chain
        # should be used to share the VNF, in the latter case it is used to
        # determine whether we should share now.
        vnf_added = False
        p = random.random()
        if random.random() < vnf_sharing_probabilty and \
           len(running_nfs) > 0 and not multiSC:
          # single-SC mode: share a VNF from an earlier service graph
          vnf_added, nf = _shareVNFFromEarlierSG(nffg, running_nfs,
                                                 nfs_this_sc, p)
        elif multiSC and \
           p < vnf_sharing_probabilty and len(current_nfs) > 0 \
           and len(running_nfs) > 0:
          # this influences the given VNF sharing probability...
          if reduce(lambda a, b: a and b,
                    [v in nfs_this_sc for v in current_nfs]):
            log.warn("All shareable VNF-s are already added to this chain! "
                     "Skipping VNF sharing...")
          elif random.random() < vnf_sharing_same_sg:
            nf = random.choice(current_nfs)
            while nf in nfs_this_sc:
              nf = random.choice(current_nfs)
            # the VNF is already in the subchain, we just need to add the
            # links
            # vnf_added = True
          else:
            # this happens when VNF sharing is needed but not with the
            # actual SG
            vnf_added, nf = _shareVNFFromEarlierSG(nffg, running_nfs,
                                                   nfs_this_sc, p)
        else:
          # fresh VNF; resources depend periodically on test_lvl % 4 to make
          # batching effects visible
          nf = nffg.add_nf(id="-".join(("Test",str(test_lvl),"SC",str(scid),
                                        "VNF",str(vnf))),
                           func_type=random.choice(['A','B','C']),
                           cpu=random.randint(1 + (2 if test_lvl%4 == 3
                                                   else 0),
                                              4 + (6 if test_lvl%4 == 3
                                                   else 0)),
                           mem=random.random()*1000 + \
                               (1000 if test_lvl%4 > 1 else 0),
                           storage=random.random()*3 + \
                                   (6 if test_lvl%4 > 1 else 0),
                           delay=1 + random.random()*10,
                           bandwidth=random.random())
          vnf_added = True
        if vnf_added:
          # add olny the newly added VNF-s, not the shared ones.
          nfs_this_sc.append(nf)
          newport = nf.add_port()
          sglink = nffg.add_sglink(last_req_port, newport)
          sg_path.append(sglink.id)
          last_req_port = nf.add_port()
      # close the chain towards the destination SAP
      sap2port = sap2.add_port()
      sglink = nffg.add_sglink(last_req_port, sap2port)
      sg_path.append(sglink.id)
      # WARNING: this is completly a wild guess! Failing due to this doesn't
      # necessarily mean algorithm failure
      # Bandwidth maximal random value should be min(SAP1acces_bw,
      # SAP2access_bw)
      # MAYBE: each SAP can only be once in the reqgraph? - this is the case
      # now.
      if multiSC:
        minlat = 5.0 * (len(nfs_this_sc) + 2)
        maxlat = 13.0 * (len(nfs_this_sc) + 2)
      else:
        # nfcnt = len([i for i in nffg.nfs])
        minlat = 45.0 - 10.0 * (test_lvl % 4)
        maxlat = 60.0 - 12.25 * (test_lvl % 4)
      nffg.add_req(sap1port, sap2port, delay=random.uniform(minlat, maxlat),
                   bandwidth=random.random() * (max_bw + test_lvl % 4),
                   sg_path=sg_path)
      log.info("Service Chain on NF-s added: %s" %
               [nf.id for nf in nfs_this_sc])
      # this prevents loops in the chains and makes new and old NF-s equally
      # preferable in total for NF sharing
      new_nfs = [vnf for vnf in nfs_this_sc if vnf not in current_nfs]
      for tmp in xrange(0, scid + 1):
        current_nfs.extend(new_nfs)
    # both branches return the same tuple (kept as in the original)
    if not multiSC:
      return nffg, all_saps_beginning, all_saps_ending
    if multiSC:
      return nffg, all_saps_beginning, all_saps_ending
  return None, all_saps_beginning, all_saps_ending
def _constructExampleNetwork():
  """
  Build an example substrate: three EE nodes (with one resource dimension
  knocked out on each successive node), a fast switch connecting them, two
  SAPs and one pre-mapped NF to test VNF mapping removal / resource update.

  :return: the substrate network
  :rtype: :any:`NFFG`
  """
  nffg = NFFG(id="BME-net-001")
  uniformnoderes = {'cpu': 5, 'mem': 5, 'storage': 5, 'delay': 0.9,
                    'bandwidth': 5500}
  infra0 = nffg.add_infra(id="node0", name="INFRA0", **uniformnoderes)
  # NOTE: the SAME dict is mutated and reused below, so each later infra
  # inherits the previously nulled-out fields as well
  uniformnoderes['cpu'] = None
  infra1 = nffg.add_infra(id="node1", name="INFRA1", **uniformnoderes)
  uniformnoderes['mem'] = None
  infra2 = nffg.add_infra(id="node2", name="INFRA2", **uniformnoderes)
  uniformnoderes['storage'] = None
  switch = nffg.add_infra(id="sw0", name="FastSwitcher", delay=0.01,
                          bandwidth=10000)
  infra0.add_supported_type('A')
  infra1.add_supported_type(['B', 'C'])
  infra2.add_supported_type(['A', 'B', 'C'])
  sap0 = nffg.add_sap(name="SAP0", id="sap0innet")
  sap1 = nffg.add_sap(name="SAP1", id="sap1innet")
  unilinkres = {'delay': 1.5, 'bandwidth': 2000}
  # Infra links should be undirected, according to the currnet NFFG model
  # Infra link model is full duplex now.
  nffg.add_undirected_link(sap0.add_port(0), infra0.add_port(0),
                           **unilinkres)
  nffg.add_undirected_link(sap1.add_port(0), infra1.add_port(0),
                           **unilinkres)
  nffg.add_undirected_link(infra1.add_port(1), infra0.add_port(2),
                           **unilinkres)
  # the link resource dict is also mutated in place between calls
  unilinkres['bandwidth'] = None
  nffg.add_undirected_link(infra0.add_port(1), infra2.add_port(0),
                           **unilinkres)
  nffg.add_undirected_link(infra1.add_port(2), infra2.add_port(1),
                           **unilinkres)
  unilinkres['delay'] = 0.2
  unilinkres['bandwidth'] = 5000
  nffg.add_undirected_link(switch.add_port(0), infra0.add_port(3),
                           **unilinkres)
  unilinkres['delay'] = None
  nffg.add_undirected_link(switch.add_port(1), infra1.add_port(3),
                           **unilinkres)
  nffg.add_undirected_link(switch.add_port(2), infra2.add_port(2),
                           **unilinkres)
  # test VNF mapping removal, and resource update in the substrate NFFG
  nf4 = nffg.add_nf(id="NF4inNet", name="NetFunc4", func_type='B', cpu=1,
                    mem=1, storage=1, bandwidth=100, delay=50)
  nffg.add_undirected_link(infra1.add_port(3), nf4.add_port(0), dynamic=True)
  nffg.add_undirected_link(infra1.add_port(4), nf4.add_port(1), dynamic=True)
  return nffg
def getDecomps(self, nffg):
  """
  Get all decompositions for a given NFFG by exhaustively expanding every NF
  that has a 'DECOMPOSED' relationship in the graph database (BFS over
  partially decomposed NFFG variants held in two lockstep queues).

  :param nffg: the nffg for which the decompositions should be returned
  :type nffg: :any:`NFFG`
  :return: all the decompositions keyed as 'D0', 'D1', ... or None if some
    NF of the request is missing from the DB
  :rtype: dict
  """
  decompositions = {}
  nodes_list = []
  index = 0
  # Resolve every NF of the request in the graph DB; abort if any is unknown.
  for n in nffg.nfs:
    node = list(self.graph_db.find('NF', 'node_id', n.id))
    if len(node) != 0:
      nodes_list.append(node[0])
    else:
      log.debug("NF %s does not exist in the DB" % n.id)
      return None
  # BFS queues kept in lockstep: DB nodes still to expand, and the matching
  # partially decomposed NFFG variant.
  queue = deque([nodes_list])
  queue_nffg = deque([nffg])
  while len(queue) > 0:
    nodes = queue.popleft()
    nffg_init = queue_nffg.popleft()
    # indicator becomes 1 iff at least one NF of this variant was decomposed
    indicator = 0
    for node in nodes:
      rels_DECOMPOSED = list(
        self.graph_db.match(start_node=node, rel_type='DECOMPOSED'))
      # Fork one new NFFG variant per available decomposition of this NF.
      for rel in rels_DECOMPOSED:
        indicator = 1
        nffg_temp = NFFG()
        graph, rels = self.getSingleDecomp(
          rel.end_node.properties['node_id'])
        # Recreate the decomposition subgraph (NF-s and SAP-s) in nffg_temp.
        for n in graph.nodes():
          if graph.node[n]['properties']['label'] == 'NF':
            nffg_temp.add_nf(
              id=n,
              dep_type=graph.node[n]['properties']['type'],
              cpu=graph.node[n]['properties']['cpu'],
              mem=graph.node[n]['properties']['mem'],
              storage=graph.node[n]['properties']['storage'])
          elif graph.node[n]['properties']['label'] == 'SAP':
            nffg_temp.add_sap(id=n)
        # NOTE(review): 'counter' is never incremented below, so every SG
        # link added in this loop gets id 'hop0' -- confirm whether link
        # id collisions are possible/intended.
        counter = 0
        for edge in graph.edges():
          # Find the endpoint objects (NF or SAP) matching this DB edge.
          for nf in nffg_temp.nfs:
            if nf.id == edge[0]:
              node0 = nf
            if nf.id == edge[1]:
              node1 = nf
          for sap in nffg_temp.saps:
            if sap.id == edge[0]:
              node0 = sap
            if sap.id == edge[1]:
              node1 = sap
          # FIXME - czentye --> There is a chance node0, node1 variables
          # not defined yet until here and add_port will be raise an exception
          nffg_temp.add_sglink(
            node0.add_port(graph.edge[edge[0]][edge[1]]
                           ['properties']['src_port']),
            node1.add_port(graph.edge[edge[0]][edge[1]]
                           ['properties']['dst_port']),
            id='hop' + str(counter))
        # Copy the current partially decomposed NFFG into the new variant.
        for n in nffg_init.nfs:
          nffg_temp.add_node(n)
        for n in nffg_init.saps:
          nffg_temp.add_node(n)
        for n in nffg_init.infras:
          nffg_temp.add_node(n)
        for l in nffg_init.links:
          nffg_temp.add_edge(l.src.node, l.dst.node, l)
        for l in nffg_init.sg_hops:
          nffg_temp.add_edge(l.src.node, l.dst.node, l)
        for l in nffg_init.reqs:
          nffg_temp.add_edge(l.src.node, l.dst.node, l)
        # Rewire SG hops that touched the decomposed NF: splice the copied
        # graph onto the decomposition through its SAP placeholder nodes,
        # which are collected into extra_nodes and removed afterwards.
        extra_nodes = []
        for l in nffg_temp.sg_hops:
          if node.properties['node_id'] == l.src.node.id:
            src_port = l.src
            dst_port = l.dst
            for edge in graph.edges():
              if graph.node[edge[1]]['properties'][
                   'label'] == 'SAP':
                if str(src_port.id) == str(
                     graph.edge[edge[0]][edge[1]]
                     ['properties']['dst_port']):
                  for e in nffg_temp.sg_hops:
                    if e.src.node.id == edge[
                         0] and e.dst.node.id == edge[
                         1]:
                      nffg_temp.add_sglink(e.src, dst_port)
                      extra_nodes.append(edge[1])
          if node.properties['node_id'] == l.dst.node.id:
            dst_port = l.dst
            src_port = l.src
            for edge in graph.edges():
              if graph.node[edge[0]]['properties'][
                   'label'] == 'SAP':
                if str(dst_port.id) == str(
                     graph.edge[edge[0]][edge[1]]
                     ['properties']['src_port']):
                  for e in nffg_temp.sg_hops:
                    if e.src.node.id == edge[
                         0] and e.dst.node.id == edge[
                         1]:
                      nffg_temp.add_sglink(src_port, e.dst)
                      extra_nodes.append(edge[0])
        # Drop the decomposed NF and the placeholder SAP-s.
        nffg_temp.del_node(node.properties['node_id'])
        for extra in extra_nodes:
          nffg_temp.del_node(extra)
        queue_nffg.append(nffg_temp)
        # Enqueue the remaining NF-s plus the newly introduced ones.
        nodes_copy = list(nodes)
        new_nodes = map(lambda x: x.end_node, rels)  # py2: map gives a list
        nodes_copy.remove(node)
        queue.append(nodes_copy + new_nodes)
      if indicator == 1:
        break
    # No NF could be decomposed further -> nffg_init is a complete result.
    if indicator == 0:
      decompositions['D' + str(index)] = nffg_init
      index += 1
  return decompositions
def convert_mip_solution_to_nffg (reqs, net, file_inputs=False,
                                  full_remap=False):
  """
  Run the MIP (MILP) mapping on the batched requests and convert the raw
  solution back into a mapped NFFG via the CoreAlgorithm output constructor.

  :param reqs: request NFFG objects, or file paths if file_inputs is True
  :param net: substrate NFFG, or a file path if file_inputs is True
  :param file_inputs: whether reqs/net are file paths to be parsed
  :param full_remap: passed through to CoreAlgorithm's preprocessing
  :return: the mapped NFFG, or None if the MILP found no embedding
  """
  if file_inputs:
    request_seq = []
    for reqfile in reqs:
      with open(reqfile, "r") as f:
        req = NFFG.parse(f.read())
        request_seq.append(req)
    with open(net, "r") as g:
      net = NFFG.parse(g.read())
  else:
    request_seq = reqs

  # all input NFFG-s are obtained somehow

  ######################################################################
  #####  This is taken from the MappingAlgorithms.MAP() function    ####
  ######################################################################

  request = request_seq[0]

  # batch together all nffgs
  for r in request_seq[1:]:
    request = NFFGToolBox.merge_nffgs (request, r)

  chainlist = []
  cid = 1
  edgereqlist = []
  # Detach EdgeReqs from the request graph; they are re-interpreted below.
  for req in request.reqs:
    edgereqlist.append(req)
    request.del_edge(req.src, req.dst, req.id)

  # construct chains from EdgeReqs
  for req in edgereqlist:

    if len(req.sg_path) == 1:
      # then add it as linklocal req instead of E2E req
      log.info("Interpreting one SGHop long EdgeReq (id: %s) as link "
               "requirement on SGHop: %s."%(req.id, req.sg_path[0]))
      reqlink = None
      for sg_link in request.sg_hops:
        if sg_link.id == req.sg_path[0]:
          reqlink = sg_link
          break
      if reqlink is None:
        # NOTE(review): this warning has a %s placeholder but no argument
        # is supplied, and reqlink stays None for the setattr-s below --
        # confirm intended behavior on a dangling sg_path id.
        log.warn("EdgeSGLink object not found for EdgeSGLink ID %s! "
                 "(maybe ID-s stored in EdgeReq.sg_path are not the "
                 "same type as EdgeSGLink ID-s?)")
      if req.delay is not None:
        setattr(reqlink, 'delay', req.delay)
      if req.bandwidth is not None:
        setattr(reqlink, 'bandwidth', req.bandwidth)
    elif len(req.sg_path) == 0:
      raise uet.BadInputException(
        "If EdgeReq is given, it should specify which SGHop path does it "
        "apply to", "Empty SGHop path was given to %s EdgeReq!" % req.id)
    else:
      # Multi-hop EdgeReq: turn it into a chain dict for CoreAlgorithm.
      try:
        chain = {'id': cid, 'link_ids': req.sg_path,
                 'bandwidth': req.bandwidth if req.bandwidth is not None
                 else 0,
                 'delay': req.delay if req.delay is not None
                 else float("inf")}
      except AttributeError:
        raise uet.BadInputException(
          "EdgeReq attributes are: sg_path, bandwidth, delay",
          "Missing attribute of EdgeReq")
      # reconstruct NF path from EdgeSGLink path
      nf_chain = []
      for reqlinkid in req.sg_path:

        # find EdgeSGLink object of 'reqlinkid'
        reqlink = None
        for sg_link in request.sg_hops:
          if sg_link.id == reqlinkid:
            reqlink = sg_link
            break
        else:
          raise uet.BadInputException(
            "Elements of EdgeReq.sg_path should be EdgeSGLink.id-s.",
            "SG link %s couldn't be found in input request NFFG"
            % reqlinkid)
        # add the source node id of the EdgeSGLink to NF path
        nf_chain.append(reqlink.src.node.id)
        # add the destination node id of the last EdgeSGLink to NF path
        if reqlinkid == req.sg_path[-1]:
          if reqlink.dst.node.id != req.dst.node.id:
            raise uet.BadInputException(
              "EdgeReq.sg_path should select a path between its two ends",
              "Last NF (%s) of EdgeReq.sg_path and destination of EdgeReq ("
              "%s) are not the same!" % (reqlink.dst.node.id,
                                         req.dst.node.id))
          nf_chain.append(reqlink.dst.node.id)
        # validate EdgeReq ends.
        if reqlinkid == req.sg_path[0] and \
             reqlink.src.node.id != req.src.node.id:
          raise uet.BadInputException(
            "EdgeReq.sg_path should select a path between its two ends",
            "First NF (%s) of EdgeReq.sg_path and source of EdgeReq (%s) are "
            "not the same!" % (reqlink.src.node.id, req.src.node.id))
      chain['chain'] = nf_chain
      cid += 1
      chainlist.append(chain)

  # if some resource value is not set (is None) then be permissive and set it
  # to a comfortable (neutral) value.
  for respar in ('cpu', 'mem', 'storage', 'delay', 'bandwidth'):
    for n in net.infras:
      if n.resources[respar] is None:
        if respar == 'delay':
          log.warn("Resource parameter %s is not given in %s, "
                   "substituting with 0!"%(respar, n.id))
          n.resources[respar] = 0
        else:
          log.warn("Resource parameter %s is not given in %s, "
                   "substituting with infinity!"%(respar, n.id))
          n.resources[respar] = float("inf")
  # If link res is None or doesn't exist, replace it with a neutral value.
  # (The warning is suppressed for SAP-adjacent links, but the substitution
  # itself is applied to every STATIC link.)
  for i, j, d in net.network.edges_iter(data=True):
    if d.type == 'STATIC':
      if getattr(d, 'delay', None) is None:
        if d.src.node.type != 'SAP' and d.dst.node.type != 'SAP':
          log.warn("Resource parameter delay is not given in link %s "
                   "substituting with zero!"%d.id)
        setattr(d, 'delay', 0)
      if getattr(d, 'bandwidth', None) is None:
        if d.src.node.type != 'SAP' and d.dst.node.type != 'SAP':
          log.warn("Resource parameter bandwidth is not given in link %s "
                   "substituting with infinity!"%d.id)
        setattr(d, 'bandwidth', float("inf"))

  # create the class of the algorithm
  # unnecessary preprocessing is executed
  ############################################################################
  # HACK: We only want to use the algorithm class to generate an NFFG, we will
  # fill the mapping struct with the one found by MIP
  alg = CoreAlgorithm(net, request, chainlist, full_remap, False)

  # move 'availres' and 'availbandwidth' values of the network to maxres,
  # because the MIP solution takes them as available resource.
  net = alg.bare_infrastucture_nffg
  for n in net.infras:
    n.resources = n.availres
  for d in net.links:
    # there shouldn't be any Dynamic links by now.
    d.bandwidth = d.availbandwidth

  mapping_of_reqs = get_MIP_solution(request_seq, net)

  mappedNFFG = NFFG(id="MILP-mapped")
  for transformed_req in mapping_of_reqs:
    if mapping_of_reqs[transformed_req].is_embedded:
      # Replay the MIP node mapping into the algorithm's manager structures.
      alg.manager.vnf_mapping = []
      alg.manager.link_mapping = nx.MultiDiGraph()
      for n, vlist in mapping_of_reqs[transformed_req].\
           snode_to_hosted_vnodes.items():
        for v in vlist:
          alg.manager.vnf_mapping.append((v, n))
      # Replay the MIP link mapping: one substrate path per virtual SG hop.
      trans_link_mapping = mapping_of_reqs[transformed_req].vedge_to_spath
      for trans_sghop in trans_link_mapping:
        vnf1 = trans_sghop[0]
        vnf2 = trans_sghop[3]
        reqlid = get_edge_id(alg.req, vnf1,
                             trans_sghop[1],
                             trans_sghop[2],
                             vnf2)
        mapped_path = []
        path_link_ids = []
        for trans_link in trans_link_mapping[trans_sghop]:
          n1 = trans_link[0]
          n2 = trans_link[3]
          lid = get_edge_id(alg.net, n1, trans_link[1], trans_link[2], n2)
          mapped_path.append(n1)
          path_link_ids.append(lid)
        if len(trans_link_mapping[trans_sghop]) == 0:
          # collocation: both ends hosted on the same substrate node
          mapped_path.append(alg.manager.getIdOfChainEnd_fromNetwork(vnf1))
        else:
          mapped_path.append(n2)

        alg.manager.link_mapping.add_edge(vnf1, vnf2, key=reqlid,
                                          mapped_to=mapped_path,
                                          path_link_ids=path_link_ids)

      oneNFFG = alg.constructOutputNFFG()
      mappedNFFG = NFFGToolBox.merge_nffgs(mappedNFFG, oneNFFG)
    else:
      print "MILP didn't produce a mapping for request %s"%transformed_req
      return None

  # replace Infinity values
  MappingAlgorithms._purgeNFFGFromInfinityValues(mappedNFFG)

  # print mappedNFFG.dump()
  return mappedNFFG