def create_topology1():
    """Two-PoP topology (dc1, dc2 behind s1) with REST API and SONATA
    dummy gatekeeper; starts the emulation without entering a CLI."""
    # create topology
    net = DCNetwork(controller=RemoteController, monitor=False,
                    enable_learning=False)
    dc1 = net.addDatacenter("dc1")
    dc2 = net.addDatacenter("dc2")
    s1 = net.addSwitch("s1")
    net.addLink(dc1, s1, delay="10ms")
    net.addLink(dc2, s1, delay="20ms")
    # add the command line interface endpoint to each DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    # FIX: also connect the DC network itself -- the sibling topologies in
    # this project all do this, and it is required for chaining and
    # monitoring requests to work through the REST API.
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(dc1)
    rapi1.connectDatacenter(dc2)
    # run API endpoint server (in another thread, don't block)
    rapi1.start()
    # add the SONATA dummy gatekeeper to each DC
    sdkg1 = SonataDummyGatekeeperEndpoint("0.0.0.0", 5000)
    sdkg1.connectDatacenter(dc1)
    sdkg1.connectDatacenter(dc2)
    # run the dummy gatekeeper (in another thread, don't block)
    sdkg1.start()
    # start the emulation platform
    net.start()
def create_topology1():
    """Build a two-PoP emulation, run the interactive CLI, then tear down."""
    # DC network with a remote SDN controller and learning switches enabled
    network = DCNetwork(controller=RemoteController, monitor=False,
                        enable_learning=True)
    pop_a = network.addDatacenter("dc1")
    pop_b = network.addDatacenter("dc2")
    core_sw = network.addSwitch("s1")
    network.addLink(pop_a, core_sw, delay="10ms")
    network.addLink(pop_b, core_sw, delay="20ms")
    # REST endpoint (son-emu-cli) wired to the network and both PoPs
    rest = RestApiEndpoint("0.0.0.0", 5001)
    rest.connectDCNetwork(network)
    rest.connectDatacenter(pop_a)
    rest.connectDatacenter(pop_b)
    rest.start()  # served from a background thread
    # SONATA dummy gatekeeper attached to both PoPs
    gatekeeper = SonataDummyGatekeeperEndpoint("0.0.0.0", 5000,
                                               deploy_sap=False)
    gatekeeper.connectDatacenter(pop_a)
    gatekeeper.connectDatacenter(pop_b)
    gatekeeper.start()  # background thread as well
    # run until the user leaves the CLI, then shut everything down
    network.start()
    network.CLI()
    rest.stop()
    network.stop()
def __init__(self, port=5000):
    """Build the profiling topology and block until stop_now is set.

    :param port: gatekeeper port; the REST API listens on port + 1.
    """
    GracefulKiller(self)  # registers this instance for shutdown handling
    # single-PoP topology
    self.net = DCNetwork(controller=RemoteController, monitor=False,
                         enable_learning=False)
    self.dc = self.net.addDatacenter("dc1")
    # REST control endpoint on port + 1, served from a background thread
    self.rapi1 = RestApiEndpoint("0.0.0.0", port + 1)
    self.rapi1.connectDCNetwork(self.net)
    self.rapi1.connectDatacenter(self.dc)
    self.rapi1.start()
    # SONATA dummy gatekeeper on the base port (background thread)
    self.sdkg1 = SonataDummyGatekeeperEndpoint("0.0.0.0", port,
                                               deploy_sap=False)
    self.sdkg1.connectDatacenter(self.dc)
    self.sdkg1.start()
    self.net.start()
    LOG.info("Started topology")
    # spin until something flips the stop flag
    while not self.stop_now:
        sleep(1)
    self.net.stop()
    LOG.info("Stopped topology")
def _create_rest_api_endpoints(self):
    """Expose the four PoPs and the DC network itself via the REST API."""
    rest = RestApiEndpoint("0.0.0.0", 5001)
    # attach every emulated PoP to the endpoint
    for pop in (self.pop1, self.pop2, self.pop3, self.pop4):
        rest.connectDatacenter(pop)
    # attach the DC network (this object) as well
    rest.connectDCNetwork(self)
    # serve requests from a background thread
    rest.start()
def create_topology():
    """Single-PoP topology with OpenStack + REST APIs, one host and one
    Docker server behind a shared switch; runs the CLI until exit."""
    network = DCNetwork(monitor=False, enable_learning=True)
    pop = network.addDatacenter("dc1")
    # OpenStack-like control interface for the emulated PoP
    os_api = OpenstackApiEndpoint("0.0.0.0", 6001)
    os_api.connect_datacenter(pop)
    os_api.start()
    os_api.connect_dc_network(network)
    # REST endpoint used by son-emu-cli
    rest = RestApiEndpoint("0.0.0.0", 5001)
    rest.connectDCNetwork(network)
    rest.connectDatacenter(pop)
    rest.start()
    # plain Mininet part: one switch, a host and a Docker-based server
    sw = network.addSwitch('s1')
    host = network.addHost('h1')
    server = network.addDocker('h2', dimage='host:server')
    for node in (host, server, pop):
        network.addLink(node, sw, delay='20ms')
    network.start()
    network.CLI()
    # leaving the CLI shuts the emulator down
    network.stop()
def DemoTopology():
    """Two OSM PoPs joined by one switch, each with an OpenStack-like API,
    plus a shared REST endpoint; runs the CLI until exit."""
    network = DCNetwork(monitor=True, enable_learning=True)
    pop1 = network.addDatacenter("osm-pop1")
    pop2 = network.addDatacenter("osm-pop2")
    core = network.addSwitch("s1")
    network.addLink(pop1, core)
    network.addLink(pop2, core)
    # one OpenStack-like endpoint per PoP
    os_api1 = OpenstackApiEndpoint("127.0.0.1", 6001)
    os_api2 = OpenstackApiEndpoint("127.0.0.1", 6002)
    os_api1.connect_datacenter(pop1)
    os_api2.connect_datacenter(pop2)
    os_api1.start()
    os_api2.start()
    os_api1.connect_dc_network(network)
    os_api2.connect_dc_network(network)
    # REST endpoint (son-emu-cli) for the network and both PoPs
    rest = RestApiEndpoint("0.0.0.0", 5001)
    rest.connectDCNetwork(network)
    rest.connectDatacenter(pop1)
    rest.connectDatacenter(pop2)
    rest.start()
    network.start()
    network.CLI()
    # leaving the CLI stops the emulator
    network.stop()
def create_topology1():
    """One PoP with REST API and an auto-deploying SONATA gatekeeper that
    brings its own SAP VNFD; runs the CLI until exit."""
    network = DCNetwork(controller=RemoteController, monitor=True,
                        enable_learning=True)
    pop = network.addDatacenter("dc1")
    # REST control endpoint (runs in its own thread)
    rest = RestApiEndpoint("0.0.0.0", 5001)
    rest.connectDCNetwork(network)
    rest.connectDatacenter(pop)
    rest.start()
    # resolve the custom VNFD deployed as internal SAP (lives next to
    # this file)
    sap_vnfd_path = os.path.join(os.path.dirname(__file__),
                                 'custom_sap_vnfd.yml')
    # sap_vnfd_path = None
    # dummy gatekeeper with SAP deployment and docker management enabled
    gatekeeper = SonataDummyGatekeeperEndpoint(
        "0.0.0.0", 5000, deploy_sap=True, auto_deploy=True,
        docker_management=True, auto_delete=True,
        sap_vnfd_path=sap_vnfd_path)
    gatekeeper.connectDatacenter(pop)
    gatekeeper.start()  # background thread
    network.start()
    network.CLI()
    network.stop()
def create_topology1():
    """Two PoPs behind one switch, controlled only via the REST API."""
    network = DCNetwork(controller=RemoteController, monitor=False,
                        enable_learning=False)
    # the emulated data centers
    pop_a = network.addDatacenter("dc1")
    pop_b = network.addDatacenter("dc2")
    # intermediate switch interconnecting them
    core = network.addSwitch("s1")
    network.addLink(pop_a, core, delay="10ms")
    network.addLink(pop_b, core, delay="20ms")
    # REST control endpoint (used with son-emu-cli)
    rest = RestApiEndpoint("0.0.0.0", 5001)
    rest.connectDCNetwork(network)
    rest.connectDatacenter(pop_a)
    rest.connectDatacenter(pop_b)
    rest.start()
    # run until the user exits the CLI
    network.start()
    network.CLI()
    network.stop()
def create_topology():
    """One PoP plus two plain Ubuntu containers hanging off one switch."""
    network = DCNetwork(monitor=False, enable_learning=True)
    pop = network.addDatacenter("dc1")
    # OpenStack-like API for the PoP
    os_api = OpenstackApiEndpoint("0.0.0.0", 6001)
    os_api.connect_datacenter(pop)
    os_api.start()
    os_api.connect_dc_network(network)
    # REST endpoint for son-emu-cli
    rest = RestApiEndpoint("0.0.0.0", 5001)
    rest.connectDCNetwork(network)
    rest.connectDatacenter(pop)
    rest.start()
    # two standalone docker hosts plus the PoP on a single switch
    sw = network.addSwitch('s1')
    box1 = network.addDocker('d1', ip='10.100.0.1', dimage="ubuntu:trusty")
    box2 = network.addDocker('d2', ip='10.100.0.2', dimage="ubuntu:trusty")
    network.addLink(sw, box1)
    network.addLink(sw, box2)
    network.addLink(sw, pop)
    network.start()
    network.CLI()
    # leaving the CLI stops the emulator
    network.stop()
def create_topology():
    """One PoP plus a three-switch triangle with one host per switch."""
    network = DCNetwork(monitor=False, enable_learning=True)
    pop = network.addDatacenter("dc1")
    # OpenStack-like API for the PoP
    os_api = OpenstackApiEndpoint("0.0.0.0", 6001)
    os_api.connect_datacenter(pop)
    os_api.start()
    os_api.connect_dc_network(network)
    # REST endpoint for son-emu-cli
    rest = RestApiEndpoint("0.0.0.0", 5001)
    rest.connectDCNetwork(network)
    rest.connectDatacenter(pop)
    rest.start()
    switches = [network.addSwitch(name) for name in ('s1', 's2', 's3')]
    hosts = [network.addHost(name) for name in ('h1', 'h2', 'h3')]
    # one host per switch
    for host, sw in zip(hosts, switches):
        network.addLink(host, sw)
    # close the switch triangle
    network.addLink(switches[0], switches[1])
    network.addLink(switches[1], switches[2])
    network.addLink(switches[0], switches[2])
    network.start()
    network.CLI()
    # leaving the CLI stops the emulator
    network.stop()
def create_topology1():
    """Two PoPs behind a switch, with REST API and SONATA gatekeeper."""
    network = DCNetwork(controller=RemoteController, monitor=False,
                        enable_learning=False)
    # the emulated data centers
    pop_a = network.addDatacenter("dc1")
    pop_b = network.addDatacenter("dc2")
    # intermediate switch between them
    core = network.addSwitch("s1")
    network.addLink(pop_a, core, delay="10ms")
    network.addLink(pop_b, core, delay="20ms")
    # REST control endpoint (son-emu-cli)
    rest = RestApiEndpoint("0.0.0.0", 5001)
    rest.connectDCNetwork(network)
    rest.connectDatacenter(pop_a)
    rest.connectDatacenter(pop_b)
    rest.start()
    # dummy gatekeeper serving both PoPs (background thread)
    gatekeeper = SonataDummyGatekeeperEndpoint("0.0.0.0", 5000)
    gatekeeper.connectDatacenter(pop_a)
    gatekeeper.connectDatacenter(pop_b)
    gatekeeper.start()
    # run until the user exits the CLI
    network.start()
    network.CLI()
    network.stop()
def create_topology():
    """Two directly linked PoPs with REST API and a 5GTANGO LLCM that
    reads its placement from ~/static_placement.yml."""
    network = DCNetwork(monitor=False, enable_learning=True)
    # two interconnected data centers
    pop_a = network.addDatacenter("dc1")
    pop_b = network.addDatacenter("dc2")
    network.addLink(pop_a, pop_b, delay="20ms")
    # REST endpoint for son-emu-cli
    rest = RestApiEndpoint("0.0.0.0", 5001)
    rest.connectDCNetwork(network)
    rest.connectDatacenter(pop_a)
    rest.connectDatacenter(pop_b)
    rest.start()
    # 5GTANGO lightweight life cycle manager with static placement
    llcm = TangoLLCMEndpoint(
        "0.0.0.0", 5000, deploy_sap=False,
        placement_algorithm_obj=StaticConfigPlacement(
            "~/static_placement.yml"))
    llcm.connectDatacenter(pop_a)
    llcm.connectDatacenter(pop_b)
    llcm.start()  # background thread
    # run until the user exits the CLI
    network.start()
    network.CLI()
    network.stop()
class EmulatorProfilingTopology(object):
    """Minimal single-PoP topology used for profiling/benchmarking."""

    def __init__(self):
        pass

    def start(self):
        """Bring up the network, the REST API and the 5GTANGO LLCM."""
        LOG.info("Starting emulation ...")
        setLogLevel('info')  # Mininet log verbosity
        # a single PoP is enough for benchmarking
        self.net = DCNetwork(monitor=False, enable_learning=False)
        pop = self.net.addDatacenter("dc1")
        # REST control endpoint
        self.rapi1 = RestApiEndpoint("0.0.0.0", 5001)
        self.rapi1.connectDCNetwork(self.net)
        self.rapi1.connectDatacenter(pop)
        self.rapi1.start()
        # 5GTANGO lightweight life cycle manager
        self.llcm1 = TangoLLCMEndpoint("0.0.0.0", 5000, deploy_sap=False)
        self.llcm1.connectDatacenter(pop)
        self.llcm1.start()
        self.net.start()

    def stop(self):
        """Shut down the endpoints first, then the network."""
        LOG.info("Stopping emulation ...")
        self.rapi1.stop()
        self.llcm1.stop()
        self.net.stop()
def create_topology1():
    """Single PoP exposed through both REST and OpenStack-like APIs."""
    network = DCNetwork(controller=RemoteController, monitor=False,
                        enable_learning=True)
    pop = network.addDatacenter("dc1")
    # REST control endpoint (son-emu-cli)
    rest = RestApiEndpoint("0.0.0.0", 5001)
    rest.connectDCNetwork(network)
    rest.connectDatacenter(pop)
    rest.start()
    # OpenStack-like endpoint for the same PoP
    os_api = OpenstackApiEndpoint("0.0.0.0", 6001)
    os_api.connect_datacenter(pop)
    os_api.connect_dc_network(network)
    os_api.start()
    # run until the user exits the CLI
    network.start()
    network.CLI()
    network.stop()
def _create_rest_api_endpoints(self):
    """Start a REST control endpoint wired to all PoPs and the network."""
    endpoint = RestApiEndpoint("0.0.0.0", 5001)
    # PoP attachments
    endpoint.connectDatacenter(self.pop1)
    endpoint.connectDatacenter(self.pop2)
    endpoint.connectDatacenter(self.pop3)
    endpoint.connectDatacenter(self.pop4)
    # network attachment (self is the DC network)
    endpoint.connectDCNetwork(self)
    # serve from a background thread
    endpoint.start()
def __init__(self):
    """Build and run the profiling topology until stop_now is set."""
    # GracefulKiller presumably hooks termination signals for this
    # instance -- see its implementation
    GracefulKiller(self)
    # single-PoP topology
    self.net = DCNetwork(controller=RemoteController, monitor=False,
                         enable_learning=False)
    self.dc = self.net.addDatacenter("dc1")
    # REST control endpoint, served from a background thread
    self.rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    self.rapi1.connectDCNetwork(self.net)
    self.rapi1.connectDatacenter(self.dc)
    self.rapi1.start()
    # SONATA dummy gatekeeper, background thread as well
    self.sdkg1 = SonataDummyGatekeeperEndpoint("0.0.0.0", 5000,
                                               deploy_sap=False)
    self.sdkg1.connectDatacenter(self.dc)
    self.sdkg1.start()
    self.net.start()
    LOG.info("Started topology")
    # block here until something flips the stop flag
    while not self.stop_now:
        sleep(1)
    self.net.stop()
    LOG.info("Stopped topology")
def create_topology(self):
    """Two PoPs linked with 50 ms delay; OpenStack + REST endpoints."""
    self.net = DCNetwork(monitor=False, enable_learning=True)
    self.dc1 = self.net.addDatacenter("dc1")
    self.dc2 = self.net.addDatacenter("dc2")
    self.net.addLink(self.dc1, self.dc2, cls=TCLink, delay="50ms")
    # one OpenStack-like endpoint per PoP
    self.api1 = OpenstackApiEndpoint("0.0.0.0", 6001)
    self.api2 = OpenstackApiEndpoint("0.0.0.0", 6002)
    for api, pop in ((self.api1, self.dc1), (self.api2, self.dc2)):
        api.connect_datacenter(pop)
        api.connect_dc_network(self.net)
    # REST endpoint covering the network and both PoPs
    self.rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    self.rapi1.connectDCNetwork(self.net)
    for pop in (self.dc1, self.dc2):
        self.rapi1.connectDatacenter(pop)
def create_topology1():
    """Single-PoP topology with REST API and a SONATA dummy gatekeeper
    configured for fully automatic deploy/delete of services."""
    # create topology
    net = DCNetwork(controller=RemoteController, monitor=True,
                    enable_learning=True)
    dc1 = net.addDatacenter("dc1")
    # add the command line interface endpoint to each DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(dc1)
    # run API endpoint server (in another thread, don't block)
    rapi1.start()
    # specify a vnfd file to be deployed as internal SAP:
    sap_vnfd = 'custom_sap_vnfd.yml'
    dir_path = os.path.dirname(__file__)
    sap_vnfd_path = os.path.join(dir_path, sap_vnfd)
    # NOTE(review): the next line discards the path computed above, so the
    # gatekeeper receives sap_vnfd_path=None -- looks like a debug toggle
    # left enabled; confirm whether the override is still intended.
    sap_vnfd_path = None
    # add the SONATA dummy gatekeeper to each DC
    sdkg1 = SonataDummyGatekeeperEndpoint("0.0.0.0", 5000, deploy_sap=True,
                                          auto_deploy=True,
                                          docker_management=True,
                                          auto_delete=True,
                                          sap_vnfd_path=sap_vnfd_path)
    sdkg1.connectDatacenter(dc1)
    # run the dummy gatekeeper (in another thread, don't block)
    sdkg1.start()
    # start the emulation platform
    net.start()
    net.CLI()
    net.stop()
def create_topology(httpmode=False):
    """Build the server/gateway/device chain topology and run the CLI.

    :param httpmode: forwarded to the node factory helpers; when True the
        services are additionally started with explicit HTTP endpoints.
    """
    net = DCNetwork(monitor=False, enable_learning=True)
    DC = net.addDatacenter("DC")
    # add OpenStack-like APIs to the emulated DC
    api1 = OpenstackApiEndpoint("0.0.0.0", 6001)
    api1.connect_datacenter(DC)
    api1.start()
    api1.connect_dc_network(net)
    # add the command line interface endpoint to the emulated DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(DC)
    rapi1.start()
    # generate switches
    s1 = net.addSwitch('s1')
    s2 = net.addSwitch('s2')
    s3 = net.addSwitch('s3')
    # generate hosts via factory helpers defined elsewhere in this file
    S = createHost(httpmode, net, 'S')
    GI = createGW(httpmode, net, 'GI')
    GFA = createGW(httpmode, net, 'GFA')
    DVA = createDV(httpmode, net, 'DVA')
    # generate links: server -> s1, initial gateway -> s2,
    # final gateway and device -> s3, then chain the switches
    net.addLink(S, s1)
    net.addLink(GI, s2)
    net.addLink(GFA, s3)
    net.addLink(DVA, s3)
    net.addLink(s1, s2)
    net.addLink(s2, s3)
    net.addLink(DC, s3)
    # run services (in order): server first, then the gateway chain,
    # finally the device that periodically sends data.
    # NOTE(review): the grouping of these four commands under `if httpmode`
    # was inferred from a collapsed source line -- confirm against the
    # original file.
    if httpmode:
        S.cmd("startup --local_ip 10.0.0.1 --local_port 8080 --local_name srv")
        GI.cmd(
            "startup --local_ip 10.0.0.2 --local_port 8181 --local_name gwi --remote_ip 10.0.0.1 --remote_port 8080 --remote_name srv"
        )
        GFA.cmd(
            "startup --local_ip 10.0.0.3 --local_port 8282 --local_name gwfa --remote_ip 10.0.0.2 --remote_port 8181 --remote_name gwi"
        )
        DVA.cmd(
            "startup --local_ip 10.0.0.4 --local_port 8888 --local_name dva --remote_ip 10.0.0.3 --remote_port 8282 --remote_name gfa --send_period 3000"
        )
    # Do not remove
    net.start()
    net.CLI()
    # when the user types exit in the CLI, we stop the emulator
    net.stop()
class DaemonTopology(object):
    """
    Topology with two datacenters:

        dc1 <-- 50ms --> dc2

    Runs headless until SIGINT/SIGTERM arrives.
    """

    def __init__(self):
        self.running = True
        # stop cleanly on Ctrl-C or an external terminate request
        for sig in (signal.SIGINT, signal.SIGTERM):
            signal.signal(sig, self._stop_by_signal)
        # build, run and eventually tear down the topology
        self.create_topology()
        self.start_topology()
        self.daemonize()
        self.stop_topology()

    def create_topology(self):
        """Two PoPs on a 50 ms TCLink with OpenStack + REST endpoints."""
        self.net = DCNetwork(monitor=False, enable_learning=True)
        self.dc1 = self.net.addDatacenter("dc1")
        self.dc2 = self.net.addDatacenter("dc2")
        self.net.addLink(self.dc1, self.dc2, cls=TCLink, delay="50ms")
        # one OpenStack-like endpoint per PoP
        self.api1 = OpenstackApiEndpoint("0.0.0.0", 6001)
        self.api2 = OpenstackApiEndpoint("0.0.0.0", 6002)
        for api, pop in ((self.api1, self.dc1), (self.api2, self.dc2)):
            api.connect_datacenter(pop)
            api.connect_dc_network(self.net)
        # REST endpoint for the network and both PoPs
        self.rapi1 = RestApiEndpoint("0.0.0.0", 5001)
        self.rapi1.connectDCNetwork(self.net)
        self.rapi1.connectDatacenter(self.dc1)
        self.rapi1.connectDatacenter(self.dc2)

    def start_topology(self):
        """Start the endpoints first, then the network."""
        self.api1.start()
        self.api2.start()
        self.rapi1.start()
        self.net.start()

    def daemonize(self):
        """Sleep-loop until a signal clears self.running."""
        print("Daemonizing vim-emu. Send SIGTERM or SIGKILL to stop.")
        while self.running:
            time.sleep(1)

    def _stop_by_signal(self, signum, frame):
        print("Received SIGNAL {}. Stopping.".format(signum))
        self.running = False

    def stop_topology(self):
        """Stop the endpoints first, then the network."""
        self.api1.stop()
        self.api2.stop()
        self.rapi1.stop()
        self.net.stop()
def create_topology():
    """Server/gateway chain across two switches joined by a constrained
    (100 ms, bw=1) link; verifies connectivity with pings before the CLI."""
    net = DCNetwork(monitor=False, enable_learning=True)
    dc1 = net.addDatacenter("dc1")
    # add OpenStack-like APIs to the emulated DC
    api1 = OpenstackApiEndpoint("0.0.0.0", 6001)
    api1.connect_datacenter(dc1)
    api1.start()
    api1.connect_dc_network(net)
    # add the command line interface endpoint to the emulated DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(dc1)
    rapi1.start()
    # service containers ('image' is defined elsewhere in this file); each
    # starts its role-specific script on boot
    Serv = net.addDocker('Serv', ip='10.0.0.200', dimage=image,
                         dcmd="sh ./server.sh")
    GI = net.addDocker('GI', ip='10.0.0.201', dimage=image,
                       dcmd="sh ./gi.sh")
    GF1 = net.addDocker('GF1', ip='10.0.0.202', dimage=image,
                        dcmd="sh ./gf1.sh")
    GF2 = net.addDocker('GF2', ip='10.0.0.203', dimage=image,
                        dcmd="sh ./gf2.sh")
    GF3 = net.addDocker('GF3', ip='10.0.0.204', dimage=image,
                        dcmd="sh ./gf3.sh")
    s1 = net.addSwitch('s1')
    s2 = net.addSwitch('s2')
    # server and initial gateway on s1; final gateways and PoP on s2
    net.addLink(Serv, s1)
    net.addLink(GI, s1)
    # constrained inter-switch link (TCLink: delay and bandwidth limits)
    net.addLink(s1, s2, cls=TCLink, delay='100ms', bw=1)
    net.addLink(GF1, s2)
    net.addLink(GF2, s2)
    net.addLink(GF3, s2)
    net.addLink(dc1, s2)
    net.start()
    # sanity pings: each final gateway and the server must reach GI
    net.ping([GF1, GI])
    net.ping([GF2, GI])
    net.ping([GF3, GI])
    net.ping([Serv, GI])
    net.CLI()
    net.stop()
def createNet(self, nswitches=0, ndatacenter=0, nhosts=0, ndockers=0,
              autolinkswitches=False, controller=Controller, **kwargs):
    """
    Creates a Mininet instance and automatically adds some nodes to it.

    Attention: tests should always use Mininet's default controller;
    only pass another controller to test controller-specific behavior.
    """
    self.net = DCNetwork(controller=controller, **kwargs)
    self.api = RestApiEndpoint("127.0.0.1", 5001, self.net)
    # switches: start at s1 -- ovs rejects dpid 0 and mininet derives the
    # dpid from the numeric part of the switch name
    for idx in range(1, nswitches + 1):
        self.s.append(self.net.addSwitch('s%d' % idx))
    if autolinkswitches:
        # chain the switches pairwise
        for left, right in zip(self.s, self.s[1:]):
            self.net.addLink(left, right)
    # data centers, each tagged with unittest metadata
    for idx in range(ndatacenter):
        self.dc.append(
            self.net.addDatacenter('datacenter%d' % idx,
                                   metadata={"unittest_dc": idx}))
    # wire the first ndatacenter entries to the REST endpoint
    for idx in range(ndatacenter):
        self.api.connectDatacenter(self.dc[idx])
    # plain hosts and docker hosts
    for idx in range(nhosts):
        self.h.append(self.net.addHost('h%d' % idx))
    for idx in range(ndockers):
        self.d.append(self.net.addDocker('d%d' % idx,
                                         dimage="ubuntu:trusty"))
def setup_topology(net):
    """Attach one PoP, a REST API and a dummy gatekeeper to *net*."""
    _LOGGER.info("Setting up the topology")
    pop = net.addDatacenter("dc1")
    net.addLink(pop, net.addSwitch("s1"), delay="10ms")
    # REST control endpoint for the network and the PoP
    rest = RestApiEndpoint("0.0.0.0", 5001)
    rest.connectDCNetwork(net)
    rest.connectDatacenter(pop)
    rest.start()
    # SONATA dummy gatekeeper (background thread)
    gatekeeper = SonataDummyGatekeeperEndpoint("0.0.0.0", 5000,
                                               deploy_sap=False)
    gatekeeper.connectDatacenter(pop)
    gatekeeper.start()
def create_topology(self):
    """Client/VNFs/server PoPs over two switches; every link 1 ms, bw=100."""
    self.net = DCNetwork(monitor=False, enable_learning=True)
    self.client_dc = self.net.addDatacenter("client_dc")
    self.vnfs_dc = self.net.addDatacenter("vnfs_dc")
    self.server_dc = self.net.addDatacenter("server_dc")
    self.switch1 = self.net.addSwitch("switch1")
    self.switch2 = self.net.addSwitch("switch2")
    # identical characteristics on every link
    opts = {"delay": "1ms", "bw": 100}
    for src, dst in ((self.client_dc, self.switch1),
                     (self.vnfs_dc, self.switch1),
                     (self.switch1, self.switch2),
                     (self.vnfs_dc, self.switch2),
                     (self.switch2, self.server_dc)):
        self.net.addLink(src, dst, **opts)
    # REST endpoint for the network and all three PoPs
    self.rest = RestApiEndpoint("0.0.0.0", 5001)
    self.rest.connectDCNetwork(self.net)
    for pop in (self.client_dc, self.vnfs_dc, self.server_dc):
        self.rest.connectDatacenter(pop)
def __init__(self, args):
    """Load the GraphML topology, start the REST API and daemonize."""
    # run daemonized so the process can be stopped by signal
    self.running = True
    for sig in (signal.SIGINT, signal.SIGTERM):
        signal.signal(sig, self._stop_by_signal)
    self.args = args
    self.G = self._load_graphml(args.graph_file)
    # fall back to the file name if the graph carries no label
    self.G_name = self.G.graph.get("label", args.graph_file)
    LOG.debug("Graph label: {}".format(self.G_name))
    self.net = None
    self.pops = []
    # global REST API, shared by all PoPs created later
    self.rest_api = RestApiEndpoint("0.0.0.0", 5001)
    self.rest_api.start()
    # build, run and (after the daemon loop ends) tear down the topology
    self.create_environment()
    self.create_pops()
    self.create_links()
    self.start_topology()
    self.daemonize()
    self.stop_topology()
def start(self):
    """
    Run the Emulator and the endpoints.
    """
    super(Emulator, self).start()
    initialize_GK()
    self.net = DCNetwork(controller=RemoteController, monitor=False,
                         enable_learning=self.enable_learning)
    self.datacenter = self.net.addDatacenter('dc1')
    # REST API on a configured port, or a free one
    rest_ip = '0.0.0.0'
    rest_port = self.endpoint_port or get_free_tcp_port()
    self.endpoint = 'http://{}:{}'.format(rest_ip, rest_port)
    self.rest_api = RestApiEndpoint(rest_ip, rest_port)
    self.rest_api.connectDCNetwork(self.net)
    self.rest_api.connectDatacenter(self.datacenter)
    self.rest_api.start()
    # SONATA dummy gatekeeper
    gk_ip = '0.0.0.0'
    gk_port = self.sonata_port or get_free_tcp_port()
    self.sonata_address = 'http://{}:{}'.format(gk_ip, gk_port)
    self.sonata_gatekeeper = SonataDummyGatekeeperEndpoint(gk_ip, gk_port)
    self.sonata_gatekeeper.connectDatacenter(self.datacenter)
    self.sonata_gatekeeper.start()
    # 5GTANGO gatekeeper (LLCM)
    llcm_ip = '0.0.0.0'
    llcm_port = self.tango_port or get_free_tcp_port()
    self.tango_address = 'http://{}:{}'.format(llcm_ip, llcm_port)
    self.tango_gatekeeper = TangoLLCMEndpoint(llcm_ip, llcm_port)
    self.tango_gatekeeper.connectDatacenter(self.datacenter)
    self.tango_gatekeeper.start()
    self.net.start()
def create_topology1():
    """Single-PoP vCDN demo: squid cache, client and webserver containers
    started as compute instances inside dc1."""
    # create topology
    net = DCNetwork(controller=RemoteController, monitor=True,
                    enable_learning=True)
    dc1 = net.addDatacenter("dc1")
    # add the command line interface endpoint to each DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(dc1)
    # run API endpoint server (in another thread, don't block)
    rapi1.start()
    # add the SONATA dummy gatekeeper to each DC
    #sdkg1 = SonataDummyGatekeeperEndpoint("0.0.0.0", 5000, deploy_sap=True)
    #sdkg1.connectDatacenter(dc1)
    # run the dummy gatekeeper (in another thread, don't block)
    #sdkg1.start()
    # start the emulation platform
    net.start()
    # here the docker host default ip is configured
    # topology must be started before hosts are added
    cache = dc1.startCompute('cache', image="squid-vnf",
                             network=[{"ip": "10.10.0.1/24", "id": "client",
                                       'mac': "aa:aa:aa:00:00:01"},
                                      {"ip": "10.20.0.1/24", "id": "server",
                                       "mac": "aa:aa:aa:00:00:02"}])
    client = dc1.startCompute('client', image='vcdn-client',
                              network=[{"ip": "10.10.0.2/24",
                                        "id": "client"}])
    server = dc1.startCompute('server', image='webserver',
                              network=[{"ip": "10.20.0.2/24",
                                        "id": "server"}])
    # alternative manual wiring, kept for reference:
    #client = net.addDocker('client', ip='10.10.0.1/24', dimage="vcdn-client")
    #cache = net.addDocker('cache', dimage="squid-vnf")
    #server = net.addDocker('server', ip='10.20.0.1/24', dimage="webserver")
    #net.addLink(dc1, client, intfName1='dc-cl', intfName2='client')
    #net.addLink(dc1, server, intfName1='dc-sv', intfName2='server')
    #net.addLink(dc1, cache, intfName1='dc-ccl', intfName2='client', params1={'ip': '10.10.0.2/24'})
    #net.addLink(dc1, cache, intfName1='dc-csv', intfName2='server',params1={'ip': '10.20.0.2/24'})
    # initialise VNFs
    cache.cmd("./start.sh", detach=True)
    client.cmd("./start.sh", detach=True)
    server.cmd('./start.sh', detach=True)
    # startup script hangs if we use other startup command
    # command="./start.sh"
    net.CLI()
    net.stop()
    # NOTE(review): net.exit is presumably set by DCNetwork during
    # shutdown; this busy-waits until the emulator has fully exited --
    # confirm against the DCNetwork implementation.
    while not net.exit:
        pass
def create_topology(httpmode=False):
    """Server + gateway tree over four switches attached to one PoP.

    :param httpmode: forwarded to the host factory helper.
    """
    network = DCNetwork(monitor=False, enable_learning=True)
    pop = network.addDatacenter("DC")
    # OpenStack-like API for the PoP
    os_api = OpenstackApiEndpoint("0.0.0.0", 6001)
    os_api.connect_datacenter(pop)
    os_api.start()
    os_api.connect_dc_network(network)
    # REST endpoint for son-emu-cli
    rest = RestApiEndpoint("0.0.0.0", 5001)
    rest.connectDCNetwork(network)
    rest.connectDatacenter(pop)
    rest.start()
    sw1 = network.addSwitch('s1')
    sw2 = network.addSwitch('s2')
    sw3 = network.addSwitch('s3')
    sw4 = network.addSwitch('s4')
    # fixed-image server and initial gateway; final gateways come from the
    # factory helper defined elsewhere in this file
    srv = network.addDocker("S", dimage="host:server")
    gw_initial = network.addDocker("GI", dimage="host:gateway")
    gw_a = createHost(httpmode, network, 'GFA')
    gw_b = createHost(httpmode, network, 'GFB')
    gw_c = createHost(httpmode, network, 'GFC')
    # host-to-switch links, then the switch tree rooted at s2
    for node, sw in ((srv, sw1), (gw_initial, sw2), (gw_a, sw3),
                     (gw_b, sw3), (gw_c, sw4)):
        network.addLink(node, sw)
    network.addLink(sw1, sw2)
    network.addLink(sw2, sw3)
    network.addLink(sw2, sw4)
    network.addLink(pop, sw2)
    # do not remove: run until the user exits the CLI
    network.start()
    network.CLI()
    network.stop()
def create_topology():
    """Bare single-PoP setup: just the OpenStack and REST endpoints."""
    network = DCNetwork(monitor=False, enable_learning=False)
    pop = network.addDatacenter("dc1")
    # OpenStack-like API for the PoP
    os_api = OpenstackApiEndpoint("0.0.0.0", 6001)
    os_api.connect_datacenter(pop)
    os_api.start()
    os_api.connect_dc_network(network)
    # REST endpoint for son-emu-cli
    rest = RestApiEndpoint("0.0.0.0", 5001)
    rest.connectDCNetwork(network)
    rest.connectDatacenter(pop)
    rest.start()
    # run until the user exits the CLI
    network.start()
    network.CLI()
    network.stop()
def create_topology1():
    """Two PoPs with REST API and a SAP-deploying gatekeeper; waits for a
    termination signal instead of running the interactive CLI."""
    global exit
    network = DCNetwork(controller=RemoteController, monitor=True,
                        enable_learning=False)
    pop_a = network.addDatacenter("dc1")
    pop_b = network.addDatacenter("dc2")
    core = network.addSwitch("s1")
    network.addLink(pop_a, core, delay="10ms")
    network.addLink(pop_b, core, delay="20ms")
    # REST control endpoint; the whole network is connected too because
    # chaining and monitoring requests need it
    rest = RestApiEndpoint("0.0.0.0", 5001)
    rest.connectDatacenter(pop_a)
    rest.connectDatacenter(pop_b)
    rest.connectDCNetwork(network)
    rest.start()  # background thread
    # SONATA dummy gatekeeper with SAP deployment, serving both PoPs
    gatekeeper = SonataDummyGatekeeperEndpoint("0.0.0.0", 5000,
                                               deploy_sap=True)
    gatekeeper.connectDatacenter(pop_a)
    gatekeeper.connectDatacenter(pop_b)
    gatekeeper.start()  # background thread
    network.start()
    # the CLI cannot run under docker compose (no interactive container),
    # so wait for a signal to set the module-level exit flag instead
    logging.info("waiting for SIGTERM or SIGINT signal")
    while not exit:
        time.sleep(1)
    logging.info("got SIG signal")
    network.stop()
def start(self):
    """Bring up a one-PoP benchmarking topology with REST API and LLCM."""
    LOG.info("Starting emulation ...")
    # imports kept local to this method, as in the original
    # pylint: disable=E0401
    from mininet.log import setLogLevel
    from emuvim.dcemulator.net import DCNetwork
    from emuvim.api.rest.rest_api_endpoint import RestApiEndpoint
    from emuvim.api.tango import TangoLLCMEndpoint
    setLogLevel('info')  # set Mininet loglevel
    # one PoP is sufficient for benchmarking
    self.net = DCNetwork(monitor=False, enable_learning=False)
    pop = self.net.addDatacenter("dc1")
    # REST control endpoint
    rest = RestApiEndpoint("0.0.0.0", 5001)
    rest.connectDCNetwork(self.net)
    rest.connectDatacenter(pop)
    rest.start()
    # 5GTANGO lightweight life cycle manager
    llcm = TangoLLCMEndpoint("0.0.0.0", 5000, deploy_sap=False)
    llcm.connectDatacenter(pop)
    llcm.start()
    self.net.start()
def createNet(self, nswitches=0, ndatacenter=0, nhosts=0, ndockers=0,
              autolinkswitches=False, controller=Controller, **kwargs):
    """
    Create a Mininet instance pre-populated with test nodes.

    Tests should stick to Mininet's default controller; supply another
    one only when controller-specific behavior is under test.
    """
    self.net = DCNetwork(controller=controller, **kwargs)
    self.api = RestApiEndpoint("127.0.0.1", 5001, self.net)
    # switch names start at s1: ovs refuses dpid 0, and mininet derives
    # the dpid from the numeric suffix of the name
    for num in range(1, nswitches + 1):
        self.s.append(self.net.addSwitch('s%d' % num))
    if autolinkswitches:
        # daisy-chain all switches
        for pos in range(len(self.s) - 1):
            self.net.addLink(self.s[pos], self.s[pos + 1])
    # data centers carrying unittest metadata
    for num in range(ndatacenter):
        dc_node = self.net.addDatacenter('datacenter%d' % num,
                                         metadata={"unittest_dc": num})
        self.dc.append(dc_node)
    # wire the first ndatacenter entries to the REST endpoint
    for num in range(ndatacenter):
        self.api.connectDatacenter(self.dc[num])
    # plain and docker hosts
    for num in range(nhosts):
        self.h.append(self.net.addHost('h%d' % num))
    for num in range(ndockers):
        self.d.append(self.net.addDocker('d%d' % num,
                                         dimage="ubuntu:trusty"))
def create_topology1():
    """Headless two-PoP topology: REST API plus SAP-deploying gatekeeper,
    terminated via the module-level exit flag rather than the CLI."""
    global exit
    dcnet = DCNetwork(controller=RemoteController, monitor=True,
                      enable_learning=False)
    dc_left = dcnet.addDatacenter("dc1")
    dc_right = dcnet.addDatacenter("dc2")
    middle_sw = dcnet.addSwitch("s1")
    dcnet.addLink(dc_left, middle_sw, delay="10ms")
    dcnet.addLink(dc_right, middle_sw, delay="20ms")
    # REST control endpoint for both PoPs; the network itself is attached
    # as well, which chaining and monitoring require
    api = RestApiEndpoint("0.0.0.0", 5001)
    api.connectDatacenter(dc_left)
    api.connectDatacenter(dc_right)
    api.connectDCNetwork(dcnet)
    api.start()  # non-blocking, runs in its own thread
    # dummy gatekeeper with SAP deployment for both PoPs
    gk = SonataDummyGatekeeperEndpoint("0.0.0.0", 5000, deploy_sap=True)
    gk.connectDatacenter(dc_left)
    gk.connectDatacenter(dc_right)
    gk.start()  # non-blocking as well
    dcnet.start()
    # no interactive CLI here (docker compose cannot attach a TTY);
    # poll the exit flag that the signal handler sets
    logging.info("waiting for SIGTERM or SIGINT signal")
    while not exit:
        time.sleep(1)
    logging.info("got SIG signal")
    dcnet.stop()
class Profiling:
    """Blocking helper that builds a small single-PoP profiling topology.

    The constructor does all the work and only returns once stop_it()
    sets the stop flag.
    """

    # polled by __init__'s wait loop; flipped by stop_it()
    stop_now = False

    def __init__(self, port=5000):
        """Set up and run the topology.

        :param port: gatekeeper port; the REST interface uses port + 1.
        """
        GracefulKiller(self)
        # single-PoP topology
        self.net = DCNetwork(controller=RemoteController, monitor=False,
                             enable_learning=False)
        self.dc = self.net.addDatacenter("dc1")
        # REST control endpoint (background thread)
        self.rapi1 = RestApiEndpoint("0.0.0.0", port + 1)
        self.rapi1.connectDCNetwork(self.net)
        self.rapi1.connectDatacenter(self.dc)
        self.rapi1.start()
        # SONATA dummy gatekeeper (background thread)
        self.sdkg1 = SonataDummyGatekeeperEndpoint("0.0.0.0", port,
                                                   deploy_sap=False)
        self.sdkg1.connectDatacenter(self.dc)
        self.sdkg1.start()
        self.net.start()
        LOG.info("Started topology")
        # block until stop_it() is called
        while not self.stop_now:
            sleep(1)
        self.net.stop()
        LOG.info("Stopped topology")

    def stop_it(self):
        """Ask the wait loop in __init__ to finish."""
        self.stop_now = True
def create_topology1():
    """One PoP with REST API and a SAP-deploying dummy gatekeeper."""
    network = DCNetwork(controller=RemoteController, monitor=True,
                        enable_learning=True)
    pop = network.addDatacenter("dc1")
    # REST control endpoint (background thread)
    rest = RestApiEndpoint("0.0.0.0", 5001)
    rest.connectDCNetwork(network)
    rest.connectDatacenter(pop)
    rest.start()
    # SONATA dummy gatekeeper with SAP deployment (background thread)
    gatekeeper = SonataDummyGatekeeperEndpoint("0.0.0.0", 5000,
                                               deploy_sap=True)
    gatekeeper.connectDatacenter(pop)
    gatekeeper.start()
    # run until the user exits the CLI
    network.start()
    network.CLI()
    network.stop()
def create_topology():
    """Two interconnected DCs with a REST API and the 5GTANGO LLCM."""
    net = DCNetwork(monitor=False, enable_learning=True)

    # two data centers joined by a 20 ms link
    dc1 = net.addDatacenter("dc1")
    dc2 = net.addDatacenter("dc2")
    net.addLink(dc1, dc2, delay="20ms")

    # REST API endpoint of the emulated platform (own thread)
    api = RestApiEndpoint("0.0.0.0", 5001)
    api.connectDCNetwork(net)
    api.connectDatacenter(dc1)
    api.connectDatacenter(dc2)
    api.start()

    # 5GTANGO lightweight life cycle manager (LLCM), attached to both
    # DCs; runs in another thread, does not block
    llcm = TangoLLCMEndpoint("0.0.0.0", 5000, deploy_sap=False)
    llcm.connectDatacenter(dc1)
    llcm.connectDatacenter(dc2)
    llcm.start()

    # start the emulation and enter the interactive CLI; when the user
    # types exit in the CLI, we stop the emulator
    net.start()
    net.CLI()
    net.stop()
def create_topology1():
    """Tutorial topology: two DCs behind one switch, monitoring enabled.

    Walks through the typical setup steps: network creation, monitoring
    endpoint, data centers, switches, links, and control API endpoints.
    """
    # (1) a data center network object with monitoring enabled
    net = DCNetwork(monitor=True, enable_learning=False)

    # (1b) monitoring endpoint for the whole DCNetwork, kept for
    # backwards compatibility with the zerorpc api
    mon_api = ZeroRpcApiEndpointDCNetwork("0.0.0.0", 5151)
    mon_api.connectDCNetwork(net)
    mon_api.start()

    # (2) two logical data centers ("bigswitches" in this prototype)
    dc1 = net.addDatacenter("dc1")
    dc2 = net.addDatacenter("dc2")

    # (3) an additional SDN switch between them
    s1 = net.addSwitch("s1")

    # (4) links defining the topology (could carry Mininet bw/delay/
    # jitter limits)
    net.addLink(dc1, s1)
    net.addLink(s1, dc2)

    # (5) API endpoints so compute resources (VNFs as Docker containers)
    # can be controlled from outside.
    # Legacy zerorpc endpoint, kept for the prometheus metric query test:
    zapi1 = ZeroRpcApiEndpoint("0.0.0.0", 4242)
    zapi1.connectDatacenter(dc1)
    zapi1.connectDatacenter(dc2)
    zapi1.start()

    # REST endpoint handling all compute, networking and monitoring
    # commands; the total network is attached too, needed to do the
    # chaining and monitoring
    api1 = RestApiEndpoint("0.0.0.0", 5001)
    api1.connectDatacenter(dc1)
    api1.connectDatacenter(dc2)
    api1.connectDCNetwork(net)
    api1.start()

    # (5.1) one endpoint per data center is also supported, e.g.:
    # zapi2 = ZeroRpcApiEndpoint("0.0.0.0", 4343)
    # zapi2.connectDatacenter(dc3)
    # zapi2.connectDatacenter(dc4)
    # zapi2.start()

    # (6) start the emulator and enter the interactive Mininet CLI;
    # when the user types exit in the CLI, we stop the emulator
    net.start()
    net.CLI()
    net.stop()
def create_topology(httpmode=False, port_default=8888, device_rate=1500):
    """Gateway/device testbed: a server, one intermediate gateway and
    three final gateways spread over four switches plus one DC.

    :httpmode: forwarded to createHost for every emulated host
    :port_default: base port handed to the start_devices scripts
    :device_rate: device traffic rate handed to the start_devices scripts
    """
    net = DCNetwork(monitor=False, enable_learning=True)
    DC = net.addDatacenter("DC")

    # OpenStack-like control APIs on the emulated DC
    api1 = OpenstackApiEndpoint("0.0.0.0", 6001)
    api1.connect_datacenter(DC)
    api1.start()
    api1.connect_dc_network(net)

    # REST API endpoint of the emulator itself
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(DC)
    rapi1.start()

    # switches
    s1 = net.addSwitch('s1')
    s2 = net.addSwitch('s2')
    s3 = net.addSwitch('s3')
    s4 = net.addSwitch('s4')

    # hosts: server, intermediate gateway, three final gateways
    S = createHost(httpmode, net, 'S', "host:server")
    GI = createHost(httpmode, net, 'GI', "host:gateway")
    GFA = createHost(httpmode, net, 'GFA', "host:gwfinal")
    GFB = createHost(httpmode, net, 'GFB', "host:gwfinal")
    GFC = createHost(httpmode, net, 'GFC', "host:gwfinal")

    # wiring
    net.addLink(S, s1)
    net.addLink(GI, s2)
    net.addLink(GFA, s3)
    net.addLink(GFB, s3)
    net.addLink(GFC, s4)
    net.addLink(s1, s2)
    net.addLink(s2, s3)
    net.addLink(s2, s4)
    net.addLink(DC, s4)

    # Do not remove
    net.start()

    # boot the server first, then the gateways that register against it;
    # each node gets a short grace period to finish its startup script
    print("Starting Server node")
    S.cmd("startup --local_ip 10.0.0.1 --local_port 8888 --local_name srv")
    print("Waiting for server node to complete startup")
    time.sleep(2)

    print("Starting GI node")
    GI.cmd(
        "startup --local_ip 10.0.0.2 --local_port 8888 --local_name gwi --remote_ip 10.0.0.1 --remote_port 8888 --remote_name srv"
    )
    print("Waiting for GI node to complete startup")
    time.sleep(2)

    print("Starting GFA node")
    GFA.cmd(
        "startup --local_ip 10.0.0.3 --local_port 8888 --local_name gwfa --remote_ip 10.0.0.2 --remote_port 8888 --remote_name gwi"
    )
    print("Waiting for GFA node to complete startup")
    time.sleep(2)
    print("Starting GFA devices")
    GFA.cmd("start_devices 10.0.0.3 9001 {0} gwfa {1}".format(
        port_default, device_rate))

    print("Starting GFB node")
    GFB.cmd(
        "startup --local_ip 10.0.0.4 --local_port 8888 --local_name gwfb --remote_ip 10.0.0.2 --remote_port 8888 --remote_name gwi"
    )
    print("Waiting for GFB node to complete startup")
    time.sleep(2)
    print("Starting GFB devices")
    GFB.cmd("start_devices 10.0.0.4 9001 {0} gwfb {1}".format(
        port_default, device_rate))

    print("Starting GFC node")
    GFC.cmd(
        "startup --local_ip 10.0.0.5 --local_port 8888 --local_name gwfc --remote_ip 10.0.0.2 --remote_port 8888 --remote_name gwi"
    )
    print("Waiting for GFC node to complete startup")
    time.sleep(2)
    print("Starting GFC devices")
    GFC.cmd("start_devices 10.0.0.5 9001 {0} gwfc {1}".format(
        port_default, device_rate))

    # interactive CLI; when the user types exit, we stop the emulator
    net.CLI()
    net.stop()
# NOTE(review): this span is the tail of a topology script; its head
# (imports and the nodes server/gwi1/gwf1/gwf2/gwf3 plus the image
# variable X) is defined earlier in the original file — confirm there.
monitoring = net.addDocker(
    'monitoring', ip='10.0.0.15', dimage=X,
    dcmd="sh -c 'cd /Projet-SDCI && git pull; cd Monitoring; sh monitoring.sh; tail -f /dev/null'")
dc = net.addDatacenter("dc")

info('*** Adding switches\n')
s1 = net.addSwitch('s1')

info('*** Creating links\n')
# every node hangs off s1 with a uniform 20 ms link delay
net.addLink(server, s1, delay="20ms")
net.addLink(gwi1, s1, delay="20ms")
net.addLink(gwf1, s1, delay="20ms")
net.addLink(gwf2, s1, delay="20ms")
net.addLink(gwf3, s1, delay="20ms")
net.addLink(monitoring, s1, delay="20ms")
net.addLink(dc, s1, delay="20ms")

info('*** Starting RestApi\n')
rapi1 = RestApiEndpoint("0.0.0.0", 5001)
rapi1.connectDCNetwork(net)
rapi1.connectDatacenter(dc)
rapi1.start()

info('*** Starting network\n')
net.start()

info('*** Running CLI\n')
CLI(net)

info('*** Stopping network')
net.stop()
class SimpleTestTopology(unittest.TestCase): """ Helper class to do basic test setups. s1 -- s2 -- s3 -- ... -- sN """ def __init__(self, *args, **kwargs): self.net = None self.api = None self.s = [] # list of switches self.h = [] # list of hosts self.d = [] # list of docker containers self.dc = [] # list of data centers self.docker_cli = None super(SimpleTestTopology, self).__init__(*args, **kwargs) def createNet( self, nswitches=0, ndatacenter=0, nhosts=0, ndockers=0, autolinkswitches=False, controller=Controller, **kwargs): """ Creates a Mininet instance and automatically adds some nodes to it. Attention, we should always use Mininet's default controller for our tests. Only use other controllers if you want to test specific controller functionality. """ self.net = DCNetwork(controller=controller, **kwargs) self.api = RestApiEndpoint("127.0.0.1", 5001, self.net) # add some switches # start from s1 because ovs does not like to have dpid = 0 # and switch name-number is being used by mininet to set the dpid for i in range(1, nswitches+1): self.s.append(self.net.addSwitch('s%d' % i)) # if specified, chain all switches if autolinkswitches: for i in range(0, len(self.s) - 1): self.net.addLink(self.s[i], self.s[i + 1]) # add some data centers for i in range(0, ndatacenter): self.dc.append( self.net.addDatacenter( 'datacenter%d' % i, metadata={"unittest_dc": i})) # connect data centers to the endpoint for i in range(0, ndatacenter): self.api.connectDatacenter(self.dc[i]) # add some hosts for i in range(0, nhosts): self.h.append(self.net.addHost('h%d' % i)) # add some dockers for i in range(0, ndockers): self.d.append(self.net.addDocker('d%d' % i, dimage="ubuntu:trusty")) def startApi(self): self.api.start() def stopApi(self): self.api.stop() def startNet(self): self.net.start() def stopNet(self): self.net.stop() def getDockerCli(self): """ Helper to interact with local docker instance. 
""" if self.docker_cli is None: self.docker_cli = docker.APIClient( base_url='unix://var/run/docker.sock') return self.docker_cli def getContainernetContainers(self): """ List the containers managed by containernet """ return self.getDockerCli().containers(filters={"label": "com.containernet"}) @staticmethod def setUp(): pass @staticmethod def tearDown(): cleanup() # make sure that all pending docker containers are killed with open(os.devnull, 'w') as devnull: subprocess.call( "sudo docker rm -f $(sudo docker ps --filter 'label=com.containernet' -a -q)", stdout=devnull, stderr=devnull, shell=True)
def create_topology1():
    """Single-DC vCDN demo: a squid cache between a client and a
    webserver, each on its own emulated network segment."""
    # emulated network with monitoring and learning switches enabled
    net = DCNetwork(controller=RemoteController, monitor=True,
                    enable_learning=True)
    dc1 = net.addDatacenter("dc1")

    # REST API endpoint (own thread, non-blocking)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(dc1)
    rapi1.start()

    # the SONATA dummy gatekeeper is not needed for this demo:
    # sdkg1 = SonataDummyGatekeeperEndpoint("0.0.0.0", 5000, deploy_sap=True)
    # sdkg1.connectDatacenter(dc1)
    # sdkg1.start()

    # the topology must be started before compute hosts are added;
    # the docker host default ip is configured here
    net.start()

    # cache VNF with one leg in the client and one in the server network
    cache = dc1.startCompute('cache', image="squid-vnf",
                             network=[{"ip": "10.10.0.1/24", "id": "client"},
                                      {"ip": "10.20.0.1/24", "id": "server"}])
    client = dc1.startCompute('client', image='vcdn-client',
                              network=[{"ip": "10.10.0.2/24",
                                        "id": "client"}])
    server = dc1.startCompute('server', image='webserver',
                              network=[{"ip": "10.20.0.2/24",
                                        "id": "server"}])

    # initialise the VNFs; run detached because the startup script hangs
    # when passed as the container startup command (command="./start.sh")
    cache.cmd("./start.sh", detach=True)
    client.cmd("./start.sh", detach=True)
    server.cmd('./start.sh', detach=True)

    net.CLI()
    net.stop()
def create_topology1():
    """Tutorial topology: four DCs, one extra switch, zerorpc and REST
    control endpoints plus a zerorpc monitoring agent."""
    # (1) the data center network object
    net = DCNetwork()

    # (1b) zerorpc monitoring agent attached to the whole DCNetwork
    mon_api = ZeroRpcApiEndpointDCNetwork("0.0.0.0", 5151)
    mon_api.connectDCNetwork(net)
    mon_api.start()

    # (2) logical data centers ("bigswitches" in this prototype);
    # arbitrary metadata can be attached to each DC
    dc1 = net.addDatacenter("dc1")
    dc2 = net.addDatacenter("dc2")
    dc3 = net.addDatacenter("long_data_center_name3")
    dc4 = net.addDatacenter(
        "dc4",
        metadata={"mydata": "we can also add arbitrary metadata to each DC"})

    # (3) an additional SDN switch for DC interconnections
    s1 = net.addSwitch("s1")

    # (4) links; nodes can be passed by object or by name. These links
    # could also carry Mininet bw/delay/jitter limits.
    net.addLink(dc1, dc2)
    net.addLink("dc1", s1)
    net.addLink(s1, dc3)
    net.addLink(s1, "dc4")

    # (5) API endpoints so an orchestrator can start/stop/list compute
    # instances (VNFs as Docker containers) in the data centers
    zapi1 = ZeroRpcApiEndpoint("0.0.0.0", 4242)
    rapi1 = RestApiEndpoint("127.0.0.1", 5001)
    for datacenter in (dc1, dc2, dc3, dc4):
        zapi1.connectDatacenter(datacenter)
        rapi1.connectDatacenter(datacenter)
    # run API endpoint servers (in other threads, don't block)
    zapi1.start()
    rapi1.start()

    # (5.1) a second endpoint, illustrating one-endpoint-per-DC
    # deployments (OpenStack-like REST API per data center)
    zapi2 = ZeroRpcApiEndpoint("0.0.0.0", 4343)
    zapi2.connectDatacenter(dc3)
    zapi2.connectDatacenter(dc4)
    zapi2.start()

    # (6) start the emulator and enter the Mininet CLI; when the user
    # types exit in the CLI, we stop the emulator
    net.start()
    net.CLI()
    net.stop()
def create_topology1():
    """Tutorial topology with monitoring: four DCs, one extra switch,
    legacy zerorpc endpoints plus a REST control endpoint."""
    # (1) DCNetwork with monitoring and learning switches enabled
    net = DCNetwork(monitor=True, enable_learning=True)

    # (1b) legacy zerorpc monitoring agent, kept to test the prometheus
    # metric query
    mon_api = ZeroRpcApiEndpointDCNetwork("0.0.0.0", 5151)
    mon_api.connectDCNetwork(net)
    mon_api.start()

    # (2) logical data centers; arbitrary metadata can be attached
    dc1 = net.addDatacenter("dc1")
    dc2 = net.addDatacenter("dc2")
    dc3 = net.addDatacenter("long_data_center_name3")
    dc4 = net.addDatacenter(
        "dc4",
        metadata={"mydata": "we can also add arbitrary metadata to each DC"})

    # (3) an additional SDN switch for DC interconnections
    s1 = net.addSwitch("s1")

    # (4) links; nodes can be passed by object or by name
    net.addLink(dc1, dc2)
    net.addLink("dc1", s1)
    net.addLink(s1, dc3)
    net.addLink(s1, "dc4")

    # (5) control endpoints.
    # legacy zerorpc endpoint, kept for the prometheus metric query test:
    zapi1 = ZeroRpcApiEndpoint("0.0.0.0", 4242)
    zapi1.connectDatacenter(dc1)
    zapi1.connectDatacenter(dc2)
    zapi1.start()

    # REST endpoint over all four DCs; the total network is connected as
    # well, needed to do the chaining and monitoring
    api1 = RestApiEndpoint("127.0.0.1", 5001)
    api1.connectDatacenter(dc1)
    api1.connectDatacenter(dc2)
    api1.connectDatacenter(dc3)
    api1.connectDatacenter(dc4)
    api1.connectDCNetwork(net)
    api1.start()

    # (6) start the emulator and enter the Mininet CLI; when the user
    # types exit in the CLI, we stop the emulator
    net.start()
    net.CLI()
    net.stop()