Example #1
def create_topology():
    net = DCNetwork(monitor=False, enable_learning=True)

    dc1 = net.addDatacenter("dc1")
    # add OpenStack-like APIs to the emulated DC
    api1 = OpenstackApiEndpoint("0.0.0.0", 6001)
    api1.connect_datacenter(dc1)
    api1.start()
    api1.connect_dc_network(net)
    # add the command line interface endpoint to the emulated DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(dc1)
    rapi1.start()
    
    s1 = net.addSwitch('s1')
    s2 = net.addSwitch('s2')
    s3 = net.addSwitch('s3')

    h1 = net.addHost('h1')
    h2 = net.addHost('h2')
    h3 = net.addHost('h3')

    net.addLink(h1, s1)
    net.addLink(h2, s2)
    net.addLink(h3, s3)
    # create a switch triangle
    net.addLink(s1, s2)
    net.addLink(s2, s3)
    net.addLink(s1, s3)

    net.start()
    net.CLI()
    # when the user types exit in the CLI, we stop the emulator
    net.stop()
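The snippets in this listing omit their imports. A typical preamble for these topology scripts, modeled on son-emu/vim-emu example scripts (the exact module paths may vary between versions, so treat them as assumptions):

import logging
from mininet.log import setLogLevel
from emuvim.dc.net import DCNetwork
from emuvim.api.rest.rest_api_endpoint import RestApiEndpoint
from emuvim.api.openstack.openstack_api_endpoint import OpenstackApiEndpoint

logging.basicConfig(level=logging.INFO)

if __name__ == '__main__':
    setLogLevel('info')  # Mininet log verbosity
    create_topology()    # build and run the topology defined above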
Example #2
def create_topology(httpmode=False):
    net = DCNetwork(monitor=False, enable_learning=True)

    DC = net.addDatacenter("DC")

    # add OpenStack-like APIs to the emulated DC
    api1 = OpenstackApiEndpoint("0.0.0.0", 6001)
    api1.connect_datacenter(DC)
    api1.start()
    api1.connect_dc_network(net)

    # add the command line interface endpoint to the emulated DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(DC)
    rapi1.start()

    # Create switches
    s1 = net.addSwitch('s1')
    s2 = net.addSwitch('s2')
    s3 = net.addSwitch('s3')

    # Create hosts
    S = createHost(httpmode, net, 'S')
    GI = createGW(httpmode, net, 'GI')
    GFA = createGW(httpmode, net, 'GFA')
    DVA = createDV(httpmode, net, 'DVA')

    # Create links
    net.addLink(S, s1)
    net.addLink(GI, s2)
    net.addLink(GFA, s3)
    net.addLink(DVA, s3)
    net.addLink(s1, s2)
    net.addLink(s2, s3)
    net.addLink(DC, s3)

    #Run Services (in order)
    if httpmode:
        S.cmd("startup --local_ip 10.0.0.1 --local_port 8080 --local_name srv")
    GI.cmd(
        "startup --local_ip 10.0.0.2 --local_port 8181 --local_name gwi --remote_ip 10.0.0.1 --remote_port 8080 --remote_name srv"
    )
    GFA.cmd(
        "startup --local_ip 10.0.0.3 --local_port 8282 --local_name gwfa --remote_ip 10.0.0.2 --remote_port 8181 --remote_name gwi"
    )
    DVA.cmd(
        # remote_name must match GFA's --local_name ("gwfa") above
        "startup --local_ip 10.0.0.4 --local_port 8888 --local_name dva --remote_ip 10.0.0.3 --remote_port 8282 --remote_name gwfa --send_period 3000"
    )

    #Do not remove
    net.start()
    net.CLI()
    # when the user types exit in the CLI, we stop the emulator
    net.stop()
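The helpers createHost, createGW, and createDV used above (and in Example #10) are defined elsewhere in the original project. A minimal hypothetical sketch, assuming they wrap net.addDocker with project-specific images (the image tags are placeholders, except host:server and host:gateway, which appear in Example #10):

def createHost(httpmode, net, name):
    # placeholder image; the original project likely selects it based on httpmode
    return net.addDocker(name, dimage="host:server")

def createGW(httpmode, net, name):
    return net.addDocker(name, dimage="host:gateway")

def createDV(httpmode, net, name):
    return net.addDocker(name, dimage="host:device")  # placeholder image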
Example #3
def create_topology():
    net = DCNetwork(monitor=False, enable_learning=True)

    dc1 = net.addDatacenter("dc1")
    # add OpenStack-like APIs to the emulated DC
    api1 = OpenstackApiEndpoint("0.0.0.0", 6001)
    api1.connect_datacenter(dc1)
    api1.start()
    api1.connect_dc_network(net)
    # add the command line interface endpoint to the emulated DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(dc1)
    rapi1.start()
    
    s1 = net.addSwitch('s1')
    h1 = net.addHost('h1')
    h2 = net.addDocker('h2', dimage='host:server')
    net.addLink(h1, s1, delay='20ms')
    net.addLink(h2, s1, delay='20ms')
    net.addLink(dc1, s1, delay='20ms')
    
    net.start()
    net.CLI()
    # when the user types exit in the CLI, we stop the emulator
    net.stop()
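Each link above adds 20 ms of one-way delay, and the h1 <-> h2 path crosses two links (h1-s1 and s1-h2), so a round trip should take roughly 80 ms. A quick sanity check that could be added just after net.start():

    net.ping([h1, h2])                        # basic reachability
    print(h1.cmd('ping -c 3 %s' % h2.IP()))   # reported RTT should be ~80 ms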
Example #4
def create_topology():
    net = DCNetwork(monitor=False, enable_learning=True)

    dc1 = net.addDatacenter("dc1")
    # add OpenStack-like APIs to the emulated DC
    api1 = OpenstackApiEndpoint("0.0.0.0", 6001)
    api1.connect_datacenter(dc1)
    api1.start()
    api1.connect_dc_network(net)
    # add the command line interface endpoint to the emulated DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(dc1)
    rapi1.start()

    s1 = net.addSwitch('s1')
    d1 = net.addDocker('d1', ip='10.100.0.1', dimage="ubuntu:trusty")
    d2 = net.addDocker('d2', ip='10.100.0.2', dimage="ubuntu:trusty")
    net.addLink(s1, d1)
    net.addLink(s1, d2)
    net.addLink(s1, dc1)

    net.start()
    net.CLI()
    # when the user types exit in the CLI, we stop the emulator
    net.stop()
Example #5
def create_topology1():
    # create topology
    net = DCNetwork(controller=RemoteController, monitor=False, enable_learning=True)
    dc1 = net.addDatacenter("dc1")
    dc2 = net.addDatacenter("dc2")
    s1 = net.addSwitch("s1")
    net.addLink(dc1, s1, delay="10ms")
    net.addLink(dc2, s1, delay="20ms")

    # add the command line interface endpoint to each DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(dc1)
    rapi1.connectDatacenter(dc2)
    # run API endpoint server (in another thread, don't block)
    rapi1.start()

    # add the SONATA dummy gatekeeper to each DC
    sdkg1 = SonataDummyGatekeeperEndpoint("0.0.0.0", 5000, deploy_sap=False)
    sdkg1.connectDatacenter(dc1)
    sdkg1.connectDatacenter(dc2)
    # run the dummy gatekeeper (in another thread, don't block)
    sdkg1.start()

    # start the emulation platform
    net.start()
    net.CLI()
    rapi1.stop()
    net.stop()
Example #6
def DemoTopology():
    net = DCNetwork(monitor=True, enable_learning=True)

    dc1 = net.addDatacenter("osm-pop1")
    dc2 = net.addDatacenter("osm-pop2")

    s1 = net.addSwitch("s1")

    net.addLink(dc1, s1)
    net.addLink(dc2, s1)

    # add OpenStack-like APIs to the emulated DC
    api1 = OpenstackApiEndpoint("127.0.0.1", 6001)
    api2 = OpenstackApiEndpoint("127.0.0.1", 6002)

    api1.connect_datacenter(dc1)
    api2.connect_datacenter(dc2)

    api1.start()
    api2.start()

    api1.connect_dc_network(net)
    api2.connect_dc_network(net)

    # add the command line interface endpoint to the emulated DC (REST API)
    Rapi = RestApiEndpoint("0.0.0.0", 5001)
    Rapi.connectDCNetwork(net)
    Rapi.connectDatacenter(dc1)
    Rapi.connectDatacenter(dc2)
    Rapi.start()

    net.start()
    net.CLI()
    # when the user types exit in the CLI, we stop the emulator
    net.stop()
Example #7
def create_topology1():
    # create topology
    net = DCNetwork(controller=RemoteController,
                    monitor=True,
                    enable_learning=True)

    dc1 = net.addDatacenter("dc1")
    net.addLink(dc1, net.addSwitch("s1"), delay="10ms")

    # add the command line interface endpoint to each DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(dc1)
    # run API endpoint server (in another thread, don't block)
    rapi1.start()

    # add the SONATA dummy gatekeeper to each DC
    sdkg1 = SonataDummyGatekeeperEndpoint("0.0.0.0",
                                          5000,
                                          deploy_sap=True,
                                          auto_deploy=True)
    sdkg1.connectDatacenter(dc1)
    # run the dummy gatekeeper (in another thread, don't block)
    sdkg1.start()

    # start the emulation platform
    net.start()
    net.CLI()
    net.stop()
Example #8
def create_topology1():
    # create topology
    net = DCNetwork(controller=RemoteController,
                    monitor=False,
                    enable_learning=False)
    # add datacenters
    dc1 = net.addDatacenter("dc1")
    dc2 = net.addDatacenter("dc2")
    # add some intermediate switch
    s1 = net.addSwitch("s1")
    # connect data centers
    net.addLink(dc1, s1, delay="10ms")
    net.addLink(dc2, s1, delay="20ms")

    # add REST control endpoints to each datacenter (to be used with son-emu-cli)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(dc1)
    rapi1.connectDatacenter(dc2)
    rapi1.start()

    # start the emulation platform
    net.start()
    net.CLI()
    net.stop()
Example #9
def create_topology1():
    # create topology
    net = DCNetwork(controller=RemoteController,
                    monitor=False,
                    enable_learning=False)
    # add datacenters
    dc1 = net.addDatacenter("dc1")
    dc2 = net.addDatacenter("dc2")
    # add some intermediate switch
    s1 = net.addSwitch("s1")
    # connect data centers
    net.addLink(dc1, s1, delay="10ms")
    net.addLink(dc2, s1, delay="20ms")

    # add REST control endpoints to each datacenter (to be used with son-emu-cli)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(dc1)
    rapi1.connectDatacenter(dc2)
    rapi1.start()

    # add the SONATA dummy gatekeeper to each DC
    sdkg1 = SonataDummyGatekeeperEndpoint("0.0.0.0", 5000)
    sdkg1.connectDatacenter(dc1)
    sdkg1.connectDatacenter(dc2)
    # run the dummy gatekeeper (in another thread, don't block)
    sdkg1.start()

    # start the emulation platform
    net.start()
    net.CLI()
    net.stop()
Example #10
def create_topology(httpmode=False):
    net = DCNetwork(monitor=False, enable_learning=True)

    DC = net.addDatacenter("DC")

    # add OpenStack-like APIs to the emulated DC
    api1 = OpenstackApiEndpoint("0.0.0.0", 6001)
    api1.connect_datacenter(DC)
    api1.start()
    api1.connect_dc_network(net)

    # add the command line interface endpoint to the emulated DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(DC)
    rapi1.start()

    s1 = net.addSwitch('s1')
    s2 = net.addSwitch('s2')
    s3 = net.addSwitch('s3')
    s4 = net.addSwitch('s4')

    S = net.addDocker("S", dimage="host:server")
    GI = net.addDocker("GI", dimage="host:gateway")
    GFA = createHost(httpmode, net, 'GFA')
    GFB = createHost(httpmode, net, 'GFB')
    GFC = createHost(httpmode, net, 'GFC')

    net.addLink(S, s1)
    net.addLink(GI, s2)
    net.addLink(GFA, s3)
    net.addLink(GFB, s3)
    net.addLink(GFC, s4)

    net.addLink(s1, s2)
    net.addLink(s2, s3)
    net.addLink(s2, s4)
    net.addLink(DC, s2)

    #Do not remove
    net.start()
    net.CLI()
    # when the user types exit in the CLI, we stop the emulator
    net.stop()
Example #11
def create_topology():
    net = DCNetwork(monitor=False, enable_learning=True)

    dc1 = net.addDatacenter("dc1")
    # add OpenStack-like APIs to the emulated DC
    api1 = OpenstackApiEndpoint("0.0.0.0", 6001)
    api1.connect_datacenter(dc1)
    api1.start()
    api1.connect_dc_network(net)
    # add the command line interface endpoint to the emulated DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(dc1)
    rapi1.start()

    Serv = net.addDocker('Serv',
                         ip='10.0.0.200',
                         dimage=image,
                         dcmd="sh ./server.sh")
    GI = net.addDocker('GI', ip='10.0.0.201', dimage=image, dcmd="sh ./gi.sh")
    GF1 = net.addDocker('GF1',
                        ip='10.0.0.202',
                        dimage=image,
                        dcmd="sh ./gf1.sh")
    GF2 = net.addDocker('GF2',
                        ip='10.0.0.203',
                        dimage=image,
                        dcmd="sh ./gf2.sh")
    GF3 = net.addDocker('GF3',
                        ip='10.0.0.204',
                        dimage=image,
                        dcmd="sh ./gf3.sh")

    s1 = net.addSwitch('s1')
    s2 = net.addSwitch('s2')

    net.addLink(Serv, s1)
    net.addLink(GI, s1)
    net.addLink(s1, s2, cls=TCLink, delay='100ms', bw=1)
    net.addLink(GF1, s2)
    net.addLink(GF2, s2)
    net.addLink(GF3, s2)
    net.addLink(dc1, s2)

    net.start()

    net.ping([GF1, GI])
    net.ping([GF2, GI])
    net.ping([GF3, GI])
    net.ping([Serv, GI])

    net.CLI()

    net.stop()
Example #12
class DaemonTopology(object):
    def __init__(self):
        self.running = True
        signal.signal(signal.SIGINT, self._stop_by_signal)
        signal.signal(signal.SIGTERM, self._stop_by_signal)
        # create and start topology
        self.create_topology()
        self.start_topology()
        self.daemonize()
        self.stop_topology()

    def create_topology(self):
        self.net = DCNetwork(monitor=False, enable_learning=True)
        self.client_dc = self.net.addDatacenter("client_dc")
        self.vnfs_dc = self.net.addDatacenter("vnfs_dc")
        self.server_dc = self.net.addDatacenter("server_dc")

        self.switch1 = self.net.addSwitch("switch1")
        self.switch2 = self.net.addSwitch("switch2")

        linkopts = dict(delay="1ms", bw=100)
        self.net.addLink(self.client_dc, self.switch1, **linkopts)
        self.net.addLink(self.vnfs_dc, self.switch1, **linkopts)
        self.net.addLink(self.switch1, self.switch2, **linkopts)
        self.net.addLink(self.vnfs_dc, self.switch2, **linkopts)
        self.net.addLink(self.switch2, self.server_dc, **linkopts)

        # add the command line interface endpoint to the emulated DC (REST API)
        self.rest = RestApiEndpoint("0.0.0.0", 5001)
        self.rest.connectDCNetwork(self.net)
        self.rest.connectDatacenter(self.client_dc)
        self.rest.connectDatacenter(self.vnfs_dc)
        self.rest.connectDatacenter(self.server_dc)

    def start_topology(self):
        self.rest.start()
        self.net.start()
        subprocess.call("./res/scripts/init_two_clients_servers.sh",
                        shell=True)

    def daemonize(self):
        print("Daemonizing vim-emu. Send SIGTERM or SIGKILL to stop.")
        while self.running:
            time.sleep(1)

    def _stop_by_signal(self, signum, frame):
        print("Received SIGNAL {}. Stopping.".format(signum))
        self.running = False

    def stop_topology(self):
        self.rest.stop()
        self.net.stop()
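DaemonTopology does all its work in __init__ (create, start, wait for SIGINT/SIGTERM, tear down), so the entry point is just constructing it; the snippet assumes the signal, time, and subprocess modules are imported:

if __name__ == '__main__':
    DaemonTopology()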
Example #13
def create_topology1():
    # create topology
    net = DCNetwork(controller=RemoteController,
                    monitor=True,
                    enable_learning=True)
    dc1 = net.addDatacenter("dc1")
    dc2 = net.addDatacenter("dc2")
    dc3 = net.addDatacenter("dc3")
    s1 = net.addSwitch("s1")
    net.addLink(dc1, s1)
    net.addLink(dc2, s1)
    net.addLink(dc3, s1)

    # add the command line interface endpoint to each DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(dc1)
    rapi1.connectDatacenter(dc2)
    rapi1.connectDatacenter(dc3)
    # run API endpoint server (in another thread, don't block)
    rapi1.start()

    # specify a vnfd file to be deployed as internal SAP:
    sap_vnfd = 'custom_sap_vnfd.yml'
    dir_path = os.path.dirname(__file__)
    sap_vnfd_path = os.path.join(dir_path, sap_vnfd)
    # sap_vnfd_path = None
    # add the SONATA dummy gatekeeper to each DC
    sdkg1 = SonataDummyGatekeeperEndpoint("0.0.0.0",
                                          5000,
                                          deploy_sap=True,
                                          auto_deploy=True,
                                          docker_management=True,
                                          auto_delete=True,
                                          sap_vnfd_path=sap_vnfd_path,
                                          bidirectional=True,
                                          placement='CustomPlacementvCDN')
    sdkg1.connectDatacenter(dc1)
    sdkg1.connectDatacenter(dc2)
    sdkg1.connectDatacenter(dc3)
    # run the dummy gatekeeper (in another thread, don't block)
    sdkg1.start()

    # start the emulation platform
    net.start()
    net.CLI()
    net.stop()
Example #14
def create_topology1():
    cleanup()
    # create topology
    # use a maximum of 50% cpu time for containers added to data centers
    net = DCNetwork(dc_emulation_max_cpu=0.5, controller=Controller)
    # add some data centers and create a topology
    dc1 = net.addDatacenter("dc1", resource_log_path=RESOURCE_LOG_PATH)
    dc2 = net.addDatacenter("dc2", resource_log_path=RESOURCE_LOG_PATH)
    s1 = net.addSwitch("s1")
    net.addLink(dc1, s1, delay="10ms")
    net.addLink(dc2, s1, delay="20ms")

    # create and assign resource models for each DC
    rm1 = UpbSimpleCloudDcRM(max_cu=4, max_mu=1024)
    rm2 = UpbOverprovisioningCloudDcRM(max_cu=4)
    dc1.assignResourceModel(rm1)
    dc2.assignResourceModel(rm2)

    # add the command line interface endpoint to each DC
    zapi1 = ZeroRpcApiEndpoint("0.0.0.0", 4242)
    zapi1.connectDatacenter(dc1)
    zapi1.connectDatacenter(dc2)
    # run API endpoint server (in another thread, don't block)
    zapi1.start()

    # start the emulation platform
    net.start()
    print "Wait a moment and allocate some compute start some compute resources..."
    time.sleep(2)
    dc1.startCompute("vnf1")
    dc1.startCompute("vnf2", flavor_name="tiny")
    dc1.startCompute("vnf3", flavor_name="small")
    dc2.startCompute("vnf4", flavor_name="medium")
    dc2.startCompute("vnf5", flavor_name="medium")
    dc2.startCompute("vnf6", flavor_name="medium")
    print "... done."
    time.sleep(5)
    print "Removing instances ..."
    dc1.stopCompute("vnf1")
    dc2.stopCompute("vnf4")
    print "... done"
    net.CLI()
    net.stop()
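Besides flavor_name, son-emu's startCompute also accepts an image and an explicit interface list in most versions; the exact signature sketched below is an assumption and may differ between releases:

    # sketch: start a VNF with an explicit image and interface configuration
    vnf = dc1.startCompute("vnf7",
                           image="ubuntu:trusty",
                           network=[{"id": "intf1", "ip": "10.0.10.1/24"}])
    dc1.stopCompute("vnf7")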
Example #16
def create_topology1():

    global exit

    # create topology
    net = DCNetwork(controller=RemoteController,
                    monitor=True,
                    enable_learning=False)
    dc1 = net.addDatacenter("dc1")
    dc2 = net.addDatacenter("dc2")
    s1 = net.addSwitch("s1")
    net.addLink(dc1, s1, delay="10ms")
    net.addLink(dc2, s1, delay="20ms")

    # add the command line interface endpoint to each DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDatacenter(dc1)
    rapi1.connectDatacenter(dc2)
    # connect total network also, needed to do the chaining and monitoring
    rapi1.connectDCNetwork(net)
    # run API endpoint server (in another thread, don't block)
    rapi1.start()

    # add the SONATA dummy gatekeeper to each DC
    sdkg1 = SonataDummyGatekeeperEndpoint("0.0.0.0", 5000, deploy_sap=True)
    sdkg1.connectDatacenter(dc1)
    sdkg1.connectDatacenter(dc2)
    # run the dummy gatekeeper (in another thread, don't block)
    sdkg1.start()

    # start the emulation platform
    net.start()

    # does not work from docker compose (cannot start container in interactive mode)
    # cli = net.CLI()
    # instead wait here:
    logging.info("waiting for SIGTERM or SIGINT signal")
    while not exit:
        time.sleep(1)
    logging.info("got SIG signal")
    net.stop()
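Example #16 (like Example #18 below) relies on a module-level exit flag and signal handlers that the snippet does not show. A minimal sketch of those missing pieces, following the same pattern as Example #12 (the handler name here is hypothetical):

import logging
import signal
import time

exit = False  # set by the handler to leave the wait loop in create_topology1()

def _stop_handler(signum, frame):
    global exit
    exit = True

signal.signal(signal.SIGINT, _stop_handler)
signal.signal(signal.SIGTERM, _stop_handler)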
Example #17
def create_topology1():
    # create topology
    net = DCNetwork(controller=RemoteController, monitor=True, enable_learning=True)
    dc1 = net.addDatacenter("dc1")
    dc2 = net.addDatacenter("dc2")
    dc3 = net.addDatacenter("dc3")
    s1 = net.addSwitch("s1")
    net.addLink(dc1, s1)
    net.addLink(dc2, s1)
    net.addLink(dc3, s1)

    # add the command line interface endpoint to each DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(dc1)
    rapi1.connectDatacenter(dc2)
    rapi1.connectDatacenter(dc3)
    # run API endpoint server (in another thread, don't block)
    rapi1.start()


    # specify a vnfd file to be deployed as internal SAP:
    sap_vnfd = 'custom_sap_vnfd.yml'
    dir_path = os.path.dirname(__file__)
    sap_vnfd_path = os.path.join(dir_path, sap_vnfd)
    # sap_vnfd_path = None
    # add the SONATA dummy gatekeeper to each DC
    sdkg1 = SonataDummyGatekeeperEndpoint("0.0.0.0", 5000, deploy_sap=True, auto_deploy=True,
                                          docker_management=True, auto_delete=True,
                                          sap_vnfd_path=sap_vnfd_path)
    sdkg1.connectDatacenter(dc1)
    sdkg1.connectDatacenter(dc2)
    sdkg1.connectDatacenter(dc3)
    # run the dummy gatekeeper (in another thread, don't block)
    sdkg1.start()

    # start the emulation platform
    net.start()
    net.CLI()
    net.stop()
Example #18
def create_topology1():

    global exit

    # create topology
    net = DCNetwork(controller=RemoteController, monitor=True, enable_learning=False)
    dc1 = net.addDatacenter("dc1")
    dc2 = net.addDatacenter("dc2")
    s1 = net.addSwitch("s1")
    net.addLink(dc1, s1, delay="10ms")
    net.addLink(dc2, s1, delay="20ms")

    # add the command line interface endpoint to each DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDatacenter(dc1)
    rapi1.connectDatacenter(dc2)
    # connect total network also, needed to do the chaining and monitoring
    rapi1.connectDCNetwork(net)
    # run API endpoint server (in another thread, don't block)
    rapi1.start()

    # add the SONATA dummy gatekeeper to each DC
    sdkg1 = SonataDummyGatekeeperEndpoint("0.0.0.0", 5000, deploy_sap=True)
    sdkg1.connectDatacenter(dc1)
    sdkg1.connectDatacenter(dc2)
    # run the dummy gatekeeper (in another thread, don't block)
    sdkg1.start()

    # start the emulation platform
    net.start()

    # does not work from docker compose (cannot start container in interactive mode)
    # cli = net.CLI()
    # instead wait here:
    logging.info("waiting for SIGTERM or SIGINT signal")
    while not exit:
        time.sleep(1)
    logging.info("got SIG signal")
    net.stop()
Example #19
def create_topology1():
    net = DCNetwork(monitor=True, enable_learning=False)
    dc1 = net.addDatacenter("dc1")
    dc2 = net.addDatacenter("dc2")
    dc3 = net.addDatacenter("dc3")
    dc4 = net.addDatacenter("dc4")

    heatapi1 = OpenstackApiEndpoint("0.0.0.0", 5001)
    heatapi2 = OpenstackApiEndpoint("0.0.0.0", 5002)
    heatapi3 = OpenstackApiEndpoint("0.0.0.0", 5003)
    heatapi4 = OpenstackApiEndpoint("0.0.0.0", 5004)

    # connect data centers to this endpoint
    heatapi1.connect_datacenter(dc1)
    heatapi2.connect_datacenter(dc2)
    heatapi3.connect_datacenter(dc3)
    heatapi4.connect_datacenter(dc4)

    s1 = net.addSwitch("s1")
    net.addLink(dc1, s1, jitter="10ms", delay="12ms", loss=0, bw=0.5)
    net.addLink(dc2, s1, bw=0.5, loss=0, delay="20ms", jitter="15ms")
    net.addLink(dc3, s1, delay="30ms", loss=1, bw=0.5, jitter="10ms")
    net.addLink(dc4, s1, delay="40ms", loss=2, bw=1, jitter="10ms")
    # run the API endpoint servers (in another thread, don't block)
    heatapi1.start()
    heatapi2.start()
    heatapi3.start()
    heatapi4.start()

    heatapi1.connect_dc_network(net)
    heatapi2.connect_dc_network(net)
    heatapi3.connect_dc_network(net)
    heatapi4.connect_dc_network(net)

    net.start()
    net.CLI()
    # when the user types exit in the CLI, we stop the emulator
    net.stop()
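In the Mininet TCLink semantics these links use, bw is given in Mbit/s, delay and jitter as strings with time units, and loss in percent; the dc3 link above therefore emulates a 0.5 Mbit/s path with 30 ms delay, 10 ms jitter, and roughly 1% packet loss.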
Example #20
def create_topology1():
    """
    1. Create a data center network object (DCNetwork)
    """
    net = DCNetwork()

    """
    1b. add a REST API endpoint to the DCNetwork (needed for monitoring and chaining)
    """
    sonata_api = RestApiEndpoint("127.0.0.1", 5001)
    sonata_api.connectDCNetwork(net)
    sonata_api.start()


    """
    2. Add (logical) data centers to the topology
       (each data center is one "bigswitch" in our simplified
        first prototype)
    """
    dc1 = net.addDatacenter("datacenter1")
    dc2 = net.addDatacenter("datacenter2")
    dc3 = net.addDatacenter("long_data_center_name3")
    dc4 = net.addDatacenter(
        "datacenter4",
        metadata={"mydata": "we can also add arbitrary metadata to each DC"})

    """
    3. You can add additional SDN switches for data center
       interconnections to the network.
    """
    s1 = net.addSwitch("s1")

    """
    4. Add links between your data centers and additional switches
       to define your topology.
       These links can use Mininet's features to limit bw, add delay or jitter.
    """
    net.addLink(dc1, dc2)
    net.addLink("datacenter1", s1)
    net.addLink(s1, dc3)
    net.addLink(s1, "datacenter4")

    """
    5. We want to access and control our data centers from the outside,
       e.g., we want to connect an orchestrator to start/stop compute
       resources aka. VNFs (represented by Docker containers in the emulated network)

       So we need to instantiate API endpoints (e.g. a zerorpc or REST
       interface). Depending on the endpoint implementations, we can connect
       one or more data centers to it, which can then be controlled through
       this API, e.g., start/stop/list compute instances.
    """
    # create a new instance of an endpoint implementation
    sonata_api = RestApiEndpoint("127.0.0.1", 5005)
    # connect data centers to this endpoint
    sonata_api.connectDatacenter(dc1)
    sonata_api.connectDatacenter(dc2)
    sonata_api.connectDatacenter(dc3)
    sonata_api.connectDatacenter(dc4)
    # run API endpoint server (in another thread, don't block)
    sonata_api.start()

    """
    6. Finally we are done and can start our network (the emulator).
       We can also enter the Mininet CLI to interactively interact
       with our compute resources (just like in default Mininet).
       But we can also implement fully automated experiments that
       can be executed again and again.
    """
    net.start()
    net.CLI()
    # when the user types exit in the CLI, we stop the emulator
    net.stop()
Example #21
File: base.py  Project: hadik3r/son-emu
class SimpleTestTopology(unittest.TestCase):
    """
        Helper class to do basic test setups.
        s1 -- s2 -- s3 -- ... -- sN
    """

    def __init__(self, *args, **kwargs):
        self.net = None
        self.s = []   # list of switches
        self.h = []   # list of hosts
        self.d = []   # list of docker containers
        self.dc = []  # list of data centers
        self.docker_cli = None
        super(SimpleTestTopology, self).__init__(*args, **kwargs)

    def createNet(
            self,
            nswitches=0, ndatacenter=0, nhosts=0, ndockers=0,
            autolinkswitches=False, controller=Controller, **kwargs):
        """
        Creates a Mininet instance and automatically adds some
        nodes to it.

        Attention, we should always use Mininet's default controller
        for our tests. Only use other controllers if you want to test
        specific controller functionality.
        """
        self.net = DCNetwork(controller=controller, **kwargs)

        # add some switches
        # start from s1 because ovs does not like to have dpid = 0
        # and switch name-number is being used by mininet to set the dpid
        for i in range(1, nswitches+1):
            self.s.append(self.net.addSwitch('s%d' % i))
        # if specified, chain all switches
        if autolinkswitches:
            for i in range(0, len(self.s) - 1):
                self.net.addLink(self.s[i], self.s[i + 1])
        # add some data centers
        for i in range(0, ndatacenter):
            self.dc.append(
                self.net.addDatacenter(
                    'datacenter%d' % i,
                    metadata={"unittest_dc": i}))
        # add some hosts
        for i in range(0, nhosts):
            self.h.append(self.net.addHost('h%d' % i))
        # add some dockers
        for i in range(0, ndockers):
            self.d.append(self.net.addDocker('d%d' % i, dimage="ubuntu:trusty"))

    def startNet(self):
        self.net.start()

    def stopNet(self):
        self.net.stop()

    def getDockerCli(self):
        """
        Helper to interact with local docker instance.
        """
        if self.docker_cli is None:
            self.docker_cli = docker.Client(
                base_url='unix://var/run/docker.sock')
        return self.docker_cli

    def getContainernetContainers(self):
        """
        List the containers managed by containernet
        """
        return self.getDockerCli().containers(filters={"label": "com.containernet"})

    @staticmethod
    def setUp():
        pass

    @staticmethod
    def tearDown():
        cleanup()
        # make sure that all pending docker containers are killed
        with open(os.devnull, 'w') as devnull:
            subprocess.call(
                "sudo docker rm -f $(sudo docker ps --filter 'label=com.containernet' -a -q)",
                stdout=devnull,
                stderr=devnull,
                shell=True)
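A hypothetical concrete test built on this helper (not part of the original file), showing the intended createNet/startNet/stopNet flow:

class testSimpleTopology(SimpleTestTopology):
    def testTwoHostsOneSwitch(self):
        self.createNet(nswitches=1, nhosts=2)
        self.net.addLink(self.h[0], self.s[0])
        self.net.addLink(self.h[1], self.s[0])
        self.startNet()
        # ping returns the packet-loss percentage; expect full reachability
        self.assertEqual(self.net.ping([self.h[0], self.h[1]]), 0.0)
        self.stopNet()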
Example #22
def create_topology1():
    """
    1. Create a data center network object (DCNetwork)
    """
    net = DCNetwork(controller=RemoteController,
                    monitor=False,
                    enable_learning=True)
    """
    2. Add (logical) data centers to the topology
       (each data center is one "bigswitch" in our simplified
        first prototype)
    """
    dc1 = net.addDatacenter("datacenter1")
    dc2 = net.addDatacenter("datacenter2")
    dc3 = net.addDatacenter("long_data_center_name3")
    dc4 = net.addDatacenter(
        "datacenter4",
        metadata={"mydata": "we can also add arbitrary metadata to each DC"})
    """
    3. You can add additional SDN switches for data center
       interconnections to the network.
    """
    s1 = net.addSwitch("s1")
    """
    4. Add links between your data centers and additional switches
       to define your topology.
       These links can use Mininet's features to limit bw, add delay or jitter.
    """
    net.addLink(dc1, dc2)
    net.addLink("datacenter1", s1)
    net.addLink(s1, dc3)
    net.addLink(s1, "datacenter4")
    """
    5. We want to access and control our data centers from the outside,
       e.g., we want to connect an orchestrator to start/stop compute
       resources aka. VNFs (represented by Docker containers in the emulated network)

       So we need to instantiate API endpoints (e.g. a zerorpc or REST
       interface). Depending on the endpoint implementations, we can connect
       one or more data centers to it, which can then be controlled through
       this API, e.g., start/stop/list compute instances.
    """
    # create a new instance of an endpoint implementation
    rapi1 = RestApiEndpoint("127.0.0.1", 5001)
    # connect data centers to this endpoint
    rapi1.connectDatacenter(dc1)
    rapi1.connectDatacenter(dc2)
    rapi1.connectDatacenter(dc3)
    rapi1.connectDatacenter(dc4)
    # run API endpoint server (in another thread, don't block)

    rapi1.start()
    """
    6. Finally we are done and can start our network (the emulator).
       We can also enter the Mininet CLI to interactively interact
       with our compute resources (just like in default Mininet).
       But we can also implement fully automated experiments that
       can be executed again and again.
    """
    net.start()
    net.CLI()
    # when the user types exit in the CLI, we stop the emulator
    net.stop()
Example #23
class ApiBaseHeat(unittest.TestCase):
    """
        Helper class to do basic test setups.
        s1 -- s2 -- s3 -- ... -- sN
    """

    def __init__(self, *args, **kwargs):
        self.net = None
        self.api = []
        self.s = []   # list of switches
        self.h = []   # list of hosts
        self.d = []   # list of docker containers
        self.dc = []  # list of data centers
        self.docker_cli = None
        super(ApiBaseHeat, self).__init__(*args, **kwargs)

    def createNet(
            self,
            nswitches=0, ndatacenter=0, nhosts=0, ndockers=0,
            autolinkswitches=False, controller=Controller, **kwargs):
        """
        Creates a Mininet instance and automatically adds some
        nodes to it.

        Attention, we should always use Mininet's default controller
        for our tests. Only use other controllers if you want to test
        specific controller functionality.
        """
        self.net = DCNetwork(controller=controller, **kwargs)
        for i in range(0, ndatacenter):
            self.api.append(OpenstackApiEndpoint("0.0.0.0", 5000+i))

        # add some switches
        # start from s1 because ovs does not like to have dpid = 0
        # and switch name-number is being used by mininet to set the dpid
        for i in range(1, nswitches+1):
            self.s.append(self.net.addSwitch('s%d' % i))
        # if specified, chain all switches
        if autolinkswitches:
            for i in range(0, len(self.s) - 1):
                self.net.addLink(self.s[i], self.s[i + 1])
        # add some data centers
        for i in range(0, ndatacenter):
            self.dc.append(
                self.net.addDatacenter(
                    'dc%d' % i,
                    metadata={"unittest_dc": i}))
        # connect data centers to the endpoint
        for i in range(0, ndatacenter):
            self.api[i].connect_datacenter(self.dc[i])
            self.api[i].connect_dc_network(self.net)
        # add some hosts
        for i in range(0, nhosts):
            self.h.append(self.net.addHost('h%d' % i))
        # add some dockers
        for i in range(0, ndockers):
            self.d.append(self.net.addDocker('d%d' % i, dimage="ubuntu:trusty"))

    def startApi(self):
        for i in self.api:
            i.start()

    def stopApi(self):
        for i in self.api:
            i.stop()

    def startNet(self):
        self.net.start()

    def stopNet(self):
        self.net.stop()

    def getDockerCli(self):
        """
        Helper to interact with local docker instance.
        """
        if self.docker_cli is None:
            self.docker_cli = docker.Client(
                base_url='unix://var/run/docker.sock')
        return self.docker_cli

    def getContainernetContainers(self):
        """
        List the containers managed by containernet
        """
        return self.getDockerCli().containers(filters={"label": "com.containernet"})

    @staticmethod
    def setUp():
        pass


    def tearDown(self):
        print('->>>>>>> tear everything down ->>>>>>>>>>>>>>>')
        self.stopApi() # stop all flask threads
        self.stopNet() # stop some mininet and containernet stuff
        cleanup()
        # make sure that all pending docker containers are killed
        with open(os.devnull, 'w') as devnull: # kill a possibly running docker process that blocks the open ports
            subprocess.call("kill $(netstat -npl | grep '5000' | grep -o -e'[0-9]\+/docker' | grep -o -e '[0-9]\+')",
                stdout=devnull,
                stderr=devnull,
                shell=True)

        with open(os.devnull, 'w') as devnull:
            subprocess.call(
                "sudo docker rm -f $(sudo docker ps --filter 'label=com.containernet' -a -q)",
                stdout=devnull,
                stderr=devnull,
                shell=True)
Example #24
def create_topology():
    net = DCNetwork(monitor=False, enable_learning=True)

    dc1 = net.addDatacenter("dc1")
    # add OpenStack-like APIs to the emulated DC
    api1 = OpenstackApiEndpoint("0.0.0.0", 6001)
    api1.connect_datacenter(dc1)
    api1.start()
    api1.connect_dc_network(net)
    # add the command line interface endpoint to the emulated DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(dc1)
    rapi1.start()

    info('*** Adding docker containers\n')
    srv = net.addDocker('srv',
                        ip='10.0.0.203',
                        dimage="constancegay/projet_sdci:server")

    time.sleep(5)
    GI = net.addDocker('GI',
                       ip='10.0.0.202',
                       dimage="constancegay/projet_sdci:gateway",
                       environment={
                           "loc_ip": "10.0.0.202",
                           "loc_port": "8181",
                           "loc_name": "GI",
                           "rem_ip": "10.0.0.203",
                           "rem_port": "8080",
                           "rem_name": "srv"
                       })
    time.sleep(5)

    mon = net.addDocker('mon',
                        ip='10.0.0.204',
                        dimage="constancegay/projet_sdci:mon")

    # GFs
    gf1 = net.addDocker('GF1',
                        ip='10.0.0.201',
                        dimage="constancegay/projet_sdci:gateway",
                        environment={
                            "loc_ip": "10.0.0.201",
                            "loc_port": "8282",
                            "loc_name": "GF1",
                            "rem_ip": "10.0.0.202",
                            "rem_port": "8181",
                            "rem_name": "GI"
                        })

    gf2 = net.addDocker('GF2',
                        ip='10.0.0.208',
                        dimage="constancegay/projet_sdci:gateway",
                        environment={
                            "loc_ip": "10.0.0.208",
                            "loc_port": "9004",
                            "loc_name": "GF2",
                            "rem_ip": "10.0.0.202",
                            "rem_port": "8181",
                            "rem_name": "GI"
                        })

    gf3 = net.addDocker('GF3',
                        ip='10.0.0.212',
                        dimage="constancegay/projet_sdci:gateway",
                        environment={
                            "loc_ip": "10.0.0.212",
                            "loc_port": "9008",
                            "loc_name": "GF3",
                            "rem_ip": "10.0.0.202",
                            "rem_port": "8181",
                            "rem_name": "GI"
                        })

    time.sleep(5)
    # ZONE 1 devices
    dev1 = net.addDocker('dev1',
                         ip='10.0.0.205',
                         dimage="constancegay/projet_sdci:dev",
                         environment={
                             "loc_ip": "10.0.0.205",
                             "loc_port": "9001",
                             "loc_name": "dev1",
                             "rem_ip": "10.0.0.201",
                             "rem_port": "8282",
                             "rem_name": "GF1"
                         })
    dev2 = net.addDocker('dev2',
                         ip='10.0.0.206',
                         dimage="constancegay/projet_sdci:dev",
                         environment={
                             "loc_ip": "10.0.0.206",
                             "loc_port": "9002",
                             "loc_name": "dev2",
                             "rem_ip": "10.0.0.201",
                             "rem_port": "8282",
                             "rem_name": "GF1"
                         })
    dev3 = net.addDocker('dev3',
                         ip='10.0.0.207',
                         dimage="constancegay/projet_sdci:dev",
                         environment={
                             "loc_ip": "10.0.0.207",
                             "loc_port": "9003",
                             "loc_name": "dev3",
                             "rem_ip": "10.0.0.201",
                             "rem_port": "8282",
                             "rem_name": "GF1"
                         })

    # ZONE 2 devices
    dev4 = net.addDocker('dev4',
                         ip='10.0.0.209',
                         dimage="constancegay/projet_sdci:dev",
                         environment={
                             "loc_ip": "10.0.0.209",
                             "loc_port": "9005",
                             "loc_name": "dev4",
                             "rem_ip": "10.0.0.208",
                             "rem_port": "9004",
                             "rem_name": "GF2"
                         })
    dev5 = net.addDocker('dev5',
                         ip='10.0.0.210',
                         dimage="constancegay/projet_sdci:dev",
                         environment={
                             "loc_ip": "10.0.0.210",
                             "loc_port": "9006",
                             "loc_name": "dev5",
                             "rem_ip": "10.0.0.208",
                             "rem_port": "9004",
                             "rem_name": "GF2"
                         })
    dev6 = net.addDocker('dev6',
                         ip='10.0.0.211',
                         dimage="constancegay/projet_sdci:dev",
                         environment={
                             "loc_ip": "10.0.0.211",
                             "loc_port": "9007",
                             "loc_name": "dev6",
                             "rem_ip": "10.0.0.208",
                             "rem_port": "9004",
                             "rem_name": "GF2"
                         })

    # ZONE 3 devices
    dev7 = net.addDocker('dev7',
                         ip='10.0.0.213',
                         dimage="constancegay/projet_sdci:dev",
                         environment={
                             "loc_ip": "10.0.0.213",
                             "loc_port": "9009",
                             "loc_name": "dev7",
                             "rem_ip": "10.0.0.212",
                             "rem_port": "9008",
                             "rem_name": "GF3"
                         })
    dev8 = net.addDocker('dev8',
                         ip='10.0.0.214',
                         dimage="constancegay/projet_sdci:dev",
                         environment={
                             "loc_ip": "10.0.0.214",
                             "loc_port": "9010",
                             "loc_name": "dev8",
                             "rem_ip": "10.0.0.212",
                             "rem_port": "9008",
                             "rem_name": "GF3"
                         })
    dev9 = net.addDocker('dev9',
                         ip='10.0.0.215',
                         dimage="constancegay/projet_sdci:dev",
                         environment={
                             "loc_ip": "10.0.0.215",
                             "loc_port": "9011",
                             "loc_name": "dev9",
                             "rem_ip": "10.0.0.212",
                             "rem_port": "9008",
                             "rem_name": "GF3"
                         })

    info('*** Adding switches\n')
    s1 = net.addSwitch('s1')
    s2 = net.addSwitch('s2')
    s3 = net.addSwitch('s3')
    s4 = net.addSwitch('s4')
    s5 = net.addSwitch('s5')

    info('*** Creating links\n')
    net.addLink(s1, srv)
    net.addLink(s1, GI)
    net.addLink(s1, mon)

    net.addLink(s2, s1)
    net.addLink(s2, dc1)

    net.addLink(s3, s2)
    net.addLink(s4, s2)
    net.addLink(s5, s2)

    # ZONE 1
    net.addLink(s3, gf1)
    net.addLink(s3, dev1)
    net.addLink(s3, dev2)
    net.addLink(s3, dev3)

    # ZONE 2
    net.addLink(s4, gf2)
    net.addLink(s4, dev4)
    net.addLink(s4, dev5)
    net.addLink(s4, dev6)

    # ZONE 3
    net.addLink(s5, gf3)
    net.addLink(s5, dev7)
    net.addLink(s5, dev8)
    net.addLink(s5, dev9)

    info('*** Starting network\n')
    net.start()
    info('*** Testing connectivity\n')
    net.ping([srv, dev1])
    info('*** Running CLI\n')
    CLI(net)
    info('*** Stopping network\n')
    net.stop()
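The nine dev* containers above differ only in name, IP, port, and parent gateway. A hypothetical helper (not in the original script) that would remove the repetition:

def add_device(net, name, ip, port, gw_name, gw_ip, gw_port):
    # build one device container wired to its zone gateway
    return net.addDocker(name, ip=ip,
                         dimage="constancegay/projet_sdci:dev",
                         environment={
                             "loc_ip": ip, "loc_port": str(port), "loc_name": name,
                             "rem_ip": gw_ip, "rem_port": str(gw_port), "rem_name": gw_name,
                         })

# e.g. dev1 = add_device(net, 'dev1', '10.0.0.205', 9001, 'GF1', '10.0.0.201', 8282)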
Example #25
# This fragment assumes an existing DCNetwork and datacenter; a minimal setup,
# following the other examples in this listing, would be:
net = DCNetwork(monitor=False, enable_learning=True)
dc1 = net.addDatacenter("dc1")

info('*** Adding controller\n')
# net.addController('c0')

info('*** Adding docker containers\n')
srv = net.addDocker('srv', ip='10.0.0.30', dimage="server:latest")
gwi = net.addDocker('gwi', ip='10.0.0.31', dimage="gwi:latest")
gwf1 = net.addDocker('gwf1', ip='10.0.0.32', dimage="gwf:one")
dev1 = net.addDocker('dev1', ip='10.0.0.33', dimage="dev:one")
gwf2 = net.addDocker('gwf2', ip='10.0.0.34', dimage="gwf:two")
dev2 = net.addDocker('dev2', ip='10.0.0.35', dimage="dev:two")
gwf3 = net.addDocker('gwf3', ip='10.0.0.36', dimage="gwf:three")
dev3 = net.addDocker('dev3', ip='10.0.0.37', dimage="dev:three")

info('*** Adding switches\n')
s1 = net.addSwitch('s1')
s2 = net.addSwitch('s2')
s3 = net.addSwitch('s3')
s4 = net.addSwitch('s4')
s5 = net.addSwitch('s5')

info('*** Creating links\n')
net.addLink(srv, s1)
net.addLink(gwi, s1)
net.addLink(dc1, s1)
net.addLink(gwf1, s3)
net.addLink(dev1, s3)
net.addLink(gwf2, s4)
net.addLink(dev2, s4)
net.addLink(gwf3, s5)
net.addLink(dev3, s5)
Example #26
File: base.py  Project: splietker/son-emu
class SimpleTestTopology(unittest.TestCase):
    """
        Helper class to do basic test setups.
        s1 -- s2 -- s3 -- ... -- sN
    """
    def __init__(self, *args, **kwargs):
        self.net = None
        self.s = []  # list of switches
        self.h = []  # list of hosts
        self.d = []  # list of docker containers
        self.dc = []  # list of data centers
        self.docker_cli = None
        super(SimpleTestTopology, self).__init__(*args, **kwargs)

    def createNet(self,
                  nswitches=0,
                  ndatacenter=0,
                  nhosts=0,
                  ndockers=0,
                  autolinkswitches=False,
                  controller=Controller,
                  **kwargs):
        """
        Creates a Mininet instance and automatically adds some
        nodes to it.

        Attention, we should always use Mininet's default controller
        for our tests. Only use other controllers if you want to test
        specific controller functionality.
        """
        self.net = DCNetwork(controller=controller, **kwargs)

        # add some switches
        # start from s1 because ovs does not like to have dpid = 0
        # and switch name-number is being used by mininet to set the dpid
        for i in range(1, nswitches + 1):
            self.s.append(self.net.addSwitch('s%d' % i))
        # if specified, chain all switches
        if autolinkswitches:
            for i in range(0, len(self.s) - 1):
                self.net.addLink(self.s[i], self.s[i + 1])
        # add some data centers
        for i in range(0, ndatacenter):
            self.dc.append(
                self.net.addDatacenter('datacenter%d' % i,
                                       metadata={"unittest_dc": i}))
        # add some hosts
        for i in range(0, nhosts):
            self.h.append(self.net.addHost('h%d' % i))
        # add some dockers
        for i in range(0, ndockers):
            self.d.append(self.net.addDocker('d%d' % i,
                                             dimage="ubuntu:trusty"))

    def startNet(self):
        self.net.start()

    def stopNet(self):
        self.net.stop()

    def getDockerCli(self):
        """
        Helper to interact with local docker instance.
        """
        if self.docker_cli is None:
            self.docker_cli = docker.APIClient(
                base_url='unix://var/run/docker.sock')
        return self.docker_cli

    def getContainernetContainers(self):
        """
        List the containers managed by containernet
        """
        return self.getDockerCli().containers(
            filters={"label": "com.containernet"})

    @staticmethod
    def setUp():
        pass

    @staticmethod
    def tearDown():
        cleanup()
        # make sure that all pending docker containers are killed
        with open(os.devnull, 'w') as devnull:
            subprocess.call(
                "sudo docker rm -f $(sudo docker ps --filter 'label=com.containernet' -a -q)",
                stdout=devnull,
                stderr=devnull,
                shell=True)
Example #27
def create_topology1():
    # create topology
    net = DCNetwork(controller=RemoteController, monitor=False, enable_learning=False)
    dc1 = net.addDatacenter("dc1")
    dc2 = net.addDatacenter("dc2")
    s1 = net.addSwitch("s1")
    linkopts = dict(delay="1ms",bw=100)
    net.addLink(dc1, s1, **linkopts)
    net.addLink(dc2, s1, **linkopts)

    # add the command line interface endpoint to each DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(dc1)
    rapi1.connectDatacenter(dc2)
    # run API endpoint server (in another thread, don't block)
    rapi1.start()

    # add the SONATA dummy gatekeeper to each DC
    sdkg1 = SonataDummyGatekeeperEndpoint("0.0.0.0", 5000)
    sdkg1.connectDatacenter(dc1)
    sdkg1.connectDatacenter(dc2)
    # run the dummy gatekeeper (in another thread, don't block)
    sdkg1.start()

    # start the emulation platform
    net.start()
    # create hosts and vnfs
    # use ./init_vnfs_rubis for rubis experiments
    # use ./init_vnfs for stratos experiments
    subprocess.call("./init_vnfs_rubis.sh", shell=True)
    subprocess.call("./chain_vnfs.sh", shell=True)

    fw, snort, client, server = net.getNodeByName('fw', 'snort', 'client', 'server')
    print("Waiting for warmup")
    time.sleep(10)
    # run experiment
    # CONFIGURE number of cores
    cores = 4
    for i in range(0, 4):  # set here the number of repetitions
        for fwbw in [5, 50, 100]:  # network bandwidth range for the firewall
            for snortbw in [5, 50, 100]:  # network bandwidth range for the dpi
                # available sizes: '4KB','8KB','16KB','32KB','64KB','128KB','256KB',
                # '512KB','1024KB','2048KB','4096KB','8192KB','16384KB','32768KB'
                for reqsize in ['128KB']:
                    for fwcpu in [5, 50, 100]:  # cpu capacity range for the firewall, 5 means 5% of one cpu
                        for snortcpu in [5, 50, 100]:  # cpu capacity range for the dpi, 5 means 5% of one cpu
                            r = 0
                            # use float division: under Python 2, 5 / (4 * 100)
                            # would be 0 and the CPU limit would be dropped
                            fw.setParam(r, 'setCPUFrac', cpu=fwcpu / (cores * 100.0))
                            snort.setParam(r, 'setCPUFrac', cpu=snortcpu / (cores * 100.0))
                            strcmd = "%s %d %d %d %d %s %d &" % ('./start_firewall.sh', fwbw, snortbw, fwcpu, snortcpu, reqsize, i)
                            fw.cmd(strcmd)
                            time.sleep(1)
                            strcmd = "%s %d %d %d %d %s %d &" % ('./start_snort.sh', fwbw, snortbw, fwcpu, snortcpu, reqsize, i)
                            snort.cmd(strcmd)
                            strcmd = "%s %d %d %d %d %s %d &" % ('./start_server.sh', fwbw, snortbw, fwcpu, snortcpu, reqsize, i)
                            server.cmd(strcmd)
                            time.sleep(1)
                            client.cmd("ping -c 2 10.0.0.50 >> log-ping")
                            client.cmd("ping -c 2 10.0.0.50 >> log-ping")
                            strcmd = "%s %d %d %d %d %s %d &" % ('./start_client.sh', fwbw, snortbw, fwcpu, snortcpu, reqsize, i)
                            client.cmd(strcmd)
                            # the parameter for iperfc is the target bandwidth
                            strcmd = "%s %d" % ('./start_iperfc.sh', 30)
                            client.cmd(strcmd)
                            print("Waiting for experiment %d-%d-%d-%d-%s-%d" % (fwbw, snortbw, fwcpu, snortcpu, reqsize, i))
                            # use 180 for the rubis workload, 100 for stratos
                            time.sleep(180)
                            print("Copying results and cleaning up")
                            strcmd = "scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no guiltiness* [email protected]:/home/vagrant/son-emu/logs/"
                            fw.cmd(strcmd)
                            snort.cmd(strcmd)
                            strcmd = "scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no log* [email protected]:/home/vagrant/son-emu/logs/"
                            client.cmd(strcmd)
                            server.cmd(strcmd)
                            fw.cmd("rm guiltiness*")
                            snort.cmd("rm guiltiness*")
                            client.cmd("rm log*")
                            server.cmd("rm log*")
Example #28
File: topology.py  Project: Thomas-Cnt/sdci
def create_topology():
    net = DCNetwork(monitor=False, enable_learning=True)

    info('*** Adding datacenters\n')
    dc1 = net.addDatacenter("dc1")
    dc2 = net.addDatacenter("dc2")
    # add OpenStack-like APIs to the emulated DC
    api1 = OpenstackApiEndpoint("0.0.0.0", 6001)
    api1.connect_datacenter(dc1)
    api1.connect_datacenter(dc2)
    api1.start()
    api1.connect_dc_network(net)
    # add the command line interface endpoint to the emulated DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(dc1)
    rapi1.connectDatacenter(dc2)
    rapi1.start()

    # params
    SRV_PORT = 8080

    info('*** Adding switches\n')
    sw1 = net.addSwitch('sw1')
    sw2 = net.addSwitch('sw2')

    info('*** Adding docker containers\n')

    server = net.addDocker(
        'server',
        ip='10.100.0.10',
        dimage="alpine:4",
        dcmd="node ./server.js --local_ip '127.0.0.1' --local_port 8080 --local_name 'srv' &")

    GW = net.addDocker('GW', ip='10.100.0.4', dimage="alpine:4")

    GF1 = net.addDocker('GF1', ip='10.100.0.1', dimage="alpine:4")

    GF2 = net.addDocker('GF2', ip='10.100.0.2', dimage="alpine:4")
    GF3 = net.addDocker('GF3', ip='10.100.0.3', dimage="alpine:4")

    net.addLink(server, sw1)
    net.addLink(GW, sw1)
    net.addLink(sw1, sw2)
    net.addLink(GF1, sw2)
    net.addLink(GF2, sw2)
    net.addLink(GF3, sw2)

    info('*** Starting network\n')
    net.start()

    time.sleep(5)

    # launch gateway GW
    info("//// Starting GW gateway\n")
    GW.cmd(
        "node ./gateway.js --local_ip '127.0.0.1' --local_port 9000 --local_name 'GW' --remote_ip '10.100.0.10' --remote_port 8080 --remote_name 'srv' &"
    )
    info('//// started !\n')

    time.sleep(5)

    # launch gateways GFs

    info("//// Starting GF1 gateway\n")
    GF1.cmd(
        "node ./gateway.js --local_ip '127.0.0.1' --local_port 5000 --local_name 'GF1' --remote_ip '10.100.0.4' --remote_port 9000 --remote_name 'GW' &"
    )
    info('//// started !\n')

    info("//// Starting GF2 gateway\n")
    GF2.cmd(
        "node ./gateway.js --local_ip '127.0.0.1' --local_port 5000 --local_name 'GF2' --remote_ip '10.100.0.4' --remote_port 9000 --remote_name 'GW' &"
    )
    info('//// started !\n')

    info("//// Starting GF3 gateway\n")
    GF3.cmd(
        "node ./gateway.js --local_ip '127.0.0.1' --local_port 5000 --local_name 'GF3' --remote_ip '10.100.0.4' --remote_port 9000 --remote_name 'GW' &"
    )
    info('//// started !\n')

    # launch devices

    info("//// Starting devices on GF1\n")
    GF1.cmd(
        "node ./device.js --local_ip '127.0.0.1' --local_port 5001 --local_name 'device-1' --remote_ip '127.0.0.1' --remote_port 5000 --remote_name 'GF1' --send_period 3 &"
    )
    info('//// started !\n')

    info('*** Testing connectivity\n')
    net.ping([GW, GF1])
    info('*** Running CLI\n')
    net.CLI()
    info('*** Stopping network\n')
    net.stop()
Example #29
# This fragment assumes an existing DCNetwork; a minimal setup would be:
net = DCNetwork(monitor=False, enable_learning=True)

X = "krustylebot/repo:sdci_containernet"

info('*** Adding docker containers using krustylebot/repo:sdci_containernet images\n')
server = net.addDocker('server', ip='10.0.0.10', dimage=X, dcmd="sh -c 'cd /Projet-SDCI/docker && git pull && sh script_server.sh 10.0.0.10; tail -f /dev/null'")
gwi1 = net.addDocker('gwi1', ip='10.0.0.11', dimage=X, dcmd="sh -c 'cd /Projet-SDCI/docker && git pull && sh script_gi.sh 10.0.0.11 10.0.0.10 gwi1; tail -f /dev/null'")
gwf1 = net.addDocker('gwf1', ip='10.0.0.12', dimage=X, dcmd="sh -c 'cd /Projet-SDCI/docker && git pull && sh script_gf.sh 10.0.0.12 10.0.0.11 gwf1 gwi1 300; tail -f /dev/null'")
gwf2 = net.addDocker('gwf2', ip='10.0.0.13', dimage=X, dcmd="sh -c 'cd /Projet-SDCI/docker && git pull && sh script_gf.sh 10.0.0.13 10.0.0.11 gwf2 gwi1 300; tail -f /dev/null'")
gwf3 = net.addDocker('gwf3', ip='10.0.0.14', dimage=X, dcmd="sh -c 'cd /Projet-SDCI/docker && git pull && sh script_gf.sh 10.0.0.14 10.0.0.11 gwf3 gwi1 300; tail -f /dev/null'")

monitoring = net.addDocker('monitoring', ip='10.0.0.15', dimage=X, dcmd="sh -c 'cd /Projet-SDCI && git pull; cd Monitoring; sh monitoring.sh; tail -f /dev/null'")

dc = net.addDatacenter("dc")

info('*** Adding switches\n')
s1 = net.addSwitch('s1')

info('*** Creating links\n')
net.addLink(server, s1, delay="20ms")
net.addLink(gwi1, s1, delay="20ms")
net.addLink(gwf1, s1, delay="20ms")
net.addLink(gwf2, s1, delay="20ms")
net.addLink(gwf3, s1, delay="20ms")
net.addLink(monitoring, s1, delay="20ms")
net.addLink(dc, s1, delay="20ms")


info('*** Starting RestApi\n')
rapi1 = RestApiEndpoint("0.0.0.0", 5001)
rapi1.connectDCNetwork(net)
rapi1.connectDatacenter(dc)
rapi1.start()

info('*** Starting network\n')
net.start()
net.CLI()
# when the user types exit in the CLI, we stop the emulator
net.stop()
Example #30
class ApiBaseOpenStack(unittest.TestCase):
    """
        Helper class to do basic test setups.
        s1 -- s2 -- s3 -- ... -- sN
    """
    def __init__(self, *args, **kwargs):
        self.net = None
        self.api = []
        self.s = []  # list of switches
        self.h = []  # list of hosts
        self.d = []  # list of docker containers
        self.dc = []  # list of data centers
        self.docker_cli = None
        super(ApiBaseOpenStack, self).__init__(*args, **kwargs)

    def createNet(self,
                  nswitches=0,
                  ndatacenter=0,
                  nhosts=0,
                  ndockers=0,
                  autolinkswitches=False,
                  controller=Controller,
                  **kwargs):
        """
        Creates a Mininet instance and automatically adds some
        nodes to it.

        Attention, we should always use Mininet's default controller
        for our tests. Only use other controllers if you want to test
        specific controller functionality.
        """
        self.net = DCNetwork(controller=controller, **kwargs)
        for i in range(0, ndatacenter):
            self.api.append(OpenstackApiEndpoint("0.0.0.0", 15000 + i))

        # add some switches
        # start from s1 because ovs does not like to have dpid = 0
        # and switch name-number is being used by mininet to set the dpid
        for i in range(1, nswitches + 1):
            self.s.append(self.net.addSwitch('s%d' % i))
        # if specified, chain all switches
        if autolinkswitches:
            for i in range(0, len(self.s) - 1):
                self.net.addLink(self.s[i], self.s[i + 1])
            # additionally link s3 back to s1, closing a loop among the
            # first three switches (requires nswitches >= 3)
            self.net.addLink(self.s[2], self.s[0])

        # add some data centers
        for i in range(0, ndatacenter):
            self.dc.append(
                self.net.addDatacenter('dc%d' % i, metadata={"unittest_dc":
                                                             i}))
        # connect dc0's internal switch to s1
        self.net.addLink(self.dc[0].switch, self.s[0])
        # connect data centers to the endpoint
        for i in range(0, ndatacenter):
            self.api[i].connect_datacenter(self.dc[i])
            self.api[i].connect_dc_network(self.net)
        # add some hosts
        for i in range(0, nhosts):
            self.h.append(self.net.addHost('h%d' % i))
        # add some dockers
        for i in range(0, ndockers):
            self.d.append(self.net.addDocker('d%d' % i,
                                             dimage="ubuntu:trusty"))

    def startApi(self):
        for i in self.api:
            i.start(wait_for_port=True)

    def stopApi(self):
        for i in self.api:
            i.manage.stop_floating_network()
            i.stop()

    def startNet(self):
        self.net.start()

    def stopNet(self):
        self.net.stop()

    def getDockerCli(self):
        """
        Helper to interact with local docker instance.
        """
        if self.docker_cli is None:
            self.docker_cli = docker.Client(
                base_url='unix://var/run/docker.sock')
        return self.docker_cli

    def getContainernetContainers(self):
        """
        List the containers managed by containernet
        """
        return self.getDockerCli().containers(
            filters={"label": "com.containernet"})

    @staticmethod
    def setUp():
        pass

    def tearDown(self):
        time.sleep(2)
        print('->>>>>>> tear everything down ->>>>>>>>>>>>>>>')
        self.stopApi()  # stop all flask threads
        self.stopNet()  # stop some mininet and containernet stuff
        cleanup()
        # make sure that all pending docker containers are killed
        # kill a possibly running docker process that blocks the open ports
        with open(os.devnull, 'w') as devnull:
            subprocess.call(
                "kill $(netstat -npl | grep '15000' | grep -o -e '[0-9]\+/docker' | grep -o -e '[0-9]\+')",
                stdout=devnull,
                stderr=devnull,
                shell=True)

        with open(os.devnull, 'w') as devnull:
            subprocess.call(
                "sudo docker rm -f $(sudo docker ps --filter 'label=com.containernet' -a -q)",
                stdout=devnull,
                stderr=devnull,
                shell=True)
        time.sleep(2)
Example #31
def create_topology(httpmode=False, port_default=8888, device_rate=1500):
    net = DCNetwork(monitor=False, enable_learning=True)

    DC = net.addDatacenter("DC")

    # add OpenStack-like APIs to the emulated DC
    api1 = OpenstackApiEndpoint("0.0.0.0", 6001)
    api1.connect_datacenter(DC)
    api1.start()
    api1.connect_dc_network(net)

    # add the command line interface endpoint to the emulated DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(DC)
    rapi1.start()

    #Generation Switches
    s1 = net.addSwitch('s1')
    s2 = net.addSwitch('s2')
    s3 = net.addSwitch('s3')
    s4 = net.addSwitch('s4')

    S = createHost(httpmode, net, 'S', "host:server")
    GI = createHost(httpmode, net, 'GI', "host:gateway")
    GFA = createHost(httpmode, net, 'GFA', "host:gwfinal")
    GFB = createHost(httpmode, net, 'GFB', "host:gwfinal")
    GFC = createHost(httpmode, net, 'GFC', "host:gwfinal")

    #Generation Links
    net.addLink(S, s1)
    net.addLink(GI, s2)
    net.addLink(GFA, s3)
    net.addLink(GFB, s3)
    net.addLink(GFC, s4)

    net.addLink(s1, s2)
    net.addLink(s2, s3)
    net.addLink(s2, s4)
    net.addLink(DC, s4)

    #Do not remove
    net.start()

    #Run gateways and devices
    print("Starting Server node")
    S.cmd("startup --local_ip 10.0.0.1 --local_port 8888 --local_name srv")
    print("Waiting for server node to complete startup")
    time.sleep(2)
    print("Starting GI node")
    GI.cmd(
        "startup --local_ip 10.0.0.2 --local_port 8888 --local_name gwi --remote_ip 10.0.0.1 --remote_port 8888 --remote_name srv"
    )
    print("Waiting for GI node to complete startup")
    time.sleep(2)
    print("Starting GFA node")
    GFA.cmd(
        "startup --local_ip 10.0.0.3 --local_port 8888 --local_name gwfa --remote_ip 10.0.0.2 --remote_port 8888 --remote_name gwi"
    )
    print("Waiting for GFA node to complete startup")
    time.sleep(2)
    print("Starting GFA devices")
    GFA.cmd("start_devices 10.0.0.3 9001 {0} gwfa {1}".format(
        port_default, device_rate))
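    # The positional arguments expand to:
    #   start_devices <gw_ip> 9001 <port_default> <gw_name> <device_rate>
    # Their exact semantics come from the start_devices script baked into
    # the custom image.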
    print("Starting GFB node")
    GFB.cmd(
        "startup --local_ip 10.0.0.4 --local_port 8888 --local_name gwfb --remote_ip 10.0.0.2 --remote_port 8888 --remote_name gwi"
    )
    print("Waiting for GFB node to complete startup")
    time.sleep(2)
    print("Starting GFB devices")
    GFB.cmd("start_devices 10.0.0.4 9001 {0} gwfb {1}".format(
        port_default, device_rate))
    print("Starting GFC node")
    GFC.cmd(
        "startup --local_ip 10.0.0.5 --local_port 8888 --local_name gwfc --remote_ip 10.0.0.2 --remote_port 8888 --remote_name gwi"
    )
    print("Waiting for GFC node to complete startup")
    time.sleep(2)
    print("Starting GFC devices")
    GFC.cmd("start_devices 10.0.0.5 9001 {0} gwfc {1}".format(
        port_default, device_rate))
    #Start the command line
    net.CLI()
    # when the user types exit in the CLI, we stop the emulator
    net.stop()
Example #32
def create_topology1():
    """
    1. Create a data center network object (DCNetwork) with monitoring enabled
    """
    net = DCNetwork(monitor=True, enable_learning=False)
    """
    1b. Add endpoint APIs for the whole DCNetwork,
        to access and control the networking from outside.
        e.g., to setup forwarding paths between compute
        instances aka. VNFs (represented by Docker containers), passing through
        different switches and datacenters of the emulated topology
    """
    # create monitoring api endpoint for backwards compatibility with zerorpc api
    mon_api = ZeroRpcApiEndpointDCNetwork("0.0.0.0", 5151)
    mon_api.connectDCNetwork(net)
    mon_api.start()
    """
    2. Add (logical) data centers to the topology
       (each data center is one "bigswitch" in our simplified
        first prototype)
    """
    dc1 = net.addDatacenter("datacenter1")
    dc2 = net.addDatacenter("datacenter2")
    """
    3. You can add additional SDN switches for data center
       interconnections to the network.
    """
    s1 = net.addSwitch("s1")
    """
    4. Add links between your data centers and additional switches
       to define your topology.
       These links can use Mininet's features to limit bw, add delay or jitter.
    """

    net.addLink(dc1, s1)
    net.addLink(s1, dc2)
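    # The links above can also shape traffic, e.g. (a sketch; `delay` is
    # used in other examples in this collection, and `bw` in Mbit/s is a
    # standard Mininet TCLink option):
    #   net.addLink(dc1, s1, delay="10ms", bw=100)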
    """
    5. We want to access and control our data centers from the outside,
       e.g., we want to connect an orchestrator to start/stop compute
       resources aka. VNFs (represented by Docker containers in the emulated network)

       So we need to instantiate API endpoints (e.g. a zerorpc or REST
       interface). Depending on the endpoint implementations, we can connect
       one or more data centers to it, which can then be controlled through
       this API, e.g., start/stop/list compute instances.
    """
    # keep the old zeroRPC interface for the prometheus metric query test
    zapi1 = ZeroRpcApiEndpoint("0.0.0.0", 4242)
    # connect data centers to this endpoint
    zapi1.connectDatacenter(dc1)
    zapi1.connectDatacenter(dc2)
    # run API endpoint server (in another thread, don't block)
    zapi1.start()

    # create a new instance of an endpoint implementation
    # the restapi handles all compute, networking and monitoring commands in one api endpoint
    api1 = RestApiEndpoint("0.0.0.0", 5001)
    # connect data centers to this endpoint
    api1.connectDatacenter(dc1)
    api1.connectDatacenter(dc2)
    # connect total network also, needed to do the chaining and monitoring
    api1.connectDCNetwork(net)
    # run API endpoint server (in another thread, don't block)
    api1.start()
    """
    5.1. For our example, we create a second endpoint to illustrate that
         this is supported by our design. This feature allows us to have
         one API endpoint for each data center. This makes the emulation
         environment more realistic because you can easily create one
         OpenStack-like REST API endpoint for *each* data center.
         This will look like a real-world multi PoP/data center deployment
         from the perspective of an orchestrator.
    """
    #zapi2 = ZeroRpcApiEndpoint("0.0.0.0", 4343)
    #zapi2.connectDatacenter(dc3)
    #zapi2.connectDatacenter(dc4)
    #zapi2.start()
    """
    6. Finally we are done and can start our network (the emulator).
       We can also enter the Mininet CLI to interactively interact
       with our compute resources (just like in default Mininet).
       But we can also implement fully automated experiments that
       can be executed again and again.
    """
    net.start()
    net.CLI()
    # when the user types exit in the CLI, we stop the emulator
    net.stop()
Example #33
def create_topology1():
    """
    1. Create a data center network object (DCNetwork) with monitoring enabled
    """
    net = DCNetwork(monitor=True, enable_learning=False)

    """
    1b. Add endpoint APIs for the whole DCNetwork,
        to access and control the networking from outside.
        e.g., to setup forwarding paths between compute
        instances aka. VNFs (represented by Docker containers), passing through
        different switches and datacenters of the emulated topology
    """
    mon_api = ZeroRpcApiEndpointDCNetwork("0.0.0.0", 5151)
    mon_api.connectDCNetwork(net)
    mon_api.start()

    """
    2. Add (logical) data centers to the topology
       (each data center is one "bigswitch" in our simplified
        first prototype)
    """
    dc1 = net.addDatacenter("datacenter1")
    dc2 = net.addDatacenter("datacenter2")
    #dc3 = net.addDatacenter("long_data_center_name3")
    #dc4 = net.addDatacenter(
    #    "datacenter4",
    #    metadata={"mydata": "we can also add arbitrary metadata to each DC"})

    """
    3. You can add additional SDN switches for data center
       interconnections to the network.
    """
    s1 = net.addSwitch("s1")

    """
    4. Add links between your data centers and additional switches
       to define your topology.
       These links can use Mininet's features to limit bw, add delay or jitter.
    """
    #net.addLink(dc1, dc2, delay="10ms")
    #net.addLink(dc1, dc2)
    net.addLink(dc1, s1)
    net.addLink(s1, dc2)
    #net.addLink("datacenter1", s1, delay="20ms")
    #net.addLink(s1, dc3)
    #net.addLink(s1, "datacenter4")


    """
    5. We want to access and control our data centers from the outside,
       e.g., we want to connect an orchestrator to start/stop compute
       resources aka. VNFs (represented by Docker containers in the emulated network)

       So we need to instantiate API endpoints (e.g. a zerorpc or REST
       interface). Depending on the endpoint implementations, we can connect
       one or more data centers to it, which can then be controlled through
       this API, e.g., start/stop/list compute instances.
    """
    # create a new instance of an endpoint implementation
    zapi1 = ZeroRpcApiEndpoint("0.0.0.0", 4242)
    # connect data centers to this endpoint
    zapi1.connectDatacenter(dc1)
    zapi1.connectDatacenter(dc2)
    #zapi1.connectDatacenter(dc3)
    #zapi1.connectDatacenter(dc4)
    # run API endpoint server (in another thread, don't block)
    zapi1.start()

    """
    5.1. For our example, we create a second endpoint to illustrate that
         this is supported by our design. This feature allows us to have
         one API endpoint for each data center. This makes the emulation
         environment more realistic because you can easily create one
         OpenStack-like REST API endpoint for *each* data center.
         This will look like a real-world multi PoP/data center deployment
         from the perspective of an orchestrator.
    """
    #zapi2 = ZeroRpcApiEndpoint("0.0.0.0", 4343)
    #zapi2.connectDatacenter(dc3)
    #zapi2.connectDatacenter(dc4)
    #zapi2.start()

    """
    6. Finally we are done and can start our network (the emulator).
       We can also enter the Mininet CLI to interactively interact
       with our compute resources (just like in default Mininet).
       But we can also implement fully automated experiments that
       can be executed again and again.
    """
    net.start()
    net.CLI()
    # when the user types exit in the CLI, we stop the emulator
    net.stop()
Example #34
File: topology.py / Project: diarraas/SDCI
def create_topology():
	net = DCNetwork(monitor=False, enable_learning=True,autoSetMacs=True)

	dc1 = net.addDatacenter("dc1")
	# add OpenStack-like APIs to the emulated DC
	api1 = OpenstackApiEndpoint("0.0.0.0", 6001)
	api1.connect_datacenter(dc1)
	api1.start()
	api1.connect_dc_network(net)
	# add the command line interface endpoint to the emulated DC (REST API)
	rapi1 = RestApiEndpoint("0.0.0.0", 5001)
	rapi1.connectDCNetwork(net)
	rapi1.connectDatacenter(dc1)
	rapi1.start()

	s1 = net.addSwitch('s1')
	s2 = net.addSwitch('s2')
	s3 = net.addSwitch('s3')
	s4 = net.addSwitch('s4')

	srv = net.addDocker('srv', ip='192.168.0.1', dimage="server:latest")
	time.sleep(5)
	gwi = net.addDocker('gwi', ip='192.168.0.254', dimage="gateway:latest")
	time.sleep(5)
	#First cluster
	gwf1 = net.addDocker('gwf1', ip='192.168.0.252', dimage="gwf1:latest")
	device11 = net.addDocker('device11', ip='192.168.0.241', dimage="device1:latest")
	time.sleep(5)
	device12 = net.addDocker('device12', ip='192.168.0.242', dimage="device1:latest")
	device13 = net.addDocker('device13', ip='192.168.0.243', dimage="device1:latest")
	
	#Second cluster
	gwf2 = net.addDocker('gwf2', ip='192.168.0.251', dimage="gwf2:latest")
	time.sleep(5)
	device21 = net.addDocker('device21', ip='192.168.0.231', dimage="device2:latest")
	device22 = net.addDocker('device22', ip='192.168.0.232', dimage="device2:latest")
	device23 = net.addDocker('device23', ip='192.168.0.233', dimage="device2:latest")
	
	#Third cluster
	gwf3 = net.addDocker('gwf3', ip='192.168.0.250', dimage="gwf3:latest")
	time.sleep(5)
	device31 = net.addDocker('device31', ip='192.168.0.221', dimage="device3:latest")
	device32 = net.addDocker('device32', ip='192.168.0.222', dimage="device3:latest")
	device33 = net.addDocker('device33', ip='192.168.0.223', dimage="device3:latest")
		
	#Switch links
	net.addLink(s1, s2)
	net.addLink(s2, s3)
	net.addLink(s2, s4)
	net.addLink(s4,dc1)
	net.addLink(s1, srv)
	net.addLink(s2, gwi)

	#First cluster link
	net.addLink(s3, gwf1)
	net.addLink(gwf1,device11)
	net.addLink(gwf1,device12)
	net.addLink(gwf1,device13)
	
	#Second cluster link
	net.addLink(s3, gwf2)
	net.addLink(gwf2,device21)
	net.addLink(gwf2,device22)
	net.addLink(gwf2,device23)
	
	#Third cluster link
	net.addLink(s4, gwf3)
	net.addLink(gwf3,device31)
	net.addLink(gwf3,device32)
	net.addLink(gwf3,device33)
	

	net.start()
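	# The startup commands below are left disabled (wrapped in a string
	# literal); the custom images presumably start these services through
	# their own entrypoints.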
	"""
  srv.cmd("node srv/server.js --local_ip '192.168.0.1' --local_port 8080 --local_name 'srv'")
  time.sleep(2)

  gwi.cmd("node srv/gateway.js --local_ip '192.168.0.254' --local_port 8181 --local_name 'gwi1' --remote_ip '192.168.0.1' --remote_port 8080 --remote_name 'srv'")
  time.sleep(2)

  gwf1.cmd("node srv/gateway.js --local_ip '192.168.0.252' --local_port 8282 --local_name 'gwf1' --remote_ip '192.168.0.254' --remote_port 8181 --remote_name 'gwi1'")
  time.sleep(2)
  device11.cmd("node srv/device.js --local_ip '192.168.0.241' --local_port 9001 --local_name 'device11' --remote_ip '192.168.0.252' --remote_port 8282 --remote_name 'gwf1' --send_period 3000")
  device12.cmd("node srv/device.js --local_ip '192.168.0.242' --local_port 9001 --local_name 'device12' --remote_ip '192.168.0.252' --remote_port 8282 --remote_name 'gwf1' --send_period 3000")
  device13.cmd("node srv/device.js --local_ip '192.168.0.243' --local_port 9001 --local_name 'device13' --remote_ip '192.168.0.252' --remote_port 8282 --remote_name 'gwf1' --send_period 3000")

  gwf2.cmd("node srv/gateway.js --local_ip '192.168.0.251' --local_port 8282 --local_name 'gwf2' --remote_ip '192.168.0.254' --remote_port 8181 --remote_name 'gwi1'")
  time.sleep(2)
  device21.cmd("node srv/device.js --local_ip '192.168.0.231' --local_port 9001 --local_name 'device21' --remote_ip '192.168.0.251' --remote_port 8282 --remote_name 'gwf2' --send_period 3000")
  device22.cmd("node srv/device.js --local_ip '192.168.0.232' --local_port 9001 --local_name 'device22' --remote_ip '192.168.0.251' --remote_port 8282 --remote_name 'gwf2' --send_period 3000")
  device23.cmd("node srv/device.js --local_ip '192.168.0.233' --local_port 9001 --local_name 'device23' --remote_ip '192.168.0.251' --remote_port 8282 --remote_name 'gwf2' --send_period 3000")

  gwf3.cmd("node srv/gateway.js --local_ip '192.168.0.250' --local_port 8282 --local_name 'gwf3' --remote_ip '192.168.0.254' --remote_port 8181 --remote_name 'gwi1'")
  time.sleep(2)
  device31.cmd("node srv/device.js --local_ip '192.168.0.221' --local_port 9001 --local_name 'device31' --remote_ip '192.168.0.250' --remote_port 8282 --remote_name 'gwf3' --send_period 3000")
  device32.cmd("node srv/device.js --local_ip '192.168.0.222' --local_port 9001 --local_name 'device32' --remote_ip '192.168.0.250' --remote_port 8282 --remote_name 'gwf3' --send_period 3000")
  device33.cmd("node srv/device.js --local_ip '192.168.0.223' --local_port 9001 --local_name 'device33' --remote_ip '192.168.0.250' --remote_port 8282 --remote_name 'gwf3' --send_period 3000")
		"""
	net.CLI()
	# when the user types exit in the CLI, we stop the emulator
	net.stop()