Example #1
def DemoTopology():
    net = DCNetwork(monitor=True, enable_learning=True)
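    # monitor=True additionally starts vim-emu's monitoring framework;
    # enable_learning=True makes the emulated switches behave as learning
    # switches (assumption: semantics of DCNetwork's flags, not stated on this page)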

    dc1 = net.addDatacenter("osm-pop1")
    dc2 = net.addDatacenter("osm-pop2")

    s1 = net.addSwitch("s1")

    net.addLink(dc1, s1)
    net.addLink(dc2, s1)

    # add OpenStack-like APIs to the emulated DC
    api1 = OpenstackApiEndpoint("127.0.0.1", 6001)
    api2 = OpenstackApiEndpoint("127.0.0.1", 6002)

    api1.connect_datacenter(dc1)
    api2.connect_datacenter(dc2)

    api1.start()
    api2.start()

    api1.connect_dc_network(net)
    api2.connect_dc_network(net)

    # add the command line interface endpoint to the emulated DC (REST API)
    Rapi = RestApiEndpoint("0.0.0.0", 5001)
    Rapi.connectDCNetwork(net)
    Rapi.connectDatacenter(dc1)
    Rapi.connectDatacenter(dc2)
    Rapi.start()

    net.start()
    net.CLI()
    # when the user types exit in the CLI, we stop the emulator
    net.stop()
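
All snippets on this page omit their import headers. A minimal header covering the classes they use, sketched from the imports shown in Example #27 below (exact module paths may vary between vim-emu releases), would be:

from mininet.log import setLogLevel, info      # Mininet logging helpers
from mininet.node import RemoteController      # external SDN controller
from mininet.link import TCLink                # links with delay/bandwidth limits
from emuvim.dcemulator.net import DCNetwork
from emuvim.api.rest.rest_api_endpoint import RestApiEndpoint
from emuvim.api.openstack.openstack_api_endpoint import OpenstackApiEndpoint
from emuvim.api.sonata import SonataDummyGatekeeperEndpoint
from emuvim.api.tango import TangoLLCMEndpoint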
Example #2
def create_topology():
    net = DCNetwork(monitor=False, enable_learning=True)

    dc1 = net.addDatacenter("dc1")
    # add OpenStack-like APIs to the emulated DC
    api1 = OpenstackApiEndpoint("0.0.0.0", 6001)
    api1.connect_datacenter(dc1)
    api1.start()
    api1.connect_dc_network(net)
    # add the command line interface endpoint to the emulated DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(dc1)
    rapi1.start()
    
    s1 = net.addSwitch('s1')
    h1 = net.addHost('h1')
    h2 = net.addDocker('h2', dimage='host:server')
    net.addLink(h1, s1, delay='20ms')
    net.addLink(h2, s1, delay='20ms')
    net.addLink(dc1, s1, delay='20ms')
    
    net.start()
    net.CLI()
    # when the user types exit in the CLI, we stop the emulator
    net.stop()
Example #3
def create_topology():
    net = DCNetwork(monitor=False, enable_learning=True)
    # create two data centers
    dc1 = net.addDatacenter("dc1")
    dc2 = net.addDatacenter("dc2")
    # interconnect data centers
    net.addLink(dc1, dc2, delay="20ms")
    # add the command line interface endpoint to the emulated DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(dc1)
    rapi1.connectDatacenter(dc2)
    rapi1.start()
    # add the 5GTANGO lightweight life cycle manager (LLCM) to the topology
    llcm1 = TangoLLCMEndpoint("0.0.0.0",
                              5000,
                              deploy_sap=False,
                              placement_algorithm_obj=StaticConfigPlacement(
                                  "~/static_placement.yml"))
    llcm1.connectDatacenter(dc1)
    llcm1.connectDatacenter(dc2)
    # run the dummy gatekeeper (in another thread, don't block)
    llcm1.start()
    # start the emulation and enter interactive CLI
    net.start()
    net.CLI()
    # when the user types exit in the CLI, we stop the emulator
    net.stop()
Example #4
def create_topology1():
    # create topology
    net = DCNetwork(controller=RemoteController, monitor=False, enable_learning=True)
    # add data centers
    dc1 = net.addDatacenter("dc1")

    # add REST control endpoints to datacenter (to be used with son-emu-cli)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(dc1)
    rapi1.start()
    
    # add OpenStack-like interface endpoints to dc1
    api1 = OpenstackApiEndpoint("0.0.0.0", 6001)
    # connect PoPs
    api1.connect_datacenter(dc1)
    # connect network
    api1.connect_dc_network(net)
    # start
    api1.start()

    # start the emulation platform
    net.start()
    net.CLI()
    net.stop()
Example #5
def create_topology1():
    # create topology
    net = DCNetwork(controller=RemoteController, monitor=True, enable_learning=True)
    dc1 = net.addDatacenter("dc1")


    # add the command line interface endpoint to each DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(dc1)
    # run API endpoint server (in another thread, don't block)
    rapi1.start()


    # specify a vnfd file to be deployed as internal SAP:
    sap_vnfd = 'custom_sap_vnfd.yml'
    dir_path = os.path.dirname(__file__)
    sap_vnfd_path = os.path.join(dir_path, sap_vnfd)
    sap_vnfd_path = None  # overridden: fall back to the gatekeeper's built-in default SAP VNFD
    # add the SONATA dummy gatekeeper to each DC
    sdkg1 = SonataDummyGatekeeperEndpoint("0.0.0.0", 5000, deploy_sap=True, auto_deploy=True,
                                          docker_management=True, auto_delete=True,
                                          sap_vnfd_path=sap_vnfd_path)
    sdkg1.connectDatacenter(dc1)
    # run the dummy gatekeeper (in another thread, don't block)
    sdkg1.start()

    # start the emulation platform
    net.start()
    net.CLI()
    net.stop()
Example #6
def create_topology():
    net = DCNetwork(monitor=False, enable_learning=True)

    dc1 = net.addDatacenter("dc1")
    # add OpenStack-like APIs to the emulated DC
    api1 = OpenstackApiEndpoint("0.0.0.0", 6001)
    api1.connect_datacenter(dc1)
    api1.start()
    api1.connect_dc_network(net)
    # add the command line interface endpoint to the emulated DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(dc1)
    rapi1.start()

    s1 = net.addSwitch('s1')
    d1 = net.addDocker('d1', ip='10.100.0.1', dimage="ubuntu:trusty")
    d2 = net.addDocker('d2', ip='10.100.0.2', dimage="ubuntu:trusty")
    net.addLink(s1, d1)
    net.addLink(s1, d2)
    net.addLink(s1, dc1)

    net.start()
    net.CLI()
    # when the user types exit in the CLI, we stop the emulator
    net.stop()
Example #7
def create_topology1():
    # create topology
    net = DCNetwork(controller=RemoteController,
                    monitor=True,
                    enable_learning=True)

    dc1 = net.addDatacenter("dc1")
    net.addLink(dc1, net.addSwitch("s1"), delay="10ms")

    # add the command line interface endpoint to each DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(dc1)
    # run API endpoint server (in another thread, don't block)
    rapi1.start()

    # add the SONATA dummy gatekeeper to each DC
    sdkg1 = SonataDummyGatekeeperEndpoint("0.0.0.0",
                                          5000,
                                          deploy_sap=True,
                                          auto_deploy=True)
    sdkg1.connectDatacenter(dc1)
    # run the dummy gatekeeper (in another thread, don't block)
    sdkg1.start()

    # start the emulation platform
    net.start()
    net.CLI()
    net.stop()
Example #8
def create_topology1():
    # create topology
    net = DCNetwork(controller=RemoteController,
                    monitor=True,
                    enable_learning=True)
    dc1 = net.addDatacenter("dc1")

    # add the command line interface endpoint to each DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(dc1)
    # run API endpoint server (in another thread, don't block)
    rapi1.start()

    # specify a vnfd file to be deployed as internal SAP:
    sap_vnfd = 'custom_sap_vnfd.yml'
    dir_path = os.path.dirname(__file__)
    sap_vnfd_path = os.path.join(dir_path, sap_vnfd)
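    # resolving the descriptor relative to this file keeps the path valid
    # regardless of the current working directory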
    # sap_vnfd_path = None
    # add the SONATA dummy gatekeeper to each DC
    sdkg1 = SonataDummyGatekeeperEndpoint("0.0.0.0",
                                          5000,
                                          deploy_sap=True,
                                          auto_deploy=True,
                                          docker_management=True,
                                          auto_delete=True,
                                          sap_vnfd_path=sap_vnfd_path)
    sdkg1.connectDatacenter(dc1)
    # run the dummy gatekeeper (in another thread, don't block)
    sdkg1.start()

    # start the emulation platform
    net.start()
    net.CLI()
    net.stop()
Example #9
def create_topology1():
    # create topology
    net = DCNetwork(controller=RemoteController, monitor=False, enable_learning=True)
    dc1 = net.addDatacenter("dc1")
    dc2 = net.addDatacenter("dc2")
    s1 = net.addSwitch("s1")
    net.addLink(dc1, s1, delay="10ms")
    net.addLink(dc2, s1, delay="20ms")

    # add the command line interface endpoint to each DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(dc1)
    rapi1.connectDatacenter(dc2)
    # run API endpoint server (in another thread, don't block)
    rapi1.start()

    # add the SONATA dummy gatekeeper to each DC
    sdkg1 = SonataDummyGatekeeperEndpoint("0.0.0.0", 5000, deploy_sap=False)
    sdkg1.connectDatacenter(dc1)
    sdkg1.connectDatacenter(dc2)
    # run the dummy gatekeeper (in another thread, don't block)
    sdkg1.start()

    # start the emulation platform
    net.start()
    net.CLI()
    rapi1.stop()
    net.stop()
Example #10
class EmulatorProfilingTopology(object):
    def __init__(self):
        pass

    def start(self):
        LOG.info("Starting emulation ...")
        setLogLevel('info')  # set Mininet loglevel
        # create topology
        self.net = DCNetwork(monitor=False, enable_learning=False)
        # we only need one DC for benchmarking
        dc = self.net.addDatacenter("dc1")
        # add the command line interface endpoint to each DC (REST API)
        self.rapi1 = RestApiEndpoint("0.0.0.0", 5001)
        self.rapi1.connectDCNetwork(self.net)
        self.rapi1.connectDatacenter(dc)
        self.rapi1.start()
        # add the 5GTANGO lightweight life cycle manager (LLCM) to the topology
        self.llcm1 = TangoLLCMEndpoint("0.0.0.0", 5000, deploy_sap=False)
        self.llcm1.connectDatacenter(dc)
        self.llcm1.start()
        self.net.start()

    def stop(self):
        LOG.info("Stopping emulation ...")
        self.rapi1.stop()
        self.llcm1.stop()
        self.net.stop()
Example #11
def create_topology1():
    # create topology
    net = DCNetwork(controller=RemoteController,
                    monitor=False,
                    enable_learning=False)
    # add data centers
    dc1 = net.addDatacenter("dc1")
    dc2 = net.addDatacenter("dc2")
    # add some intermediate switch
    s1 = net.addSwitch("s1")
    # connect data centers
    net.addLink(dc1, s1, delay="10ms")
    net.addLink(dc2, s1, delay="20ms")

    # add REST control endpoints to each datacenter (to be used with son-emu-cli)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(dc1)
    rapi1.connectDatacenter(dc2)
    rapi1.start()

    # add the SONATA dummy gatekeeper to each DC
    sdkg1 = SonataDummyGatekeeperEndpoint("0.0.0.0", 5000)
    sdkg1.connectDatacenter(dc1)
    sdkg1.connectDatacenter(dc2)
    # run the dummy gatekeeper (in another thread, don't block)
    sdkg1.start()

    # start the emulation platform
    net.start()
    net.CLI()
    net.stop()
Example #12
def create_topology1():
    # create topology
    net = DCNetwork(controller=RemoteController,
                    monitor=False,
                    enable_learning=False)
    # add data centers
    dc1 = net.addDatacenter("dc1")
    dc2 = net.addDatacenter("dc2")
    # add some intermediate switch
    s1 = net.addSwitch("s1")
    # connect data centers
    net.addLink(dc1, s1, delay="10ms")
    net.addLink(dc2, s1, delay="20ms")

    # add REST control endpoints to each datacenter (to be used with son-emu-cli)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(dc1)
    rapi1.connectDatacenter(dc2)
    rapi1.start()

    # start the emulation platform
    net.start()
    net.CLI()
    net.stop()
Example #13
def create_topology():
    net = DCNetwork(monitor=False, enable_learning=True)

    dc1 = net.addDatacenter("dc1")
    # add OpenStack-like APIs to the emulated DC
    api1 = OpenstackApiEndpoint("0.0.0.0", 6001)
    api1.connect_datacenter(dc1)
    api1.start()
    api1.connect_dc_network(net)
    # add the command line interface endpoint to the emulated DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(dc1)
    rapi1.start()
    
    s1 = net.addSwitch('s1')
    s2 = net.addSwitch('s2')
    s3 = net.addSwitch('s3')

    h1 = net.addHost('h1')
    h2 = net.addHost('h2')
    h3 = net.addHost('h3')

    net.addLink(h1, s1)
    net.addLink(h2, s2)
    net.addLink(h3, s3)
    # create a switch triangle
    net.addLink(s1, s2)
    net.addLink(s2, s3)
    net.addLink(s1, s3)

    net.start()
    net.CLI()
    # when the user types exit in the CLI, we stop the emulator
    net.stop()
Example #14
def _create_rest_api_endpoints(self):
    # create
    apiR = RestApiEndpoint("0.0.0.0", 5001)
    # connect PoPs
    apiR.connectDatacenter(self.pop1)
    apiR.connectDatacenter(self.pop2)
    # connect network
    apiR.connectDCNetwork(self)
    # start
    apiR.start()
Example #15
def create_topology(httpmode=False):
    net = DCNetwork(monitor=False, enable_learning=True)

    DC = net.addDatacenter("DC")

    # add OpenStack-like APIs to the emulated DC
    api1 = OpenstackApiEndpoint("0.0.0.0", 6001)
    api1.connect_datacenter(DC)
    api1.start()
    api1.connect_dc_network(net)

    # add the command line interface endpoint to the emulated DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(DC)
    rapi1.start()

    # create switches
    s1 = net.addSwitch('s1')
    s2 = net.addSwitch('s2')
    s3 = net.addSwitch('s3')

    # create hosts
    S = createHost(httpmode, net, 'S')
    GI = createGW(httpmode, net, 'GI')
    GFA = createGW(httpmode, net, 'GFA')
    DVA = createDV(httpmode, net, 'DVA')

    # create links
    net.addLink(S, s1)
    net.addLink(GI, s2)
    net.addLink(GFA, s3)
    net.addLink(DVA, s3)
    net.addLink(s1, s2)
    net.addLink(s2, s3)
    net.addLink(DC, s3)

    # run services (in order)
    if httpmode:
        S.cmd("startup --local_ip 10.0.0.1 --local_port 8080 --local_name srv")
        GI.cmd(
            "startup  --local_ip 10.0.0.2 --local_port 8181 --local_name gwi --remote_ip 10.0.0.1 --remote_port 8080 --remote_name srv"
        )
        GFA.cmd(
            "startup  --local_ip 10.0.0.3 --local_port 8282 --local_name gwfa --remote_ip 10.0.0.2 --remote_port 8181 --remote_name gwi"
        )
        DVA.cmd(
            "startup  --local_ip 10.0.0.4 --local_port 8888 --local_name dva --remote_ip 10.0.0.3 --remote_port 8282 --remote_name gwfa --send_period 3000"
        )

    # do not remove
    net.start()
    net.CLI()
    # when the user types exit in the CLI, we stop the emulator
    net.stop()
Example #16
def create_topology():
    net = DCNetwork(monitor=False, enable_learning=True)

    dc1 = net.addDatacenter("dc1")
    # add OpenStack-like APIs to the emulated DC
    api1 = OpenstackApiEndpoint("0.0.0.0", 6001)
    api1.connect_datacenter(dc1)
    api1.start()
    api1.connect_dc_network(net)
    # add the command line interface endpoint to the emulated DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(dc1)
    rapi1.start()

    Serv = net.addDocker('Serv',
                         ip='10.0.0.200',
                         dimage=image,
                         dcmd="sh ./server.sh")
    GI = net.addDocker('GI', ip='10.0.0.201', dimage=image, dcmd="sh ./gi.sh")
    GF1 = net.addDocker('GF1',
                        ip='10.0.0.202',
                        dimage=image,
                        dcmd="sh ./gf1.sh")
    GF2 = net.addDocker('GF2',
                        ip='10.0.0.203',
                        dimage=image,
                        dcmd="sh ./gf2.sh")
    GF3 = net.addDocker('GF3',
                        ip='10.0.0.204',
                        dimage=image,
                        dcmd="sh ./gf3.sh")

    s1 = net.addSwitch('s1')
    s2 = net.addSwitch('s2')

    net.addLink(Serv, s1)
    net.addLink(GI, s1)
    net.addLink(s1, s2, cls=TCLink, delay='100ms', bw=1)
    net.addLink(GF1, s2)
    net.addLink(GF2, s2)
    net.addLink(GF3, s2)
    net.addLink(dc1, s2)

    net.start()

    net.ping([GF1, GI])
    net.ping([GF2, GI])
    net.ping([GF3, GI])
    net.ping([Serv, GI])

    net.CLI()

    net.stop()
Example #17
class DaemonTopology(object):
    """
    Topology with two datacenters:

        dc1 <-- 50ms --> dc2
    """
    def __init__(self):
        self.running = True
        signal.signal(signal.SIGINT, self._stop_by_signal)
        signal.signal(signal.SIGTERM, self._stop_by_signal)
        # create and start topology
        self.create_topology()
        self.start_topology()
        self.daemonize()
        self.stop_topology()

    def create_topology(self):
        self.net = DCNetwork(monitor=False, enable_learning=True)
        self.dc1 = self.net.addDatacenter("dc1")
        self.dc2 = self.net.addDatacenter("dc2")
        self.net.addLink(self.dc1, self.dc2, cls=TCLink, delay="50ms")
        # add OpenStack-like APIs to the emulated DC
        self.api1 = OpenstackApiEndpoint("0.0.0.0", 6001)
        self.api1.connect_datacenter(self.dc1)
        self.api1.connect_dc_network(self.net)
        self.api2 = OpenstackApiEndpoint("0.0.0.0", 6002)
        self.api2.connect_datacenter(self.dc2)
        self.api2.connect_dc_network(self.net)
        # add the command line interface endpoint to the emulated DC (REST API)
        self.rapi1 = RestApiEndpoint("0.0.0.0", 5001)
        self.rapi1.connectDCNetwork(self.net)
        self.rapi1.connectDatacenter(self.dc1)
        self.rapi1.connectDatacenter(self.dc2)

    def start_topology(self):
        self.api1.start()
        self.api2.start()
        self.rapi1.start()
        self.net.start()

    def daemonize(self):
        print("Daemonizing vim-emu. Send SIGTERM or SIGKILL to stop.")
        while self.running:
            time.sleep(1)

    def _stop_by_signal(self, signum, frame):
        print("Received SIGNAL {}. Stopping.".format(signum))
        self.running = False

    def stop_topology(self):
        self.api1.stop()
        self.api2.stop()
        self.rapi1.stop()
        self.net.stop()
Example #18
def _create_rest_api_endpoints(self):
    # create
    api1 = RestApiEndpoint("0.0.0.0", 5001)
    # connect PoPs
    api1.connectDatacenter(self.pop1)
    api1.connectDatacenter(self.pop2)
    api1.connectDatacenter(self.pop3)
    api1.connectDatacenter(self.pop4)
    # connect network
    api1.connectDCNetwork(self)
    # start
    api1.start()
Example #19
class DaemonTopology(object):
    def __init__(self):
        self.running = True
        signal.signal(signal.SIGINT, self._stop_by_signal)
        signal.signal(signal.SIGTERM, self._stop_by_signal)
        # create and start topology
        self.create_topology()
        self.start_topology()
        self.daemonize()
        self.stop_topology()

    def create_topology(self):
        self.net = DCNetwork(monitor=False, enable_learning=True)
        self.client_dc = self.net.addDatacenter("client_dc")
        self.vnfs_dc = self.net.addDatacenter("vnfs_dc")
        self.server_dc = self.net.addDatacenter("server_dc")

        self.switch1 = self.net.addSwitch("switch1")
        self.switch2 = self.net.addSwitch("switch2")

        linkopts = dict(delay="1ms", bw=100)
        self.net.addLink(self.client_dc, self.switch1, **linkopts)
        self.net.addLink(self.vnfs_dc, self.switch1, **linkopts)
        self.net.addLink(self.switch1, self.switch2, **linkopts)
        self.net.addLink(self.vnfs_dc, self.switch2, **linkopts)
        self.net.addLink(self.switch2, self.server_dc, **linkopts)

        # add the command line interface endpoint to the emulated DC (REST API)
        self.rest = RestApiEndpoint("0.0.0.0", 5001)
        self.rest.connectDCNetwork(self.net)
        self.rest.connectDatacenter(self.client_dc)
        self.rest.connectDatacenter(self.vnfs_dc)
        self.rest.connectDatacenter(self.server_dc)

    def start_topology(self):
        self.rest.start()
        self.net.start()
        subprocess.call("./res/scripts/init_two_clients_servers.sh",
                        shell=True)

    def daemonize(self):
        print("Daemonizing vim-emu. Send SIGTERM or SIGKILL to stop.")
        while self.running:
            time.sleep(1)

    def _stop_by_signal(self, signum, frame):
        print("Received SIGNAL {}. Stopping.".format(signum))
        self.running = False

    def stop_topology(self):
        self.rest.stop()
        self.net.stop()
Example #20
def create_topology1():
    # create topology
    net = DCNetwork(controller=RemoteController, monitor=True, enable_learning=True)
    dc1 = net.addDatacenter("dc1")


    # add the command line interface endpoint to each DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(dc1)
    # run API endpoint server (in another thread, don't block)
    rapi1.start()

    # add the SONATA dummy gatekeeper to each DC
    #sdkg1 = SonataDummyGatekeeperEndpoint("0.0.0.0", 5000, deploy_sap=True)
    #sdkg1.connectDatacenter(dc1)
    # run the dummy gatekeeper (in another thread, don't block)
    #sdkg1.start()

    # start the emulation platform
    net.start()  # here the docker host default ip is configured

    # topology must be started before hosts are added
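    # each dict in 'network' below defines one container interface: 'id' names
    # the interface, 'ip' and 'mac' configure it (Example #35 formats the same call)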
    cache = dc1.startCompute('cache', image="squid-vnf", network=[{"ip": "10.10.0.1/24", "id": "client", 'mac': "aa:aa:aa:00:00:01"},
                                                                  {"ip": "10.20.0.1/24", "id": "server", "mac": "aa:aa:aa:00:00:02"}])

    client = dc1.startCompute('client', image='vcdn-client', network=[{"ip": "10.10.0.2/24", "id": "client"}])

    server = dc1.startCompute('server', image='webserver', network=[{"ip": "10.20.0.2/24", "id": "server"}])


    
    #client = net.addDocker('client', ip='10.10.0.1/24', dimage="vcdn-client") 
    #cache = net.addDocker('cache', dimage="squid-vnf")
    #server = net.addDocker('server', ip='10.20.0.1/24', dimage="webserver")
    #net.addLink(dc1, client,  intfName1='dc-cl', intfName2='client')
    #net.addLink(dc1, server,  intfName1='dc-sv', intfName2='server')
    #net.addLink(dc1, cache,  intfName1='dc-ccl', intfName2='client', params1={'ip': '10.10.0.2/24'})
    #net.addLink(dc1, cache,  intfName1='dc-csv', intfName2='server',params1={'ip': '10.20.0.2/24'})

    # initialise VNFs
    cache.cmd("./start.sh", detach=True)
    client.cmd("./start.sh", detach=True)
    server.cmd('./start.sh', detach=True)

    # startup script hangs if we use other startup command
    # command="./start.sh"

    net.CLI()
    net.stop()
    while not net.exit:
        pass
Example #21
def setup_topology(net):
    _LOGGER.info("Setting up the topology")
    dc = net.addDatacenter("dc1")  # pylint: disable=invalid-name
    net.addLink(dc, net.addSwitch("s1"), delay="10ms")
    # add the SONATA dummy gatekeeper to each DC
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(dc)
    rapi1.start()
    sdkg1 = SonataDummyGatekeeperEndpoint("0.0.0.0", 5000, deploy_sap=False)
    sdkg1.connectDatacenter(dc)
    # run the dummy gatekeeper (in another thread, don't block)
    sdkg1.start()
Example #22
def create_topology(httpmode=False):
    net = DCNetwork(monitor=False, enable_learning=True)

    DC = net.addDatacenter("DC")

    # add OpenStack-like APIs to the emulated DC
    api1 = OpenstackApiEndpoint("0.0.0.0", 6001)
    api1.connect_datacenter(DC)
    api1.start()
    api1.connect_dc_network(net)

    # add the command line interface endpoint to the emulated DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(DC)
    rapi1.start()

    s1 = net.addSwitch('s1')
    s2 = net.addSwitch('s2')
    s3 = net.addSwitch('s3')
    s4 = net.addSwitch('s4')

    S = net.addDocker("S", dimage="host:server")
    GI = net.addDocker("GI", dimage="host:gateway")
    GFA = createHost(httpmode, net, 'GFA')
    GFB = createHost(httpmode, net, 'GFB')
    GFC = createHost(httpmode, net, 'GFC')

    net.addLink(S, s1)
    net.addLink(GI, s2)
    net.addLink(GFA, s3)
    net.addLink(GFB, s3)
    net.addLink(GFC, s4)

    net.addLink(s1, s2)
    net.addLink(s2, s3)
    net.addLink(s2, s4)
    net.addLink(DC, s2)

    # do not remove
    net.start()
    net.CLI()
    # when the user types exit in the CLI, we stop the emulator
    net.stop()
Example #23
def create_topology():
    net = DCNetwork(monitor=False, enable_learning=False)

    dc1 = net.addDatacenter("dc1")
    # add OpenStack-like APIs to the emulated DC
    api1 = OpenstackApiEndpoint("0.0.0.0", 6001)
    api1.connect_datacenter(dc1)
    api1.start()
    api1.connect_dc_network(net)
    # add the command line interface endpoint to the emulated DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(dc1)
    rapi1.start()

    net.start()
    net.CLI()
    # when the user types exit in the CLI, we stop the emulator
    net.stop()
Example #24
class Profiling:
    """
    Set up a simple topology and start it.

    :param port: the port the REST interface will listen on; port+1 will be in use as well
    """

    stop_now = False

    def __init__(self, port=5000):
        GracefulKiller(self)
        # create topology
        self.net = DCNetwork(controller=RemoteController,
                             monitor=False,
                             enable_learning=False)
        self.dc = self.net.addDatacenter("dc1")

        # add the command line interface endpoint to each DC (REST API)
        self.rapi1 = RestApiEndpoint("0.0.0.0", port + 1)
        self.rapi1.connectDCNetwork(self.net)
        self.rapi1.connectDatacenter(self.dc)
        # run API endpoint server (in another thread, don't block)
        self.rapi1.start()

        # add the SONATA dummy gatekeeper to each DC
        self.sdkg1 = SonataDummyGatekeeperEndpoint("0.0.0.0",
                                                   port,
                                                   deploy_sap=False)
        self.sdkg1.connectDatacenter(self.dc)
        # run the dummy gatekeeper (in another thread, don't block)
        self.sdkg1.start()

        self.net.start()
        LOG.info("Started topology")
        while not self.stop_now:
            sleep(1)
        self.net.stop()
        LOG.info("Stopped topology")

    """
     Set stop value to stop the topology
    """

    def stop_it(self):
        self.stop_now = True
Example #25
def create_topology1():

    global exit  # module-level stop flag (set by a signal handler elsewhere; shadows the built-in exit)

    # create topology
    net = DCNetwork(controller=RemoteController,
                    monitor=True,
                    enable_learning=False)
    dc1 = net.addDatacenter("dc1")
    dc2 = net.addDatacenter("dc2")
    s1 = net.addSwitch("s1")
    net.addLink(dc1, s1, delay="10ms")
    net.addLink(dc2, s1, delay="20ms")

    # add the command line interface endpoint to each DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDatacenter(dc1)
    rapi1.connectDatacenter(dc2)
    # connect total network also, needed to do the chaining and monitoring
    rapi1.connectDCNetwork(net)
    # run API endpoint server (in another thread, don't block)
    rapi1.start()

    # add the SONATA dummy gatekeeper to each DC
    sdkg1 = SonataDummyGatekeeperEndpoint("0.0.0.0", 5000, deploy_sap=True)
    sdkg1.connectDatacenter(dc1)
    sdkg1.connectDatacenter(dc2)
    # run the dummy gatekeeper (in another thread, don't block)
    sdkg1.start()

    # start the emulation platform
    net.start()

    # does not work from docker compose (cannot start container in interactive mode)
    # cli = net.CLI()
    # instead wait here:
    logging.info("waiting for SIGTERM or SIGINT signal")
    while not exit:
        time.sleep(1)
    logging.info("got SIG signal")
    net.stop()
Example #26
class Profiling:
    """
    Set up a simple topology and start it.
    """

    stop_now = False

    def __init__(self):
        GracefulKiller(self)
        # create topology
        self.net = DCNetwork(controller=RemoteController, monitor=False, enable_learning=False)
        self.dc = self.net.addDatacenter("dc1")

        # add the command line interface endpoint to each DC (REST API)
        self.rapi1 = RestApiEndpoint("0.0.0.0", 5001)
        self.rapi1.connectDCNetwork(self.net)
        self.rapi1.connectDatacenter(self.dc)
        # run API endpoint server (in another thread, don't block)
        self.rapi1.start()

        # add the SONATA dummy gatekeeper to each DC
        self.sdkg1 = SonataDummyGatekeeperEndpoint("0.0.0.0", 5000, deploy_sap=False)
        self.sdkg1.connectDatacenter(self.dc)
        # run the dummy gatekeeper (in another thread, don't block)
        self.sdkg1.start()


        self.net.start()
        LOG.info("Started topology")
        while not self.stop_now:
            sleep(1)
        self.net.stop()
        LOG.info("Stopped topology")

    """
     Set stop value to stop the topology
    """
    def stop_it(self):
        self.stop_now = True
Example #27
def start(self):
    LOG.info("Starting emulation ...")
    # pylint: disable=E0401
    from mininet.log import setLogLevel
    from emuvim.dcemulator.net import DCNetwork
    from emuvim.api.rest.rest_api_endpoint import RestApiEndpoint
    from emuvim.api.tango import TangoLLCMEndpoint
    setLogLevel('info')  # set Mininet loglevel
    # create topology
    self.net = DCNetwork(monitor=False, enable_learning=False)
    # we only need one DC for benchmarking
    dc = self.net.addDatacenter("dc1")
    # add the command line interface endpoint to each DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(self.net)
    rapi1.connectDatacenter(dc)
    rapi1.start()
    # add the 5GTANGO lightweight life cycle manager (LLCM) to the topology
    llcm1 = TangoLLCMEndpoint("0.0.0.0", 5000, deploy_sap=False)
    llcm1.connectDatacenter(dc)
    llcm1.start()
    self.net.start()
Example #28
def create_topology1():

    global exit  # module-level stop flag (set by a signal handler elsewhere; shadows the built-in exit)

    # create topology
    net = DCNetwork(controller=RemoteController, monitor=True, enable_learning=False)
    dc1 = net.addDatacenter("dc1")
    dc2 = net.addDatacenter("dc2")
    s1 = net.addSwitch("s1")
    net.addLink(dc1, s1, delay="10ms")
    net.addLink(dc2, s1, delay="20ms")

    # add the command line interface endpoint to each DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDatacenter(dc1)
    rapi1.connectDatacenter(dc2)
    # connect total network also, needed to do the chaining and monitoring
    rapi1.connectDCNetwork(net)
    # run API endpoint server (in another thread, don't block)
    rapi1.start()

    # add the SONATA dummy gatekeeper to each DC
    sdkg1 = SonataDummyGatekeeperEndpoint("0.0.0.0", 5000, deploy_sap=True)
    sdkg1.connectDatacenter(dc1)
    sdkg1.connectDatacenter(dc2)
    # run the dummy gatekeeper (in another thread, don't block)
    sdkg1.start()

    # start the emulation platform
    net.start()

    # does not work from docker compose (cannot start container in interactive mode)
    # cli = net.CLI()
    # instead wait here:
    logging.info("waiting for SIGTERM or SIGINT signal")
    while not exit:
        time.sleep(1)
    logging.info("got SIG signal")
    net.stop()
Example #29
def create_topology():
    net = DCNetwork(monitor=False, enable_learning=True)
    # create two data centers
    dc1 = net.addDatacenter("dc1")
    dc2 = net.addDatacenter("dc2")
    # interconnect data centers
    net.addLink(dc1, dc2, delay="20ms")
    # add the command line interface endpoint to the emulated DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(dc1)
    rapi1.connectDatacenter(dc2)
    rapi1.start()
    # add the 5GTANGO lightweight life cycle manager (LLCM) to the topology
    llcm1 = TangoLLCMEndpoint("0.0.0.0", 5000, deploy_sap=False)
    llcm1.connectDatacenter(dc1)
    llcm1.connectDatacenter(dc2)
    # run the dummy gatekeeper (in another thread, don't block)
    llcm1.start()
    # start the emulation and enter interactive CLI
    net.start()
    net.CLI()
    # when the user types exit in the CLI, we stop the emulator
    net.stop()
Example #30
def create_topology1():
    """
    1. Create a data center network object (DCNetwork) with monitoring enabled
    """
    net = DCNetwork(monitor=True, enable_learning=False)

    """
    1b. Add endpoint APIs for the whole DCNetwork,
        to access and control the networking from outside.
        e.g., to setup forwarding paths between compute
        instances aka. VNFs (represented by Docker containers), passing through
        different switches and datacenters of the emulated topology
    """
    # create monitoring api endpoint for backwards compatibility with zerorpc api 
    mon_api = ZeroRpcApiEndpointDCNetwork("0.0.0.0", 5151)
    mon_api.connectDCNetwork(net)
    mon_api.start()

    """
    2. Add (logical) data centers to the topology
       (each data center is one "bigswitch" in our simplified
        first prototype)
    """
    dc1 = net.addDatacenter("dc1")
    dc2 = net.addDatacenter("dc2")

    """
    3. You can add additional SDN switches for data center
       interconnections to the network.
    """
    s1 = net.addSwitch("s1")

    """
    4. Add links between your data centers and additional switches
       to define your topology.
       These links can use Mininet's features to limit bw, add delay or jitter.
    """

    net.addLink(dc1, s1)
    net.addLink(s1, dc2)


    """
    5. We want to access and control our data centers from the outside,
       e.g., we want to connect an orchestrator to start/stop compute
       resources aka. VNFs (represented by Docker containers in the emulated topology).

       So we need to instantiate API endpoints (e.g. a zerorpc or REST
       interface). Depending on the endpoint implementations, we can connect
       one or more data centers to it, which can then be controlled through
       this API, e.g., start/stop/list compute instances.
    """
    # keep the old zeroRPC interface for the prometheus metric query test
    zapi1 = ZeroRpcApiEndpoint("0.0.0.0", 4242)
    # connect data centers to this endpoint
    zapi1.connectDatacenter(dc1)
    zapi1.connectDatacenter(dc2)
    # run API endpoint server (in another thread, don't block)
    zapi1.start()

    # create a new instance of an endpoint implementation
    # the restapi handles all compute, networking and monitoring commands in one api endpoint
    api1 = RestApiEndpoint("0.0.0.0", 5001)
    # connect data centers to this endpoint
    api1.connectDatacenter(dc1)
    api1.connectDatacenter(dc2)
    # connect total network also, needed to do the chaining and monitoring
    api1.connectDCNetwork(net)
    # run API endpoint server (in another thread, don't block)
    api1.start()

    """
    5.1. For our example, we create a second endpoint to illustrate that
         this is supported by our design. This feature allows us to have
         one API endpoint for each data center. This makes the emulation
         environment more realistic because you can easily create one
         OpenStack-like REST API endpoint for *each* data center.
         This will look like a real-world multi PoP/data center deployment
         from the perspective of an orchestrator.
    """
    #zapi2 = ZeroRpcApiEndpoint("0.0.0.0", 4343)
    #zapi2.connectDatacenter(dc3)
    #zapi2.connectDatacenter(dc4)
    #zapi2.start()

    """
    6. Finally we are done and can start our network (the emulator).
       We can also enter the Mininet CLI to interactively interact
       with our compute resources (just like in default Mininet).
       But we can also implement fully automated experiments that
       can be executed again and again.
    """
    net.start()
    net.CLI()
    # when the user types exit in the CLI, we stop the emulator
    net.stop()
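
As step 6 above notes, the interactive CLI can be replaced by a fully automated experiment. A minimal sketch (hypothetical VNF name and image; startCompute is used as in Examples #20 and #35, and stopCompute is assumed to be its counterpart):

def run_automated_experiment():
    net = DCNetwork(monitor=False, enable_learning=True)
    dc1 = net.addDatacenter("dc1")
    net.start()  # the topology must be running before compute instances are added
    # deploy a container-based VNF on the emulated PoP
    vnf1 = dc1.startCompute("vnf1", image="ubuntu:trusty",
                            network=[{"id": "emu0", "ip": "10.0.0.1/24"}])
    print(vnf1.cmd("ifconfig"))  # run a command inside the VNF and collect its output
    dc1.stopCompute("vnf1")      # assumption: counterpart of startCompute
    net.stop()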
Example #31
def create_topology(httpmode=False, port_default=8888, device_rate=1500):
    net = DCNetwork(monitor=False, enable_learning=True)

    DC = net.addDatacenter("DC")

    # add OpenStack-like APIs to the emulated DC
    api1 = OpenstackApiEndpoint("0.0.0.0", 6001)
    api1.connect_datacenter(DC)
    api1.start()
    api1.connect_dc_network(net)

    # add the command line interface endpoint to the emulated DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(DC)
    rapi1.start()

    # create switches
    s1 = net.addSwitch('s1')
    s2 = net.addSwitch('s2')
    s3 = net.addSwitch('s3')
    s4 = net.addSwitch('s4')

    S = createHost(httpmode, net, 'S', "host:server")
    GI = createHost(httpmode, net, 'GI', "host:gateway")
    GFA = createHost(httpmode, net, 'GFA', "host:gwfinal")
    GFB = createHost(httpmode, net, 'GFB', "host:gwfinal")
    GFC = createHost(httpmode, net, 'GFC', "host:gwfinal")

    # create links
    net.addLink(S, s1)
    net.addLink(GI, s2)
    net.addLink(GFA, s3)
    net.addLink(GFB, s3)
    net.addLink(GFC, s4)

    net.addLink(s1, s2)
    net.addLink(s2, s3)
    net.addLink(s2, s4)
    net.addLink(DC, s4)

    # do not remove
    net.start()

    # run gateways and devices
    print("Starting Server node")
    S.cmd("startup --local_ip 10.0.0.1 --local_port 8888 --local_name srv")
    print("Waiting for server node to complete startup")
    time.sleep(2)
    print("Starting GI node")
    GI.cmd(
        "startup --local_ip 10.0.0.2 --local_port 8888 --local_name gwi --remote_ip 10.0.0.1 --remote_port 8888 --remote_name srv"
    )
    print("Waiting for GI node to complete startup")
    time.sleep(2)
    print("Starting GFA node")
    GFA.cmd(
        "startup --local_ip 10.0.0.3 --local_port 8888 --local_name gwfa --remote_ip 10.0.0.2 --remote_port 8888 --remote_name gwi"
    )
    print("Waiting for GFA node to complete startup")
    time.sleep(2)
    print("Starting GFA devices")
    GFA.cmd("start_devices 10.0.0.3 9001 {0} gwfa {1}".format(
        port_default, device_rate))
    print("Starting GFB node")
    GFB.cmd(
        "startup --local_ip 10.0.0.4 --local_port 8888 --local_name gwfb --remote_ip 10.0.0.2 --remote_port 8888 --remote_name gwi"
    )
    print("Waiting for GFB node to complete startup")
    time.sleep(2)
    print("Starting GFB devices")
    GFB.cmd("start_devices 10.0.0.4 9001 {0} gwfb {1}".format(
        port_default, device_rate))
    print("Starting GFC node")
    GFC.cmd(
        "startup --local_ip 10.0.0.5 --local_port 8888 --local_name gwfc --remote_ip 10.0.0.2 --remote_port 8888 --remote_name gwi"
    )
    print("Waiting for GFC node to complete startup")
    time.sleep(2)
    print("Starting GFC devices")
    GFC.cmd("start_devices 10.0.0.5 9001 {0} gwfc {1}".format(
        port_default, device_rate))
    # start the command line
    net.CLI()
    # when the user types exit in the CLI, we stop the emulator
    net.stop()
Example #32
monitoring = net.addDocker(
    'monitoring', ip='10.0.0.15', dimage=X,
    dcmd="sh -c 'cd /Projet-SDCI && git pull; cd Monitoring; sh monitoring.sh; tail -f /dev/null'")

dc = net.addDatacenter("dc")

info('*** Adding switches\n')
s1 = net.addSwitch('s1')

info('*** Creating links\n')
net.addLink(server, s1, delay="20ms")
net.addLink(gwi1, s1, delay="20ms")
net.addLink(gwf1, s1, delay="20ms")
net.addLink(gwf2, s1, delay="20ms")
net.addLink(gwf3, s1, delay="20ms")
net.addLink(monitoring, s1, delay="20ms")
net.addLink(dc, s1, delay="20ms")


info('*** Starting RestApi\n')
rapi1 = RestApiEndpoint("0.0.0.0", 5001)
rapi1.connectDCNetwork(net)
rapi1.connectDatacenter(dc)
rapi1.start()

info('*** Starting network\n')
net.start()

info('*** Running CLI\n')
CLI(net)

info('*** Stopping network')
net.stop()
Example #33
def create_topology():
    net = DCNetwork(monitor=False, enable_learning=True)

    info('*** Adding datacenters\n')
    dc1 = net.addDatacenter("dc1")
    dc2 = net.addDatacenter("dc2")
    # add OpenStack-like APIs to the emulated DC
    api1 = OpenstackApiEndpoint("0.0.0.0", 6001)
    api1.connect_datacenter(dc1)
    api1.connect_datacenter(dc2)
    api1.start()
    api1.connect_dc_network(net)
    # add the command line interface endpoint to the emulated DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(dc1)
    rapi1.connectDatacenter(dc2)
    rapi1.start()

    # params
    SRV_PORT = 8080

    info('*** Adding switches\n')
    sw1 = net.addSwitch('sw1')
    sw2 = net.addSwitch('sw2')

    info('*** Adding docker containers\n')

    server = net.addDocker(
        'server',
        ip='10.100.0.10',
        dimage="alpine:4",
        dcmd="node ./server.js --local_ip '127.0.0.1' --local_port 8080 --local_name 'srv' &")

    GW = net.addDocker('GW', ip='10.100.0.4', dimage="alpine:4")

    GF1 = net.addDocker('GF1', ip='10.100.0.1', dimage="alpine:4")

    GF2 = net.addDocker('GF2', ip='10.100.0.2', dimage="alpine:4")
    GF3 = net.addDocker('GF3', ip='10.100.0.3', dimage="alpine:4")

    net.addLink(server, sw1)
    net.addLink(GW, sw1)
    net.addLink(sw1, sw2)
    net.addLink(GF1, sw2)
    net.addLink(GF2, sw2)
    net.addLink(GF3, sw2)

    info('*** Starting network\n')
    net.start()

    time.sleep(5)

    # launch gateway GW
    info("//// Starting GW gateway\n")
    GW.cmd(
        "node ./gateway.js --local_ip '127.0.0.1' --local_port 9000 --local_name 'GW' --remote_ip '10.100.0.10' --remote_port 8080 --remote_name 'srv' &"
    )
    info('//// started !\n')

    time.sleep(5)

    # launch gateways GFs

    info("//// Starting GF1 gateway\n")
    GF1.cmd(
        "node ./gateway.js --local_ip '127.0.0.1' --local_port 5000 --local_name 'GF1' --remote_ip '10.100.0.4' --remote_port 9000 --remote_name 'GW' &"
    )
    info('//// started !\n')

    info("//// Starting GF2 gateway\n")
    GF2.cmd(
        "node ./gateway.js --local_ip '127.0.0.1' --local_port 5000 --local_name 'GF2' --remote_ip '10.100.0.4' --remote_port 9000 --remote_name 'GW' &"
    )
    info('//// started !\n')

    info("//// Starting GF3 gateway\n")
    GF3.cmd(
        "node ./gateway.js --local_ip '127.0.0.1' --local_port 5000 --local_name 'GF3' --remote_ip '10.100.0.4' --remote_port 9000 --remote_name 'GW' &"
    )
    info('//// started !\n')

    # launch devices

    info("//// Starting devices on GF1\n")
    GF1.cmd(
        "node ./device.js --local_ip '127.0.0.1' --local_port 5001 --local_name 'device-1' --remote_ip '127.0.0.1' --remote_port 5000 --remote_name 'GF1' --send_period 3 &"
    )
    info('//// started !\n')

    info('*** Testing connectivity\n')
    net.ping([GW, GF1])
    info('*** Running CLI\n')
    net.CLI()
    info('*** Stopping network\n')
    net.stop()
Example #34
class Emulator(DockerBasedVIM):
    """
    This class can be used to run tests on the VIM-EMU emulator.
    In order to use this class you need VIM-EMU to be installed locally.
    More information about VIM-EMU and installation instructions can be found on the project wiki-page:
    https://osm.etsi.org/wikipub/index.php/VIM_emulator

    Example:
        >>> from tangotest.vim.emulator import Emulator
        >>> vim = Emulator()
        >>> vim.start()
        >>> # your code here
        >>> vim.stop()

        You can also use this class with the context manager:

        >>> with Emulator() as vim:
        >>>     # your code here
    """
    def __init__(self,
                 endpoint_port=None,
                 tango_port=None,
                 sonata_port=None,
                 enable_learning=False,
                 vnv_checker=False,
                 *args,
                 **kwargs):
        """
        Initialize the Emulator.
        This method doesn't start the Emulator.

        Args:
            endpoint_port (int): vim-emu REST API port. Default: random free port
            tango_port (int): Tango (5GTANGO LLCM) gatekeeper port. Default: random free port
            sonata_port (int): SONATA gatekeeper port. Default: random free port
            vnv_checker (bool): Check if the code can be reused on the 5GTANGO V&V platform
            enable_learning (bool): Enable learning switch
        """
        super(Emulator, self).__init__(*args, **kwargs)
        self.endpoint_port = endpoint_port
        self.tango_port = tango_port
        self.sonata_port = sonata_port
        self.vnv_checker = vnv_checker
        self.enable_learning = enable_learning

    @property
    def InstanceClass(self):
        return EmulatorInstance

    @vnv_checker_start
    def start(self):
        """
        Run the Emulator and the endpoints.
        """
        super(Emulator, self).start()

        initialize_GK()

        self.net = DCNetwork(controller=RemoteController,
                             monitor=False,
                             enable_learning=self.enable_learning)
        self.datacenter = self.net.addDatacenter('dc1')

        endpoint_ip = '0.0.0.0'
        endpoint_port = self.endpoint_port or get_free_tcp_port()
        self.endpoint = 'http://{}:{}'.format(endpoint_ip, endpoint_port)

        self.rest_api = RestApiEndpoint(endpoint_ip, endpoint_port)
        self.rest_api.connectDCNetwork(self.net)
        self.rest_api.connectDatacenter(self.datacenter)
        self.rest_api.start()

        sonata_ip = '0.0.0.0'
        sonata_port = self.sonata_port or get_free_tcp_port()
        self.sonata_address = 'http://{}:{}'.format(sonata_ip, sonata_port)
        self.sonata_gatekeeper = SonataDummyGatekeeperEndpoint(
            sonata_ip, sonata_port)
        self.sonata_gatekeeper.connectDatacenter(self.datacenter)
        self.sonata_gatekeeper.start()

        tango_ip = '0.0.0.0'
        tango_port = self.tango_port or get_free_tcp_port()
        self.tango_address = 'http://{}:{}'.format(tango_ip, tango_port)
        self.tango_gatekeeper = TangoLLCMEndpoint(tango_ip, tango_port)
        self.tango_gatekeeper.connectDatacenter(self.datacenter)
        self.tango_gatekeeper.start()

        self.net.start()

    @vnv_checker_stop
    def stop(self):
        """
        Stop the Emulator and the endpoints.
        """
        self.rest_api.stop()
        self.net.stop()

        super(Emulator, self).stop()

    @vnv_called_once
    def add_instances_from_package(self, package, package_format='tango'):
        if not os.path.isfile(package):
            raise Exception('Package {} not found'.format(package))

        if package_format == 'tango':
            gatekeeper_address = self.tango_address
        elif package_format == 'sonata':
            gatekeeper_address = self.sonata_address
        else:
            raise Exception(
                'package_format must be "tango" or "sonata", passed {}.'.
                format(package_format))

        # Upload the package
        with open(package, 'rb') as package_content:
            files = {'package': package_content}
            url = '{}/packages'.format(gatekeeper_address)
            response = requests.post(url, files=files)
            if not response.ok:
                raise Exception('Something went wrong during upload.')

        # Instantiate the service
        url = '{}/instantiations'.format(gatekeeper_address)
        response = requests.post(url, data='{}')
        if not response.ok:
            raise Exception('Something went wrong during instantiation.')

        instances = []
        for name, instance in self.datacenter.containers.items():
            if name in self.instances:
                continue
            instances.append(self._add_instance(name))

        return instances

    @vnv_called_without_parameter('interfaces')
    def add_instance_from_image(self,
                                name,
                                image,
                                interfaces=None,
                                docker_command=None):
        """
        Run a Docker image on the Emulator.

        Args:
            name (str): The name of an instance
            image (str): The name of an image
            interfaces (int, list, str, or dict): Network configuration
            docker_command (str): The command to execute when starting the instance

        Returns:
            (EmulatorInstance): The added instance
        """

        if not self._image_exists(image):
            raise Exception('Docker image {} not found'.format(image))

        if not interfaces:
            interfaces = '(id=emu0)'
        elif isinstance(interfaces, str):
            pass
        elif isinstance(interfaces, int):
            interfaces = ','.join(
                ['(id=emu{})'.format(i) for i in range(interfaces)])
        elif isinstance(interfaces, list):
            interfaces = ','.join(['(id={})'.format(i) for i in interfaces])
        elif isinstance(interfaces, dict):
            interfaces = ','.join(
                ['(id={},ip={})'.format(k, v) for k, v in interfaces.items()])
        else:
            raise Exception(
                'Wrong network configuration: {}'.format(interfaces))

        params = {
            'name': name,
            'image': image,
            'command': docker_command,
            'network': interfaces,
            'endpoint': self.endpoint,
            'datacenter': 'dc1'
        }

        EmuComputeClient().start(params)

        return self._add_instance(name)

    @vnv_called_without_parameter('interfaces')
    def add_instance_from_source(self,
                                 name,
                                 path,
                                 interfaces=None,
                                 image_name=None,
                                 docker_command=None,
                                 **docker_build_args):
        """
        Build and run a Docker image on the Emulator.

        Args:
            name (str): The name of an instance
            path (str): The path to the directory containing Dockerfile
            interfaces (int, list, str, or dict): Network configuration
            image_name (str): The name of an image. Default: tangotest<name>
            docker_command (str): The command to execute when starting the instance
            **docker_build_args: Extra arguments to be used by the Docker engine to build the image

        Returns:
            (EmulatorInstance): The added instance
        """
        return super(Emulator,
                     self).add_instance_from_source(name, path, interfaces,
                                                    image_name, docker_command,
                                                    **docker_build_args)

    @vnv_not_called
    def add_link(self,
                 src_vnf,
                 src_if,
                 dst_vnf,
                 dst_if,
                 sniff=False,
                 **kwargs):
        result = super(Emulator, self).add_link(src_vnf, src_if, dst_vnf,
                                                dst_if, sniff, **kwargs)

        if result:
            return result

        params = {
            'source': '{}:{}'.format(src_vnf, src_if),
            'destination': '{}:{}'.format(dst_vnf, dst_if),
            'weight': kwargs.get('weight'),
            'match': kwargs.get('match'),
            'bidirectional': kwargs.get('bidirectional', True),
            'cookie': kwargs.get('cookie'),
            'priority': kwargs.get('priority'),
            'endpoint': self.endpoint
        }

        return EmuNetworkClient().add(params)
Example #35
def create_topology1():
    # create topology
    net = DCNetwork(controller=RemoteController,
                    monitor=True,
                    enable_learning=True)
    dc1 = net.addDatacenter("dc1")

    # add the command line interface endpoint to each DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(dc1)
    # run API endpoint server (in another thread, don't block)
    rapi1.start()

    # add the SONATA dummy gatekeeper to each DC
    #sdkg1 = SonataDummyGatekeeperEndpoint("0.0.0.0", 5000, deploy_sap=True)
    #sdkg1.connectDatacenter(dc1)
    # run the dummy gatekeeper (in another thread, don't block)
    #sdkg1.start()

    # start the emulation platform
    net.start()  # here the docker host default ip is configured

    # topology must be started before hosts are added
    #cache = net.addDocker('cache', dimage="squid-vnf")
    cache = dc1.startCompute('cache',
                             image="squid-vnf",
                             network=[{
                                 "ip": "10.10.0.1/24",
                                 "id": "client"
                             }, {
                                 "ip": "10.20.0.1/24",
                                 "id": "server"
                             }])

    #client = net.addDocker('client', ip='10.10.0.1/24', dimage="vcdn-client")
    client = dc1.startCompute('client',
                              image='vcdn-client',
                              network=[{
                                  "ip": "10.10.0.2/24",
                                  "id": "client"
                              }])

    #server = net.addDocker('server', ip='10.20.0.1/24', dimage="webserver")
    server = dc1.startCompute('server',
                              image='webserver',
                              network=[{
                                  "ip": "10.20.0.2/24",
                                  "id": "server"
                              }])
    #net.addLink(dc1, client,  intfName1='dc-cl', intfName2='client')
    #net.addLink(dc1, server,  intfName1='dc-sv', intfName2='server')
    #net.addLink(dc1, cache,  intfName1='dc-ccl', intfName2='client', params1={'ip': '10.10.0.2/24'})
    #net.addLink(dc1, cache,  intfName1='dc-csv', intfName2='server',params1={'ip': '10.20.0.2/24'})

    # initialise VNFs
    cache.cmd("./start.sh", detach=True)
    client.cmd("./start.sh", detach=True)
    server.cmd('./start.sh', detach=True)

    # startup script hangs if we use other startup command
    # command="./start.sh"

    net.CLI()
    net.stop()
Example #36
class TopologyZooTopology(object):
    def __init__(self, args):
        # run daemonized to stop on signal
        self.running = True
        signal.signal(signal.SIGINT, self._stop_by_signal)
        signal.signal(signal.SIGTERM, self._stop_by_signal)

        self.args = args
        self.G = self._load_graphml(args.graph_file)
        self.G_name = self.G.graph.get("label", args.graph_file)
        LOG.debug("Graph label: {}".format(self.G_name))
        self.net = None
        self.pops = list()
        # initialize global rest api
        self.rest_api = RestApiEndpoint("0.0.0.0", 5001)
        self.rest_api.start()
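        # note: the REST endpoint is started before the DCNetwork exists; it is
        # attached to the network later, in create_environment()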
        # initialize and start topology
        self.create_environment()
        self.create_pops()
        self.create_links()
        self.start_topology()
        self.daemonize()
        self.stop_topology()

    def _load_graphml(self, path):
        try:
            G = nx.read_graphml(path, node_type=int)
            LOG.info(
                "Loaded graph from '{}' with {} nodes and {} edges.".format(
                    path, G.__len__(), G.size()))
            LOG.debug(G.adjacency_list())
            return G
        except Exception:
            LOG.exception("Could not read {}".format(path))
        return None

    def create_environment(self):
        self.net = DCNetwork(monitor=False, enable_learning=False)
        self.rest_api.connectDCNetwork(self.net)

    def create_pops(self):
        i = 0
        for n in self.G.nodes(data=True):
            # name = n[1].get("label").replace(" ", "_")  # human readable names
            name = "pop{}".format(n[0])  # use ID as name
            p = self.net.addDatacenter(name)
            self.rest_api.connectDatacenter(p)
            self.pops.append(p)
            LOG.info("Created pop: {} representing {}".format(
                p, n[1].get("label", n[0])))

    def create_links(self):
        for e in self.G.edges(data=True):
            # parse bw limit from edge
            bw_mbps = self._parse_bandwidth(e)
            # calculate delay from nodes; use np.around for consistent rounding behavior in Python 2 and 3
            delay = int(np.around(self._calc_delay_ms(e[0], e[1])))
            try:
                self.net.addLink(self.pops[e[0]],
                                 self.pops[e[1]],
                                 cls=TCLink,
                                 delay='{}ms'.format(delay),
                                 bw=min(bw_mbps, 1000))
                LOG.info("Created link {} with delay {}".format(e, delay))
            except Exception:
                LOG.exception("Could not create link: {}".format(e))

    def _parse_bandwidth(self, e):
        """
        Calculate the link bandwith based on LinkLabel field.
        Default: 100 Mbps (if field is not given)
        Result is returned in Mbps and down scaled by 10x to fit in the Mininet range.
        """
        ll = e[2].get("LinkLabel")
        if ll is None:
            return 100  # default
        ll = ll.strip(" <>=")
        mbits_factor = 1.0
        if "g" in ll.lower():
            mbits_factor = 1000
        elif "k" in ll.lower():
            mbits_factor = (1.0 / 1000)
        ll = ll.strip("KMGkmpsbit/-+ ")
        try:
            bw = float(ll) * mbits_factor
        except ValueError:
            LOG.warning("Could not parse bandwidth: {}".format(ll))
            bw = 100  # default
        LOG.debug("- Bandwidth {}-{} = {} Mbps".format(e[0], e[1], bw))
        return bw * BW_SCALE_FACTOR  # downscale to fit in mininet supported range

    def _calc_distance_meter(self, n1id, n2id):
        """
        Calculate distance in meter between two geo positions.
        """
        n1 = self.G.nodes(data=True)[n1id]
        n2 = self.G.nodes(data=True)[n2id]
        n1_lat, n1_long = n1[1].get("Latitude"), n1[1].get("Longitude")
        n2_lat, n2_long = n2[1].get("Latitude"), n2[1].get("Longitude")
        try:
            return vincenty((n1_lat, n1_long), (n2_lat, n2_long)).meters
        except Exception:
            LOG.exception(
                "Could not calculate distance between nodes: {}/{}".format(
                    n1id, n2id))
        return 0

    def _calc_delay_ms(self, n1id, n2id):
        meter = self._calc_distance_meter(n1id, n2id)
        if meter <= 0:
            return 0  # default 0 ms delay
        LOG.debug("- Distance {}-{} = {} km".format(n1id, n2id, meter / 1000))
        # calc delay
        delay = (meter / SPEED_OF_LIGHT *
                 1000) * PROPAGATION_FACTOR  # in milliseconds
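        # illustration (assuming SPEED_OF_LIGHT in m/s and, hypothetically,
        # PROPAGATION_FACTOR = 0.77): 1,000 km -> ~3.34 ms * 0.77 ~= 2.6 ms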
        LOG.debug("- Delay {}-{} = {} ms (rounded: {} ms)".format(
            n1id, n2id, delay, round(delay)))
        return delay

    def start_topology(self):
        print("start_topology")
        self.net.start()

    def cli(self):
        self.net.CLI()

    def daemonize(self):
        print("Daemonizing vim-emu. Send SIGTERM or SIGKILL to stop.")
        while self.running:
            time.sleep(1)

    def _stop_by_signal(self, signum, frame):
        print("Received SIGNAL {}. Stopping.".format(signum))
        self.running = False

    def stop_topology(self):
        self.rest_api.stop()
        self.net.stop()
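# A minimal invocation sketch (hypothetical flag name; it assumes only that
# the class reads args.graph_file from an argparse namespace, as shown above):
def main():
    import argparse
    parser = argparse.ArgumentParser(
        description="Emulate a TopologyZoo GraphML topology with vim-emu")
    parser.add_argument("-g", "--graph-file", dest="graph_file", required=True,
                        help="path to the .graphml file to emulate")
    # the constructor builds, starts, daemonizes, and finally stops the topology
    TopologyZooTopology(parser.parse_args())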
Beispiel #37
0
def create_topology1():
    # create topology
    net = DCNetwork(controller=RemoteController, monitor=False, enable_learning=False)
    dc1 = net.addDatacenter("dc1")
    dc2 = net.addDatacenter("dc2")
    s1 = net.addSwitch("s1")
    linkopts = dict(delay="1ms",bw=100)
    net.addLink(dc1, s1, **linkopts)
    net.addLink(dc2, s1, **linkopts)

    # add the command line interface endpoint to each DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(dc1)
    rapi1.connectDatacenter(dc2)
    # run API endpoint server (in another thread, don't block)
    rapi1.start()

    # add the SONATA dummy gatekeeper to each DC
    sdkg1 = SonataDummyGatekeeperEndpoint("0.0.0.0", 5000)
    sdkg1.connectDatacenter(dc1)
    sdkg1.connectDatacenter(dc2)
    # run the dummy gatekeeper (in another thread, don't block)
    sdkg1.start()

    # start the emulation platform
    net.start()
    # create hosts and vnfs
    # use ./init_vnfs_rubis for the rubis experiments
    # use ./init_vnfs for the stratos experiments
    subprocess.call("./init_vnfs_rubis.sh", shell=True)
    subprocess.call("./chain_vnfs.sh", shell=True)

    fw, snort, client, server = net.getNodeByName('fw', 'snort', 'client', 'server')
    print("Waiting for warmup")
    time.sleep(10)
    # run experiment
    # CONFIGURE number of cores
    cores = 4
    for i in range(0, 4):  # set here the number of repetitions
        for fwbw in [5, 50, 100]:  # network bandwidth range for the firewall
            for snortbw in [5, 50, 100]:  # network bandwidth range for the dpi
                # other available sizes: '4KB', '8KB', '16KB', '32KB', '64KB',
                # '128KB', '256KB', '512KB', '1024KB', '2048KB', '4096KB',
                # '8192KB', '16384KB', '32768KB'
                for reqsize in ['128KB']:
                    for fwcpu in [5, 50, 100]:  # cpu capacity range for the firewall; 5 means 5% of one cpu
                        for snortcpu in [5, 50, 100]:  # cpu capacity range for the dpi; 5 means 5% of one cpu
                            r = 0
                            # float division; integer division would yield 0 in Python 2
                            fw.setParam(r, 'setCPUFrac', cpu=fwcpu / (cores * 100.0))
                            snort.setParam(r, 'setCPUFrac', cpu=snortcpu / (cores * 100.0))
                            strcmd = "%s %d %d %d %d %s %d &" % ('./start_firewall.sh', fwbw, snortbw, fwcpu, snortcpu, reqsize, i)
                            fw.cmd(strcmd)
                            time.sleep(1)
                            strcmd = "%s %d %d %d %d %s %d &" % ('./start_snort.sh', fwbw, snortbw, fwcpu, snortcpu, reqsize, i)
                            snort.cmd(strcmd)
                            strcmd = "%s %d %d %d %d %s %d &" % ('./start_server.sh', fwbw, snortbw, fwcpu, snortcpu, reqsize, i)
                            server.cmd(strcmd)
                            time.sleep(1)
                            client.cmd("ping -c 2 10.0.0.50 >> log-ping")
                            client.cmd("ping -c 2 10.0.0.50 >> log-ping")
                            strcmd = "%s %d %d %d %d %s %d &" % ('./start_client.sh', fwbw, snortbw, fwcpu, snortcpu, reqsize, i)
                            client.cmd(strcmd)
                            # the parameter for iperfc is the target bandwidth
                            strcmd = "%s %d" % ('./start_iperfc.sh', 30)
                            client.cmd(strcmd)
                            print("Waiting for experiment %d-%d-%d-%d-%s-%d" % (fwbw, snortbw, fwcpu, snortcpu, reqsize, i))
                            # use 180 for the rubis workload, 100 for stratos
                            time.sleep(180)
                            print("Copying results and cleaning up")
                            strcmd = "scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no guiltiness* [email protected]:/home/vagrant/son-emu/logs/"
                            fw.cmd(strcmd)
                            snort.cmd(strcmd)
                            strcmd = "scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no log* [email protected]:/home/vagrant/son-emu/logs/"
                            client.cmd(strcmd)
                            server.cmd(strcmd)
                            fw.cmd("rm guiltiness*")
                            snort.cmd("rm guiltiness*")
                            client.cmd("rm log*")
                            server.cmd("rm log*")
def create_topology1():
    """
    1. Create a data center network object (DCNetwork) with monitoring enabled
    """
    net = DCNetwork(monitor=True, enable_learning=False)
    """
    1b. Add endpoint APIs for the whole DCNetwork,
        to access and control the networking from outside.
        e.g., to setup forwarding paths between compute
        instances aka. VNFs (represented by Docker containers), passing through
        different switches and datacenters of the emulated topology
    """
    # create monitoring api endpoint for backwards compatibility with zerorpc api
    mon_api = ZeroRpcApiEndpointDCNetwork("0.0.0.0", 5151)
    mon_api.connectDCNetwork(net)
    mon_api.start()
    """
    2. Add (logical) data centers to the topology
       (each data center is one "bigswitch" in our simplified
        first prototype)
    """
    dc1 = net.addDatacenter("datacenter1")
    dc2 = net.addDatacenter("datacenter2")
    """
    3. You can add additional SDN switches for data center
       interconnections to the network.
    """
    s1 = net.addSwitch("s1")
    """
    4. Add links between your data centers and additional switches
       to define you topology.
       These links can use Mininet's features to limit bw, add delay or jitter.
    """

    net.addLink(dc1, s1)
    net.addLink(s1, dc2)
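    # e.g., a bandwidth-limited link with added delay (illustrative values,
    # matching the linkopts style used elsewhere in this file):
    # net.addLink(dc1, s1, delay="10ms", bw=100)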
    """
    5. We want to access and control our data centers from the outside,
       e.g., we want to connect an orchestrator to start/stop compute
       resources aka. VNFs (represented by Docker containers in the emulated)

       So we need to instantiate API endpoints (e.g. a zerorpc or REST
       interface). Depending on the endpoint implementations, we can connect
       one or more data centers to it, which can then be controlled through
       this API, e.g., start/stop/list compute instances.
    """
    # keep the old zeroRPC interface for the prometheus metric query test
    zapi1 = ZeroRpcApiEndpoint("0.0.0.0", 4242)
    # connect data centers to this endpoint
    zapi1.connectDatacenter(dc1)
    zapi1.connectDatacenter(dc2)
    # run API endpoint server (in another thread, don't block)
    zapi1.start()

    # create a new instance of an endpoint implementation
    # the REST API handles all compute, networking and monitoring commands in one endpoint
    api1 = RestApiEndpoint("0.0.0.0", 5001)
    # connect data centers to this endpoint
    api1.connectDatacenter(dc1)
    api1.connectDatacenter(dc2)
    # connect total network also, needed to do the chaining and monitoring
    api1.connectDCNetwork(net)
    # run API endpoint server (in another thread, don't block)
    api1.start()
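    # once running, compute instances can be managed through this REST API,
    # e.g., with the son-emu-cli client (assumed to be installed on the host):
    # $ son-emu-cli compute start -d datacenter1 -n vnf1
    # $ son-emu-cli compute list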
    """
    5.1. For our example, we create a second endpoint to illustrate that
         this is supported by our design. This feature allows us to have
         one API endpoint for each data center. This makes the emulation
         environment more realistic because you can easily create one
         OpenStack-like REST API endpoint for *each* data center.
         This will look like a real-world multi PoP/data center deployment
         from the perspective of an orchestrator.
    """
    #zapi2 = ZeroRpcApiEndpoint("0.0.0.0", 4343)
    #zapi2.connectDatacenter(dc3)
    #zapi2.connectDatacenter(dc4)
    #zapi2.start()
    """
    6. Finally we are done and can start our network (the emulator).
       We can also enter the Mininet CLI to interactively interact
       with our compute resources (just like in default Mininet).
       But we can also implement fully automated experiments that
       can be executed again and again.
    """
    net.start()
    net.CLI()
    # when the user types exit in the CLI, we stop the emulator
    net.stop()
def create_topology1():
    """
    1. Create a data center network object (DCNetwork)
    """
    net = DCNetwork(monitor=True, enable_learning=True)

    """
    1b. add a monitoring agent to the DCNetwork
    """
    #keep old zeroRPC interface to test the prometheus metric query
    mon_api = ZeroRpcApiEndpointDCNetwork("0.0.0.0", 5151)
    mon_api.connectDCNetwork(net)
    mon_api.start()
    """
    2. Add (logical) data centers to the topology
       (each data center is one "bigswitch" in our simplified
        first prototype)
    """
    dc1 = net.addDatacenter("dc1")
    dc2 = net.addDatacenter("dc2")
    dc3 = net.addDatacenter("long_data_center_name3")
    dc4 = net.addDatacenter(
        "dc4",
        metadata={"mydata": "we can also add arbitrary metadata to each DC"})

    """
    3. You can add additional SDN switches for data center
       interconnections to the network.
    """
    s1 = net.addSwitch("s1")

    """
    4. Add links between your data centers and additional switches
       to define you topology.
       These links can use Mininet's features to limit bw, add delay or jitter.
    """
    net.addLink(dc1, dc2)
    net.addLink("dc1", s1)
    net.addLink(s1, dc3)
    net.addLink(s1, "dc4")

    """
    5. We want to access and control our data centers from the outside,
       e.g., we want to connect an orchestrator to start/stop compute
       resources aka. VNFs (represented by Docker containers in the emulated)

       So we need to instantiate API endpoints (e.g. a zerorpc or REST
       interface). Depending on the endpoint implementations, we can connect
       one or more data centers to it, which can then be controlled through
       this API, e.g., start/stop/list compute instances.
    """
    # keep the old zeroRPC interface for the prometheus metric query test
    zapi1 = ZeroRpcApiEndpoint("0.0.0.0", 4242)
    # connect data centers to this endpoint
    zapi1.connectDatacenter(dc1)
    zapi1.connectDatacenter(dc2)
    # run API endpoint server (in another thread, don't block)
    zapi1.start()

    # create a new instance of an endpoint implementation
    api1 = RestApiEndpoint("127.0.0.1", 5001)
    # connect data centers to this endpoint
    api1.connectDatacenter(dc1)
    api1.connectDatacenter(dc2)
    api1.connectDatacenter(dc3)
    api1.connectDatacenter(dc4)
    # connect total network also, needed to do the chaining and monitoring
    api1.connectDCNetwork(net)
    # run API endpoint server (in another thread, don't block)
    api1.start()

    """
    6. Finally we are done and can start our network (the emulator).
       We can also enter the Mininet CLI to interactively interact
       with our compute resources (just like in default Mininet).
       But we can also implement fully automated experiments that
       can be executed again and again.
    """
    net.start()
    net.CLI()
    # when the user types exit in the CLI, we stop the emulator
    net.stop()