Example #1
class EmulatorProfilingTopology(object):
    def __init__(self):
        pass

    def start(self):
        LOG.info("Starting emulation ...")
        setLogLevel('info')  # set Mininet loglevel
        # create topology
        self.net = DCNetwork(monitor=False, enable_learning=False)
        # we only need one DC for benchmarking
        dc = self.net.addDatacenter("dc1")
        # add the command line interface endpoint to each DC (REST API)
        self.rapi1 = RestApiEndpoint("0.0.0.0", 5001)
        self.rapi1.connectDCNetwork(self.net)
        self.rapi1.connectDatacenter(dc)
        self.rapi1.start()
        # add the 5GTANGO lightweight life cycle manager (LLCM) to the topology
        self.llcm1 = TangoLLCMEndpoint("0.0.0.0", 5000, deploy_sap=False)
        self.llcm1.connectDatacenter(dc)
        self.llcm1.start()
        self.net.start()

    def stop(self):
        LOG.info("Stopping emulation ...")
        self.rapi1.stop()
        self.llcm1.stop()
        self.net.stop()
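The class above only defines start() and stop(); a hypothetical driver script (not part of the original snippet) could look like the following sketch, where the 30-second sleep is a placeholder for the actual profiling workload:

import time

if __name__ == '__main__':
    topo = EmulatorProfilingTopology()
    topo.start()
    try:
        time.sleep(30)  # placeholder for the actual benchmark/profiling run
    finally:
        topo.stop()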
Example #2
    def __init__(self, port=5000):
        GracefulKiller(self)
        # create topology
        self.net = DCNetwork(controller=RemoteController,
                             monitor=False,
                             enable_learning=False)
        self.dc = self.net.addDatacenter("dc1")

        # add the command line interface endpoint to each DC (REST API)
        self.rapi1 = RestApiEndpoint("0.0.0.0", port + 1)
        self.rapi1.connectDCNetwork(self.net)
        self.rapi1.connectDatacenter(self.dc)
        # run API endpoint server (in another thread, don't block)
        self.rapi1.start()

        # add the SONATA dummy gatekeeper to each DC
        self.sdkg1 = SonataDummyGatekeeperEndpoint("0.0.0.0",
                                                   port,
                                                   deploy_sap=False)
        self.sdkg1.connectDatacenter(self.dc)
        # run the dummy gatekeeper (in another thread, don't block)
        self.sdkg1.start()

        self.net.start()
        LOG.info("Started topology")
        while not self.stop_now:
            sleep(1)
        self.net.stop()
        LOG.info("Stopped topology")
Example #3
def create_topology():
    net = DCNetwork(monitor=False, enable_learning=True)
    # create two data centers
    dc1 = net.addDatacenter("dc1")
    dc2 = net.addDatacenter("dc2")
    # interconnect data centers
    net.addLink(dc1, dc2, delay="20ms")
    # add the command line interface endpoint to the emulated DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(dc1)
    rapi1.connectDatacenter(dc2)
    rapi1.start()
    # add the 5GTANGO lightweight life cycle manager (LLCM) to the topology
    llcm1 = TangoLLCMEndpoint("0.0.0.0",
                              5000,
                              deploy_sap=False,
                              placement_algorithm_obj=StaticConfigPlacement(
                                  "~/static_placement.yml"))
    llcm1.connectDatacenter(dc1)
    llcm1.connectDatacenter(dc2)
    # run the LLCM (in another thread, don't block)
    llcm1.start()
    # start the emulation and enter interactive CLI
    net.start()
    net.CLI()
    # when the user types exit in the CLI, we stop the emulator
    net.stop()
def create_topology1():
    # create topology
    net = DCNetwork(controller=RemoteController, monitor=False, enable_learning=True)
    dc1 = net.addDatacenter("dc1")
    dc2 = net.addDatacenter("dc2")
    s1 = net.addSwitch("s1")
    net.addLink(dc1, s1, delay="10ms")
    net.addLink(dc2, s1, delay="20ms")

    # add the command line interface endpoint to each DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(dc1)
    rapi1.connectDatacenter(dc2)
    # run API endpoint server (in another thread, don't block)
    rapi1.start()

    # add the SONATA dummy gatekeeper to each DC
    sdkg1 = SonataDummyGatekeeperEndpoint("0.0.0.0", 5000, deploy_sap=False)
    sdkg1.connectDatacenter(dc1)
    sdkg1.connectDatacenter(dc2)
    # run the dummy gatekeeper (in another thread, don't block)
    sdkg1.start()

    # start the emulation platform
    net.start()
    net.CLI()
    rapi1.stop()
    net.stop()
Example #5
class DaemonTopology(object):
    def __init__(self):
        self.running = True
        signal.signal(signal.SIGINT, self._stop_by_signal)
        signal.signal(signal.SIGTERM, self._stop_by_signal)
        # create and start topology
        self.create_topology()
        self.start_topology()
        self.daemonize()
        self.stop_topology()

    def create_topology(self):
        self.net = DCNetwork(monitor=False, enable_learning=False)
        self.dc1 = self.net.addDatacenter("dc1")
        #self.dc2 = self.net.addDatacenter("dc2")
        #self.net.addLink(self.dc1, self.dc2, cls=TCLink, delay="20ms")
        # add OpenStack-like APIs to the emulated DC
        self.api1 = OpenstackApiEndpoint("0.0.0.0", 6001)
        self.api1.connect_datacenter(self.dc1)
        #self.api1.connect_datacenter(self.dc2)
        self.api1.connect_dc_network(self.net)
        # add the command line interface endpoint to the emulated DC (REST API)
        self.rapi1 = RestApiEndpoint("0.0.0.0", 5001)
        self.rapi1.connectDCNetwork(self.net)
        self.rapi1.connectDatacenter(self.dc1)
        #self.rapi1.connectDatacenter(self.dc2)

        self.sdkg1 = SonataDummyGatekeeperEndpoint("0.0.0.0",
                                                   5000,
                                                   deploy_sap=False)
        self.sdkg1.connectDatacenter(self.dc1)
        #self.sdkg1.connectDatacenter(self.dc2)

    def start_topology(self):
        self.api1.start()
        self.rapi1.start()
        self.sdkg1.start()
        self.net.start()

    def daemonize(self):
        print("Daemonizing vim-emu. Send SIGTERM or SIGKILL to stop.")
        while self.running:
            time.sleep(1)

    def _stop_by_signal(self, signum, frame):
        print("Received SIGNAL {}. Stopping.".format(signum))
        self.running = False

    def stop_topology(self):
        self.api1.stop()
        self.rapi1.stop()
        #self.sdkg1.stop()
        self.net.stop()
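Since the constructor runs the whole lifecycle (create, start, daemonize, stop), a hypothetical entry point only needs to instantiate the class:

if __name__ == '__main__':
    # blocks until SIGINT/SIGTERM arrives, then stops the topology
    DaemonTopology()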
Example #6
def create_topology1():
    # create topology
    net = DCNetwork(controller=RemoteController, monitor=False, enable_learning=True)
    # add data centers
    dc1 = net.addDatacenter("dc1")

    # add REST control endpoints to datacenter (to be used with son-emu-cli)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(dc1)
    rapi1.start()
    
    # add OpenStack-like interface endpoints to dc1
    api1 = OpenstackApiEndpoint("0.0.0.0", 6001)
    # connect PoPs
    api1.connect_datacenter(dc1)
    # connect network
    api1.connect_dc_network(net)
    # start
    api1.start()

    # start the emulation platform
    net.start()
    net.CLI()
    net.stop()
Example #7
def create_topology1():
    # create topology
    net = DCNetwork(controller=RemoteController,
                    monitor=True,
                    enable_learning=True)
    dc1 = net.addDatacenter("dc1")

    # add the command line interface endpoint to each DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(dc1)
    # run API endpoint server (in another thread, don't block)
    rapi1.start()

    # specify a vnfd file to be deployed as internal SAP:
    sap_vnfd = 'custom_sap_vnfd.yml'
    dir_path = os.path.dirname(__file__)
    sap_vnfd_path = os.path.join(dir_path, sap_vnfd)
    # sap_vnfd_path = None
    # add the SONATA dummy gatekeeper to each DC
    sdkg1 = SonataDummyGatekeeperEndpoint("0.0.0.0",
                                          5000,
                                          deploy_sap=True,
                                          auto_deploy=True,
                                          docker_management=True,
                                          auto_delete=True,
                                          sap_vnfd_path=sap_vnfd_path)
    sdkg1.connectDatacenter(dc1)
    # run the dummy gatekeeper (in another thread, don't block)
    sdkg1.start()

    # start the emulation platform
    net.start()
    net.CLI()
    net.stop()
Example #8
    def createNet(self,
                  nswitches=0,
                  ndatacenter=0,
                  nhosts=0,
                  ndockers=0,
                  autolinkswitches=False,
                  controller=Controller,
                  **kwargs):
        """
        Creates a Mininet instance and automatically adds some
        nodes to it.

        Attention, we should always use Mininet's default controller
        for our tests. Only use other controllers if you want to test
        specific controller functionality.
        """
        self.net = DCNetwork(controller=controller, **kwargs)
        for i in range(0, ndatacenter):
            self.api.append(OpenstackApiEndpoint("0.0.0.0", 15000 + i))

        # add some switches
        # start from s1 because ovs does not like to have dpid = 0
        # and switch name-number is being used by mininet to set the dpid
        for i in range(1, nswitches + 1):
            self.s.append(self.net.addSwitch('s%d' % i))
        # if specified, chain all switches
        if autolinkswitches:
            for i in range(0, len(self.s) - 1):
                self.net.addLink(self.s[i], self.s[i + 1])
            self.net.addLink(self.s[2],
                             self.s[0])  # close the ring: link s3 back to s1

        # add some data centers
        for i in range(0, ndatacenter):
            self.dc.append(
                self.net.addDatacenter('dc%d' % i, metadata={"unittest_dc":
                                                             i}))
        self.net.addLink(self.dc[0].switch,
                         self.s[0])  # link switches dc0.s1 with s1
        # connect data centers to the endpoint
        for i in range(0, ndatacenter):
            self.api[i].connect_datacenter(self.dc[i])
            self.api[i].connect_dc_network(self.net)
        # add some hosts
        for i in range(0, nhosts):
            self.h.append(self.net.addHost('h%d' % i))
        # add some dockers
        for i in range(0, ndockers):
            self.d.append(self.net.addDocker('d%d' % i,
                                             dimage="ubuntu:trusty"))
def create_topology1():
    net = DCNetwork(monitor=False, enable_learning=True)
    dc1 = net.addDatacenter("dc1")

    heatapi1 = OpenstackApiEndpoint("0.0.0.0", 5001)

    # connect data center to this endpoint
    heatapi1.connect_datacenter(dc1)

    # run the API endpoint server (in another thread, don't block)
    heatapi1.start()

    heatapi1.connect_dc_network(net)

    net.start()
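Note that this variant returns right after net.start() and never blocks or shuts down. A hedged keep-alive tail, mirroring the daemon examples in this collection (the helper name is an assumption):

import time

def run_forever(net):
    # hypothetical helper: keep the emulator alive until Ctrl-C,
    # then shut it down cleanly
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        net.stop()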
Example #10
File: profiling.py (Project: CN-UPB/son-emu)
    def __init__(self):
        GracefulKiller(self)
        # create topology
        self.net = DCNetwork(controller=RemoteController, monitor=False, enable_learning=False)
        self.dc = self.net.addDatacenter("dc1")

        # add the command line interface endpoint to each DC (REST API)
        self.rapi1 = RestApiEndpoint("0.0.0.0", 5001)
        self.rapi1.connectDCNetwork(self.net)
        self.rapi1.connectDatacenter(self.dc)
        # run API endpoint server (in another thread, don't block)
        self.rapi1.start()

        # add the SONATA dummy gatekeeper to each DC
        self.sdkg1 = SonataDummyGatekeeperEndpoint("0.0.0.0", 5000, deploy_sap=False)
        self.sdkg1.connectDatacenter(self.dc)
        # run the dummy gatekeeper (in another thread, don't block)
        self.sdkg1.start()


        self.net.start()
        LOG.info("Started topology")
        while not self.stop_now:
            sleep(1)
        self.net.stop()
        LOG.info("Stopped topology")
Example #11
File: base.py (Project: hadik3r/son-emu)
    def createNet(
            self,
            nswitches=0, ndatacenter=0, nhosts=0, ndockers=0,
            autolinkswitches=False, controller=Controller, **kwargs):
        """
        Creates a Mininet instance and automatically adds some
        nodes to it.

        Attention, we should always use Mininet's default controller
        for our tests. Only use other controllers if you want to test
        specific controller functionality.
        """
        self.net = DCNetwork(controller=controller, **kwargs)

        # add some switches
        # start from s1 because ovs does not like to have dpid = 0
        # and switch name-number is being used by mininet to set the dpid
        for i in range(1, nswitches+1):
            self.s.append(self.net.addSwitch('s%d' % i))
        # if specified, chain all switches
        if autolinkswitches:
            for i in range(0, len(self.s) - 1):
                self.net.addLink(self.s[i], self.s[i + 1])
        # add some data centers
        for i in range(0, ndatacenter):
            self.dc.append(
                self.net.addDatacenter(
                    'datacenter%d' % i,
                    metadata={"unittest_dc": i}))
        # add some hosts
        for i in range(0, nhosts):
            self.h.append(self.net.addHost('h%d' % i))
        # add some dockers
        for i in range(0, ndockers):
            self.d.append(self.net.addDocker('d%d' % i, dimage="ubuntu:trusty"))
    def create_topology(self):
        self.net = DCNetwork(monitor=False, enable_learning=True)
        self.dc1 = self.net.addDatacenter("dc1")
        self.dc2 = self.net.addDatacenter("dc2")
        self.net.addLink(self.dc1, self.dc2, cls=TCLink, delay="50ms")
        # add OpenStack-like APIs to the emulated DC
        self.api1 = OpenstackApiEndpoint("0.0.0.0", 6001)
        self.api1.connect_datacenter(self.dc1)
        self.api1.connect_dc_network(self.net)
        self.api2 = OpenstackApiEndpoint("0.0.0.0", 6002)
        self.api2.connect_datacenter(self.dc2)
        self.api2.connect_dc_network(self.net)
        # add the command line interface endpoint to the emulated DC (REST API)
        self.rapi1 = RestApiEndpoint("0.0.0.0", 5001)
        self.rapi1.connectDCNetwork(self.net)
        self.rapi1.connectDatacenter(self.dc1)
        self.rapi1.connectDatacenter(self.dc2)
Example #13
class Profiling:

    stop_now = False
    """
     Set up a simple topology and start it.
     :port: the port the REST interface will use; port+1 will be used as well
    """
    def __init__(self, port=5000):
        GracefulKiller(self)
        # create topology
        self.net = DCNetwork(controller=RemoteController,
                             monitor=False,
                             enable_learning=False)
        self.dc = self.net.addDatacenter("dc1")

        # add the command line interface endpoint to each DC (REST API)
        self.rapi1 = RestApiEndpoint("0.0.0.0", port + 1)
        self.rapi1.connectDCNetwork(self.net)
        self.rapi1.connectDatacenter(self.dc)
        # run API endpoint server (in another thread, don't block)
        self.rapi1.start()

        # add the SONATA dummy gatekeeper to each DC
        self.sdkg1 = SonataDummyGatekeeperEndpoint("0.0.0.0",
                                                   port,
                                                   deploy_sap=False)
        self.sdkg1.connectDatacenter(self.dc)
        # run the dummy gatekeeper (in another thread, don't block)
        self.sdkg1.start()

        self.net.start()
        LOG.info("Started topology")
        while not self.stop_now:
            sleep(1)
        self.net.stop()
        LOG.info("Stopped topology")

    """
     Set stop value to stop the topology
    """

    def stop_it(self):
        self.stop_now = True
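Because __init__ blocks in its sleep loop until stop_it() flips stop_now, the class is normally driven from a plain entry point and terminated through the signal handlers registered by GracefulKiller; a hypothetical sketch:

if __name__ == '__main__':
    # blocks until GracefulKiller triggers stop_it() via SIGINT/SIGTERM
    Profiling(port=5000)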
Example #14
def create_topology1():

    global exit

    # create topology
    net = DCNetwork(controller=RemoteController,
                    monitor=True,
                    enable_learning=False)
    dc1 = net.addDatacenter("dc1")
    dc2 = net.addDatacenter("dc2")
    s1 = net.addSwitch("s1")
    net.addLink(dc1, s1, delay="10ms")
    net.addLink(dc2, s1, delay="20ms")

    # add the command line interface endpoint to each DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDatacenter(dc1)
    rapi1.connectDatacenter(dc2)
    # also connect the whole DC network (needed for chaining and monitoring)
    rapi1.connectDCNetwork(net)
    # run API endpoint server (in another thread, don't block)
    rapi1.start()

    # add the SONATA dummy gatekeeper to each DC
    sdkg1 = SonataDummyGatekeeperEndpoint("0.0.0.0", 5000, deploy_sap=True)
    sdkg1.connectDatacenter(dc1)
    sdkg1.connectDatacenter(dc2)
    # run the dummy gatekeeper (in another thread, don't block)
    sdkg1.start()

    # start the emulation platform
    net.start()

    # does not work from Docker Compose (cannot start container in interactive mode)
    # cli = net.CLI()
    # instead wait here:
    logging.info("waiting for SIGTERM or SIGINT signal")
    while not exit:
        time.sleep(1)
    logging.info("got SIG signal")
    net.stop()
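The global exit flag polled above has to be set by signal handlers that are not part of this snippet; a minimal sketch, assuming plain handlers that simply flip the flag:

import signal

exit = False  # polled by create_topology1(); shadows the builtin, as in the original

def _handle_signal(signum, frame):
    # hypothetical handler: end the wait loop in create_topology1()
    global exit
    exit = True

signal.signal(signal.SIGINT, _handle_signal)
signal.signal(signal.SIGTERM, _handle_signal)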
Example #15
def create_topology():
    net = DCNetwork(monitor=False, enable_learning=False)

    dc1 = net.addDatacenter("dc1")

    api1 = OpenstackApiEndpoint("0.0.0.0", 6001)
    api1.connect_datacenter(dc1)
    api1.start()
    api1.connect_dc_network(net)

    net.start()
    net.CLI()
    # when the user types exit in the CLI, we stop the emulator
    net.stop()
Example #16
def create_topology1():
    # create topology
    net = DCNetwork(controller=RemoteController, monitor=True, enable_learning=True)
    dc1 = net.addDatacenter("dc1")


    # add the command line interface endpoint to each DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(dc1)
    # run API endpoint server (in another thread, don't block)
    rapi1.start()


    # specify a vnfd file to be deployed as internal SAP:
    sap_vnfd = 'custom_sap_vnfd.yml'
    dir_path = os.path.dirname(__file__)
    sap_vnfd_path = os.path.join(dir_path, sap_vnfd)
    sap_vnfd_path = None
    # add the SONATA dummy gatekeeper to each DC
    sdkg1 = SonataDummyGatekeeperEndpoint("0.0.0.0", 5000, deploy_sap=True, auto_deploy=True,
                                          docker_management=True, auto_delete=True,
                                          sap_vnfd_path=sap_vnfd_path)
    sdkg1.connectDatacenter(dc1)
    # run the dummy gatekeeper (in another thread, don't block)
    sdkg1.start()

    # start the emulation platform
    net.start()
    net.CLI()
    net.stop()
Example #17
    def create_topology(self):
        self.net = DCNetwork(monitor=False, enable_learning=True)
        self.client_dc = self.net.addDatacenter("client_dc")
        self.vnfs_dc = self.net.addDatacenter("vnfs_dc")
        self.server_dc = self.net.addDatacenter("server_dc")

        self.switch1 = self.net.addSwitch("switch1")
        self.switch2 = self.net.addSwitch("switch2")

        linkopts = dict(delay="1ms", bw=100)
        self.net.addLink(self.client_dc, self.switch1, **linkopts)
        self.net.addLink(self.vnfs_dc, self.switch1, **linkopts)
        self.net.addLink(self.switch1, self.switch2, **linkopts)
        self.net.addLink(self.vnfs_dc, self.switch2, **linkopts)
        self.net.addLink(self.switch2, self.server_dc, **linkopts)

        # add the command line interface endpoint to the emulated DC (REST API)
        self.rest = RestApiEndpoint("0.0.0.0", 5001)
        self.rest.connectDCNetwork(self.net)
        self.rest.connectDatacenter(self.client_dc)
        self.rest.connectDatacenter(self.vnfs_dc)
        self.rest.connectDatacenter(self.server_dc)
Example #18
    def start(self):
        LOG.info("Starting emulation ...")
        # pylint: disable=E0401
        from mininet.log import setLogLevel
        from emuvim.dcemulator.net import DCNetwork
        from emuvim.api.rest.rest_api_endpoint import RestApiEndpoint
        from emuvim.api.tango import TangoLLCMEndpoint
        setLogLevel('info')  # set Mininet loglevel
        # create topology
        self.net = DCNetwork(monitor=False, enable_learning=False)
        # we only need one DC for benchmarking
        dc = self.net.addDatacenter("dc1")
        # add the command line interface endpoint to each DC (REST API)
        rapi1 = RestApiEndpoint("0.0.0.0", 5001)
        rapi1.connectDCNetwork(self.net)
        rapi1.connectDatacenter(dc)
        rapi1.start()
        # add the 5GTANGO lightweight life cycle manager (LLCM) to the topology
        llcm1 = TangoLLCMEndpoint("0.0.0.0", 5000, deploy_sap=False)
        llcm1.connectDatacenter(dc)
        llcm1.start()
        self.net.start()
def create_topology1():
    cleanup()
    # create topology
    # use a maximum of 50% cpu time for containers added to data centers
    net = DCNetwork(dc_emulation_max_cpu=0.5, controller=Controller)
    # add some data centers and create a topology
    dc1 = net.addDatacenter("dc1", resource_log_path=RESOURCE_LOG_PATH)
    dc2 = net.addDatacenter("dc2", resource_log_path=RESOURCE_LOG_PATH)
    s1 = net.addSwitch("s1")
    net.addLink(dc1, s1, delay="10ms")
    net.addLink(dc2, s1, delay="20ms")

    # create and assign resource models for each DC
    rm1 = UpbSimpleCloudDcRM(max_cu=4, max_mu=1024)
    rm2 = UpbOverprovisioningCloudDcRM(max_cu=4)
    dc1.assignResourceModel(rm1)
    dc2.assignResourceModel(rm2)

    # add the command line interface endpoint to each DC
    zapi1 = ZeroRpcApiEndpoint("0.0.0.0", 4242)
    zapi1.connectDatacenter(dc1)
    zapi1.connectDatacenter(dc2)
    # run API endpoint server (in another thread, don't block)
    zapi1.start()

    # start the emulation platform
    net.start()
    print "Wait a moment and allocate some compute start some compute resources..."
    time.sleep(2)
    dc1.startCompute("vnf1")
    dc1.startCompute("vnf2", flavor_name="tiny")
    dc1.startCompute("vnf3", flavor_name="small")
    dc2.startCompute("vnf4", flavor_name="medium")
    dc2.startCompute("vnf5", flavor_name="medium")
    dc2.startCompute("vnf6", flavor_name="medium")
    print "... done."
    time.sleep(5)
    print "Removing instances ..."
    dc1.stopCompute("vnf1")
    dc2.stopCompute("vnf4")
    print "... done"
    net.CLI()
    net.stop()
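Both resource-model examples in this collection reference a RESOURCE_LOG_PATH constant that is not defined in the snippet; a minimal sketch of the missing module-level setup (the concrete path is an assumption):

# hypothetical location for the resource model logs written by the data centers
RESOURCE_LOG_PATH = "/tmp/resource_log.json"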
Example #20
    def start(self):
        """
        Run the Emulator and the endpoints.
        """
        super(Emulator, self).start()

        initialize_GK()

        self.net = DCNetwork(controller=RemoteController,
                             monitor=False,
                             enable_learning=self.enable_learning)
        self.datacenter = self.net.addDatacenter('dc1')

        endpoint_ip = '0.0.0.0'
        endpoint_port = self.endpoint_port or get_free_tcp_port()
        self.endpoint = 'http://{}:{}'.format(endpoint_ip, endpoint_port)

        self.rest_api = RestApiEndpoint(endpoint_ip, endpoint_port)
        self.rest_api.connectDCNetwork(self.net)
        self.rest_api.connectDatacenter(self.datacenter)
        self.rest_api.start()

        sonata_ip = '0.0.0.0'
        sonata_port = self.sonata_port or get_free_tcp_port()
        self.sonata_address = 'http://{}:{}'.format(sonata_ip, sonata_port)
        self.sonata_gatekeeper = SonataDummyGatekeeperEndpoint(
            sonata_ip, sonata_port)
        self.sonata_gatekeeper.connectDatacenter(self.datacenter)
        self.sonata_gatekeeper.start()

        tango_ip = '0.0.0.0'
        tango_port = self.tango_port or get_free_tcp_port()
        self.tango_address = 'http://{}:{}'.format(tango_ip, tango_port)
        self.tango_gatekeeper = TangoLLCMEndpoint(tango_ip, tango_port)
        self.tango_gatekeeper.connectDatacenter(self.datacenter)
        self.tango_gatekeeper.start()

        self.net.start()
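The get_free_tcp_port() helper used above is not shown; a common implementation binds an ephemeral socket and reads back the port the OS assigned, a sketch assuming that approach:

import socket

def get_free_tcp_port():
    # hypothetical helper: let the OS pick a free ephemeral TCP port
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.bind(('', 0))
    port = sock.getsockname()[1]
    sock.close()
    return port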
Example #21
def create_topology1():

    global exit

    # create topology
    net = DCNetwork(controller=RemoteController, monitor=True, enable_learning=False)
    dc1 = net.addDatacenter("dc1")
    dc2 = net.addDatacenter("dc2")
    s1 = net.addSwitch("s1")
    net.addLink(dc1, s1, delay="10ms")
    net.addLink(dc2, s1, delay="20ms")

    # add the command line interface endpoint to each DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDatacenter(dc1)
    rapi1.connectDatacenter(dc2)
    # also connect the whole DC network (needed for chaining and monitoring)
    rapi1.connectDCNetwork(net)
    # run API endpoint server (in another thread, don't block)
    rapi1.start()

    # add the SONATA dummy gatekeeper to each DC
    sdkg1 = SonataDummyGatekeeperEndpoint("0.0.0.0", 5000, deploy_sap=True)
    sdkg1.connectDatacenter(dc1)
    sdkg1.connectDatacenter(dc2)
    # run the dummy gatekeeper (in another thread, don't block)
    sdkg1.start()

    # start the emulation platform
    net.start()

    # does not work from docker compose (cannot start container in interactive mode)
    # cli = net.CLI()
    # instead wait here:
    logging.info("waiting for SIGTERM or SIGINT signal")
    while not exit:
        time.sleep(1)
    logging.info("got SIG signal")
    net.stop()
Example #22
def create_topology():
    net = DCNetwork(monitor=False, enable_learning=False)

    dc1 = net.addDatacenter("dc1")
    # add OpenStack-like APIs to the emulated DC
    api1 = OpenstackApiEndpoint("0.0.0.0", 6001)
    api1.connect_datacenter(dc1)
    api1.start()
    api1.connect_dc_network(net)
    # add the command line interface endpoint to the emulated DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(dc1)
    rapi1.start()

    net.start()
    net.CLI()
    # when the user types exit in the CLI, we stop the emulator
    net.stop()
Example #23
def create_topology1():
    # create topology
    net = DCNetwork(controller=RemoteController, monitor=True, enable_learning=True)
    dc1 = net.addDatacenter("dc1")


    # add the command line interface endpoint to each DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(dc1)
    # run API endpoint server (in another thread, don't block)
    rapi1.start()

    # add the SONATA dummy gatekeeper to each DC
    #sdkg1 = SonataDummyGatekeeperEndpoint("0.0.0.0", 5000, deploy_sap=True)
    #sdkg1.connectDatacenter(dc1)
    # run the dummy gatekeeper (in another thread, don't block)
    #sdkg1.start()

    # start the emulation platform
    net.start()  # here the docker host default ip is configured

    # topology must be started before hosts are added
    cache = dc1.startCompute('cache', image="squid-vnf", network=[{"ip": "10.10.0.1/24", "id": "client", 'mac': "aa:aa:aa:00:00:01"},
                                                                  {"ip": "10.20.0.1/24", "id": "server", "mac": "aa:aa:aa:00:00:02"}])

    client = dc1.startCompute('client', image='vcdn-client', network=[{"ip": "10.10.0.2/24", "id": "client"}])

    server = dc1.startCompute('server', image='webserver', network=[{"ip": "10.20.0.2/24", "id": "server"}])


    
    #client = net.addDocker('client', ip='10.10.0.1/24', dimage="vcdn-client") 
    #cache = net.addDocker('cache', dimage="squid-vnf")
    #server = net.addDocker('server', ip='10.20.0.1/24', dimage="webserver")
    #net.addLink(dc1, client,  intfName1='dc-cl', intfName2='client')
    #net.addLink(dc1, server,  intfName1='dc-sv', intfName2='server')
    #net.addLink(dc1, cache,  intfName1='dc-ccl', intfName2='client', params1={'ip': '10.10.0.2/24'})
    #net.addLink(dc1, cache,  intfName1='dc-csv', intfName2='server',params1={'ip': '10.20.0.2/24'})

    # initialise VNFs
    cache.cmd("./start.sh", detach=True)
    client.cmd("./start.sh", detach=True)
    server.cmd('./start.sh', detach=True)

    # note: the startup script hangs if we use another startup command
    # (e.g., command="./start.sh")

    net.CLI()
    net.stop()
def create_topology():
    net = DCNetwork(monitor=False, enable_learning=True)
    # create two data centers
    dc1 = net.addDatacenter("dc1")
    dc2 = net.addDatacenter("dc2")
    # interconnect data centers
    net.addLink(dc1, dc2, delay="20ms")
    # add the command line interface endpoint to the emulated DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(dc1)
    rapi1.connectDatacenter(dc2)
    rapi1.start()
    # add the 5GTANGO lightweight life cycle manager (LLCM) to the topology
    llcm1 = TangoLLCMEndpoint("0.0.0.0", 5000, deploy_sap=False)
    llcm1.connectDatacenter(dc1)
    llcm1.connectDatacenter(dc2)
    # run the LLCM (in another thread, don't block)
    llcm1.start()
    # start the emulation and enter interactive CLI
    net.start()
    net.CLI()
    # when the user types exit in the CLI, we stop the emulator
    net.stop()
Example #25
def create_topology1():
    # create topology
    net = DCNetwork(controller=RemoteController,
                    monitor=True,
                    enable_learning=True)
    dc1 = net.addDatacenter("dc1")

    # add the command line interface endpoint to each DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(dc1)
    # run API endpoint server (in another thread, don't block)
    rapi1.start()

    # add the SONATA dummy gatekeeper to each DC
    sdkg1 = SonataDummyGatekeeperEndpoint("0.0.0.0", 5000, deploy_sap=True)
    sdkg1.connectDatacenter(dc1)
    # run the dummy gatekeeper (in another thread, don't block)
    sdkg1.start()

    # start the emulation platform
    net.start()
    net.CLI()
    net.stop()
Example #26
def create_topology1():
    net = DCNetwork(monitor=True, enable_learning=True)
    dc1 = net.addDatacenter("datacenter1")

    heatapi1 = OpenstackApiEndpoint("131.234.31.45", 5001)

    # connect data center to this endpoint
    heatapi1.connect_datacenter(dc1)

    # run the API endpoint server (in another thread, don't block)
    heatapi1.start()

    heatapi1.connect_dc_network(net)

    net.start()
    net.CLI()
    # when the user types exit in the CLI, we stop the emulator
    net.stop()
Example #27
def create_and_start_topology(lock, restart_lock):
    _LOGGER.info("Creating and starting the topology")
    net = DCNetwork(controller=RemoteController,
                    monitor=True,
                    enable_learning=True)
    restart_lock.acquire()
    setup_topology(net)
    try:
        net.start()  # non blocking call
        _LOGGER.info("Waiting for the barrier to stop the topology")
        lock.acquire()
        _LOGGER.info("Stopping the topology")
        net.stop()
        lock.release()
    except Exception as e:
        _LOGGER.error("Ignoring exception in thread: {!s}".format(e))
    restart_lock.release()
    exit(1)
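A hypothetical caller for create_and_start_topology(), showing one way the two locks could be used: the caller holds lock while the experiment runs and releases it to let the worker thread call net.stop() (names and structure are assumptions):

import threading

lock = threading.Lock()
restart_lock = threading.Lock()

lock.acquire()  # held while the experiment runs
worker = threading.Thread(
    target=create_and_start_topology, args=(lock, restart_lock))
worker.start()
# ... run experiments against the emulator here ...
lock.release()  # lets the worker stop the topology
worker.join()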
Example #28
def create_topology1():
    net = DCNetwork(monitor=True, enable_learning=False)
    dc1 = net.addDatacenter("dc1")
    dc2 = net.addDatacenter("dc2")
    dc3 = net.addDatacenter("dc3")
    dc4 = net.addDatacenter("dc4")

    heatapi1 = OpenstackApiEndpoint("0.0.0.0", 5001)
    heatapi2 = OpenstackApiEndpoint("0.0.0.0", 5002)
    heatapi3 = OpenstackApiEndpoint("0.0.0.0", 5003)
    heatapi4 = OpenstackApiEndpoint("0.0.0.0", 5004)

    # connect data centers to this endpoint
    heatapi1.connect_datacenter(dc1)
    heatapi2.connect_datacenter(dc2)
    heatapi3.connect_datacenter(dc3)
    heatapi4.connect_datacenter(dc4)

    s1 = net.addSwitch("s1")
    net.addLink(dc1, s1, jitter="10ms", delay="12ms", loss=0, bw=0.5)
    net.addLink(dc2, s1, bw=0.5, loss=0, delay="20ms", jitter="15ms")
    net.addLink(dc3, s1, delay="30ms", loss=1, bw=0.5, jitter="10ms")
    net.addLink(dc4, s1, delay="40ms", loss=2, bw=1, jitter="10ms")
    # run the API endpoint servers (in other threads, don't block)
    heatapi1.start()
    heatapi2.start()
    heatapi3.start()
    heatapi4.start()

    heatapi1.connect_dc_network(net)
    heatapi2.connect_dc_network(net)
    heatapi3.connect_dc_network(net)
    heatapi4.connect_dc_network(net)

    net.start()
    net.CLI()
    # when the user types exit in the CLI, we stop the emulator
    net.stop()
Example #29
def create_topology1():
    # create topology
    net = DCNetwork(controller=RemoteController,
                    monitor=False,
                    enable_learning=False)
    # add data centers
    dc1 = net.addDatacenter("dc1")
    dc2 = net.addDatacenter("dc2")
    # add some intermediate switch
    s1 = net.addSwitch("s1")
    # connect data centers
    net.addLink(dc1, s1, delay="10ms")
    net.addLink(dc2, s1, delay="20ms")

    # add REST control endpoints to each datacenter (to be used with son-emu-cli)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(dc1)
    rapi1.connectDatacenter(dc2)
    rapi1.start()

    # add the SONATA dummy gatekeeper to each DC
    sdkg1 = SonataDummyGatekeeperEndpoint("0.0.0.0", 5000)
    sdkg1.connectDatacenter(dc1)
    sdkg1.connectDatacenter(dc2)
    # run the dummy gatekeeper (in another thread, don't block)
    sdkg1.start()

    # start the emulation platform
    net.start()
    net.CLI()
    net.stop()
def create_topology1():
    """
    1. Create a data center network object (DCNetwork) with monitoring enabled
    """
    net = DCNetwork(monitor=True, enable_learning=False)
    """
    1b. Add endpoint APIs for the whole DCNetwork,
        to access and control the networking from outside.
        e.g., to set up forwarding paths between compute
        instances aka. VNFs (represented by Docker containers), passing through
        different switches and datacenters of the emulated topology
    """
    # create monitoring api endpoint for backwards compatibility with zerorpc api
    mon_api = ZeroRpcApiEndpointDCNetwork("0.0.0.0", 5151)
    mon_api.connectDCNetwork(net)
    mon_api.start()
    """
    2. Add (logical) data centers to the topology
       (each data center is one "bigswitch" in our simplified
        first prototype)
    """
    dc1 = net.addDatacenter("datacenter1")
    dc2 = net.addDatacenter("datacenter2")
    """
    3. You can add additional SDN switches for data center
       interconnections to the network.
    """
    s1 = net.addSwitch("s1")
    """
    4. Add links between your data centers and additional switches
       to define your topology.
       These links can use Mininet's features to limit bw, add delay or jitter.
    """

    net.addLink(dc1, s1)
    net.addLink(s1, dc2)
    """
    5. We want to access and control our data centers from the outside,
       e.g., we want to connect an orchestrator to start/stop compute
       resources aka. VNFs (represented by Docker containers in the emulated network)

       So we need to instantiate API endpoints (e.g. a zerorpc or REST
       interface). Depending on the endpoint implementations, we can connect
       one or more data centers to it, which can then be controlled through
       this API, e.g., start/stop/list compute instances.
    """
    # keep the old zeroRPC interface for the prometheus metric query test
    zapi1 = ZeroRpcApiEndpoint("0.0.0.0", 4242)
    # connect data centers to this endpoint
    zapi1.connectDatacenter(dc1)
    zapi1.connectDatacenter(dc2)
    # run API endpoint server (in another thread, don't block)
    zapi1.start()

    # create a new instance of an endpoint implementation
    # the REST API handles all compute, networking, and monitoring commands in one API endpoint
    api1 = RestApiEndpoint("0.0.0.0", 5001)
    # connect data centers to this endpoint
    api1.connectDatacenter(dc1)
    api1.connectDatacenter(dc2)
    # also connect the whole DC network (needed for chaining and monitoring)
    api1.connectDCNetwork(net)
    # run API endpoint server (in another thread, don't block)
    api1.start()
    """
    5.1. For our example, we create a second endpoint to illustrate that
         this is supported by our design. This feature allows us to have
         one API endpoint for each data center. This makes the emulation
         environment more realistic because you can easily create one
         OpenStack-like REST API endpoint for *each* data center.
         This will look like a real-world multi PoP/data center deployment
         from the perspective of an orchestrator.
    """
    #zapi2 = ZeroRpcApiEndpoint("0.0.0.0", 4343)
    #zapi2.connectDatacenter(dc3)
    #zapi2.connectDatacenter(dc4)
    #zapi2.start()
    """
    6. Finally we are done and can start our network (the emulator).
       We can also enter the Mininet CLI to interact with our
       compute resources (just like in default Mininet).
       But we can also implement fully automated experiments that
       can be executed again and again.
    """
    net.start()
    net.CLI()
    # when the user types exit in the CLI, we stop the emulator
    net.stop()
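Once net.start() has run (and before net.stop()), compute instances can also be controlled programmatically instead of through an API endpoint; a short hypothetical snippet using the startCompute()/stopCompute() calls shown in the resource-model example above (the image name is an assumption):

    # hypothetical: place between net.start() and net.CLI()
    vnf1 = dc1.startCompute("vnf1", image="ubuntu:trusty")
    dc1.stopCompute("vnf1")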
def create_topology1():
    """
    1. Create a data center network object (DCNetwork) with monitoring enabled
    """
    net = DCNetwork(monitor=True, enable_learning=False)

    """
    1b. Add endpoint APIs for the whole DCNetwork,
        to access and control the networking from outside.
        e.g., to set up forwarding paths between compute
        instances aka. VNFs (represented by Docker containers), passing through
        different switches and datacenters of the emulated topology
    """
    mon_api = ZeroRpcApiEndpointDCNetwork("0.0.0.0", 5151)
    mon_api.connectDCNetwork(net)
    mon_api.start()

    """
    2. Add (logical) data centers to the topology
       (each data center is one "bigswitch" in our simplified
        first prototype)
    """
    dc1 = net.addDatacenter("datacenter1")
    dc2 = net.addDatacenter("datacenter2")
    #dc3 = net.addDatacenter("long_data_center_name3")
    #dc4 = net.addDatacenter(
    #    "datacenter4",
    #    metadata={"mydata": "we can also add arbitrary metadata to each DC"})

    """
    3. You can add additional SDN switches for data center
       interconnections to the network.
    """
    s1 = net.addSwitch("s1")

    """
    4. Add links between your data centers and additional switches
       to define your topology.
       These links can use Mininet's features to limit bw, add delay or jitter.
    """
    #net.addLink(dc1, dc2, delay="10ms")
    #net.addLink(dc1, dc2)
    net.addLink(dc1, s1)
    net.addLink(s1, dc2)
    #net.addLink("datacenter1", s1, delay="20ms")
    #net.addLink(s1, dc3)
    #net.addLink(s1, "datacenter4")


    """
    5. We want to access and control our data centers from the outside,
       e.g., we want to connect an orchestrator to start/stop compute
       resources aka. VNFs (represented by Docker containers in the emulated network)

       So we need to instantiate API endpoints (e.g. a zerorpc or REST
       interface). Depending on the endpoint implementations, we can connect
       one or more data centers to it, which can then be controlled through
       this API, e.g., start/stop/list compute instances.
    """
    # create a new instance of an endpoint implementation
    zapi1 = ZeroRpcApiEndpoint("0.0.0.0", 4242)
    # connect data centers to this endpoint
    zapi1.connectDatacenter(dc1)
    zapi1.connectDatacenter(dc2)
    #zapi1.connectDatacenter(dc3)
    #zapi1.connectDatacenter(dc4)
    # run API endpoint server (in another thread, don't block)
    zapi1.start()

    """
    5.1. For our example, we create a second endpoint to illustrate that
         this is supported by our design. This feature allows us to have
         one API endpoint for each data center. This makes the emulation
         environment more realistic because you can easily create one
         OpenStack-like REST API endpoint for *each* data center.
         This will look like a real-world multi PoP/data center deployment
         from the perspective of an orchestrator.
    """
    #zapi2 = ZeroRpcApiEndpoint("0.0.0.0", 4343)
    #zapi2.connectDatacenter(dc3)
    #zapi2.connectDatacenter(dc4)
    #zapi2.start()

    """
    6. Finally we are done and can start our network (the emulator).
       We can also enter the Mininet CLI to interact with our
       compute resources (just like in default Mininet).
       But we can also implement fully automated experiments that
       can be executed again and again.
    """
    net.start()
    net.CLI()
    # when the user types exit in the CLI, we stop the emulator
    net.stop()
Example #32
def create_topology(httpmode=False, port_default=8888, device_rate=1500):
    net = DCNetwork(monitor=False, enable_learning=True)

    DC = net.addDatacenter("DC")

    # add OpenStack-like APIs to the emulated DC
    api1 = OpenstackApiEndpoint("0.0.0.0", 6001)
    api1.connect_datacenter(DC)
    api1.start()
    api1.connect_dc_network(net)

    # add the command line interface endpoint to the emulated DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(DC)
    rapi1.start()

    # generate switches
    s1 = net.addSwitch('s1')
    s2 = net.addSwitch('s2')
    s3 = net.addSwitch('s3')
    s4 = net.addSwitch('s4')

    S = createHost(httpmode, net, 'S', "host:server")
    GI = createHost(httpmode, net, 'GI', "host:gateway")
    GFA = createHost(httpmode, net, 'GFA', "host:gwfinal")
    GFB = createHost(httpmode, net, 'GFB', "host:gwfinal")
    GFC = createHost(httpmode, net, 'GFC', "host:gwfinal")

    # generate links
    net.addLink(S, s1)
    net.addLink(GI, s2)
    net.addLink(GFA, s3)
    net.addLink(GFB, s3)
    net.addLink(GFC, s4)

    net.addLink(s1, s2)
    net.addLink(s2, s3)
    net.addLink(s2, s4)
    net.addLink(DC, s4)

    # do not remove
    net.start()

    #Run gateways and devices
    print("Starting Server node")
    S.cmd("startup --local_ip 10.0.0.1 --local_port 8888 --local_name srv")
    print("Waiting for server node to complete startup")
    time.sleep(2)
    print("Starting GI node")
    GI.cmd(
        "startup --local_ip 10.0.0.2 --local_port 8888 --local_name gwi --remote_ip 10.0.0.1 --remote_port 8888 --remote_name srv"
    )
    print("Waiting for GI node to complete startup")
    time.sleep(2)
    print("Starting GFA node")
    GFA.cmd(
        "startup --local_ip 10.0.0.3 --local_port 8888 --local_name gwfa --remote_ip 10.0.0.2 --remote_port 8888 --remote_name gwi"
    )
    print("Waiting for GFA node to complete startup")
    time.sleep(2)
    print("Starting GFA devices")
    GFA.cmd("start_devices 10.0.0.3 9001 {0} gwfa {1}".format(
        port_default, device_rate))
    print("Starting GFB node")
    GFB.cmd(
        "startup --local_ip 10.0.0.4 --local_port 8888 --local_name gwfb --remote_ip 10.0.0.2 --remote_port 8888 --remote_name gwi"
    )
    print("Waiting for GFB node to complete startup")
    time.sleep(2)
    print("Starting GFB devices")
    GFB.cmd("start_devices 10.0.0.4 9001 {0} gwfb {1}".format(
        port_default, device_rate))
    print("Starting GFC node")
    GFC.cmd(
        "startup --local_ip 10.0.0.5 --local_port 8888 --local_name gwfc --remote_ip 10.0.0.2 --remote_port 8888 --remote_name gwi"
    )
    print("Waiting for GFC node to complete startup")
    time.sleep(2)
    print("Starting GFC devices")
    GFC.cmd("start_devices 10.0.0.5 9001 {0} gwfc {1}".format(
        port_default, device_rate))
    #Start the command line
    net.CLI()
    # when the user types exit in the CLI, we stop the emulator
    net.stop()
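The createHost() helper used above is not part of the snippet; a minimal sketch inferred from its call sites (the role of httpmode is unknown and it is ignored here, which is an assumption):

def createHost(httpmode, net, name, dimage):
    # hypothetical helper: add a Docker-backed host to the network;
    # httpmode presumably toggles some image/command variant and is
    # ignored in this sketch
    return net.addDocker(name, dimage=dimage)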
Example #33
class ApiBaseHeat(unittest.TestCase):
    """
        Helper class to do basic test setups.
        s1 -- s2 -- s3 -- ... -- sN
    """

    def __init__(self, *args, **kwargs):
        self.net = None
        self.api = []
        self.s = []   # list of switches
        self.h = []   # list of hosts
        self.d = []   # list of docker containers
        self.dc = []  # list of data centers
        self.docker_cli = None
        super(ApiBaseHeat, self).__init__(*args, **kwargs)

    def createNet(
            self,
            nswitches=0, ndatacenter=0, nhosts=0, ndockers=0,
            autolinkswitches=False, controller=Controller, **kwargs):
        """
        Creates a Mininet instance and automatically adds some
        nodes to it.

        Attention, we should always use Mininet's default controller
        for our tests. Only use other controllers if you want to test
        specific controller functionality.
        """
        self.net = DCNetwork(controller=controller, **kwargs)
        for i in range(0, ndatacenter):
            self.api.append(OpenstackApiEndpoint("0.0.0.0", 5000+i))

        # add some switches
        # start from s1 because ovs does not like to have dpid = 0
        # and switch name-number is being used by mininet to set the dpid
        for i in range(1, nswitches+1):
            self.s.append(self.net.addSwitch('s%d' % i))
        # if specified, chain all switches
        if autolinkswitches:
            for i in range(0, len(self.s) - 1):
                self.net.addLink(self.s[i], self.s[i + 1])
        # add some data centers
        for i in range(0, ndatacenter):
            self.dc.append(
                self.net.addDatacenter(
                    'dc%d' % i,
                    metadata={"unittest_dc": i}))
        # connect data centers to the endpoint
        for i in range(0, ndatacenter):
            self.api[i].connect_datacenter(self.dc[i])
            self.api[i].connect_dc_network(self.net)
        # add some hosts
        for i in range(0, nhosts):
            self.h.append(self.net.addHost('h%d' % i))
        # add some dockers
        for i in range(0, ndockers):
            self.d.append(self.net.addDocker('d%d' % i, dimage="ubuntu:trusty"))

    def startApi(self):
        for i in self.api:
            i.start()

    def stopApi(self):
        for i in self.api:
            i.stop()

    def startNet(self):
        self.net.start()

    def stopNet(self):
        self.net.stop()

    def getDockerCli(self):
        """
        Helper to interact with local docker instance.
        """
        if self.docker_cli is None:
            self.docker_cli = docker.Client(
                base_url='unix://var/run/docker.sock')
        return self.docker_cli

    def getContainernetContainers(self):
        """
        List the containers managed by containernet
        """
        return self.getDockerCli().containers(filters={"label": "com.containernet"})

    @staticmethod
    def setUp():
        pass


    def tearDown(self):
        print('->>>>>>> tear everything down ->>>>>>>>>>>>>>>')
        self.stopApi() # stop all flask threads
        self.stopNet() # stop some mininet and containernet stuff
        cleanup()
        # make sure that all pending docker containers are killed
        with open(os.devnull, 'w') as devnull: # kill a possibly running docker process that blocks the open ports
            subprocess.call("kill $(netstat -npl | grep '5000' | grep -o -e'[0-9]\+/docker' | grep -o -e '[0-9]\+')",
                stdout=devnull,
                stderr=devnull,
                shell=True)

        with open(os.devnull, 'w') as devnull:
            subprocess.call(
                "sudo docker rm -f $(sudo docker ps --filter 'label=com.containernet' -a -q)",
                stdout=devnull,
                stderr=devnull,
                shell=True)
def create_topology1():
    cleanup()
    # create topology
    # use a maximum of 50% cpu time for containers added to data centers
    net = DCNetwork(dc_emulation_max_cpu=0.5, controller=Controller)
    # add some data centers and create a topology
    dc1 = net.addDatacenter("dc1", resource_log_path=RESOURCE_LOG_PATH)
    dc2 = net.addDatacenter("dc2", resource_log_path=RESOURCE_LOG_PATH)
    s1 = net.addSwitch("s1")
    net.addLink(dc1, s1, delay="10ms")
    net.addLink(dc2, s1, delay="20ms")

    # create and assign resource models for each DC
    rm1 = UpbSimpleCloudDcRM(max_cu=4, max_mu=1024)
    rm2 = UpbOverprovisioningCloudDcRM(max_cu=4)
    dc1.assignResourceModel(rm1)
    dc2.assignResourceModel(rm2)

    # add the command line interface endpoint to each DC
    zapi1 = ZeroRpcApiEndpoint("0.0.0.0", 4242)
    zapi1.connectDatacenter(dc1)
    zapi1.connectDatacenter(dc2)
    # run API endpoint server (in another thread, don't block)
    zapi1.start()

    # start the emulation platform
    net.start()
    print "Wait a moment and allocate some compute start some compute resources..."
    time.sleep(2)
    dc1.startCompute("vnf1")
    dc1.startCompute("vnf2", flavor_name="tiny")
    dc1.startCompute("vnf3", flavor_name="small")
    dc2.startCompute("vnf4", flavor_name="medium")
    dc2.startCompute("vnf5", flavor_name="medium")
    dc2.startCompute("vnf6", flavor_name="medium")
    print "... done."
    time.sleep(5)
    print "Removing instances ..."
    dc1.stopCompute("vnf1")
    dc2.stopCompute("vnf4")
    print "... done"
    net.CLI()
    net.stop()
Example #35
File: base.py (Project: hadik3r/son-emu)
class SimpleTestTopology(unittest.TestCase):
    """
        Helper class to do basic test setups.
        s1 -- s2 -- s3 -- ... -- sN
    """

    def __init__(self, *args, **kwargs):
        self.net = None
        self.s = []   # list of switches
        self.h = []   # list of hosts
        self.d = []   # list of docker containers
        self.dc = []  # list of data centers
        self.docker_cli = None
        super(SimpleTestTopology, self).__init__(*args, **kwargs)

    def createNet(
            self,
            nswitches=0, ndatacenter=0, nhosts=0, ndockers=0,
            autolinkswitches=False, controller=Controller, **kwargs):
        """
        Creates a Mininet instance and automatically adds some
        nodes to it.

        Attention, we should always use Mininet's default controller
        for our tests. Only use other controllers if you want to test
        specific controller functionality.
        """
        self.net = DCNetwork(controller=controller, **kwargs)

        # add some switches
        # start from s1 because ovs does not like to have dpid = 0
        # and switch name-number is being used by mininet to set the dpid
        for i in range(1, nswitches+1):
            self.s.append(self.net.addSwitch('s%d' % i))
        # if specified, chain all switches
        if autolinkswitches:
            for i in range(0, len(self.s) - 1):
                self.net.addLink(self.s[i], self.s[i + 1])
        # add some data centers
        for i in range(0, ndatacenter):
            self.dc.append(
                self.net.addDatacenter(
                    'datacenter%d' % i,
                    metadata={"unittest_dc": i}))
        # add some hosts
        for i in range(0, nhosts):
            self.h.append(self.net.addHost('h%d' % i))
        # add some dockers
        for i in range(0, ndockers):
            self.d.append(self.net.addDocker('d%d' % i, dimage="ubuntu:trusty"))

    def startNet(self):
        self.net.start()

    def stopNet(self):
        self.net.stop()

    def getDockerCli(self):
        """
        Helper to interact with local docker instance.
        """
        if self.docker_cli is None:
            self.docker_cli = docker.Client(
                base_url='unix://var/run/docker.sock')
        return self.docker_cli

    def getContainernetContainers(self):
        """
        List the containers managed by containernet
        """
        return self.getDockerCli().containers(filters={"label": "com.containernet"})

    @staticmethod
    def setUp():
        pass

    @staticmethod
    def tearDown():
        cleanup()
        # make sure that all pending docker containers are killed
        with open(os.devnull, 'w') as devnull:
            subprocess.call(
                "sudo docker rm -f $(sudo docker ps --filter 'label=com.containernet' -a -q)",
                stdout=devnull,
                stderr=devnull,
                shell=True)
Example #36
def create_topology1():
    """
    1. Create a data center network object (DCNetwork)
    """
    net = DCNetwork(controller=RemoteController,
                    monitor=False,
                    enable_learning=True)
    """
    2. Add (logical) data centers to the topology
       (each data center is one "bigswitch" in our simplified
        first prototype)
    """
    ci = net.addDatacenter("cloud_instance")

    fi1 = net.addDatacenter("fog_instance_1")
    fi2 = net.addDatacenter("fog_instance_2")
    fi3 = net.addDatacenter("fog_instance_3")

    ii1 = net.addDatacenter("iot_instance_1")
    ii2 = net.addDatacenter("iot_instance_2")
    ii3 = net.addDatacenter("iot_instance_3")
    ii4 = net.addDatacenter("iot_instance_4")
    ii5 = net.addDatacenter("iot_instance_5")
    ii6 = net.addDatacenter("iot_instance_6")
    """
    3. You can add additional SDN switches for data center
       interconnections to the network.
    """
    """
    cis1 = net.addSwitch("cis1")
    cis2 = net.addSwitch("cis2")
    fi1s = net.addSwitch("fi1s")
    fi2s = net.addSwitch("fi2s")
    ii1s = net.addSwitch("ii1s")
    ii2s = net.addSwitch("ii2s")
    ii3s = net.addSwitch("ii3s")
    ii4s = net.addSwitch("ii4s")
    """
    """
    4. Add links between your data centers and additional switches
       to define your topology.
       These links can use Mininet's features to limit bw, add delay or jitter.
    """
    net.addLink(ii1, fi1, delay="10ms")
    net.addLink(ii2, fi1, delay="10ms")
    net.addLink(ii3, fi2, delay="10ms")
    net.addLink(ii4, fi2, delay="10ms")
    net.addLink(ii5, fi3, delay="10ms")
    net.addLink(ii6, fi3, delay="10ms")
    net.addLink(fi1, ci, delay="100ms")
    net.addLink(fi2, ci, delay="100ms")
    net.addLink(fi3, ci, delay="100ms")
    """
    5. We want to access and control our data centers from the outside,
       e.g., we want to connect an orchestrator to start/stop compute
       resources aka. VNFs (represented by Docker containers in the emulated network)

       So we need to instantiate API endpoints (e.g. a zerorpc or REST
       interface). Depending on the endpoint implementations, we can connect
       one or more data centers to it, which can then be controlled through
       this API, e.g., start/stop/list compute instances.
    """
    # create a new instance of an endpoint implementation
    rapi1 = RestApiEndpoint("127.0.0.1", 5001, net)
    # connect data centers to this endpoint
    rapi1.connectDatacenter(ci)
    rapi1.connectDatacenter(fi1)
    rapi1.connectDatacenter(fi2)
    rapi1.connectDatacenter(fi3)
    rapi1.connectDatacenter(ii1)
    rapi1.connectDatacenter(ii2)
    rapi1.connectDatacenter(ii3)
    rapi1.connectDatacenter(ii4)
    rapi1.connectDatacenter(ii5)
    rapi1.connectDatacenter(ii6)
    # run API endpoint server (in another thread, don't block)

    rapi1.start()
    """
    6. Finally we are done and can start our network (the emulator).
       We can also enter the Mininet CLI to interact with our
       compute resources (just like in default Mininet).
       But we can also implement fully automated experiments that
       can be executed again and again.
    """
    net.start()
    net.CLI()
    # when the user types exit in the CLI, we stop the emulator
    net.stop()
Example #37
def create_topology():
    net = DCNetwork(monitor=False, enable_learning=True)

    dc1 = net.addDatacenter("dc1")
    # add OpenStack-like APIs to the emulated DC
    api1 = OpenstackApiEndpoint("0.0.0.0", 6001)
    api1.connect_datacenter(dc1)
    api1.start()
    api1.connect_dc_network(net)
    # add the command line interface endpoint to the emulated DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(dc1)
    rapi1.start()
    
    s1 = net.addSwitch('s1')
    h1 = net.addHost('h1')
    h2 = net.addDocker('h2',dimage='host:server')
    net.addLink(h1, s1, delay='20ms')
    net.addLink(h2, s1, delay='20ms')
    net.addLink(dc1, s1, delay='20ms')
    
    net.start()
    net.CLI()
    # when the user types exit in the CLI, we stop the emulator
    net.stop()
Example #38
from mininet.net import Containernet
from mininet.node import Controller
from mininet.cli import CLI
from mininet.link import TCLink
from mininet.log import info, setLogLevel
import logging
from emuvim.dcemulator.net import DCNetwork
from emuvim.api.rest.rest_api_endpoint import RestApiEndpoint
from emuvim.api.tango import TangoLLCMEndpoint
import time

setLogLevel('info')

net = DCNetwork(monitor=False, enable_learning=True)

X = "krustylebot/repo:sdci_containernet"

info('*** Adding docker containers using krustylebot/repo:sdci_containernet images\n')
server = net.addDocker('server', ip='10.0.0.10', dimage=X, dcmd="sh -c 'cd /Projet-SDCI/docker && git pull && sh script_server.sh 10.0.0.10; tail -f /dev/null'")
gwi1 = net.addDocker('gwi1', ip='10.0.0.11', dimage=X, dcmd="sh -c 'cd /Projet-SDCI/docker && git pull && sh script_gi.sh 10.0.0.11 10.0.0.10 gwi1; tail -f /dev/null'")
gwf1 = net.addDocker('gwf1', ip='10.0.0.12', dimage=X, dcmd="sh -c 'cd /Projet-SDCI/docker && git pull && sh script_gf.sh 10.0.0.12 10.0.0.11 gwf1 gwi1 300; tail -f /dev/null'")
gwf2 = net.addDocker('gwf2', ip='10.0.0.13', dimage=X, dcmd="sh -c 'cd /Projet-SDCI/docker && git pull && sh script_gf.sh 10.0.0.13 10.0.0.11 gwf2 gwi1 300; tail -f /dev/null'")
gwf3 = net.addDocker('gwf3', ip='10.0.0.14', dimage=X, dcmd="sh -c 'cd /Projet-SDCI/docker && git pull && sh script_gf.sh 10.0.0.14 10.0.0.11 gwf3 gwi1 300; tail -f /dev/null'")

monitoring = net.addDocker('monitoring', ip='10.0.0.15', dimage=X, dcmd="sh -c 'cd /Projet-SDCI && git pull; cd Monitoring; sh monitoring.sh; tail -f /dev/null'")

dc = net.addDatacenter("dc")

info('*** Adding switches\n')
s1 = net.addSwitch('s1')
Example #39
def create_topology1():
    """
    1. Create a data center network object (DCNetwork)
    """
    net = DCNetwork()

    """
    1b. Add a REST API endpoint to the DCNetwork (e.g., for monitoring access)
    """
    sonata_api = RestApiEndpoint("127.0.0.1", 5001)
    sonata_api.connectDCNetwork(net)
    sonata_api.start()
    """
    2. Add (logical) data centers to the topology
       (each data center is one "bigswitch" in our simplified
        first prototype)
    """
    dc1 = net.addDatacenter("datacenter1")
    dc2 = net.addDatacenter("datacenter2")
    dc3 = net.addDatacenter("long_data_center_name3")
    dc4 = net.addDatacenter(
        "datacenter4",
        metadata={"mydata": "we can also add arbitrary metadata to each DC"})

    """
    3. You can add additional SDN switches for data center
       interconnections to the network.
    """
    s1 = net.addSwitch("s1")

    """
    4. Add links between your data centers and additional switches
       to define your topology.
       These links can use Mininet's features to limit bw, add delay or jitter.
    """
    net.addLink(dc1, dc2)
    net.addLink("datacenter1", s1)
    net.addLink(s1, dc3)
    net.addLink(s1, "datacenter4")
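    # These links also accept Mininet's TCLink parameters; an illustrative
    # (commented-out) variant that rate-limits and delays a link could be:
    # net.addLink(dc2, dc3, bw=10, delay="20ms", loss=1)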

    """
    5. We want to access and control our data centers from the outside,
       e.g., we want to connect an orchestrator to start/stop compute
       resources, aka VNFs (represented by Docker containers in the emulated network).

       So we need to instantiate API endpoints (e.g. a zerorpc or REST
       interface). Depending on the endpoint implementations, we can connect
       one or more data centers to it, which can then be controlled through
       this API, e.g., start/stop/list compute instances.
    """
    # create a new instance of an endpoint implementation
    sonata_api = RestApiEndpoint("127.0.0.1", 5005)
    # connect data centers to this endpoint
    sonata_api.connectDatacenter(dc1)
    sonata_api.connectDatacenter(dc2)
    sonata_api.connectDatacenter(dc3)
    sonata_api.connectDatacenter(dc4)
    # run API endpoint server (in another thread, don't block)
    sonata_api.start()

    """
    6. Finally we are done and can start our network (the emulator).
       We can also enter the Mininet CLI to interact with our
       compute resources interactively (just like in default Mininet).
       But we can also implement fully automated experiments that
       can be executed again and again.
    """
    net.start()
    net.CLI()
    # when the user types exit in the CLI, we stop the emulator
    net.stop()
Example #40
File: base.py Project: splietker/son-emu
import os
import subprocess
import unittest

import docker
from emuvim.dcemulator.net import DCNetwork
from mininet.clean import cleanup
from mininet.node import Controller


class SimpleTestTopology(unittest.TestCase):
    """
        Helper class to do basic test setups.
        s1 -- s2 -- s3 -- ... -- sN
    """
    def __init__(self, *args, **kwargs):
        self.net = None
        self.s = []  # list of switches
        self.h = []  # list of hosts
        self.d = []  # list of docker containers
        self.dc = []  # list of data centers
        self.docker_cli = None
        super(SimpleTestTopology, self).__init__(*args, **kwargs)

    def createNet(self,
                  nswitches=0,
                  ndatacenter=0,
                  nhosts=0,
                  ndockers=0,
                  autolinkswitches=False,
                  controller=Controller,
                  **kwargs):
        """
        Creates a Mininet instance and automatically adds some
        nodes to it.

        Attention, we should always use Mininet's default controller
        for our tests. Only use other controllers if you want to test
        specific controller functionality.
        """
        self.net = DCNetwork(controller=controller, **kwargs)

        # add some switches
        # start from s1 because ovs does not like to have dpid = 0
        # and switch name-number is being used by mininet to set the dpid
        for i in range(1, nswitches + 1):
            self.s.append(self.net.addSwitch('s%d' % i))
        # if specified, chain all switches
        if autolinkswitches:
            for i in range(0, len(self.s) - 1):
                self.net.addLink(self.s[i], self.s[i + 1])
        # add some data centers
        for i in range(0, ndatacenter):
            self.dc.append(
                self.net.addDatacenter('datacenter%d' % i,
                                       metadata={"unittest_dc": i}))
        # add some hosts
        for i in range(0, nhosts):
            self.h.append(self.net.addHost('h%d' % i))
        # add some dockers
        for i in range(0, ndockers):
            self.d.append(self.net.addDocker('d%d' % i,
                                             dimage="ubuntu:trusty"))

    def startNet(self):
        self.net.start()

    def stopNet(self):
        self.net.stop()

    def getDockerCli(self):
        """
        Helper to interact with local docker instance.
        """
        if self.docker_cli is None:
            self.docker_cli = docker.APIClient(
                base_url='unix://var/run/docker.sock')
        return self.docker_cli

    def getContainernetContainers(self):
        """
        List the containers managed by containernet
        """
        return self.getDockerCli().containers(
            filters={"label": "com.containernet"})

    @staticmethod
    def setUp():
        pass

    @staticmethod
    def tearDown():
        cleanup()
        # make sure that all pending docker containers are killed
        with open(os.devnull, 'w') as devnull:
            subprocess.call(
                "sudo docker rm -f $(sudo docker ps --filter 'label=com.containernet' -a -q)",
                stdout=devnull,
                stderr=devnull,
                shell=True)
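
A hypothetical test case built on this helper could look as follows (a sketch only;
the concrete topology, VNF names, and image are assumptions, not part of base.py):

class TwoDatacenterTest(SimpleTestTopology):

    def test_dc_to_dc_ping(self):
        # topology: datacenter0 -- s1 -- s2 -- datacenter1
        self.createNet(nswitches=2, ndatacenter=2, autolinkswitches=True)
        self.net.addLink(self.dc[0], self.s[0])
        self.net.addLink(self.dc[1], self.s[1])
        self.startNet()
        vnf1 = self.dc[0].startCompute("vnf1", image="ubuntu:trusty")
        vnf2 = self.dc[1].startCompute("vnf2", image="ubuntu:trusty")
        # Mininet's ping() returns the packet loss in percent
        self.assertEqual(self.net.ping([vnf1, vnf2]), 0.0)
        self.stopNet()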
Example #41
def create_topology1():
    # create topology
    net = DCNetwork(controller=RemoteController,
                    monitor=True,
                    enable_learning=True)
    dc1 = net.addDatacenter("dc1")

    # add the command line interface endpoint to each DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(dc1)
    # run API endpoint server (in another thread, don't block)
    rapi1.start()

    # add the SONATA dummy gatekeeper to each DC
    #sdkg1 = SonataDummyGatekeeperEndpoint("0.0.0.0", 5000, deploy_sap=True)
    #sdkg1.connectDatacenter(dc1)
    # run the dummy gatekeeper (in another thread, don't block)
    #sdkg1.start()

    # start the emulation platform
    net.start()  # here the docker host default ip is configured

    # topology must be started before hosts are added
    #cache = net.addDocker('cache', dimage="squid-vnf")
    cache = dc1.startCompute('cache',
                             image="squid-vnf",
                             network=[{
                                 "ip": "10.10.0.1/24",
                                 "id": "client"
                             }, {
                                 "ip": "10.20.0.1/24",
                                 "id": "server"
                             }])

    #client = net.addDocker('client', ip='10.10.0.1/24', dimage="vcdn-client")
    client = dc1.startCompute('client',
                              image='vcdn-client',
                              network=[{
                                  "ip": "10.10.0.2/24",
                                  "id": "client"
                              }])

    #server = net.addDocker('server', ip='10.20.0.1/24', dimage="webserver")
    server = dc1.startCompute('server',
                              image='webserver',
                              network=[{
                                  "ip": "10.20.0.2/24",
                                  "id": "server"
                              }])
    #net.addLink(dc1, client,  intfName1='dc-cl', intfName2='client')
    #net.addLink(dc1, server,  intfName1='dc-sv', intfName2='server')
    #net.addLink(dc1, cache,  intfName1='dc-ccl', intfName2='client', params1={'ip': '10.10.0.2/24'})
    #net.addLink(dc1, cache,  intfName1='dc-csv', intfName2='server',params1={'ip': '10.20.0.2/24'})

    # initialise VNFs
    cache.cmd("./start.sh", detach=True)
    client.cmd("./start.sh", detach=True)
    server.cmd('./start.sh', detach=True)

    # startup script hangs if we use other startup command
    # command="./start.sh"
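
    # (Sketch, assuming son-emu's DCNetwork.setChain API:) instead of the static
    # links commented out above, traffic could be steered client -> cache -> server
    # using the interface ids given at startCompute time, e.g.:
    # net.setChain('client', 'cache', vnf_src_interface='client',
    #              vnf_dst_interface='client', cmd='add-flow')
    # net.setChain('cache', 'server', vnf_src_interface='server',
    #              vnf_dst_interface='server', cmd='add-flow')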

    net.CLI()
    net.stop()
Example #42
def create_topology():
    net = DCNetwork(monitor=False, enable_learning=True)

    dc1 = net.addDatacenter("dc1")
    # add OpenStack-like APIs to the emulated DC
    api1 = OpenstackApiEndpoint("0.0.0.0", 6001)
    api1.connect_datacenter(dc1)
    api1.start()
    api1.connect_dc_network(net)
    # add the command line interface endpoint to the emulated DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(dc1)
    rapi1.start()

    s1 = net.addSwitch('s1')
    d1 = net.addDocker('d1', ip='10.100.0.1', dimage="ubuntu:trusty")
    d2 = net.addDocker('d2', ip='10.100.0.2', dimage="ubuntu:trusty")
    net.addLink(s1, d1)
    net.addLink(s1, d2)
    net.addLink(s1, dc1)

    net.start()
    net.CLI()
    # when the user types exit in the CLI, we stop the emulator
    net.stop()
Example #43
def create_topology1():
    # create topology
    net = DCNetwork(controller=RemoteController, monitor=False, enable_learning=True)
    # add datacenters
    dc1 = net.addDatacenter("dc1")
    dc2 = net.addDatacenter("dc2")
    # add some intermediate switch
    s1 = net.addSwitch("s1")
    # connect data centers
    net.addLink(dc1, s1, delay="10ms")
    net.addLink(dc2, s1, delay="20ms")

    # add REST control endpoints to each datacenter (to be used with son-emu-cli)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(dc1)
    rapi1.connectDatacenter(dc2)
    rapi1.start()

    # start the emulation platform
    net.start()
    net.CLI()
    net.stop()
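
Once this topology is running, the REST endpoint on port 5001 can be driven from a
second terminal with son-emu-cli, e.g. (a sketch; exact flags depend on the
son-emu/vim-emu version installed):

son-emu-cli compute start -d dc1 -n vnf1
son-emu-cli compute list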
Example #44
import csv
import time

from emuvim.api.openstack.openstack_api_endpoint import OpenstackApiEndpoint
from emuvim.dcemulator.net import DCNetwork
from mininet.log import setLogLevel

setLogLevel('debug')

COUNT = 15

with open('osm_component_startup_%d.csv' % time.time(), 'w') as csvfile:
    fieldnames = [
        'other', 'zookeeper', 'kafka', 'mongo', 'nbi', 'ro_db', 'ro', 'lcm'
    ]
    writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
    writer.writeheader()

    for i in range(COUNT):
        start = time.time()
        net = DCNetwork(monitor=False, enable_learning=True)
        api = None
        try:
            dc1 = net.addDatacenter("dc1")
            api = OpenstackApiEndpoint("0.0.0.0", 6001)
            api.connect_datacenter(dc1)
            api.connect_dc_network(net)

            s1 = net.addSwitch('s1')

            zookeeper_ip = '10.0.0.96'
            kafka_ip = '10.0.0.97'
            mongo_ip = '10.0.0.98'
            nbi_ip = '10.0.0.99'
            ro_db_ip = '10.0.0.100'
            ro_ip = '10.0.0.101'
Example #45
def create_topology(httpmode=False):
    net = DCNetwork(monitor=False, enable_learning=True)

    DC = net.addDatacenter("DC")

    # add OpenStack-like APIs to the emulated DC
    api1 = OpenstackApiEndpoint("0.0.0.0", 6001)
    api1.connect_datacenter(DC)
    api1.start()
    api1.connect_dc_network(net)

    # add the command line interface endpoint to the emulated DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(DC)
    rapi1.start()

    s1 = net.addSwitch('s1')
    s2 = net.addSwitch('s2')
    s3 = net.addSwitch('s3')
    s4 = net.addSwitch('s4')

    S = net.addDocker("S", dimage="host:server")
    GI = net.addDocker("GI", dimage="host:gateway")
    GFA = createHost(httpmode, net, 'GFA')
    GFB = createHost(httpmode, net, 'GFB')
    GFC = createHost(httpmode, net, 'GFC')

    net.addLink(S, s1)
    net.addLink(GI, s2)
    net.addLink(GFA, s3)
    net.addLink(GFB, s3)
    net.addLink(GFC, s4)

    net.addLink(s1, s2)
    net.addLink(s2, s3)
    net.addLink(s2, s4)
    net.addLink(DC, s2)

    # Do not remove
    net.start()
    net.CLI()
    # when the user types exit in the CLI, we stop the emulator
    net.stop()
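
Note that createHost is defined elsewhere in that project. A purely hypothetical
stand-in (the image names are guesses, used only to make the snippet self-contained):

def createHost(httpmode, net, name):
    # hypothetical helper: choose a gateway image depending on httpmode
    image = "host:gateway-http" if httpmode else "host:gateway"
    return net.addDocker(name, dimage=image)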