Example #1
def topology():
    "Create a network with some docker containers acting as hosts."
    edgefile = os.path.join(RESULTS_FOLDER, "./substrate.edges.empty.data")
    nodesfile = os.path.join(RESULTS_FOLDER, "./substrate.nodes.data")
    CDNfile = os.path.join(RESULTS_FOLDER, "CDN.nodes.data")
    startersFile = os.path.join(RESULTS_FOLDER, "starters.nodes.data")
    solutionsFile = os.path.join(RESULTS_FOLDER, "solutions.data")
    service_edges = os.path.join(RESULTS_FOLDER, "./service.edges.data")
    switch = partial(OVSSwitch, protocols='OpenFlow13')

    topo = loadTopo(edgefile, nodesfile, CDNfile, startersFile, solutionsFile,
                    service_edges)

    c = RemoteController('c', '0.0.0.0', 6633)
    # topodock=  loaddocker(os.path.join(RESULTS_FOLDER, "./substrate.edges.data"), os.path.join(RESULTS_FOLDER, "./substrate.nodes.data"))
    info('*** Start Containernet\n')
    net = Containernet(topo=topo, controller=c, link=TCLink, switch=switch)
    for host in net.hosts:
        if host.name in topo._cmd:
            for cmd in topo._cmd[host.name]:
                print("send cmd")
                print(host.sendCmd(cmd))

    info('*** Starting network\n')
    net.start()

    info('*** Running CLI\n')
    CLI(net)

    info('*** Stopping network')
    net.stop()
Example #2
    def __init__(self):
        self.containers = {}
        self.switches = {}

        self.cn = Containernet(controller=Controller)

        info('*** Adding controller\n')
        self.cn.addController('c0')

        info('*** Adding switches\n')
        self.switches['sw0'] = self.cn.addSwitch('sw0')

        info('*** Adding docker containers\n')
        self.containers['u0'] = self.cn.addDocker('u0',
                                                  ip='10.0.0.10',
                                                  dimage="ubuntu:trusty")
        self.containers['u1'] = self.cn.addDocker('u1',
                                                  ip='10.0.0.11',
                                                  dimage="ubuntu:trusty")
        self.containers['p0'] = self.cn.addDocker(
            'p0',
            ip='10.0.0.100',
            dimage="cachecashproject/go-cachecash",
            dcmd='')

        info('*** Creating links\n')
        for c in self.containers.values():
            self.cn.addLink(c, self.switches['sw0'])
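The constructor above only assembles the topology; nothing is started. A minimal usage sketch follows; the class name CacheCashTopo and the interactive CLI step are assumptions, not taken from the original snippet.

# Hypothetical usage of the class defined above (the class name is assumed).
from mininet.cli import CLI

topo = CacheCashTopo()   # runs the __init__ shown above
topo.cn.start()          # start the switch, links and the three containers
CLI(topo.cn)             # interactive Containernet CLI for manual testing
topo.cn.stop()           # stop the network and remove the containers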
Example #3
 def testaddDocker(self):
     net = Containernet(controller=Controller)
     path = find_test_container("webserver_curl")
     d2 = net.addDocker("d2",
                        ip='10.0.0.252',
                        build_params={
                            "dockerfile": "Dockerfile.server",
                            "path": path
                        })
     self.assertTrue(d2._check_image_exists(_id=d2.dimage))
     d3 = net.addDocker("d3",
                        ip='10.0.0.253',
                        dimage="webserver_curl_test",
                        build_params={
                            "dockerfile": "Dockerfile.server",
                            "path": path
                        })
     self.assertTrue(d3._check_image_exists("webserver_curl_test"))
     d4 = net.addDocker("d4",
                        ip='10.0.0.254',
                        build_params={
                            "dockerfile": "Dockerfile.server",
                            "tag": "webserver_curl_test2",
                            "path": path
                        })
     self.assertTrue(d4._check_image_exists("webserver_curl_test2"))
Example #4
    def start(self):
        # Deal with kind nodes.
        # By default every cluster node is a kind node; other node types could be integrated.
        if len(self.kubeCluster) > 0:
            self.boostKubeCluster()

        for k in self.kubeCluster:
            self.kubeCluster[k].init()
            # TODO: allow adding something other than kind?
            # TODO: make sure the container ID and process ID are retrieved

        for l in self.linksNotProcessed:
            Containernet.addLink(self,
                                 l[0],
                                 l[1],
                                 port1=l[2],
                                 port2=l[3],
                                 cls=l[4],
                                 **l[5])

        Containernet.start(self)

        for k in self.kubeCluster:
            self.kubeCluster[k].bringIntfUp()
            self.kubeCluster[k].setupKube()
Example #5
    def createNet(self,
                  nswitches=1,
                  nhosts=0,
                  ndockers=0,
                  autolinkswitches=False):
        """
        Creates a Mininet instance and automatically adds some
        nodes to it.
        """
        self.net = Containernet(controller=Controller)
        self.net.addController('c0')

        # add some switches
        for i in range(0, nswitches):
            self.s.append(self.net.addSwitch('s%d' % i))
        # if specified, chain all switches
        if autolinkswitches:
            for i in range(0, len(self.s) - 1):
                self.net.addLink(self.s[i], self.s[i + 1])
        # add some hosts
        for i in range(0, nhosts):
            self.h.append(self.net.addHost('h%d' % i))
        # add some dockers
        for i in range(0, ndockers):
            self.d.append(self.net.addDocker('d%d' % i,
                                             dimage="ubuntu:trusty"))
Example #6
 def create_network(self, controller=Controller):
     """Create Containernet network."""
     info('*** Running Cleanup\n')
     cleanup()
     self.net = Containernet(controller=controller)
     if controller is not None:
         self.add_controller()
Example #7
  def __init__(self):
    setLogLevel('info')

    self.net = Containernet(controller=Controller)
    self.net.addController('c0')

    self.nodes = {}
    self.types = {}
Example #8
 def __init__(self, **params):
     # call original Containernet.__init__
     Containernet.__init__(self, **params)
     self.kubeCluster = {}
     self.linksNotProcessed = []
     self.clusterName = ""
     self.numController = 0
     self.numWorker = 0
Example #9
def get_default_net(options=None):
    """ """
    if not options:
        options = {}

    net = Containernet()
    remote_controller = options.get("remote_controller", None)
    net.addController(get_controller(remote_controller))

    return net
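A minimal usage sketch for the helper above, assuming get_controller(None) returns a usable local controller:

# Minimal sketch: build the default net, run a connectivity check, tear down.
net = get_default_net()
net.start()
net.pingAll()
net.stop()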
Example #10
    def stop(self):

        # stop the monitor agent
        if self.monitor_agent is not None:
            self.monitor_agent.stop()

        # stop emulator net
        Containernet.stop(self)

        # stop Ryu controller
        self.killRyu()
Example #11
    def stop(self):

        # stop the monitor agent
        if self.monitor_agent is not None:
            self.monitor_agent.stop()

        # stop emulator net
        Containernet.stop(self)

        # stop Ryu controller
        self.stopRyu()
Example #12
    def __init__(self, controller=RemoteController, monitor=False,
                 enable_learning=True,  # with a RemoteController (Ryu), learning-switch behavior can be turned on or off
                 dc_emulation_max_cpu=1.0,  # fraction of overall CPU time for emulation
                 dc_emulation_max_mem=512,  # emulation max mem in MB
                 **kwargs):
        """
        Create an extended version of a Containernet network
        :param dc_emulation_max_cpu: max. CPU time used by containers in data centers
        :param kwargs: path through for Mininet parameters
        :return:
        """
        self.dcs = {}

        # make sure any remaining Ryu processes are killed
        self.killRyu()
        # make sure no containers are left over from a previous emulator run.
        self.removeLeftoverContainers()

        # call original Docker.__init__ and setup default controller
        Containernet.__init__(
            self, switch=OVSKernelSwitch, controller=controller, **kwargs)


        # Ryu management
        self.ryu_process = None
        if controller == RemoteController:
            # start Ryu controller
            self.startRyu(learning_switch=enable_learning)

        # add the specified controller
        self.addController('c0', controller=controller)

        # graph of the complete DC network
        self.DCNetwork_graph = nx.MultiDiGraph()

        # initialize pool of vlan tags to setup the SDN paths
        self.vlans = range(4096)[::-1]

        # link to Ryu REST_API
        ryu_ip = '0.0.0.0'
        ryu_port = '8080'
        self.ryu_REST_api = 'http://{0}:{1}'.format(ryu_ip, ryu_port)

        # monitoring agent
        if monitor:
            self.monitor_agent = DCNetworkMonitor(self)
        else:
            self.monitor_agent = None

        # initialize resource model registrar
        self.rm_registrar = ResourceModelRegistrar(
            dc_emulation_max_cpu, dc_emulation_max_mem)
Example #13
    def stop(self):

        # stop the monitor agent
        if self.monitor_agent is not None:
            self.monitor_agent.stop()

        # stop emulator net
        Containernet.stop(self)

        # stop Ryu controller
        self.killRyu()

        # flag to indicate the topology has been stopped
        self.exit = True
Example #14
def createTopo(g=4,
               a=None,
               p=1,
               h=1,
               bw_sw_h=10,
               bw_inn_sw=30,
               bw_int_sw=10,
               ip="127.0.0.1",
               port=6633):
    if a is None: a = g - 1  # Canonical Topo
    logging.debug("LV1 Create DragonFly")
    topo = DragonFly(g, a, p, h)
    topo.createTopo(bw_sw_h=bw_sw_h, bw_inn_sw=bw_inn_sw, bw_int_sw=bw_int_sw)

    logging.debug("LV1 Start Mininet")
    CONTROLLER_IP = ip
    CONTROLLER_PORT = port
    net = Containernet(topo=topo,
                       link=TCLink,
                       controller=None,
                       autoSetMacs=True,
                       autoStaticArp=True)
    net.addController('controller',
                      controller=RemoteController,
                      ip=CONTROLLER_IP,
                      port=CONTROLLER_PORT)
    net.start()

    dump_etc_hosts(net)
    dump_mpi_hosts_file(net)
    run_set_ssh(net)

    CLI(net)
    net.stop()
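createTopo() above already builds, starts and stops the network, so a driver only needs to call it. A minimal sketch, assuming the usual setLogLevel import from mininet.log; the DragonFly parameters are example values only:

# Hypothetical entry point for the DragonFly example above.
if __name__ == '__main__':
    setLogLevel('info')
    createTopo(g=4, p=1, h=1, ip="127.0.0.1", port=6633)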
Example #15
    def __init__(self):
        "Constructor - set up our network topology"

        # setup the base containernet controller
        net = self.net = Containernet(controller=Controller)
        net.addController('c0')

        # our root backbone switch
        s0 = self.s0 = net.addSwitch('s0')

        # this host is publicly accessible
        h0 = self.h0 = net.addDocker(
            'h0',
            ip = '10.0.0.100',
            dimage = 'hc-containernet-base',
        )

        # connect to our backbone switch
        net.addLink(s0, h0)

        # create two nodes behind nats
        d1 = self.d1 = self.addNatNode(1)
        d2 = self.d2 = self.addNatNode(2)

        # start the simulation
        net.start()
Example #16
 def addSwitch( self, name, add_to_graph=True, **params ):
     """
     Wrapper for addSwitch method to store switch also in graph.
     """
     if add_to_graph:
         self.DCNetwork_graph.add_node(name)
     return Containernet.addSwitch(self, name, protocols='OpenFlow10,OpenFlow12,OpenFlow13', **params)
Example #17
class DatacenterBasicTopo:
    "Datacenter topology: a root switch plus racks of Docker hosts behind top-of-rack switches"

    def __init__(self):
        self.net = Containernet(controller=RemoteController)
        info('*** Adding controller\n')
        self.net.addController('c0',
                               controller=RemoteController,
                               ip='172.31.2.32',
                               port=6653)

    def build(self):
        self.racks = []
        rootSwitch = self.net.addSwitch('s1')
        for i in irange(1, 2):
            rack = self.buildRack(i)
            self.racks.append(rack)
            for switch in rack:
                self.net.addLink(rootSwitch, switch)

    def buildRack(self, loc):
        "Build a rack of Docker hosts with a top-of-rack switch"

        dpid = (loc * 16) + 1
        switch = self.net.addSwitch('s1r%s' % loc, dpid='%x' % dpid)

        for n in irange(1, 5):
            # host = self.net.addHost('h%sr%s' % (n, loc))
            host = self.net.addDocker('h%sr%s' % (n, loc), dimage="ubuntu:trusty")
            self.net.addLink(switch, host)

        # Return the list of top-of-rack switches for this rack
        return [switch]
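A hypothetical driver for the class above; calling build() explicitly and dropping into the CLI are assumptions, not part of the original snippet.

# Hypothetical usage of DatacenterBasicTopo.
from mininet.cli import CLI

dc = DatacenterBasicTopo()
dc.build()            # create the root switch, racks and Docker hosts
dc.net.start()
CLI(dc.net)
dc.net.stop()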
Example #18
 def addDocker(self, label, **params):
     """
     Wrapper for addDocker method to use custom container class.
     """
     self.DCNetwork_graph.add_node(label, type=params.get('type', 'docker'))
     return Containernet.addDocker(
         self, label, cls=EmulatorCompute, **params)
Example #19
    def addSwitch(self, name, add_to_graph=True, **params):
        """
        Wrapper for addSwitch method to store switch also in graph.
        """

        # add this switch to the global topology overview
        if add_to_graph:
            self.DCNetwork_graph.add_node(name)

        # set the learning switch behavior
        if 'failMode' in params:
            failMode = params['failMode']
        else:
            failMode = self.failMode

        s = Containernet.addSwitch(
            self,
            name,
            protocols='OpenFlow10,OpenFlow12,OpenFlow13',
            failMode=failMode,
            **params)

        # set flow entry that enables learning switch behavior (needed to enable E-LAN functionality)
        #LOG.info('failmode {0}'.format(failMode))
        #if failMode == 'standalone' :
        #    LOG.info('add NORMAL')
        #    s.dpctl('add-flow', 'actions=NORMAL')

        return s
Example #20
 def addDocker(self, label, **params):
     """
     Wrapper for addDocker method to use custom container class.
     """
     self.DCNetwork_graph.add_node(label, type=params.get('type', 'docker'))
     return Containernet.addDocker(
         self, label, cls=EmulatorCompute, **params)
Example #21
 def addExtSAP(self, sap_name, sap_ip, **params):
     """
     Wrapper for addExtSAP method to store SAP  also in graph.
     """
     # make sure that 'type' is set
     params['type'] = params.get('type', 'sap_ext')
     self.DCNetwork_graph.add_node(sap_name, type=params['type'])
     return Containernet.addExtSAP(self, sap_name, sap_ip, **params)
Example #22
 def addExtSAP(self, sap_name, sap_ip, **params):
     """
     Wrapper for addExtSAP method to store SAP  also in graph.
     """
     # make sure that 'type' is set
     params['type'] = params.get('type','sap_ext')
     self.DCNetwork_graph.add_node(sap_name, type=params['type'])
     return Containernet.addExtSAP(self, sap_name, sap_ip, **params)
Example #23
def topo():
    network = Containernet(controller=Controller, ipBase='44.44.44.1/24')

    s_adv1 = network.addSwitch('s_adv1')

    docker_image = "testbed:basic"
    home_path = os.path.expanduser('~')

    h_adv1 = network.addHost(
        'h_adv1',
        cls=Docker,
        ip="44.44.44.41",
        dimage=docker_image,
        defaultRoute='via 44.44.44.1',
        volumes=[home_path + "/SDN-Testbed/traffic/:/root/traffic"])
    h_adv2 = network.addHost(
        'h_adv2',
        cls=Docker,
        ip="44.44.44.42",
        dimage=docker_image,
        defaultRoute='via 44.44.44.1',
        volumes=[home_path + "/SDN-Testbed/traffic/:/root/traffic"])

    network.addLink(s_adv1, h_adv1, cls=TCLink, delay='10ms', bw=10)
    network.addLink(s_adv1, h_adv2, cls=TCLink, delay='10ms', bw=10)

    return network
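The topo() helper above returns the network without adding a controller or starting it. A minimal driver sketch; adding a local c0 controller and running the CLI are assumptions:

# Hypothetical driver for the topo() helper above.
from mininet.cli import CLI

net = topo()
net.addController('c0')   # assumption: topo() itself adds no controller
net.start()
CLI(net)
net.stop()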
Example #24
    def __createTopologyFromFile(
            self):  # file existence and topo class have been checked in the Wizard
        spec = importlib.util.spec_from_file_location("topo",
                                                      self.options["path"])
        topoModule = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(topoModule)

        networks = []
        for topoClass in topoModule.topos.values():
            topo = topoClass()
            if self.options["controllerType"] == "local":
                network = Containernet(topo=topo, controller=Controller)
            else:
                network = Containernet(topo=topo, controller=RemoteController)

            networks.append(network)

        return networks
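For reference, a hypothetical topology file that __createTopologyFromFile() could load; the loader only requires a module-level topos dict whose values are Topo subclasses (the class and node names below are made up):

# Hypothetical contents of the file referenced by self.options["path"].
from mininet.topo import Topo

class TwoHostTopo(Topo):
    def build(self):
        s1 = self.addSwitch('s1')
        h1 = self.addHost('h1')
        h2 = self.addHost('h2')
        self.addLink(h1, s1)
        self.addLink(h2, s1)

topos = {"twohost": TwoHostTopo}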
Example #25
 def removeLink(self, link=None, node1=None, node2=None):
     """
     Remove the link from the Containernet and the networkx graph
     """
     if link is not None:
         node1 = link.intf1.node
         node2 = link.intf2.node
     assert node1 is not None
     assert node2 is not None
     Containernet.removeLink(self, link=link, node1=node1, node2=node2)
     # TODO we might decrease the loglevel to debug:
     try:
         self.DCNetwork_graph.remove_edge(node2.name, node1.name)
     except:
         LOG.warning("%s, %s not found in DCNetwork_graph." % ((node2.name, node1.name)))
     try:
         self.DCNetwork_graph.remove_edge(node1.name, node2.name)
     except:
         LOG.warning("%s, %s not found in DCNetwork_graph." % ((node1.name, node2.name)))
Example #26
 def removeLink(self, link=None, node1=None, node2=None):
     """
     Remove the link from the Containernet and the networkx graph
     """
     if link is not None:
         node1 = link.intf1.node
         node2 = link.intf2.node
     assert node1 is not None
     assert node2 is not None
     Containernet.removeLink(self, link=link, node1=node1, node2=node2)
     # TODO we might decrease the loglevel to debug:
     try:
         self.DCNetwork_graph.remove_edge(node2.name, node1.name)
     except:
         LOG.warning("%s, %s not found in DCNetwork_graph." % ((node2.name, node1.name)))
     try:
         self.DCNetwork_graph.remove_edge(node1.name, node2.name)
     except:
         LOG.warning("%s, %s not found in DCNetwork_graph." % ((node1.name, node2.name)))
Example #27
def main():
    setLogLevel('info')

    topo = EvalTetraTopo()

    net = Containernet(controller=RemoteController,
                       topo=topo,
                       build=False,
                       autoSetMacs=True,
                       link=TCLink)
    net.start()

    print()

    print("**Wiping log dir.")
    for root, dirs, files in os.walk(LoggingReceiveAction.LOG_DIR):
        for file in files:
            os.remove(os.path.join(root, file))

    print("**Starting containernet REST Server.")
    thr = threading.Thread(target=start_rest,
                           args=(net, ))  # the trailing comma keeps args a one-element tuple
    thr.daemon = True
    thr.start()

    # wait for connection with controller
    time.sleep(3)

    hosts = net.hosts

    # send arp from reqHost to every other host -> required by ONOS HostService to resolve hosts (i.e. map MAC<->IP)
    reqHost = hosts[0]
    for host in hosts:
        if (host is not reqHost):
            startARP(reqHost, reqHost.IP(), reqHost.MAC(), host.IP(),
                     reqHost.intf())

    CLI(net)

    ## set up UDP servers to join group
    for host in hosts:
        if host.name in ['tbs10host', 'tbs11host', 'tbs4host', 'tbs21host']:
            startUDPServer(host, GROUP_IP, host.IP())

    CLI(net)

    ## send data
    startUDPClient(net.getNodeByName('tbs17host'),
                   GROUP_IP,
                   UDP_MESSAGE_SIZE_BYTES,
                   count=PACKET_COUNT,
                   rate=PACKETS_PER_SECOND)

    CLI(net)

    net.stop()
Example #28
def topology():
    "Create a network with some docker containers acting as hosts."
    edgefile = os.path.join(RESULTS_FOLDER, "./substrate.edges.empty.data")
    nodesfile = os.path.join(RESULTS_FOLDER, "./substrate.nodes.data")
    CDNfile = os.path.join(RESULTS_FOLDER, "CDN.nodes.data")
    startersFile = os.path.join(RESULTS_FOLDER, "starters.nodes.data")
    solutionsFile = os.path.join(RESULTS_FOLDER, "solutions.data")
    service_edges = os.path.join(RESULTS_FOLDER, "./service.edges.data")
    switch = partial(OVSSwitch, protocols='OpenFlow13')

    topo = loadTopo(edgefile, nodesfile, CDNfile, startersFile, solutionsFile,
                    service_edges)

    c = RemoteController('c', '0.0.0.0', 6633)
    # topodock=  loaddocker(os.path.join(RESULTS_FOLDER, "./substrate.edges.data"), os.path.join(RESULTS_FOLDER, "./substrate.nodes.data"))
    info('*** Start Containernet\n')
    net = Containernet(topo=topo, controller=c, link=TCLink, switch=switch)
    for host in net.hosts:
        if host.name in topo._cmd:
            for cmd in topo._cmd[host.name]:
                print("send cmd")
                print(host.sendCmd(cmd))

    info('*** Starting network\n')
    net.start()

    info('*** Running CLI\n')
    CLI(net)

    info('*** Stopping network')
    net.stop()
Example #29
    def createNet(self,
                  nswitches=1,
                  nhosts=0,
                  ndockers=0,
                  nlibvirt=0,
                  autolinkswitches=False,
                  use_running=False):
        """
        Creates a Mininet instance and automatically adds some
        nodes to it.
        """
        self.net = Containernet(controller=Controller,
                                mgmt_net={'mac': '00:AA:BB:CC:DD:EE'},
                                cmd_endpoint="qemu:///system")
        self.net.addController('c0')

        # add some switches
        for i in range(0, nswitches):
            self.s.append(self.net.addSwitch('s%d' % i))
        # if specified, chain all switches
        if autolinkswitches:
            for i in range(0, len(self.s) - 1):
                self.net.addLink(self.s[i], self.s[i + 1])
        # add some hosts
        for i in range(0, nhosts):
            self.h.append(self.net.addHost('h%d' % i))
        # add some dockers
        for i in range(0, ndockers):
            self.d.append(self.net.addDocker('d%d' % i,
                                             dimage="ubuntu:trusty"))

        for i in range(1, nlibvirt + 1):
            self.l.append(
                self.net.addLibvirthost('vm%d' % i,
                                        disk_image=DISK_IMAGE,
                                        use_existing_vm=use_running))
Example #30
def topology():

    "Create a network with some docker containers acting as hosts."

    net = Containernet(controller=Controller)

    info('*** Adding controller\n')
    net.addController('c0')

    info('*** Adding docker containers\n')
    d1 = net.addDocker('d1', ip='10.0.0.251', dimage="mpeuster/stress", cpuset="0,1")
    d1.sendCmd("./start.sh")

    info('*** Starting network\n')
    net.start()

    info('*** Running CLI\n')
    CLI(net)

    info('*** Stopping network')
    net.stop()
Example #31
def main():
    os.system("rm -f /tmp/R*.log /tmp/R*.pid logs/*")
    os.system("mn -c >/dev/null 2>&1")
    os.system("killall -9 zebra bgpd > /dev/null 2>&1")

    net = Containernet(topo=SimpleTopo(), switch=Router)
    net.start()
    for router in net.switches:
        router.cmd("sysctl -w net.ipv4.ip_forward=1")
        router.waitOutput()

    log("Waiting %d seconds for sysctl changes to take effect..." % args.sleep)
    sleep(args.sleep)

    for router in net.switches:
        router.cmd(
            "/usr/sbin/zebra -f conf/zebra-%s.conf -d -i /tmp/zebra-%s.pid > logs/%s-zebra-stdout 2>&1"
            % (router.name, router.name, router.name))
        router.waitOutput()
        router.cmd(
            "/usr/sbin/bgpd -f conf/bgpd-%s.conf -d -i /tmp/bgp-%s.pid > logs/%s-bgpd-stdout 2>&1"
            % (router.name, router.name, router.name),
            shell=True)
        router.waitOutput()
        log("Starting zebra and bgpd on %s" % router.name)
        if router.name == "R1":
            r1 = router
        if router.name == "R2":
            r2 = router

    info('*** Adding docker containers\n')
    d1 = net.addDocker('d1', path="./Dockerfile.webserver", rm=True)
    d2 = net.addDocker('d2', path="./Dockerfile.webserver", rm=True)

    net.addLink(d1, r1)
    net.addLink(d2, r2)

    CLI(net)
    net.stop()
    os.system("killall -9 zebra bgpd")
Example #32
 def __init__(self) -> None:
     system('clear')
     system('sudo mn -c')
     self.clear_logs()
     self.network: Containernet = Containernet(controller=RemoteController,
                                               switch=OVSSwitch,
                                               link=TCLink,
                                               autoSetMacs=True,
                                               ipBase='10.0.0.0/8')
     self.load_topology(TOPOLOGY_FILE)
     self.network.addController('c0',
                                controller=RemoteController,
                                ip='127.0.0.1',
                                port=6653)
     self.network.start()
     self.add_arps()
Example #33
    def addSwitch( self, name, add_to_graph=True, **params ):
        """
        Wrapper for addSwitch method to store switch also in graph.
        """

        # add this switch to the global topology overview
        if add_to_graph:
            self.DCNetwork_graph.add_node(name, type=params.get('type','switch'))

        # set the learning switch behavior
        if 'failMode' in params :
            failMode = params['failMode']
        else :
            failMode = self.failMode

        s = Containernet.addSwitch(self, name, protocols='OpenFlow10,OpenFlow12,OpenFlow13', failMode=failMode, **params)

        return s
Example #34
    def addSwitch( self, name, add_to_graph=True, **params ):
        """
        Wrapper for addSwitch method to store switch also in graph.
        """

        # add this switch to the global topology overview
        if add_to_graph:
            self.DCNetwork_graph.add_node(name, type=params.get('type','switch'))

        # set the learning switch behavior
        if 'failMode' in params :
            failMode = params['failMode']
        else :
            failMode = self.failMode

        s = Containernet.addSwitch(self, name, protocols='OpenFlow10,OpenFlow12,OpenFlow13', failMode=failMode, **params)

        return s
Example #35
def topology():
    "Create a network with some docker containers acting as hosts."
    global net
    net = Containernet()

    info('*** Adding controller\n')
    net.addController(name='c0',
                      controller=RemoteController,
                      ip='172.17.0.2',
                      protocol='tcp',
                      port=6653)

    info('*** Starting network\n')
    net.start()
    # server ip
    run(host='localhost', port=8090)
    # info('*** Running CLI\n')
    # CLI(net)

    info('*** Stopping network')
    net.stop()
Example #36
def topology():

    "Create a network with some docker containers acting as hosts."

    net = Containernet(controller=Controller)

    info('*** Adding controller\n')
    net.addController('c0')

    info('*** Adding docker containers\n')
    d1 = net.addDocker('d1', ip='10.0.0.251', dimage="mpeuster/stress", cpuset_cpus="0,1")
    d1.sendCmd("./start.sh")

    info('*** Starting network\n')
    net.start()

    info('*** Running CLI\n')
    CLI(net)

    info('*** Stopping network')
    net.stop()
Example #37
def createTopo(pod, density, ip="127.0.0.1", port=6633, bw_c2a=0.8, bw_a2e=0.4, bw_h2a=0.2):
    logging.debug("LV1 Create Fattree")
    topo = Fattree(pod, density)
    topo.createTopo()
    topo.createLink(bw_c2a=bw_c2a, bw_a2e=bw_a2e, bw_h2a=bw_h2a)

    logging.debug("LV1 Start Mininet")
    CONTROLLER_IP = ip
    CONTROLLER_PORT = port
    net = Containernet(topo=topo, link=TCLink, controller=None, autoSetMacs=True,
                  autoStaticArp=True)
    net.addController(
        'controller', controller=RemoteController,
        ip=CONTROLLER_IP, port=CONTROLLER_PORT)
    net.start()

    dump_etc_hosts(net)
    dump_mpi_hosts_file(net)
    run_set_ssh(net)

    CLI(net)
    net.stop()
Example #38
    def addSwitch( self, name, add_to_graph=True, **params ):
        """
        Wrapper for addSwitch method to store switch also in graph.
        """

        # add this switch to the global topology overview
        if add_to_graph:
            self.DCNetwork_graph.add_node(name)

        # set the learning switch behavior
        if 'failMode' in params :
            failMode = params['failMode']
        else :
            failMode = self.failMode

        s = Containernet.addSwitch(self, name, protocols='OpenFlow10,OpenFlow12,OpenFlow13', failMode=failMode, **params)

        # set flow entry that enables learning switch behavior (needed to enable E-LAN functionality)
        #LOG.info('failmode {0}'.format(failMode))
        #if failMode == 'standalone' :
        #    LOG.info('add NORMAL')
        #    s.dpctl('add-flow', 'actions=NORMAL')

        return s
Example #39
    def createNet(
            self,
            nswitches=1, nhosts=0, ndockers=0,
            autolinkswitches=False):
        """
        Creates a Mininet instance and automatically adds some
        nodes to it.
        """
        self.net = Containernet( controller=Controller )
        self.net.addController( 'c0' )

        # add some switches
        for i in range(0, nswitches):
            self.s.append(self.net.addSwitch('s%d' % i))
        # if specified, chain all switches
        if autolinkswitches:
            for i in range(0, len(self.s) - 1):
                self.net.addLink(self.s[i], self.s[i + 1])
        # add some hosts
        for i in range(0, nhosts):
            self.h.append(self.net.addHost('h%d' % i))
        # add some dockers
        for i in range(0, ndockers):
            self.d.append(self.net.addDocker('d%d' % i, dimage="ubuntu:trusty"))
Example #40
def topology():

    "Create a network with some docker containers acting as hosts."

    net = Containernet(controller=Controller)

    info('*** Adding controller\n')
    net.addController('c0')

    info('*** Adding hosts\n')
    h1 = net.addHost('h1')
    h2 = net.addHost('h2')

    info('*** Adding docker containers\n')
    d1 = net.addDocker('d1', ip='10.0.0.251', dimage="ubuntu:trusty")
    d2 = net.addDocker('d2', ip='10.0.0.252', dimage="ubuntu:trusty", cpu_period=50000, cpu_quota=25000)
    d3 = net.addHost(
        'd3', ip='11.0.0.253', cls=Docker, dimage="ubuntu:trusty", cpu_shares=20)
    d5 = net.addDocker('d5', dimage="ubuntu:trusty", volumes=["/:/mnt/vol1:rw"])

    info('*** Adding switch\n')
    s1 = net.addSwitch('s1')
    s2 = net.addSwitch('s2', cls=OVSSwitch)
    s3 = net.addSwitch('s3')

    info('*** Creating links\n')
    net.addLink(h1, s1)
    net.addLink(s1, d1)
    net.addLink(h2, s2)
    net.addLink(d2, s2)
    net.addLink(s1, s2)
    #net.addLink(s1, s2, cls=TCLink, delay="100ms", bw=1, loss=10)
    # try to add a second interface to a docker container
    net.addLink(d2, s3, params1={"ip": "11.0.0.254/8"})
    net.addLink(d3, s3)

    info('*** Starting network\n')
    net.start()

    net.ping([d1, d2])

    # our extended ping functionality
    net.ping([d1], manualdestip="10.0.0.252")
    net.ping([d2, d3], manualdestip="11.0.0.254")

    info('*** Dynamically add a container at runtime\n')
    d4 = net.addDocker('d4', dimage="ubuntu:trusty")
    # we have to specify a manual ip when we add a link at runtime
    net.addLink(d4, s1, params1={"ip": "10.0.0.254/8"})
    # other options to do this
    #d4.defaultIntf().ifconfig("10.0.0.254 up")
    #d4.setIP("10.0.0.254")

    net.ping([d1], manualdestip="10.0.0.254")

    info('*** Running CLI\n')
    CLI(net)

    info('*** Stopping network')
    net.stop()
Example #41
 def start(self):
     # start
     for dc in self.dcs.values():
         dc.start()
     Containernet.start(self)
Example #42
class simpleTestTopology( unittest.TestCase ):
    """
        Helper class to do basic test setups.
        s1 -- s2 -- s3 -- ... -- sN
    """

    def __init__(self, *args, **kwargs):
        self.net = None
        self.s = []  # list of switches
        self.h = []  # list of hosts
        self.d = []  # list of docker containers
        self.docker_cli = None
        super(simpleTestTopology, self).__init__(*args, **kwargs)

    def createNet(
            self,
            nswitches=1, nhosts=0, ndockers=0,
            autolinkswitches=False):
        """
        Creates a Mininet instance and automatically adds some
        nodes to it.
        """
        self.net = Containernet( controller=Controller )
        self.net.addController( 'c0' )

        # add some switches
        for i in range(0, nswitches):
            self.s.append(self.net.addSwitch('s%d' % i))
        # if specified, chain all switches
        if autolinkswitches:
            for i in range(0, len(self.s) - 1):
                self.net.addLink(self.s[i], self.s[i + 1])
        # add some hosts
        for i in range(0, nhosts):
            self.h.append(self.net.addHost('h%d' % i))
        # add some dockers
        for i in range(0, ndockers):
            self.d.append(self.net.addDocker('d%d' % i, dimage="ubuntu:trusty"))

    def startNet(self):
        self.net.start()

    def stopNet(self):
        self.net.stop()

    def getDockerCli(self):
        """
        Helper to interact with local docker instance.
        """
        if self.docker_cli is None:
            self.docker_cli = docker.APIClient(
                base_url='unix://var/run/docker.sock')
        return self.docker_cli

    @staticmethod
    def setUp():
        pass

    @staticmethod
    def tearDown():
        cleanup()
        # make sure that all pending docker containers are killed
        with open(os.devnull, 'w') as devnull:
            subprocess.call(
                "docker rm -f $(docker ps --filter 'label=com.containernet' -a -q)",
                stdout=devnull,
                stderr=devnull,
                shell=True)

    def getContainernetContainers(self):
        """
        List the containers managed by containernet
        """
        return self.getDockerCli().containers(filters={"label": "com.containernet"})
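A hypothetical test case built on the helper class above; the container-count assertion and the ping check are assumptions, not taken from the original suite:

# Hypothetical test using the simpleTestTopology helper.
class testDockerBasic(simpleTestTopology):

    def testTwoDockerHosts(self):
        # one switch, two Docker hosts, then connect and start everything
        self.createNet(nswitches=1, ndockers=2)
        self.net.addLink(self.d[0], self.s[0])
        self.net.addLink(self.d[1], self.s[0])
        self.startNet()
        # assumption: both containers carry the com.containernet label
        self.assertEqual(len(self.getContainernetContainers()), 2)
        self.net.ping([self.d[0], self.d[1]])
        self.stopNet()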
Example #43
 def removeDocker( self, label, **params ):
     """
     Wrapper for removeDocker method to update graph.
     """
     self.DCNetwork_graph.remove_node(label)
     return Containernet.removeDocker(self, label, **params)
Example #44
    def addLink(self, node1, node2, **params):
        """
        Able to handle Datacenter objects as link
        end points.
        """
        assert node1 is not None
        assert node2 is not None
        LOG.debug("addLink: n1=%s n2=%s" % (str(node1), str(node2)))
        # ensure type of node1
        if isinstance(node1, str):
            if node1 in self.dcs:
                node1 = self.dcs[node1].switch
        if isinstance(node1, Datacenter):
            node1 = node1.switch
        # ensure type of node2
        if isinstance(node2, str):
            if node2 in self.dcs:
                node2 = self.dcs[node2].switch
        if isinstance(node2, Datacenter):
            node2 = node2.switch
        # try to give containers a default IP
        if isinstance( node1, Docker ):
            if "params1" not in params:
                params["params1"] = {}
            if "ip" not in params["params1"]:
                params["params1"]["ip"] = self.getNextIp()
        if isinstance( node2, Docker ):
            if "params2" not in params:
                params["params2"] = {}
            if "ip" not in params["params2"]:
                params["params2"]["ip"] = self.getNextIp()
        # ensure that we allow TCLinks between data centers
        # TODO this is not optimal, we use cls=Link for containers and TCLink for data centers
        # see Containernet issue: https://github.com/mpeuster/containernet/issues/3
        if "cls" not in params:
            params["cls"] = TCLink

        link = Containernet.addLink(self, node1, node2, **params)

        # try to give container interfaces a default id
        node1_port_id = node1.ports[link.intf1]
        if isinstance(node1, Docker):
            if "id" in params["params1"]:
                node1_port_id = params["params1"]["id"]
        node1_port_name = link.intf1.name

        node2_port_id = node2.ports[link.intf2]
        if isinstance(node2, Docker):
            if "id" in params["params2"]:
                node2_port_id = params["params2"]["id"]
        node2_port_name = link.intf2.name


        # add edge and assigned port number to graph in both directions between node1 and node2
        # port_id: id given in descriptor (if available, otherwise same as port)
        # port: portnumber assigned by Containernet

        attr_dict = {}
        # possible weight metrics allowed by TClink class:
        weight_metrics = ['bw', 'delay', 'jitter', 'loss']
        edge_attributes = [p for p in params if p in weight_metrics]
        for attr in edge_attributes:
            # if delay: strip ms (need number as weight in graph)
            match = re.search(r'([0-9]*\.?[0-9]+)', str(params[attr]))
            if match:
                attr_number = match.group(1)
            else:
                attr_number = None
            attr_dict[attr] = attr_number


        attr_dict2 = {'src_port_id': node1_port_id, 'src_port_nr': node1.ports[link.intf1],
                      'src_port_name': node1_port_name,
                     'dst_port_id': node2_port_id, 'dst_port_nr': node2.ports[link.intf2],
                      'dst_port_name': node2_port_name}
        attr_dict2.update(attr_dict)
        self.DCNetwork_graph.add_edge(node1.name, node2.name, attr_dict=attr_dict2)

        attr_dict2 = {'src_port_id': node2_port_id, 'src_port_nr': node2.ports[link.intf2],
                      'src_port_name': node2_port_name,
                     'dst_port_id': node1_port_id, 'dst_port_nr': node1.ports[link.intf1],
                      'dst_port_name': node1_port_name}
        attr_dict2.update(attr_dict)
        self.DCNetwork_graph.add_edge(node2.name, node1.name, attr_dict=attr_dict2)

        return link
Example #45
 def removeExtSAP(self, sap_name, **params):
     """
     Wrapper for removeExtSAP method to remove SAP  also from graph.
     """
     self.DCNetwork_graph.remove_node(sap_name)
     return Containernet.removeExtSAP(self, sap_name)
Example #46
# Program variables

num_machines = 4  # Number of basic hosts
hosts = []  # Basic hosts (h1, h2, h3, h4, h5, h6)
links = []
setLogLevel('info')

info('*** Starting network configuration ***\n')

info('*** Create the controller \n')
c0 = RemoteController('c0', ip='172.17.0.2', port=6633)
info(c0)
info('*** Create Simple topology example\n')

net = Containernet(build=False, link=TCLink)
# Initialize topology

# Add containers
info('*** Adding docker containers using local_test_machine1 images\n')

# Adding the network hosts
for i in range(0, num_machines):
    hosts.append(
        net.addDocker('h' + str(i + 1),
                      ip='10.0.0.' + str(i + 1),
                      dimage="local_test_machine1"))

# Adding the measurement hosts
c_h100 = net.addDocker('c_h100', ip='10.0.0.100', dimage="local_test_machine1")
s_h200 = net.addDocker('s_h200',
Example #47
def tfTopo():
    net = Containernet( topo=None, controller=RemoteController, switch=OVSKernelSwitch )

    net.addController( 'c0', RemoteController, ip="127.0.0.1", port=6633 )

    #Arguments
    opts, args = getopt.getopt(sys.argv[1:], "", ["flows=", "dos="])
    for o, a in opts:
        if o == "--flows":
            number_of_flows=int(a)
            print("Flows: ", a)
        elif o in ("--dos"):
            number_of_dos=int(a)
            print("DoS: ", a)

# Hosts 
    h1 = net.addHost('h1', ip='10.0.0.1', mac='00:00:00:00:00:01')
    h2 = net.addHost('h2', ip='10.0.0.2', mac='00:00:00:00:00:02')
    h3 = net.addHost('h3', ip='10.0.0.3', mac='00:00:00:00:00:03')
    h4 = net.addHost('h4', ip='10.0.0.4', mac='00:00:00:00:00:04')
    h5 = net.addHost('h5', ip='10.0.0.5', mac='00:00:00:00:00:05')
    h6 = net.addHost('h6', ip='10.0.0.6', mac='00:00:00:00:00:06')
    h7 = net.addHost('h7', ip='10.0.0.7', mac='00:00:00:00:00:07')
    h8 = net.addHost('h8', ip='10.0.0.8', mac='00:00:00:00:00:08')
    h9 = net.addHost('h9', ip='10.0.0.9', mac='00:00:00:00:00:09')
    h10 = net.addHost('h10', ip='10.0.0.10', mac='00:00:00:00:00:10')

    p1 = net.addHost('p1', ip='10.0.1.1', mac='00:00:00:00:01:01', cls=Docker, dimage='gmiotto/click',mem_limit=1024*1024*10, cpu_quota=pop_cpu_percentage*100,cpu_period=10000)
    p2 = net.addHost('p2', ip='10.0.1.2', mac='00:00:00:00:01:02', cls=Docker, dimage='gmiotto/click',mem_limit=1024*1024*10, cpu_quota=pop_cpu_percentage*100,cpu_period=10000)
    p3 = net.addHost('p3', ip='10.0.1.3', mac='00:00:00:00:01:03', cls=Docker, dimage='gmiotto/click',mem_limit=1024*1024*10, cpu_quota=pop_cpu_percentage*100,cpu_period=10000)
    p4 = net.addHost('p4', ip='10.0.1.4', mac='00:00:00:00:01:04', cls=Docker, dimage='gmiotto/click',mem_limit=1024*1024*10, cpu_quota=pop_cpu_percentage*100,cpu_period=10000)
    p5 = net.addHost('p5', ip='10.0.1.5', mac='00:00:00:00:01:05', cls=Docker, dimage='gmiotto/click',mem_limit=1024*1024*10, cpu_quota=pop_cpu_percentage*100,cpu_period=10000)
    p6 = net.addHost('p6', ip='10.0.1.6', mac='00:00:00:00:01:06', cls=Docker, dimage='gmiotto/click',mem_limit=1024*1024*10, cpu_quota=pop_cpu_percentage*100,cpu_period=10000)

    #Switches
    s1 = net.addSwitch('s1')
    s2 = net.addSwitch('s2')
    s3 = net.addSwitch('s3')
    s4 = net.addSwitch('s4')
    s5 = net.addSwitch('s5')
    s6 = net.addSwitch('s6')
    s7 = net.addSwitch('s7')
    s8 = net.addSwitch('s8')
    s9 = net.addSwitch('s9')
    s10 = net.addSwitch('s10')

    #PoP Hosts
    net.addLink(p1,s1, cls=TCLink, delay=pop_link_delay,bw=pop_link_bw,loss=pop_link_loss)
    net.addLink(p1,s1)

    net.addLink(p2,s2, cls=TCLink, delay=pop_link_delay,bw=pop_link_bw,loss=pop_link_loss)
    net.addLink(p2,s2)

    net.addLink(p3,s3, cls=TCLink, delay=pop_link_delay,bw=pop_link_bw,loss=pop_link_loss)
    net.addLink(p3,s3)

    net.addLink(p4,s4, cls=TCLink, delay=pop_link_delay,bw=pop_link_bw,loss=pop_link_loss)
    net.addLink(p4,s4)

    net.addLink(p5,s5, cls=TCLink, delay=pop_link_delay,bw=pop_link_bw,loss=pop_link_loss)
    net.addLink(p5,s5)

    net.addLink(p6,s6, cls=TCLink, delay=pop_link_delay,bw=pop_link_bw,loss=pop_link_loss)
    net.addLink(p6,s6)

    #Normal Hosts
    net.addLink(h1,s1, cls=TCLink, delay=host_switch_delay,bw=host_switch_bw,loss=host_switch_loss)
    net.addLink(h2,s2, cls=TCLink, delay=host_switch_delay,bw=host_switch_bw,loss=host_switch_loss)
    net.addLink(h3,s3, cls=TCLink, delay=host_switch_delay,bw=host_switch_bw,loss=host_switch_loss)
    net.addLink(h4,s4, cls=TCLink, delay=host_switch_delay,bw=host_switch_bw,loss=host_switch_loss)
    net.addLink(h5,s5, cls=TCLink, delay=host_switch_delay,bw=host_switch_bw,loss=host_switch_loss)
    net.addLink(h6,s6, cls=TCLink, delay=host_switch_delay,bw=host_switch_bw,loss=host_switch_loss)
    net.addLink(h7,s7, cls=TCLink, delay=host_switch_delay,bw=host_switch_bw,loss=host_switch_loss)
    net.addLink(h8,s8, cls=TCLink, delay=host_switch_delay,bw=host_switch_bw,loss=host_switch_loss)
    net.addLink(h9,s9, cls=TCLink, delay=host_switch_delay,bw=host_switch_bw,loss=host_switch_loss)
    net.addLink(h10,s10, cls=TCLink, delay=host_switch_delay,bw=host_switch_bw,loss=host_switch_loss)

    net.addLink(s7, s1, cls=TCLink, delay=inter_switch_delay,bw=inter_switch_bw,loss=inter_switch_loss) #s7-s1
    net.addLink(s7, s2, cls=TCLink, delay=inter_switch_delay,bw=inter_switch_bw,loss=inter_switch_loss) 
    net.addLink(s1, s2, cls=TCLink, delay=inter_switch_delay,bw=inter_switch_bw,loss=inter_switch_loss) 
    net.addLink(s1, s8, cls=TCLink, delay=inter_switch_delay,bw=inter_switch_bw,loss=inter_switch_loss) 
    net.addLink(s1, s3, cls=TCLink, delay=inter_switch_delay,bw=inter_switch_bw,loss=inter_switch_loss) 
    net.addLink(s1, s6, cls=TCLink, delay=inter_switch_delay,bw=inter_switch_bw,loss=inter_switch_loss) 
    net.addLink(s8, s3, cls=TCLink, delay=inter_switch_delay,bw=inter_switch_bw,loss=inter_switch_loss) 
    net.addLink(s2, s5, cls=TCLink, delay=inter_switch_delay,bw=inter_switch_bw,loss=inter_switch_loss) 
    net.addLink(s2, s4, cls=TCLink, delay=inter_switch_delay,bw=inter_switch_bw,loss=inter_switch_loss) 
    net.addLink(s3, s5, cls=TCLink, delay=inter_switch_delay,bw=inter_switch_bw,loss=inter_switch_loss) 
    net.addLink(s3, s4, cls=TCLink, delay=inter_switch_delay,bw=inter_switch_bw,loss=inter_switch_loss) 
    net.addLink(s4, s9, cls=TCLink, delay=inter_switch_delay,bw=inter_switch_bw,loss=inter_switch_loss) 
    net.addLink(s4, s6, cls=TCLink, delay=inter_switch_delay,bw=inter_switch_bw,loss=inter_switch_loss) 
    net.addLink(s5, s6, cls=TCLink, delay=inter_switch_delay,bw=inter_switch_bw,loss=inter_switch_loss) 
    net.addLink(s5, s10, cls=TCLink, delay=inter_switch_delay,bw=inter_switch_bw,loss=inter_switch_loss) 
    net.addLink(s9, s6, cls=TCLink, delay=inter_switch_delay,bw=inter_switch_bw,loss=inter_switch_loss) 
    net.addLink(s10, s6, cls=TCLink, delay=inter_switch_delay,bw=inter_switch_bw,loss=inter_switch_loss) 

    net.start()

    for host in net.hosts:
        if "h" in host.name:
            host.cmd('ethtool -K %s-eth0 tso off' % host.name)
            host.cmd('python httpserver.py  80 &')

    for host in net.hosts:
        if "p" in host.name:
            call("sudo bash Click/runFirewall.sh %s Click/firewall3.click " % host.name,shell=True)


    CLI(net)
    net.stop()
Example #48
    def __init__(self, controller=RemoteController, monitor=False,
                 enable_learning=False,  # learning-switch behavior of the default OVS switches, in combination with the Ryu controller, can be turned on or off; needed for E-LAN functionality
                 dc_emulation_max_cpu=1.0,  # fraction of overall CPU time for emulation
                 dc_emulation_max_mem=512,  # emulation max mem in MB
                 **kwargs):
        """
        Create an extended version of a Containernet network
        :param dc_emulation_max_cpu: max. CPU time used by containers in data centers
        :param kwargs: path through for Mininet parameters
        :return:
        """
        # members
        self.dcs = {}
        self.ryu_process = None
        # list of deployed NSDs, E-Lines and E-LANs (uploaded from the dummy gatekeeper)
        self.deployed_nsds = []
        self.deployed_elines = []
        self.deployed_elans = []
        self.vlan_dict = {}

        # flag to indicate if the topology has been stopped (e.g. by api call)
        self.exit = False

        # always cleanup environment before we start the emulator
        self.killRyu()
        cleanup()

        # call original Docker.__init__ and setup default controller
        Containernet.__init__(
            self, switch=OVSKernelSwitch, controller=controller, **kwargs)

        # default switch configuration
        enable_ryu_learning = False
        if enable_learning :
            self.failMode = 'standalone'
            enable_ryu_learning = True
        else:
            self.failMode = 'secure'

        # Ryu management
        if controller == RemoteController:
            # start Ryu controller
            self.startRyu(learning_switch=enable_ryu_learning)

        # add the specified controller
        self.addController('c0', controller=controller)

        # graph of the complete DC network
        self.DCNetwork_graph = nx.MultiDiGraph()

        # initialize pool of vlan tags to setup the SDN paths
        self.vlans = range(1, 4095)[::-1]

        # link to Ryu REST_API
        ryu_ip = 'localhost'
        ryu_port = '8080'
        self.ryu_REST_api = 'http://{0}:{1}'.format(ryu_ip, ryu_port)
        self.RyuSession = requests.Session()

        # monitoring agent
        if monitor:
            self.monitor_agent = DCNetworkMonitor(self)
        else:
            self.monitor_agent = None

        # initialize resource model registrar
        self.rm_registrar = ResourceModelRegistrar(
            dc_emulation_max_cpu, dc_emulation_max_mem)
        self.cpu_period = CPU_PERIOD
Example #49
def tfTopo():
 net = Containernet( topo=None, controller=RemoteController, switch=OVSKernelSwitch )

 net.addController( 'c0', RemoteController, ip="127.0.0.1", port=6633 )

 # Hosts 
 h1 = net.addHost('h1', ip='10.0.0.1', mac='00:00:00:00:00:01')
 h2 = net.addHost('h2', ip='10.0.0.2', mac='00:00:00:00:00:02')
 h3 = net.addHost('h3', ip='10.0.0.3', mac='00:00:00:00:00:03', cls=Docker, dimage='gmiotto/click',mem_limit=1024*1024*10)
 h4 = net.addHost('h4', ip='10.0.0.4', mac='00:00:00:00:00:04')
 h5 = net.addHost('h5', ip='10.0.0.5', mac='00:00:00:00:00:05')

 #Switches
 s1 = net.addSwitch('s1')
 s2 = net.addSwitch('s2')
 s3 = net.addSwitch('s3')
 s4 = net.addSwitch('s4')
 s5 = net.addSwitch('s5')

 net.addLink(h3,s3)
 net.addLink(h3,s3)

 net.addLink(s1,s2)
 net.addLink(s2,s3)
 net.addLink(s3,s4)
 net.addLink(s4,s5)
 
 net.addLink(h1,s1)
 net.addLink(h2,s2)
 net.addLink(h4,s4)
 net.addLink(h5,s5)
 


 net.start()

 for host in net.hosts:
     if "h" in host.name:
         host.cmd('ethtool -K %s-eth0 tso off' % host.name)
 #call("echo  %s "% 'ha',shell=True)
 
 CLI(net)
 net.stop()
Example #50
#!/usr/bin/python
"""
This topology is used to test the compatibility of different Docker images.
The images to be tested can be found in 'examples/example-containers'.
They are built with './build.sh'
"""
from mininet.net import Containernet
from mininet.node import Controller
from mininet.cli import CLI
from mininet.link import TCLink
from mininet.log import info, setLogLevel
setLogLevel('info')

net = Containernet(controller=Controller)
info('*** Adding controller\n')
net.addController('c0')

info('*** Adding docker containers\n')
d1 = net.addDocker('d1', dimage="ubuntu:trusty")
d2 = net.addDocker('d2', dimage="containernet_example:ubuntu1404")
d3 = net.addDocker('d3', dimage="containernet_example:ubuntu1604")
d4 = net.addDocker('d4', dimage="containernet_example:ubuntu1804")
d5 = net.addDocker('d5', dimage="containernet_example:centos6")
d6 = net.addDocker('d6', dimage="containernet_example:centos7")

info('*** Adding switches\n')
s1 = net.addSwitch('s1')

info('*** Creating links\n')
net.addLink(d1, s1)
net.addLink(d2, s1)
Example #51
def tfTopo():
 net = Containernet( topo=None, controller=RemoteController, switch=OVSKernelSwitch )

 net.addController( 'c0', RemoteController, ip="127.0.0.1", port=6633 )

 # Hosts 
 h1 = net.addHost('h1', ip='10.0.0.1', mac='00:00:00:00:00:01')
 h2 = net.addHost('h2', ip='10.0.0.2', mac='00:00:00:00:00:02')
 h3 = net.addHost('h3', ip='10.0.0.3', mac='00:00:00:00:00:03', cls=Docker, dimage='gmiotto/click',mem_limit=1024*1024*10, cpu_shares=2)
 h4 = net.addHost('h4', ip='10.0.0.4', mac='00:00:00:00:00:04', cls=Docker, dimage='gmiotto/click',mem_limit=1024*1024*10, cpu_shares=10)
 h5 = net.addHost('h5', ip='10.0.0.5', mac='00:00:00:00:00:05', cls=Docker, dimage='gmiotto/click',mem_limit=1024*1024*10, cpu_shares=10)
 h6 = net.addHost('h6', ip='10.0.0.6', mac='00:00:00:00:00:06')
 h7 = net.addHost('h7', ip='10.0.0.7', mac='00:00:00:00:00:07')
 h8 = net.addHost('h8', ip='10.0.0.8', mac='00:00:00:00:00:08')
 h9 = net.addHost('h9', ip='10.0.0.9', mac='00:00:00:00:00:09')

 #Switches
 s1 = net.addSwitch('s1')
 s2 = net.addSwitch('s2')
 s3 = net.addSwitch('s3')
 s4 = net.addSwitch('s4')
 s5 = net.addSwitch('s5')
 s6 = net.addSwitch('s6')
 s7 = net.addSwitch('s7')
 s8 = net.addSwitch('s8')
 s9 = net.addSwitch('s9')

 net.addLink(h3,s3)
 net.addLink(h3,s3)

 net.addLink(h4,s4)
 net.addLink(h4,s4)

 net.addLink(h5,s5)
 net.addLink(h5,s5)

 net.addLink(s1,s6)
 net.addLink(s1,s7)

 #net.addLink(s6, s3, cls=TCLink, delay="100ms", bw=0.5, loss=0)
 net.addLink(s6,s3)
 net.addLink(s6, s4, cls=TCLink, delay="1ms", bw=2, loss=0)
 #net.addLink(s6,s4)
 net.addLink(s6,s5)
 net.addLink(s7,s3)
 net.addLink(s7,s5)
 
 net.addLink(s3,s8)
 net.addLink(s3,s9)
 net.addLink(s4,s8, cls=TCLink, delay="1ms", bw=2, loss=0)
 net.addLink(s4,s9)
 net.addLink(s5,s9)
 
 net.addLink(s8,s2)
 net.addLink(s9,s2)
 
 net.addLink(h1,s1)
 net.addLink(h2,s2)
 net.addLink(h6,s6)
 net.addLink(h7,s7)
 net.addLink(h8,s8)
 net.addLink(h9,s9)
 


 net.start()

 for host in net.hosts:
     if "h" in host.name:
         host.cmd('ethtool -K %s-eth0 tso off' % host.name)
 call("sudo bash Click/runFirewall.sh h3 Click/firewall3.click ",shell=True)
 call("sudo bash Click/runFirewall.sh h4 Click/firewall3.click ",shell=True)
 call("sudo bash Click/runFirewall.sh h5 Click/firewall3.click ",shell=True)
 
 h2.cmd('python -m SimpleHTTPServer 80 &')

 CLI(net)
 net.stop()

Example #52
class LinuxRouter(Node):
    "A Node with IP forwarding enabled."

    def config(self, **params):
        super(LinuxRouter, self).config(**params)
        # Enable forwarding on the router
        self.cmd('sysctl net.ipv4.ip_forward=1')

    def terminate(self):
        self.cmd('sysctl net.ipv4.ip_forward=0')
        super(LinuxRouter, self).terminate()


net = Containernet(controller=Controller)

info('*** Adding controller\n')
net.addController('c0', port=6654)

info('*** Adding Router\n')
defaultIP = '172.17.0.0/24'  # IP address for r0-eth1
router = net.addNode('r0', cls=LinuxRouter, ip=defaultIP)

info('*** Adding docker containers using {} images\n'.format(IMAGE_NAME))

# port bindings are swapped (host_machine:docker_container)
d1 = net.addDocker(name='d1',
                   ip='10.0.0.251',
                   ports=[1883],
                   port_bindings={1883: 1883},
Example #53
def tfTopo():
    net = Containernet( topo=None, controller=RemoteController, switch=OVSKernelSwitch )

    net.addController( 'c0', RemoteController, ip="127.0.0.1", port=6633 )

    #Arguments
    opts, args = getopt.getopt(sys.argv[1:], "", ["flows=", "dos="])
    for o, a in opts:
        if o == "--flows":
            number_of_flows=int(a)
            print("Flows: ", a)
        elif o in ("--dos"):
            number_of_dos=int(a)
            print("DoS: ", a)

# Hosts 
    h1 = net.addHost('h1', ip='10.0.0.1', mac='00:00:00:00:00:01')
    h2 = net.addHost('h2', ip='10.0.0.2', mac='00:00:00:00:00:02')
    h3 = net.addHost('h3', ip='10.0.0.3', mac='00:00:00:00:00:03')
    h4 = net.addHost('h4', ip='10.0.0.4', mac='00:00:00:00:00:04')
    h5 = net.addHost('h5', ip='10.0.0.5', mac='00:00:00:00:00:05')
    h6 = net.addHost('h6', ip='10.0.0.6', mac='00:00:00:00:00:06')
    h7 = net.addHost('h7', ip='10.0.0.7', mac='00:00:00:00:00:07')
    h8 = net.addHost('h8', ip='10.0.0.8', mac='00:00:00:00:00:08')
    h9 = net.addHost('h9', ip='10.0.0.9', mac='00:00:00:00:00:09')
    h10 = net.addHost('h10', ip='10.0.0.10', mac='00:00:00:00:00:10')

    p1 = net.addHost('p1', ip='10.0.1.1', mac='00:00:00:00:01:01', cls=Docker, dimage='gmiotto/click',mem_limit=1024*1024*10, cpu_quota=pop_cpu_percentage*100,cpu_period=10000)
    p2 = net.addHost('p2', ip='10.0.1.2', mac='00:00:00:00:01:02', cls=Docker, dimage='gmiotto/click',mem_limit=1024*1024*10, cpu_quota=pop_cpu_percentage*100,cpu_period=10000)
    p3 = net.addHost('p3', ip='10.0.1.3', mac='00:00:00:00:01:03', cls=Docker, dimage='gmiotto/click',mem_limit=1024*1024*10, cpu_quota=pop_cpu_percentage*100,cpu_period=10000)
    p4 = net.addHost('p4', ip='10.0.1.4', mac='00:00:00:00:01:04', cls=Docker, dimage='gmiotto/click',mem_limit=1024*1024*10, cpu_quota=pop_cpu_percentage*100,cpu_period=10000)
    p5 = net.addHost('p5', ip='10.0.1.5', mac='00:00:00:00:01:05', cls=Docker, dimage='gmiotto/click',mem_limit=1024*1024*10, cpu_quota=pop_cpu_percentage*100,cpu_period=10000)

    #Switches
    s1 = net.addSwitch('s1')
    s2 = net.addSwitch('s2')
    s3 = net.addSwitch('s3')
    s4 = net.addSwitch('s4')
    s5 = net.addSwitch('s5')
    s6 = net.addSwitch('s6')
    s7 = net.addSwitch('s7')
    s8 = net.addSwitch('s8')
    s9 = net.addSwitch('s9')
    s10 = net.addSwitch('s10')

    #PoP Hosts
    net.addLink(p1,s1, cls=TCLink, delay=pop_link_delay,bw=pop_link_bw,loss=pop_link_loss)
    net.addLink(p1,s1)

    net.addLink(p2,s2, cls=TCLink, delay=pop_link_delay,bw=pop_link_bw,loss=pop_link_loss)
    net.addLink(p2,s2)

    net.addLink(p3,s3, cls=TCLink, delay=pop_link_delay,bw=pop_link_bw,loss=pop_link_loss)
    net.addLink(p3,s3)

    net.addLink(p4,s4, cls=TCLink, delay=pop_link_delay,bw=pop_link_bw,loss=pop_link_loss)
    net.addLink(p4,s4)

    net.addLink(p5,s5, cls=TCLink, delay=pop_link_delay,bw=pop_link_bw,loss=pop_link_loss)
    net.addLink(p5,s5)

    #Normal Hosts
    net.addLink(h1,s1, cls=TCLink, delay=host_switch_delay,bw=host_switch_bw,loss=host_switch_loss)
    net.addLink(h2,s2, cls=TCLink, delay=host_switch_delay,bw=host_switch_bw,loss=host_switch_loss)
    net.addLink(h3,s3, cls=TCLink, delay=host_switch_delay,bw=host_switch_bw,loss=host_switch_loss)
    net.addLink(h4,s4, cls=TCLink, delay=host_switch_delay,bw=host_switch_bw,loss=host_switch_loss)
    net.addLink(h5,s5, cls=TCLink, delay=host_switch_delay,bw=host_switch_bw,loss=host_switch_loss)
    net.addLink(h6,s6, cls=TCLink, delay=host_switch_delay,bw=host_switch_bw,loss=host_switch_loss)
    net.addLink(h7,s7, cls=TCLink, delay=host_switch_delay,bw=host_switch_bw,loss=host_switch_loss)
    net.addLink(h8,s8, cls=TCLink, delay=host_switch_delay,bw=host_switch_bw,loss=host_switch_loss)
    net.addLink(h9,s9, cls=TCLink, delay=host_switch_delay,bw=host_switch_bw,loss=host_switch_loss)
    net.addLink(h10,s10, cls=TCLink, delay=host_switch_delay,bw=host_switch_bw,loss=host_switch_loss)

    net.addLink(s7, s1, cls=TCLink, delay=inter_switch_delay,bw=inter_switch_bw,loss=inter_switch_loss) #s7-s1
    net.addLink(s7, s2, cls=TCLink, delay=inter_switch_delay,bw=inter_switch_bw,loss=inter_switch_loss) 
    net.addLink(s1, s2, cls=TCLink, delay=inter_switch_delay,bw=inter_switch_bw,loss=inter_switch_loss) 
    net.addLink(s1, s8, cls=TCLink, delay=inter_switch_delay,bw=inter_switch_bw,loss=inter_switch_loss) 
    net.addLink(s1, s3, cls=TCLink, delay=inter_switch_delay,bw=inter_switch_bw,loss=inter_switch_loss) 
    net.addLink(s1, s6, cls=TCLink, delay=inter_switch_delay,bw=inter_switch_bw,loss=inter_switch_loss) 
    net.addLink(s8, s3, cls=TCLink, delay=inter_switch_delay,bw=inter_switch_bw,loss=inter_switch_loss) 
    net.addLink(s2, s5, cls=TCLink, delay=inter_switch_delay,bw=inter_switch_bw,loss=inter_switch_loss) 
    net.addLink(s2, s4, cls=TCLink, delay=inter_switch_delay,bw=inter_switch_bw,loss=inter_switch_loss) 
    net.addLink(s3, s5, cls=TCLink, delay=inter_switch_delay,bw=inter_switch_bw,loss=inter_switch_loss) 
    net.addLink(s3, s4, cls=TCLink, delay=inter_switch_delay,bw=inter_switch_bw,loss=inter_switch_loss) 
    net.addLink(s4, s9, cls=TCLink, delay=inter_switch_delay,bw=inter_switch_bw,loss=inter_switch_loss) 
    net.addLink(s4, s6, cls=TCLink, delay=inter_switch_delay,bw=inter_switch_bw,loss=inter_switch_loss) 
    net.addLink(s5, s6, cls=TCLink, delay=inter_switch_delay,bw=inter_switch_bw,loss=inter_switch_loss) 
    net.addLink(s5, s10, cls=TCLink, delay=inter_switch_delay,bw=inter_switch_bw,loss=inter_switch_loss) 
    net.addLink(s9, s6, cls=TCLink, delay=inter_switch_delay,bw=inter_switch_bw,loss=inter_switch_loss) 
    net.addLink(s10, s6, cls=TCLink, delay=inter_switch_delay,bw=inter_switch_bw,loss=inter_switch_loss) 

    net.start()

    for host in net.hosts:
        if "h" in host.name:
            host.cmd('ethtool -K %s-eth0 tso off' % host.name)
            host.cmd('python httpserver.py  80 &')

    for host in net.hosts:
        if "p" in host.name:
            call("sudo bash Click/runFirewall.sh %s Click/firewall3.click " % host.name,shell=True)

    time.sleep(5)

    #Flows 
    random.seed()
    hs = [0,1,2,3,4,5,6,7,8,9]
    random.shuffle(hs)
    if number_of_flows > 5:
        number_of_flows = 5
    for i in range(0,number_of_flows):
        h_src = hs[2*i]
        h_tgt = hs[2*i+1]
        #pair = random.sample([0,1,2,3,4,5,6,7,8,9],2)
    #    print net.hosts[pair[0]].name, "->", net.hosts[pair[1]].name
        net.hosts[h_src].cmd('bash client.sh "%s" 10.0.0.%s &' % (net.hosts[h_src].name, h_tgt+1))
        net.hosts[h_src].cmd('echo ha')
        print('bash client.sh "%s" %s &' % (net.hosts[h_src].name, net.hosts[h_tgt].name))
        
    time.sleep(2)

    targets = [1,2,3,4,5]
    random.shuffle(targets)
    for i in range(0,number_of_dos):
        h1.cmd('ping -c1 10.0.1.%s &' % targets[i])
        print("Attacking p%s" % targets[i])

    #h1.cmd('ping -c10 p5 &')
    time.sleep(60)
    #time.sleep(150)
    for host in net.hosts:
        if "h" in host.name:
            host.cmd('echo ha')

    #CLI(net)
    net.stop()
Example #54
                if options.get("multi", False):
                    for i in range(multi_count):
                        host_name_i = host_name + "-" + str(i + 1)
                        host = net[host_name_i]
                        host.cmd("/usr/sbin/sshd -D -o UseDNS=no -u0 &")
                        debug("Starting SSHD on host")
                        waitListening(client=host,
                                      server=host,
                                      port=22,
                                      timeout=5)
                else:
                    host = net[host_name]
                    host.cmd("/usr/sbin/sshd -D -o UseDNS=no -u0 &")
                    debug("Starting SSHD on host")
                    waitListening(client=host, server=host, port=22, timeout=5)


if __name__ == '__main__':
    # Tell mininet to print useful information
    setLogLevel('info')
    topo = CDCITopology()
    net = Containernet(controller=Controller)
    topo.configure_network(net, "lab03", 1)
    net.start()
    topo.start_sshd(net, "lab03", 1)
    topo.configure_routes(net)
    print("Host connections:")
    #dumpNodeConnections(net.hosts)
    CLI(net)
    net.stop()
Example #55
#!/usr/bin/python
"""
This is the simplest example to showcase Containernet.
"""
from mininet.net import Containernet
from mininet.node import Controller
from mininet.cli import CLI
from mininet.link import TCLink
from mininet.log import info, setLogLevel
setLogLevel('info')

net = Containernet(controller=Controller)
info('*** Adding controller\n')
net.addController('c0')
info('*** Adding docker containers\n')
d1 = net.addDocker('d1', ip='10.0.0.251', dimage="ubuntu:trusty")
d2 = net.addDocker('d2', ip='10.0.0.252', dimage="ubuntu:trusty")
info('*** Adding switches\n')
s1 = net.addSwitch('s1')
s2 = net.addSwitch('s2')
info('*** Creating links\n')
net.addLink(d1, s1)
net.addLink(s1, s2, cls=TCLink, delay='100ms', bw=1)
net.addLink(s2, d2)
info('*** Starting network\n')
net.start()
info('*** Testing connectivity\n')
net.ping([d1, d2])
info('*** Running CLI\n')
CLI(net)
info('*** Stopping network')