from mininet.node import RemoteController
from emuvim.dcemulator.net import DCNetwork
from emuvim.api.rest.rest_api_endpoint import RestApiEndpoint
from emuvim.api.sonata import SonataDummyGatekeeperEndpoint


def create_topology1():
    # create topology
    net = DCNetwork(controller=RemoteController, monitor=False,
                    enable_learning=True)
    dc1 = net.addDatacenter("dc1")
    dc2 = net.addDatacenter("dc2")
    s1 = net.addSwitch("s1")
    net.addLink(dc1, s1, delay="10ms")
    net.addLink(dc2, s1, delay="20ms")
    # add the command line interface endpoint to each DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(dc1)
    rapi1.connectDatacenter(dc2)
    # run API endpoint server (in another thread, don't block)
    rapi1.start()
    # add the SONATA dummy gatekeeper to each DC
    sdkg1 = SonataDummyGatekeeperEndpoint("0.0.0.0", 5000, deploy_sap=False)
    sdkg1.connectDatacenter(dc1)
    sdkg1.connectDatacenter(dc2)
    # run the dummy gatekeeper (in another thread, don't block)
    sdkg1.start()
    # start the emulation platform
    net.start()
    net.CLI()
    rapi1.stop()
    net.stop()
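
# Minimal usage sketch (an assumption, not part of the original example):
# enable Mininet log output and run the topology; create_topology1() blocks
# in the CLI and cleans up on exit.
if __name__ == '__main__':
    from mininet.log import setLogLevel
    setLogLevel('info')
    create_topology1()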
import logging

from mininet.log import setLogLevel
from emuvim.dcemulator.net import DCNetwork
from emuvim.api.rest.rest_api_endpoint import RestApiEndpoint
from emuvim.api.tango import TangoLLCMEndpoint

LOG = logging.getLogger(__name__)


class EmulatorProfilingTopology(object):

    def __init__(self):
        pass

    def start(self):
        LOG.info("Starting emulation ...")
        setLogLevel('info')  # set Mininet loglevel
        # create topology
        self.net = DCNetwork(monitor=False, enable_learning=False)
        # we only need one DC for benchmarking
        dc = self.net.addDatacenter("dc1")
        # add the command line interface endpoint to each DC (REST API)
        self.rapi1 = RestApiEndpoint("0.0.0.0", 5001)
        self.rapi1.connectDCNetwork(self.net)
        self.rapi1.connectDatacenter(dc)
        self.rapi1.start()
        # add the 5GTANGO lightweight life cycle manager (LLCM) to the topology
        self.llcm1 = TangoLLCMEndpoint("0.0.0.0", 5000, deploy_sap=False)
        self.llcm1.connectDatacenter(dc)
        self.llcm1.start()
        self.net.start()

    def stop(self):
        LOG.info("Stopping emulation ...")
        self.rapi1.stop()
        self.llcm1.stop()
        self.net.stop()
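
# Usage sketch (an assumption, not part of the original listing): start the
# profiling topology, keep it alive until Ctrl-C, then clean up.
if __name__ == '__main__':
    import time
    t = EmulatorProfilingTopology()
    t.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        pass
    t.stop()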
import signal
import time

from mininet.link import TCLink
from emuvim.dcemulator.net import DCNetwork
from emuvim.api.rest.rest_api_endpoint import RestApiEndpoint
from emuvim.api.openstack.openstack_api_endpoint import OpenstackApiEndpoint


class DaemonTopology(object):
    """
    Topology with two datacenters:

        dc1 <-- 50ms --> dc2
    """

    def __init__(self):
        self.running = True
        signal.signal(signal.SIGINT, self._stop_by_signal)
        signal.signal(signal.SIGTERM, self._stop_by_signal)
        # create and start topology
        self.create_topology()
        self.start_topology()
        self.daemonize()
        self.stop_topology()

    def create_topology(self):
        self.net = DCNetwork(monitor=False, enable_learning=True)
        self.dc1 = self.net.addDatacenter("dc1")
        self.dc2 = self.net.addDatacenter("dc2")
        self.net.addLink(self.dc1, self.dc2, cls=TCLink, delay="50ms")
        # add OpenStack-like APIs to the emulated DC
        self.api1 = OpenstackApiEndpoint("0.0.0.0", 6001)
        self.api1.connect_datacenter(self.dc1)
        self.api1.connect_dc_network(self.net)
        self.api2 = OpenstackApiEndpoint("0.0.0.0", 6002)
        self.api2.connect_datacenter(self.dc2)
        self.api2.connect_dc_network(self.net)
        # add the command line interface endpoint to the emulated DC (REST API)
        self.rapi1 = RestApiEndpoint("0.0.0.0", 5001)
        self.rapi1.connectDCNetwork(self.net)
        self.rapi1.connectDatacenter(self.dc1)
        self.rapi1.connectDatacenter(self.dc2)

    def start_topology(self):
        self.api1.start()
        self.api2.start()
        self.rapi1.start()
        self.net.start()

    def daemonize(self):
        print("Daemonizing vim-emu. Send SIGTERM or SIGKILL to stop.")
        while self.running:
            time.sleep(1)

    def _stop_by_signal(self, signum, frame):
        print("Received SIGNAL {}. Stopping.".format(signum))
        self.running = False

    def stop_topology(self):
        self.api1.stop()
        self.api2.stop()
        self.rapi1.stop()
        self.net.stop()
import signal
import subprocess
import time

from emuvim.dcemulator.net import DCNetwork
from emuvim.api.rest.rest_api_endpoint import RestApiEndpoint


class DaemonTopology(object):

    def __init__(self):
        self.running = True
        signal.signal(signal.SIGINT, self._stop_by_signal)
        signal.signal(signal.SIGTERM, self._stop_by_signal)
        # create and start topology
        self.create_topology()
        self.start_topology()
        self.daemonize()
        self.stop_topology()

    def create_topology(self):
        self.net = DCNetwork(monitor=False, enable_learning=True)
        self.client_dc = self.net.addDatacenter("client_dc")
        self.vnfs_dc = self.net.addDatacenter("vnfs_dc")
        self.server_dc = self.net.addDatacenter("server_dc")
        self.switch1 = self.net.addSwitch("switch1")
        self.switch2 = self.net.addSwitch("switch2")
        linkopts = dict(delay="1ms", bw=100)
        self.net.addLink(self.client_dc, self.switch1, **linkopts)
        self.net.addLink(self.vnfs_dc, self.switch1, **linkopts)
        self.net.addLink(self.switch1, self.switch2, **linkopts)
        self.net.addLink(self.vnfs_dc, self.switch2, **linkopts)
        self.net.addLink(self.switch2, self.server_dc, **linkopts)
        # add the command line interface endpoint to the emulated DC (REST API)
        self.rest = RestApiEndpoint("0.0.0.0", 5001)
        self.rest.connectDCNetwork(self.net)
        self.rest.connectDatacenter(self.client_dc)
        self.rest.connectDatacenter(self.vnfs_dc)
        self.rest.connectDatacenter(self.server_dc)

    def start_topology(self):
        self.rest.start()
        self.net.start()
        subprocess.call("./res/scripts/init_two_clients_servers.sh", shell=True)

    def daemonize(self):
        print("Daemonizing vim-emu. Send SIGTERM or SIGKILL to stop.")
        while self.running:
            time.sleep(1)

    def _stop_by_signal(self, signum, frame):
        print("Received SIGNAL {}. Stopping.".format(signum))
        self.running = False

    def stop_topology(self):
        self.rest.stop()
        self.net.stop()
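
# Entry-point sketch for the daemonized topologies above (an assumption, not
# from the original files): the constructor already creates, starts,
# daemonizes, and stops the topology, so running it is a single call. Note
# that start_topology() above shells out to
# ./res/scripts/init_two_clients_servers.sh, which must exist relative to the
# working directory.
if __name__ == '__main__':
    DaemonTopology()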
import os
import subprocess
import unittest

import docker
from mininet.clean import cleanup
from mininet.node import Controller
from emuvim.dcemulator.net import DCNetwork
from emuvim.api.rest.rest_api_endpoint import RestApiEndpoint


class SimpleTestTopology(unittest.TestCase):
    """
    Helper class to do basic test setups.
    s1 -- s2 -- s3 -- ... -- sN
    """

    def __init__(self, *args, **kwargs):
        self.net = None
        self.api = None
        self.s = []    # list of switches
        self.h = []    # list of hosts
        self.d = []    # list of docker containers
        self.dc = []   # list of data centers
        self.docker_cli = None
        super(SimpleTestTopology, self).__init__(*args, **kwargs)

    def createNet(self,
                  nswitches=0, ndatacenter=0, nhosts=0, ndockers=0,
                  autolinkswitches=False, controller=Controller, **kwargs):
        """
        Creates a Mininet instance and automatically adds some
        nodes to it.

        Attention, we should always use Mininet's default controller
        for our tests. Only use other controllers if you want to test
        specific controller functionality.
        """
        self.net = DCNetwork(controller=controller, **kwargs)
        self.api = RestApiEndpoint("127.0.0.1", 5001, self.net)
        # add some switches
        # start from s1 because ovs does not like dpid = 0,
        # and the switch name-number is used by Mininet to set the dpid
        for i in range(1, nswitches + 1):
            self.s.append(self.net.addSwitch('s%d' % i))
        # if specified, chain all switches
        if autolinkswitches:
            for i in range(0, len(self.s) - 1):
                self.net.addLink(self.s[i], self.s[i + 1])
        # add some data centers
        for i in range(0, ndatacenter):
            self.dc.append(
                self.net.addDatacenter('datacenter%d' % i,
                                       metadata={"unittest_dc": i}))
        # connect data centers to the endpoint
        for i in range(0, ndatacenter):
            self.api.connectDatacenter(self.dc[i])
        # add some hosts
        for i in range(0, nhosts):
            self.h.append(self.net.addHost('h%d' % i))
        # add some dockers
        for i in range(0, ndockers):
            self.d.append(self.net.addDocker('d%d' % i, dimage="ubuntu:trusty"))

    def startApi(self):
        self.api.start()

    def stopApi(self):
        self.api.stop()

    def startNet(self):
        self.net.start()

    def stopNet(self):
        self.net.stop()

    def getDockerCli(self):
        """
        Helper to interact with the local docker instance.
        """
        if self.docker_cli is None:
            self.docker_cli = docker.APIClient(
                base_url='unix://var/run/docker.sock')
        return self.docker_cli

    def getContainernetContainers(self):
        """
        List the containers managed by containernet.
        """
        return self.getDockerCli().containers(
            filters={"label": "com.containernet"})

    @staticmethod
    def setUp():
        pass

    @staticmethod
    def tearDown():
        cleanup()
        # make sure that all pending docker containers are killed
        with open(os.devnull, 'w') as devnull:
            subprocess.call(
                "sudo docker rm -f $(sudo docker ps --filter 'label=com.containernet' -a -q)",
                stdout=devnull,
                stderr=devnull,
                shell=True)
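
# Illustrative test case (not from the original module) showing how the
# helper above is meant to be used: build a small chained topology, assert
# on it, and tear everything down again.
class SimpleTopologyUsageTest(SimpleTestTopology):

    def testChainedSwitches(self):
        # three switches connected in a chain, no datacenters or hosts
        self.createNet(nswitches=3, autolinkswitches=True)
        self.startNet()
        self.assertEqual(len(self.s), 3)
        self.stopNet()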
import logging
import signal
import time

import networkx as nx
import numpy as np
from geopy.distance import vincenty
from mininet.link import TCLink
from emuvim.dcemulator.net import DCNetwork
from emuvim.api.rest.rest_api_endpoint import RestApiEndpoint

LOG = logging.getLogger(__name__)

# Module-level constants referenced below; the concrete values are
# assumptions consistent with the comments and docstrings in this class.
SPEED_OF_LIGHT = 299792458  # meter per second
PROPAGATION_FACTOR = 0.77  # signals travel slower than light in fiber
BW_SCALE_FACTOR = 0.1  # downscale parsed bandwidths by 10x (see _parse_bandwidth)


class TopologyZooTopology(object):

    def __init__(self, args):
        # run daemonized to stop on signal
        self.running = True
        signal.signal(signal.SIGINT, self._stop_by_signal)
        signal.signal(signal.SIGTERM, self._stop_by_signal)
        self.args = args
        self.G = self._load_graphml(args.graph_file)
        self.G_name = self.G.graph.get("label", args.graph_file)
        LOG.debug("Graph label: {}".format(self.G_name))
        self.net = None
        self.pops = list()
        # initialize global rest api
        self.rest_api = RestApiEndpoint("0.0.0.0", 5001)
        self.rest_api.start()
        # initialize and start topology
        self.create_environment()
        self.create_pops()
        self.create_links()
        self.start_topology()
        self.daemonize()
        self.stop_topology()

    def _load_graphml(self, path):
        try:
            G = nx.read_graphml(path, node_type=int)
            LOG.info("Loaded graph from '{}' with {} nodes and {} edges.".format(
                path, G.__len__(), G.size()))
            LOG.debug(G.adjacency_list())
            return G
        except Exception:
            LOG.exception("Could not read {}".format(path))
            return None

    def create_environment(self):
        self.net = DCNetwork(monitor=False, enable_learning=False)
        self.rest_api.connectDCNetwork(self.net)

    def create_pops(self):
        for n in self.G.nodes(data=True):
            # name = n[1].get("label").replace(" ", "_")  # human-readable names
            name = "pop{}".format(n[0])  # use ID as name
            p = self.net.addDatacenter(name)
            self.rest_api.connectDatacenter(p)
            self.pops.append(p)
            LOG.info("Created pop: {} representing {}".format(
                p, n[1].get("label", n[0])))

    def create_links(self):
        for e in self.G.edges(data=True):
            # parse bw limit from edge
            bw_mbps = self._parse_bandwidth(e)
            # calculate delay from nodes; use np.around for consistent
            # rounding behavior in Python 2 and 3
            delay = int(np.around(self._calc_delay_ms(e[0], e[1])))
            try:
                self.net.addLink(self.pops[e[0]], self.pops[e[1]],
                                 cls=TCLink,
                                 delay='{}ms'.format(delay),
                                 bw=min(bw_mbps, 1000))
                LOG.info("Created link {} with delay {}".format(e, delay))
            except Exception:
                LOG.exception("Error in experiment")

    def _parse_bandwidth(self, e):
        """
        Calculate the link bandwidth based on the LinkLabel field.
        Default: 100 Mbps (if the field is not given)
        The result is returned in Mbps and downscaled by 10x to fit
        in the Mininet-supported range.
        """
        ll = e[2].get("LinkLabel")
        if ll is None:
            return 100  # default
        ll = ll.strip(" <>=")
        mbits_factor = 1.0
        if "g" in ll.lower():
            mbits_factor = 1000
        elif "k" in ll.lower():
            mbits_factor = (1.0 / 1000)
        ll = ll.strip("KMGkmpsbit/-+ ")
        try:
            bw = float(ll) * mbits_factor
        except Exception:
            LOG.warning("Could not parse bandwidth: {}".format(ll))
            bw = 100  # default
        LOG.debug("- Bandwidth {}-{} = {} Mbps".format(e[0], e[1], bw))
        return bw * BW_SCALE_FACTOR  # downscale to fit in Mininet-supported range

    def _calc_distance_meter(self, n1id, n2id):
        """
        Calculate the distance in meters between two geo positions.
        """
        n1 = self.G.nodes(data=True)[n1id]
        n2 = self.G.nodes(data=True)[n2id]
        n1_lat, n1_long = n1[1].get("Latitude"), n1[1].get("Longitude")
        n2_lat, n2_long = n2[1].get("Latitude"), n2[1].get("Longitude")
        try:
            return vincenty((n1_lat, n1_long), (n2_lat, n2_long)).meters
        except Exception:
            LOG.exception("Could not calculate distance between nodes: {}/{}".format(
                n1id, n2id))
            return 0

    def _calc_delay_ms(self, n1id, n2id):
        meter = self._calc_distance_meter(n1id, n2id)
        if meter <= 0:
            return 0  # default 0 ms delay
        LOG.debug("- Distance {}-{} = {} km".format(n1id, n2id, meter / 1000))
        # calc delay
        delay = (meter / SPEED_OF_LIGHT * 1000) * PROPAGATION_FACTOR  # in milliseconds
        LOG.debug("- Delay {}-{} = {} ms (rounded: {} ms)".format(
            n1id, n2id, delay, round(delay)))
        return delay

    def start_topology(self):
        print("start_topology")
        self.net.start()

    def cli(self):
        self.net.CLI()

    def daemonize(self):
        print("Daemonizing vim-emu. Send SIGTERM or SIGKILL to stop.")
        while self.running:
            time.sleep(1)

    def _stop_by_signal(self, signum, frame):
        print("Received SIGNAL {}. Stopping.".format(signum))
        self.running = False

    def stop_topology(self):
        self.rest_api.stop()
        self.net.stop()
import os

import requests
from mininet.node import RemoteController
from emuvim.dcemulator.net import DCNetwork
from emuvim.api.rest.rest_api_endpoint import RestApiEndpoint
from emuvim.api.sonata import SonataDummyGatekeeperEndpoint
from emuvim.api.tango import TangoLLCMEndpoint
# Helpers such as DockerBasedVIM, EmulatorInstance, initialize_GK,
# get_free_tcp_port, EmuComputeClient, EmuNetworkClient and the @vnv_*
# decorators come from the tangotest package; their exact import paths are
# omitted here.


class Emulator(DockerBasedVIM):
    """
    This class can be used to run tests on the VIM-EMU emulator.
    In order to use this class you need VIM-EMU to be installed locally.
    More information about VIM-EMU and installation instructions can be
    found on the project wiki page:
    https://osm.etsi.org/wikipub/index.php/VIM_emulator

    Example:
        >>> from tangotest.vim.emulator import Emulator
        >>> vim = Emulator()
        >>> vim.start()
        >>> # your code here
        >>> vim.stop()

    You can also use this class with the context manager:

        >>> with Emulator() as vim:
        >>>     # your code here
    """

    def __init__(self, endpoint_port=None, tango_port=None, sonata_port=None,
                 enable_learning=False, vnv_checker=False, *args, **kwargs):
        """
        Initialize the Emulator. This method doesn't start the Emulator.

        Args:
            endpoint_port (int): vim-emu REST API port. Default: random free port
            tango_port (int): Tango LLCM (gatekeeper) port. Default: random free port
            sonata_port (int): Sonata gatekeeper port. Default: random free port
            enable_learning (bool): Enable learning switch
            vnv_checker (bool): Check if the code can be reused on the 5GTANGO V&V platform
        """
        super(Emulator, self).__init__(*args, **kwargs)
        self.endpoint_port = endpoint_port
        self.tango_port = tango_port
        self.sonata_port = sonata_port
        self.vnv_checker = vnv_checker
        self.enable_learning = enable_learning

    @property
    def InstanceClass(self):
        return EmulatorInstance

    @vnv_checker_start
    def start(self):
        """
        Run the Emulator and the endpoints.
        """
        super(Emulator, self).start()
        initialize_GK()
        self.net = DCNetwork(controller=RemoteController,
                             monitor=False,
                             enable_learning=self.enable_learning)
        self.datacenter = self.net.addDatacenter('dc1')

        endpoint_ip = '0.0.0.0'
        endpoint_port = self.endpoint_port or get_free_tcp_port()
        self.endpoint = 'http://{}:{}'.format(endpoint_ip, endpoint_port)
        self.rest_api = RestApiEndpoint(endpoint_ip, endpoint_port)
        self.rest_api.connectDCNetwork(self.net)
        self.rest_api.connectDatacenter(self.datacenter)
        self.rest_api.start()

        sonata_ip = '0.0.0.0'
        sonata_port = self.sonata_port or get_free_tcp_port()
        self.sonata_address = 'http://{}:{}'.format(sonata_ip, sonata_port)
        self.sonata_gatekeeper = SonataDummyGatekeeperEndpoint(sonata_ip, sonata_port)
        self.sonata_gatekeeper.connectDatacenter(self.datacenter)
        self.sonata_gatekeeper.start()

        tango_ip = '0.0.0.0'
        tango_port = self.tango_port or get_free_tcp_port()
        self.tango_address = 'http://{}:{}'.format(tango_ip, tango_port)
        self.tango_gatekeeper = TangoLLCMEndpoint(tango_ip, tango_port)
        self.tango_gatekeeper.connectDatacenter(self.datacenter)
        self.tango_gatekeeper.start()

        self.net.start()

    @vnv_checker_stop
    def stop(self):
        """
        Stop the Emulator and the endpoints.
        """
        self.rest_api.stop()
        self.net.stop()
        super(Emulator, self).stop()

    @vnv_called_once
    def add_instances_from_package(self, package, package_format='tango'):
        if not os.path.isfile(package):
            raise Exception('Package {} not found'.format(package))

        if package_format == 'tango':
            gatekeeper_address = self.tango_address
        elif package_format == 'sonata':
            gatekeeper_address = self.sonata_address
        else:
            raise Exception(
                'package_format must be "tango" or "sonata", passed {}.'.format(
                    package_format))

        # Upload the package
        with open(package, 'rb') as package_content:
            files = {'package': package_content}
            url = '{}/packages'.format(gatekeeper_address)
            response = requests.post(url, files=files)
            if not response.ok:
                raise Exception('Something went wrong during uploading.')

        # Instantiate the service
        url = '{}/instantiations'.format(gatekeeper_address)
        response = requests.post(url, data='{}')
        if not response.ok:
            raise Exception('Something went wrong during instantiation.')

        instances = []
        for name, instance in self.datacenter.containers.items():
            if name in self.instances:
                continue
            instances.append(self._add_instance(name))
        return instances

    @vnv_called_without_parameter('interfaces')
    def add_instance_from_image(self, name, image, interfaces=None,
                                docker_command=None):
        """
        Run a Docker image on the Emulator.

        Args:
            name (str): The name of an instance
            image (str): The name of an image
            interfaces (int, list, str or dict): Network configuration
            docker_command (str): The command to execute when starting the instance

        Returns:
            (EmulatorInstance): The added instance
        """
        if not self._image_exists(image):
            raise Exception('Docker image {} not found'.format(image))

        if not interfaces:
            interfaces = '(id=emu0)'
        elif isinstance(interfaces, str):
            pass
        elif isinstance(interfaces, int):
            interfaces = ','.join(
                ['(id=emu{})'.format(i) for i in range(interfaces)])
        elif isinstance(interfaces, list):
            interfaces = ','.join(['(id={})'.format(i) for i in interfaces])
        elif isinstance(interfaces, dict):
            interfaces = ','.join(
                ['(id={},ip={})'.format(k, v) for k, v in interfaces.items()])
        else:
            raise Exception('Wrong network configuration: {}'.format(interfaces))

        params = {
            'name': name,
            'image': image,
            'command': docker_command,
            'network': interfaces,
            'endpoint': self.endpoint,
            'datacenter': 'dc1'
        }
        EmuComputeClient().start(params)

        return self._add_instance(name)

    @vnv_called_without_parameter('interfaces')
    def add_instance_from_source(self, name, path, interfaces=None,
                                 image_name=None, docker_command=None,
                                 **docker_build_args):
        """
        Build and run a Docker image on the Emulator.

        Args:
            name (str): The name of an instance
            path (str): The path to the directory containing the Dockerfile
            interfaces (int, list, str or dict): Network configuration
            image_name (str): The name of an image. Default: tangotest<name>
            docker_command (str): The command to execute when starting the instance
            **docker_build_args: Extra arguments to be used by the Docker engine to build the image

        Returns:
            (EmulatorInstance): The added instance
        """
        return super(Emulator, self).add_instance_from_source(
            name, path, interfaces, image_name, docker_command,
            **docker_build_args)

    @vnv_not_called
    def add_link(self, src_vnf, src_if, dst_vnf, dst_if, sniff=False, **kwargs):
        result = super(Emulator, self).add_link(
            src_vnf, src_if, dst_vnf, dst_if, sniff, **kwargs)
        if result:
            return result

        params = {
            'source': '{}:{}'.format(src_vnf, src_if),
            'destination': '{}:{}'.format(dst_vnf, dst_if),
            'weight': kwargs.get('weight'),
            'match': kwargs.get('match'),
            'bidirectional': kwargs.get('bidirectional', True),
            'cookie': kwargs.get('cookie'),
            'priority': kwargs.get('priority'),
            'endpoint': self.endpoint
        }
        return EmuNetworkClient().add(params)
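
# Usage sketch following the class docstring above; the image and instance
# names are placeholders, not from the original module. Two containers are
# started on the emulator and chained with a link.
if __name__ == '__main__':
    with Emulator() as vim:
        client = vim.add_instance_from_image('client', 'ubuntu:trusty')
        server = vim.add_instance_from_image('server', 'ubuntu:trusty')
        # default network configuration gives each instance an 'emu0' interface
        vim.add_link('client', 'emu0', 'server', 'emu0')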