def set_host_ip(self, net_name: str, host: Host, ip: str) -> None:
    """Notify a network bridge about the IP address of a host.

    :param net_name: Name of the network in which to set the host IP.
        Must be of type 'host'.
    :param host: The host whose IP address is announced to the bridge.
    :param ip: IP address of the host as string to be parsed and validated.
    :raises InvalidTopo: Parameter validation failed.
    """
    # Only bridges of type 'host' support host IPs. A missing bridge
    # (None) fails the isinstance check as well.
    bridge = self._bridges.get(net_name)
    if not isinstance(bridge, HostNetwork):
        log.error("Must be a valid host network: '%s'.", net_name)
        raise errors.InvalidTopo()

    try:
        parsed = ipaddress.ip_address(ip)
    except ValueError:
        log.error("Invalid IP address: '%s'.", ip)
        raise errors.InvalidTopo()
    bridge.set_host_ip(host, parsed)
def create(self, name: str, name_prefix: str, type: str, host: Host, subnet_str: str,
           options: Mapping[str, Any]) -> Bridge:
    """Create a new network bridge.

    :param name: Name of the new network.
    :param name_prefix: Name prefix of the topology.
    :param type: Type of the new network. One of 'docker_bridge', 'ovs_bridge',
        'overlay' or 'host'.
    :param host: The host which is supposed to manage the bridge.
    :param subnet_str: The IP subnet for the network as a string to be parsed and
        validated.
    :param options: Additional options for creating the bridge.
    :returns: The newly created bridge.
    :raises InvalidTopo: Parameter validation failed.
    """
    # Network names must be unique within the topology.
    if name in self._bridges:
        log.error("Multiple networks with name '%s'.", name)
        raise errors.InvalidTopo()

    subnet = self._parse_ip_network(subnet_str)
    prefixed_name = name_prefix + name

    if type == 'docker_bridge':
        bridge = DockerBridge(prefixed_name, host, subnet)
    elif type == 'ovs_bridge':
        bridge = OvsBridge(prefixed_name, host, subnet)
    elif type == 'overlay':
        bridge = OverlayNetwork(prefixed_name, host, subnet,
                                encrypted=options.get('encrypted', False))
    elif type == 'host':
        bridge = HostNetwork(prefixed_name, subnet)
    else:
        log.error("Unknown network type '%s'.", type)
        raise errors.InvalidTopo()

    # Remember whether any created network uses IPv6.
    self._detected_ipv6 |= (subnet.version == 6)
    self._bridges[name] = bridge
    return bridge
def plot_topology(topo: Topology, out=sys.stdout) -> None:
    """Create a graph description in graphviz dot syntax illustrating the given topology.

    :param topo: The topology to plot.
    :param out: Output text stream to write the result to.
    :raises InvalidTopo: If a problem with the topology has been encountered. For example
        invalid link types.
    """
    # Group ASes by the ISD they belong to.
    isds = defaultdict(list)
    for isd_as, asys in topo.ases.items():
        isds[isd_as.isd_str()].append((isd_as, asys))

    out.write("digraph \"{}\" {{\n".format(topo.name if topo.name else "Topology"))
    out.write("nodesep=0.5;\n")
    out.write("ranksep=0.75;\n")

    # ISDs
    for isd_name, ases in isds.items():
        _plot_isd(isd_name, ases, topo.links, out)

    # inter-ISD links
    for link in topo.links:
        if link.ep_a.isd_str() == link.ep_b.isd_str():
            continue  # intra-ISD links are drawn by _plot_isd
        link_type = link.type.lower()
        if link_type == LinkType.CORE:
            attrs = "style=bold, dir=none, constraint=false"
        elif link_type == LinkType.PEER:
            attrs = "style=dashed, dir=none, constraint=false"
        elif link_type == LinkType.UNSET:
            continue  # ignore dummy links
        else:
            # inter-ISD parent-child links are not allowed
            out.write(str(link))
            raise errors.InvalidTopo()
        out.write("\"{}\" -> \"{}\" [{}];\n".format(link.ep_a, link.ep_b, attrs))

    out.write("}\n")
def add_link(topo: Topology, ixp: Ixp, a: Tuple[ISD_AS, AS], b: Tuple[ISD_AS, AS],
             link_properties: Mapping[str, Any], workdir: Path,
             dc: docker.DockerClient) -> bool:
    """Add a new link to the topology.

    New links can be added while the network is running. Links added by this function use
    an IXP network for physical connectivity. The current implementation allows only a
    single link between the same two ASes per IXP.

    :param topo: Topology
    :param ixp: The IXP to use for the new link. The ASes are connected to the IXP if they
        do not yet have a connection.
    :param a: First AS to connect. Has to be an AS running on localhost.
    :param b: Second AS to connect. Has to be an AS running on localhost.
    :param link_properties: Properties of the new link. Valid properties are:
        'type', 'bandwidth', 'mtu'
    :param workdir: Topology working directory.
    :param dc: Docker client object connected to the Docker daemon.
    :returns: True, if the link was added. False, if a link between the specified ASes
        over `ixp` exists already.
    :raises InvalidTopo: Link has invalid (e.g., identical) endpoints.
    """
    isd_as_a, as_a = a
    isd_as_b, as_b = b

    # Check for identical endpoints.
    if isd_as_a == isd_as_b:
        raise errors.InvalidTopo("Link endpoints are identical.")

    # Check whether the link exists already on the IXP.
    for _, link in as_a.links():
        if link.is_endpoint(isd_as_b) and link.bridge == ixp.bridge:
            log.error("Link from {} to {} exists already.".format(isd_as_a, isd_as_b))
            return False

    # Select a border router on each side (connect_* tells us whether the BR still has
    # to be attached to the IXP bridge) and reserve unused interface IDs.
    br_a, connect_a = _select_br_new_link(as_a, ixp.bridge)
    br_b, connect_b = _select_br_new_link(as_b, ixp.bridge)
    ifid_a = as_a.get_unused_ifid()
    ifid_b = as_b.get_unused_ifid()

    log.info("Adding link from {}#{} to {}#{}.".format(
        br_a.get_name(isd_as_a), ifid_a, br_b.get_name(isd_as_b), ifid_b))

    # Every mutation below registers an undo action with the RollbackManager, so a
    # failure part-way through reverts all earlier steps; cleanup.success() at the end
    # disarms the rollback. Undo actions run in reverse order of registration.
    with RollbackManager() as cleanup:
        # assign underlay addresses
        link = Link(
            LinkEp(isd_as_a, ifid=ifid_a), LinkEp(isd_as_b, ifid=ifid_b),
            link_properties['type'])
        link.bridge = ixp.bridge
        link.ep_a_underlay = ixp.bridge.assign_br_address(isd_as_a, as_a, ifid_a)
        cleanup.defer(lambda: ixp.bridge.free_br_address(isd_as_a, ifid_a))
        link.ep_b_underlay = ixp.bridge.assign_br_address(isd_as_b, as_b, ifid_b)
        cleanup.defer(lambda: ixp.bridge.free_br_address(isd_as_b, ifid_b))

        # connect to network bridge, if containers exist
        if connect_a and as_a.container_id:
            connect_bridge(link, isd_as_a, as_a)
            cleanup.defer(lambda: disconnect_bridge(link, isd_as_a, as_a))
            ixp.ases[isd_as_a] = as_a
            cleanup.defer(lambda: ixp.ases.pop(isd_as_a))
        if connect_b and as_b.container_id:
            connect_bridge(link, isd_as_b, as_b)
            cleanup.defer(lambda: disconnect_bridge(link, isd_as_b, as_b))
            ixp.ases[isd_as_b] = as_b
            cleanup.defer(lambda: ixp.ases.pop(isd_as_b))

        # add link
        topo.links.append(link)
        cleanup.defer(lambda: topo.links.remove(link))

        # add interfaces
        br_a.links[ifid_a] = link
        cleanup.defer(lambda: br_a.links.pop(ifid_a))
        br_b.links[ifid_b] = link
        cleanup.defer(lambda: br_b.links.pop(ifid_b))

        # Configure the SCION-level link between the two border routers.
        _connect_scion_link(topo=topo, workdir=workdir, dc=dc,
                            a=_EndPoint(isd_as_a, as_a, br_a, ifid_a),
                            b=_EndPoint(isd_as_b, as_b, br_b, ifid_b),
                            link=link, properties=link_properties)

        cleanup.success()
    return True
def _parse_ip_network(subnet: str) -> IpNetwork:
    """Parse and validate an IP network given as a string.

    :param subnet: The IP network, e.g. "10.0.0.0/24".
    :returns: The parsed network object.
    :raises InvalidTopo: The string is not a valid IP network.
    """
    try:
        parsed = ipaddress.ip_network(subnet)
    except ValueError:
        log.error("Invalid IP network: '%s'.", subnet)
        raise errors.InvalidTopo()
    return parsed
def extract_topo_info(topo_file: MutableMapping[str, Any], name: Optional[str] = None) -> Topology:
    """Initialize a Topology object with information read from a topology definition.

    Interface identifiers not specified in the input file are automatically assigned and
    added to the returned Topology object and to `topo_file`.

    :param topo_file: The input topology file parsed into a dictionary. When the function
        returns, the IXP testbed specific entries have been removed.
    :param name: An optional name for the topology. This name is added to all containers,
        network bridges, etc. to distinguish them from other testbed instances.
    :returns: Extracted topology information.
    :raises InvalidTopo: The topology file is invalid.
    """
    topo = Topology(name)
    networks = NetworkFactory()
    brs = BrFactory()
    ifids = IfIdMapping(topo_file)

    # Subnet for automatically generated local docker bridges
    if 'link_subnet' in topo_file.get('defaults', {}):
        topo.default_link_subnet = ipaddress.ip_network(
            topo_file['defaults'].pop("link_subnet"))
        topo.ipv6_enabled |= (topo.default_link_subnet.version == 6)
    else:
        topo.default_link_subnet = None

    # Hosts (first pass: create host objects)
    localhost = topo.hosts['localhost'] = LocalHost()  # always exists
    for host_name, host_def in topo_file.get('hosts', {}).items():
        if host_name != 'localhost':
            if host_name in topo.hosts:
                log.error("Multiple hosts with name '%s'.", host_name)
                raise errors.InvalidTopo()
            if 'coordinator' not in topo_file:
                log.error(
                    "Running a topology spanning multiple hosts requires a coordinator.")
                raise errors.InvalidTopo()
            topo.hosts[host_name] = RemoteHost(
                host_name,
                _get_ip(host_def, 'ssh_host', host_name),
                _get_value(host_def, 'username', host_name),
                identity_file=host_def.get("identity_file"),
                ssh_port=L4Port(int(host_def.get('ssh_port', 22))))

    # Networks
    if 'networks' in topo_file:
        net_defs = topo_file.pop('networks')  # remove networks section
        for net_name, net_def in net_defs.items():
            type = _get_value(net_def, 'type', net_name)
            subnet = _get_value(net_def, 'subnet', net_name)
            host = topo.hosts[net_def.get('host', 'localhost')]
            networks.create(net_name, topo.get_name_prefix(), type, host, subnet, net_def)

    # Hosts (second pass: parse network addresses for host networks)
    for host_name, host_def in topo_file.get('hosts', {}).items():
        for net, addr in host_def.get('addresses', {}).items():
            networks.set_host_ip(net, topo.hosts[host_name], addr)
    topo_file.pop('hosts', None)  # remove host section

    # Coordinator
    if 'coordinator' in topo_file:
        coord_def = topo_file.pop('coordinator')  # remove coordinator section
        host = topo.hosts[coord_def.get('host', 'localhost')]
        def_name = lambda: topo.get_name_prefix() + const.COORD_NET_NAME
        bridge = networks.get(_get_value(coord_def, 'network', 'coordinator'),
                              def_name, localhost)
        cpu_affinity = CpuSet(coord_def.get('cpu_affinity'))
        ssh_management = coord_def.get('ssh_management', False)
        debug = coord_def.get('debug', True)
        compose_path = None
        if debug:
            if ssh_management:
                log.warning("Coordinator in debug mode, 'ssh_management' has no effect.")
        else:
            compose_path = Path(_get_value(coord_def, 'compose_path', 'coordinator'))
        if 'expose' not in coord_def:
            log.warning("No interface to publish the coordinator on given. The coordinator will"
                        " be exposed at http://127.0.0.1:8000.")
        coord = Coordinator(topo.get_coord_name(), host, bridge, cpu_affinity,
                            ssh_management, debug, compose_path)
        coord.exposed_at = _get_external_address(coord_def)
        # NOTE: loop variable renamed from 'name' to avoid shadowing the function
        # parameter of the same name.
        for user_name, data in coord_def['users'].items():
            if user_name is None:
                log.error("User name missing.")
                raise errors.InvalidTopo()
            coord.users[user_name] = User(data['email'], data['password'],
                                          data.get('superuser', False))
        topo.coordinator = coord

    # Prometheus
    if 'prometheus' in topo_file:
        prom_def = topo_file.pop('prometheus')  # remove prometheus section
        host = topo.hosts[prom_def.get('host', 'localhost')]
        def_name = lambda: topo.gen_bridge_name()
        bridge = networks.get(_get_value(prom_def, 'network', 'coordinator'),
                              def_name, localhost)
        if not bridge.is_docker_managed:
            log.error("Invalid network type for Prometheus.")
            # Fixed: was an unqualified 'InvalidTopo()' (NameError); use the errors
            # module like every other raise in this function.
            raise errors.InvalidTopo()
        prom = Prometheus(
            host, cast(DockerNetwork, bridge),
            cpu_affinity=CpuSet(prom_def.get('cpu_affinity')),
            scrape_interval=prom_def.get('scrape_interval', "30s"),
            storage_dir=_get_optional_path(prom_def, 'storage_dir'),
            targets=[ISD_AS(target) for target in prom_def['targets']])
        prom.exposed_at = _get_external_address(prom_def)
        topo.additional_services.append(prom)

    # IXP definitions
    for ixp_name, ixp_def in topo_file.pop('IXPs', {}).items():  # remove IXP section
        if ixp_name in topo.ixps:
            # Fixed: log the duplicated IXP name, not the topology 'name' parameter.
            log.error("IXP %s is defined multiple times.", ixp_name)
            raise errors.InvalidTopo()
        net_name = _get_value(ixp_def, 'network', ixp_name)
        def_name = lambda: topo.get_name_prefix() + ixp_name
        bridge = networks.get(net_name, def_name, localhost)
        topo.ixps[ixp_name] = Ixp(bridge)

    # ASes
    for as_name, as_def in topo_file['ASes'].items():
        isd_as = ISD_AS(as_name)
        host_name = as_def.get('host', 'localhost')
        try:
            host = topo.hosts[host_name]
        except KeyError:
            # Fixed: log the offending host name; the old 'as_def[host_name]' lookup
            # itself raised a KeyError and masked the real error message.
            log.error("Invalid host: '%s'.", host_name)
            raise
        cpu_affinity = CpuSet(as_def.get('cpu_affinity'))
        asys = AS(host, as_def.get('core', False), cpu_affinity)
        asys.is_attachment_point = as_def.pop('attachment_point', False)
        asys.owner = as_def.pop('owner', None)
        topo.ases[isd_as] = asys
        if topo.coordinator:
            for ixp_name in as_def.pop('ixps', []):
                if asys.owner is None:
                    log.error("Infrastructure AS %s has an IXP list.", isd_as)
                    raise errors.InvalidTopo()
                ixp = topo.ixps[ixp_name]
                ixp.ases[isd_as] = asys
                # Add dummy link to IXP to make sure there is a network connection.
                # Actual links will be configured by the coordinator.
                # The border router of the link endpoint is labeled here to avoid creating
                # a new border router for every IXP link.
                end_point = LinkEp(isd_as, ifid=ifids.assign_ifid(isd_as),
                                   br_label='peer')
                link = Link(end_point, LinkEp(), LinkType.UNSET)
                link.bridge = ixp.bridge
                topo.links.append(link)
                brs.add_link_ep(end_point, link)

    # Link definitions
    for link in topo_file['links']:
        a, b = LinkEp(link['a']), LinkEp(link['b'])
        # Assign IfIds if not given in the original topo file.
        # Setting the IDs of all interfaces in the processed topology file ensures we can
        # identify the interfaces in the configuration files generated by scion.sh.
        # (Loop variable renamed from 'name' to avoid shadowing the function parameter.)
        for ep, ep_key in [(a, 'a'), (b, 'b')]:
            if ep.ifid is None:
                ep.ifid = ifids.assign_ifid(ep)
                link[ep_key] = "{}#{}".format(link[ep_key], ep.ifid)
        topo.links.append(Link(a, b, link['linkAtoB']))
        # Keep track of border routers that will be created for the links.
        brs.add_link_ep(a, topo.links[-1])
        brs.add_link_ep(b, topo.links[-1])
        # Assign to a network if an IXP name or an explicit IP network is given.
        if "network" in link:
            net = link.pop('network')
            if net in topo.ixps:  # use the IXPs network
                ixp = topo.ixps[net]
                topo.links[-1].bridge = ixp.bridge
                ixp.ases[a] = topo.ases[a]
                ixp.ases[b] = topo.ases[b]
            else:
                def_name = lambda: topo.gen_bridge_name()
                topo.links[-1].bridge = networks.get(net, def_name, localhost)
        else:
            if topo.ases[a].host != topo.ases[b].host:
                log.error(
                    "Links between ASes on different hosts must specify the network to use.")
                raise errors.InvalidTopo()

    # Enable IPv6 support if needed.
    # Fixed: use '|=' so a plain assignment does not clobber the flag set above when
    # the default link subnet is an IPv6 network.
    topo.ipv6_enabled |= networks.is_ipv6_required()

    # Store bridges in topology.
    topo.bridges = networks.get_bridges()

    # Store border router info in corresponding AS.
    for isd_as, asys in topo.ases.items():
        asys.border_routers = brs.get_brs(isd_as)

    return topo