Ejemplo n.º 1
0
def _modify_topo(topo: Topology, isd_as: ISD_AS, asys: AS,
    mod_func: Callable[[Mapping[str, Any]], None],
    workdir: Path, dc: docker.DockerClient):
    """Apply `mod_func` to the topology.json file of the given AS.

    If SCION is currently running in the AS, it is stopped before the file is
    modified and started again afterwards.

    :param topo: Topology the AS belongs to.
    :param isd_as: ISD-AS identifier of the AS to modify.
    :param asys: AS object of the AS to modify.
    :param mod_func: Callback performing the desired modifications; it receives
                     the parsed topology.json file as a mapping and mutates it
                     in place.
    :param workdir: Topology working directory.
    :param dc: Docker client object connected to the Docker daemon.
    """
    was_running = topo.is_scion_running(isd_as, asys)

    if was_running:  # stop SCION in the affected AS before touching its config
        log.info("Stopping SCION in AS{}.".format(isd_as))
        topo.stop_scion_asys(isd_as, asys)

    gen_dir = workdir.joinpath(isd_as.file_fmt(), "gen")
    modify_as_topo_file(gen_dir, isd_as, mod_func)

    if was_running:  # bring the AS back up with the new configuration
        log.info("Starting SCION in AS{}.".format(isd_as))
        topo.run_scion_asys(isd_as, asys)
Ejemplo n.º 2
0
def modify_link(args):
    """Modify an existing link."""
    topo = Topology.load(args.workdir.joinpath(CONFIG_DATA_FILE))
    link = _get_link_from_args(topo, args)
    if link is None:
        return

    ixp, as_a, as_b = link
    arg_dict = vars(args)
    # Forward only the link properties that are present on the argument set.
    properties = {key: arg_dict[key]
                  for key in ('type', 'mtu', 'bandwidth') if key in arg_dict}

    open_log_file(args.workdir)
    log.debug("Command: modify_link")

    try:
        links.modify_link(topo=topo,
                          workdir=args.workdir,
                          dc=docker.from_env(),
                          ixp=ixp,
                          a=(args.as_a, as_a),
                          b=(args.as_b, as_b),
                          link_properties=properties)
    finally:
        # Always drop SSH connections and persist the (possibly modified) topology.
        topo.close_host_connections()
        topo.save(args.workdir.joinpath(CONFIG_DATA_FILE))
Ejemplo n.º 3
0
def add_link(args):
    """Add a link to the topology."""
    topo = Topology.load(args.workdir.joinpath(CONFIG_DATA_FILE))
    link = _get_link_from_args(topo, args)
    if link is None:
        return

    ixp, as_a, as_b = link
    open_log_file(args.workdir)
    log.debug("Command: add_link")

    # Link properties come straight from the command line arguments.
    properties = {
        "type": args.type,
        "mtu": args.mtu,
        "bandwidth": args.bandwidth,
    }

    try:
        links.add_link(topo=topo,
                       workdir=args.workdir,
                       dc=docker.from_env(),
                       ixp=ixp,
                       a=(args.as_a, as_a),
                       b=(args.as_b, as_b),
                       link_properties=properties)
    finally:
        # Always drop SSH connections and persist the (possibly modified) topology.
        topo.close_host_connections()
        topo.save(args.workdir.joinpath(CONFIG_DATA_FILE))
Ejemplo n.º 4
0
def stats(args):
    """Measure the average CPU utilization of SCION ASes and services."""
    open_log_file(args.workdir)
    log.debug("Command: stats")
    topo = Topology.load(args.workdir.joinpath(CONFIG_DATA_FILE))
    try:
        measurements = Measurements()

        # Load any measurements already on disk so new data merges into them.
        try:
            with open(args.output_file) as f:
                measurements.data = json.load(f)
        except FileNotFoundError:
            pass  # First run: nothing to merge.

        measurements.experiment = args.experiment

        # Take the new measurements; they are merged into `measurements`.
        measure_perf_stats(topo,
                           measurements,
                           as_pattern=args.as_pattern,
                           services=set(args.services),
                           interval=args.interval,
                           count=args.count)

        # Persist the merged result.
        with open(args.output_file, 'w') as f:
            json.dump(measurements.data, f)
    finally:
        topo.close_host_connections()
Ejemplo n.º 5
0
def update(args):
    """Fetch the AS configuration files from the coordinator.

    If ``args.as_list`` names a file, only the ASes listed in it (one ISD-AS
    string per line) are updated; otherwise ASes are selected by
    ``args.as_pattern``.
    """
    open_log_file(args.workdir)
    log.debug("Command: update")
    topo = Topology.load(args.workdir.joinpath(CONFIG_DATA_FILE))
    try:
        if topo.coordinator is None:
            log.error("Topology has no coordinator.")
            return

        # Build the AS selector once so fetch_config is called in one place
        # (the original duplicated the call in both branches).
        if args.as_list is None:
            as_selector = args.as_pattern
        else:
            # Iterate the file directly instead of readlines(); as before,
            # each line (including its trailing newline) is passed to ISD_AS.
            with open(args.as_list) as file:
                as_selector = [ISD_AS(line) for line in file]

        fetch_config(topo,
                     as_selector=as_selector,
                     detach=args.detach,
                     force=args.force,
                     no_restart=args.no_restart,
                     rate=args.rate)
    finally:
        topo.close_host_connections()
Ejemplo n.º 6
0
def _build_standalone_topology(topo: Topology, sc: Path, workdir: Path,
                               dc: docker.DockerClient):
    """Build a standalone SCION topology using the 'scion.sh' script.

    Starts a master SCION container, copies the processed topology file into
    it, runs the topology generation inside the container, and always stops
    and removes the container when done.

    :param topo: Topology to build.
    :param sc: Path to the SCION source tree (unused here; kept for interface
               compatibility with the other build functions).
    :param workdir: Topology working directory.
    :param dc: Docker client object connected to the Docker daemon.
    """
    # Start master container. The Docker socket is mounted so the container
    # can itself control Docker on the host.
    log.info("Starting SCION Docker container.")
    master_cntr = start_scion_cntr(
        dc,
        const.AS_IMG_NAME,
        cntr_name=topo.get_name_prefix() + const.MASTER_CNTR_NAME,
        mount_dir=workdir.joinpath(
            const.MASTER_CNTR_MOUNT).resolve(),  # need absolute path
        volumes={
            "/var/run/docker.sock": {
                'bind': "/var/run/docker.sock",
                'mode': 'rw'
            }
        })

    # NOTE: the original `except: raise` clause was a no-op and has been
    # removed; `finally` alone guarantees container cleanup on any outcome.
    try:
        # Copy processed topology file into the master container
        processed_topo_file_path = workdir.joinpath(const.PROCESSED_TOPO_FILE)
        copy_to_container(master_cntr, processed_topo_file_path,
                          const.SCION_TOPO_FILES_PATH)

        # Build a standalone topology in the master container
        log.info("Building standalone topology...")
        command = "./scion.sh topology --in-docker -c topology/topology.topo"
        run_cmd_in_cntr(master_cntr, const.SCION_USER, command)
    finally:
        master_cntr.stop()
        master_cntr.remove()
Ejemplo n.º 7
0
def list_links(args):
    """List all links this script has created in the topology.

    Links created by the coordinator are not listed.
    """
    # Read-only command: intentionally not written to the command log.
    config_path = args.workdir.joinpath(CONFIG_DATA_FILE)
    topo = Topology.load(config_path)
    try:
        links.print_links(topo)
    finally:
        topo.close_host_connections()
Ejemplo n.º 8
0
def stop_cntrs(args):
    """Stop all containers."""
    open_log_file(args.workdir)
    log.debug("Command: stop_cntrs")

    config_path = args.workdir.joinpath(CONFIG_DATA_FILE)
    topo = Topology.load(config_path)
    try:
        # Containers first, then the network bridges they were attached to.
        topo.stop_containers()
        topo.remove_bridges()
    finally:
        topo.close_host_connections()
        topo.save(config_path)
Ejemplo n.º 9
0
def start_cntrs(args):
    """Start the containers and the coordinator if applicable."""
    open_log_file(args.workdir)
    log.debug("Command: start_cntrs")

    config_path = args.workdir.joinpath(CONFIG_DATA_FILE)
    topo = Topology.load(config_path)
    try:
        # Bridges must exist before containers can attach to them.
        topo.create_bridges()
        topo.start_containers(workdir=args.workdir,
                              sc=args.sc,
                              push_images=args.push_images)
    finally:
        topo.close_host_connections()
        topo.save(config_path)
Ejemplo n.º 10
0
def exec(args):  # noqa: A001 -- shadows the builtin; name mirrors the CLI sub-command
    """Run a command in one or multiple containers."""
    open_log_file(args.workdir)
    log.debug("Command: exec")

    topo = Topology.load(args.workdir.joinpath(CONFIG_DATA_FILE))
    try:
        run_in_cntrs(topo,
                     as_selector=args.as_pattern,
                     cmd_template=args.command,
                     user=args.user,
                     detach=args.detach,
                     dry_run=args.dry_run)
    finally:
        topo.close_host_connections()
Ejemplo n.º 11
0
def stop(args):
    """Stop SCION, but keep the containers and the coordinator running."""
    open_log_file(args.workdir)
    log.debug("Command: stop")

    config_path = args.workdir.joinpath(CONFIG_DATA_FILE)
    topo = Topology.load(config_path)
    try:
        # Dispatch on the requested shutdown mode (modes are mutually exclusive).
        if args.mode == 'detach':
            topo.stop_scion_parallel(detach=True)
        elif args.mode == 'parallel':
            topo.stop_scion_parallel()
        elif args.mode == 'sequential':
            topo.stop_scion()
    finally:
        topo.close_host_connections()
        topo.save(config_path)
Ejemplo n.º 12
0
def _format_and_run(
    topo: Topology, isd_as: ISD_AS, asys: AS, cmd_template: str,
    user: str, detach:bool, dry_run: bool):
    """Format the given command template and run the command in the given AS."""
    # Guard: AS has no container assigned at all.
    if not asys.container_id:
        print("No container for AS{}.".format(isd_as))
        return

    # Guard: container id is stale / container no longer exists.
    try:
        cntr = asys.get_container()
    except docker.errors.NotFound:
        cntr_name = topo.get_cntr_name(isd_as)
        log.warning("Container {} ({}) not found.".format(cntr_name, asys.container_id))
        return

    # Substitute the AS identifiers into the command template.
    cmd = cmd_template.format(isd_as=str(isd_as), file_fmt=isd_as.file_fmt())
    if dry_run:
        print("Would run '{}' in {}.".format(cmd, cntr.name))
    else:
        _run_in_cntr(cntr, cmd, user, detach)
Ejemplo n.º 13
0
def remove_link(args):
    """Remove a link from the topology."""
    topo = Topology.load(args.workdir.joinpath(CONFIG_DATA_FILE))
    link = _get_link_from_args(topo, args)
    if link is None:
        return

    ixp, as_a, as_b = link
    open_log_file(args.workdir)
    log.debug("Command: remove_link")
    try:
        links.remove_link(topo=topo,
                          workdir=args.workdir,
                          dc=docker.from_env(),
                          ixp=ixp,
                          a=(args.as_a, as_a),
                          b=(args.as_b, as_b))
    finally:
        # Always drop SSH connections and persist the (possibly modified) topology.
        topo.close_host_connections()
        topo.save(args.workdir.joinpath(CONFIG_DATA_FILE))
Ejemplo n.º 14
0
def policy(args):
    """Wrapper around the coordinator's peering policy REST API."""
    open_log_file(args.workdir)
    log.debug("Command: policy %s" % args.action)
    topo = Topology.load(args.workdir.joinpath(CONFIG_DATA_FILE))

    # Parse and validate the ISD-AS string.
    try:
        isd_as = ISD_AS(args.isd_as)
    except SCIONParseError:
        log.error("Invalid ISD-AS string.")
        return
    try:
        if not topo.ases[isd_as].is_user_as():
            log.error("'%s' is not a user AS.", isd_as)
            return
    except KeyError:
        log.error("Unknown AS: %s", isd_as)
        return

    try:
        coord = topo.coordinator
        if coord is None:
            log.error("Topology has no coordinator.")
            return

        response = None
        action = args.action
        if action == "get_peers":
            response = coord.get_peers(isd_as, args.ixp)
        elif action == "get":
            response = coord.get_policies(isd_as, args.ixp)
        elif action == "create":
            # Policies come from stdin unless given on the command line.
            policies = args.data if args.data is not None else sys.stdin.read()
            print(coord.create_policies(isd_as, policies))
        elif action == "delete":
            policies = args.data if args.data is not None else sys.stdin.read()
            print(coord.delete_policies(isd_as, policies))

        # Only the "get" style actions produce a JSON response to pretty-print.
        if response is not None:
            print(json.dumps(response, indent=2))
    finally:
        topo.close_host_connections()
Ejemplo n.º 15
0
def start(args):
    """Start the containers and SCION."""
    open_log_file(args.workdir)
    log.debug("Command: start")

    config_path = args.workdir.joinpath(CONFIG_DATA_FILE)
    topo = Topology.load(config_path)
    try:
        # Make sure bridges and containers are up before launching SCION.
        topo.create_bridges()
        topo.start_containers(workdir=args.workdir,
                              sc=args.sc,
                              push_images=args.push_images)

        # Run SCION in the containers, using the requested startup mode.
        if args.mode == 'detach':
            topo.run_scion_parallel(detach=True)
        elif args.mode == 'parallel':
            topo.run_scion_parallel()
        elif args.mode == 'sequential':
            topo.run_scion()
    finally:
        topo.close_host_connections()
        topo.save(config_path)
Ejemplo n.º 16
0
def plot(args):
    """Plot the topology."""
    config_path = args.workdir.joinpath(CONFIG_DATA_FILE)
    plot_topology(Topology.load(config_path))
Ejemplo n.º 17
0
def assign_underlay_addresses(topo: Topology) -> None:
    """Assign underlay addresses to the border router interfaces in the given topology.

    Links without an explicit bridge get a DockerBridge carved out of the
    topology's default link subnet; every non-zero link endpoint then gets an
    IP address from its bridge's subnet.

    :param topo: Topology whose links are assigned underlay addresses in place.
    :raises OutOfResources: Not enough underlay addresses available.
    """
    link_subnets = None

    if topo.default_link_subnet:
        # Carve the default link network into per-link subnets, each large
        # enough for LINK_SUBNET_HOST_LEN host bits. A negative diff means the
        # default subnet is too small to be subdivided at all.
        def_subnet = topo.default_link_subnet
        prefixlen_diff = def_subnet.max_prefixlen - def_subnet.prefixlen - LINK_SUBNET_HOST_LEN
        if prefixlen_diff >= 0:
            link_subnets = topo.default_link_subnet.subnets(prefixlen_diff)

    # Wrapper around IP network host iterator.
    # Unlike a plain iterator, it exposes the current (not yet consumed)
    # address via `current`, so callers can offer it as a preferred address
    # and only advance when it was actually taken.
    class HostAddrGenerator:
        def __init__(self, bridge: Bridge):
            self._iter = bridge.valid_ip_iter()
            self.current = next(self._iter)

        def next(self):
            self.current = next(self._iter)

    # Mapping from IP subnet to generator producing addresses from said subnet.
    addr_gens: Dict[IpNetwork, HostAddrGenerator] = {}

    for link in topo.links:
        if link.bridge is None: # assign a subnet of the default link network
            # DockerBridge cannot span multiple hosts.
            assert topo.ases[link.ep_a].host == topo.ases[link.ep_b].host

            if not link_subnets:
                log.error("No default link network specified.")
                raise errors.OutOfResources()
            try:
                ip_net = next(link_subnets)
                link.bridge = DockerBridge(
                    topo.gen_bridge_name(), topo.ases[link.ep_a].host, ip_net)
                topo.bridges.append(link.bridge)
            except StopIteration:
                # The default link network has been fully subdivided already.
                log.error("Not enough IP addresses for all links.")
                raise errors.OutOfResources()

        # Assign IP addresses to link endpoints. One address generator is
        # created lazily per subnet and shared by all links on that subnet.
        addr_gen = _lazy_setdefault(addr_gens, link.bridge.ip_network,
            lambda: HostAddrGenerator(unwrap(link.bridge)))

        try:
            if not link.ep_a.is_zero():
                # For HostNetworks the bridge picks the address itself;
                # otherwise we suggest the generator's current address.
                link.ep_a_underlay = link.bridge.assign_br_address(
                    link.ep_a, topo.ases[link.ep_a], link.ep_a.ifid,
                    pref_ip=None if isinstance(link.bridge, HostNetwork) else addr_gen.current)
                if link.ep_a_underlay.ip == addr_gen.current:
                    # Suggested address was taken; advance to the next one.
                    addr_gen.next()

            if not link.ep_b.is_zero():
                link.ep_b_underlay = link.bridge.assign_br_address(
                    link.ep_b, topo.ases[link.ep_b], link.ep_b.ifid,
                    pref_ip=None if isinstance(link.bridge, HostNetwork) else addr_gen.current)
                if link.ep_b_underlay.ip == addr_gen.current:
                    addr_gen.next()

        except (errors.OutOfResources, StopIteration):
            log.error("Not enough IP addresses in subnet '%s'.", link.bridge.ip_network)
            raise errors.OutOfResources()
Ejemplo n.º 18
0
def status(args):
    """Get status information on the SCION services from supervisord."""
    # Read-only command: intentionally not written to the command log.
    config_path = args.workdir.joinpath(CONFIG_DATA_FILE)
    Topology.load(config_path).print_container_status()
Ejemplo n.º 19
0
def extract_topo_info(topo_file: MutableMapping[str, Any],
                      name: Optional[str] = None) -> Topology:
    """Initialize a Topology object with information read from a topology definition.

    Interface identifiers not specified in the input file are automatically assigned and added to
    the returned Topology object and to `topo_file`.

    :param topo_file: The input topology file parsed into a dictionary. When the function returns,
                      the IXP testbed specific entries have been removed.
    :param name: An optional name for the topology. This name is added to all containers, network
                 bridges, etc. to distinguish them from other testbed instances.
    :returns: Extracted topology information.
    :raises InvalidTopo: The topology file is invalid.
    """
    topo = Topology(name)
    networks = NetworkFactory()
    brs = BrFactory()
    ifids = IfIdMapping(topo_file)

    # Subnet for automatically generated local docker bridges
    if 'link_subnet' in topo_file.get('defaults', {}):
        topo.default_link_subnet = ipaddress.ip_network(
            topo_file['defaults'].pop("link_subnet"))
        topo.ipv6_enabled |= (topo.default_link_subnet.version == 6)
    else:
        topo.default_link_subnet = None

    # Hosts (first pass: create host objects)
    localhost = topo.hosts['localhost'] = LocalHost()  # always exists
    for host_name, host_def in topo_file.get('hosts', {}).items():
        if host_name != 'localhost':
            if host_name in topo.hosts:
                log.error("Multiple hosts with name '%s'.", host_name)
                raise errors.InvalidTopo()

            if not 'coordinator' in topo_file:
                log.error(
                    "Running a topology spanning multiple hosts requires a coordinator."
                )
                raise errors.InvalidTopo()

            topo.hosts[host_name] = RemoteHost(
                host_name,
                _get_ip(host_def, 'ssh_host', host_name),
                _get_value(host_def, 'username', host_name),
                identity_file=host_def.get("identity_file"),
                ssh_port=L4Port(int(host_def.get('ssh_port', 22))))

    # Networks
    if 'networks' in topo_file:
        net_defs = topo_file.pop('networks')  # remove networks section
        for net_name, net_def in net_defs.items():
            type = _get_value(net_def, 'type', net_name)
            subnet = _get_value(net_def, 'subnet', net_name)
            host = topo.hosts[net_def.get('host', 'localhost')]
            networks.create(net_name, topo.get_name_prefix(), type, host,
                            subnet, net_def)

    # Hosts (second pass: parse network addresses for host networks)
    for host_name, host_def in topo_file.get('hosts', {}).items():
        for net, addr in host_def.get('addresses', {}).items():
            networks.set_host_ip(net, topo.hosts[host_name], addr)
    topo_file.pop('hosts', None)  # remove host section

    # Coordinator
    if 'coordinator' in topo_file:
        coord_def = topo_file.pop('coordinator')  # remove coordinator section
        host = topo.hosts[coord_def.get('host', 'localhost')]
        def_name = lambda: topo.get_name_prefix() + const.COORD_NET_NAME
        bridge = networks.get(_get_value(coord_def, 'network', 'coordinator'),
                              def_name, localhost)
        cpu_affinity = CpuSet(coord_def.get('cpu_affinity'))
        ssh_management = coord_def.get('ssh_management', False)

        debug = coord_def.get('debug', True)
        compose_path = None
        if debug:
            if ssh_management:
                log.warning(
                    "Coordinator in debug mode, 'ssh_management' has no effect."
                )
        else:
            compose_path = Path(
                _get_value(coord_def, 'compose_path', 'coordinator'))
            if 'expose' not in coord_def:
                log.warning(
                    "No interface to publish the coordinator on given. The coordinator will"
                    " be exposed at http://127.0.0.1:8000.")

        coord = Coordinator(topo.get_coord_name(), host, bridge, cpu_affinity,
                            ssh_management, debug, compose_path)
        coord.exposed_at = _get_external_address(coord_def)

        for user_name, data in coord_def['users'].items():
            if user_name is None:
                log.error("User name missing.")
                raise errors.InvalidTopo()
            coord.users[user_name] = User(data['email'], data['password'],
                                          data.get('superuser', False))

        topo.coordinator = coord

    # Prometheus
    if 'prometheus' in topo_file:
        prom_def = topo_file.pop('prometheus')  # remove prometheus section
        host = topo.hosts[prom_def.get('host', 'localhost')]

        def_name = lambda: topo.gen_bridge_name()
        # Fixed: error context was 'coordinator' (copy-paste from the section above).
        bridge = networks.get(_get_value(prom_def, 'network', 'prometheus'),
                              def_name, localhost)
        if not bridge.is_docker_managed:
            log.error("Invalid network type for Prometheus.")
            # Fixed: was `raise InvalidTopo()` without the `errors.` prefix,
            # inconsistent with every other raise in this function.
            raise errors.InvalidTopo()

        prom = Prometheus(
            host,
            cast(DockerNetwork, bridge),
            cpu_affinity=CpuSet(prom_def.get('cpu_affinity')),
            scrape_interval=prom_def.get('scrape_interval', "30s"),
            storage_dir=_get_optional_path(prom_def, 'storage_dir'),
            targets=[ISD_AS(target) for target in prom_def['targets']])
        prom.exposed_at = _get_external_address(prom_def)

        topo.additional_services.append(prom)

    # IXP definitions
    for ixp_name, ixp_def in topo_file.pop('IXPs',
                                           {}).items():  # remove IXP section
        if ixp_name in topo.ixps:
            # Fixed: logged the unrelated `name` parameter instead of the IXP name.
            log.error("IXP %s is defined multiple times.", ixp_name)
            raise errors.InvalidTopo()
        net_name = _get_value(ixp_def, 'network', ixp_name)
        def_name = lambda: topo.get_name_prefix() + ixp_name
        bridge = networks.get(net_name, def_name, localhost)
        topo.ixps[ixp_name] = Ixp(bridge)

    # ASes
    for as_name, as_def in topo_file['ASes'].items():
        isd_as = ISD_AS(as_name)
        host_name = as_def.get('host', 'localhost')
        host = None
        try:
            host = topo.hosts[host_name]
        except KeyError:
            # Fixed: `as_def[host_name]` raised a KeyError inside this handler;
            # the offending host name itself is what should be reported.
            log.error("Invalid host: '%s'.", host_name)
            raise
        cpu_affinity = CpuSet(as_def.get('cpu_affinity'))
        asys = AS(host, as_def.get('core', False), cpu_affinity)

        asys.is_attachment_point = as_def.pop('attachment_point', False)
        asys.owner = as_def.pop('owner', None)
        topo.ases[isd_as] = asys

        if topo.coordinator:
            for ixp_name in as_def.pop('ixps', []):
                if asys.owner is None:
                    log.error("Infrastructure AS %s has an IXP list.", isd_as)
                    raise errors.InvalidTopo()
                ixp = topo.ixps[ixp_name]
                ixp.ases[isd_as] = asys
                # Add dummy link to IXP to make sure there is a network connection.
                # Actual links will be configured by the coordinator.
                # The border router of the link endpoint is labeled here to avoid creating a new
                # border router for every IXP link.
                end_point = LinkEp(isd_as,
                                   ifid=ifids.assign_ifid(isd_as),
                                   br_label='peer')
                link = Link(end_point, LinkEp(), LinkType.UNSET)
                link.bridge = ixp.bridge
                topo.links.append(link)
                brs.add_link_ep(end_point, link)

    # Link definitions
    for link in topo_file['links']:
        a, b = LinkEp(link['a']), LinkEp(link['b'])

        # Assign IfIds if not given in the original topo file.
        # Setting the IDs of all interfaces in the processed topology file ensures we can identify
        # the interfaces in the configuration files generated by scion.sh.
        for ep, ep_key in [(a, 'a'), (b, 'b')]:
            if ep.ifid is None:
                ep.ifid = ifids.assign_ifid(ep)
                link[ep_key] = "{}#{}".format(link[ep_key], ep.ifid)

        topo.links.append(Link(a, b, link['linkAtoB']))

        # Keep track of border routers that will be created for the links.
        brs.add_link_ep(a, topo.links[-1])
        brs.add_link_ep(b, topo.links[-1])

        # Assign to a network if an IXP name or an explicit IP network is given.
        if "network" in link:
            net = link.pop('network')
            if net in topo.ixps:  # use the IXPs network
                ixp = topo.ixps[net]
                topo.links[-1].bridge = ixp.bridge
                ixp.ases[a] = topo.ases[a]
                ixp.ases[b] = topo.ases[b]
            else:
                def_name = lambda: topo.gen_bridge_name()
                topo.links[-1].bridge = networks.get(net, def_name, localhost)
        else:
            if topo.ases[a].host != topo.ases[b].host:
                log.error(
                    "Links between ASes on different hosts must specify the network to use."
                )
                raise errors.InvalidTopo()

    # Enable IPv6 support if needed. Fixed: plain assignment clobbered the flag
    # already set above when the default link subnet is an IPv6 network.
    topo.ipv6_enabled |= networks.is_ipv6_required()

    # Store bridges in topology.
    topo.bridges = networks.get_bridges()

    # Store border router info in corresponding AS.
    for isd_as, asys in topo.ases.items():
        asys.border_routers = brs.get_brs(isd_as)

    return topo