Example 1
    def _restart_container(self, cntr, isd_as: ISD_AS, asys: AS) -> bool:
        """Try to restart an AS container currently not running.

        :param cntr: Container to restart.
        :param isd_as: ISD-AS of the AS the container belongs to.
        :param asys: AS the container belongs to.
        :returns: True if the container is now running, False if the restart failed.
        """
        cntr.start()  # try to start the container
        cntr.reload()  # get the new status
        if cntr.status == "running":
            log.info("Restarted container %s [%s] (%s).", cntr.name,
                     asys.host.name, asys.container_id)

            # Delete the socket used by the supervisor so scion.sh knows it has to be restarted.
            # See supervisor/supervisor.sh in the SCION source code.
            run_cmd_in_cntr(cntr, const.SCION_USER, "rm /tmp/supervisor.sock")

            # Network bridges not created by Docker don't reconnect automatically.
            disconnect_bridges(isd_as, asys, non_docker_only=True)
            connect_bridges(isd_as, asys, non_docker_only=True)

            # Restart the SSH server in managed ASes.
            if self.coordinator is not None:
                if asys.is_attachment_point or self.coordinator.ssh_management:
                    dc = asys.host.docker_client
                    self._start_sshd(dc.containers.get(asys.container_id))

            return True  # container is now running
        else:
            log.warning("Restarting container %s [%s] (%s) failed.", cntr.name,
                        asys.host.name, asys.container_id)
            asys.container_id = None
            return False
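
All of these examples call a run_cmd_in_cntr helper whose implementation is not part of this listing. A minimal sketch of such a helper, assuming docker-py's Container.exec_run and only the keyword arguments visible at the call sites (output, check), might look as follows; the project's actual implementation may differ.

import io
from typing import Optional


def run_cmd_in_cntr(cntr, user: str, cmd: str,
                    output: Optional[io.StringIO] = None,
                    check: bool = False) -> int:
    """Run `cmd` as `user` in container `cntr` (illustrative sketch only).

    :param output: Optional stream receiving the combined stdout/stderr.
    :param check: If True, raise RuntimeError on a non-zero exit code.
    :returns: Exit code of the command.
    """
    # Running the command through a shell allows redirections and '&&' chains
    # as used by the call sites above.
    exit_code, out = cntr.exec_run(["/bin/bash", "-c", cmd], user=user)
    if output is not None:
        output.write(out.decode("utf-8"))
    if check and exit_code != 0:
        raise RuntimeError("Command '%s' exited with code %d" % (cmd, exit_code))
    return exit_code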
Example 2
def _build_standalone_topology(topo: Topology, sc: Path, workdir: Path,
                               dc: docker.DockerClient):
    """Build a standalone SCION topology using the 'scion.sh' script."""

    # Start master container
    log.info("Starting SCION Docker container.")
    master_cntr = start_scion_cntr(
        dc,
        const.AS_IMG_NAME,
        cntr_name=topo.get_name_prefix() + const.MASTER_CNTR_NAME,
        mount_dir=workdir.joinpath(
            const.MASTER_CNTR_MOUNT).resolve(),  # need absolute path
        volumes={
            "/var/run/docker.sock": {
                'bind': "/var/run/docker.sock",
                'mode': 'rw'
            }
        })

    try:
        # Copy processed topology file into the master container
        processed_topo_file_path = workdir.joinpath(const.PROCESSED_TOPO_FILE)
        copy_to_container(master_cntr, processed_topo_file_path,
                          const.SCION_TOPO_FILES_PATH)

        # Build a standalone topology in the master container
        log.info("Building standalone topology...")
        command = "./scion.sh topology --in-docker -c topology/topology.topo"
        run_cmd_in_cntr(master_cntr, const.SCION_USER, command)
    finally:
        master_cntr.stop()
        master_cntr.remove()
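
Example 2 and several later examples also use a copy_to_container helper that is not shown. A hedged sketch, assuming docker-py's Container.put_archive (which expects a tar archive), could look like this; file ownership and permissions are ignored for brevity.

import io
import tarfile
from pathlib import Path


def copy_to_container(cntr, src: Path, dst) -> None:
    """Copy the local file `src` into directory `dst` of container `cntr` (illustrative sketch)."""
    buf = io.BytesIO()
    with tarfile.open(fileobj=buf, mode="w") as tar:
        tar.add(str(src), arcname=Path(src).name)  # archive only the file itself
    buf.seek(0)
    cntr.put_archive(str(dst), buf.getvalue())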
Example 3
    def delete_policies(self, isd_as: ISD_AS, policies: str) -> str:
        """Delete peering policies for user AS `isd_as`.

        :param isd_as: The AS whose policies are deleted.
        :param policies: The policies to delete in JSON format as expected by the coordinator.
        :return: The first line of the coordinator's HTTP response (the status line), or an
                 empty string if the coordinator is not running.
        """
        try:
            # Production configuration: Caddy container does not have curl, so run directly in the
            # Django container.
            cntr = (self.get_web_container() if self.debug
                    else self.get_django_container())
        except errors.NotFound:
            log.error("Coordinator is not running.")
            return ""

        uid, secret = self.api_credentials[isd_as]
        cmd = "curl -X DELETE {base_url}/api/peering/host/{host}/policies" \
              " -u {host}:{secret} -d \"{policies}\" -i".format(
                  base_url=self.get_url() if self.debug else _PROD_DJANGO_COORD_URL,
                  host=uid, secret=secret,
                  policies=policies.replace("'", "\"").replace('"', '\\"'))

        user = const.SCIONLAB_USER_DEBUG if self.debug else const.SCIONLAB_USER_PRODUCTION
        result = io.StringIO()
        run_cmd_in_cntr(cntr, user, cmd, output=result)
        return result.getvalue().splitlines()[0]
Example 4
def init_db(topo, workdir: Path, debug: bool):
    """Initialize the coordinator's database with information from `topo`.

    :param topo: Topology database.
    :param workdir: Directory containing the topology data.
    :param debug: Whether the coordinator runs in its debug configuration.
    :raises errors.NotFound: The container of the coordinator has not been found.
    """
    coord = topo.coordinator
    assert coord

    log.info("Initializing coordinator database.")
    # Create configuration in working directory (on host)
    output_path = workdir.joinpath(const.COORD_SCRIPT_NAME)
    with open(output_path, 'w') as file:
        _create_config_script(topo, file)

    # Run configuration script in Django
    try:
        cntr = coord.get_django_container()
    except errors.NotFound:
        log.error("Coordinator is not running.")
        raise

    if debug:
        path = Path(const.SCIONLAB_PATH_DEBUG)
        user = const.SCIONLAB_USER_DEBUG
    else:
        path = Path(const.SCIONLAB_PATH_PRODUCTION)
        user = const.SCIONLAB_USER_PRODUCTION
    copy_to_container(cntr, output_path, path.joinpath("scripts"))
    cmd = "./manage.py shell < scripts/" + const.COORD_SCRIPT_NAME
    run_cmd_in_cntr(cntr, user, cmd, check=True)
Example 5
    def get_policies(self, isd_as: ISD_AS,
                     ixp_id: Optional[int]) -> Optional[Dict]:
        """Get the peering policies of user AS `isd_as` from the coordinator.

        :param ixp_id: An optional integer identifying an IXP in the coordinator. Filters the
                       result for policies applying to this IXP.
        :return: The dictionary returned by the coordinator's API. Returns `None` if the coordinator
                 is not running.
        """
        try:
            # Production configuration: Caddy container does not have curl, so run directly in the
            # Django container.
            cntr = (self.get_web_container() if self.debug
                    else self.get_django_container())
        except errors.NotFound:
            log.error("Coordinator is not running.")
            return None

        uid, secret = self.api_credentials[isd_as]
        req_params = ("?ixp=%s" % ixp_id) if ixp_id is not None else ""
        cmd = "curl -X GET {base_url}/api/peering/host/{host}/policies{params}" \
              " -u {host}:{secret}".format(
                  base_url=self.get_url() if self.debug else _PROD_DJANGO_COORD_URL,
                  params=req_params, host=uid, secret=secret)
        user = const.SCIONLAB_USER_DEBUG if self.debug else const.SCIONLAB_USER_PRODUCTION
        response = io.StringIO()
        run_cmd_in_cntr(cntr, user, cmd, output=response)

        response.seek(0)
        return json.load(response)
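
A hypothetical way of combining the coordinator API helpers from Examples 3 and 5 is to read the current policies of an AS and then delete them again. The coord object and the ISD-AS value below are placeholders, not part of the original code.

import json

isd_as = ISD_AS("1-ff00:0:110")  # placeholder ISD-AS
policies = coord.get_policies(isd_as, ixp_id=None)
if policies is not None:
    status = coord.delete_policies(isd_as, json.dumps(policies))
    log.info("Coordinator answered: %s", status)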
Example 6
    def stop_scion_asys(self, isd_as: ISD_AS, asys: AS):
        """Stop a single SCION AS.

        :param isd_as: ISD-AS of the AS to stop.
        :param asys: AS to stop.
        """
        cntr = self._get_container(isd_as, asys)
        run_cmd_in_cntr(cntr, const.SCION_USER, "./scion.sh stop")
Example 7
def _authorize_coord_ssh_key(cntr, workdir):
    """Copy the coordinator's public key to the authorized_keys file in the given container."""
    key_file = workdir.joinpath(const.COORD_KEY_PATH, const.COORD_PUBLIC_KEY_FILE)
    with open(key_file, 'r') as public_key:
        cmd = "umask 077 && mkdir -p ~/.ssh && echo \"%s\" >> ~/.ssh/authorized_keys" \
              % public_key.read()
        run_cmd_in_cntr(cntr, const.SCION_USER, cmd, check=True)
Example 8
def get_br_prom_ports(self, isd_as: ISD_AS) -> List[L4Port]:
    """Get the Prometheus endpoint ports of all border routers in the given AS."""
    ports = io.StringIO()
    cntr = self.get_django_container()
    user = const.SCIONLAB_USER_DEBUG if self.debug else const.SCIONLAB_USER_PRODUCTION
    cmd = "./manage.py runscript print_prom_ports --script-args %s" % isd_as.as_str()
    run_cmd_in_cntr(cntr, user, cmd, output=ports, check=True)
    return [L4Port(int(port)) for port in ports.getvalue().split()]
Example 9
    def run_scion_asys(self, isd_as: ISD_AS, asys: AS):
        """Start a single SCION AS.

        :param isd_as: ISD-AS of the AS to start.
        :param asys: AS to start.
        """
        cntr = self._get_container(isd_as, asys)
        if self.coordinator is not None:
            run_cmd_in_cntr(cntr, const.SCION_USER,
                            self.coordinator.get_config_cmd(isd_as))
        else:
            run_cmd_in_cntr(cntr, const.SCION_USER, "./scion.sh run nobuild")
Example 10
def fetch_api_secrets(topo, debug):
    """Retrieve coordinator API credentials for all ASes in the topology."""
    coord = topo.coordinator
    assert coord

    log.info("Fetching API secrets from coordinator.")
    try:
        cntr = coord.get_django_container()
    except errors.NotFound:
        log.error("Coordinator is not running.")
        raise

    secrets = io.StringIO()
    user = const.SCIONLAB_USER_DEBUG if debug else const.SCIONLAB_USER_PRODUCTION
    cmd = "./manage.py runscript print_api_secrets"
    run_cmd_in_cntr(cntr, user, cmd, output=secrets, check=True)
    coord.api_credentials = _parse_api_secrets(secrets.getvalue())
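
The _parse_api_secrets helper used on the last line of Example 10 is not shown. The sketch below is purely illustrative: it assumes each line printed by print_api_secrets has the form "<isd-as> <uid> <secret>", which is an assumption about the script's output rather than something documented here.

from typing import Dict, Tuple


def _parse_api_secrets(output: str) -> Dict[ISD_AS, Tuple[str, str]]:
    """Parse 'print_api_secrets' output into a map of ISD-AS to (uid, secret) (illustrative sketch)."""
    credentials = {}
    for line in output.splitlines():
        fields = line.split()
        if len(fields) == 3:  # skip blank or malformed lines
            isd_as, uid, secret = fields
            credentials[ISD_AS(isd_as)] = (uid, secret)
    return credentials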
Example 11
def config_ssh_client(topo, workdir: Path, debug: bool):
    """Copy the SSH private key and client configuration to the coordinator."""
    coord = topo.coordinator
    assert coord

    log.info("Copying SSH key to coordinator.")
    try:
        cntr = coord.get_ssh_container()
    except errors.NotFound:
        log.error("Coordinator is not running.")
        raise

    src_path = workdir.joinpath(const.COORD_KEY_PATH)
    if debug:
        dst_path = Path(const.SCIONLAB_PATH_DEBUG).joinpath("run")
        user = const.SCIONLAB_USER_DEBUG
    else:
        dst_path = Path(const.SCIONLAB_PATH_PRODUCTION).joinpath("run")
        user = const.SCIONLAB_USER_PRODUCTION

    copy_to_container(cntr, src_path.joinpath(const.COORD_PRIVATE_KEY_FILE),
                      dst_path)

    # Make sure private key is only readable by the current user (otherwise ssh does not accept it)
    run_cmd_in_cntr(cntr,
                    user,
                    "chmod 600 %s" %
                    dst_path.joinpath(const.COORD_PRIVATE_KEY_FILE),
                    check=True)

    copy_to_container(cntr, src_path.joinpath(const.SSH_CLIENT_CONFIG),
                      dst_path)

    # Retrieve host keys
    run_cmd_in_cntr(cntr, user, "umask 077 && mkdir -p ~/.ssh", check=True)
    for isd_as in topo.ases.keys():
        cmd = "ssh-keyscan -H %s >> ~/.ssh/known_hosts" % (
            topo.coordinator.bridge.get_ip_address(isd_as))
        run_cmd_in_cntr(cntr, user, cmd)
Example 12
def _start_sshd(cntr):
    """Start the SSH server in the given container."""
    log.info("Starting sshd in %s.", cntr.name)
    run_cmd_in_cntr(cntr, "root", "mkdir -p /var/run/sshd")
    run_cmd_in_cntr(cntr, "root", "/usr/sbin/sshd", check=True)
Example 13
    def _start_container(self, isd_as: ISD_AS, asys: AS, workdir: Path,
                         sc: Path) -> None:
        """Start the Docker container hosting the given AS and connect it to the necessary bridges.
        """
        dc = asys.host.docker_client

        # Check if container is already running
        if asys.container_id:
            try:
                cntr = dc.containers.get(asys.container_id)
            except docker.errors.NotFound:
                # container has been removed
                asys.container_id = None
            else:
                if cntr.status == "running":
                    return  # container is already running
                elif cntr.status == 'paused':
                    cntr.unpause()
                    log.info("Unpaused container %s [%s] (%s).", cntr.name,
                             asys.host.name, cntr.id)
                    return
                else:
                    if self._restart_container(cntr, isd_as, asys):
                        return  # restart successful

        # Create and start a new container
        cntr_name = self.get_cntr_name(isd_as)
        ports = get_published_ports(isd_as, asys)
        for cntr_port, (host_ip, host_port) in ports.items():
            log.info("Exposing port %s of %s on %s:%s [%s].", cntr_port,
                     cntr_name, host_ip, host_port, asys.host.name)

        cntr = None
        if asys.host.is_local:
            mount_dir = workdir.joinpath(isd_as.file_fmt()).resolve()
            if self.coordinator is not None:
                # Starting a new instance of the coordinator generates new configuration files,
                # certificates, etc. If there are configuration or cache files from a previous run,
                # we remove them here.
                shutil.rmtree(mount_dir.joinpath("gen"), ignore_errors=True)
                shutil.rmtree(mount_dir.joinpath("gen-cache"),
                              ignore_errors=True)

            kwargs = {}
            if not asys.cpu_affinity.is_unrestricted():
                kwargs['cpuset_cpus'] = str(asys.cpu_affinity)
            cntr = start_scion_cntr(dc,
                                    const.AS_IMG_NAME,
                                    cntr_name=cntr_name,
                                    mount_dir=mount_dir,
                                    ports=ports,
                                    extra_args=kwargs)
            asys.container_id = cntr.id

        else:  # Start container on a remote host
            kwargs = {}
            if not asys.cpu_affinity.is_unrestricted():
                kwargs['cpuset_cpus'] = str(asys.cpu_affinity)
            cntr = dc.containers.run(
                const.AS_IMG_NAME,
                name=cntr_name,
                tty=True,  # keep the container running
                detach=True,
                ports=ports,
                **kwargs)
            asys.container_id = cntr.id

        log.info("Started container %s [%s] with ID %s.", cntr_name,
                 asys.host.name, asys.container_id)

        if self.coordinator is None:
            # If the coordinator creates the gen folder, 'gen-certs.sh' is invoked by
            # 'scionlab-config-user'.
            # If the topology is generated by 'scion.sh topology', we create the certificates
            # now.
            run_cmd_in_cntr(cntr,
                            const.SCION_USER,
                            "./gen-certs.sh",
                            check=True)
        else:
            # Connect the new container to the coordinator.
            self.coordinator.bridge.connect(isd_as, asys)
            if asys.is_attachment_point or self.coordinator.ssh_management:
                # Allow the coordinator to access the container via SSH.
                self._authorize_coord_ssh_key(cntr, workdir)
                self._start_sshd(cntr)

        # Connect the bridges carrying the SCION links.
        connect_bridges(isd_as, asys)
Example 14
def wait_for_db_migrations(self, docker_client: docker.DockerClient,
                           timeout: int) -> None:
    """Wait until the coordinator's database migrations have completed (signalled by the
    'db_initialized' file) or `timeout` seconds have passed."""
    cmd = "appdeps.py --wait-secs {} --file-wait db_initialized".format(timeout)
    run_cmd_in_cntr(self.get_django_container(docker_client),
                    const.SCIONLAB_USER_PRODUCTION, cmd)
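
A hypothetical ordering of the coordinator setup helpers shown in this listing: wait for the database migrations to finish, initialize the database, then fetch the API secrets. The timeout, the debug flag, and the topo/workdir objects are placeholders.

import docker

dc = docker.from_env()
topo.coordinator.wait_for_db_migrations(dc, timeout=60)
init_db(topo, workdir, debug=True)
fetch_api_secrets(topo, debug=True)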