Example #1
def serviceCIDR(runner: Runner):
    """
    Get the service IP range, based on a heuristic of constructing a CIDR
    from existing Service IPs. We create more services if there are fewer
    than 8, to ensure some coverage of the IP range.
    """

    def get_service_ips():
        services = json.loads(
            runner.get_output(runner.kubectl("get", "services", "-o", "json"))
        )["items"]
        # FIXME: Add test(s) here so we don't crash on, e.g., ExternalName
        return [
            svc["spec"]["clusterIP"] for svc in services
            if svc["spec"].get("clusterIP", "None") != "None"
        ]

    service_ips = get_service_ips()
    new_services = []  # type: List[str]
    # Ensure we have at least 8 ClusterIP Services:
    while len(service_ips) + len(new_services) < 8:
        new_service = random_name()
        runner.check_call(
            runner.kubectl(
                "create", "service", "clusterip", new_service, "--tcp=3000"
            )
        )
        new_services.append(new_service)
    if new_services:
        service_ips = get_service_ips()
    # Store Service CIDR:
    service_cidr = covering_cidr(service_ips)
    # Delete new services:
    for new_service in new_services:
        runner.check_call(runner.kubectl("delete", "service", new_service))

    if runner.chatty:
        runner.show(
            "Guessing that Services IP range is {}. Services started after"
            " this point will be inaccessible if are outside this range;"
            " restart telepresence if you can't access a "
            "new Service.\n".format(service_cidr)
        )
    return service_cidr
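
The covering_cidr helper that this example calls is not shown. As a rough illustration of the heuristic the docstring describes, a minimal sketch using the standard ipaddress module might look like the following (the function name matches the call site above, but the body is an assumption, not the project's actual implementation):

import ipaddress
from typing import List

def covering_cidr(ips: List[str]) -> str:
    # Illustrative sketch: widen the prefix, starting from /24, until a
    # single network contains every given IPv4 address.
    addresses = sorted(ipaddress.IPv4Address(ip) for ip in ips)
    prefix = 24
    while True:
        network = ipaddress.IPv4Network(
            (int(addresses[0]), prefix), strict=False
        )
        if addresses[-1] in network:
            return str(network)
        prefix -= 1

# covering_cidr(["10.0.0.1", "10.0.5.9"]) -> "10.0.0.0/21"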
Example #2
def parse_args(args=None) -> argparse.Namespace:
    """Create a new ArgumentParser and parse sys.argv."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        allow_abbrev=False,  # abbreviated options can make later additions backwards-incompatible
        description=(
            "Telepresence: local development proxied to a remote Kubernetes "
            "cluster.\n\n"
            "Documentation: https://telepresence.io\n"
            "Real-time help: https://d6e.co/slack\n"
            "Issue tracker: https://github.com/datawire/telepresence/issues\n"
            "\n" + HELP_EXAMPLES + "\n\n"
        )
    )
    parser.add_argument(
        '--version', action='version', version=telepresence.__version__
    )
    parser.add_argument(
        "--verbose",
        action='store_true',
        help="Enables verbose logging for troubleshooting."
    )
    parser.add_argument(
        "--logfile",
        default="./telepresence.log",
        help=(
            "The path to write logs to. '-' means stdout, "
            "default is './telepresence.log'."
        )
    )
    parser.add_argument(
        "--method",
        "-m",
        choices=["inject-tcp", "vpn-tcp", "container"],
        help=(
            "'inject-tcp': inject process-specific shared "
            "library that proxies TCP to the remote cluster.\n"
            "'vpn-tcp': all local processes can route TCP "
            "traffic to the remote cluster. Requires root.\n"
            "'container': used with --docker-run.\n"
            "\n"
            "Default is 'vpn-tcp', or 'container' when --docker-run is used.\n"
            "\nFor more details see "
            "https://telepresence.io/reference/methods.html"
        )
    )
    group_deployment = parser.add_mutually_exclusive_group()
    group_deployment.add_argument(
        '--new-deployment',
        "-n",
        metavar="DEPLOYMENT_NAME",
        dest="new_deployment",
        help=(
            "Create a new Deployment in Kubernetes where the "
            "datawire/telepresence-k8s image will run. It will be deleted "
            "on exit. If no deployment option is specified this will be "
            " used by default, with a randomly generated name."
        )
    )
    group_deployment.add_argument(
        "--swap-deployment",
        "-s",
        dest="swap_deployment",
        metavar="DEPLOYMENT_NAME[:CONTAINER]",
        help=(
            "Swap out an existing deployment with the Telepresence proxy, "
            "swap back on exit. If there are multiple containers in the pod "
            "then add the optional container name to indicate which container"
            " to use."
        )
    )
    group_deployment.add_argument(
        "--deployment",
        "-d",
        metavar="EXISTING_DEPLOYMENT_NAME",
        help=(
            "The name of an existing Kubernetes Deployment where the " +
            "datawire/telepresence-k8s image is already running."
        )
    )
    parser.add_argument(
        "--context",
        default=None,
        help=(
            "The Kubernetes context to use. Defaults to current kubectl"
            " context."
        )
    )
    parser.add_argument(
        "--namespace",
        default=None,
        help=(
            "The Kubernetes namespace to use. Defaults to kubectl's default"
            " for the current context, which is usually 'default'."
        )
    )
    parser.add_argument(
        "--expose",
        action='append',
        metavar="PORT[:REMOTE_PORT]",
        default=[],
        help=(
            "Port number that will be exposed to Kubernetes in the Deployment."
            " Should match port exposed in the existing Deployment if using "
            "--deployment or --swap-deployment. By default local port and "
            "remote port are the same; if you want to listen on port 8080 "
            "locally but be exposed as port 80 in Kubernetes you can do "
            "'--expose 8080:80'."
        )
    )
    parser.add_argument(
        "--also-proxy",
        metavar="CLOUD_HOSTNAME",
        dest="also_proxy",
        action='append',
        default=[],
        help=(
            "If you are using --method=vpn-tcp, use this to add additional "
            "remote IPs, IP ranges, or hostnames to proxy. Kubernetes service "
            "and pods are proxied automatically, so you only need to list "
            "cloud resources, e.g. the hostname of a AWS RDS. "
            "When using --method=inject-tcp "
            "this option is unnecessary as all outgoing communication in "
            "the run subprocess will be proxied."
        )
    )
    parser.add_argument(
        "--mount",
        type=path_or_bool,
        metavar="PATH_OR_BOOLEAN",
        dest="mount",
        default=True,
        help=(
            "The absolute path for the root directory where volumes will be "
            "mounted, $TELEPRESENCE_ROOT. "
            "Use \"true\" to have Telepresence pick a random mount point "
            "under /tmp (default). "
            "Use \"false\" to disable filesystem mounting entirely."
        )
    )

    parser.add_argument(
        "--env-json",
        metavar="FILENAME",
        default=None,
        help="Also emit the remote environment to a file as a JSON blob."
    )

    parser.add_argument(
        "--env-file",
        metavar="FILENAME",
        default=None,
        help=(
            "Also emit the remote environment to an env file in Docker "
            "Compose format. "
            "See https://docs.docker.com/compose/env-file/ for more "
            "information on the limitations of this format."
        )
    )

    group = parser.add_mutually_exclusive_group()
    group.add_argument(
        "--run-shell",
        dest="runshell",
        action="store_true",
        help="Run a local shell that will be proxied to/from Kubernetes.",
    )
    group.add_argument(
        "--run",
        metavar=("COMMAND", "ARG"),
        dest="run",
        nargs=argparse.REMAINDER,
        help=(
            "Run the specified command arguments, e.g. "
            "'--run python myapp.py'."
        )
    )
    group.add_argument(
        "--docker-run",
        metavar="DOCKER_RUN_ARG",
        dest="docker_run",
        nargs=argparse.REMAINDER,
        help=(
            "Run a Docker container, by passing the arguments to 'docker run',"
            " e.g. '--docker-run -i -t ubuntu:16.04 /bin/bash'. "
            "Requires --method container."
        )
    )
    args = parser.parse_args(args)

    # Fill in defaults:
    if args.method is None:
        if args.docker_run is not None:
            args.method = "container"
        else:
            args.method = "vpn-tcp"
    if args.deployment is None and args.new_deployment is None and (
        args.swap_deployment is None
    ):
        args.new_deployment = random_name()

    if args.method == "container" and args.docker_run is None:
        raise SystemExit(
            "'--docker-run' is required when using '--method container'."
        )
    if args.docker_run is not None and args.method != "container":
        raise SystemExit(
            "'--method container' is required when using '--docker-run'."
        )

    args.expose = PortMapping.parse(args.expose)
    return args
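
PortMapping, which parse_args uses to interpret the --expose values, is defined elsewhere. A minimal sketch of what parsing 'PORT[:REMOTE_PORT]' strings could look like, with names matching the call sites above (the body is an assumption):

from typing import Dict, Iterable, List, Tuple

class PortMapping:
    # Sketch: map local ports to remote ports.
    def __init__(self) -> None:
        self._mapping = {}  # type: Dict[int, int]

    @classmethod
    def parse(cls, port_strings: List[str]) -> "PortMapping":
        # "8080:80" maps local 8080 to remote 80; "8080" maps it to itself.
        result = cls()
        for port_string in port_strings:
            local, _, remote = port_string.partition(":")
            result._mapping[int(local)] = int(remote or local)
        return result

    def local_to_remote(self) -> Iterable[Tuple[int, int]]:
        return self._mapping.items()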
Example #3
def run_docker_command(
    runner: Runner,
    remote_info: RemoteInfo,
    args: argparse.Namespace,
    remote_env: Dict[str, str],
    subprocesses: Subprocesses,
    ssh: SSH,
) -> None:
    """
    --docker-run support.

    Connect using sshuttle running in a Docker container, and then run user
    container.

    :param args: Command-line args to telepresence binary.
    :param remote_env: Dictionary with environment on remote pod.
    """
    # Mount remote filesystem. We allow all users if we're using Docker because
    # we don't know what uid the Docker container will use:
    mount_dir, mount_cleanup = mount_remote_volumes(
        runner,
        remote_info,
        ssh,
        True,
    )

    # Update environment:
    remote_env["TELEPRESENCE_ROOT"] = mount_dir
    remote_env["TELEPRESENCE_METHOD"] = "container"  # mostly just for tests :(

    # Start the sshuttle container:
    name = random_name()
    config = {
        "port": ssh.port,
        "cidrs": get_proxy_cidrs(
            runner, args, remote_info, remote_env["KUBERNETES_SERVICE_HOST"]
        ),
        "expose_ports": list(args.expose.local_to_remote()),
    }
    if sys.platform == "darwin":
        config["ip"] = MAC_LOOPBACK_IP
    # Image already has tini init so doesn't need --init option:
    subprocesses.append(
        runner.popen(
            docker_runify([
                "--rm", "--privileged", "--name=" + name,
                TELEPRESENCE_LOCAL_IMAGE, "proxy",
                json.dumps(config)
            ])
        ),
        make_docker_kill(runner, name),
    )

    # Write out env file:
    with NamedTemporaryFile("w", delete=False) as envfile:
        for key, value in remote_env.items():
            envfile.write("{}={}\n".format(key, value))
    atexit.register(os.remove, envfile.name)

    # Wait for sshuttle to be running:
    while True:
        try:
            runner.check_call(
                docker_runify([
                    "--network=container:" + name, "--rm",
                    TELEPRESENCE_LOCAL_IMAGE, "wait"
                ]))
        except CalledProcessError as e:
            if e.returncode == 100:
                # We're good!
                break
            elif e.returncode == 125:
                # Docker failure, probably due to original container not
                # starting yet... so sleep and try again:
                sleep(1)
                continue
            else:
                raise
        else:
            raise RuntimeError(
                "Waiting container exited prematurely. File a bug, please!")

    # Start the container specified by the user:
    container_name = random_name()
    docker_command = docker_runify([
        "--volume={}:{}".format(mount_dir, mount_dir),
        "--name=" + container_name,
        "--network=container:" + name,
        "--env-file",
        envfile.name,
    ])
    # Older versions of Docker don't have --init:
    if "--init" in runner.get_output(["docker", "run", "--help"]):
        docker_command += ["--init"]
    docker_command += args.docker_run
    p = Popen(docker_command)

    def terminate_if_alive():
        runner.write("Shutting down containers...\n")
        if p.poll() is None:
            runner.write("Killing local container...\n")
            make_docker_kill(runner, container_name)()

        mount_cleanup()

    atexit.register(terminate_if_alive)
    wait_for_exit(runner, p, subprocesses)
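
docker_runify and make_docker_kill, used throughout these examples, are also defined elsewhere. A plausible sketch, assuming docker_runify merely builds a 'docker run' command line (with the env flag seen in Example #5 guessed to mean "preserve the environment across sudo") and make_docker_kill returns a cleanup callback:

from typing import Callable, List

SUDO_FOR_DOCKER = False  # assumed module flag; see Example #5

def docker_runify(args: List[str], env: bool = False) -> List[str]:
    # Sketch: prepend "docker run", and sudo if Docker requires it. The
    # env flag is assumed to add sudo -E so that a later
    # Popen(docker_command, env=docker_env) still takes effect.
    command = ["docker", "run"] + args
    if SUDO_FOR_DOCKER:
        command = (["sudo", "-E"] if env else ["sudo"]) + command
    return command

def make_docker_kill(runner, name: str) -> Callable[[], None]:
    # Sketch: capture the container name in a callback that the caller
    # can register as the process killer.
    def kill() -> None:
        runner.check_call(["docker", "kill", name])
    return kill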
Example #4
def run_docker_command(
    runner: Runner,
    remote_info: RemoteInfo,
    docker_run: List[str],
    expose: PortMapping,
    to_pod: List[int],
    from_pod: List[int],
    container_to_host: PortMapping,
    remote_env: Dict[str, str],
    ssh: SSH,
    mount_dir: Optional[str],
    use_docker_mount: Optional[bool],
    pod_info: Dict[str, str],
) -> "subprocess.Popen[bytes]":
    """
    --docker-run support.

    Connect using sshuttle running in a Docker container, and then run user
    container.

    :param remote_env: Dictionary with environment on remote pod.
    :param mount_dir: Path to local directory where remote pod's filesystem is
        mounted.
    """
    # Update environment:
    remote_env["TELEPRESENCE_METHOD"] = "container"  # mostly just for tests :(

    # Extract --publish flags and add them to the sshuttle container, which is
    # responsible for defining the network entirely.
    docker_args, publish_args = parse_docker_args(docker_run)

    # Point a host port to the network container's sshd
    container_sshd_port = find_free_port()
    publish_args.append(
        "--publish=127.0.0.1:{}:38022/tcp".format(container_sshd_port)
    )
    local_ssh = SSH(runner, container_sshd_port, "telepresence@127.0.0.1")

    # Start the network (sshuttle) container:
    name = random_name()
    config = {
        "cidrs": ["0/0"],
        "expose_ports": list(expose.local_to_remote()),
        "to_pod": to_pod,
        "from_pod": from_pod,
    }
    dns_args = []
    if "hostname" in pod_info:
        dns_args.append("--hostname={}".format(pod_info["hostname"].strip()))
    if "hosts" in pod_info:
        dns_args.extend(parse_hosts_aliases(pod_info["hosts"]))
    if "resolv" in pod_info:
        dns_args.extend(parse_resolv_conf(pod_info["resolv"]))

    # Image already has tini init so doesn't need --init option:
    span = runner.span()
    runner.launch(
        "Network container",
        runner.docker(
            "run", *publish_args, *dns_args, "--rm", "--privileged",
            "--name=" + name, TELEPRESENCE_LOCAL_IMAGE, "proxy",
            json.dumps(config)
        ),
        killer=make_docker_kill(runner, name),
        keep_session=runner.sudo_for_docker,
    )

    # Set up ssh tunnel to allow the container to reach the cluster
    if not local_ssh.wait():
        raise RuntimeError("SSH to the network container failed to start.")

    container_forward_args = ["-R", "38023:127.0.0.1:{}".format(ssh.port)]
    for container_port, host_port in container_to_host.local_to_remote():
        if runner.chatty:
            runner.show(
                "Forwarding container port {} to host port {}.".format(
                    container_port, host_port
                )
            )
        container_forward_args.extend([
            "-R", "{}:127.0.0.1:{}".format(container_port, host_port)
        ])
    runner.launch(
        "Local SSH port forward", local_ssh.bg_command(container_forward_args)
    )

    # Wait for sshuttle to be running:
    sshuttle_ok = False
    for _ in runner.loop_until(120, 1):
        try:
            runner.check_call(
                runner.docker(
                    "run", "--network=container:" + name, "--rm",
                    TELEPRESENCE_LOCAL_IMAGE, "wait"
                )
            )
        except subprocess.CalledProcessError as e:
            if e.returncode == 100:
                # We're good!
                sshuttle_ok = True
                break
            elif e.returncode == 125:
                # Docker failure, probably due to original container not
                # starting yet... so try again:
                continue
            else:
                raise
        else:
            raise RuntimeError(
                "Waiting container exited prematurely. File a bug, please!"
            )
    if not sshuttle_ok:
        # This used to loop forever. Now we time out after two minutes.
        raise RuntimeError(
            "Waiting for network container timed out. File a bug, please!"
        )

    # Start the container specified by the user:
    container_name = random_name()
    docker_command = runner.docker(
        "run",
        "--name=" + container_name,
        "--network=container:" + name,
        env=True,
    )

    # Prepare container environment
    for key in remote_env:
        docker_command.append("-e={}".format(key))
    docker_env = os.environ.copy()
    docker_env.update(remote_env)

    if mount_dir:
        if use_docker_mount:
            mount_volume = "telepresence-" + runner.session_id
        else:
            mount_volume = mount_dir

        docker_command.append("--volume={}:{}".format(mount_volume, mount_dir))

    # Don't add --init if the user is doing something with it
    init_args = [
        arg for arg in docker_args
        if arg == "--init" or arg.startswith("--init=")
    ]
    # Older versions of Docker don't have --init:
    docker_run_help = runner.get_output(["docker", "run", "--help"])
    if not init_args and "--init" in docker_run_help:
        docker_command += ["--init"]
    docker_command += docker_args
    span.end()

    runner.show("Setup complete. Launching your container.")
    process = subprocess.Popen(docker_command, env=docker_env)

    def terminate_if_alive() -> None:
        runner.write("Shutting down containers...\n")
        if process.poll() is None:
            runner.write("Killing local container...\n")
            make_docker_kill(runner, container_name)()

    runner.add_cleanup("Terminate local container", terminate_if_alive)
    return process
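
find_free_port, which this example uses to pick a host port for the network container's sshd, is a common pattern: bind to port 0 and let the OS choose. A minimal sketch (the name matches the call site; the body is an assumption):

import socket

def find_free_port() -> int:
    # Sketch: bind to port 0 so the OS assigns an unused TCP port.
    # There is an inherent race (the port could be reused before the
    # caller binds it), which is acceptable for a local helper.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.bind(("127.0.0.1", 0))
        return sock.getsockname()[1]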
Example #5
def run_docker_command(
    runner: Runner,
    remote_info: RemoteInfo,
    args: argparse.Namespace,
    remote_env: Dict[str, str],
    ssh: SSH,
    mount_dir: Optional[str],
) -> Popen:
    """
    --docker-run support.

    Connect using sshuttle running in a Docker container, and then run user
    container.

    :param args: Command-line args to telepresence binary.
    :param remote_env: Dictionary with environment on remote pod.
    :param mount_dir: Path to local directory where remote pod's filesystem is
        mounted.
    """
    if SUDO_FOR_DOCKER:
        runner.require_sudo()

    # Update environment:
    remote_env["TELEPRESENCE_METHOD"] = "container"  # mostly just for tests :(

    # Extract --publish flags and add them to the sshuttle container, which is
    # responsible for defining the network entirely.
    docker_args, publish_args = parse_docker_args(args.docker_run)

    # Start the sshuttle container:
    name = random_name()
    config = {
        "port": ssh.port,
        "cidrs": get_proxy_cidrs(
            runner, args, remote_info, remote_env["KUBERNETES_SERVICE_HOST"]
        ),
        "expose_ports": list(args.expose.local_to_remote()),
    }
    if runner.platform == "darwin":
        config["ip"] = MAC_LOOPBACK_IP
    # Image already has tini init so doesn't need --init option:
    span = runner.span()
    runner.launch(
        "Network container",
        docker_runify(
            publish_args + [
                "--rm", "--privileged", "--name=" +
                name, TELEPRESENCE_LOCAL_IMAGE, "proxy",
                json.dumps(config)
            ]
        ),
        killer=make_docker_kill(runner, name)
    )

    # Wait for sshuttle to be running:
    while True:
        try:
            runner.check_call(
                docker_runify([
                    "--network=container:" + name, "--rm",
                    TELEPRESENCE_LOCAL_IMAGE, "wait"
                ])
            )
        except CalledProcessError as e:
            if e.returncode == 100:
                # We're good!
                break
            elif e.returncode == 125:
                # Docker failure, probably due to original container not
                # starting yet... so sleep and try again:
                sleep(1)
                continue
            else:
                raise
        else:
            raise RuntimeError(
                "Waiting container exited prematurely. File a bug, please!"
            )

    # Start the container specified by the user:
    container_name = random_name()
    docker_command = docker_runify(
        [
            "--name=" + container_name,
            "--network=container:" + name,
        ],
        env=True,
    )

    # Prepare container environment
    for key in remote_env:
        docker_command.append("-e={}".format(key))
    docker_env = os.environ.copy()
    docker_env.update(remote_env)

    if mount_dir:
        docker_command.append("--volume={}:{}".format(mount_dir, mount_dir))

    # Don't add --init if the user is doing something with it
    init_args = [
        arg for arg in docker_args
        if arg == "--init" or arg.startswith("--init=")
    ]
    # Older versions of Docker don't have --init:
    if not init_args and "--init" in runner.get_output([
        "docker", "run", "--help"
    ]):
        docker_command += ["--init"]
    docker_command += docker_args
    span.end()

    p = Popen(docker_command, env=docker_env)

    def terminate_if_alive():
        runner.write("Shutting down containers...\n")
        if p.poll() is None:
            runner.write("Killing local container...\n")
            make_docker_kill(runner, container_name)()

    runner.add_cleanup("Terminate local container", terminate_if_alive)
    return p
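
parse_docker_args, shared by Examples #4 and #5, is not included either. A minimal sketch of the splitting it performs, assuming only the --publish/-p forms need to move to the network container (the real implementation may cover more spellings):

from typing import List, Tuple

def parse_docker_args(docker_run: List[str]) -> Tuple[List[str], List[str]]:
    # Sketch: pull --publish/-p flags out of the user's "docker run"
    # arguments so they can be attached to the network container, which
    # owns the network namespace both containers share.
    docker_args = []  # type: List[str]
    publish_args = []  # type: List[str]
    tokens = iter(docker_run)
    for token in tokens:
        if token in ("-p", "--publish"):
            # Flag value is the next token: move both.
            publish_args.append("--publish=" + next(tokens))
        elif token.startswith(("-p=", "--publish=")):
            publish_args.append(token)
        else:
            docker_args.append(token)
    return docker_args, publish_args

# parse_docker_args(["-i", "-t", "-p", "8080:80", "ubuntu:16.04"])
# -> (["-i", "-t", "ubuntu:16.04"], ["--publish=8080:80"])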
Example #6
def get_proxy_cidrs(runner: Runner, args: argparse.Namespace,
                    remote_info: RemoteInfo,
                    service_address: str) -> List[str]:
    """
    Figure out which IP ranges to route via sshuttle.

    1. Given the IP address of a service, figure out the IP ranges used by
       Kubernetes Services.
    2. Extract pod IP ranges from the API.
    3. Add any hostnames/IPs given by the user via --also-proxy.

    See https://github.com/kubernetes/kubernetes/issues/25533 for eventual
    long-term solution for service CIDR.
    """

    # Run script to convert --also-proxy hostnames to IPs, doing name
    # resolution inside Kubernetes, so we get cloud-local IP addresses for
    # cloud resources:
    def resolve_ips():
        # Separate hostnames from IPs and IP ranges
        hostnames = []
        ip_ranges = []

        for proxy_target in args.also_proxy:
            try:
                addr = ipaddress.ip_network(proxy_target)
            except ValueError:
                pass
            else:
                ip_ranges.append(str(addr))
                continue

            hostnames.append(proxy_target)

        resolved_ips = json.loads(
            runner.get_kubectl(args.context, args.namespace, [
                "exec", "--container=" + remote_info.container_name,
                remote_info.pod_name, "--", "python3", "-c", _GET_IPS_PY
            ] + hostnames))
        return resolved_ips + ip_ranges

    try:
        result = set(resolve_ips())
    except CalledProcessError as e:
        runner.write(str(e))
        raise SystemExit(
            "We failed to do a DNS lookup inside Kubernetes for the "
            "hostname(s) you listed in "
            "--also-proxy ({}). Maybe you mistyped one of them?".format(
                ", ".join(args.also_proxy)))

    # Get pod IPs from nodes if possible, otherwise use pod IPs as heuristic:
    try:
        nodes = json.loads(
            runner.get_output(
                [runner.kubectl_cmd, "get", "nodes", "-o", "json"]))["items"]
    except CalledProcessError as e:
        runner.write("Failed to get nodes: {}".format(e))
        # Fallback to using pod IPs:
        pods = json.loads(
            runner.get_output(
                [runner.kubectl_cmd, "get", "pods", "-o", "json"]))["items"]
        pod_ips = []
        for pod in pods:
            try:
                pod_ips.append(pod["status"]["podIP"])
            except KeyError:
                # Apparently a problem on OpenShift
                pass
        if pod_ips:
            result.add(covering_cidr(pod_ips))
    else:
        for node in nodes:
            pod_cidr = node["spec"].get("podCIDR")
            if pod_cidr is not None:
                result.add(pod_cidr)

    # Add the service IP range, based on a heuristic of constructing a CIDR
    # from existing Service IPs. We create more services if there are fewer
    # than 8, to ensure some coverage of the IP range:
    def get_service_ips():
        services = json.loads(
            runner.get_output(
                [runner.kubectl_cmd, "get", "services", "-o",
                 "json"]))["items"]
        # FIXME: Add test(s) here so we don't crash on, e.g., ExternalName
        return [
            svc["spec"]["clusterIP"] for svc in services
            if svc["spec"].get("clusterIP", "None") != "None"
        ]

    service_ips = get_service_ips()
    new_services = []  # type: List[str]
    # Ensure we have at least 8 ClusterIP Services:
    while len(service_ips) + len(new_services) < 8:
        new_service = random_name()
        runner.check_call([
            runner.kubectl_cmd, "create", "service", "clusterip", new_service,
            "--tcp=3000"
        ])
        new_services.append(new_service)
    if new_services:
        service_ips = get_service_ips()
    # Store Service CIDR:
    service_cidr = covering_cidr(service_ips)
    result.add(service_cidr)
    # Delete new services:
    for new_service in new_services:
        runner.check_call(
            [runner.kubectl_cmd, "delete", "service", new_service])

    if sys.stderr.isatty():
        print("Guessing that Services IP range is {}. Services started after"
              " this point will be inaccessible if are outside this range;"
              " restart telepresence if you can't access a "
              "new Service.\n".format(service_cidr),
              file=sys.stderr)

    return list(result)
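
_GET_IPS_PY, the script this example runs inside the pod, is a string constant defined elsewhere. A plausible reconstruction, assuming it takes hostnames on argv and prints a JSON list of resolved IPs (which is what the json.loads at the call site expects):

_GET_IPS_PY = """
import json, socket, sys
# Resolve with the pod's own DNS so cloud-internal addresses (e.g. an
# RDS endpoint) come back as the cluster sees them:
print(json.dumps([socket.gethostbyname(name) for name in sys.argv[1:]]))
"""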