Code example #1
def run_docker_command(
    runner: Runner,
    remote_info: RemoteInfo,
    args: argparse.Namespace,
    remote_env: Dict[str, str],
    subprocesses: Subprocesses,
    ssh: SSH,
    mount_dir: Optional[str],
) -> Popen:
    """
    --docker-run support.

    Connect using sshuttle running in a Docker container, and then run user
    container.

    :param args: Command-line args to telepresence binary.
    :param remote_env: Dictionary with environment on remote pod.
    :param mount_dir: Path to local directory where remote pod's filesystem is
        mounted.
    """
    # Update environment:
    remote_env["TELEPRESENCE_METHOD"] = "container"  # mostly just for tests :(

    # Extract --publish flags and add them to the sshuttle container, which is
    # responsible for defining the network entirely.
    docker_args, publish_args = parse_docker_args(args.docker_run)

    # Start the sshuttle container:
    name = random_name()
    config = {
        "port": ssh.port,
        "cidrs": get_proxy_cidrs(
            runner, args, remote_info, remote_env["KUBERNETES_SERVICE_HOST"]
        ),
        "expose_ports": list(args.expose.local_to_remote()),
    }
    if sys.platform == "darwin":
        config["ip"] = MAC_LOOPBACK_IP
    # Image already has tini init so doesn't need --init option:
    span = runner.span()
    subprocesses.append(
        runner.popen(
            docker_runify(
                publish_args + [
                    "--rm", "--privileged", "--name=" + name,
                    TELEPRESENCE_LOCAL_IMAGE, "proxy",
                    json.dumps(config)
                ]
            )
        ), make_docker_kill(runner, name)
    )

    # Wait for sshuttle to be running:
    while True:
        try:
            runner.check_call(
                docker_runify([
                    "--network=container:" + name, "--rm",
                    TELEPRESENCE_LOCAL_IMAGE, "wait"
                ])
            )
        except CalledProcessError as e:
            if e.returncode == 100:
                # We're good!
                break
            elif e.returncode == 125:
                # Docker failure, probably due to original container not
                # starting yet... so sleep and try again:
                sleep(1)
                continue
            else:
                raise
        else:
            raise RuntimeError(
                "Waiting container exited prematurely. File a bug, please!"
            )

    # Start the container specified by the user:
    container_name = random_name()
    docker_command = docker_runify([
        "--name=" + container_name,
        "--network=container:" + name,
    ])

    # Prepare container environment
    for key in remote_env:
        docker_command.append("-e={}".format(key))
    docker_env = os.environ.copy()
    docker_env.update(remote_env)

    if mount_dir:
        docker_command.append("--volume={}:{}".format(mount_dir, mount_dir))

    # Don't add --init if the user is doing something with it
    init_args = [
        arg for arg in docker_args
        if arg == "--init" or arg.startswith("--init=")
    ]
    # Older versions of Docker don't have --init:
    if not init_args and "--init" in runner.get_output([
        "docker", "run", "--help"
    ]):
        docker_command += ["--init"]
    docker_command += docker_args
    span.end()

    p = Popen(docker_command, env=docker_env)

    def terminate_if_alive():
        runner.write("Shutting down containers...\n")
        if p.poll() is None:
            runner.write("Killing local container...\n")
            make_docker_kill(runner, container_name)()

    atexit.register(terminate_if_alive)
    return p
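A note on parse_docker_args, which is used above but not included in this excerpt: the sketch below is only a guess at its behaviour, assuming it does nothing more than separate --publish/-p flags (which belong on the network container) from every other docker-run argument.

# Hypothetical sketch of parse_docker_args (the real helper is not shown here).
from typing import List, Tuple


def parse_docker_args(docker_run: List[str]) -> Tuple[List[str], List[str]]:
    docker_args = []   # arguments passed through to the user's container
    publish_args = []  # -p/--publish flags, moved to the network container
    i = 0
    while i < len(docker_run):
        arg = docker_run[i]
        if arg in ("-p", "--publish"):
            publish_args.append("--publish=" + docker_run[i + 1])
            i += 2
        elif arg.startswith("-p=") or arg.startswith("--publish="):
            publish_args.append("--publish=" + arg.split("=", 1)[1])
            i += 1
        else:
            docker_args.append(arg)
            i += 1
    return docker_args, publish_args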
Code example #2
File: container.py  Project: getbread/telepresence
def run_docker_command(
    runner: Runner,
    remote_info: RemoteInfo,
    docker_run: List[str],
    expose: PortMapping,
    to_pod: List[int],
    from_pod: List[int],
    container_to_host: PortMapping,
    remote_env: Dict[str, str],
    docker_host: Optional[str],
    ssh: SSH,
    mount_dir: Optional[str],
    use_docker_mount: Optional[bool],
    pod_info: Dict[str, str],
    exclude_proxy: List[str],
    host_ip: str,
) -> "subprocess.Popen[bytes]":
    """
    --docker-run support.

    Connect using sshuttle running in a Docker container, and then run user
    container.

    :param remote_env: Dictionary with environment on remote pod.
    :param mount_dir: Path to local directory where remote pod's filesystem is
        mounted.
    """
    # Update environment:
    remote_env["TELEPRESENCE_METHOD"] = "container"  # mostly just for tests :(

    # Extract --publish flags and add them to the sshuttle container, which is
    # responsible for defining the network entirely.
    docker_args, publish_args = parse_docker_args(docker_run)

    # Point a host port to the network container's sshd
    container_sshd_port = find_free_port()
    publish_args.append(
        "--publish=127.0.0.1:{}:38022/tcp".format(container_sshd_port))

    if not docker_host:
        docker_host = "127.0.0.1"
    local_ssh = SSH(runner, container_sshd_port, "root@{}".format(docker_host))

    if host_ip:
        exclude_proxy.append(host_ip)

    # Start the network (sshuttle) container:
    name = random_name()
    config = {
        "cidrs": ["0/0"],
        "expose_ports": list(expose.local_to_remote()),
        "to_pod": to_pod,
        "from_pod": from_pod,
        "exclude_proxy": exclude_proxy,
        "host_ip": host_ip,
    }
    dns_args = []
    if "hostname" in pod_info:
        dns_args.append("--hostname={}".format(pod_info["hostname"].strip()))
    if "hosts" in pod_info:
        dns_args.extend(parse_hosts_aliases(pod_info["hosts"]))
    if "resolv" in pod_info:
        dns_args.extend(parse_resolv_conf(pod_info["resolv"]))

    # Image already has tini init so doesn't need --init option:
    span = runner.span()
    runner.launch(
        "Network container",
        runner.docker("run", *publish_args, *dns_args, "--rm", "--privileged",
                      "--name=" + name, TELEPRESENCE_LOCAL_IMAGE, "proxy",
                      json.dumps(config)),
        killer=make_docker_kill(runner, name),
        keep_session=runner.sudo_for_docker,
    )

    # Set up ssh tunnel to allow the container to reach the cluster
    if not local_ssh.wait():
        raise RuntimeError("SSH to the network container failed to start.")

    container_forward_args = ["-R", "38023:127.0.0.1:{}".format(ssh.port)]
    for container_port, host_port in container_to_host.local_to_remote():
        if runner.chatty:
            runner.show("Forwarding container port {} to host port {}.".format(
                container_port, host_port))
        container_forward_args.extend(
            ["-R", "{}:127.0.0.1:{}".format(container_port, host_port)])
    runner.launch("Local SSH port forward",
                  local_ssh.bg_command(container_forward_args))

    # Wait for sshuttle to be running:
    sshuttle_ok = False
    for _ in runner.loop_until(120, 1):
        try:
            runner.check_call(
                runner.docker("run", "--network=container:" + name, "--rm",
                              TELEPRESENCE_LOCAL_IMAGE, "wait"))
        except subprocess.CalledProcessError as e:
            if e.returncode == 100:
                # We're good!
                sshuttle_ok = True
                break
            elif e.returncode == 125:
                # Docker failure, probably due to original container not
                # starting yet... so try again:
                continue
            else:
                raise
        else:
            raise RuntimeError(
                "Waiting container exited prematurely. File a bug, please!")
    if not sshuttle_ok:
        # This used to loop forever. Now we time out after two minutes.
        raise RuntimeError(
            "Waiting for network container timed out. File a bug, please!")

    # Start the container specified by the user:
    container_name = random_name()
    docker_command = runner.docker(
        "run",
        "--name=" + container_name,
        "--network=container:" + name,
        env=True,
    )

    # Prepare container environment
    for key in remote_env:
        docker_command.append("-e={}".format(key))
    docker_env = os.environ.copy()
    docker_env.update(remote_env)

    if mount_dir:
        if use_docker_mount:
            mount_volume = "telepresence-" + runner.session_id
        else:
            mount_volume = mount_dir

        docker_command.append("--volume={}:{}".format(mount_volume, mount_dir))

    # Don't add --init if the user is doing something with it
    init_args = [
        arg for arg in docker_args
        if arg == "--init" or arg.startswith("--init=")
    ]
    # Older versions of Docker don't have --init:
    docker_run_help = runner.get_output(["docker", "run", "--help"])
    if not init_args and "--init" in docker_run_help:
        docker_command += ["--init"]
    docker_command += docker_args
    span.end()

    runner.show("Setup complete. Launching your container.")
    process = subprocess.Popen(docker_command, env=docker_env)

    def terminate_if_alive() -> None:
        runner.write("Shutting down containers...\n")
        if process.poll() is None:
            runner.write("Killing local container...\n")
            make_docker_kill(runner, container_name)()

    runner.add_cleanup("Terminate local container", terminate_if_alive)
    return process
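The two-minute wait above relies on runner.loop_until, which is not shown in this excerpt. Below is a minimal sketch of a helper with the same shape; this is an assumption about its behaviour, not the real implementation.

import time
from typing import Iterator


def loop_until(timeout: float, interval: float) -> Iterator[int]:
    # Yield attempt numbers until `timeout` seconds pass, sleeping
    # `interval` seconds between attempts (sketch only).
    deadline = time.monotonic() + timeout
    attempt = 0
    while time.monotonic() < deadline:
        yield attempt
        attempt += 1
        time.sleep(interval)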
Code example #3
def connect(
    runner: Runner, remote_info: RemoteInfo, is_container_mode: bool,
    expose: PortMapping, to_pod: List[int], from_pod: List[int]
) -> Tuple[int, SSH]:
    """
    Start all the processes that handle remote proxying.

    Return (local port of SOCKS proxying tunnel, SSH instance).
    """
    span = runner.span()
    # Keep local copy of pod logs, for debugging purposes. Set is_critical to
    # False so logs failing doesn't bring down the Telepresence session.
    runner.launch(
        "kubectl logs",
        runner.kubectl(
            "logs", "-f", remote_info.pod_name, "--container",
            remote_info.container_name, "--tail=10"
        ),
        bufsize=0,
        is_critical=False,
    )

    ssh = SSH(runner, find_free_port())

    # forward remote port to here, by tunneling via remote SSH server:
    runner.launch(
        "kubectl port-forward",
        runner.kubectl(
            "port-forward", remote_info.pod_name, "{}:8022".format(ssh.port)
        )
    )

    if not ssh.wait():
        # Describe the pod; output goes to the logfile
        runner.write("SSH timed out. Pod info follows.")
        try:
            runner.check_call(
                runner.kubectl("describe", "pod", remote_info.pod_name),
                timeout=10
            )
        except Exception:
            pass
        raise RuntimeError("SSH to the cluster failed to start. See logfile.")

    # Create ssh tunnels. In the case of the container method, just show the
    # associated messages; the tunnels will be created in the network
    # container, where those messages are not visible to the user.
    expose_local_services(
        runner,
        ssh,
        list(expose.local_to_remote()),
        to_pod,
        from_pod,
        show_only=is_container_mode
    )

    # Start tunnels for the SOCKS proxy (local -> remote)
    # and the local server for the proxy to poll (remote -> local).
    socks_port = find_free_port()
    local_server_port = find_free_port()

    launch_local_server(runner, local_server_port)
    forward_args = [
        "-L127.0.0.1:{}:127.0.0.1:9050".format(socks_port),
        "-R9055:127.0.0.1:{}".format(local_server_port)
    ]
    runner.launch(
        "SSH port forward (socks and proxy poll)",
        ssh.bg_command(forward_args)
    )

    span.end()
    return socks_port, ssh
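find_free_port is used throughout these examples but not shown. A common way to implement it, given here only as an illustrative sketch, is to bind to port 0 and let the OS pick an ephemeral port.

import socket


def find_free_port() -> int:
    # Bind to port 0 so the OS assigns a free ephemeral port, then report it.
    # There is a small race: the port could be taken again before it is used.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.bind(("127.0.0.1", 0))
        return sock.getsockname()[1]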
Code example #4
def supplant_deployment(runner: Runner,
                        args: argparse.Namespace) -> Tuple[str, str, Dict]:
    """
    Swap out an existing Deployment, supplant method.

    Native Kubernetes version.

    Returns (Deployment name, unique K8s label, JSON of original container that
    was swapped out.)
    """
    span = runner.span()
    run_id = runner.session_id

    deployment_name, *container_name = args.swap_deployment.split(":", 1)
    if container_name:
        container_name = container_name[0]
    deployment_json = get_deployment_json(
        runner,
        deployment_name,
        args.context,
        args.namespace,
        "deployment",
    )

    # If no container name was given, just use the first one:
    if not container_name:
        container_name = deployment_json["spec"]["template"]["spec"][
            "containers"][0]["name"]

    # If we're on local VM we need to use different nameserver to
    # prevent infinite loops caused by sshuttle.
    add_custom_nameserver = args.method == "vpn-tcp" and args.in_local_vm

    if args.needs_root:
        image_name = TELEPRESENCE_REMOTE_IMAGE_PRIV
    else:
        image_name = TELEPRESENCE_REMOTE_IMAGE

    new_deployment_json, orig_container_json = new_swapped_deployment(
        deployment_json,
        container_name,
        run_id,
        image_name,
        add_custom_nameserver,
    )

    # Compute a new name that isn't too long, i.e. up to 63 characters.
    # Trim the original name until "tel-{run_id}-{pod_id}" fits.
    # https://github.com/kubernetes/community/blob/master/contributors/design-proposals/architecture/identifiers.md
    new_deployment_name = "{name:.{max_width}s}-{id}".format(
        name=deployment_json["metadata"]["name"],
        id=run_id,
        max_width=(50 - (len(run_id) + 1)))
    new_deployment_json["metadata"]["name"] = new_deployment_name

    def resize_original(replicas):
        """Resize the original deployment (kubectl scale)"""
        runner.check_call(
            runner.kubectl("scale", "deployment", deployment_name,
                           "--replicas={}".format(replicas)))

    def delete_new_deployment(check):
        """Delete the new (copied) deployment"""
        ignore = []
        if not check:
            ignore = ["--ignore-not-found"]
        runner.check_call(
            runner.kubectl("delete", "deployment", new_deployment_name,
                           *ignore))

    # Launch the new deployment
    runner.add_cleanup("Delete new deployment", delete_new_deployment, True)
    delete_new_deployment(False)  # Just in case
    runner.check_call(runner.kubectl("apply", "-f", "-"),
                      input=json.dumps(new_deployment_json).encode("utf-8"))

    # Scale down the original deployment
    runner.add_cleanup("Re-scale original deployment", resize_original,
                       deployment_json["spec"]["replicas"])
    resize_original(0)

    span.end()
    return new_deployment_name, run_id, orig_container_json
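The name-trimming line above uses a string precision in the format spec ("{name:.{max_width}s}") to truncate the original Deployment name. A small standalone illustration; the names below are made up:

run_id = "1a2b3c4d"
name = "a-very-long-deployment-name-that-would-otherwise-exceed-the-limit"
new_name = "{name:.{max_width}s}-{id}".format(
    name=name, id=run_id, max_width=50 - (len(run_id) + 1))
print(new_name)           # trimmed original name, a dash, then the run id
assert len(new_name) <= 50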
Code example #5
File: deployment.py  Project: tuapuikia/telepresence
def supplant_deployment(
    runner: Runner,
    deployment_arg: str,
    expose: PortMapping,
    custom_nameserver: Optional[str],
    service_account: str,
) -> Tuple[str, str]:
    """
    Swap out an existing Deployment, supplant method.

    Native Kubernetes version.

    Returns (Deployment name, unique K8s label).
    """
    span = runner.span()
    run_id = runner.session_id

    runner.show("Starting network proxy to cluster by swapping out "
                "Deployment {} with a proxy".format(deployment_arg))

    deployment, container = _split_deployment_container(deployment_arg)
    deployment_json = get_deployment_json(
        runner,
        deployment,
        "deployment",
    )
    container = _get_container_name(container, deployment_json)

    new_deployment_json = new_swapped_deployment(
        runner,
        deployment_json,
        container,
        run_id,
        expose,
        service_account,
        custom_nameserver,
    )

    # Compute a new name that isn't too long, i.e. up to 63 characters.
    # Trim the original name until "tel-{run_id}-{pod_id}" fits.
    # https://github.com/kubernetes/community/blob/master/contributors/design-proposals/architecture/identifiers.md
    new_deployment_name = "{name:.{max_width}s}-{id}".format(
        name=deployment_json["metadata"]["name"],
        id=run_id,
        max_width=(50 - (len(run_id) + 1)))
    new_deployment_json["metadata"]["name"] = new_deployment_name

    def resize_original(replicas):
        """Resize the original deployment (kubectl scale)"""
        runner.check_call(
            runner.kubectl("scale", "deployment", deployment,
                           "--replicas={}".format(replicas)))

    def delete_new_deployment(check):
        """Delete the new (copied) deployment"""
        ignore = []
        if not check:
            ignore = ["--ignore-not-found"]
        else:
            runner.show(
                "Swapping Deployment {} back to its original state".format(
                    deployment_arg))
        runner.check_call(
            runner.kubectl("delete", "deployment", new_deployment_name,
                           *ignore))

    # Launch the new deployment
    runner.add_cleanup("Delete new deployment", delete_new_deployment, True)
    delete_new_deployment(False)  # Just in case
    runner.check_call(runner.kubectl("apply", "-f", "-"),
                      input=json.dumps(new_deployment_json).encode("utf-8"))

    # Scale down the original deployment
    runner.add_cleanup("Re-scale original deployment", resize_original,
                       deployment_json["spec"]["replicas"])
    resize_original(0)

    span.end()
    return new_deployment_name, run_id
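_split_deployment_container is not included in this excerpt; judging from how it is called, it splits a "deployment:container" argument into its two parts. A hypothetical sketch:

from typing import Optional, Tuple


def _split_deployment_container(arg: str) -> Tuple[str, Optional[str]]:
    # "web:nginx" -> ("web", "nginx"); "web" -> ("web", None)
    deployment, _, container = arg.partition(":")
    return deployment, container or None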
Code example #6
File: deployment.py  Project: tuapuikia/telepresence
def swap_deployment_openshift(
    runner: Runner,
    deployment_arg: str,
    expose: PortMapping,
    custom_nameserver: Optional[str],
    service_account: str,
) -> Tuple[str, str]:
    """
    Swap out an existing DeploymentConfig and clear any registered triggers;
    otherwise the replacement telepresence pod would be immediately swapped
    back to the original one by the image change trigger.

    Returns (Deployment name, unique K8s label).

    """

    run_id = runner.session_id
    deployment, container = _split_deployment_container(deployment_arg)

    dc_json_with_triggers = json.loads(
        runner.get_output(
            runner.kubectl("get", "dc/{}".format(deployment), "-o", "json",
                           "--export")))

    runner.check_call(
        runner.kubectl("set", "triggers", "dc/{}".format(deployment),
                       "--remove-all"))

    dc_json = json.loads(
        runner.get_output(
            runner.kubectl("get", "dc/{}".format(deployment), "-o", "json",
                           "--export")))

    def apply_json(json_config):
        runner.check_call(runner.kubectl("replace", "-f", "-"),
                          input=json.dumps(json_config).encode("utf-8"))
        # Now that we've updated the deployment config,
        # let's rollout latest version to apply the changes
        runner.check_call(
            runner.kubectl("rollout", "latest", "dc/{}".format(deployment)))

        runner.check_call(
            runner.kubectl("rollout", "status", "-w",
                           "dc/{}".format(deployment)))

    runner.add_cleanup("Restore original deployment config", apply_json,
                       dc_json_with_triggers)

    container = _get_container_name(container, dc_json)

    new_dc_json = new_swapped_deployment(
        runner,
        dc_json,
        container,
        run_id,
        expose,
        service_account,
        custom_nameserver,
    )

    apply_json(new_dc_json)

    return deployment, run_id
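_get_container_name is also not shown here; based on the inline fallback logic visible in code example #4, it presumably returns the first container in the pod template when no container name was given. A sketch under that assumption:

from typing import Any, Dict, Optional


def _get_container_name(container: Optional[str],
                        deployment_json: Dict[str, Any]) -> str:
    if container:
        return container
    # Default to the first container defined in the pod template.
    containers = deployment_json["spec"]["template"]["spec"]["containers"]
    return containers[0]["name"]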
Code example #7
File: deployment.py  Project: tuapuikia/telepresence
def create_new_deployment(
    runner: Runner,
    deployment_arg: str,
    expose: PortMapping,
    custom_nameserver: Optional[str],
    service_account: str,
) -> Tuple[str, str]:
    """
    Create a new Deployment, return its name and Kubernetes label.
    """
    span = runner.span()
    run_id = runner.session_id
    runner.show("Starting network proxy to cluster using "
                "new Deployment {}".format(deployment_arg))

    def remove_existing_deployment(quiet=False):
        if not quiet:
            runner.show("Cleaning up Deployment {}".format(deployment_arg))
        runner.check_call(
            runner.kubectl(
                "delete",
                "--ignore-not-found",
                "svc,deploy",
                "--selector=telepresence=" + run_id,
            ))

    runner.add_cleanup("Delete new deployment", remove_existing_deployment)
    remove_existing_deployment(quiet=True)
    # Define the deployment as yaml
    env = {}
    if custom_nameserver:
        # If we're on local VM we need to use different nameserver to prevent
        # infinite loops caused by sshuttle:
        env["TELEPRESENCE_NAMESERVER"] = custom_nameserver
    # Create the deployment via yaml
    deployment_yaml = _get_deployment_yaml(
        deployment_arg,
        run_id,
        get_image_name(runner, expose),
        service_account,
        env,
    )
    try:
        runner.check_call(runner.kubectl("create", "-f", "-"),
                          input=deployment_yaml.encode("utf-8"))
    except CalledProcessError as exc:
        raise runner.fail("Failed to create deployment {}:\n{}".format(
            deployment_arg, exc.stderr))
    # Expose the deployment with a service
    if expose.remote():
        command = [
            "expose",
            "deployment",
            deployment_arg,
        ]
        # Provide a stable argument ordering.  Reverse it because that
        # happens to make some current tests happy but in the long run
        # that's totally arbitrary and doesn't need to be maintained.
        # See issue 494.
        for port in sorted(expose.remote(), reverse=True):
            command.append("--port={}".format(port))
        try:
            runner.check_call(runner.kubectl(*command))
        except CalledProcessError as exc:
            raise runner.fail("Failed to expose deployment {}:\n{}".format(
                deployment_arg, exc.stderr))
    span.end()
    return deployment_arg, run_id
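The reverse-sorted loop above exists only to make the generated kubectl expose command deterministic. A tiny standalone illustration; the deployment name here is made up:

expose_remote = {8080, 443, 9000}
command = ["expose", "deployment", "example-deployment"]
for port in sorted(expose_remote, reverse=True):
    command.append("--port={}".format(port))
print(command)
# ['expose', 'deployment', 'example-deployment',
#  '--port=9000', '--port=8080', '--port=443']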
Code example #8
File: main.py  Project: plunix/telepresence
def connect(runner: Runner, remote_info: RemoteInfo,
            cmdline_args: argparse.Namespace) -> Tuple[Subprocesses, int, SSH]:
    """
    Start all the processes that handle remote proxying.

    Return (Subprocesses, local port of SOCKS proxying tunnel, SSH instance).
    """
    processes = Subprocesses()
    # Keep local copy of pod logs, for debugging purposes:
    processes.append(
        runner.popen(
            runner.kubectl(cmdline_args.context, remote_info.namespace, [
                "logs", "-f", remote_info.pod_name, "--container",
                remote_info.container_name
            ]),
            bufsize=0,
        ))

    ssh = SSH(runner, find_free_port())

    # forward remote port to here, by tunneling via remote SSH server:
    processes.append(
        runner.popen(
            runner.kubectl(cmdline_args.context, remote_info.namespace, [
                "port-forward", remote_info.pod_name, "{}:8022".format(
                    ssh.port)
            ])))
    if cmdline_args.method == "container":
        # kubectl port-forward currently only listens on loopback. So we
        # port-forward from the docker0 interface on Linux, and from the lo0
        # alias we added on OS X, to loopback (until we can use a kubectl
        # port-forward option to listen on docker0 -
        # https://github.com/kubernetes/kubernetes/pull/46517, or until all our
        # users have the latest Docker for Mac, which has a nicer solution -
        # https://github.com/datawire/telepresence/issues/224).
        if sys.platform == "linux":

            # If 'ip addr' is available, use it; if not, fall back to ifconfig.
            if which("ip"):
                docker_interfaces = re.findall(
                    r"(\d+\.\d+\.\d+\.\d+)",
                    runner.get_output(["ip", "addr", "show", "dev",
                                       "docker0"]))
            elif which("ifconfig"):
                docker_interfaces = re.findall(
                    r"(\d+\.\d+\.\d+\.\d+)",
                    runner.get_output(["ifconfig", "docker0"]))
            else:
                raise SystemExit("'ip addr' nor 'ifconfig' available")

            if len(docker_interfaces) == 0:
                raise SystemExit("No interface for docker found")

            docker_interface = docker_interfaces[0]

        else:
            # The way to get routing from container to host is via an alias on
            # lo0 (https://docs.docker.com/docker-for-mac/networking/). We use
            # an IP range that is assigned for testing network devices and
            # therefore shouldn't conflict with real IPs or local private
            # networks (https://tools.ietf.org/html/rfc6890).
            runner.check_call(
                ["sudo", "ifconfig", "lo0", "alias", MAC_LOOPBACK_IP])
            atexit.register(
                runner.check_call,
                ["sudo", "ifconfig", "lo0", "-alias", MAC_LOOPBACK_IP])
            docker_interface = MAC_LOOPBACK_IP
        processes.append(
            runner.popen([
                "socat", "TCP4-LISTEN:{},bind={},reuseaddr,fork".format(
                    ssh.port,
                    docker_interface,
                ), "TCP4:127.0.0.1:{}".format(ssh.port)
            ]))

    ssh.wait()

    # In Docker mode this happens inside the local Docker container:
    if cmdline_args.method != "container":
        expose_local_services(
            processes,
            ssh,
            cmdline_args.expose.local_to_remote(),
        )

    socks_port = find_free_port()
    if cmdline_args.method == "inject-tcp":
        # start tunnel to remote SOCKS proxy:
        processes.append(
            ssh.popen(["-L",
                       "127.0.0.1:{}:127.0.0.1:9050".format(socks_port)]), )

    return processes, socks_port, ssh
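The docker0 address detection above simply pulls dotted-quad strings out of the command output. A standalone illustration with made-up "ip addr" output; note the regex also matches the broadcast address, which is why only the first match is used:

import re

sample = (
    "4: docker0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500\n"
    "    inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0\n"
)
addresses = re.findall(r"(\d+\.\d+\.\d+\.\d+)", sample)
print(addresses)     # ['172.17.0.1', '172.17.255.255']
print(addresses[0])  # 172.17.0.1 -- the interface address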
Code example #9
def get_proxy_cidrs(
    runner: Runner, args: argparse.Namespace, remote_info: RemoteInfo,
    service_address: str
) -> List[str]:
    """
    Figure out which IP ranges to route via sshuttle.

    1. Given the IP address of a service, figure out IP ranges used by
       Kubernetes services.
    2. Extract pod ranges from the API.
    3. Add any hostnames/IPs given by the user via --also-proxy.

    See https://github.com/kubernetes/kubernetes/issues/25533 for eventual
    long-term solution for service CIDR.
    """

    # Run script to convert --also-proxy hostnames to IPs, doing name
    # resolution inside Kubernetes, so we get cloud-local IP addresses for
    # cloud resources:
    def resolve_ips():
        return json.loads(
            runner.get_kubectl(
                args.context, args.namespace, [
                    "exec", "--container=" + remote_info.container_name,
                    remote_info.pod_name, "--", "python3", "-c", _GET_IPS_PY
                ] + args.also_proxy
            )
        )

    try:
        result = set([ip + "/32" for ip in resolve_ips()])
    except CalledProcessError as e:
        runner.write(str(e))
        raise SystemExit(
            "We failed to do a DNS lookup inside Kubernetes for the "
            "hostname(s) you listed in "
            "--also-proxy ({}). Maybe you mistyped one of them?".format(
                ", ".join(args.also_proxy)
            )
        )

    # Get pod IPs from nodes if possible, otherwise use pod IPs as heuristic:
    try:
        nodes = json.loads(
            runner.get_output([
                runner.kubectl_cmd, "get", "nodes", "-o", "json"
            ])
        )["items"]
    except CalledProcessError as e:
        runner.write("Failed to get nodes: {}".format(e))
        # Fallback to using pod IPs:
        pods = json.loads(
            runner.get_output([
                runner.kubectl_cmd, "get", "pods", "-o", "json"
            ])
        )["items"]
        pod_ips = []
        for pod in pods:
            try:
                pod_ips.append(pod["status"]["podIP"])
            except KeyError:
                # Apparently a problem on OpenShift
                pass
        if pod_ips:
            result.add(covering_cidr(pod_ips))
    else:
        for node in nodes:
            pod_cidr = node["spec"].get("podCIDR")
            if pod_cidr is not None:
                result.add(pod_cidr)

    # Add the service IP range, based on the heuristic of constructing a CIDR
    # from existing Service IPs. We create more Services if there are fewer
    # than 8, to ensure some coverage of the IP range:
    def get_service_ips():
        services = json.loads(
            runner.get_output([
                runner.kubectl_cmd, "get", "services", "-o", "json"
            ])
        )["items"]
        # FIXME: Add test(s) here so we don't crash on, e.g., ExternalName
        return [
            svc["spec"]["clusterIP"] for svc in services
            if svc["spec"].get("clusterIP", "None") != "None"
        ]

    service_ips = get_service_ips()
    new_services = []  # type: List[str]
    # Ensure we have at least 8 ClusterIP Services:
    while len(service_ips) + len(new_services) < 8:
        new_service = random_name()
        runner.check_call([
            runner.kubectl_cmd, "create", "service", "clusterip", new_service,
            "--tcp=3000"
        ])
        new_services.append(new_service)
    if new_services:
        service_ips = get_service_ips()
    # Store Service CIDR:
    service_cidr = covering_cidr(service_ips)
    result.add(service_cidr)
    # Delete new services:
    for new_service in new_services:
        runner.check_call([
            runner.kubectl_cmd, "delete", "service", new_service
        ])

    if sys.stderr.isatty():
        print(
            "Guessing that Services IP range is {}. Services started after"
            " this point will be inaccessible if are outside this range;"
            " restart telepresence if you can't access a "
            "new Service.\n".format(service_cidr),
            file=sys.stderr
        )

    return list(result)
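covering_cidr is not part of this excerpt. One possible implementation, shown only as a sketch and not the project's actual code, widens a prefix until the resulting network contains every address in the list:

import ipaddress
from typing import List


def covering_cidr(ips: List[str]) -> str:
    # Sketch only: start from the lowest address and widen the prefix until
    # the network covers all given addresses.
    addresses = [ipaddress.IPv4Address(ip) for ip in ips]
    lowest = min(addresses)
    for prefix in range(32, 0, -1):
        network = ipaddress.IPv4Network(
            "{}/{}".format(lowest, prefix), strict=False)
        if all(addr in network for addr in addresses):
            return str(network)
    return "0.0.0.0/0"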
Code example #10
def setup(runner: Runner, args):
    """
    Determine how the user wants to set up the proxy in the cluster.
    """

    # Figure out if we need capability that allows for ports < 1024:
    image_name = TELEPRESENCE_REMOTE_IMAGE
    if any([p < 1024 for p in args.expose.remote()]):
        if runner.kubectl.command == "oc":
            # OpenShift doesn't support running as root:
            raise runner.fail("OpenShift does not support ports <1024.")
        image_name = TELEPRESENCE_REMOTE_IMAGE_PRIV

    # Figure out which operation the user wants
    # Handle --deployment case
    deployment_arg = args.deployment
    operation = existing_deployment
    args.operation = "deployment"

    if args.new_deployment is not None:
        # This implies --new-deployment
        deployment_arg = args.new_deployment
        operation = create_new_deployment
        args.operation = "new_deployment"

    deployment_type = "deployment"
    if runner.kubectl.command == "oc":
        # OpenShift Origin might be using DeploymentConfig instead
        if args.swap_deployment:
            try:
                runner.check_call(
                    runner.kubectl("get",
                                   "dc/{}".format(args.swap_deployment)), )
                deployment_type = "deploymentconfig"
            except CalledProcessError as exc:
                runner.show(
                    "Failed to find OpenShift deploymentconfig {}. "
                    "Will try regular k8s deployment. Reason:\n{}".format(
                        deployment_arg, exc.stderr))

    if args.swap_deployment is not None:
        # This implies --swap-deployment
        deployment_arg = args.swap_deployment
        if runner.kubectl.command == "oc" \
                and deployment_type == "deploymentconfig":
            operation = swap_deployment_openshift
        else:
            operation = supplant_deployment
        args.operation = "swap_deployment"

    # minikube/minishift break DNS because DNS gets captured, sent to minikube,
    # which sends it back to the DNS server set by host, resulting in a DNS
    # loop... We've fixed that for most cases by setting a distinct name server
    # for the proxy to use when making a new proxy pod, but that does not work
    # for --deployment.
    add_custom_ns = args.method == "vpn-tcp" and runner.kubectl.in_local_vm
    if add_custom_ns and args.operation == "deployment":
        raise runner.fail(
            "vpn-tcp method doesn't work with minikube/minishift when"
            " using --deployment. Use --swap-deployment or"
            " --new-deployment instead.")

    def start_proxy(runner_: Runner) -> RemoteInfo:
        tel_deployment, run_id = operation(runner_, deployment_arg, image_name,
                                           args.expose, add_custom_ns)
        remote_info = get_remote_info(
            runner,
            tel_deployment,
            deployment_type,
            run_id=run_id,
        )
        return remote_info

    return start_proxy
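The dispatch pattern in setup() — pick one operation callable up front, then run it later inside the returned closure — is easy to miss in the flag handling. Below is a stripped-down, self-contained illustration of the same idea; all names here are invented for the example:

from typing import Callable


def use_existing(name: str) -> str:
    return "using existing deployment " + name


def create_new(name: str) -> str:
    return "created new deployment " + name


def swap_out(name: str) -> str:
    return "swapped out deployment " + name


def choose_operation(new: bool, swap: bool) -> Callable[[str], str]:
    # Decide what to do now; actually do it later.
    if swap:
        return swap_out
    if new:
        return create_new
    return use_existing


operation = choose_operation(new=False, swap=True)
print(operation("my-deployment"))  # swapped out deployment my-deployment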
Code example #11
def run_docker_command(
    runner: Runner,
    remote_info: RemoteInfo,
    args: argparse.Namespace,
    remote_env: Dict[str, str],
    subprocesses: Subprocesses,
    ssh: SSH,
) -> None:
    """
    --docker-run support.

    Connect using sshuttle running in a Docker container, and then run user
    container.

    :param args: Command-line args to telepresence binary.
    :param remote_env: Dictionary with environment on remote pod.
    """
    # Mount remote filesystem. We allow all users if we're using Docker because
    # we don't know what uid the Docker container will use:
    mount_dir, mount_cleanup = mount_remote_volumes(
        runner,
        remote_info,
        ssh,
        True,
    )

    # Update environment:
    remote_env["TELEPRESENCE_ROOT"] = mount_dir
    remote_env["TELEPRESENCE_METHOD"] = "container"  # mostly just for tests :(

    # Extract --publish flags and add them to the sshuttle container, which is
    # responsible for defining the network entirely.
    docker_args, publish_args = parse_docker_args(args.docker_run)

    # Start the sshuttle container:
    name = random_name()
    config = {
        "port": ssh.port,
        "cidrs": get_proxy_cidrs(
            runner, args, remote_info, remote_env["KUBERNETES_SERVICE_HOST"]
        ),
        "expose_ports": list(args.expose.local_to_remote()),
    }
    if sys.platform == "darwin":
        config["ip"] = MAC_LOOPBACK_IP
    # Image already has tini init so doesn't need --init option:
    subprocesses.append(
        runner.popen(
            docker_runify(publish_args + [
                "--rm", "--privileged", "--name=" +
                name, TELEPRESENCE_LOCAL_IMAGE, "proxy",
                json.dumps(config)
            ])), make_docker_kill(runner, name))

    # Write out env file:
    with NamedTemporaryFile("w", delete=False) as envfile:
        for key, value in remote_env.items():
            envfile.write("{}={}\n".format(key, value))
    atexit.register(os.remove, envfile.name)

    # Wait for sshuttle to be running:
    while True:
        try:
            runner.check_call(
                docker_runify([
                    "--network=container:" + name, "--rm",
                    TELEPRESENCE_LOCAL_IMAGE, "wait"
                ]))
        except CalledProcessError as e:
            if e.returncode == 100:
                # We're good!
                break
            elif e.returncode == 125:
                # Docker failure, probably due to original container not
                # starting yet... so sleep and try again:
                sleep(1)
                continue
            else:
                raise
        else:
            raise RuntimeError(
                "Waiting container exited prematurely. File a bug, please!")

    # Start the container specified by the user:
    container_name = random_name()
    docker_command = docker_runify([
        "--volume={}:{}".format(mount_dir, mount_dir),
        "--name=" + container_name,
        "--network=container:" + name,
        "--env-file",
        envfile.name,
    ])
    # Older versions of Docker don't have --init:
    if "--init" in runner.get_output(["docker", "run", "--help"]):
        docker_command += ["--init"]
    docker_command += docker_args
    p = Popen(docker_command)

    def terminate_if_alive():
        runner.write("Shutting down containers...\n")
        if p.poll() is None:
            runner.write("Killing local container...\n")
            make_docker_kill(runner, container_name)()

        mount_cleanup()

    atexit.register(terminate_if_alive)
    wait_for_exit(runner, p, subprocesses)
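The env file written above uses the plain KEY=value format consumed by docker run --env-file. A small standalone illustration with made-up variable values; note that values containing newlines cannot be represented in this format:

import os
from tempfile import NamedTemporaryFile

remote_env = {"TELEPRESENCE_METHOD": "container", "EXAMPLE_SERVICE_HOST": "10.0.0.1"}
with NamedTemporaryFile("w", delete=False) as envfile:
    for key, value in remote_env.items():
        envfile.write("{}={}\n".format(key, value))
print(envfile.name)  # pass to docker: --env-file <this path>
os.remove(envfile.name)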