def create_new_deployment(
    runner: Runner, deployment_arg: str, image_name: str, expose: PortMapping,
    add_custom_nameserver: bool
) -> Tuple[str, str]:
    """
    Create a new Deployment, return its name and Kubernetes label.
    """
    span = runner.span()
    run_id = runner.session_id
    runner.show(
        "Starting network proxy to cluster using "
        "new Deployment {}".format(deployment_arg)
    )

    def remove_existing_deployment(quiet=False):
        if not quiet:
            runner.show("Cleaning up Deployment {}".format(deployment_arg))
        runner.check_call(
            runner.kubectl(
                "delete",
                "--ignore-not-found",
                "svc,deploy",
                "--selector=telepresence=" + run_id,
            )
        )

    runner.add_cleanup("Delete new deployment", remove_existing_deployment)
    remove_existing_deployment(quiet=True)
    command = [
        "run",  # This will result in using Deployment:
        "--restart=Always",
        "--limits=cpu=100m,memory=256Mi",
        "--requests=cpu=25m,memory=64Mi",
        deployment_arg,
        "--image=" + image_name,
        "--labels=telepresence=" + run_id,
    ]
    # Provide a stable argument ordering.  Reverse it because that happens to
    # make some current tests happy but in the long run that's totally
    # arbitrary and doesn't need to be maintained.  See issue 494.
    for port in sorted(expose.remote(), reverse=True):
        command.append("--port={}".format(port))
    if expose.remote():
        command.append("--expose")
    # If we're on local VM we need to use different nameserver to prevent
    # infinite loops caused by sshuttle:
    if add_custom_nameserver:
        command.append(
            "--env=TELEPRESENCE_NAMESERVER=" + get_alternate_nameserver()
        )
    try:
        runner.check_call(runner.kubectl(command))
    except CalledProcessError as exc:
        raise runner.fail(
            "Failed to create deployment {}:\n{}".format(
                deployment_arg, exc.stderr
            )
        )
    span.end()
    return deployment_arg, run_id
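
For illustration, a minimal, self-contained sketch of the flag ordering the loop above produces (the port set, deployment name, and image are made up):

# Sketch: reverse-sorted --port flags, as built above (illustrative values).
ports = {8080, 443, 9000}
command = ["run", "--restart=Always", "my-deployment", "--image=example/image"]
for port in sorted(ports, reverse=True):
    command.append("--port={}".format(port))
if ports:
    command.append("--expose")
print(command)
# ['run', '--restart=Always', 'my-deployment', '--image=example/image',
#  '--port=9000', '--port=8080', '--port=443', '--expose']
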
Example #2
def set_expose_ports(expose: PortMapping, pod: Manifest,
                     container_name: str) -> None:
    """Merge container ports into the expose list."""
    pod_spec = pod["spec"]  # type: Manifest
    container = find_container(pod_spec, container_name)
    expose.merge_automatic_ports([
        port["containerPort"] for port in container.get("ports", [])
        if port["protocol"] == "TCP"
    ])
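
A standalone sketch of the extraction above, using a hypothetical pod manifest, showing that only TCP ports are merged:

# Sketch: which containerPorts the comprehension picks up (made-up manifest).
pod_spec = {
    "containers": [{
        "name": "web",
        "ports": [
            {"containerPort": 8080, "protocol": "TCP"},
            {"containerPort": 5353, "protocol": "UDP"},
        ],
    }]
}
container = pod_spec["containers"][0]
tcp_ports = [
    port["containerPort"] for port in container.get("ports", [])
    if port["protocol"] == "TCP"
]
print(tcp_ports)  # [8080]
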
Example #3
def get_image_name(runner: Runner, expose: PortMapping) -> str:
    """
    Return the correct Telepresence image name (OpenShift-specific, privileged,
    or not) accounting for the existence of an OpenShift cluster, user
    overrides, and the use of privileged ports (< 1024).
    """
    ocp_env_name = "TELEPRESENCE_USE_OCP_IMAGE"
    ocp_env_value = os.environ.get(ocp_env_name, "auto")
    ocp_env = ocp_env_value.lower()
    if ocp_env in ("true", "on", "yes", "1", "always"):
        return TELEPRESENCE_REMOTE_IMAGE_OCP

    ocp_image_allowed = True
    if ocp_env in ("false", "off", "no", "0", "never"):
        ocp_image_allowed = False
    elif ocp_env not in ("auto", "automatic", "default"):
        runner.show(
            "\nWARNING: Ignoring {} environment variable with value {!r}. "
            "Accepted values are YES or NO or AUTO. "
            "Using AUTO.".format(ocp_env_name, ocp_env_value)
        )

    if ocp_image_allowed and runner.kubectl.cluster_is_openshift:
        return TELEPRESENCE_REMOTE_IMAGE_OCP
    if expose.has_privileged_ports():
        return TELEPRESENCE_REMOTE_IMAGE_PRIV
    return TELEPRESENCE_REMOTE_IMAGE
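
The precedence is: an explicit TELEPRESENCE_USE_OCP_IMAGE override wins, then OpenShift detection (unless the override forbids it), then privileged ports. A pure-function sketch of that decision, with illustrative names:

# Sketch of the image-selection precedence (names are illustrative).
def pick_image(ocp_env: str, is_openshift: bool, has_priv_ports: bool) -> str:
    ocp = ocp_env.lower()
    if ocp in ("true", "on", "yes", "1", "always"):
        return "ocp"                      # forced on
    allowed = ocp not in ("false", "off", "no", "0", "never")
    if allowed and is_openshift:
        return "ocp"                      # auto-detected OpenShift
    return "privileged" if has_priv_ports else "standard"

assert pick_image("auto", True, False) == "ocp"
assert pick_image("never", True, True) == "privileged"
assert pick_image("auto", False, False) == "standard"
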
Example #4
def get_image_name(expose: PortMapping) -> str:
    """
    Return the correct Telepresence image name (privileged or not) depending on
    whether any privileged ports (< 1024) are used.
    """
    if expose.has_privileged_ports():
        return TELEPRESENCE_REMOTE_IMAGE_PRIV
    return TELEPRESENCE_REMOTE_IMAGE
Example #5
def get_image_name(runner: Runner, expose: PortMapping) -> str:
    """
    Return the correct Telepresence image name (privileged or not) depending on
    whether any privileged ports (< 1024) are used.
    """
    if runner.kubectl.cluster_is_openshift:
        return TELEPRESENCE_REMOTE_IMAGE_OCP
    if expose.has_privileged_ports():
        return TELEPRESENCE_REMOTE_IMAGE_PRIV
    return TELEPRESENCE_REMOTE_IMAGE
Example #6
def connect(runner: Runner, remote_info: RemoteInfo, is_container_mode: bool,
            expose: PortMapping) -> Tuple[int, SSH]:
    """
    Start all the processes that handle remote proxying.

    Return (local port of SOCKS proxying tunnel, SSH instance).
    """
    span = runner.span()
    # Keep a local copy of the pod's logs for debugging purposes. Set
    # is_critical to False so a log-streaming failure doesn't bring down the
    # Telepresence session.
    runner.launch(
        "kubectl logs",
        runner.kubectl("logs", "-f", remote_info.pod_name, "--container",
                       remote_info.container_name, "--tail=10"),
        bufsize=0,
        is_critical=False,
    )

    ssh = SSH(runner, find_free_port())

    # Make the pod's SSH server (remote port 8022) reachable on a local port:
    runner.launch(
        "kubectl port-forward",
        runner.kubectl("port-forward", remote_info.pod_name,
                       "{}:8022".format(ssh.port)))

    if not ssh.wait():
        raise RuntimeError("SSH to the cluster failed to start.")

    # Create ssh tunnels. In the case of the container method, just show the
    # associated messages; the tunnels will be created in the network
    # container, where those messages are not visible to the user.
    expose_local_services(runner,
                          ssh,
                          list(expose.local_to_remote()),
                          show_only=is_container_mode)

    # Start tunnels for the SOCKS proxy (local -> remote)
    # and the local server for the proxy to poll (remote -> local).
    socks_port = find_free_port()
    local_server_port = find_free_port()

    launch_local_server(runner, local_server_port)
    forward_args = [
        # Local SOCKS entry point, tunneled to the proxy's SOCKS server (9050):
        "-L127.0.0.1:{}:127.0.0.1:9050".format(socks_port),
        # Remote port 9055, tunneled back to the local server the proxy polls:
        "-R9055:127.0.0.1:{}".format(local_server_port)
    ]
    runner.launch("SSH port forward (socks and proxy poll)",
                  ssh.bg_command(forward_args))

    span.end()
    return socks_port, ssh
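
Several of these examples rely on find_free_port. A common way to implement it, shown here as a sketch (the real helper may differ):

# Sketch of a find_free_port-style helper: bind to port 0 and let the
# kernel pick an unused port.
import socket

def find_free_port_sketch() -> int:
    with socket.socket() as s:
        s.bind(("127.0.0.1", 0))
        return s.getsockname()[1]

print(find_free_port_sketch())  # e.g. 52814
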
Example #7
def connect(runner: Runner, remote_info: RemoteInfo, is_container_mode: bool,
            expose: PortMapping) -> Tuple[int, SSH]:
    """
    Start all the processes that handle remote proxying.

    Return (local port of SOCKS proxying tunnel, SSH instance).
    """
    span = runner.span()
    # Keep local copy of pod logs, for debugging purposes:
    runner.launch(
        "kubectl logs",
        runner.kubectl("logs", "-f", remote_info.pod_name, "--container",
                       remote_info.container_name, "--tail=10"),
        bufsize=0,
    )

    ssh = SSH(runner, find_free_port())

    # Make the pod's SSH server (remote port 8022) reachable on a local port:
    runner.launch(
        "kubectl port-forward",
        runner.kubectl("port-forward", remote_info.pod_name,
                       "{}:8022".format(ssh.port)))

    ssh.wait()

    # In Docker mode this happens inside the local Docker container:
    if not is_container_mode:
        expose_local_services(
            runner,
            ssh,
            list(expose.local_to_remote()),
        )

    # Start tunnels for the SOCKS proxy (local -> remote)
    # and the local server for the proxy to poll (remote -> local).
    socks_port = find_free_port()
    local_server_port = find_free_port()

    launch_local_server(runner, local_server_port)
    forward_args = [
        "-L127.0.0.1:{}:127.0.0.1:9050".format(socks_port),
        "-R9055:127.0.0.1:{}".format(local_server_port)
    ]
    runner.launch("SSH port forward (socks and proxy poll)",
                  ssh.bg_command(forward_args))

    span.end()
    return socks_port, ssh
Example #8
def connect(runner: Runner, remote_info: RemoteInfo, is_container_mode: bool,
            expose: PortMapping) -> Tuple[int, SSH]:
    """
    Start all the processes that handle remote proxying.

    Return (local port of SOCKS proxying tunnel, SSH instance).
    """
    span = runner.span()
    # Keep local copy of pod logs, for debugging purposes:
    runner.launch(
        "kubectl logs",
        runner.kubectl("logs", "-f", remote_info.pod_name, "--container",
                       remote_info.container_name),
        bufsize=0,
    )

    ssh = SSH(runner, find_free_port())

    # Make the pod's SSH server (remote port 8022) reachable on a local port:
    runner.launch(
        "kubectl port-forward",
        runner.kubectl("port-forward", remote_info.pod_name,
                       "{}:8022".format(ssh.port)))
    if is_container_mode:
        # kubectl port-forward currently only listens on loopback, so we
        # port-forward from the docker0 interface on Linux, and from the lo0
        # alias we added on OS X, to loopback (until we can use a kubectl
        # port-forward option to listen on docker0 -
        # https://github.com/kubernetes/kubernetes/pull/46517 - or all our
        # users have the latest version of Docker for Mac, which has a nicer
        # solution - https://github.com/datawire/telepresence/issues/224).
        if runner.platform == "linux":

            # If "ip addr" is available, use it; otherwise fall back to
            # ifconfig.
            missing = runner.depend(["ip", "ifconfig"])
            if "ip" not in missing:
                docker_interfaces = re.findall(
                    r"(\d+\.\d+\.\d+\.\d+)",
                    runner.get_output(["ip", "addr", "show", "dev",
                                       "docker0"]))
            elif "ifconfig" not in missing:
                docker_interfaces = re.findall(
                    r"(\d+\.\d+\.\d+\.\d+)",
                    runner.get_output(["ifconfig", "docker0"]))
            else:
                raise runner.fail(
                    """At least one of "ip addr" or "ifconfig" must be """ +
                    "available to retrieve Docker interface info.")

            if len(docker_interfaces) == 0:
                raise runner.fail("No interface for docker found")

            docker_interface = docker_interfaces[0]

        else:
            # The way to get routing from container to host is via an alias on
            # lo0 (https://docs.docker.com/docker-for-mac/networking/). We use
            # an IP range that is assigned for testing network devices and
            # therefore shouldn't conflict with real IPs or local private
            # networks (https://tools.ietf.org/html/rfc6890).
            runner.check_call(
                ["sudo", "ifconfig", "lo0", "alias", MAC_LOOPBACK_IP])
            runner.add_cleanup(
                "Mac Loopback", runner.check_call,
                ["sudo", "ifconfig", "lo0", "-alias", MAC_LOOPBACK_IP])
            docker_interface = MAC_LOOPBACK_IP

        runner.launch("socat for docker", [
            "socat", "TCP4-LISTEN:{},bind={},reuseaddr,fork".format(
                ssh.port,
                docker_interface,
            ), "TCP4:127.0.0.1:{}".format(ssh.port)
        ])

    ssh.wait()

    # In Docker mode this happens inside the local Docker container:
    if not is_container_mode:
        expose_local_services(
            runner,
            ssh,
            list(expose.local_to_remote()),
        )

    # Start tunnels for the SOCKS proxy (local -> remote)
    # and the local server for the proxy to poll (remote -> local).
    socks_port = find_free_port()
    local_server_port = find_free_port()
    runner.track_background(
        launch_local_server(local_server_port, runner.output))
    forward_args = [
        "-L127.0.0.1:{}:127.0.0.1:9050".format(socks_port),
        "-R9055:127.0.0.1:{}".format(local_server_port)
    ]
    runner.launch("SSH port forward (socks and proxy poll)",
                  ssh.bg_command(forward_args))

    span.end()
    return socks_port, ssh
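
A quick sketch of the docker0 address extraction above, run against abridged sample "ip addr" output; note the regex also matches the broadcast address, and the code takes the first match:

# Sketch: IPv4 extraction from "ip addr show dev docker0" output (sample).
import re

sample = """4: docker0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500
    inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0"""
print(re.findall(r"(\d+\.\d+\.\d+\.\d+)", sample))
# ['172.17.0.1', '172.17.255.255'] -- docker_interfaces[0] is the address used
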
Example #9
def run_docker_command(
    runner: Runner,
    remote_info: RemoteInfo,
    docker_run: List[str],
    expose: PortMapping,
    to_pod: List[int],
    from_pod: List[int],
    container_to_host: PortMapping,
    remote_env: Dict[str, str],
    ssh: SSH,
    mount_dir: Optional[str],
    use_docker_mount: Optional[bool],
    pod_info: Dict[str, str],
) -> "subprocess.Popen[bytes]":
    """
    --docker-run support.

    Connect using sshuttle running in a Docker container, and then run user
    container.

    :param remote_env: Dictionary with environment on remote pod.
    :param mount_dir: Path to local directory where remote pod's filesystem is
        mounted.
    """
    # Update environment:
    remote_env["TELEPRESENCE_METHOD"] = "container"  # mostly just for tests :(

    # Extract --publish flags and add them to the sshuttle container, which is
    # responsible for defining the network entirely.
    docker_args, publish_args = parse_docker_args(docker_run)

    # Point a host port to the network container's sshd
    container_sshd_port = find_free_port()
    publish_args.append(
        "--publish=127.0.0.1:{}:38022/tcp".format(container_sshd_port)
    )
    local_ssh = SSH(runner, container_sshd_port, "[email protected]")

    # Start the network (sshuttle) container:
    name = random_name()
    config = {
        "cidrs": ["0/0"],
        "expose_ports": list(expose.local_to_remote()),
        "to_pod": to_pod,
        "from_pod": from_pod,
    }
    dns_args = []
    if "hostname" in pod_info:
        dns_args.append("--hostname={}".format(pod_info["hostname"].strip()))
    if "hosts" in pod_info:
        dns_args.extend(parse_hosts_aliases(pod_info["hosts"]))
    if "resolv" in pod_info:
        dns_args.extend(parse_resolv_conf(pod_info["resolv"]))

    # Image already has tini init so doesn't need --init option:
    span = runner.span()
    runner.launch(
        "Network container",
        runner.docker(
            "run", *publish_args, *dns_args, "--rm", "--privileged",
            "--name=" + name, TELEPRESENCE_LOCAL_IMAGE, "proxy",
            json.dumps(config)
        ),
        killer=make_docker_kill(runner, name),
        keep_session=runner.sudo_for_docker,
    )

    # Set up ssh tunnel to allow the container to reach the cluster
    if not local_ssh.wait():
        raise RuntimeError("SSH to the network container failed to start.")

    container_forward_args = ["-R", "38023:127.0.0.1:{}".format(ssh.port)]
    for container_port, host_port in container_to_host.local_to_remote():
        if runner.chatty:
            runner.show(
                "Forwarding container port {} to host port {}.".format(
                    container_port, host_port
                )
            )
        container_forward_args.extend([
            "-R", "{}:127.0.0.1:{}".format(container_port, host_port)
        ])
    runner.launch(
        "Local SSH port forward", local_ssh.bg_command(container_forward_args)
    )

    # Wait for sshuttle to be running:
    sshuttle_ok = False
    for _ in runner.loop_until(120, 1):
        try:
            runner.check_call(
                runner.docker(
                    "run", "--network=container:" + name, "--rm",
                    TELEPRESENCE_LOCAL_IMAGE, "wait"
                )
            )
        except subprocess.CalledProcessError as e:
            if e.returncode == 100:
                # We're good!
                sshuttle_ok = True
                break
            elif e.returncode == 125:
                # Docker failure, probably due to original container not
                # starting yet... so try again:
                continue
            else:
                raise
        else:
            raise RuntimeError(
                "Waiting container exited prematurely. File a bug, please!"
            )
    if not sshuttle_ok:
        # This used to loop forever. Now we time out after two minutes.
        raise RuntimeError(
            "Waiting for network container timed out. File a bug, please!"
        )

    # Start the container specified by the user:
    container_name = random_name()
    docker_command = runner.docker(
        "run",
        "--name=" + container_name,
        "--network=container:" + name,
        env=True,
    )

    # Prepare container environment
    for key in remote_env:
        docker_command.append("-e={}".format(key))
    docker_env = os.environ.copy()
    docker_env.update(remote_env)

    if mount_dir:
        if use_docker_mount:
            mount_volume = "telepresence-" + runner.session_id
        else:
            mount_volume = mount_dir

        docker_command.append("--volume={}:{}".format(mount_volume, mount_dir))

    # Don't add --init if the user is doing something with it
    init_args = [
        arg for arg in docker_args
        if arg == "--init" or arg.startswith("--init=")
    ]
    # Older versions of Docker don't have --init:
    docker_run_help = runner.get_output(["docker", "run", "--help"])
    if not init_args and "--init" in docker_run_help:
        docker_command += ["--init"]
    docker_command += docker_args
    span.end()

    runner.show("Setup complete. Launching your container.")
    process = subprocess.Popen(docker_command, env=docker_env)

    def terminate_if_alive() -> None:
        runner.write("Shutting down containers...\n")
        if process.poll() is None:
            runner.write("Killing local container...\n")
            make_docker_kill(runner, container_name)()

    runner.add_cleanup("Terminate local container", terminate_if_alive)
    return process
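
parse_docker_args is not shown here; from its use above it is expected to split the --publish/-p flags (which must go to the network container) from the rest of the user's docker run arguments. A hypothetical sketch of that split:

# Hypothetical sketch of a parse_docker_args-style split (the real helper
# may handle more flag spellings).
def parse_docker_args_sketch(docker_run):
    docker_args, publish_args = [], []
    args = iter(docker_run)
    for arg in args:
        if arg in ("-p", "--publish"):
            publish_args.extend([arg, next(args)])  # flag plus its value
        elif arg.startswith(("-p=", "--publish=")):
            publish_args.append(arg)
        else:
            docker_args.append(arg)
    return docker_args, publish_args

print(parse_docker_args_sketch(["-i", "-t", "--publish=8080:80", "alpine"]))
# (['-i', '-t', 'alpine'], ['--publish=8080:80'])
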
Example #10
def new_swapped_deployment(
    old_deployment: Dict,
    container_to_update: str,
    run_id: str,
    expose: PortMapping,
    add_custom_nameserver: bool,
) -> Dict:
    """
    Create a new Deployment that uses telepresence-k8s image.

    Makes the following changes:

    1. Changes to single replica.
    2. Disables command, args, livenessProbe, readinessProbe, workingDir.
    3. Adds labels.
    4. Adds TELEPRESENCE_NAMESERVER env variable, if requested.
    5. Runs as root, if requested.
    6. Sets terminationMessagePolicy.
    7. Adds TELEPRESENCE_CONTAINER_NAMESPACE env variable so the forwarder does
       not have to access the k8s API from within the pod.

    Returns dictionary that can be encoded to JSON and used with kubectl apply.
    Mutates the passed-in PortMapping to include container ports.
    """
    new_deployment_json = deepcopy(old_deployment)
    new_deployment_json["spec"]["replicas"] = 1
    new_deployment_json["metadata"].setdefault("labels",
                                               {})["telepresence"] = run_id
    new_deployment_json["spec"]["template"]["metadata"].setdefault(
        "labels", {})["telepresence"] = run_id
    for container, old_container in zip(
            new_deployment_json["spec"]["template"]["spec"]["containers"],
            old_deployment["spec"]["template"]["spec"]["containers"],
    ):
        if container["name"] == container_to_update:
            # Merge container ports into the expose list
            expose.merge_automatic_ports([
                port["containerPort"] for port in container.get("ports", [])
                if port["protocol"] == "TCP"
            ])
            container["image"] = get_image_name(expose)
            # Not strictly necessary for real use, but tests break without this
            # since we don't upload test images to Docker Hub:
            container["imagePullPolicy"] = "IfNotPresent"
            # Drop unneeded fields:
            for unneeded in [
                    "command", "args", "livenessProbe", "readinessProbe",
                    "workingDir", "lifecycle"
            ]:
                try:
                    container.pop(unneeded)
                except KeyError:
                    pass
            # We don't write out termination file:
            container["terminationMessagePolicy"] = "FallbackToLogsOnError"
            # Use custom name server if necessary:
            if add_custom_nameserver:
                container.setdefault("env", []).append({
                    "name":
                    "TELEPRESENCE_NAMESERVER",
                    "value":
                    get_alternate_nameserver()
                })
            # Add namespace environment variable to support deployments using
            # automountServiceAccountToken: false. To be used by forwarder.py
            # in the k8s-proxy.
            container.setdefault("env", []).append({
                "name": "TELEPRESENCE_CONTAINER_NAMESPACE",
                "valueFrom": {
                    "fieldRef": {
                        "fieldPath": "metadata.namespace"
                    }
                }
            })
            return new_deployment_json

    raise RuntimeError("Couldn't find container {} in the Deployment.".format(
        container_to_update))
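
A side note on the field-dropping loop above: dict.pop with a default collapses the try/except. A minimal, illustrative equivalent:

# Sketch: dropping optional fields without try/except (toy container dict).
container = {"name": "web", "command": ["sh"], "livenessProbe": {}}
for unneeded in ("command", "args", "livenessProbe", "readinessProbe",
                 "workingDir", "lifecycle"):
    container.pop(unneeded, None)  # no KeyError when the field is absent
print(container)  # {'name': 'web'}
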
Example #11
def create_new_deployment(
    runner: Runner,
    deployment_arg: str,
    expose: PortMapping,
    custom_nameserver: Optional[str],
    service_account: str,
) -> Tuple[str, str]:
    """
    Create a new Deployment, return its name and Kubernetes label.
    """
    span = runner.span()
    run_id = runner.session_id
    runner.show(
        "Starting network proxy to cluster using "
        "new Deployment {}".format(deployment_arg)
    )

    def remove_existing_deployment(quiet=False):
        if not quiet:
            runner.show("Cleaning up Deployment {}".format(deployment_arg))
        runner.check_call(
            runner.kubectl(
                "delete",
                "--ignore-not-found",
                "svc,deploy",
                "--selector=telepresence=" + run_id,
            )
        )

    runner.add_cleanup("Delete new deployment", remove_existing_deployment)
    remove_existing_deployment(quiet=True)
    # Define the deployment as yaml
    env = {}
    if custom_nameserver:
        # If we're on local VM we need to use different nameserver to prevent
        # infinite loops caused by sshuttle:
        env["TELEPRESENCE_NAMESERVER"] = custom_nameserver
    # Create the deployment via yaml
    deployment_yaml = _get_deployment_yaml(
        deployment_arg,
        run_id,
        get_image_name(runner, expose),
        service_account,
        env,
    )
    try:
        runner.check_call(
            runner.kubectl("create", "-f", "-"),
            input=deployment_yaml.encode("utf-8")
        )
    except CalledProcessError as exc:
        raise runner.fail(
            "Failed to create deployment {}:\n{}".format(
                deployment_arg, exc.stderr
            )
        )
    # Expose the deployment with a service
    if expose.remote():
        command = [
            "expose",
            "deployment",
            deployment_arg,
        ]
        # Provide a stable argument ordering.  Reverse it because that
        # happens to make some current tests happy but in the long run
        # that's totally arbitrary and doesn't need to be maintained.
        # See issue 494.
        for port in sorted(expose.remote(), reverse=True):
            command.append("--port={}".format(port))
        try:
            runner.check_call(runner.kubectl(*command))
        except CalledProcessError as exc:
            raise runner.fail(
                "Failed to expose deployment {}:\n{}".format(
                    deployment_arg, exc.stderr
                )
            )
    span.end()
    return deployment_arg, run_id
Example #12
def run_docker_command(
    runner: Runner,
    remote_info: RemoteInfo,
    docker_run: List[str],
    expose: PortMapping,
    also_proxy: List[str],
    remote_env: Dict[str, str],
    ssh: SSH,
    mount_dir: Optional[str],
) -> Popen:
    """
    --docker-run support.

    Connect using sshuttle running in a Docker container, and then run user
    container.

    :param remote_env: Dictionary with environment on remote pod.
    :param mount_dir: Path to local directory where remote pod's filesystem is
        mounted.
    """
    # Update environment:
    remote_env["TELEPRESENCE_METHOD"] = "container"  # mostly just for tests :(

    # Extract --publish flags and add them to the sshuttle container, which is
    # responsible for defining the network entirely.
    docker_args, publish_args = parse_docker_args(docker_run)

    # Start the sshuttle container:
    name = random_name()
    config = {
        "port": ssh.port,
        "cidrs": get_proxy_cidrs(runner, remote_info, also_proxy),
        "expose_ports": list(expose.local_to_remote()),
    }
    if runner.platform == "darwin":
        config["ip"] = MAC_LOOPBACK_IP
    # Image already has tini init so doesn't need --init option:
    span = runner.span()
    runner.launch("Network container",
                  docker_runify(publish_args + [
                      "--rm", "--privileged", "--name=" +
                      name, TELEPRESENCE_LOCAL_IMAGE, "proxy",
                      json.dumps(config)
                  ]),
                  killer=make_docker_kill(runner, name))

    # Wait for sshuttle to be running:
    while True:
        try:
            runner.check_call(
                docker_runify([
                    "--network=container:" + name, "--rm",
                    TELEPRESENCE_LOCAL_IMAGE, "wait"
                ]))
        except CalledProcessError as e:
            if e.returncode == 100:
                # We're good!
                break
            elif e.returncode == 125:
                # Docker failure, probably due to original container not
                # starting yet... so sleep and try again:
                sleep(1)
                continue
            else:
                raise
        else:
            raise RuntimeError(
                "Waiting container exited prematurely. File a bug, please!")

    # Start the container specified by the user:
    container_name = random_name()
    docker_command = docker_runify(
        [
            "--name=" + container_name,
            "--network=container:" + name,
        ],
        env=True,
    )

    # Prepare container environment
    for key in remote_env:
        docker_command.append("-e={}".format(key))
    docker_env = os.environ.copy()
    docker_env.update(remote_env)

    if mount_dir:
        docker_command.append("--volume={}:{}".format(mount_dir, mount_dir))

    # Don't add --init if the user is doing something with it
    init_args = [
        arg for arg in docker_args
        if arg == "--init" or arg.startswith("--init=")
    ]
    # Older versions of Docker don't have --init:
    if not init_args and "--init" in runner.get_output(
        ["docker", "run", "--help"]):
        docker_command += ["--init"]
    docker_command += docker_args
    span.end()

    process = Popen(docker_command, env=docker_env)

    def terminate_if_alive():
        runner.write("Shutting down containers...\n")
        if process.poll() is None:
            runner.write("Killing local container...\n")
            make_docker_kill(runner, container_name)()

    runner.add_cleanup("Terminate local container", terminate_if_alive)
    return process
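
The wait loops in this and Example #9 hinge on the exit-code protocol of the local image's "wait" command. A sketch of how those codes are interpreted, matching the handling above:

# Sketch: exit-code protocol of the TELEPRESENCE_LOCAL_IMAGE "wait" command,
# as handled by the loops above (illustrative helper, not from the source).
def interpret_wait_exit(code: int) -> str:
    if code == 100:
        return "ready"  # sshuttle is up inside the network container
    if code == 125:
        return "retry"  # docker couldn't attach yet; sleep and try again
    if code == 0:
        return "bug"    # clean exit is premature -> RuntimeError above
    return "error"      # any other failure is re-raised
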
Example #13
def command(runner, args):
    with runner.cleanup_handling(), crash_reporting(runner):
        # Process arguments
        name = args.name or runner.session_id
        local_port = args.port
        deployment = args.deployment
        patterns = [
            dict(name=header, regex_match=pattern)
            for header, pattern in args.match
        ]

        # Inform the user
        runner.show("Setting up intercept session {}".format(name))
        runner.show("Intercepting requests to {}".format(deployment))
        runner.show("and redirecting them to localhost:{}".format(local_port))
        runner.show("when the following headers match:")
        for obj in patterns:
            runner.show("  {name}: {regex_match}".format(**obj))

        # Check the deployment exists and has the sidecar
        # FIXME: implement

        # Connect to the proxy
        runner.show("Connecting to the Telepresence Proxy")
        proxy_name = "telepresence-proxy"
        remote_info = get_remote_info(runner, proxy_name, "deployment")

        old_chatty, runner.chatty = runner.chatty, False
        _, ssh = connect(runner, remote_info, False, PortMapping())
        runner.chatty = old_chatty

        # Forward local port to the proxy's API server
        api_server_port = find_free_port()
        forward_args = [
            "-L127.0.0.1:{}:127.0.0.1:8081".format(api_server_port)
        ]
        runner.launch("SSH port forward (api server)",
                      ssh.bg_command(forward_args))
        url = "http://127.0.0.1:{}/intercept/{}".format(
            api_server_port, deployment)
        runner.write("Proxy URL is {}".format(url))

        # Start the intercept, get the remote port on the proxy
        data = json.dumps(dict(name=name, patterns=patterns))
        response = proxy_request(runner, url, data, "POST")
        try:
            remote_port = int(response)
        except ValueError:
            raise runner.fail("Unexpected response from the proxy")

        # Forward remote proxy port to the local port. This is how the
        # intercepted requests will get from the proxy to the user's code.
        forward_args = ["-R{}:127.0.0.1:{}".format(remote_port, local_port)]
        runner.launch("SSH port forward (proxy to user code)",
                      ssh.bg_command(forward_args))

        runner.add_cleanup("Delete intercept", proxy_request, runner, url,
                           str(remote_port), "DELETE")

        runner.show("Intercept is running. Press Ctrl-C/Ctrl-Break to quit.")
        user_process = Popen(["cat"], stdout=DEVNULL)
        runner.wait_for_exit(user_process)
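
The intercept request body posted to the proxy above is just the session name plus the header patterns. A sketch with illustrative values:

# Sketch: the JSON payload sent to the proxy's /intercept endpoint
# (session name and header pattern are made up).
import json

payload = json.dumps(dict(
    name="my-session",
    patterns=[dict(name="x-service-preview", regex_match="dev-123")],
))
print(payload)
# {"name": "my-session", "patterns": [{"name": "x-service-preview",
#   "regex_match": "dev-123"}]}
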