Example #1
def get_deployment_json(
    runner: Runner,
    deployment_name: str,
    deployment_type: str,
    run_id: Optional[str] = None,
) -> Dict:
    """Get the decoded JSON for a deployment.

    If this is a Deployment we created, the run_id is also passed in - this is
    the session id we set for the telepresence label. Otherwise run_id is None
    and the Deployment name must be used to locate the Deployment.
    """
    span = runner.span()
    try:
        get_deployment = [
            "get",
            deployment_type,
            "-o",
            "json",
        ]
        if run_id is None:
            kcmd = get_deployment + [deployment_name]
            output = runner.get_output(runner.kubectl(*kcmd))
            return json.loads(output)
        else:
            # When using a selector we get a list of objects, not just one:
            kcmd = get_deployment + ["--selector=telepresence=" + run_id]
            output = runner.get_output(runner.kubectl(*kcmd))
            return json.loads(output)["items"][0]
    except CalledProcessError as e:
        raise runner.fail("Failed to find deployment {}:\n{}".format(
            deployment_name, e.stdout))
    finally:
        span.end()
Example #2
def podCIDRs(runner: Runner):
    """
    Get pod CIDRs from the nodes if possible; otherwise fall back to a CIDR
    covering the observed pod IPs.
    """
    cidrs = set()
    try:
        nodes = json.loads(
            runner.get_output(runner.kubectl("get", "nodes", "-o",
                                             "json")))["items"]
    except CalledProcessError as e:
        runner.write("Failed to get nodes: {}".format(e))
    else:
        for node in nodes:
            pod_cidr = node["spec"].get("podCIDR")
            if pod_cidr is not None:
                cidrs.add(pod_cidr)

    if len(cidrs) == 0:
        # Fallback to using pod IPs:
        pods = json.loads(
            runner.get_output(runner.kubectl("get", "pods", "-o",
                                             "json")))["items"]
        pod_ips = []
        for pod in pods:
            try:
                pod_ips.append(pod["status"]["podIP"])
            except KeyError:
                # Apparently a problem on OpenShift
                pass
        if pod_ips:
            cidrs.add(covering_cidr(pod_ips))

    return list(cidrs)
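
The helper covering_cidr used above (and again in Example #28) is defined elsewhere in the project. Purely as an illustration of the heuristic, here is a minimal sketch of what such a function could look like using Python's ipaddress module; the project's real implementation may differ:

import ipaddress
from typing import List

def covering_cidr(ips: List[str]) -> str:
    """Return a CIDR string that covers every address in ips (IPv4 assumed)."""
    # Treat each address as belonging to (at least) a /24 network.
    nets = [ipaddress.ip_network(ip + "/24", strict=False) for ip in ips]
    prefix = 24
    while prefix >= 0:
        # Widen the prefix until a single network contains all of them.
        candidate = nets[0].supernet(new_prefix=prefix)
        if all(n.subnet_of(candidate) for n in nets):  # subnet_of needs Python 3.7+
            return str(candidate)
        prefix -= 1
    return "0.0.0.0/0"  # unreachable: a /0 always matches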
Example #3
def create_with_cleanup(runner: Runner, manifests: Iterable[Manifest]) -> None:
    """Create resources and set up their removal at cleanup.

    Uses "kubectl create" with the supplied manifests to create resources.
    Assumes that all the created resources include the telepresence label so it
    can use a label selector to delete those resources.
    """
    kinds = set(str(manifest["kind"]).capitalize() for manifest in manifests)
    kinds_display = ", ".join(kinds)
    manifest_list = make_k8s_list(manifests)
    manifest_json = json.dumps(manifest_list)
    try:
        runner.check_call(runner.kubectl("create", "-f", "-"),
                          input=manifest_json.encode("utf-8"))
    except CalledProcessError as exc:
        raise runner.fail("Failed to create {}:\n{}".format(
            kinds_display, exc.stderr))

    def clean_up() -> None:
        runner.show("Cleaning up {}".format(kinds_display))
        runner.check_call(
            runner.kubectl(
                "delete",
                "--ignore-not-found",
                "--wait=false",
                "--selector=telepresence=" + runner.session_id,
                ",".join(kinds),
            ))

    runner.add_cleanup("Delete proxy {}".format(kinds_display), clean_up)
Example #4
def setup_inject(runner: Runner, args: Namespace) -> LaunchType:
    command = ["torsocks"] + (args.run or ["bash", "--norc"])
    check_local_command(runner, command[1])
    runner.require(["torsocks"], "Please install torsocks (v2.1 or later)")
    if runner.chatty:
        runner.show(
            "Starting proxy with method 'inject-tcp', which has the following "
            "limitations: Go programs, static binaries, suid programs, and "
            "custom DNS implementations are not supported. For a full list of "
            "method limitations see "
            "https://telepresence.io/reference/methods.html"
        )

    if args.also_proxy:
        runner.show(
            "Note: --also-proxy is not meaningful with -m inject-tcp. "
            "The inject-tcp method sends all network traffic to the cluster."
        )

    def launch(
        runner_, _remote_info, env, socks_port, _ssh, _mount_dir, _pod_info
    ):
        return launch_inject(runner_, command, socks_port, env)

    return launch
Example #5
def call_scout(runner: Runner, args):
    config_root = Path(Path.home() / ".config" / "telepresence")
    config_root.mkdir(parents=True, exist_ok=True)
    id_file = Path(config_root / "id")

    scout_kwargs = dict(kubectl_version=runner.kubectl.command_version,
                        kubernetes_version=runner.kubectl.cluster_version,
                        operation=args.operation,
                        method=args.method)

    try:
        with id_file.open('x') as f:
            install_id = str(uuid4())
            f.write(install_id)
            scout_kwargs["new_install"] = True
    except FileExistsError:
        with id_file.open('r') as f:
            install_id = f.read()
            scout_kwargs["new_install"] = False

    scout = Scout("telepresence", __version__, install_id)
    scouted = scout.report(**scout_kwargs)

    runner.write("Scout info: {}".format(scouted))

    my_version = get_numeric_version(__version__)
    try:
        latest = get_numeric_version(scouted["latest_version"])
    except (KeyError, ValueError):
        latest = my_version

    if latest > my_version:
        message = ("\nTelepresence {} is available (you're running {}). "
                   "https://www.telepresence.io/reference/changelog").format(
                       scouted["latest_version"], __version__)

        def ver_notice():
            runner.show(message)

        runner.add_cleanup("Show version notice", ver_notice)
Example #6
def final_checks(runner: Runner, args):
    """
    Perform some last cross-cutting checks
    """

    # Make sure we can access Kubernetes:
    try:
        runner.check_call(
            runner.kubectl(
                "get", "pods", "telepresence-connectivity-check",
                "--ignore-not-found"
            )
        )
    except CalledProcessError as exc:
        sys.stderr.write("Error accessing Kubernetes: {}\n".format(exc))
        if exc.stderr:
            sys.stderr.write("{}\n".format(exc.stderr.strip()))
        raise runner.fail("Cluster access failed")
    except (OSError, IOError) as exc:
        raise runner.fail(
            "Unexpected error accessing Kubernetes: {}\n".format(exc)
        )
Example #7
def _check_if_in_local_vm(self, runner: Runner) -> bool:
    # Minikube just has 'minikube' as its context
    if self.context == "minikube":
        return True
    # Minishift has a complex context name, so check by server:
    if self.command == "oc":
        try:
            ip = runner.get_output(["minishift", "ip"]).strip()
        except (OSError, CalledProcessError):
            return False
        if ip and ip in runner.kubectl.server:
            return True
    return False
Example #8
def launch_vpn(
    runner: Runner,
    remote_info: RemoteInfo,
    command: List[str],
    also_proxy: List[str],
    env_overrides: Dict[str, str],
    ssh: SSH,
) -> Popen:
    """
    Launch sshuttle and the user's command
    """
    connect_sshuttle(runner, remote_info, also_proxy, ssh)
    env = get_local_env(runner, env_overrides, False)
    runner.show("Setup complete. Launching your command.")
    try:
        process = Popen(command, env=env)
    except OSError as exc:
        raise runner.fail("Failed to launch your command: {}".format(exc))
    runner.add_cleanup(
        "Terminate local process", terminate_local_process, runner, process
    )
    return process
Example #9
def proxy(config: dict):
    """Start sshuttle proxy to Kubernetes."""
    cidrs = config["cidrs"]
    expose_ports = config["expose_ports"]

    # Launch local sshd so Tel outside can forward 38023 to the cluster
    runner = Runner("-", "-", False)
    runner.check_call(["/usr/sbin/sshd", "-e"])

    # Wait for the cluster to be available
    ssh = SSH(runner, 38023, "[email protected]")
    ssh.wait()

    # Figure out IP addresses to exclude, from the incoming ssh
    exclusions = []
    netstat_output = runner.get_output(["netstat", "-n"])
    for line in netstat_output.splitlines():
        if not line.startswith("tcp") or "ESTABLISHED" not in line:
            continue
        parts = line.split()
        try:
            for address in (parts[3], parts[4]):
                ip, port = address.split(":")
                exclusions.extend(["-x", ip])
        except (IndexError, ValueError):
            runner.write("Failed on line: " + line)
            raise
    assert exclusions, netstat_output

    # Start the sshuttle VPN-like thing:
    # XXX duplicates code in telepresence, remove duplication
    main_process = Popen([
        "sshuttle-telepresence", "-v", "--dns", "--method", "nat", "-e", (
            "ssh -oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null " +
            "-F /dev/null"
        ), "-r",
        "[email protected]:38023"
    ] + exclusions + cidrs)

    # Start the SSH tunnels to expose local services:
    expose_local_services(runner, ssh, expose_ports)

    # Wait for everything to exit:
    runner.wait_for_exit(main_process)
Example #10
def launch_local(
    runner: Runner,
    command: List[str],
    env_overrides: Dict[str, str],
    replace_dns_tools: bool,
) -> Popen:
    # Compute user process environment
    env = os.environ.copy()
    env.update(env_overrides)
    env["PROMPT_COMMAND"] = ('PS1="@{}|$PS1";unset PROMPT_COMMAND'.format(
        runner.kubectl.context))
    env["PATH"] = apply_workarounds(runner, env["PATH"], replace_dns_tools)

    # Launch the user process
    runner.show("Setup complete. Launching your command.")
    try:
        process = Popen(command, env=env)
    except OSError as exc:
        raise runner.fail("Failed to launch your command: {}".format(exc))
    runner.add_cleanup("Terminate local process", terminate_local_process,
                       runner, process)
    return process
Example #11
def swap_deployment_openshift(
        runner: Runner, args: argparse.Namespace) -> Tuple[str, str, Dict]:
    """
    Swap out an existing DeploymentConfig.

    Returns (Deployment name, unique K8s label, JSON of original container that
    was swapped out.)

    In practice OpenShift doesn't seem to do the right thing when a
    DeploymentConfig is updated. In particular, we need to disable the image
    trigger so that we can use the new image, but the replicationcontroller
    then continues to deploy the existing image.

    So instead we use a different approach than for Kubernetes, replacing the
    current ReplicationController with one that uses the Telepresence image,
    then restores it. We delete the pods to force the RC to do its thing.
    """
    run_id = runner.session_id
    deployment_name, *container_name = args.swap_deployment.split(":", 1)
    if container_name:
        container_name = container_name[0]
    rcs = runner.get_output(
        runner.kubectl(
            "get", "rc", "-o", "name", "--selector",
            "openshift.io/deployment-config.name=" + deployment_name))
    rc_name = sorted(rcs.split(),
                     key=lambda n: int(n.split("-")[-1]))[0].split("/", 1)[1]
    rc_json = json.loads(
        runner.get_output(runner.kubectl("get", "rc", "-o", "json", "--export",
                                         rc_name),
                          stderr=STDOUT))

    def apply_json(json_config):
        runner.check_call(runner.kubectl("apply", "-f", "-"),
                          input=json.dumps(json_config).encode("utf-8"))
        # Now that we've updated the replication controller, delete pods to
        # make sure changes get applied:
        runner.check_call(
            runner.kubectl("delete", "pod", "--selector",
                           "deployment=" + rc_name))

    runner.add_cleanup("Restore original replication controller", apply_json,
                       rc_json)

    # If no container name was given, just use the first one:
    if not container_name:
        container_name = rc_json["spec"]["template"]["spec"]["containers"][0][
            "name"]

    new_rc_json, orig_container_json = new_swapped_deployment(
        rc_json,
        container_name,
        run_id,
        TELEPRESENCE_REMOTE_IMAGE,
        args.method == "vpn-tcp" and args.in_local_vm,
    )
    apply_json(new_rc_json)
    return deployment_name, run_id, orig_container_json
Example #12
def mount_remote_volumes_docker(runner: Runner, ssh: SSH) -> Callable:
    """
    Create a Docker volume (using the vieux/sshfs driver) that mounts the
    remote system over sshfs.

    Returns a callable that will remove the volume at cleanup.
    """
    span = runner.span()
    try:
        ssh_args = ssh.required_args.copy()
        f_index = ssh_args.index("-F") if "-F" in ssh_args else None
        if f_index is not None:
            del ssh_args[f_index + 1]
            del ssh_args[f_index]

        runner.check_call(
            runner.docker(
                "volume", "create", "-d", "vieux/sshfs", "-o",
                "port={}".format(ssh.port), *ssh_args, "-o", "allow_other",
                "-o", "sshcmd={}:/".format(ssh.user_at_host),
                "telepresence-{}".format(runner.session_id)
            )
        )

        mounted = True
    except CalledProcessError as exc:
        runner.show(
            "Mounting remote volumes failed, they will be unavailable"
            " in this session."
            " please report a bug, attaching telepresence.log to"
            " the bug report:"
            " https://github.com/datawire/telepresence/issues/new"
        )
        if exc.stderr:
            runner.show("\nMount error was: {}\n".format(exc.stderr.strip()))
        mounted = False

    def no_cleanup():
        pass

    def cleanup():
        runner.check_call(
            runner.docker(
                "volume", "rm", "-f", "telepresence-" + runner.session_id
            )
        )

    span.end()
    return cleanup if mounted else no_cleanup
Example #13
def get_deployment(runner: Runner, name: str) -> Dict[str, Any]:
    """
    Retrieve the manifest for the named Deployment/DeploymentConfig, or emit
    an error message for the user.
    """
    if ":" in name:
        name, container = name.split(":", 1)

    kube = runner.kubectl
    manifest = ""

    # Maybe try to find an OpenShift DeploymentConfig
    if kube.command == "oc" and kube.cluster_is_openshift:
        try:
            manifest = runner.get_output(
                runner.kubectl("get", "dc", name, "-o", "json"),
                reveal=True,
            )
        except CalledProcessError as exc:
            runner.show(
                "Failed to find DeploymentConfig {}:\n  {}".format(
                    name, exc.stderr
                )
            )
            runner.show("Will try regular Kubernetes Deployment.")

    # No DC or no OpenShift, look for a Deployment
    if manifest == "":
        try:
            manifest = runner.get_output(
                runner.kubectl("get", "deploy", name, "-o", "json"),
                reveal=True,
            )
        except CalledProcessError as exc:
            raise runner.fail(
                "Failed to find Deployment {}:\n  {}".format(name, exc.stderr)
            )

    # Parse the resulting manifest
    # This failing is likely a bug, so crash...
    deployment = json.loads(manifest)  # type: Manifest

    return deployment
Example #14
def main():
    """
    Top-level function for Telepresence
    """

    ########################################
    # Preliminaries: No changes to the machine or the cluster, no cleanup
    # Capture environment info and the user's intent

    with crash_reporting():
        args = parse_args()  # tab-completion stuff goes here

        runner = Runner(Output(args.logfile), None, args.verbose)
        span = runner.span()
        runner.add_cleanup("Stop time tracking", span.end)
        runner.kubectl = KubeInfo(runner, args)

        start_proxy = proxy.setup(runner, args)
        do_connect = connect.setup(runner, args)
        get_remote_env, write_env_files = remote_env.setup(runner, args)
        launch = outbound.setup(runner, args)
        mount_remote = mount.setup(runner, args)

        final_checks(runner, args)

        # Usage tracking
        call_scout(runner, args)

    ########################################
    # Now it's okay to change things

    with runner.cleanup_handling(), crash_reporting(runner):
        # Set up the proxy pod (operation -> pod name)
        remote_info = start_proxy(runner)

        # Connect to the proxy (pod name -> ssh object)
        socks_port, ssh = do_connect(runner, remote_info)

        # Capture remote environment information (ssh object -> env info)
        env = get_remote_env(runner, remote_info)

        # Handle filesystem stuff
        mount_dir = mount_remote(runner, env, ssh)

        # Maybe write environment files
        write_env_files(runner, env)

        # Set up outbound networking (pod name, ssh object)
        # Launch user command with the correct environment (...)
        user_process = launch(runner, remote_info, env, socks_port, ssh,
                              mount_dir)

        wait_for_exit(runner, user_process)
Example #15
def _get_remote_env(
    runner: Runner, context: str, namespace: str, pod_name: str,
    container_name: str
) -> Dict[str, str]:
    """Get the environment variables in the remote pod."""
    env = runner.get_kubectl(
        context, namespace, [
            "exec", pod_name, "--container", container_name, "--", "python3",
            "-c", "import json, os; print(json.dumps(dict(os.environ)))"
        ]
    )
    result = {}  # type: Dict[str,str]
    result.update(loads(env))
    return result
Example #16
def get_remote_env(runner: Runner, args: argparse.Namespace,
                   remote_info: RemoteInfo) -> Dict[str, str]:
    # Get the environment variables we want to copy from the remote pod; it may
    # take a few seconds for the SSH proxies to get going:
    start = time()
    while time() - start < 10:
        try:
            env = get_env_variables(runner, remote_info)
            break
        except CalledProcessError:
            sleep(0.25)
    else:
        raise runner.fail("Error: Failed to get environment variables")
    return env
Example #17
def wait_for_pod(runner: Runner, remote_info: RemoteInfo) -> None:
    """Wait for the pod to start running."""
    span = runner.span()
    start = time()
    while time() - start < 120:
        try:
            pod = json.loads(
                runner.get_kubectl(
                    remote_info.context, remote_info.namespace,
                    ["get", "pod", remote_info.pod_name, "-o", "json"]))
        except CalledProcessError:
            sleep(0.25)
            continue
        if pod["status"]["phase"] == "Running":
            for container in pod["status"]["containerStatuses"]:
                if container["name"] == remote_info.container_name and (
                        container["ready"]):
                    span.end()
                    return
        sleep(0.25)
    span.end()
    raise RuntimeError("Pod isn't starting or can't be found: {}".format(
        pod["status"]))
Example #18
def set_up_torsocks(runner: Runner, socks_port: int) -> Dict[str, str]:
    """
    Set up environment variables and configuration to make torsocks work
    correctly. Wait for connectivity.
    """
    span = runner.span()
    # Create custom torsocks.conf, since some options we want (in particular,
    # port) aren't accessible via env variables in older versions of torsocks:
    tor_conffile = runner.temp / "tel_torsocks.conf"
    tor_conffile.write_text(TORSOCKS_CONFIG.format(socks_port))

    torsocks_env = dict()
    torsocks_env["TORSOCKS_CONF_FILE"] = str(tor_conffile)
    if runner.logfile_path != "-":
        torsocks_env["TORSOCKS_LOG_FILE_PATH"] = runner.logfile_path

    # Wait until DNS resolution via torsocks succeeds
    # FIXME: Make this lookup externally configurable
    # https://github.com/telepresenceio/telepresence/issues/389
    # https://github.com/telepresenceio/telepresence/issues/985
    test_hostname = "kubernetes.default.svc.cluster.local"
    test_proxying_cmd = [
        "torsocks", "python3", "-c",
        "import socket; socket.socket().connect(('%s', 443))" % test_hostname
    ]
    launch_env = os.environ.copy()
    launch_env.update(torsocks_env)
    try:
        for _ in runner.loop_until(15, 0.1):
            try:
                runner.check_call(test_proxying_cmd, env=launch_env)
                return torsocks_env
            except CalledProcessError:
                pass
        raise RuntimeError("SOCKS network proxying failed to start...")
    finally:
        span.end()
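
TORSOCKS_CONFIG is defined elsewhere in the project. Given that it is formatted with the SOCKS port and written out as a torsocks configuration file, a plausible template is sketched below; this is an assumption for illustration, and the real contents may differ:

# Hypothetical template: torsocks reads TorAddress/TorPort from its config
# file, so pointing them at the local SOCKS proxy is what the code above
# relies on.
TORSOCKS_CONFIG = """TorAddress 127.0.0.1
TorPort {}
"""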
Example #19
def proxy(config: typing.Dict[str, typing.Any]) -> None:
    """Start sshuttle proxy to Kubernetes."""
    cidrs = config["cidrs"]
    expose_ports = config["expose_ports"]
    to_pod = config["to_pod"]
    from_pod = config["from_pod"]

    # Launch local sshd so Tel outside can forward 38023 to the cluster
    runner = Runner("-", False)
    runner.check_call(["/usr/sbin/sshd", "-e"])

    # Wait for the cluster to be available
    ssh = SSH(runner, 38023, "[email protected]")
    if not ssh.wait():
        raise RuntimeError(
            "SSH from local container to the cluster failed to start.")

    # Figure out IP addresses to exclude, from the incoming ssh
    exclusions = []
    netstat_output = runner.get_output(["netstat", "-n"])
    for line in netstat_output.splitlines():
        if not line.startswith("tcp") or "ESTABLISHED" not in line:
            continue
        parts = line.split()
        try:
            for address in (parts[3], parts[4]):
                ip, port = address.split(":")
                exclusions.extend(["-x", ip])
        except (IndexError, ValueError):
            runner.write("Failed on line: " + line)
            raise
    assert exclusions, netstat_output

    # Start the sshuttle VPN-like thing:
    sshuttle_cmd = get_sshuttle_command(ssh, "nat") + exclusions + cidrs
    main_process = Popen(sshuttle_cmd, universal_newlines=True)

    # Start the SSH tunnels to expose local services:
    expose_local_services(runner, ssh, expose_ports, to_pod, from_pod)

    # Wait for everything to exit:
    runner.wait_for_exit(main_process)
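
The get_sshuttle_command helper is not shown here, but Example #9 above builds the equivalent command inline, so a sketch along those lines might look like this (hedged: the real helper may also carry extra ssh arguments from the SSH object):

from typing import List

def get_sshuttle_command(ssh: "SSH", method: str = "auto") -> List[str]:
    # Mirrors the inline invocation in Example #9: run sshuttle-telepresence
    # against the forwarded cluster sshd, disabling host-key checks.
    return [
        "sshuttle-telepresence",
        "-v",
        "--dns",
        "--method", method,
        "-e", (
            "ssh -oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null"
            " -F /dev/null"
        ),
        "-r", "{}:{}".format(ssh.user_at_host, ssh.port),
    ]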
Example #20
def apply_workarounds(runner: Runner, original_path: str,
                      replace_dns_tools: bool) -> str:
    """
    Apply workarounds by creating required executables and returning an updated
    PATH variable for the user process.

    :param runner: Runner
    :param original_path: Current $PATH
    :param replace_dns_tools: True for inject-tcp, where DNS is not proxied
    :return: Updated $PATH
    """
    paths = original_path.split(os.pathsep)

    if runner.platform == "darwin":
        # Capture protected $PATH entries in order
        protected_set = {"/bin", "/sbin", "/usr/sbin", "/usr/bin"}
        protected = [Path(path) for path in paths if path in protected_set]

        # Make copies in an unprotected location
        sip_bin = runner.make_temp("sip_bin")
        make_sip_workaround_copy(protected, sip_bin)

        # Replace protected paths with the unprotected path
        paths = [path for path in paths if path not in protected_set]
        paths.insert(0, str(sip_bin))

    # Handle unsupported commands
    unsupported_bin = runner.make_temp("unsup_bin")
    unsupported = ["ping", "traceroute"]
    if replace_dns_tools:
        unsupported += ["nslookup", "dig", "host"]
    make_unsupported_tool(unsupported, unsupported_bin)
    paths.insert(0, str(unsupported_bin))

    return os.pathsep.join(paths)
Example #21
def mount_remote_volumes(
    runner: Runner, ssh: SSH, allow_all_users: bool, mount_dir: str
) -> Tuple[str, Callable]:
    """
    sshfs is used to mount the remote system locally.

    Allowing all users may require root, so we use sudo in that case.

    Returns (path to mounted directory, callable that will unmount it).
    """
    span = runner.span()
    if allow_all_users:
        sudo_prefix = ["sudo"]
        middle = ["-o", "allow_other"]
    else:
        sudo_prefix = []
        middle = []
    try:
        runner.get_output(
            sudo_prefix + ["sshfs", "-p", str(ssh.port)] + ssh.required_args +
            middle + ["{}:/".format(ssh.user_at_host), mount_dir],
            stderr=STDOUT
        )
        mounted = True
    except CalledProcessError as exc:
        runner.show(
            "Mounting remote volumes failed, they will be unavailable"
            " in this session. If you are running"
            " on Windows Subystem for Linux then see"
            " https://github.com/datawire/telepresence/issues/115,"
            " otherwise please report a bug, attaching telepresence.log to"
            " the bug report:"
            " https://github.com/datawire/telepresence/issues/new"
        )
        if exc.output:
            runner.show("\nMount error was: {}\n".format(exc.output.strip()))
        mounted = False

    def no_cleanup():
        pass

    def cleanup():
        if runner.platform == "linux":
            runner.check_call(
                sudo_prefix + ["fusermount", "-z", "-u", mount_dir]
            )
        else:
            runner.get_output(sudo_prefix + ["umount", "-f", mount_dir])

    span.end()
    return mount_dir, cleanup if mounted else no_cleanup
Example #22
def existing_deployment_openshift(
    runner: Runner,
    deployment_arg: str,
    expose: PortMapping,
    deployment_env: Dict,
    service_account: str,
) -> Tuple[str, Optional[str]]:
    """
    Handle an existing deploymentconfig by doing nothing
    """
    runner.show("Starting network proxy to cluster using the existing proxy "
                "DeploymentConfig {}".format(deployment_arg))
    try:
        d_json = json.loads(
            runner.get_output(
                runner.kubectl("get", "deploymentconfig", deployment_arg, "-o",
                               "json")))

        _set_expose_ports(expose, deployment_arg, d_json)
    except CalledProcessError as exc:
        raise runner.fail("Failed to find deploymentconfig {}:\n{}".format(
            deployment_arg, exc.stderr))
    run_id = None
    return deployment_arg, run_id
Example #23
def setup_container(runner: Runner, args):
    runner.require(["docker", "socat"], "Needed for the container method.")
    if runner.platform == "linux":
        needed = ["ip", "ifconfig"]
        missing = runner.depend(needed)
        if set(needed) == set(missing):
            raise runner.fail(
                """At least one of "ip addr" or "ifconfig" must be """ +
                "available to retrieve Docker interface info.")
    if runner.platform == "darwin":
        runner.require(
            ["ifconfig"],
            "Needed to manage networking with the container method.",
        )
        runner.require_sudo()
    if SUDO_FOR_DOCKER:
        runner.require_sudo()

    def launch(runner_, remote_info, env, _socks_port, ssh, mount_dir):
        return run_docker_command(runner_, remote_info, args.docker_run,
                                  args.expose, args.also_proxy, env, ssh,
                                  mount_dir)

    return launch
Example #24
def swap_deployment_openshift(runner: Runner, deployment_arg: str,
                              image_name: str, expose: PortMapping,
                              add_custom_nameserver: bool) -> Tuple[str, str]:
    """
    Swap out an existing DeploymentConfig.

    Returns (Deployment name, unique K8s label).

    In practice OpenShift doesn't seem to do the right thing when a
    DeploymentConfig is updated. In particular, we need to disable the image
    trigger so that we can use the new image, but the replicationcontroller
    then continues to deploy the existing image.

    So instead we use a different approach than for Kubernetes, replacing the
    current ReplicationController with one that uses the Telepresence image,
    then restores it. We delete the pods to force the RC to do its thing.
    """
    run_id = runner.session_id
    deployment, container = _split_deployment_container(deployment_arg)
    rcs = runner.get_output(
        runner.kubectl("get", "rc", "-o", "name", "--selector",
                       "openshift.io/deployment-config.name=" + deployment))
    rc_name = sorted(rcs.split(),
                     key=lambda n: int(n.split("-")[-1]))[0].split("/", 1)[1]
    rc_json = json.loads(
        runner.get_output(runner.kubectl("get", "rc", "-o", "json", "--export",
                                         rc_name),
                          stderr=STDOUT))

    def apply_json(json_config):
        runner.check_call(runner.kubectl("apply", "-f", "-"),
                          input=json.dumps(json_config).encode("utf-8"))
        # Now that we've updated the replication controller, delete pods to
        # make sure changes get applied:
        runner.check_call(
            runner.kubectl("delete", "pod", "--selector",
                           "deployment=" + rc_name))

    runner.add_cleanup("Restore original replication controller", apply_json,
                       rc_json)

    container = _get_container_name(container, rc_json)

    new_rc_json, orig_container_json = new_swapped_deployment(
        rc_json,
        container,
        run_id,
        image_name,
        add_custom_nameserver,
    )
    apply_json(new_rc_json)

    _merge_expose_ports(expose, orig_container_json)

    return deployment, run_id
Example #25
def _dc_exists(runner: Runner, name: str) -> bool:
    """
    If we're using OpenShift Origin, we may be using a DeploymentConfig instead
    of a Deployment. Return True if a dc exists with the given name.
    """
    # Need to use oc to manage DeploymentConfigs. The cluster needs to be
    # running OpenShift as well. Check for both.
    kube = runner.kubectl
    if kube.command != "oc" or not kube.cluster_is_openshift:
        return False
    if ":" in name:
        name, container = name.split(":", 1)
    try:
        runner.check_call(runner.kubectl("get", "dc/{}".format(name)))
        return True
    except CalledProcessError as exc:
        runner.show(
            "Failed to find OpenShift deploymentconfig {}:".format(name))
        runner.show("  {}".format(str(exc.stderr)))
        runner.show("Will try regular Kubernetes Deployment.")
    return False
Example #26
def _check_if_in_local_vm(self, runner: Runner) -> bool:
    # Running Docker Desktop on macOS (or maybe Windows?)
    if self.context == "docker-for-desktop":
        return True
    # Minikube just has 'minikube' as its context
    if self.context == "minikube":
        return True
    # Minishift has a complex context name, so check by server:
    if self.command == "oc":
        try:
            ip = runner.get_output(["minishift", "ip"]).strip()
        except (OSError, CalledProcessError):
            return False
        if ip and ip in self.server:
            return True
    return False
Example #27
def get_unsupported_tools(runner: Runner, dns_supported: bool) -> str:
    """
    Create replacement command-line tools that just error out, in a nice way.

    Returns the path to the directory where the overridden tools are stored.
    """
    commands = ["ping", "traceroute"]
    if not dns_supported:
        commands += ["nslookup", "dig", "host"]
    unsupported_bin = str(runner.make_temp("unsup_bin"))
    for command in commands:
        path = unsupported_bin + "/" + command
        with open(path, "w") as f:
            f.write(NICE_FAILURE.format(command))
        os.chmod(path, 0o755)
    return unsupported_bin
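
NICE_FAILURE is defined elsewhere; from the way it is used above (formatted with the command name, written to a file, and made executable), it is presumably a small shell-script template, roughly along the lines of this hypothetical sketch (not the project's exact text):

NICE_FAILURE = """#!/bin/sh
# Hypothetical replacement script: explain why the tool is unavailable.
echo "{}: this command does not work under Telepresence." >&2
echo "See https://telepresence.io/reference/methods.html for details." >&2
exit 1
"""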
Example #28
def serviceCIDR(runner: Runner):
    """
    Get the service IP range, based on the heuristic of constructing a CIDR
    from existing Service IPs. We create more Services if there are fewer
    than 8, to ensure some coverage of the IP range.
    """

    def get_service_ips():
        services = json.loads(
            runner.get_output(runner.kubectl("get", "services", "-o", "json"))
        )["items"]
        # FIXME: Add test(s) here so we don't crash on, e.g., ExternalName
        return [
            svc["spec"]["clusterIP"] for svc in services
            if svc["spec"].get("clusterIP", "None") != "None"
        ]

    service_ips = get_service_ips()
    new_services = []  # type: List[str]
    # Ensure we have at least 8 ClusterIP Services:
    while len(service_ips) + len(new_services) < 8:
        new_service = random_name()
        runner.check_call(
            runner.kubectl(
                "create", "service", "clusterip", new_service, "--tcp=3000"
            )
        )
        new_services.append(new_service)
    if new_services:
        service_ips = get_service_ips()
    # Store Service CIDR:
    service_cidr = covering_cidr(service_ips)
    # Delete new services:
    for new_service in new_services:
        runner.check_call(runner.kubectl("delete", "service", new_service))

    if runner.chatty:
        runner.show(
            "Guessing that Services IP range is {}. Services started after"
            " this point will be inaccessible if are outside this range;"
            " restart telepresence if you can't access a "
            "new Service.\n".format(service_cidr)
        )
    return service_cidr
Example #29
def write_env_file(runner: Runner, env: Dict[str, str], env_file: str) -> None:
    try:
        data, skipped = _serialize_as_env_file(env)
        with open(env_file, "w") as env_file_file:
            env_file_file.write(data)
        if skipped:
            runner.show("Skipped these environment keys when writing env "
                        "file because the associated values have newlines:")
            for key in skipped:
                runner.show(key)
    except IOError as exc:
        runner.show("Failed to write environment as env file: {}".format(exc))
Example #30
def create_new_deployment(runner: Runner, deployment_arg: str, image_name: str,
                          expose: PortMapping,
                          add_custom_nameserver: bool) -> Tuple[str, str]:
    """
    Create a new Deployment, return its name and Kubernetes label.
    """
    span = runner.span()
    run_id = runner.session_id

    def remove_existing_deployment():
        runner.get_output(
            runner.kubectl(
                "delete",
                "--ignore-not-found",
                "svc,deploy",
                "--selector=telepresence=" + run_id,
            ))

    runner.add_cleanup("Delete new deployment", remove_existing_deployment)
    remove_existing_deployment()
    command = [
        "run",  # This will result in using Deployment:
        "--restart=Always",
        "--limits=cpu=100m,memory=256Mi",
        "--requests=cpu=25m,memory=64Mi",
        deployment_arg,
        "--image=" + image_name,
        "--labels=telepresence=" + run_id,
    ]
    # Provide a stable argument ordering.  Reverse it because that happens to
    # make some current tests happy but in the long run that's totally
    # arbitrary and doesn't need to be maintained.  See issue 494.
    for port in sorted(expose.remote(), reverse=True):
        command.append("--port={}".format(port))
    if expose.remote():
        command.append("--expose")
    # If we're on local VM we need to use different nameserver to prevent
    # infinite loops caused by sshuttle:
    if add_custom_nameserver:
        command.append("--env=TELEPRESENCE_NAMESERVER=" +
                       get_alternate_nameserver())
    try:
        runner.get_output(runner.kubectl(command), reveal=True, stderr=STDOUT)
    except CalledProcessError as exc:
        raise runner.fail("Failed to create deployment {}:\n{}".format(
            deployment_arg, exc.stdout))
    span.end()
    return deployment_arg, run_id