Example #1
def create_new_deployment(
    runner: Runner, deployment_arg: str, image_name: str, expose: PortMapping,
    add_custom_nameserver: bool
) -> Tuple[str, str]:
    """
    Create a new Deployment, return its name and Kubernetes label.
    """
    span = runner.span()
    run_id = runner.session_id
    runner.show(
        "Starting network proxy to cluster using "
        "new Deployment {}".format(deployment_arg)
    )

    def remove_existing_deployment(quiet=False):
        if not quiet:
            runner.show("Cleaning up Deployment {}".format(deployment_arg))
        runner.check_call(
            runner.kubectl(
                "delete",
                "--ignore-not-found",
                "svc,deploy",
                "--selector=telepresence=" + run_id,
            )
        )

    runner.add_cleanup("Delete new deployment", remove_existing_deployment)
    remove_existing_deployment(quiet=True)
    command = [
        "run",  # This will result in using Deployment:
        "--restart=Always",
        "--limits=cpu=100m,memory=256Mi",
        "--requests=cpu=25m,memory=64Mi",
        deployment_arg,
        "--image=" + image_name,
        "--labels=telepresence=" + run_id,
    ]
    # Provide a stable argument ordering.  Reverse it because that happens to
    # make some current tests happy but in the long run that's totally
    # arbitrary and doesn't need to be maintained.  See issue 494.
    for port in sorted(expose.remote(), reverse=True):
        command.append("--port={}".format(port))
    if expose.remote():
        command.append("--expose")
    # If we're on a local VM we need to use a different nameserver to prevent
    # infinite loops caused by sshuttle:
    if add_custom_nameserver:
        command.append(
            "--env=TELEPRESENCE_NAMESERVER=" + get_alternate_nameserver()
        )
    try:
        runner.check_call(runner.kubectl(command))
    except CalledProcessError as exc:
        raise runner.fail(
            "Failed to create deployment {}:\n{}".format(
                deployment_arg, exc.stderr
            )
        )
    span.end()
    return deployment_arg, run_id
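
The port loop above emits ports in descending order, per the comment about issue 494. A minimal, self-contained illustration of that ordering (the port numbers here are made up):

remote_ports = {8080, 9000, 443}
port_args = ["--port={}".format(p) for p in sorted(remote_ports, reverse=True)]
# port_args == ['--port=9000', '--port=8080', '--port=443']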
Example #2
def create_new_deployment(runner: Runner,
                          args: argparse.Namespace) -> Tuple[str, str]:
    """Create a new Deployment, return its name and Kubernetes label."""
    span = runner.span()
    run_id = str(uuid4())

    def remove_existing_deployment():
        runner.get_kubectl(
            args.context, args.namespace, [
                "delete",
                "--ignore-not-found",
                "svc,deploy",
                "--selector=telepresence=" + run_id,
            ]
        )

    atexit.register(remove_existing_deployment)
    remove_existing_deployment()
    command = [
        "run",
        # This will result in using a Deployment:
        "--restart=Always",
        "--limits=cpu=100m,memory=256Mi",
        "--requests=cpu=25m,memory=64Mi",
        args.new_deployment,
        "--image=" + TELEPRESENCE_REMOTE_IMAGE,
        "--labels=telepresence=" + run_id,
    ]
    # Provide a stable argument ordering.  Reverse it because that happens to
    # make some current tests happy but in the long run that's totally
    # arbitrary and doesn't need to be maintained.  See issue 494.
    for port in sorted(args.expose.remote(), reverse=True):
        command.append("--port={}".format(port))
    if args.expose.remote():
        command.append("--expose")
    # If we're on a local VM we need to use a different nameserver to prevent
    # infinite loops caused by sshuttle:
    if args.method == "vpn-tcp" and args.in_local_vm:
        command.append(
            "--env=TELEPRESENCE_NAMESERVER=" + get_alternate_nameserver()
        )
    if args.needs_root:
        override = {
            "apiVersion": "extensions/v1beta1",
            "spec": {
                "template": {
                    "spec": {
                        "securityContext": {
                            "runAsUser": 0
                        }
                    }
                }
            }
        }
        command.append("--overrides=" + json.dumps(override))
    runner.get_kubectl(args.context, args.namespace, command)
    span.end()
    return args.new_deployment, run_id
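
This variant additionally patches the generated Deployment with kubectl's --overrides flag so the proxy container runs as root. A self-contained sketch of exactly what that appended argument contains:

import json

override = {
    "apiVersion": "extensions/v1beta1",
    "spec": {
        "template": {
            "spec": {
                "securityContext": {
                    "runAsUser": 0  # run the proxy container as root
                }
            }
        }
    }
}
# Prints the same "--overrides=..." argument the function appends above:
print("--overrides=" + json.dumps(override))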
Example #3
def create_new_deployment(runner: Runner,
                          args: argparse.Namespace) -> Tuple[str, str]:
    """Create a new Deployment, return its name and Kubernetes label."""
    span = runner.span()
    run_id = runner.session_id

    def remove_existing_deployment():
        runner.get_output(
            runner.kubectl(
                "delete",
                "--ignore-not-found",
                "svc,deploy",
                "--selector=telepresence=" + run_id,
            ))

    runner.add_cleanup("Delete new deployment", remove_existing_deployment)
    remove_existing_deployment()
    if args.needs_root:
        image_name = TELEPRESENCE_REMOTE_IMAGE_PRIV
    else:
        image_name = TELEPRESENCE_REMOTE_IMAGE
    command = [
        "run",
        # This will result in using a Deployment:
        "--restart=Always",
        "--limits=cpu=100m,memory=256Mi",
        "--requests=cpu=25m,memory=64Mi",
        args.new_deployment,
        "--image=" + image_name,
        "--labels=telepresence=" + run_id,
    ]
    # Provide a stable argument ordering.  Reverse it because that happens to
    # make some current tests happy but in the long run that's totally
    # arbitrary and doesn't need to be maintained.  See issue 494.
    for port in sorted(args.expose.remote(), reverse=True):
        command.append("--port={}".format(port))
    if args.expose.remote():
        command.append("--expose")
    # If we're on a local VM we need to use a different nameserver to prevent
    # infinite loops caused by sshuttle:
    if args.method == "vpn-tcp" and args.in_local_vm:
        command.append("--env=TELEPRESENCE_NAMESERVER=" +
                       get_alternate_nameserver())
    runner.get_output(runner.kubectl(command))
    span.end()
    return args.new_deployment, run_id
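
Where Example #2 registers cleanup with atexit.register directly, this variant goes through runner.add_cleanup, presumably so the tool can also unwind cleanups itself before exit. Runner's internals are not part of these examples; a minimal, hypothetical sketch of what such a cleanup stack could look like:

import atexit

class MiniRunner:
    """Hypothetical stand-in for Runner's cleanup bookkeeping."""

    def __init__(self):
        self._cleanup = []
        # Make sure remaining steps still run at interpreter exit.
        atexit.register(self.do_cleanup)

    def add_cleanup(self, title, callback):
        # Record a named cleanup step; later steps run first.
        self._cleanup.append((title, callback))

    def do_cleanup(self):
        while self._cleanup:
            _title, callback = self._cleanup.pop()
            callback()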
Example #4
def new_swapped_deployment(
    old_deployment: Dict,
    container_to_update: str,
    run_id: str,
    telepresence_image: str,
    add_custom_nameserver: bool,
) -> Tuple[Dict, Dict]:
    """
    Create a new Deployment that uses telepresence-k8s image.

    Makes the following changes:

    1. Changes to single replica.
    2. Disables command, args, livenessProbe, readinessProbe, workingDir.
    3. Adds labels.
    4. Adds TELEPRESENCE_NAMESERVER env variable, if requested.
    5. Runs as root, if requested.
    6. Sets terminationMessagePolicy.
    7. Adds TELEPRESENCE_CONTAINER_NAMESPACE env variable so the forwarder does
       not have to access the k8s API from within the pod.

    Returns dictionary that can be encoded to JSON and used with kubectl apply,
    and contents of swapped out container.
    """
    new_deployment_json = deepcopy(old_deployment)
    new_deployment_json["spec"]["replicas"] = 1
    new_deployment_json["metadata"].setdefault("labels",
                                               {})["telepresence"] = run_id
    new_deployment_json["spec"]["template"]["metadata"].setdefault(
        "labels", {}
    )["telepresence"] = run_id
    for container, old_container in zip(
        new_deployment_json["spec"]["template"]["spec"]["containers"],
        old_deployment["spec"]["template"]["spec"]["containers"],
    ):
        if container["name"] == container_to_update:
            container["image"] = telepresence_image
            # Not strictly necessary for real use, but tests break without this
            # since we don't upload test images to Docker Hub:
            container["imagePullPolicy"] = "IfNotPresent"
            # Drop unneeded fields:
            for unneeded in [
                "command", "args", "livenessProbe", "readinessProbe",
                "workingDir", "lifecycle"
            ]:
                try:
                    container.pop(unneeded)
                except KeyError:
                    pass
            # We don't write out termination file:
            container["terminationMessagePolicy"] = "FallbackToLogsOnError"
            # Use custom name server if necessary:
            if add_custom_nameserver:
                container.setdefault("env", []).append({
                    "name":
                    "TELEPRESENCE_NAMESERVER",
                    "value":
                    get_alternate_nameserver()
                })
            # Add namespace environment variable to support deployments using
            # automountServiceAccountToken: false. To be used by forwarder.py
            # in the k8s-proxy.
            container.setdefault("env", []).append({
                "name":
                "TELEPRESENCE_CONTAINER_NAMESPACE",
                "valueFrom": {
                    "fieldRef": {
                        "fieldPath": "metadata.namespace"
                    }
                }
            })
            return new_deployment_json, old_container

    raise RuntimeError(
        "Couldn't find container {} in the Deployment.".format(
            container_to_update
        )
    )
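
Because new_swapped_deployment is a pure transformation of dictionaries, it can be exercised without a cluster. A self-contained usage sketch (the deployment dict, run ID, and image tag below are made up; the function itself relies on copy.deepcopy):

from copy import deepcopy

old = {
    "metadata": {"name": "myapp"},
    "spec": {
        "replicas": 3,
        "template": {
            "metadata": {},
            "spec": {
                "containers": [{
                    "name": "web",
                    "image": "myapp:1.0",
                    "command": ["./server"],
                }]
            },
        },
    },
}
new, old_container = new_swapped_deployment(
    old, "web", "abc123", "datawire/telepresence-k8s:0.75", False
)
assert new["spec"]["replicas"] == 1
assert "command" not in new["spec"]["template"]["spec"]["containers"][0]
assert old_container["command"] == ["./server"]  # original left untouched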
Example #5
def setup(runner: Runner, args):
    """
    Determine how the user wants to set up the proxy in the cluster.
    """

    # OpenShift doesn't support running as root, so ports <1024 can't be bound:
    if (args.expose.has_privileged_ports()
            and runner.kubectl.cluster_is_openshift):
        raise runner.fail("OpenShift does not support ports <1024.")

    # Check the service account, if present
    if args.service_account:
        try:
            runner.check_call(
                runner.kubectl("get", "serviceaccount", args.service_account))
        except CalledProcessError as exc:
            raise runner.fail("Check service account {} failed:\n{}".format(
                args.service_account, exc.stderr))

    # Figure out which operation the user wants
    if args.deployment is not None:
        # This implies --deployment
        if _dc_exists(runner, args.deployment_arg):
            operation = existing_deployment_openshift
            deployment_type = "deploymentconfig"
        else:
            operation = existing_deployment
            deployment_type = "deployment"

    if args.new_deployment is not None:
        # This implies --new-deployment
        deployment_type = "deployment"
        operation = create_new_deployment

    if args.swap_deployment is not None:
        # This implies --swap-deployment
        if _dc_exists(runner, args.deployment_arg):
            operation = swap_deployment_openshift
            deployment_type = "deploymentconfig"
        else:
            operation = supplant_deployment
            deployment_type = "deployment"

    # minikube/minishift break DNS because DNS gets captured, sent to minikube,
    # which sends it back to the DNS server set by the host, resulting in a DNS
    # loop... We've fixed that for most cases by setting a distinct name server
    # for the proxy to use when making a new proxy pod, but that does not work
    # for --deployment.
    deployment_env = {}
    if args.method == "vpn-tcp" and runner.kubectl.in_local_vm:
        if args.operation == "deployment":
            raise runner.fail(
                "vpn-tcp method doesn't work with minikube/minishift when"
                " using --deployment. Use --swap-deployment or"
                " --new-deployment instead.")
        try:
            deployment_env["TELEPRESENCE_NAMESERVER"] \
                = get_alternate_nameserver()
            if args.also_proxy:
                proxy_names = []
                for name in args.also_proxy:
                    if not (re.search(r"[^\w.]", name)
                            or re.match(r"^(?:\d+\.){3}\d+$", name)):
                        proxy_names.append(name)
                if proxy_names:
                    deployment_env["TELEPRESENCE_LOCAL_NAMES"] \
                        = ",".join(proxy_names)

        except Exception as exc:
            raise runner.fail(
                "Failed to find a fallback nameserver: {}".format(exc))

    def start_proxy(runner_: Runner) -> RemoteInfo:
        tel_deployment, run_id = operation(runner_, args.deployment_arg,
                                           args.expose, deployment_env,
                                           args.service_account)
        remote_info = get_remote_info(
            runner,
            tel_deployment,
            deployment_type,
            run_id=run_id,
        )
        return remote_info

    return start_proxy
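
The --also-proxy filtering in this example only records names that look like plain hostnames: anything containing characters outside [\w.] or matching a bare IPv4 address is skipped. A quick, self-contained illustration (the inputs are made up):

import re

candidates = ["db.internal", "10.0.0.1", "*.example.com", "web"]
proxy_names = [
    name for name in candidates
    if not (re.search(r"[^\w.]", name)
            or re.match(r"^(?:\d+\.){3}\d+$", name))
]
# proxy_names == ['db.internal', 'web']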
Example #6
def setup(runner: Runner,
          args: argparse.Namespace) -> Callable[[Runner], RemoteInfo]:
    """
    Determine how the user wants to set up the proxy in the cluster.
    """

    if os.environ.get("TELEPRESENCE_USE_DEPLOYMENT", ""):
        return legacy_setup(runner, args)

    runner.show(
        "Using a Pod instead of a Deployment for the Telepresence proxy. "
        "If you experience problems, please file an issue!")
    runner.show(
        "Set the environment variable TELEPRESENCE_USE_DEPLOYMENT to any "
        "non-empty value to force the old behavior, e.g.,")
    runner.show(
        "    env TELEPRESENCE_USE_DEPLOYMENT=1 telepresence --run curl hello")
    runner.show("\n")

    # OpenShift doesn't support running as root, so ports <1024 can't be bound:
    if (args.expose.has_privileged_ports()
            and runner.kubectl.cluster_is_openshift):
        raise runner.fail("OpenShift does not support ports <1024.")

    # Check the service account, if present
    if args.service_account:
        try:
            runner.check_call(
                runner.kubectl("get", "serviceaccount", args.service_account))
        except CalledProcessError as exc:
            raise runner.fail("Check service account {} failed:\n{}".format(
                args.service_account, exc.stderr))

    # Collect user intent
    name, container = args.deployment_arg, ""
    if ":" in name:
        name, container = name.split(":", 1)
    deployment_env = {}  # type: Dict[str, str]

    if args.method == "vpn-tcp" and runner.kubectl.in_local_vm:
        # minikube/minishift break DNS because DNS gets captured, sent to
        # minikube, which sends it back to the DNS server set by the host,
        # resulting in a DNS loop... We've fixed that for most cases by setting
        # a distinct name server for the proxy to use when making a new proxy
        # pod, but that does not work automatically for --deployment.
        if args.operation == "deployment":
            raise runner.fail(
                "vpn-tcp method doesn't work with minikube/minishift when"
                " using --deployment. Use --swap-deployment or"
                " --new-deployment instead.")
        try:
            deployment_env["TELEPRESENCE_NAMESERVER"] \
                = get_alternate_nameserver()
        except Exception as exc:
            raise runner.fail(
                "Failed to find a fallback nameserver: {}".format(exc))

        # Support resolving a passed-in set of local names in the proxy.
        # Otherwise local name resolution is broken while Telepresence is
        # running, because Telepresence must use an alternate name server (see
        # DNS loop above) that presumably doesn't know your local setup.
        if args.also_proxy:
            proxy_names = []
            for proxy_name in args.also_proxy:
                if not (re.search(r"[^\w.]", proxy_name)
                        or re.match(r"^(?:\d+\.){3}\d+$", proxy_name)):
                    proxy_names.append(proxy_name)
            if proxy_names:
                deployment_env["TELEPRESENCE_LOCAL_NAMES"] \
                    = ",".join(proxy_names)

    intent = ProxyIntent(
        name,
        container,
        args.expose,
        deployment_env,
        args.service_account or "",
    )

    # Figure out which operation the user wants
    if args.operation == "deployment":
        operation = Existing(intent)  # type: ProxyOperation
    elif args.operation == "new_deployment":
        operation = New(intent)
    else:
        assert args.operation == "swap_deployment"
        operation = Swap(intent)

    operation.prepare(runner)

    return operation.act
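
Unlike the other variants, this one parses an optional container name out of deployment_arg. A small, self-contained illustration (the argument values are made up):

for arg in ["myapp", "myapp:sidecar"]:
    name, container = arg, ""
    if ":" in arg:
        name, container = arg.split(":", 1)
    print(name, repr(container))
# myapp ''
# myapp 'sidecar'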
Example #7
def setup(runner: Runner, args):
    """
    Determine how the user wants to set up the proxy in the cluster.
    """

    # OpenShift doesn't support running as root, so ports <1024 can't be bound:
    if (args.expose.has_privileged_ports()
            and runner.kubectl.cluster_is_openshift):
        raise runner.fail("OpenShift does not support ports <1024.")

    # Figure out which operation the user wants
    if args.deployment is not None:
        # This implies --deployment
        deployment_arg = args.deployment
        if _dc_exists(runner, deployment_arg):
            operation = existing_deployment_openshift
            deployment_type = "deploymentconfig"
        else:
            operation = existing_deployment
            deployment_type = "deployment"
        args.operation = "deployment"

    if args.new_deployment is not None:
        # This implies --new-deployment
        deployment_arg = args.new_deployment
        if runner.kubectl.cluster_is_openshift:
            # FIXME: This will not clean up the new dc
            deployment_type = "deploymentconfig"
        else:
            deployment_type = "deployment"
        operation = create_new_deployment
        args.operation = "new_deployment"

    if args.swap_deployment is not None:
        # This implies --swap-deployment
        deployment_arg = args.swap_deployment
        if _dc_exists(runner, deployment_arg):
            operation = swap_deployment_openshift
            deployment_type = "deploymentconfig"
        else:
            operation = supplant_deployment
            deployment_type = "deployment"
        args.operation = "swap_deployment"

    # minikube/minishift break DNS because DNS gets captured, sent to minikube,
    # which sends it back to the DNS server set by the host, resulting in a DNS
    # loop... We've fixed that for most cases by setting a distinct name server
    # for the proxy to use when making a new proxy pod, but that does not work
    # for --deployment.
    custom_nameserver = None
    if args.method == "vpn-tcp" and runner.kubectl.in_local_vm:
        if args.operation == "deployment":
            raise runner.fail(
                "vpn-tcp method doesn't work with minikube/minishift when"
                " using --deployment. Use --swap-deployment or"
                " --new-deployment instead.")
        try:
            custom_nameserver = get_alternate_nameserver()
        except Exception as exc:
            raise runner.fail(
                "Failed to find a fallback nameserver: {}".format(exc))

    def start_proxy(runner_: Runner) -> RemoteInfo:
        if args.service_account:
            try:
                runner_.check_call(
                    runner_.kubectl("get", "serviceaccount",
                                    args.service_account))
            except CalledProcessError as exc:
                raise runner.fail(
                    "Check service account {} failed:\n{}".format(
                        args.service_account, exc.stderr))
        tel_deployment, run_id = operation(runner_, deployment_arg,
                                           args.expose, custom_nameserver,
                                           args.service_account)
        remote_info = get_remote_info(
            runner,
            tel_deployment,
            deployment_type,
            run_id=run_id,
        )
        return remote_info

    return start_proxy
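
As in the other setup() variants, validation happens eagerly while the actual cluster work is deferred to the returned closure. A hypothetical call site (runner and args would come from the CLI entry point, which is not shown in these examples):

start_proxy = setup(runner, args)
# ... local preparation happens here ...
remote_info = start_proxy(runner)  # runs the chosen operation, finds the proxy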