예제 #1 — 파일: main.py, 프로젝트: spotx/telepresence
def main():
    """
    Top-level function for Telepresence.

    Runs in three phases: subcommand dispatch, intent capture (no changes
    to the machine or cluster yet), and the actual proxied session.
    """

    # Phase 0: if a subcommand was given, hand off to it immediately.
    with crash_reporting():
        command_args = command_parse_args(None, only_for_commands=True)
    if command_args is not None:
        command_main(command_args)

    # Phase 1: capture environment info and the user's intent.
    # No changes to the machine or the cluster, no cleanup needed beyond
    # the time-tracking span registered below.
    with crash_reporting():
        user_args = parse_args()  # tab-completion stuff goes here

        runner = Runner(Output(user_args.logfile), None, user_args.verbose)
        timing_span = runner.span()
        runner.add_cleanup("Stop time tracking", timing_span.end)
        runner.kubectl = KubeInfo(runner, user_args)

        # Each setup() validates its slice of the arguments and returns a
        # callable that performs the corresponding operation later on.
        launch_proxy = proxy.setup(runner, user_args)
        establish_connection = connect.setup(runner, user_args)
        fetch_remote_env, emit_env_files = remote_env.setup(runner, user_args)
        run_user_command = outbound.setup(runner, user_args)
        attach_remote_fs = mount.setup(runner, user_args)

        final_checks(runner, user_args)

        # Usage tracking
        call_scout(runner, user_args)

    # Phase 2: now it's okay to change things.
    with runner.cleanup_handling(), crash_reporting(runner):
        # Set up the proxy pod (operation -> pod name)
        remote_pod = launch_proxy(runner)

        # Connect to the proxy (pod name -> ssh object)
        socks_port, ssh = establish_connection(runner, remote_pod)

        # Capture remote environment information (ssh object -> env info)
        remote_environment = fetch_remote_env(runner, remote_pod)

        # Handle filesystem stuff
        mount_point = attach_remote_fs(runner, remote_environment, ssh)

        # Maybe write environment files
        emit_env_files(runner, remote_environment)

        # Set up outbound networking and launch the user command with the
        # captured remote environment.
        child_process = run_user_command(
            runner, remote_pod, remote_environment, socks_port, ssh,
            mount_point
        )

        wait_for_exit(runner, child_process)
예제 #2
def main():
    """
    Top-level function for Telepresence.

    Runs in three stages: preliminaries (inspect the local environment only),
    intent capture (each subsystem validates its arguments and returns an
    operation callable), and the actions that change the machine/cluster.
    """

    with crash_reporting():
        ########################################
        # Preliminaries: No changes to the machine or the cluster, no cleanup
        # Capture environment info

        args = parse_args()  # tab-completion stuff goes here

        runner = Runner(args.logfile, args.verbose)
        # Time-tracking span for the whole run; ended via the cleanup hook.
        span = runner.span()
        runner.add_cleanup("Stop time tracking", span.end)
        set_kube_command(runner, args)

    with runner.cleanup_handling(), crash_reporting(runner):
        ########################################
        # Intent: Fast, user prompts here, cleanup available
        # Capture the user's intent

        # Each setup() validates its slice of args and hands back a callable
        # that performs the corresponding operation in the action stage.
        start_proxy = proxy.setup(runner, args)
        do_connect = connect.setup(runner, args)
        get_remote_env, write_env_files = remote_env.setup(runner, args)
        launch = outbound.setup(runner, args)
        mount_remote = mount.setup(runner, args)

        final_checks(runner, args)

        # Usage tracking
        call_scout(runner, args)

        ########################################
        # Action: Perform the user's intended operation(s)
        # Now it's okay to change things

        # Set up the proxy pod (operation -> pod name)
        remote_info = start_proxy(runner)

        # Connect to the proxy (pod name -> ssh object)
        socks_port, ssh = do_connect(runner, remote_info)

        # Capture remote environment information (ssh object -> env info)
        env, pod_info = get_remote_env(runner, ssh, remote_info)

        # Handle filesystem stuff
        mount_dir = mount_remote(runner, env, ssh)

        # Maybe write environment files
        write_env_files(runner, env)

        # Set up outbound networking (pod name, ssh object)
        # Launch user command with the correct environment (...)
        user_process = launch(
            runner, remote_info, env, socks_port, ssh, mount_dir, pod_info
        )

        # Block until the user's command exits; cleanup hooks then fire.
        runner.wait_for_exit(user_process)
예제 #3 — 파일: main.py, 프로젝트: spotx/telepresence
def command_main(args):
    """
    Top-level function for Telepresence when executing subcommands.

    Builds a Runner from the parsed arguments, records usage, then
    dispatches to the implementation of the requested subcommand.
    """

    with crash_reporting():
        runner = Runner(Output(args.logfile), None, args.verbose)
        timing_span = runner.span()
        runner.add_cleanup("Stop time tracking", timing_span.end)
        runner.kubectl = KubeInfo(runner, args)

        # Usage tracking: report the subcommand as the operation performed.
        args.operation = args.command
        args.method = "teleproxy"
        call_scout(runner, args)

    # Guard clause: only the "outbound" subcommand is implemented so far.
    if args.command != "outbound":
        raise runner.fail("Not implemented!")

    return outbound.command(runner)
예제 #4
def main(session):
    """
    Top-level function for Telepresence.

    Populates *session* (args, output, runner, ssh, remote_info, env) as it
    progresses; later steps read state stored by earlier ones, so statement
    order matters here.
    """

    ########################################
    # Preliminaries: No changes to the machine or the cluster, no cleanup

    with crash_reporting():
        session.args = parse_args()  # tab-completion stuff goes here

        session.output = Output(session.args.logfile)
        # Drop logfile from args so nothing downstream re-opens the log.
        del session.args.logfile

        session.kube_info, session.runner = analyze_args(session)

        # Time-tracking span for the whole run; ended via the cleanup hook.
        span = session.runner.span()
        session.runner.add_cleanup("Stop time tracking", span.end)

        # Usage tracking
        call_scout(session)

    ########################################
    # Now it's okay to change things

    with session.runner.cleanup_handling(), crash_reporting(session.runner):
        # Local aliases for brevity in the action sequence below.
        runner = session.runner
        args = session.args

        # Set up the proxy pod (operation -> pod name)
        remote_info = start_proxy(runner, args)

        # Connect to the proxy (pod name -> ssh object)
        socks_port, ssh = connect(runner, remote_info, args)

        # Capture remote environment information (ssh object -> env info)
        env = get_remote_env(runner, args, remote_info)

        # Used by mount_remote
        session.ssh = ssh
        session.remote_info = remote_info
        session.env = env

        # Handle filesystem stuff (pod name, ssh object)
        mount_dir = mount_remote(session)

        # Maybe write environment files
        write_env_files(session)

        # Set up outbound networking (pod name, ssh object)
        # Launch user command with the correct environment (...)
        # "container" runs the user command inside Docker; anything else
        # runs it locally through the SOCKS proxy.
        if args.method == "container":
            user_process = run_docker_command(
                runner,
                remote_info,
                args,
                env,
                ssh,
                mount_dir,
            )
        else:
            user_process = run_local_command(runner, remote_info, args, env,
                                             socks_port, ssh)

        # Block until the user's command exits; cleanup hooks then fire.
        wait_for_exit(runner, user_process)
예제 #5
    def go():
        """
        Run the full preliminary setup, then start the proxy session.

        Picks kubectl vs oc, records usage, resolves the Kubernetes
        context/namespace/server, validates local tooling (ssh, torsocks,
        sshfs, conntrack), then starts the proxy and runs the user command.

        Raises SystemExit with a message on any unrecoverable setup problem.
        """
        # We don't quite know yet if we want kubectl or oc (if someone has both
        # it depends on the context), so until we know the context just guess.
        # We prefer kubectl over oc insofar as (1) kubectl commands we do in
        # this prelim setup stage don't require oc and (2) sometimes oc is a
        # different binary unrelated to OpenShift.
        if which("kubectl"):
            prelim_command = "kubectl"
        elif which("oc"):
            prelim_command = "oc"
        else:
            raise SystemExit("Found neither 'kubectl' nor 'oc' in your $PATH.")

        # Usage tracking
        try:
            kubectl_version_output = str(
                check_output([prelim_command, "version", "--short"]),
                "utf-8").split("\n")
            kubectl_version = kubectl_version_output[0].split(": v")[1]
            kube_cluster_version = kubectl_version_output[1].split(": v")[1]
        except CalledProcessError as exc:
            # Version detection is best-effort; report the failure via scout
            # rather than aborting.
            kubectl_version = kube_cluster_version = "(error: {})".format(exc)
        if args.deployment:
            operation = "deployment"
        elif args.new_deployment:
            operation = "new_deployment"
        elif args.swap_deployment:
            operation = "swap_deployment"
        else:
            operation = "bad_args"
        scouted = call_scout(kubectl_version, kube_cluster_version, operation,
                             args.method)

        # Make sure we have a Kubernetes context set either on command line or
        # in kubeconfig:
        if args.context is None:
            try:
                args.context = str(
                    check_output([prelim_command, "config", "current-context"],
                                 stderr=STDOUT), "utf-8").strip()
            except CalledProcessError:
                raise SystemExit(
                    "No current-context set. "
                    "Please use the --context option to explicitly set the "
                    "context.")

        # Figure out explicit namespace if its not specified, and the server
        # address (we use the server address to determine for good whether we
        # want oc or kubectl):
        kubectl_config = json.loads(
            str(check_output([prelim_command, "config", "view", "-o", "json"]),
                "utf-8"))
        for context_setting in kubectl_config["contexts"]:
            if context_setting["name"] == args.context:
                if args.namespace is None:
                    args.namespace = context_setting["context"].get(
                        "namespace", "default")
                cluster = context_setting["context"]["cluster"]
                break
        else:
            # Without this fallback an unmatched context left 'cluster'
            # unbound and crashed later with a NameError; fail fast instead.
            raise SystemExit("Error: Unable to find cluster information")
        for cluster_setting in kubectl_config["clusters"]:
            if cluster_setting["name"] == cluster:
                server = cluster_setting["cluster"]["server"]
                break
        else:
            # Same fail-fast treatment when the cluster entry is missing,
            # which previously left 'server' unbound.
            raise SystemExit("Error: Unable to find server information")

        # Log file path should be absolute since some processes may run in
        # different directories:
        if args.logfile != "-":
            args.logfile = os.path.abspath(args.logfile)
        runner = Runner.open(args.logfile, kubectl_or_oc(server), args.verbose)
        span = runner.span()
        atexit.register(span.end)
        runner.write("Scout info: {}\n".format(scouted))
        runner.write(
            "Context: {}, namespace: {}, kubectl_command: {}\n".format(
                args.context, args.namespace, runner.kubectl_cmd))

        # Figure out if we need capability that allows for ports < 1024:
        if any(p < 1024 for p in args.expose.remote()):
            if runner.kubectl_cmd == "oc":
                # OpenShift doesn't support running as root:
                raise SystemExit("OpenShift does not support ports <1024.")
            args.needs_root = True
        else:
            args.needs_root = False

        # minikube/minishift break DNS because DNS gets captured, sent to
        # minikube, which sends it back to DNS server set by host, resulting in
        # loop... we've fixed that for most cases, but not --deployment.
        def check_if_in_local_vm() -> bool:
            # Minikube just has 'minikube' as context'
            if args.context == "minikube":
                return True
            # Minishift has complex context name, so check by server:
            if runner.kubectl_cmd == "oc" and which("minishift"):
                ip = runner.get_output(["minishift", "ip"]).strip()
                if ip and ip in server:
                    return True
            return False

        args.in_local_vm = check_if_in_local_vm()
        if args.in_local_vm:
            runner.write("Looks like we're in a local VM, e.g. minikube.\n")
        if (args.in_local_vm and args.method == "vpn-tcp"
                and args.new_deployment is None
                and args.swap_deployment is None):
            raise SystemExit(
                "vpn-tcp method doesn't work with minikube/minishift when"
                " using --deployment. Use --swap-deployment or"
                " --new-deployment instead.")

        # Make sure we can access Kubernetes:
        try:
            if runner.kubectl_cmd == "oc":
                status_command = "status"
            else:
                status_command = "cluster-info"
            runner.get_output([
                runner.kubectl_cmd, "--context", args.context, status_command
            ])
        except (CalledProcessError, OSError, IOError) as e:
            sys.stderr.write("Error accessing Kubernetes: {}\n".format(e))
            raise SystemExit(1)
        # Make sure we can run openssh:
        try:
            version = runner.get_output(["ssh", "-V"],
                                        stdin=DEVNULL,
                                        stderr=STDOUT)
            if not version.startswith("OpenSSH"):
                raise SystemExit(
                    "'ssh' is not the OpenSSH client, apparently.")
        except (CalledProcessError, OSError, IOError) as e:
            sys.stderr.write("Error running ssh: {}\n".format(e))
            raise SystemExit(1)

        # Other requirements:
        require_command(runner, "torsocks",
                        "Please install torsocks (v2.1 or later)")
        require_command(runner, "sshfs")
        # Need conntrack for sshuttle on Linux:
        if sys.platform.startswith("linux") and args.method == "vpn-tcp":
            require_command(runner, "conntrack")

        # All checks passed; start the proxy and hand off to the user command.
        subprocesses, env, socks_port, ssh, remote_info = start_proxy(
            runner, args)
        if args.method == "container":
            run_docker_command(
                runner,
                remote_info,
                args,
                env,
                subprocesses,
                ssh,
            )
        else:
            run_local_command(runner, remote_info, args, env, subprocesses,
                              socks_port, ssh)
예제 #6
    def go():
        """
        Perform preliminary setup: choose kubectl/oc, record usage, and
        resolve the Kubernetes context, namespace and server address.

        Returns a tuple ``(runner, scouted, server)`` on success; raises
        SystemExit on any unrecoverable setup problem.
        """
        # We don't quite know yet if we want kubectl or oc (if someone has both
        # it depends on the context), so until we know the context just guess.
        # We prefer kubectl over oc insofar as (1) kubectl commands we do in
        # this prelim setup stage don't require oc and (2) sometimes oc is a
        # different binary unrelated to OpenShift.
        if which("kubectl"):
            prelim_command = "kubectl"
        elif which("oc"):
            prelim_command = "oc"
        else:
            raise SystemExit("Found neither 'kubectl' nor 'oc' in your $PATH.")

        # Usage tracking
        try:
            kubectl_version_output = str(
                check_output([prelim_command, "version", "--short"]),
                "utf-8").split("\n")
            kubectl_version = kubectl_version_output[0].split(": v")[1]
            kube_cluster_version = kubectl_version_output[1].split(": v")[1]
        except CalledProcessError as exc:
            # Version detection is best-effort; report the failure via scout
            # rather than aborting.
            kubectl_version = kube_cluster_version = "(error: {})".format(exc)
        if args.deployment:
            operation = "deployment"
        elif args.new_deployment:
            operation = "new_deployment"
        elif args.swap_deployment:
            operation = "swap_deployment"
        else:
            operation = "bad_args"
        scouted = call_scout(kubectl_version, kube_cluster_version, operation,
                             args.method)

        # Make sure we have a Kubernetes context set either on command line or
        # in kubeconfig:
        if args.context is None:
            try:
                args.context = str(
                    check_output([prelim_command, "config", "current-context"],
                                 stderr=STDOUT), "utf-8").strip()
            except CalledProcessError:
                raise SystemExit(
                    "No current-context set. "
                    "Please use the --context option to explicitly set the "
                    "context.")

        # Figure out explicit namespace if its not specified, and the server
        # address (we use the server address to determine for good whether we
        # want oc or kubectl):
        kubectl_config = json.loads(
            str(check_output([prelim_command, "config", "view", "-o", "json"]),
                "utf-8"))
        for context_setting in kubectl_config["contexts"]:
            if context_setting["name"] == args.context:
                if args.namespace is None:
                    args.namespace = context_setting["context"].get(
                        "namespace", "default")
                cluster = context_setting["context"]["cluster"]
                break
        # for/else: fail fast if the requested context isn't in kubeconfig.
        else:
            return exit("Error: Unable to find cluster information")
        for cluster_setting in kubectl_config["clusters"]:
            if cluster_setting["name"] == cluster:
                server = cluster_setting["cluster"]["server"]
                break
        # for/else: fail fast if the cluster entry is missing.
        else:
            return exit("Error: Unable to find server information")

        # Log file path should be absolute since some processes may run in
        # different directories:
        if args.logfile != "-":
            args.logfile = os.path.abspath(args.logfile)
        runner = Runner.open(args.logfile, kubectl_or_oc(server), args.verbose)
        return runner, scouted, server
예제 #7
0
def main(session):
    """
    Top-level function for Telepresence
    """

    ########################################
    # Preliminaries: No changes to the machine or the cluster, no cleanup

    session.args = parse_args()  # tab-completion stuff goes here

    session.output = Output(session.args.logfile)
    del session.args.logfile

    session.kube_info, session.runner = analyze_args(session)

    span = session.runner.span()
    atexit.register(span.end)

    # Set up signal handling
    # Make SIGTERM and SIGHUP do clean shutdown (in particular, we want atexit
    # functions to be called):
    def shutdown(signum, frame):
        raise SystemExit(0)

    signal.signal(signal.SIGTERM, shutdown)
    signal.signal(signal.SIGHUP, shutdown)

    # Usage tracking
    call_scout(session)

    # Set up exit handling
    # XXX exit handling via atexit
    try:
        ########################################
        # Now it's okay to change things

        runner = session.runner
        args = session.args

        # Set up the proxy pod (operation -> pod name)
        remote_info = start_proxy(runner, args)

        # Connect to the proxy (pod name -> ssh object)
        subprocesses, socks_port, ssh = connect(runner, remote_info, args)

        # Capture remote environment information (ssh object -> env info)
        env = get_remote_env(runner, args, remote_info)

        # Used by mount_remote
        session.ssh = ssh
        session.remote_info = remote_info
        session.env = env

        # Handle filesystem stuff (pod name, ssh object)
        mount_dir = mount_remote(session)

        # Maybe write environment files
        write_env_files(session)

        # Set up outbound networking (pod name, ssh object)
        # Launch user command with the correct environment (...)
        if args.method == "container":
            user_process = run_docker_command(
                runner,
                remote_info,
                args,
                env,
                subprocesses,
                ssh,
                mount_dir,
            )
        else:
            user_process = run_local_command(runner, remote_info, args, env,
                                             subprocesses, socks_port, ssh,
                                             mount_dir)

        # Clean up (call the cleanup methods for everything above)
        # XXX handled by wait_for_exit and atexit
        wait_for_exit(runner, user_process, subprocesses)

    finally:
        pass