Example #1
def convert_context_arguments(
        auth,
        organization_argument: str = None,
        project_argument: str = None,
        deck_argument: str = None) -> Tuple[str, str, str]:
    try:
        # organization
        if organization_argument:
            organization_id = convert_organization_argument_to_uuid(
                auth, organization_argument)
        else:
            organization_id = None

        # project
        if project_argument:
            project_id = convert_project_argument_to_uuid(
                auth, project_argument, organization_id=organization_id)
        else:
            project_id = None

        # deck
        if deck_argument:
            deck_id = convert_deck_argument_to_uuid(
                auth,
                deck_argument,
                organization_id=organization_id,
                project_id=project_id)
        else:
            deck_id = None
    except Exception as e:
        console.error(e, _exit=True)

    return organization_id, project_id, deck_id
Example #2
File: app.py Project: unikubehq/cli
def argument_apps(k8s,
                  apps: List[str],
                  multiselect: bool = False) -> List[str]:
    if not apps:
        app_choices = [
            pod.metadata.name for pod in k8s.get_pods().items
            if pod.status.phase not in ["Terminating", "Evicted", "Pending"]
        ]
        message = "Please select an app" if not multiselect else "Please select one or multiple apps"
        kwargs = {
            "message": message,
            "choices": app_choices,
            "multiselect": multiselect,
        }
        if multiselect:
            kwargs["transformer"] = lambda result: f"{', '.join(result)}"
            apps = console.list(**kwargs)
        else:
            apps = [console.list(**kwargs)]

    if not apps:
        console.error("No apps available.", _exit=True)

    if apps and any(
            c_app not in [pod.metadata.name for pod in k8s.get_pods().items]
            for c_app in apps):
        console.error("Some apps do not exist.", _exit=True)

    return apps
Example #3
    def get_kubeconfig(self, wait=10) -> Optional[str]:
        arguments = ["kubeconfig", "get", self.k3d_cluster_name]
        # this is a nasty busy wait, but we don't have another option
        for i in range(1, wait + 1):
            process = self._execute(arguments)
            if process.returncode == 0:
                break
            else:
                console.info(
                    f"Waiting for the cluster to be ready ({i}/{wait}).")
                sleep(2)

        if process.returncode != 0:
            console.error(
                "Something went completely wrong with the cluster spin up (or we got a timeout)."
            )
        else:
            # we now need to write the kubeconfig to a file
            config = process.stdout.read().strip()
            if not os.path.isdir(
                    os.path.join(settings.CLI_KUBECONFIG_DIRECTORY,
                                 self.k3d_cluster_name)):
                os.mkdir(
                    os.path.join(settings.CLI_KUBECONFIG_DIRECTORY,
                                 self.k3d_cluster_name))
            config_path = os.path.join(
                settings.CLI_KUBECONFIG_DIRECTORY,
                self.k3d_cluster_name,
                "kubeconfig.yaml",
            )
            with open(config_path, "w+") as file:
                file.write(config)
            return config_path
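The busy wait in get_kubeconfig retries a subprocess call until it succeeds or the attempt budget runs out. Stripped of the unikube internals, the same pattern can be sketched as a small self-contained helper (the function name and the sample command are illustrative only):

import subprocess
from time import sleep


def wait_for_success(command, attempts=10, delay=2.0):
    """Run `command` repeatedly until it exits with 0 or the attempt budget is used up."""
    for i in range(1, attempts + 1):
        process = subprocess.run(command, capture_output=True, text=True)
        if process.returncode == 0:
            return process.stdout
        print(f"Waiting for the command to succeed ({i}/{attempts}).")
        sleep(delay)
    return None


# e.g. wait_for_success(["k3d", "kubeconfig", "get", "my-cluster"])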
Example #4
def info(ctx, project=None, organization=None, **kwargs):
    """
    Displays the id, title and optional description of the selected project.
    """

    _ = ctx.auth.refresh()

    # context
    organization_id, project_id, _ = ctx.context.get_context_ids_from_arguments(
        organization_argument=organization, project_argument=project)

    # argument
    if not project_id:
        project_id = console.project_list(ctx, organization_id=organization_id)
        if not project_id:
            return None

    # GraphQL
    try:
        graph_ql = GraphQL(authentication=ctx.auth)
        data = graph_ql.query(
            """
            query($id: UUID!) {
                project(id: $id) {
                    id
                    title
                    description
                    specRepository
                    specRepositoryBranch
                    organization {
                        title
                    }
                }
            }
            """,
            query_variables={"id": project_id},
        )
        project_selected = data["project"]
    except Exception as e:
        console.debug(e)
        console.exit_generic_error()

    # console
    if project_selected:
        project_selected["organization"] = project_selected.pop(
            "organization").get("title", "-")
        project_selected["repository"] = project_selected.pop("specRepository")
        project_selected["repository branch"] = project_selected.pop(
            "specRepositoryBranch")

        console.table(
            data={
                "key": [k for k in project_selected.keys()],
                "value": [v for v in project_selected.values()],
            },
            headers=["Key", "Value"],
        )
    else:
        console.error("Project does not exist.")
Example #5
def delete(ctx, project=None, organization=None, **kwargs):
    """
    Delete the current project and all related data.
    """

    # context
    organization_id, project_id, _ = ctx.context.get_context_ids_from_arguments(
        organization_argument=organization, project_argument=project)

    # cluster
    cluster_list = ctx.cluster_manager.get_cluster_list()

    # argument
    if not project_id:
        project_id = console.project_list(
            ctx,
            organization_id=organization_id,
            filter=[cluster.id for cluster in cluster_list])
        if not project_id:
            return None

    if project_id not in [cluster.id for cluster in cluster_list]:
        console.info(
            f"The project cluster for '{project_id_2_display_name(ctx=ctx, id=project_id)}' could not be found.",
            _exit=True,
        )

    # initial warning
    console.warning(
        "Deleting a project will remove the cluster including all of its data."
    )

    # confirm question
    confirm = input("Do you want to continue [N/y]: ")
    if confirm not in ["y", "Y", "yes", "Yes"]:
        console.info("No action taken.", _exit=True)

    # get cluster
    cluster = None
    for cluster_data in cluster_list:
        if cluster_data.id == project_id:
            cluster = ctx.cluster_manager.select(cluster_data=cluster_data, )
            break

    # delete cluster
    if not cluster.exists():
        ctx.cluster_manager.delete(cluster.id)
        console.info(
            f"No Kubernetes cluster to delete for '{cluster.display_name}', nothing to do.",
            _exit=True)

    success = cluster.delete()

    # console
    if success:
        console.success("The project was deleted successfully.")
        ctx.cluster_manager.delete(cluster.id)
    else:
        console.error("The cluster could not be deleted.")
Example #6
def down(ctx, project=None, organization=None, **kwargs):
    """
    Stop/pause cluster.
    """

    # context
    organization_id, project_id, _ = ctx.context.get_context_ids_from_arguments(
        organization_argument=organization, project_argument=project)

    # cluster
    cluster_list = ctx.cluster_manager.get_cluster_list(ready=True)

    # argument
    if not project_id:
        project_id = console.project_list(
            ctx,
            organization_id=organization_id,
            filter=[cluster.id for cluster in cluster_list])
        if not project_id:
            return None

    # check if project is in local storage
    if project_id not in [cluster.id for cluster in cluster_list]:
        console.info(
            f"The project cluster for '{project_id_2_display_name(ctx=ctx, id=project_id)}' is not up or does not exist yet.",
            _exit=True,
        )

    # get cluster
    cluster = None
    for cluster_data in cluster_list:
        if cluster_data.id == project_id:
            cluster = ctx.cluster_manager.select(cluster_data=cluster_data, )
            break

    # cluster down
    if not cluster.exists():
        # something went wrong or the cluster was already deleted from somewhere else
        console.info(
            f"No Kubernetes cluster to stop for '{cluster.display_name}'",
            _exit=True)

    if not cluster.ready():
        console.info(
            f"Kubernetes cluster for '{cluster.display_name}' is not running",
            _exit=True)

    console.info("Stopping Telepresence daemon.")
    Telepresence(cluster.storage.get()).stop()

    # stop cluster
    console.info(f"Stopping Kubernetes cluster for '{cluster.display_name}'")
    success = cluster.stop()

    # console
    if success:
        console.success("The project cluster is down.")
    else:
        console.error("The cluster could not be stopped.")
Example #7
File: app.py Project: unikubehq/cli
def shell(ctx,
          app,
          organization=None,
          project=None,
          deck=None,
          container=None,
          **kwargs):
    """
    Drop into an interactive shell.
    """

    cluster_data, deck = get_deck_from_arguments(ctx, organization, project,
                                                 deck)

    # get cluster
    cluster = get_cluster_or_exit(ctx, cluster_data.id)
    provider_data = cluster.storage.get()

    # shell
    k8s = KubeAPI(provider_data, deck)
    app = argument_app(k8s, app)

    # get the data of the selected pod
    data = k8s.get_pod(app)
    telepresence = Telepresence(provider_data)

    # derive the corresponding deployment by stripping the pod name suffix
    deployment = "-".join(data.metadata.name.split("-")[0:-2])

    # 1. check if this pod is of a switched deployment (in case of an active Telepresence)
    if telepresence.is_swapped(deployment, namespace=data.metadata.namespace):
        # the container name generated in "app switch" for that pod
        container_name = settings.TELEPRESENCE_DOCKER_IMAGE_FORMAT.format(
            project=cluster_data.name.lower(),
            deck=deck["title"].lower(),
            name=deployment.lower()).replace(":", "")

        if Docker().check_running(container_name):
            # 2. Connect to that container
            # 2.a connect using Docker
            Docker().exec(container_name, "/bin/sh", interactive=True)
        else:
            console.error(
                "This is a Telepresence pod, but no corresponding Docker container "
                "is running to connect to (inconsistent state?)")

    else:
        if not container and len(data.spec.containers) > 1:
            container = console.container_list(data=data)
            if not container:
                return None

        # 2.b connect using kubernetes
        KubeCtl(provider_data).exec_pod(app,
                                        deck["environment"][0]["namespace"],
                                        "/bin/sh",
                                        interactive=True,
                                        container=container)
Example #8
def version():
    """
    Check unikube version.
    """
    version = compare_current_and_latest_versions()
    if version is None:
        console.error("Could not determine version.", _exit=True)

    console.info(f"unikube, version {version}")
Example #9
    def start(self) -> None:
        arguments = ["connect", "--no-report"]
        process = self._execute(arguments)
        if process.returncode and process.returncode != 0:
            # this is a retry
            process = self._execute(arguments)
            if process.returncode and process.returncode != 0:
                console.error(
                    f"Could not start Telepresence daemon: {process.stdout.readlines()}",
                    _exit=False)
Example #10
    def uninstall(self, deployment, namespace=None, silent=False):
        # the deployment is already part of the initial argument list
        arguments = ["uninstall", "--agent", deployment]
        if namespace:
            arguments += ["-n", namespace]
        console.debug(arguments)
        process = self._execute(arguments)
        if not silent and process.returncode and process.returncode != 0:
            console.error(
                "There was an error with uninstalling the traffic agent, please find details above",
                _exit=False)
Example #11
File: app.py Project: unikubehq/cli
def get_deck_from_arguments(ctx, organization_id: str, project_id: str,
                            deck_id: str):
    # context
    organization_id, project_id, deck_id = ctx.context.get_context_ids_from_arguments(
        organization_argument=organization_id,
        project_argument=project_id,
        deck_argument=deck_id)

    # argument
    if not deck_id:
        deck_id = console.deck_list(ctx,
                                    organization_id=organization_id,
                                    project_id=project_id)
        if not deck_id:
            exit(1)

    # GraphQL
    try:
        graph_ql = GraphQL(authentication=ctx.auth)
        data = graph_ql.query(
            """
            query($id: UUID) {
                deck(id: $id) {
                    id
                    title
                    environment {
                        namespace
                    }
                    project {
                        id
                    }
                }
            }
            """,
            query_variables={"id": deck_id},
        )
        deck = data["deck"]
        project_id = deck["project"]["id"]
    except Exception as e:
        console.debug(e)
        console.exit_generic_error()

    # cluster data
    cluster_list = ctx.cluster_manager.get_cluster_list(ready=True)
    if project_id not in [cluster.id for cluster in cluster_list]:
        console.info(
            f"The project cluster for '{project_id}' is not up or does not exist yet.",
            _exit=True)

    cluster_data = ctx.cluster_manager.get(id=project_id)
    if not cluster_data:
        console.error("The cluster could not be found.", _exit=True)

    return cluster_data, deck
Example #12
def prune(ctx, **kwargs):
    """
    Remove unused clusters.
    """

    # GraphQL
    try:
        graph_ql = GraphQL(authentication=ctx.auth)
        data = graph_ql.query("""
            query {
                allProjects {
                    results {
                        id
                    }
                }
            }
            """)
        projects = data["allProjects"]["results"]
    except Exception as e:
        console.debug(e)
        console.exit_generic_error()

    # cluster
    cluster_list = ctx.cluster_manager.get_cluster_list()

    # select clusters to prune
    prune_clusters = []
    for cluster_data in cluster_list:
        if cluster_data.id not in [project["id"] for project in projects]:
            prune_clusters.append(cluster_data)

    for cluster_data in prune_clusters:
        console.info(
            f"It seems like the project for cluster '{cluster_data.name}' has been deleted."
        )

        # confirm question
        confirmed = console.confirm(
            question="Do you want to remove the cluster? [N/y]: ")
        if not confirmed:
            console.info("No action taken.")
            continue

        # delete
        try:
            cluster = ctx.cluster_manager.select(cluster_data=cluster_data)
            success = cluster.delete()
            if success:
                console.success("The project was deleted successfully.")
                ctx.cluster_manager.delete(cluster.id)
        except Exception as e:
            console.debug(e)
            console.error("The cluster could not be deleted.")
Example #13
def render_completion_script(cli, shell: str):
    """Renders a completion for a given shell."""
    SUPPORTED_SHELLS = ["bash"]

    if shell not in SUPPORTED_SHELLS:
        console.error(
            "{} is not supported. The following shells are supported: {}.".format(shell, ", ".join(SUPPORTED_SHELLS)),
            _exit=True,
        )

    if shell == "bash":
        render_bash(cli)
Example #14
    def leave(self, deployment, namespace=None, silent=False):
        arguments = ["leave", "--no-report"]
        if namespace:
            arguments.append(f"{deployment}-{namespace}")
        else:
            arguments.append(deployment)
        console.debug(arguments)
        process = self._execute(arguments)
        if not silent and process.returncode and process.returncode != 0:
            console.error(
                "There was an error with leaving the deployment, please find details above",
                _exit=False)
Example #15
def info(ctx, organization=None, project=None, deck=None, **kwargs):
    """
    Display further information of the selected deck.
    """

    # context
    organization_id, project_id, deck_id = ctx.context.get_context_ids_from_arguments(
        organization_argument=organization,
        project_argument=project,
        deck_argument=deck)

    # argument
    if not deck_id:
        deck_id = console.deck_list(ctx,
                                    organization_id=organization_id,
                                    project_id=project_id)
        if not deck_id:
            return None

    # GraphQL
    try:
        graph_ql = GraphQL(authentication=ctx.auth)
        data = graph_ql.query(
            """
            query($id: UUID) {
                deck(id: $id) {
                    id
                    title
                    description
                    namespace
                    type
                }
            }
            """,
            query_variables={"id": deck_id},
        )
        deck_selected = data["deck"]
    except Exception as e:
        console.debug(e)
        console.exit_generic_error()

    # console
    if deck_selected:
        console.table(
            data={
                "key": [k for k in deck_selected.keys()],
                "value": [v for v in deck_selected.values()],
            },
            headers=["Key", "Value"],
        )
    else:
        console.error("Deck does not exist.")
Example #16
def check_running_cluster(ctx: ClickContext, cluster_provider_type: K8sProviderType.k3d, project_instance: dict):
    for cluster_data in ctx.cluster_manager.get_all():
        cluster = ctx.cluster_manager.select(cluster_data=cluster_data, cluster_provider_type=cluster_provider_type)
        if cluster.exists() and cluster.ready():
            if cluster.name == project_instance["title"] and cluster.id == project_instance["id"]:
                Telepresence(cluster.storage.get()).start()
                console.info(f"Kubernetes cluster for '{cluster.display_name}' is already running.", _exit=True)
            else:
                console.error(
                    f"You cannot start multiple projects at the same time. Project {cluster.name} ({cluster.id}) is "
                    f"currently running. Please run 'unikube project down {cluster.id}' first and "
                    f"try again.",
                    _exit=True,
                )
Example #17
    def install(self) -> int:
        return_code = 0
        for idx, step in enumerate(self.installation_steps):
            console.info(f"Running installation step #{idx+1}: {step}")
            try:
                process = subprocess.run(step, shell=True)
                return_code += process.returncode
            except Exception as cpr:
                console.error(
                    f"An error occurred during the installation of {self.verbose_name}:"
                )
                console.error(str(cpr))
                return 1
        return return_code
Example #18
    def swap(self,
             deployment,
             image_name,
             command=None,
             namespace=None,
             envs=None,
             mounts=None,
             port=None):
        arguments = ["intercept", "--no-report", deployment]
        if namespace:
            arguments = arguments + ["--namespace", namespace]

        arguments = arguments + [
            "--port", f"{port}:{port}", "--docker-run", "--"
        ]
        if platform.system() != "Darwin":
            arguments.append("--network=host")
        arguments += [
            f"--dns-search={namespace}",
            "--rm",
        ]
        if mounts:
            for mount in mounts:
                arguments = arguments + ["-v", f"{mount[0]}:{mount[1]}"]
        if envs:
            for env in envs:
                arguments = arguments + ["--env", f"{env[0]}={env[1]}"]

        # this name to be retrieved for "app shell" command
        arguments = arguments + ["--name", image_name.replace(":", "")]
        arguments.append(image_name)
        if command:
            arguments = arguments + ["sh", "-c"] + [f"{' '.join(command)}"]

        console.debug(arguments)
        try:
            process = self._execute_intercept(arguments)
            # 137 (SIGKILL, e.g. when the intercept container is stopped) is expected here, not an error
            if process.returncode and process.returncode not in (0, 137):
                console.error(
                    "There was an error with switching the deployment, please find details above",
                    _exit=False)
        except KeyboardInterrupt:
            pass
        console.info(
            "Stopping the switch operation. It takes a few seconds to reset the cluster."
        )
        self.leave(deployment, namespace, silent=True)
        self.uninstall(deployment, namespace, silent=True)
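For orientation, this is roughly the argument list `swap` assembles for hypothetical inputs (deployment "api", image "myproject-mydeck-api", namespace "mydeck", one mount, one env variable, port 8000, running on Linux); all concrete values below are invented for illustration:

# console.debug(arguments) would then print approximately:
[
    "intercept", "--no-report", "api",
    "--namespace", "mydeck",
    "--port", "8000:8000", "--docker-run", "--",
    "--network=host",
    "--dns-search=mydeck", "--rm",
    "-v", "/tmp/sa-token:/var/run/secrets/token",
    "--env", "DEBUG=1",
    "--name", "myproject-mydeck-api",
    "myproject-mydeck-api",
    "sh", "-c", "python manage.py runserver",
]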
Example #19
def get_cluster(ctx, deck: dict):
    cluster_data = ctx.cluster_manager.get(id=deck["project"]["id"])
    if not cluster_data.name:
        console.error(
            "The project cluster does not exist. Please be sure to run 'unikube project up' first.",
            _exit=True)

    cluster = ctx.cluster_manager.select(cluster_data=cluster_data)

    # check if kubernetes cluster is running/ready
    if not cluster.ready():
        console.error(
            f"The project cluster for '{cluster.display_name}' is not running.",
            _exit=True)

    return cluster
Example #20
def info(ctx, organization, **kwargs):
    """
    Display further information of the selected organization.
    """

    _ = ctx.auth.refresh()

    # context
    organization_id, _, _ = ctx.context.get_context_ids_from_arguments(organization_argument=organization)

    # argument
    if not organization_id:
        organization_id = console.organization_list(ctx)
        if not organization_id:
            return None

    # GraphQL
    try:
        graph_ql = GraphQL(authentication=ctx.auth)
        data = graph_ql.query(
            """
            query($id: UUID!) {
                organization(id: $id) {
                    id
                    title
                    description
                }
            }
            """,
            query_variables={"id": organization_id},
        )
        organization_selected = data["organization"]
    except Exception as e:
        console.debug(e)
        console.exit_generic_error()

    # console
    if organization_selected:
        console.table(
            data={
                "key": [k for k in organization_selected.keys()],
                "value": [v for v in organization_selected.values()],
            },
            headers=["Key", "Value"],
        )
    else:
        console.error("Organization does not exist.")
Example #21
    def get_logs(self, pod, follow, container=None):
        if follow:
            w = watch.Watch()
            try:
                for log in w.stream(self._core_api.read_namespaced_pod_log,
                                    name=pod,
                                    namespace=self._namespace,
                                    container=container):
                    click.echo(log)
            except ApiException as e:
                console.error(str(e))
        else:
            try:
                ret = self._core_api.read_namespaced_pod_log(
                    name=pod, namespace=self._namespace, container=container)
            except ApiException as e:
                console.error(str(e))
            else:
                return ret
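get_logs wraps the official kubernetes Python client. For reference, a standalone version of the same follow-loop, assuming a reachable cluster and a local kubeconfig, looks roughly like this:

from kubernetes import client, config, watch


def stream_pod_logs(pod: str, namespace: str):
    """Follow the logs of a pod, line by line, until the stream ends."""
    config.load_kube_config()  # or config.load_incluster_config() inside a cluster
    core_api = client.CoreV1Api()
    for line in watch.Watch().stream(core_api.read_namespaced_pod_log,
                                     name=pod,
                                     namespace=namespace):
        print(line)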
Example #22
        def do_POST(self):
            POST = self.get_post_data()

            if POST["state"] != state:
                raise Exception(f"Invalid state: {POST['state']}")

            response = ctx.auth._get_requesting_party_token(
                POST["access_token"])

            login_file = open(
                os.path.join(os.path.dirname(os.path.realpath(__file__)),
                             "login.html"))
            text = login_file.read()
            login_file.close()

            # select response
            if not response["success"]:
                console.error("Login failed!")
                text = (
                    "Login failed! Could not retrieve requesting party token. "
                    "Please try again or contact your System administrator")
            else:
                try:
                    token = ctx.auth.token_from_response(response)
                except Exception as e:
                    console.debug(e)
                    console.debug(response)
                    console.error("Login failed!")
                    text = "Login failed! Your token does not match."
                else:
                    ctx.auth.general_data.authentication = AuthenticationData(
                        email=token["email"],
                        access_token=response["response"]["access_token"],
                        refresh_token=response["response"]["refresh_token"],
                        requesting_party_token=True,
                    )
                    ctx.auth.local_storage_general.set(ctx.auth.general_data)

                    if given_name := token.get("given_name", ""):
                        greeting = f"Hello {given_name}!"
                    else:
                        greeting = "Hello!"
Example #23
    def _do_install():
        incomplete = []
        successful = []
        unsuccessful = []
        for dependency in dependencies:
            rcode = install_dependency(dependency["name"])
            # since this run can contain multiple installations, capture all return codes
            if rcode is None:
                incomplete.append(dependency["name"])
            elif rcode == 0:
                successful.append(dependency["name"])
            else:
                unsuccessful.append(dependency["name"])
        if unsuccessful:
            console.error("Some of the requested installations terminated unsuccessfully")
        elif successful and not incomplete:
            # this only happens if every installation actually ran and was successful
            console.success("All requested dependencies installed successfully")
        elif incomplete:
            console.warning("Not all dependencies could be installed")
Example #24
def password_flow(ctx, email, password):
    response = ctx.auth.login(
        email,
        password,
    )
    if response["success"]:
        try:
            token = ctx.auth.token_from_response(response)
        except Exception as e:
            console.debug(e)
            console.debug(response)
            console.error("Login failed. Your token does not match.")
            return False

        if token["given_name"]:
            console.success(f'Login successful. Hello {token["given_name"]}!')
        else:
            console.success("Login successful.")
    else:
        console.error("Login failed. Please check email and password.")
        return False
    return True
Example #25
def install_dependency(name, silent=False) -> Optional[int]:
    try:
        klass = next(
            filter(
                lambda klass: klass.verbose_name.lower() == name.lower(),
                ALL_DEPENDENCIES,
            ))
    except StopIteration:
        console.error(
            f"The dependency name '{name}' is not valid. No action taken.")
        return None
    else:
        if hasattr(klass, "installation_steps"):
            console.info(
                f"Now running installation setup for {klass.verbose_name}")
            rcode = klass().install()
            if rcode == 0:
                console.success(
                    f"The installation of {klass.verbose_name} was successful."
                )
            else:
                console.error(
                    f"The installation of {klass.verbose_name} was not successful."
                )
            return rcode
        else:
            console.error(
                f"The unikube.tech CLI does not currently support the installation of {klass.verbose_name}. "
                f"{'Please find instructions here: ' + klass.website if hasattr(klass, 'website') else ''}"
            )
            return None
Example #26
def verify(verbose):
    """
    Verifies the installation of dependencies on your local machine. If you need a verbose tabular output, please
    add the ``--verbose`` flag to the command.
    """

    compare_current_and_latest_versions()

    report_data = probe_dependencies(silent=verbose)
    unsuccessful = list(filter(lambda x: not x["success"], report_data))

    # show detailed table
    if verbose:
        successful = list(filter(lambda x: x["success"], report_data))

        console.table(
            successful + unsuccessful,
            headers={
                "name": "Name",
                "success": "Ok",
                "required_version": "Required Version",
                "installed_version": "Installed Version",
                "msg": "Message",
            },
        )

    if unsuccessful:
        console.error(
            f"There {'is' if len(unsuccessful) == 1 else 'are'} {len(unsuccessful)} (of {len(report_data)}) "
            f"unsuccessfully probed {'dependency' if len(unsuccessful) == 1 else 'dependencies'} on your "
            f"local machine. Please run 'unikube system install' in order to fix "
            f"these issues."
        )
        return False

    console.success("Local dependencies verified.")

    return True
Example #27
def download_manifest(deck: dict, authentication: TokenAuthentication, access_token: str, environment_index: int = 0):
    try:
        environment_id = deck["environment"][environment_index]["id"]
        console.info("Requesting manifests. This process may take a few seconds.")
        manifest = download_specs(
            access_token=access_token,
            environment_id=environment_id,
        )
    except HTTPError as e:
        project_id = deck["project"]["id"]
        if e.response.status_code == 404:
            console.warning(
                "This deck potentially does not specify a valid environment of type 'local'. "
                f"Please go to https://app.unikube.io/project/{project_id}/decks "
                f"and save a valid values path."
            )
            exit(1)
        elif e.response.status_code == 403:
            console.warning("Refreshing access token")
            environment_id = deck["environment"][environment_index]["id"]
            response = authentication.refresh()
            if not response["success"]:
                console.exit_login_required()

            access_token = response["response"]["access_token"]
            try:
                manifest = download_specs(
                    access_token=access_token,
                    environment_id=environment_id,
                )
            except HTTPError as e:
                console.warning(f"Downloading the specs failed even after refreshing the access token: {e}")
                exit(1)
        else:
            console.error("Could not load manifest: " + str(e), _exit=True)

    return manifest
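The control flow of download_manifest is essentially "try, refresh the token on HTTP 403, retry once". A minimal generic sketch of that pattern with hypothetical callables (not the project's API):

from requests import HTTPError


def fetch_with_refresh(fetch, refresh, token):
    """Call `fetch(token)`; on HTTP 403 obtain a fresh token via `refresh()` and retry once."""
    try:
        return fetch(token)
    except HTTPError as e:
        if e.response is not None and e.response.status_code == 403:
            return fetch(refresh())
        raise

download_manifest additionally maps a 404 to a deck-configuration warning and exits instead of re-raising.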
Example #28
File: app.py Project: unikubehq/cli
def switch(ctx,
           app,
           organization,
           project,
           deck,
           deployment,
           unikubefile: str = None,
           no_build: bool = False,
           **kwargs):
    """
    Switch a running deployment with a local Docker container.
    """

    cluster_data, deck = get_deck_from_arguments(ctx, organization, project,
                                                 deck)

    # get cluster
    cluster = get_cluster_or_exit(ctx, cluster_data.id)

    # unikube file input
    try:
        unikube_file = unikube_file_selector.get(path_unikube_file=unikubefile)
        unikube_file_app = unikube_file.get_app(name=app)
    except Exception as e:
        console.debug(e)
        console.error("Invalid 'app' argument.", _exit=True)

    # 2: Get a deployment
    # 2.1.a check the '--deployment' option
    if not deployment:
        if unikube_file_app:
            # 2.1.b fall back to the unikube.yaml
            deployment = unikube_file_app.get_deployment()
            if not deployment:
                console.error(
                    "Please specify the 'deployment' key of your app in your unikube.yaml.",
                    _exit=True)
        else:
            console.error(
                "Please specify the deployment either using the '--deployment' option or in the unikube.yaml. "
                "Run 'unikube app switch' in a directory containing the unikube.yaml file.",
                _exit=True,
            )

    # 2.2 Fetch available "deployment:", deployments
    # GraphQL
    try:
        graph_ql = GraphQL(authentication=ctx.auth)
        data = graph_ql.query(
            """
            query($id: UUID) {
                deck(id: $id) {
                    deployments(level: "local") {
                        id
                        title
                        description
                        ports
                        isSwitchable
                    }
                    environment {
                        id
                        type
                        valuesPath
                        namespace
                    }
                }
            }
            """,
            query_variables={
                "id": deck["id"],
            },
        )
    except Exception as e:
        console.debug(e)
        console.exit_generic_error()

    target_deployment = None
    for _deployment in data["deck"]["deployments"]:
        if _deployment["title"] == deployment:
            target_deployment = _deployment

    # 2.3 Check and select deployment data
    if target_deployment is None:
        console.error(
            f"The deployment '{deployment}' you specified could not be found.",
            _exit=True,
        )

    ports = target_deployment["ports"].split(",")
    deployment = target_deployment["title"]
    namespace = deck["environment"][0]["namespace"]

    console.info("Please wait while unikube prepares the switch.")
    with click_spinner.spinner(beep=False,
                               disable=False,
                               force=False,
                               stream=sys.stdout):
        # check telepresence
        provider_data = cluster.storage.get()
        telepresence = Telepresence(provider_data)

        available_deployments = telepresence.list(namespace, flat=True)
        if deployment not in available_deployments:
            console.error(
                "The given deployment cannot be switched. "
                f"You may have to run 'unikube deck install {deck['title']}' first.",
                _exit=True,
            )

        is_swapped = telepresence.is_swapped(deployment, namespace)

        k8s = KubeAPI(provider_data, deck)
        # service account token, service cert
        service_account_tokens = k8s.get_serviceaccount_tokens(deployment)

    # 3: Build a new Docker image
    # 3.1 Grab the docker file
    context, dockerfile, target = unikube_file_app.get_docker_build()
    if not target:
        target = ""
    console.debug(f"{context}, {dockerfile}, {target}")

    # 3.2 Set an image name
    image_name = settings.TELEPRESENCE_DOCKER_IMAGE_FORMAT.format(
        project=cluster_data.name.replace(" ", "").lower(),
        deck=deck["title"],
        name=deployment)

    docker = Docker()

    if is_swapped:
        console.warning(
            "It seems this app is already switched in another process. ")
        if click.confirm("Do you want to kill it and switch here?"):
            telepresence.leave(deployment, namespace, silent=True)
            if docker.check_running(image_name):
                docker.kill(name=image_name)
        else:
            sys.exit(0)

    # 3.3 Build image
    if not docker.image_exists(image_name) or not no_build:
        if no_build:
            console.warning(
                f"Ignoring --no-build since the required image '{image_name}' does not exist"
            )
        console.info(
            f"Building a Docker image for {dockerfile} with context {context}")
        with click_spinner.spinner(beep=False,
                                   disable=False,
                                   force=False,
                                   stream=sys.stdout):
            status, msg = docker.build(image_name, context, dockerfile, target)
        if not status:
            console.debug(msg)
            console.error("Failed to build Docker image.", _exit=True)

        console.info(f"Docker image successfully built: {image_name}")

    # 4. Start the Telepresence session
    # 4.1 Set the right intercept port
    port = unikube_file_app.get_port()
    if port is None:
        port = str(ports[0])
        if len(ports) > 1:
            console.warning(
                f"No port specified although there are multiple ports available: {ports}. "
                f"Defaulting to port {port} which might not be correct.")
    if port not in ports:
        console.error(
            f"The specified port {port} is not in the range of available options: {ports}",
            _exit=True)
    if not _is_local_port_free(port):
        console.error(
            f"The local port {port} is busy. Please stop the application running on "
            f"this port and try again.",
            _exit=True,
        )

    # 4.2 See if there are volume mounts
    mounts = unikube_file_app.get_mounts()
    console.debug(f"Volumes requested: {mounts}")
    # mount service tokens
    if service_account_tokens:
        tmp_sa_token = tempfile.NamedTemporaryFile(delete=True)
        tmp_sa_cert = tempfile.NamedTemporaryFile(delete=True)
        tmp_sa_token.write(service_account_tokens[0].encode())
        tmp_sa_cert.write(service_account_tokens[1].encode())
        tmp_sa_token.flush()
        tmp_sa_cert.flush()
        mounts.append((tmp_sa_token.name, settings.SERVICE_TOKEN_FILENAME))
        mounts.append((tmp_sa_cert.name, settings.SERVICE_CERT_FILENAME))
    else:
        tmp_sa_token = None
        tmp_sa_cert = None

    # 4.3 See if there are special env variables
    envs = unikube_file_app.get_environment()
    console.debug(f"Envs requested: {envs}")

    # 4.4 See if there is a run command to be executed
    command = unikube_file_app.get_command(port=port)
    console.debug(f"Run command: {command}")

    console.info(
        "Starting your container, this may take a while to become effective")

    telepresence.swap(deployment, image_name, command, namespace, envs, mounts,
                      port)
    if docker.check_running(image_name):
        docker.kill(name=image_name)
    if tmp_sa_token:
        tmp_sa_token.close()
        tmp_sa_cert.close()
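Example #28 calls a helper `_is_local_port_free` that is not shown in this listing. One plausible, self-contained way to perform such a check with the standard library (an assumption for illustration, not necessarily the project's implementation):

import socket


def is_local_port_free(port, host="127.0.0.1"):
    """Return True if nothing accepts connections on host:port."""
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.settimeout(1)
        # connect_ex returns 0 when the connection succeeds, i.e. the port is taken
        return sock.connect_ex((host, int(port))) != 0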
Example #29
    def stop(self) -> None:
        arguments = ["quit", "--no-report"]
        process = self._execute(arguments)
        if process.returncode and process.returncode != 0:
            console.error("Could not stop Telepresence daemon", _exit=False)
Example #30
def check_environment_type_local_or_exit(deck: dict, environment_index: int = 0):
    if (
        environment_type_from_string(environment_type=deck["environment"][environment_index]["type"])
        != EnvironmentType.LOCAL
    ):
        console.error("This deck cannot be installed locally.", _exit=True)