def compare_current_and_latest_versions():
    try:
        current_version = None
        try:
            path = Path(__file__).parent / "../VERSION"
            with path.open("r") as f:
                # strip whitespace so a trailing newline does not break the comparison below
                current_version = f.read().strip()
        except (FileNotFoundError, PermissionError):
            console.debug("Could not read current version.")

        if not current_version:
            dist = pkg_resources.working_set.by_key.get("unikube")
            if dist:
                current_version = dist.version

        release = requests.get("https://api.github.com/repos/unikubehq/cli/releases/latest")
        if release.status_code == 403:
            console.info("Versions cannot be compared, as the API rate limit was exceeded.")
            return None

        latest_release_version = release.json()["tag_name"].replace("-", ".")
        if current_version != latest_release_version:
            console.info(
                f"You are using unikube version {current_version}; however, version {latest_release_version} is "
                f"available."
            )
        return current_version
    except pkg_resources.DistributionNotFound as e:
        console.warning(f"Version of the package could not be found: {e}")
    except Exception:
        import traceback

        console.info(f"Versions cannot be compared because of the following error:\n{traceback.format_exc()}")
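# Note: the check above treats any mismatch as "update available" because it uses
# plain string inequality, so a locally newer build would also be flagged. A
# minimal, hypothetical sketch of a semantic comparison (assumes the third-party
# "packaging" library; is_outdated is not part of this codebase):
from packaging.version import Version

def is_outdated(current: str, latest: str) -> bool:
    # Only report an update when the latest release is strictly newer.
    return Version(latest) > Version(current)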
def delete(ctx, project=None, organization=None, **kwargs):
    """
    Delete the current project and all related data.
    """
    # context
    organization_id, project_id, _ = ctx.context.get_context_ids_from_arguments(
        organization_argument=organization, project_argument=project
    )

    # cluster
    cluster_list = ctx.cluster_manager.get_cluster_list()

    # argument
    if not project_id:
        project_id = console.project_list(
            ctx, organization_id=organization_id, filter=[cluster.id for cluster in cluster_list]
        )
        if not project_id:
            return None

    if project_id not in [cluster.id for cluster in cluster_list]:
        console.info(
            f"The project cluster for '{project_id_2_display_name(ctx=ctx, id=project_id)}' could not be found.",
            _exit=True,
        )

    # initial warning
    console.warning("Deleting a project will remove the cluster including all of its data.")

    # confirm question
    confirm = input("Do you want to continue [N/y]: ")
    if confirm not in ["y", "Y", "yes", "Yes"]:
        console.info("No action taken.", _exit=True)

    # get cluster
    cluster = None
    for cluster_data in cluster_list:
        if cluster_data.id == project_id:
            cluster = ctx.cluster_manager.select(cluster_data=cluster_data)
            break

    # delete cluster
    if not cluster.exists():
        ctx.cluster_manager.delete(cluster.id)
        console.info(f"No Kubernetes cluster to delete for '{cluster.display_name}', nothing to do.", _exit=True)

    success = cluster.delete()

    # console
    if success:
        console.success("The project was deleted successfully.")
        ctx.cluster_manager.delete(cluster.id)
    else:
        console.error("The cluster could not be deleted.")
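# Hypothetical alternative to the raw input() prompt above, using click.confirm
# (click is already used elsewhere in this CLI); a sketch only, not the actual
# implementation:
import click

def confirm_deletion() -> bool:
    # Defaults to "No", mirroring the "[N/y]" prompt semantics.
    return click.confirm("Do you want to continue?", default=False)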
def install(reinstall):
    """
    Install all required dependencies on your local machine. To reinstall dependencies, pass their
    names to the ``--reinstall`` option, for example: ``--reinstall k3d,telepresence``
    """

    def _do_install():
        incomplete = []
        successful = []
        unsuccessful = []
        for dependency in dependencies:
            rcode = install_dependency(dependency["name"])
            # since this run can contain multiple installations, capture all return codes
            if rcode is None:
                incomplete.append(dependency["name"])
            elif rcode == 0:
                successful.append(dependency["name"])
            else:
                unsuccessful.append(dependency["name"])

        if unsuccessful:
            console.error("Some of the requested installations terminated unsuccessfully.")
        elif successful and not incomplete:
            # rcode only becomes 0 if the installation actually ran and was successful
            console.success("All requested dependencies were installed successfully.")
        elif incomplete:
            console.warning("Not all dependencies could be installed.")

    # check account permission
    if os.geteuid() != 0:
        console.warning(
            "You are not running the installation with an administrative account. "
            "You may be prompted for your password."
        )

    # install
    if reinstall:
        dependencies = [{"name": i} for i in reinstall.split(",")]
    else:
        report_data = probe_dependencies(silent=True)
        dependencies = list(filter(lambda x: not x["success"], report_data))

    if len(dependencies) == 1:
        console.info(f"The following dependency is going to be installed: {dependencies[0]['name']}")
    elif len(dependencies) > 1:
        console.info(
            f"The following dependencies are going to be "
            f"installed: {', '.join(k['name'] for k in dependencies)}"
        )
    else:
        console.info("All dependencies are already satisfied. No action taken.")
        sys.exit(0)

    _do_install()
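# Sketch of the return-code contract _do_install() relies on: install_dependency()
# returns None when the installer never ran, 0 on success, and a non-zero code on
# failure. INSTALLER_COMMANDS and install_dependency_sketch are hypothetical, not
# the actual unikube implementation:
import subprocess

INSTALLER_COMMANDS = {
    "example-tool": ["echo", "installing example-tool"],  # placeholder command
}

def install_dependency_sketch(name: str):
    command = INSTALLER_COMMANDS.get(name)
    if command is None:
        return None  # installer never ran -> counted as "incomplete"
    return subprocess.run(command).returncode  # 0 on success, non-zero on failure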
def ingress(ctx, organization=None, project=None, deck=None, **kwargs):
    """
    Display ingress configuration for *installed* decks. This command prints a table containing URLs,
    paths and the associated backends.
    """
    # context
    organization_id, project_id, deck_id = ctx.context.get_context_ids_from_arguments(
        organization_argument=organization, project_argument=project, deck_argument=deck
    )

    # argument
    if not deck_id:
        deck_id = console.deck_list(ctx, organization_id=organization_id, project_id=project_id)
        if not deck_id:
            return None

    deck = get_deck(ctx, deck_id=deck_id)

    # get cluster
    cluster = get_cluster(ctx=ctx, deck=deck)

    provider_data = cluster.storage.get()
    ingress_data = get_ingress_data(deck, provider_data)
    console.table(
        ingress_data,
        headers={
            "name": "Name",
            "url": "URLs",
        },
    )
    if not ingress_data:
        console.warning(
            f"Are you sure the deck is installed? You may have to run 'unikube deck install {deck['title']}' first."
        )
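# The table above assumes get_ingress_data() yields rows keyed like the headers
# mapping; a hypothetical example row (values invented for illustration):
example_ingress_data = [
    {"name": "my-ingress", "url": "http://my-deck.127.0.0.1.nip.io/api -> api-service:8080"},
]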
def download_manifest(deck: dict, authentication: TokenAuthentication, access_token: str, environment_index: int = 0):
    try:
        environment_id = deck["environment"][environment_index]["id"]
        console.info("Requesting manifests. This process may take a few seconds.")
        manifest = download_specs(
            access_token=access_token,
            environment_id=environment_id,
        )
    except HTTPError as e:
        project_id = deck["project"]["id"]
        if e.response.status_code == 404:
            console.warning(
                "This deck potentially does not specify a valid environment of type 'local'. "
                f"Please go to https://app.unikube.io/project/{project_id}/decks "
                f"and save a valid values path."
            )
            exit(1)
        elif e.response.status_code == 403:
            console.warning("Refreshing access token.")
            environment_id = deck["environment"][environment_index]["id"]
            response = authentication.refresh()
            if not response["success"]:
                console.exit_login_required()

            access_token = response["response"]["access_token"]
            try:
                manifest = download_specs(
                    access_token=access_token,
                    environment_id=environment_id,
                )
            except HTTPError as e:
                console.warning(f"Downloading the specs failed even after refreshing the access token: {e}")
                exit(1)
        else:
            console.error("Could not load manifest: " + str(e), _exit=True)
    return manifest
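# The 403 branch above is a refresh-and-retry pattern; a generic, hypothetical
# sketch of the same idea (with_token_refresh is not part of this codebase):
from requests.exceptions import HTTPError

def with_token_refresh(authentication, access_token, request_fn):
    try:
        return request_fn(access_token)
    except HTTPError as e:
        if e.response.status_code != 403:
            raise
        response = authentication.refresh()
        if not response["success"]:
            raise
        # retry exactly once with the refreshed token
        return request_fn(response["response"]["access_token"])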
def switch(
    ctx,
    app,
    organization,
    project,
    deck,
    deployment,
    unikubefile: str = None,
    no_build: bool = False,
    **kwargs,
):
    """
    Switch a running deployment with a local Docker container.
    """
    cluster_data, deck = get_deck_from_arguments(ctx, organization, project, deck)

    # get cluster
    cluster = get_cluster_or_exit(ctx, cluster_data.id)

    # unikube file input
    try:
        unikube_file = unikube_file_selector.get(path_unikube_file=unikubefile)
        unikube_file_app = unikube_file.get_app(name=app)
    except Exception as e:
        console.debug(e)
        console.error("Invalid 'app' argument.", _exit=True)

    # 2: Get a deployment
    # 2.1.a Check the deployment identifier
    if not deployment:
        if unikube_file_app:
            # 2.1.b check the unikube file
            deployment = unikube_file_app.get_deployment()
            if not deployment:
                console.error("Please specify the 'deployment' key of your app in your unikube.yaml.", _exit=True)
        else:
            console.error(
                "Please specify the deployment either using the '--deployment' option or in the unikube.yaml. "
                "Run 'unikube app switch' in a directory containing the unikube.yaml file.",
                _exit=True,
            )

    # 2.2 Fetch available deployments
    # GraphQL
    try:
        graph_ql = GraphQL(authentication=ctx.auth)
        data = graph_ql.query(
            """
            query($id: UUID) {
                deck(id: $id) {
                    deployments(level: "local") {
                        id
                        title
                        description
                        ports
                        isSwitchable
                    }
                    environment {
                        id
                        type
                        valuesPath
                        namespace
                    }
                }
            }
            """,
            query_variables={
                "id": deck["id"],
            },
        )
    except Exception as e:
        console.debug(e)
        console.exit_generic_error()

    target_deployment = None
    for _deployment in data["deck"]["deployments"]:
        if _deployment["title"] == deployment:
            target_deployment = _deployment

    # 2.3 Check and select deployment data
    if target_deployment is None:
        console.error(
            f"The deployment '{deployment}' you specified could not be found.",
            _exit=True,
        )

    ports = target_deployment["ports"].split(",")
    deployment = target_deployment["title"]
    namespace = deck["environment"][0]["namespace"]

    console.info("Please wait while unikube prepares the switch.")
    with click_spinner.spinner(beep=False, disable=False, force=False, stream=sys.stdout):
        # check telepresence
        provider_data = cluster.storage.get()
        telepresence = Telepresence(provider_data)
        available_deployments = telepresence.list(namespace, flat=True)
        if deployment not in available_deployments:
            console.error(
                "The given deployment cannot be switched. "
                f"You may have to run 'unikube deck install {deck['title']}' first.",
                _exit=True,
            )
        is_swapped = telepresence.is_swapped(deployment, namespace)
        k8s = KubeAPI(provider_data, deck)
        # service account token, service cert
        service_account_tokens = k8s.get_serviceaccount_tokens(deployment)

    # 3: Build a new Docker image
    # 3.1 Grab the docker file
    context, dockerfile, target = unikube_file_app.get_docker_build()
    if not target:
        target = ""
    console.debug(f"{context}, {dockerfile}, {target}")

    # 3.2 Set an image name
    image_name = settings.TELEPRESENCE_DOCKER_IMAGE_FORMAT.format(
        project=cluster_data.name.replace(" ", "").lower(), deck=deck["title"], name=deployment
    )

    docker = Docker()

    if is_swapped:
        console.warning("It seems this app is already switched in another process.")
        if click.confirm("Do you want to kill it and switch here?"):
            telepresence.leave(deployment, namespace, silent=True)
            if docker.check_running(image_name):
                docker.kill(name=image_name)
        else:
            sys.exit(0)

    # 3.3 Build image
    if not docker.image_exists(image_name) or not no_build:
        if no_build:
            console.warning(f"Ignoring --no-build since the required image '{image_name}' does not exist")
        console.info(f"Building a Docker image for {dockerfile} with context {context}")
        with click_spinner.spinner(beep=False, disable=False, force=False, stream=sys.stdout):
            status, msg = docker.build(image_name, context, dockerfile, target)
        if not status:
            console.debug(msg)
            console.error("Failed to build Docker image.", _exit=True)
        console.info(f"Docker image successfully built: {image_name}")

    # 4. Start the Telepresence session
    # 4.1 Set the right intercept port
    port = unikube_file_app.get_port()
    if port is None:
        port = str(ports[0])
        if len(ports) > 1:
            console.warning(
                f"No port specified although there are multiple ports available: {ports}. "
                f"Defaulting to port {port} which might not be correct."
            )
    if port not in ports:
        console.error(f"The specified port {port} is not in the range of available options: {ports}", _exit=True)

    if not _is_local_port_free(port):
        console.error(
            f"The local port {port} is busy. Please stop the application running on this port and try again.",
            _exit=True,
        )

    # 4.2 See if there are volume mounts
    mounts = unikube_file_app.get_mounts()
    console.debug(f"Volumes requested: {mounts}")

    # mount service tokens
    if service_account_tokens:
        tmp_sa_token = tempfile.NamedTemporaryFile(delete=True)
        tmp_sa_cert = tempfile.NamedTemporaryFile(delete=True)
        tmp_sa_token.write(service_account_tokens[0].encode())
        tmp_sa_cert.write(service_account_tokens[1].encode())
        tmp_sa_token.flush()
        tmp_sa_cert.flush()
        mounts.append((tmp_sa_token.name, settings.SERVICE_TOKEN_FILENAME))
        mounts.append((tmp_sa_cert.name, settings.SERVICE_CERT_FILENAME))
    else:
        tmp_sa_token = None
        tmp_sa_cert = None

    # 4.3 See if there are special env variables
    envs = unikube_file_app.get_environment()
    console.debug(f"Envs requested: {envs}")

    # 4.4 See if there is a run command to be executed
    command = unikube_file_app.get_command(port=port)
    console.debug(f"Run command: {command}")
    console.info("Starting your container, this may take a while to become effective.")

    telepresence.swap(deployment, image_name, command, namespace, envs, mounts, port)
    if docker.check_running(image_name):
        docker.kill(name=image_name)

    if tmp_sa_token:
        tmp_sa_token.close()
        tmp_sa_cert.close()
def list(ctx, organization=None, project=None, **kwargs):
    """
    List all decks.
    """
    # context
    organization_id, project_id, _ = ctx.context.get_context_ids_from_arguments(
        organization_argument=organization, project_argument=project
    )

    # GraphQL
    try:
        graph_ql = GraphQL(authentication=ctx.auth)
        data = graph_ql.query(
            """
            query($organization_id: UUID, $project_id: UUID) {
                allDecks(organizationId: $organization_id, projectId: $project_id) {
                    results {
                        id
                        title
                        project {
                            title
                            organization {
                                title
                            }
                        }
                    }
                }
            }
            """,
            query_variables={
                "organization_id": organization_id,
                "project_id": project_id,
            },
        )
        deck_list = data["allDecks"]["results"]
    except Exception as e:
        console.debug(e)
        console.exit_generic_error()

    if not deck_list:
        console.warning("No decks available. Please go to https://app.unikube.io and create a project.", _exit=True)

    # format list to table
    table_data = []
    for deck in deck_list:
        data = {}
        if not organization_id:
            data["organization"] = deck["project"]["organization"]["title"]
        if not project_id:
            data["project"] = deck["project"]["title"]
        data["id"] = deck["id"]
        data["title"] = deck["title"]
        table_data.append(data)

    # console
    console.table(data=table_data)