def web_flow(ctx):
    """Run the browser-based OpenID Connect login flow.

    Starts a local callback server, builds the authorization request for the
    unikube realm and opens the resulting login URL in the user's browser.
    Returns True once the URL has been launched.
    """
    client = Client(client_authn_method=CLIENT_AUTHN_METHOD)
    client.provider_config(f"{settings.AUTH_DEFAULT_HOST}/auth/realms/unikube")

    state = rndstr()
    nonce = rndstr()

    # 1. run callback server
    from unikube.authentication.web import run_callback_server

    port = run_callback_server(state, nonce, client, ctx)

    # 2. send to login with redirect url.
    request_args = {
        "client_id": "cli",
        "response_type": ["token"],
        "response_mode": "form_post",
        "scope": ["openid"],
        "nonce": nonce,
        "state": state,
        "redirect_uri": f"http://localhost:{port}",
    }
    authorization_request = client.construct_AuthorizationRequest(request_args=request_args)
    login_url = authorization_request.request(client.authorization_endpoint)

    console.info("If your Browser does not open automatically, go to the following URL and login:")
    console.link(login_url)
    click.launch(login_url)
    return True
def down(ctx, project=None, organization=None, **kwargs):
    """
    Stop/pause cluster.
    """
    # resolve ids from the provided arguments / current context
    organization_id, project_id, _ = ctx.context.get_context_ids_from_arguments(
        organization_argument=organization, project_argument=project
    )

    # currently running clusters
    ready_clusters = ctx.cluster_manager.get_cluster_list(ready=True)
    ready_cluster_ids = [item.id for item in ready_clusters]

    # interactive selection if no project was given
    if not project_id:
        project_id = console.project_list(ctx, organization_id=organization_id, filter=ready_cluster_ids)
        if not project_id:
            return None

    # the selected project must have a running local cluster
    if project_id not in ready_cluster_ids:
        console.info(
            f"The project cluster for '{project_id_2_display_name(ctx=ctx, id=project_id)}' is not up or does not exist yet.",
            _exit=True,
        )

    # pick the matching cluster
    cluster = None
    for item in ready_clusters:
        if item.id == project_id:
            cluster = ctx.cluster_manager.select(cluster_data=item)
            break

    # cluster down
    if not cluster.exists():
        # something went wrong or the cluster was already deleted from somewhere else
        console.info(f"No Kubernetes cluster to stop for '{cluster.display_name}'", _exit=True)

    if not cluster.ready():
        console.info(f"Kubernetes cluster for '{cluster.display_name}' is not running", _exit=True)

    console.info("Stopping Telepresence daemon.")
    Telepresence(cluster.storage.get()).stop()

    # stop cluster
    console.info(f"Stopping Kubernetes cluster for '{cluster.display_name}'")
    if cluster.stop():
        console.success("The project cluster is down.")
    else:
        console.error("The cluster could not be stopped.")
def version():
    """
    Check unikube version.

    Prints the installed unikube version; exits with an error message when
    the version cannot be determined.
    """
    current_version = compare_current_and_latest_versions()
    if current_version is None:
        # Bug fix: previously execution fell through and printed
        # "unikube, version None" after the error; exit instead
        # (consistent with console.error(..., _exit=True) elsewhere in this file).
        console.error("Could not determine version.", _exit=True)
    console.info(f"unikube, version {current_version}")
def logout(ctx, **kwargs):
    """
    Log out of a Unikube host.
    """
    # the auth handler performs the actual session/token teardown
    auth = ctx.auth
    auth.logout()
    console.info("Logout completed.")
    return True
def list(
    message: str,
    choices: List[str],
    identifiers: Union[List[str], None] = None,
    filter: Union[List[str], None] = None,
    excludes: Union[List[str], None] = None,
    help_texts: Union[List[str], None] = None,
    allow_duplicates: bool = False,
    message_no_choices: str = "No choices available!",
    multiselect: bool = False,
    transformer: Callable[[Any], str] = None,
) -> Union[None, List[str]]:
    """Prompt the user to pick one or more entries from *choices*.

    Duplicates are resolved (unless explicitly allowed), then the candidates
    are narrowed by *filter* and *excludes* before the fuzzy prompt is shown.
    Returns the selection, or None when nothing is available or chosen.
    """
    # nothing to offer
    if len(choices) == 0:
        console.info(message_no_choices)
        return None

    # handle duplicates
    if allow_duplicates:
        deduplicated = choices
    elif identifiers:
        deduplicated = resolve_duplicates(choices=choices, identifiers=identifiers, help_texts=help_texts)
    else:
        deduplicated = set(choices)

    # narrow down the candidates: keep filtered, drop excluded
    candidates = filter_by_identifiers(choices=deduplicated, identifiers=identifiers, filter=filter)
    candidates = exclude_by_identifiers(choices=candidates, identifiers=identifiers, excludes=excludes)

    # prompt
    answer = inquirer.fuzzy(
        message=message,
        choices=candidates,
        multiselect=multiselect,
        transformer=transformer,
        keybindings={"toggle": [{"key": "space"}]},
        style=INQUIRER_STYLE,
        amark="✔",
    ).execute()

    if not answer:
        return None
    return answer
def prune(ctx, **kwargs):
    """
    Remove unused clusters.
    """
    # fetch the ids of all projects that still exist remotely
    try:
        graph_ql = GraphQL(authentication=ctx.auth)
        data = graph_ql.query(
            """
            query {
                allProjects {
                    results {
                        id
                    }
                }
            }
            """
        )
        projects = data["allProjects"]["results"]
    except Exception as e:
        console.debug(e)
        console.exit_generic_error()

    # locally known clusters
    cluster_list = ctx.cluster_manager.get_cluster_list()

    # clusters whose remote project no longer exists are prune candidates
    remote_ids = {project["id"] for project in projects}
    prune_clusters = [cluster_data for cluster_data in cluster_list if cluster_data.id not in remote_ids]

    for cluster_data in prune_clusters:
        console.info(f"It seems like the project for cluster '{cluster_data.name}' has been deleted.")

        # confirm question
        confirmed = console.confirm(question="Do want to remove the cluster? [N/y]: ")
        if not confirmed:
            console.info("No action taken.")
            continue

        # delete
        try:
            cluster = ctx.cluster_manager.select(cluster_data=cluster_data)
            if cluster.delete():
                console.success("The project was deleted successfully.")
                ctx.cluster_manager.delete(cluster.id)
        except Exception as e:
            console.debug(e)
            console.error("The cluster could not be deleted.")
def list(ctx, organization, **kwargs):
    """
    Display a table of all available project names alongside with the ids.
    """
    _ = ctx.auth.refresh()

    # context
    organization_id, _, _ = ctx.context.get_context_ids_from_arguments(organization_argument=organization)

    # query all projects of the organization
    try:
        graph_ql = GraphQL(authentication=ctx.auth)
        data = graph_ql.query(
            """
            query($organization_id: UUID) {
                allProjects(organizationId: $organization_id) {
                    results {
                        title
                        id
                        description
                    }
                }
            }
            """,
            query_variables={"organization_id": organization_id},
        )
        project_list = data["allProjects"]["results"]
    except Exception as e:
        console.debug(e)
        console.exit_generic_error()

    # console
    if not project_list:
        console.info(
            "No projects available. Please go to https://app.unikube.io and create a project.",
            _exit=True,
        )

    console.table(
        data={
            "id": [project["id"] for project in project_list],
            "title": [project["title"] for project in project_list],
            "description": [project["description"] for project in project_list],
        },
        headers=["id", "name", "description"],
    )
def delete(ctx, project=None, organization=None, **kwargs):
    """
    Delete the current project and all related data.
    """
    # resolve ids from the provided arguments / current context
    organization_id, project_id, _ = ctx.context.get_context_ids_from_arguments(
        organization_argument=organization, project_argument=project
    )

    # locally known clusters
    cluster_list = ctx.cluster_manager.get_cluster_list()
    cluster_ids = [item.id for item in cluster_list]

    # interactive selection if no project was given
    if not project_id:
        project_id = console.project_list(ctx, organization_id=organization_id, filter=cluster_ids)
        if not project_id:
            return None

    if project_id not in cluster_ids:
        console.info(
            f"The project cluster for '{project_id_2_display_name(ctx=ctx, id=project_id)}' could not be found.",
            _exit=True,
        )

    # initial warning
    console.warning("Deleting a project will remove the cluster including all of its data.")

    # confirm question
    confirm = input("Do want to continue [N/y]: ")
    if confirm not in ["y", "Y", "yes", "Yes"]:
        console.info("No action taken.", _exit=True)

    # pick the matching cluster
    cluster = None
    for cluster_data in cluster_list:
        if cluster_data.id == project_id:
            cluster = ctx.cluster_manager.select(cluster_data=cluster_data)
            break

    # delete cluster
    if not cluster.exists():
        # no backing Kubernetes cluster: just drop the local entry
        ctx.cluster_manager.delete(cluster.id)
        console.info(
            f"No Kubernetes cluster to delete for '{cluster.display_name}', nothing to do.",
            _exit=True,
        )

    if cluster.delete():
        console.success("The project was deleted successfully.")
        ctx.cluster_manager.delete(cluster.id)
    else:
        console.error("The cluster could not be deleted.")
def install(reinstall):
    """
    Install all required dependencies on your local machine. In order to reinstall dependencies use the
    ``--reinstall`` argument. You need to specify the name of the dependency with the ``--reinstall`` option,
    for example: ``--reinstall k3d,telepresence``
    """

    def _do_install():
        # run every pending installation and bucket the outcomes
        incomplete, successful, unsuccessful = [], [], []
        for dependency in dependencies:
            rcode = install_dependency(dependency["name"])
            # since this run can contain multiple installations, capture all return codes
            if rcode is None:
                incomplete.append(dependency["name"])
            elif rcode == 0:
                successful.append(dependency["name"])
            else:
                unsuccessful.append(dependency["name"])

        if unsuccessful:
            console.error("Some of the requested installations terminated unsuccessful")
        elif successful and not unsuccessful and not incomplete:
            # this only become 0 if installation actually run and was successful
            console.success("All requested dependencies installed successfully")
        elif incomplete:
            console.warning("Not all dependencies could be installed")

    # check account permission
    if os.geteuid() != 0:
        console.warning(
            "You are not running the installation with an administrative account. "
            "You may be prompted for your password."
        )

    # determine which dependencies to (re)install
    if reinstall:
        dependencies = [{"name": name} for name in reinstall.split(",")]
    else:
        report_data = probe_dependencies(silent=True)
        dependencies = [entry for entry in report_data if not entry["success"]]

    if len(dependencies) == 1:
        console.info(f"The following dependency is going to be installed: {dependencies[0]['name']}")
    elif len(dependencies) > 1:
        console.info(
            f"The following dependencies are going to be "
            f"installed: {','.join(k['name'] for k in dependencies)}"
        )
    else:
        console.info("All dependencies are already satisfied. No action taken.")
        sys.exit(0)

    _do_install()
def status(ctx, token=False, **kwargs):
    """
    View authentication status.
    """
    response = ctx.auth.verify()

    # optionally dump the raw tokens
    if token:
        authentication = ctx.auth.general_data.authentication
        console.info(f"access token: {authentication.access_token}")
        console.echo("---")
        console.info(f"refresh token: {authentication.refresh_token}")
        console.echo("---")
        console.info(f"requesting party token: {authentication.requesting_party_token}")
        console.echo("")

    if response["success"]:
        console.success("Authentication verified.")
    else:
        console.info("Authentication could not be verified.")

    return True
def up(ctx, project=None, organization=None, ingress=None, provider=None, workers=None, **kwargs):
    """
    This command starts or resumes a Kubernetes cluster for the specified project. As it is a selection command, the
    project can be specified and/or filtered in several ways:

    * as a positional argument, id or project title can be specified, or from a set context
    * as an interactive selection from available projects
    * via ``-o`` or ``--organization`` option, specifying an organisation to which a project belongs
    """
    _ = ctx.auth.refresh()

    if not Docker().daemon_active():
        console.error("Docker is not running. Please start Docker before starting a project.", _exit=True)

    # context
    organization_id, project_id, _ = ctx.context.get_context_ids_from_arguments(
        organization_argument=organization, project_argument=project
    )

    # cluster information
    cluster_list = ctx.cluster_manager.get_cluster_list(ready=True)
    cluster_id_list = [item.id for item in cluster_list]

    # argument
    if not project_id:
        project_id = console.project_list(ctx, organization_id=organization_id, excludes=cluster_id_list)
        if not project_id:
            return None

    if project_id in cluster_id_list:
        console.info(f"Project '{project_id_2_display_name(ctx=ctx, id=project_id)}' is already up.", _exit=True)

    # GraphQL
    try:
        graph_ql = GraphQL(authentication=ctx.auth)
        data = graph_ql.query(
            """
            query($id: UUID) {
                project(id: $id) {
                    title
                    id
                    organization {
                        id
                    }
                    clusterSettings {
                        id
                        port
                    }
                    organization {
                        title
                    }
                }
            }
            """,
            query_variables={
                "id": project_id,
            },
        )
        project_selected = data["project"]
    except Exception as e:
        console.debug(e)
        console.exit_generic_error()

    if not project_selected:
        console.info(
            f"The project '{project_id_2_display_name(ctx=ctx, id=project_id)}' could not be found.",
            _exit=True,
        )

    try:
        cluster_provider_type = K8sProviderType[provider]
    except KeyError:
        # NOTE(review): this error message was garbled mid-f-string in the source;
        # reconstructed as a single message from the two fragments.
        console.error(
            f"The provider '{provider}' is not supported. Please use "
            f"one of: {','.join(opt.name for opt in K8sProviderType)}",
            _exit=True,
        )

    check_running_cluster(ctx, cluster_provider_type, project_selected)

    # default the ingress port from the project's cluster settings
    if ingress is None:
        ingress = project_selected["clusterSettings"]["port"]

    # Fix: the port-availability check was duplicated verbatim (chunk overlap);
    # it only needs to run once.
    if not_available_ports := check_ports([ingress]):
        console.error(
            "Following ports are currently busy, however needed to spin up the cluster: {}".format(
                ", ".join([str(port) for port in not_available_ports])
            ),
            _exit=True,
        )

    # cluster up
    cluster_data = ctx.cluster_manager.get(id=project_selected["id"])
    cluster_data.name = project_selected["title"]
    ctx.cluster_manager.set(id=project_selected["id"], data=cluster_data)

    cluster = ctx.cluster_manager.select(cluster_data=cluster_data, cluster_provider_type=cluster_provider_type)
    console.info(
        f"Setting up a Kubernetes cluster (with provider {provider}) for "
        f"project '{cluster.display_name}'."
    )

    if not cluster.exists():
        console.info(f"Kubernetes cluster for '{cluster.display_name}' does not exist, creating it now.")
        with click_spinner.spinner(beep=False, disable=False, force=False, stream=sys.stdout):
            success = cluster.create(
                ingress_port=ingress,
                workers=workers,
            )
    # NOTE(review): the source appears truncated here (success is never checked);
    # confirm against the full file whether further start-up steps follow.
def install(ctx, organization=None, project=None, deck=None, **kwargs):
    """
    Install a deck.
    """
    # resolve ids from the provided arguments / current context
    organization_id, project_id, deck_id = ctx.context.get_context_ids_from_arguments(
        organization_argument=organization, project_argument=project, deck_argument=deck
    )

    # interactive selection if no deck was given
    if not deck_id:
        deck_id = console.deck_list(ctx, organization_id=organization_id, project_id=project_id)
        if not deck_id:
            return None

    deck = get_deck(ctx, deck_id=deck_id)

    # cluster
    cluster = get_cluster(ctx=ctx, deck=deck)

    # check environment type
    check_environment_type_local_or_exit(deck=deck)

    # installing while a switch is active is not allowed
    telepresence = Telepresence(cluster.storage.get())
    if telepresence.intercept_count():
        console.error("It is not possible to install a deck while having an active switch.", _exit=True)

    # download manifest
    general_data = ctx.storage_general.get()
    manifest = download_manifest(
        deck=deck, authentication=ctx.auth, access_token=general_data.authentication.access_token
    )

    # apply every manifest file into the deck's namespace
    provider_data = cluster.storage.get()
    kubectl = KubeCtl(provider_data=provider_data)
    namespace = deck["environment"][0]["namespace"]
    kubectl.create_namespace(namespace)
    with click.progressbar(
        manifest,
        label="[INFO] Installing Kubernetes resources to the cluster.",
    ) as files:
        for file in files:
            kubectl.apply_str(namespace, file["content"])

    # ingress
    ingress_data = get_ingress_data(deck, provider_data)
    if not ingress_data:
        console.info("No ingress configuration available.", _exit=True)

    console.table(
        ingress_data,
        headers={"name": "Name", "url": "URLs", "paths": "Paths"},
    )