def auth_gcp(self): config = self.spec["gcp"] key_path = self.config_path.joinpath(config["key"]) project = config["project"] # If cluster is regional, it'll have a `region` key set. # Else, it'll just have a `zone` key set. Let's respect either. location = config.get("zone", config.get("region")) cluster = config["cluster"] with tempfile.NamedTemporaryFile() as kubeconfig: orig_kubeconfig = os.environ.get("KUBECONFIG") try: os.environ["KUBECONFIG"] = kubeconfig.name with get_decrypted_file(key_path) as decrypted_key_path: subprocess.check_call([ "gcloud", "auth", "activate-service-account", f"--key-file={os.path.abspath(decrypted_key_path)}", ]) subprocess.check_call([ "gcloud", "container", "clusters", # --zone works with regions too f"--zone={location}", f"--project={project}", "get-credentials", cluster, ]) yield finally: if orig_kubeconfig is not None: os.environ["KUBECONFIG"] = orig_kubeconfig
def auth_azure(self):
    """
    Read `azure` nested config, login to Azure with a Service Principal,
    activate the appropriate subscription, then authenticate against the
    cluster using `az aks get-credentials`.
    """
    config = self.spec["azure"]
    key_path = self.config_path.joinpath(config["key"])
    cluster = config["cluster"]
    resource_group = config["resource_group"]

    with tempfile.NamedTemporaryFile() as kubeconfig:
        orig_kubeconfig = os.environ.get("KUBECONFIG", None)

        try:
            os.environ["KUBECONFIG"] = kubeconfig.name

            with get_decrypted_file(key_path) as decrypted_key_path:
                decrypted_key_abspath = os.path.abspath(decrypted_key_path)
                if not os.path.isfile(decrypted_key_abspath):
                    raise FileNotFoundError("The decrypted key file does not exist")

                with open(decrypted_key_path) as f:
                    service_principal = json.load(f)

            # Login to Azure
            subprocess.check_call([
                "az",
                "login",
                "--service-principal",
                f"--username={service_principal['service_principal_id']}",
                f"--password={service_principal['service_principal_password']}",
                f"--tenant={service_principal['tenant_id']}",
            ])

            # Set the Azure subscription
            subprocess.check_call([
                "az",
                "account",
                "set",
                f"--subscription={service_principal['subscription_id']}",
            ])

            # Get cluster credentials
            subprocess.check_call([
                "az",
                "aks",
                "get-credentials",
                f"--name={cluster}",
                f"--resource-group={resource_group}",
            ])

            yield
        finally:
            if orig_kubeconfig is not None:
                os.environ["KUBECONFIG"] = orig_kubeconfig
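# The decrypted Azure key file is expected to be JSON carrying the fields
# read above. A hypothetical example - the field names match what auth_azure
# reads, the values are placeholders:
example_azure_service_principal = {
    "service_principal_id": "00000000-0000-0000-0000-000000000000",
    "service_principal_password": "<placeholder-secret>",
    "tenant_id": "00000000-0000-0000-0000-000000000000",
    "subscription_id": "00000000-0000-0000-0000-000000000000",
}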
def auth_aws(self):
    """
    Reads `aws` nested config and temporarily sets environment variables
    like `KUBECONFIG`, `AWS_ACCESS_KEY_ID`, and `AWS_SECRET_ACCESS_KEY`
    before trying to authenticate with the `aws eks update-kubeconfig`
    command.

    Finally, resets those environment variables to their original values
    to prevent side-effects on existing local configuration.
    """
    config = self.spec["aws"]
    key_path = self.config_path.joinpath(config["key"])
    cluster_name = config["clusterName"]
    region = config["region"]

    with tempfile.NamedTemporaryFile() as kubeconfig:
        orig_kubeconfig = os.environ.get("KUBECONFIG", None)
        orig_access_key_id = os.environ.get("AWS_ACCESS_KEY_ID", None)
        orig_secret_access_key = os.environ.get("AWS_SECRET_ACCESS_KEY", None)

        try:
            with get_decrypted_file(key_path) as decrypted_key_path:
                decrypted_key_abspath = os.path.abspath(decrypted_key_path)
                if not os.path.isfile(decrypted_key_abspath):
                    raise FileNotFoundError("The decrypted key file does not exist")

                with open(decrypted_key_abspath) as f:
                    creds = json.load(f)

                os.environ["AWS_ACCESS_KEY_ID"] = creds["AccessKey"]["AccessKeyId"]
                os.environ["AWS_SECRET_ACCESS_KEY"] = creds["AccessKey"]["SecretAccessKey"]

            os.environ["KUBECONFIG"] = kubeconfig.name

            subprocess.check_call([
                "aws",
                "eks",
                "update-kubeconfig",
                f"--name={cluster_name}",
                f"--region={region}",
            ])

            yield
        finally:
            if orig_kubeconfig is not None:
                os.environ["KUBECONFIG"] = orig_kubeconfig
            if orig_access_key_id is not None:
                os.environ["AWS_ACCESS_KEY_ID"] = orig_access_key_id
            if orig_secret_access_key is not None:
                os.environ["AWS_SECRET_ACCESS_KEY"] = orig_secret_access_key
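# The decrypted AWS key file is expected to nest credentials under an
# "AccessKey" key, matching what auth_aws reads above (and the JSON emitted
# by `aws iam create-access-key`). A hypothetical example with placeholder
# values:
example_aws_creds = {
    "AccessKey": {
        "AccessKeyId": "AKIA<placeholder>",
        "SecretAccessKey": "<placeholder-secret>",
    }
}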
def deploy(cluster_name, hub_name, config_path, dask_gateway_version):
    """
    Deploy one or more hubs in a given cluster
    """
    validate_cluster_config(cluster_name)
    validate_hub_config(cluster_name, hub_name)
    assert_single_auth_method_enabled(cluster_name, hub_name)

    with get_decrypted_file(config_path) as decrypted_file_path:
        with open(decrypted_file_path) as f:
            config = yaml.load(f)

    # Most of our hubs use Auth0 for Authentication. This lets us programmatically
    # determine what auth provider each hub uses - GitHub, Google, etc. Without
    # this, we'd have to manually generate credentials for each hub - and we
    # don't want to do that. Auth0 domains are tied to an account, and
    # this is our auth0 domain for the paid account that 2i2c has.
    auth0 = config["auth0"]

    k = KeyProvider(auth0["domain"], auth0["client_id"], auth0["client_secret"])

    # Each hub needs a unique proxy.secretToken. However, we don't want
    # to manually generate & save it. We also don't want it to change with
    # each deploy - that causes a pod restart with downtime. So instead,
    # we generate it based on a single secret key (`PROXY_SECRET_KEY`)
    # combined with the name of each hub. This way, we get unique,
    # cryptographically secure proxy.secretTokens without having to
    # keep much state. We can rotate them by changing `PROXY_SECRET_KEY`.
    # However, if `PROXY_SECRET_KEY` leaks, that means all the hubs'
    # proxy.secretTokens have leaked. So let's be careful with that!
    SECRET_KEY = bytes.fromhex(config["secret_key"])

    config_file_path = find_absolute_path_to_cluster_file(cluster_name)
    with open(config_file_path) as f:
        cluster = Cluster(yaml.load(f), config_file_path.parent)

    with cluster.auth():
        hubs = cluster.hubs
        if hub_name:
            hub = next((hub for hub in hubs if hub.spec["name"] == hub_name), None)
            if hub is None:
                raise ValueError(f"Hub {hub_name} not found in cluster {cluster_name}")
            print_colour(f"Deploying hub {hub.spec['name']}...")
            hub.deploy(k, SECRET_KEY, dask_gateway_version)
        else:
            for i, hub in enumerate(hubs):
                print_colour(
                    f"{i + 1} / {len(hubs)}: Deploying hub {hub.spec['name']}..."
                )
                hub.deploy(k, SECRET_KEY, dask_gateway_version)
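# The comment above describes deriving each hub's proxy.secretToken from a
# single secret key combined with the hub's name. A minimal sketch of one way
# to do that with HMAC; the actual derivation lives in get_generated_config,
# so treat this as an illustration rather than the repo's exact code:
import hashlib
import hmac


def derive_proxy_secret_token(secret_key: bytes, hub_name: str) -> str:
    # Deterministic per-hub token: the same inputs always produce the same
    # token (so redeploys don't restart the proxy), while rotating secret_key
    # rotates every hub's token at once.
    return hmac.new(secret_key, hub_name.encode(), hashlib.sha256).hexdigest()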
def auth_kubeconfig(self):
    """
    Context manager for authenticating with just a kubeconfig file

    For the duration of the contextmanager, we:
    1. Decrypt the file specified in kubeconfig.file with sops
    2. Set `KUBECONFIG` env var to our decrypted file path, so applications
       we call (primarily helm) will use that as config
    """
    config = self.spec["kubeconfig"]
    config_path = self.config_path.joinpath(config["file"])

    orig_kubeconfig = os.environ.get("KUBECONFIG")
    try:
        with get_decrypted_file(config_path) as decrypted_key_path:
            os.environ["KUBECONFIG"] = decrypted_key_path
            yield
    finally:
        # Restore the original KUBECONFIG (if any) so we don't leak the
        # decrypted path into the calling environment
        if orig_kubeconfig is not None:
            os.environ["KUBECONFIG"] = orig_kubeconfig
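# get_decrypted_file is used throughout this module. A minimal sketch of what
# such a helper could look like, assuming sops-encrypted files; the real
# helper may differ (e.g. it may pass unencrypted files through untouched):
import subprocess
import tempfile
from contextlib import contextmanager


@contextmanager
def get_decrypted_file_sketch(path):
    # Decrypt a sops-encrypted file and expose it as a temporary file path;
    # the temp file is removed when the context exits.
    decrypted = subprocess.check_output(["sops", "--decrypt", str(path)])
    with tempfile.NamedTemporaryFile() as f:
        f.write(decrypted)
        f.flush()
        yield f.name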
def get_central_grafana_token(cluster_name):
    """Returns the access token of the Grafana located in `cluster_name` cluster.

    This access token should have enough permissions to create datasources.
    """
    # Get the location of the file that stores the central grafana token
    cluster_config_dir_path = find_absolute_path_to_cluster_file(cluster_name).parent

    grafana_token_file = cluster_config_dir_path.joinpath(
        "enc-grafana-token.secret.yaml"
    )

    # Read the secret grafana token file
    with get_decrypted_file(grafana_token_file) as decrypted_file_path:
        with open(decrypted_file_path) as f:
            config = yaml.load(f)

    return config["grafana_token"]
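# The decrypted enc-grafana-token.secret.yaml only needs the single key read
# above; a hypothetical example of its parsed contents (the value is a
# placeholder):
example_grafana_token_config = {
    "grafana_token": "<api-token-with-datasource-create-permissions>",
}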
def auth_gcp(self):
    config = self.spec["gcp"]
    key_path = self.config_path.joinpath(config["key"])
    project = config["project"]
    # If cluster is regional, it'll have a `region` key set.
    # Else, it'll just have a `zone` key set. Let's respect either.
    location = config.get("zone", config.get("region"))
    cluster = config["cluster"]

    with tempfile.NamedTemporaryFile() as kubeconfig:
        # CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE is removed, as the action of
        # "gcloud auth activate-service-account" would otherwise be secondary
        # to it, and this env var can be set by the GitHub Actions we use
        # before using this deployer script to deploy hubs to clusters.
        orig_cloudsdk_auth_credential_file_override = os.environ.pop(
            "CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE", None
        )
        orig_kubeconfig = os.environ.get("KUBECONFIG")
        try:
            os.environ["KUBECONFIG"] = kubeconfig.name
            with get_decrypted_file(key_path) as decrypted_key_path:
                subprocess.check_call([
                    "gcloud",
                    "auth",
                    "activate-service-account",
                    f"--key-file={os.path.abspath(decrypted_key_path)}",
                ])

            subprocess.check_call([
                "gcloud",
                "container",
                "clusters",
                # --zone works with regions too
                f"--zone={location}",
                f"--project={project}",
                "get-credentials",
                cluster,
            ])

            yield
        finally:
            if orig_kubeconfig is not None:
                os.environ["KUBECONFIG"] = orig_kubeconfig
            if orig_cloudsdk_auth_credential_file_override is not None:
                os.environ["CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE"] = (
                    orig_cloudsdk_auth_credential_file_override
                )
def _load_client_id(self, config_filename):
    try:
        with get_decrypted_file(config_filename) as decrypted_path:
            with open(decrypted_path) as f:
                auth_config = yaml.load(f)

        basehub = auth_config.get("basehub", None)
        if basehub:
            return auth_config["basehub"]["jupyterhub"]["hub"]["config"][
                "CILogonOAuthenticator"
            ]["client_id"]
        return auth_config["jupyterhub"]["hub"]["config"]["CILogonOAuthenticator"][
            "client_id"
        ]
    except FileNotFoundError:
        print(
            "Oops! The CILogon client you requested doesn't exist! Please create it first."
        )
        return
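# _load_client_id handles two layouts of the decrypted auth config, depending
# on whether the hub's values are nested under a `basehub` key. Hypothetical
# minimal examples of both shapes (the client_id values are placeholders):
example_daskhub_auth_config = {
    "basehub": {
        "jupyterhub": {
            "hub": {
                "config": {
                    "CILogonOAuthenticator": {"client_id": "cilogon:/client_id/<id>"}
                }
            }
        }
    }
}
example_basehub_auth_config = {
    "jupyterhub": {
        "hub": {
            "config": {
                "CILogonOAuthenticator": {"client_id": "cilogon:/client_id/<id>"}
            }
        }
    }
}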
def get_cluster_prometheus_creds(cluster_name):
    """Retrieves the credentials of the prometheus instance running on the
    `cluster_name` cluster. These credentials are stored in the
    `enc-support.secret.values.yaml` file of each cluster's config directory.

    Args:
        cluster_name: name of the cluster

    Returns:
        dict object: {username: `username`, password: `password`}
    """
    cluster_config_dir_path = find_absolute_path_to_cluster_file(cluster_name).parent

    config_filename = cluster_config_dir_path.joinpath("enc-support.secret.values.yaml")

    with get_decrypted_file(config_filename) as decrypted_path:
        with open(decrypted_path) as f:
            prometheus_config = yaml.load(f)

    return prometheus_config.get("prometheusIngressAuthSecret", {})
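# A minimal sketch of how these credentials might be used to query the
# cluster's Prometheus over basic auth; the URL and query here are
# placeholders, not values from this repo:
import requests


def query_prometheus_sketch(cluster_name):
    creds = get_cluster_prometheus_creds(cluster_name)
    response = requests.get(
        "https://prometheus.example-cluster.example.org/api/v1/query",
        params={"query": "up"},
        auth=(creds["username"], creds["password"]),
    )
    response.raise_for_status()
    return response.json()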
def deploy_support(self):
    cert_manager_url = "https://charts.jetstack.io"
    cert_manager_version = "v1.3.1"

    print_colour("Adding cert-manager chart repo...")
    subprocess.check_call([
        "helm",
        "repo",
        "add",
        "jetstack",
        cert_manager_url,
    ])

    print_colour("Updating cert-manager chart repo...")
    subprocess.check_call([
        "helm",
        "repo",
        "update",
    ])

    print_colour("Provisioning cert-manager...")
    subprocess.check_call([
        "helm",
        "upgrade",
        "--install",
        "--create-namespace",
        "--namespace=cert-manager",
        "cert-manager",
        "jetstack/cert-manager",
        f"--version={cert_manager_version}",
        "--set=installCRDs=true",
    ])
    print_colour("Done!")

    print_colour("Provisioning support charts...")
    support_dir = (Path(__file__).parent.parent).joinpath("helm-charts", "support")
    subprocess.check_call(["helm", "dep", "up", support_dir])

    support_secrets_file = support_dir.joinpath("enc-support.secret.yaml")

    # TODO: Update this with statement to handle any number of context managers
    # containing decrypted support values files. Not critical right now as
    # no individual cluster has specific support secrets, but it's possible
    # to support that if we want to in the future.
    with get_decrypted_file(support_secrets_file) as secret_file:
        cmd = [
            "helm",
            "upgrade",
            "--install",
            "--create-namespace",
            "--namespace=support",
            "--wait",
            "support",
            str(support_dir),
            f"--values={secret_file}",
        ]

        for values_file in self.support["helm_chart_values_files"]:
            cmd.append(f"--values={self.config_path.joinpath(values_file)}")

        print_colour(f"Running {' '.join([str(c) for c in cmd])}")
        subprocess.check_call(cmd)
    print_colour("Done!")
def deploy(self, auth_provider, secret_key, dask_gateway_version):
    """
    Deploy this hub
    """
    # Support overriding domain configuration in the loaded cluster.yaml via
    # a cluster.yaml specified enc-<something>.secret.yaml file that only
    # includes the domain configuration of a typical cluster.yaml file.
    #
    # Check if this hub has an override file. If yes, apply override.
    #
    # FIXME: This could be generalized so that the cluster.yaml would allow
    # any of this configuration to be specified in a secret file instead of a
    # publicly readable file. We should not keep adding specific config overrides
    # when such needs occur, but instead make cluster.yaml able to link to
    # additional secret configuration.
    if "domain_override_file" in self.spec.keys():
        domain_override_file = self.spec["domain_override_file"]

        with get_decrypted_file(
            self.cluster.config_path.joinpath(domain_override_file)
        ) as decrypted_path:
            with open(decrypted_path) as f:
                domain_override_config = yaml.load(f)

        self.spec["domain"] = domain_override_config["domain"]

    generated_values = self.get_generated_config(auth_provider, secret_key)

    if self.spec["helm_chart"] == "daskhub":
        # Install CRDs for daskhub before deployment
        manifest_urls = [
            f"https://raw.githubusercontent.com/dask/dask-gateway/{dask_gateway_version}/resources/helm/dask-gateway/crds/daskclusters.yaml",
            f"https://raw.githubusercontent.com/dask/dask-gateway/{dask_gateway_version}/resources/helm/dask-gateway/crds/traefik.yaml",
        ]

        for manifest_url in manifest_urls:
            subprocess.check_call(["kubectl", "apply", "-f", manifest_url])

    with tempfile.NamedTemporaryFile(
        mode="w"
    ) as generated_values_file, get_decrypted_files(
        self.cluster.config_path.joinpath(p)
        for p in self.spec["helm_chart_values_files"]
    ) as values_files:
        json.dump(generated_values, generated_values_file)
        generated_values_file.flush()

        cmd = [
            "helm",
            "upgrade",
            "--install",
            "--create-namespace",
            "--wait",
            f"--namespace={self.spec['name']}",
            self.spec["name"],
            helm_charts_dir.joinpath(self.spec["helm_chart"]),
            # Ordering matters here - config explicitly mentioned in the CLI should
            # take priority over our generated values. Based on how helm does
            # overrides, this means we should put the config from cluster.yaml last.
            f"--values={generated_values_file.name}",
        ]

        # Add on the values files
        for values_file in values_files:
            cmd.append(f"--values={values_file}")

        # The join method will fail on the PosixPath elements if they're not
        # transformed into strings first
        print_colour(f"Running {' '.join([str(c) for c in cmd])}")
        subprocess.check_call(cmd)
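# get_decrypted_files (plural) decrypts several values files at once. A
# plausible sketch, assuming it simply stacks the single-file helper with
# contextlib.ExitStack; the real implementation may differ:
from contextlib import ExitStack, contextmanager


@contextmanager
def get_decrypted_files_sketch(paths):
    # Enter one get_decrypted_file context per path and yield the list of
    # decrypted file paths; all temp files are cleaned up together when the
    # stack unwinds.
    with ExitStack() as stack:
        yield [stack.enter_context(get_decrypted_file(p)) for p in paths]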
def deploy_grafana_dashboards(cluster_name):
    """
    Deploy grafana dashboards to a cluster that provide useful metrics
    for operating a JupyterHub

    Grafana dashboards and the deployment mechanism in question are
    maintained in this repo: https://github.com/jupyterhub/grafana-dashboards
    """
    validate_cluster_config(cluster_name)
    validate_support_config(cluster_name)

    config_file_path = find_absolute_path_to_cluster_file(cluster_name)
    with open(config_file_path) as f:
        cluster = Cluster(yaml.load(f), config_file_path.parent)

    # If grafana support chart is not deployed, then there's nothing to do
    if not cluster.support:
        print_colour(
            "Support chart has not been deployed. Skipping Grafana dashboards deployment..."
        )
        return

    grafana_token_file = (config_file_path.parent).joinpath(
        "enc-grafana-token.secret.yaml"
    )

    # Read the cluster specific secret grafana token file
    with get_decrypted_file(grafana_token_file) as decrypted_file_path:
        with open(decrypted_file_path) as f:
            config = yaml.load(f)

    # Check `grafana_token` exists in the secret config file before continuing
    if "grafana_token" not in config.keys():
        raise ValueError(
            f"`grafana_token` not provided in secret file! Please add it and try again: {grafana_token_file}"
        )

    # FIXME: We assume grafana_url and uses_tls config will be defined in the first
    # file listed under support.helm_chart_values_files.
    support_values_file = cluster.support.get("helm_chart_values_files", [])[0]
    with open(config_file_path.parent.joinpath(support_values_file)) as f:
        support_values_config = yaml.load(f)

    # Get the url where grafana is running from the support values file
    grafana_url = (
        support_values_config.get("grafana", {}).get("ingress", {}).get("hosts", {})
    )
    uses_tls = (
        support_values_config.get("grafana", {}).get("ingress", {}).get("tls", {})
    )

    if not grafana_url:
        print_colour(
            "Couldn't find `config.grafana.ingress.hosts`. Skipping Grafana dashboards deployment..."
        )
        return

    grafana_url = (
        f"https://{grafana_url[0]}" if uses_tls else f"http://{grafana_url[0]}"
    )

    # Use the jupyterhub/grafana-dashboards deployer to deploy the dashboards
    # to this cluster's grafana
    print_colour("Cloning jupyterhub/grafana-dashboards...")

    dashboards_dir = "grafana_dashboards"

    subprocess.check_call([
        "git",
        "clone",
        "https://github.com/jupyterhub/grafana-dashboards",
        dashboards_dir,
    ])

    # We need the existing env too for the deployer to be able to find
    # jsonnet and grafonnet
    deploy_env = os.environ.copy()
    deploy_env.update({"GRAFANA_TOKEN": config["grafana_token"]})

    try:
        print_colour(f"Deploying grafana dashboards to {cluster_name}...")
        subprocess.check_call(
            ["./deploy.py", grafana_url], env=deploy_env, cwd=dashboards_dir
        )

        print_colour(f"Done! Dashboards deployed to {grafana_url}.")
    finally:
        # Delete the directory where we cloned the repo.
        # The deployer cannot call jsonnet to deploy the dashboards if using
        # a temp directory here. Might be because opening a temp file more
        # than once is attempted
        # (https://docs.python.org/3.8/library/tempfile.html#tempfile.NamedTemporaryFile)
        shutil.rmtree(dashboards_dir)
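# For reference, a hypothetical snippet of the first support values file,
# showing only the grafana ingress keys read above (hostnames are placeholders):
example_support_values_config = {
    "grafana": {
        "ingress": {
            "hosts": ["grafana.example-cluster.example.org"],
            "tls": [{"hosts": ["grafana.example-cluster.example.org"]}],
        }
    }
}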
def run_hub_health_check(cluster_name, hub_name, check_dask_scaling=False):
    """Run a health check on a given hub on a given cluster. Optionally check
    scaling of dask workers if the hub is a daskhub.

    Args:
        cluster_name (str): The name of the cluster where the hub is deployed
        hub_name (str): The name of the hub to run a health check for
        check_dask_scaling (bool, optional): If true, run an additional check
            that dask workers can scale. Only applies to daskhubs. Defaults to
            False.

    Returns:
        exit_code (int): The exit code of the pytest process. 0 for pass, any
            other integer number greater than 0 for failure.
    """
    # Read in the cluster.yaml file
    config_file_path = find_absolute_path_to_cluster_file(cluster_name)
    with open(config_file_path) as f:
        cluster = Cluster(yaml.load(f), config_file_path.parent)

    # Find the hub's config
    hub_indx = [
        indx for (indx, h) in enumerate(cluster.hubs) if h.spec["name"] == hub_name
    ]
    if len(hub_indx) == 1:
        hub = cluster.hubs[hub_indx[0]]
    elif len(hub_indx) > 1:
        print_colour("ERROR: More than one hub with this name found!")
        sys.exit(1)
    elif len(hub_indx) == 0:
        print_colour("ERROR: No hubs with this name found!")
        sys.exit(1)

    print_colour(f"Running hub health check for {hub.spec['name']}...")

    # Check if this hub has a domain override file. If yes, apply override.
    if "domain_override_file" in hub.spec.keys():
        domain_override_file = hub.spec["domain_override_file"]

        with get_decrypted_file(
            hub.cluster.config_path.joinpath(domain_override_file)
        ) as decrypted_path:
            with open(decrypted_path) as f:
                domain_override_config = yaml.load(f)

        hub.spec["domain"] = domain_override_config["domain"]

    # Retrieve hub's URL
    hub_url = f'https://{hub.spec["domain"]}'

    # Read in the service api token from a k8s Secret in the k8s cluster
    with cluster.auth():
        try:
            service_api_token_b64encoded = subprocess.check_output(
                [
                    "kubectl",
                    "get",
                    "secrets",
                    "hub",
                    f"--namespace={hub.spec['name']}",
                    r"--output=jsonpath={.data['hub\.services\.hub-health\.apiToken']}",
                ],
                text=True,
            )
        except subprocess.CalledProcessError as e:
            raise ValueError(
                f"Failed to acquire a JupyterHub API token for the hub-health service: {e.stdout}"
            )
        service_api_token = base64.b64decode(service_api_token_b64encoded).decode()

    # On failure, pytest prints out params to the test that failed.
    # This can contain sensitive info - so we hide stderr
    # FIXME: Don't use pytest - just call a function instead
    #
    # Show errors locally but redirect on CI
    gh_ci = os.environ.get("CI", "false")
    pytest_args = [
        "-q",
        "deployer/tests",
        f"--hub-url={hub_url}",
        f"--api-token={service_api_token}",
        f"--hub-type={hub.spec['helm_chart']}",
    ]

    if (hub.spec["helm_chart"] == "daskhub") and check_dask_scaling:
        pytest_args.append("--check-dask-scaling")

    if gh_ci == "true":
        print_colour("Testing on CI, not printing output")
        with open(os.devnull, "w") as dn, redirect_stderr(dn), redirect_stdout(dn):
            exit_code = pytest.main(pytest_args)
    else:
        print_colour("Testing locally, do not redirect output")
        exit_code = pytest.main(pytest_args)

    if exit_code != 0:
        print("Health check failed!", file=sys.stderr)
        sys.exit(exit_code)
    else:
        print_colour("Health check succeeded!")

    return exit_code
def deploy(self, auth_provider, secret_key, skip_hub_health_test=False):
    """
    Deploy this hub
    """
    # Support overriding domain configuration in the loaded cluster.yaml via
    # a cluster.yaml specified enc-<something>.secret.yaml file that only
    # includes the domain configuration of a typical cluster.yaml file.
    #
    # Check if this hub has an override file. If yes, apply override.
    #
    # FIXME: This could be generalized so that the cluster.yaml would allow
    # any of this configuration to be specified in a secret file instead of a
    # publicly readable file. We should not keep adding specific config overrides
    # when such needs occur, but instead make cluster.yaml able to link to
    # additional secret configuration.
    if "domain_override_file" in self.spec.keys():
        domain_override_file = self.spec["domain_override_file"]

        with get_decrypted_file(
            self.cluster.config_path.joinpath(domain_override_file)
        ) as decrypted_path:
            with open(decrypted_path) as f:
                domain_override_config = yaml.load(f)

        self.spec["domain"] = domain_override_config["domain"]

    generated_values = self.get_generated_config(auth_provider, secret_key)

    with tempfile.NamedTemporaryFile(
        mode="w"
    ) as generated_values_file, get_decrypted_files(
        self.spec["helm_chart_values_files"], self.cluster.config_path
    ) as values_files:
        json.dump(generated_values, generated_values_file)
        generated_values_file.flush()

        cmd = [
            "helm",
            "upgrade",
            "--install",
            "--create-namespace",
            "--wait",
            f"--namespace={self.spec['name']}",
            self.spec["name"],
            helm_charts_dir.joinpath(self.spec["helm_chart"]),
            # Ordering matters here - config explicitly mentioned in the CLI should
            # take priority over our generated values. Based on how helm does
            # overrides, this means we should put the config from cluster.yaml last.
            f"--values={generated_values_file.name}",
        ]

        # Add on the values files
        for values_file in values_files:
            cmd.append(f"--values={values_file}")

        # The join method will fail on the PosixPath elements if they're not
        # transformed into strings first
        print_colour(f"Running {' '.join([str(c) for c in cmd])}")
        # Can't test without deploying, since our service token isn't set by default
        subprocess.check_call(cmd)

        if not skip_hub_health_test:
            # FIXME: Clean this up
            if self.spec["helm_chart"] != "basehub":
                service_api_token = generated_values["basehub"]["jupyterhub"]["hub"][
                    "services"
                ]["hub-health"]["apiToken"]
            else:
                service_api_token = generated_values["jupyterhub"]["hub"]["services"][
                    "hub-health"
                ]["apiToken"]

            hub_url = f'https://{self.spec["domain"]}'

            # On failure, pytest prints out params to the test that failed.
            # This can contain sensitive info - so we hide stderr
            # FIXME: Don't use pytest - just call a function instead
            print_colour("Running hub health check...")
            # Show errors locally but redirect on CI
            gh_ci = os.environ.get("CI", "false")
            pytest_args = [
                "-q",
                "deployer/tests",
                "--hub-url",
                hub_url,
                "--api-token",
                service_api_token,
                "--hub-type",
                self.spec["helm_chart"],
            ]
            if gh_ci == "true":
                print_colour("Testing on CI, not printing output")
                with open(os.devnull, "w") as dn, redirect_stderr(dn), redirect_stdout(dn):
                    exit_code = pytest.main(pytest_args)
            else:
                print_colour("Testing locally, do not redirect output")
                exit_code = pytest.main(pytest_args)
            if exit_code != 0:
                print("Health check failed!", file=sys.stderr)
                sys.exit(exit_code)
            else:
                print_colour("Health check succeeded!")
def main():
    argparser = argparse.ArgumentParser(
        description="""A command line tool to create/update/delete CILogon clients.
        """
    )
    subparsers = argparser.add_subparsers(
        required=True, dest="action", help="Available subcommands"
    )

    # Create subcommand
    create_parser = subparsers.add_parser(
        "create",
        help="Create a CILogon client",
    )
    create_parser.add_argument(
        "cluster_name",
        type=str,
        help="The name of the cluster where the hub lives",
    )
    create_parser.add_argument(
        "hub_name",
        type=str,
        help="The hub for which we'll create a CILogon client",
    )
    create_parser.add_argument(
        "hub_type",
        type=str,
        help="The type of hub for which we'll create a CILogon client.",
        default="basehub",
    )
    create_parser.add_argument(
        "callback_url",
        type=str,
        help="URL that is invoked after OAuth authorization",
    )

    # Update subcommand
    update_parser = subparsers.add_parser(
        "update",
        help="Update a CILogon client",
    )
    update_parser.add_argument(
        "cluster_name",
        type=str,
        help="The name of the cluster where the hub lives",
    )
    update_parser.add_argument(
        "hub_name",
        type=str,
        help="The hub for which we'll update the CILogon client",
    )
    update_parser.add_argument(
        "callback_url",
        type=str,
        help="""
        New callback_url to associate with the client.
        This URL is invoked after OAuth authorization
        """,
    )

    # Get subcommand
    get_parser = subparsers.add_parser(
        "get",
        help="Retrieve details about an existing CILogon client",
    )
    get_parser.add_argument(
        "cluster_name",
        type=str,
        help="The name of the cluster where the hub lives",
    )
    get_parser.add_argument(
        "hub_name",
        type=str,
        help="The hub for which we'll retrieve the CILogon client details",
    )

    # Get-all subcommand
    subparsers.add_parser(
        "get-all",
        help="Retrieve details about all existing CILogon clients",
    )

    # Delete subcommand
    delete_parser = subparsers.add_parser(
        "delete",
        help="Delete an existing CILogon client",
    )
    delete_parser.add_argument(
        "cluster_name",
        type=str,
        help="The name of the cluster where the hub lives, or none if --id is present",
        default="",
        nargs="?",
    )
    delete_parser.add_argument(
        "hub_name",
        type=str,
        help="The hub for which we'll delete the CILogon client details, or none if --id is present",
        default="",
        nargs="?",
    )
    delete_parser.add_argument(
        "--id",
        type=str,
        help="The id of the client to delete, of the form cilogon:/client_id/<id>",
    )

    args = argparser.parse_args()

    # This filepath is relative to the PROJECT ROOT
    general_auth_config = "shared/deployer/enc-auth-providers-credentials.secret.yaml"
    with get_decrypted_file(general_auth_config) as decrypted_file_path:
        with open(decrypted_file_path) as f:
            config = yaml.load(f)

    cilogon = CILogonClientProvider(
        config["cilogon_admin"]["client_id"], config["cilogon_admin"]["client_secret"]
    )

    if args.action == "create":
        cilogon.create_client(
            args.cluster_name,
            args.hub_name,
            args.hub_type,
            args.callback_url,
        )
    elif args.action == "update":
        cilogon.update_client(
            args.cluster_name,
            args.hub_name,
            args.callback_url,
        )
    elif args.action == "get":
        cilogon.get_client(
            args.cluster_name,
            args.hub_name,
        )
    elif args.action == "delete":
        cilogon.delete_client(args.cluster_name, args.hub_name, args.id)
    elif args.action == "get-all":
        cilogon.get_all_clients()
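# Hypothetical invocations of this CLI; the script path, cluster, and hub
# names below are placeholders, and the elided <id> stays elided:
#
#   python deployer/cilogon_app.py create example-cluster staging basehub \
#       https://staging.example-cluster.example.org/hub/oauth_callback
#   python deployer/cilogon_app.py get example-cluster staging
#   python deployer/cilogon_app.py delete --id cilogon:/client_id/<id>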