def auth_gcp(self):
    """
    Authenticate to GCP & fetch kubernetes credentials for this cluster.

    Activates the service account from the (encrypted) key file, then runs
    `gcloud container clusters get-credentials` into a temporary kubeconfig
    and yields. Afterwards KUBECONFIG is restored to its original value —
    or removed, if it was not previously set.
    """
    config = self.spec['gcp']
    key_path = config['key']
    project = config['project']
    # If cluster is regional, it'll have a `region` key set.
    # Else, it'll just have a `zone` key set. Let's respect either.
    location = config.get('zone', config.get('region'))
    cluster = config['cluster']

    with tempfile.NamedTemporaryFile() as kubeconfig:
        orig_kubeconfig = os.environ.get('KUBECONFIG')
        try:
            os.environ['KUBECONFIG'] = kubeconfig.name
            with decrypt_file(key_path) as decrypted_key_path:
                subprocess.check_call([
                    'gcloud', 'auth', 'activate-service-account',
                    '--key-file', os.path.abspath(decrypted_key_path)
                ])

            subprocess.check_call([
                'gcloud', 'container', 'clusters',
                # --zone works with regions too
                f'--zone={location}',
                f'--project={project}',
                'get-credentials', cluster
            ])

            yield
        finally:
            # FIX: previously, when KUBECONFIG was unset before entry, it was
            # left pointing at the (now deleted) temp file. Remove it instead.
            if orig_kubeconfig is not None:
                os.environ['KUBECONFIG'] = orig_kubeconfig
            else:
                os.environ.pop('KUBECONFIG', None)
def decrypt(self):
    """Decrypt the file named in the entry widget, reporting via message boxes."""
    filename = self.cryptname_entry.get()
    # Guard: nothing was entered.
    if not filename:
        tkinter.messagebox.showerror('Error', 'Du måste ange en fil!')
        return
    # Guard: the entered path is not an existing regular file.
    if not os.path.isfile(filename):
        tkinter.messagebox.showerror('Error', 'Detta är inte en äkta fil!')
        return
    filename_dec = utils.decrypt_file(filename)
    tkinter.messagebox.showinfo(
        'Decrypted', 'File har dekrypterats i ' + filename_dec)
def deploy(cluster_name, hub_name, skip_hub_health_test, config_path):
    """
    Deploy one or more hubs in a given cluster.

    If `hub_name` is given, deploy just that hub; otherwise deploy every hub
    in the cluster. `config_path` points to an encrypted secrets file.
    """
    # Validate our config with JSON Schema first before continuing
    validate(cluster_name)

    with decrypt_file(config_path) as decrypted_file_path:
        with open(decrypted_file_path) as f:
            config = yaml.load(f)

    # All our hubs use Auth0 for Authentication. This lets us programmatically
    # determine what auth provider each hub uses - GitHub, Google, etc. Without
    # this, we'd have to manually generate credentials for each hub - and we
    # don't want to do that. Auth0 domains are tied to a account, and
    # this is our auth0 domain for the paid account that 2i2c has.
    auth0 = config["auth0"]

    k = KeyProvider(auth0["domain"], auth0["client_id"], auth0["client_secret"])

    # Each hub needs a unique proxy.secretToken. However, we don't want
    # to manually generate & save it. We also don't want it to change with
    # each deploy - that causes a pod restart with downtime. So instead,
    # we generate it based on a single secret key (`PROXY_SECRET_KEY`)
    # combined with the name of each hub. This way, we get unique,
    # cryptographically secure proxy.secretTokens without having to
    # keep much state. We can rotate them by changing `PROXY_SECRET_KEY`.
    # However, if `PROXY_SECRET_KEY` leaks, that means all the hub's
    # proxy.secretTokens have leaked. So let's be careful with that!
    SECRET_KEY = bytes.fromhex(config["secret_key"])

    config_file_path = (Path(os.getcwd()) / "config/hubs" /
                        f"{cluster_name}.cluster.yaml")
    with open(config_file_path) as f:
        cluster = Cluster(yaml.load(f))

    with cluster.auth():
        hubs = cluster.hubs
        if hub_name:
            hub = next((hub for hub in hubs if hub.spec["name"] == hub_name), None)
            # FIX: previously a nonexistent hub name crashed with an opaque
            # AttributeError on `hub.spec`; fail with a clear message instead.
            if hub is None:
                raise ValueError(
                    f"Hub {hub_name} not found in cluster {cluster_name}"
                )
            update_authenticator_config(hub.spec["config"], hub.spec["template"])
            hub.deploy(k, SECRET_KEY, skip_hub_health_test)
        else:
            for hub in hubs:
                update_authenticator_config(hub.spec["config"], hub.spec["template"])
                hub.deploy(k, SECRET_KEY, skip_hub_health_test)
def auth_kubeconfig(self):
    """
    Context manager for authenticating with just a kubeconfig file

    For the duration of the contextmanager, we:
    1. Decrypt the file specified in kubeconfig.file with sops
    2. Set `KUBECONFIG` env var to our decrypted file path,
       so applications we call (primarily helm) will use that as config
    """
    config = self.spec["kubeconfig"]
    config_path = config["file"]

    # FIX: resolves the old FIXME — remember the caller's KUBECONFIG so we
    # can put things back after the yield instead of leaking a path to the
    # decrypted (and by then removed) file.
    orig_kubeconfig = os.environ.get("KUBECONFIG")
    try:
        with decrypt_file(config_path) as decrypted_key_path:
            os.environ["KUBECONFIG"] = decrypted_key_path
            yield
    finally:
        if orig_kubeconfig is not None:
            os.environ["KUBECONFIG"] = orig_kubeconfig
        else:
            os.environ.pop("KUBECONFIG", None)
def decrypt_file(self):
    """Decrypt the selected file, driving the progress bar and result dialogs."""
    self.file_decrypt_btn['state'] = 'disabled'
    path_i = self.dir_in_entry.get()
    # Default the output directory to the input file's directory.
    path_o = self.dir_out_entry.get()
    if not path_o:
        path_o = os.path.dirname(path_i)
        self.dir_out_entry.insert('0', path_o)
    # Error statuses and their message text.
    error_text = {-2: '文件已损坏', -1: '文件信息无效'}
    for _, status, step in utils.decrypt_file(self.prikey, self.thirdkey,
                                              path_i, path_o):
        if status in error_text:
            tkinter.messagebox.showerror('Error', error_text[status])
        elif status == 0:
            ResultWindow(path_o, 1, True)
        elif status == 1:
            ResultWindow(path_o, 1, False)
        elif status == 2:
            # Incremental progress update.
            self.progressbar['value'] = self.progressbar['value'] + step
    self.progressbar['value'] = 0
    self.file_decrypt_btn['state'] = 'normal'
def auth_gcp(self):
    """
    Authenticate the gcloud CLI with a service account and fetch kubernetes
    credentials for this cluster into a temporary KUBECONFIG.

    KUBECONFIG is restored afterwards — or removed if it was not previously
    set — so no stale path to the deleted temp file is left behind.
    """
    config = self.spec["gcp"]
    key_path = config["key"]
    project = config["project"]
    # If cluster is regional, it'll have a `region` key set.
    # Else, it'll just have a `zone` key set. Let's respect either.
    location = config.get("zone", config.get("region"))
    cluster = config["cluster"]

    with tempfile.NamedTemporaryFile() as kubeconfig:
        orig_kubeconfig = os.environ.get("KUBECONFIG")
        try:
            os.environ["KUBECONFIG"] = kubeconfig.name
            with decrypt_file(key_path) as decrypted_key_path:
                subprocess.check_call(
                    [
                        "gcloud",
                        "auth",
                        "activate-service-account",
                        f"--key-file={os.path.abspath(decrypted_key_path)}",
                    ]
                )

            subprocess.check_call(
                [
                    "gcloud",
                    "container",
                    "clusters",
                    # --zone works with regions too
                    f"--zone={location}",
                    f"--project={project}",
                    "get-credentials",
                    cluster,
                ]
            )

            yield
        finally:
            # FIX: previously, when KUBECONFIG was unset before entry, it
            # stayed pointing at the deleted temp file. Remove it instead.
            if orig_kubeconfig is not None:
                os.environ["KUBECONFIG"] = orig_kubeconfig
            else:
                os.environ.pop("KUBECONFIG", None)
def validate(cluster_name):
    """Validate a cluster's config (and any secret config) against its schema."""
    cluster_dir = Path(os.getcwd()) / "config/hubs"
    with open(cluster_dir / f"{cluster_name}.cluster.yaml") as cf:
        cluster_config = yaml.load(cf)
    with open(cluster_dir / "schema.yaml") as sf:
        schema = yaml.load(sf)
    # Raises useful exception if validation fails
    jsonschema.validate(cluster_config, schema)

    secret_cluster_dir = Path(os.getcwd()) / "secrets/config/hubs"
    secret_config_file = secret_cluster_dir / f"{cluster_name}.cluster.yaml"

    # If a secret config file exists, validate it as well
    if os.path.exists(secret_config_file):
        with decrypt_file(secret_config_file) as decrypted_file_path:
            with open(decrypted_file_path) as scf:
                secret_cluster_config = yaml.load(scf)
            with open(secret_cluster_dir / "schema.yaml") as ssf:
                secret_schema = yaml.load(ssf)
            jsonschema.validate(secret_cluster_config, secret_schema)
def main():
    # Dead-man's-switch driver: encrypt a sensitive file, then listen for
    # Telegram messages. The reset keyword decrypts and exits; the kill
    # switch (or a missed check-in) discards the key and leaves the file
    # encrypted.
    telegram_handle, reset_keyword, kill_switch, time_interval, file_path = utils.get_switch_input()
    listener = Listener(telegram_handle)
    listener.start_listening()
    print("[X] Encrypting sensitive file")
    key = utils.encrypt_file(file_path)
    # Deadline for the next check-in; each non-kill message pushes it forward.
    check_in_time = time.time() + time_interval
    print("[X] Listening for incoming messages")
    while check_in_time > time.time():
        if listener.new_message:
            new_update = listener.get_new_update()
            new_message = new_update.message.text
            if kill_switch in new_message:
                # Explicit kill: break out and fall through to key discard below.
                new_update.message.reply_text("[X] Discarding decryption keys")
                break
            if reset_keyword in new_message:
                # Owner checked in: decrypt the file and shut down cleanly.
                new_update.message.reply_text("[X] Decrypting files")
                success = utils.decrypt_file(file_path, key)
                print("[X] Decrypting files")
                if not success:
                    # Decryption failed — persist the key so it isn't lost.
                    print("[X] Trouble decrypting, file maybe missing.\n[X] Writing key to file")
                    utils.write_key_to_file(file_path, key)
                listener.stop_polling()
                return
            else:
                # Any other message just extends the check-in deadline.
                new_update.message.reply_text(f"[X] Extending check-in time by {time_interval} seconds")
                check_in_time = time.time() + time_interval
        time.sleep(1)
    # Timed out or kill switch fired: drop our reference to the key.
    # NOTE(review): Python strings are immutable, so this rebinding does NOT
    # scrub the original key bytes from memory — confirm whether that matters.
    print("[X] Discarding Keys")
    key = "0" * len(key)
    print("[X] The key has been discarded.")
    listener.stop_polling()
def deploy_support(self):
    """Install cert-manager and this cluster's support chart via helm."""
    jetstack_repo_url = "https://charts.jetstack.io"
    jetstack_chart_version = "v1.3.1"

    print_colour("Adding cert-manager chart repo...")
    subprocess.check_call(
        ["helm", "repo", "add", "jetstack", jetstack_repo_url]
    )

    print_colour("Updating cert-manager chart repo...")
    subprocess.check_call(["helm", "repo", "update"])

    print_colour("Provisioning cert-manager...")
    cert_manager_cmd = [
        "helm", "upgrade", "--install", "--create-namespace",
        "--namespace=cert-manager",
        "cert-manager", "jetstack/cert-manager",
        f"--version={jetstack_chart_version}",
        "--set=installCRDs=true",
    ]
    subprocess.check_call(cert_manager_cmd)
    print_colour("Done!")

    print_colour("Provisioning support charts...")
    subprocess.check_call(["helm", "dep", "up", "support"])

    support_dir = Path(__file__).parent.parent / "support"
    support_secrets_file = support_dir / "secrets.yaml"

    # Dump the in-repo support config to a temp file, and combine it with
    # the decrypted secrets file when installing the chart.
    with tempfile.NamedTemporaryFile(mode="w") as config_values, decrypt_file(
        support_secrets_file
    ) as secrets_path:
        yaml.dump(self.support.get("config", {}), config_values)
        config_values.flush()
        support_cmd = [
            "helm", "upgrade", "--install", "--create-namespace",
            "--namespace=support",
            "support", str(support_dir),
            f"--values={secrets_path}",
            f"--values={config_values.name}",
            "--wait",
        ]
        subprocess.check_call(support_cmd)
    print_colour("Done!")
def deploy(self, auth_provider, secret_key, skip_hub_health_test=False):
    """
    Deploy this hub with helm, then (optionally) run its health check.

    `auth_provider` supplies Auth0 credentials, `secret_key` seeds the
    per-hub proxy secret. Raises CalledProcessError if helm fails and
    exits non-zero if the health check fails.
    """
    # Ensure helm charts are up to date.
    # FIX: restore the working directory even if `helm dep up` raises —
    # previously a failure left the process chdir'd into helm-charts.
    orig_dir = os.getcwd()
    os.chdir("helm-charts")
    try:
        subprocess.check_call(["helm", "dep", "up", "basehub"])
        if self.spec["template"] == "daskhub":
            subprocess.check_call(["helm", "dep", "up", "daskhub"])
    finally:
        os.chdir(orig_dir)

    # Check if this cluster has any secret config. If yes, read it in.
    secret_config_path = (
        Path(os.getcwd())
        / "secrets/config/hubs"
        / f'{self.cluster.spec["name"]}.cluster.yaml'
    )
    secret_hub_config = {}
    if os.path.exists(secret_config_path):
        with decrypt_file(secret_config_path) as decrypted_file_path:
            with open(decrypted_file_path) as f:
                secret_config = yaml.load(f)
            if secret_config.get("hubs", {}):
                hubs = secret_config["hubs"]
                current_hub = next(
                    (hub for hub in hubs if hub["name"] == self.spec["name"]), {}
                )
                # Support domain name overrides
                if "domain" in current_hub:
                    self.spec["domain"] = current_hub["domain"]
                secret_hub_config = current_hub.get("config", {})

    generated_values = self.get_generated_config(auth_provider, secret_key)

    with tempfile.NamedTemporaryFile(
        mode="w"
    ) as values_file, tempfile.NamedTemporaryFile(
        mode="w"
    ) as generated_values_file, tempfile.NamedTemporaryFile(
        mode="w"
    ) as secret_values_file:
        json.dump(self.spec["config"], values_file)
        json.dump(generated_values, generated_values_file)
        json.dump(secret_hub_config, secret_values_file)
        values_file.flush()
        generated_values_file.flush()
        secret_values_file.flush()

        cmd = [
            "helm",
            "upgrade",
            "--install",
            "--create-namespace",
            "--wait",
            f"--namespace={self.spec['name']}",
            self.spec["name"],
            os.path.join("helm-charts", self.spec["template"]),
            # Ordering matters here - config explicitly mentioned in the
            # cluster config should take priority over our generated values.
            # Based on how helm does overrides, this means we should put the
            # config from config/hubs last.
            f"--values={generated_values_file.name}",
            f"--values={values_file.name}",
            f"--values={secret_values_file.name}",
        ]

        print_colour(f"Running {' '.join(cmd)}")
        # Can't test without deploying, since our service token isn't set by default
        subprocess.check_call(cmd)

        if not skip_hub_health_test:
            # FIX: typo "FIXMEL" -> FIXME. Clean this up.
            if self.spec["template"] != "basehub":
                service_api_token = generated_values["basehub"]["jupyterhub"][
                    "hub"
                ]["services"]["hub-health"]["apiToken"]
            else:
                service_api_token = generated_values["jupyterhub"]["hub"][
                    "services"
                ]["hub-health"]["apiToken"]

            hub_url = f'https://{self.spec["domain"]}'

            # On failure, pytest prints out params to the test that failed.
            # This can contain sensitive info - so we hide stderr
            # FIXME: Don't use pytest - just call a function instead
            print_colour("Running hub health check...")
            # Show errors locally but redirect on CI
            gh_ci = os.environ.get("CI", "false")
            pytest_args = [
                "-q",
                "deployer/tests",
                "--hub-url",
                hub_url,
                "--api-token",
                service_api_token,
                "--hub-type",
                self.spec["template"],
            ]
            if gh_ci == "true":
                print_colour("Testing on CI, not printing output")
                with open(os.devnull, "w") as dn, redirect_stderr(
                    dn
                ), redirect_stdout(dn):
                    exit_code = pytest.main(pytest_args)
            else:
                print_colour("Testing locally, do not redirect output")
                exit_code = pytest.main(pytest_args)
            if exit_code != 0:
                print("Health check failed!", file=sys.stderr)
                sys.exit(exit_code)
            else:
                print_colour("Health check succeeded!")
def auth_azure(self):
    """
    Read `azure` nested config, login to Azure with a Service Principal,
    activate the appropriate subscription, then authenticate against the
    cluster using `az aks get-credentials`.
    """
    # FIX: was `self.spect["azure"]` — a typo that raised AttributeError;
    # every sibling auth method reads `self.spec`.
    config = self.spec["azure"]
    key_path = config["key"]
    cluster = config["cluster"]
    resource_group = config["resource_group"]

    with tempfile.NamedTemporaryFile() as kubeconfig:
        orig_kubeconfig = os.environ.get("KUBECONFIG", None)
        try:
            os.environ["KUBECONFIG"] = kubeconfig.name

            with decrypt_file(key_path) as decrypted_key_path:
                decrypted_key_abspath = os.path.abspath(decrypted_key_path)
                if not os.path.isfile(decrypted_key_abspath):
                    raise FileNotFoundError("The decrypted key file does not exist")
                with open(decrypted_key_path) as f:
                    service_principal = json.load(f)

            # Login to Azure
            subprocess.check_call(
                [
                    "az",
                    "login",
                    "--service-principal",
                    f"--username={service_principal['service_principal_id']}",
                    f"--password={service_principal['service_principal_password']}",
                    f"--tenant={service_principal['tenant_id']}",
                ]
            )

            # Set the Azure subscription
            subprocess.check_call(
                [
                    "az",
                    "account",
                    "set",
                    f"--subscription={service_principal['subscription_id']}",
                ]
            )

            # Get cluster creds
            subprocess.check_call(
                [
                    "az",
                    "aks",
                    "get-credentials",
                    f"--name={cluster}",
                    f"--resource-group={resource_group}",
                ]
            )

            yield
        finally:
            # FIX: also remove KUBECONFIG when it wasn't previously set, so
            # it doesn't point at the deleted temp file after we exit.
            if orig_kubeconfig is not None:
                os.environ["KUBECONFIG"] = orig_kubeconfig
            else:
                os.environ.pop("KUBECONFIG", None)
def auth_aws(self):
    """
    Reads `aws` nested config and temporarily sets environment variables
    like `KUBECONFIG`, `AWS_ACCESS_KEY_ID`, and `AWS_SECRET_ACCESS_KEY`
    before trying to authenticate with the `aws eks update-kubeconfig` or
    the `kops export kubecfg --admin` commands.

    Finally reset those environment variables to their original values to
    prevent side-effects on existing local configuration.
    """
    config = self.spec["aws"]
    key_path = config["key"]
    cluster_type = config["clusterType"]
    cluster_name = config["clusterName"]
    region = config["region"]

    if cluster_type == "kops":
        state_store = config["stateStore"]

    with tempfile.NamedTemporaryFile() as kubeconfig:
        orig_kubeconfig = os.environ.get("KUBECONFIG", None)
        orig_access_key_id = os.environ.get("AWS_ACCESS_KEY_ID", None)
        orig_secret_access_key = os.environ.get("AWS_SECRET_ACCESS_KEY", None)
        try:
            with decrypt_file(key_path) as decrypted_key_path:
                decrypted_key_abspath = os.path.abspath(decrypted_key_path)
                if not os.path.isfile(decrypted_key_abspath):
                    raise FileNotFoundError("The decrypted key file does not exist")
                with open(decrypted_key_abspath) as f:
                    creds = json.load(f)

            os.environ["AWS_ACCESS_KEY_ID"] = creds["AccessKey"]["AccessKeyId"]
            os.environ["AWS_SECRET_ACCESS_KEY"] = creds["AccessKey"][
                "SecretAccessKey"
            ]
            os.environ["KUBECONFIG"] = kubeconfig.name

            if cluster_type == "kops":
                subprocess.check_call(
                    [
                        "kops",
                        "export",
                        "kubecfg",
                        "--admin",
                        f"--name={cluster_name}",
                        f"--state={state_store}",
                    ]
                )
            else:
                subprocess.check_call(
                    [
                        "aws",
                        "eks",
                        "update-kubeconfig",
                        f"--name={cluster_name}",
                        f"--region={region}",
                    ]
                )

            yield
        finally:
            # Restore each env var, or remove it if it wasn't set before, so
            # the decrypted credentials don't linger in this process's env.
            if orig_kubeconfig is not None:
                os.environ["KUBECONFIG"] = orig_kubeconfig
            else:
                os.environ.pop("KUBECONFIG", None)
            if orig_access_key_id is not None:
                os.environ["AWS_ACCESS_KEY_ID"] = orig_access_key_id
            else:
                os.environ.pop("AWS_ACCESS_KEY_ID", None)
            # FIX: this branch previously tested `orig_kubeconfig` instead
            # of `orig_secret_access_key`, so the secret key could be
            # restored (or skipped) based on the wrong variable.
            if orig_secret_access_key is not None:
                os.environ["AWS_SECRET_ACCESS_KEY"] = orig_secret_access_key
            else:
                os.environ.pop("AWS_SECRET_ACCESS_KEY", None)
def main():
    """
    Interactive console menu: file search, encryption/decryption,
    file diffing, system info, and report generation.
    """
    allfiles = dict()
    specificfiles = dict()
    infofiles = dict()
    datefiles = dict()
    match_hashset = list()
    while True:
        print("\n")
        print("################################################")
        print("# [1]Search [2]Encryption [3]File Difference #")
        print("# [4]System Info [5]Generate report #")
        print('# q or "exit" to exit #')
        print("################################################")
        ch = input("$ ")
        # Search in files
        if ch == "1":
            while True:
                print("\n")
                print("##########################################")
                print("# [1] Find all files [2] File Extension #")
                print("# [3] By date [4] Search in files #")
                print('# q or "back" to go back #')
                print("##########################################")
                ch2 = input("$ ")
                if ch2 == "1":
                    path = input("$ Path to folder: ")
                    if path == "q" or path == "back":
                        break
                    list_tmp = utils.find_all_files(path)
                    utils.create_dict(path, allfiles, list_tmp)
                    match_hashset += utils.verify_files(list_tmp)
                    print_results(list_tmp)
                if ch2 == "2":
                    ext = input("$ Extension: ")
                    if ext == "q" or ext == "back":
                        break
                    folder = input("$ Path to folder: ")
                    if folder == "q" or folder == "back":
                        break
                    list_tmp = utils.find_specific_files(folder, ext)
                    utils.create_dict(ext, specificfiles, list_tmp)
                    match_hashset += utils.verify_files(list_tmp)
                    print_results(list_tmp)
                if ch2 == "3":
                    folder = input("$ Path to folder: ")
                    if folder == "q" or folder == "back":
                        break
                    date = input("$ Date (Ex format: 2020-03-03): ")
                    if date == "q" or date == "back":
                        break
                    list_tmp = utils.find_modified_files(folder, date)
                    utils.create_dict(date, datefiles, list_tmp)
                    # FIX: was `=`, which discarded matches gathered by the
                    # other search options; accumulate with += like they do.
                    match_hashset += utils.verify_files(list_tmp)
                    print_results(list_tmp)
                if ch2 == "4":
                    folder = input("$ Path to folder: ")
                    if folder == "q" or folder == "back":
                        break
                    ext = input("$ Extension: ")
                    if ext == "q" or ext == "back":
                        break
                    keyword = input("$ Keyword: ")
                    if keyword == "q" or keyword == "back":
                        break
                    list_tmp = utils.search_files(folder, ext, keyword)
                    utils.create_dict(keyword, infofiles, list_tmp)
                    # FIX: accumulate instead of overwrite (see above).
                    match_hashset += utils.verify_files(list_tmp)
                    print_results(list_tmp)
                if ch2 == "q" or ch2 == "back":
                    break
        # Encryption
        if ch == "2":
            while True:
                print("\n")
                print("###########################")
                print("# [1] Encrypt [2] Decrypt #")
                print('# q or "back" to go back #')
                print("###########################")
                ch2 = input("$ ")
                if ch2 == "1":
                    filename = input("$ Path to file: ")
                    if filename == "q" or filename == "back":
                        break
                    utils.encrypt_file(filename)
                    print(filename + " has been encrypted.")
                if ch2 == "2":
                    filename = input("$ Path to file: ")
                    if filename == "q" or filename == "back":
                        break
                    utils.decrypt_file(filename)
                    # FIX: added the missing space before "has been decrypted."
                    print(filename + " has been decrypted.")
                if ch2 == "q" or ch2 == "back":
                    break
        # File Difference
        if ch == "3":
            while True:
                print("\n")
                print(' q or "back" to go back')
                file1 = input("$ File 1: ")
                if file1 == "q" or file1 == "back":
                    break
                file2 = input("$ File 2: ")
                if file2 == "q" or file2 == "back":
                    break
                file1_diff, file2_diff = utils.word_difference(file1, file2)
                print()
                print("Words in file 1, but not in file 2:")
                print_results(file1_diff)
                print("Words in file 2, but not in file 1:")
                print_results(file2_diff)
        # System info
        if ch == "4":
            print_results(utils.system_information())
        # Generate report
        if ch == "5":
            dictionary = dict()
            dictionary['sys'] = utils.system_information()
            dictionary['hashset'] = match_hashset
            dictionary['allfiles'] = allfiles
            dictionary['extfiles'] = specificfiles
            dictionary['infofiles'] = infofiles
            dictionary['datefiles'] = datefiles
            utils.gen_report(dictionary)
            print("The report has been generated!")
        if ch == "q" or ch == "exit":
            print("\n")
            print(" Cya! ")
            print("\n")
            break
def deploy_jupyterhub_grafana(cluster_name):
    """
    Deploy grafana dashboards for operating a hub
    """
    # Validate our config with JSON Schema first before continuing
    validate(cluster_name)

    config_file_path = (Path(os.getcwd()) / "config/hubs" /
                        f"{cluster_name}.cluster.yaml")
    with open(config_file_path) as f:
        cluster = Cluster(yaml.load(f))

    # If grafana support chart is not deployed, then there's nothing to do
    if not cluster.support:
        print_colour(
            "Support chart has not been deployed. Skipping Grafana dashboards deployment..."
        )
        return

    secret_config_file = (Path(os.getcwd()) / "secrets/config/hubs" /
                          f"{cluster_name}.cluster.yaml")

    # Read and set GRAFANA_TOKEN from the cluster specific secret config file
    with decrypt_file(secret_config_file) as decrypted_file_path:
        with open(decrypted_file_path) as f:
            config = yaml.load(f)

    # Get the url where grafana is running from the cluster config
    grafana_url = (cluster.support.get("config", {})
                   .get("grafana", {})
                   .get("ingress", {})
                   .get("hosts", {}))
    uses_tls = (cluster.support.get("config", {})
                .get("grafana", {})
                .get("ingress", {})
                .get("tls", {}))

    if not grafana_url:
        print_colour(
            "Couldn't find `config.grafana.ingress.hosts`. Skipping Grafana dashboards deployment..."
        )
        return

    grafana_url = ("https://" + grafana_url[0]
                   if uses_tls
                   else "http://" + grafana_url[0])

    # Use the jupyterhub/grafana-dashboards deployer to deploy the dashboards to this cluster's grafana
    print_colour("Cloning jupyterhub/grafana-dashboards...")
    dashboards_dir = "grafana_dashboards"
    subprocess.check_call([
        "git",
        "clone",
        "https://github.com/jupyterhub/grafana-dashboards",
        dashboards_dir,
    ])

    # We need the existing env too for the deployer to be able to find jssonnet and grafonnet
    deploy_env = os.environ.copy()
    deploy_env.update({"GRAFANA_TOKEN": config["grafana_token"]})

    try:
        print_colour(f"Deploying grafana dashboards to {cluster_name}...")
        subprocess.check_call(["./deploy.py", grafana_url],
                              env=deploy_env,
                              cwd=dashboards_dir)
        # FIX: repaired user-facing typo "Dasboards" -> "Dashboards".
        print_colour(f"Done! Dashboards deployed to {grafana_url}.")
    finally:
        # Delete the directory where we cloned the repo.
        # The deployer cannot call jsonnet to deploy the dashboards if using a temp directory here.
        # Might be because opening more than once of a temp file is tried
        # (https://docs.python.org/3.8/library/tempfile.html#tempfile.NamedTemporaryFile)
        shutil.rmtree(dashboards_dir)
def load_encrypted(self, config_file, passphrase):
    """Decrypt `config_file` with `passphrase`, parse it as YAML, and load it."""
    decrypted_text = decrypt_file(config_file, passphrase)
    parsed_config = yaml.safe_load(decrypted_text)
    self._process_config(parsed_config)
def download_and_decript(file_id, fd_out):
    """Fetch (or reuse a cached copy of) `file_id` and decrypt it into `fd_out`."""
    # NOTE(review): `key` is not a parameter here — presumably a module-level
    # global; confirm it is bound before this function runs.
    cached_io = download_or_get_cached(file_id)
    # Rewind: the cached stream may have been read before.
    cached_io.seek(0)
    decrypt_file(cached_io, key, fd_out)
    log.info(download_or_get_cached.cache_info())