def set_kube_config(self) -> None:
    """Point the local kubeconfig at this layer's AKS cluster.

    Fails fast if the cluster has not been applied yet; otherwise shells out
    to `az aks get-credentials` to write admin credentials.

    Raises:
        Exception: if the AKS cluster cannot be found in the environment.
    """
    ensure_installed("az")
    root_providers = self.layer.root().gen_providers(0)
    resource_group = root_providers["terraform"]["backend"]["azurerm"][
        "resource_group_name"
    ]
    cluster_name = self.layer.get_cluster_name()
    context_name = self.get_kube_context_name()
    if not self.cluster_exist():
        raise Exception(
            "The AKS cluster name could not be determined -- please make sure it has been applied in the environment."
        )
    # NOTE(review): the "-admin" suffix is stripped here, presumably because
    # `az` appends it itself when --admin is passed -- confirm.
    get_credentials_cmd = [
        "az",
        "aks",
        "get-credentials",
        "--resource-group",
        resource_group,
        "--name",
        cluster_name,
        "--admin",
        "--overwrite-existing",
        "--context",
        context_name.replace("-admin", ""),
    ]
    nice_run(get_credentials_cmd, stdout=DEVNULL, check=True)
def _make_installation_file() -> None:
    """Download the Opta install script to TEMP_INSTALLATION_FILENAME and make it executable."""
    logger.debug(f"Querying {OPTA_INSTALL_URL}")
    response = requests.get(OPTA_INSTALL_URL)
    response.raise_for_status()
    with open(TEMP_INSTALLATION_FILENAME, "w") as script_file:
        script_file.write(response.text)
    # NOTE(review): 777 is broader than a run-once script needs; 755 would suffice.
    nice_run(["chmod", "777", TEMP_INSTALLATION_FILENAME])
def remove_from_state(cls, resource_address: str) -> None:
    """Drop `resource_address` from Terraform state via `terraform state rm`."""
    run_env = {**os.environ.copy(), **EXTRA_ENV}
    nice_run(
        ["terraform", "state", "rm", resource_address],
        use_asyncio_nice_run=True,
        env=run_env,
    )
def refresh(cls, layer: "Layer", *tf_flags: str) -> None:
    """Run `terraform refresh` for `layer`, forwarding any extra CLI flags."""
    extra_env_kwargs = cls.insert_extra_env(layer)
    refresh_cmd = ["terraform", "refresh", *tf_flags]
    nice_run(refresh_cmd, check=True, use_asyncio_nice_run=True, **extra_env_kwargs)
def cluster_exist(self) -> bool:
    """Return True if kubectl can reach the currently configured cluster.

    Raises:
        UserErrors: if `kubectl version` fails, i.e. the current kubectl
            configuration cannot connect to a cluster.
    """
    # "kubectl version" returns an error code if it can't connect to a cluster
    try:
        nice_run(["kubectl", "version"], check=True, capture_output=True)
    except Exception as e:
        # Chain the underlying failure so the root cause isn't lost.
        raise UserErrors(
            "The current kubectl configuration must be valid if you wanna use the BYO K8s feature"
        ) from e
    return True
def test_timeout(self):
    """The asyncio runner must raise TimeoutError when the command outlives `timeout`."""
    with pytest.raises(TimeoutError):
        nice_run(
            ["sleep", "5"],
            timeout=1,
            check=True,
            capture_output=True,
            use_asyncio_nice_run=True,
        )
def import_resource(cls, tf_resource_address: str, aws_resource_id: str, layer: "Layer") -> None:
    """Import an existing cloud resource into Terraform state for `layer`."""
    run_kwargs = cls.insert_extra_env(layer)
    nice_run(
        ["terraform", "import", tf_resource_address, aws_resource_id],
        use_asyncio_nice_run=True,
        check=True,
        **run_kwargs,
    )
def push_to_docker_local(
    local_image: str,
    registry_url: str,
    image_tag_override: Optional[str],
) -> Tuple[str, str]:
    """Tag `local_image` for `registry_url` and push it (no login step).

    Returns:
        A (digest, tag) tuple for the pushed image.
    """
    tag = get_push_tag(local_image, image_tag_override)
    remote_name = f"{registry_url}:{tag}"
    for docker_cmd in (
        ["docker", "tag", local_image, remote_name],
        ["docker", "push", remote_name],
    ):
        nice_run(docker_cmd, check=True)
    return get_image_digest(registry_url, tag), tag
def shell(env: Optional[str], config: str, type: str, local: Optional[bool], var: Dict[str, str]) -> None:
    """
    Get a shell into one of the pods in a service

    Examples:

    opta shell -c my-service.yaml
    """
    config = check_opta_file_exists(config)
    if local:
        config = local_setup(config, input_variables=var)

    # Configure kubectl
    layer = Layer.load_from_yaml(
        config, env, input_variables=var, strict_input_variables=False
    )
    amplitude_client.send_event(
        amplitude_client.SHELL_EVENT,
        event_properties={"org_name": layer.org_name, "layer_name": layer.name},
    )
    layer.verify_cloud_credentials()
    gen_all(layer)
    set_kube_config(layer)
    load_opta_kube_config()
    context_name = layer.get_cloud_client().get_kube_context_name()

    # Get a random pod in the service
    core_api = CoreV1Api()
    pods = core_api.list_namespaced_pod(layer.name).items
    if not pods:
        raise UserErrors("This service is not yet deployed")

    kubeconfig = constants.GENERATED_KUBE_CONFIG or constants.DEFAULT_KUBECONFIG
    exec_cmd = [
        "kubectl",
        "exec",
        "-n",
        layer.name,
        "-c",
        "k8s-service",
        "--kubeconfig",
        kubeconfig,
        "--context",
        context_name,
        pods[0].metadata.name,
        "-it",
        "--",
        type,
        "-il",
    ]
    nice_run(exec_cmd)
def test_graceful_timeout_exit(self):
    """Timing out the child must still let its signal handler create the marker file."""

    def clear_stale_marker() -> None:
        # A leftover marker from a previous run would cause a false pass.
        if os.path.exists(GRACEFUL_TERMINATION_FILE):
            os.remove(GRACEFUL_TERMINATION_FILE)

    clear_stale_marker()
    with pytest.raises(TimeoutExpired):
        nice_run(["python", SIGNAL_HANDLER_SCRIPT], timeout=3)
    assert os.path.exists(GRACEFUL_TERMINATION_FILE)
    # clean up
    os.remove(GRACEFUL_TERMINATION_FILE)
def test_graceful_timeout_exit(self):
    """Asyncio variant: the timeout must raise TimeoutError yet allow graceful termination."""
    if os.path.exists(GRACEFUL_TERMINATION_FILE):
        os.remove(GRACEFUL_TERMINATION_FILE)

    with pytest.raises(TimeoutError):
        nice_run(
            ["python", SIGNAL_HANDLER_SCRIPT],
            timeout=3,
            use_asyncio_nice_run=True,
        )

    # Give the child time to finish its signal handler before asserting.
    sleep(5)
    assert os.path.exists(GRACEFUL_TERMINATION_FILE)
    # clean up
    os.remove(GRACEFUL_TERMINATION_FILE)
def _upgrade() -> None:
    """Best-effort self-upgrade: install the latest Opta if a newer version exists.

    Failures are reported to the user but never propagated; the temporary
    installation script is always cleaned up.
    """
    try:
        upgrade_present = check_version_upgrade(is_upgrade_call=True)
        if upgrade_present:
            _make_installation_file()
            # Auto-answer the installer's confirmation prompt.
            nice_run([f"./{TEMP_INSTALLATION_FILENAME}"], input=b"y")
            _upgrade_successful()
    except Exception:
        # Previously the exception was discarded entirely; keep the root cause
        # in the debug log while showing the same user-facing message.
        logger.debug("Opta upgrade failed", exc_info=True)
        logger.error(
            "\nUnable to install latest version of Opta."
            "\nPlease follow the instructions on https://docs.opta.dev/installation"
        )
    finally:
        _cleanup_installation_file()
def cluster_exist(self) -> bool:
    """Return True if the layer's AKS cluster is present in its resource group.

    Any failure to query Azure (missing CLI, auth problems, unparsable
    output) is treated as "cluster does not exist".
    """
    ensure_installed("az")
    root_providers = self.layer.root().gen_providers(0)
    resource_group = root_providers["terraform"]["backend"]["azurerm"][
        "resource_group_name"
    ]
    subscription = root_providers["provider"]["azurerm"]["subscription_id"]
    expected_name = self.layer.get_cluster_name()
    try:
        listing_json = nice_run(
            [
                "az",
                "aks",
                "list",
                "--subscription",
                subscription,
                "--resource-group",
                resource_group,
            ],
            capture_output=True,
            check=True,
        ).stdout
        clusters = json.loads(listing_json)
        return any(cluster.get("name") == expected_name for cluster in clusters)
    except Exception:
        return False
def rollback_helm(cls, kube_context: str, release: str, namespace: str, revision: str = "") -> None:
    """Roll back `release` to `revision`; rolling back revision 1 uninstalls instead.

    Raises:
        UserErrors: when helm reports a failure, with helm's stderr attached.
    """
    cls.validate_helm_installed()
    kubeconfig = constants.GENERATED_KUBE_CONFIG or constants.DEFAULT_KUBECONFIG
    common_flags = [
        "--kube-context",
        kube_context,
        "--kubeconfig",
        kubeconfig,
        "--namespace",
        namespace,
    ]
    if revision == "1":
        # There is nothing before revision 1 to roll back to, so remove the release.
        helm_cmd = ["helm", "uninstall", release, *common_flags]
    else:
        helm_cmd = ["helm", "rollback", release, revision, *common_flags]
    try:
        nice_run(helm_cmd, check=True)
    except CalledProcessError as e:
        raise UserErrors(
            f"Helm was unable to rollback the release: {release}.\n"
            "Following error was raised by Helm:\n"
            f"{e.stderr}")
    except Exception as e:
        raise e
def force_unlock(cls, layer: "Layer", *tf_flags: str) -> None:
    """Release a stale Terraform state lock on `layer`.

    No-op when no lock exists. If `terraform force-unlock` fails, fall back
    to deleting the lock directly from the backend.
    """
    tf_lock_exists, lock_id = cls.tf_lock_details(layer)
    if not tf_lock_exists:
        print("Terraform Lock Id could not be found.")
        return
    try:
        nice_run(
            ["terraform", "force-unlock", *tf_flags, lock_id],
            use_asyncio_nice_run=True,
            check=True,
        )
    except Exception as e:
        # Typo fix: "occured" -> "occurred".
        logger.info("An exception occurred while removing the Terraform Lock.")
        cls.force_delete_terraform_lock(layer, e)
def init(cls, quiet: Optional[bool] = False, *tf_flags: str, layer: "Layer") -> None:
    """Run `terraform init` for `layer`.

    Args:
        quiet: when truthy, discard stdout and capture stderr via a pipe.
        tf_flags: extra flags forwarded to `terraform init`.
        layer: the layer whose extra environment variables are injected.

    Raises:
        CalledProcessError: if terraform exits non-zero.
    """
    kwargs = cls.insert_extra_env(layer)
    if quiet:
        kwargs["stderr"] = PIPE
        kwargs["stdout"] = DEVNULL
    # The former `except CalledProcessError: raise` was a no-op; letting the
    # exception propagate naturally is equivalent.
    nice_run(
        ["terraform", "init", *tf_flags],
        check=True,
        use_asyncio_nice_run=True,
        **kwargs,
    )
def test_echo(self):
    """Sanity-check the asyncio runner: echo exits 0 and its stdout is captured as text."""
    result = nice_run(
        ["echo", "Hello world!"],
        capture_output=True,
        check=True,
        use_asyncio_nice_run=True,
    )
    assert result.returncode == 0
    assert result.stdout == "Hello world!\n"
def cluster_exist(self) -> bool:
    """Return True if `kind` reports at least one local cluster; False on any failure."""
    try:
        listing: str = nice_run(
            [f"{HOME}/.opta/local/kind", "get", "clusters"],
            check=True,
            capture_output=True,
        ).stdout
        return bool(listing.strip())
    except Exception:
        return False
def is_symlinked_path() -> Tuple[bool, str]:
    """Detect whether the current working directory is reached via a symlink.

    Compares the shell's `pwd` output (which presumably reports the logical,
    symlink-preserving path -- TODO confirm against nice_run's semantics)
    with ``os.getcwd()``.

    Returns:
        Tuple of (paths differ i.e. a symlink appears to be involved,
        the path returned by ``os.getcwd()``).
    """
    # NOTE(review): shell=True combined with a list argument is unusual for
    # subprocess-style APIs -- presumably nice_run handles this; confirm.
    pwd_path = nice_run(
        ["pwd"],
        capture_output=True,
        shell=True,  # nosec
        tee=False,
        use_asyncio_nice_run=True,  # nosec
    ).stdout.strip()  # nosec
    cwd_path = os.getcwd()
    return pwd_path != cwd_path, cwd_path
def get_helm_list(
    cls,
    kube_context: str,
    namespace: Optional[str] = None,
    release: Optional[str] = None,
    status: Optional[str] = None,
) -> List:  # type: ignore # nosec
    """
    Returns a list of helm releases.
    The releases can be filtered by namespace, release name and status.
    """
    cls.validate_helm_installed()
    if namespace is not None:
        namespace_args = ["--namespace", str(namespace)]
    else:
        namespace_args = ["--all-namespaces"]
    try:
        listing = nice_run(
            [
                "helm",
                "list",
                "--all",
                "--kube-context",
                kube_context,
                "--kubeconfig",
                constants.GENERATED_KUBE_CONFIG or constants.DEFAULT_KUBECONFIG,
                *namespace_args,
                "-o",
                "json",
            ],
            capture_output=True,
            check=True,
        )
    except CalledProcessError as e:
        raise UserErrors(f"Error: {e.stderr}")
    except Exception as e:
        raise e
    releases = json.loads(listing.stdout)
    if release is not None:
        releases = [r for r in releases if r["name"] == release]
    if status is not None:
        releases = [r for r in releases if r["status"] == status]
    return releases
def get_version(cls) -> str:
    """Return the installed Terraform version string (e.g. "1.5.0").

    Raises:
        CalledProcessError: if `terraform version` exits non-zero.
    """
    # The former `except CalledProcessError: raise` was a no-op and is removed.
    out = nice_run(
        ["terraform", "version", "-json"],
        check=True,
        capture_output=True,
        tee=False,
    ).stdout
    terraform_data = json.loads(out)
    return terraform_data["terraform_version"]
def apply(
    cls,
    layer: "Layer",
    *tf_flags: str,
    no_init: Optional[bool] = False,
    quiet: Optional[bool] = False,
) -> None:
    """Run `terraform apply` for `layer`, optionally running `init` first.

    Args:
        layer: the layer whose extra environment variables are injected.
        tf_flags: extra flags forwarded to `terraform apply`.
        no_init: skip the `terraform init` step when True.
        quiet: discard stdout and capture stderr via a pipe.

    Raises:
        CalledProcessError: if terraform exits non-zero.
    """
    if not no_init:
        cls.init(quiet, layer=layer)
    kwargs = cls.insert_extra_env(layer)
    if quiet:
        kwargs["stderr"] = PIPE
        kwargs["stdout"] = DEVNULL
    # The former `except CalledProcessError: raise` added nothing; let errors propagate.
    nice_run(
        ["terraform", "apply", "-compact-warnings", *tf_flags],
        check=True,
        use_asyncio_nice_run=True,
        **kwargs,
    )
def show(cls, *tf_flags: str, capture_output: bool = False) -> Optional[str]:
    """Run `terraform show`, optionally returning its stdout.

    Args:
        tf_flags: extra flags forwarded to `terraform show`.
        capture_output: when True, return terraform's stdout instead of streaming it.

    Returns:
        The command's stdout when `capture_output` is True, otherwise None.

    Raises:
        CalledProcessError: if terraform exits non-zero.
    """
    kwargs: Dict[str, Any] = {"env": {**os.environ.copy(), **EXTRA_ENV}}
    if capture_output:
        # Fix: the capture path previously dropped `kwargs`, so EXTRA_ENV was
        # not applied when capturing output; apply it on both paths now.
        return nice_run(
            ["terraform", "show", *tf_flags],
            check=True,
            capture_output=True,
            tee=False,
            use_asyncio_nice_run=True,
            **kwargs,
        ).stdout
    nice_run(
        ["terraform", "show", *tf_flags],
        check=True,
        use_asyncio_nice_run=True,
        **kwargs,
    )
    return None
def get_acr_auth_info(layer: Layer) -> Tuple[str, str]:
    """Fetch an ACR access token for the layer's container registry.

    Returns:
        (username, token) -- the username is the null GUID, presumably the
        fixed account ACR expects with --expose-token access tokens.
    """
    acr_name = get_terraform_outputs(layer.root()).get("acr_name")
    if acr_name is None:
        raise Exception("Could not find acr_name")
    login_cmd = [
        "az",
        "acr",
        "login",
        "--name",
        acr_name,
        "--expose-token",
        "--output",
        "tsv",
        "--query",
        "accessToken",
    ]
    token = nice_run(login_cmd, check=True, capture_output=True).stdout
    return "00000000-0000-0000-0000-000000000000", token
def push_to_docker(
    username: str,
    password: str,
    local_image: str,
    registry_url: str,
    image_tag_override: Optional[str],
) -> Tuple[str, str]:
    """Log in to the registry, then tag and push `local_image`.

    The password is fed to `docker login` over stdin so it never appears in
    the process arguments.

    Returns:
        A (digest, tag) tuple for the pushed image.
    """
    tag = get_push_tag(local_image, image_tag_override)
    remote_name = f"{registry_url}:{tag}"
    login_cmd = [
        "docker",
        "login",
        registry_url,
        "--username",
        username,
        "--password-stdin",
    ]
    nice_run(login_cmd, input=password.encode(), check=True)
    nice_run(["docker", "tag", local_image, remote_name], check=True)
    nice_run(["docker", "push", remote_name], check=True)
    return get_image_digest(registry_url, tag), tag
def set_kube_config(self) -> None:
    """Switch kubectl to the local kind cluster's context."""
    use_context_cmd = ["kubectl", "config", "use-context", "kind-opta-local-cluster"]
    nice_run(use_context_cmd, capture_output=True, check=True)
def get_kube_context_name(self) -> str:
    """Return the name of kubectl's current context, stripped of surrounding whitespace."""
    current_context = nice_run(
        ["kubectl", "config", "current-context"],
        check=True,
        capture_output=True,
    )
    return current_context.stdout.strip()
def test_timeout(self):
    """The synchronous runner must raise TimeoutExpired when the command outlives `timeout`."""
    with pytest.raises(TimeoutExpired):
        nice_run(
            ["sleep", "5"],
            timeout=1,
            check=True,
            capture_output=True,
        )