def push_image(image: str, config: str, env: Optional[str], tag: Optional[str], input_variables: Dict) -> Tuple[str, str]:
    """Push a local docker image to the registry of the layer's cloud provider.

    Loads the layer from the given config/env, reports a PUSH analytics event,
    verifies cloud credentials, regenerates terraform files, and then pushes the
    image using provider-specific registry credentials (ECR/GCR/ACR) or the
    local docker path for the "local" provider.

    Raises:
        Exception: if the layer's cloud provider has no push support.
    """
    ensure_installed("docker")
    layer = Layer.load_from_yaml(config, env, input_variables=input_variables)
    amplitude_client.send_event(
        amplitude_client.PUSH_EVENT,
        event_properties={"org_name": layer.org_name, "layer_name": layer.name},
    )
    layer.verify_cloud_credentials()
    gen_all(layer)
    registry_url = get_registry_url(layer)

    # The "local" provider needs no registry credentials -- handle it up front.
    if layer.cloud == "local":
        return push_to_docker_local(image, registry_url, tag)

    # Every remaining provider supplies a (username, password) pair.
    if layer.cloud == "aws":
        username, password = get_ecr_auth_info(layer)
    elif layer.cloud == "google":
        username, password = get_gcr_auth_info(layer)
    elif layer.cloud == "azurerm":
        username, password = get_acr_auth_info(layer)
    else:
        raise Exception(
            f"No support for pushing image to provider {layer.cloud}")
    return push_to_docker(username, password, image, registry_url, tag)
def set_kube_config(self) -> None:
    """Fetch AKS admin credentials for this layer's cluster into the kubeconfig.

    Reads the resource group from the generated azurerm terraform backend
    config and shells out to `az aks get-credentials`.

    Raises:
        Exception: if the cluster cannot be found in the environment.
    """
    provider_config = self.layer.root().gen_providers(0)
    ensure_installed("az")
    resource_group = provider_config["terraform"]["backend"]["azurerm"][
        "resource_group_name"]
    cluster = self.layer.get_cluster_name()
    context = self.get_kube_context_name()
    if not self.cluster_exist():
        raise Exception(
            "The AKS cluster name could not be determined -- please make sure it has been applied in the environment."
        )
    # `az aks get-credentials --admin` appends "-admin" to the context name on
    # its own, so strip it from ours to avoid ending up with "-admin-admin".
    command = [
        "az",
        "aks",
        "get-credentials",
        "--resource-group",
        resource_group,
        "--name",
        cluster,
        "--admin",
        "--overwrite-existing",
        "--context",
        context.replace("-admin", ""),
    ]
    nice_run(command, stdout=DEVNULL, check=True)
def cluster_exist(self) -> bool:
    """Return True if this layer's AKS cluster is visible via `az aks list`.

    Looks up the subscription and resource group from the generated azurerm
    provider/backend config, lists the clusters in that resource group, and
    checks whether any of them matches the layer's cluster name.

    Returns:
        bool: True when a matching cluster is found; False on no match OR on
        any failure (missing permissions, CLI error, unparsable output) --
        failures are deliberately treated as "does not exist".
    """
    providers = self.layer.root().gen_providers(0)
    ensure_installed("az")
    rg_name = providers["terraform"]["backend"]["azurerm"][
        "resource_group_name"]
    subscription_id = providers["provider"]["azurerm"]["subscription_id"]
    cluster_name = self.layer.get_cluster_name()
    try:
        output = nice_run(
            [
                "az",
                "aks",
                "list",
                "--subscription",
                subscription_id,
                "--resource-group",
                rg_name,
            ],
            capture_output=True,
            check=True,
        ).stdout
        output_list = json.loads(output)
        # Generator expression (instead of building a list) lets `any`
        # short-circuit on the first matching cluster name.
        return any(x.get("name") == cluster_name for x in output_list)
    except Exception:
        # Best-effort probe: swallow all failures and report "not found"
        # rather than crashing the caller.
        return False
def _verify_aws_cloud_credentials(self) -> None:
    """Verify the locally-configured AWS credentials match the config's account.

    Calls STS `get_caller_identity` and compares the resulting account id
    against the `account_id` declared under the layer's aws provider.

    Raises:
        UserErrors: if credentials are missing, malformed, or belong to a
        different AWS account than the configuration requires.
    """
    ensure_installed("aws")
    try:
        aws_caller_identity = boto3.client("sts").get_caller_identity()
        configured_aws_account_id = aws_caller_identity["Account"]
        required_aws_account_id = self.root().providers["aws"]["account_id"]
        if required_aws_account_id != configured_aws_account_id:
            raise UserErrors(
                "\nSystem configured AWS Credentials are different from the ones being used in the "
                f"Configuration. \nSystem is configured with credentials for account "
                f"{configured_aws_account_id} but the config requires the credentials for "
                f"{required_aws_account_id}."
            )
    except NoCredentialsError:
        raise UserErrors(
            "Unable to locate credentials.\n"
            "Visit `https://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/setup-credentials.html` "
            "for more information."
        )
    except ClientError as e:
        raise UserErrors(
            "The AWS Credentials are not configured properly.\n"
            # Bug fix: the original implicit string concatenation was missing a
            # separator, producing "...Error Message: <msg>Visit `https...`".
            f" - Code: {e.response['Error']['Code']} Error Message: {e.response['Error']['Message']}\n"
            "Visit `https://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/setup-credentials.html` "
            "for more information."
        )
def set_kube_config(layer: "Layer") -> None:
    """Create a kubeconfig file to connect to a kubernetes cluster specified in a given layer"""
    # Flag (at debug level) calls made in stateless mode -- they are likely a
    # bug in the caller, so dump the stack to aid investigation.
    if layer.is_stateless_mode() is True and logger.isEnabledFor(DEBUG):
        logger.debug(
            "set_kube_config called in stateless mode, verify implementation. See stack trace below:"
        )
        traceback.print_stack()
    # Make sure the user has the prerequisite CLI tools installed.
    # kubectl may not *technically* be required for this opta command to run,
    # but require it anyways since the user must install it to access the cluster.
    ensure_installed("kubectl")
    makedirs(GENERATED_KUBE_CONFIG_DIR, exist_ok=True)
    # Delegate the provider-specific kubeconfig generation to the cloud client.
    layer.get_cloud_client().set_kube_config()
def validate_version(cls) -> None:
    """Validate the installed terraform binary's version.

    Raises:
        UserErrors: when the version is below MIN_TERRAFORM_VERSION or at/above
        MAX_TERRAFORM_VERSION.
    """
    ensure_installed("terraform")
    pre_req_link = "Check https://docs.opta.dev/installation/#prerequisites"
    current_version = Terraform.get_version()
    parsed = version.parse(current_version)
    minimum = version.parse(MIN_TERRAFORM_VERSION)
    maximum = version.parse(MAX_TERRAFORM_VERSION)
    # Accepted range is [minimum, maximum); reject each bound with its own message.
    if parsed < minimum:
        raise UserErrors(
            f"Invalid terraform version {current_version} -- must be at least {MIN_TERRAFORM_VERSION}. {pre_req_link}"
        )
    if parsed >= maximum:
        raise UserErrors(
            f"Invalid terraform version {current_version} -- must be less than {MAX_TERRAFORM_VERSION}. {pre_req_link}"
        )
def set_kube_config(self) -> None:
    """Generate (or reuse) a kubeconfig file for this layer's GKE cluster.

    If a previously generated kubeconfig exists and is less than one week old,
    it is reused as-is; otherwise a fresh one is built from the GKE cluster's
    endpoint and CA certificate and written to disk. In both cases the path is
    recorded in ``constants.GENERATED_KUBE_CONFIG``.

    Raises:
        Exception: if the cluster cannot be found in the environment.
    """
    ensure_installed("gcloud")
    kube_config_file_name = self.layer.get_kube_config_file_name()
    if exists(kube_config_file_name):
        # Reuse the cached kubeconfig if it is younger than one week;
        # otherwise delete it and regenerate below.
        if getmtime(kube_config_file_name) > time.time() - ONE_WEEK_UNIX:
            constants.GENERATED_KUBE_CONFIG = kube_config_file_name
            return
        else:
            remove(kube_config_file_name)
    # First element of get_credentials() is the google credentials object
    # used to authenticate the GKE API client below.
    credentials = self.get_credentials()[0]
    region, project_id = self.get_cluster_env()
    cluster_name = self.layer.get_cluster_name()
    if not self.cluster_exist():
        raise Exception(
            "The GKE cluster name could not be determined -- please make sure it has been applied in the environment."
        )
    # Fetch the cluster's endpoint and CA certificate from the GKE API.
    gke_client = ClusterManagerClient(credentials=credentials)
    cluster_data = gke_client.get_cluster(
        name=
        f"projects/{project_id}/locations/{region}/clusters/{cluster_name}"
    )
    cluster_ca_certificate = cluster_data.master_auth.cluster_ca_certificate
    cluster_endpoint = f"https://{cluster_data.endpoint}"
    gcloud_path = which("gcloud")
    kube_context_name = self.get_kube_context_name()
    # Hand-built kubeconfig document: a single cluster/context/user triple,
    # all keyed by the same context name, authenticating via the gcp
    # auth-provider plugin backed by the local gcloud binary.
    cluster_config = {
        "apiVersion": "v1",
        "kind": "Config",
        "clusters": [{
            "cluster": {
                "server": cluster_endpoint,
                "certificate-authority-data": cluster_ca_certificate,
            },
            "name": kube_context_name,
        }],
        "contexts": [{
            "context": {
                "cluster": kube_context_name,
                "user": kube_context_name
            },
            "name": kube_context_name,
        }],
        "current-context": kube_context_name,
        "preferences": {},
        "users": [{
            "name": kube_context_name,
            "user": {
                "auth-provider": {
                    "name": "gcp",
                    "config": {
                        # gcloud is queried for short-lived access tokens; the
                        # token/expiry keys tell kubectl where to find them in
                        # the helper's JSON output.
                        "cmd-args": "config config-helper --format=json",
                        "cmd-path": gcloud_path,
                        "expiry-key": "{.credential.token_expiry}",
                        "token-key": "{.credential.access_token}",
                    },
                }
            },
        }],
    }
    with open(kube_config_file_name, "w") as f:
        yaml.dump(cluster_config, f)
    constants.GENERATED_KUBE_CONFIG = kube_config_file_name
    return
def validate_helm_installed() -> None:
    """Check that the `helm` CLI is available, via `ensure_installed`."""
    ensure_installed("helm")