def process(self, module_idx: int) -> None:
    """Validate and normalize the k8s-service module config in place.

    Checks container scaling bounds, normalizes ports / public_uri / cron job
    args, and rejects ambiguous liveness/readiness probe configuration before
    delegating to the parent processor.

    :param module_idx: index of this module within the layer's module list.
    :raises UserErrors: on invalid scaling bounds or conflicting probe config.
    """
    # Min containers must be less than or equal to max containers and can only be 0 if both are
    min_containers_str = str(self.module.data.get("min_containers", ""))
    max_containers_str = str(self.module.data.get("max_containers", ""))
    # isdigit() is only True for plain non-negative integer strings, so the
    # bounds check silently skips anything else (empty/missing values included).
    if min_containers_str.isdigit() and max_containers_str.isdigit():
        min_containers = int(min_containers_str)
        max_containers = int(max_containers_str)
        if min_containers > max_containers:
            raise UserErrors(
                "Min containers must be less than or equal to max containers"
            )
        if min_containers == 0 and max_containers != 0:
            raise UserErrors(
                "Min containers can only equal 0 if max containers equals zero"
            )
    self._process_ports(self.module.data)
    # A bare string public_uri is normalized to a single-element list.
    if isinstance(self.module.data.get("public_uri"), str):
        self.module.data["public_uri"] = [self.module.data["public_uri"]]
    # Ensure every cron job has an args list, even if empty.
    cron_jobs = self.module.data.get("cron_jobs", [])
    for cron_job in cron_jobs:
        cron_job["args"] = cron_job.get("args", [])
    if "public_uri" in self.module.data:
        # Normalize URIs: a leading "/" or "*" means "all domains", which is
        # spelled with an explicit "all" host prefix downstream.
        new_uris: List[str] = []
        public_uri: str
        for public_uri in self.module.data["public_uri"]:
            if public_uri.startswith("/"):
                new_uris.append(f"all{public_uri}")
            elif public_uri.startswith("*"):
                new_uris.append(f"all{public_uri[1:]}")
            else:
                new_uris.append(public_uri)
        self.module.data["public_uri"] = new_uris
    # healthcheck_command/healthcheck_path act as shared defaults for both the
    # liveness and readiness probes; a probe may be command- OR path-based,
    # never both.
    liveness_probe_command = self.module.data.get(
        "healthcheck_command"
    ) or self.module.data.get("liveness_probe_command")
    liveness_probe_path = self.module.data.get(
        "healthcheck_path"
    ) or self.module.data.get("liveness_probe_path")
    if liveness_probe_path is not None and liveness_probe_command is not None:
        raise UserErrors(
            "Invalid liveness probes: you can only specify a path for an http get request or a shell command to run, not both."
        )
    readiness_probe_command = self.module.data.get(
        "healthcheck_command"
    ) or self.module.data.get("readiness_probe_command")
    readiness_probe_path = self.module.data.get(
        "healthcheck_path"
    ) or self.module.data.get("readiness_probe_path")
    if readiness_probe_path is not None and readiness_probe_command is not None:
        raise UserErrors(
            "Invalid readiness probes: you can only specify a path for an http get request or a shell command to run, not both."
        )
    super(K8sServiceModuleProcessor, self).process(module_idx)
def validate_dns(self) -> None:
    """Verify that the domain's live NS records match the terraform outputs.

    No-op unless the module is marked as delegated. Raises UserErrors when the
    expected name servers are unknown, unresolvable, or mismatched.
    """
    if not self.module.data.get("delegated", False):
        return

    current_outputs = get_terraform_outputs(self.layer)
    if "name_servers" not in current_outputs:
        raise UserErrors(
            "Did not find name_servers field in output. Please apply one with delegated set to false. (might take some time to propagate)"
        )
    # Terraform reports fully-qualified names; drop trailing dots for comparison.
    expected_name_servers: List[str] = [
        entry.strip(".") for entry in current_outputs["name_servers"]
    ]

    try:
        answers: Answer = query(self.module.data["domain"], "NS")  # type: ignore
    except NoNameservers:
        raise UserErrors(
            f"Did not find any NS records for domain {self.module.data['domain']}. (might take some time to propagate)"
        )

    record: NS
    actual_name_servers = [
        record.target.to_text(omit_final_dot=True) for record in answers
    ]

    # Order is irrelevant; compare as sets.
    if set(expected_name_servers) != set(actual_name_servers):
        raise UserErrors(
            f"Incorrect NS servers. Expected {expected_name_servers}, actual {actual_name_servers}. (might take some time to propagate)"
        )
def process(self, module_idx: int) -> None:
    """Normalize helm-chart inputs: version requirement, values logging, and
    values file paths (made absolute relative to the layer's directory).

    :raises UserErrors: on a remote repository without a chart_version, or when
        both values_file and values_files are supplied.
    """
    if "repository" in self.module.data and "chart_version" not in self.module.data:
        raise UserErrors(
            "If you specify a remote repository you must give a version.")

    values = self.module.data.get("values", {})
    if values:
        # Dump the values to a buffer purely for debug logging.
        buffer = StringIO()
        yaml.dump(values, buffer)
        logger.debug(
            f"These are the values passed in from the opta yaml:\n{buffer.getvalue()}"
        )

    values_file = self.module.data.get("values_file", None)
    values_files = self.module.data.get("values_files", [])
    if values_file is not None and values_files != []:
        raise UserErrors(
            "Can't have values_file and values_files at the same time. Either put all of your files in "
            "values_files or have one single file and put it in values_file"
        )
    if values_file:
        values_files.append(values_file)

    # Relative paths are resolved against the directory holding the opta yaml.
    layer_dir = os.path.dirname(self.layer.path)
    self.module.data["values_files"] = [
        entry if entry.startswith("/") else os.path.join(layer_dir, entry)
        for entry in values_files
    ]
    super(HelmChartProcessor, self).process(module_idx)
def __init__(self, layer: "Layer", data: Dict[Any, Any], parent_layer: Optional["Layer"] = None):
    """Build a Module from its raw yaml data.

    Resolves the module type (following any cloud-specific alias) against the
    REGISTRY for the layer's cloud, then captures name, dependencies, halt
    flag, and the terraform module directory.

    :param layer: the layer this module belongs to.
    :param data: raw module mapping from the opta yaml; must contain "type".
    :param parent_layer: optional parent (environment) layer.
    :raises UserErrors: on a missing/unknown type or an invalid module name.
    """
    if "type" not in data:
        raise UserErrors("Module data must always have a type")
    self.type: str = data["type"]
    self.aliased_type: Optional[str] = None
    # An alias maps a user-facing type name to the canonical registry entry;
    # the description is copied so later mutation doesn't touch the registry.
    if self.type in REGISTRY[layer.cloud]["module_aliases"]:
        self.aliased_type = REGISTRY[layer.cloud]["module_aliases"][
            self.type]
        self.desc = REGISTRY[layer.cloud]["modules"][
            self.aliased_type].copy()
    elif self.type in REGISTRY[layer.cloud]["modules"]:
        self.desc = REGISTRY[layer.cloud]["modules"][self.type].copy()
    else:
        raise UserErrors(f"{self.type} is not a valid module type")
    self.layer_name = layer.name
    self.data: Dict[Any, Any] = data
    self.parent_layer = parent_layer
    # Default name is the type with dashes removed (names must be alphanumeric).
    self.name: str = data.get("name", self.type.replace("-", ""))
    self.depends_on: List[str] = data.get("depends_on", [])
    if not Module.valid_name(self.name):
        raise UserErrors(
            "Invalid module name, can only contain letters and numbers!")
    # "halt" marks modules after which processing should stop.
    self.halt = REGISTRY[layer.cloud]["modules"][self.aliased_type or self.type].get(
        "halt", False)
    self.module_dir_path: str = self.translate_location(
        self.desc.get("terraform_module", self.aliased_type or self.type))
    self.used_defaults: List["StructuredDefault"] = []
def get_secret_name_and_namespace(
        layer: Layer, module_name: Optional[str]) -> Tuple[str, str]:
    """Resolve the k8s secret name and namespace for a service module.

    Prompts the user to pick a module when several candidates exist and no
    name was given. k8s-service modules use the shared "manual-secrets" name
    in the layer namespace; helm charts get a per-module secret name.

    :raises UserErrors: when no candidate modules exist or the named one is missing.
    """
    candidates = (layer.get_module_by_type("k8s-service")
                  + layer.get_module_by_type("helm-chart"))
    if not candidates:
        raise UserErrors("No helm/k8s-service modules were configured")

    if module_name is None and len(candidates) > 1:
        module_name = click.prompt(
            "Multiple k8s-service/helm chart modules found. Please specify which one do you want the secret for.",
            type=click.Choice([x.name for x in candidates]),
        )

    if module_name is None:
        module: Module = candidates[0]
    else:
        matches = [m for m in candidates if m.name == module_name]
        if not matches:
            raise UserErrors(
                f"Could not find helm/k8s-service module with name {module_name}"
            )
        module = matches[0]

    if module.type == "k8s-service":
        return "manual-secrets", layer.name
    return (
        f"opta-{layer.name}-{module.name}-secret",
        module.data.get("namespace", "default"),
    )
def process(self, module_idx: int) -> None:
    """Reject mutually-exclusive Aurora global-database options, then delegate.

    :raises UserErrors: when database_name / create_global_database /
        restore_from_snapshot conflict with existing_global_database_id.
    """
    data = self.module.data
    create_global_database: bool = data.get("create_global_database", False)
    existing_global_database_id: Optional[str] = data.get(
        "existing_global_database_id", None
    )
    restore_from_snapshot: Optional[str] = data.get("restore_from_snapshot")
    database_name: Optional[str] = data.get("database_name")

    joining_existing_cluster = existing_global_database_id is not None
    if database_name is not None and joining_existing_cluster:
        raise UserErrors(
            "You can not specify a database name when creating a read replica for an existing Aurora "
            "Global cluster. The automatically created db will be the one of the writer db (aka the master, aka "
            "the one who created the cluster)."
        )
    if create_global_database and joining_existing_cluster:
        raise UserErrors(
            "If you want to create a new global database, then you can't input the id of a "
            "pre-existing one to use."
        )
    if restore_from_snapshot and joining_existing_cluster:
        raise UserErrors(
            "You can't attach to existing global database and restore from snapshot. It's one or the other."
        )
    super(AwsPostgresProcessor, self).process(module_idx)
def process(self, module_idx: int) -> None:
    """Wire the aws-k8s-base module to its sibling modules.

    Pulls cert material from an external-ssl-cert module if present, DNS
    settings from a linked aws-dns module, the log bucket from aws-base, and
    cluster info from aws-eks, expressing each as a terraform interpolation
    string of the form ${{module.<name>.<output>}}.

    :param module_idx: index of this module within the layer's module list.
    :raises UserErrors: on inconsistent domain/zone_id or missing base/eks modules.
    """
    byo_cert_modules = self.layer.get_module_by_type("external-ssl-cert")
    if len(byo_cert_modules) != 0:
        # Bring-your-own cert: reference the cert module's outputs directly.
        byo_cert_module = byo_cert_modules[0]
        self.module.data[
            "private_key"] = f"${{{{module.{byo_cert_module.name}.private_key}}}}"
        self.module.data[
            "certificate_body"] = f"${{{{module.{byo_cert_module.name}.certificate_body}}}}"
        self.module.data[
            "certificate_chain"] = f"${{{{module.{byo_cert_module.name}.certificate_chain}}}}"
    self._process_nginx_extra_ports(self.layer, self.module.data)
    aws_dns_modules = self.layer.get_module_by_type("aws-dns")
    self.module.data["enable_auto_dns"] = False
    # Auto-DNS only applies when the dns module is unlinked or linked to us.
    if len(aws_dns_modules) != 0 and aws_dns_modules[0].data.get(
            "linked_module") in [
                None,
                self.module.type,
                self.module.name,
            ]:
        aws_dns_module = aws_dns_modules[0]
        self.module.data["enable_auto_dns"] = True
        # Explicit user values win over the dns module's outputs.
        self.module.data["domain"] = (
            self.module.data.get("domain")
            or f"${{{{module.{aws_dns_module.name}.domain}}}}")
        self.module.data["cert_arn"] = (
            self.module.data.get("cert_arn")
            or f"${{{{module.{aws_dns_module.name}.cert_arn}}}}")
        self.module.data["zone_id"] = (
            self.module.data.get("zone_id")
            or f"${{{{module.{aws_dns_module.name}.zone_id}}}}")
    # domain and zone_id must be provided together (XOR check).
    if (self.module.data.get("domain") is None) != (self.module.data.get("zone_id") is None):
        raise UserErrors(
            "You need to specify domain and zone_id together.")
    aws_base_modules = self.layer.get_module_by_type(
        "aws-base", module_idx)
    if len(aws_base_modules) == 0:
        raise UserErrors(
            "Must have the base module in before the k8s-base")
    aws_base_module = aws_base_modules[0]
    self.module.data[
        "s3_log_bucket_name"] = f"${{{{module.{aws_base_module.name}.s3_log_bucket_name}}}}"
    aws_eks_modules = self.layer.get_module_by_type(
        "aws-eks", module_idx - 1)
    if len(aws_eks_modules) == 0:
        raise UserErrors(
            "Must have the k8s-cluster/aws-eks module in before the k8s-base/aws-k8s-base"
        )
    aws_eks_module = aws_eks_modules[0]
    self.module.data[
        "k8s_cluster_name"] = f"${{{{module.{aws_eks_module.name}.k8s_cluster_name}}}}"
    self.module.data[
        "k8s_version"] = f"${{{{module.{aws_eks_module.name}.k8s_version}}}}"
    super(AwsK8sBaseProcessor, self).process(module_idx)
def _validate_providers(providers: dict) -> None:
    """Validate the providers section of an environment configuration.

    Rejects a configuration where the providers section is present but empty::

        name: Test Name
        org_name: Test Org Name
        providers:
        modules:...

    and one where a cloud key is declared but left without details::

        name: Test Name
        org_name: Test Org Name
        providers:
          aws/google/azurerm:
        modules:...

    :raises UserErrors: in either case above.
    """
    if providers is None:
        raise UserErrors(
            "Environment Configuration needs a Provider Section.\n"
            "Please follow `https://docs.opta.dev/getting-started/` to get started."
        )
    for cloud_key in ("aws", "google", "azurerm"):
        if cloud_key in providers and providers.get(cloud_key) is None:
            raise UserErrors("Please provide the Details of Cloud Provider Used.")
def _verify_aws_cloud_credentials(self) -> None:
    """Verify the locally-configured AWS credentials match the config's account.

    Calls STS get_caller_identity and compares the account id against the
    root layer's providers.aws.account_id.

    :raises UserErrors: on missing credentials, mismatched account ids, or any
        other AWS client error.
    """
    ensure_installed("aws")
    try:
        aws_caller_identity = boto3.client("sts").get_caller_identity()
        configured_aws_account_id = aws_caller_identity["Account"]
        required_aws_account_id = self.root().providers["aws"]["account_id"]
        if required_aws_account_id != configured_aws_account_id:
            raise UserErrors(
                "\nSystem configured AWS Credentials are different from the ones being used in the "
                f"Configuration. \nSystem is configured with credentials for account "
                f"{configured_aws_account_id} but the config requires the credentials for "
                f"{required_aws_account_id}."
            )
    except NoCredentialsError:
        raise UserErrors(
            "Unable to locate credentials.\n"
            "Visit `https://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/setup-credentials.html` "
            "for more information."
        )
    except ClientError as e:
        raise UserErrors(
            "The AWS Credentials are not configured properly.\n"
            # Fix: the error-detail line previously ran straight into "Visit"
            # with no separator ("...Message: XVisit `https..."). Newline added.
            f" - Code: {e.response['Error']['Code']} Error Message: {e.response['Error']['Message']}\n"
            "Visit `https://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/setup-credentials.html` "
            "for more information."
        )
def __validate_ports(self, ports: List[PortSpec]) -> None:
    """Validate parsed port specs against cloud capabilities and uniqueness.

    :param ports: parsed port specifications for the service.
    :raises UserErrors: for multiple ports or TCP ports on clouds without
        multi-port support, more than one http port, or duplicate port
        names/numbers/service-port numbers.
    """
    if not self.FLAG_MULTIPLE_PORTS_SUPPORTED:
        if len(ports) > 1:
            raise UserErrors("Cannot specify multiple ports in this cloud")
        # any() short-circuits; no need to materialize a list just to test it.
        if any(port.is_tcp for port in ports):
            raise UserErrors("Cannot specify TCP ports in this cloud")

    # Make sure we only have at most one http port
    http_ports = [port for port in ports if port.is_http]
    if len(http_ports) > 1:
        raise UserErrors("Multiple `type: http` ports not supported")

    # Check for duplicate port numbers or names
    uniques: Dict[str, Callable[[PortSpec], Any]] = {
        "port name": lambda port: port.name,
        "port number": lambda port: port.port,
        "service port number": lambda port: port.service_port,
    }
    for key, resolver in uniques.items():
        seen: set = set()
        for port in ports:
            value = resolver(port)
            if value in seen:
                raise UserErrors(f"Duplicate {key} `{value}`")
            seen.add(value)
def handle_rds_link(self, linked_module: "Module", link_permissions: List[Any]) -> None:
    """Expose an RDS module's connection details as link secrets.

    The last entry of link_permissions may be a dict renaming the four db
    variables; it is popped off (mutating the caller's list) before the
    remaining entries are checked. Any leftover entries are rejected, since
    IAM-style permissions are not supported for rds links.

    :param linked_module: the database module being linked.
    :param link_permissions: raw permissions list from the yaml link entry;
        consumed destructively (the rename dict, if any, is popped).
    :raises UserErrors: on a malformed rename mapping.
    :raises Exception: if extra (IAM-style) permissions remain.
    """
    required_db_vars = ["db_user", "db_name", "db_password", "db_host"]
    renamed_vars = {}
    if len(link_permissions) > 0:
        # The rename mapping must cover exactly the four required variables.
        renamed_vars = link_permissions.pop()
        if not isinstance(renamed_vars, dict) or set(
                renamed_vars.keys()) != set(required_db_vars):
            raise UserErrors(
                f"To rename db variables you must provide aliases for these fields: {required_db_vars}"
            )
        if not all(map(lambda x: isinstance(x, str), renamed_vars.values())):
            raise UserErrors(
                "DB variable rename must be only to another string")
    for key in required_db_vars:
        # Default secret name is "<module>_<var>" unless renamed above.
        self.module.data["link_secrets"].append({
            "name": renamed_vars.get(key, f"{linked_module.name}_{key}"),
            "value": f"${{{{module.{linked_module.name}.{key}}}}}",
        })
    if link_permissions:
        raise Exception(
            "We're not supporting IAM permissions for rds right now. "
            "Your k8s service will have the db user, name, password, "
            "and host as envars (pls see docs) and these IAM "
            "permissions are for manipulating the db itself, which "
            "I don't think is what you're looking for.")
def process(self, module_idx: int) -> None:
    """Process an Azure k8s-service module: env vars, links, ACR, image tag.

    Normalizes env_vars from mapping to name/value list form, resolves module
    links (azure-postgres / azure-redis), points the service at the layer's
    (or parent layer's) ACR registry, and applies image_tag / image_digest
    variables before delegating to the parent processor.

    :param module_idx: index of this module within the layer's module list.
    :raises UserErrors: on malformed link data or a missing azure base module.
    :raises Exception: on an unknown linked module or unsupported link type.
    """
    # Update the secrets
    self.module.data["link_secrets"] = self.module.data.get(
        "link_secrets", [])
    current_envars: Union[List, Dict[str, str]] = self.module.data.get(
        "env_vars", [])
    # Mapping form {NAME: value} becomes [{"name": ..., "value": ...}, ...].
    if isinstance(current_envars, dict):
        self.module.data["env_vars"] = [{
            "name": x,
            "value": y
        } for x, y in current_envars.items()]
    # Handle links
    for link_data in self.module.data.get("links", []):
        # A link is either just a module name, or {name: [permissions...]}.
        if type(link_data) is str:
            target_module_name = link_data
            link_permissions = []
        elif type(link_data) is dict:
            target_module_name = list(link_data.keys())[0]
            link_permissions = list(link_data.values())[0]
        else:
            raise UserErrors(
                f"Link data {link_data} must be a string or map holding the permissions"
            )
        module = self.layer.get_module(target_module_name, module_idx)
        if module is None:
            raise Exception(
                f"Did not find the desired module {target_module_name} "
                "make sure that the module you're referencing is listed before the k8s "
                "app one")
        module_type = module.aliased_type or module.type
        if module_type == "azure-postgres":
            self.handle_rds_link(module, link_permissions)
        elif module_type == "azure-redis":
            self.handle_redis_link(module, link_permissions)
        else:
            raise Exception(
                f"Unsupported module type for k8s service link: {module_type}"
            )
    # Container registry comes from this layer's azure-base module, or from
    # the parent layer's remote state when this is a child (service) layer.
    acr_module_source: str
    if self.layer.parent is None:
        base_modules = self.layer.get_module_by_type("azure-base")
        if len(base_modules) == 0:
            raise UserErrors("Could not find base module for azure")
        acr_module_source = f"module.{base_modules[0].name}"
    else:
        acr_module_source = "data.terraform_remote_state.parent.outputs"
    self.module.data[
        "acr_registry_name"] = f"${{{{{acr_module_source}.acr_name}}}}"
    # CLI-provided image coordinates override the yaml.
    if "image_tag" in self.layer.variables:
        self.module.data["tag"] = self.layer.variables["image_tag"]
    if "image_digest" in self.layer.variables:
        self.module.data["digest"] = self.layer.variables["image_digest"]
    super(AzureK8sServiceProcessor, self).process(module_idx)
def process(self, module_idx: int) -> None:
    """Attach the email module to a delegated aws-dns module's domain and zone.

    :raises UserErrors: when no aws-dns module exists before this one or it is
        not delegated.
    """
    dns_modules = self.layer.get_module_by_type("aws-dns", module_idx)
    # Both "no dns module" and "dns not delegated" fail with the same message.
    if not dns_modules or not dns_modules[0].data.get("delegated", False):
        raise UserErrors("AWS email needs the dns to be setup and delegated to work")
    dns_module = dns_modules[0]
    self.module.data["domain"] = f"${{{{module.{dns_module.name}.domain}}}}"
    self.module.data["zone_id"] = f"${{{{module.{dns_module.name}.zone_id}}}}"
    super(AwsEmailProcessor, self).process(module_idx)
def __check_layer_and_image(layer: "Layer", option_image: str) -> bool:
    """Return True when the configured k8s-service image is "auto".

    :param layer: the layer whose k8s-service module is inspected.
    :param option_image: image passed on the command line, if any.
    :raises UserErrors: when no k8s-service module exists, when the module has
        no image configured, or when an explicit image is both configured and
        passed as an option.
    """
    k8s_modules: List[Module] = layer.get_module_by_type("k8s-service")
    if len(k8s_modules) == 0:
        raise UserErrors("k8s-service module not present.")
    configuration_image_name = k8s_modules[0].data.get("image")
    # Fix: .get("image") can return None, which previously crashed with an
    # AttributeError on .lower(); surface a proper user-facing error instead.
    if configuration_image_name is None:
        raise UserErrors("k8s-service module does not have an image configured.")
    configuration_image_name = configuration_image_name.lower()
    if configuration_image_name != "auto" and option_image is not None:
        raise UserErrors(
            f"Do not pass any image. Image {configuration_image_name} already present in configuration."
        )
    return configuration_image_name == "auto"
def logs(
    env: Optional[str],
    config: str,
    seconds: Optional[int],
    local: Optional[bool],
    var: Dict[str, str],
) -> None:
    """
    Get stream of logs for a service

    Examples:

    opta logs -c my-service.yaml
    """
    config = check_opta_file_exists(config)
    if local:
        config = local_setup(config, input_variables=var)
    # Configure kubectl
    layer = Layer.load_from_yaml(config, env, input_variables=var, strict_input_variables=False)
    # Analytics event; fires before any cloud work is attempted.
    amplitude_client.send_event(
        amplitude_client.SHELL_EVENT,
        event_properties={
            "org_name": layer.org_name,
            "layer_name": layer.name
        },
    )
    layer.verify_cloud_credentials()
    gen_all(layer)
    set_kube_config(layer)
    load_opta_kube_config()
    # Each cloud names its service module type differently; "helm" reuses the
    # local module type.
    if layer.cloud == "aws":
        modules = layer.get_module_by_type("k8s-service")
    elif layer.cloud == "google":
        modules = layer.get_module_by_type("gcp-k8s-service")
    elif layer.cloud == "local":
        modules = layer.get_module_by_type("local-k8s-service")
    elif layer.cloud == "helm":
        modules = layer.get_module_by_type("local-k8s-service")
    else:
        raise Exception(f"Currently not handling logs for cloud {layer.cloud}")
    # Exactly one service module must be present to tail its logs.
    if len(modules) == 0:
        raise UserErrors("No module of type k8s-service in the yaml file")
    elif len(modules) > 1:
        raise UserErrors(
            "Don't put more than one k8s-service module file per opta file")
    module_name = modules[0].name
    tail_module_log(layer, module_name, seconds)
def process(self, module_idx: int) -> None:
    """Default vpc_id / public_subnets_ids / kms_account_key_arn from the
    aws-base module of this layer (or its parent) when not given explicitly.

    :param module_idx: index of this module within the layer's module list.
    :raises UserErrors: when neither a base module nor the explicit network
        inputs are available.
    """
    aws_base_modules = self.layer.get_module_by_type(
        "aws-base", module_idx)
    vpc_id = self.module.data.get("vpc_id")
    public_subnets_ids = self.module.data.get("public_subnets_ids")
    kms_account_key_arn = self.module.data.get("kms_account_key_arn")
    from_parent = False
    if len(aws_base_modules) == 0 and self.layer.parent is not None:
        from_parent = True
        aws_base_modules = self.layer.parent.get_module_by_type("aws-base")
    if len(aws_base_modules) == 0 and (vpc_id is None
                                       or public_subnets_ids is None
                                       or kms_account_key_arn is None):
        # Fix: the two literals previously joined as "...to give to thevpn";
        # a trailing space was missing on the first fragment.
        raise UserErrors(
            "You either need to have the base module present or specify a VPC id, security group to give to the "
            "vpn, a kms key to use, and the public subnet ids")
    base_module_source = ("data.terraform_remote_state.parent.outputs"
                          if from_parent
                          else f"module.{aws_base_modules[0].name}")
    # NOTE(review): dict.get(key, default) only falls back when the key is
    # absent — an explicit null value in the yaml is kept as-is, unlike the
    # None checks above. Confirm whether that asymmetry is intended.
    self.module.data["vpc_id"] = self.module.data.get(
        "vpc_id", f"${{{{{base_module_source}.vpc_id}}}}")
    self.module.data["public_subnets_ids"] = self.module.data.get(
        "public_subnets_ids",
        f"${{{{{base_module_source}.public_subnets_ids}}}}")
    self.module.data["kms_account_key_arn"] = self.module.data.get(
        "kms_account_key_arn",
        f"${{{{{base_module_source}.kms_account_key_arn}}}}")
    super().process(module_idx)
def process(self, module_idx: int) -> None:
    """Hook the global accelerator up to a linked aws-dns module and to the
    k8s-base load balancer endpoint, when those modules are present.

    :raises UserErrors: when only one of domain / zone_id ends up set.
    """
    aws_dns_modules = self.layer.get_module_by_type("aws-dns")
    # Only wire DNS when the dns module is explicitly linked to this module.
    dns_is_linked = bool(aws_dns_modules) and aws_dns_modules[0].data.get(
        "linked_module"
    ) in [self.module.type, self.module.name]
    if dns_is_linked:
        aws_dns_module = aws_dns_modules[0]
        self.module.data["enable_auto_dns"] = True
        for field in ("domain", "zone_id"):
            self.module.data[field] = (
                self.module.data.get(field)
                or f"${{{{module.{aws_dns_module.name}.{field}}}}}"
            )
    # domain and zone_id must be supplied together (XOR check).
    if (self.module.data.get("domain") is None) != (
        self.module.data.get("zone_id") is None
    ):
        raise UserErrors(
            "Either both domain and zone_id are mentioned at the same time, or none at all."
        )
    k8s_base_modules = self.layer.get_module_by_type("k8s-base", module_idx)
    if k8s_base_modules:
        k8s_base_module = k8s_base_modules[0]
        self.module.data["endpoint_id"] = (
            self.module.data.get("endpoint_id")
            or f"${{{{module.{k8s_base_module.name}.load_balancer_arn}}}}"
        )
    super(GlobalAcceleratorsProcessor, self).process(module_idx)
def validate_yaml(config_file_path: str, cloud: str, json_schema: bool = False) -> Literal[True]:
    """Validate an opta yaml file against the schema for the given cloud.

    :param config_file_path: path to the opta yaml file.
    :param cloud: cloud key selecting the schema (falls back to vanilla).
    :param json_schema: when True, json-schema validation is not yet
        implemented (prints a TODO) and the file is accepted.
    :raises UserErrors: when the file fails schema validation.
    :return: always True when validation passes.
    """
    if json_schema:
        print("TODO")
        return True

    schema_by_cloud = {
        "aws": aws_main_schema,
        "google": gcp_main_schema,
        "azurerm": azure_main_schema,
        "local": local_main_schema,
        "helm": helm_main_schema,
    }
    schema = schema_by_cloud.get(cloud, vanilla_main_schema)

    data = yamale.make_data(config_file_path, parser="ruamel")
    results = yamale.validate(schema, data, _raise_error=False)
    errors = [error for result in results for error in result.errors]
    if errors:
        _print_errors(errors)
        raise UserErrors(f"{config_file_path} is not a valid Opta file.")
    return True
def process(self, module_idx: int) -> None:
    """Resolve a custom terraform module's source (local dir or remote ref)
    and surface its terraform_inputs as user-facing module inputs.

    :param module_idx: index of this module within the layer's module list.
    :raises UserErrors: when neither source nor path_to_module is given.
    """
    current_desc = self.module.desc
    source: str = self.module.data.get("source") or self.module.data.get(
        "path_to_module")
    if source is None:
        raise UserErrors(
            "Need to specify source (formerly path_to_module)")
    path_to_layer: str = os.path.abspath(os.path.dirname(self.layer.path))
    module_version: str = self.module.data.get("version", None) or ""
    if source.startswith("./"):
        # Fix: source.strip("./") stripped ANY leading/trailing '.' or '/'
        # characters (mangling e.g. "./.hidden" into "hidden"); remove only
        # the literal "./" prefix.
        source = source[2:]
        self.module.module_dir_path = os.path.join(path_to_layer, source)
    elif source.startswith("../"):
        self.module.module_dir_path = os.path.join(path_to_layer, source)
    else:
        # If this is the case, then this refers to a remote module as per
        # https://www.terraform.io/language/modules/sources
        self.module.module_dir_path = source
    # Every terraform_inputs key becomes a user-facing input of this module.
    current_desc["inputs"] = [{
        "name": x,
        "user_facing": True
    } for x in self.module.data.get("terraform_inputs", {}).keys()]
    if module_version != "":
        current_desc["inputs"].append({
            "name": "version",
            "user_facing": True
        })
    self.module.data.update(self.module.data.get("terraform_inputs", {}))
def fetch_cert_chain(self) -> Tuple[X509, str]:
    """Read the PEM certificate chain referenced by the module config.

    :return: the parsed certificate object and the raw PEM text.
    :raises UserErrors: when the file is missing or is not valid PEM.
    """
    chain_path: str = os.path.join(
        os.path.dirname(self.layer.path), self.module.data["certificate_chain_file"]
    )
    try:
        with open(chain_path, "r") as handle:
            cert_chain = handle.read()
    except FileNotFoundError:
        raise UserErrors(
            f"Could not find cert chain with path {chain_path}. Pls try again"
        )
    try:
        return load_certificate(FILETYPE_PEM, cert_chain.encode("utf-8")), cert_chain
    except Error:
        raise UserErrors("certificate chain is not correct pem cert")
def _process_ports(self, data: Dict[Any, Any]) -> None:
    """Normalize the ports section of a service config in place.

    Parses and validates ports, records the http port, resolves probe_port
    (defaulting to the first port), and computes service annotations.

    :raises UserErrors: when probe_port names a nonexistent port.
    """
    self.__transform_port(data)
    if "ports" not in data:
        return

    ports = self.__read_ports(data["ports"])
    self.__validate_ports(ports)
    data["ports"] = ports

    # At most one http port exists (enforced by validation above).
    data["http_port"] = next((port for port in ports if port.is_http), None)

    if "probe_port" in data:
        wanted_name = data["probe_port"]
        probe_port = next(
            (port for port in ports if port.name == wanted_name), None
        )
        if probe_port is None:
            raise UserErrors(
                f"invalid probe_port: {wanted_name} is not a valid port name"
            )
    else:
        # Default to the first declared port.
        probe_port = ports[0]
    data["probe_port"] = probe_port

    data["service_annotations"] = self.__service_annotations(ports)
def _verify_parent_layer(layer: Layer, auto_approve: bool = False) -> None:
    """Ensure the parent (environment) layer's terraform state is reachable,
    offering to create the environment when its state is missing.

    :param layer: the child layer whose parent is verified; no-op if none.
    :param auto_approve: when True, skip the interactive confirmation before
        creating a missing environment.
    :raises UserErrors: when the parent state exists but access is denied.
    """
    if layer.parent is None:
        return
    try:
        get_terraform_outputs(layer.parent)
    except ClientError as e:
        if e.response["Error"]["Code"] == "AccessDenied":
            raise UserErrors(
                f"We were unable to fetch Environment details for the Env {layer.parent.name}, on your AWS account (opta needs this to store state). "
                "Usually, it means that your AWS account has insufficient permissions. "
                "Please fix these issues and try again!")
        # Fix: other ClientErrors were previously swallowed silently, letting
        # the command continue against an unverified environment. Re-raise.
        raise
    except MissingState as e:
        if not auto_approve:
            click.confirm(
                f"Failed to get the Environment state {e.args[0]} "
                "Usually, this means that the Environment mentioned in configuration file does not exist. \n"
                f"Would you like to create your environment using {layer.parent.path}?",
                abort=True,
            )
        # NOTE(review): auto_approve is not forwarded to _apply — confirm
        # whether the nested apply should also be auto-approved.
        _apply(
            layer.parent.path,
            env=None,
            refresh=False,
            local=False,
            image_tag=None,
            test=False,
            auto_approve=False,
            input_variables={},
        )
        cleanup_files()
def process(self, module_idx: int) -> None:
    """Resolve the lambda module's VPC (from aws-base, own or parent layer)
    and its deployment zip path, enforcing the 50 MB upload limit.

    :param module_idx: index of this module within the layer's module list.
    :raises UserErrors: when filename is missing or the zip exceeds 50 MB.
    """
    aws_base_modules = self.layer.get_module_by_type(
        "aws-base", module_idx)
    base_from_parent = False
    if len(aws_base_modules) == 0 and self.layer.parent is not None:
        aws_base_modules = self.layer.parent.get_module_by_type("aws-base")
        base_from_parent = True
    if len(aws_base_modules) > 0:
        aws_base_module = aws_base_modules[0]
        module_source = ("data.terraform_remote_state.parent.outputs"
                         if base_from_parent
                         else f"module.{aws_base_module.name}")
        self.module.data["vpc_id"] = f"${{{{{module_source}.vpc_id}}}}"
    file_path: Optional[str] = self.module.data.get("filename")
    # Fix: file_path was dereferenced via .startswith() BEFORE the (dead)
    # None-guard on getsize, crashing with AttributeError when filename was
    # absent. Validate presence up front instead.
    if file_path is None:
        raise UserErrors(
            "Lambda module needs a `filename` pointing to the zip to upload")
    if not file_path.startswith("/"):
        # Relative paths are resolved against the opta yaml's directory.
        file_path = os.path.join(os.path.dirname(self.layer.path), file_path)
    self.module.data["filename"] = file_path
    file_size = os.path.getsize(file_path)
    if file_size >= 50000000:
        raise UserErrors(
            "We're very sorry, but Opta currently only supports uploading zips of max 50 MB to lambda. "
            "Please raise this issue to the Opta maintainers so that we may expedite this feature enhancement"
        )
    super(LambdaFunctionProcessor, self).process(module_idx)
def process(self, module_idx: int) -> None:
    """Resolve gcp-gcs links for the service account and merge the resulting
    bucket permissions into the module data.

    :raises UserErrors: on malformed link data.
    :raises Exception: on an unknown linked module or unsupported link type.
    """
    # Handle links
    for link_data in self.module.data.get("links", []):
        # A link is either just a module name, or {name: [permissions...]}.
        if type(link_data) is str:
            target_module_name, link_permissions = link_data, []
        elif type(link_data) is dict:
            target_module_name, link_permissions = next(iter(link_data.items()))
        else:
            raise UserErrors(
                f"Link data {link_data} must be a string or map holding the permissions"
            )
        module = self.layer.get_module(target_module_name, module_idx)
        if module is None:
            raise Exception(
                f"Did not find the desired module {target_module_name} "
                "make sure that the module you're referencing is listed before the k8s "
                "app one")
        module_type = module.aliased_type or module.type
        if module_type != "gcp-gcs":
            raise Exception(
                f"Unsupported module type for gcp service account link: {module_type}"
            )
        self.handle_gcs_link(module, link_permissions)
    # Accumulated bucket permissions are appended to any user-provided ones.
    self.module.data["read_buckets"] = (
        self.module.data.get("read_buckets", []) + self.read_buckets)
    self.module.data["write_buckets"] = (
        self.module.data.get("write_buckets", []) + self.write_buckets)
    super(GcpServiceAccountProcessor, self).process(module_idx)
def validate_version(cls) -> None:
    """Ensure the installed terraform version lies in [MIN, MAX).

    :raises UserErrors: when terraform is too old or too new.
    """
    ensure_installed("terraform")
    pre_req_link = "Check https://docs.opta.dev/installation/#prerequisites"
    current_version = Terraform.get_version()
    current = version.parse(current_version)
    too_old = current < version.parse(MIN_TERRAFORM_VERSION)
    if too_old:
        raise UserErrors(
            f"Invalid terraform version {current_version} -- must be at least {MIN_TERRAFORM_VERSION}. {pre_req_link}"
        )
    too_new = current >= version.parse(MAX_TERRAFORM_VERSION)
    if too_new:
        raise UserErrors(
            f"Invalid terraform version {current_version} -- must be less than {MAX_TERRAFORM_VERSION}. {pre_req_link}"
        )
def opta_acquire_lock() -> None:
    """Acquire the per-directory opta lock, refusing to run concurrently.

    :raises UserErrors: when a lock file already exists in the current
        directory (i.e. another opta instance appears to be running).
    """
    if check_opta_running_file_exists():
        # Fix: the message called .opta.lock a "mime type"; it is a file
        # extension.
        raise UserErrors(
            "Opta already running in the current directory.\n"
            "If no opta instance is running, please delete the file with the .opta.lock extension"
        )
    create_opta_lock_file()
def _verify_azure_cloud_credentials(self) -> None:
    """Check that Azure credentials can be constructed locally.

    NOTE(review): DefaultAzureCredential() is typically lazy — it may not
    contact Azure until a token is actually requested, so this likely only
    catches configuration-time failures. Confirm whether an explicit token
    probe is desired here.

    :raises UserErrors: when credential construction fails to authenticate.
    """
    try:
        DefaultAzureCredential()
    except ClientAuthenticationError as e:
        raise UserErrors(
            "Azure Cloud are not configured properly.\n"
            f" Error: {e.message}"
        )
def fetch_private_key(self) -> Tuple[PKey, str]:
    """Read the PEM private key referenced by the module config.

    :return: the parsed key object and the raw PEM text.
    :raises UserErrors: when the file is missing or is not valid PEM.
    """
    key_path: str = os.path.join(
        os.path.dirname(self.layer.path), self.module.data["private_key_file"]
    )
    try:
        with open(key_path, "r") as handle:
            privkey = handle.read()
    except FileNotFoundError:
        raise UserErrors(
            f"Could not find private key with path {key_path}. Pls try again"
        )
    try:
        return load_privatekey(FILETYPE_PEM, privkey), privkey
    except Error:
        raise UserErrors("private key is not correct pem private key")
def __process_nginx_extra_ports(
    self,
    extra_ports: List[int],
    extra_tls_ports: List[int],
    service_ports: Dict[int, str],
) -> Dict[int, str]:
    """Map each extra nginx TCP port to its backing service.

    Ports without a configured service fall back to the placeholder service.
    TLS may only be enabled on ports that are also listed as extra TCP ports.

    :param extra_ports: ports from nginx_extra_tcp_ports.
    :param extra_tls_ports: ports that should also terminate TLS.
    :param service_ports: configured port -> service mapping.
    :raises UserErrors: when a TLS port is not in nginx_extra_tcp_ports.
    :return: final port -> service mapping.
    """
    placeholder_service = f"{NGINX_PLACEHOLDER_SERVICE}"
    # Only expose ports defined in nginx_extra_tcp_ports. (Previously this
    # built an all-placeholder dict first and then re-mapped it; a single
    # comprehension does the same in one pass.)
    port_mapping = {
        port: service_ports.get(port, placeholder_service)
        for port in extra_ports
    }

    missing_ports = [
        str(port) for port in extra_tls_ports if port not in port_mapping
    ]
    if missing_ports:
        raise UserErrors(
            f"Cannot enable TLS on ports {', '.join(missing_ports)} unless they are also set in nginx_extra_tcp_ports"
        )

    return port_mapping
def convert(raw_spec: Dict[str, Any]) -> PortSpec:
    """Build a PortSpec from its raw mapping, translating parse failures into
    user-facing errors.

    :param raw_spec: raw port mapping from the yaml config.
    :raises UserErrors: when the spec cannot be parsed.
    """
    try:
        return PortSpec.from_raw(raw_spec)
    except ValueError as e:
        port_label = raw_spec.get("name", "unnamed")
        raise UserErrors(f"Issue with port {port_label}: {str(e)}") from e