Code example #1
 def fetch_cert_chain(self) -> Tuple[Optional[X509], Optional[str]]:
     while True:
         cert_chain_path = prompt(
             "Please enter the full path to the certificate chain/intermediate certificate pem file found locally or "
             "the empty string if there is none. If you used fullchain.pem for the body, or something else saying full "
             "chain then leave this empty.",
             type=click.STRING,
             default="",
         )
         if cert_chain_path == "" or cert_chain_path is None:
             return None, None
         try:
             with open(cert_chain_path, "r") as f:
                 cert_chain = f.read()
         except FileNotFoundError:
             logger.warning(
                 f"Could not find cert chain with path {cert_chain_path}. Pls try again"
             )
             continue
         try:
             cert_chain_obj = load_certificate(FILETYPE_PEM,
                                               cert_chain.encode("utf-8"))
             return cert_chain_obj, cert_chain
         except Error:
             logger.warning("certificate chain is not correct pem cert")
             continue
Code example #2
File: upgrade.py Project: run-x/opta
def check_version_upgrade(is_upgrade_call: bool = False) -> bool:
    """Logs a warning if a newer version of opta is available.

    The version check is not always performed when this function is called.
    It is performed non-deterministically with a probability of UPGRADE_CHECK_PROBABILITY
    in order to not spam the user.
    """
    if OptaUpgrade.successful:
        OptaUpgrade.unset()
        return True
    if is_upgrade_call or _should_check_for_version_upgrade():
        logger.info("Checking for version upgrades...")
        try:
            latest_version = _get_latest_version()
        except Exception as e:
            logger.debug(e, exc_info=True)
            logger.info("Unable to find latest version.")
            return False
        try:
            if semver.VersionInfo.parse(
                    VERSION.strip("v")).compare(latest_version) < 0:
                logger.warning(
                    "New version available.\n"
                    f"You have {VERSION} installed. Latest version is {latest_version}."
                )
                if not is_upgrade_call:
                    print(
                        f"Upgrade instructions are available at {UPGRADE_INSTRUCTIONS_URL}  or simply use the `opta upgrade` command"
                    )
                return True
            else:
                logger.info("User on the latest version.")
        except Exception as e:
            logger.info(f"Semver check failed with error {e}")
    return False
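
The docstring above notes that the check only runs with probability UPGRADE_CHECK_PROBABILITY. The helper implementing that gate is not shown here; below is a minimal sketch of how such a probabilistic check could look. The random-based body and the constant's value are assumptions, not necessarily opta's actual implementation.

import random

# Assumed value; opta defines its own UPGRADE_CHECK_PROBABILITY elsewhere.
UPGRADE_CHECK_PROBABILITY = 0.2


def _should_check_for_version_upgrade() -> bool:
    # Only a fraction of invocations trigger the (network-bound) version
    # lookup, so users are not nagged on every command.
    return random.random() < UPGRADE_CHECK_PROBABILITY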
Code example #3
    def delete_opta_config(self) -> None:

        if os.path.isfile(self.config_file_path):
            os.remove(self.config_file_path)
            logger.info("Deleted opta config from local")
        else:
            logger.warning(
                f"Did not find opta config {self.config_file_path} to delete")
Code example #4
File: terraform.py Project: run-x/opta
 def _local_delete_state_storage(cls, layer: "Layer") -> None:
     providers = layer.gen_providers(0)
     if "local" not in providers.get("terraform", {}).get("backend", {}):
         return
     try:
         rmtree(os.path.join(cls.get_local_opta_dir()))
     except Exception:
         logger.warning("Local state delete did not work?")
Code example #5
    def delete_remote_state(self) -> None:

        if os.path.isfile(self.tf_file):
            os.remove(self.tf_file)
            logger.info("Deleted opta tf config from local")
        if os.path.isfile(self.tf_file + ".backup"):
            os.remove(self.tf_file + ".backup")
            logger.info("Deleted opta tf backup config from local")
        else:
            logger.warning(
                f"Did not find opta tf state {self.tf_file} to delete")
Code example #6
 def delete_opta_config(self) -> None:
     bucket = self.layer.state_storage()
     config_path = f"opta_config/{self.layer.name}"
     credentials, project_id = self.get_credentials()
     gcs_client = storage.Client(project=project_id,
                                 credentials=credentials)
     bucket_object = gcs_client.get_bucket(bucket)
     try:
         bucket_object.delete_blob(config_path)
     except NotFound:
         logger.warning(f"Did not find opta config {config_path} to delete")
     logger.info("Deleted opta config from gcs")
Code example #7
 def delete_remote_state(self) -> None:
     bucket = self.layer.state_storage()
     tfstate_path = f"{self.layer.name}/default.tfstate"
     credentials, project_id = self.get_credentials()
     gcs_client = storage.Client(project=project_id,
                                 credentials=credentials)
     bucket_object = gcs_client.get_bucket(bucket)
     try:
         bucket_object.delete_blob(tfstate_path)
     except NotFound:
         logger.warning(
             f"Did not find opta tf state {tfstate_path} to delete")
     logger.info(f"Deleted opta tf state for {self.layer.name}")
Code example #8
def get_secrets(namespace: str, manual_secret_name: str) -> dict:
    """:return: manual and linked secrets"""
    manual_secrets = get_namespaced_secrets(namespace, manual_secret_name)
    linked_secrets = get_namespaced_secrets(
        namespace, LINKED_SECRET_NAME
    )  # Helm charts don't have linked secrets, but it'll just return an empty dict so no worries
    for secret_name in manual_secrets.keys():
        if secret_name in linked_secrets:
            logger.warning(
                f"# Secret {secret_name} found manually overwritten from linked value."
            )
            del linked_secrets[secret_name]
    return deep_merge(manual_secrets, linked_secrets)
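
Because any key present in both dicts is deleted from linked_secrets before the merge, manually set secrets always take precedence. A small illustration of that behavior, assuming deep_merge acts like a plain dict merge for flat string-to-string data:

manual_secrets = {"DB_PASSWORD": "manually-set", "API_KEY": "abc"}
linked_secrets = {"DB_PASSWORD": "from-linked-db", "DB_HOST": "db.internal"}

# DB_PASSWORD exists in both, so it is dropped from linked_secrets first
# and the manual value survives the merge.
del linked_secrets["DB_PASSWORD"]
merged = {**manual_secrets, **linked_secrets}
# {'DB_PASSWORD': 'manually-set', 'API_KEY': 'abc', 'DB_HOST': 'db.internal'}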
Code example #9
File: terraform.py Project: run-x/opta
 def _gcp_delete_state_storage(cls, layer: "Layer") -> None:
     providers = layer.gen_providers(0)
     if "gcs" not in providers.get("terraform", {}).get("backend", {}):
         return
     bucket_name = providers["terraform"]["backend"]["gcs"]["bucket"]
     credentials, project_id = GCP.get_credentials()
     gcs_client = storage.Client(project=project_id,
                                 credentials=credentials)
     try:
         bucket_obj = gcs_client.get_bucket(bucket_name)
         bucket_obj.delete(force=True)
         logger.info("Successfully deleted GCP state storage")
     except NotFound:
         logger.warning("State bucket was already deleted")
Code example #10
def _verify_semver(
    old_semver_string: str,
    current_semver_string: str,
    layer: "Layer",
    auto_approve: bool = False,
) -> None:
    if old_semver_string in [DEV_VERSION, ""] or current_semver_string in [
            DEV_VERSION,
            "",
    ]:
        return

    old_semver = semver.VersionInfo.parse(old_semver_string)
    current_semver = semver.VersionInfo.parse(current_semver_string)
    if old_semver > current_semver:
        logger.warning(
            f"You're trying to run an older version ({current_semver}) of opta (last run with version {old_semver})."
        )
        if not auto_approve:
            click.confirm(
                "Do you wish to upgrade to the latest version of Opta?",
                abort=True,
            )
        _upgrade()
        logger.info("Please rerun the command if the upgrade was successful.")
        exit(0)

    present_modules = [k.aliased_type or k.type for k in layer.modules]

    current_upgrade_warnings = sorted(
        [(k, v) for k, v in UPGRADE_WARNINGS.items()
         if current_semver >= k[0] > old_semver and k[1] == layer.cloud
         and k[2] in present_modules],
        key=lambda x: semver.VersionInfo.parse(x[0][0]),
    )
    for current_upgrade_warning in current_upgrade_warnings:
        logger.info(
            f"{fg('magenta')}WARNING{attr(0)}: Detecting an opta upgrade to or past version {current_upgrade_warning[0]}. "
            f"Got the following warning: {current_upgrade_warning[1]}")
    if not auto_approve and len(current_upgrade_warnings) > 0:
        click.confirm(
            "Are you ok with the aforementioned warnings and done all precautionary steps you wish to do?",
            abort=True,
        )
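
The filter over UPGRADE_WARNINGS implies that its keys are (version, cloud, module type) tuples and its values are warning strings. The entries below are hypothetical and only illustrate that shape; the real table lives elsewhere in opta.

# key: (version that introduced the change, cloud provider, module type)
# value: warning shown when upgrading across that version with that module present
UPGRADE_WARNINGS = {
    ("0.21.0", "aws", "k8s-cluster"): (
        "This version reworks the node group configuration; expect nodes to be cycled."
    ),
    ("0.24.0", "google", "gcp-gke"): (
        "The GKE module now pins a newer Kubernetes version; plan for a master upgrade."
    ),
}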
Code example #11
def _gcp_get_configs(layer: "Layer") -> List[str]:
    bucket_name = layer.state_storage()
    gcs_config_dir = "opta_config/"
    credentials, project_id = GCP.get_credentials()
    gcs_client = storage.Client(project=project_id, credentials=credentials)
    try:
        bucket_object = gcs_client.get_bucket(bucket_name)
    except NotFound:
        logger.warning(
            "Couldn't find the state bucket, must have already been destroyed in a previous destroy run"
        )
        return []
    blobs: List[storage.Blob] = list(
        gcs_client.list_blobs(bucket_object, prefix=gcs_config_dir)
    )
    configs = [blob.name[len(gcs_config_dir) :] for blob in blobs]
    if layer.name in configs:
        configs.remove(layer.name)
    return configs
Code example #12
 def fetch_private_key(self) -> Tuple[PKey, str]:
     while True:
         privkey_path = prompt(
             "Please enter the full path to the private key pem file found locally. This is typically called privkey.pem or something like that.",
             type=click.STRING,
         )
         try:
             with open(privkey_path, "r") as f:
                 privkey = f.read()
         except FileNotFoundError:
             logger.warning(
                 f"Could not find private key with path {privkey_path}. Pls try again"
             )
             continue
         try:
             private_key_obj = load_privatekey(FILETYPE_PEM, privkey)
             return private_key_obj, privkey
         except Error:
             logger.warning("private key is not correct pem private key")
             continue
Code example #13
 def fetch_cert_body(self) -> Tuple[X509, str]:
     while True:
         cert_body_path = prompt(
             "Please enter the full path to the certificate body pem file found locally. This is typically called "
             f"cert.pem, and is {fg(1)}NOT{attr(0)} fullchain.pem",
             type=click.STRING,
         )
         try:
             with open(cert_body_path, "r") as f:
                 cert_body = f.read()
         except FileNotFoundError:
             logger.warning(
                 f"Could not find cert body with path {cert_body_path}. Pls try again"
             )
             continue
         if len(cert_body.split("-----END CERTIFICATE-----")) > 2:
             logger.warning(
                 "Certificate body can only have one certificate-- additional ones must go in the chain."
             )
         try:
             cert_obj = load_certificate(FILETYPE_PEM,
                                         cert_body.encode("utf-8"))
             return cert_obj, cert_body
         except Error:
             logger.warning("Certificate body is not correct pem cert.")
             continue
Code example #14
 def cleanup_cloudwatch_log_group(self, region: str) -> None:
     logger.debug(
         "Seeking dangling cloudwatch log group for k8s cluster just destroyed."
     )
     client: CloudWatchLogsClient = boto3.client(
         "logs", config=Config(region_name=region))
     log_group_name = f"/aws/eks/opta-{self.layer.name}/cluster"
     log_groups = client.describe_log_groups(
         logGroupNamePrefix=log_group_name)
     if len(log_groups["logGroups"]) == 0:
         return
     logger.debug(
         f"Found dangling cloudwatch log group {log_group_name}. Deleting it now"
     )
     client.delete_log_group(logGroupName=log_group_name)
     sleep(3)
     log_groups = client.describe_log_groups(
         logGroupNamePrefix=log_group_name)
     if len(log_groups["logGroups"]) != 0:
         logger.warning(
             f"Cloudwatch Log group {log_group_name} has recreated itself. Not stopping the destroy, but you will "
             "wanna check this out.")
Code example #15
    def display(detailed_plan: bool = False) -> None:
        if detailed_plan:
            regular_plan = Terraform.show(TF_PLAN_PATH, capture_output=True)
            CURRENT_CRASH_REPORTER.tf_plan_text = ansi_scrub(regular_plan
                                                             or "")
            print(regular_plan)
            return
        plan_dict = json.loads(
            Terraform.show(*["-no-color", "-json", TF_PLAN_PATH],
                           capture_output=True)  # type: ignore
        )
        CURRENT_CRASH_REPORTER.tf_plan_text = (
            CURRENT_CRASH_REPORTER.tf_plan_text or json.dumps(plan_dict))
        plan_risk = LOW_RISK
        module_changes: dict = {}
        resource_change: dict
        for resource_change in plan_dict.get("resource_changes", []):
            if resource_change.get("change", {}).get("actions",
                                                     ["no-op"]) == ["no-op"]:
                continue
            address: str = resource_change["address"]

            if not address.startswith("module."):
                logger.warning(
                    f"Unable to determine risk of changes to resource {address}. "
                    "Please run in detailed plan mode for more info")
            module_name = address.split(".")[1]
            module_changes[module_name] = module_changes.get(
                module_name, {
                    "risk": LOW_RISK,
                    "resources": {}
                })
            resource_name = ".".join(address.split(".")[2:])
            actions = resource_change.get("change", {}).get("actions", [])
            if "create" in actions and "delete" in actions:
                actions = ["replace"]
            action = actions[0]
            if action in ["read", "create"]:
                current_risk = LOW_RISK
                action_reason = "data_refresh" if action == "read" else "creation"
            elif action in ["replace", "delete"]:
                current_risk = HIGH_RISK
                action_reason = resource_change.get("action_reason", "N/A")
            elif action in ["update"]:
                current_risk, action_reason = PlanDisplayer.handle_update(
                    resource_change)
            else:
                raise Exception(
                    f"Do not know how to handle planned action: {action}")

            module_changes[module_name]["resources"][resource_name] = {
                "action": action,
                "reason": action_reason,
                "risk": current_risk,
            }
            module_changes[module_name]["risk"] = _max_risk(
                module_changes[module_name]["risk"], current_risk)
            plan_risk = _max_risk(plan_risk, current_risk)

        logger.info(
            f"Identified total risk of {RISK_COLORS[plan_risk]}{plan_risk}{attr(0)}.\n"
            f"{RISK_EXPLANATIONS[plan_risk]}\n"
            "For additional help, please reach out to the RunX team at https://slack.opta.dev/"
        )
        module_changes_list = sorted(
            [(k, v) for k, v in module_changes.items()],
            key=lambda x: x[1]["risk"],
            reverse=True,
        )
        table = []
        for module_name, module_change in module_changes_list:
            resource_changes_list = sorted(
                [(k, v) for k, v in module_change["resources"].items()],
                key=lambda x: x[1]["risk"],
                reverse=True,
            )
            for resource_name, resource_change in resource_changes_list:
                current_risk = resource_change["risk"]
                table.append([
                    f"{fg('blue')}{module_name}{attr(0)}",
                    resource_name,
                    resource_change["action"],
                    f"{RISK_COLORS[current_risk]}{current_risk}{attr(0)}",
                    resource_change["reason"].replace("_", " "),
                ])
        if len(module_changes) == 0:
            logger.info("No changes found.")
        else:
            print(
                tabulate(
                    table,
                    ["module", "resource", "action", "risk", "reason"],
                    tablefmt="fancy_grid",
                ))
        logger.info(
            "For more details, please rerun the command with the --detailed-plan flag."
        )
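
The display logic leans on a _max_risk helper and LOW_RISK/HIGH_RISK constants that are not shown in this snippet. A minimal sketch of how they could be defined, assuming risk levels are plain strings with a fixed ordering (the real definitions in opta may differ):

# Assumed constants; only their relative ordering matters to display().
LOW_RISK = "LOW"
HIGH_RISK = "HIGH"
_RISK_ORDER = [LOW_RISK, HIGH_RISK]


def _max_risk(risk_1: str, risk_2: str) -> str:
    # Return whichever of the two risk levels ranks higher.
    return max(risk_1, risk_2, key=_RISK_ORDER.index)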
Code example #16
def generate_terraform(
    ctx: click.Context,
    config: str,
    env: Optional[str],
    directory: Optional[str],
    readme_format: str,
    delete: bool,
    auto_approve: bool,
    backend: str,
    var: Dict[str, str],
) -> None:
    """(beta) Generate Terraform language files

    Examples:

    opta generate-terraform -c my-config.yaml

    opta generate-terraform -c my-config.yaml --directory ./terraform

    opta generate-terraform -c my-config.yaml --auto-approve --backend remote --readme-format md
    """

    print("This command is in beta mode")
    print(
        "If you have any error or suggestion, please let us know in our slack channel  https://slack.opta.dev\n"
    )

    config = check_opta_file_exists(config)

    pre_check()
    _clean_tf_folder()

    layer = Layer.load_from_yaml(config,
                                 env,
                                 stateless_mode=True,
                                 input_variables=var)
    layer.validate_required_path_dependencies()

    if directory is None:
        # generate the target directory
        directory = f"gen-tf-{layer.name}"
        if env is not None:
            directory = f"{directory}-{env}"

    if directory.strip() == "":
        # the user set it to empty
        raise click.UsageError("--directory can't be empty")

    event_properties: Dict = layer.get_event_properties()
    event_properties["modules"] = ",".join(
        [m.get_type() for m in layer.get_modules()])
    amplitude_client.send_event(
        amplitude_client.START_GEN_TERRAFORM_EVENT,
        event_properties=event_properties,
    )

    try:

        # work in a temp directory until command is over, to not leave a partially generated folder
        tmp_dir_obj = tempfile.TemporaryDirectory(prefix="opta-gen-tf")
        tmp_dir = tmp_dir_obj.name

        # quick exit if directory already exists and not empty
        output_dir = os.path.join(os.getcwd(), directory)
        if _dir_has_files(output_dir):
            if not delete:
                raise UserErrors(
                    f"Error: Output directory already exists: '{output_dir}'. If you want to delete it, use the '--delete' option"
                )
            print(
                f"Output directory {output_dir} already exists and --delete flag is on, deleting it"
            )
            if not auto_approve:
                state_file_warning = (
                    ", including terraform state files" if os.path.exists(
                        os.path.join(output_dir, "tfstate")) else "")
                click.confirm(
                    f"The output directory will be deleted{state_file_warning}: {output_dir}.\n Do you approve?",
                    abort=True,
                )
            _clean_folder(output_dir)

        # to keep consistent with what opta does - we could make this an option if opta tags are not desirable
        gen_opta_resource_tags(layer)

        # copy helm service dir
        if "k8s-service" in [m.type for m in layer.modules]:
            # find module root directory
            service_helm_dir = os.path.join(layer.modules[0].module_dir_path,
                                            "..", "..",
                                            "opta-k8s-service-helm")
            target_dir = os.path.join(tmp_dir, "modules",
                                      "opta-k8s-service-helm")
            logger.debug(
                f"Copying helm charts from {service_helm_dir} to {target_dir}")
            shutil.copytree(service_helm_dir, target_dir, dirs_exist_ok=True)

        # copy module directories and update the module path to point to local directory
        # note this will only copy the 'tf_module' subdirectory ex: modules/aws_base/tf_module
        for module in layer.modules:
            src_path = module.module_dir_path
            if not os.path.exists(src_path):
                logger.warning(
                    f"Could not find source directory for module '{module.name}', ignoring it"
                )
                # dynamically mark it as not exportable
                module.desc["is_exportable"] = False
                continue
            rel_path = "./" + src_path[src_path.index("modules/"):]
            abs_path = os.path.join(tmp_dir, rel_path)
            logger.debug(
                f"Copying module from {module.get_type()} to {abs_path}")
            shutil.copytree(src_path, abs_path, dirs_exist_ok=True)
            # configure module path to use new relative path
            module.module_dir_path = rel_path
            # if there is some export documentation load it now - it will be added to the readme
            export_md = os.path.join(src_path, "..", "export.md")
            if os.path.exists(export_md):
                with open(export_md, "r") as f:
                    module.desc["export"] = f.read()

        # update terraform backend to be local (currently defined in the registry)
        # this is needed as the generated terraform should work outside of opta
        original_backend = REGISTRY[layer.cloud]["backend"]
        if backend.lower() == "local":
            backend_dir = f"./tfstate/{layer.root().name}.tfstate"
            logger.debug(f"Setting terraform backend to local: {backend_dir}")
            REGISTRY[layer.cloud]["backend"] = {"local": {"path": backend_dir}}
        # generate the main.tf.json
        try:
            execution_plan = list(gen(layer))
        finally:
            REGISTRY[layer.cloud]["backend"] = original_backend

        # break down json file in multiple files
        with open(TF_FILE_PATH) as f:
            main_tf_json = json.load(f)

        for key in ["provider", "data", "output", "terraform"]:
            # extract the relevant json
            main_tf_json, extracted_json = dicts.extract(main_tf_json, key)

        # save it as its own file
            _write_json(extracted_json, os.path.join(tmp_dir,
                                                     f"{key}.tf.json"))

        # extract modules tf.json in their own files
        main_tf_json, modules_json = dicts.extract(main_tf_json, "module")
        for name, value in modules_json["module"].items():
            _write_json({"module": {
                name: value
            }}, os.path.join(tmp_dir, f"module-{name}.tf.json"))

        # update the main file without the extracted sections
        if main_tf_json:
            # only write the file if there is anything remaining
            _write_json(
                main_tf_json,
                os.path.join(tmp_dir, f"{tmp_dir}/{layer.name}.tf.json"))

        # generate the readme
        opta_cmd = f"opta {ctx.info_name} {str_options(ctx)}"
        readme_file = _generate_readme(layer, execution_plan, tmp_dir,
                                       readme_format, opta_cmd, backend)

        # we have a service file but the env was not exported
        if layer.name != layer.root().name and not os.path.exists(
                os.path.join(output_dir, "module-base.tf.json")):
            print(
                f"Warning: the output directory doesn't include terraform files for the environment named '{layer.root().name}', "
                "some dependencies might be missing for terraform to work.")

        # if everything was successful, copy tmp dir to target dir
        logger.debug(f"Copy {tmp_dir} to {output_dir}")
        shutil.copytree(tmp_dir, output_dir, dirs_exist_ok=True)
        unsupported_modules = [
            m for m in layer.get_modules() if not m.is_exportable()
        ]

        if unsupported_modules:
            unsupported_modules_str = ",".join(
                [m.get_type() for m in unsupported_modules])
            event_properties["unsupported_modules"] = unsupported_modules_str
            print(
                f"Terraform files partially generated, a few modules are not supported: {unsupported_modules_str}"
            )
        else:
            print("Terraform files generated successfully.")
        if readme_file:
            copied_readme = os.path.join(output_dir,
                                         os.path.basename(readme_file))
            print(f"Check {copied_readme} for documentation.")

    except Exception as e:
        event_properties["success"] = False
        event_properties["error_name"] = e.__class__.__name__
        raise e
    else:
        event_properties["success"] = True
    finally:
        amplitude_client.send_event(
            amplitude_client.FINISH_GEN_TERRAFORM_EVENT,
            event_properties=event_properties,
        )

        tmp_dir_obj.cleanup()
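
The splitting of main.tf.json into per-section files relies on a dicts.extract helper that is not shown. Judging from how it is called above, it returns the source dict without the given key plus a dict holding just that key; a sketch under that assumption (not necessarily opta's implementation):

from typing import Any, Dict, Tuple


def extract(source: Dict[str, Any], key: str) -> Tuple[Dict[str, Any], Dict[str, Any]]:
    # Split `source` into (everything except `key`, just the `key` entry) so each
    # top-level Terraform section can be written to its own .tf.json file.
    remaining = {k: v for k, v in source.items() if k != key}
    extracted = {key: source[key]} if key in source else {}
    return remaining, extracted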
Code example #17
def reconcile_nginx_extra_ports(*, update_config_map: bool = True) -> Dict[int, str]:
    """
    Runs the pseudo-controller that scans the cluster for Kubernetes services that expose raw TCP ports.
    If :update_config_map is True (default), it will also update the nginx port config map.
    The ConfigMap won't be updated with any ports not already defined in it.

    Returns the port mapping defined by services, in the form of a dict of "external port -> 'namespace/service_name:service_port'"
    """

    services = kubernetes.list_services()

    # Filter out any deleted services or services that don't have the annotation we want
    services = [
        service for service in services if service.metadata.deletion_timestamp is None
    ]

    # Skip services that don't have the annotation
    services = [
        service
        for service in services
        if NGINX_EXTRA_TCP_PORTS_ANNOTATION in (service.metadata.annotations or {})
    ]

    # Give precedence to older services in case of port conflicts
    services.sort(key=lambda svc: svc.metadata.creation_timestamp)

    port_mapping: Dict[int, str] = {}
    for service in services:
        id = f"{service.metadata.namespace}/{service.metadata.name}"

        extra_ports_annotation: str = service.metadata.annotations[
            NGINX_EXTRA_TCP_PORTS_ANNOTATION
        ]

        try:
            extra_ports: Dict[str, str] = json.loads(extra_ports_annotation)
        except json.JSONDecodeError as e:
            logger.warning(
                "Error decoding the %s annotation on service %s",
                NGINX_EXTRA_TCP_PORTS_ANNOTATION,
                id,
                exc_info=e,
            )
            continue

        if not isinstance(extra_ports, dict):
            logger.warning(
                "Contents of the %s annotation not expected format on service %s",
                NGINX_EXTRA_TCP_PORTS_ANNOTATION,
                id,
            )
            continue

        for nginx_port_str, target_port in extra_ports.items():
            try:
                nginx_port = int(nginx_port_str)
            except ValueError:
                logger.warning(
                    "Contents of the %s annotation not expected format (non-int key) on service %s",
                    NGINX_EXTRA_TCP_PORTS_ANNOTATION,
                    id,
                )
                continue

            if nginx_port in port_mapping:
                logger.warning(
                    "Multiple services found that bind to the %i ingress port. Prioritizing oldest service",
                    nginx_port,
                )
                # Skip conflicting ports
                continue

            port_mapping[
                nginx_port
            ] = f"{service.metadata.namespace}/{service.metadata.name}:{target_port}"

    if not update_config_map:
        return port_mapping

    cm = kubernetes.get_config_map(*NGINX_TCP_CONFIGMAP)
    if cm is None:
        # We can't update anything if we don't have the config map
        return port_mapping

    desired_mapping = {str(port): service for port, service in port_mapping.items()}

    # Don't add any keys, and keys that we don't have a service for should be set to the placeholder service
    current_data: Dict[str, str] = cm.data or {}
    desired_data = {
        port: desired_mapping.get(port, NGINX_PLACEHOLDER_SERVICE)
        for port in current_data
    }

    if desired_data != current_data:
    # We don't handle any conflicts here (by passing resource version), but we probably don't need to until this is implemented as an actual controller
        kubernetes.update_config_map_data(
            cm.metadata.namespace, cm.metadata.name, desired_data
        )

    # We return port_mapping instead of desired_data because we always want to return services that have "requested" to be mapped,
    # even if nginx hasn't been configured to expose them.
    return port_mapping
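
The annotation parsed above is a JSON object mapping nginx-facing ports to target ports on the annotated service. A hypothetical example of the input and the resulting mapping (the annotation key, namespace, and service name below are placeholders, not the real value of NGINX_EXTRA_TCP_PORTS_ANNOTATION):

# Annotation on a Service "my-svc" in namespace "my-app" (placeholder key):
#   nginx.example/extra-tcp-ports: '{"6379": "redis", "5432": "postgres"}'
#
# reconcile_nginx_extra_ports() would then report:
port_mapping = {
    6379: "my-app/my-svc:redis",
    5432: "my-app/my-svc:postgres",
}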
Code example #18
    def process(self, module_idx: int) -> None:
        if self.layer.is_stateless_mode() is True:
            # do not do create any certificate
            super(AwsDnsProcessor, self).process(module_idx)
            return

        providers = self.layer.gen_providers(0)
        region = providers["provider"]["aws"]["region"]
        self.validate_dns()
        if self.module.data.get("upload_cert"):
            ssm_client: SSMClient = boto3.client(
                "ssm", config=Config(region_name=region))
            parameters = ssm_client.get_parameters_by_path(
                Path=f"/opta-{self.layer.get_env()}",
                Recursive=True).get("Parameters", [])
            parameter_names = list(map(lambda x: x["Name"], parameters))
            files_found = False
            private_key_ssm_path = f"/opta-{self.layer.get_env()}/{PRIVATE_KEY_FILE_NAME}"
            cert_body_ssm_path = (
                f"/opta-{self.layer.get_env()}/{CERTIFICATE_BODY_FILE_NAME}")
            cert_chain_ssm_path = (
                f"/opta-{self.layer.get_env()}/{CERTIFICATE_CHAIN_FILE_NAME}")
            if {private_key_ssm_path,
                    cert_body_ssm_path}.issubset(set(parameter_names)):
                logger.info("SSL files found in cloud")
                files_found = True
            if cert_chain_ssm_path in parameter_names:
                self.module.data["cert_chain_included"] = True
            force_update = self.module.data.get("force_update", False)
            if (force_update or not files_found) and not self.module.data.get(
                    "_updated_already", False):
                logger.info(
                    f"{fg(5)}{attr(1)}You have indicated that you wish to pass in your own ssl certificate and the files have not been "
                    "found on the cloud or you have specified an update must be forced. "
                    "This is not the typically recommended option as the dns delegation way "
                    "includes certificate refreshing so if you don't do this you will need to periodically force a new "
                    f"update. Sometimes this can not be helped, which brings us here.{attr(0)}"
                )
                matching_cert_and_keys = False
                while not matching_cert_and_keys:
                    private_key_obj, private_key_str = self.fetch_private_key()
                    cert_obj, cert_str = self.fetch_cert_body()
                    cert_pub = dump_publickey(FILETYPE_PEM,
                                              cert_obj.get_pubkey())
                    key_pub = dump_publickey(FILETYPE_PEM, private_key_obj)
                    if cert_pub != key_pub:
                        logger.warning(
                            "Certificate private key does not match inputted private key, try again"
                        )
                        continue
                    cert_chain_obj, cert_chain_str = self.fetch_cert_chain()
                    # TODO: add cert chain validation and full chain validation against trusted CA
                    domains_list = self.get_subject_alternative_names(cert_obj)
                    if self.module.data["domain"] not in domains_list:
                        raise UserErrors(
                            f"You provided a domain of {self.module.data['domain']} but the cert is only for domains {domains_list}"
                        )
                    matching_cert_and_keys = True
                if cert_chain_str:
                    ssm_client.put_parameter(
                        Name=cert_chain_ssm_path,
                        Value=cert_chain_str,
                        Type="SecureString",
                        Overwrite=True,
                    )
                    self.module.data["cert_chain_included"] = True
                elif cert_chain_ssm_path in parameter_names:
                    ssm_client.delete_parameter(Name=cert_chain_ssm_path)
                ssm_client.put_parameter(
                    Name=private_key_ssm_path,
                    Value=private_key_str,
                    Type="SecureString",
                    Overwrite=True,
                )
                ssm_client.put_parameter(
                    Name=cert_body_ssm_path,
                    Value=cert_str,
                    Type="SecureString",
                    Overwrite=True,
                )
                logger.info(
                    "certificate files uploaded securely to parameter store for future consumption"
                )
                self.module.data["_updated_already"] = True
        elif self.module.data.get("external_cert_arn") is not None:
            acm_client: ACMClient = boto3.client(
                "acm", config=Config(region_name=region))
            try:
                cert = acm_client.describe_certificate(CertificateArn=str(
                    self.module.data.get("external_cert_arn")))
            except Exception as e:
                raise UserErrors(
                    f"Encountered error when attempting to verify external certificate {self.module.data.get('external_cert_arn')}: "
                    f"{e}")
            cert_domains = set([cert["Certificate"]["DomainName"]] +
                               cert["Certificate"]["SubjectAlternativeNames"])
            if self.module.data["domain"] not in cert_domains:
                raise UserErrors(
                    f"Inputted certificate is for domains of {cert_domains}, but the main domain "
                    f"{self.module.data['domain']} is not one of them")

        linked_module_name = self.module.data.get("linked_module")
        if linked_module_name is not None:
            x: Module
            linked_modules = list(
                filter(lambda x: linked_module_name in [x.name, x.type],
                       self.layer.modules))
            if len(linked_modules) != 1:
                raise UserErrors(
                    f"Could not find DNS' linked_module of {linked_module_name}-- it must be the name or type of a single module"
                )
        super(AwsDnsProcessor, self).process(module_idx)
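
The processor calls self.get_subject_alternative_names(cert_obj), which is not shown here. One way to implement it with pyOpenSSL is sketched below; this is an assumption about the approach, not necessarily opta's code.

from typing import List
from OpenSSL.crypto import X509


def get_subject_alternative_names(cert_obj: X509) -> List[str]:
    # pyOpenSSL renders the subjectAltName extension as "DNS:a, DNS:b, ...";
    # collect the DNS entries from it.
    domains: List[str] = []
    for i in range(cert_obj.get_extension_count()):
        ext = cert_obj.get_extension(i)
        if ext.get_short_name() == b"subjectAltName":
            for entry in str(ext).split(","):
                entry = entry.strip()
                if entry.startswith("DNS:"):
                    domains.append(entry[len("DNS:"):])
    return domains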
Code example #19
def destroy(
    config: str,
    env: Optional[str],
    auto_approve: bool,
    detailed_plan: bool,
    local: Optional[bool],
    var: Dict[str, str],
) -> None:
    """Destroy all opta resources from the current config

    To destroy an environment, you have to first destroy all the services first.

    Examples:

    opta destroy -c my-service.yaml --auto-approve

    opta destroy -c my-env.yaml --auto-approve
    """
    try:
        opta_acquire_lock()
        pre_check()
        logger.warning(
            "You are destroying your cloud infra state. DO NOT, I REPEAT, DO NOT do this as "
            "an attempt to debug a weird/errored apply. What you have created is not some ephemeral object that can be "
            "tossed arbitrarily (perhaps some day) and destroying unnecessarily just to reapply typically makes it "
            "worse. If you're doing this cause you are really trying to destroy the environment entirely, then that's "
            "perfectly fine-- if not then please reach out to the opta team in the slack workspace "
            "(https://slack.opta.dev) and I promise that they'll be happy to help debug."
        )

        config = check_opta_file_exists(config)
        if local:
            config, _ = _handle_local_flag(config, False)
            _clean_tf_folder()
        layer = Layer.load_from_yaml(config, env, input_variables=var)
        event_properties: Dict = layer.get_event_properties()
        amplitude_client.send_event(
            amplitude_client.DESTROY_EVENT, event_properties=event_properties,
        )
        layer.verify_cloud_credentials()
        layer.validate_required_path_dependencies()
        if not Terraform.download_state(layer):
            logger.info(
                "The opta state could not be found. This may happen if destroy ran successfully before."
            )
            return

        tf_lock_exists, _ = Terraform.tf_lock_details(layer)
        if tf_lock_exists:
            raise UserErrors(USER_ERROR_TF_LOCK)

        # Any child layers should be destroyed first before the current layer.
        children_layers = _fetch_children_layers(layer)
        if children_layers:
            # TODO: ideally we can just automatically destroy them but it's
            # complicated...
            logger.error(
                "Found the following services that depend on this environment. Please run `opta destroy` on them first!\n"
                + "\n".join(children_layers)
            )
            raise UserErrors("Dependant services found!")

        tf_flags: List[str] = []
        if auto_approve:
            sleep_time = 5
            logger.info(
                f"{attr('bold')}Opta will now destroy the {attr('underlined')}{layer.name}{attr(0)}"
                f"{attr('bold')} layer.{attr(0)}\n"
                f"{attr('bold')}Sleeping for {attr('underlined')}{sleep_time} secs{attr(0)}"
                f"{attr('bold')}, press Ctrl+C to Abort.{attr(0)}"
            )
            time.sleep(sleep_time)
            tf_flags.append("-auto-approve")
        modules = Terraform.get_existing_modules(layer)
        layer.modules = [x for x in layer.modules if x.name in modules]
        gen_all(layer)
        Terraform.init(False, "-reconfigure", layer=layer)
        Terraform.refresh(layer)

        idx = len(layer.modules) - 1
        for module in reversed(layer.modules):
            try:
                module_address_prefix = f"-target=module.{module.name}"
                logger.info("Planning your changes (might take a minute)")
                Terraform.plan(
                    "-lock=false",
                    "-input=false",
                    "-destroy",
                    f"-out={TF_PLAN_PATH}",
                    layer=layer,
                    *list([module_address_prefix]),
                )
                PlanDisplayer.display(detailed_plan=detailed_plan)
                tf_flags = []
                if not auto_approve:
                    click.confirm(
                        "The above are the planned changes for your opta run. Do you approve?",
                        abort=True,
                    )
                else:
                    tf_flags.append("-auto-approve")
                Terraform.apply(layer, *tf_flags, TF_PLAN_PATH, no_init=True, quiet=False)
                layer.post_delete(idx)
                idx -= 1
            except Exception as e:
                raise e

        Terraform.delete_state_storage(layer)
    finally:
        opta_release_lock()
Code example #20
File: aws_nodegroup.py Project: run-x/opta
 def __back_compat_use_gpu(self) -> None:
     if self.module.data.get("use_gpu", False):
         logger.warning(
             "Using deprecated input use_gpu. Please use ami_type input to avoid the warning in future."
         )
         self.module.data["ami_type"] = "AL2_x86_64_GPU"
Code example #21
    def post_hook(self, module_idx: int, exception: Optional[Exception]) -> None:
        if exception is not None:
            return
        providers = self.layer.gen_providers(0)
        region = providers["provider"]["aws"]["region"]
        sesv2_client: SESV2Client = boto3.client(
            "sesv2", config=Config(region_name=region)
        )
        ses_account = sesv2_client.get_account()

        if ses_account["ProductionAccessEnabled"]:
            logger.debug("Alrighty, looks like your account is out of SES sandbox")
            return
        elif "Details" in ses_account:
            if ses_account["Details"]["ReviewDetails"]["Status"] == "PENDING":
                logger.info(
                    f"{fg(5)}{attr(1)}Looks like review for taking you out of the SES sandbox is still pending. You can "
                    f"follow it in your AWS Support Cases (e.g. go to the AWS UI aka console and at the top search bar look "
                    f"for \"support\")-- your case id is {ses_account['Details']['ReviewDetails']['CaseId']}{attr(0)}"
                )
                return
            elif (
                ses_account["Details"]["ReviewDetails"]["Status"] == "FAILED"
                or ses_account["Details"]["ReviewDetails"]["Status"] == "DENIED"
            ):
                logger.warning(
                    f"{attr(1)}Looks like your request to move out of the SES sandbox has been "
                    f"denied/failed-- you're gonna need to go resolve this manually in your support case. This is how "
                    f"you do it: go to our AWS Support Cases (e.g. go to the AWS UI aka console and at the top search "
                    f"bar look for \"support\")-- your case id is {ses_account['Details']['ReviewDetails']['CaseId']}. "
                    f"AWS customer service can help you get this approved. Just click on it, and nicely answer the "
                    f"human's questions/concerns to get your access approved.{attr(0)}"
                )
                return

        logger.info(
            f'{fg(5)}{attr(1)}Currently the amazon email service is provisioned in the default "sandboxed" mode, i.e. '
            f"they can only send emails to a few verified accounts. You can read more about it "
            f"https://docs.aws.amazon.com/ses/latest/DeveloperGuide/request-production-access.html. To apply for the "
            f"production access, please answer a few questions.{attr(0)}"
        )
        website_url = ""
        while website_url == "":
            website_url = prompt(
                "Please enter your official website url-- the most official thing to show the AWS folks that this is for real.",
                type=click.STRING,
            ).strip()
        if not website_url.startswith("https://"):
            website_url = f"https://{website_url}"
        description = ""
        while description == "":
            description = prompt(
                "Please enter some brief description about why you want this email capability",
                type=click.STRING,
            ).strip()
        email_list: List[str] = []
        valid_emails = False
        while not valid_emails:
            email_list = []
            contact_emails: str = prompt(
                "Please enter a comma-delimited list of contact emails to keep in the loop about this request (need at least one).",
                type=click.STRING,
            )
            potential_emails = contact_emails.split(",")
            valid_emails = True
            for potential_email in potential_emails:
                try:
                    valid = validate_email(potential_email.strip())
                    email_list.append(valid.email)
                except EmailNotValidError as e:
                    logger.warning(str(e))
                    valid_emails = False

        sesv2_client.put_account_details(
            MailType="TRANSACTIONAL",
            WebsiteURL=website_url,
            ContactLanguage="EN",
            UseCaseDescription=description,
            AdditionalContactEmailAddresses=email_list,
            ProductionAccessEnabled=True,
        )
        logger.info(
            "Alright, SES account upgrade from sandbox request is sent. Give AWS ~24 hours to resolve this issue "
            "(the emails we asked you to include will be kept in the loop and should already have an email sent). "
            "You can keep using opta in the meantime and opta will print logs stating the status of your request so far"
        )