Example #1
File: secret.py Project: run-x/opta
def update(
    secret: str,
    value: str,
    env: Optional[str],
    config: str,
    no_restart: bool,
    local: Optional[bool],
    var: Dict[str, str],
    module: Optional[str],
) -> None:
    """Update a given secret of a k8s service with a new value

    Examples:

    opta secret update -c my-service.yaml "MY_SECRET_1" "value"
    """

    config = check_opta_file_exists(config)
    if local:
        config = local_setup(config, input_variables=var)
        env = "localopta"
    layer = Layer.load_from_yaml(config,
                                 env,
                                 input_variables=var,
                                 strict_input_variables=False)
    secret_name, namespace = get_secret_name_and_namespace(layer, module)

    set_kube_config(layer)
    create_namespace_if_not_exists(namespace)
    amplitude_client.send_event(amplitude_client.UPDATE_SECRET_EVENT)
    update_secrets(namespace, secret_name, {secret: str(value)})
    __restart_deployments(no_restart, namespace)

    logger.info("Success")
Example #2
File: secret.py Project: run-x/opta
def delete(
    secret: str,
    env: Optional[str],
    config: str,
    no_restart: bool,
    local: Optional[bool],
    var: Dict[str, str],
    module: Optional[str],
) -> None:
    """Delete a secret key from a k8s service

    Examples:

    opta secret delete -c my-service.yaml "MY_SECRET_1"
    """

    config = check_opta_file_exists(config)
    if local:
        config = local_setup(config, input_variables=var)
        env = "localopta"
    layer = Layer.load_from_yaml(config,
                                 env,
                                 input_variables=var,
                                 strict_input_variables=False)
    secret_name, namespace = get_secret_name_and_namespace(layer, module)

    set_kube_config(layer)
    if check_if_namespace_exists(namespace):
        delete_secret_key(namespace, secret_name, secret)
        __restart_deployments(no_restart, namespace)
    amplitude_client.send_event(amplitude_client.UPDATE_SECRET_EVENT)
    logger.info("Success")
Example #3
def configure_kubectl(config: str, env: Optional[str], local: Optional[bool],
                      var: Dict[str, str]) -> None:
    """
    Configure kubectl so you can connect to the cluster

    This command constructs a configuration with prepopulated server and certificate authority data values for the managed cluster.

    If you have the KUBECONFIG environment variable set, then the resulting configuration file is created at that location.
    Otherwise, by default, the resulting configuration file is created at the default kubeconfig path (.kube/config) in your home directory.
    """
    try:
        opta_acquire_lock()
        config = check_opta_file_exists(config)
        if local:
            config = local_setup(config, input_variables=var)
        layer = Layer.load_from_yaml(config,
                                     env,
                                     input_variables=var,
                                     strict_input_variables=False)
        amplitude_client.send_event(
            amplitude_client.CONFIGURE_KUBECTL_EVENT,
            event_properties={
                "org_name": layer.org_name,
                "layer_name": layer.name
            },
        )
        layer.verify_cloud_credentials()
        purge_opta_kube_config(layer)
        configure(layer)
        load_opta_kube_config_to_default(layer)
    finally:
        opta_release_lock()
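
The kubeconfig resolution the docstring describes boils down to honoring KUBECONFIG when set and falling back to ~/.kube/config otherwise. A one-function sketch of that rule (the helper name is hypothetical, not opta's):

import os

def default_kubeconfig_path() -> str:
    # honor KUBECONFIG if set, else fall back to the default path
    return os.environ.get("KUBECONFIG") or os.path.expanduser("~/.kube/config")
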
Example #4
def test_check_opta_file_exists_file_does_not_exists_user_input(
    mocker: MockFixture,
) -> None:
    mock_config_path = "mock_config_path"
    mock_user_config_path = "mock_user_config_path"
    mock_os_path_exists = mocker.patch(
        "opta.utils.os.path.exists", side_effect=[False, True]
    )
    mock_click_prompt = mocker.patch(
        "opta.utils.click.prompt", return_value=mock_user_config_path
    )
    mock_system_exit = mocker.patch("opta.utils.sys.exit")

    config_path = check_opta_file_exists(mock_config_path)

    assert config_path == mock_user_config_path
    mock_os_path_exists.assert_has_calls(
        [mocker.call(mock_config_path), mocker.call(mock_user_config_path)]
    )
    mock_click_prompt.assert_called_once_with(
        "Enter a Configuration Path (Empty String will exit)",
        default="",
        type=click.STRING,
        show_default=False,
    )
    mock_system_exit.assert_not_called()
Example #5
File: output.py Project: run-x/opta
def output(
    config: str,
    env: Optional[str],
    local: Optional[bool],
    var: Dict[str, str],
) -> None:
    """Print TF outputs"""
    disable_version_upgrade()
    config = check_opta_file_exists(config)
    if local:
        config = local_setup(config, var, None)
    layer = Layer.load_from_yaml(config,
                                 env,
                                 input_variables=var,
                                 strict_input_variables=False)
    amplitude_client.send_event(
        amplitude_client.VIEW_OUTPUT_EVENT,
        event_properties={
            "org_name": layer.org_name,
            "layer_name": layer.name
        },
    )
    layer.verify_cloud_credentials()
    gen_all(layer)
    outputs = get_terraform_outputs(layer)
    # Adding extra outputs
    if layer.cloud == "aws":
        outputs = _load_extra_aws_outputs(outputs)
    elif layer.cloud == "google":
        outputs = _load_extra_gcp_outputs(outputs)
    outputs_formatted = json.dumps(outputs, indent=4)
    print(outputs_formatted)
Example #6
def validate(config: str, json_schema: bool, env: Optional[str],
             var: Dict[str, str]) -> None:
    config = check_opta_file_exists(config)

    Layer.load_from_yaml(config,
                         env,
                         json_schema,
                         input_variables=var,
                         strict_input_variables=False)
Example #7
def shell(env: Optional[str], config: str, type: str, local: Optional[bool],
          var: Dict[str, str]) -> None:
    """
    Get a shell into one of the pods in a service

    Examples:

    opta shell -c my-service.yaml

    """

    config = check_opta_file_exists(config)
    if local:
        config = local_setup(config, input_variables=var)
    # Configure kubectl
    layer = Layer.load_from_yaml(config,
                                 env,
                                 input_variables=var,
                                 strict_input_variables=False)
    amplitude_client.send_event(
        amplitude_client.SHELL_EVENT,
        event_properties={
            "org_name": layer.org_name,
            "layer_name": layer.name
        },
    )
    layer.verify_cloud_credentials()
    gen_all(layer)
    set_kube_config(layer)
    load_opta_kube_config()
    context_name = layer.get_cloud_client().get_kube_context_name()

    # Get a random pod in the service
    v1 = CoreV1Api()
    pod_list = v1.list_namespaced_pod(layer.name).items
    if len(pod_list) == 0:
        raise UserErrors("This service is not yet deployed")

    nice_run([
        "kubectl",
        "exec",
        "-n",
        layer.name,
        "-c",
        "k8s-service",
        "--kubeconfig",
        constants.GENERATED_KUBE_CONFIG or constants.DEFAULT_KUBECONFIG,
        "--context",
        context_name,
        pod_list[0].metadata.name,
        "-it",
        "--",
        type,
        "-il",
    ])
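
For reference, the nice_run invocation above amounts to the following kubectl command, where the angle-bracketed values are placeholders filled in from the layer, and the final two arguments launch the requested shell (type, e.g. bash) as an interactive login shell:

kubectl exec -n <layer-name> -c k8s-service --kubeconfig <kubeconfig-path> --context <context-name> <pod-name> -it -- <type> -il
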
Example #8
def test_check_opta_file_exists_file_exists(mocker: MockFixture) -> None:
    mock_config_path = "mock_config_path"
    mock_os_path_exists = mocker.patch("opta.utils.os.path.exists", return_value=True)
    mock_click_prompt = mocker.patch("opta.utils.click.prompt")
    mock_system_exit = mocker.patch("opta.utils.sys.exit")

    config_path = check_opta_file_exists(mock_config_path)

    assert config_path == mock_config_path
    mock_os_path_exists.assert_called_once_with(mock_config_path)
    mock_click_prompt.assert_not_called()
    mock_system_exit.assert_not_called()
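
Taken together, the two tests (Examples #4 and #8) pin down the contract of check_opta_file_exists: return the path unchanged when it exists, otherwise prompt for a new one, re-check it, and exit on empty input. A minimal sketch consistent with those tests, not the actual opta implementation (the real helper also accepts a prompt flag and resolves alternate .y(a)ml extensions, as seen in Example #16):

import os
import sys
import click

def check_opta_file_exists_sketch(config_path: str) -> str:
    # keep prompting until the path exists; empty input exits
    while not os.path.exists(config_path):
        config_path = click.prompt(
            "Enter a Configuration Path (Empty String will exit)",
            default="",
            type=click.STRING,
            show_default=False,
        )
        if config_path == "":
            sys.exit(0)
    return config_path
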
Example #9
File: logs.py Project: run-x/opta
def logs(
    env: Optional[str],
    config: str,
    seconds: Optional[int],
    local: Optional[bool],
    var: Dict[str, str],
) -> None:
    """
    Get stream of logs for a service

    Examples:

    opta logs -c my-service.yaml

    """

    config = check_opta_file_exists(config)
    if local:
        config = local_setup(config, input_variables=var)
    # Configure kubectl
    layer = Layer.load_from_yaml(config,
                                 env,
                                 input_variables=var,
                                 strict_input_variables=False)
    amplitude_client.send_event(
        amplitude_client.SHELL_EVENT,
        event_properties={
            "org_name": layer.org_name,
            "layer_name": layer.name
        },
    )
    layer.verify_cloud_credentials()
    gen_all(layer)
    set_kube_config(layer)
    load_opta_kube_config()
    if layer.cloud == "aws":
        modules = layer.get_module_by_type("k8s-service")
    elif layer.cloud == "google":
        modules = layer.get_module_by_type("gcp-k8s-service")
    elif layer.cloud == "local":
        modules = layer.get_module_by_type("local-k8s-service")
    elif layer.cloud == "helm":
        modules = layer.get_module_by_type("local-k8s-service")
    else:
        raise Exception(f"Currently not handling logs for cloud {layer.cloud}")
    if len(modules) == 0:
        raise UserErrors("No module of type k8s-service in the yaml file")
    elif len(modules) > 1:
        raise UserErrors(
            "Don't put more than one k8s-service module file per opta file")
    module_name = modules[0].name
    tail_module_log(layer, module_name, seconds)
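
The cloud-to-module-type dispatch in the if/elif chain above can be read as a small lookup table (an equivalent sketch, not opta's code):

K8S_SERVICE_MODULE_BY_CLOUD = {
    "aws": "k8s-service",
    "google": "gcp-k8s-service",
    "local": "local-k8s-service",
    "helm": "local-k8s-service",  # helm reuses the local module type
}
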
Example #10
def push(
    image: str,
    config: str,
    env: Optional[str],
    tag: Optional[str],
    var: Dict[str, str],
) -> None:
    config = check_opta_file_exists(config)
    if not is_service_config(config):
        raise UserErrors(
            fmt_msg("""
            Opta push can only run on service yaml files. This is an environment yaml file.
            ~See https://docs.runx.dev/docs/reference/modules/ for more details.
            ~
            ~(We know that this is an environment yaml file, because service yaml must
            ~specify the "environments" field).
            """))

    push_image(image, config, env, tag, var)
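
Per the error message above, the distinction is_service_config relies on is that only service yaml files declare an "environments" field. A minimal sketch of that check, assuming PyYAML for illustration (not the actual opta implementation):

import yaml

def is_service_config_sketch(config_path: str) -> bool:
    with open(config_path) as f:
        conf = yaml.safe_load(f) or {}
    # service yaml must specify "environments"; environment yaml does not
    return "environments" in conf
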
Example #11
File: secret.py Project: run-x/opta
def view(
    secret: str,
    env: Optional[str],
    config: str,
    local: Optional[bool],
    var: Dict[str, str],
    module: Optional[str],
) -> None:
    """View a given secret of a k8s service

    Examples:

    opta secret view -c my-service.yaml "MY_SECRET_1"
    """

    config = check_opta_file_exists(config)
    if local:
        config = local_setup(config, input_variables=var)
        env = "localopta"
    layer = Layer.load_from_yaml(config,
                                 env,
                                 input_variables=var,
                                 strict_input_variables=False)
    amplitude_client.send_event(
        amplitude_client.VIEW_SECRET_EVENT,
        event_properties={
            "org_name": layer.org_name,
            "layer_name": layer.name
        },
    )
    layer.verify_cloud_credentials()
    secret_name, namespace = get_secret_name_and_namespace(layer, module)

    set_kube_config(layer)
    create_namespace_if_not_exists(namespace)
    secrets = get_secrets(namespace, secret_name)
    if secret not in secrets:
        raise UserErrors(
            f"We couldn't find a secret named {secret}. You either need to add it to your opta.yaml file or if it's"
            f" already there - update it via secret update.")

    print(secrets[secret])
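
Complementing the update sketch in Example #1, reading secrets back requires base64-decoding the Secret's data. A sketch of what a helper like get_secrets might do, again assuming the official kubernetes client rather than opta's own code:

import base64
from typing import Dict
from kubernetes.client import CoreV1Api

def get_secrets_sketch(namespace: str, secret_name: str) -> Dict[str, str]:
    secret = CoreV1Api().read_namespaced_secret(secret_name, namespace)
    data = secret.data or {}
    # Secret values come back base64-encoded
    return {k: base64.b64decode(v).decode() for k, v in data.items()}
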
Example #12
def apply(
    config: str,
    env: Optional[str],
    refresh: bool,
    local: bool,
    image_tag: Optional[str],
    test: bool,
    auto_approve: bool,
    detailed_plan: bool,
    var: Dict[str, str],
) -> None:
    """Create or update infrastructure

    Apply changes to match the Opta configuration
    files in the current directory.

    Examples:

    opta apply --auto-approve

    opta apply --auto-approve --var variable1=value1

    opta apply -c my-config.yaml --image-tag=v1
    """
    try:
        opta_acquire_lock()
        config = check_opta_file_exists(config)
        _apply(
            config,
            env,
            refresh,
            local,
            image_tag,
            test,
            auto_approve,
            detailed_plan=detailed_plan,
            input_variables=var,
        )
    finally:
        opta_release_lock()
Example #13
File: secret.py Project: run-x/opta
def bulk_update(
    env_file: str,
    env: Optional[str],
    config: str,
    no_restart: bool,
    local: Optional[bool],
    var: Dict[str, str],
    module: Optional[str],
) -> None:
    """Bulk update a list of secrets for a k8s service using a dotenv file as in input.

    Each line of the file should be in VAR=VAL format.

    Examples:

    opta secret bulk-update -c my-service.yaml secrets.env
    """

    config = check_opta_file_exists(config)
    if local:
        config = local_setup(config, input_variables=var)
        env = "localopta"
    layer = Layer.load_from_yaml(config,
                                 env,
                                 input_variables=var,
                                 strict_input_variables=False)
    secret_name, namespace = get_secret_name_and_namespace(layer, module)

    set_kube_config(layer)
    create_namespace_if_not_exists(namespace)
    amplitude_client.send_event(amplitude_client.UPDATE_BULK_SECRET_EVENT)

    bulk_update_manual_secrets(namespace, secret_name, env_file)
    __restart_deployments(no_restart, namespace)

    logger.info("Success")
Example #14
def inspect(
    config: str,
    env: Optional[str],
    local: Optional[bool],
    var: Dict[str, str],
) -> None:
    """Displays important resources and AWS/Datadog links to them"""

    pre_check()

    config = check_opta_file_exists(config)
    if local:
        config = local_setup(config)
    layer = Layer.load_from_yaml(config, env, input_variables=var)
    amplitude_client.send_event(
        amplitude_client.INSPECT_EVENT,
        event_properties={
            "org_name": layer.org_name,
            "layer_name": layer.name
        },
    )
    layer.verify_cloud_credentials()
    gen_all(layer)
    InspectCommand(layer).run()
Example #15
File: secret.py Project: run-x/opta
def list_command(
    env: Optional[str],
    config: str,
    local: Optional[bool],
    var: Dict[str, str],
    module: Optional[str],
) -> None:
    """List the secrets (names and values) for the given k8s service module

      It expects a file in the dotenv file format.
      Each line is in VAR=VAL format.


      The output is in the dotenv file format. Each line is in
    VAR=VAL format.

      Examples:

      opta secret list -c my-service.yaml
    """
    config = check_opta_file_exists(config)
    if local:
        config = local_setup(config, input_variables=var)
        env = "localopta"
    layer = Layer.load_from_yaml(config,
                                 env,
                                 input_variables=var,
                                 strict_input_variables=False)
    amplitude_client.send_event(amplitude_client.LIST_SECRETS_EVENT)
    secret_name, namespace = get_secret_name_and_namespace(layer, module)

    set_kube_config(layer)
    create_namespace_if_not_exists(namespace)
    secrets = get_secrets(namespace, secret_name)
    for key, value in secrets.items():
        print(f"{key}={value}")
Example #16
File: layer.py Project: run-x/opta
    def load_from_yaml(
        cls,
        config: str,
        env: Optional[str],
        is_parent: bool = False,
        local: bool = False,
        json_schema: bool = False,
        stateless_mode: bool = False,
        input_variables: Optional[Dict[str, str]] = None,
        strict_input_variables: bool = True,
    ) -> Layer:
        t = None
        if config.startswith("git@"):
            logger.debug("Loading layer from git...")
            git_url, file_path = config.split("//")
            branch = "main"
            if "?" in file_path:
                file_path, file_vars = file_path.split("?")
                res = dict(
                    map(
                        lambda x: (x.split("=")[0], x.split("=")[1]),
                        file_vars.split(","),
                    )
                )
                branch = res.get("ref", branch)
            t = tempfile.mkdtemp()
            # Clone into temporary dir
            try:
                import git
            except ImportError:
                raise UserErrors(
                    "Please install git locally to be able to load environments from git"
                )

            git.Repo.clone_from(git_url, t, branch=branch, depth=1)
            config_path = os.path.join(t, file_path)
        else:
            config_path = config

        # this will make sure that the file exists and supports the alternate y(a)ml extension
        config_path = check_opta_file_exists(config_path, prompt=False)

        with open(config_path) as f:
            config_string = f.read()
        logger.debug(f"Loaded the following configfile:\n{config_string}")
        conf = yaml.load(config_string)

        conf["original_spec"] = config_string
        conf["path"] = config_path

        layer = cls.load_from_dict(
            conf, env, is_parent, stateless_mode, input_variables, strict_input_variables
        )
        if local:
            pass
        validate_yaml(config_path, layer.cloud, json_schema)
        if t is not None:
            shutil.rmtree(t)

        cls.validate_layer(layer)
        if not is_parent:
            CURRENT_CRASH_REPORTER.set_layer(layer)
        return layer
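
The git branch of load_from_yaml accepts config strings of the form git@host:org/repo.git//path/to/opta.yaml?ref=branch. The parsing logic above, extracted into a standalone sketch:

from typing import Tuple

def parse_git_config_sketch(config: str) -> Tuple[str, str, str]:
    git_url, file_path = config.split("//")
    branch = "main"  # default when no ?ref=... is given
    if "?" in file_path:
        file_path, file_vars = file_path.split("?")
        params = dict(pair.split("=") for pair in file_vars.split(","))
        branch = params.get("ref", branch)
    return git_url, file_path, branch

# parse_git_config_sketch("git@github.com:run-x/opta.git//env/opta.yaml?ref=dev")
# -> ("git@github.com:run-x/opta.git", "env/opta.yaml", "dev")
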
Example #17
def deploy(
    image: str,
    config: str,
    env: Optional[str],
    tag: Optional[str],
    auto_approve: bool,
    detailed_plan: bool,
    local: Optional[bool],
    var: Dict[str, str],
) -> None:
    """Deploys an image to Kubernetes

    - Pushes the local image to the private container registry (ECR, GCR, ACR) if the
      configuration contains `image: AUTO`; otherwise uses the image provided from a repo.

    - Updates the kubernetes deployment to use the new image.

    - Creates new pods to use the new image - automatically done by kubernetes.

    Examples:

    opta deploy -c image-auto-configuration.yaml -i image:local --auto-approve

    opta deploy -c repo-provided-configuration.yaml -e prod

    opta deploy -c my-service.yaml -i my-image:latest --local

    Documentation: https://docs.opta.dev/features/custom_image/

    """

    try:
        opta_acquire_lock()
        pre_check()

        config = check_opta_file_exists(config)
        if local:
            config = local_setup(config,
                                 image_tag=tag,
                                 refresh_local_env=True,
                                 input_variables=var)
        if not is_service_config(config):
            raise UserErrors(
                fmt_msg("""
                Opta deploy can only run on service yaml files. This is an environment yaml file.
                ~See https://docs.opta.dev/getting-started/ for more details.
                ~
                ~(We think that this is an environment yaml file, because service yaml must
                ~specify the "environments" field).
                """))

        layer = Layer.load_from_yaml(config, env, input_variables=var)
        amplitude_client.send_event(
            amplitude_client.DEPLOY_EVENT,
            event_properties={
                "org_name": layer.org_name,
                "layer_name": layer.name
            },
        )
        is_auto = __check_layer_and_image(layer, image)
        layer.verify_cloud_credentials()
        layer.validate_required_path_dependencies()
        if Terraform.download_state(layer):
            tf_lock_exists, _ = Terraform.tf_lock_details(layer)
            if tf_lock_exists:
                raise UserErrors(USER_ERROR_TF_LOCK)

        try:
            outputs = Terraform.get_outputs(layer)
        except MissingState:
            outputs = {}

        image_digest, image_tag = (None, None)
        if is_auto:
            if "docker_repo_url" not in outputs or outputs[
                    "docker_repo_url"] == "":
                logger.info(
                    "Did not find docker repository in state, so applying once to create it before deployment"
                )
                _apply(
                    config=config,
                    env=env,
                    refresh=False,
                    image_tag=None,
                    test=False,
                    local=local,
                    auto_approve=auto_approve,
                    stdout_logs=False,
                    detailed_plan=detailed_plan,
                    input_variables=var,
                )
            if image is not None:
                image_digest, image_tag = push_image(
                    image=image,
                    config=config,
                    env=env,
                    tag=tag,
                    input_variables=var,
                )
        _apply(
            config=config,
            env=env,
            refresh=False,
            image_tag=None,
            test=False,
            local=local,
            auto_approve=auto_approve,
            image_digest=image_digest,
            detailed_plan=detailed_plan,
            input_variables=var,
        )
    finally:
        opta_release_lock()
Example #18
def force_unlock(
    config: str, env: Optional[str], local: Optional[bool], var: Dict[str, str],
) -> None:
    """Release a stuck lock on the current workspace

    Manually unlock the state for the defined configuration.

    This will not modify your infrastructure. This command removes the lock on the
    state for the current workspace.

    Examples:

    opta force-unlock -c my-config.yaml -e prod
    """
    try:
        opta_acquire_lock()
        tf_flags: List[str] = []
        config = check_opta_file_exists(config)
        if local:
            config = local_setup(config, input_variables=var)
        amplitude_client.send_event(amplitude_client.FORCE_UNLOCK_EVENT)
        layer = Layer.load_from_yaml(
            config, env, input_variables=var, strict_input_variables=False
        )
        layer.verify_cloud_credentials()
        modules = Terraform.get_existing_modules(layer)
        layer.modules = [x for x in layer.modules if x.name in modules]
        gen_all(layer)

        tf_lock_exists, _ = Terraform.tf_lock_details(layer)
        if tf_lock_exists:
            Terraform.init(layer=layer)
            click.confirm(
                "This will remove the lock on the remote state."
                "\nPlease make sure that no other instance of opta command is running on this file."
                "\nDo you still want to proceed?",
                abort=True,
            )
            tf_flags.append("-force")
            Terraform.force_unlock(layer, *tf_flags)

        if Terraform.download_state(layer):
            if layer.parent is not None or "k8scluster" in modules:
                set_kube_config(layer)
                kube_context = layer.get_cloud_client().get_kube_context_name()
                pending_upgrade_release_list = Helm.get_helm_list(
                    kube_context=kube_context, status="pending-upgrade"
                )
                click.confirm(
                    "Do you also wish to Rollback the Helm releases in Pending-Upgrade State?"
                    "\nPlease make sure that no other instance of opta command is running on this file."
                    "\nDo you still want to proceed?",
                    abort=True,
                )

                for release in pending_upgrade_release_list:
                    Helm.rollback_helm(
                        kube_context,
                        release["name"],
                        namespace=release["namespace"],
                        revision=release["revision"],
                    )
    finally:
        opta_release_lock()
Example #19
def destroy(
    config: str,
    env: Optional[str],
    auto_approve: bool,
    detailed_plan: bool,
    local: Optional[bool],
    var: Dict[str, str],
) -> None:
    """Destroy all opta resources from the current config

    To destroy an environment, you have to destroy all of its services first.

    Examples:

    opta destroy -c my-service.yaml --auto-approve

    opta destroy -c my-env.yaml --auto-approve
    """
    try:
        opta_acquire_lock()
        pre_check()
        logger.warning(
            "You are destroying your cloud infra state. DO NOT, I REPEAT, DO NOT do this as "
            "an attempt to debug a weird/errored apply. What you have created is not some ephemeral object that can be "
            "tossed arbitrarily (perhaps some day) and destroying unnecessarily just to reapply typically makes it "
            "worse. If you're doing this cause you are really trying to destroy the environment entirely, then that's"
            "perfectly fine-- if not then please reach out to the opta team in the slack workspace "
            "(https://slack.opta.dev) and I promise that they'll be happy to help debug."
        )

        config = check_opta_file_exists(config)
        if local:
            config, _ = _handle_local_flag(config, False)
            _clean_tf_folder()
        layer = Layer.load_from_yaml(config, env, input_variables=var)
        event_properties: Dict = layer.get_event_properties()
        amplitude_client.send_event(
            amplitude_client.DESTROY_EVENT, event_properties=event_properties,
        )
        layer.verify_cloud_credentials()
        layer.validate_required_path_dependencies()
        if not Terraform.download_state(layer):
            logger.info(
                "The opta state could not be found. This may happen if destroy ran successfully before."
            )
            return

        tf_lock_exists, _ = Terraform.tf_lock_details(layer)
        if tf_lock_exists:
            raise UserErrors(USER_ERROR_TF_LOCK)

        # Any child layers should be destroyed first before the current layer.
        children_layers = _fetch_children_layers(layer)
        if children_layers:
            # TODO: ideally we can just automatically destroy them but it's
            # complicated...
            logger.error(
                "Found the following services that depend on this environment. Please run `opta destroy` on them first!\n"
                + "\n".join(children_layers)
            )
            raise UserErrors("Dependant services found!")

        tf_flags: List[str] = []
        if auto_approve:
            sleep_time = 5
            logger.info(
                f"{attr('bold')}Opta will now destroy the {attr('underlined')}{layer.name}{attr(0)}"
                f"{attr('bold')} layer.{attr(0)}\n"
                f"{attr('bold')}Sleeping for {attr('underlined')}{sleep_time} secs{attr(0)}"
                f"{attr('bold')}, press Ctrl+C to Abort.{attr(0)}"
            )
            time.sleep(sleep_time)
            tf_flags.append("-auto-approve")
        modules = Terraform.get_existing_modules(layer)
        layer.modules = [x for x in layer.modules if x.name in modules]
        gen_all(layer)
        Terraform.init(False, "-reconfigure", layer=layer)
        Terraform.refresh(layer)

        idx = len(layer.modules) - 1
        for module in reversed(layer.modules):
            try:
                module_address_prefix = f"-target=module.{module.name}"
                logger.info("Planning your changes (might take a minute)")
                Terraform.plan(
                    "-lock=false",
                    "-input=false",
                    "-destroy",
                    f"-out={TF_PLAN_PATH}",
                    layer=layer,
                    *list([module_address_prefix]),
                )
                PlanDisplayer.display(detailed_plan=detailed_plan)
                tf_flags = []
                if not auto_approve:
                    click.confirm(
                        "The above are the planned changes for your opta run. Do you approve?",
                        abort=True,
                    )
                else:
                    tf_flags.append("-auto-approve")
                Terraform.apply(layer, *tf_flags, TF_PLAN_PATH, no_init=True, quiet=False)
                layer.post_delete(idx)
                idx -= 1
            except Exception as e:
                raise e

        Terraform.delete_state_storage(layer)
    finally:
        opta_release_lock()
Example #20
def generate_terraform(
    ctx: click.Context,
    config: str,
    env: Optional[str],
    directory: Optional[str],
    readme_format: str,
    delete: bool,
    auto_approve: bool,
    backend: str,
    var: Dict[str, str],
) -> None:
    """(beta) Generate Terraform language files

    Examples:

    opta generate-terraform -c my-config.yaml

    opta generate-terraform -c my-config.yaml --directory ./terraform

    opta generate-terraform -c my-config.yaml --auto-approve --backend remote --readme-format md
    """

    print("This command is in beta mode")
    print(
        "If you have any error or suggestion, please let us know in our slack channel  https://slack.opta.dev\n"
    )

    config = check_opta_file_exists(config)

    pre_check()
    _clean_tf_folder()

    layer = Layer.load_from_yaml(config,
                                 env,
                                 stateless_mode=True,
                                 input_variables=var)
    layer.validate_required_path_dependencies()

    if directory is None:
        # generate the target directory
        directory = f"gen-tf-{layer.name}"
        if env is not None:
            directory = f"{directory}-{env}"

    if directory.strip() == "":
        # the user set it to empty
        raise click.UsageError("--directory can't be empty")

    event_properties: Dict = layer.get_event_properties()
    event_properties["modules"] = ",".join(
        [m.get_type() for m in layer.get_modules()])
    amplitude_client.send_event(
        amplitude_client.START_GEN_TERRAFORM_EVENT,
        event_properties=event_properties,
    )

    try:

        # work in a temp directory until command is over, to not leave a partially generated folder
        tmp_dir_obj = tempfile.TemporaryDirectory(prefix="opta-gen-tf")
        tmp_dir = tmp_dir_obj.name

        # quick exit if directory already exists and not empty
        output_dir = os.path.join(os.getcwd(), directory)
        if _dir_has_files(output_dir):
            if not delete:
                raise UserErrors(
                    f"Error: Output directory already exists: '{output_dir}'. If you want to delete it, use the '--delete' option"
                )
            print(
                f"Output directory {output_dir} already exists and --delete flag is on, deleting it"
            )
            if not auto_approve:
                state_file_warning = (
                    ", including terraform state files" if os.path.exists(
                        os.path.join(output_dir, "tfstate")) else "")
                click.confirm(
                    f"The output directory will be deleted{state_file_warning}: {output_dir}.\n Do you approve?",
                    abort=True,
                )
            _clean_folder(output_dir)

        # to keep consistent with what opta does - we could make this an option if opta tags are not desirable
        gen_opta_resource_tags(layer)

        # copy helm service dir
        if "k8s-service" in [m.type for m in layer.modules]:
            # find module root directory
            service_helm_dir = os.path.join(layer.modules[0].module_dir_path,
                                            "..", "..",
                                            "opta-k8s-service-helm")
            target_dir = os.path.join(tmp_dir, "modules",
                                      "opta-k8s-service-helm")
            logger.debug(
                f"Copying helm charts from {service_helm_dir} to {target_dir}")
            shutil.copytree(service_helm_dir, target_dir, dirs_exist_ok=True)

        # copy module directories and update the module path to point to local directory
        # note this will only copy the 'tf_module' subdirectory ex: modules/aws_base/tf_module
        for module in layer.modules:
            src_path = module.module_dir_path
            if not os.path.exists(src_path):
                logger.warning(
                    f"Could not find source directory for module '{module.name}', ignoring it"
                )
                # dynamically mark it as not exportable
                module.desc["is_exportable"] = False
                continue
            rel_path = "./" + src_path[src_path.index("modules/"):]
            abs_path = os.path.join(tmp_dir, rel_path)
            logger.debug(
                f"Copying module from {module.get_type()} to {abs_path}")
            shutil.copytree(src_path, abs_path, dirs_exist_ok=True)
            # configure module path to use new relative path
            module.module_dir_path = rel_path
            # if there is some export documentation load it now - it will be added to the readme
            export_md = os.path.join(src_path, "..", "export.md")
            if os.path.exists(export_md):
                with open(export_md, "r") as f:
                    module.desc["export"] = f.read()

        # update terraform backend to be local (currently defined in the registry)
        # this is needed as the generated terraform should work outside of opta
        original_backend = REGISTRY[layer.cloud]["backend"]
        if backend.lower() == "local":
            backend_dir = f"./tfstate/{layer.root().name}.tfstate"
            logger.debug(f"Setting terraform backend to local: {backend_dir}")
            REGISTRY[layer.cloud]["backend"] = {"local": {"path": backend_dir}}
        # generate the main.tf.json
        try:
            execution_plan = list(gen(layer))
        finally:
            REGISTRY[layer.cloud]["backend"] = original_backend

        # break down json file in multiple files
        with open(TF_FILE_PATH) as f:
            main_tf_json = json.load(f)

        for key in ["provider", "data", "output", "terraform"]:
            # extract the relevant json
            main_tf_json, extracted_json = dicts.extract(main_tf_json, key)

            # save it as it's own file
            _write_json(extracted_json, os.path.join(tmp_dir,
                                                     f"{key}.tf.json"))

        # extract modules tf.json in their own files
        main_tf_json, modules_json = dicts.extract(main_tf_json, "module")
        for name, value in modules_json["module"].items():
            _write_json({"module": {
                name: value
            }}, os.path.join(tmp_dir, f"module-{name}.tf.json"))

        # update the main file without the extracted sections
        if main_tf_json:
            # only write the file if there is anything remaining
            _write_json(
                main_tf_json,
                os.path.join(tmp_dir, f"{layer.name}.tf.json"))

        # generate the readme
        opta_cmd = f"opta {ctx.info_name} {str_options(ctx)}"
        readme_file = _generate_readme(layer, execution_plan, tmp_dir,
                                       readme_format, opta_cmd, backend)

        # we have a service file but the env was not exported
        if layer.name != layer.root().name and not os.path.exists(
                os.path.join(output_dir, "module-base.tf.json")):
            print(
                f"Warning: the output directory doesn't include terraform files for the environment named '{layer.root().name}', "
                "some dependencies might be missing for terraform to work.")

        # if everything was successful, copy tmp dir to target dir
        logger.debug(f"Copy {tmp_dir} to {output_dir}")
        shutil.copytree(tmp_dir, output_dir, dirs_exist_ok=True)
        unsupported_modules = [
            m for m in layer.get_modules() if not m.is_exportable()
        ]

        if unsupported_modules:
            unsupported_modules_str = ",".join(
                [m.get_type() for m in unsupported_modules])
            event_properties["unsupported_modules"] = unsupported_modules_str
            print(
                f"Terraform files partially generated, a few modules are not supported: {unsupported_modules_str}"
            )
        else:
            print("Terraform files generated successfully.")
        if readme_file:
            copied_readme = os.path.join(output_dir,
                                         os.path.basename(readme_file))
            print(f"Check {copied_readme} for documentation.")

    except Exception as e:
        event_properties["success"] = False
        event_properties["error_name"] = e.__class__.__name__
        raise e
    else:
        event_properties["success"] = True
    finally:
        amplitude_client.send_event(
            amplitude_client.FINISH_GEN_TERRAFORM_EVENT,
            event_properties=event_properties,
        )

        tmp_dir_obj.cleanup()
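
The dicts.extract helper used above appears to split a top-level key out of a dict, returning the remainder and the extracted portion. A plausible sketch of that behavior, for illustration only:

from typing import Any, Dict, Tuple

def extract_sketch(d: Dict[str, Any],
                   key: str) -> Tuple[Dict[str, Any], Dict[str, Any]]:
    # returns (dict without key, {key: value}); the second dict is empty if absent
    remainder = {k: v for k, v in d.items() if k != key}
    extracted = {key: d[key]} if key in d else {}
    return remainder, extracted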