Example no. 1
File: secret.py Project: run-x/opta
def update(
    secret: str,
    value: str,
    env: Optional[str],
    config: str,
    no_restart: bool,
    local: Optional[bool],
    var: Dict[str, str],
    module: Optional[str],
) -> None:
    """Update a given secret of a k8s service with a new value

    Examples:

    opta secret update -c my-service.yaml "MY_SECRET_1" "value"
    """

    config = check_opta_file_exists(config)
    if local:
        config = local_setup(config, input_variables=var)
        env = "localopta"
    layer = Layer.load_from_yaml(config,
                                 env,
                                 input_variables=var,
                                 strict_input_variables=False)
    secret_name, namespace = get_secret_name_and_namespace(layer, module)

    set_kube_config(layer)
    create_namespace_if_not_exists(namespace)
    amplitude_client.send_event(amplitude_client.UPDATE_SECRET_EVENT)
    update_secrets(namespace, secret_name, {secret: str(value)})
    __restart_deployments(no_restart, namespace)

    logger.info("Success")
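
For context, opta's CLI is built on click (note the click.confirm calls in the later examples), and the docstring shows the -c flag in use. Below is a minimal sketch of how update could be registered as a subcommand; the decorator layout, defaults, and the update_command name are assumptions for illustration, not the project's actual wiring:

from typing import Optional

import click

@click.command(name="update")
@click.argument("secret")
@click.argument("value")
@click.option("-c", "--config", default="opta.yaml", help="Opta config file.")
@click.option("-e", "--env", default=None, help="Target environment.")
@click.option("--no-restart", is_flag=True, help="Skip restarting deployments.")
def update_command(
    secret: str, value: str, config: str, env: Optional[str], no_restart: bool
) -> None:
    # Hypothetical wrapper delegating to the update() shown above;
    # local/var/module are pinned to simple defaults for brevity.
    update(secret, value, env, config, no_restart, local=False, var={}, module=None)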
Example no. 2
    def process(self, module_idx: int) -> None:
        if self.layer.is_stateless_mode() is True:
            # no k8s available, skip injecting secret
            super(DatadogProcessor, self).process(module_idx)
            return

        set_kube_config(self.layer)
        load_opta_kube_config()
        v1 = CoreV1Api()
        # Update the secrets
        namespaces = v1.list_namespace(field_selector=f"metadata.name={self.layer.name}")
        if len(namespaces.items) == 0:
            v1.create_namespace(
                body=V1Namespace(metadata=V1ObjectMeta(name=self.layer.name))
            )
        try:
            secret = v1.read_namespaced_secret("secret", self.layer.name)
            if (
                "DATADOG_API_KEY" not in secret.data
                or secret.data["DATADOG_API_KEY"] == ""
            ):
                api_key = self.create_secret(v1)
            else:
                api_key = base64.b64decode(secret.data["DATADOG_API_KEY"]).decode("utf-8")
        except ApiException:
            v1.create_namespaced_secret(
                namespace=self.layer.name,
                body=V1Secret(
                    metadata=V1ObjectMeta(name="secret"),
                    string_data={"DATADOG_API_KEY": ""},
                ),
            )
            api_key = self.create_secret(v1)
        self.module.data["api_key"] = api_key
        super(DatadogProcessor, self).process(module_idx)
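
A side note on the read/write asymmetry above: Kubernetes stores Secret values base64-encoded under .data, while string_data accepts plaintext and is encoded server-side, which is why the read path calls base64.b64decode. A standalone round-trip check (the key value is made up):

import base64

plaintext = "dd-api-key-123"  # hypothetical API key
encoded = base64.b64encode(plaintext.encode("utf-8")).decode("utf-8")
assert base64.b64decode(encoded).decode("utf-8") == plaintext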
Example no. 3
File: secret.py Project: run-x/opta
def delete(
    secret: str,
    env: Optional[str],
    config: str,
    no_restart: bool,
    local: Optional[bool],
    var: Dict[str, str],
    module: Optional[str],
) -> None:
    """Delete a secret key from a k8s service

    Examples:

    opta secret delete -c my-service.yaml "MY_SECRET_1"
    """

    config = check_opta_file_exists(config)
    if local:
        config = local_setup(config, input_variables=var)
        env = "localopta"
    layer = Layer.load_from_yaml(config,
                                 env,
                                 input_variables=var,
                                 strict_input_variables=False)
    secret_name, namespace = get_secret_name_and_namespace(layer, module)

    set_kube_config(layer)
    if check_if_namespace_exists(namespace):
        delete_secret_key(namespace, secret_name, secret)
        __restart_deployments(no_restart, namespace)
    amplitude_client.send_event(amplitude_client.UPDATE_SECRET_EVENT)
    logger.info("Success")
Example no. 4
 def delete_opta_config(self) -> None:
     set_kube_config(self.layer)
     load_opta_kube_config()
     v1 = CoreV1Api()
     secret_name = f"opta-config-{self.layer.state_storage()}"
     if check_if_secret_exists("default", secret_name):
         v1.delete_namespaced_secret(secret_name, "default")
Example no. 5
File: events.py Project: run-x/opta
def events(
    env: Optional[str],
    config: str,
    seconds: Optional[int],
    local: Optional[bool],
    var: Dict[str, str],
) -> None:
    """
    List the events for a service

    Examples:

    opta events -c my-service.yaml

    """
    if local:
        config = local_setup(config, input_variables=var)
    # Configure kubectl
    layer = Layer.load_from_yaml(config, env, strict_input_variables=False)
    amplitude_client.send_event(
        amplitude_client.SHELL_EVENT,
        event_properties={
            "org_name": layer.org_name,
            "layer_name": layer.name
        },
    )
    start_time = None
    if seconds:
        start_time = pytz.utc.localize(
            datetime.datetime.utcnow()) - datetime.timedelta(seconds=seconds)
    layer.verify_cloud_credentials()
    gen_all(layer)
    set_kube_config(layer)
    load_opta_kube_config()
    tail_namespace_events(layer, start_time)
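
The start-time arithmetic in isolation: an aware UTC "now" minus the requested window, matching the pytz.utc.localize call above (the 600-second window is illustrative):

import datetime

import pytz

now = pytz.utc.localize(datetime.datetime.utcnow())
start_time = now - datetime.timedelta(seconds=600)  # events from the last 10 minutes
assert start_time.tzinfo is not None  # timezone-aware, as localize() guarantees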
Example no. 6
def gen(
    layer: "Layer",
    existing_config: Optional["StructuredConfig"] = None,
    image_tag: Optional[str] = None,
    image_digest: Optional[str] = None,
    test: bool = False,
    check_image: bool = False,
    auto_approve: bool = False,
) -> Generator[Tuple[int, List["Module"], int], None, None]:
    """Generate TF file based on opta config file"""
    logger.debug("Loading infra blocks")

    total_module_count = len(layer.modules)
    current_modules = []
    for module_idx, module in enumerate(layer.modules):
        logger.debug(f"Generating {module_idx} - {module.name}")
        current_modules.append(module)
        if not module.halt and module_idx + 1 != total_module_count:
            continue
        service_modules = layer.get_module_by_type("k8s-service", module_idx)
        if check_image and len(service_modules) > 0 and cluster_exist(
                layer.root()):
            set_kube_config(layer)

            for service_module in service_modules:
                current_image_info = current_image_digest_tag(layer)
                if (image_digest is None
                        and (current_image_info["tag"] is not None
                             or current_image_info["digest"] is not None)
                        and image_tag is None and service_module.data.get(
                            "image", "").upper() == "AUTO" and not test):
                    if not auto_approve:
                        if click.confirm(
                                f"WARNING There is an existing deployment (tag={current_image_info['tag']}, "
                                f"digest={current_image_info['digest']}) and the pods will be killed as you "
                                f"did not specify an image tag. Would you like to keep the existing deployment alive?",
                        ):
                            image_tag = current_image_info["tag"]
                            image_digest = current_image_info["digest"]
                    else:
                        logger.info(
                            f"{attr('bold')}Using the existing deployment {attr('underlined')}"
                            f"(tag={current_image_info['tag']}, digest={current_image_info['digest']}).{attr(0)}\n"
                            f"{attr('bold')}If you wish to deploy another image, please use "
                            f"{attr('bold')}{attr('underlined')} opta deploy command.{attr(0)}"
                        )
                        image_tag = current_image_info["tag"]
                        image_digest = current_image_info["digest"]
        layer.variables["image_tag"] = image_tag
        layer.variables["image_digest"] = image_digest
        ret = layer.gen_providers(module_idx)
        ret = deep_merge(layer.gen_tf(module_idx, existing_config), ret)

        gen_tf.gen(ret, TF_FILE_PATH)

        yield module_idx, current_modules, total_module_count
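
Because gen() is a generator, callers can act after each halting module rather than only after the whole layer. A minimal consumption sketch, assuming layer is an already-loaded Layer:

for module_idx, current_modules, total_module_count in gen(layer):
    # TF_FILE_PATH has been (re)generated at this point; a real caller would
    # typically run terraform here before resuming the generator.
    names = ", ".join(m.name for m in current_modules)
    print(f"generated through module {module_idx + 1}/{total_module_count}: {names}")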
Example no. 7
def shell(env: Optional[str], config: str, type: str, local: Optional[bool],
          var: Dict[str, str]) -> None:
    """
    Get a shell into one of the pods in a service

    Examples:

    opta shell -c my-service.yaml

    """

    config = check_opta_file_exists(config)
    if local:
        config = local_setup(config, input_variables=var)
    # Configure kubectl
    layer = Layer.load_from_yaml(config,
                                 env,
                                 input_variables=var,
                                 strict_input_variables=False)
    amplitude_client.send_event(
        amplitude_client.SHELL_EVENT,
        event_properties={
            "org_name": layer.org_name,
            "layer_name": layer.name
        },
    )
    layer.verify_cloud_credentials()
    gen_all(layer)
    set_kube_config(layer)
    load_opta_kube_config()
    context_name = layer.get_cloud_client().get_kube_context_name()

    # Get a random pod in the service
    v1 = CoreV1Api()
    pod_list = v1.list_namespaced_pod(layer.name).items
    if len(pod_list) == 0:
        raise UserErrors("This service is not yet deployed")

    nice_run([
        "kubectl",
        "exec",
        "-n",
        layer.name,
        "-c",
        "k8s-service",
        "--kubeconfig",
        constants.GENERATED_KUBE_CONFIG or constants.DEFAULT_KUBECONFIG,
        "--context",
        context_name,
        pod_list[0].metadata.name,
        "-it",
        "--",
        type,
        "-il",
    ])
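
Substituting illustrative values shows the plain kubectl exec command that nice_run() ends up executing; every concrete value below (namespace, kubeconfig path, context, pod name) is hypothetical:

cmd = [
    "kubectl", "exec",
    "-n", "my-service",          # layer.name
    "-c", "k8s-service",         # the service container in the pod
    "--kubeconfig", "~/.kube/config",
    "--context", "my-context",
    "my-pod-abc123",             # pod_list[0].metadata.name
    "-it", "--",
    "bash", "-il",               # `type`, e.g. bash or sh
]
print(" ".join(cmd))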
Example no. 8
 def get_remote_config(self) -> Optional["StructuredConfig"]:
     set_kube_config(self.layer)
     load_opta_kube_config()
     v1 = CoreV1Api()
     secret_name = f"opta-config-{self.layer.state_storage()}"
     secrets: V1SecretList = v1.list_namespaced_secret(
         "default", field_selector=f"metadata.name={secret_name}")
     if len(secrets.items) == 0:
         return None
     secret: V1Secret = secrets.items[0]
     return json.loads(
         base64.b64decode(secret.data["config"]).decode("utf-8"))
Example no. 9
File: logs.py Project: run-x/opta
def logs(
    env: Optional[str],
    config: str,
    seconds: Optional[int],
    local: Optional[bool],
    var: Dict[str, str],
) -> None:
    """
    Get stream of logs for a service

    Examples:

    opta logs -c my-service.yaml

    """

    config = check_opta_file_exists(config)
    if local:
        config = local_setup(config, input_variables=var)
    # Configure kubectl
    layer = Layer.load_from_yaml(config,
                                 env,
                                 input_variables=var,
                                 strict_input_variables=False)
    amplitude_client.send_event(
        amplitude_client.SHELL_EVENT,
        event_properties={
            "org_name": layer.org_name,
            "layer_name": layer.name
        },
    )
    layer.verify_cloud_credentials()
    gen_all(layer)
    set_kube_config(layer)
    load_opta_kube_config()
    if layer.cloud == "aws":
        modules = layer.get_module_by_type("k8s-service")
    elif layer.cloud == "google":
        modules = layer.get_module_by_type("gcp-k8s-service")
    elif layer.cloud == "local":
        modules = layer.get_module_by_type("local-k8s-service")
    elif layer.cloud == "helm":
        modules = layer.get_module_by_type("local-k8s-service")
    else:
        raise Exception(f"Currently not handling logs for cloud {layer.cloud}")
    if len(modules) == 0:
        raise UserErrors("No module of type k8s-service in the yaml file")
    elif len(modules) > 1:
        raise UserErrors(
            "Don't put more than one k8s-service module file per opta file")
    module_name = modules[0].name
    tail_module_log(layer, module_name, seconds)
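
The if/elif chain over layer.cloud could equally be written as a lookup table; a behavior-preserving sketch:

CLOUD_TO_SERVICE_MODULE = {
    "aws": "k8s-service",
    "google": "gcp-k8s-service",
    "local": "local-k8s-service",
    "helm": "local-k8s-service",
}

module_type = CLOUD_TO_SERVICE_MODULE.get(layer.cloud)
if module_type is None:
    raise Exception(f"Currently not handling logs for cloud {layer.cloud}")
modules = layer.get_module_by_type(module_type)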
Example no. 10
 def add_admin_roles(self) -> None:
     if self.module.data.get("admin_arns") is None:
         return
     set_kube_config(self.layer)
     load_opta_kube_config()
     v1 = CoreV1Api()
     aws_auth_config_map: V1ConfigMap = v1.read_namespaced_config_map(
         "aws-auth", "kube-system")
     opta_arns_config_map: V1ConfigMap = v1.read_namespaced_config_map(
         "opta-arns", "default")
     admin_arns = yaml.load(opta_arns_config_map.data["adminArns"])
     current_data = aws_auth_config_map.data
     old_map_roles = yaml.load(current_data["mapRoles"])
     new_map_roles = [
         old_map_role for old_map_role in old_map_roles
         if not old_map_role["username"].startswith("opta-managed")
     ]
     old_map_users = yaml.load(current_data.get("mapUsers", "[]"))
     new_map_users = [
         old_map_user for old_map_user in old_map_users
         if not old_map_user["username"].startswith("opta-managed")
     ]
     for arn in admin_arns:
         arn_data = AWS.parse_arn(arn)
         if arn_data["resource_type"] == "user":
             new_map_users.append({
                 "groups": ["system:masters"],
                 "userarn": arn,
                 "username": "******",
             })
         elif arn_data["resource_type"] == "role":
             new_map_roles.append({
                 "groups": ["system:masters"],
                 "rolearn": arn,
                 "username": "******",
             })
         else:
             raise UserErrors(f"Invalid arn for IAM role or user: {arn}")
     stream = StringIO()
     yaml.dump(new_map_roles, stream)
     aws_auth_config_map.data["mapRoles"] = stream.getvalue()
     if len(new_map_users) > 0:
         stream = StringIO()
         yaml.dump(new_map_users, stream)
         aws_auth_config_map.data["mapUsers"] = stream.getvalue()
     v1.replace_namespaced_config_map("aws-auth",
                                      "kube-system",
                                      body=aws_auth_config_map)
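
For reference, each entry appended above serializes into the usual aws-auth ConfigMap shape, using the "opta-managed" username prefix that the filters above key on. A sketch with PyYAML (the project's own yaml import may differ) and a made-up ARN:

import yaml  # PyYAML, for illustration only

entry = {
    "groups": ["system:masters"],
    "rolearn": "arn:aws:iam::123456789012:role/my-admin",  # hypothetical ARN
    "username": "opta-managed",
}
print(yaml.dump([entry], default_flow_style=False))
# - groups:
#   - system:masters
#   rolearn: arn:aws:iam::123456789012:role/my-admin
#   username: opta-managed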
Example no. 11
    def upload_opta_config(self) -> None:
        set_kube_config(self.layer)
        load_opta_kube_config()
        v1 = CoreV1Api()
        secret_name = f"opta-config-{self.layer.state_storage()}"
        create_secret_if_not_exists("default", secret_name)
        current_secret_object: V1Secret = v1.read_namespaced_secret(
            secret_name, "default")
        current_secret_object.data = current_secret_object.data or {}
        current_secret_object.data["config"] = base64.b64encode(
            json.dumps(self.layer.structured_config()).encode("utf-8")).decode(
                "utf-8")
        v1.replace_namespaced_secret(secret_name, "default",
                                     current_secret_object)

        return None
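
upload_opta_config and get_remote_config (Example no. 8) are inverses of each other; the base64/JSON round trip in isolation, with a made-up config:

import base64
import json

structured_config = {"opta_version": "dev", "original_spec": "..."}  # illustrative
encoded = base64.b64encode(
    json.dumps(structured_config).encode("utf-8")).decode("utf-8")
assert json.loads(base64.b64decode(encoded).decode("utf-8")) == structured_config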
Example no. 12
    def _process_nginx_extra_ports(self, layer: "Layer", data: Dict[Any, Any]) -> None:
        extra_ports: List[int] = data.get("nginx_extra_tcp_ports", [])
        extra_tls_ports: List[int] = data.get("nginx_extra_tcp_ports_tls", [])

        # stateless mode - no k8s available
        if layer.is_stateless_mode() is True:
            data["nginx_extra_tcp_ports"] = {}
            return

        kubernetes.set_kube_config(layer)
        service_port_mapping = reconcile_nginx_extra_ports(update_config_map=False)

        # In a separate function to make logic more testable
        data["nginx_extra_tcp_ports"] = self.__process_nginx_extra_ports(
            extra_ports, extra_tls_ports, service_port_mapping
        )
Example no. 13
File: secret.py Project: run-x/opta
def view(
    secret: str,
    env: Optional[str],
    config: str,
    local: Optional[bool],
    var: Dict[str, str],
    module: Optional[str],
) -> None:
    """View a given secret of a k8s service

    Examples:

    opta secret view -c my-service.yaml "MY_SECRET_1"
    """

    config = check_opta_file_exists(config)
    if local:
        config = local_setup(config, input_variables=var)
        env = "localopta"
    layer = Layer.load_from_yaml(config,
                                 env,
                                 input_variables=var,
                                 strict_input_variables=False)
    amplitude_client.send_event(
        amplitude_client.VIEW_SECRET_EVENT,
        event_properties={
            "org_name": layer.org_name,
            "layer_name": layer.name
        },
    )
    layer.verify_cloud_credentials()
    secret_name, namespace = get_secret_name_and_namespace(layer, module)

    set_kube_config(layer)
    create_namespace_if_not_exists(namespace)
    secrets = get_secrets(namespace, secret_name)
    if secret not in secrets:
        raise UserErrors(
            f"We couldn't find a secret named {secret}. You either need to add it to your opta.yaml file or if it's"
            f" already there - update it via secret update.")

    print(secrets[secret])
Example no. 14
File: secret.py Project: run-x/opta
def bulk_update(
    env_file: str,
    env: Optional[str],
    config: str,
    no_restart: bool,
    local: Optional[bool],
    var: Dict[str, str],
    module: Optional[str],
) -> None:
    """Bulk update a list of secrets for a k8s service using a dotenv file as in input.

    Each line of the file should be in VAR=VAL format.

    Examples:

    opta secret bulk-update -c my-service.yaml secrets.env
    """

    config = check_opta_file_exists(config)
    if local:
        config = local_setup(config, input_variables=var)
        env = "localopta"
    layer = Layer.load_from_yaml(config,
                                 env,
                                 input_variables=var,
                                 strict_input_variables=False)
    secret_name, namespace = get_secret_name_and_namespace(layer, module)

    set_kube_config(layer)
    create_namespace_if_not_exists(namespace)
    amplitude_client.send_event(amplitude_client.UPDATE_BULK_SECRET_EVENT)

    bulk_update_manual_secrets(namespace, secret_name, env_file)
    __restart_deployments(no_restart, namespace)

    logger.info("Success")
Example no. 15
File: secret.py Project: run-x/opta
def list_command(
    env: Optional[str],
    config: str,
    local: Optional[bool],
    var: Dict[str, str],
    module: Optional[str],
) -> None:
    """List the secrets (names and values) for the given k8s service module

      It expects a file in the dotenv file format.
      Each line is in VAR=VAL format.


      The output is in the dotenv file format. Each line is in
    VAR=VAL format.

      Examples:

      opta secret list -c my-service.yaml
    """
    config = check_opta_file_exists(config)
    if local:
        config = local_setup(config, input_variables=var)
        env = "localopta"
    layer = Layer.load_from_yaml(config,
                                 env,
                                 input_variables=var,
                                 strict_input_variables=False)
    amplitude_client.send_event(amplitude_client.LIST_SECRETS_EVENT)
    secret_name, namespace = get_secret_name_and_namespace(layer, module)

    set_kube_config(layer)
    create_namespace_if_not_exists(namespace)
    secrets = get_secrets(namespace, secret_name)
    for key, value in secrets.items():
        print(f"{key}={value}")
Example no. 16
    def download_state(cls, layer: "Layer") -> bool:
        if layer.is_stateless_mode() is True:
            # no remote state for stateless mode
            return False

        if not cls.verify_storage(layer):
            logger.debug(
                fmt_msg("""
                    We store state in S3/GCP buckets/Azure Storage. Since the state bucket was not found,
                    ~this probably means that you either haven't created your opta resources yet,
                    ~or you previously successfully destroyed your opta resources.
                    """))
            return False

        state_file: str = "./tmp.tfstate"
        providers = layer.gen_providers(0)
        terraform_backends = providers.get("terraform", {}).get("backend", {})
        if "s3" in terraform_backends:
            bucket = providers["terraform"]["backend"]["s3"]["bucket"]
            region = providers["terraform"]["backend"]["s3"]["region"]
            key = providers["terraform"]["backend"]["s3"]["key"]
            logger.debug(
                f"Found an s3 backend in bucket {bucket} and key {key}, "
                "gonna try to download the statefile from there")
            s3 = boto3.client("s3", config=Config(region_name=region))
            try:
                s3.download_file(Bucket=bucket, Key=key, Filename=state_file)
            except ClientError as e:
                if e.response["Error"]["Code"] == "404":
                    # The object does not exist.
                    logger.debug("Did not find terraform state file")
                    return False
                raise
        elif "gcs" in terraform_backends:
            bucket = providers["terraform"]["backend"]["gcs"]["bucket"]
            prefix = providers["terraform"]["backend"]["gcs"]["prefix"]
            credentials, project_id = GCP.get_credentials()
            gcs_client = storage.Client(project=project_id,
                                        credentials=credentials)
            bucket_object = gcs_client.get_bucket(bucket)
            blob = storage.Blob(f"{prefix}/default.tfstate", bucket_object)
            try:
                with open(state_file, "wb") as file_obj:
                    gcs_client.download_blob_to_file(blob, file_obj)
            except GoogleClientError as e:
                if e.code == 404:
                    # The object does not exist.
                    os.remove(state_file)
                    return False
                raise
        elif "azurerm" in terraform_backends:
            storage_account_name = providers["terraform"]["backend"][
                "azurerm"]["storage_account_name"]
            container_name = providers["terraform"]["backend"]["azurerm"][
                "container_name"]
            key = providers["terraform"]["backend"]["azurerm"]["key"]

            credentials = Azure.get_credentials()
            try:
                blob = (BlobServiceClient(
                    f"https://{storage_account_name}.blob.core.windows.net/",
                    credential=credentials,
                ).get_container_client(container_name).get_blob_client(key))
                with open(state_file, "wb") as file_obj:
                    blob_data = blob.download_blob()
                    blob_data.readinto(file_obj)
            except ResourceNotFoundError:
                return False
        elif layer.cloud == "local":
            try:
                tf_file = os.path.join(
                    cls.get_local_opta_dir(),
                    "tfstate",
                    f"{layer.name}",
                )
                if os.path.exists(tf_file):
                    copyfile(tf_file, state_file)

                else:
                    return False
            except Exception:
                UserErrors(f"Could copy local state file to {state_file}")

        elif layer.cloud == "helm":
            set_kube_config(layer)
            load_opta_kube_config()
            v1 = CoreV1Api()
            secret_name = f"tfstate-default-{layer.state_storage()}"
            secrets: V1SecretList = v1.list_namespaced_secret(
                "default", field_selector=f"metadata.name={secret_name}")
            if len(secrets.items) == 0:
                return False
            secret: V1Secret = secrets.items[0]
            decoded_secret = gzip.decompress(
                base64.b64decode(secret.data["tfstate"]))
            with open(state_file, "wb") as file_obj:
                file_obj.write(decoded_secret)
        else:
            raise UserErrors(
                "Need to get state from S3 or GCS or Azure storage")

        with open(state_file, "r") as file:
            raw_state = file.read().strip()
        os.remove(state_file)
        if raw_state != "":
            cls.downloaded_state[layer.name] = json.loads(raw_state)
            return True
        return False
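
The helm branch expects Terraform state stored gzip-compressed and base64-encoded in a Secret; the inverse of the decode above, shown with made-up state:

import base64
import gzip

raw_state = b'{"version": 4, "resources": []}'  # illustrative tfstate content
stored = base64.b64encode(gzip.compress(raw_state)).decode("utf-8")
assert gzip.decompress(base64.b64decode(stored)) == raw_state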
Example no. 17
def force_unlock(
    config: str, env: Optional[str], local: Optional[bool], var: Dict[str, str],
) -> None:
    """Release a stuck lock on the current workspace

    Manually unlock the state for the defined configuration.

    This will not modify your infrastructure. This command removes the lock on the
    state for the current workspace.

    Examples:

    opta force-unlock -c my-config.yaml -e prod
    """
    try:
        opta_acquire_lock()
        tf_flags: List[str] = []
        config = check_opta_file_exists(config)
        if local:
            config = local_setup(config, input_variables=var)
        amplitude_client.send_event(amplitude_client.FORCE_UNLOCK_EVENT)
        layer = Layer.load_from_yaml(
            config, env, input_variables=var, strict_input_variables=False
        )
        layer.verify_cloud_credentials()
        modules = Terraform.get_existing_modules(layer)
        layer.modules = [x for x in layer.modules if x.name in modules]
        gen_all(layer)

        tf_lock_exists, _ = Terraform.tf_lock_details(layer)
        if tf_lock_exists:
            Terraform.init(layer=layer)
            click.confirm(
                "This will remove the lock on the remote state."
                "\nPlease make sure that no other instance of opta command is running on this file."
                "\nDo you still want to proceed?",
                abort=True,
            )
            tf_flags.append("-force")
            Terraform.force_unlock(layer, *tf_flags)

        if Terraform.download_state(layer):
            if layer.parent is not None or "k8scluster" in modules:
                set_kube_config(layer)
                kube_context = layer.get_cloud_client().get_kube_context_name()
                pending_upgrade_release_list = Helm.get_helm_list(
                    kube_context=kube_context, status="pending-upgrade"
                )
                click.confirm(
                    "Do you also wish to Rollback the Helm releases in Pending-Upgrade State?"
                    "\nPlease make sure that no other instance of opta command is running on this file."
                    "\nDo you still want to proceed?",
                    abort=True,
                )

                for release in pending_upgrade_release_list:
                    Helm.rollback_helm(
                        kube_context,
                        release["name"],
                        namespace=release["namespace"],
                        revision=release["revision"],
                    )
    finally:
        opta_release_lock()
Example no. 18
 def pre_hook(self, module_idx: int) -> None:
     set_kube_config(self.layer)
     list_namespaces()
     super(AwsK8sBaseProcessor, self).pre_hook(module_idx)