Example #1
File: azure.py Project: run-x/opta
    def get_remote_config(self) -> Optional["StructuredConfig"]:
        providers = self.layer.gen_providers(0)
        credentials = self.get_credentials()

        storage_account_name = providers["terraform"]["backend"]["azurerm"][
            "storage_account_name"]
        container_name = providers["terraform"]["backend"]["azurerm"][
            "container_name"]

        storage_client = ContainerClient(
            account_url=f"https://{storage_account_name}.blob.core.windows.net",
            container_name=container_name,
            credential=credentials,
        )
        config_path = f"opta_config/{self.layer.name}"
        try:
            download_stream: StorageStreamDownloader = storage_client.download_blob(
                config_path)
            data = download_stream.readall()
            return json.loads(data)
        except Exception:  # Backwards compatibility
            logger.debug(
                "Could not successfully download and parse any pre-existing config"
            )
            return None
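This first example is just the azure-storage-blob flow of downloading a JSON blob and tolerating its absence. A minimal standalone sketch of the same idea, assuming DefaultAzureCredential in place of opta's get_credentials helper and with placeholder account, container, and blob names:

import json
from typing import Any, Optional

from azure.identity import DefaultAzureCredential
from azure.storage.blob import ContainerClient


def read_json_blob(account: str, container: str, blob_name: str) -> Optional[Any]:
    client = ContainerClient(
        account_url=f"https://{account}.blob.core.windows.net",
        container_name=container,
        credential=DefaultAzureCredential(),  # any TokenCredential works here
    )
    try:
        return json.loads(client.download_blob(blob_name).readall())
    except Exception:
        # Mirror the original: treat any failure as "no pre-existing config".
        return None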
Example #2
File: azure.py Project: run-x/opta
    def cluster_exist(self) -> bool:
        providers = self.layer.root().gen_providers(0)

        ensure_installed("az")

        rg_name = providers["terraform"]["backend"]["azurerm"][
            "resource_group_name"]
        subscription_id = providers["provider"]["azurerm"]["subscription_id"]
        cluster_name = self.layer.get_cluster_name()
        try:
            output = nice_run(
                [
                    "az",
                    "aks",
                    "list",
                    "--subscription",
                    subscription_id,
                    "--resource-group",
                    rg_name,
                ],
                capture_output=True,
                check=True,
            ).stdout
            output_list = json.loads(output)
            return any([x.get("name") == cluster_name for x in output_list])
        except Exception:
            return False
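Without opta's nice_run wrapper, the same existence check is a plain subprocess call: "az aks list" prints a JSON array of cluster objects, each carrying a "name" field. A rough sketch with placeholder arguments:

import json
import subprocess


def aks_cluster_exists(subscription_id: str, rg_name: str, cluster_name: str) -> bool:
    try:
        proc = subprocess.run(
            ["az", "aks", "list",
             "--subscription", subscription_id,
             "--resource-group", rg_name,
             "--output", "json"],
            capture_output=True,
            check=True,
            text=True,
        )
        clusters = json.loads(proc.stdout)
        return any(c.get("name") == cluster_name for c in clusters)
    except (subprocess.CalledProcessError, FileNotFoundError, json.JSONDecodeError):
        return False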
Example #3
def _download_remote_blob(s3_client: S3Client, bucket: str,
                          key: str) -> Optional["StructuredConfig"]:
    try:
        obj = s3_client.get_object(Bucket=bucket, Key=key)
        return json.loads(obj["Body"].read())
    except Exception:
        logger.debug(
            "Could not successfully download and parse any pre-existing config"
        )
        return None
Example #4
def _download_remote_blob(bucket: Bucket,
                          key: str) -> Optional["StructuredConfig"]:
    try:
        blob = storage.Blob(key, bucket)
        return json.loads(blob.download_as_text())
    except Exception:  # Backwards compatibility
        logger.debug(
            "Could not successfully download and parse any pre-existing config"
        )
        return None
Example #5
    def get_helm_list(
            cls,
            kube_context: str,
            namespace: Optional[str] = None,
            release: Optional[str] = None,
            status: Optional[str] = None) -> List:  # type: ignore # nosec
        """
        Returns a list of helm releases.
        The releases can be filtered by namespace, release name and status.
        """
        cls.validate_helm_installed()
        namespaces: List[str] = []
        if namespace is not None:
            namespaces.append("--namespace")
            namespaces.append(str(namespace))
        else:
            namespaces.append("--all-namespaces")

        try:
            helm_list_process = nice_run(
                [
                    "helm",
                    "list",
                    "--all",
                    "--kube-context",
                    kube_context,
                    "--kubeconfig",
                    constants.GENERATED_KUBE_CONFIG
                    or constants.DEFAULT_KUBECONFIG,
                    *namespaces,
                    "-o",
                    "json",
                ],
                capture_output=True,
                check=True,
            )
        except CalledProcessError as e:
            raise UserErrors(f"Error: {e.stderr}")
        except Exception as e:
            raise e

        helm_list = json.loads(helm_list_process.stdout)

        if release is not None:
            helm_list = [
                helm_release for helm_release in helm_list
                if helm_release["name"] == release
            ]
        if status is not None:
            helm_list = [
                helm_release for helm_release in helm_list
                if helm_release["status"] == status
            ]
        return helm_list
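The method above is essentially a wrapper around "helm list --all -o json", which emits an array of release objects with keys such as name, namespace, and status. A stripped-down sketch of the same filtering, leaving out the kube-context and kubeconfig plumbing:

import json
import subprocess
from typing import Any, Dict, List, Optional


def helm_releases(namespace: Optional[str] = None,
                  status: Optional[str] = None) -> List[Dict[str, Any]]:
    cmd = ["helm", "list", "--all", "-o", "json"]
    cmd += ["--namespace", namespace] if namespace else ["--all-namespaces"]
    out = subprocess.run(cmd, capture_output=True, check=True, text=True).stdout
    releases = json.loads(out)
    if status is not None:
        releases = [r for r in releases if r.get("status") == status]
    return releases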
Example #6
    def get_version(cls) -> str:
        try:
            out = nice_run(
                ["terraform", "version", "-json"],
                check=True,
                capture_output=True,
                tee=False,
            ).stdout
            terraform_data = json.loads(out)
            return terraform_data["terraform_version"]
        except CalledProcessError:
            raise
Example #7
    def _get_k8s_metadata_values(self, resource_properties: dict) -> dict:
        if "metadata" not in resource_properties:
            return {}

        k8s_values: Any = {}
        for chart in resource_properties["metadata"]:
            chart_values = json.loads(chart.get("values", "{}"))
            k8s_values = deep_merge(k8s_values, chart_values)

        values: Any = {}
        for k, v in k8s_values.items():
            values[f"k8s-{k}"] = v

        return values
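deep_merge here is an opta helper that combines the per-chart values dictionaries. As an illustration only (not opta's actual implementation), a generic recursive merge in which nested dicts are combined and later values win could look like this:

from typing import Any, Dict


def merge_dicts(base: Dict[Any, Any], override: Dict[Any, Any]) -> Dict[Any, Any]:
    # Nested dicts are merged recursively; any other value from `override` wins.
    merged = dict(base)
    for key, value in override.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = merge_dicts(merged[key], value)
        else:
            merged[key] = value
    return merged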
Example #8
    def get_terraform_lock_id(self) -> str:
        bucket = self.layer.state_storage()
        providers = self.layer.gen_providers(0)
        dynamodb_table = providers["terraform"]["backend"]["s3"][
            "dynamodb_table"]

        tf_lock_data = self.__get_dynamodb(dynamodb_table).get_item(
            TableName=dynamodb_table,
            Key={"LockID": {
                "S": f"{bucket}/{self.layer.name}"
            }},
        )

        try:
            return json.loads(
                tf_lock_data["Item"]["Info"]["S"])["ID"]  # type: ignore
        except Exception:
            return ""
Example #9
File: azure.py Project: run-x/opta
    def get_terraform_lock_id(self) -> str:
        providers = self.layer.gen_providers(0)
        credentials = self.get_credentials()
        storage_account_name = providers["terraform"]["backend"]["azurerm"][
            "storage_account_name"]
        container_name = providers["terraform"]["backend"]["azurerm"][
            "container_name"]
        key = providers["terraform"]["backend"]["azurerm"]["key"]

        try:
            blob = (BlobServiceClient(
                f"https://{storage_account_name}.blob.core.windows.net/",
                credential=credentials,
            ).get_container_client(container_name).get_blob_client(key))
            b64_encoded_tf_lock = blob.get_blob_properties(
            ).metadata["Terraformlockid"]
            tf_lock_data = json.loads(base64.b64decode(b64_encoded_tf_lock))
            return tf_lock_data["ID"]
        except ResourceNotFoundError:
            return ""
        except Exception:
            return ""
Example #10
    def download_state(cls, layer: "Layer") -> bool:
        if layer.is_stateless_mode() is True:
            # no remote state for stateless mode
            return False

        if not cls.verify_storage(layer):
            logger.debug(
                fmt_msg("""
                    We store state in S3/GCP buckets/Azure Storage. Since the state bucket was not found,
                    ~this probably means that you either haven't created your opta resources yet,
                    ~or you previously successfully destroyed your opta resources.
                    """))
            return False

        state_file: str = "./tmp.tfstate"
        providers = layer.gen_providers(0)
        terraform_backends = providers.get("terraform", {}).get("backend", {})
        if "s3" in terraform_backends:
            bucket = providers["terraform"]["backend"]["s3"]["bucket"]
            region = providers["terraform"]["backend"]["s3"]["region"]
            key = providers["terraform"]["backend"]["s3"]["key"]
            logger.debug(
                f"Found an s3 backend in bucket {bucket} and key {key}, "
                "gonna try to download the statefile from there")
            s3 = boto3.client("s3", config=Config(region_name=region))
            try:
                s3.download_file(Bucket=bucket, Key=key, Filename=state_file)
            except ClientError as e:
                if e.response["Error"]["Code"] == "404":
                    # The object does not exist.
                    logger.debug("Did not find terraform state file")
                    return False
                raise
        elif "gcs" in terraform_backends:
            bucket = providers["terraform"]["backend"]["gcs"]["bucket"]
            prefix = providers["terraform"]["backend"]["gcs"]["prefix"]
            credentials, project_id = GCP.get_credentials()
            gcs_client = storage.Client(project=project_id,
                                        credentials=credentials)
            bucket_object = gcs_client.get_bucket(bucket)
            blob = storage.Blob(f"{prefix}/default.tfstate", bucket_object)
            try:
                with open(state_file, "wb") as file_obj:
                    gcs_client.download_blob_to_file(blob, file_obj)
            except GoogleClientError as e:
                if e.code == 404:
                    # The object does not exist.
                    os.remove(state_file)
                    return False
                raise
        elif "azurerm" in terraform_backends:
            storage_account_name = providers["terraform"]["backend"][
                "azurerm"]["storage_account_name"]
            container_name = providers["terraform"]["backend"]["azurerm"][
                "container_name"]
            key = providers["terraform"]["backend"]["azurerm"]["key"]

            credentials = Azure.get_credentials()
            try:
                blob = (BlobServiceClient(
                    f"https://{storage_account_name}.blob.core.windows.net/",
                    credential=credentials,
                ).get_container_client(container_name).get_blob_client(key))
                with open(state_file, "wb") as file_obj:
                    blob_data = blob.download_blob()
                    blob_data.readinto(file_obj)
            except ResourceNotFoundError:
                return False
        elif layer.cloud == "local":
            try:
                tf_file = os.path.join(
                    cls.get_local_opta_dir(),
                    "tfstate",
                    f"{layer.name}",
                )
                if os.path.exists(tf_file):
                    copyfile(tf_file, state_file)

                else:
                    return False
            except Exception:
                raise UserErrors(
                    f"Could not copy local state file to {state_file}")

        elif layer.cloud == "helm":
            set_kube_config(layer)
            load_opta_kube_config()
            v1 = CoreV1Api()
            secret_name = f"tfstate-default-{layer.state_storage()}"
            secrets: V1SecretList = v1.list_namespaced_secret(
                "default", field_selector=f"metadata.name={secret_name}")
            if len(secrets.items) == 0:
                return False
            secret: V1Secret = secrets.items[0]
            decoded_secret = gzip.decompress(
                base64.b64decode(secret.data["tfstate"]))
            with open(state_file, "wb") as file_obj:
                file_obj.write(decoded_secret)
        else:
            raise UserErrors(
                "Need to get state from S3 or GCS or Azure storage")

        with open(state_file, "r") as file:
            raw_state = file.read().strip()
        os.remove(state_file)
        if raw_state != "":
            cls.downloaded_state[layer.name] = json.loads(raw_state)
            return True
        return False
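In the helm branch, the state lives where Terraform's kubernetes backend puts it: a Secret named tfstate-<workspace>-<suffix> whose tfstate data key holds the gzip-compressed state. A sketch of just that read path with the kubernetes client (the suffix is a placeholder and a usable kubeconfig is assumed):

import base64
import gzip
import json

from kubernetes import client, config


def read_tfstate_secret(secret_suffix: str, namespace: str = "default") -> dict:
    config.load_kube_config()  # or config.load_incluster_config() in-cluster
    v1 = client.CoreV1Api()
    secret = v1.read_namespaced_secret(f"tfstate-default-{secret_suffix}", namespace)
    # Secret data values come back base64-encoded; the payload itself is gzipped.
    raw = gzip.decompress(base64.b64decode(secret.data["tfstate"]))
    return json.loads(raw)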
Example #11
def reconcile_nginx_extra_ports(*, update_config_map: bool = True) -> Dict[int, str]:
    """
    Runs the pseudo-controller that scans the cluster for Kubernetes services that expose raw TCP ports.
    If :update_config_map is True (default), it will also update the nginx port config map.
    The ConfigMap won't be updated with any ports not already defined in it.

    Returns the port mapping defined by services, in the form of a dict of "external port -> 'namespace/service_name:service_port'"
    """

    services = kubernetes.list_services()

    # Filter out any services that are being deleted
    services = [
        service for service in services if service.metadata.deletion_timestamp is None
    ]

    # Skip services that don't have the annotation
    services = [
        service
        for service in services
        if NGINX_EXTRA_TCP_PORTS_ANNOTATION in (service.metadata.annotations or {})
    ]

    # Give precedence to older services in case of port conflicts
    services.sort(key=lambda svc: svc.metadata.creation_timestamp)

    port_mapping: Dict[int, str] = {}
    for service in services:
        id = f"{service.metadata.namespace}/{service.metadata.name}"

        extra_ports_annotation: str = service.metadata.annotations[
            NGINX_EXTRA_TCP_PORTS_ANNOTATION
        ]

        try:
            extra_ports: Dict[str, str] = json.loads(extra_ports_annotation)
        except json.JSONDecodeError as e:
            logger.warning(
                "Error decoding the %s annotation on service %s",
                NGINX_EXTRA_TCP_PORTS_ANNOTATION,
                id,
                exc_info=e,
            )
            continue

        if not isinstance(extra_ports, dict):
            logger.warning(
                "Contents of the %s annotation not expected format on service %s",
                NGINX_EXTRA_TCP_PORTS_ANNOTATION,
                id,
            )
            continue

        for nginx_port_str, target_port in extra_ports.items():
            try:
                nginx_port = int(nginx_port_str)
            except ValueError:
                logger.warning(
                    "Contents of the %s annotation not expected format (non-int key) on service %s",
                    NGINX_EXTRA_TCP_PORTS_ANNOTATION,
                    id,
                )
                continue

            if nginx_port in port_mapping:
                logger.warning(
                    "Multiple services found that bind to the %i ingress port. Prioritizing oldest service",
                    nginx_port,
                )
                # Skip conflicting ports
                continue

            port_mapping[
                nginx_port
            ] = f"{service.metadata.namespace}/{service.metadata.name}:{target_port}"

    if not update_config_map:
        return port_mapping

    cm = kubernetes.get_config_map(*NGINX_TCP_CONFIGMAP)
    if cm is None:
        # We can't update anything if we don't have the config map
        return port_mapping

    desired_mapping = {str(port): service for port, service in port_mapping.items()}

    # Don't add any new keys; any key we no longer have a service for is reset to the placeholder service
    current_data: Dict[str, str] = cm.data or {}
    desired_data = {
        port: desired_mapping.get(port, NGINX_PLACEHOLDER_SERVICE)
        for port in current_data
    }

    if desired_data != current_data:
        # We don't handle any conflicts here (by passing resource version), but we probably don't need to until this is implemented as an actual controller
        kubernetes.update_config_map_data(
            cm.metadata.namespace, cm.metadata.name, desired_data
        )

    # We return port_mapping instead of desired_data because we always want to return services that have "requested" to be mapped,
    # even if nginx hasn't been configured to expose them.
    return port_mapping
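For reference, the annotation the controller consumes is a JSON object mapping external nginx ports to service target ports. A made-up value on a Service named cache in namespace prod, run through the same parsing logic:

import json

# Hypothetical annotation value on Service "cache" in namespace "prod".
annotation_value = '{"6379": "redis", "5432": "postgres"}'

extra_ports = json.loads(annotation_value)
port_mapping = {
    int(nginx_port): f"prod/cache:{target_port}"
    for nginx_port, target_port in extra_ports.items()
}
print(port_mapping)  # {6379: 'prod/cache:redis', 5432: 'prod/cache:postgres'}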