예제 #1
0
def _make_installation_file() -> None:
    """Download the opta installation script and save it as an executable temp file."""
    logger.debug(f"Querying {OPTA_INSTALL_URL}")
    response = requests.get(OPTA_INSTALL_URL)
    response.raise_for_status()
    with open(TEMP_INSTALLATION_FILENAME, "w") as script_file:
        script_file.write(response.text)
    # NOTE(review): mode 777 is world-writable; 755 would be safer -- confirm
    # nothing relies on other users editing this file.
    nice_run(["chmod", "777", TEMP_INSTALLATION_FILENAME])
예제 #2
0
파일: upgrade.py 프로젝트: run-x/opta
def check_version_upgrade(is_upgrade_call: bool = False) -> bool:
    """Return True when a newer opta version is available (or an upgrade just ran).

    To avoid spamming the user, the remote lookup is only done
    non-deterministically (probability UPGRADE_CHECK_PROBABILITY) unless this
    is an explicit upgrade call.
    """
    if OptaUpgrade.successful:
        OptaUpgrade.unset()
        return True
    if not (is_upgrade_call or _should_check_for_version_upgrade()):
        return False
    logger.info("Checking for version upgrades...")
    try:
        latest_version = _get_latest_version()
    except Exception as e:
        logger.debug(e, exc_info=True)
        logger.info("Unable to find latest version.")
        return False
    try:
        installed = semver.VersionInfo.parse(VERSION.strip("v"))
        if installed.compare(latest_version) < 0:
            logger.warning(
                "New version available.\n"
                f"You have {VERSION} installed. Latest version is {latest_version}."
            )
            if not is_upgrade_call:
                print(
                    f"Upgrade instructions are available at {UPGRADE_INSTRUCTIONS_URL}  or simply use the `opta upgrade` command"
                )
            return True
        logger.info("User on the latest version.")
    except Exception as e:
        logger.info(f"Semver check failed with error {e}")
    return False
예제 #3
0
    def process(self, module_idx: int) -> None:
        """Wire the aws-base log bucket into this module and resolve relative file paths."""
        # Prefer a sibling aws-base module; otherwise fall back to the parent layer's.
        base_modules = self.layer.get_module_by_type("aws-base", module_idx)
        inherited = False
        if not base_modules and self.layer.parent is not None:
            inherited = True
            base_modules = self.layer.parent.get_module_by_type("aws-base")

        if not base_modules:
            logger.debug(
                "Did not find the aws-base module. "
                "This is highly recommended even for SPA as it sets up logging/auditing buckets"
            )
        else:
            if inherited:
                source = "data.terraform_remote_state.parent.outputs"
            else:
                source = f"module.{base_modules[0].name}"
            self.module.data["s3_log_bucket_name"] = self.module.data.get(
                "s3_log_bucket_name",
                f"${{{{{source}.s3_log_bucket_name}}}}")
        # A relative "files" path is interpreted against the opta yaml's directory.
        file_path: Optional[str] = self.module.data.get("files")
        if file_path is not None and not file_path.startswith("/"):
            self.module.data["files"] = os.path.join(
                os.path.dirname(self.layer.path), file_path)
        super(AwsS3Processor, self).process(module_idx)
예제 #4
0
파일: kubernetes.py 프로젝트: run-x/opta
def delete_persistent_volume_claims(namespace: str,
                                    opta_managed: bool = True,
                                    async_req: bool = True) -> None:
    """delete_persistent_volume_claims

    Delete Persistent Volume Claims for a given namespace.

    Deletion requests are issued asynchronously by default (async_req=True);
    pass async_req=False to wait for each deletion to complete.

    :param str namespace: namespace to search for the Persistent Volume Claims
    :param bool opta_managed: filter to only delete objects managed by opta
    :param bool async_req: execute each delete request asynchronously
    """

    claims = list_persistent_volume_claims(namespace=namespace,
                                           opta_managed=opta_managed)
    if not claims:
        logger.debug(
            f"No persistent volume claim (opta_managed: {opta_managed}) found in namespace '{namespace}', skipping persistent volume cleanup"
        )
        return

    logger.info(f"Deleting persistent volumes in namespace '{namespace}'")

    # delete the PVCs
    # Note: when deleting the PVC, the PV are automatically deleted
    for claim in claims:
        logger.info(
            f"Deleting persistent volume claim '{claim.metadata.name}'")
        delete_persistent_volume_claim(namespace,
                                       claim.metadata.name,
                                       async_req=async_req)
예제 #5
0
파일: azure.py 프로젝트: run-x/opta
    def get_remote_config(self) -> Optional["StructuredConfig"]:
        """Fetch and parse the stored opta config blob from Azure; None if unavailable."""
        providers = self.layer.gen_providers(0)
        credentials = self.get_credentials()

        backend = providers["terraform"]["backend"]["azurerm"]
        account_name = backend["storage_account_name"]
        container = backend["container_name"]

        client = ContainerClient(
            account_url=f"https://{account_name}.blob.core.windows.net",
            container_name=container,
            credential=credentials,
        )
        blob_path = f"opta_config/{self.layer.name}"
        try:
            stream: StorageStreamDownloader = client.download_blob(blob_path)
            return json.loads(stream.readall())
        except Exception:  # Backwards compatibility
            logger.debug(
                "Could not successfully download and parse any pre-existing config"
            )
            return None
예제 #6
0
 def post_hook(self, module_idx: int,
               exception: Optional[Exception]) -> None:
     """Enable CloudWatch metrics on the default node group's autoscaling group.

     Runs only when the module applied cleanly and `enable_metrics` is set.
     Pages through the region's autoscaling groups looking for the one owned
     by this layer's EKS cluster whose nodegroup name starts with
     "<cluster>-default", then enables 1-minute granularity metrics on it.

     :param module_idx: index of the module just processed (unused here)
     :param exception: exception raised during processing, if any
     """
     if exception is not None or not self.module.data.get(
             "enable_metrics", False):
         logger.debug(
             "Not enabling metrics for default node group's autoscaling group"
         )
         return
     providers = self.layer.gen_providers(0)
     region = providers["provider"]["aws"]["region"]
     autoscaling_client: AutoScalingClient = boto3.client(
         "autoscaling", config=Config(region_name=region))
     # Loop-invariant: hoisted out of the pagination loop.
     cluster_name = f"opta-{self.layer.root().name}"
     kwargs: Dict[str, str] = {}
     while True:
         response = autoscaling_client.describe_auto_scaling_groups(
             **kwargs  # type: ignore
         )
         current_token = response.get("NextToken", "")
         kwargs["NextToken"] = current_token
         for group in response["AutoScalingGroups"]:
             tag_dict = {x["Key"]: x["Value"] for x in group["Tags"]}
             if tag_dict.get(f"kubernetes.io/cluster/{cluster_name}"
                             ) == "owned" and tag_dict.get(
                                 "eks:nodegroup-name",
                                 "").startswith(f"{cluster_name}-default"):
                 group_name = group["AutoScalingGroupName"]
                 logger.debug(
                     f"Enabling metrics for autoscaling group {group_name}")
                 autoscaling_client.enable_metrics_collection(
                     AutoScalingGroupName=group_name, Granularity="1Minute")
                 # BUG FIX: this return previously sat at for-loop-body level,
                 # so only the first group of the first page was ever examined
                 # and the pagination below was dead code. Return only once
                 # the target group has been found and updated.
                 return None
         if current_token == "":  # nosec
             break
예제 #7
0
    def process(self, module_idx: int) -> None:
        """Validate helm chart inputs and absolutize values-file paths."""
        if "repository" in self.module.data and "chart_version" not in self.module.data:
            raise UserErrors(
                "If you specify a remote repository you must give a version.")
        chart_values = self.module.data.get("values", {})
        if chart_values:
            buffer = StringIO()
            yaml.dump(chart_values, buffer)
            logger.debug(
                f"These are the values passed in from the opta yaml:\n{buffer.getvalue()}"
            )
        # values_file (single) and values_files (list) are mutually exclusive.
        single_file = self.module.data.get("values_file", None)
        file_list = self.module.data.get("values_files", [])
        if single_file is not None and file_list != []:
            raise UserErrors(
                "Can't have values_file and values_files at the same time. Either put all of your files in "
                "values_files or have one single file and put it in values_file"
            )
        if single_file:
            file_list.append(single_file)

        # Resolve every relative path against the directory of the opta yaml.
        resolved_paths = [
            path if path.startswith("/") else os.path.join(
                os.path.dirname(self.layer.path), path)
            for path in file_list
        ]
        self.module.data["values_files"] = resolved_paths

        super(HelmChartProcessor, self).process(module_idx)
예제 #8
0
파일: kubernetes.py 프로젝트: run-x/opta
def restart_deployment(namespace: str, deployment: str) -> None:
    """restart_deployment

    Restart the deployment in the specified namespace; this honors the
    deployment's configured update strategy.

    :param str namespace: namespace to search in.
    :param str deployment: the name of the deployment to restart
    """

    load_opta_kube_config()
    apps_client = AppsV1Api()

    logger.debug(
        f"Restarting deployment '{deployment}' in namespace '{namespace}'")
    # note this is similar implementation to kubectl rollout restart
    # https://github.com/kubernetes/kubectl/blob/release-1.22/pkg/polymorphichelpers/objectrestarter.go#L41
    # Patching the pod-template annotation below changes the template, which
    # triggers a new rollout of the deployment's pods.
    now = str(datetime.datetime.utcnow().isoformat("T") + "Z")
    body = {
        "spec": {
            "template": {
                "metadata": {
                    "annotations": {
                        "kubectl.kubernetes.io/restartedAt": now
                    }
                }
            }
        }
    }
    apps_client.patch_namespaced_deployment(deployment, namespace, body)
예제 #9
0
파일: kubernetes.py 프로젝트: run-x/opta
def list_ingress_classes() -> List[V1IngressClass]:
    """Return every IngressClass defined in the cluster."""
    load_opta_kube_config()
    client = NetworkingV1Api()

    logger.debug("Listing ingress classes")
    result: V1IngressClassList = client.list_ingress_class()
    return result.items
예제 #10
0
파일: kubernetes.py 프로젝트: run-x/opta
def cluster_exist(layer: "Layer") -> bool:
    """Return whether the layer's kubernetes cluster exists.

    In stateless mode there is no state to consult, so this always reports
    False. (Previously the early return was nested under the DEBUG-level
    check, so the result depended on the configured log level -- a bug.)

    :param layer: layer whose cloud client is queried
    """
    if layer.is_stateless_mode() is True:
        if logger.isEnabledFor(DEBUG):
            logger.debug(
                "cluster_exist called in stateless mode, verify implementation. See stack trace below:"
            )
            traceback.print_stack()
        # BUG FIX: bail out in stateless mode regardless of log level.
        return False
    return layer.get_cloud_client().cluster_exist()
예제 #11
0
 def cleanup_dangling_enis(self, region: str) -> None:
     """Delete EKS-related network interfaces left behind after cluster destroy.

     Finds the single opta-managed VPC for this layer (by tags), pages through
     its network interfaces, and deletes the ones that look like orphaned EKS
     ENIs.

     :param region: AWS region the layer's VPC lives in
     """
     client: EC2Client = boto3.client("ec2",
                                      config=Config(region_name=region))
     # The opta VPC is identified by its layer + opta tags.
     vpcs = client.describe_vpcs(Filters=[
         {
             "Name": "tag:layer",
             "Values": [self.layer.name]
         },
         {
             "Name": "tag:opta",
             "Values": ["true"]
         },
     ])["Vpcs"]
     if len(vpcs) == 0:
         logger.debug(f"Opta vpc for layer {self.layer.name} not found")
         return
     elif len(vpcs) > 1:
         # Ambiguous match -- refuse to guess which VPC to clean rather than
         # risk deleting interfaces in the wrong one.
         logger.debug(
             f"Weird, found multiple vpcs for layer {self.layer.name}: {[x['VpcId'] for x in vpcs]}"
         )
         return
     vpc = vpcs[0]
     vpc_id = vpc["VpcId"]
     dangling_enis: List[NetworkInterfaceTypeDef] = []
     next_token = None
     logger.debug("Seeking dangling enis from k8s cluster just destroyed")
     # Collect first, delete after: avoids mutating the result set while the
     # pagination below is still in flight.
     while True:
         if next_token is None:
             describe_enis = client.describe_network_interfaces(
                 Filters=[{
                     "Name": "vpc-id",
                     "Values": [vpc_id]
                 }])
         else:
             describe_enis = client.describe_network_interfaces(  # type: ignore
                 Filters=[{
                     "Name": "vpc-id",
                     "Values": [vpc_id]
                 }],
                 NextToken=next_token)
         for eni in describe_enis["NetworkInterfaces"]:
             # An ENI counts as dangling when EKS created it for this cluster,
             # or it is an unattached ("available") aws-K8S interface.
             if eni["Description"] == f"Amazon EKS opta-{self.layer.name}" or (
                     eni["Description"].startswith("aws-K8S")
                     and eni["Status"] == "available"):
                 logger.debug(
                     f"Identified dangling EKS network interface {eni['NetworkInterfaceId']}"
                 )
                 dangling_enis.append(eni)
         next_token = describe_enis.get("NextToken", None)
         if next_token is None:
             break
     for eni in dangling_enis:
         logger.debug(
             f"Now deleting dangling network interface {eni['NetworkInterfaceId']}"
         )
         client.delete_network_interface(
             NetworkInterfaceId=eni["NetworkInterfaceId"])
예제 #12
0
 def get_remote_config(self) -> Optional["StructuredConfig"]:
     """Load the locally stored opta config; None when missing or unparsable."""
     try:
         with open(self.config_file_path, "r") as config_file:
             return json.load(config_file)
     except Exception:  # Backwards compatibility
         logger.debug(
             "Could not successfully download and parse any pre-existing config"
         )
         return None
예제 #13
0
파일: generator.py 프로젝트: run-x/opta
def gen(
    layer: "Layer",
    existing_config: Optional["StructuredConfig"] = None,
    image_tag: Optional[str] = None,
    image_digest: Optional[str] = None,
    test: bool = False,
    check_image: bool = False,
    auto_approve: bool = False,
) -> Generator[Tuple[int, List["Module"], int], None, None]:
    """Generate TF file based on opta config file.

    Walks the layer's modules in order, writing the terraform JSON for each
    processed batch to TF_FILE_PATH and yielding
    (module_idx, modules_so_far, total_module_count) after each write so the
    caller can apply incrementally.

    :param layer: layer whose modules are rendered
    :param existing_config: previously stored structured config, if any
    :param image_tag: image tag to deploy; may be filled from the live deployment
    :param image_digest: image digest to deploy; may be filled from the live deployment
    :param test: True when running under tests (suppresses the keep-alive prompt)
    :param check_image: when True, inspect the currently deployed image for k8s services
    :param auto_approve: skip the interactive prompt and keep the existing image
    """
    logger.debug("Loading infra blocks")

    total_module_count = len(layer.modules)
    current_modules = []
    for module_idx, module in enumerate(layer.modules):
        logger.debug(f"Generating {module_idx} - {module.name}")
        current_modules.append(module)
        # Batch modules together until one is marked `halt` (or we reach the
        # last module); only then render and yield.
        if not module.halt and module_idx + 1 != total_module_count:
            continue
        service_modules = layer.get_module_by_type("k8s-service", module_idx)
        if check_image and len(service_modules) > 0 and cluster_exist(
                layer.root()):
            set_kube_config(layer)

            for service_module in service_modules:
                current_image_info = current_image_digest_tag(layer)
                # Only relevant when the service image is "AUTO", the caller
                # gave no explicit tag/digest, and something is already deployed.
                if (image_digest is None
                        and (current_image_info["tag"] is not None
                             or current_image_info["digest"] is not None)
                        and image_tag is None and service_module.data.get(
                            "image", "").upper() == "AUTO" and not test):
                    if not auto_approve:
                        if click.confirm(
                                f"WARNING There is an existing deployment (tag={current_image_info['tag']}, "
                                f"digest={current_image_info['digest']}) and the pods will be killed as you "
                                f"did not specify an image tag. Would you like to keep the existing deployment alive?",
                        ):
                            image_tag = current_image_info["tag"]
                            image_digest = current_image_info["digest"]
                    else:
                        logger.info(
                            f"{attr('bold')}Using the existing deployment {attr('underlined')}"
                            f"(tag={current_image_info['tag']}, digest={current_image_info['digest']}).{attr(0)}\n"
                            f"{attr('bold')}If you wish to deploy another image, please use "
                            f"{attr('bold')}{attr('underlined')} opta deploy command.{attr(0)}"
                        )
                        image_tag = current_image_info["tag"]
                        image_digest = current_image_info["digest"]
        layer.variables["image_tag"] = image_tag
        layer.variables["image_digest"] = image_digest
        # Providers first, then the modules' terraform merged on top.
        ret = layer.gen_providers(module_idx)
        ret = deep_merge(layer.gen_tf(module_idx, existing_config), ret)

        gen_tf.gen(ret, TF_FILE_PATH)

        yield module_idx, current_modules, total_module_count
예제 #14
0
 def _download_remote_blob(s3_client: S3Client, bucket: str,
                           key: str) -> Optional["StructuredConfig"]:
     """Fetch and JSON-decode an S3 object; None on any failure."""
     try:
         body = s3_client.get_object(Bucket=bucket, Key=key)["Body"]
         return json.loads(body.read())
     except Exception:
         logger.debug(
             "Could not successfully download and parse any pre-existing config"
         )
         return None
예제 #15
0
 def upload_opta_config(self) -> None:
     """Serialize this layer's structured config and store it in the GCS state bucket."""
     bucket_name = self.layer.state_storage()
     blob_path = f"opta_config/{self.layer.name}"
     credentials, project_id = self.get_credentials()
     client = storage.Client(project=project_id, credentials=credentials)
     target_bucket = client.get_bucket(bucket_name)
     storage.Blob(blob_path, target_bucket).upload_from_string(
         json.dumps(self.layer.structured_config()))
     logger.debug("Uploaded opta config to gcs")
예제 #16
0
 def _download_remote_blob(bucket: Bucket,
                           key: str) -> Optional["StructuredConfig"]:
     """Fetch and JSON-decode a GCS blob; None on any failure."""
     try:
         return json.loads(storage.Blob(key, bucket).download_as_text())
     except Exception:  # Backwards compatibility
         logger.debug(
             "Could not successfully download and parse any pre-existing config"
         )
         return None
예제 #17
0
    def send_event(
        self,
        event_type: str,
        event_properties: Optional[dict] = None,
        user_properties: Optional[dict] = None,
    ) -> None:
        """Report a usage event to Amplitude unless running under pytest/dev or opted out."""
        if hasattr(sys, "_called_from_test") or VERSION == DEV_VERSION:
            logger.debug(
                "Not sending amplitude cause we think we're in a pytest or in dev"
            )
            return
        if event_type not in self.VALID_EVENTS:
            raise Exception(f"Invalid event type: {event_type}")
        # Random id lets amplitude de-duplicate retried submissions.
        insert_id = "".join(random.SystemRandom().choices(
            string.ascii_letters + string.digits, k=16))
        event = {
            "user_id": self.user_id,
            "device_id": self.device_id,
            "event_type": event_type,
            "event_properties": event_properties or {},
            "user_properties": user_properties or {},
            "app_version": VERSION,
            "platform": self.platform,
            "os_name": self.os_name,
            "os_version": self.os_version,
            "insert_id": insert_id,
            "session_id": SESSION_ID,
        }
        body = {"api_key": self.api_key, "events": [event]}
        headers = {"Content-Type": "application/json", "Accept": "*/*"}

        if os.environ.get(OPTA_DISABLE_REPORTING) is not None:
            # User explicitly opted out of reporting.
            return
        try:
            response = post(
                "https://api2.amplitude.com/2/httpapi",
                params={},
                headers=headers,
                json=body,
            )
            if response.status_code != codes.ok:
                raise Exception(
                    "Hey, we're trying to send some analytics over to our devs for the "
                    f"product usage and we got a {response.status_code} response back. Could "
                    "you pls email over to our dev team about this and tell them of the "
                    f"failure with the aforementioned code and this response body: {response.text}"
                )
        except Exception as err:
            # Analytics are best-effort: never break the CLI over a failed upload.
            logger.debug(
                f"Unexpected error when connecting to amplitude {err=}, {type(err)=}"
            )
예제 #18
0
    def upload_opta_config(self) -> None:
        """Serialize this layer's structured config and store it in the S3 state bucket."""
        bucket_name = self.layer.state_storage()
        key = f"opta_config/{self.layer.name}"

        payload = json.dumps(self.layer.structured_config()).encode("utf-8")
        client = boto3.client("s3", config=Config(region_name=self.region))
        client.put_object(Body=payload, Bucket=bucket_name, Key=key)
        logger.debug("Uploaded opta config to s3")
예제 #19
0
 def get_terraform_lock_id(self) -> str:
     """Return the GCS generation of the terraform lock blob, or "" when absent."""
     bucket_name = self.layer.state_storage()
     lock_path = f"{self.layer.name}/default.tflock"
     credentials, project_id = self.get_credentials()
     client = storage.Client(project=project_id, credentials=credentials)
     bucket_object = client.get_bucket(bucket_name)
     try:
         # get_blob yields None for a missing blob; the attribute access then
         # raises and lands in the except branch below.
         lock_blob = bucket_object.get_blob(lock_path)
         return str(lock_blob.generation)
     except Exception:  # Backwards compatibility
         logger.debug("No Terraform Lock state exists.")
         return ""
예제 #20
0
파일: kubernetes.py 프로젝트: run-x/opta
def set_kube_config(layer: "Layer") -> None:
    """Create a kubeconfig file to connect to a kubernetes cluster specified in a given layer"""

    # Surface a stack trace (debug only) when called in stateless mode, where
    # this probably should not be reached.
    if layer.is_stateless_mode() is True and logger.isEnabledFor(DEBUG):
        logger.debug(
            "set_kube_config called in stateless mode, verify implementation. See stack trace below:"
        )
        traceback.print_stack()

    # kubectl is not strictly needed to write the kubeconfig, but the user
    # will need it to access the cluster, so require it up front.
    ensure_installed("kubectl")
    makedirs(GENERATED_KUBE_CONFIG_DIR, exist_ok=True)
    layer.get_cloud_client().set_kube_config()
예제 #21
0
파일: aws_base.py 프로젝트: run-x/opta
    def validate_existing_vpc_params(self, data: dict) -> None:
        """Check that the existing-VPC inputs are complete and duplicate-free.

        :param data: module input data for aws_base
        :raises UserErrors: when required params are missing or subnet ids repeat
        """
        logger.debug("Validating existing VPC parameters")

        # All of the existing-VPC params must be given together.
        missing = [p for p in _EXISTING_VPC_PARAMS if p not in data]
        if missing:
            param_str = ", ".join(missing)
            raise UserErrors(
                f"In the aws_base module, the parameters `{param_str}` are all required if any are set"
            )

        for unique_param in ("public_subnet_ids", "private_subnet_ids"):
            subnet_ids: List[str] = data[unique_param]
            if len(set(subnet_ids)) != len(subnet_ids):
                raise UserErrors(
                    f"In the aws_base module, the values in {unique_param} must all be unique"
                )
예제 #22
0
 def cleanup_security_groups(self, region: str) -> None:
     """Delete the security groups EKS left behind in this layer's VPC.

     :param region: AWS region to inspect
     """
     logger.debug("Seeking dangling security groups EKS forgot to destroy.")
     ec2: EC2Client = boto3.client("ec2", config=Config(region_name=region))
     vpc_filters = [
         {"Name": "tag:layer", "Values": [self.layer.name]},
         {"Name": "tag:opta", "Values": ["true"]},
     ]
     vpcs = ec2.describe_vpcs(Filters=vpc_filters)["Vpcs"]
     if not vpcs:
         logger.debug(f"Opta vpc for layer {self.layer.name} not found")
         return
     if len(vpcs) > 1:
         # Ambiguous match -- bail out rather than delete in the wrong VPC.
         logger.debug(
             f"Weird, found multiple vpcs for layer {self.layer.name}: {[x['VpcId'] for x in vpcs]}"
         )
         return
     vpc_id = vpcs[0]["VpcId"]
     group_filters = [
         {
             "Name": f"tag:kubernetes.io/cluster/opta-{self.layer.name}",
             "Values": ["owned"],
         },
         {"Name": "vpc-id", "Values": [vpc_id]},
     ]
     groups = ec2.describe_security_groups(
         Filters=group_filters)["SecurityGroups"]
     if not groups:
         logger.debug("No dangling security groups found")
         return
     for group in groups:
         logger.debug(
             f"Deleting dangling security group {group['GroupId']}")
         ec2.delete_security_group(GroupId=group["GroupId"])
예제 #23
0
 def cleanup_cloudwatch_log_group(self, region: str) -> None:
     """Remove the EKS cluster's cloudwatch log group if it survived the destroy.

     :param region: AWS region to inspect
     """
     logger.debug(
         "Seeking dangling cloudwatch log group for k8s cluster just destroyed."
     )
     logs_client: CloudWatchLogsClient = boto3.client(
         "logs", config=Config(region_name=region))
     log_group_name = f"/aws/eks/opta-{self.layer.name}/cluster"
     matches = logs_client.describe_log_groups(
         logGroupNamePrefix=log_group_name)["logGroups"]
     if not matches:
         return
     logger.debug(
         f"Found dangling cloudwatch log group {log_group_name}. Deleting it now"
     )
     logs_client.delete_log_group(logGroupName=log_group_name)
     # Give AWS a moment, then check whether the group immediately recreated
     # itself (e.g. because something is still emitting logs).
     sleep(3)
     matches = logs_client.describe_log_groups(
         logGroupNamePrefix=log_group_name)["logGroups"]
     if matches:
         logger.warning(
             f"Cloudwatch Log group {log_group_name} has recreated itself. Not stopping the destroy, but you will "
             "wanna check this out.")
예제 #24
0
파일: terraform.py 프로젝트: run-x/opta
    def get_existing_module_resources(cls, layer: "Layer") -> List[str]:
        """Return the addresses of all resources present in the layer's terraform state.

        Addresses are built as ``<module>.[data.]<type>.<name>``; the ``data``
        segment is present only for data-source resources.

        :param layer: layer whose remote state is inspected
        :return: list of resource addresses; empty when no state exists yet
        """
        try:
            state = cls.get_state(layer)
        except MissingState:
            logger.debug(
                "Could not fetch remote terraform state, assuming no resources exist yet."
            )
            return []
        module_resources: List[str] = []
        resource: dict
        for resource in state.get("resources", []):
            if ("module" not in resource or "type" not in resource
                    or "name" not in resource):
                continue
            name_parts = [resource["module"]]
            # BUG FIX: terraform addresses data-source resources as
            # "<module>.data.<type>.<name>" (mode == "data"); previously the
            # "data" segment was added for *managed* resources instead,
            # inverting every address. Also use .get since "mode" is not part
            # of the key check above.
            if resource.get("mode") == "data":
                name_parts.append("data")
            name_parts.append(resource["type"])
            name_parts.append(resource["name"])
            module_resources.append(".".join(name_parts))

        return module_resources
예제 #25
0
    def process(self, module_idx: int) -> None:
        """Configure the cloudfront distribution module before terraform generation.

        Wires in the aws-base logging bucket, optional aws-dns automation
        (zone/cert/domains), validates the bucket-linking rules, and delegates
        link handling to the s3 / k8s-base handlers.

        :param module_idx: index of this module within the layer
        :raises UserErrors: when bucket inputs are missing, more than one link
            is given, or a linked module cannot be found
        """
        # Prefer the layer's own aws-base module; fall back to the parent layer's.
        aws_base_modules = self.layer.get_module_by_type("aws-base", module_idx)
        from_parent = False
        if len(aws_base_modules) == 0 and self.layer.parent is not None:
            from_parent = True
            aws_base_modules = self.layer.parent.get_module_by_type("aws-base")

        if len(aws_base_modules) == 0:
            logger.debug(
                "Did not find the aws-base module. "
                "This is highly recommended even for SPA as it sets up logging/auditing buckets"
            )
        else:
            # Parent outputs come via terraform_remote_state; sibling modules
            # are referenced directly.
            module_source = (
                "data.terraform_remote_state.parent.outputs"
                if from_parent
                else f"module.{aws_base_modules[0].name}"
            )
            self.module.data["s3_log_bucket_name"] = self.module.data.get(
                "s3_log_bucket_name", f"${{{{{module_source}.s3_log_bucket_name}}}}"
            )

        # When an aws-dns module is linked to this module, enable automatic
        # DNS and default zone/cert/domains from its outputs.
        aws_dns_modules = self.layer.get_module_by_type("aws-dns", module_idx)
        if len(aws_dns_modules) != 0 and aws_dns_modules[0].data.get("linked_module") in [
            self.module.type,
            self.module.name,
        ]:
            aws_dns_module = aws_dns_modules[0]
            self.module.data["enable_auto_dns"] = True
            self.module.data["zone_id"] = (
                self.module.data.get("zone_id")
                or f"${{{{module.{aws_dns_module.name}.zone_id}}}}"
            )
            self.module.data["acm_cert_arn"] = (
                self.module.data.get("acm_cert_arn")
                or f"${{{{module.{aws_dns_module.name}.cert_arn}}}}"
            )
            self.module.data["domains"] = self.module.data.get("domains") or [
                f"${{{{module.{aws_dns_module.name}.domain}}}}"
            ]

        # Either exactly one linked opta bucket, or explicit bucket details.
        links = self.module.data.get("links", [])
        if links == [] and (
            "bucket_name" not in self.module.data
            or "origin_access_identity_path" not in self.module.data
        ):
            raise UserErrors(
                "You need to either link 1 opta s3 bucket or provide the bucket_name and "
                "origin_access_identity_path for your bucket."
            )

        if len(links) > 1:
            raise UserErrors("Cloudfront Distribution can't have more than one links.")

        for module_name in links:
            module = self.layer.get_module(module_name, module_idx)
            if module is None:
                raise UserErrors(f"Could not find module {module_name}")
            module_type = module.aliased_type or module.type
            if module_type == "aws-s3":
                self.handle_s3_link(module)
            elif module_type == "aws-k8s-base":
                self.handle_k8s_base_link(module)

        super(CloudfrontDistributionProcessor, self).process(module_idx)
예제 #26
0
파일: gen_tf.py 프로젝트: run-x/opta
def gen(tf_blocks: Mapping[Any, Any], out_file: str) -> None:
    """Serialize *tf_blocks* as pretty-printed (2-space indent) JSON into *out_file*."""
    serialized = json.dumps(tf_blocks, indent=2)
    with open(out_file, "w") as fp:
        fp.write(serialized)

    logger.debug(f"Output written to {out_file}")
예제 #27
0
파일: terraform.py 프로젝트: run-x/opta
    def _create_gcp_state_storage(cls, providers: dict) -> None:
        """Ensure the GCS bucket backing terraform state exists and required GCP APIs are enabled.

        Args:
            providers: The rendered terraform ``providers`` mapping; the bucket
                name is read from ``terraform.backend.gcs`` and the region /
                project from ``provider.google``.

        Raises:
            UserErrors: If the configured project does not match the active
                credentials, the bucket exists but is inaccessible or owned by
                another project, the bucket name is in GCS's post-deletion
                hold, or an API enablement call is rejected.
        """
        bucket_name = providers["terraform"]["backend"]["gcs"]["bucket"]
        region = providers["provider"]["google"]["region"]
        project_name = providers["provider"]["google"]["project"]
        credentials, project_id = GCP.get_credentials()
        # Guard against provisioning state into a project other than the one
        # the local credentials actually point at.
        if project_id != project_name:
            raise UserErrors(
                f"We got {project_name} as the project name in opta, but {project_id} in the google credentials"
            )
        gcs_client = storage.Client(project=project_id,
                                    credentials=credentials)
        # Probe for the bucket; 403 means it exists but belongs to someone
        # else, 404 means we should create it, anything else is surfaced.
        try:
            bucket = gcs_client.get_bucket(bucket_name)
            bucket_project_number = bucket.project_number
        except GoogleClientError as e:
            if e.code == 403:
                raise UserErrors(
                    f"The Bucket Name: {bucket_name} (Opta needs to store state here) already exists.\n"
                    "Possible Failures:\n"
                    " - Bucket is present in some other project and User does not have access to the Project.\n"
                    "Please change the name in the Opta Configuration file or please change the User Permissions.\n"
                    "Please fix it and try again.")
            elif e.code != 404:
                raise UserErrors(
                    "When trying to determine the status of the state bucket, we got an "
                    f"{e.code} error with the message "
                    f"{e.message}")
            logger.debug(
                "GCS bucket for terraform state not found, creating a new one")
            try:
                bucket = gcs_client.create_bucket(bucket_name, location=region)
                bucket_project_number = bucket.project_number
            except Conflict:
                # GCS reserves deleted bucket names for ~30 days; creation of
                # a recently deleted name raises Conflict.
                raise UserErrors(
                    f"It looks like a gcs bucket with the name {bucket_name} was created recently, but then deleted "
                    "and Google keeps hold of gcs bucket names for 30 days after deletion-- pls wait until the end of "
                    "that time or change your environment name slightly.")

        # Enable the APIs
        credentials = GoogleCredentials.get_application_default()
        service = discovery.build("serviceusage",
                                  "v1",
                                  credentials=credentials,
                                  static_discovery=False)
        new_api_enabled = False
        for service_name in [
                "container.googleapis.com",
                "iam.googleapis.com",
                "containerregistry.googleapis.com",
                "cloudkms.googleapis.com",
                "dns.googleapis.com",
                "servicenetworking.googleapis.com",
                "redis.googleapis.com",
                "compute.googleapis.com",
                "secretmanager.googleapis.com",
                "cloudresourcemanager.googleapis.com",
        ]:
            request = service.services().enable(
                name=f"projects/{project_name}/services/{service_name}")
            try:
                response = request.execute()
                # A noop operation name means the API was already on; anything
                # else indicates we just turned it on.
                new_api_enabled = new_api_enabled or (
                    response.get("name") != "operations/noop.DONE_OPERATION")
            except HttpError as e:
                if e.resp.status == 400:
                    raise UserErrors(
                        f"Got a 400 response when trying to enable the google {service_name} service with the following error reason: {e._get_reason()}"
                    )
            logger.debug(f"Google service {service_name} activated")
        if new_api_enabled:
            # Freshly enabled APIs take a while to propagate before calls to
            # them stop failing.
            logger.info(
                "New api has been enabled, waiting 120 seconds before progressing"
            )
            time.sleep(120)
        service = discovery.build(
            "cloudresourcemanager",
            "v1",
            credentials=credentials,
            static_discovery=False,
        )
        request = service.projects().get(projectId=project_id)
        response = request.execute()

        # Cross-check bucket ownership via project *number* (distinct from the
        # project id) to catch a same-named bucket living in another project.
        if response["projectNumber"] != str(bucket_project_number):
            raise UserErrors(
                f"State storage bucket {bucket_name}, has already been created, but it was created in another project. "
                f"Current project's number {response['projectNumber']}. Bucket's project number: {bucket_project_number}. "
                "You do, however, have access to view that bucket, so it sounds like you already run this opta apply in "
                "your org, but on a different project."
                "Note: project number is NOT project id. It is yet another globally unique identifier for your project "
                "I kid you not, go ahead and look it up.")
예제 #28
0
파일: terraform.py 프로젝트: run-x/opta
    def _create_aws_state_storage(cls, providers: dict) -> None:
        """Ensure the S3 bucket, dynamodb lock table, and service-linked roles for terraform state exist.

        Args:
            providers: The rendered terraform ``providers`` mapping; bucket
                name, dynamodb table, and region are read from
                ``terraform.backend.s3``.

        Raises:
            UserErrors: On credential failures, an inaccessible/foreign
                bucket name, or unexpected AWS error codes from any of the
                probe/create calls.
        """
        bucket_name = providers["terraform"]["backend"]["s3"]["bucket"]
        dynamodb_table = providers["terraform"]["backend"]["s3"][
            "dynamodb_table"]
        region = providers["terraform"]["backend"]["s3"]["region"]
        s3 = boto3.client("s3", config=Config(region_name=region))
        dynamodb = boto3.client("dynamodb", config=Config(region_name=region))
        iam = boto3.client("iam", config=Config(region_name=region))
        # Probe the bucket (via its encryption config); NoSuchBucket means we
        # create it, other codes are translated into user-facing errors.
        try:
            s3.get_bucket_encryption(Bucket=bucket_name, )
        except ClientError as e:
            if e.response["Error"]["Code"] == "AuthFailure":
                raise UserErrors(
                    "The AWS Credentials are not configured properly.\n"
                    "Visit https://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/setup-credentials.html "
                    "for more information.")
            if e.response["Error"]["Code"] == "AccessDenied":
                raise UserErrors(
                    f"We were unable to access the S3 bucket, {bucket_name} on your AWS account (opta needs this to store state).\n"
                    "Possible Issues: \n"
                    " - Bucket name is not unique and might be present in some other Account. Try updating the name in Configuration file to something else.\n"
                    " - It could also mean that your AWS account has insufficient permissions.\n"
                    "Please fix these issues and try again!")
            if e.response["Error"]["Code"] != "NoSuchBucket":
                raise UserErrors(
                    "When trying to determine the status of the state bucket, we got an "
                    f"{e.response['Error']['Code']} error with the message "
                    f"{e.response['Error']['Message']}")
            logger.debug(
                "S3 bucket for terraform state not found, creating a new one")
            # us-east-1 is special-cased: it rejects an explicit
            # LocationConstraint.
            if region == "us-east-1":
                s3.create_bucket(Bucket=bucket_name, )
            else:
                s3.create_bucket(
                    Bucket=bucket_name,
                    CreateBucketConfiguration={"LocationConstraint": region},
                )
            # Give the new bucket a moment to propagate before configuring it.
            time.sleep(10)
            s3.put_bucket_encryption(
                Bucket=bucket_name,
                ServerSideEncryptionConfiguration={
                    "Rules": [
                        {
                            "ApplyServerSideEncryptionByDefault": {
                                "SSEAlgorithm": "AES256"
                            },
                        },
                    ]
                },
            )
            # Visit (https://run-x.atlassian.net/browse/RUNX-1125) for further reference
            s3.put_bucket_versioning(
                Bucket=bucket_name,
                VersioningConfiguration={"Status": "Enabled"},
            )
            # Age out old state versions: Glacier after 30 days, delete after
            # 60, and drop stuck multipart uploads after 10.
            s3.put_bucket_lifecycle(
                Bucket=bucket_name,
                LifecycleConfiguration={
                    "Rules": [
                        {
                            "ID": "default",
                            "Prefix": "/",
                            "Status": "Enabled",
                            "NoncurrentVersionTransition": {
                                "NoncurrentDays": 30,
                                "StorageClass": "GLACIER",
                            },
                            "NoncurrentVersionExpiration": {
                                "NoncurrentDays": 60
                            },
                            "AbortIncompleteMultipartUpload": {
                                "DaysAfterInitiation": 10
                            },
                        },
                    ]
                },
            )

        # Create the dynamodb table terraform uses for state locking, if absent.
        try:
            dynamodb.describe_table(TableName=dynamodb_table)
        except ClientError as e:
            if e.response["Error"]["Code"] != "ResourceNotFoundException":
                raise UserErrors(
                    "When trying to determine the status of the state dynamodb table, we got an "
                    f"{e.response['Error']['Code']} error with the message "
                    f"{e.response['Error']['Message']}")
            logger.debug(
                "Dynamodb table for terraform state not found, creating a new one"
            )
            dynamodb.create_table(
                TableName=dynamodb_table,
                KeySchema=[{
                    "AttributeName": "LockID",
                    "KeyType": "HASH"
                }],
                AttributeDefinitions=[{
                    "AttributeName": "LockID",
                    "AttributeType": "S"
                }],
                BillingMode="PROVISIONED",
                ProvisionedThroughput={
                    "ReadCapacityUnits": 20,
                    "WriteCapacityUnits": 20,
                },
            )
        # Create the service linked roles
        # (InvalidInput is how AWS signals the role already exists.)
        try:
            iam.create_service_linked_role(
                AWSServiceName="autoscaling.amazonaws.com", )
        except ClientError as e:
            if e.response["Error"]["Code"] != "InvalidInput":
                raise UserErrors(
                    "When trying to create the aws service linked role for autoscaling, we got an "
                    f"{e.response['Error']['Code']} error with the message "
                    f"{e.response['Error']['Message']}")
            logger.debug("Autoscaling service linked role present")
        try:
            iam.create_service_linked_role(
                AWSServiceName="elasticloadbalancing.amazonaws.com", )
        except ClientError as e:
            if e.response["Error"]["Code"] != "InvalidInput":
                raise UserErrors(
                    "When trying to create the aws service linked role for load balancing, we got an "
                    f"{e.response['Error']['Code']} error with the message "
                    f"{e.response['Error']['Message']}")
            logger.debug("Load balancing service linked role present")
예제 #29
0
파일: kubernetes.py 프로젝트: run-x/opta
def _upsert_named_entry(entries: List[dict], new_entry: dict) -> None:
    """Replace the entry in *entries* whose "name" matches *new_entry*, or append it."""
    for i, existing in enumerate(entries):
        if existing["name"] == new_entry["name"]:
            entries[i] = new_entry
            return
    entries.append(new_entry)


def load_opta_kube_config_to_default(layer: "Layer") -> None:
    """Merge the opta-managed kube config for *layer* into the user's default kube config.

    If the default kube config does not exist yet, the opta config is written
    as-is. Otherwise the opta user, context, and cluster entries are upserted
    (matched by "name") into the default config, and current-context is
    pointed at the opta context.

    BUGFIX: the clusters list was previously indexed with the *context* match
    index (context_indices[0]) instead of the cluster match index, which could
    overwrite the wrong cluster entry (or crash) when the two lists diverged.
    """
    kube_config_file_name = layer.get_kube_config_file_name()
    if not exists(kube_config_file_name):
        logger.debug(
            f"Can not find opta managed kube config, {kube_config_file_name}, to load to user default"
        )
        return

    with open(kube_config_file_name) as f:
        opta_config = yaml.load(f)

    # KUBECONFIG may be a path list; only the first entry is the default file.
    default_kube_config_filename = expanduser(
        constants.DEFAULT_KUBECONFIG.split(ENV_KUBECONFIG_PATH_SEPARATOR)[0])
    logger.debug(
        f"Checking kube config file of {default_kube_config_filename}")
    if not exists(default_kube_config_filename):
        # No pre-existing default config: the opta config becomes it wholesale.
        logger.debug("The kube config file did not exist")
        makedirs(dirname(default_kube_config_filename), exist_ok=True)
        with open(default_kube_config_filename, "w") as f:
            yaml.dump(opta_config, f)
        return
    logger.debug("Loading kube config file")
    with open(default_kube_config_filename) as f:
        default_kube_config = yaml.load(f)

    # Upsert each opta entry into its corresponding section of the default
    # config (assumes the opta config has exactly one of each — TODO confirm).
    opta_config_user = opta_config["users"][0]
    opta_config_context = opta_config["contexts"][0]
    opta_config_cluster = opta_config["clusters"][0]

    _upsert_named_entry(default_kube_config["users"], opta_config_user)
    _upsert_named_entry(default_kube_config["contexts"], opta_config_context)
    _upsert_named_entry(default_kube_config["clusters"], opta_config_cluster)

    default_kube_config["current-context"] = opta_config_context["name"]
    with open(default_kube_config_filename, "w") as f:
        yaml.dump(default_kube_config, f)
예제 #30
0
파일: kubernetes.py 프로젝트: run-x/opta
def tail_namespace_events(
        layer: "Layer",
        earliest_event_start_time: Optional[datetime.datetime] = None,
        color_idx: int = 15,  # White Color
) -> None:
    """Print past kubernetes events for the layer's namespace, then stream new ones.

    Events observed before *earliest_event_start_time* are skipped. The watch
    streams until the process is interrupted, retries on ApiException with
    exponential backoff (up to 5 attempts), and returns on any other error.
    *color_idx* selects the terminal foreground color for the output.
    """
    load_opta_kube_config()
    v1 = EventsV1Api()
    watch = Watch()
    print(f"{fg(color_idx)}Showing events for namespace {layer.name}{attr(0)}")
    retry_count = 0
    # First dump the backlog of existing events before starting the live watch.
    old_events: List[EventsV1Event] = v1.list_namespaced_event(
        namespace=layer.name).items
    # Filter by time
    if earliest_event_start_time is not None:
        # Redefine so mypy doesn't complain about earliest_event_start_time being Optional during lambda call
        filter_start_time = earliest_event_start_time

        old_events = list(
            filter(
                lambda x: _event_last_observed(x) > filter_start_time,
                old_events,
            ))
    # Sort by timestamp
    old_events = sorted(old_events, key=lambda x: _event_last_observed(x))
    event: EventsV1Event
    for event in old_events:
        if do_not_show_event(event):
            continue
        # Track the newest backlog timestamp so the live watch below only
        # prints events newer than what we've already shown.
        earliest_event_start_time = _event_last_observed(event)
        print(
            f"{fg(color_idx)}{earliest_event_start_time} Namespace {layer.name} event: {event.note}{attr(0)}"
        )
    # Pods seen in "Deleted pod:" events; later events about them are muted.
    deleted_pods = set()
    while True:
        try:
            for stream_obj in watch.stream(
                    v1.list_namespaced_event,
                    namespace=layer.name,
            ):
                event = stream_obj["object"]
                event_time = _event_last_observed(event)
                if (earliest_event_start_time is None
                        or event_time > earliest_event_start_time):
                    if "Deleted pod:" in event.note:
                        deleted_pods.add(event.note.split(" ")[-1])
                    involved_object: Optional[
                        V1ObjectReference] = event.regarding
                    # Suppress noise about pods we already know are gone.
                    if (involved_object is not None
                            and involved_object.kind == "Pod"
                            and involved_object.name in deleted_pods):
                        continue
                    if do_not_show_event(event):
                        continue
                    print(
                        f"{fg(color_idx)}{event_time} Namespace {layer.name} event: {event.note}{attr(0)}"
                    )
        except ApiException as e:
            # Transient API errors: back off exponentially (1s, 2s, 4s, ...)
            # for up to 5 retries before giving up.
            if retry_count < 5:
                print(
                    f"{fg(color_idx)}Couldn't get logs, waiting a bit and retrying{attr(0)}"
                )
                time.sleep(1 << retry_count)
                retry_count += 1
            else:
                logger.error(
                    f"{fg(color_idx)}Got the following error while trying to fetch the events in namespace {layer.name}: {e}"
                )
                return
        except Exception as e:
            # Any other failure is logged (with traceback at debug) and ends the tail.
            logger.error(
                f"{fg(color_idx)}Got the following error while trying to fetch the events in namespace {layer.name}: {e}{attr(0)}"
            )
            logger.debug("Event watch exception", exc_info=True)
            return