Code example #1
File: test_aws.py Project: run-x/opta
    def test_get_terraform_lock_id(self, mocker: MockFixture,
                                   aws_layer: Mock) -> None:
        mock_dynamodb_client_instance = mocker.Mock(spec=DynamoDBClient)
        mocker.patch("opta.core.aws.boto3.client",
                     return_value=mock_dynamodb_client_instance)

        mock_dynamodb_client_instance.get_item.return_value = {
            "Item": {
                "Info": {
                    "S": '{"ID": "mock_lock_id"}'
                }
            }
        }

        mock_aws = AWS(aws_layer)
        assert mock_aws.get_terraform_lock_id() == "mock_lock_id"

        mock_dynamodb_client_instance.get_item.assert_called_once_with(
            TableName=aws_layer.gen_providers(0)["terraform"]["backend"]["s3"]
            ["dynamodb_table"],
            Key={
                "LockID": {
                    "S": f"{aws_layer.state_storage()}/{aws_layer.name}"
                }
            },
        )
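
The test_aws.py excerpts in this listing omit their import block. A plausible reconstruction, assuming the pytest-mock and mypy-boto3 stub packages (the exact import paths are not shown in the source):

from datetime import datetime
from unittest.mock import Mock

import pytest
from botocore.response import StreamingBody
from mypy_boto3_dynamodb import DynamoDBClient
from mypy_boto3_s3 import S3Client
from pytest_mock import MockFixture

from opta.core.aws import AWS
# MissingState, GENERATED_KUBE_CONFIG_DIR, and the aws_layer fixture come from
# opta's own modules; their exact locations are assumed, not shown above.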
Code example #2
File: test_aws.py Project: run-x/opta
    def test_get_all_remote_configs_configuration_present(
            self, mocker: MockFixture) -> None:
        mock_s3_client_instance = mocker.Mock(spec=S3Client)
        mocker.patch("opta.core.aws.boto3.client",
                     return_value=mock_s3_client_instance)
        mocker.patch("opta.core.aws.AWS._get_opta_buckets",
                     return_value=["test"])
        mock_s3_client_instance.list_objects.return_value = {
            "Contents": [{
                "Key": "opta_config/test-config"
            }]
        }
        mock_stream = mocker.Mock(spec=StreamingBody)
        mock_stream.read.return_value = """{"original_spec": "actual_config"}"""
        mock_s3_client_instance.get_object.return_value = {"Body": mock_stream}
        mock_download_remote_blob = mocker.patch(
            "opta.core.aws.AWS._download_remote_blob",
            return_value={
                "opta_version": "dev",
                "date": datetime.utcnow().isoformat(),
                "original_spec": "actual_config",
                "defaults": {},
            },
        )

        AWS().get_all_remote_configs()
        mock_s3_client_instance.list_objects.assert_called_once_with(
            Bucket="test", Prefix="opta_config/", Delimiter="/")
        mock_download_remote_blob.assert_called_once_with(
            mock_s3_client_instance, "test", "opta_config/test-config")
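
Examples #2, #7, and #8 all stub out AWS._download_remote_blob rather than exercise it. Pieced together from how the tests call and mock it, a minimal sketch of the behavior they assume (the real implementation and its error handling are not shown in this listing):

import json
from typing import Optional

def _download_remote_blob(s3_client, bucket: str, key: str) -> Optional[dict]:
    # Fetch the object and decode its JSON body; return None when the key is
    # absent, which example #8 turns into a MissingState error.
    try:
        obj = s3_client.get_object(Bucket=bucket, Key=key)
        return json.loads(obj["Body"].read())
    except s3_client.exceptions.NoSuchKey:
        return None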
Code example #3
File: terraform.py Project: run-x/opta
    def delete_state_storage(cls, layer: "Layer") -> None:
        """
        Idempotently remove remote storage for tf state
        """
        # After the layer is completely deleted, remove the opta config from the state bucket.
        if layer.cloud == "aws":
            cloud_client: CloudClient = AWS(layer)
        elif layer.cloud == "google":
            cloud_client = GCP(layer)
        elif layer.cloud == "azurerm":
            cloud_client = Azure(layer)
        elif layer.cloud == "local":
            cloud_client = Local(layer)
        elif layer.cloud == "helm":
            # There is no opta managed storage to delete
            return
        else:
            raise Exception(
                f"Can not handle opta config deletion for cloud {layer.cloud}")
        cloud_client.delete_opta_config()
        cloud_client.delete_remote_state()

        # If this is the env layer, delete the state bucket & dynamo table as well.
        if layer.name == layer.root().name:
            logger.info(f"Deleting the state storage for {layer.name}...")
            if layer.cloud == "aws":
                cls._aws_delete_state_storage(layer)
            elif layer.cloud == "google":
                cls._gcp_delete_state_storage(layer)
            elif layer.cloud == "local":
                cls._local_delete_state_storage(layer)
Code example #4
File: terraform.py Project: run-x/opta
    def _aws_delete_state_storage(cls, layer: "Layer") -> None:
        providers = layer.gen_providers(0)
        if "s3" not in providers.get("terraform", {}).get("backend", {}):
            return

        # Delete the state storage bucket
        bucket_name = providers["terraform"]["backend"]["s3"]["bucket"]
        region = providers["terraform"]["backend"]["s3"]["region"]
        AWS.delete_bucket(bucket_name, region)

        # Delete the dynamodb state lock table
        dynamodb_table = providers["terraform"]["backend"]["s3"][
            "dynamodb_table"]

        AWS.delete_dynamodb_table(dynamodb_table, region)
        logger.info("Successfully deleted AWS state storage")
Code example #5
File: terraform.py Project: run-x/opta
 def force_delete_terraform_lock(cls, layer: "Layer",
                                 exception: Exception) -> None:
     if layer.cloud == "aws":
         AWS(layer).force_delete_terraform_lock_id()
     elif layer.cloud == "google":
         GCP(layer).force_delete_terraform_lock_id()
     else:
         raise exception
Code example #6
File: show.py Project: run-x/opta
def __get_cloud_client(cloud: str, layer: Optional[Layer] = None) -> CloudClient:
    cloud_client: CloudClient
    if cloud.lower() == "aws":
        cloud_client = AWS(layer=layer)
    elif cloud.lower() == "google":
        cloud_client = GCP(layer=layer)
    else:
        raise UserErrors(f"Can't get client for cloud {cloud}")

    return cloud_client
Code example #7
File: test_aws.py Project: run-x/opta
 def test_get_remote_state(self, mocker: MockFixture,
                           aws_layer: Mock) -> None:
     mock_s3_client_instance = mocker.Mock(spec=S3Client)
     mocker.patch("opta.core.aws.boto3.client",
                  return_value=mock_s3_client_instance)
     mock_download_remote_blob = mocker.patch(
         "opta.core.aws.AWS._download_remote_blob",
         return_value="""{"test": "test"}""")
     AWS(layer=aws_layer).get_remote_state()
     mock_download_remote_blob.assert_called_once_with(
         mock_s3_client_instance, aws_layer.state_storage(), aws_layer.name)
Code example #8
File: test_aws.py Project: run-x/opta
 def test_get_remote_state_state_does_not_exist(self, mocker: MockFixture,
                                                aws_layer: Mock) -> None:
     mock_s3_client_instance = mocker.Mock(spec=S3Client)
     mocker.patch("opta.core.aws.boto3.client",
                  return_value=mock_s3_client_instance)
     mock_download_remote_blob = mocker.patch(
         "opta.core.aws.AWS._download_remote_blob", return_value=None)
     with pytest.raises(MissingState):
         AWS(layer=aws_layer).get_remote_state()
     mock_download_remote_blob.assert_called_once_with(
         mock_s3_client_instance, aws_layer.state_storage(), aws_layer.name)
Code example #9
File: test_aws.py Project: run-x/opta
 def test_get_all_remote_configs_buckets_not_present(
         self, mocker: MockFixture) -> None:
     mock_s3_client_instance = mocker.Mock(spec=S3Client)
     mocker.patch("opta.core.aws.boto3.client",
                  return_value=mock_s3_client_instance)
     mocker.patch("opta.core.aws.AWS._get_opta_buckets", return_value=[])
     mock_s3_client_instance.list_objects.return_value = {}
     mock_download_remote_blob = mocker.patch(
         "opta.core.aws.AWS._download_remote_blob")
     AWS().get_all_remote_configs()
     mock_s3_client_instance.list_objects.assert_not_called()
     mock_download_remote_blob.assert_not_called()
Code example #10
File: layer.py Project: run-x/opta
    def bucket_exists(self, bucket_name: str) -> bool:
        if self.is_stateless_mode():
            return False

        if self.cloud == "aws":
            region = self.providers["aws"]["region"]
            return AWS(self).bucket_exists(bucket_name, region)
        elif self.cloud == "google":
            return GCP(self).bucket_exists(bucket_name)
        else:  # Note - this function does not work for Azure
            return False
Code example #11
File: test_aws.py Project: run-x/opta
 def test_get_all_remote_configs_configuration_not_present(
         self, mocker: MockFixture) -> None:
     mock_s3_client_instance = mocker.Mock(spec=S3Client)
     mocker.patch("opta.core.aws.boto3.client",
                  return_value=mock_s3_client_instance)
     mocker.patch("opta.core.aws.AWS._get_opta_buckets",
                  return_value=["test"])
     mock_s3_client_instance.list_objects.return_value = {}
     mock_download_remote_blob = mocker.patch(
         "opta.core.aws.AWS._download_remote_blob")
     AWS().get_all_remote_configs()
     mock_s3_client_instance.list_objects.assert_called_once_with(
         Bucket="test", Prefix="opta_config/", Delimiter="/")
     mock_download_remote_blob.assert_not_called()
Code example #12
File: layer.py Project: run-x/opta
 def get_cloud_client(self) -> CloudClient:
     if self.cloud == "aws":
         return AWS(self)
     elif self.cloud == "google":
         return GCP(self)
     elif self.cloud == "azurerm":
         return Azure(self)
     elif self.cloud == "local":
         return Local(self)
     elif self.cloud == "helm":
         return HelmCloudClient(self)
     else:
         raise Exception(
             f"Unknown cloud {self.cloud}. Can not handle getting the cloud client"
         )
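
With get_cloud_client() on Layer, call sites no longer need the per-cloud if/elif ladders seen in examples #3, #6, and #16. A hypothetical caller, where layer stands in for an already-loaded Layer:

cloud_client = layer.get_cloud_client()
cloud_client.upload_opta_config()  # same CloudClient interface used in example #16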
Code example #13
File: aws_k8s_base.py Project: run-x/opta
 def add_admin_roles(self) -> None:
     if self.module.data.get("admin_arns") is None:
         return
     set_kube_config(self.layer)
     load_opta_kube_config()
     v1 = CoreV1Api()
     aws_auth_config_map: V1ConfigMap = v1.read_namespaced_config_map(
         "aws-auth", "kube-system")
     opta_arns_config_map: V1ConfigMap = v1.read_namespaced_config_map(
         "opta-arns", "default")
     admin_arns = yaml.load(opta_arns_config_map.data["adminArns"])
     current_data = aws_auth_config_map.data
     old_map_roles = yaml.load(current_data["mapRoles"])
     new_map_roles = [
         old_map_role for old_map_role in old_map_roles
         if not old_map_role["username"].startswith("opta-managed")
     ]
     old_map_users = yaml.load(current_data.get("mapUsers", "[]"))
     new_map_users = [
         old_map_user for old_map_user in old_map_users
         if not old_map_user["username"].startswith("opta-managed")
     ]
     for arn in admin_arns:
         arn_data = AWS.parse_arn(arn)
         if arn_data["resource_type"] == "user":
             new_map_users.append({
                 "groups": ["system:masters"],
                 "userarn": arn,
                 "username": "******",
             })
         elif arn_data["resource_type"] == "role":
             new_map_roles.append({
                 "groups": ["system:masters"],
                 "rolearn": arn,
                 "username": "******",
             })
         else:
             raise UserErrors(f"Invalid arn for IAM role or user: {arn}")
     stream = StringIO()
     yaml.dump(new_map_roles, stream)
     aws_auth_config_map.data["mapRoles"] = stream.getvalue()
     if len(new_map_users) > 0:
         stream = StringIO()
         yaml.dump(new_map_users, stream)
         aws_auth_config_map.data["mapUsers"] = stream.getvalue()
     v1.replace_namespaced_config_map("aws-auth",
                                      "kube-system",
                                      body=aws_auth_config_map)
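
add_admin_roles only reads the "resource_type" field of AWS.parse_arn's result. A hedged illustration of that contract; any other fields in the returned dict are assumptions:

arn_data = AWS.parse_arn("arn:aws:iam::123456789012:role/my-admin-role")
assert arn_data["resource_type"] == "role"  # would be "user" for an IAM user ARN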
Code example #14
 def prepare_iam_statements(self) -> List[dict]:
     iam_statements = []
     if self.read_buckets:
         iam_statements.append(
             AWS.prepare_read_buckets_iam_statements(self.read_buckets)
         )
     if self.write_buckets:
         iam_statements.append(
             AWS.prepare_write_buckets_iam_statements(self.write_buckets)
         )
     if self.publish_queues:
         iam_statements.append(
             AWS.prepare_publish_queues_iam_statements(self.publish_queues)
         )
     if self.subscribe_queues:
         iam_statements.append(
             AWS.prepare_subscribe_queues_iam_statements(self.subscribe_queues)
         )
     if self.publish_topics:
         iam_statements.append(
             AWS.prepare_publish_sns_iam_statements(self.publish_topics)
         )
     if self.kms_write_keys:
         iam_statements.append(
             AWS.prepare_kms_write_keys_statements(self.kms_write_keys)
         )
     if self.kms_read_keys:
         iam_statements.append(
             AWS.prepare_kms_read_keys_statements(self.kms_read_keys)
         )
     if self.dynamodb_write_tables:
         iam_statements.append(
             AWS.prepare_dynamodb_write_tables_statements(self.dynamodb_write_tables)
         )
     if self.dynamodb_read_tables:
         iam_statements.append(
             AWS.prepare_dynamodb_read_tables_statements(self.dynamodb_read_tables)
         )
     return iam_statements
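
The method above repeats the same check-and-append pattern nine times. An equivalent table-driven sketch (not the project's actual code) maps each resource attribute to its AWS statement builder:

from typing import Callable, List, Tuple

_STATEMENT_BUILDERS: List[Tuple[str, Callable]] = [
    ("read_buckets", AWS.prepare_read_buckets_iam_statements),
    ("write_buckets", AWS.prepare_write_buckets_iam_statements),
    ("publish_queues", AWS.prepare_publish_queues_iam_statements),
    ("subscribe_queues", AWS.prepare_subscribe_queues_iam_statements),
    ("publish_topics", AWS.prepare_publish_sns_iam_statements),
    ("kms_write_keys", AWS.prepare_kms_write_keys_statements),
    ("kms_read_keys", AWS.prepare_kms_read_keys_statements),
    ("dynamodb_write_tables", AWS.prepare_dynamodb_write_tables_statements),
    ("dynamodb_read_tables", AWS.prepare_dynamodb_read_tables_statements),
]

def prepare_iam_statements(self) -> List[dict]:
    # One IAM statement per non-empty resource list, in table order.
    return [
        builder(values)
        for attr, builder in _STATEMENT_BUILDERS
        if (values := getattr(self, attr))
    ]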
Code example #15
File: test_aws.py Project: run-x/opta
    def test_aws_set_kube_config(self, mocker: MockFixture,
                                 aws_layer: Mock) -> None:
        mocked_exist = mocker.patch("opta.core.aws.exists")
        mocked_exist.return_value = False
        mock_eks_client = mocker.Mock()
        mocker.patch("opta.core.aws.boto3.client",
                     return_value=mock_eks_client)
        mock_eks_client.describe_cluster.return_value = {
            "cluster": {
                "certificateAuthority": {
                    "data": "ca-data"
                },
                "endpoint": "eks-endpoint",
            }
        }

        mocker.patch(
            "opta.core.aws.AWS.cluster_exist",
            return_value=True,
        )
        mocked_file = mocker.patch("opta.core.aws.open",
                                   mocker.mock_open(read_data=""))
        AWS(aws_layer).set_kube_config()
        config_file_name = f"{GENERATED_KUBE_CONFIG_DIR}/kubeconfig-{aws_layer.root().name}-{aws_layer.cloud}.yaml"
        mocked_file.assert_called_once_with(config_file_name, "w")
        mocked_file().write.assert_called_once_with(
            "apiVersion: v1\n"
            "clusters:\n"
            "- cluster: {certificate-authority-data: ca-data, server: eks-endpoint}\n"
            "  name: 111111111111_us-east-1_mocked_cluster_name\n"
            "contexts:\n"
            "- context: {cluster: 111111111111_us-east-1_mocked_cluster_name, user: "******"111111111111_us-east-1_mocked_cluster_name}\n"
            "  name: 111111111111_us-east-1_mocked_cluster_name\n"
            "current-context: 111111111111_us-east-1_mocked_cluster_name\n"
            "kind: Config\n"
            "preferences: {}\n"
            "users:\n"
            "- name: 111111111111_us-east-1_mocked_cluster_name\n"
            "  user:\n"
            "    exec:\n"
            "      apiVersion: client.authentication.k8s.io/v1alpha1\n"
            "      args: [--region, us-east-1, eks, get-token, --cluster-name, "
            "mocked_cluster_name]\n"
            "      command: aws\n"
            "      env: null\n")
Code example #16
def _apply(
    config: str,
    env: Optional[str],
    refresh: bool,
    local: bool,
    image_tag: Optional[str],
    test: bool,
    auto_approve: bool,
    input_variables: Dict[str, str],
    image_digest: Optional[str] = None,
    stdout_logs: bool = True,
    detailed_plan: bool = False,
) -> None:
    pre_check()
    _clean_tf_folder()
    if local and not test:
        config = local_setup(config,
                             input_variables,
                             image_tag,
                             refresh_local_env=True)

    layer = Layer.load_from_yaml(config, env, input_variables=input_variables)
    layer.verify_cloud_credentials()
    layer.validate_required_path_dependencies()

    if Terraform.download_state(layer):
        tf_lock_exists, _ = Terraform.tf_lock_details(layer)
        if tf_lock_exists:
            raise UserErrors(USER_ERROR_TF_LOCK)
    _verify_parent_layer(layer, auto_approve)

    event_properties: Dict = layer.get_event_properties()
    amplitude_client.send_event(
        amplitude_client.START_GEN_EVENT,
        event_properties=event_properties,
    )

    # We need a region with at least 3 AZs for leader election during failover.
    # Also EKS historically had problems with regions that have fewer than 3 AZs.
    if layer.cloud == "aws":
        providers = layer.gen_providers(0)["provider"]
        aws_region = providers["aws"]["region"]
        azs = _fetch_availability_zones(aws_region)
        if len(azs) < 3:
            raise UserErrors(
                fmt_msg(f"""
                    Opta requires a region with at least *3* availability zones like us-east-1 or us-west-2.
                    ~You configured {aws_region}, which only has the availability zones: {azs}.
                    ~Please choose a different region.
                    """))

    Terraform.create_state_storage(layer)
    gen_opta_resource_tags(layer)
    cloud_client: CloudClient
    if layer.cloud == "aws":
        cloud_client = AWS(layer)
    elif layer.cloud == "google":
        cloud_client = GCP(layer)
    elif layer.cloud == "azurerm":
        cloud_client = Azure(layer)
    elif layer.cloud == "local":
        if local:  # boolean passed via cli
            pass
        cloud_client = Local(layer)
    elif layer.cloud == "helm":
        cloud_client = HelmCloudClient(layer)
    else:
        raise Exception(f"Cannot handle upload config for cloud {layer.cloud}")

    existing_config: Optional[
        StructuredConfig] = cloud_client.get_remote_config()
    old_semver_string = ("" if existing_config is None else
                         existing_config.get("opta_version", "").strip("v"))
    current_semver_string = VERSION.strip("v")
    _verify_semver(old_semver_string, current_semver_string, layer,
                   auto_approve)

    try:
        existing_modules: Set[str] = set()
        first_loop = True
        for module_idx, current_modules, total_block_count in gen(
                layer, existing_config, image_tag, image_digest, test, True,
                auto_approve):
            if first_loop:
                # This is set during the first iteration, since the tf file must exist.
                existing_modules = Terraform.get_existing_modules(layer)
                first_loop = False
            configured_modules = set([x.name for x in current_modules])
            is_last_module = module_idx == total_block_count - 1
            has_new_modules = not configured_modules.issubset(existing_modules)
            if not is_last_module and not has_new_modules and not refresh:
                continue
            if is_last_module:
                untouched_modules = existing_modules - configured_modules
                configured_modules = configured_modules.union(
                    untouched_modules)

            layer.pre_hook(module_idx)
            if layer.cloud == "local":
                if is_last_module:
                    targets = []
            else:
                targets = list(
                    map(lambda x: f"-target=module.{x}",
                        sorted(configured_modules)))
            if test:
                Terraform.plan("-lock=false", *targets, layer=layer)
                print(
                    "Plan ran successfully, not applying since this is a test."
                )
            else:
                current_properties = event_properties.copy()
                current_properties["module_idx"] = module_idx
                amplitude_client.send_event(
                    amplitude_client.APPLY_EVENT,
                    event_properties=current_properties,
                )
                logger.info("Planning your changes (might take a minute)")

                try:
                    Terraform.plan(
                        "-lock=false",
                        "-input=false",
                        f"-out={TF_PLAN_PATH}",
                        layer=layer,
                        *targets,
                        quiet=True,
                    )
                except CalledProcessError as e:
                    logger.error(e.stderr or "")
                    raise e
                PlanDisplayer.display(detailed_plan=detailed_plan)

                if not auto_approve:
                    click.confirm(
                        "The above are the planned changes for your opta run. Do you approve?",
                        abort=True,
                    )
                logger.info("Applying your changes (might take a minute)")
                service_modules = (layer.get_module_by_type(
                    "k8s-service", module_idx) if layer.cloud == "aws" else
                                   layer.get_module_by_type(
                                       "gcp-k8s-service", module_idx))
                if (len(service_modules) != 0 and cluster_exist(layer.root())
                        and stdout_logs):
                    service_module = service_modules[0]
                    # Tailing logs
                    logger.info(
                        f"Identified deployment for kubernetes service module {service_module.name}, tailing logs now."
                    )
                    new_thread = Thread(
                        target=tail_module_log,
                        args=(
                            layer,
                            service_module.name,
                            10,
                            datetime.datetime.utcnow().replace(
                                tzinfo=pytz.UTC),
                            2,
                        ),
                        daemon=True,
                    )
                    # Tailing events
                    new_thread.start()
                    new_thread = Thread(
                        target=tail_namespace_events,
                        args=(
                            layer,
                            datetime.datetime.utcnow().replace(
                                tzinfo=pytz.UTC),
                            3,
                        ),
                        daemon=True,
                    )
                    new_thread.start()

                tf_flags: List[str] = []
                if auto_approve:
                    tf_flags.append("-auto-approve")
                try:
                    Terraform.apply(layer,
                                    *tf_flags,
                                    TF_PLAN_PATH,
                                    no_init=True,
                                    quiet=False)
                except Exception as e:
                    layer.post_hook(module_idx, e)
                    raise e
                else:
                    layer.post_hook(module_idx, None)
                cloud_client.upload_opta_config()
                logger.info("Opta updates complete!")
    except Exception as e:
        event_properties["success"] = False
        event_properties["error_name"] = e.__class__.__name__
        raise e
    else:
        event_properties["success"] = True
    finally:
        amplitude_client.send_event(
            amplitude_client.FINISH_GEN_EVENT,
            event_properties=event_properties,
        )
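
For reference, a hypothetical direct invocation of _apply in test mode, which plans but never applies (in practice the function is reached through the opta CLI rather than called directly):

_apply(
    config="opta.yaml",  # hypothetical config path
    env=None,
    refresh=False,
    local=False,
    image_tag=None,
    test=True,  # plan only; skips terraform apply
    auto_approve=False,
    input_variables={},
)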
Code example #17
File: terraform.py Project: run-x/opta
 def _aws_verify_storage(cls, layer: "Layer") -> bool:
     bucket = layer.state_storage()
     region = layer.root().providers["aws"]["region"]
     return AWS(layer).bucket_exists(bucket, region)
Code example #18
File: layer.py Project: run-x/opta
    def metadata_hydration(self) -> Dict[Any, Any]:
        parent_name = self.parent.name if self.parent is not None else "nil"
        parent = None
        if self.parent is not None:
            parent = SimpleNamespace(
                **{
                    k: f"${{data.terraform_remote_state.parent.outputs.{k}}}"
                    for k in self.parent.outputs()
                }
            )
        providers = self.providers
        if self.parent is not None:
            providers = deep_merge(providers, self.parent.providers)
        provider_hydration = {}
        for name, values in providers.items():
            provider_hydration[name] = SimpleNamespace(**values)

        region: Optional[str] = None
        k8s_access_token = None
        if self.cloud == "google":
            gcp = GCP(self)
            region = gcp.region
            credentials = gcp.get_credentials()[0]
            if isinstance(credentials, service_account.Credentials):
                service_account_credentials: service_account.Credentials = (
                    credentials.with_scopes(
                        [
                            "https://www.googleapis.com/auth/userinfo.email",
                            "https://www.googleapis.com/auth/cloud-platform",
                        ]
                    )
                )
                service_account_credentials.refresh(
                    google.auth.transport.requests.Request()
                )
                k8s_access_token = service_account_credentials.token
            else:
                k8s_access_token = credentials.token
            if k8s_access_token is None:
                raise Exception("Was unable to get GCP access token")
        elif self.cloud == "aws":
            aws = AWS(self)
            region = aws.region
        elif self.cloud == "azurerm":
            region = self.root().providers["azurerm"]["location"]
        elif self.cloud == "local":
            pass

        return {
            "parent": parent,
            "vars": SimpleNamespace(**self.variables),
            "variables": SimpleNamespace(**self.variables),
            "parent_name": parent_name,
            "layer_name": self.name,
            "state_storage": self.state_storage(),
            "env": self.get_env(),
            "kubeconfig": KUBE_CONFIG_DEFAULT_LOCATION,
            "k8s_access_token": k8s_access_token,
            "region": region,
            **provider_hydration,
        }
Code example #19
File: terraform.py Project: run-x/opta
 def _get_aws_lock_id(cls, layer: "Layer") -> str:
     aws = AWS(layer)
     return aws.get_terraform_lock_id()
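
Putting examples #1, #5, and #19 together, a sketch of force-releasing a stale Terraform lock after a failed run. Whether get_terraform_lock_id() returns a falsy value when no lock is held is an assumption; the tests above only exercise the locked case:

try:
    Terraform.apply(layer)  # hypothetical failing run; layer is a loaded Layer
except Exception as err:
    if AWS(layer).get_terraform_lock_id():
        # Example #5 shows this re-raising `err` for clouds without
        # force-unlock support.
        Terraform.force_delete_terraform_lock(layer, err)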