Example #1
    def test_smoke(self, lambda_client):
        resource_name = random_suffix_name("lambda-csc", 24)

        resources = get_bootstrap_resources()
        logging.debug(resources)

        replacements = REPLACEMENT_VALUES.copy()
        replacements["AWS_REGION"] = get_region()
        replacements["CODE_SIGNING_CONFIG_NAME"] = resource_name
        replacements["SIGNING_PROFILE_VERSION_ARN"] = resources.SigningProfileVersionArn

        # Load Lambda CR
        resource_data = load_lambda_resource(
            "code_signing_config",
            additional_replacements=replacements,
        )
        logging.debug(resource_data)

        # Create k8s resource
        ref = k8s.CustomResourceReference(
            CRD_GROUP, CRD_VERSION, RESOURCE_PLURAL,
            resource_name, namespace="default",
        )
        k8s.create_custom_resource(ref, resource_data)
        cr = k8s.wait_resource_consumed_by_controller(ref)

        assert cr is not None
        assert k8s.get_resource_exists(ref)

        codeSigningConfigARN = cr['status']['ackResourceMetadata']['arn']

        time.sleep(CREATE_WAIT_AFTER_SECONDS)

        # Check Lambda code signing config exists
        exists = self.code_signing_config_exists(lambda_client, codeSigningConfigARN)
        assert exists

        # Update cr
        cr["spec"]["description"] = "new description"

        # Patch k8s resource
        k8s.patch_custom_resource(ref, cr)
        time.sleep(UPDATE_WAIT_AFTER_SECONDS)

        # Check code signing config description
        csc = self.get_code_signing_config(lambda_client, codeSigningConfigARN)
        assert csc is not None
        assert csc["Description"] == "new description"

        # Delete k8s resource
        _, deleted = k8s.delete_custom_resource(ref)
        assert deleted

        time.sleep(DELETE_WAIT_AFTER_SECONDS)

        # Check Lambda code signing config doesn't exist
        exists = self.code_signing_config_exists(lambda_client, codeSigningConfigARN)
        assert not exists
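
The code_signing_config_exists helper used above is not part of this listing. A minimal sketch of what it plausibly looks like, assuming the standard boto3 Lambda client (GetCodeSigningConfig raises ResourceNotFoundException for a missing config):

    def code_signing_config_exists(self, lambda_client, csc_arn) -> bool:
        # Hypothetical sketch: a successful GetCodeSigningConfig call means the
        # config exists; ResourceNotFoundException means it does not.
        try:
            lambda_client.get_code_signing_config(CodeSigningConfigArn=csc_arn)
            return True
        except lambda_client.exceptions.ResourceNotFoundException:
            return False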
Example #2
    def test_repository_lifecycle_policy(self, ecr_client):
        resource_name = random_suffix_name("ecr-repository", 24)

        replacements = REPLACEMENT_VALUES.copy()
        replacements["REPOSITORY_NAME"] = resource_name
        # Load ECR CR
        resource_data = load_ecr_resource(
            "repository_lifecycle_policy",
            additional_replacements=replacements,
        )
        logging.debug(resource_data)

        # Create k8s resource
        ref = k8s.CustomResourceReference(
            CRD_GROUP,
            CRD_VERSION,
            RESOURCE_PLURAL,
            resource_name,
            namespace="default",
        )
        k8s.create_custom_resource(ref, resource_data)
        cr = k8s.wait_resource_consumed_by_controller(ref)

        assert cr is not None
        assert k8s.get_resource_exists(ref)

        time.sleep(CREATE_WAIT_AFTER_SECONDS)

        # Check ECR repository exists
        repo = self.get_repository(ecr_client, resource_name)
        assert repo is not None

        # Check ECR repository lifecycle policy exists
        lifecycle_policy = self.get_lifecycle_policy(ecr_client, resource_name,
                                                     repo["registryId"])
        assert lifecycle_policy == LIFECYCLE_POLICY_FILTERING_ON_IMAGE_AGE

        # Remove lifecycle policy
        cr["spec"]["lifecyclePolicy"] = ""

        # Patch k8s resource
        k8s.patch_custom_resource(ref, cr)
        time.sleep(UPDATE_WAIT_AFTER_SECONDS)

        lifecycle_policy = self.get_lifecycle_policy(ecr_client, resource_name,
                                                     repo["registryId"])
        assert lifecycle_policy == ""

        # Delete k8s resource
        _, deleted = k8s.delete_custom_resource(ref)
        assert deleted is True

        time.sleep(DELETE_WAIT_AFTER_SECONDS)

        # Check ECR repository doesn't exist
        exists = self.repository_exists(ecr_client, resource_name)
        assert not exists
    def test_create_delete_2az(self, rds_client):
        resource_name = "my-db-subnet-group"
        resource_desc = "my-db-subnet-group description"

        br_resources = get_bootstrap_resources()

        replacements = REPLACEMENT_VALUES.copy()
        replacements["DB_SUBNET_GROUP_NAME"] = resource_name
        replacements["DB_SUBNET_GROUP_DESC"] = resource_desc
        replacements["SUBNET_AZ1"] = br_resources.SubnetAZ1
        replacements["SUBNET_AZ2"] = br_resources.SubnetAZ2

        resource_data = load_rds_resource(
            "db_subnet_group_2az",
            additional_replacements=replacements,
        )
        logging.debug(resource_data)

        # Create the k8s resource
        ref = k8s.CustomResourceReference(
            CRD_GROUP, CRD_VERSION, RESOURCE_PLURAL,
            resource_name, namespace="default",
        )
        k8s.create_custom_resource(ref, resource_data)
        cr = k8s.wait_resource_consumed_by_controller(ref)

        assert cr is not None
        assert k8s.get_resource_exists(ref)

        # Let's check that the DB subnet group appears in RDS
        aws_res = rds_client.describe_db_subnet_groups(DBSubnetGroupName=resource_name)
        assert aws_res is not None
        assert len(aws_res['DBSubnetGroups']) == 1

        now = datetime.datetime.now()
        timeout = now + datetime.timedelta(seconds=CREATE_TIMEOUT_SECONDS)

        # TODO(jaypipes): Move this into generic AWS-side waiter
        while aws_res['DBSubnetGroups'][0]['SubnetGroupStatus'] != "Complete":
            if datetime.datetime.now() >= timeout:
                raise Exception("failed to find DB subnet group in Complete status before timeout")
            time.sleep(CREATE_INTERVAL_SLEEP_SECONDS)
            aws_res = rds_client.describe_db_subnet_groups(DBSubnetGroupName=resource_name)
            assert aws_res is not None
            assert len(aws_res['DBSubnetGroups']) == 1

        # Delete the k8s resource on teardown of the module
        k8s.delete_custom_resource(ref)

        time.sleep(DELETE_WAIT_AFTER_SECONDS)

        # DB subnet group should no longer appear in RDS
        try:
            aws_res = rds_client.describe_db_subnet_groups(DBSubnetGroupName=resource_name)
            assert False
        except rds_client.exceptions.DBSubnetGroupNotFoundFault:
            pass
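
The TODO above asks for a generic AWS-side waiter. A minimal sketch of what such a helper could look like (hypothetical, not from the repo): poll an arbitrary describe call until a predicate holds or a deadline passes.

def wait_aws_side(describe_fn, predicate, timeout_seconds, interval_seconds):
    # Hypothetical generic waiter: call describe_fn() until predicate(response)
    # is True, raising once the deadline passes.
    deadline = datetime.datetime.now() + datetime.timedelta(seconds=timeout_seconds)
    while True:
        response = describe_fn()
        if predicate(response):
            return response
        if datetime.datetime.now() >= deadline:
            raise Exception("timed out waiting for AWS resource to reach desired state")
        time.sleep(interval_seconds)

The polling loop above would then reduce to a single call, e.g. wait_aws_side(lambda: rds_client.describe_db_subnet_groups(DBSubnetGroupName=resource_name), lambda r: r['DBSubnetGroups'][0]['SubnetGroupStatus'] == "Complete", CREATE_TIMEOUT_SECONDS, CREATE_INTERVAL_SLEEP_SECONDS).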
Example #4
    def test_smoke(self, lambda_client, lambda_function):
        (_, function_resource) = lambda_function
        lambda_function_name = function_resource["spec"]["name"]

        resource_name = random_suffix_name("lambda-alias", 24)

        replacements = REPLACEMENT_VALUES.copy()
        replacements["AWS_REGION"] = get_region()
        replacements["ALIAS_NAME"] = resource_name
        replacements["FUNCTION_NAME"] = lambda_function_name
        replacements["FUNCTION_VERSION"] = "$LATEST"

        # Load alias CR
        resource_data = load_lambda_resource(
            "alias",
            additional_replacements=replacements,
        )
        logging.debug(resource_data)

        # Create k8s resource
        ref = k8s.CustomResourceReference(
            CRD_GROUP, CRD_VERSION, RESOURCE_PLURAL,
            resource_name, namespace="default",
        )
        k8s.create_custom_resource(ref, resource_data)
        cr = k8s.wait_resource_consumed_by_controller(ref)

        assert cr is not None
        assert k8s.get_resource_exists(ref)

        time.sleep(CREATE_WAIT_AFTER_SECONDS)

        # Check alias exists
        exists = self.alias_exist(lambda_client, resource_name, lambda_function_name)
        assert exists

        # Update cr
        cr["spec"]["description"] = ""

        # Patch k8s resource
        k8s.patch_custom_resource(ref, cr)
        time.sleep(UPDATE_WAIT_AFTER_SECONDS)

        # Check alias description
        alias = self.get_alias(lambda_client, resource_name, lambda_function_name)
        assert alias is not None
        assert alias["Description"] == ""

        # Delete k8s resource
        _, deleted = k8s.delete_custom_resource(ref)
        assert deleted

        time.sleep(DELETE_WAIT_AFTER_SECONDS)

        # Check alias doesn't exist
        exists = self.get_alias(lambda_client, resource_name, lambda_function_name)
        assert not exists
Example #5
    def _make_replication_group(yaml_name, input_dict, rg_name):
        rg = load_elasticache_resource(
            yaml_name, additional_replacements=input_dict)
        logging.debug(rg)

        reference = k8s.CustomResourceReference(
            CRD_GROUP, CRD_VERSION, RESOURCE_PLURAL, rg_name, namespace="default")
        _ = k8s.create_custom_resource(reference, rg)
        resource = k8s.wait_resource_consumed_by_controller(reference, wait_periods=10)
        assert resource is not None
        return (reference, resource)
Example #6
    def test_basic_repository(self, ecr_client):
        resource_name = random_suffix_name("ecr-repository", 24)

        replacements = REPLACEMENT_VALUES.copy()
        replacements["REPOSITORY_NAME"] = resource_name
        # Load ECR CR
        resource_data = load_ecr_resource(
            "repository",
            additional_replacements=replacements,
        )
        logging.debug(resource_data)

        # Create k8s resource
        ref = k8s.CustomResourceReference(
            CRD_GROUP,
            CRD_VERSION,
            RESOURCE_PLURAL,
            resource_name,
            namespace="default",
        )
        k8s.create_custom_resource(ref, resource_data)
        cr = k8s.wait_resource_consumed_by_controller(ref)

        assert cr is not None
        assert k8s.get_resource_exists(ref)

        time.sleep(CREATE_WAIT_AFTER_SECONDS)

        # Check ECR repository exists
        exists = self.repository_exists(ecr_client, resource_name)
        assert exists

        # Update CR
        cr["spec"]["imageScanningConfiguration"]["scanOnPush"] = True

        # Patch k8s resource
        k8s.patch_custom_resource(ref, cr)
        time.sleep(UPDATE_WAIT_AFTER_SECONDS)

        # Check repository scanOnPush scanning configuration
        repo = self.get_repository(ecr_client, resource_name)
        assert repo is not None
        assert repo["imageScanningConfiguration"]["scanOnPush"] is True

        # Delete k8s resource
        _, deleted = k8s.delete_custom_resource(ref)
        assert deleted is True

        time.sleep(DELETE_WAIT_AFTER_SECONDS)

        # Check ECR repository doesn't exist
        exists = self.repository_exists(ecr_client, resource_name)
        assert not exists
Example #7
    def test_smoke(self, dynamodb_client, dynamodb_table):
        (_, table_resource) = dynamodb_table
        resource_name = random_suffix_name("backup", 32)
        table_name = table_resource["spec"]["tableName"]

        replacements = REPLACEMENT_VALUES.copy()
        replacements["TABLE_NAME"] = table_name
        replacements["BACKUP_NAME"] = resource_name

        # Load Backup CR
        resource_data = load_dynamodb_resource(
            "backup",
            additional_replacements=replacements,
        )
        logging.debug(resource_data)

        # Create k8s resource
        ref = k8s.CustomResourceReference(
            CRD_GROUP,
            CRD_VERSION,
            RESOURCE_PLURAL,
            resource_name,
            namespace="default",
        )
        k8s.create_custom_resource(ref, resource_data)
        cr = k8s.wait_resource_consumed_by_controller(ref)

        assert cr is not None
        assert k8s.get_resource_exists(ref)

        wait_for_cr_status(
            ref,
            "backupStatus",
            "AVAILABLE",
            10,
            5,
        )

        backupArn = k8s.get_resource_arn(cr)
        # Check DynamoDB Backup exists
        exists = self.backup_exists(dynamodb_client, backupArn)
        assert exists

        # Delete k8s resource
        _, deleted = k8s.delete_custom_resource(ref)
        assert deleted is True

        time.sleep(DELETE_WAIT_AFTER_SECONDS)

        # Check DynamoDB Backup doesn't exist
        exists = self.backup_exists(dynamodb_client, backupArn)
        assert not exists
Example #8
def user_password(user_password_input, elasticache_client):

    # inject parameters into yaml; create User in cluster
    user = load_elasticache_resource("user_password", additional_replacements=user_password_input)
    reference = k8s.CustomResourceReference(
        CRD_GROUP, CRD_VERSION, RESOURCE_PLURAL, user_password_input["USER_ID"], namespace="default")
    _ = k8s.create_custom_resource(reference, user)
    resource = k8s.wait_resource_consumed_by_controller(reference)
    assert resource is not None
    yield (reference, resource)

    # teardown: delete in k8s, assert user does not exist in AWS
    k8s.delete_custom_resource(reference)
    sleep(DEFAULT_WAIT_SECS)
    assert_user_deletion(user_password_input['USER_ID'])
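
assert_user_deletion itself is not shown here, but Example #16 below performs the same teardown check inline, so the helper is presumably equivalent to this sketch (assumed; the ElastiCache client would be passed in or resolved from a fixture):

def assert_user_deletion(user_id, elasticache_client):
    # Assumed equivalent of the inline check in Example #16: describing a
    # deleted user should raise a ClientError matching "UserNotFound".
    with pytest.raises(botocore.exceptions.ClientError, match="UserNotFound"):
        elasticache_client.describe_users(UserId=user_id)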
Example #9
    def test_smoke(self, dynamodb_client, dynamodb_table):
        (_, table_resource) = dynamodb_table

        # Global Tables must have the same name as DynamoDB Tables
        global_table_name = table_resource["spec"]["tableName"]

        replacements = REPLACEMENT_VALUES.copy()
        replacements["REGION_NAME"] = get_region()
        replacements["TABLE_NAME"] = global_table_name
        replacements["GLOBAL_TABLE_NAME"] = global_table_name

        # Load Global Table CR
        resource_data = load_dynamodb_resource(
            "global_table",
            additional_replacements=replacements,
        )
        logging.debug(resource_data)

        # Create k8s resource
        ref = k8s.CustomResourceReference(
            CRD_GROUP, CRD_VERSION, RESOURCE_PLURAL,
            global_table_name, namespace="default",
        )
        k8s.create_custom_resource(ref, resource_data)
        cr = k8s.wait_resource_consumed_by_controller(ref)

        assert cr is not None
        assert k8s.get_resource_exists(ref)

        wait_for_cr_status(
            ref,
            "globalTableStatus",
            "ACTIVE",
            10,
            5,
        )

        # Check DynamoDB Global Table exists
        exists = self.global_table_exists(dynamodb_client, global_table_name)
        assert exists

        _, deleted = k8s.delete_custom_resource(ref)
        assert deleted is True

        time.sleep(DELETE_WAIT_AFTER_SECONDS)

        exists = self.global_table_exists(dynamodb_client, global_table_name)
        assert not exists
Example #10
    def test_create_delete(self, dynamodb_client):
        resource_name = random_suffix_name("table", 32)

        replacements = REPLACEMENT_VALUES.copy()
        replacements["TABLE_NAME"] = resource_name

        # Load Table CR
        resource_data = load_dynamodb_resource(
            "table_forums",
            additional_replacements=replacements,
        )
        logging.debug(resource_data)

        # Create k8s resource
        ref = k8s.CustomResourceReference(
            CRD_GROUP,
            CRD_VERSION,
            RESOURCE_PLURAL,
            resource_name,
            namespace="default",
        )
        k8s.create_custom_resource(ref, resource_data)
        cr = k8s.wait_resource_consumed_by_controller(ref)

        assert cr is not None
        assert k8s.get_resource_exists(ref)

        wait_for_cr_status(
            ref,
            "tableStatus",
            "ACTIVE",
            10,
            5,
        )

        # Check DynamoDB Table exists
        exists = self.table_exists(dynamodb_client, resource_name)
        assert exists

        # Delete k8s resource
        _, deleted = k8s.delete_custom_resource(ref)
        assert deleted is True

        time.sleep(DELETE_WAIT_AFTER_SECONDS)

        # Check DynamoDB Table doesn't exist
        exists = self.table_exists(dynamodb_client, resource_name)
        assert not exists
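
wait_for_cr_status is called in this listing with two trailing numeric arguments. A plausible sketch of the helper, under the assumption that they are the number of polls and the per-poll sleep in seconds (the actual parameter order is not visible here):

def wait_for_cr_status(ref, status_field, desired_value, wait_periods, period_length):
    # Assumed semantics: poll the CR until status[status_field] equals
    # desired_value, sleeping period_length seconds between at most
    # wait_periods polls.
    for _ in range(wait_periods):
        time.sleep(period_length)
        cr = k8s.get_resource(ref)
        if cr.get("status", {}).get(status_field) == desired_value:
            return
    raise Exception(f"CR never reached {status_field}={desired_value}")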
Example #11
    def test_create_delete_simple(self, rds_client):
        resource_name = "my-db-security-group"
        resource_desc = "my-db-security-group description"

        br_resources = get_bootstrap_resources()

        replacements = REPLACEMENT_VALUES.copy()
        replacements["DB_SECURITY_GROUP_NAME"] = resource_name
        replacements["DB_SECURITY_GROUP_DESC"] = resource_desc

        resource_data = load_rds_resource(
            "db_security_group_simple",
            additional_replacements=replacements,
        )
        logging.debug(resource_data)

        # Create the k8s resource
        ref = k8s.CustomResourceReference(
            CRD_GROUP,
            CRD_VERSION,
            RESOURCE_PLURAL,
            resource_name,
            namespace="default",
        )
        k8s.create_custom_resource(ref, resource_data)
        cr = k8s.wait_resource_consumed_by_controller(ref)

        assert cr is not None
        assert k8s.get_resource_exists(ref)

        # Let's check that the DB security group appears in RDS
        aws_res = rds_client.describe_db_security_groups(
            DBSecurityGroupName=resource_name)
        assert aws_res is not None
        assert len(aws_res['DBSecurityGroups']) == 1

        # Delete the k8s resource on teardown of the module
        k8s.delete_custom_resource(ref)

        time.sleep(DELETE_WAIT_AFTER_SECONDS)

        # DB security group should no longer appear in RDS
        try:
            aws_res = rds_client.describe_db_security_groups(
                DBSecurityGroupName=resource_name)
            assert False
        except rds_client.exceptions.DBSecurityGroupNotFoundFault:
            pass
Example #12
    def test_create_delete_aurora_mysql5_7(self, rds_client):
        resource_name = "aurora-mysql-5-7"
        resource_desc = "Parameters for Aurora MySQL 5.7-compatible"

        replacements = REPLACEMENT_VALUES.copy()
        replacements["DB_CLUSTER_PARAMETER_GROUP_NAME"] = resource_name
        replacements["DB_CLUSTER_PARAMETER_GROUP_DESC"] = resource_desc

        resource_data = load_rds_resource(
            "db_cluster_parameter_group_aurora_mysql5.7",
            additional_replacements=replacements,
        )
        logging.debug(resource_data)

        # Create the k8s resource
        ref = k8s.CustomResourceReference(
            CRD_GROUP, CRD_VERSION, RESOURCE_PLURAL,
            resource_name, namespace="default",
        )
        k8s.create_custom_resource(ref, resource_data)
        cr = k8s.wait_resource_consumed_by_controller(ref)

        assert cr is not None
        assert k8s.get_resource_exists(ref)

        # Let's check that the DB cluster parameter group appears in RDS
        aws_res = rds_client.describe_db_cluster_parameter_groups(
            DBClusterParameterGroupName=resource_name,
        )
        assert aws_res is not None
        assert len(aws_res['DBClusterParameterGroups']) == 1

        # Delete the k8s resource on teardown of the module
        k8s.delete_custom_resource(ref)

        time.sleep(DELETE_WAIT_AFTER_SECONDS)

        # DB cluster parameter group should no longer appear in RDS
        try:
            aws_res = rds_client.describe_db_cluster_parameter_groups(
                DBClusterParameterGroupName=resource_name,
            )
            assert False
        # NOTE(jaypipes): RDS DescribeDBClusterParameterGroups returns
        # DBParameterGroupNotFoundFault, *not* DBClusterParameterGroupNotFound.
        except rds_client.exceptions.DBParameterGroupNotFoundFault:
            pass
Example #13
    def test_create_delete_postgres13_standard(self, rds_client):
        resource_name = "pg13-standard"
        resource_desc = "Parameters for PostgreSQL 13"

        replacements = REPLACEMENT_VALUES.copy()
        replacements["DB_PARAMETER_GROUP_NAME"] = resource_name
        replacements["DB_PARAMETER_GROUP_DESC"] = resource_desc

        resource_data = load_rds_resource(
            "db_parameter_group_postgres13_standard",
            additional_replacements=replacements,
        )
        logging.debug(resource_data)

        # Create the k8s resource
        ref = k8s.CustomResourceReference(
            CRD_GROUP,
            CRD_VERSION,
            RESOURCE_PLURAL,
            resource_name,
            namespace="default",
        )
        k8s.create_custom_resource(ref, resource_data)
        cr = k8s.wait_resource_consumed_by_controller(ref)

        assert cr is not None
        assert k8s.get_resource_exists(ref)

        # Let's check that the DB parameter group appears in RDS
        aws_res = rds_client.describe_db_parameter_groups(
            DBParameterGroupName=resource_name)
        assert aws_res is not None
        assert len(aws_res['DBParameterGroups']) == 1

        # Delete the k8s resource on teardown of the module
        k8s.delete_custom_resource(ref)

        time.sleep(DELETE_WAIT_AFTER_SECONDS)

        # DB parameter group should no longer appear in RDS
        try:
            aws_res = rds_client.describe_db_parameter_groups(
                DBParameterGroupName=resource_name)
            assert False
        except rds_client.exceptions.DBParameterGroupNotFoundFault:
            pass
Example #14
def lambda_function():
    resource_name = random_suffix_name("lambda-function", 24)
    resources = get_bootstrap_resources()

    replacements = REPLACEMENT_VALUES.copy()
    replacements["FUNCTION_NAME"] = resource_name
    replacements["BUCKET_NAME"] = resources.FunctionsBucketName
    replacements["LAMBDA_ROLE"] = resources.LambdaESMRoleARN
    replacements["LAMBDA_FILE_NAME"] = resources.LambdaFunctionFileZip
    replacements["RESERVED_CONCURRENT_EXECUTIONS"] = "0"
    replacements["CODE_SIGNING_CONFIG_ARN"] = ""
    replacements["AWS_REGION"] = get_region()

    # Load function CR
    resource_data = load_lambda_resource(
        "function",
        additional_replacements=replacements,
    )
    logging.debug(resource_data)

    # Create k8s resource
    function_reference = k8s.CustomResourceReference(
        CRD_GROUP,
        CRD_VERSION,
        "functions",
        resource_name,
        namespace="default",
    )

    # Create lambda function
    k8s.create_custom_resource(function_reference, resource_data)
    function_resource = k8s.wait_resource_consumed_by_controller(
        function_reference)

    assert function_resource is not None
    assert k8s.get_resource_exists(function_reference)

    time.sleep(CREATE_WAIT_AFTER_SECONDS)

    yield (function_reference, function_resource)

    _, deleted = k8s.delete_custom_resource(function_reference)
    assert deleted
Example #15
def user_group_create(get_user_group_yaml):
    user_group_id = random_suffix_name("ack-usergroup", 32)

    reference = k8s.CustomResourceReference(CRD_GROUP,
                                            CRD_VERSION,
                                            RESOURCE_PLURAL,
                                            user_group_id,
                                            namespace="default")

    user_group = get_user_group_yaml(user_group_id)

    # Create new user group
    _ = k8s.create_custom_resource(reference, user_group)
    resource = k8s.wait_resource_consumed_by_controller(reference,
                                                        wait_periods=10)
    assert resource is not None
    yield reference, resource

    # Teardown
    _, deleted = k8s.delete_custom_resource(reference)
    assert deleted is True
Example #16
def user_password(user_password_input, elasticache_client):

    # inject parameters into yaml; create User in cluster
    user = load_elasticache_resource(
        "user_password", additional_replacements=user_password_input)
    reference = k8s.CustomResourceReference(CRD_GROUP,
                                            CRD_VERSION,
                                            RESOURCE_PLURAL,
                                            user_password_input["USER_ID"],
                                            namespace="default")
    _ = k8s.create_custom_resource(reference, user)
    resource = k8s.wait_resource_consumed_by_controller(reference)
    assert resource is not None
    yield (reference, resource)

    # teardown: delete in k8s, assert user does not exist in AWS
    k8s.delete_custom_resource(reference)
    sleep(DEFAULT_WAIT_SECS)
    with pytest.raises(botocore.exceptions.ClientError, match="UserNotFound"):
        _ = elasticache_client.describe_users(
            UserId=user_password_input["USER_ID"])
Example #17
def code_signing_config():
    resource_name = random_suffix_name("lambda-csc", 24)

    resources = get_bootstrap_resources()
    logging.debug(resources)

    replacements = REPLACEMENT_VALUES.copy()
    replacements["AWS_REGION"] = get_region()
    replacements["CODE_SIGNING_CONFIG_NAME"] = resource_name
    replacements[
        "SIGNING_PROFILE_VERSION_ARN"] = resources.SigningProfileVersionArn

    # Load Lambda CR
    resource_data = load_lambda_resource(
        "code_signing_config",
        additional_replacements=replacements,
    )
    logging.debug(resource_data)

    # Create k8s resource
    ref = k8s.CustomResourceReference(
        CRD_GROUP,
        CRD_VERSION,
        "codesigningconfigs",
        resource_name,
        namespace="default",
    )
    k8s.create_custom_resource(ref, resource_data)
    cr = k8s.wait_resource_consumed_by_controller(ref)

    assert cr is not None
    assert k8s.get_resource_exists(ref)

    time.sleep(CREATE_WAIT_AFTER_SECONDS)

    yield (ref, cr)

    _, deleted = k8s.delete_custom_resource(ref)
    assert deleted
Example #18
def dynamodb_table():
    resource_name = random_suffix_name("table", 32)

    replacements = REPLACEMENT_VALUES.copy()
    replacements["TABLE_NAME"] = resource_name

    # load resource
    resource_data = load_dynamodb_resource(
        "table_forums",
        additional_replacements=replacements,
    )

    table_reference = k8s.CustomResourceReference(
        CRD_GROUP,
        CRD_VERSION,
        "tables",
        resource_name,
        namespace="default",
    )

    # Create table
    k8s.create_custom_resource(table_reference, resource_data)
    table_resource = k8s.wait_resource_consumed_by_controller(table_reference)

    assert table_resource is not None
    assert k8s.get_resource_exists(table_reference)

    wait_for_cr_status(
        table_reference,
        "tableStatus",
        "ACTIVE",
        10,
        30,
    )

    yield (table_reference, table_resource)

    _, deleted = k8s.delete_custom_resource(table_reference)
    assert deleted
Example #19
    def custom_resource_reference(
            self,
            input_data: dict,
            input_replacements: dict = {}) -> k8s.CustomResourceReference:
        """Helper method to provide k8s.CustomResourceReference for supplied input

        Args:
            input_data: custom resource input data
            input_replacements: input replacements

        Returns:
            k8s.CustomResourceReference
        """

        resource_name = input_data.get("metadata").get("name")
        crd_group = input_replacements.get("CRD_GROUP")
        crd_version = input_replacements.get("CRD_VERSION")

        reference = k8s.CustomResourceReference(crd_group,
                                                crd_version,
                                                self.resource_plural,
                                                resource_name,
                                                namespace="default")
        return reference
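
A short, hypothetical usage sketch for the helper above (all names illustrative): a test class that sets resource_plural and passes the CRD coordinates through input_replacements.

class MyResourceTest(BaseResourceTest):  # hypothetical base class defining the helper
    resource_plural = "myresources"

    def test_reference(self):
        input_data = {"metadata": {"name": "my-resource"}}
        replacements = {"CRD_GROUP": "services.k8s.aws", "CRD_VERSION": "v1alpha1"}
        ref = self.custom_resource_reference(input_data, replacements)
        k8s.create_custom_resource(ref, input_data)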
Example #20
    def test_create_delete_2d3m_multi_az_no_vpc_7_9(self, es_client):
        resource = Domain(name="my-es-domain2",
                          data_node_count=2,
                          master_node_count=3,
                          is_zone_aware=True)

        replacements = REPLACEMENT_VALUES.copy()
        replacements["DOMAIN_NAME"] = resource.name
        replacements["MASTER_NODE_COUNT"] = str(resource.master_node_count)
        replacements["DATA_NODE_COUNT"] = str(resource.data_node_count)

        resource_data = load_resource(
            "domain_es_xdym_multi_az7.9",
            additional_replacements=replacements,
        )
        logging.debug(resource_data)

        # Create the k8s resource
        ref = k8s.CustomResourceReference(
            CRD_GROUP,
            CRD_VERSION,
            RESOURCE_PLURAL,
            resource.name,
            namespace="default",
        )
        k8s.create_custom_resource(ref, resource_data)
        cr = k8s.wait_resource_consumed_by_controller(ref)

        assert cr is not None
        assert k8s.get_resource_exists(ref)

        logging.debug(cr)

        # Let's check that the domain appears in AES
        aws_res = es_client.describe_elasticsearch_domain(
            DomainName=resource.name)

        logging.debug(aws_res)

        now = datetime.datetime.now()
        timeout = now + datetime.timedelta(seconds=CREATE_TIMEOUT_SECONDS)

        aws_res = wait_for_create_or_die(es_client, resource, timeout)
        logging.info(
            f"ES Domain {resource.name} creation succeeded and DomainStatus.Processing is now False"
        )

        assert aws_res['DomainStatus']['ElasticsearchVersion'] == '7.9'
        assert aws_res['DomainStatus']['Created'] is True
        assert aws_res['DomainStatus']['ElasticsearchClusterConfig'][
            'InstanceCount'] == resource.data_node_count
        assert aws_res['DomainStatus']['ElasticsearchClusterConfig'][
            'DedicatedMasterCount'] == resource.master_node_count
        assert aws_res['DomainStatus']['ElasticsearchClusterConfig'][
            'ZoneAwarenessEnabled'] == resource.is_zone_aware

        # Delete the k8s resource on teardown of the module
        k8s.delete_custom_resource(ref)

        logging.info(
            f"Deleted CR for ES Domain {resource.name}. Waiting {DELETE_WAIT_AFTER_SECONDS} before checking existence in AWS API"
        )
        time.sleep(DELETE_WAIT_AFTER_SECONDS)

        now = datetime.datetime.now()
        timeout = now + datetime.timedelta(seconds=DELETE_TIMEOUT_SECONDS)

        # Domain should no longer appear in AES
        wait_for_delete_or_die(es_client, resource, timeout)
Example #21
    def test_repository_tags(self, ecr_client):
        resource_name = random_suffix_name("ecr-repository", 24)

        replacements = REPLACEMENT_VALUES.copy()
        replacements["REPOSITORY_NAME"] = resource_name
        # Load ECR CR
        resource_data = load_ecr_resource(
            "repository",
            additional_replacements=replacements,
        )
        logging.debug(resource_data)

        # Create k8s resource
        ref = k8s.CustomResourceReference(
            CRD_GROUP,
            CRD_VERSION,
            RESOURCE_PLURAL,
            resource_name,
            namespace="default",
        )
        k8s.create_custom_resource(ref, resource_data)
        cr = k8s.wait_resource_consumed_by_controller(ref)

        assert cr is not None
        assert k8s.get_resource_exists(ref)

        time.sleep(CREATE_WAIT_AFTER_SECONDS)

        cr = k8s.wait_resource_consumed_by_controller(ref)

        # Check ECR repository exists
        exists = self.repository_exists(ecr_client, resource_name)
        assert exists

        # Add repository tags
        tags = [
            {
                "key": "k1",
                "value": "v1",
            },
            {
                "key": "k2",
                "value": "v2",
            },
        ]
        cr["spec"]["tags"] = tags

        # Patch k8s resource
        k8s.patch_custom_resource(ref, cr)
        time.sleep(UPDATE_WAIT_AFTER_SECONDS)

        repository_tags = self.get_resource_tags(
            ecr_client, cr["status"]["ackResourceMetadata"]["arn"])
        assert len(repository_tags) == len(tags)
        assert repository_tags[0]['Key'] == tags[0]['key']
        assert repository_tags[0]['Value'] == tags[0]['value']
        assert repository_tags[1]['Key'] == tags[1]['key']
        assert repository_tags[1]['Value'] == tags[1]['value']

        # Update repository tags
        tags = [
            {
                "key": "k1",
                "value": "v1",
            },
            {
                "key": "k2",
                "value": "v2.updated",
            },
        ]

        cr = k8s.wait_resource_consumed_by_controller(ref)
        cr["spec"]["tags"] = tags
        k8s.patch_custom_resource(ref, cr)

        time.sleep(UPDATE_WAIT_AFTER_SECONDS)

        repository_tags = self.get_resource_tags(
            ecr_client, cr["status"]["ackResourceMetadata"]["arn"])
        assert len(repository_tags) == len(tags)
        assert repository_tags[0]['Key'] == tags[0]['key']
        assert repository_tags[0]['Value'] == tags[0]['value']
        assert repository_tags[1]['Key'] == tags[1]['key']
        assert repository_tags[1]['Value'] == tags[1]['value']

        cr = k8s.wait_resource_consumed_by_controller(ref)

        # Delete one repository tag
        cr["spec"]["tags"] = tags[:-1]
        k8s.patch_custom_resource(ref, cr)
        time.sleep(UPDATE_WAIT_AFTER_SECONDS)

        repository_tags = self.get_resource_tags(
            ecr_client, cr["status"]["ackResourceMetadata"]["arn"])
        assert len(repository_tags) == len(tags[:-1])
        assert repository_tags[0]['Key'] == tags[0]['key']
        assert repository_tags[0]['Value'] == tags[0]['value']

        # Delete k8s resource
        _, deleted = k8s.delete_custom_resource(ref)
        assert deleted is True

        time.sleep(DELETE_WAIT_AFTER_SECONDS)

        # Check ECR repository doesn't exist
        exists = self.repository_exists(ecr_client, resource_name)
        assert not exists
Example #22
    def test_create_delete_postgres13_standard(self):
        resource_name = "pg13-standard"
        resource_desc = "Parameters for PostgreSQL 13"

        replacements = REPLACEMENT_VALUES.copy()
        replacements["DB_PARAMETER_GROUP_NAME"] = resource_name
        replacements["DB_PARAMETER_GROUP_DESC"] = resource_desc

        resource_data = load_rds_resource(
            "db_parameter_group_postgres13_standard",
            additional_replacements=replacements,
        )
        logging.debug(resource_data)

        # Create the k8s resource
        ref = k8s.CustomResourceReference(
            CRD_GROUP,
            CRD_VERSION,
            RESOURCE_PLURAL,
            resource_name,
            namespace="default",
        )
        k8s.create_custom_resource(ref, resource_data)
        cr = k8s.wait_resource_consumed_by_controller(ref)

        assert cr is not None
        assert k8s.get_resource_exists(ref)
        condition.assert_synced(ref)

        # Let's check that the DB parameter group appears in RDS
        latest = db_parameter_group.get(resource_name)
        assert latest is not None
        assert latest['Description'] == resource_desc

        arn = latest['DBParameterGroupArn']
        expect_tags = [{"Key": "environment", "Value": "dev"}]
        latest_tags = db_parameter_group.get_tags(arn)
        assert expect_tags == latest_tags

        # OK, now let's update the tag set and check that the tags are
        # updated accordingly.
        new_tags = [{
            "key": "environment",
            "value": "prod",
        }]
        updates = {
            "spec": {
                "tags": new_tags
            },
        }
        k8s.patch_custom_resource(ref, updates)
        time.sleep(MODIFY_WAIT_AFTER_SECONDS)

        latest_tags = db_parameter_group.get_tags(arn)
        after_update_expected_tags = [{
            "Key": "environment",
            "Value": "prod",
        }]
        assert latest_tags == after_update_expected_tags

        # Delete the k8s resource on teardown of the module
        k8s.delete_custom_resource(ref)

        time.sleep(DELETE_WAIT_AFTER_SECONDS)

        # DB parameter group should no longer appear in RDS
        latest = db_parameter_group.get(resource_name)
        assert latest is None
Example #23
    def test_function_code_signing_config(self, lambda_client,
                                          code_signing_config):
        (_, csc_resource) = code_signing_config
        code_signing_config_arn = csc_resource["status"][
            "ackResourceMetadata"]["arn"]
        resource_name = random_suffix_name("lambda-function", 24)

        resources = get_bootstrap_resources()

        replacements = REPLACEMENT_VALUES.copy()
        replacements["FUNCTION_NAME"] = resource_name
        replacements["BUCKET_NAME"] = resources.FunctionsBucketName
        replacements["LAMBDA_ROLE"] = resources.LambdaBasicRoleARN
        replacements["LAMBDA_FILE_NAME"] = resources.LambdaFunctionFileZip
        replacements["RESERVED_CONCURRENT_EXECUTIONS"] = "2"
        replacements["CODE_SIGNING_CONFIG_ARN"] = code_signing_config_arn
        replacements["AWS_REGION"] = get_region()

        # Load Lambda CR
        resource_data = load_lambda_resource(
            "function",
            additional_replacements=replacements,
        )
        logging.debug(resource_data)

        # Create k8s resource
        ref = k8s.CustomResourceReference(
            CRD_GROUP,
            CRD_VERSION,
            RESOURCE_PLURAL,
            resource_name,
            namespace="default",
        )
        k8s.create_custom_resource(ref, resource_data)
        cr = k8s.wait_resource_consumed_by_controller(ref)

        assert cr is not None
        assert k8s.get_resource_exists(ref)

        time.sleep(CREATE_WAIT_AFTER_SECONDS)

        cr = k8s.wait_resource_consumed_by_controller(ref)

        # Check Lambda function exists
        exists = self.function_exists(lambda_client, resource_name)
        assert exists

        # Check function code signing config is correct
        function_csc_arn = self.get_function_code_signing_config(
            lambda_client, resource_name)
        assert function_csc_arn == code_signing_config_arn

        # Delete function code signing config
        cr["spec"]["codeSigningConfigARN"] = ""
        k8s.patch_custom_resource(ref, cr)

        time.sleep(UPDATE_WAIT_AFTER_SECONDS)

        function_csc_arn = self.get_function_code_signing_config(
            lambda_client, resource_name)
        assert function_csc_arn is None

        # Delete k8s resource
        _, deleted = k8s.delete_custom_resource(ref)
        assert deleted is True

        time.sleep(DELETE_WAIT_AFTER_SECONDS)

        # Check Lambda function doesn't exist
        exists = self.function_exists(lambda_client, resource_name)
        assert not exists
Example #24
    def test_crud_postgres14_t3_micro(
        self,
        k8s_secret,
    ):
        db_instance_id = random_suffix_name("pg14-t3-micro", 20)
        secret = k8s_secret(
            self.MUP_NS,
            self.MUP_SEC_NAME,
            self.MUP_SEC_KEY,
            self.MUP_SEC_VAL,
        )

        replacements = REPLACEMENT_VALUES.copy()
        replacements['COPY_TAGS_TO_SNAPSHOT'] = "False"
        replacements["DB_INSTANCE_ID"] = db_instance_id
        replacements["MASTER_USER_PASS_SECRET_NAMESPACE"] = secret.ns
        replacements["MASTER_USER_PASS_SECRET_NAME"] = secret.name
        replacements["MASTER_USER_PASS_SECRET_KEY"] = secret.key
        replacements["DB_SUBNET_GROUP_NAME"] = get_bootstrap_resources(
        ).DBSubnetGroupName

        resource_data = load_rds_resource(
            "db_instance_postgres14_t3_micro",
            additional_replacements=replacements,
        )

        # Create the k8s resource
        ref = k8s.CustomResourceReference(
            CRD_GROUP,
            CRD_VERSION,
            RESOURCE_PLURAL,
            db_instance_id,
            namespace="default",
        )
        k8s.create_custom_resource(ref, resource_data)
        cr = k8s.wait_resource_consumed_by_controller(ref)

        assert cr is not None
        assert 'status' in cr
        assert 'dbInstanceStatus' in cr['status']
        assert cr['status']['dbInstanceStatus'] == 'creating'
        condition.assert_not_synced(ref)

        # Wait for the resource to get synced
        assert k8s.wait_on_condition(ref,
                                     "ACK.ResourceSynced",
                                     "True",
                                     wait_periods=MAX_WAIT_FOR_SYNCED_MINUTES)

        # After the resource is synced, assert that DBInstanceStatus is available
        latest = db_instance.get(db_instance_id)
        assert latest is not None
        assert latest['DBInstanceStatus'] == 'available'
        assert latest['MultiAZ'] is False

        # Before we update the DBInstance CR below, let's check to see that the
        # DbInstanceStatus field in the CR has been updated to something other
        # than 'creating', which is what is set after the initial creation.
        # The CR's `Status.DBInstanceStatus` should be updated because the CR
        # is requeued on successful reconciliation loops and subsequent
        # reconciliation loops call ReadOne and should update the CR's Status
        # with the latest observed information.
        # https://github.com/aws-controllers-k8s/community/issues/923
        cr = k8s.get_resource(ref)
        assert cr is not None
        assert 'status' in cr
        assert 'dbInstanceStatus' in cr['status']
        assert cr['status']['dbInstanceStatus'] != 'creating'
        condition.assert_synced(ref)

        # We're now going to modify the CopyTagsToSnapshot field of the DB
        # instance, wait some time and verify that the RDS server-side resource
        # shows the new value of the field.
        latest = db_instance.get(db_instance_id)
        assert latest is not None
        assert latest['CopyTagsToSnapshot'] is False
        assert latest['DBSubnetGroup'][
            'DBSubnetGroupName'] == get_bootstrap_resources().DBSubnetGroupName
        updates = {
            "spec": {
                "copyTagsToSnapshot": True,
                "multiAZ": True
            },
        }
        k8s.patch_custom_resource(ref, updates)
        time.sleep(MODIFY_WAIT_AFTER_SECONDS)

        # wait for the resource to get synced after the patch
        assert k8s.wait_on_condition(ref,
                                     "ACK.ResourceSynced",
                                     "True",
                                     wait_periods=MAX_WAIT_FOR_SYNCED_MINUTES)

        # After resource is synced again, assert that patches are reflected in the AWS resource
        latest = db_instance.get(db_instance_id)
        assert latest is not None
        assert latest['CopyTagsToSnapshot'] is True
        assert latest['MultiAZ'] is True

        updates = {
            "spec": {
                "copyTagsToSnapshot": False,
                "multiAZ": False
            },
        }
        k8s.patch_custom_resource(ref, updates)
        time.sleep(MODIFY_WAIT_AFTER_SECONDS)

        # wait for the resource to get synced after the patch
        assert k8s.wait_on_condition(ref,
                                     "ACK.ResourceSynced",
                                     "True",
                                     wait_periods=MAX_WAIT_FOR_SYNCED_MINUTES)

        # After resource is synced again, assert that patches are reflected in the AWS resource
        latest = db_instance.get(db_instance_id)
        assert latest is not None
        assert latest['CopyTagsToSnapshot'] is False
        assert latest['MultiAZ'] is False

        k8s.delete_custom_resource(ref)

        time.sleep(DELETE_WAIT_AFTER_SECONDS)

        db_instance.wait_until_deleted(db_instance_id)
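
db_instance.wait_until_deleted is another helper outside this listing; a sketch of its assumed behavior using the boto3 RDS client (DescribeDBInstances raises DBInstanceNotFoundFault once the instance is gone):

def wait_until_deleted(db_instance_id, timeout_seconds=600, interval_seconds=15):
    # Assumed behavior: poll DescribeDBInstances until it raises
    # DBInstanceNotFoundFault, failing if the deadline passes first.
    rds = boto3.client("rds")
    deadline = datetime.datetime.now() + datetime.timedelta(seconds=timeout_seconds)
    while datetime.datetime.now() < deadline:
        try:
            rds.describe_db_instances(DBInstanceIdentifier=db_instance_id)
        except rds.exceptions.DBInstanceNotFoundFault:
            return
        time.sleep(interval_seconds)
    raise Exception(f"DB instance {db_instance_id} still present after {timeout_seconds}s")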
Example #25
    def test_table_update_tags(self, dynamodb_client):
        resource_name = random_suffix_name("table", 32)

        replacements = REPLACEMENT_VALUES.copy()
        replacements["TABLE_NAME"] = resource_name

        # Load Table CR
        resource_data = load_dynamodb_resource(
            "table_forums",
            additional_replacements=replacements,
        )
        logging.debug(resource_data)

        # Create k8s resource
        ref = k8s.CustomResourceReference(
            CRD_GROUP,
            CRD_VERSION,
            RESOURCE_PLURAL,
            resource_name,
            namespace="default",
        )
        k8s.create_custom_resource(ref, resource_data)
        cr = k8s.wait_resource_consumed_by_controller(ref)

        assert cr is not None
        assert k8s.get_resource_exists(ref)

        wait_for_cr_status(
            ref,
            "tableStatus",
            "ACTIVE",
            10,
            5,
        )

        # Check DynamoDB Table exists
        exists = self.table_exists(dynamodb_client, resource_name)
        assert exists

        # Get CR latest revision
        cr = k8s.wait_resource_consumed_by_controller(ref)

        # Update table list of tags
        tags = [
            {
                "key": "key1",
                "value": "value1",
            },
        ]
        cr["spec"]["tags"] = tags

        # Patch k8s resource
        k8s.patch_custom_resource(ref, cr)
        time.sleep(UPDATE_TAGS_WAIT_AFTER_SECONDS)

        table_tags = get_resource_tags(
            cr["status"]["ackResourceMetadata"]["arn"])
        assert len(table_tags) == len(tags)
        assert table_tags[0]['Key'] == tags[0]['key']
        assert table_tags[0]['Value'] == tags[0]['value']

        # Delete k8s resource
        _, deleted = k8s.delete_custom_resource(ref)
        assert deleted is True

        time.sleep(DELETE_WAIT_AFTER_SECONDS)

        # Check DynamoDB Table doesn't exist
        exists = self.table_exists(dynamodb_client, resource_name)
        assert not exists
Example #26
    def test_smoke_dynamodb_table_stream(self, lambda_client, lambda_function):
        (_, function_resource) = lambda_function
        lambda_function_name = function_resource["spec"]["name"]

        resource_name = random_suffix_name("lambda-esm", 24)
        resources = get_bootstrap_resources()

        replacements = REPLACEMENT_VALUES.copy()
        replacements["AWS_REGION"] = get_region()
        replacements["EVENT_SOURCE_MAPPING_NAME"] = resource_name
        replacements["BATCH_SIZE"] = "10"
        replacements["FUNCTION_NAME"] = lambda_function_name
        replacements["EVENT_SOURCE_ARN"] = resources.DynamoDBTableARN
        replacements["STARTING_POSITION"] = "LATEST"
        replacements["MAXIMUM_RETRY_ATTEMPTS"] = "-1"

        # Load ESM CR
        resource_data = load_lambda_resource(
            "event_source_mapping_dynamodb",
            additional_replacements=replacements,
        )
        logging.debug(resource_data)

        # Create k8s resource
        ref = k8s.CustomResourceReference(
            CRD_GROUP,
            CRD_VERSION,
            RESOURCE_PLURAL,
            resource_name,
            namespace="default",
        )
        k8s.create_custom_resource(ref, resource_data)
        cr = k8s.wait_resource_consumed_by_controller(ref)

        assert cr is not None
        assert k8s.get_resource_exists(ref)

        time.sleep(CREATE_WAIT_AFTER_SECONDS)

        esm_uuid = cr['status']['uuid']

        # Check ESM exists
        exists = self.event_source_mapping_exists(lambda_client, esm_uuid)
        assert exists

        # Update cr
        cr["spec"]["maximumRetryAttempts"] = 3
        cr["spec"]["destinationConfig"] = {
            'onFailure': {
                'destination': resources.SQSQueueARN,
            }
        }

        # Patch k8s resource
        k8s.patch_custom_resource(ref, cr)
        time.sleep(UPDATE_WAIT_AFTER_SECONDS)

        # Check ESM maximum retry attempts
        esm = self.get_event_source_mapping(lambda_client, esm_uuid)
        assert esm is not None
        logging.info(esm)
        assert esm["MaximumRetryAttempts"] == 3

        # Delete k8s resource
        _, deleted = k8s.delete_custom_resource(ref)
        assert deleted

        time.sleep(DELETE_WAIT_AFTER_SECONDS)

        # Check ESM doesn't exist
        exists = self.event_source_mapping_exists(lambda_client, esm_uuid)
        assert not exists
Example #27
    def test_smoke_sqs_queue_stream(self, lambda_client, lambda_function):
        (_, function_resource) = lambda_function
        lambda_function_name = function_resource["spec"]["name"]

        resource_name = random_suffix_name("lambda-esm", 24)
        resources = get_bootstrap_resources()

        replacements = REPLACEMENT_VALUES.copy()
        replacements["AWS_REGION"] = get_region()
        replacements["EVENT_SOURCE_MAPPING_NAME"] = resource_name
        replacements["BATCH_SIZE"] = "10"
        replacements["FUNCTION_NAME"] = lambda_function_name
        replacements["EVENT_SOURCE_ARN"] = resources.SQSQueueARN
        replacements["MAXIMUM_BATCHING_WINDOW_IN_SECONDS"] = "1"

        # Load ESM CR
        resource_data = load_lambda_resource(
            "event_source_mapping_sqs",
            additional_replacements=replacements,
        )
        logging.debug(resource_data)

        # Create k8s resource
        ref = k8s.CustomResourceReference(
            CRD_GROUP,
            CRD_VERSION,
            RESOURCE_PLURAL,
            resource_name,
            namespace="default",
        )
        k8s.create_custom_resource(ref, resource_data)
        cr = k8s.wait_resource_consumed_by_controller(ref)

        assert cr is not None
        assert k8s.get_resource_exists(ref)

        time.sleep(CREATE_WAIT_AFTER_SECONDS)

        esm_uuid = cr['status']['uuid']

        # Check ESM exists
        exists = self.event_source_mapping_exists(lambda_client, esm_uuid)
        assert exists

        # Update cr
        cr["spec"]["batchSize"] = 20

        # Patch k8s resource
        k8s.patch_custom_resource(ref, cr)
        time.sleep(UPDATE_WAIT_AFTER_SECONDS)

        # Check ESM batch size
        esm = self.get_event_source_mapping(lambda_client, esm_uuid)
        assert esm is not None
        assert esm["BatchSize"] == 20

        # Delete k8s resource
        _, deleted = k8s.delete_custom_resource(ref)
        assert deleted

        time.sleep(DELETE_WAIT_AFTER_SECONDS)

        # Check ESM doesn't exist
        exists = self.event_source_mapping_exists(lambda_client, esm_uuid)
        assert not exists
Example #28
    def test_create_delete_7_9(self, es_client):
        resource = Domain(name="my-es-domain", data_node_count=1)

        replacements = REPLACEMENT_VALUES.copy()
        replacements["DOMAIN_NAME"] = resource.name

        resource_data = load_resource(
            "domain_es7.9",
            additional_replacements=replacements,
        )
        logging.debug(resource_data)

        # Create the k8s resource
        ref = k8s.CustomResourceReference(
            CRD_GROUP,
            CRD_VERSION,
            RESOURCE_PLURAL,
            resource.name,
            namespace="default",
        )
        k8s.create_custom_resource(ref, resource_data)
        cr = k8s.wait_resource_consumed_by_controller(ref)

        assert cr is not None
        assert k8s.get_resource_exists(ref)

        logging.debug(cr)

        # Let's check that the domain appears in AES
        aws_res = es_client.describe_elasticsearch_domain(
            DomainName=resource.name)

        logging.debug(aws_res)

        now = datetime.datetime.now()
        timeout = now + datetime.timedelta(seconds=CREATE_TIMEOUT_SECONDS)

        # An ES Domain gets its `DomainStatus.Created` field set to `True`
        # almost immediately, however the `DomainStatus.Processing` field is
        # set to `True` while Elasticsearch is being installed onto the worker
        # node(s). If you attempt to delete an ES Domain that is both Created
        # and Processing == True, AES will set the `DomainStatus.Deleted` field
        # to True as well, so the `Created`, `Processing` and `Deleted` fields
        # will all be True. It typically takes upwards of 4-6 minutes for an ES
        # Domain to reach Created = True && Processing = False and then another
        # 2 minutes or so after calling DeleteElasticsearchDomain for the ES
        # Domain to no longer appear in DescribeElasticsearchDomain API call.
        aws_res = wait_for_create_or_die(es_client, resource, timeout)
        logging.info(
            f"ES Domain {resource.name} creation succeeded and DomainStatus.Processing is now False"
        )

        assert aws_res['DomainStatus']['ElasticsearchVersion'] == '7.9'
        assert aws_res['DomainStatus']['Created'] is True
        assert aws_res['DomainStatus']['ElasticsearchClusterConfig'][
            'InstanceCount'] == resource.data_node_count
        assert aws_res['DomainStatus']['ElasticsearchClusterConfig'][
            'ZoneAwarenessEnabled'] == resource.is_zone_aware

        # Delete the k8s resource on teardown of the module
        k8s.delete_custom_resource(ref)

        logging.info(
            f"Deleted CR for ES Domain {resource.name}. Waiting {DELETE_WAIT_AFTER_SECONDS} before checking existence in AWS API"
        )
        time.sleep(DELETE_WAIT_AFTER_SECONDS)

        now = datetime.datetime.now()
        timeout = now + datetime.timedelta(seconds=DELETE_TIMEOUT_SECONDS)

        # Domain should no longer appear in AES
        wait_for_delete_or_die(es_client, resource, timeout)
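
wait_for_create_or_die and wait_for_delete_or_die are not included in this listing. Based on the lifecycle comment above, the create-side waiter presumably looks something like this sketch (assumed, not the repo's actual implementation):

def wait_for_create_or_die(es_client, resource, timeout):
    # Poll DescribeElasticsearchDomain until DomainStatus.Created is True and
    # DomainStatus.Processing is False, or fail once the deadline passes.
    while True:
        if datetime.datetime.now() >= timeout:
            pytest.fail(f"ES Domain {resource.name} not fully created before timeout")
        time.sleep(CREATE_INTERVAL_SLEEP_SECONDS)
        aws_res = es_client.describe_elasticsearch_domain(DomainName=resource.name)
        status = aws_res['DomainStatus']
        if status['Created'] and not status['Processing']:
            return aws_res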
Example #29
    def test_crud_mysql_serverless(
            self,
            k8s_secret,
    ):
        db_cluster_id = "my-aurora-mysql"
        db_name = "mydb"
        secret = k8s_secret(
            self.MUP_NS,
            self.MUP_SEC_NAME,
            self.MUP_SEC_KEY,
            self.MUP_SEC_VAL,
        )

        replacements = REPLACEMENT_VALUES.copy()
        replacements['COPY_TAGS_TO_SNAPSHOT'] = "False"
        replacements["DB_CLUSTER_ID"] = db_cluster_id
        replacements["DB_NAME"] = db_name
        replacements["MASTER_USER_PASS_SECRET_NAMESPACE"] = secret.ns
        replacements["MASTER_USER_PASS_SECRET_NAME"] = secret.name
        replacements["MASTER_USER_PASS_SECRET_KEY"] = secret.key

        resource_data = load_rds_resource(
            "db_cluster_mysql_serverless",
            additional_replacements=replacements,
        )

        ref = k8s.CustomResourceReference(
            CRD_GROUP, CRD_VERSION, RESOURCE_PLURAL,
            db_cluster_id, namespace="default",
        )
        k8s.create_custom_resource(ref, resource_data)
        cr = k8s.wait_resource_consumed_by_controller(ref)

        assert cr is not None
        assert 'status' in cr
        assert 'status' in cr['status']
        assert cr['status']['status'] == 'creating'
        condition.assert_not_synced(ref)

        db_cluster.wait_until(
            db_cluster_id,
            db_cluster.status_matches('available'),
        )

        time.sleep(CHECK_STATUS_WAIT_SECONDS)

        # Before we update the DBCluster CR below, let's check to see that the
        # Status field in the CR has been updated to something other than
        # 'creating', which is what is set after the initial creation.  The
        # CR's `Status.Status` should be updated because the CR is requeued on
        # successful reconciliation loops and subsequent reconciliation loops
        # call ReadOne and should update the CR's Status with the latest
        # observed information.
        # https://github.com/aws-controllers-k8s/community/issues/923
        cr = k8s.get_resource(ref)
        assert cr is not None
        assert 'status' in cr
        assert 'status' in cr['status']
        assert cr['status']['status'] != 'creating'
        condition.assert_synced(ref)

        # We're now going to modify the CopyTagsToSnapshot field of the DB
        # instance, wait some time and verify that the RDS server-side resource
        # shows the new value of the field.
        latest = db_cluster.get(db_cluster_id)
        assert latest is not None
        assert latest['CopyTagsToSnapshot'] is False
        updates = {
            "spec": {"copyTagsToSnapshot": True},
        }
        k8s.patch_custom_resource(ref, updates)
        time.sleep(MODIFY_WAIT_AFTER_SECONDS)

        latest = db_cluster.get(db_cluster_id)
        assert latest is not None
        assert latest['CopyTagsToSnapshot'] is True

        arn = latest['DBClusterArn']
        expect_tags = [
            {"Key": "environment", "Value": "dev"}
        ]
        latest_tags = db_cluster.get_tags(arn)
        assert expect_tags == latest_tags

        # OK, now let's update the tag set and check that the tags are
        # updated accordingly.
        new_tags = [
            {
                "key": "environment",
                "value": "prod",
            }
        ]
        updates = {
            "spec": {"tags": new_tags},
        }
        k8s.patch_custom_resource(ref, updates)
        time.sleep(MODIFY_WAIT_AFTER_SECONDS)

        latest_tags = db_cluster.get_tags(arn)
        after_update_expected_tags = [
            {
                "Key": "environment",
                "Value": "prod",
            }
        ]
        assert latest_tags == after_update_expected_tags

        k8s.delete_custom_resource(ref)

        time.sleep(DELETE_WAIT_AFTER_SECONDS)

        db_cluster.wait_until_deleted(db_cluster_id)
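
db_cluster.wait_until and db_cluster.status_matches are likewise helpers outside this listing; a sketch of their assumed shape over the boto3 RDS client:

def status_matches(desired):
    # Assumed predicate factory: returns a check of the cluster's Status field.
    return lambda cluster: cluster["Status"] == desired

def wait_until(db_cluster_id, predicate, timeout_seconds=600, interval_seconds=15):
    # Assumed waiter: poll DescribeDBClusters until the predicate holds.
    rds = boto3.client("rds")
    deadline = datetime.datetime.now() + datetime.timedelta(seconds=timeout_seconds)
    while datetime.datetime.now() < deadline:
        clusters = rds.describe_db_clusters(
            DBClusterIdentifier=db_cluster_id)["DBClusters"]
        if clusters and predicate(clusters[0]):
            return
        time.sleep(interval_seconds)
    raise Exception(f"DB cluster {db_cluster_id} did not reach the desired state in time")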
Example #30
    def test_create_delete_non_public(
        self,
        amq_client,
        admin_user_pass_secret,
    ):
        resource_name = random_suffix_name("my-rabbit-broker-non-public", 32)
        aup_sec_ns, aup_sec_name, aup_sec_key = admin_user_pass_secret

        replacements = REPLACEMENT_VALUES.copy()
        replacements["BROKER_NAME"] = resource_name
        replacements["ADMIN_USER_PASS_SECRET_NAMESPACE"] = aup_sec_ns
        replacements["ADMIN_USER_PASS_SECRET_NAME"] = aup_sec_name
        replacements["ADMIN_USER_PASS_SECRET_KEY"] = aup_sec_key

        resource_data = load_mq_resource(
            "broker_rabbitmq_non_public",
            additional_replacements=replacements,
        )
        logging.debug(resource_data)

        # Create the k8s resource
        ref = k8s.CustomResourceReference(
            CRD_GROUP,
            CRD_VERSION,
            RESOURCE_PLURAL,
            resource_name,
            namespace="default",
        )
        k8s.create_custom_resource(ref, resource_data)
        cr = k8s.wait_resource_consumed_by_controller(ref)

        assert cr is not None

        broker_id = cr['status']['brokerID']

        # Let's check that the Broker appears in AmazonMQ
        aws_res = amq_client.describe_broker(BrokerId=broker_id)
        assert aws_res is not None

        wait_for_cr_status(
            ref,
            "brokerState",
            "RUNNING",
            CREATE_INTERVAL_SLEEP_SECONDS,
            45,
        )

        # At this point, there should be at least one BrokerInstance record in
        # the Broker.Status.BrokerInstances collection which we can grab an
        # endpoint from.
        latest_res = k8s.get_resource(ref)
        assert latest_res['status']['brokerInstances'] is not None
        assert len(latest_res['status']['brokerInstances']) == 1
        assert len(latest_res['status']['brokerInstances'][0]['endpoints']) > 0

        # Delete the k8s resource on teardown of the module
        k8s.delete_custom_resource(ref)

        time.sleep(DELETE_WAIT_AFTER_SECONDS)

        now = datetime.datetime.now()
        timeout = now + datetime.timedelta(seconds=DELETE_TIMEOUT_SECONDS)

        # Broker should no longer appear in AmazonMQ
        while True:
            if datetime.datetime.now() >= timeout:
                pytest.fail(
                    "Timed out waiting for ES Domain to being deleted in AES API"
                )
            time.sleep(DELETE_WAIT_INTERVAL_SLEEP_SECONDS)

            try:
                aws_res = amq_client.describe_broker(BrokerId=broker_id)
                if aws_res['BrokerState'] != "DELETION_IN_PROGRESS":
                    pytest.fail(
                        "BrokerState is not DELETION_IN_PROGRESS for broker that was deleted. BrokerState is "
                        + aws_res['BrokerState'])
            except amq_client.exceptions.NotFoundException:
                break