def secrets():
    secrets = {
        "NAME1": random_suffix_name("first", 32),
        "NAME2": random_suffix_name("second", 32),
        "KEY1": "secret1",
        "KEY2": "secret2"
    }
    k8s.create_opaque_secret("default", secrets['NAME1'], secrets['KEY1'], random_suffix_name("password", 32))
    k8s.create_opaque_secret("default", secrets['NAME2'], secrets['KEY2'], random_suffix_name("password", 32))
    yield secrets

    # teardown
    k8s.delete_secret("default", secrets['NAME1'])
    k8s.delete_secret("default", secrets['NAME2'])
Example #2
def service_bootstrap() -> Resources:
    logging.getLogger().setLevel(logging.INFO)

    # First create the AuthorizerRole and VPC, then use the created
    # AuthorizerRole.arn to create the Authorizer Lambda function.

    resources = BootstrapResources(AuthorizerRole=Role(
        "ack-apigwv2-authorizer-role",
        "lambda.amazonaws.com",
        managed_policies=[
            "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole"
        ]),
                                   VPC=VPC(name_prefix="apigwv2-vpc-link"))

    try:
        resources.bootstrap()
    except BootstrapFailureException as ex:
        exit(254)

    authorizer_function_name = random_suffix_name("ack-apigwv2-authorizer", 30)
    authorizer_function_arn = create_lambda_authorizer(
        authorizer_function_name, resources.AuthorizerRole.arn)

    resources.AuthorizerFunctionName = authorizer_function_name
    resources.AuthorizerFunctionArn = authorizer_function_arn
    return resources
def xgboost_churn_data_quality_monitoring_schedule(
    xgboost_churn_data_quality_job_definition,
):
    (_, job_definition_resource) = xgboost_churn_data_quality_job_definition

    job_definition_name = job_definition_resource["spec"].get("jobDefinitionName")

    monitoring_schedule_name = random_suffix_name("monitoring-schedule", 32)

    replacements = REPLACEMENT_VALUES.copy()
    replacements["MONITORING_SCHEDULE_NAME"] = monitoring_schedule_name
    replacements["JOB_DEFINITION_NAME"] = job_definition_name
    replacements["MONITORING_TYPE"] = "DataQuality"

    reference, spec, resource = create_sagemaker_resource(
        resource_plural=RESOURCE_PLURAL,
        resource_name=monitoring_schedule_name,
        spec_file="monitoring_schedule_base",
        replacements=replacements,
    )
    assert resource is not None

    yield (reference, resource, spec)

    if k8s.get_resource_exists(reference):
        _, deleted = k8s.delete_custom_resource(reference, cfg.DELETE_WAIT_PERIOD, cfg.DELETE_WAIT_LENGTH)
        assert deleted
def xgboost_training_job():
    resource_name = random_suffix_name("xgboost-trainingjob", 32)
    replacements = REPLACEMENT_VALUES.copy()
    replacements["TRAINING_JOB_NAME"] = resource_name
    reference, _, resource = create_sagemaker_resource(
        resource_plural=RESOURCE_PLURAL,
        resource_name=resource_name,
        spec_file="xgboost_trainingjob",
        replacements=replacements,
    )

    assert resource is not None
    if k8s.get_resource_arn(resource) is None:
        logging.error(
            f"ARN for this resource is None, resource status is: {resource['status']}"
        )
    assert k8s.get_resource_arn(resource) is not None

    yield (reference, resource)

    if k8s.get_resource_exists(reference):
        _, deleted = k8s.delete_custom_resource(reference,
                                                cfg.JOB_DELETE_WAIT_PERIODS,
                                                cfg.JOB_DELETE_WAIT_LENGTH)
        assert deleted
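A test consuming a fixture like this one typically unpacks the yielded tuple and then waits for the ACK controller to report the resource as synced. A minimal sketch, assuming the generator above is registered with @pytest.fixture and reusing the k8s helper calls that appear in the other snippets in this section (the test name is illustrative):

def test_training_job_synced(xgboost_training_job):
    # Unpack what the fixture yields: a CustomResourceReference and the created resource.
    reference, resource = xgboost_training_job
    # Wait for the controller to mark the custom resource as synced.
    assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=10)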
Example #5
def single_variant_config():
    config_resource_name = random_suffix_name("single-variant-config", 32)
    model_resource_name = config_resource_name + "-model"

    replacements = REPLACEMENT_VALUES.copy()
    replacements["CONFIG_NAME"] = config_resource_name
    replacements["MODEL_NAME"] = model_resource_name

    model_reference, model_spec, model_resource = create_sagemaker_resource(
        resource_plural=MODEL_RESOURCE_PLURAL,
        resource_name=model_resource_name,
        spec_file="xgboost_model",
        replacements=replacements,
    )
    assert model_resource is not None

    config_reference, config_spec, config_resource = create_sagemaker_resource(
        resource_plural=CONFIG_RESOURCE_PLURAL,
        resource_name=config_resource_name,
        spec_file="endpoint_config_single_variant",
        replacements=replacements,
    )
    assert config_resource is not None

    yield (config_reference, config_resource)

    k8s.delete_custom_resource(model_reference)
    # Delete the k8s resource if not already deleted by tests
    if k8s.get_resource_exists(config_reference):
        k8s.delete_custom_resource(config_reference)
def xgboost_model_package_group():
    resource_name = random_suffix_name("xgboost-model-package-group", 38)

    replacements = REPLACEMENT_VALUES.copy()
    replacements["MODEL_PACKAGE_GROUP_NAME"] = resource_name

    (
        model_package_group_reference,
        model_package_group_spec,
        model_package_group_resource,
    ) = create_sagemaker_resource(
        resource_plural=cfg.MODEL_PACKAGE_GROUP_RESOURCE_PLURAL,
        resource_name=resource_name,
        spec_file="xgboost_model_package_group",
        replacements=replacements,
    )
    assert model_package_group_resource is not None
    if k8s.get_resource_arn(model_package_group_resource) is None:
        logging.error(
            f"ARN for this resource is None, resource status is: {model_package_group_resource['status']}"
        )
    assert k8s.get_resource_arn(model_package_group_resource) is not None

    yield (model_package_group_reference, model_package_group_resource)

    # Delete the k8s resource if not already deleted by tests
    if k8s.get_resource_exists(model_package_group_reference):
        _, deleted = k8s.delete_custom_resource(model_package_group_reference,
                                                DELETE_WAIT_PERIOD,
                                                DELETE_WAIT_LENGTH)
        assert deleted
Example #7
def create_data_bucket() -> str:
    region = get_region()
    account_id = get_account_id()
    bucket_name = resources.random_suffix_name(
        f"ack-data-bucket-{region}-{account_id}", 63)

    s3 = boto3.client("s3", region_name=region)
    if region == "us-east-1":
        s3.create_bucket(Bucket=bucket_name)
    else:
        s3.create_bucket(
            Bucket=bucket_name,
            CreateBucketConfiguration={"LocationConstraint": region})

    logging.info(f"Created SageMaker data bucket {bucket_name}")

    s3_resource = boto3.resource("s3", region_name=region)

    source_bucket = s3_resource.Bucket(SAGEMAKER_SOURCE_DATA_BUCKET)
    destination_bucket = s3_resource.Bucket(bucket_name)
    duplicate_bucket_contents(source_bucket, destination_bucket)

    logging.info(f"Synced data bucket")

    return bucket_name
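Bootstrap helpers like this one usually have a matching cleanup step run during teardown. A minimal sketch of the counterpart, assuming the bucket name returned above is recorded by the bootstrapper (the helper name is illustrative):

import boto3

def delete_data_bucket(bucket_name: str):
    # S3 requires the bucket to be empty before it can be deleted,
    # so remove all objects first, then the bucket itself.
    bucket = boto3.resource("s3").Bucket(bucket_name)
    bucket.objects.all().delete()
    bucket.delete()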
def xgboost_churn_data_quality_job_definition(xgboost_churn_endpoint):
    endpoint_spec = xgboost_churn_endpoint
    endpoint_name = endpoint_spec["spec"].get("endpointName")

    resource_name = random_suffix_name("data-quality-job-definition", 32)

    replacements = REPLACEMENT_VALUES.copy()
    replacements["JOB_DEFINITION_NAME"] = resource_name
    replacements["ENDPOINT_NAME"] = endpoint_name

    job_definition_reference, _, resource = create_sagemaker_resource(
        resource_plural=cfg.DATA_QUALITY_JOB_DEFINITION_RESOURCE_PLURAL,
        resource_name=resource_name,
        spec_file="data_quality_job_definition_xgboost_churn",
        replacements=replacements,
    )
    assert resource is not None

    job_definition_name = resource["spec"].get("jobDefinitionName")

    yield (job_definition_reference, resource)

    if k8s.get_resource_exists(job_definition_reference):
        _, deleted = k8s.delete_custom_resource(job_definition_reference, 3,
                                                10)
        assert deleted
    def test_crud_authorizer(self, api_resource):
        api_ref, api_cr = api_resource
        api_id = api_cr['status']['apiID']
        test_data = REPLACEMENT_VALUES.copy()
        authorizer_name = random_suffix_name("ack-test-authorizer", 25)
        test_data['AUTHORIZER_NAME'] = authorizer_name
        test_data['AUTHORIZER_TITLE'] = authorizer_name
        test_data['API_ID'] = api_id
        test_data['AUTHORIZER_URI'] = f'arn:aws:apigateway:{get_region()}:lambda:path/2015-03-31/functions/{get_bootstrap_resources().AuthorizerFunctionArn}/invocations'
        authorizer_ref, authorizer_data = helper.authorizer_ref_and_data(authorizer_resource_name=authorizer_name,
                                                                         replacement_values=test_data)
        logging.debug(f"http api authorizer resource. name: {authorizer_name}, data: {authorizer_data}")

        # test create
        k8s.create_custom_resource(authorizer_ref, authorizer_data)
        time.sleep(CREATE_WAIT_AFTER_SECONDS)
        assert k8s.wait_on_condition(authorizer_ref, "ACK.ResourceSynced", "True", wait_periods=10)

        cr = k8s.get_resource(authorizer_ref)
        assert cr is not None

        authorizer_id = cr['status']['authorizerID']

        # Let's check that the HTTP Api authorizer appears in Amazon API Gateway
        apigw_validator.assert_authorizer_is_present(api_id=api_id, authorizer_id=authorizer_id)

        apigw_validator.assert_authorizer_name(
            api_id=api_id,
            authorizer_id=authorizer_id,
            expected_authorizer_name=authorizer_name
        )

        # test update
        updated_authorizer_title = 'updated-' + authorizer_name
        test_data['AUTHORIZER_TITLE'] = updated_authorizer_title
        updated_authorizer_resource_data = load_apigatewayv2_resource(
            "authorizer",
            additional_replacements=test_data,
        )
        logging.debug(f"updated http api authorizer resource: {updated_authorizer_resource_data}")

        # Update the k8s resource
        k8s.patch_custom_resource(authorizer_ref, updated_authorizer_resource_data)
        time.sleep(UPDATE_WAIT_AFTER_SECONDS)

        condition.assert_synced(authorizer_ref)
        # Let's check that the HTTP Api authorizer appears in Amazon API Gateway with updated title
        apigw_validator.assert_authorizer_name(
            api_id=api_id,
            authorizer_id=authorizer_id,
            expected_authorizer_name=updated_authorizer_title
        )

        # test delete
        k8s.delete_custom_resource(authorizer_ref)
        time.sleep(DELETE_WAIT_AFTER_SECONDS)
        assert not k8s.get_resource_exists(authorizer_ref)
        # HTTP Api authorizer should no longer appear in Amazon API Gateway
        apigw_validator.assert_authorizer_is_deleted(api_id=api_id, authorizer_id=authorizer_id)
def xgboost_transformjob(sagemaker_client):
    # Create model using boto3 for TransformJob
    transform_model_file = (
        f"s3://{get_bootstrap_resources().DataBucketName}/sagemaker/batch/model.tar.gz"
    )
    model_name = random_suffix_name("xgboost-model", 32)

    create_response = sagemaker_client.create_model(
        ModelName=model_name,
        PrimaryContainer={
            "Image": REPLACEMENT_VALUES["XGBOOST_IMAGE_URI"],
            "ModelDataUrl": transform_model_file,
            "Environment": {},
        },
        ExecutionRoleArn=REPLACEMENT_VALUES["SAGEMAKER_EXECUTION_ROLE_ARN"],
    )
    logging.debug(create_response)

    # Check if the model is created successfully
    describe_model_response = sagemaker_client.describe_model(
        ModelName=model_name)
    assert describe_model_response["ModelName"] is not None

    resource_name = random_suffix_name("xgboost-transformjob", 32)

    # Use the model created above
    replacements = REPLACEMENT_VALUES.copy()
    replacements["MODEL_NAME"] = model_name
    replacements["TRANSFORM_JOB_NAME"] = resource_name

    reference, spec, resource = create_sagemaker_resource(
        resource_plural=RESOURCE_PLURAL,
        resource_name=resource_name,
        spec_file="xgboost_transformjob",
        replacements=replacements,
    )
    assert resource is not None

    yield (reference, resource)

    # Delete the model created
    sagemaker_client.delete_model(ModelName=model_name)

    # Delete the k8s resource if not already deleted by tests
    if k8s.get_resource_exists(reference):
        k8s.delete_custom_resource(reference)
Example #11
def create_sns_topic() -> str:
    topic_name = random_suffix_name("ack-sns-topic", 32)

    sns = boto3.client("sns")
    response = sns.create_topic(Name=topic_name)
    logging.info(f"Created SNS topic {response['TopicArn']}")

    return response['TopicArn']
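The matching teardown only needs the ARN returned above. A minimal sketch, assuming the bootstrapper records that ARN (the helper name is illustrative):

import logging
import boto3

def delete_sns_topic(topic_arn: str):
    # Teardown counterpart to create_sns_topic above.
    sns = boto3.client("sns")
    sns.delete_topic(TopicArn=topic_arn)
    logging.info(f"Deleted SNS topic {topic_arn}")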
Example #12
def create_log_group():
    logs = boto3.client("logs")
    log_group_name = random_suffix_name("ack-cw-log-group", 32)
    logs.create_log_group(logGroupName=log_group_name)

    logging.info(f"Create CW log group {log_group_name}")

    return log_group_name
Example #13
def create_cc_snapshot():
    ec = boto3.client("elasticache")

    cc_id = random_suffix_name("ack-cache-cluster", 32)
    _ = ec.create_cache_cluster(CacheClusterId=cc_id,
                                NumCacheNodes=1,
                                CacheNodeType="cache.t3.micro",
                                Engine="redis")
    waiter = ec.get_waiter('cache_cluster_available')
    waiter.wait(CacheClusterId=cc_id)
    logging.info(f"Created cache cluster {cc_id} for snapshotting")

    snapshot_name = random_suffix_name("ack-cc-snapshot", 32)
    _ = ec.create_snapshot(CacheClusterId=cc_id, SnapshotName=snapshot_name)
    assert wait_snapshot_available(snapshot_name)

    return snapshot_name
    def test_smoke(self, lambda_client):
        resource_name = random_suffix_name("lambda-csc", 24)

        resources = get_bootstrap_resources()
        logging.debug(resources)

        replacements = REPLACEMENT_VALUES.copy()
        replacements["AWS_REGION"] = get_region()
        replacements["CODE_SIGNING_CONFIG_NAME"] = resource_name
        replacements["SIGNING_PROFILE_VERSION_ARN"] = resources.SigningProfileVersionArn

        # Load Lambda CR
        resource_data = load_lambda_resource(
            "code_signing_config",
            additional_replacements=replacements,
        )
        logging.debug(resource_data)

        # Create k8s resource
        ref = k8s.CustomResourceReference(
            CRD_GROUP, CRD_VERSION, RESOURCE_PLURAL,
            resource_name, namespace="default",
        )
        k8s.create_custom_resource(ref, resource_data)
        cr = k8s.wait_resource_consumed_by_controller(ref)

        assert cr is not None
        assert k8s.get_resource_exists(ref)

        codeSigningConfigARN = cr['status']['ackResourceMetadata']['arn']

        time.sleep(CREATE_WAIT_AFTER_SECONDS)

        # Check Lambda code signing config exists
        exists = self.code_signing_config_exists(lambda_client, codeSigningConfigARN)
        assert exists

        # Update cr
        cr["spec"]["description"] = "new description"

        # Patch k8s resource
        k8s.patch_custom_resource(ref, cr)
        time.sleep(UPDATE_WAIT_AFTER_SECONDS)

        # Check code signing config description
        csc = self.get_code_signing_config(lambda_client, codeSigningConfigARN)
        assert csc is not None
        assert csc["Description"] == "new description"

        # Delete k8s resource
        _, deleted = k8s.delete_custom_resource(ref)
        assert deleted

        time.sleep(DELETE_WAIT_AFTER_SECONDS)

        # Check Lambda code signing config doesn't exist
        exists = self.code_signing_config_exists(lambda_client, codeSigningConfigARN)
        assert not exists
Example #15
    def test_repository_lifecycle_policy(self, ecr_client):
        resource_name = random_suffix_name("ecr-repository", 24)

        replacements = REPLACEMENT_VALUES.copy()
        replacements["REPOSITORY_NAME"] = resource_name
        # Load ECR CR
        resource_data = load_ecr_resource(
            "repository_lifecycle_policy",
            additional_replacements=replacements,
        )
        logging.debug(resource_data)

        # Create k8s resource
        ref = k8s.CustomResourceReference(
            CRD_GROUP,
            CRD_VERSION,
            RESOURCE_PLURAL,
            resource_name,
            namespace="default",
        )
        k8s.create_custom_resource(ref, resource_data)
        cr = k8s.wait_resource_consumed_by_controller(ref)

        assert cr is not None
        assert k8s.get_resource_exists(ref)

        time.sleep(CREATE_WAIT_AFTER_SECONDS)

        # Check ECR repository exists
        repo = self.get_repository(ecr_client, resource_name)
        assert repo is not None

        # Check ECR repository lifecycle policy exists
        lifecycle_policy = self.get_lifecycle_policy(ecr_client, resource_name,
                                                     repo["registryId"])
        assert lifecycle_policy == LIFECYCLE_POLICY_FILTERING_ON_IMAGE_AGE

        # Remove lifecycle policy
        cr["spec"]["lifecyclePolicy"] = ""

        # Patch k8s resource
        k8s.patch_custom_resource(ref, cr)
        time.sleep(UPDATE_WAIT_AFTER_SECONDS)

        lifecycle_policy = self.get_lifecycle_policy(ecr_client, resource_name,
                                                     repo["registryId"])
        assert lifecycle_policy == ""

        # Delete k8s resource
        _, deleted = k8s.delete_custom_resource(ref)
        assert deleted is True

        time.sleep(DELETE_WAIT_AFTER_SECONDS)

        # Check ECR repository no longer exists
        exists = self.repository_exists(ecr_client, resource_name)
        assert not exists
    def test_crud_integration(self, api_resource):
        api_ref, api_cr = api_resource
        api_id = api_cr['status']['apiID']
        test_data = REPLACEMENT_VALUES.copy()
        integration_name = random_suffix_name("ack-test-integration", 25)
        test_data['INTEGRATION_NAME'] = integration_name
        test_data['API_ID'] = api_id
        integration_ref, integration_data = helper.integration_ref_and_data(integration_resource_name=integration_name,
                                                                            replacement_values=test_data)
        logging.debug(f"http api integration resource. name: {integration_name}, data: {integration_data}")

        # test create
        k8s.create_custom_resource(integration_ref, integration_data)
        time.sleep(CREATE_WAIT_AFTER_SECONDS)
        assert k8s.wait_on_condition(integration_ref, "ACK.ResourceSynced", "True", wait_periods=10)

        cr = k8s.get_resource(integration_ref)
        assert cr is not None

        integration_id = cr['status']['integrationID']

        # Let's check that the HTTP Api integration appears in Amazon API Gateway
        apigw_validator.assert_integration_is_present(api_id=api_id, integration_id=integration_id)

        apigw_validator.assert_integration_uri(
            api_id=api_id,
            integration_id=integration_id,
            expected_uri=test_data['INTEGRATION_URI']
        )

        # test update
        updated_uri = 'https://httpbin.org/post'
        test_data['INTEGRATION_URI'] = updated_uri
        updated_integration_resource_data = load_apigatewayv2_resource(
            "integration",
            additional_replacements=test_data,
        )
        logging.debug(f"updated http api integration resource: {updated_integration_resource_data}")

        # Update the k8s resource
        k8s.patch_custom_resource(integration_ref, updated_integration_resource_data)
        time.sleep(UPDATE_WAIT_AFTER_SECONDS)

        condition.assert_synced(integration_ref)
        # Let's check that the HTTP Api integration appears in Amazon API Gateway with updated uri
        apigw_validator.assert_integration_uri(
            api_id=api_id,
            integration_id=integration_id,
            expected_uri=updated_uri
        )

        # test delete
        k8s.delete_custom_resource(integration_ref)
        time.sleep(DELETE_WAIT_AFTER_SECONDS)
        assert not k8s.get_resource_exists(integration_ref)
        # HTTP Api integration should no longer appear in Amazon API Gateway
        apigw_validator.assert_integration_is_deleted(api_id=api_id, integration_id=integration_id)
Example #17
def create_cpg():
    ec = boto3.client("elasticache")
    cpg_name = random_suffix_name("ack-cpg", 32)
    ec.create_cache_parameter_group(CacheParameterGroupName=cpg_name,
                                    CacheParameterGroupFamily='redis6.x',
                                    Description='ACK e2e test')

    logging.info(f"Created ElastiCache cache paramter group {cpg_name}")
    return cpg_name
Example #18
    def test_smoke(self, lambda_client, lambda_function):
        (_, function_resource) = lambda_function
        lambda_function_name = function_resource["spec"]["name"]

        resource_name = random_suffix_name("lambda-alias", 24)

        replacements = REPLACEMENT_VALUES.copy()
        replacements["AWS_REGION"] = get_region()
        replacements["ALIAS_NAME"] = resource_name
        replacements["FUNCTION_NAME"] = lambda_function_name
        replacements["FUNCTION_VERSION"] = "$LATEST"

        # Load alias CR
        resource_data = load_lambda_resource(
            "alias",
            additional_replacements=replacements,
        )
        logging.debug(resource_data)

        # Create k8s resource
        ref = k8s.CustomResourceReference(
            CRD_GROUP, CRD_VERSION, RESOURCE_PLURAL,
            resource_name, namespace="default",
        )
        k8s.create_custom_resource(ref, resource_data)
        cr = k8s.wait_resource_consumed_by_controller(ref)

        assert cr is not None
        assert k8s.get_resource_exists(ref)

        time.sleep(CREATE_WAIT_AFTER_SECONDS)

        # Check alias exists
        alias = self.alias_exist(lambda_client, resource_name, lambda_function_name)
        assert alias is not None

        # Update cr
        cr["spec"]["description"] = ""

        # Patch k8s resource
        k8s.patch_custom_resource(ref, cr)
        time.sleep(UPDATE_WAIT_AFTER_SECONDS)

        # Check alias description
        alias = self.get_alias(lambda_client, resource_name, lambda_function_name)
        assert alias is not None
        assert alias["Description"] == ""

        # Delete k8s resource
        _, deleted = k8s.delete_custom_resource(ref)
        assert deleted

        time.sleep(DELETE_WAIT_AFTER_SECONDS)

        # Check alias doesn't exist
        exists = self.get_alias(lambda_client, resource_name, lambda_function_name)
        assert not exists
    def test_crud_stage(self, api_resource):
        api_ref, api_cr = api_resource
        api_id = api_cr['status']['apiID']
        test_data = REPLACEMENT_VALUES.copy()
        stage_name = random_suffix_name("ack-test-stage", 25)
        test_data['STAGE_NAME'] = stage_name
        test_data['API_ID'] = api_id
        stage_ref, stage_data = helper.stage_ref_and_data(stage_resource_name=stage_name,
                                                          replacement_values=test_data)
        logging.debug(f"http api stage resource. name: {stage_name}, data: {stage_data}")

        # test create
        k8s.create_custom_resource(stage_ref, stage_data)
        time.sleep(CREATE_WAIT_AFTER_SECONDS)
        assert k8s.wait_on_condition(stage_ref, "ACK.ResourceSynced", "True", wait_periods=10)

        cr = k8s.get_resource(stage_ref)
        assert cr is not None

        # Let's check that the HTTP Api stage appears in Amazon API Gateway
        apigw_validator.assert_stage_is_present(api_id=api_id, stage_name=stage_name)

        stage_description = test_data['STAGE_DESCRIPTION']
        apigw_validator.assert_stage_description(
            api_id=api_id,
            stage_name=stage_name,
            expected_description=stage_description
        )

        # test update
        updated_description = 'updated' + stage_description
        test_data['STAGE_DESCRIPTION'] = updated_description
        updated_stage_resource_data = load_apigatewayv2_resource(
            "stage",
            additional_replacements=test_data,
        )
        logging.debug(f"updated http api stage resource: {updated_stage_resource_data}")

        # Update the k8s resource
        k8s.patch_custom_resource(stage_ref, updated_stage_resource_data)
        time.sleep(UPDATE_WAIT_AFTER_SECONDS)

        condition.assert_synced(stage_ref)
        # Let's check that the HTTP Api stage appears in Amazon API Gateway with updated description
        apigw_validator.assert_stage_description(
            api_id=api_id,
            stage_name=stage_name,
            expected_description=updated_description
        )

        # test delete
        k8s.delete_custom_resource(stage_ref)
        time.sleep(DELETE_WAIT_AFTER_SECONDS)
        assert not k8s.get_resource_exists(stage_ref)
        # HTTP Api stage should no longer appear in Amazon API Gateway
        apigw_validator.assert_stage_is_deleted(api_id=api_id, stage_name=stage_name)
Example #20
def create_user_group() -> str:
    ec = boto3.client("elasticache")

    usergroup_id = random_suffix_name("ack-ec-usergroup", 32)
    _ = ec.create_user_group(UserGroupId=usergroup_id,
                             Engine="Redis",
                             UserIds=["default"])
    logging.info(f"Creating ElastiCache User Group {usergroup_id}")
    assert wait_usergroup_active(usergroup_id)

    return usergroup_id
Example #21
def create_non_default_user() -> str:
    ec = boto3.client("elasticache")
    user_id = random_suffix_name("ackecuser", 32)

    _ = ec.create_user(UserId=user_id,
                       UserName="******",
                       Engine="Redis",
                       NoPasswordRequired=True,
                       AccessString="on -@all")

    logging.info(f"Creating ElastiCache non default User {user_id}")
    return user_id
Example #22
    def test_basic_repository(self, ecr_client):
        resource_name = random_suffix_name("ecr-repository", 24)

        replacements = REPLACEMENT_VALUES.copy()
        replacements["REPOSITORY_NAME"] = resource_name
        # Load ECR CR
        resource_data = load_ecr_resource(
            "repository",
            additional_replacements=replacements,
        )
        logging.debug(resource_data)

        # Create k8s resource
        ref = k8s.CustomResourceReference(
            CRD_GROUP,
            CRD_VERSION,
            RESOURCE_PLURAL,
            resource_name,
            namespace="default",
        )
        k8s.create_custom_resource(ref, resource_data)
        cr = k8s.wait_resource_consumed_by_controller(ref)

        assert cr is not None
        assert k8s.get_resource_exists(ref)

        time.sleep(CREATE_WAIT_AFTER_SECONDS)

        # Check ECR repository exists
        exists = self.repository_exists(ecr_client, resource_name)
        assert exists

        # Update CR
        cr["spec"]["imageScanningConfiguration"]["scanOnPush"] = True

        # Patch k8s resource
        k8s.patch_custom_resource(ref, cr)
        time.sleep(UPDATE_WAIT_AFTER_SECONDS)

        # Check repository scanOnPush scanning configuration
        repo = self.get_repository(ecr_client, resource_name)
        assert repo is not None
        assert repo["imageScanningConfiguration"]["scanOnPush"] is True

        # Delete k8s resource
        _, deleted = k8s.delete_custom_resource(ref)
        assert deleted is True

        time.sleep(DELETE_WAIT_AFTER_SECONDS)

        # Check ECR repository no longer exists
        exists = self.repository_exists(ecr_client, resource_name)
        assert not exists
    def test_crud_httpapi_using_import(self):
        test_data = REPLACEMENT_VALUES.copy()
        api_name = random_suffix_name("ack-test-importapi", 25)
        test_data['IMPORT_API_NAME'] = api_name
        test_data['IMPORT_API_TITLE'] = api_name
        api_ref, api_data = helper.import_api_ref_and_data(api_resource_name=api_name,
                                                           replacement_values=test_data)
        logging.debug(f"imported http api resource. name: {api_name}, data: {api_data}")

        # test create
        k8s.create_custom_resource(api_ref, api_data)
        time.sleep(CREATE_API_WAIT_AFTER_SECONDS)
        assert k8s.wait_on_condition(api_ref, "ACK.ResourceSynced", "True", wait_periods=10)

        cr = k8s.get_resource(api_ref)
        assert cr is not None

        api_id = cr['status']['apiID']

        # Let's check that the imported HTTP Api appears in Amazon API Gateway
        apigw_validator.assert_api_is_present(api_id=api_id)

        apigw_validator.assert_api_name(
            api_id=api_id,
            expected_api_name=api_name
        )

        # test update
        updated_api_title = 'updated-' + api_name
        test_data['IMPORT_API_TITLE'] = updated_api_title
        updated_api_resource_data = load_apigatewayv2_resource(
            "import_api",
            additional_replacements=test_data,
        )
        logging.debug(f"updated import http api resource: {updated_api_resource_data}")

        # Update the k8s resource
        k8s.patch_custom_resource(api_ref, updated_api_resource_data)
        time.sleep(UPDATE_WAIT_AFTER_SECONDS)

        condition.assert_synced(api_ref)
        # Let's check that the HTTP Api appears in Amazon API Gateway with updated title
        apigw_validator.assert_api_name(
            api_id=api_id,
            expected_api_name=updated_api_title
        )

        # test delete
        k8s.delete_custom_resource(api_ref)
        time.sleep(DELETE_WAIT_AFTER_SECONDS)
        assert not k8s.get_resource_exists(api_ref)
        # HTTP Api should no longer appear in Amazon API Gateway
        apigw_validator.assert_api_is_deleted(api_id=api_id)
    def test_smoke(self, dynamodb_client, dynamodb_table):
        (_, table_resource) = dynamodb_table
        resource_name = random_suffix_name("backup", 32)
        table_name = table_resource["spec"]["tableName"]

        replacements = REPLACEMENT_VALUES.copy()
        replacements["TABLE_NAME"] = table_name
        replacements["BACKUP_NAME"] = resource_name

        # Load Backup CR
        resource_data = load_dynamodb_resource(
            "backup",
            additional_replacements=replacements,
        )
        logging.debug(resource_data)

        # Create k8s resource
        ref = k8s.CustomResourceReference(
            CRD_GROUP,
            CRD_VERSION,
            RESOURCE_PLURAL,
            resource_name,
            namespace="default",
        )
        k8s.create_custom_resource(ref, resource_data)
        cr = k8s.wait_resource_consumed_by_controller(ref)

        assert cr is not None
        assert k8s.get_resource_exists(ref)

        wait_for_cr_status(
            ref,
            "backupStatus",
            "AVAILABLE",
            10,
            5,
        )

        backupArn = k8s.get_resource_arn(cr)
        # Check DynamoDB Backup exists
        exists = self.backup_exists(dynamodb_client, backupArn)
        assert exists

        # Delete k8s resource
        _, deleted = k8s.delete_custom_resource(ref)
        assert deleted is True

        time.sleep(DELETE_WAIT_AFTER_SECONDS)

        # Check DynamoDB Backup no longer exists
        exists = self.backup_exists(dynamodb_client, backupArn)
        assert not exists
def authorizer_resource(api_resource):
    authorizer_resource_name = random_suffix_name(test_resource_values['AUTHORIZER_NAME'], 25)
    test_resource_values['AUTHORIZER_NAME'] = authorizer_resource_name
    authorizer_uri = f'arn:aws:apigateway:{get_region()}:lambda:path/2015-03-31/functions/{get_bootstrap_resources().AuthorizerFunctionArn}/invocations'
    test_resource_values["AUTHORIZER_URI"] = authorizer_uri
    authorizer_ref, authorizer_data = helper.authorizer_ref_and_data(authorizer_resource_name=authorizer_resource_name,
                                                                     replacement_values=test_resource_values)
    k8s.create_custom_resource(authorizer_ref, authorizer_data)
    time.sleep(CREATE_WAIT_AFTER_SECONDS)
    assert k8s.wait_on_condition(authorizer_ref, "ACK.ResourceSynced", "True", wait_periods=10)

    cr = k8s.get_resource(authorizer_ref)
    assert cr is not None

    authorizer_id = cr['status']['authorizerID']
    test_resource_values['AUTHORIZER_ID'] = authorizer_id

    # add permissions for apigateway to invoke authorizer lambda
    authorizer_arn = "arn:aws:execute-api:{region}:{account}:{api_id}/authorizers/{authorizer_id}".format(
        region=get_region(),
        account=get_account_id(),
        api_id=test_resource_values['API_ID'],
        authorizer_id=authorizer_id
    )
    lambda_client = boto3.client("lambda")
    function_name = get_bootstrap_resources().AuthorizerFunctionName
    statement_id = random_suffix_name('invoke-permission', 25)
    lambda_client.add_permission(FunctionName=function_name,
                                 StatementId=statement_id,
                                 Action='lambda:InvokeFunction',
                                 Principal='apigateway.amazonaws.com',
                                 SourceArn=authorizer_arn)

    yield authorizer_ref, cr

    lambda_client.remove_permission(FunctionName=function_name, StatementId=statement_id)
    k8s.delete_custom_resource(authorizer_ref)
    def test_create_delete(self, dynamodb_client):
        resource_name = random_suffix_name("table", 32)

        replacements = REPLACEMENT_VALUES.copy()
        replacements["TABLE_NAME"] = resource_name

        # Load Table CR
        resource_data = load_dynamodb_resource(
            "table_forums",
            additional_replacements=replacements,
        )
        logging.debug(resource_data)

        # Create k8s resource
        ref = k8s.CustomResourceReference(
            CRD_GROUP,
            CRD_VERSION,
            RESOURCE_PLURAL,
            resource_name,
            namespace="default",
        )
        k8s.create_custom_resource(ref, resource_data)
        cr = k8s.wait_resource_consumed_by_controller(ref)

        assert cr is not None
        assert k8s.get_resource_exists(ref)

        wait_for_cr_status(
            ref,
            "tableStatus",
            "ACTIVE",
            10,
            5,
        )

        # Check DynamoDB Table exists
        exists = self.table_exists(dynamodb_client, resource_name)
        assert exists

        # Delete k8s resource
        _, deleted = k8s.delete_custom_resource(ref)
        assert deleted is True

        time.sleep(DELETE_WAIT_AFTER_SECONDS)

        # Check DynamoDB Table no longer exists
        exists = self.table_exists(dynamodb_client, resource_name)
        assert not exists
Example #27
def create_data_bucket() -> str:
    region = get_region()
    account_id = get_account_id()
    bucket_name = resources.random_suffix_name(
        f"ack-data-bucket-{region}-{account_id}", 63)

    s3 = boto3.client("s3", region_name=region)
    if region == "us-east-1":
        s3.create_bucket(Bucket=bucket_name)
    else:
        s3.create_bucket(
            Bucket=bucket_name,
            CreateBucketConfiguration={"LocationConstraint": region})

    logging.info(f"Created SageMaker data bucket {bucket_name}")

    s3_resource = boto3.resource("s3", region_name=region)

    source_bucket = s3_resource.Bucket(SAGEMAKER_SOURCE_DATA_BUCKET)
    destination_bucket = s3_resource.Bucket(bucket_name)
    temp_dir = "/tmp/ack_s3_data"
    # awscli is not installed in the test-infra container, so use boto3 to copy within us-west-2
    if region == "us-west-2":
        duplicate_bucket_contents(source_bucket, destination_bucket)
        # above method does an async copy
        # TODO: find a way to remove random wait
        time.sleep(180)
    else:
        # workaround for copying when the buckets are in different regions
        # TODO: check if there is a better way and merge to test-infra
        subprocess.call(["mkdir", f"{temp_dir}"])
        subprocess.call([
            "aws",
            "s3",
            "sync",
            f"s3://{SAGEMAKER_SOURCE_DATA_BUCKET}",
            f"./{temp_dir}/",
            "--quiet",
        ])
        subprocess.call([
            "aws", "s3", "sync", f"./{temp_dir}/", f"s3://{bucket_name}",
            "--quiet"
        ])

    logging.info(f"Synced data bucket")

    return bucket_name
Example #28
def create_execution_role() -> str:
    region = get_region()
    role_name = resources.random_suffix_name(f"ack-sagemaker-execution-role",
                                             63)
    iam = boto3.client("iam", region_name=region)

    iam.create_role(
        RoleName=role_name,
        AssumeRolePolicyDocument=json.dumps({
            "Version":
            "2012-10-17",
            "Statement": [{
                "Effect": "Allow",
                "Principal": {
                    "Service": "sagemaker.amazonaws.com"
                },
                "Action": "sts:AssumeRole",
            }],
        }),
        Description=
        "SageMaker execution role for ACK integration and canary tests",
    )

    # random sleep to prevent throttling
    time.sleep(random.randrange(1, 3))
    iam.attach_role_policy(
        RoleName=role_name,
        PolicyArn="arn:aws:iam::aws:policy/AmazonSageMakerFullAccess",
    )

    # random sleep to prevent throttling
    time.sleep(random.randrange(1, 3))
    iam.attach_role_policy(
        RoleName=role_name,
        PolicyArn="arn:aws:iam::aws:policy/AmazonS3FullAccess")

    iam_resource = iam.get_role(RoleName=role_name)
    resource_arn = iam_resource["Role"]["Arn"]

    # There appears to be a delay in role availability after role creation,
    # which can cause failures because the role is not yet present, so add
    # a delay to allow the role to become available.
    time.sleep(10)
    logging.info(f"Created SageMaker execution role {resource_arn}")

    return resource_arn
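The corresponding teardown has to detach the managed policies before deleting the role, because IAM rejects deletion of a role that still has policies attached. A minimal sketch, assuming the role name chosen above is recorded by the bootstrapper (the helper name is illustrative):

import boto3

def delete_execution_role(role_name: str):
    iam = boto3.client("iam")
    # Detach the two managed policies attached in create_execution_role above.
    for policy_arn in (
        "arn:aws:iam::aws:policy/AmazonSageMakerFullAccess",
        "arn:aws:iam::aws:policy/AmazonS3FullAccess",
    ):
        iam.detach_role_policy(RoleName=role_name, PolicyArn=policy_arn)
    iam.delete_role(RoleName=role_name)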
def notebook_instance_lifecycleConfig():
    notebook_instance_lfc_name = random_suffix_name("notebookinstancelfc", 50)
    replacements = REPLACEMENT_VALUES.copy()
    replacements["NOTEBOOK_INSTANCE_LFC_NAME"] = notebook_instance_lfc_name
    reference, spec, resource = create_sagemaker_resource(
        resource_plural="notebookinstancelifecycleconfigs",
        resource_name=notebook_instance_lfc_name,
        spec_file="notebook_instance_lifecycle_config",
        replacements=replacements,
    )
    assert resource is not None
    yield (reference, resource, spec)

    if k8s.get_resource_exists(reference):
        _, deleted = k8s.delete_custom_resource(reference, DELETE_WAIT_PERIOD,
                                                DELETE_PERIOD_LENGTH)
        assert deleted
Example #30
def xgboost_training_job_debugger():
    resource_name = random_suffix_name("xgboost-trainingjob-debugger", 50)
    replacements = REPLACEMENT_VALUES.copy()
    replacements["TRAINING_JOB_NAME"] = resource_name
    reference, _, resource = create_sagemaker_resource(
        resource_plural=RESOURCE_PLURAL,
        resource_name=resource_name,
        spec_file="xgboost_trainingjob_debugger",
        replacements=replacements,
    )
    assert resource is not None

    yield (reference, resource)

    if k8s.get_resource_exists(reference):
        _, deleted = k8s.delete_custom_resource(reference, 3, 10)
        assert deleted