def notebook_instance():
    default_code_repository = "https://github.com/aws-controllers-k8s/community"
    resource_name = random_suffix_name("nb", 32)

    replacements = REPLACEMENT_VALUES.copy()
    replacements["NOTEBOOK_INSTANCE_NAME"] = resource_name
    replacements["DEFAULT_CODE_REPOSITORY"] = default_code_repository

    reference, spec, resource = create_sagemaker_resource(
        resource_plural="notebookinstances",
        resource_name=resource_name,
        spec_file="notebook_instance",
        replacements=replacements,
    )
    assert resource is not None

    yield (reference, resource, spec)

    # Delete the k8s resource if not already deleted by tests
    if k8s.get_resource_exists(reference):
        _, deleted = k8s.delete_custom_resource(reference, DELETE_WAIT_PERIOD, DELETE_WAIT_LENGTH)
        assert deleted

def code_signing_config():
    resource_name = random_suffix_name("lambda-csc", 24)

    resources = get_bootstrap_resources()
    logging.debug(resources)

    replacements = REPLACEMENT_VALUES.copy()
    replacements["AWS_REGION"] = get_region()
    replacements["CODE_SIGNING_CONFIG_NAME"] = resource_name
    replacements["SIGNING_PROFILE_VERSION_ARN"] = resources.SigningProfileVersionArn

    # Load Lambda CR
    resource_data = load_lambda_resource(
        "code_signing_config",
        additional_replacements=replacements,
    )
    logging.debug(resource_data)

    # Create k8s resource
    ref = k8s.CustomResourceReference(
        CRD_GROUP, CRD_VERSION, "codesigningconfigs",
        resource_name, namespace="default",
    )
    k8s.create_custom_resource(ref, resource_data)
    cr = k8s.wait_resource_consumed_by_controller(ref)

    assert cr is not None
    assert k8s.get_resource_exists(ref)

    time.sleep(CREATE_WAIT_AFTER_SECONDS)

    yield (ref, cr)

    _, deleted = k8s.delete_custom_resource(ref)
    assert deleted

def lambda_function():
    resource_name = random_suffix_name("lambda-function", 24)
    resources = get_bootstrap_resources()

    replacements = REPLACEMENT_VALUES.copy()
    replacements["FUNCTION_NAME"] = resource_name
    replacements["BUCKET_NAME"] = resources.FunctionsBucketName
    replacements["LAMBDA_ROLE"] = resources.LambdaBasicRoleARN
    replacements["LAMBDA_FILE_NAME"] = resources.LambdaFunctionFileZip
    replacements["RESERVED_CONCURRENT_EXECUTIONS"] = "0"
    replacements["CODE_SIGNING_CONFIG_ARN"] = ""
    replacements["AWS_REGION"] = get_region()

    # Load function CR
    resource_data = load_lambda_resource(
        "function",
        additional_replacements=replacements,
    )
    logging.debug(resource_data)

    # Create k8s resource
    function_reference = k8s.CustomResourceReference(
        CRD_GROUP, CRD_VERSION, "functions",
        resource_name, namespace="default",
    )

    # Create lambda function
    k8s.create_custom_resource(function_reference, resource_data)
    function_resource = k8s.wait_resource_consumed_by_controller(function_reference)

    assert function_resource is not None
    assert k8s.get_resource_exists(function_reference)

    time.sleep(CREATE_WAIT_AFTER_SECONDS)

    yield (function_reference, function_resource)

    _, deleted = k8s.delete_custom_resource(function_reference)
    assert deleted

def xgboost_churn_model_bias_job_definition(xgboost_churn_endpoint):
    endpoint_spec = xgboost_churn_endpoint
    endpoint_name = endpoint_spec["spec"].get("endpointName")
    job_definition_name = random_suffix_name("model-bias-job-definition", 32)

    replacements = REPLACEMENT_VALUES.copy()
    replacements["JOB_DEFINITION_NAME"] = job_definition_name
    replacements["ENDPOINT_NAME"] = endpoint_name

    reference, _, resource = create_sagemaker_resource(
        resource_plural=RESOURCE_PLURAL,
        resource_name=job_definition_name,
        spec_file="model_bias_job_definition_xgboost_churn",
        replacements=replacements,
    )
    assert resource is not None

    yield (reference, resource)

    if k8s.get_resource_exists(reference):
        _, deleted = k8s.delete_custom_resource(reference, 3, 10)
        assert deleted

def single_container_model(name_suffix):
    model_resource_name = name_suffix + "-model"

    replacements = REPLACEMENT_VALUES.copy()
    replacements["MODEL_NAME"] = model_resource_name

    model_reference, model_spec, model_resource = create_sagemaker_resource(
        resource_plural=cfg.MODEL_RESOURCE_PLURAL,
        resource_name=model_resource_name,
        spec_file="xgboost_model",
        replacements=replacements,
    )
    assert model_resource is not None
    if k8s.get_resource_arn(model_resource) is None:
        logging.error(
            f"ARN for this resource is None, resource status is: {model_resource['status']}"
        )
    assert k8s.get_resource_arn(model_resource) is not None

    yield (model_reference, model_resource)

    _, deleted = k8s.delete_custom_resource(model_reference, 3, 10)
    assert deleted

def dynamodb_table():
    resource_name = random_suffix_name("table", 32)

    replacements = REPLACEMENT_VALUES.copy()
    replacements["TABLE_NAME"] = resource_name

    # Load resource
    resource_data = load_dynamodb_resource(
        "table_forums",
        additional_replacements=replacements,
    )

    table_reference = k8s.CustomResourceReference(
        CRD_GROUP, CRD_VERSION, "tables",
        resource_name, namespace="default",
    )

    # Create table
    k8s.create_custom_resource(table_reference, resource_data)
    table_resource = k8s.wait_resource_consumed_by_controller(table_reference)

    assert table_resource is not None
    assert k8s.get_resource_exists(table_reference)

    wait_for_cr_status(
        table_reference,
        "tableStatus",
        "ACTIVE",
        10,
        30,
    )

    yield (table_reference, table_resource)

    _, deleted = k8s.delete_custom_resource(table_reference)
    assert deleted

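# `wait_for_cr_status` is a service test helper that is not defined in this excerpt. A minimal
# sketch of how such a poller could look, assuming it re-reads the CR via `k8s.get_resource`
# and retries for a fixed number of periods; the function name matches the calls above, but the
# parameter order and failure behavior here are assumptions, not the project's actual helper.
import time

from acktest.k8s import resource as k8s


def wait_for_cr_status(reference, status_field, desired_value, wait_periods, period_length):
    """Poll the CR until `status.<status_field>` equals `desired_value` or retries run out."""
    for _ in range(wait_periods):
        resource = k8s.get_resource(reference)
        status = (resource or {}).get("status", {})
        if status.get(status_field) == desired_value:
            return
        time.sleep(period_length)
    raise AssertionError(
        f"Timed out waiting for {status_field} to become {desired_value}"
    )
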
def xgboost_hpojob():
    resource_name = random_suffix_name("xgboost-hpojob", 32)

    replacements = REPLACEMENT_VALUES.copy()
    replacements["HPO_JOB_NAME"] = resource_name

    reference, _, resource = create_sagemaker_resource(
        resource_plural=RESOURCE_PLURAL,
        resource_name=resource_name,
        spec_file="xgboost_hpojob",
        replacements=replacements,
    )
    assert resource is not None
    if k8s.get_resource_arn(resource) is None:
        logging.error(
            f"ARN for this resource is None, resource status is: {resource['status']}"
        )
    assert k8s.get_resource_arn(resource) is not None

    yield (reference, resource)

    if k8s.get_resource_exists(reference):
        _, deleted = k8s.delete_custom_resource(
            reference, cfg.JOB_DELETE_WAIT_PERIODS, cfg.JOB_DELETE_WAIT_LENGTH
        )
        assert deleted

def xgboost_model_package_group():
    resource_name = random_suffix_name("xgboost-model-package-group", 50)

    replacements = REPLACEMENT_VALUES.copy()
    replacements["MODEL_PACKAGE_GROUP_NAME"] = resource_name

    reference, spec, resource = create_sagemaker_resource(
        resource_plural=RESOURCE_PLURAL,
        resource_name=resource_name,
        spec_file="xgboost_model_package_group",
        replacements=replacements,
    )
    assert resource is not None

    yield (reference, resource)

    # Delete the k8s resource if not already deleted by tests
    if k8s.get_resource_exists(reference):
        _, deleted = k8s.delete_custom_resource(
            reference, cfg.DELETE_WAIT_PERIOD, cfg.DELETE_WAIT_LENGTH
        )
        assert deleted

def xgboost_model_for_transform(generate_job_names):
    (transform_resource_name, model_resource_name) = generate_job_names

    replacements = REPLACEMENT_VALUES.copy()
    replacements["MODEL_NAME"] = model_resource_name

    reference, _, resource = create_sagemaker_resource(
        resource_plural=cfg.MODEL_RESOURCE_PLURAL,
        resource_name=model_resource_name,
        spec_file="xgboost_model",
        replacements=replacements,
    )
    assert resource is not None
    if k8s.get_resource_arn(resource) is None:
        logging.error(
            f"ARN for this resource is None, resource status is: {resource['status']}"
        )
    assert k8s.get_resource_arn(resource) is not None

    yield (transform_resource_name, model_resource_name)

    if k8s.get_resource_exists(reference):
        _, deleted = k8s.delete_custom_resource(
            reference, cfg.JOB_DELETE_WAIT_PERIODS, cfg.JOB_DELETE_WAIT_LENGTH
        )
        assert deleted

def xgboost_versioned_model_package(xgboost_model_package_group):
    resource_name = random_suffix_name("xgboost-versioned-model-package", 38)
    (_, model_package_group_resource) = xgboost_model_package_group
    model_package_group_resource_name = model_package_group_resource["spec"].get(
        "modelPackageGroupName", None
    )

    replacements = REPLACEMENT_VALUES.copy()
    replacements["MODEL_PACKAGE_GROUP_NAME"] = model_package_group_resource_name
    replacements["MODEL_PACKAGE_RESOURCE_NAME"] = resource_name

    reference, spec, resource = create_sagemaker_resource(
        resource_plural=RESOURCE_PLURAL,
        resource_name=resource_name,
        spec_file="xgboost_versioned_model_package",
        replacements=replacements,
    )
    assert resource is not None

    yield (reference, spec, resource)

    # Delete the k8s resource if not already deleted by tests
    if k8s.get_resource_exists(reference):
        _, deleted = k8s.delete_custom_resource(
            reference, DELETE_WAIT_PERIOD, DELETE_WAIT_LENGTH
        )
        assert deleted

def cross_region_model():
    resource_name = random_suffix_name("cross-region-model", 32)
    region = get_cross_region()

    replacements = REPLACEMENT_VALUES.copy()
    replacements["MODEL_NAME"] = resource_name
    replacements["REGION"] = region
    replacements["XGBOOST_V1_IMAGE_URI"] = f"{XGBOOST_V1_IMAGE_URIS[region]}/xgboost:latest"

    reference, spec, resource = create_sagemaker_resource(
        resource_plural=cfg.MODEL_RESOURCE_PLURAL,
        resource_name=resource_name,
        spec_file="cross_region_model",
        replacements=replacements,
    )
    assert resource is not None

    yield (reference, resource)

    # Delete the k8s resource if not already deleted by tests
    if k8s.get_resource_exists(reference):
        _, deleted = k8s.delete_custom_resource(reference, 3, 10)
        assert deleted

def single_variant_config():
    config_resource_name = random_suffix_name("single-variant-config", 32)
    model_resource_name = config_resource_name + "-model"

    replacements = REPLACEMENT_VALUES.copy()
    replacements["ENDPOINT_CONFIG_NAME"] = config_resource_name
    replacements["MODEL_NAME"] = model_resource_name

    model_reference, model_spec, model_resource = create_sagemaker_resource(
        resource_plural=cfg.MODEL_RESOURCE_PLURAL,
        resource_name=model_resource_name,
        spec_file="xgboost_model",
        replacements=replacements,
    )
    assert model_resource is not None
    if k8s.get_resource_arn(model_resource) is None:
        logging.error(
            f"ARN for this resource is None, resource status is: {model_resource['status']}"
        )
    assert k8s.get_resource_arn(model_resource) is not None

    config_reference, config_spec, config_resource = create_sagemaker_resource(
        resource_plural=cfg.ENDPOINT_CONFIG_RESOURCE_PLURAL,
        resource_name=config_resource_name,
        spec_file="endpoint_config_single_variant",
        replacements=replacements,
    )
    assert config_resource is not None

    yield (config_reference, config_resource)

    k8s.delete_custom_resource(model_reference, 3, 10)

    # Delete the k8s resource if not already deleted by tests
    if k8s.get_resource_exists(config_reference):
        _, deleted = k8s.delete_custom_resource(config_reference, 3, 10)
        assert deleted

def xgboost_endpoint(name_suffix, single_variant_config):
    endpoint_resource_name = name_suffix
    (_, config_resource) = single_variant_config
    config_resource_name = config_resource["spec"].get("endpointConfigName", None)

    replacements = REPLACEMENT_VALUES.copy()
    replacements["ENDPOINT_NAME"] = endpoint_resource_name
    replacements["CONFIG_NAME"] = config_resource_name

    reference, spec, resource = create_sagemaker_resource(
        resource_plural=ENDPOINT_RESOURCE_PLURAL,
        resource_name=endpoint_resource_name,
        spec_file="endpoint_base",
        replacements=replacements,
    )
    assert resource is not None

    yield (reference, resource, spec)

    # Delete the k8s resource if not already deleted by tests
    if k8s.get_resource_exists(reference):
        k8s.delete_custom_resource(reference)

def test_table_update_tags(self, dynamodb_client):
    resource_name = random_suffix_name("table", 32)

    replacements = REPLACEMENT_VALUES.copy()
    replacements["TABLE_NAME"] = resource_name

    # Load Table CR
    resource_data = load_dynamodb_resource(
        "table_forums",
        additional_replacements=replacements,
    )
    logging.debug(resource_data)

    # Create k8s resource
    ref = k8s.CustomResourceReference(
        CRD_GROUP, CRD_VERSION, RESOURCE_PLURAL,
        resource_name, namespace="default",
    )
    k8s.create_custom_resource(ref, resource_data)
    cr = k8s.wait_resource_consumed_by_controller(ref)

    assert cr is not None
    assert k8s.get_resource_exists(ref)

    wait_for_cr_status(
        ref,
        "tableStatus",
        "ACTIVE",
        10,
        5,
    )

    # Check DynamoDB Table exists
    exists = self.table_exists(dynamodb_client, resource_name)
    assert exists

    # Get CR latest revision
    cr = k8s.wait_resource_consumed_by_controller(ref)

    # Update table list of tags
    tags = [
        {
            "key": "key1",
            "value": "value1",
        },
    ]
    cr["spec"]["tags"] = tags

    # Patch k8s resource
    k8s.patch_custom_resource(ref, cr)
    time.sleep(UPDATE_TAGS_WAIT_AFTER_SECONDS)

    table_tags = get_resource_tags(cr["status"]["ackResourceMetadata"]["arn"])
    assert len(table_tags) == len(tags)
    assert table_tags[0]['Key'] == tags[0]['key']
    assert table_tags[0]['Value'] == tags[0]['value']

    # Delete k8s resource
    _, deleted = k8s.delete_custom_resource(ref)
    assert deleted is True

    time.sleep(DELETE_WAIT_AFTER_SECONDS)

    # Check DynamoDB Table doesn't exist
    exists = self.table_exists(dynamodb_client, resource_name)
    assert not exists

def test_smoke_dynamodb_table_stream(self, lambda_client, lambda_function):
    (_, function_resource) = lambda_function
    lambda_function_name = function_resource["spec"]["name"]

    resource_name = random_suffix_name("lambda-esm", 24)
    resources = get_bootstrap_resources()

    replacements = REPLACEMENT_VALUES.copy()
    replacements["AWS_REGION"] = get_region()
    replacements["EVENT_SOURCE_MAPPING_NAME"] = resource_name
    replacements["BATCH_SIZE"] = "10"
    replacements["FUNCTION_NAME"] = lambda_function_name
    replacements["EVENT_SOURCE_ARN"] = resources.DynamoDBTableARN
    replacements["STARTING_POSITION"] = "LATEST"
    replacements["MAXIMUM_RETRY_ATTEMPTS"] = "-1"

    # Load ESM CR
    resource_data = load_lambda_resource(
        "event_source_mapping_dynamodb",
        additional_replacements=replacements,
    )
    logging.debug(resource_data)

    # Create k8s resource
    ref = k8s.CustomResourceReference(
        CRD_GROUP, CRD_VERSION, RESOURCE_PLURAL,
        resource_name, namespace="default",
    )
    k8s.create_custom_resource(ref, resource_data)
    cr = k8s.wait_resource_consumed_by_controller(ref)

    assert cr is not None
    assert k8s.get_resource_exists(ref)

    time.sleep(CREATE_WAIT_AFTER_SECONDS)

    esm_uuid = cr['status']['uuid']

    # Check ESM exists
    exists = self.event_source_mapping_exists(lambda_client, esm_uuid)
    assert exists

    # Update cr
    cr["spec"]["maximumRetryAttempts"] = 3
    cr["spec"]["destinationConfig"] = {
        'onFailure': {
            'destination': resources.SQSQueueARN,
        }
    }

    # Patch k8s resource
    k8s.patch_custom_resource(ref, cr)
    time.sleep(UPDATE_WAIT_AFTER_SECONDS)

    # Check ESM maximum retry attempts
    esm = self.get_event_source_mapping(lambda_client, esm_uuid)
    assert esm is not None
    logging.info(esm)
    assert esm["MaximumRetryAttempts"] == 3

    # Delete k8s resource
    _, deleted = k8s.delete_custom_resource(ref)
    assert deleted

    time.sleep(DELETE_WAIT_AFTER_SECONDS)

    # Check ESM doesn't exist
    exists = self.event_source_mapping_exists(lambda_client, esm_uuid)
    assert not exists

def test_smoke_sqs_queue_stream(self, lambda_client, lambda_function):
    (_, function_resource) = lambda_function
    lambda_function_name = function_resource["spec"]["name"]

    resource_name = random_suffix_name("lambda-esm", 24)
    resources = get_bootstrap_resources()

    replacements = REPLACEMENT_VALUES.copy()
    replacements["AWS_REGION"] = get_region()
    replacements["EVENT_SOURCE_MAPPING_NAME"] = resource_name
    replacements["BATCH_SIZE"] = "10"
    replacements["FUNCTION_NAME"] = lambda_function_name
    replacements["EVENT_SOURCE_ARN"] = resources.SQSQueueARN
    replacements["MAXIMUM_BATCHING_WINDOW_IN_SECONDS"] = "1"

    # Load ESM CR
    resource_data = load_lambda_resource(
        "event_source_mapping_sqs",
        additional_replacements=replacements,
    )
    logging.debug(resource_data)

    # Create k8s resource
    ref = k8s.CustomResourceReference(
        CRD_GROUP, CRD_VERSION, RESOURCE_PLURAL,
        resource_name, namespace="default",
    )
    k8s.create_custom_resource(ref, resource_data)
    cr = k8s.wait_resource_consumed_by_controller(ref)

    assert cr is not None
    assert k8s.get_resource_exists(ref)

    time.sleep(CREATE_WAIT_AFTER_SECONDS)

    esm_uuid = cr['status']['uuid']

    # Check ESM exists
    exists = self.event_source_mapping_exists(lambda_client, esm_uuid)
    assert exists

    # Update cr
    cr["spec"]["batchSize"] = 20

    # Patch k8s resource
    k8s.patch_custom_resource(ref, cr)
    time.sleep(UPDATE_WAIT_AFTER_SECONDS)

    # Check ESM batch size
    esm = self.get_event_source_mapping(lambda_client, esm_uuid)
    assert esm is not None
    assert esm["BatchSize"] == 20

    # Delete k8s resource
    _, deleted = k8s.delete_custom_resource(ref)
    assert deleted

    time.sleep(DELETE_WAIT_AFTER_SECONDS)

    # Check ESM doesn't exist
    exists = self.event_source_mapping_exists(lambda_client, esm_uuid)
    assert not exists

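# `event_source_mapping_exists` / `get_event_source_mapping` are methods on the test class and
# are not shown in this excerpt. A minimal sketch of how they could wrap the boto3 Lambda
# GetEventSourceMapping call; the method bodies below are assumptions, not the project's
# actual helpers.
def get_event_source_mapping(self, lambda_client, esm_uuid):
    """Return the GetEventSourceMapping response for `esm_uuid`, or None if it does not exist."""
    try:
        return lambda_client.get_event_source_mapping(UUID=esm_uuid)
    except lambda_client.exceptions.ResourceNotFoundException:
        return None


def event_source_mapping_exists(self, lambda_client, esm_uuid) -> bool:
    """True if the event source mapping is still visible in the Lambda API."""
    return self.get_event_source_mapping(lambda_client, esm_uuid) is not None
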
def test_create_delete_7_9(self, es_client):
    resource = Domain(name="my-es-domain", data_node_count=1)

    replacements = REPLACEMENT_VALUES.copy()
    replacements["DOMAIN_NAME"] = resource.name

    resource_data = load_resource(
        "domain_es7.9",
        additional_replacements=replacements,
    )
    logging.debug(resource_data)

    # Create the k8s resource
    ref = k8s.CustomResourceReference(
        CRD_GROUP, CRD_VERSION, RESOURCE_PLURAL,
        resource.name, namespace="default",
    )
    k8s.create_custom_resource(ref, resource_data)
    cr = k8s.wait_resource_consumed_by_controller(ref)

    assert cr is not None
    assert k8s.get_resource_exists(ref)

    logging.debug(cr)

    # Let's check that the domain appears in AES
    aws_res = es_client.describe_elasticsearch_domain(DomainName=resource.name)
    logging.debug(aws_res)

    now = datetime.datetime.now()
    timeout = now + datetime.timedelta(seconds=CREATE_TIMEOUT_SECONDS)

    # An ES Domain gets its `DomainStatus.Created` field set to `True`
    # almost immediately, however the `DomainStatus.Processing` field is
    # set to `True` while Elasticsearch is being installed onto the worker
    # node(s). If you attempt to delete an ES Domain that is both Created
    # and Processing == True, AES will set the `DomainStatus.Deleted` field
    # to True as well, so the `Created`, `Processing` and `Deleted` fields
    # will all be True. It typically takes upwards of 4-6 minutes for an ES
    # Domain to reach Created = True && Processing = False and then another
    # 2 minutes or so after calling DeleteElasticsearchDomain for the ES
    # Domain to no longer appear in DescribeElasticsearchDomain API call.
    aws_res = wait_for_create_or_die(es_client, resource, timeout)
    logging.info(
        f"ES Domain {resource.name} creation succeeded and DomainStatus.Processing is now False"
    )

    assert aws_res['DomainStatus']['ElasticsearchVersion'] == '7.9'
    assert aws_res['DomainStatus']['Created'] == True
    assert aws_res['DomainStatus']['ElasticsearchClusterConfig']['InstanceCount'] == resource.data_node_count
    assert aws_res['DomainStatus']['ElasticsearchClusterConfig']['ZoneAwarenessEnabled'] == resource.is_zone_aware

    # Delete the k8s resource on teardown of the module
    k8s.delete_custom_resource(ref)

    logging.info(
        f"Deleted CR for ES Domain {resource.name}. Waiting {DELETE_WAIT_AFTER_SECONDS} before checking existence in AWS API"
    )
    time.sleep(DELETE_WAIT_AFTER_SECONDS)

    now = datetime.datetime.now()
    timeout = now + datetime.timedelta(seconds=DELETE_TIMEOUT_SECONDS)

    # Domain should no longer appear in AES
    wait_for_delete_or_die(es_client, resource, timeout)

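# `wait_for_create_or_die` / `wait_for_delete_or_die` are not defined in this excerpt. Given the
# Created/Processing semantics described in the comment above, a minimal sketch of the
# create-side poller, assuming it re-reads DescribeElasticsearchDomain until Created is True and
# Processing is False and fails the test on timeout; the name matches the calls above, but the
# sleep interval and failure style are assumptions, not the project's actual helper.
import datetime
import time

import pytest


def wait_for_create_or_die(es_client, resource, timeout, interval_seconds=30):
    """Poll DescribeElasticsearchDomain until the domain is Created and no longer Processing."""
    while True:
        if datetime.datetime.now() >= timeout:
            pytest.fail(f"Timed out waiting for ES Domain {resource.name} to finish creating")
        aws_res = es_client.describe_elasticsearch_domain(DomainName=resource.name)
        status = aws_res["DomainStatus"]
        if status.get("Created") and not status.get("Processing"):
            return aws_res
        time.sleep(interval_seconds)
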
def test_create_delete_2d3m_multi_az_no_vpc_7_9(self, es_client):
    resource = Domain(
        name="my-es-domain2",
        data_node_count=2,
        master_node_count=3,
        is_zone_aware=True,
    )

    replacements = REPLACEMENT_VALUES.copy()
    replacements["DOMAIN_NAME"] = resource.name
    replacements["MASTER_NODE_COUNT"] = str(resource.master_node_count)
    replacements["DATA_NODE_COUNT"] = str(resource.data_node_count)

    resource_data = load_resource(
        "domain_es_xdym_multi_az7.9",
        additional_replacements=replacements,
    )
    logging.debug(resource_data)

    # Create the k8s resource
    ref = k8s.CustomResourceReference(
        CRD_GROUP, CRD_VERSION, RESOURCE_PLURAL,
        resource.name, namespace="default",
    )
    k8s.create_custom_resource(ref, resource_data)
    cr = k8s.wait_resource_consumed_by_controller(ref)

    assert cr is not None
    assert k8s.get_resource_exists(ref)

    logging.debug(cr)

    # Let's check that the domain appears in AES
    aws_res = es_client.describe_elasticsearch_domain(DomainName=resource.name)
    logging.debug(aws_res)

    now = datetime.datetime.now()
    timeout = now + datetime.timedelta(seconds=CREATE_TIMEOUT_SECONDS)

    aws_res = wait_for_create_or_die(es_client, resource, timeout)
    logging.info(
        f"ES Domain {resource.name} creation succeeded and DomainStatus.Processing is now False"
    )

    assert aws_res['DomainStatus']['ElasticsearchVersion'] == '7.9'
    assert aws_res['DomainStatus']['Created'] == True
    assert aws_res['DomainStatus']['ElasticsearchClusterConfig']['InstanceCount'] == resource.data_node_count
    assert aws_res['DomainStatus']['ElasticsearchClusterConfig']['DedicatedMasterCount'] == resource.master_node_count
    assert aws_res['DomainStatus']['ElasticsearchClusterConfig']['ZoneAwarenessEnabled'] == resource.is_zone_aware

    # Delete the k8s resource on teardown of the module
    k8s.delete_custom_resource(ref)

    logging.info(
        f"Deleted CR for ES Domain {resource.name}. Waiting {DELETE_WAIT_AFTER_SECONDS} before checking existence in AWS API"
    )
    time.sleep(DELETE_WAIT_AFTER_SECONDS)

    now = datetime.datetime.now()
    timeout = now + datetime.timedelta(seconds=DELETE_TIMEOUT_SECONDS)

    # Domain should no longer appear in AES
    wait_for_delete_or_die(es_client, resource, timeout)

def test_create_delete_non_public(
    self,
    amq_client,
    admin_user_pass_secret,
):
    resource_name = random_suffix_name("my-rabbit-broker-non-public", 32)

    aup_sec_ns, aup_sec_name, aup_sec_key = admin_user_pass_secret

    replacements = REPLACEMENT_VALUES.copy()
    replacements["BROKER_NAME"] = resource_name
    replacements["ADMIN_USER_PASS_SECRET_NAMESPACE"] = aup_sec_ns
    replacements["ADMIN_USER_PASS_SECRET_NAME"] = aup_sec_name
    replacements["ADMIN_USER_PASS_SECRET_KEY"] = aup_sec_key

    resource_data = load_mq_resource(
        "broker_rabbitmq_non_public",
        additional_replacements=replacements,
    )
    logging.debug(resource_data)

    # Create the k8s resource
    ref = k8s.CustomResourceReference(
        CRD_GROUP, CRD_VERSION, RESOURCE_PLURAL,
        resource_name, namespace="default",
    )
    k8s.create_custom_resource(ref, resource_data)
    cr = k8s.wait_resource_consumed_by_controller(ref)

    assert cr is not None

    broker_id = cr['status']['brokerID']

    # Let's check that the Broker appears in AmazonMQ
    aws_res = amq_client.describe_broker(BrokerId=broker_id)
    assert aws_res is not None

    wait_for_cr_status(
        ref,
        "brokerState",
        "RUNNING",
        CREATE_INTERVAL_SLEEP_SECONDS,
        45,
    )

    # At this point, there should be at least one BrokerInstance record in
    # the Broker.Status.BrokerInstances collection which we can grab an
    # endpoint from.
    latest_res = k8s.get_resource(ref)
    assert latest_res['status']['brokerInstances'] is not None
    assert len(latest_res['status']['brokerInstances']) == 1
    assert len(latest_res['status']['brokerInstances'][0]['endpoints']) > 0

    # Delete the k8s resource on teardown of the module
    k8s.delete_custom_resource(ref)

    time.sleep(DELETE_WAIT_AFTER_SECONDS)

    now = datetime.datetime.now()
    timeout = now + datetime.timedelta(seconds=DELETE_TIMEOUT_SECONDS)

    # Broker should no longer appear in AmazonMQ
    while True:
        if datetime.datetime.now() >= timeout:
            pytest.fail(
                "Timed out waiting for Broker to be deleted from the AmazonMQ API"
            )
        time.sleep(DELETE_WAIT_INTERVAL_SLEEP_SECONDS)

        try:
            aws_res = amq_client.describe_broker(BrokerId=broker_id)
            if aws_res['BrokerState'] != "DELETION_IN_PROGRESS":
                pytest.fail(
                    "BrokerState is not DELETION_IN_PROGRESS for broker that was deleted. BrokerState is "
                    + aws_res['BrokerState']
                )
        except amq_client.exceptions.NotFoundException:
            break

import random
import string

import boto3
import pytest

from acktest.k8s import resource as k8s
from acktest.aws.identity import get_region, get_account_id

from e2e import service_marker, load_apigatewayv2_resource
from e2e.bootstrap_resources import get_bootstrap_resources
from e2e.replacement_values import REPLACEMENT_VALUES
import e2e.tests.helper as helper
from e2e.tests.helper import ApiGatewayValidator

DELETE_WAIT_AFTER_SECONDS = 10
UPDATE_WAIT_AFTER_SECONDS = 10
APIGW_DEPLOYMENT_WAIT_AFTER_SECONDS = 10

apigw_validator = ApiGatewayValidator(boto3.client('apigatewayv2'))

test_resource_values = REPLACEMENT_VALUES.copy()


@pytest.fixture(scope="module")
def api_resource():
    random_suffix = (''.join(
        random.choice(string.ascii_lowercase) for _ in range(6)))
    api_resource_name = test_resource_values['API_NAME'] + f'-{random_suffix}'
    test_resource_values['API_NAME'] = api_resource_name

    api_ref, api_data = helper.api_ref_and_data(
        api_resource_name=api_resource_name,
        replacement_values=test_resource_values)

    if k8s.get_resource_exists(api_ref):
        raise Exception(
            f"expected {api_resource_name} to not exist. Did previous test cleanup?"
        )

def test_crud_postgres14_t3_micro(
    self,
    k8s_secret,
):
    db_instance_id = random_suffix_name("pg14-t3-micro", 20)
    secret = k8s_secret(
        self.MUP_NS,
        self.MUP_SEC_NAME,
        self.MUP_SEC_KEY,
        self.MUP_SEC_VAL,
    )

    replacements = REPLACEMENT_VALUES.copy()
    replacements['COPY_TAGS_TO_SNAPSHOT'] = "False"
    replacements["DB_INSTANCE_ID"] = db_instance_id
    replacements["MASTER_USER_PASS_SECRET_NAMESPACE"] = secret.ns
    replacements["MASTER_USER_PASS_SECRET_NAME"] = secret.name
    replacements["MASTER_USER_PASS_SECRET_KEY"] = secret.key
    replacements["DB_SUBNET_GROUP_NAME"] = get_bootstrap_resources().DBSubnetGroupName

    resource_data = load_rds_resource(
        "db_instance_postgres14_t3_micro",
        additional_replacements=replacements,
    )

    # Create the k8s resource
    ref = k8s.CustomResourceReference(
        CRD_GROUP, CRD_VERSION, RESOURCE_PLURAL,
        db_instance_id, namespace="default",
    )
    k8s.create_custom_resource(ref, resource_data)
    cr = k8s.wait_resource_consumed_by_controller(ref)

    assert cr is not None
    assert 'status' in cr
    assert 'dbInstanceStatus' in cr['status']
    assert cr['status']['dbInstanceStatus'] == 'creating'
    condition.assert_not_synced(ref)

    # Wait for the resource to get synced
    assert k8s.wait_on_condition(ref, "ACK.ResourceSynced", "True", wait_periods=MAX_WAIT_FOR_SYNCED_MINUTES)

    # After the resource is synced, assert that DBInstanceStatus is available
    latest = db_instance.get(db_instance_id)
    assert latest is not None
    assert latest['DBInstanceStatus'] == 'available'
    assert latest['MultiAZ'] is False

    # Before we update the DBInstance CR below, let's check to see that the
    # DbInstanceStatus field in the CR has been updated to something other
    # than 'creating', which is what is set after the initial creation.
    # The CR's `Status.DBInstanceStatus` should be updated because the CR
    # is requeued on successful reconciliation loops and subsequent
    # reconciliation loops call ReadOne and should update the CR's Status
    # with the latest observed information.
    # https://github.com/aws-controllers-k8s/community/issues/923
    cr = k8s.get_resource(ref)
    assert cr is not None
    assert 'status' in cr
    assert 'dbInstanceStatus' in cr['status']
    assert cr['status']['dbInstanceStatus'] != 'creating'
    condition.assert_synced(ref)

    # We're now going to modify the CopyTagsToSnapshot field of the DB
    # instance, wait some time and verify that the RDS server-side resource
    # shows the new value of the field.
    latest = db_instance.get(db_instance_id)
    assert latest is not None
    assert latest['CopyTagsToSnapshot'] is False
    assert latest['DBSubnetGroup']['DBSubnetGroupName'] == get_bootstrap_resources().DBSubnetGroupName

    updates = {
        "spec": {"copyTagsToSnapshot": True, "multiAZ": True},
    }
    k8s.patch_custom_resource(ref, updates)
    time.sleep(MODIFY_WAIT_AFTER_SECONDS)

    # Wait for the resource to get synced after the patch
    assert k8s.wait_on_condition(ref, "ACK.ResourceSynced", "True", wait_periods=MAX_WAIT_FOR_SYNCED_MINUTES)

    # After the resource is synced again, assert that the patches are reflected in the AWS resource
    latest = db_instance.get(db_instance_id)
    assert latest is not None
    assert latest['CopyTagsToSnapshot'] is True
    assert latest['MultiAZ'] is True

    updates = {
        "spec": {"copyTagsToSnapshot": False, "multiAZ": False},
    }
    k8s.patch_custom_resource(ref, updates)
    time.sleep(MODIFY_WAIT_AFTER_SECONDS)

    # Wait for the resource to get synced after the patch
    assert k8s.wait_on_condition(ref, "ACK.ResourceSynced", "True", wait_periods=MAX_WAIT_FOR_SYNCED_MINUTES)

    # After the resource is synced again, assert that the patches are reflected in the AWS resource
    latest = db_instance.get(db_instance_id)
    assert latest is not None
    assert latest['CopyTagsToSnapshot'] is False
    assert latest['MultiAZ'] is False

    k8s.delete_custom_resource(ref)

    time.sleep(DELETE_WAIT_AFTER_SECONDS)

    db_instance.wait_until_deleted(db_instance_id)

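# `db_instance.get` / `db_instance.wait_until_deleted` are service helpers that are not shown in
# this excerpt. A minimal sketch of what the `get` lookup could look like, assuming it wraps the
# boto3 RDS DescribeDBInstances call; the module layout and function name are assumptions, not
# the project's actual helper.
import boto3


def get(db_instance_id):
    """Return the DescribeDBInstances record for `db_instance_id`, or None if it does not exist."""
    client = boto3.client("rds")
    try:
        resp = client.describe_db_instances(DBInstanceIdentifier=db_instance_id)
    except client.exceptions.DBInstanceNotFoundFault:
        return None
    instances = resp.get("DBInstances", [])
    return instances[0] if instances else None
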
def test_reserved_concurrent_executions(self, lambda_client):
    resource_name = random_suffix_name("lambda-function", 24)

    resources = get_bootstrap_resources()
    logging.debug(resources)

    replacements = REPLACEMENT_VALUES.copy()
    replacements["FUNCTION_NAME"] = resource_name
    replacements["BUCKET_NAME"] = resources.FunctionsBucketName
    replacements["LAMBDA_ROLE"] = resources.LambdaBasicRoleARN
    replacements["LAMBDA_FILE_NAME"] = resources.LambdaFunctionFileZip
    replacements["RESERVED_CONCURRENT_EXECUTIONS"] = "2"
    replacements["CODE_SIGNING_CONFIG_ARN"] = ""
    replacements["AWS_REGION"] = get_region()

    # Load Lambda CR
    resource_data = load_lambda_resource(
        "function",
        additional_replacements=replacements,
    )
    logging.debug(resource_data)

    # Create k8s resource
    ref = k8s.CustomResourceReference(
        CRD_GROUP, CRD_VERSION, RESOURCE_PLURAL,
        resource_name, namespace="default",
    )
    k8s.create_custom_resource(ref, resource_data)
    cr = k8s.wait_resource_consumed_by_controller(ref)

    assert cr is not None
    assert k8s.get_resource_exists(ref)

    time.sleep(CREATE_WAIT_AFTER_SECONDS)

    cr = k8s.wait_resource_consumed_by_controller(ref)

    # Check Lambda function exists
    exists = self.function_exists(lambda_client, resource_name)
    assert exists

    reservedConcurrentExecutions = self.get_function_concurrency(lambda_client, resource_name)
    assert reservedConcurrentExecutions == 2

    # Update cr
    cr["spec"]["reservedConcurrentExecutions"] = 0

    # Patch k8s resource
    k8s.patch_custom_resource(ref, cr)
    time.sleep(UPDATE_WAIT_AFTER_SECONDS)

    # Check function updated fields
    reservedConcurrentExecutions = self.get_function_concurrency(lambda_client, resource_name)
    assert reservedConcurrentExecutions == 0

    # Delete k8s resource
    _, deleted = k8s.delete_custom_resource(ref)
    assert deleted is True

    time.sleep(DELETE_WAIT_AFTER_SECONDS)

    # Check Lambda function doesn't exist
    exists = self.function_exists(lambda_client, resource_name)
    assert not exists

def test_create_delete_postgres13_standard(self):
    resource_name = "pg13-standard"
    resource_desc = "Parameters for PostgreSQL 13"

    replacements = REPLACEMENT_VALUES.copy()
    replacements["DB_PARAMETER_GROUP_NAME"] = resource_name
    replacements["DB_PARAMETER_GROUP_DESC"] = resource_desc

    resource_data = load_rds_resource(
        "db_parameter_group_postgres13_standard",
        additional_replacements=replacements,
    )
    logging.debug(resource_data)

    # Create the k8s resource
    ref = k8s.CustomResourceReference(
        CRD_GROUP, CRD_VERSION, RESOURCE_PLURAL,
        resource_name, namespace="default",
    )
    k8s.create_custom_resource(ref, resource_data)
    cr = k8s.wait_resource_consumed_by_controller(ref)

    assert cr is not None
    assert k8s.get_resource_exists(ref)

    condition.assert_synced(ref)

    # Let's check that the DB parameter group appears in RDS
    latest = db_parameter_group.get(resource_name)
    assert latest is not None
    assert latest['Description'] == resource_desc

    arn = latest['DBParameterGroupArn']
    expect_tags = [{"Key": "environment", "Value": "dev"}]
    latest_tags = db_parameter_group.get_tags(arn)
    assert expect_tags == latest_tags

    # OK, now let's update the tag set and check that the tags are
    # updated accordingly.
    new_tags = [
        {
            "key": "environment",
            "value": "prod",
        },
    ]
    updates = {
        "spec": {"tags": new_tags},
    }
    k8s.patch_custom_resource(ref, updates)
    time.sleep(MODIFY_WAIT_AFTER_SECONDS)

    latest_tags = db_parameter_group.get_tags(arn)
    after_update_expected_tags = [
        {
            "Key": "environment",
            "Value": "prod",
        },
    ]
    assert latest_tags == after_update_expected_tags

    # Delete the k8s resource on teardown of the module
    k8s.delete_custom_resource(ref)

    time.sleep(DELETE_WAIT_AFTER_SECONDS)

    # DB parameter group should no longer appear in RDS
    latest = db_parameter_group.get(resource_name)
    assert latest is None

def test_crud_route(self, api_resource, integration_resource, authorizer_resource):
    api_ref, api_cr = api_resource
    api_id = api_cr['status']['apiID']

    integration_ref, integration_cr = integration_resource
    integration_id = integration_cr['status']['integrationID']

    authorizer_ref, authorizer_cr = authorizer_resource
    authorizer_id = authorizer_cr['status']['authorizerID']

    test_data = REPLACEMENT_VALUES.copy()
    random_suffix = (''.join(
        random.choice(string.ascii_lowercase) for _ in range(6)))
    route_name = "ack-test-route-" + random_suffix
    test_data['ROUTE_NAME'] = route_name
    test_data['AUTHORIZER_ID'] = authorizer_id
    test_data['INTEGRATION_ID'] = integration_id
    test_data['API_ID'] = api_id
    test_data['ROUTE_KEY'] = 'GET /httpbins'

    route_ref, route_data = helper.route_ref_and_data(
        route_resource_name=route_name,
        replacement_values=test_data)
    logging.debug(
        f"http api route resource. name: {route_name}, data: {route_data}")

    # test create
    k8s.create_custom_resource(route_ref, route_data)
    cr = k8s.wait_resource_consumed_by_controller(route_ref)

    assert cr is not None
    assert k8s.get_resource_exists(route_ref)

    route_id = cr['status']['routeID']

    # Let's check that the HTTP Api route appears in Amazon API Gateway
    apigw_validator.assert_route_is_present(api_id=api_id, route_id=route_id)
    apigw_validator.assert_route_key(
        api_id=api_id,
        route_id=route_id,
        expected_route_key=test_data['ROUTE_KEY'])

    # test update
    updated_route_key = 'GET /uhttpbins'
    test_data['ROUTE_KEY'] = updated_route_key
    updated_route_resource_data = load_apigatewayv2_resource(
        "route",
        additional_replacements=test_data,
    )
    logging.debug(
        f"updated http api route resource: {updated_route_resource_data}")

    # Update the k8s resource
    k8s.patch_custom_resource(route_ref, updated_route_resource_data)
    time.sleep(UPDATE_WAIT_AFTER_SECONDS)

    # Let's check that the HTTP Api route appears in Amazon API Gateway with updated route key
    apigw_validator.assert_route_key(
        api_id=api_id,
        route_id=route_id,
        expected_route_key=updated_route_key)

    # test delete
    k8s.delete_custom_resource(route_ref)
    time.sleep(DELETE_WAIT_AFTER_SECONDS)

    assert not k8s.get_resource_exists(route_ref)

    # HTTP Api route should no longer appear in Amazon API Gateway
    apigw_validator.assert_route_is_deleted(api_id=api_id, route_id=route_id)

def test_function_code_signing_config(self, lambda_client, code_signing_config):
    (_, csc_resource) = code_signing_config
    code_signing_config_arn = csc_resource["status"]["ackResourceMetadata"]["arn"]

    resource_name = random_suffix_name("lambda-function", 24)

    resources = get_bootstrap_resources()

    replacements = REPLACEMENT_VALUES.copy()
    replacements["FUNCTION_NAME"] = resource_name
    replacements["BUCKET_NAME"] = resources.FunctionsBucketName
    replacements["LAMBDA_ROLE"] = resources.LambdaBasicRoleARN
    replacements["LAMBDA_FILE_NAME"] = resources.LambdaFunctionFileZip
    replacements["RESERVED_CONCURRENT_EXECUTIONS"] = "2"
    replacements["CODE_SIGNING_CONFIG_ARN"] = code_signing_config_arn
    replacements["AWS_REGION"] = get_region()

    # Load Lambda CR
    resource_data = load_lambda_resource(
        "function",
        additional_replacements=replacements,
    )
    logging.debug(resource_data)

    # Create k8s resource
    ref = k8s.CustomResourceReference(
        CRD_GROUP, CRD_VERSION, RESOURCE_PLURAL,
        resource_name, namespace="default",
    )
    k8s.create_custom_resource(ref, resource_data)
    cr = k8s.wait_resource_consumed_by_controller(ref)

    assert cr is not None
    assert k8s.get_resource_exists(ref)

    time.sleep(CREATE_WAIT_AFTER_SECONDS)

    cr = k8s.wait_resource_consumed_by_controller(ref)

    # Check Lambda function exists
    exists = self.function_exists(lambda_client, resource_name)
    assert exists

    # Check function code signing config is correct
    function_csc_arn = self.get_function_code_signing_config(lambda_client, resource_name)
    assert function_csc_arn == code_signing_config_arn

    # Delete function code signing config
    cr["spec"]["codeSigningConfigARN"] = ""
    k8s.patch_custom_resource(ref, cr)
    time.sleep(UPDATE_WAIT_AFTER_SECONDS)

    function_csc_arn = self.get_function_code_signing_config(lambda_client, resource_name)
    assert function_csc_arn is None

    # Delete k8s resource
    _, deleted = k8s.delete_custom_resource(ref)
    assert deleted is True

    time.sleep(DELETE_WAIT_AFTER_SECONDS)

    # Check Lambda function doesn't exist
    exists = self.function_exists(lambda_client, resource_name)
    assert not exists

def test_crud_integration(self, api_resource):
    api_ref, api_cr = api_resource
    api_id = api_cr['status']['apiID']

    test_data = REPLACEMENT_VALUES.copy()
    random_suffix = (''.join(
        random.choice(string.ascii_lowercase) for _ in range(6)))
    integration_name = "ack-test-integration-" + random_suffix
    test_data['INTEGRATION_NAME'] = integration_name
    test_data['API_ID'] = api_id

    integration_ref, integration_data = helper.integration_ref_and_data(
        integration_resource_name=integration_name,
        replacement_values=test_data)
    logging.debug(
        f"http api integration resource. name: {integration_name}, data: {integration_data}"
    )

    # test create
    k8s.create_custom_resource(integration_ref, integration_data)
    cr = k8s.wait_resource_consumed_by_controller(integration_ref)

    assert cr is not None
    assert k8s.get_resource_exists(integration_ref)

    integration_id = cr['status']['integrationID']

    # Let's check that the HTTP Api integration appears in Amazon API Gateway
    apigw_validator.assert_integration_is_present(
        api_id=api_id, integration_id=integration_id)
    apigw_validator.assert_integration_uri(
        api_id=api_id,
        integration_id=integration_id,
        expected_uri=test_data['INTEGRATION_URI'])

    # test update
    updated_uri = 'https://httpbin.org/post'
    test_data['INTEGRATION_URI'] = updated_uri
    updated_integration_resource_data = load_apigatewayv2_resource(
        "integration",
        additional_replacements=test_data,
    )
    logging.debug(
        f"updated http api integration resource: {updated_integration_resource_data}"
    )

    # Update the k8s resource
    k8s.patch_custom_resource(integration_ref, updated_integration_resource_data)
    time.sleep(UPDATE_WAIT_AFTER_SECONDS)

    # Let's check that the HTTP Api integration appears in Amazon API Gateway with updated uri
    apigw_validator.assert_integration_uri(
        api_id=api_id,
        integration_id=integration_id,
        expected_uri=updated_uri)

    # test delete
    k8s.delete_custom_resource(integration_ref)
    time.sleep(DELETE_WAIT_AFTER_SECONDS)

    assert not k8s.get_resource_exists(integration_ref)

    # HTTP Api integration should no longer appear in Amazon API Gateway
    apigw_validator.assert_integration_is_deleted(
        api_id=api_id, integration_id=integration_id)

def test_repository_tags(self, ecr_client):
    resource_name = random_suffix_name("ecr-repository", 24)

    replacements = REPLACEMENT_VALUES.copy()
    replacements["REPOSITORY_NAME"] = resource_name

    # Load ECR CR
    resource_data = load_ecr_resource(
        "repository",
        additional_replacements=replacements,
    )
    logging.debug(resource_data)

    # Create k8s resource
    ref = k8s.CustomResourceReference(
        CRD_GROUP, CRD_VERSION, RESOURCE_PLURAL,
        resource_name, namespace="default",
    )
    k8s.create_custom_resource(ref, resource_data)
    cr = k8s.wait_resource_consumed_by_controller(ref)

    assert cr is not None
    assert k8s.get_resource_exists(ref)

    time.sleep(CREATE_WAIT_AFTER_SECONDS)

    cr = k8s.wait_resource_consumed_by_controller(ref)

    # Check ECR repository exists
    exists = self.repository_exists(ecr_client, resource_name)
    assert exists

    # Add repository tags
    tags = [
        {
            "key": "k1",
            "value": "v1",
        },
        {
            "key": "k2",
            "value": "v2",
        },
    ]
    cr["spec"]["tags"] = tags

    # Patch k8s resource
    k8s.patch_custom_resource(ref, cr)
    time.sleep(UPDATE_WAIT_AFTER_SECONDS)

    repository_tags = self.get_resource_tags(
        ecr_client, cr["status"]["ackResourceMetadata"]["arn"])
    assert len(repository_tags) == len(tags)
    assert repository_tags[0]['Key'] == tags[0]['key']
    assert repository_tags[0]['Value'] == tags[0]['value']
    assert repository_tags[1]['Key'] == tags[1]['key']
    assert repository_tags[1]['Value'] == tags[1]['value']

    # Update repository tags
    tags = [
        {
            "key": "k1",
            "value": "v1",
        },
        {
            "key": "k2",
            "value": "v2.updated",
        },
    ]
    cr = k8s.wait_resource_consumed_by_controller(ref)
    cr["spec"]["tags"] = tags
    k8s.patch_custom_resource(ref, cr)
    time.sleep(UPDATE_WAIT_AFTER_SECONDS)

    repository_tags = self.get_resource_tags(
        ecr_client, cr["status"]["ackResourceMetadata"]["arn"])
    assert len(repository_tags) == len(tags)
    assert repository_tags[0]['Key'] == tags[0]['key']
    assert repository_tags[0]['Value'] == tags[0]['value']
    assert repository_tags[1]['Key'] == tags[1]['key']
    assert repository_tags[1]['Value'] == tags[1]['value']

    cr = k8s.wait_resource_consumed_by_controller(ref)

    # Delete one repository tag
    cr["spec"]["tags"] = tags[:-1]
    k8s.patch_custom_resource(ref, cr)
    time.sleep(UPDATE_WAIT_AFTER_SECONDS)

    repository_tags = self.get_resource_tags(
        ecr_client, cr["status"]["ackResourceMetadata"]["arn"])
    assert len(repository_tags) == len(tags[:-1])
    assert repository_tags[0]['Key'] == tags[0]['key']
    assert repository_tags[0]['Value'] == tags[0]['value']

    # Delete k8s resource
    _, deleted = k8s.delete_custom_resource(ref)
    assert deleted is True

    time.sleep(DELETE_WAIT_AFTER_SECONDS)

    # Check ECR repository doesn't exist
    exists = self.repository_exists(ecr_client, resource_name)
    assert not exists

def test_crud_mysql_serverless(
    self,
    k8s_secret,
):
    db_cluster_id = "my-aurora-mysql"
    db_name = "mydb"
    secret = k8s_secret(
        self.MUP_NS,
        self.MUP_SEC_NAME,
        self.MUP_SEC_KEY,
        self.MUP_SEC_VAL,
    )

    replacements = REPLACEMENT_VALUES.copy()
    replacements['COPY_TAGS_TO_SNAPSHOT'] = "False"
    replacements["DB_CLUSTER_ID"] = db_cluster_id
    replacements["DB_NAME"] = db_name
    replacements["MASTER_USER_PASS_SECRET_NAMESPACE"] = secret.ns
    replacements["MASTER_USER_PASS_SECRET_NAME"] = secret.name
    replacements["MASTER_USER_PASS_SECRET_KEY"] = secret.key

    resource_data = load_rds_resource(
        "db_cluster_mysql_serverless",
        additional_replacements=replacements,
    )

    ref = k8s.CustomResourceReference(
        CRD_GROUP, CRD_VERSION, RESOURCE_PLURAL,
        db_cluster_id, namespace="default",
    )
    k8s.create_custom_resource(ref, resource_data)
    cr = k8s.wait_resource_consumed_by_controller(ref)

    assert cr is not None
    assert 'status' in cr
    assert 'status' in cr['status']
    assert cr['status']['status'] == 'creating'
    condition.assert_not_synced(ref)

    db_cluster.wait_until(
        db_cluster_id,
        db_cluster.status_matches('available'),
    )

    time.sleep(CHECK_STATUS_WAIT_SECONDS)

    # Before we update the DBCluster CR below, let's check to see that the
    # Status field in the CR has been updated to something other than
    # 'creating', which is what is set after the initial creation. The
    # CR's `Status.Status` should be updated because the CR is requeued on
    # successful reconciliation loops and subsequent reconciliation loops
    # call ReadOne and should update the CR's Status with the latest
    # observed information.
    # https://github.com/aws-controllers-k8s/community/issues/923
    cr = k8s.get_resource(ref)
    assert cr is not None
    assert 'status' in cr
    assert 'status' in cr['status']
    assert cr['status']['status'] != 'creating'
    condition.assert_synced(ref)

    # We're now going to modify the CopyTagsToSnapshot field of the DB
    # cluster, wait some time and verify that the RDS server-side resource
    # shows the new value of the field.
    latest = db_cluster.get(db_cluster_id)
    assert latest is not None
    assert latest['CopyTagsToSnapshot'] == False

    updates = {
        "spec": {"copyTagsToSnapshot": True},
    }
    k8s.patch_custom_resource(ref, updates)
    time.sleep(MODIFY_WAIT_AFTER_SECONDS)

    latest = db_cluster.get(db_cluster_id)
    assert latest is not None
    assert latest['CopyTagsToSnapshot'] == True

    arn = latest['DBClusterArn']
    expect_tags = [
        {"Key": "environment", "Value": "dev"}
    ]
    latest_tags = db_cluster.get_tags(arn)
    assert expect_tags == latest_tags

    # OK, now let's update the tag set and check that the tags are
    # updated accordingly.
    new_tags = [
        {
            "key": "environment",
            "value": "prod",
        }
    ]
    updates = {
        "spec": {"tags": new_tags},
    }
    k8s.patch_custom_resource(ref, updates)
    time.sleep(MODIFY_WAIT_AFTER_SECONDS)

    latest_tags = db_cluster.get_tags(arn)
    after_update_expected_tags = [
        {
            "Key": "environment",
            "Value": "prod",
        }
    ]
    assert latest_tags == after_update_expected_tags

    k8s.delete_custom_resource(ref)

    time.sleep(DELETE_WAIT_AFTER_SECONDS)

    db_cluster.wait_until_deleted(db_cluster_id)

def test_crud_authorizer(self, api_resource):
    api_ref, api_cr = api_resource
    api_id = api_cr['status']['apiID']

    test_data = REPLACEMENT_VALUES.copy()
    random_suffix = (''.join(
        random.choice(string.ascii_lowercase) for _ in range(6)))
    authorizer_name = "ack-test-authorizer-" + random_suffix
    test_data['AUTHORIZER_NAME'] = authorizer_name
    test_data['AUTHORIZER_TITLE'] = authorizer_name
    test_data['API_ID'] = api_id
    test_data['AUTHORIZER_URI'] = f'arn:aws:apigateway:{get_region()}:lambda:path/2015-03-31/functions/{get_bootstrap_resources().AuthorizerFunctionArn}/invocations'

    authorizer_ref, authorizer_data = helper.authorizer_ref_and_data(
        authorizer_resource_name=authorizer_name,
        replacement_values=test_data)
    logging.debug(
        f"http api authorizer resource. name: {authorizer_name}, data: {authorizer_data}"
    )

    # test create
    k8s.create_custom_resource(authorizer_ref, authorizer_data)
    cr = k8s.wait_resource_consumed_by_controller(authorizer_ref)

    assert cr is not None
    assert k8s.get_resource_exists(authorizer_ref)

    authorizer_id = cr['status']['authorizerID']

    # Let's check that the HTTP Api authorizer appears in Amazon API Gateway
    apigw_validator.assert_authorizer_is_present(
        api_id=api_id, authorizer_id=authorizer_id)
    apigw_validator.assert_authorizer_name(
        api_id=api_id,
        authorizer_id=authorizer_id,
        expected_authorizer_name=authorizer_name)

    # test update
    updated_authorizer_title = 'updated-' + authorizer_name
    test_data['AUTHORIZER_TITLE'] = updated_authorizer_title
    updated_authorizer_resource_data = load_apigatewayv2_resource(
        "authorizer",
        additional_replacements=test_data,
    )
    logging.debug(
        f"updated http api authorizer resource: {updated_authorizer_resource_data}"
    )

    # Update the k8s resource
    k8s.patch_custom_resource(authorizer_ref, updated_authorizer_resource_data)
    time.sleep(UPDATE_WAIT_AFTER_SECONDS)

    # Let's check that the HTTP Api authorizer appears in Amazon API Gateway with updated title
    apigw_validator.assert_authorizer_name(
        api_id=api_id,
        authorizer_id=authorizer_id,
        expected_authorizer_name=updated_authorizer_title)

    # test delete
    k8s.delete_custom_resource(authorizer_ref)
    time.sleep(DELETE_WAIT_AFTER_SECONDS)

    assert not k8s.get_resource_exists(authorizer_ref)

    # HTTP Api authorizer should no longer appear in Amazon API Gateway
    apigw_validator.assert_authorizer_is_deleted(
        api_id=api_id, authorizer_id=authorizer_id)

def xgboost_churn_endpoint(sagemaker_client):
    """Creates a SageMaker endpoint with the XGBoost churn single-variant model
    and data capture enabled.
    """
    endpoint_resource_name = random_suffix_name("xgboost-churn", 32)
    endpoint_config_resource_name = endpoint_resource_name + "-config"
    model_resource_name = endpoint_config_resource_name + "-model"

    replacements = REPLACEMENT_VALUES.copy()
    replacements["ENDPOINT_NAME"] = endpoint_resource_name
    replacements["ENDPOINT_CONFIG_NAME"] = endpoint_config_resource_name
    replacements["MODEL_NAME"] = model_resource_name

    data_bucket = replacements["SAGEMAKER_DATA_BUCKET"]
    replacements["MODEL_LOCATION"] = f"s3://{data_bucket}/sagemaker/model/xgb-churn-prediction-model.tar.gz"

    model_reference, model_spec, model_resource = create_sagemaker_resource(
        resource_plural=cfg.MODEL_RESOURCE_PLURAL,
        resource_name=model_resource_name,
        spec_file="xgboost_model_with_model_location",
        replacements=replacements,
    )
    assert model_resource is not None
    if k8s.get_resource_arn(model_resource) is None:
        logging.error(
            f"ARN for this resource is None, resource status is: {model_resource['status']}"
        )
    assert k8s.get_resource_arn(model_resource) is not None

    (
        endpoint_config_reference,
        endpoint_config_spec,
        endpoint_config_resource,
    ) = create_sagemaker_resource(
        resource_plural=cfg.ENDPOINT_CONFIG_RESOURCE_PLURAL,
        resource_name=endpoint_config_resource_name,
        spec_file="endpoint_config_data_capture_single_variant",
        replacements=replacements,
    )
    assert endpoint_config_resource is not None
    if k8s.get_resource_arn(endpoint_config_resource) is None:
        logging.error(
            f"ARN for this resource is None, resource status is: {endpoint_config_resource['status']}"
        )
    assert k8s.get_resource_arn(endpoint_config_resource) is not None

    endpoint_reference, endpoint_spec, endpoint_resource = create_sagemaker_resource(
        resource_plural=cfg.ENDPOINT_RESOURCE_PLURAL,
        resource_name=endpoint_resource_name,
        spec_file="endpoint_base",
        replacements=replacements,
    )
    assert endpoint_resource is not None
    if k8s.get_resource_arn(endpoint_resource) is None:
        logging.error(
            f"ARN for this resource is None, resource status is: {endpoint_resource['status']}"
        )
    assert k8s.get_resource_arn(endpoint_resource) is not None

    wait_sagemaker_endpoint_status(replacements["ENDPOINT_NAME"], "InService")

    yield endpoint_spec

    for cr in (model_reference, endpoint_config_reference, endpoint_reference):
        _, deleted = k8s.delete_custom_resource(cr, cfg.DELETE_WAIT_PERIOD, cfg.DELETE_WAIT_LENGTH)
        assert deleted

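# `wait_sagemaker_endpoint_status` is not defined in this excerpt. A minimal sketch of such a
# poller, assuming it uses the boto3 SageMaker DescribeEndpoint call and retries until the
# endpoint reaches the desired status; the retry counts, sleep interval, and failure behavior
# are assumptions, not the project's actual helper.
import time

import boto3


def wait_sagemaker_endpoint_status(endpoint_name, desired_status, wait_periods=30, period_length=30):
    """Poll DescribeEndpoint until `EndpointStatus` equals `desired_status` or retries run out."""
    client = boto3.client("sagemaker")
    for _ in range(wait_periods):
        response = client.describe_endpoint(EndpointName=endpoint_name)
        if response["EndpointStatus"] == desired_status:
            return response
        time.sleep(period_length)
    raise AssertionError(
        f"Endpoint {endpoint_name} did not reach status {desired_status}"
    )
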