# Assumed imports for these test snippets; the surrounding module (not shown)
# also defines constants such as BASE_NAME, INSTANCE_TYPE, INSTANCE_COUNT,
# DEFAULT_TIMEOUT_MINUTES, CREATE_ENDPOINT_TIMEOUT_MINUTES and
# SAGEMAKER_RETRY_STRATEGY, plus the pytest fixtures and helpers such as
# create_workflow_and_check_definition referenced below.
import json
from datetime import datetime

from sagemaker.amazon.amazon_estimator import get_image_uri
from sagemaker.utils import unique_name_from_base
from stepfunctions.steps import Chain, EndpointStep
from stepfunctions.template.pipeline import InferencePipeline, TrainingPipeline


def _pipeline_teardown(sfn_client, sagemaker_session, endpoint_name, pipeline):
    """Delete the SageMaker resources and the state machine created by a pipeline test."""
    if endpoint_name is not None:
        delete_sagemaker_endpoint(endpoint_name, sagemaker_session)
        delete_sagemaker_endpoint_config(endpoint_name, sagemaker_session)
        delete_sagemaker_model(endpoint_name, sagemaker_session)

    state_machine_delete_wait(sfn_client, pipeline.workflow.state_machine_arn)
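
# A minimal sketch of how _pipeline_teardown might be driven from a pytest
# fixture; this fixture is hypothetical and not part of the original module.
import pytest


@pytest.fixture
def pipeline_resources(sfn_client, sagemaker_session):
    created = []  # (endpoint_name, pipeline) pairs registered by the test body
    yield created
    for endpoint_name, pipeline in created:
        _pipeline_teardown(sfn_client, sagemaker_session, endpoint_name, pipeline)
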
def test_inference_pipeline_framework(sfn_client, sagemaker_session,
                                      sfn_role_arn, sagemaker_role_arn,
                                      sklearn_preprocessor, sklearn_estimator,
                                      inputs):
    bucket_name = sagemaker_session.default_bucket()
    unique_name = '{}-{}'.format(BASE_NAME,
                                 datetime.now().strftime('%Y%m%d%H%M%S'))
    with timeout(minutes=CREATE_ENDPOINT_TIMEOUT_MINUTES):
        pipeline = InferencePipeline(preprocessor=sklearn_preprocessor,
                                     estimator=sklearn_estimator,
                                     inputs={
                                         'train': inputs,
                                         'test': inputs
                                     },
                                     s3_bucket=bucket_name,
                                     role=sfn_role_arn,
                                     compression_type='Gzip',
                                     content_type='application/x-npy',
                                     pipeline_name=unique_name)

        _ = pipeline.create()
        execution = pipeline.execute(job_name=unique_name)
        out = execution.get_output(wait=True)
        assert out  # On failure, get_output(wait=True) returns None.

        execution_info = execution.describe()

        # The deployed state machine definition should match the workflow
        # definition generated locally by the pipeline.
        execution_arn = execution.execution_arn
        state_machine_definition = sfn_client.describe_state_machine_for_execution(
            executionArn=execution_arn)
        state_machine_definition['definition'] = json.loads(
            state_machine_definition['definition'])
        assert state_machine_definition[
            'definition'] == pipeline.workflow.definition.to_dict()

        state_machine_arn = state_machine_definition['stateMachineArn']
        job_name = execution_info['name']

        # The input recorded by Step Functions should match what the SDK's
        # describe() reported for the same execution.
        client_info = sfn_client.describe_execution(executionArn=execution_arn)
        client_info['input'] = json.loads(client_info['input'])
        _ = client_info.pop('ResponseMetadata')
        _ = client_info.pop('output')

        assert client_info['input'] == json.loads(execution_info['input'])

        # Cleanup
        state_machine_delete_wait(sfn_client, state_machine_arn)
        delete_sagemaker_endpoint(job_name, sagemaker_session)
        delete_sagemaker_endpoint_config(job_name, sagemaker_session)
        delete_sagemaker_model(job_name, sagemaker_session)
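
# The timeout(...) context manager used in these tests comes from the test
# utilities and is not shown in this excerpt. A minimal sketch of a compatible
# implementation, assuming a Unix platform (signal.SIGALRM is not available on
# Windows):
from contextlib import contextmanager
import signal


@contextmanager
def timeout(minutes=0, seconds=0):
    def _handler(signum, frame):
        raise TimeoutError('test exceeded its time limit')

    signal.signal(signal.SIGALRM, _handler)  # install the alarm handler
    signal.alarm(int(minutes) * 60 + int(seconds))
    try:
        yield
    finally:
        signal.alarm(0)  # always cancel the pending alarm
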
def test_create_endpoint_step(trained_estimator, record_set_fixture,
                              sfn_client, sagemaker_session, sfn_role_arn):
    # Setup: Create model and endpoint config for trained estimator in SageMaker
    model = trained_estimator.create_model()
    model._create_sagemaker_model(instance_type=INSTANCE_TYPE)
    endpoint_config = model.sagemaker_session.create_endpoint_config(
        name=model.name,
        model_name=model.name,
        initial_instance_count=INSTANCE_COUNT,
        instance_type=INSTANCE_TYPE)
    # End of Setup

    # Build workflow definition
    endpoint_name = unique_name_from_base("integ-test-endpoint")
    endpoint_step = EndpointStep('create_endpoint_step',
                                 endpoint_name=endpoint_name,
                                 endpoint_config_name=model.name)
    endpoint_step.add_retry(SAGEMAKER_RETRY_STRATEGY)
    workflow_graph = Chain([endpoint_step])

    with timeout(minutes=DEFAULT_TIMEOUT_MINUTES):
        # Create workflow and check definition
        workflow = create_workflow_and_check_definition(
            workflow_graph=workflow_graph,
            workflow_name=unique_name_from_base(
                "integ-test-create-endpoint-step-workflow"),
            sfn_client=sfn_client,
            sfn_role_arn=sfn_role_arn)

        # Execute workflow
        execution = workflow.execute()
        execution_output = execution.get_output(wait=True)

        # Check workflow output
        endpoint_arn = execution_output.get("EndpointArn")
        assert endpoint_arn is not None
        assert execution_output["SdkHttpMetadata"]["HttpStatusCode"] == 200

        # Cleanup
        state_machine_delete_wait(sfn_client, workflow.state_machine_arn)
        delete_sagemaker_endpoint(endpoint_name, sagemaker_session)
        delete_sagemaker_endpoint_config(model.name, sagemaker_session)
        delete_sagemaker_model(model.name, sagemaker_session)
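
# SAGEMAKER_RETRY_STRATEGY is a module-level constant in the original test
# suite. A plausible sketch using the SDK's Retry field; the error name and
# backoff values below are assumptions, not the original configuration.
from stepfunctions.steps import Retry

SAGEMAKER_RETRY_STRATEGY = Retry(
    error_equals=['SageMaker.AmazonSageMakerException'],
    interval_seconds=5,
    max_attempts=5,
    backoff_rate=2,
)
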
def test_pca_estimator(sfn_client, sagemaker_session, sagemaker_role_arn,
                       sfn_role_arn, pca_estimator, inputs):
    bucket_name = sagemaker_session.default_bucket()
    unique_name = '{}-{}'.format(BASE_NAME,
                                 datetime.now().strftime('%Y%m%d%H%M%S'))
    hyperparams = pca_estimator.hyperparameters()

    with timeout(minutes=DEFAULT_TIMEOUT_MINUTES):
        tp = TrainingPipeline(estimator=pca_estimator,
                              role=sfn_role_arn,
                              inputs=inputs,
                              s3_bucket=bucket_name,
                              pipeline_name=unique_name)
        tp.create()

        execution = tp.execute(job_name=unique_name,
                               hyperparameters=hyperparams)
        out = execution.get_output(wait=True)
        assert out  # On failure, get_output(wait=True) returns None.
        endpoint_arn = out['EndpointArn']  # ARN of the endpoint deployed by the final state

        workflow_execution_info = execution.describe()

        # The deployed state machine definition should match the workflow
        # definition generated locally by the pipeline.
        execution_arn = execution.execution_arn
        state_machine_definition = sfn_client.describe_state_machine_for_execution(
            executionArn=execution_arn)
        state_machine_definition['definition'] = json.loads(
            state_machine_definition['definition'])
        assert state_machine_definition[
            'definition'] == tp.workflow.definition.to_dict()

        state_machine_arn = state_machine_definition['stateMachineArn']
        job_name = workflow_execution_info['name']
        s3_manifest_uri = inputs.s3_data
        status = 'SUCCEEDED'
        estimator_image_uri = get_image_uri(sagemaker_session.boto_region_name,
                                            'pca')

        # Raw describe_execution response from Step Functions, normalized for
        # comparison against the expected payload built below.
        execution_info = sfn_client.describe_execution(
            executionArn=execution_arn)
        execution_info['input'] = json.loads(execution_info['input'])
        _ = execution_info.pop('ResponseMetadata')
        _ = execution_info.pop('output')

        s3_output_path = 's3://{bucket_name}/{workflow_name}/models'.format(
            bucket_name=bucket_name, workflow_name=unique_name)
        expected_execution_info = {
            'executionArn': execution_arn,
            'stateMachineArn': state_machine_arn,
            'name': job_name,
            'status': status,
            'startDate': execution_info['startDate'],
            'stopDate': execution_info['stopDate'],
            'input': {
                'Training': {
                    'AlgorithmSpecification': {
                        'TrainingImage': estimator_image_uri,
                        'TrainingInputMode': 'File'
                    },
                    'OutputDataConfig': {
                        'S3OutputPath': s3_output_path
                    },
                    'StoppingCondition': {
                        'MaxRuntimeInSeconds': 86400
                    },
                    'ResourceConfig': {
                        'InstanceCount': 1,
                        'InstanceType': 'ml.m5.large',
                        'VolumeSizeInGB': 30
                    },
                    'RoleArn': sagemaker_role_arn,
                    'InputDataConfig': [{
                        'DataSource': {
                            'S3DataSource': {
                                'S3DataDistributionType': 'ShardedByS3Key',
                                'S3DataType': 'ManifestFile',
                                'S3Uri': s3_manifest_uri
                            }
                        },
                        'ChannelName': 'train'
                    }],
                    'HyperParameters': hyperparams,
                    'TrainingJobName': 'estimator-' + job_name
                },
                'Create Model': {
                    'ModelName': job_name,
                    'PrimaryContainer': {
                        'Image':
                        estimator_image_uri,
                        'Environment': {},
                        'ModelDataUrl':
                        's3://' + bucket_name + '/' + unique_name +
                        '/models/' + 'estimator-' + job_name +
                        '/output/model.tar.gz'
                    },
                    'ExecutionRoleArn': sagemaker_role_arn
                },
                'Configure Endpoint': {
                    'EndpointConfigName': job_name,
                    'ProductionVariants': [{
                        'ModelName': job_name,
                        'InstanceType': 'ml.m5.large',
                        'InitialInstanceCount': 1,
                        'VariantName': 'AllTraffic'
                    }]
                },
                'Deploy': {
                    'EndpointName': job_name,
                    'EndpointConfigName': job_name
                }
            }
        }
        assert execution_info == expected_execution_info

        # Cleanup
        state_machine_delete_wait(sfn_client, state_machine_arn)
        delete_sagemaker_endpoint(job_name, sagemaker_session)
        delete_sagemaker_endpoint_config(job_name, sagemaker_session)
        delete_sagemaker_model(job_name, sagemaker_session)
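

# The delete_sagemaker_* and state_machine_delete_wait helpers used throughout
# live in the shared test utilities. Minimal sketches under two assumptions:
# sagemaker_session is a sagemaker.session.Session (which provides the
# delete_endpoint, delete_endpoint_config and delete_model methods) and
# sfn_client is a boto3 Step Functions client; the polling cadence is an
# assumption.
import time


def delete_sagemaker_endpoint(endpoint_name, sagemaker_session):
    sagemaker_session.delete_endpoint(endpoint_name)


def delete_sagemaker_endpoint_config(endpoint_config_name, sagemaker_session):
    sagemaker_session.delete_endpoint_config(endpoint_config_name)


def delete_sagemaker_model(model_name, sagemaker_session):
    sagemaker_session.delete_model(model_name)


def state_machine_delete_wait(sfn_client, state_machine_arn):
    sfn_client.delete_state_machine(stateMachineArn=state_machine_arn)
    # Deletion is asynchronous; poll until the state machine is gone.
    for _ in range(60):
        try:
            sfn_client.describe_state_machine(stateMachineArn=state_machine_arn)
        except sfn_client.exceptions.StateMachineDoesNotExist:
            return
        time.sleep(5)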