Code example #1
def tfs_predictor_with_accelerator(sagemaker_session,
                                   tensorflow_eia_latest_version,
                                   cpu_instance_type):
    endpoint_name = sagemaker.utils.unique_name_from_base(
        "sagemaker-tensorflow-serving")
    model_data = sagemaker_session.upload_data(
        path=os.path.join(tests.integ.DATA_DIR,
                          "tensorflow-serving-test-model.tar.gz"),
        key_prefix="tensorflow-serving/models",
    )
    with tests.integ.timeout.timeout_and_delete_endpoint_by_name(
            endpoint_name, sagemaker_session):
        model = TensorFlowModel(
            model_data=model_data,
            role="SageMakerRole",
            framework_version=tensorflow_eia_latest_version,
            sagemaker_session=sagemaker_session,
            name=endpoint_name,
        )
        predictor = model.deploy(
            1,
            cpu_instance_type,
            endpoint_name=endpoint_name,
            accelerator_type="ml.eia1.medium",
        )
        yield predictor
Code example #2
    def get_model(cls):  # Lazily deploy the model once and cache the predictor
        if cls.model is None:
            # INPUT_TENSOR_NAME = 'inputs'
            # exported_model = classifier.export_savedmodel(export_dir_base = 'export/Servo', serving_input_reciever_fn = serving_input_fn)
            # with tarfile.open(os.path.join(model_path, 'model4.tar.gz'), mode='w:gz') as inp:
            #     '''
            #         What type of model should be placed in opt/ml/model?
            #         https://aws.amazon.com/blogs/machine-learning/bring-your-own-pre-trained-mxnet-or-tensorflow-models-into-amazon-sagemaker/
            #     '''
            #     # tar = tarfile.open(inp, "r:gz")
            #     # tar.extractall()
            #     # tar.close()
            #     archive.add('export', recursive=True)

            role = 'AmazonSageMaker-ExecutionRole-20200615T164342'
            # role = get_execution_role()

            # sagemaker_session = sagemaker.Session()
            # inputs = sagemaker_session.upload_data(path='model.tar.gz', key_prefix='model')
            sagemaker_model = TensorFlowModel(
                model_data='s3://sagemaker-bucket-cj2/model/model4.tar.gz',
                role=role,
                container_log_level=20,
                name='DP-MODEL5',
                framework_version='1.15')
            predictor = sagemaker_model.deploy(initial_instance_count=1,
                                               instance_type='ml.m4.xlarge')
            cls.model = predictor
        return cls.model
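The commented-out packaging step above is what the linked AWS blog post covers: a pre-trained SavedModel has to be tarred and uploaded before it can be referenced as model_data. A minimal sketch of that step, assuming the SavedModel was exported under export/Servo and reusing the bucket name from the example (both are assumptions, not part of the original code):

import tarfile

import sagemaker

# Archive the exported SavedModel (assumed to live under export/Servo).
with tarfile.open('model4.tar.gz', mode='w:gz') as archive:
    archive.add('export', recursive=True)

# Upload the archive so it can be passed to TensorFlowModel as model_data.
session = sagemaker.Session()
model_data = session.upload_data(path='model4.tar.gz',
                                 bucket='sagemaker-bucket-cj2',  # bucket name assumed from the example
                                 key_prefix='model')
print(model_data)  # expected: s3://sagemaker-bucket-cj2/model/model4.tar.gz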
Code example #3
File: main.py, Project: neoxia/sagemaker-benchmark
def deploy():
    "Deploy the model in a SageMaker Endpoint "

    print("Get the latest training job name...")
    with open(FILENAME_TRAINING_JOB_NAME) as f:
        training_job_name = f.read()

    print("Training job name :", training_job_name)

    print("Build the Model...")
    model = TensorFlowModel(
        entry_point=ENTRY_POINT_INFERENCE,
        source_dir=SOURCE_DIR,
        framework_version=FRAMEWORK_VERSION,
        model_data=
        f"{MODEL_ARTIFACTS_S3_LOCATION}/{training_job_name}/output/model.tar.gz",
        code_location=CUSTOM_CODE_SERVING_UPLOAD_S3_LOCATION,
        name=MODEL_NAME,
        role=ROLE_ARN,
        sagemaker_session=SESS)

    print("Build an endpoint...")
    predictor = model.deploy(endpoint_name=ENDPOINT_NAME,
                             initial_instance_count=DEPLOY_INSTANCE_COUNT,
                             instance_type=DEPLOY_INSTANCE_TYPE)

    return predictor
Code example #4
def tfs_predictor_with_model_and_entry_point_and_dependencies(
        sagemaker_local_session, tensorflow_inference_latest_version):
    endpoint_name = sagemaker.utils.unique_name_from_base(
        "sagemaker-tensorflow-serving")

    entry_point = os.path.join(
        tests.integ.DATA_DIR,
        "tfs/tfs-test-entrypoint-and-dependencies/inference.py")
    dependencies = [
        os.path.join(
            tests.integ.DATA_DIR,
            "tfs/tfs-test-entrypoint-and-dependencies/dependency.py",
        )
    ]

    model_data = "file://" + os.path.join(
        tests.integ.DATA_DIR, "tensorflow-serving-test-model.tar.gz")

    model = TensorFlowModel(
        entry_point=entry_point,
        model_data=model_data,
        role="SageMakerRole",
        dependencies=dependencies,
        framework_version=tensorflow_inference_latest_version,
        sagemaker_session=sagemaker_local_session,
        name=endpoint_name,
    )

    predictor = model.deploy(1, "local", endpoint_name=endpoint_name)
    try:
        yield predictor
    finally:
        predictor.delete_endpoint()
Code example #5
File: api.py, Project: Faolain/build-a-cure
def deploy_api_from_model(role, sagemaker_session):
    """Deploy an API endpoint from a model artifact stored in S3.

    TODO: check if a create_endpoint call is needed before using entry_point.
    """
    # Define the entry point script for the API (currently an empty placeholder).
    with open('entry_point.py', 'w') as f:
        f.write('\n')
    # Define the SageMaker model object from the S3 model file.
    try:
        s3_model_file = 's3://' + sagemaker_session.default_bucket() + '/model/model.tar.gz'
        sagemaker_model = TensorFlowModel(
            model_data=s3_model_file,
            role=role,
            framework_version='1.12',
            entry_point='entry_point.py',
        )
    except Exception as e:
        print('define api model error', e)
        return False
    # Deploy the endpoint from the SageMaker model object.
    try:
        predictor = sagemaker_model.deploy(initial_instance_count=1, instance_type='ml.t2.medium')
        if predictor:
            # The endpoint name (used to build the ARN) is available on the deploy response.
            return predictor
    except Exception as e:
        print('deploy api model error', e)
    return False
Code example #6
def load_model():
    sagemaker_model = TensorFlowModel(
        role=
        'arn:aws:iam::821488635735:role/service-role/AmazonSageMaker-ExecutionRole-20171216T132860',
        model_data=
        's3://sagemaker-us-east-2-821488635735/object_detection/model.tar.gz',
        entry_point='object_detect.py')

    predictor = sagemaker_model.deploy(initial_instance_count=1,
                                       instance_type='ml.m4.xlarge')
    print("Model Loaded, Now start predicting")
    return predictor
Code example #7
def test_enabling_data_capture_on_endpoint_shows_correct_data_capture_status(
        sagemaker_session, tensorflow_inference_latest_version):
    endpoint_name = unique_name_from_base("sagemaker-tensorflow-serving")
    model_data = sagemaker_session.upload_data(
        path=os.path.join(tests.integ.DATA_DIR,
                          "tensorflow-serving-test-model.tar.gz"),
        key_prefix="tensorflow-serving/models",
    )
    with tests.integ.timeout.timeout_and_delete_endpoint_by_name(
            endpoint_name, sagemaker_session):
        model = TensorFlowModel(
            model_data=model_data,
            role=ROLE,
            framework_version=tensorflow_inference_latest_version,
            sagemaker_session=sagemaker_session,
        )
        predictor = model.deploy(
            initial_instance_count=INSTANCE_COUNT,
            instance_type=INSTANCE_TYPE,
            endpoint_name=endpoint_name,
        )

        endpoint_desc = sagemaker_session.sagemaker_client.describe_endpoint(
            EndpointName=predictor.endpoint_name)

        endpoint_config_desc = sagemaker_session.sagemaker_client.describe_endpoint_config(
            EndpointConfigName=endpoint_desc["EndpointConfigName"])

        assert endpoint_config_desc.get("DataCaptureConfig") is None

        predictor.enable_data_capture()

        # Wait for endpoint to finish updating
        # Endpoint update takes ~7min. 25 retries * 60s sleeps = 25min timeout
        for _ in retries(
                max_retry_count=25,
                exception_message_prefix=
                "Waiting for 'InService' endpoint status",
                seconds_to_sleep=60,
        ):
            new_endpoint = sagemaker_session.sagemaker_client.describe_endpoint(
                EndpointName=predictor.endpoint_name)
            if new_endpoint["EndpointStatus"] == "InService":
                break

        endpoint_desc = sagemaker_session.sagemaker_client.describe_endpoint(
            EndpointName=predictor.endpoint_name)

        endpoint_config_desc = sagemaker_session.sagemaker_client.describe_endpoint_config(
            EndpointConfigName=endpoint_desc["EndpointConfigName"])

        assert endpoint_config_desc["DataCaptureConfig"]["EnableCapture"]
Code example #8
    def create_model(
        self,
        role=None,
        vpc_config_override=VPC_CONFIG_DEFAULT,
        entry_point=None,
        source_dir=None,
        dependencies=None,
        **kwargs
    ):
        """Creates ``TensorFlowModel`` object to be used for creating SageMaker model entities.

        This can be done by deploying it to a SageMaker endpoint,
        or starting SageMaker Batch Transform jobs.

        Args:
            role (str): The ``ExecutionRoleArn`` IAM Role ARN for the ``TensorFlowModel``,
                which is also used during transform jobs.
                If not specified, the role from the Estimator is used.
            vpc_config_override (dict[str, list[str]]): Optional override for VpcConfig set on the
                model. Default: use subnets and security groups from this Estimator.

                * 'Subnets' (list[str]): List of subnet ids.
                * 'SecurityGroupIds' (list[str]): List of security group ids.

            entry_point (str): Path (absolute or relative) to the local Python source file which
                should be executed as the entry point to training. If ``source_dir`` is specified,
                then ``entry_point`` must point to a file located at the root of ``source_dir``.
                If not specified and ``endpoint_type`` is 'tensorflow-serving',
                no entry point is used. If ``endpoint_type`` is also ``None``,
                then the training entry point is used.
            source_dir (str): Path (absolute or relative or an S3 URI) to a directory with any other
                serving source code dependencies aside from the entry point file (default: None).
            dependencies (list[str]): A list of paths to directories (absolute or relative) with
                any additional libraries that will be exported to the container (default: None).
            **kwargs: Additional kwargs passed to
                :class:`~sagemaker.tensorflow.model.TensorFlowModel`.

        Returns:
            sagemaker.tensorflow.model.TensorFlowModel: A ``TensorFlowModel`` object.
                See :class:`~sagemaker.tensorflow.model.TensorFlowModel` for full details.
        """
        kwargs["name"] = self._get_or_create_name(kwargs.get("name"))

        if "image_uri" not in kwargs:
            kwargs["image_uri"] = self.image_uri

        if "enable_network_isolation" not in kwargs:
            kwargs["enable_network_isolation"] = self.enable_network_isolation()

        return TensorFlowModel(
            model_data=self.model_data,
            role=role or self.role,
            container_log_level=self.container_log_level,
            framework_version=self.framework_version,
            sagemaker_session=self.sagemaker_session,
            vpc_config=self.get_vpc_config(vpc_config_override),
            entry_point=entry_point,
            source_dir=source_dir,
            dependencies=dependencies,
            **kwargs
        )
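For context, this method is normally called on a trained TensorFlow estimator: create_model() returns the TensorFlowModel wired to the training job's artifacts, which can then be deployed with its own serving entry point. A minimal sketch of that flow; the entry points, role, data location, and instance types below are placeholders, not taken from this example:

from sagemaker.tensorflow import TensorFlow

# Hypothetical training job; all argument values are assumptions.
estimator = TensorFlow(
    entry_point="train.py",
    role="SageMakerRole",
    instance_count=1,
    instance_type="ml.m5.xlarge",
    framework_version="2.3",
    py_version="py37",
)
estimator.fit("s3://my-bucket/training-data")

# create_model() picks up model_data, role, image and network settings from the
# estimator; the serving code can differ from the training entry point.
model = estimator.create_model(entry_point="inference.py", source_dir="serving_src")
predictor = model.deploy(initial_instance_count=1, instance_type="ml.m5.xlarge")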
Code example #9
    def create_model(self, model_server_workers=None):
        """Create a SageMaker ``TensorFlowModel`` object that can be deployed to an ``Endpoint``.

        Args:
            model_server_workers (int): Optional. The number of worker processes used by the inference server.
                If None, server will use one worker per vCPU.

        Returns:
            sagemaker.tensorflow.model.TensorFlowModel: A SageMaker ``TensorFlowModel`` object.
                See :func:`~sagemaker.tensorflow.model.TensorFlowModel` for full details.
        """
        env = {'SAGEMAKER_REQUIREMENTS': self.requirements_file}
        return TensorFlowModel(
            self.model_data,
            self.role,
            self.entry_point,
            source_dir=self._model_source_dir(),
            enable_cloudwatch_metrics=self.enable_cloudwatch_metrics,
            env=env,
            name=self._current_job_name,
            container_log_level=self.container_log_level,
            code_location=self.code_location,
            py_version=self.py_version,
            framework_version=self.framework_version,
            model_server_workers=model_server_workers,
            sagemaker_session=self.sagemaker_session)
Code example #10
 def _create_default_model(self,
                           model_server_workers,
                           role,
                           vpc_config_override,
                           entry_point=None,
                           source_dir=None,
                           dependencies=None,
                           **kwargs):
     """Placeholder docstring"""
     return TensorFlowModel(
         self.model_data,
         role,
         entry_point or self.entry_point,
         source_dir=source_dir or self._model_source_dir(),
         enable_cloudwatch_metrics=self.enable_cloudwatch_metrics,
         env={"SAGEMAKER_REQUIREMENTS": self.requirements_file},
         image=self.image_name,
         name=self._current_job_name,
         container_log_level=self.container_log_level,
         code_location=self.code_location,
         py_version=self.py_version,
         framework_version=self.framework_version,
         model_server_workers=model_server_workers,
         sagemaker_session=self.sagemaker_session,
         vpc_config=self.get_vpc_config(vpc_config_override),
         dependencies=dependencies or self.dependencies,
         enable_network_isolation=self.enable_network_isolation(),
         **kwargs)
Code example #11
    def create_model(self,
                     role=None,
                     vpc_config_override=VPC_CONFIG_DEFAULT,
                     entry_point=None,
                     source_dir=None,
                     dependencies=None,
                     **kwargs):
        """Creates ``TensorFlowModel`` object to be used for creating SageMaker model entities"""
        kwargs["name"] = self._get_or_create_name(kwargs.get("name"))

        if "enable_network_isolation" not in kwargs:
            kwargs["enable_network_isolation"] = self.enable_network_isolation(
            )

        return TensorFlowModel(
            model_data=self.model_data,
            role=role or self.role,
            container_log_level=self.container_log_level,
            framework_version="2.3.1",
            sagemaker_session=self.sagemaker_session,
            vpc_config=self.get_vpc_config(vpc_config_override),
            entry_point=entry_point,
            source_dir=source_dir,
            dependencies=dependencies,
            **kwargs)
Code example #12
 def create_model(self, model_url):
     from sagemaker.tensorflow.model import TensorFlowModel
     return TensorFlowModel(model_data=model_url,
                            role=self.role_name,
                            entry_point=self.script,
                            py_version=self.python,
                            name=self.endpoint_name,
                            env=self.environment)
Code example #13
def test_jumpstart_tensorflow_image_uri(patched_get_model_specs, session):

    patched_get_model_specs.side_effect = get_prototype_model_spec

    model_id, model_version = "tensorflow-ic-bit-m-r101x1-ilsvrc2012-classification-1", "*"
    instance_type = "ml.p2.xlarge"
    region = "us-west-2"

    model_specs = accessors.JumpStartModelsAccessor.get_model_specs(
        region, model_id, model_version)

    # inference
    uri = image_uris.retrieve(
        framework=None,
        region=region,
        image_scope="inference",
        model_id=model_id,
        model_version=model_version,
        instance_type=instance_type,
    )

    framework_class_uri = TensorFlowModel(
        role="mock_role",
        model_data="mock_data",
        entry_point="mock_entry_point",
        framework_version=model_specs.hosting_ecr_specs.framework_version,
        sagemaker_session=session,
    ).serving_image_uri(region, instance_type)

    assert uri == framework_class_uri
    assert uri == "763104351884.dkr.ecr.us-west-2.amazonaws.com/tensorflow-inference:2.3-gpu"

    # training
    uri = image_uris.retrieve(
        framework=None,
        region=region,
        image_scope="training",
        model_id=model_id,
        model_version=model_version,
        instance_type=instance_type,
    )

    framework_class_uri = TensorFlow(
        role="mock_role",
        entry_point="mock_entry_point",
        framework_version=model_specs.training_ecr_specs.framework_version,
        py_version=model_specs.training_ecr_specs.py_version,
        instance_type=instance_type,
        instance_count=1,
        sagemaker_session=session,
    ).training_image_uri(region=region)

    assert uri == framework_class_uri
    assert uri == "763104351884.dkr.ecr.us-west-2.amazonaws.com/tensorflow-training:2.3-gpu-py37"
Code example #14
def tfs_predictor_with_model_and_entry_point_same_tar(
        sagemaker_local_session, tensorflow_inference_latest_version, tmpdir):
    endpoint_name = sagemaker.utils.unique_name_from_base(
        "sagemaker-tensorflow-serving")

    model_tar = tar_dir(
        os.path.join(tests.integ.DATA_DIR,
                     "tfs/tfs-test-model-with-inference"), tmpdir)

    model = TensorFlowModel(
        model_data="file://" + model_tar,
        role="SageMakerRole",
        framework_version=tensorflow_inference_latest_version,
        sagemaker_session=sagemaker_local_session,
        name=endpoint_name,
    )
    predictor = model.deploy(1, "local", endpoint_name=endpoint_name)

    try:
        yield predictor
    finally:
        predictor.delete_endpoint()
Code example #15
def main(ecr_image_path, model_s3_path, instance_type, endpoint_name):
    role = get_execution_role()
    sagemaker_model = TensorFlowModel(model_data=model_s3_path,
                                      role=role,
                                      image=ecr_image_path,
                                      entry_point='bert/run_classifier.py')
    predictor = sagemaker_model.deploy(initial_instance_count=1,
                                       instance_type=instance_type,
                                       endpoint_name=endpoint_name)
    # get the status of the endpoint
    sagemaker = boto3.client(service_name='sagemaker')
    response = sagemaker.describe_endpoint(EndpointName=endpoint_name)
    status = response['EndpointStatus']
    print('EndpointStatus = {}'.format(status))

    if status == 'InService':
        print('Endpoint creation succeeded with EndpointStatus = {}'.format(status))
    else:
        print('Endpoint creation did not complete; EndpointStatus = {}'.format(status))

    return endpoint_name
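The describe_endpoint call above only samples the status once; because deploy() waits for the endpoint by default it will usually already be 'InService', but when wait=False is used (or the script is re-run later) a waiter is the more robust check. A short sketch using boto3's built-in endpoint_in_service waiter; the endpoint name here is a placeholder:

import boto3

sagemaker_client = boto3.client('sagemaker')

# Block until the endpoint reaches 'InService' (raises if it ends in a failed state
# or the waiter's retry budget is exhausted).
waiter = sagemaker_client.get_waiter('endpoint_in_service')
waiter.wait(EndpointName='my-endpoint')  # placeholder endpoint name

status = sagemaker_client.describe_endpoint(EndpointName='my-endpoint')['EndpointStatus']
print('EndpointStatus = {}'.format(status))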
Code example #16
 def _create_default_model(self, model_server_workers, role, vpc_config_override):
     return TensorFlowModel(self.model_data, role, self.entry_point,
                            source_dir=self._model_source_dir(),
                            enable_cloudwatch_metrics=self.enable_cloudwatch_metrics,
                            env={'SAGEMAKER_REQUIREMENTS': self.requirements_file},
                            image=self.image_name,
                            name=self._current_job_name,
                            container_log_level=self.container_log_level,
                            code_location=self.code_location, py_version=self.py_version,
                            framework_version=self.framework_version,
                            model_server_workers=model_server_workers,
                            sagemaker_session=self.sagemaker_session,
                            vpc_config=self.get_vpc_config(vpc_config_override),
                            dependencies=self.dependencies)
Code example #17
    def create_model(self, model_server_workers=None, role=None):
        """Create a SageMaker ``TensorFlowModel`` object that can be deployed to an ``Endpoint``.

        Args:
            role (str): The ``ExecutionRoleArn`` IAM Role ARN for the ``Model``, which is also used during
                transform jobs. If not specified, the role from the Estimator will be used.
            model_server_workers (int): Optional. The number of worker processes used by the inference server.
                If None, server will use one worker per vCPU.

        Returns:
            sagemaker.tensorflow.model.TensorFlowModel: A SageMaker ``TensorFlowModel`` object.
                See :func:`~sagemaker.tensorflow.model.TensorFlowModel` for full details.
        """
        env = {'SAGEMAKER_REQUIREMENTS': self.requirements_file}
        role = role or self.role
        return TensorFlowModel(self.model_data, role, self.entry_point, source_dir=self._model_source_dir(),
                               enable_cloudwatch_metrics=self.enable_cloudwatch_metrics, env=env, image=self.image_name,
                               name=self._current_job_name, container_log_level=self.container_log_level,
                               code_location=self.code_location, py_version=self.py_version,
                               framework_version=self.framework_version, model_server_workers=model_server_workers,
                               sagemaker_session=self.sagemaker_session)
Code example #18
    def create_model(self,
                     model_server_workers=None,
                     role=None,
                     vpc_config_override=VPC_CONFIG_DEFAULT):
        """Create a SageMaker ``TensorFlowModel`` object that can be deployed to an ``Endpoint``.

        Args:
            role (str): The ``ExecutionRoleArn`` IAM Role ARN for the ``Model``, which is also used during
                transform jobs. If not specified, the role from the Estimator will be used.
            model_server_workers (int): Optional. The number of worker processes used by the inference server.
                If None, server will use one worker per vCPU.
            vpc_config_override (dict[str, list[str]]): Optional override for VpcConfig set on the model.
                Default: use subnets and security groups from this Estimator.
                * 'Subnets' (list[str]): List of subnet ids.
                * 'SecurityGroupIds' (list[str]): List of security group ids.

        Returns:
            sagemaker.tensorflow.model.TensorFlowModel: A SageMaker ``TensorFlowModel`` object.
                See :func:`~sagemaker.tensorflow.model.TensorFlowModel` for full details.
        """
        env = {'SAGEMAKER_REQUIREMENTS': self.requirements_file}
        role = role or self.role
        return TensorFlowModel(
            self.model_data,
            role,
            self.entry_point,
            source_dir=self._model_source_dir(),
            enable_cloudwatch_metrics=self.enable_cloudwatch_metrics,
            env=env,
            image=self.image_name,
            name=self._current_job_name,
            container_log_level=self.container_log_level,
            code_location=self.code_location,
            py_version=self.py_version,
            framework_version=self.framework_version,
            model_server_workers=model_server_workers,
            sagemaker_session=self.sagemaker_session,
            vpc_config=self.get_vpc_config(vpc_config_override))
Code example #19
import tarfile
import sagemaker

# Package the exported SavedModel directory into a tarball for SageMaker
with tarfile.open('model.tar.gz', mode='w:gz') as archive:
    archive.add('export', recursive=True)

sagemaker_session = sagemaker.Session()
inputs = sagemaker_session.upload_data(path='model.tar.gz', key_prefix='model')

# Deploy the trained model

get_ipython().system('touch train.py')

from sagemaker.tensorflow.model import TensorFlowModel

role = sagemaker.get_execution_role()  # IAM execution role used to host the model

sagemaker_model = TensorFlowModel(model_data='s3://' +
                                  sagemaker_session.default_bucket() +
                                  '/model/model.tar.gz',
                                  role=role,
                                  framework_version='1.12',
                                  entry_point='train.py')

predictor = sagemaker_model.deploy(initial_instance_count=1,
                                   instance_type='ml.m4.xlarge')

endpoint_name = 'XXXXXXXXXXXXX'

predictor = sagemaker.tensorflow.model.TensorFlowPredictor(
    endpoint_name, sagemaker_session)
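Once a predictor has been attached to an existing endpoint as above, inference is a single predict() call with a TensorFlow Serving style payload (with a v2 sagemaker SDK). A minimal sketch; the endpoint name and input shape are assumptions:

from sagemaker.tensorflow.model import TensorFlowPredictor

# Attach to an already deployed TensorFlow Serving endpoint by name.
predictor = TensorFlowPredictor('my-tf-endpoint', sagemaker_session)  # placeholder name

# TensorFlow Serving accepts a JSON payload with an 'instances' list; the feature
# vector below is a made-up example input.
result = predictor.predict({'instances': [[1.0, 2.0, 3.0, 4.0]]})
print(result)  # typically a dict like {'predictions': [...]}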
Code example #20
def test_disabling_data_capture_on_endpoint_shows_correct_data_capture_status(
        sagemaker_session, tensorflow_inference_latest_version):
    endpoint_name = unique_name_from_base("sagemaker-tensorflow-serving")
    model_data = sagemaker_session.upload_data(
        path=os.path.join(tests.integ.DATA_DIR,
                          "tensorflow-serving-test-model.tar.gz"),
        key_prefix="tensorflow-serving/models",
    )
    with tests.integ.timeout.timeout_and_delete_endpoint_by_name(
            endpoint_name, sagemaker_session):
        model = TensorFlowModel(
            model_data=model_data,
            role=ROLE,
            framework_version=tensorflow_inference_latest_version,
            sagemaker_session=sagemaker_session,
        )
        destination_s3_uri = os.path.join("s3://",
                                          sagemaker_session.default_bucket(),
                                          endpoint_name, "custom")
        predictor = model.deploy(
            initial_instance_count=INSTANCE_COUNT,
            instance_type=INSTANCE_TYPE,
            endpoint_name=endpoint_name,
            data_capture_config=DataCaptureConfig(
                enable_capture=True,
                sampling_percentage=CUSTOM_SAMPLING_PERCENTAGE,
                destination_s3_uri=destination_s3_uri,
                capture_options=CUSTOM_CAPTURE_OPTIONS,
                csv_content_types=CUSTOM_CSV_CONTENT_TYPES,
                json_content_types=CUSTOM_JSON_CONTENT_TYPES,
                sagemaker_session=sagemaker_session,
            ),
        )

        endpoint_desc = sagemaker_session.sagemaker_client.describe_endpoint(
            EndpointName=predictor.endpoint_name)

        endpoint_config_desc = sagemaker_session.sagemaker_client.describe_endpoint_config(
            EndpointConfigName=endpoint_desc["EndpointConfigName"])

        assert endpoint_config_desc["DataCaptureConfig"]["EnableCapture"]
        assert (endpoint_config_desc["DataCaptureConfig"]
                ["InitialSamplingPercentage"] == CUSTOM_SAMPLING_PERCENTAGE)
        assert endpoint_config_desc["DataCaptureConfig"]["CaptureOptions"] == [
            {
                "CaptureMode": "Input"
            }
        ]
        assert (endpoint_config_desc["DataCaptureConfig"]
                ["CaptureContentTypeHeader"]["CsvContentTypes"] ==
                CUSTOM_CSV_CONTENT_TYPES)
        assert (endpoint_config_desc["DataCaptureConfig"]
                ["CaptureContentTypeHeader"]["JsonContentTypes"] ==
                CUSTOM_JSON_CONTENT_TYPES)

        predictor.disable_data_capture()

        # Wait for endpoint to finish updating
        # Endpoint update takes ~7min. 25 retries * 60s sleeps = 25min timeout
        for _ in retries(
                max_retry_count=25,
                exception_message_prefix=
                "Waiting for 'InService' endpoint status",
                seconds_to_sleep=60,
        ):
            new_endpoint = sagemaker_session.sagemaker_client.describe_endpoint(
                EndpointName=predictor.endpoint_name)
            if new_endpoint["EndpointStatus"] == "InService":
                break

        endpoint_desc = sagemaker_session.sagemaker_client.describe_endpoint(
            EndpointName=predictor.endpoint_name)

        endpoint_config_desc = sagemaker_session.sagemaker_client.describe_endpoint_config(
            EndpointConfigName=endpoint_desc["EndpointConfigName"])

        assert not endpoint_config_desc["DataCaptureConfig"]["EnableCapture"]
Code example #21
# In[65]:

import sagemaker
sagemaker_session = sagemaker.Session()
inputs = sagemaker_session.upload_data(path='model.tar.gz', key_prefix='model')

# In[66]:

get_ipython().system('touch train.py')

# In[74]:

from sagemaker.tensorflow.model import TensorFlowModel
sagemaker_model = TensorFlowModel(model_data='s3://' +
                                  sagemaker_session.default_bucket() +
                                  '/model/model.tar.gz',
                                  role=role,
                                  framework_version='2.2.0',
                                  entry_point='train.py')

# In[75]:

get_ipython().run_cell_magic(
    'time', '',
    "predictor = sagemaker_model.deploy(initial_instance_count=1,\n                                  instance_type='ml.t2.medium')"
)

# In[77]:

predictor.endpoint

Code example #22
    def create_model(
        self,
        role=None,
        vpc_config_override=VPC_CONFIG_DEFAULT,
        entry_point=None,
        source_dir=None,
        dependencies=None,
        **kwargs
    ):
        """Create a SageMaker ``RLEstimatorModel`` object that can be deployed
        to an Endpoint.

        Args:
            role (str): The ``ExecutionRoleArn`` IAM Role ARN for the ``Model``,
                which is also used during transform jobs. If not specified, the
                role from the Estimator will be used.
            vpc_config_override (dict[str, list[str]]): Optional override for VpcConfig set on
                the model. Default: use subnets and security groups from this Estimator.

                * 'Subnets' (list[str]): List of subnet ids.
                * 'SecurityGroupIds' (list[str]): List of security group ids.

            entry_point (str): Path (absolute or relative) to the Python source
                file which should be executed as the entry point for MXNet
                hosting (default: self.entry_point). If ``source_dir`` is specified,
                then ``entry_point`` must point to a file located at the root of ``source_dir``.
            source_dir (str): Path (absolute or relative) to a directory with
                any other training source code dependencies aside from the entry
                point file (default: self.source_dir). Structure within this
                directory is preserved when hosting on Amazon SageMaker.
            dependencies (list[str]): A list of paths to directories (absolute
                or relative) with any additional libraries that will be exported
                to the container (default: self.dependencies). The library
                folders will be copied to SageMaker in the same folder where the
                entry_point is copied. If ``source_dir`` points to S3,
                code will be uploaded and the S3 location will be used instead.
                This is not supported with "local code" in Local Mode.
            **kwargs: Additional kwargs passed to the :class:`~sagemaker.model.FrameworkModel`
                constructor.

        Returns:
            sagemaker.model.FrameworkModel: Depending on input parameters returns
                one of the following:

                * :class:`~sagemaker.model.FrameworkModel` - if ``image_uri`` is specified
                    on the estimator;
                * :class:`~sagemaker.mxnet.MXNetModel` - if ``image_uri`` isn't specified and
                    MXNet is used as the RL backend;
                * :class:`~sagemaker.tensorflow.model.TensorFlowModel` - if ``image_uri`` isn't
                    specified and TensorFlow is used as the RL backend.

        Raises:
            ValueError: If image_uri is not specified and framework enum is not valid.
        """
        base_args = dict(
            model_data=self.model_data,
            role=role or self.role,
            image_uri=kwargs.get("image_uri", self.image_uri),
            container_log_level=self.container_log_level,
            sagemaker_session=self.sagemaker_session,
            vpc_config=self.get_vpc_config(vpc_config_override),
        )

        base_args["name"] = self._get_or_create_name(kwargs.get("name"))

        if not entry_point and (source_dir or dependencies):
            raise AttributeError("Please provide an `entry_point`.")

        entry_point = entry_point or self._model_entry_point()
        source_dir = source_dir or self._model_source_dir()
        dependencies = dependencies or self.dependencies

        extended_args = dict(
            entry_point=entry_point,
            source_dir=source_dir,
            code_location=self.code_location,
            dependencies=dependencies,
        )
        extended_args.update(base_args)

        if self.image_uri:
            return FrameworkModel(**extended_args)

        if self.toolkit == RLToolkit.RAY.value:
            raise NotImplementedError(
                "Automatic deployment of Ray models is not currently available."
                " Train policy parameters are available in model checkpoints"
                " in the TrainingJob output."
            )

        if self.framework == RLFramework.TENSORFLOW.value:
            return TensorFlowModel(framework_version=self.framework_version, **base_args)
        if self.framework == RLFramework.MXNET.value:
            return MXNetModel(
                framework_version=self.framework_version, py_version=PYTHON_VERSION, **extended_args
            )
        raise ValueError(
            "An unknown RLFramework enum was passed in. framework: {}".format(self.framework)
        )
Code example #23
# The following step, left commented out, saves the trained model under a numbered
# version directory and archives it for deployment:
#
# filepath = os.path.join('1')
# model.save(filepath)
#
# # ARCHIVE SAVED MODEL FOR DEPLOYMENT
# os.system('tar -czvf model.tar.gz 1/')

# DEPLOY MODEL

# 'model.tar.gz' and the tokenizer ('20211005T1859_tokenizer.pkl', as loaded in inference.py) are in the same S3 folder as 'model_data' below
# 'inference.py' and 'bb_classifier_inference.py' are in the same location from which this deploy script is being run
# normally 'source_dir' would be used to indicate the S3 location of those last two files, but it is not working in this case for some reason
tf_model = TensorFlowModel(
    role='',
    model_data='s3:// /model.tar.gz',
    #source_dir  = 's3://',
    entry_point='./inference.py',
    framework_version='2.4.1',
    dependencies=['./bb_classifier_inference.py'],
)

# CREATE ENDPOINT
endpoint = 'test-bin-bias-model-00'
tf_model.deploy(
    initial_instance_count=1,
    instance_type='ml.g4dn.xlarge',
    endpoint_name=endpoint,
    wait=True,
    update_endpoint=True,
    #aws_region='eu-central-1',
)
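For TensorFlow Serving images such as the 2.4.1 one used above, the layout inside model.tar.gz matters: the SavedModel must sit under a numbered version directory, and bundled serving code goes under code/. As an alternative to passing entry_point and dependencies separately as the example does, the files can be packed into the archive directly; a sketch, assuming a SavedModel already exported to ./1 (all paths are placeholders):

import tarfile

# Expected archive layout for the TensorFlow Serving container:
#   1/                      <- numbered model version (saved_model.pb, variables/)
#   code/inference.py       <- optional pre/post-processing handlers
#   code/requirements.txt   <- optional extra pip dependencies
with tarfile.open('model.tar.gz', mode='w:gz') as archive:
    archive.add('1', arcname='1', recursive=True)
    archive.add('inference.py', arcname='code/inference.py')
    archive.add('bb_classifier_inference.py', arcname='code/bb_classifier_inference.py')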
Code example #24
model_path = 's3://sagemaker-models-euwest2/inception_resnetv2_nainet49_v1.tar.gz'


def resolve_sm_role():
    client = boto3.client('iam', region_name='eu-west-1')
    response_roles = client.list_roles(PathPrefix='/', MaxItems=999)
    for role in response_roles['Roles']:
        if role['RoleName'].startswith('AmazonSageMaker-ExecutionRole-'):
            print('Resolved SageMaker IAM Role to: ' + str(role))
            return role['Arn']
    raise Exception(
        'Could not resolve what should be the SageMaker role to be used')


role = resolve_sm_role()

sagemaker_model = TensorFlowModel(model_data=model_path,
                                  role=role,
                                  entry_point='entry_point.py',
                                  name=MODEL_NAME)

try:
    client = boto3.client('sagemaker')
    client.delete_endpoint(EndpointName=MODEL_NAME)
    client.delete_model(ModelName=MODEL_NAME)
except Exception:
    pass

sagemaker_model.deploy(initial_instance_count=1, instance_type='ml.m4.xlarge')
Code example #25
role = get_execution_role()

sess = sage.Session()

client = boto3.client('sagemaker')
name = client.list_training_jobs(
    SortBy='CreationTime',
    SortOrder='Descending')['TrainingJobSummaries'][0]['TrainingJobName']

s3_model_path = 's3://cop-group9/model/' + name + '/output/model.tar.gz'
print('S3 path for model', s3_model_path)

#get_ipython().system('touch train1.py')

sagemaker_model = TensorFlowModel(
    model_data=s3_model_path,
    role=role,
    framework_version='1.12.0',
    #py_version='py2',
    entry_point='train1.py')

print('Starting deployment...')
predictor = sagemaker_model.deploy(initial_instance_count=1,
                                   instance_type='ml.m4.xlarge')

endpoint_name = predictor.endpoint
print('Finished deployment of Endpoint: ', endpoint_name)

endpoint_file = open("endpoint_name.txt", "w")
endpoint_file.write(endpoint_name)
endpoint_file.close()

print('Endpoint name written to file: endpoint_name.txt')