def test_deploy(sagemaker_session, tf_version):
    """Train a two-instance GPU estimator, deploy to a CPU instance, and
    verify the model-creation request sent to the mocked session."""
    mx_estimator = MXNet(
        entry_point=SCRIPT,
        source_dir=SOURCE_DIR,
        role=ROLE,
        framework_version=tf_version,
        train_instance_count=2,
        train_instance_type=INSTANCE_TYPE_GPU,
        sagemaker_session=sagemaker_session,
        base_job_name="test-cifar",
    )
    mx_estimator.fit("s3://mybucket/train")
    print("job succeeded: {}".format(mx_estimator.latest_training_job.name))

    mx_estimator.deploy(initial_instance_count=1, instance_type=INSTANCE_TYPE_CPU)

    # Deploying on a CPU instance type must select the CPU serving image,
    # even though training ran on GPU instances.
    image = IMAGE_URI_FORMAT_STRING.format(REGION, CPU_IMAGE_NAME, tf_version, "cpu", "py2")
    expected_environment = {
        "SAGEMAKER_CONTAINER_LOG_LEVEL": "20",
        "SAGEMAKER_SUBMIT_DIRECTORY": SOURCE_DIR,
        "SAGEMAKER_REGION": REGION,
        "SAGEMAKER_PROGRAM": SCRIPT,
    }
    sagemaker_session.create_model.assert_called_with(
        mx_estimator._current_job_name,
        ROLE,
        {
            "Environment": expected_environment,
            "Image": image,
            "ModelDataUrl": "s3://m/m.tar.gz",
        },
    )
def test_mxnet_neo(strftime, sagemaker_session, mxnet_version):
    """Compile a trained MXNet model with Neo, deploy the compiled model, and
    check that deploying to a non-compiled instance family fails."""
    mx = MXNet(entry_point=SCRIPT_PATH,
               role=ROLE,
               sagemaker_session=sagemaker_session,
               train_instance_count=INSTANCE_COUNT,
               train_instance_type=INSTANCE_TYPE,
               framework_version=mxnet_version)
    mx.fit(inputs='s3://mybucket/train')

    input_shape = {'data': [100, 1, 28, 28]}
    output_location = 's3://neo-sdk-test'
    compiled_model = mx.compile_model(target_instance_family='ml_c4',
                                      input_shape=input_shape,
                                      output_path=output_location)

    # The session mock records every call; compilation must follow training.
    actual_call_names = [call[0] for call in sagemaker_session.method_calls]
    assert actual_call_names == ['train', 'logs_for_job',
                                 'sagemaker_client.describe_training_job',
                                 'compile_model', 'wait_for_compilation_job']

    # The fourth recorded call is compile_model; [2] selects its kwargs.
    actual_compile_model_args = sagemaker_session.method_calls[3][2]
    assert actual_compile_model_args == _create_compilation_job(
        json.dumps(input_shape), output_location)

    assert compiled_model.image == _neo_inference_image(mxnet_version)

    predictor = mx.deploy(1, CPU, use_compiled_model=True)
    assert isinstance(predictor, MXNetPredictor)

    # A compiled model only serves the instance family it was compiled for.
    with pytest.raises(Exception) as wrong_target:
        mx.deploy(1, CPU_C5, use_compiled_model=True)
    assert str(wrong_target.value).startswith('No compiled model for')

    # deploy without sagemaker Neo should continue to work
    mx.deploy(1, CPU)
def test_deploy(sagemaker_session, tf_version):
    """Train on GPU instances, deploy to a CPU instance, and verify the
    create_model call made against the mocked session.

    NOTE(review): despite using the MXNet estimator, the fixture is named
    ``tf_version`` and the base job name is 'test-cifar' -- presumably this
    test was adapted from a TensorFlow test; confirm the fixture naming.
    """
    estimator = MXNet(entry_point=SCRIPT,
                      source_dir=SOURCE_DIR,
                      role=ROLE,
                      framework_version=tf_version,
                      train_instance_count=2,
                      train_instance_type=INSTANCE_TYPE_GPU,
                      sagemaker_session=sagemaker_session,
                      base_job_name='test-cifar')

    estimator.fit('s3://mybucket/train')
    print('job succeeded: {}'.format(estimator.latest_training_job.name))

    estimator.deploy(initial_instance_count=1, instance_type=INSTANCE_TYPE_CPU)
    # Deploying on a CPU instance type must select the CPU serving image.
    image = IMAGE_URI_FORMAT_STRING.format(REGION, CPU_IMAGE_NAME, tf_version, 'cpu', 'py2')
    sagemaker_session.create_model.assert_called_with(
        estimator._current_job_name,
        ROLE,
        {
            'Environment': {
                'SAGEMAKER_ENABLE_CLOUDWATCH_METRICS': 'false',
                'SAGEMAKER_CONTAINER_LOG_LEVEL': '20',
                'SAGEMAKER_SUBMIT_DIRECTORY': SOURCE_DIR,
                'SAGEMAKER_REGION': REGION,
                'SAGEMAKER_PROGRAM': SCRIPT
            },
            'Image': image,
            'ModelDataUrl': 's3://m/m.tar.gz'
        })
def test_mxnet_neo(strftime, sagemaker_session, neo_mxnet_version):
    """Compile with Neo, deploy the compiled model, and verify that deploying
    to an instance family the model was not compiled for raises."""
    mx = MXNet(
        entry_point=SCRIPT_PATH,
        framework_version="1.6",
        py_version="py3",
        role=ROLE,
        sagemaker_session=sagemaker_session,
        instance_count=INSTANCE_COUNT,
        instance_type=INSTANCE_TYPE,
        base_job_name="sagemaker-mxnet",
    )
    mx.fit()

    input_shape = {"data": [100, 1, 28, 28]}
    output_location = "s3://neo-sdk-test"
    compiled_model = mx.compile_model(
        target_instance_family="ml_c4",
        input_shape=input_shape,
        output_path=output_location,
        framework="mxnet",
        framework_version=neo_mxnet_version,
    )

    # The mock records the call order: training, then compilation.
    call_names = [call[0] for call in sagemaker_session.method_calls]
    assert call_names == [
        "train",
        "logs_for_job",
        "sagemaker_client.describe_training_job",
        "compile_model",
        "wait_for_compilation_job",
    ]

    # The fourth recorded call is compile_model; [2] selects its kwargs.
    actual_compile_model_args = sagemaker_session.method_calls[3][2]
    assert actual_compile_model_args == _create_compilation_job(
        json.dumps(input_shape), output_location)

    assert compiled_model.image_uri == _neo_inference_image(neo_mxnet_version)

    predictor = mx.deploy(1, CPU, use_compiled_model=True)
    assert isinstance(predictor, MXNetPredictor)

    # A compiled model only serves the instance family it was compiled for.
    with pytest.raises(Exception) as wrong_target:
        mx.deploy(1, CPU_C5, use_compiled_model=True)
    assert str(wrong_target.value).startswith("No compiled model for")

    # deploy without sagemaker Neo should continue to work
    mx.deploy(1, CPU)
def test_mxnet_local_data_local_script(mxnet_training_latest_version,
                                       mxnet_training_latest_py_version):
    """Train and serve MNIST in local mode using file:// channels and a
    no-S3 local session.

    Fix: ``predictor`` is now initialized before the try block. Previously,
    if ``deploy()`` raised, the finally clause hit an unbound name and the
    resulting NameError masked the original failure.
    """
    data_path = os.path.join(DATA_DIR, "mxnet_mnist")
    script_path = os.path.join(data_path, "mnist.py")

    mx = MXNet(
        entry_point=script_path,
        role="SageMakerRole",
        instance_count=1,
        instance_type="local",
        framework_version=mxnet_training_latest_version,
        py_version=mxnet_training_latest_py_version,
        sagemaker_session=LocalNoS3Session(),
    )

    train_input = "file://" + os.path.join(data_path, "train")
    test_input = "file://" + os.path.join(data_path, "test")
    mx.fit({"train": train_input, "test": test_input})

    endpoint_name = mx.latest_training_job.name
    predictor = None  # keep the finally clause safe if deploy() raises
    # Local mode serves on a fixed port, so serialize the serving phase.
    with lock.lock(LOCK_PATH):
        try:
            predictor = mx.deploy(1, "local", endpoint_name=endpoint_name)
            data = numpy.zeros(shape=(1, 1, 28, 28))
            predictor.predict(data)
        finally:
            if predictor is not None:
                predictor.delete_endpoint()
def test_mxnet_local_mode(sagemaker_local_session, mxnet_training_latest_version,
                          mxnet_training_latest_py_version):
    """Train and serve MNIST in local mode with data uploaded through the
    local session.

    Fix: ``predictor`` is now initialized before the try block. Previously,
    if ``deploy()`` raised, the finally clause hit an unbound name and the
    resulting NameError masked the original failure.
    """
    script_path = os.path.join(DATA_DIR, "mxnet_mnist", "mnist.py")
    data_path = os.path.join(DATA_DIR, "mxnet_mnist")

    mx = MXNet(
        entry_point=script_path,
        role="SageMakerRole",
        py_version=mxnet_training_latest_py_version,
        instance_count=1,
        instance_type="local",
        sagemaker_session=sagemaker_local_session,
        framework_version=mxnet_training_latest_version,
    )

    train_input = mx.sagemaker_session.upload_data(
        path=os.path.join(data_path, "train"),
        key_prefix="integ-test-data/mxnet_mnist/train")
    test_input = mx.sagemaker_session.upload_data(
        path=os.path.join(data_path, "test"),
        key_prefix="integ-test-data/mxnet_mnist/test")

    mx.fit({"train": train_input, "test": test_input})
    endpoint_name = mx.latest_training_job.name

    predictor = None  # keep the finally clause safe if deploy() raises
    # Local mode serves on a fixed port, so serialize the serving phase.
    with lock.lock(LOCK_PATH):
        try:
            predictor = mx.deploy(1, "local", endpoint_name=endpoint_name)
            data = numpy.zeros(shape=(1, 1, 28, 28))
            predictor.predict(data)
        finally:
            if predictor is not None:
                predictor.delete_endpoint()
def test_mxnet_local_mode(sagemaker_local_session, mxnet_full_version):
    """Train MNIST in local mode, deploy locally, and run one prediction."""
    data_path = os.path.join(DATA_DIR, 'mxnet_mnist')
    script_path = os.path.join(DATA_DIR, 'mxnet_mnist', 'mnist.py')

    mx = MXNet(entry_point=script_path,
               role='SageMakerRole',
               py_version=PYTHON_VERSION,
               train_instance_count=1,
               train_instance_type='local',
               sagemaker_session=sagemaker_local_session,
               framework_version=mxnet_full_version)

    train_input = mx.sagemaker_session.upload_data(
        path=os.path.join(data_path, 'train'),
        key_prefix='integ-test-data/mxnet_mnist/train')
    test_input = mx.sagemaker_session.upload_data(
        path=os.path.join(data_path, 'test'),
        key_prefix='integ-test-data/mxnet_mnist/test')
    mx.fit({'train': train_input, 'test': test_input})

    endpoint_name = mx.latest_training_job.name
    # Local mode serves on a fixed port: hold the lock while the endpoint is up.
    with local_mode_utils.lock():
        try:
            predictor = mx.deploy(1, 'local', endpoint_name=endpoint_name)
            sample = numpy.zeros(shape=(1, 1, 28, 28))
            predictor.predict(sample)
        finally:
            mx.delete_endpoint()
def test_mxnet_local_data_local_script():
    """Local-mode training with file:// channels; serving serialized via a
    file lock.

    Fixes: the lock file descriptor was opened but never closed (fd leak),
    and an exception from ``mx.delete_endpoint()`` in the finally clause
    would have returned before the unlock, leaving the lock held. A ``with``
    block now guarantees the fd is closed on every path -- closing the fd
    also releases any fcntl lock held on it.
    """
    script_path = os.path.join(DATA_DIR, 'mxnet_mnist', 'mnist.py')
    data_path = os.path.join(DATA_DIR, 'mxnet_mnist')

    mx = MXNet(entry_point=script_path,
               role='SageMakerRole',
               train_instance_count=1,
               train_instance_type='local',
               sagemaker_session=LocalNoS3Session())

    train_input = 'file://' + os.path.join(data_path, 'train')
    test_input = 'file://' + os.path.join(data_path, 'test')

    mx.fit({'train': train_input, 'test': test_input})
    endpoint_name = mx.latest_training_job.name

    # Since Local Mode uses the same port for serving, we need a lock in order
    # to allow concurrent test execution. The serving test is really fast so it still
    # makes sense to allow this behavior.
    with open(LOCK_PATH, 'w') as local_mode_lock_fd:
        local_mode_lock = local_mode_lock_fd.fileno()
        try:
            fcntl.lockf(local_mode_lock, fcntl.LOCK_EX)
            predictor = mx.deploy(1, 'local', endpoint_name=endpoint_name)
            data = numpy.zeros(shape=(1, 1, 28, 28))
            predictor.predict(data)
        finally:
            mx.delete_endpoint()
            time.sleep(5)
            fcntl.lockf(local_mode_lock, fcntl.LOCK_UN)
def test_mxnet(strftime, sagemaker_session, mxnet_version, skip_if_mms_version):
    """Mock round trip: fit, verify the train request, create a model, and
    deploy on GPU.

    Runs only for pre-MMS framework versions (``skip_if_mms_version``).
    """
    mx = MXNet(
        entry_point=SCRIPT_PATH,
        role=ROLE,
        sagemaker_session=sagemaker_session,
        train_instance_count=INSTANCE_COUNT,
        train_instance_type=INSTANCE_TYPE,
        framework_version=mxnet_version,
    )

    inputs = "s3://mybucket/train"

    mx.fit(inputs=inputs, experiment_config=EXPERIMENT_CONFIG)

    # Only training and log streaming may hit the session mocks.
    sagemaker_call_names = [c[0] for c in sagemaker_session.method_calls]
    assert sagemaker_call_names == ["train", "logs_for_job"]
    boto_call_names = [
        c[0] for c in sagemaker_session.boto_session.method_calls
    ]
    assert boto_call_names == ["resource"]

    expected_train_args = _create_train_job(mxnet_version)
    expected_train_args["input_config"][0]["DataSource"]["S3DataSource"][
        "S3Uri"] = inputs
    expected_train_args["experiment_config"] = EXPERIMENT_CONFIG

    # method_calls[0] is the `train` call; [2] selects its kwargs.
    actual_train_args = sagemaker_session.method_calls[0][2]
    assert actual_train_args == expected_train_args

    model = mx.create_model()

    # Legacy (pre-MMS) serving: submit dir is the original sourcedir tarball.
    expected_image_base = "520713654638.dkr.ecr.us-west-2.amazonaws.com/sagemaker-mxnet:{}-gpu-py2"
    environment = {
        "Environment": {
            "SAGEMAKER_SUBMIT_DIRECTORY":
            "s3://mybucket/sagemaker-mxnet-{}/source/sourcedir.tar.gz".format(
                TIMESTAMP),
            "SAGEMAKER_PROGRAM": "dummy_script.py",
            "SAGEMAKER_ENABLE_CLOUDWATCH_METRICS": "false",
            "SAGEMAKER_REGION": "us-west-2",
            "SAGEMAKER_CONTAINER_LOG_LEVEL": "20",
        },
        "Image": expected_image_base.format(mxnet_version),
        "ModelDataUrl": "s3://m/m.tar.gz",
    }
    assert environment == model.prepare_container_def(GPU)

    # Deploying on CPU must resolve to a cpu-tagged image.
    assert "cpu" in model.prepare_container_def(CPU)["Image"]

    predictor = mx.deploy(1, GPU)
    assert isinstance(predictor, MXNetPredictor)
def test_mxnet(
    retrieve_image_uri,
    time,
    strftime,
    repack_model,
    create_tar_file,
    sagemaker_session,
    mxnet_training_version,
    mxnet_training_py_version,
):
    """Mock round trip for the current estimator API: fit, train-args check,
    model creation, GPU deploy, and MMS-vs-legacy packaging behavior."""
    mx = MXNet(
        entry_point=SCRIPT_PATH,
        framework_version=mxnet_training_version,
        py_version=mxnet_training_py_version,
        role=ROLE,
        sagemaker_session=sagemaker_session,
        instance_count=INSTANCE_COUNT,
        instance_type=INSTANCE_TYPE,
        enable_sagemaker_metrics=False,
    )

    inputs = "s3://mybucket/train"

    mx.fit(inputs=inputs, experiment_config=EXPERIMENT_CONFIG)

    # Only training and log streaming may hit the session mocks.
    sagemaker_call_names = [c[0] for c in sagemaker_session.method_calls]
    assert sagemaker_call_names == ["train", "logs_for_job"]
    boto_call_names = [
        c[0] for c in sagemaker_session.boto_session.method_calls
    ]
    assert boto_call_names == ["resource"]

    # method_calls[0] is the `train` call; [2] selects its kwargs. The job
    # name is taken from the actual call because it is generated at runtime.
    actual_train_args = sagemaker_session.method_calls[0][2]
    job_name = actual_train_args["job_name"]
    expected_train_args = _get_train_args(job_name)
    expected_train_args["input_config"][0]["DataSource"]["S3DataSource"][
        "S3Uri"] = inputs
    expected_train_args["experiment_config"] = EXPERIMENT_CONFIG
    expected_train_args["enable_sagemaker_metrics"] = False

    assert actual_train_args == expected_train_args

    model = mx.create_model()

    # Submit directory and model URL are produced by the SDK, so read them
    # back from the actual container def before building the expectation.
    actual_environment = model.prepare_container_def(GPU)
    submit_directory = actual_environment["Environment"][
        "SAGEMAKER_SUBMIT_DIRECTORY"]
    model_url = actual_environment["ModelDataUrl"]
    expected_environment = _get_environment(submit_directory, model_url, IMAGE)
    assert actual_environment == expected_environment

    # Deploying on CPU must resolve to a cpu-tagged image.
    assert "cpu" in model.prepare_container_def(CPU)["Image"]

    predictor = mx.deploy(1, GPU)
    assert isinstance(predictor, MXNetPredictor)

    # Exactly one packaging path is taken: MMS versions repack the model,
    # legacy versions tar the source directory instead.
    assert _is_mms_version(mxnet_training_version) ^ (
        create_tar_file.called and not repack_model.called)
def test_mxnet_mms_version(strftime, repack_model, sagemaker_session,
                           mxnet_version, skip_if_not_mms_version):
    """For MMS serving versions: fit, verify the train request, and check
    that the container def points at the repacked model artifact.

    Runs only for MMS framework versions (``skip_if_not_mms_version``).
    """
    mx = MXNet(
        entry_point=SCRIPT_PATH,
        role=ROLE,
        sagemaker_session=sagemaker_session,
        train_instance_count=INSTANCE_COUNT,
        train_instance_type=INSTANCE_TYPE,
        framework_version=mxnet_version,
    )

    inputs = "s3://mybucket/train"

    mx.fit(inputs=inputs)

    # Only training and log streaming may hit the session mocks.
    sagemaker_call_names = [c[0] for c in sagemaker_session.method_calls]
    assert sagemaker_call_names == ["train", "logs_for_job"]
    boto_call_names = [
        c[0] for c in sagemaker_session.boto_session.method_calls
    ]
    assert boto_call_names == ["resource"]

    expected_train_args = _create_train_job(mxnet_version)
    expected_train_args["input_config"][0]["DataSource"]["S3DataSource"][
        "S3Uri"] = inputs

    # method_calls[0] is the `train` call; [2] selects its kwargs.
    actual_train_args = sagemaker_session.method_calls[0][2]
    assert actual_train_args == expected_train_args

    model = mx.create_model()

    expected_image_base = _get_full_image_uri(mxnet_version,
                                              IMAGE_REPO_SERVING_NAME, "gpu")
    # MMS serving: both submit dir and model data reference the repacked
    # model artifact rather than the original sourcedir tarball.
    environment = {
        "Environment": {
            "SAGEMAKER_SUBMIT_DIRECTORY":
            "s3://mybucket/sagemaker-mxnet-2017-11-06-14:14:15.672/model.tar.gz",
            "SAGEMAKER_PROGRAM": "dummy_script.py",
            "SAGEMAKER_ENABLE_CLOUDWATCH_METRICS": "false",
            "SAGEMAKER_REGION": "us-west-2",
            "SAGEMAKER_CONTAINER_LOG_LEVEL": "20",
        },
        "Image": expected_image_base.format(mxnet_version),
        "ModelDataUrl":
        "s3://mybucket/sagemaker-mxnet-2017-11-06-14:14:15.672/model.tar.gz",
    }
    assert environment == model.prepare_container_def(GPU)

    # Deploying on CPU must resolve to a cpu-tagged image.
    assert "cpu" in model.prepare_container_def(CPU)["Image"]

    predictor = mx.deploy(1, GPU)
    assert isinstance(predictor, MXNetPredictor)
def test_mxnet(strftime, sagemaker_session, mxnet_version, skip_if_mms_version):
    """Mock-level round trip: fit, verify train args, create model, deploy on GPU."""
    mx = MXNet(entry_point=SCRIPT_PATH,
               role=ROLE,
               sagemaker_session=sagemaker_session,
               train_instance_count=INSTANCE_COUNT,
               train_instance_type=INSTANCE_TYPE,
               framework_version=mxnet_version)

    inputs = 's3://mybucket/train'
    mx.fit(inputs=inputs)

    # Only training and log streaming may touch the session / boto session.
    assert [c[0] for c in sagemaker_session.method_calls] == ['train', 'logs_for_job']
    assert [c[0] for c in sagemaker_session.boto_session.method_calls] == ['resource']

    expected_train_args = _create_train_job(mxnet_version)
    expected_train_args['input_config'][0]['DataSource']['S3DataSource']['S3Uri'] = inputs
    # method_calls[0] is the `train` call; [2] selects its kwargs.
    assert sagemaker_session.method_calls[0][2] == expected_train_args

    model = mx.create_model()

    submit_dir = 's3://mybucket/sagemaker-mxnet-{}/source/sourcedir.tar.gz'.format(TIMESTAMP)
    image = '520713654638.dkr.ecr.us-west-2.amazonaws.com/sagemaker-mxnet:{}-gpu-py2'.format(
        mxnet_version)
    expected_container_def = {
        'Environment': {
            'SAGEMAKER_SUBMIT_DIRECTORY': submit_dir,
            'SAGEMAKER_PROGRAM': 'dummy_script.py',
            'SAGEMAKER_ENABLE_CLOUDWATCH_METRICS': 'false',
            'SAGEMAKER_REGION': 'us-west-2',
            'SAGEMAKER_CONTAINER_LOG_LEVEL': '20'
        },
        'Image': image,
        'ModelDataUrl': 's3://m/m.tar.gz'
    }
    assert model.prepare_container_def(GPU) == expected_container_def
    # Deploying on CPU must resolve to a cpu-tagged image.
    assert 'cpu' in model.prepare_container_def(CPU)['Image']

    predictor = mx.deploy(1, GPU)
    assert isinstance(predictor, MXNetPredictor)
def test_mxnet_mms_version(strftime, repack_model, sagemaker_session,
                           mxnet_version, skip_if_not_mms_version):
    """For MMS serving versions: the container def must point at the
    repacked model artifact rather than the original sourcedir tarball.

    Runs only for MMS framework versions (``skip_if_not_mms_version``).
    """
    mx = MXNet(entry_point=SCRIPT_PATH,
               role=ROLE,
               sagemaker_session=sagemaker_session,
               train_instance_count=INSTANCE_COUNT,
               train_instance_type=INSTANCE_TYPE,
               framework_version=mxnet_version)

    inputs = 's3://mybucket/train'

    mx.fit(inputs=inputs)

    # Only training and log streaming may hit the session mocks.
    sagemaker_call_names = [c[0] for c in sagemaker_session.method_calls]
    assert sagemaker_call_names == ['train', 'logs_for_job']
    boto_call_names = [
        c[0] for c in sagemaker_session.boto_session.method_calls
    ]
    assert boto_call_names == ['resource']

    expected_train_args = _create_train_job(mxnet_version)
    expected_train_args['input_config'][0]['DataSource']['S3DataSource'][
        'S3Uri'] = inputs

    # method_calls[0] is the `train` call; [2] selects its kwargs.
    actual_train_args = sagemaker_session.method_calls[0][2]
    assert actual_train_args == expected_train_args

    model = mx.create_model()

    expected_image_base = _get_full_image_uri(mxnet_version, IMAGE_REPO_SERVING_NAME, 'gpu')
    # Both submit dir and model data reference the repacked artifact.
    environment = {
        'Environment': {
            'SAGEMAKER_SUBMIT_DIRECTORY': REPACKED_MODEL_DATA,
            'SAGEMAKER_PROGRAM': 'dummy_script.py',
            'SAGEMAKER_ENABLE_CLOUDWATCH_METRICS': 'false',
            'SAGEMAKER_REGION': 'us-west-2',
            'SAGEMAKER_CONTAINER_LOG_LEVEL': '20'
        },
        'Image': expected_image_base.format(mxnet_version),
        'ModelDataUrl': REPACKED_MODEL_DATA
    }
    assert environment == model.prepare_container_def(GPU)

    # Deploying on CPU must resolve to a cpu-tagged image.
    assert 'cpu' in model.prepare_container_def(CPU)['Image']

    predictor = mx.deploy(1, GPU)
    assert isinstance(predictor, MXNetPredictor)
def test_mxnet_local_data_local_script(mxnet_training_latest_version,
                                       mxnet_training_latest_py_version):
    """Local-mode training/serving with a no-S3 session; asserts that no S3
    resource or client was ever requested from the boto session.

    Fix: ``predictor`` is now initialized before the try block. Previously,
    if ``deploy()`` raised, the finally clause hit an unbound name and the
    resulting NameError masked the original failure.
    """
    data_path = os.path.join(DATA_DIR, "mxnet_mnist")
    script_path = os.path.join(data_path, "mnist.py")

    # Wrap the boto session factories so we can assert afterwards that no
    # S3 resource/client was requested (side_effect preserves behavior).
    local_no_s3_session = LocalNoS3Session()
    local_no_s3_session.boto_session.resource = Mock(
        side_effect=local_no_s3_session.boto_session.resource)
    local_no_s3_session.boto_session.client = Mock(
        side_effect=local_no_s3_session.boto_session.client)

    mx = MXNet(
        entry_point=script_path,
        role="SageMakerRole",
        instance_count=1,
        instance_type="local",
        framework_version=mxnet_training_latest_version,
        py_version=mxnet_training_latest_py_version,
        sagemaker_session=local_no_s3_session,
    )

    train_input = "file://" + os.path.join(data_path, "train")
    test_input = "file://" + os.path.join(data_path, "test")
    mx.fit({"train": train_input, "test": test_input})

    endpoint_name = mx.latest_training_job.name
    predictor = None  # keep the finally clause safe if deploy() raises
    # Local mode serves on a fixed port, so serialize the serving phase.
    with lock.lock(LOCK_PATH):
        try:
            predictor = mx.deploy(1, "local", endpoint_name=endpoint_name)
            data = numpy.zeros(shape=(1, 1, 28, 28))
            predictor.predict(data)

            # check if no boto_session s3 calls were made
            with pytest.raises(AssertionError):
                local_no_s3_session.boto_session.resource.assert_called_with(
                    "s3", region_name=ANY)
            with pytest.raises(AssertionError):
                local_no_s3_session.boto_session.client.assert_called_with(
                    "s3", region_name=ANY)
        finally:
            if predictor is not None:
                predictor.delete_endpoint()
def test_mxnet_local_data_local_script():
    """Local-mode training with file:// channels, then local serving of one sample."""
    data_path = os.path.join(DATA_DIR, 'mxnet_mnist')
    script_path = os.path.join(data_path, 'mnist_framework_mode.py')

    mx = MXNet(entry_point=script_path,
               role='SageMakerRole',
               train_instance_count=1,
               train_instance_type='local',
               sagemaker_session=LocalNoS3Session())

    channels = {
        'train': 'file://' + os.path.join(data_path, 'train'),
        'test': 'file://' + os.path.join(data_path, 'test'),
    }
    mx.fit(channels)

    endpoint_name = mx.latest_training_job.name
    # Local mode serves on a fixed port: hold the lock while the endpoint is up.
    with local_mode_utils.lock():
        try:
            predictor = mx.deploy(1, 'local', endpoint_name=endpoint_name)
            predictor.predict(numpy.zeros(shape=(1, 1, 28, 28)))
        finally:
            mx.delete_endpoint()
def test_mxnet(strftime, sagemaker_session, mxnet_version):
    """Mock round trip: fit, verify the train request, create a model, and
    deploy on GPU."""
    mx = MXNet(entry_point=SCRIPT_PATH, role=ROLE, sagemaker_session=sagemaker_session,
               train_instance_count=INSTANCE_COUNT, train_instance_type=INSTANCE_TYPE,
               framework_version=mxnet_version)

    inputs = 's3://mybucket/train'

    mx.fit(inputs=inputs)

    # Only training and log streaming may hit the session mocks.
    sagemaker_call_names = [c[0] for c in sagemaker_session.method_calls]
    assert sagemaker_call_names == ['train', 'logs_for_job']
    boto_call_names = [c[0] for c in sagemaker_session.boto_session.method_calls]
    assert boto_call_names == ['resource']

    expected_train_args = _create_train_job(mxnet_version)
    expected_train_args['input_config'][0]['DataSource']['S3DataSource']['S3Uri'] = inputs

    # method_calls[0] is the `train` call; [2] selects its kwargs.
    actual_train_args = sagemaker_session.method_calls[0][2]
    assert actual_train_args == expected_train_args

    model = mx.create_model()

    expected_image_base = '520713654638.dkr.ecr.us-west-2.amazonaws.com/sagemaker-mxnet:{}-gpu-py2'
    environment = {
        'Environment': {
            'SAGEMAKER_SUBMIT_DIRECTORY':
                's3://mybucket/sagemaker-mxnet-{}/source/sourcedir.tar.gz'.format(TIMESTAMP),
            'SAGEMAKER_PROGRAM': 'dummy_script.py',
            'SAGEMAKER_ENABLE_CLOUDWATCH_METRICS': 'false',
            'SAGEMAKER_REGION': 'us-west-2',
            'SAGEMAKER_CONTAINER_LOG_LEVEL': '20'
        },
        'Image': expected_image_base.format(mxnet_version),
        'ModelDataUrl': 's3://m/m.tar.gz'
    }
    assert environment == model.prepare_container_def(GPU)

    # Deploying on CPU must resolve to a cpu-tagged image.
    assert 'cpu' in model.prepare_container_def(CPU)['Image']

    predictor = mx.deploy(1, GPU)
    assert isinstance(predictor, MXNetPredictor)
mnist_estimator = MXNet(entry_point='mnist.py', role=role, output_path=model_artifacts_location, code_location=custom_code_upload_location, train_instance_count=1, train_instance_type='ml.m4.xlarge', hyperparameters={'learning_rate': 0.1}) # In[42]: mnist_estimator.fit({'train': train_data_location, 'test': test_data_location}) # In[43]: predictor = mnist_estimator.deploy(initial_instance_count=1, instance_type='ml.m4.xlarge') # ## Validating the model # * Invoke the html script to read in an input. The pixel data from your drawing will be loaded into a data variable in this notebook. # * Using the predictor object to classify the handwritten digit. # * Raw predictions and Labelled predictions display the probabilities of the digit being each of the defined labels. # * Most likely answer prints the label with the maximum probability. # In[76]: HTML(open("input.html").read()) # In[77]: print(data)
# NOTE(review): this span is a fragment -- the opening of the estimator call
# (presumably ``m = MXNet(``) lies outside the visible source, so the code is
# kept byte-identical and only annotated.
    "sms_spam_classifier_mxnet_script.py",
    role=role,
    train_instance_count=1,
    train_instance_type="ml.c5.2xlarge",
    output_path=output_path,
    base_job_name="sms-spam-classifier-mxnet",
    framework_version="1.2",
    code_location=code_location,
    hyperparameters={
        "batch_size": 100,
        "epochs": 20,
        "learning_rate": 0.01
    },
    py_version="py3",
)

# S3 channel URIs for the training and validation data.
inputs = {
    "train": "s3://{0}/{1}/train/".format(bucket_name, bucket_key_prefix),
    "val": "s3://{0}/{1}/val/".format(bucket_name, bucket_key_prefix),
}

m.fit(inputs)

# deploy the model on sage maker endpoint
mxnet_pred = m.deploy(
    initial_instance_count=1,
    instance_type="ml.t2.medium",
    endpoint_name="sagemaker-endpoint",
)
#Se corre un training job de sagemaker from sagemaker import get_execution_role from sagemaker.mxnet import MXNet m = MXNet('sentiment.py', role=get_execution_role(), train_instance_count=1, train_instance_type='ml.c4.xlarge', framework_version='1.6.0', py_version='py3', distributions={'parameter_server': {'enabled': True}}, hyperparameters={'batch-size': 8, 'epochs': 2, 'learning-rate': 0.01, 'embedding-size': 50, 'log-interval': 1000}) #Fit y generación de resultados m.fit(inputs) predictor = m.deploy(initial_instance_count=1, instance_type='ml.c4.xlarge') data = ["this movie was extremely good .", "the plot was very boring .", "this film is so slick , superficial and trend-hoppy .", "i just could not watch it till the end .", "the movie was so enthralling !"] response = predictor.predict(data) print(response)