Example 1
def test_source_dirs(tmpdir, sagemaker_local_session):
    """Train a PyTorch estimator that bundles a source dir plus an extra
    dependency file, deploy it locally, and check one prediction round-trip."""
    source_dir = os.path.join(DATA_DIR, "pytorch_source_dirs")
    dependency_path = os.path.join(str(tmpdir), "alexa.py")

    # Write the tiny helper module that the training script imports.
    with open(dependency_path, "w") as dep_file:
        dep_file.write("def question(to_anything): return 42")

    estimator = PyTorch(
        entry_point="train.py",
        role="SageMakerRole",
        source_dir=source_dir,
        dependencies=[dependency_path],
        py_version=PYTHON_VERSION,
        train_instance_count=1,
        train_instance_type="local",
        sagemaker_session=sagemaker_local_session,
    )
    estimator.fit()

    # Endpoint tests all bind the same local port, so serialize them.
    with lock.lock():
        try:
            predictor = estimator.deploy(initial_instance_count=1, instance_type="local")
            assert predictor.predict([7]) == [49]
        finally:
            estimator.delete_endpoint()
def test_source_dirs(tmpdir, sagemaker_local_session):
    """Fit and locally deploy a PyTorch estimator whose entry point pulls in
    an extra dependency module, then assert the endpoint's prediction."""
    source_dir = os.path.join(DATA_DIR, 'pytorch_source_dirs')
    helper_module = os.path.join(str(tmpdir), 'alexa.py')

    # Create the dependency module the training script will import.
    with open(helper_module, 'w') as out:
        out.write('def question(to_anything): return 42')

    estimator = PyTorch(entry_point='train.py',
                        role='SageMakerRole',
                        source_dir=source_dir,
                        dependencies=[helper_module],
                        py_version=PYTHON_VERSION,
                        train_instance_count=1,
                        train_instance_type='local',
                        sagemaker_session=sagemaker_local_session)
    try:
        estimator.fit()
        predictor = estimator.deploy(initial_instance_count=1,
                                     instance_type='local')
        response = predictor.predict([7])
        assert response == [49]
    finally:
        # Always tear the endpoint down, even if fit/deploy/predict failed.
        estimator.delete_endpoint()