def test_transform_pytorch_vpc_custom_model_bucket(
    sagemaker_session,
    pytorch_inference_latest_version,
    pytorch_inference_latest_py_version,
    cpu_instance_type,
    custom_bucket_name,
):
    """Run a PyTorch batch transform in a VPC with artifacts in a custom bucket.

    Verifies that the SageMaker model created for the transform job carries
    the requested VPC configuration and that its model data URL points at
    ``custom_bucket_name``.
    """
    data_dir = os.path.join(DATA_DIR, "pytorch_mnist")

    ec2_client = sagemaker_session.boto_session.client("ec2")
    subnet_ids, security_group_id = get_or_create_vpc_resources(ec2_client)

    # Stage the pre-trained model artifact in the custom bucket so we can
    # later assert that SageMaker references it from there.
    model_data = sagemaker_session.upload_data(
        path=os.path.join(data_dir, "model.tar.gz"),
        bucket=custom_bucket_name,
        key_prefix="integ-test-data/pytorch_mnist/model",
    )

    model = PyTorchModel(
        model_data=model_data,
        entry_point=os.path.join(data_dir, "mnist.py"),
        role="SageMakerRole",
        framework_version=pytorch_inference_latest_version,
        py_version=pytorch_inference_latest_py_version,
        sagemaker_session=sagemaker_session,
        vpc_config={"Subnets": subnet_ids, "SecurityGroupIds": [security_group_id]},
        code_location="s3://{}".format(custom_bucket_name),
    )

    transform_input = sagemaker_session.upload_data(
        path=os.path.join(data_dir, "transform", "data.npy"),
        key_prefix="integ-test-data/pytorch_mnist/transform",
    )

    transformer = model.transformer(1, cpu_instance_type)

    # Fix: start the transform job INSIDE the timeout/cleanup context so the
    # model and the (possibly still running) job are torn down even when the
    # job fails or the test times out. Previously the job was launched before
    # entering the guard, leaking resources on early failure.
    with timeout_and_delete_model_with_transformer(
        transformer, sagemaker_session, minutes=TRANSFORM_DEFAULT_TIMEOUT_MINUTES
    ):
        transformer.transform(
            transform_input,
            content_type="application/x-npy",
            job_name=unique_name_from_base("test-transform-vpc"),
        )
        transformer.wait()

        model_desc = sagemaker_session.sagemaker_client.describe_model(
            ModelName=transformer.model_name
        )
        # Subnet order is not guaranteed by the API; compare as sets.
        assert set(subnet_ids) == set(model_desc["VpcConfig"]["Subnets"])
        assert [security_group_id] == model_desc["VpcConfig"]["SecurityGroupIds"]

        # The model artifact must have been served from the custom bucket.
        model_bucket, _ = s3.parse_s3_url(model_desc["PrimaryContainer"]["ModelDataUrl"])
        assert custom_bucket_name == model_bucket
# Upload every tile found under data/test_AM_image/source_tiles/<group>/source/
# to S3 under <input_prefix>/<group>/<filename>.
local_input = Path('data/test_AM_image/source_tiles')

for group_dir in local_input.iterdir():
    source_dir = group_dir / 'source'
    for tile in source_dir.iterdir():
        dest_key = input_prefix / group_dir.name / tile.name
        print(f'Uploading {tile} to {dest_key}')
        s3.upload_file(str(tile), bucket, str(dest_key))


# Run batch inference over the uploaded tiles and report wall-clock time.
session = Session()
s3_input = f's3://{bucket}/{input_prefix}'
s3_output = f's3://{bucket}/{output_prefix}'

pytorch_model = PyTorchModel(
    model_data='s3://am-segm/unet.tar.gz',
    image='236062312728.dkr.ecr.eu-west-1.amazonaws.com/intsco/am-segm',
    role='AM-SegmSageMakerRole',
    entry_point='sagemaker/main.py',
    sagemaker_session=session,
)

transformer = pytorch_model.transformer(
    instance_count=3,
    instance_type='ml.c4.xlarge',  # alternative: 'ml.p2.xlarge' (GPU)
    output_path=s3_output,
    accept='application/x-image',
    strategy='SingleRecord',
    env={'MODEL_SERVER_TIMEOUT': '180'},
)

start = time()
transformer.transform(
    data=s3_input,
    data_type='S3Prefix',
    content_type='application/x-image',
)
transformer.wait()

elapsed = int(time() - start)
print('{} min {} sec'.format(*divmod(elapsed, 60)))