Example 1
def GetPrediction_frame(input_to_model):
    ''' Accepts input_to_model, returns prediction from model '''

    # # initialize variables:
    # input_to_model = []
    # prediction = []

    # # convert frames to grayscale and resize:
    # for frame in dicom['pixel_data']:

    #     # convert frame to grayscale:
    #     grayscale_image = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)

    #     # reduce frame size:
    #     reduced_image = cv2.resize(grayscale_image,(96*2,64*2))

    #     # append to list:
    #     input_to_model.append(reduced_image)

    # # convert data to numpy array for further processing:
    # input_to_model = np.array(input_to_model)

    # # normalize each frame and add a channel dimension to fit model requirements:
    # number_of_frames = input_to_model.shape[0]
    # means = input_to_model.reshape(number_of_frames, -1).mean(-1)
    # stds = input_to_model.reshape(number_of_frames, -1).std(-1)
    # input_to_model = input_to_model - means[:, np.newaxis, np.newaxis]
    # input_to_model = input_to_model / stds[:, np.newaxis, np.newaxis]
    # input_to_model = input_to_model.reshape(input_to_model.shape+(1,))

    # input_to_model = pickle.dumps(input_to_model.astype('float16'))

    if configuration['view']['binary_model_type'] == 'None':

        # model to request from the multi-model endpoint:
        model_name = 'views_model'

        # get endpoint of model:
        views_predictor = Predictor('tf-multi-model-endpoint',
                                    model_name=model_name,
                                    content_type='application/npy',
                                    serializer=None)

    elif configuration['view']['binary_model_type'] == 'frame':

        # model to request from the multi-model endpoint:
        model_name = 'master_model'

        # get endpoint of model:
        views_predictor = Predictor('tf-multi-model-endpoint',
                                    model_name=model_name,
                                    content_type='application/npy',
                                    serializer=None)

    # contact endpoint for prediction:
    result = np.array(views_predictor.predict(input_to_model)['predictions'])

    # build prediction object:
    prediction = {
        'model_name': model_name,
        'predictions': result,
    }

    return prediction
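GetPrediction_frame reads a module-level configuration dict (as does GetPrediction_video in Example 9) that the excerpt never defines. A hypothetical sketch of the shape it assumes, inferred only from the keys these snippets access; the real values live in the surrounding project:

# Hypothetical shape of the configuration object used above; the actual
# contents come from the surrounding project, not from this snippet.
configuration = {
    'view': {
        'binary_model_type': 'frame',   # observed values: 'None'/'none', 'frame', 'video'
        'preprocessing': 'downsample',  # observed values: 'downsample', 'zoom', 'zoom_cv2'
    },
}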
Example 2
def test_predictor_csv(sagemaker_session):
    predictor = Predictor('endpoint', sagemaker_session, serializer=csv_serializer)

    mock_response(json.dumps(PREDICT_RESPONSE).encode('utf-8'), sagemaker_session)
    result = predictor.predict([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])

    assert_invoked(sagemaker_session,
                   EndpointName='endpoint',
                   ContentType=CSV_CONTENT_TYPE,
                   Accept=JSON_CONTENT_TYPE,
                   Body='1.0,2.0,3.0\n4.0,5.0,6.0')

    assert PREDICT_RESPONSE == result
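The tests in this listing lean on helper functions that the excerpt does not include. A minimal sketch of two of them, assuming sagemaker_session is a unittest.mock.Mock whose sagemaker_runtime_client is stubbed (hedged; the helpers in the actual SageMaker SDK test suite may differ in detail):

import io

JSON_CONTENT_TYPE = 'application/json'

def mock_response(expected_response, sagemaker_session, content_type=JSON_CONTENT_TYPE):
    # Stub the runtime client so predict() receives a canned response body.
    sagemaker_session.sagemaker_runtime_client.invoke_endpoint.return_value = {
        'ContentType': content_type,
        'Body': io.BytesIO(expected_response),
    }

def assert_invoked(sagemaker_session, **kwargs):
    # Verify the endpoint was invoked exactly once with the expected arguments.
    sagemaker_session.sagemaker_runtime_client.invoke_endpoint.assert_called_once_with(**kwargs)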
Example 3
def test_predictor(sagemaker_session):
    predictor = Predictor('endpoint', sagemaker_session)

    mock_response(json.dumps(PREDICT_RESPONSE).encode('utf-8'), sagemaker_session)
    result = predictor.predict(PREDICT_INPUT)

    assert_invoked(sagemaker_session,
                   EndpointName='endpoint',
                   ContentType=JSON_CONTENT_TYPE,
                   Accept=JSON_CONTENT_TYPE,
                   Body=json.dumps(PREDICT_INPUT))

    assert PREDICT_RESPONSE == result
Example 4
def test_predictor_regress(sagemaker_session):
    predictor = Predictor('endpoint', sagemaker_session, model_name='model', model_version='123')

    mock_response(json.dumps(REGRESS_RESPONSE).encode('utf-8'), sagemaker_session)
    result = predictor.regress(REGRESS_INPUT)

    assert_invoked_with_body_dict(sagemaker_session,
                                  EndpointName='endpoint',
                                  ContentType=JSON_CONTENT_TYPE,
                                  Accept=JSON_CONTENT_TYPE,
                                  CustomAttributes='tfs-method=regress,tfs-model-name=model,tfs-model-version=123',
                                  Body=json.dumps(REGRESS_INPUT))

    assert REGRESS_RESPONSE == result
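assert_invoked_with_body_dict is likewise defined outside the excerpt; presumably it compares the Body payload as parsed JSON so that key ordering cannot break the assertion. A hedged sketch along the same lines as the assert_invoked helper sketched above:

def assert_invoked_with_body_dict(sagemaker_session, **kwargs):
    # Like assert_invoked, but compare Body as a dict rather than a raw string.
    call_kwargs = sagemaker_session.sagemaker_runtime_client.invoke_endpoint.call_args[1]
    assert json.loads(kwargs.pop('Body')) == json.loads(call_kwargs.pop('Body'))
    assert kwargs == call_kwargs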
Example 5
def test_predictor_classify(sagemaker_session):
    predictor = Predictor('endpoint', sagemaker_session)

    mock_response(json.dumps(CLASSIFY_RESPONSE).encode('utf-8'), sagemaker_session)
    result = predictor.classify(CLASSIFY_INPUT)

    assert_invoked_with_body_dict(sagemaker_session,
                                  EndpointName='endpoint',
                                  ContentType=JSON_CONTENT_TYPE,
                                  Accept=JSON_CONTENT_TYPE,
                                  CustomAttributes='tfs-method=classify',
                                  Body=json.dumps(CLASSIFY_INPUT))

    assert CLASSIFY_RESPONSE == result
Example 6
def test_predictor_jsons(sagemaker_session):
    predictor = Predictor('endpoint', sagemaker_session, serializer=None,
                          content_type='application/jsons')

    mock_response(json.dumps(PREDICT_RESPONSE).encode('utf-8'), sagemaker_session)
    result = predictor.predict('[1.0, 2.0, 3.0]\n[4.0, 5.0, 6.0]')

    assert_invoked(sagemaker_session,
                   EndpointName='endpoint',
                   ContentType='application/jsons',
                   Accept=JSON_CONTENT_TYPE,
                   Body='[1.0, 2.0, 3.0]\n[4.0, 5.0, 6.0]')

    assert PREDICT_RESPONSE == result
Example 7
def test_predictor_regress(sagemaker_session):
    predictor = Predictor("endpoint", sagemaker_session, model_name="model", model_version="123")

    mock_response(json.dumps(REGRESS_RESPONSE).encode("utf-8"), sagemaker_session)
    result = predictor.regress(REGRESS_INPUT)

    assert_invoked_with_body_dict(
        sagemaker_session,
        EndpointName="endpoint",
        ContentType=JSON_CONTENT_TYPE,
        Accept=JSON_CONTENT_TYPE,
        CustomAttributes="tfs-method=regress,tfs-model-name=model,tfs-model-version=123",
        Body=json.dumps(REGRESS_INPUT),
    )

    assert REGRESS_RESPONSE == result
Example 8
def test_predictor_jsons(sagemaker_session):
    predictor = Predictor(
        "endpoint", sagemaker_session, serializer=None, content_type="application/jsons"
    )

    mock_response(json.dumps(PREDICT_RESPONSE).encode("utf-8"), sagemaker_session)
    result = predictor.predict("[1.0, 2.0, 3.0]\n[4.0, 5.0, 6.0]")

    assert_invoked(
        sagemaker_session,
        EndpointName="endpoint",
        ContentType="application/jsons",
        Accept=JSON_CONTENT_TYPE,
        Body="[1.0, 2.0, 3.0]\n[4.0, 5.0, 6.0]",
    )

    assert PREDICT_RESPONSE == result
Example 9
def GetPrediction_video(input_to_model):
    ''' Accepts input_to_model, returns prediction from model '''

    # initialize variables:
    preprocessing = configuration['view']['preprocessing']

    # get endpoint of model:
    if configuration['view']['binary_model_type'] == 'none':

        # models:
        model_name = 'ResNet50V2_views_model_vid_spline'
        #model_name = 'ResNet50V2_views_model_vid_spline_noweights'

        # get endpoint of model:
        views_predictor = Predictor('tf-multi-model-endpoint',
                                    model_name=model_name,
                                    content_type='application/npy',
                                    serializer=None)

    elif configuration['view']['binary_model_type'] == 'video':

        # models:
        if preprocessing == 'downsample':
            model_name = 'ResNet50V2_master_model_vid_dows'
            #model_name = 'ResNet50V2_time_distributed_master_20200126-augoff_epoch20_downs_nonorm_vid'
        elif preprocessing == 'zoom_cv2' or preprocessing == 'zoom':
            model_name = 'ResNet50V2_master_model_vid_spline'

        # get endpoint of model:
        views_predictor = Predictor('tf-multi-model-endpoint',
                                    model_name=model_name,
                                    content_type='application/npy',
                                    serializer=None)

    # contact endpoint for prediction:
    result = np.array(views_predictor.predict(input_to_model)['predictions'])

    # build prediction object:
    prediction = {
        'model_name': model_name,
        'predictions': result,
    }

    return prediction
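The serialized payload these functions expect is presumably produced the way Example 1's commented-out block suggests: a float16 numpy array pickled with pickle.dumps. A hypothetical call; the frames array and its shape are assumptions, not part of the original snippet:

import pickle
import numpy as np

# hypothetical stack of already-preprocessed, normalized frames:
frames = np.random.rand(32, 128, 192).astype('float16')
input_to_model = pickle.dumps(frames)
prediction = GetPrediction_video(input_to_model)
print(prediction['model_name'], prediction['predictions'].shape)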
Example 10
def test_predict_csv(tfs_predictor):
    input_data = '1.0,2.0,5.0\n1.0,2.0,5.0'
    expected_result = {'predictions': [[3.5, 4.0, 5.5], [3.5, 4.0, 5.5]]}

    predictor = Predictor(tfs_predictor.endpoint,
                          tfs_predictor.sagemaker_session,
                          serializer=sagemaker.predictor.csv_serializer)

    result = predictor.predict(input_data)
    assert expected_result == result
Example 11
def test_predictor_classify_bad_content_type(sagemaker_session):
    predictor = Predictor("endpoint", sagemaker_session, csv_serializer)

    with pytest.raises(ValueError):
        predictor.classify(CLASSIFY_INPUT)
Example 12
def test_predictor_regress_bad_content_type(sagemaker_session):
    predictor = Predictor("endpoint", sagemaker_session, csv_serializer)

    with pytest.raises(ValueError):
        predictor.regress(REGRESS_INPUT)
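Both failure tests exercise the same behavior: classify and regress map to JSON-only TensorFlow Serving REST calls, so a predictor built with csv_serializer (content type text/csv) must refuse them. A hypothetical standalone guard illustrating the check; the SDK's actual implementation and error message differ:

JSON_CONTENT_TYPE = 'application/json'

def check_tfs_method_content_type(content_type, method):
    # classify/regress are JSON-only in the TFS REST API, so reject
    # any other content type before contacting the endpoint.
    if content_type != JSON_CONTENT_TYPE:
        raise ValueError('the {} API only accepts {} requests'.format(method, JSON_CONTENT_TYPE))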
Example 13
def GetPrediction(prepped_data):
    ''' Accepts prepped data, returns prediction from segmentation model '''

    # unpack prepped data:
    original_video = prepped_data['original_video']
    preprocess_object = prepped_data['preprocess_object']
    windows = prepped_data['windows']
    molded_images = prepped_data['molded_images']
    image_metas = prepped_data['image_metas']
    anchors = prepped_data['anchors']

    # initialize variables:
    BATCH_SIZE = 50
    masks = []

    # get endpoint of model:
    predictor = Predictor('tf-multi-model-endpoint',
                          model_name='Mask_RCNN_a4c_seg_batch50-compact-cpu',
                          content_type='application/seg',
                          serializer=None)

    # get number of frames, batches:
    number_of_frames = len(molded_images)
    number_of_missing_frames = 0
    number_of_batches = int(np.ceil(number_of_frames / BATCH_SIZE))

    # iterate over each batch:
    for index in range(number_of_batches):

        # build payload batches:
        image_batch = molded_images[index * BATCH_SIZE:(index + 1) *
                                    BATCH_SIZE, :, :, :]
        metas_batch = image_metas[index * BATCH_SIZE:(index + 1) *
                                  BATCH_SIZE, :]
        anchor_batch = anchors[index * BATCH_SIZE:(index + 1) *
                               BATCH_SIZE, :, :]

        # set number of missing frames to 0:
        number_of_missing_frames = 0

        # pad batches smaller than batch size:
        if len(image_batch) < BATCH_SIZE:

            # get number of missing frames:
            number_of_missing_frames = BATCH_SIZE - len(image_batch)

            # build pads:
            extra_images = np.zeros(
                (number_of_missing_frames, image_batch.shape[1],
                 image_batch.shape[2], image_batch.shape[3]))
            extra_metas = np.zeros(
                (number_of_missing_frames, metas_batch.shape[1]))
            extra_anchors = np.zeros(
                (number_of_missing_frames, anchor_batch.shape[1],
                 anchor_batch.shape[2]))

            # concatenate batch and pads:
            image_batch = np.concatenate((image_batch, extra_images))
            metas_batch = np.concatenate((metas_batch, extra_metas))
            anchor_batch = np.concatenate((anchor_batch, extra_anchors))

        # compile batch payload:
        batch_payload = {
            "molded_images": image_batch.astype(np.float16),
            "image_metas": metas_batch.astype(np.float16),
            "anchors": anchor_batch.astype(np.float16),
        }

        # serialize input:
        input_to_model = pickle.dumps(batch_payload)

        # get prediction:
        response = predictor.predict(input_to_model)

        # unpack per-frame results, skipping any zero-padded frames:
        for frame_index in range(BATCH_SIZE - number_of_missing_frames):

            # position of this frame in the full video:
            global_index = batch_index * BATCH_SIZE + frame_index

            result = {
                'detection': np.array(
                    [response['predictions'][frame_index]['mrcnn_detection/Reshape_50']]),
                'mask': np.array(
                    [response['predictions'][frame_index]['mrcnn_mask/Reshape_1']]),
            }

            result_dictionary = preprocess_object.result_to_dict(
                np.expand_dims(original_video[global_index], axis=0),
                molded_images, windows, result)[0]

            if result_dictionary['mask'].size != 0:
                mask = np.where(result_dictionary['mask'][:, :, 0], 255.0,
                                0.0).astype(np.uint8)
                masks.append(mask)
            else:
                masks.append(np.zeros(original_video[0].shape[:-1]))

    masks = np.array(masks, dtype=np.uint8)

    return masks
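The batch padding above is easiest to verify in isolation. A standalone sketch with hypothetical shapes: 120 frames at BATCH_SIZE 50 yield three batches, and the last is zero-padded from 20 frames up to 50:

import numpy as np

BATCH_SIZE = 50
molded_images = np.zeros((120, 64, 64, 3), dtype=np.float16)  # hypothetical 120-frame video

number_of_batches = int(np.ceil(len(molded_images) / BATCH_SIZE))  # -> 3
last_batch = molded_images[2 * BATCH_SIZE:3 * BATCH_SIZE]          # 20 frames
missing = BATCH_SIZE - len(last_batch)                             # -> 30
padding = np.zeros((missing,) + last_batch.shape[1:], dtype=np.float16)
padded = np.concatenate((last_batch, padding))
assert padded.shape == (BATCH_SIZE, 64, 64, 3)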
import numpy as np

from sagemaker.tensorflow.serving import Predictor
from tensorflow.python.keras.preprocessing.image import load_img

!wget -O /tmp/test.jpg https://YOURPath.jpeg
file_name = '/tmp/test.jpg'

# test image
from IPython.display import Image
Image(file_name)

# Resize to match the input size the model was trained on
test_image = load_img(file_name, target_size=(150, 150))
test_image_array = np.array(test_image).reshape((1, 150, 150, 3)).tolist()

# Predict
predictor = Predictor(endpoint_name="my-endpointname")
print(predictor.predict({"instances": [{"inputs": test_image_array}]}))