Code Example #1
def get_prediction(coords, path_to_img, host):

    # adjust the coordinate system of the image received from Papaya
    coords = change_coordinate_system(coords, path_to_img)
    # pre-process image so that it matches input of model
    processed_image = pre_process_image(coords, path_to_img)
    # specify where the client should look to make requests
    client = ProdClient(host + ':9200', 'crohns', 1)

    # query the TensorFlow Serving model for predictions and the attention layer
    prob_values, max_prob_indx, attentions = query_client(
        processed_image, client)

    # process the feature maps to get the average and resize it
    feature_maps_arr = process_feature_maps(attentions,
                                            processed_image[0].shape)
    # turn the attention layer into a NIfTI file
    make_feature_image(coords, path_to_img, feature_maps_arr)

    # produce an output string to display on the front-end
    classes = {0: 'healthy', 1: 'abnormal (Crohn\'s)'}
    predictions = classes[max_prob_indx]
    output_str = f'{predictions} with probability {round(prob_values[0][max_prob_indx], 3)}'

    return output_str
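
The helper query_client is not shown in this example. A minimal sketch of what it might look like, built on the ProdClient request format used in the other examples; the tensor name 'inputs', the dtype, and the output keys 'probabilities' and 'attentions' are assumptions, not taken from the original project:

import numpy as np

def query_client(processed_image, client):
    # hypothetical reconstruction: tensor names and output keys are assumed
    req_data = [{
        'in_tensor_name': 'inputs',
        'in_tensor_dtype': 'DT_FLOAT',
        'data': processed_image
    }]
    prediction = client.predict(req_data, request_timeout=10)
    prob_values = np.asarray(prediction['probabilities'])
    max_prob_indx = int(np.argmax(prob_values[0]))
    return prob_values, max_prob_indx, prediction['attentions']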
Code Example #2
import collections
import logging

import numpy as np
import tensorflow as tf

from predict_client.prod_client import ProdClient
# tokenization, MultiLabelTextProcessor, convert_single_example and
# create_int_feature come from the project's BERT utilities; their imports
# are omitted in the original snippet.


def main(user_text):

    logging.basicConfig(
        level=logging.DEBUG,
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s')

    # In each file/module, do this to get the module name in the logs
    logger = logging.getLogger(__name__)

    # Make sure you have a model running on localhost:8500
    host = 'localhost:8500'
    model_name = 'bert'
    model_version = 1

    # ------------------------------ INPUT ------------------------------
    # example input: user_text = 'i hate all twitter users'
    request_id = np.zeros((128), dtype=int).tolist()
    content = {
        'user_text': user_text,
    }
    label_list = [0, 0, 0, 0, 0, 0]

    VOCAB_FILE = 'vocab.txt'
    tokenizer = tokenization.FullTokenizer(vocab_file=VOCAB_FILE,
                                           do_lower_case=True)

    processor = MultiLabelTextProcessor()

    inputExample = processor.serving_create_example(
        [request_id, content['user_text']], 'test')
    feature = convert_single_example(0, inputExample, label_list, 128,
                                     tokenizer)

    features = collections.OrderedDict()
    features["input_ids"] = create_int_feature(feature.input_ids)
    features["input_mask"] = create_int_feature(feature.input_mask)
    features["segment_ids"] = create_int_feature(feature.segment_ids)
    features["is_real_example"] = create_int_feature(
        [int(feature.is_real_example)])
    if isinstance(feature.label_id, list):
        label_ids = feature.label_id
    else:
        label_ids = [feature.label_id]
    features["label_ids"] = create_int_feature(label_ids)

    tf_example = tf.train.Example(features=tf.train.Features(feature=features))
    # --------------------------------------------------------------------
    tf_example = tf_example.SerializeToString()

    client = ProdClient(host, model_name, model_version)

    prediction = client.predict(tf_example, request_timeout=10)
    logger.info('Prediction: {}'.format(prediction))

    return prediction
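
A minimal command-line entry point for the function above; the argument handling is an assumption, not part of the original snippet:

import sys

if __name__ == '__main__':
    # pass the text to classify as the first argument (hypothetical usage)
    main(sys.argv[1] if len(sys.argv) > 1 else 'i hate all twitter users')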
Code Example #3
    def __init__(self, max_input_len: int, host: str, port: int,
                 model_name: str, model_version: int):
        self.max_input_len = max_input_len

        self.client = ProdClient('{host}:{port}'.format(host=host, port=port),
                                 model_name, model_version)

        _, self.char_to_id, self.id_to_char, self.vocabulary_size = \
            get_vocabulary_and_dictionaries()
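
A class that pairs a ProdClient with character dictionaries suggests a character-level model. A hypothetical predict method built on these attributes; the tensor name, dtype, and padding scheme are assumptions:

    def predict(self, text: str):
        # map characters to ids and pad to max_input_len (assumed scheme)
        ids = [self.char_to_id.get(c, 0) for c in text[:self.max_input_len]]
        ids += [0] * (self.max_input_len - len(ids))
        req_data = [{
            'in_tensor_name': 'inputs',   # assumed tensor name
            'in_tensor_dtype': 'DT_INT32',
            'data': ids
        }]
        return self.client.predict(req_data, request_timeout=10)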
Code Example #4
from django.shortcuts import render

from predict_client.prod_client import ProdClient


def test(request):
    client = ProdClient('localhost:9000', 'simple', 2)
    req_data = [{
        'in_tensor_name': 'inputs',
        'in_tensor_dtype': 'DT_INT32',
        'data': 6
    }]
    value = client.predict(req_data)
    print(value)
    return render(request, 'tf_model_test/test.html', {'value': value})
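
To expose this Django view, the project's URLconf needs a route for it. A minimal sketch; the module path and URL are assumptions:

# urls.py (sketch)
from django.urls import path

from tf_model_test import views  # assumed module layout

urlpatterns = [
    path('test/', views.test, name='test'),
]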
Code Example #5
#!/usr/bin/env python
#
# https://medium.com/epigramai/tensorflow-serving-101-pt-2-682eaf7469e7
#
# Example gRPC client of TF Serving for the helloworld model. Note that the
# "predict_client" directory contains a wrapper for the TensorFlow Serving API,
# so we don't have to depend on "tensorflow_serving.apis" directly.

import argparse
import logging

from predict_client.prod_client import ProdClient

# Make logging work.
logging.basicConfig(level=logging.DEBUG, format='%(message)s')

# Parse arguments.
parser = argparse.ArgumentParser(description='Process arguments.')
parser.add_argument('--host', type=str, default='localhost:9000')
parser.add_argument('--model_name', type=str, default='simple')
parser.add_argument('--model_version', type=int, default=1)
args = parser.parse_args()

client = ProdClient(args.host, args.model_name, args.model_version)
# Note in_tensor_name 'a' is the same 'a' that we used in the signature
# definition of our model. The input tensor's data type must also match
# that of the placeholder a in our model.
req_data = [{'in_tensor_name': 'a', 'in_tensor_dtype': 'DT_INT32', 'data': 2}]
print(client.predict(req_data))
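
Assuming the script is saved as client.py, it can be pointed at a local serving container from the shell (the filename is an assumption):

python client.py --host localhost:9000 --model_name simple --model_version 1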
Code Example #6
import numpy as np

import utils
from predict_client.prod_client import ProdClient

images = utils.images
labels = utils.labels

HOST = '0.0.0.0:8500'
MODEL_NAME = 'mnistpb'
MODEL_VERSION = 1

# connect to the TF Serving instance running in Docker
client = ProdClient(HOST, MODEL_NAME, MODEL_VERSION)

# request parameters
req_data = [{
    'in_tensor_name': 'inputs',
    'in_tensor_dtype': 'DT_FLOAT',
    'data': images[0:4]
}]

prediction = client.predict(req_data, request_timeout=10)  # get the model output

# axis 1 because the request is batched
print(np.argmax(np.asarray(prediction['outputs']), axis=1))
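
Since utils also exposes the ground-truth labels, the batch predictions can be checked directly. A small sketch, assuming labels holds integer class ids aligned with images:

predicted = np.argmax(np.asarray(prediction['outputs']), axis=1)
expected = np.asarray(labels[0:4])
print('accuracy on this batch:', np.mean(predicted == expected))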
Code Example #7
File: pic.py (Project: vovdlbezgod/Remake_app)
# The original snippet begins mid-function; a plausible reconstruction of the
# image-preparation helper it likely belongs to (name and signature assumed):
def prepare_image(img):
    img = np.expand_dims(img, axis=0)
    print(img.shape)
    return img

import numpy as np
from predict_client.prod_client import ProdClient
from flask import Flask
from flask import request
from flask import jsonify

HOST = 'localhost:9000'
MODEL_NAME = 'test'
MODEL_VERSION = 1

app = Flask(__name__)
client = ProdClient(HOST, MODEL_NAME, MODEL_VERSION)

def convert_data(raw_data):
    return np.array(raw_data, dtype=np.float32)

def get_prediction_from_model(data):
    req_data = [{'in_tensor_name': 'inputs', 'in_tensor_dtype': 'DT_FLOAT', 'data': data}]

    prediction = client.predict(req_data, request_timeout=10)

    return prediction


@app.route("/prediction", methods=['POST'])
def get_prediction():
    ##req_data = request.get_json()
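
Once the app is running, the endpoint can be exercised with a JSON POST. A hypothetical call; the payload shape and port are assumptions:

curl -X POST -H 'Content-Type: application/json' \
     -d '[[0.1, 0.2, 0.3]]' http://localhost:5000/prediction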
Code Example #8
import logging

from predict_client.prod_client import ProdClient

logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s - %(levelname)s - %(name)s - %(message)s')

# In each file/module, do this to get the module name in the logs
logger = logging.getLogger(__name__)

# Make sure you have a model running on localhost:9000
host = 'localhost:9000'
model_name = 'simple'
model_version = 1

client = ProdClient(host, model_name, model_version)

req_data = [{'in_tensor_name': 'a', 'in_tensor_dtype': 'DT_INT32', 'data': 2}]

prediction = client.predict(req_data, request_timeout=10)
logger.info('Prediction: {}'.format(prediction))
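
predict takes a request_timeout in seconds; if the server is unreachable the client may not return the expected keys, so a defensive check is reasonable. A sketch, not part of the original example:

if not prediction:
    logger.error('Empty prediction; is the model server reachable on %s?', host)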
Code Example #9
import os
import pickle

import numpy as np
import tensorflow as tf
import config as cfg

from predict_client.prod_client import ProdClient
from inference import simple_inference

# import Flask dependencies
from flask import Flask, request, render_template, send_from_directory

# set the root directory
APP_ROOT = os.path.dirname(os.path.abspath(__file__))

# define the serving client
client = ProdClient('localhost:9000', 'simple', 2)

# load training-set vectors
with open('hamming_train_vectors.pickle', 'rb') as f:
    train_vectors = pickle.load(f)

# load training-set image paths
with open('train_images_pickle.pickle', 'rb') as f:
    train_images_paths = pickle.load(f)

# define the Flask app
app = Flask(__name__, static_url_path='/static')


# define the app's home page
@app.route("/")  # www.image-search.com/
def index():
    # truncated in the original; a minimal completion that renders the
    # home page (template name is an assumption)
    return render_template('index.html')
Code Example #10
from predict_client.prod_client import ProdClient

# client for the model served by Docker container 1
client = ProdClient('localhost:9001', 'simple', 1)
req_data = [{
    'in_tensor_name': 'inputs',
    'in_tensor_dtype': 'DT_INT32',
    'data': 4
}]
print(client.predict(req_data))

# client for the model served by Docker container 2
client = ProdClient('localhost:9000', 'simple_1', 2)
req_data = [{
    'in_tensor_name': 'inputs',
    'in_tensor_dtype': 'DT_INT32',
    'data': 4
}]
print(client.predict(req_data))

# client for the model served by Docker container 3
client = ProdClient('localhost:9002', 'subtract_model', 1)
req_data = [{
    'in_tensor_name': 'inputs',
    'in_tensor_dtype': 'DT_INT32',
    'data': 22
}]
print(client.predict(req_data))
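
The three calls differ only in address, model, and input, so the same requests can be folded into a loop over those values:

deployments = [
    ('localhost:9001', 'simple', 1, 4),
    ('localhost:9000', 'simple_1', 2, 4),
    ('localhost:9002', 'subtract_model', 1, 22),
]

for host, model_name, model_version, value in deployments:
    client = ProdClient(host, model_name, model_version)
    req_data = [{
        'in_tensor_name': 'inputs',
        'in_tensor_dtype': 'DT_INT32',
        'data': value
    }]
    print(model_name, client.predict(req_data))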