def main(user_text):

    logging.basicConfig(
        level=logging.DEBUG,
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s')

    # In each file/module, do this to get the module name in the logs
    logger = logging.getLogger(__name__)

    # Make sure you have a model running on localhost:8500
    host = 'localhost:8500'
    model_name = 'bert'
    model_version = 1

    #-------------------------------------------------------------------#
    #----------------INPUT STRING----------------------------
    #user_text = 'i hate all twitter users'
    request_id = np.zeros((128), dtype=int).tolist()
    content = {
        'user_text': user_text,
    }
    label_list = [0, 0, 0, 0, 0, 0]

    VOCAB_FILE = 'vocab.txt'
    tokenizer = tokenization.FullTokenizer(vocab_file=VOCAB_FILE,
                                           do_lower_case=True)

    processor = MultiLabelTextProcessor()

    inputExample = processor.serving_create_example(
        [request_id, content['user_text']], 'test')
    feature = convert_single_example(0, inputExample, label_list, 128,
                                     tokenizer)

    features = collections.OrderedDict()
    features["input_ids"] = create_int_feature(feature.input_ids)
    features["input_mask"] = create_int_feature(feature.input_mask)
    features["segment_ids"] = create_int_feature(feature.segment_ids)
    features["is_real_example"] = create_int_feature(
        [int(feature.is_real_example)])
    if isinstance(feature.label_id, list):
        label_ids = feature.label_id
    else:
        label_ids = [feature.label_id]
    features["label_ids"] = create_int_feature(label_ids)

    tf_example = tf.train.Example(features=tf.train.Features(feature=features))
    #----------------------------------------------------------------------------#
    tf_example = tf_example.SerializeToString()

    print('test')

    client = ProdClient(host, model_name, model_version)

    prediction = client.predict(tf_example, request_timeout=10)
    logger.info('Prediction: {}'.format(prediction))

    return prediction
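
# A minimal usage sketch, assuming the module-level imports (logging, numpy,
# tensorflow, the BERT tokenization helpers and ProdClient) are in place and a
# BERT model is being served on localhost:8500; the input text below is the
# sample string from the commented-out line above.
if __name__ == '__main__':
    result = main('i hate all twitter users')
    print(result)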
Example No. 2
def test(request):
    client = ProdClient('localhost:9000', 'simple', 2)
    req_data = [{
        'in_tensor_name': 'inputs',
        'in_tensor_dtype': 'DT_INT32',
        'data': 6
    }]
    value = client.predict(req_data)
    print(value)
    return render(request, 'tf_model_test/test.html', {'value': value})
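
# Hypothetical URL wiring for the view above (module paths and names are
# assumptions, not from the source).
# urls.py:
from django.urls import path
from . import views

urlpatterns = [
    path('test/', views.test, name='tf_model_test'),
]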
Example No. 3
class TweetAutocompleter(object):
    def __init__(self, max_input_len: int, host: str, port: int,
                 model_name: str, model_version: int):
        self.max_input_len = max_input_len

        self.client = ProdClient('{host}:{port}'.format(host=host, port=port),
                                 model_name, model_version)

        _, self.char_to_id, self.id_to_char, self.vocabulary_size = get_vocabulary_and_dictionaries(
        )

    def autocomplete(self,
                     beginning_of_tweet: str,
                     temperature: float = DEFAULT_TEMPERATURE) -> str:
        complete_tweet = beginning_of_tweet
        input_sentence = beginning_of_tweet
        for i in range(TWEET_MAX_LEN - len(beginning_of_tweet)):
            next_char = self._predict_next_char(input_sentence, temperature)
            if next_char == END_OF_TWEET:
                break

            complete_tweet += next_char
            input_sentence = input_sentence[1:] + next_char
        return complete_tweet

    def _predict_next_char(self, input_sentence: str,
                           temperature: float) -> str:
        input_data = vectorize_sentences([input_sentence], self.max_input_len,
                                         self.vocabulary_size, self.char_to_id)
        request_data = [{
            'in_tensor_name': IN_TENSOR_NAME,
            'in_tensor_dtype': IN_TENSOR_DTYPE,
            'data': input_data.astype(int)
        }]
        response = self.client.predict(request_data)
        input_predictions = response['outputs'][0]

        next_index = sample(input_predictions, temperature)
        next_char = self.id_to_char[next_index]

        return next_char
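
# A minimal usage sketch, assuming the character-level model is served locally
# and that TWEET_MAX_LEN, DEFAULT_TEMPERATURE, END_OF_TWEET and the helper
# functions used above come from the surrounding module; the host, port, model
# name and max_input_len below are placeholders, not values from the source.
autocompleter = TweetAutocompleter(max_input_len=40,
                                   host='localhost',
                                   port=9000,
                                   model_name='tweet_autocompleter',
                                   model_version=1)
print(autocompleter.autocomplete('tensorflow serving is '))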
Example No. 4
#!/usr/bin/env python
#
# https://medium.com/epigramai/tensorflow-serving-101-pt-2-682eaf7469e7
#
# Example gRPC client of TF Serving for the helloworld model. Note that the
# "predict_client" directory contains a wrapper for the TensorFlow Serving API,
# so we don't have to depend on "tensorflow_serving.apis".

import argparse
import logging

from predict_client.prod_client import ProdClient

# Make logging work.
logging.basicConfig(level=logging.DEBUG, format='%(message)s')

# Parse arguments.
parser = argparse.ArgumentParser(description='Process arguments.')
parser.add_argument('--host', type=str, default='localhost:9000')
parser.add_argument('--model_name', type=str, default='simple')
parser.add_argument('--model_version', type=int, default=1)
args = parser.parse_args()

client = ProdClient(args.host, args.model_name, args.model_version)
# Note that in_tensor_name 'a' is the same 'a' that we used in the signature
# definition of our model (see the sketch after this example). The input
# tensor's data type must also match that of the placeholder 'a' in our model.
req_data = [{'in_tensor_name': 'a', 'in_tensor_dtype': 'DT_INT32', 'data': 2}]
print(client.predict(req_data))
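
# A rough sketch (an assumption, not from the source) of the model-side
# signature definition that the comment above refers to, using the TF 1.x
# SavedModel APIs; the graph, shapes and export directory are placeholders.
import tensorflow as tf

a = tf.placeholder(tf.int32, shape=[], name='a')
b = tf.add(a, 10, name='b')

with tf.Session() as sess:
    signature = tf.saved_model.signature_def_utils.predict_signature_def(
        inputs={'a': a}, outputs={'b': b})
    builder = tf.saved_model.builder.SavedModelBuilder('exported_model/1')
    builder.add_meta_graph_and_variables(
        sess, [tf.saved_model.tag_constants.SERVING],
        signature_def_map={'serving_default': signature})
    builder.save()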
Example No. 5
import numpy as np

import utils
from predict_client.prod_client import ProdClient

images = utils.images
labels = utils.labels

HOST = '0.0.0.0:8500'
MODEL_NAME = 'mnistpb'
MODEL_VERSION = 1

# Connect to the TF Serving instance running in Docker.
client = ProdClient(HOST, MODEL_NAME, MODEL_VERSION)

# parameters for the request
req_data = [{
    'in_tensor_name': 'inputs',
    'in_tensor_dtype': 'DT_FLOAT',
    'data': images[0:4]
}]

prediction = client.predict(req_data, request_timeout=10)  # get the prediction output

# argmax over axis 1 because the request is batched
print(np.argmax(np.asarray(prediction['outputs']), axis=1))
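
# A hypothetical sketch of the utils module imported above (an assumption, not
# from the source): it is taken to expose flattened, normalized MNIST test
# images and their labels.
#
# utils.py:
import numpy as np
import tensorflow as tf

(_, _), (test_images, test_labels) = tf.keras.datasets.mnist.load_data()
images = (test_images.reshape(-1, 784) / 255.0).astype(np.float32)
labels = test_labels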
Example No. 6
VOCAB_FILE = 'vocab.txt'
tokenizer = tokenization.FullTokenizer(vocab_file=VOCAB_FILE,
                                       do_lower_case=True)

processor = MultiLabelTextProcessor()

inputExample = processor.serving_create_example(
    [request_id, content['user_text']], 'test')
feature = convert_single_example(0, inputExample, label_list, 128, tokenizer)

features = collections.OrderedDict()
features["input_ids"] = create_int_feature(feature.input_ids)
features["input_mask"] = create_int_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
features["is_real_example"] = create_int_feature(
    [int(feature.is_real_example)])
if isinstance(feature.label_id, list):
    label_ids = feature.label_id
else:
    label_ids = [feature.label_id]
features["label_ids"] = create_int_feature(label_ids)

tf_example = tf.train.Example(features=tf.train.Features(feature=features))
#----------------------------------------------------------------------------#
tf_example = tf_example.SerializeToString()

print('test')

client = ProdClient(host, model_name, model_version)

prediction = client.predict(tf_example, request_timeout=10)
logger.info('Prediction: {}'.format(prediction))
Example No. 7
from predict_client.prod_client import ProdClient

# Used by Docker 1
client = ProdClient('localhost:9001', 'simple', 1)
req_data = [{
    'in_tensor_name': 'inputs',
    'in_tensor_dtype': 'DT_INT32',
    'data': 4
}]
client.predict(req_data)

# Used by Docker 2
client = ProdClient('localhost:9000', 'simple_1', 2)
req_data = [{
    'in_tensor_name': 'inputs',
    'in_tensor_dtype': 'DT_INT32',
    'data': 4
}]
client.predict(req_data)

# Used by Docker 3
client = ProdClient('localhost:9002', 'subtract_model', 1)
req_data = [{
    'in_tensor_name': 'inputs',
    'in_tensor_dtype': 'DT_INT32',
    'data': 22
}]
client.predict(req_data)
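
# For illustration (an assumption about the intended usage, not from the
# source): capture and print each container's response instead of discarding it.
for port, model, version, value in [(9001, 'simple', 1, 4),
                                    (9000, 'simple_1', 2, 4),
                                    (9002, 'subtract_model', 1, 22)]:
    client = ProdClient('localhost:{}'.format(port), model, version)
    print(client.predict([{'in_tensor_name': 'inputs',
                           'in_tensor_dtype': 'DT_INT32',
                           'data': value}]))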