def testRegress(self):
    """Test PredictionService.Regress implementation."""
    model_path = self._GetSavedModelBundlePath()

    atexit.register(self.TerminateProcs)
    model_server_address = self.RunServer(PickUnusedPort(), 'default',
                                          model_path)

    print('Sending Regress request...')
    # Prepare request
    request = regression_pb2.RegressionRequest()
    request.model_spec.name = 'default'
    request.model_spec.signature_name = 'regress_x_to_y'

    example = request.input.example_list.examples.add()
    example.features.feature['x'].float_list.value.extend([2.0])

    # Send request
    host, port = model_server_address.split(':')
    channel = implementations.insecure_channel(host, int(port))
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
    result = stub.Regress(request, RPC_TIMEOUT)  # 5 secs timeout
    # Verify response
    self.assertEquals(1, len(result.result.regressions))
    expected_output = 3.0
    self.assertEquals(expected_output, result.result.regressions[0].value)
    self._VerifyModelSpec(result.model_spec, request.model_spec.name,
                          request.model_spec.signature_name,
                          self._GetModelVersion(model_path))
Example #2
def do_inference(hostport, work_dir, concurrency, num_tests):
  """Tests PredictionService with concurrent requests.

  Args:
    hostport: Host:port address of the PredictionService.
    work_dir: The full path of working directory for test data set.
    concurrency: Maximum number of concurrent requests.
    num_tests: Number of test images to use.

  Returns:
    The classification error rate.

  Raises:
    IOError: An error occurred processing test data set.
  """
  test_data_set = mnist_input_data.read_data_sets(work_dir).test
  host, port = hostport.split(':')
  channel = implementations.insecure_channel(host, int(port))
  stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
  result_counter = _ResultCounter(num_tests, concurrency)
  for _ in range(num_tests):
    request = predict_pb2.PredictRequest()
    request.model_spec.name = 'mnist'
    request.model_spec.signature_name = 'predict_images'
    image, label = test_data_set.next_batch(1)
    request.inputs['images'].CopyFrom(
        tf.contrib.util.make_tensor_proto(image[0], shape=[1, image[0].size]))
    result_counter.throttle()
    result_future = stub.Predict.future(request, 5.0)  # 5 seconds
    result_future.add_done_callback(
        _create_rpc_callback(label[0], result_counter))
  return result_counter.get_error_rate()
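# The _create_rpc_callback helper used above is not shown in this example.
# A minimal sketch of what it could look like, assuming _ResultCounter
# exposes inc_error(), inc_done() and dec_active() methods (those names are
# assumptions, not taken from the example).
import numpy

def _create_rpc_callback(label, result_counter):
  """Returns a callback that scores one completed Predict RPC."""
  def _callback(result_future):
    exception = result_future.exception()
    if exception:
      result_counter.inc_error()
      print(exception)
    else:
      response = numpy.array(
          result_future.result().outputs['scores'].float_val)
      prediction = numpy.argmax(response)
      if label != prediction:
        result_counter.inc_error()
    result_counter.inc_done()
    result_counter.dec_active()
  return _callback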
def main():
  host = "0.0.0.0"
  port = 8502
  model_name = "default"
  model_version = -1
  signature_name = ""
  request_timeout = 10.0

  # Generate inference data
  image_b64_string = base64.urlsafe_b64encode(open("./0.jpg", "rb").read())
  images_tensor_proto = tf.contrib.util.make_tensor_proto(
      [image_b64_string], dtype=tf.string)

  # Create gRPC client
  channel = implementations.insecure_channel(host, port)
  stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
  request = predict_pb2.PredictRequest()
  request.model_spec.name = model_name
  if model_version > 0:
    request.model_spec.version.value = model_version
  if signature_name != "":
    request.model_spec.signature_name = signature_name
  request.inputs["images"].CopyFrom(images_tensor_proto)

  # Send request
  start_time = time.time()
  for i in range(10):
    result = stub.Predict(request, request_timeout)
  end_time = time.time()
  print("Cost time: {}".format(end_time - start_time))

  print(result)
def main():
  host = "0.0.0.0"
  port = 8502
  model_name = "default"
  model_version = -1
  signature_name = ""
  request_timeout = 10.0

  # Generate inference data
  keys = numpy.asarray([[1]])
  keys_tensor_proto = tf.contrib.util.make_tensor_proto(keys, dtype=tf.int32)
  features = numpy.asarray([[1, 2, 3, 4, 5, 6, 7, 8, 9]])
  features_tensor_proto = tf.contrib.util.make_tensor_proto(
      features, dtype=tf.float32)

  # Create gRPC client
  channel = implementations.insecure_channel(host, port)
  stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
  request = predict_pb2.PredictRequest()
  request.model_spec.name = model_name
  if model_version > 0:
    request.model_spec.version.value = model_version
  if signature_name != "":
    request.model_spec.signature_name = signature_name
  request.inputs["keys"].CopyFrom(keys_tensor_proto)
  request.inputs["features"].CopyFrom(features_tensor_proto)

  # Send request
  start_time = time.time()
  for i in range(100):
    result = stub.Predict(request, request_timeout)
  end_time = time.time()
  print("Cost time: {}".format(end_time - start_time))

  print(result)
def main():
  # Generate inference data
  keys = numpy.asarray([1, 2, 3, 4])
  keys_tensor_proto = tf.contrib.util.make_tensor_proto(keys, dtype=tf.int32)
  features = numpy.asarray(
      [[1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 1, 1, 1, 1, 1, 1, 1, 1],
       [9, 8, 7, 6, 5, 4, 3, 2, 1], [9, 9, 9, 9, 9, 9, 9, 9, 9]])
  features_tensor_proto = tf.contrib.util.make_tensor_proto(
      features, dtype=tf.float32)

  # Create gRPC client
  channel = implementations.insecure_channel(FLAGS.host, FLAGS.port)
  stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
  request = predict_pb2.PredictRequest()
  request.model_spec.name = FLAGS.model_name
  if FLAGS.model_version > 0:
    request.model_spec.version.value = FLAGS.model_version
  if FLAGS.signature_name != "":
    request.model_spec.signature_name = FLAGS.signature_name
  request.inputs["keys"].CopyFrom(keys_tensor_proto)
  request.inputs["features"].CopyFrom(features_tensor_proto)

  # Send request
  result = stub.Predict(request, FLAGS.request_timeout)
  print(result)
  def VerifyPredictRequest(self,
                           model_server_address,
                           expected_output,
                           model_name='default',
                           specify_output=True):
    """Send PredictionService.Predict request and verify output."""
    print('Sending Predict request...')
    # Prepare request
    request = predict_pb2.PredictRequest()
    request.model_spec.name = model_name
    request.inputs['x'].dtype = types_pb2.DT_FLOAT
    request.inputs['x'].float_val.append(2.0)
    dim = request.inputs['x'].tensor_shape.dim.add()
    dim.size = 1

    if specify_output:
      request.output_filter.append('y')
    # Send request
    host, port = model_server_address.split(':')
    channel = implementations.insecure_channel(host, int(port))
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
    result = stub.Predict(request, 5.0)  # 5 secs timeout
    # Verify response
    self.assertTrue('y' in result.outputs)
    self.assertIs(types_pb2.DT_FLOAT, result.outputs['y'].dtype)
    self.assertEquals(1, len(result.outputs['y'].float_val))
    self.assertEquals(expected_output, result.outputs['y'].float_val[0])
Example #7
def main():
    parser = argparse.ArgumentParser(description="Translation client example")
    parser.add_argument("--model_name", required=True,
                        help="model name")
    parser.add_argument("--host", default="localhost",
                        help="model server host")
    parser.add_argument("--port", type=int, default=9000,
                        help="model server port")
    parser.add_argument("--timeout", type=float, default=10.0,
                        help="request timeout")
    parser.add_argument("--concurrency", type=int, default=10,
                        help="number of concurrent requests")
    parser.add_argument('--spm_model', type=str,
                        help="sentencepiece model file")
    parser.add_argument('input', type=str, help="string to split")
    args = parser.parse_args()

    channel = implementations.insecure_channel(args.host, args.port)
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
    sp = spm.SentencePieceProcessor()
    sp.Load(args.spm_model)
    future = translate(stub, args.model_name, sp, args.input,
                       timeout=args.timeout)
    output = parse_translation_result(future.result(), sp)
    print("Input:", args.input)
    print("Split:", output)
Example #8
def do_inference(hostport, work_dir, concurrency, num_tests):
  """Tests mnist_inference service with concurrent requests.

  Args:
    hostport: Host:port address of the mnist_inference service.
    work_dir: The full path of working directory for test data set.
    concurrency: Maximum number of concurrent requests.
    num_tests: Number of test images to use.

  Returns:
    The classification error rate.

  Raises:
    IOError: An error occurred processing test data set.
  """
  test_data_set = mnist_input_data.read_data_sets(work_dir).test
  host, port = hostport.split(':')
  channel = implementations.insecure_channel(host, int(port))
  stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
  cv = threading.Condition()
  result = {'active': 0, 'error': 0, 'done': 0}
  def done(result_future, label):
    with cv:
      # Workaround for gRPC issue https://github.com/grpc/grpc/issues/7133
      try:
        exception = result_future.exception()
      except AttributeError:
        exception = None
      if exception:
        result['error'] += 1
        print(exception)
      else:
        sys.stdout.write('.')
        sys.stdout.flush()
        response = numpy.array(result_future.result().outputs['scores'])
        prediction = numpy.argmax(response)
        if label != prediction:
          result['error'] += 1
      result['done'] += 1
      result['active'] -= 1
      cv.notify()
  for _ in range(num_tests):
    request = predict_pb2.PredictRequest()
    request.model_spec.name = 'mnist'
    image, label = test_data_set.next_batch(1)
    request.inputs['images'].CopyFrom(
        tf.contrib.util.make_tensor_proto(image[0], shape=[1, image[0].size]))
    with cv:
      while result['active'] == concurrency:
        cv.wait()
      result['active'] += 1
    result_future = stub.Predict.future(request, 5.0)  # 5 seconds
    result_future.add_done_callback(
        lambda result_future, l=label[0]: done(result_future, l))  # pylint: disable=cell-var-from-loop
  with cv:
    while result['done'] != num_tests:
      cv.wait()
    return result['error'] / float(num_tests)
    def classification(self, data):
        channel = implementations.insecure_channel(self.host, self.tf_serving_port)
        stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)

        request = self._create_classification_request(data)

        result = stub.Classify(request, self.request_timeout)

        return result
Example #10
def main():
    parse_command_line()

    channel = implementations.insecure_channel(options.rpc_address, options.rpc_port)
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
    extra_settings = dict(
            stub = stub,
        )
    app = get_application(**extra_settings)
    app.listen(options.port)
    logging.info('running at http://localhost:%s'%options.port)
    tornado.ioloop.IOLoop.current().start()
def main(_):
  # Prepare request
  request = predict_pb2.PredictRequest()
  request.model_spec.name = 'default'
  request.inputs['x'].dtype = types_pb2.DT_FLOAT
  request.inputs['x'].float_val.append(2.0)
  request.output_filter.append('y')
  # Send request
  host, port = FLAGS.server.split(':')
  channel = implementations.insecure_channel(host, int(port))
  stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
  print(stub.Predict(request, 5.0))  # 5 secs timeout
def __open_tf_server_channel__(server_name, server_port):
    '''
    Opens channel to TensorFlow server for requests

    :param server_name: String, server name (localhost, IP address)
    :param server_port: String, server port
    :return: Channel stub
    '''
    channel = implementations.insecure_channel(
        server_name,
        int(server_port))
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)

    return stub
Example #13
def main(_):
  host, port = FLAGS.server.split(':')
  channel = implementations.insecure_channel(host, int(port))
  stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
  # Send request
  with open(FLAGS.image, 'rb') as f:
    # See prediction_service.proto for gRPC request/response details.
    data = f.read()
    request = predict_pb2.PredictRequest()
    request.model_spec.name = 'inception'
    request.inputs['images'].CopyFrom(
        tf.contrib.util.make_tensor_proto(data, shape=[1]))
    result = stub.Predict(request, 10.0)  # 10 secs timeout
    print(result)
  def __init__(self):
    self.thread_lock = threading.Lock()
    self.num_completed_requests = 0
    self.num_failed_requests = 0
    self.latencies = []
    self.file_list = get_files_in_directory_sorted(FLAGS.image_directory)
    self.num_images = len(self.file_list)

    host, port = FLAGS.server.split(':')
    channel = implementations.insecure_channel(host, int(port))
    self.stub = prediction_service_pb2.beta_create_PredictionService_stub(
        channel)

    # Fix random seed so that sequence of images sent to server is
    # deterministic.
    random.seed(RANDOM_SEED)
Example #15
File: client.py  Project: DXZ/git_test
def main(_):
    host, port = FLAGS.server.split(':')
    channel = implementations.insecure_channel(host, int(port))
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
    # Send request
 
    # See prediction_service.proto for gRPC request/response details.
    data = get_melgram("T_1000001.wav")
    data = data.astype(np.float32)

    request = predict_pb2.PredictRequest()
    request.model_spec.name = 'voice'  # must match the tensorflow_model_server --model_name="voice" flag
    request.model_spec.signature_name = 'voice_classification'  # must match the key used in signature_def_map
    request.inputs['voice'].CopyFrom(
          tf.contrib.util.make_tensor_proto(data, shape=[1, 1, 96, 89]))  # shape matches the Keras model.input
    result = stub.Predict(request, 10.0)  # 10 secs timeout
    print(result)
def WaitForServerReady(port):
  """Waits for a server on the localhost to become ready."""
  for _ in range(0, WAIT_FOR_SERVER_READY_INT_SECS):
    time.sleep(1)
    request = predict_pb2.PredictRequest()
    request.model_spec.name = 'intentionally_missing_model'

    try:
      # Send empty request to missing model
      channel = implementations.insecure_channel('localhost', port)
      stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
      stub.Predict(request, RPC_TIMEOUT)
    except face.AbortionError as error:
      # Missing model error will have details containing 'Servable'
      if 'Servable' in error.details:
        print('Server is ready')
        break
def _do_local_inference(host, port, serialized_examples, model_name):
  """Performs inference on a model hosted by the host:port server."""

  channel = implementations.insecure_channel(host, int(port))
  stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)

  request = predict_pb2.PredictRequest()
  # request.model_spec.name = 'chicago_taxi'
  request.model_spec.name = model_name
  request.model_spec.signature_name = 'predict'

  tfproto = tf.contrib.util.make_tensor_proto([serialized_examples],
                                              shape=[len(serialized_examples)],
                                              dtype=tf.string)
  # The name of the input tensor is 'examples' based on
  # https://github.com/tensorflow/tensorflow/blob/r1.9/tensorflow/python/estimator/export/export.py#L290
  request.inputs['examples'].CopyFrom(tfproto)
  print(stub.Predict(request, _LOCAL_INFERENCE_TIMEOUT_SECONDS))
Example #18
def main(image_paths, server, port):
  channel = implementations.insecure_channel(server, port)
  stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)

  raw_images = []
  for path in image_paths:
    with tf.gfile.Open(path) as img:
      raw_images.append(img.read())

  # Send request
  # See prediction_service.proto for gRPC request/response details.
  request = predict_pb2.PredictRequest()
  request.model_spec.name = 'inception'
  request.model_spec.signature_name = 'predict_images'
  request.inputs['images'].CopyFrom(
      tf.make_tensor_proto(raw_images, shape=[len(raw_images)]))
  result = stub.Predict(request, 10.0)  # 10 secs timeout
  print(result)
Example #19
def main():  
  # create prediction service client stub
  channel = implementations.insecure_channel(FLAGS.host, int(FLAGS.port))
  stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
  
  # create request
  request = predict_pb2.PredictRequest()
  request.model_spec.name = 'resnet'
  request.model_spec.signature_name = 'serving_default'
  
  # read image into numpy array
  img = cv2.imread(FLAGS.image).astype(np.float32)
  
  # convert to tensor proto and make request
  # shape is in NHWC (num_samples x height x width x channels) format
  tensor = tf.contrib.util.make_tensor_proto(img, shape=[1]+list(img.shape))
  request.inputs['input'].CopyFrom(tensor)
  tt = time.time()  # start timing the request round trip
  resp = stub.Predict(request, 30.0)

  print('total time: {}s'.format(time.time() - tt))
  def testMultiInference(self):
    """Test PredictionService.MultiInference implementation."""
    model_path = self._GetSavedModelBundlePath()
    enable_batching = False

    atexit.register(self.TerminateProcs)
    model_server_address = self.RunServer(PickUnusedPort(), 'default',
                                          model_path,
                                          enable_batching)

    print('Sending MultiInference request...')
    # Prepare request
    request = inference_pb2.MultiInferenceRequest()
    request.tasks.add().model_spec.name = 'default'
    request.tasks[0].model_spec.signature_name = 'regress_x_to_y'
    request.tasks[0].method_name = 'tensorflow/serving/regress'
    request.tasks.add().model_spec.name = 'default'
    request.tasks[1].model_spec.signature_name = 'classify_x_to_y'
    request.tasks[1].method_name = 'tensorflow/serving/classify'

    example = request.input.example_list.examples.add()
    example.features.feature['x'].float_list.value.extend([2.0])

    # Send request
    host, port = model_server_address.split(':')
    channel = implementations.insecure_channel(host, int(port))
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
    result = stub.MultiInference(request, RPC_TIMEOUT)  # 5 secs timeout

    # Verify response
    self.assertEquals(2, len(result.results))
    expected_output = 3.0
    self.assertEquals(expected_output,
                      result.results[0].regression_result.regressions[0].value)
    self.assertEquals(expected_output, result.results[
        1].classification_result.classifications[0].classes[0].score)
    for i in range(2):
      self._VerifyModelSpec(result.results[i].model_spec,
                            request.tasks[i].model_spec.name,
                            request.tasks[i].model_spec.signature_name,
                            self._GetModelVersion(model_path))
    def cache_prediction_metadata(self):
        channel = implementations.insecure_channel(self.host, self.tf_serving_port)
        stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
        request = get_model_metadata_pb2.GetModelMetadataRequest()

        request.model_spec.name = self.model_name
        request.metadata_field.append('signature_def')
        result = stub.GetModelMetadata(request, self.request_timeout)

        _logger.info('---------------------------Model Spec---------------------------')
        _logger.info(json_format.MessageToJson(result))
        _logger.info('----------------------------------------------------------------')

        signature_def = result.metadata['signature_def']
        signature_map = get_model_metadata_pb2.SignatureDefMap()
        signature_map.ParseFromString(signature_def.value)

        serving_default = signature_map.ListFields()[0][1]['serving_default']
        serving_inputs = serving_default.inputs

        self.input_type_map = {key: serving_inputs[key].dtype for key in serving_inputs.keys()}
        self.prediction_type = serving_default.method_name
def main():
    parser = argparse.ArgumentParser(description="Translation client example")
    parser.add_argument("--model_name", required=True,
                        help="model name")
    parser.add_argument("--host", default="localhost",
                        help="model server host")
    parser.add_argument("--port", type=int, default=9000,
                        help="model server port")
    parser.add_argument("--timeout", type=float, default=10.0,
                        help="request timeout")
    parser.add_argument("--concurrency", type=int, default=10,
                        help="number of concurrent requests")
    parser.add_argument('--spm_model', type=str,
                        help="sentencepiece model file")
    parser.add_argument('--input_file', type=str, help="input file")
    parser.add_argument('--output_file', type=str, help="output file")
    args = parser.parse_args()

    channel = implementations.insecure_channel(args.host, args.port)
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
    rate_limiter = _RateLimiter(args.concurrency)
    sp = spm.SentencePieceProcessor()
    sp.Load(args.spm_model)
    bar = progressbar.ProgressBar()
    with codecs.open(args.input_file, "r", "utf8") as f:
        for i, line in bar(enumerate(f)):

            rate_limiter.throttle()
            future = translate(stub, args.model_name, sp, line.strip(),
                               timeout=args.timeout)
            future.add_done_callback(_create_rpc_callback(i, rate_limiter, sp))

    rate_limiter.wait_done(i+1)
    results.sort(key=operator.itemgetter(0))

    with codecs.open(args.output_file, "w", "utf8") as f:
        for r in results:
            f.write(r[1] + "\n")
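# The _RateLimiter used above is not shown in this example. A minimal sketch,
# assuming completed requests report back into the limiter; only throttle()
# and wait_done() appear above, the complete() method name is an assumption.
import threading

class _RateLimiter(object):
    def __init__(self, concurrency):
        self._concurrency = concurrency
        self._active = 0
        self._done = 0
        self._condition = threading.Condition()

    def throttle(self):
        # Block while the maximum number of requests is already in flight.
        with self._condition:
            while self._active >= self._concurrency:
                self._condition.wait()
            self._active += 1

    def complete(self):
        # Expected to be called from the RPC done-callback when a request finishes.
        with self._condition:
            self._active -= 1
            self._done += 1
            self._condition.notify_all()

    def wait_done(self, num_expected):
        # Block until num_expected requests have completed.
        with self._condition:
            while self._done < num_expected:
                self._condition.wait()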
Example #23
  def testMultiInference(self):
    """Test PredictionService.MultiInference implementation."""
    model_path = self._GetSavedModelBundlePath()
    enable_batching = False

    atexit.register(self.TerminateProcs)
    model_server_address = self.RunServer(PickUnusedPort(), 'default',
                                          model_path,
                                          enable_batching)

    print('Sending MultiInference request...')
    # Prepare request
    request = inference_pb2.MultiInferenceRequest()
    request.tasks.add().model_spec.name = 'default'
    request.tasks[0].model_spec.signature_name = 'regress_x_to_y'
    request.tasks[0].method_name = 'tensorflow/serving/regress'
    request.tasks.add().model_spec.name = 'default'
    request.tasks[1].model_spec.signature_name = 'classify_x_to_y'
    request.tasks[1].method_name = 'tensorflow/serving/classify'

    example = request.input.example_list.examples.add()
    example.features.feature['x'].float_list.value.extend([2.0])

    # Send request
    host, port = model_server_address.split(':')
    channel = implementations.insecure_channel(host, int(port))
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
    result = stub.MultiInference(request, RPC_TIMEOUT)  # 5 secs timeout

    # Verify response
    self.assertEquals(2, len(result.results))
    expected_output = 3.0
    self.assertEquals(expected_output,
                      result.results[0].regression_result.regressions[0].value)
    self.assertEquals(expected_output, result.results[
        1].classification_result.classifications[0].classes[0].score)
Example #24
def main():
    host = FLAGS.host
    port = FLAGS.port
    model_name = FLAGS.model_name
    model_version = FLAGS.model_version
    request_timeout = FLAGS.request_timeout

    filename = "example3.bmp"
    fullpath = os.path.join("/ts/", filename)
    src_img = Image.open(fullpath, 'r')
    print('TF Processing source/reference image  %dx%d - %s.' %
          (src_img.size[0], src_img.size[1], src_img.format))
    src_img.show()

    # Create gRPC client and request
    channel = implementations.insecure_channel(host, int(port))
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
    request = predict_pb2.PredictRequest()
    request.model_spec.name = model_name
    request.model_spec.version.value = 1
    request.model_spec.signature_name = 'serving_default'

    raw = cv2.imread(fullpath, 0)
    data = numpy.asarray(raw, dtype=numpy.float32) / 255.
    flat = data.reshape(1, 28, 28)
    print((flat))
    request.inputs['image'].CopyFrom(
        tf.contrib.util.make_tensor_proto(flat, shape=flat.shape))
    print((request))

    # Send request
    print('Waiting for response....')
    result = stub.Predict(request, request_timeout)
    print('Response received:\r\n %s ' % (result))
    response = numpy.array(result.outputs['classes'].int64_val)
    print('prediction is %s ' % (response))
def main(_):
    host, port = FLAGS.server.split(':')
    channel = implementations.insecure_channel(host, int(port))
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
    # Send request
    with open(FLAGS.image, 'rb') as f:
        # See prediction_service.proto for gRPC request/response details.
        data = f.read()
        request = predict_pb2.PredictRequest()
        request.model_spec.name = 'cifar10-model'
        request.model_spec.signature_name = 'predict_images'
        request.inputs['images'].CopyFrom(
            tf.contrib.util.make_tensor_proto(data, shape=[1]))
        result = stub.Predict(request, 10.0)  # 10 secs timeout

        if result:
            # inference result
            output_scores = result.outputs['scores'].float_val
            output_classes = result.outputs['classes'].string_val
            print('This is a ' + output_classes[0] + '.')

            # Barchart plot
            import numpy as np
            import matplotlib.pyplot as plt
            fig, ax = plt.subplots()
            ind = np.arange(len(output_scores))
            softmax = lambda x: np.exp(x) / np.sum(np.exp(x))
            prob = softmax(output_scores)

            ax.bar(ind, prob)
            ax.set_ylabel('Probabilities')
            ax.set_title('Probabilities by classes')
            ax.set_xticks(ind)
            ax.set_xticklabels(output_classes)
            ax.legend()
            plt.show()
Example #26
def main(_):
    if FLAGS.image:
        with open(FLAGS.image, 'rb') as f:
            data = f.read()
    else:
        # Download the image since we weren't given one
        dl_request = requests.get(IMAGE_URL, stream=True)
        dl_request.raise_for_status()
        data = dl_request.content

    channel = implementations.insecure_channel(FLAGS.host, int(FLAGS.port))
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
    # Send request
    # See prediction_service.proto for gRPC request/response details.
    request = predict_pb2.PredictRequest()
    request.model_spec.name = 'resnet'
    request.model_spec.signature_name = 'serving_default'
    request.inputs['image_bytes'].CopyFrom(
        tf.contrib.util.make_tensor_proto(data, shape=[1]))
    result = stub.Predict(request, 10.0)  # 10 secs timeout
    #print(result)
    print(
        'Prediction class: {}'.format(result.outputs['classes'].int64_val[0] -
                                      1))
def do_inference(hostport, work_dir, req_x):
    req_x = np.array([req_x], dtype=np.float32)
    host, port = hostport.split(':')
    channel = implementations.insecure_channel(host, int(port))
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)

    request = predict_pb2.PredictRequest()
    request.model_spec.name = 'cifar10_cnn'
    request.model_spec.signature_name = 'predict_x'
    request.inputs['req_x'].CopyFrom(tf.contrib.util.make_tensor_proto(req_x))

    result_future = stub.Predict.future(request, 10.0)  # 10 second timeout

    exception = result_future.exception()
    print(exception)
    response_data = {'tensor': {}}
    if exception:
        response_data['tensor']['error_code'] = 1
        response_data = exception
    else:
        response_data['tensor']['error_code'] = 0
        response_data['tensor']['data'] = np.array(
            result_future.result().outputs['res_y'].float_val).tolist()
    return response_data
Example #28
def main(test_sent):
    start_time = time.time()
    channel = implementations.insecure_channel('192.168.1.210', 5075)
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)

    test_sent = list(test_sent.strip())
    test_data = [(test_sent, ['O'] * len(test_sent))]
    label_list = []
    for seqs, labels in batch_yield(test_data,
                                    batch_size=64,
                                    vocab=word2id,
                                    tag2label=tag2label,
                                    shuffle=False):
        label_list_, _ = predict_one_batch(seqs, stub)
        label_list.extend(label_list_)
    # label2tag = {}
    # for tag, label in tag2label.items():
    #     label2tag[label] = tag if label != 0 else label
    tag = [label2tag[label] for label in label_list[0]]
    print('tag', tag)
    PER, LOC, ORG = get_entity(tag, test_sent)
    time_used = time.time() - start_time
    print('time_used', time_used)
    return PER, LOC, ORG
def main(_):
    params = {}
    params["channel"] = "RGB"
    patch_size = 64
    host, port = FLAGS.server.split(':')
    channel = implementations.insecure_channel(host, int(port))
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)

    # Change test image to RGB mode
    img = np.array(Image.open(FLAGS.image).convert(params["channel"]))
    type_id = FLAGS.type  # type of the ID: 0: CCCD front side, 1: CCCD back side, 2: CMND front side, 3: CMND back side
    if int(type_id) == 1:
        threshold_1 = (0.2, 0.45)
        threshold_2 = (0.1, 0.1)
    elif int(type_id) == 2:
        threshold_1 = (0.23, 0.5)
        threshold_2 = (0.15, 0.15)
    else:
        threshold_1 = (0.17, 0.35)
        threshold_2 = (0.1, 0.1)
    decision = inference(
        img, int(type_id), stub, threshold_1, threshold_2,
        patch_size)  # 1: recaptured image, 0: normal photo
    print("Detection result {}".format(decision))
Example #30
def run(host, port, input_str, model, signature_name):

    channel = implementations.insecure_channel(host, port)
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)

    # Pre-processing input
    prediction_input = [json.dumps(eval(input_str))]
    ink, classname = creat.parse_line(prediction_input[0])

    # encapsulate as tf.Example object
    classnames = ['doodle', 'expression', 'symbols']
    features = {}
    features["class_index"] = tf.train.Feature(int64_list=tf.train.Int64List(
        value=[classnames.index("doodle")]))
    features["ink"] = tf.train.Feature(float_list=tf.train.FloatList(
        value=ink.flatten()))
    features["shape"] = tf.train.Feature(int64_list=tf.train.Int64List(
        value=ink.shape))
    f = tf.train.Features(feature=features)
    example = tf.train.Example(features=f)
    final_req = [example]
    start = time.time()

    #generate request
    request = classification_pb2.ClassificationRequest()
    request.model_spec.name = model
    request.model_spec.signature_name = signature_name
    request.input.example_list.examples.extend(final_req)

    result = stub.Classify(request, 10.0)

    end = time.time()
    time_diff = end - start

    print(result)
    print('time elapased: {}'.format(time_diff))
Example #31
def onet_serving(image):
    host = '127.0.0.1'
    port = 9000

    channel = implementations.insecure_channel(host, port)
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)

    request = predict_pb2.PredictRequest()

    request.model_spec.name = 'mtcnn'

    request.model_spec.signature_name = 'onet_predict'

    tp = tf.make_tensor_proto(image, dtype=tf.float32, shape=image.shape)

    request.inputs['images'].CopyFrom(tp)

    result = stub.Predict(request, 10.0)

    return [
        MakeNdarray(result.outputs['result1']),
        MakeNdarray(result.outputs['result2']),
        MakeNdarray(result.outputs['result3'])
    ]
def predict_and_profile(host, port, model, batch):

    # Prepare the RPC request to send to the TF server.
    channel = implementations.insecure_channel(host, int(port))
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
    request = predict_pb2.PredictRequest()
    request.model_spec.name = model

    # 'predict' is the default signature used for canned estimators and the
    # preferred signature. If you used a different signature when creating the
    # servable model, be sure to change the line below.
    request.model_spec.signature_name = 'predict'  # TODO: change if necessary

    request.inputs['images'].CopyFrom(
        tf.contrib.util.make_tensor_proto(batch,
                                          shape=[len(batch)],
                                          dtype=tf.string))

    # Call the server to predict, return the result, and compute round trip time
    start_time = int(round(time.time() * 1000))
    result = stub.Predict(request, 60.0)  # 60 second timeout
    elapsed = int(round(time.time() * 1000)) - start_time

    return result, elapsed
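# A possible call to predict_and_profile (a sketch only): the image path,
# server address and model name below are placeholders, not taken from the
# example above.
if __name__ == '__main__':
    with open('example.jpg', 'rb') as f:
        batch = [f.read()]
    result, elapsed_ms = predict_and_profile('localhost', 8500, 'my_model', batch)
    print('round trip took {} ms'.format(elapsed_ms))
    print(result)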
Example #33
def do_inference(hostport):
    """Tests PredictionService with concurrent requests.
    Args:
    hostport: Host:port address of the Prediction Service.
    Returns:
    pred values, ground truth label
    """
    # create connection
    host, port = hostport.split(':')
    channel = implementations.insecure_channel(host, int(port))
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
    print("Connection is Done*************************")
    # initialize a request
    request = predict_pb2.PredictRequest()
    request.model_spec.name = 'HashTable'
    request.model_spec.signature_name = 'prediction'
    #data = FLAGS.errorcode
    data = '156'
    print(data)

    # Randomly generate some test data
    # temp_data = numpy.random.randn(10, 3).astype(numpy.float32)
    # data, label = temp_data, numpy.sum(temp_data * numpy.array([1, 2, 3]).astype(numpy.float32), 1)
    # request.inputs['input'].CopyFrom(
    #     tf.contrib.util.make_tensor_proto(data, shape=data.shape))

    # # Randomly generate some test data
    # temp_data = numpy.random.randn(10, 3).astype(numpy.float32)
    # data, label = temp_data, numpy.sum(temp_data * numpy.array([1, 2, 3]).astype(numpy.float32), 1)
    request.inputs['input'].CopyFrom(
        tf.contrib.util.make_tensor_proto(data, shape=[1]))

    # predict
    result = stub.Predict(request, 30.0)  # 5 seconds
    print(result)
    return result
Example #34
def do_inference(url):
    mnist_serving_url = os.environ.get('INCEPTION_SERVING_URL', 'ai02:9001')
    host, port = mnist_serving_url.split(':')

    channel = implementations.insecure_channel(host, int(port))
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
    request = predict_pb2.PredictRequest()
    request.model_spec.name = 'inception'
    request.model_spec.signature_name = 'predict_images'

    image, image_b64 = get_image(url)
    request.inputs['images'].CopyFrom(
        tf.contrib.util.make_tensor_proto(image, shape=[1]))

    result = stub.Predict(request, 10.0)  # 10 seconds
    result = zip(
        map(lambda byte_str: byte_str.decode(),
            list(result.outputs['classes'].string_val)),
        map(float, list(result.outputs['scores'].float_val)))

    def to_dict(name_prob_tuple):
        return {'name': name_prob_tuple[0], 'probability': name_prob_tuple[1]}

    return {'image_b64': image_b64, 'labels': list(map(to_dict, result))}
Example #35
def main(_):
    x_tr, x_te, y_tr, y_te = input_data()
    x_test_arr = numpy.float32(x_te)
    #feed_value2 = numpy.asarray([90.234,1352.642,5978.735])
    # Prepare request
    request = predict_pb2.PredictRequest()
    request.model_spec.name = 'deka'
    request.inputs['inputs'].dtype = types_pb2.DT_FLOAT
    #request.inputs['inputs'].float_val.append(feed_value2)
    request.inputs['inputs'].CopyFrom(
        tf.contrib.util.make_tensor_proto(x_test_arr))
    request.output_filter.append('classes')
    # Send request
    host, port = FLAGS.server.split(':')
    channel = implementations.insecure_channel(host, int(port))
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
    prediction = stub.Predict(request, 5.0)  # 5 secs timeout
    predd = numpy.asarray(prediction)
    floats = prediction.outputs['classes'].int64_val
    pred_array = numpy.asarray(floats)
    df = pandas.DataFrame({"predicted_value": pred_array})
    print(df)
    df.to_csv(
        '/home/deka/Desktop/prediction_log.csv')  # Save prediction as dataset
Example #36
def main(_):
    x_tr, x_te, y_tr, y_te = input_data()
    x_test_arr = numpy.float32(x_te)
    # Prepare request
    request = predict_pb2.PredictRequest()
    request.model_spec.name = 'deka'
    request.inputs['inputs'].dtype = types_pb2.DT_FLOAT
    #request.inputs['inputs'].float_val.append(feed_value2)
    request.inputs['inputs'].CopyFrom(
        tf.contrib.util.make_tensor_proto(x_test_arr))
    request.output_filter.append('outputs')
    # Send request
    host, port = FLAGS.server.split(':')
    channel = implementations.insecure_channel(host, int(port))
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
    prediction = stub.Predict(request, 5.0)  # 5 secs timeout
    floats = prediction.outputs['outputs'].float_val
    pred_arr = numpy.array(floats)
    pred_df = pandas.DataFrame(data=pred_arr)
    print(pred_df)
    plt.plot(pred_arr, 'pink')
    plt.plot(y_te, 'blue')
    plt.title("NPT prediction")
    plt.show()
Example #37
def main(_):
    host, port = FLAGS.server.split(':')
    channel = implementations.insecure_channel(host, int(port))
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)

    request = predict_pb2.PredictRequest()
    request.model_spec.name = FLAGS.model
    request.model_spec.signature_name = 'serving_default'

    feature_dict = {
        'age': _float_feature(value=25),
        'capital_gain': _float_feature(value=0),
        'capital_loss': _float_feature(value=0),
        'education': _bytes_feature(value='11th'.encode()),
        'education_num': _float_feature(value=7),
        'gender': _bytes_feature(value='Male'.encode()),
        'hours_per_week': _float_feature(value=40),
        'native_country': _bytes_feature(value='United-States'.encode()),
        'occupation': _bytes_feature(value='Machine-op-inspct'.encode()),
        'relationship': _bytes_feature(value='Own-child'.encode()),
        'workclass': _bytes_feature(value='Private'.encode())
    }
    label = 0

    example = tf.train.Example(features=tf.train.Features(
        feature=feature_dict))
    serialized = example.SerializeToString()

    request.inputs['inputs'].CopyFrom(
        tf.contrib.util.make_tensor_proto(serialized, shape=[1]))

    result_future = stub.Predict.future(request, 5.0)
    prediction = result_future.result().outputs['scores']

    print('True label: ' + str(label))
    print('Prediction: ' + str(np.argmax(prediction.float_val)))
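# The _float_feature and _bytes_feature helpers used above are not defined in
# this example. A minimal sketch, assuming each wraps a single scalar value in
# the corresponding tf.train feature type.
def _float_feature(value):
    return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))

def _bytes_feature(value):
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))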
Example #38
def do_inference(host, port, concurrency, feature_vector):
    """
    :param host: The IP of the server (usually localhost)
    :param port: The port of the model
    :param concurrency: The number of concurrent requests made to the server
    :param feature_vector: Feature vector already scaled
    :return: Prediction
    """
    # Assuming one look up at a time for now
    feature_vector = feature_vector.reshape(1, -1)

    channel = implementations.insecure_channel(host, int(port))
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)

    request = predict_pb2.PredictRequest()
    request.model_spec.name = 'ec2pred_mlp'
    request.inputs['input'].CopyFrom(
        tf.contrib.util.make_tensor_proto(feature_vector, shape=feature_vector.shape, dtype=tf.float32))

    result_future = stub.Predict.future(request, 5.0)  # 5 seconds
    result = result_future.result().outputs['prediction']
    response = numpy.array(result.float_val).reshape((result.tensor_shape.dim[0].size, result.tensor_shape.dim[1].size))

    return response
Example #39
def get_prediction(image,
                   server_host='127.0.0.1',
                   server_port=9000,
                   server_name="server",
                   timeout=10.0):
    """
  Retrieve a prediction from a TensorFlow model server

  :param image:       a MNIST image represented as a 1x784 array
  :param server_host: the address of the TensorFlow server
  :param server_port: the port used by the server
  :param server_name: the name of the server
  :param timeout:     the amount of time to wait for a prediction to complete
  :return 0:          the integer predicted in the MNIST image
  :return 1:          the confidence scores for all classes
  :return 2:          the version number of the model handling the request
  """

    print("connecting to:%s:%i" % (server_host, server_port))
    # initialize to server connection
    channel = implementations.insecure_channel(server_host, server_port)
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)

    # build request
    request = predict_pb2.PredictRequest()
    request.model_spec.name = server_name
    request.model_spec.signature_name = 'serving_default'
    request.inputs['x'].CopyFrom(
        tf.contrib.util.make_tensor_proto(image, shape=image.shape))

    # retrieve results
    result = stub.Predict(request, timeout)
    resultVal = result.outputs["classes"].int_val[0]
    scores = result.outputs['predictions'].float_val
    version = result.outputs["classes"].int_val[0]
    return resultVal, scores, version
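# A possible invocation of get_prediction (a sketch only): the zero image and
# the server parameters below are placeholders, not taken from the example.
if __name__ == '__main__':
    import numpy as np
    image = np.zeros((1, 784), dtype=np.float32)  # stand-in MNIST image
    digit, scores, version = get_prediction(image, server_host='127.0.0.1',
                                            server_port=9000,
                                            server_name='mnist', timeout=10.0)
    print('predicted digit: %d, top score: %f' % (digit, max(scores)))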
Example #40
def _create_stub(server):
  host, port = server.split(":")
  channel = implementations.insecure_channel(host, int(port))
  # TODO(bgb): Migrate to GA API.
  return prediction_service_pb2.beta_create_PredictionService_stub(channel)
def web_client_serving():
    args_batch_size = 10
    args_model_name = "aver_ende"
    args_host = "localhost"
    args_tf_port= 9000
    args_redis_port = 6379
    args_timeout = 100
    args_src = None
    args_tgt = None
    # Redis
    red0 = redis.Redis(host=args_host, port=args_redis_port, db=0) # for hash caching
    red1 = redis.Redis(host=args_host, port=args_redis_port, db=1) # for message queue
    # TensorFlow Serving
    channel = implementations.insecure_channel(args_host, args_tf_port)
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
    # Fast Execution
    fast_execution = 0
    fast_model_name = "toy_ende"
    fast_timeout = 100000
    # Looping User Serving
    while True:
        try:
            if red1.get("timeout_exist"):
                print("Fast Execution for Timeout user !")
                fast_execution = 1
                user_to_serve = red1.lpop("timeout_user_list")
                if user_to_serve == None:
                    print("Well.. False Alarm. No user there.")
                    fast_execution = 0
                    red1.delete("timeout_exist")
                    print("Waiting for new users ...")
                    while not red1.get("timeout_exist"):
                        user_to_serve = red1.blpop('web_user_list', 100)[1] # pick the first user, if none then wait
                    print("Serving: "+user_to_serve)
            else:
                print("Waiting for new users ...")
                while not red1.get("timeout_exist"):
                    user_to_serve = red1.blpop('web_user_list', 100)[1] # pick the first user, if none then wait
                print("Serving: "+user_to_serve)
            src_list_id = user_to_serve + "_src" # e.g. "web_1_src"
            tgt_list_id = user_to_serve + "_tgt" # e.g. "web_1_tgt"

            #: retrieve all queries of this user
            queries = red1.lrange(src_list_id, 0, -1)
            #: try caching first
            length_of_queries = len(queries)
            queries_for_loop = queries[:]
            for item in queries_for_loop:
                # print(str(length_of_queries)+repr(item))
                result = red0.hget(item, args_model_name)
                if result == None:
                    # print(str(length_of_queries))
                    break
                    # no hope, go to TensorFlow Serving :(
                else:
                    red1.rpush(tgt_list_id, result) # for users to retrieve from list
                    queries.remove(item)
                    length_of_queries = length_of_queries - 1

            if length_of_queries > 0:
                batch_tokens = []
                for query in queries:
                    batch_token = [str(item) for item in query.split()]
                    batch_tokens.append(batch_token)
                    # red1.rpush(tgt_list_id, str(batch_token)) # too good to be true :)
                
                # batch_tokens = [
                #     ["Hello", "world", "!"],
                #     ["My", "name", "is", "John", "."],
                #     ["I", "live", "on", "the", "West", "coast", "."]]

                #: ready for TensorFlow Serving
                tf_start_time = time.time()
                futures = []
                for tokens in batch_tokens:
                    if 0 == len(tokens):
                        continue
                    if fast_execution:
                        future = translate(stub, fast_model_name, tokens, timeout=fast_timeout)
                        futures.append(future)
                    else:
                        future = translate(stub, args_model_name, tokens, timeout=args_timeout)
                        futures.append(future)
                
                for tokens, future in zip(batch_tokens, futures):
                    result_tokens = parse_translation_result(future.result())
                    #: get results from tensorflow serving
                    #: result_tokens = ["Hallo", "Welt", "!"]
                    query = ' '.join(tokens)
                    result = ' '.join(result_tokens)
                    if not fast_execution: # only store full execution
                        red0.hset(query, args_model_name, result) # cache result
                        red0.expire(query, 1200) # key expires after 20 minutes
                    red1.rpush(tgt_list_id, result) # return to users
                    if not fast_execution:
                        print(tgt_list_id + '||' + result + "|| Latency: " + str(time.time() - tf_start_time))
                    else:
                        print("Fast Execution "+tgt_list_id + '||' + result + "|| Latency: " + str(time.time() - tf_start_time))
                        fast_execution = 0
                        red1.delete("timeout_exist")
            print("Well served: "+user_to_serve)
        except:
            red1.lpush('web_user_list', user_to_serve)
            print("Fail to serve: "+ str(user_to_serve))
Example #42
class ModelHandler(tornado.web.RequestHandler):
    __metaclass__ = abc.ABCMeta
    serverlg.info(
        '[ModelServer] [Initialization: service %s, schedule %d] [%s]' %
        (options.service, options.schedule,
         time.strftime('%Y-%m-%d %H:%M:%S')))
    for conf_name in schedule:
        #may be deprecated
        graph = create(conf_name)
        graph.apply_deploy_conf(schedule[conf_name])

        host, port = schedule[conf_name]["tf_server"].split(":")
        channel = implementations.insecure_channel(host, int(port))
        stub = prediction_service_pb2.beta_create_PredictionService_stub(
            channel)
        schedule[conf_name]["graph_stub"] = (graph, stub)

    @abc.abstractmethod
    def handle(self):
        query = self.get_argument('query', None)
        if not query:
            ret = {}
            ret["status"] = "missing params"
            serverlg.info(
                '[chatbot] [ERROR: missing params] [REQUEST] [%s] [%s]' %
                (time.strftime('%Y-%m-%d %H:%M:%S'), self.request.uri))
            self.write(json.dumps(ret, ensure_ascii=False))
            self.finish()
        results = []
        debug_infos = []

        graph_stubs = [schedule[name]["graph_stub"] for name in schedule]
        # Multi model compatible, but here just one model exists
        multi_models = []
        for graph, stub in graph_stubs:
            multi_models.append(
                self.run_model(graph, stub, [query.encode("utf-8")]))
        outs = yield multi(multi_models)
        serverlg.info(outs)

        raise gen.Return(None)

    @abc.abstractmethod
    def form_multi_results(self, model_name, model_out):
        return

    def set_default_header(self):
        self.set_header('Access-Control-Allow-Origin', "*")

    @tornado.gen.coroutine
    def run_model(self, graph, stub, records, use_seg=True):
        # Use model specific preprocess
        feed_data = graph.preproc(records, use_seg=use_seg, for_deploy=True)
        # make request
        request = predict_pb2.PredictRequest()
        request.model_spec.name = graph.name
        values = list(feed_data.values())
        N = len(values[0]) if len(values[0]) < 2 else 2
        see_feed = {k: v[0:N] for k, v in feed_data.items()}
        serverlg.info('[DispatcherServer] [sample %d/%d] %s' %
                      (N, len(values), str(see_feed)))
        for key, value in feed_data.items():
            v = np.array(value)
            value_tensor = tensor_util.make_tensor_proto(value, shape=v.shape)
            # For compatibility to the old placeholder key
            request.inputs[key].CopyFrom(value_tensor)

        # query the model
        #result = stub.Predict(request, 4.0)
        result = yield fwrap(stub.Predict.future(request, 3.0))
        out = {}
        for key, value in result.outputs.items():
            out[key] = tensor_util.MakeNdarray(value)
        model_results = graph.after_proc(out)

        raise gen.Return(model_results)

    @tornado.web.asynchronous
    @tornado.gen.coroutine
    def post(self):
        serverlg.info('[DispatcherServer] [BEGIN] [REQUEST] [%s] [%s]' %
                      (time.strftime('%Y-%m-%d %H:%M:%S'), self.request.uri))
        gc.disable()

        # prepare locks, events, and results container for coroutine
        #self.results = [None] * len(deployments)
        #self.model_results = []
        #self.evt = Event()
        #self.lock = locks.Lock()

        # query all models
        #for name in self.servings:
        model_results, debug_infos, desc = yield self.handle()
        results = self.form_multi_results(model_results, debug_infos)

        # wait until told to proceed
        #yield self.evt.wait()

        #self.run()

        # form response
        ret = {"status": "ok", "result": results}
        #self.write(json.dumps(ret))
        self.write(ret)
        #self.finish()

    @tornado.web.asynchronous
    @tornado.gen.coroutine
    def get(self):
        gc.disable()
        serverlg.info('[DispatcherServer] [BEGIN] [REQUEST] [%s] [%s]' %
                      (time.strftime('%Y-%m-%d %H:%M:%S'), self.request.uri))
        # preproc
        model_results, debug_infos, desc = yield self.handle()
        results = self.form_multi_results(model_results, debug_infos)

        # form response
        ret = {"status": "ok", "result": results, "desc": desc}
        self.write(json.dumps(ret, ensure_ascii=False))
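# The fwrap helper yielded above is not shown in this example. A minimal
# sketch, assuming it bridges the gRPC future into a Tornado future so the
# coroutine can yield on it; the result is handed back on the IOLoop thread.
from tornado.concurrent import Future
from tornado.ioloop import IOLoop

def fwrap(grpc_future):
    ioloop = IOLoop.current()
    tornado_future = Future()

    def _on_done(f):
        try:
            result = f.result()
            ioloop.add_callback(tornado_future.set_result, result)
        except Exception as e:  # surface RPC errors inside the coroutine
            ioloop.add_callback(tornado_future.set_exception, e)

    grpc_future.add_done_callback(_on_done)
    return tornado_future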
Example #43
    def __init__(self, host, port):
        self.logger = logging.getLogger('doodle.predictions')
        self.channel = insecure_channel(host, port)
        self.stub = beta_create_PredictionService_stub(self.channel)
        self.predict_keys = ['probabilities', 'classes']
Example #44
def main(_):
    host, port = FLAGS.server.split(':')
    channel = implementations.insecure_channel(host, int(port))
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)

    # Send request
    #with open(FLAGS.image, 'rb') as f:
    # See prediction_service.proto for gRPC request/response details.
    #data = f.read()
    request = predict_pb2.PredictRequest()
    images = load_and_align_data(['images/12.jpg'], 160, 44, 0.9)

    # Call Facenet model to make prediction on the image
    request.model_spec.name = 'face128'
    request.model_spec.signature_name = 'calculate_embeddings'
    request.inputs['images'].CopyFrom(
        tf.contrib.util.make_tensor_proto(images, dtype=tf.float32))
    request.inputs['phase'].CopyFrom(
        tf.contrib.util.make_tensor_proto(False, dtype=tf.bool))

    start_time = time.time()
    result = stub.Predict(request, 60.0)  # 60 secs timeout

    # Convert to friendly python object
    results_dict = {}
    for key in result.outputs:
        tensor_proto = result.outputs[key]
        nd_array = tf.contrib.util.make_ndarray(tensor_proto)
        results_dict[key] = nd_array

    # storing embeddings
    emb = results_dict.get("embeddings")
    feature_time = time.time() - start_time

    # calculate distance between images
    nrof_embeds = labels.size
    dist_array = np.zeros((nrof_embeds, 1))
    for i in range(nrof_embeds):
        tmp = embeds[i, :] - emb[0, :]
        sum_squared = np.dot(tmp.T, tmp)
        dist = math.sqrt(sum_squared)  # AVGTIME = 0.09sec
        dist_array[i][0] = dist

    # pick the index of the smallest distance (closest embedding)
    pred_array = dist_array.argmin(0)

    # threshold distance to 0.8
    if dist_array[pred_array[0]][0] < 0.8:
        pred_label = labels[pred_array[0]]
        pred_face = class_names[int(pred_label)]

    else:
        pred_face = 'Unknown'

    print('Face name:  {}'.format(pred_face))
    print('Face Dist:  {}'.format(dist_array[pred_array[0]][0]))
    print(' ')
    distance_time = time.time() - start_time - feature_time

    print('Feature Calculation time:  {}'.format(feature_time))
    print('Distance Calculation time: {}'.format(distance_time))
    print('Image Recognition time:    {}'.format(time.time() - start_time))
    print(' ')
    def predict(self, sents):
        _, self.sents, self.segs, self.tags = self.sents2id(sents)
        # hostport = '192.168.31.186:6000'

        # host, port = hostport.split(':')
        # channel = implementations.insecure_channel(host, int(port))
        channel = implementations.insecure_channel(TF_SERVING_HOST,
                                                   TF_SERVING_PORT)

        stub = prediction_service_pb2.beta_create_PredictionService_stub(
            channel)
        # build request
        request = predict_pb2.PredictRequest()
        request.model_spec.name = self.model_name
        request.model_spec.signature_name = self.signature_name
        request.inputs['input_w'].CopyFrom(
            tf.contrib.util.make_tensor_proto(self.sents, dtype=tf.int32))
        request.inputs['input_seg'].CopyFrom(
            tf.contrib.util.make_tensor_proto(self.segs, dtype=tf.int32))
        request.inputs['target'].CopyFrom(
            tf.contrib.util.make_tensor_proto(self.tags, dtype=tf.int32))
        request.inputs['dropout'].CopyFrom(
            tf.contrib.util.make_tensor_proto(1.0, dtype=tf.float32))
        model_results = stub.Predict(request, 60.0)

        trans = tensor_util.MakeNdarray(model_results.outputs["trans"])
        scores = tensor_util.MakeNdarray(model_results.outputs["scores"])
        lengths = tensor_util.MakeNdarray(model_results.outputs["lengths"])
        batch_paths = self.decode(scores, lengths, trans)
        tags = [self.id_to_tag[idx] for idx in batch_paths[0]]
        item = self.result_to_json(sents, tags)
        lbl_list = ["O"] * len(sents)
        for lbldict in item["entities"]:
            start, end, lbl = lbldict["start"], lbldict["end"], lbldict["type"]
            lbl_list[start:end] = [lbl] * (end - start)
        ner_str = ""
        for c, lbl in zip(sents, lbl_list):
            ner_str += c + "/" + lbl + " "
        ner_str = ner_str.rstrip()

        year_dict = {"YEAR": 1}
        year_str = self.str_spec(sents, lbl_list, year_dict)
        # print("year_str:{}".format(year_str))
        month_dict = {"MONTH": 1}
        month_str = self.str_spec(sents, lbl_list, month_dict)
        # print("month_str:{}".format(month_str))
        day_dict = {"DAY": 1}
        day_str = self.str_spec(sents, lbl_list, day_dict)
        # print("day_str:{}".format(day_str))
        part_dict = {"PART": 1}
        part_str = self.str_spec(sents, lbl_list, part_dict)
        # print("part_str:{}".format(part_str))
        speed_dict = {"SPEED": 1}
        speed_str = self.str_spec(sents, lbl_list, speed_dict)
        # print("speed_str:{}".format(speed_str))
        type_dict = {"TYPE": 1}
        type_str = self.str_spec(sents, lbl_list, type_dict)
        # print("type_str:{}".format(type_str))

        loc_dict = {"SLOC": 1, "ELOC": 2}
        loc_str = self.str_spec(sents, lbl_list, loc_dict)
        # print("loc_str:{}".format(loc_str))
        time_dict = {"STIME": 1, "ETIME": 2}
        time_str = self.str_spec(sents, lbl_list, time_dict)
        # print("time_str:{}".format(time_str))

        return loc_str, time_str, year_str, month_str, day_str, part_str, speed_str, type_str
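The span-to-label expansion at the end of predict() can be seen in isolation; a small self-contained sketch (the sentence and entity list are made up, but the field names match the result_to_json output used above):

sents = u"明天去北京"
entities = [{"start": 0, "end": 2, "type": "DAY"},
            {"start": 3, "end": 5, "type": "ELOC"}]

lbl_list = ["O"] * len(sents)
for ent in entities:
    start, end, lbl = ent["start"], ent["end"], ent["type"]
    lbl_list[start:end] = [lbl] * (end - start)

ner_str = " ".join(c + "/" + lbl for c, lbl in zip(sents, lbl_list))
print(ner_str)  # 明/DAY 天/DAY 去/O 北/ELOC 京/ELOC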
def main(_):
    host, port = FLAGS.server.split(':')
    channel = implementations.insecure_channel(host, int(port))
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)

    request = predict_pb2.PredictRequest()
    request.model_spec.name = FLAGS.model
    request.model_spec.signature_name = 'serving_default'

    # True label value; taken from the eval.py portion.
    label = 1

    # Hard-coded inputs; these could easily be made configurable instead (a dict-driven sketch follows this snippet).
    # The input names come from the signature definition shown by the saved_model_cli show command.
    request.inputs['C1'].CopyFrom(
        tf.contrib.util.make_tensor_proto("68fd1e64", shape=[1]))
    request.inputs['C10'].CopyFrom(
        tf.contrib.util.make_tensor_proto("547c0ffe", shape=[1]))
    request.inputs['C11'].CopyFrom(
        tf.contrib.util.make_tensor_proto("bc8c9f21", shape=[1]))
    request.inputs['C12'].CopyFrom(
        tf.contrib.util.make_tensor_proto("60ab2f07", shape=[1]))
    request.inputs['C13'].CopyFrom(
        tf.contrib.util.make_tensor_proto("46f42a63", shape=[1]))
    request.inputs['C14'].CopyFrom(
        tf.contrib.util.make_tensor_proto("07d13a8f", shape=[1]))
    request.inputs['C15'].CopyFrom(
        tf.contrib.util.make_tensor_proto("18231224", shape=[1]))
    request.inputs['C16'].CopyFrom(
        tf.contrib.util.make_tensor_proto("e6b6bdc7", shape=[1]))
    request.inputs['C17'].CopyFrom(
        tf.contrib.util.make_tensor_proto("e5ba7672", shape=[1]))
    request.inputs['C18'].CopyFrom(
        tf.contrib.util.make_tensor_proto("74ef3502", shape=[1]))
    request.inputs['C19'].CopyFrom(
        tf.contrib.util.make_tensor_proto("0", shape=[1]))
    request.inputs['C2'].CopyFrom(
        tf.contrib.util.make_tensor_proto("2c16a946", shape=[1]))
    request.inputs['C20'].CopyFrom(
        tf.contrib.util.make_tensor_proto("0", shape=[1]))
    request.inputs['C21'].CopyFrom(
        tf.contrib.util.make_tensor_proto("5316a17f", shape=[1]))
    request.inputs['C22'].CopyFrom(
        tf.contrib.util.make_tensor_proto("0", shape=[1]))
    request.inputs['C23'].CopyFrom(
        tf.contrib.util.make_tensor_proto("32c7478e", shape=[1]))
    request.inputs['C24'].CopyFrom(
        tf.contrib.util.make_tensor_proto("9117a34a", shape=[1]))
    request.inputs['C25'].CopyFrom(
        tf.contrib.util.make_tensor_proto("0", shape=[1]))
    request.inputs['C26'].CopyFrom(
        tf.contrib.util.make_tensor_proto("", shape=[1]))
    request.inputs['C3'].CopyFrom(
        tf.contrib.util.make_tensor_proto("503b9dbc", shape=[1]))
    request.inputs['C4'].CopyFrom(
        tf.contrib.util.make_tensor_proto("e4dbea90", shape=[1]))
    request.inputs['C5'].CopyFrom(
        tf.contrib.util.make_tensor_proto("f3474129", shape=[1]))
    request.inputs['C6'].CopyFrom(
        tf.contrib.util.make_tensor_proto("13718bbd", shape=[1]))
    request.inputs['C7'].CopyFrom(
        tf.contrib.util.make_tensor_proto("38eb9cf4", shape=[1]))
    request.inputs['C8'].CopyFrom(
        tf.contrib.util.make_tensor_proto("1f89b562", shape=[1]))
    request.inputs['C9'].CopyFrom(
        tf.contrib.util.make_tensor_proto("a73ee510", shape=[1]))
    request.inputs['I1'].CopyFrom(
        tf.contrib.util.make_tensor_proto(1.0, shape=[1]))
    request.inputs['I10'].CopyFrom(
        tf.contrib.util.make_tensor_proto(1.0, shape=[1]))
    request.inputs['I11'].CopyFrom(
        tf.contrib.util.make_tensor_proto(1.0, shape=[1]))
    request.inputs['I12'].CopyFrom(
        tf.contrib.util.make_tensor_proto(0.0, shape=[1]))
    request.inputs['I13'].CopyFrom(
        tf.contrib.util.make_tensor_proto(0.0, shape=[1]))
    request.inputs['I2'].CopyFrom(
        tf.contrib.util.make_tensor_proto(4.0, shape=[1]))
    request.inputs['I3'].CopyFrom(
        tf.contrib.util.make_tensor_proto(2.0, shape=[1]))
    request.inputs['I4'].CopyFrom(
        tf.contrib.util.make_tensor_proto(0.0, shape=[1]))
    request.inputs['I5'].CopyFrom(
        tf.contrib.util.make_tensor_proto(0.0, shape=[1]))
    request.inputs['I6'].CopyFrom(
        tf.contrib.util.make_tensor_proto(0.0, shape=[1]))
    request.inputs['I7'].CopyFrom(
        tf.contrib.util.make_tensor_proto(1.0, shape=[1]))
    request.inputs['I8'].CopyFrom(
        tf.contrib.util.make_tensor_proto(0.0, shape=[1]))
    request.inputs['I9'].CopyFrom(
        tf.contrib.util.make_tensor_proto(0.0, shape=[1]))

    result_future = stub.Predict.future(request, 5.0)
    prediction = result_future.result().outputs

    # Uncomment this if you want to see the output of the entire TensorProto
    # print('Prediction: ' + str(prediction))

    # True label value
    print('True label: ' + str(label))

    # convert the TensorProto to an ndarray to read the class probabilities
    probList = tensor_util.MakeNdarray(prediction['probabilities'])[0]
    if probList[0] < probList[1]:
        print("Prediction: 1")
    else:
        print("Prediction: 0")
  def _MakeStub(self, hostport):
    """Returns a gRPC stub using beta gRPC API."""
    host, port = hostport.split(':')
    channel = implementations.insecure_channel(host, int(port))
    return prediction_service_pb2.beta_create_PredictionService_stub(channel)
Example #48
import numpy as np
import tensorflow as tf
from flask import Flask
from grpc.beta import implementations

# TensorFlow Serving stubs used to send prediction requests
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2
from tensorflow.contrib.util import make_tensor_proto

app = Flask(__name__)

crash_vocab = [
    'Crash', 'Earthquake', 'Explosion', 'Fire', 'Floods', 'Terrorism',
    'Typhoon', 'None'
]

vector_channel = implementations.insecure_channel('35.184.14.40', 80)
disaster_channel = implementations.insecure_channel('0.0.0.0', 8500)

vector_stub = prediction_service_pb2.beta_create_PredictionService_stub(
    vector_channel)
disaster_stub = prediction_service_pb2.beta_create_PredictionService_stub(
    disaster_channel)


def sendVectorRequest(sents):
    req = predict_pb2.PredictRequest()
    req.model_spec.name = 'serving_saved_model'
    req.model_spec.signature_name = 'serving_default'
    req.inputs['text'].CopyFrom(
        make_tensor_proto(sents, shape=[len(sents)], dtype=tf.string))
    result = vector_stub.Predict(req, 60.0)

    predictions = np.reshape(result.outputs['embedding'].float_val,
                             [len(sents), 512])
    return predictions
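A companion call against disaster_stub would presumably look similar; in the sketch below the model name, signature, and tensor keys ('crash_model', 'serving_default', 'text', 'scores') are assumptions, not values taken from the snippet above:

def sendDisasterRequest(sents):
    req = predict_pb2.PredictRequest()
    req.model_spec.name = 'crash_model'              # assumed model name
    req.model_spec.signature_name = 'serving_default'
    req.inputs['text'].CopyFrom(
        make_tensor_proto(sents, shape=[len(sents)], dtype=tf.string))
    result = disaster_stub.Predict(req, 60.0)
    scores = np.reshape(result.outputs['scores'].float_val,
                        [len(sents), len(crash_vocab)])
    # Map each sentence to the highest-scoring class in crash_vocab.
    return [crash_vocab[i] for i in np.argmax(scores, axis=1)]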
def main(unused_argv):

    test_file_path = FLAGS.test_file_path
    id_data_dir = FLAGS.id_data_dir

    batch_size = FLAGS.batch_size
    seed_num = FLAGS.seed_num
    max_timesteps = FLAGS.max_timesteps
    vocab_size = FLAGS.vocab_size
    test_size = FLAGS.test_size
    use_local = FLAGS.use_local

    DR_path = os.path.join(id_data_dir, 'DataReader.pkl')
    with open(DR_path, 'rb') as f:
        DR = pickle.load(f)

    input_pinyin_data, input_word_data, target_data = DR.make_data_from_dataframe(
        file_path=test_file_path, build_dictionary=False, max_rows=test_size)

    np.random.seed(seed_num)
    np.random.shuffle(input_pinyin_data)
    np.random.seed(seed_num)
    np.random.shuffle(input_word_data)
    np.random.seed(seed_num)
    np.random.shuffle(target_data)

    test_data_full = batch_generator_triple_with_length(
        input_pinyin_data, input_word_data, target_data, batch_size,
        max_timesteps, DR.word2id, DR.pinyin2id)

    n_iter_per_epoch = len(input_pinyin_data) // (batch_size)

    for t in range(1, n_iter_per_epoch + 1):
        batch_full = next(test_data_full)
        src_pinyin_list, src_word_list, src_length_list, tgt_list, tgt_length_list = batch_full
        src_pinyin_list = np.asarray(src_pinyin_list, dtype=np.int32)
        src_word_list = np.asarray(src_word_list, dtype=np.int32)
        src_length_list = np.asarray(src_length_list, dtype=np.int32)
        tgt_list = np.asarray(tgt_list, dtype=np.int32)

        hostport = '0.0.0.0:9012'
        host, port = hostport.split(':')

        # NOTE: the channel and stub are rebuilt on every batch; hoisting them
        # above the loop would avoid reconnecting on every iteration.
        channel = implementations.insecure_channel(host, int(port))

        stub = prediction_service_pb2.beta_create_PredictionService_stub(
            channel)
        request = predict_pb2.PredictRequest()

        request.model_spec.name = 'spell'
        request.model_spec.signature_name = "predict"

        request.inputs['pinyin_ids'].CopyFrom(
            tf.contrib.util.make_tensor_proto(
                src_pinyin_list,
                shape=[src_pinyin_list.shape[0], src_pinyin_list.shape[1]]))
        request.inputs['word_ids'].CopyFrom(
            tf.contrib.util.make_tensor_proto(
                src_word_list,
                shape=[src_word_list.shape[0], src_word_list.shape[1]]))
        request.inputs['input_lengths'].CopyFrom(
            tf.contrib.util.make_tensor_proto(src_length_list))
        request.inputs['keep_ratio'].CopyFrom(
            tf.contrib.util.make_tensor_proto(FLAGS.keep_ratio))

        print('Predict:')
        proba = stub.Predict(request, 50.0)
        results = {}
        for key in proba.outputs:
            tensor_proto = proba.outputs[key]
            nd_array = tf.contrib.util.make_ndarray(tensor_proto)
            results[key] = nd_array

        predict_ids = np.argmax(results['predict'], axis=1)
        print(predict_ids)
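        # (Sketch) rough per-batch accuracy against the targets already in scope.
        # Assumes the model's 'predict' output is one logits row per flattened
        # target position, which is what the argmax above implies.
        flat_targets = tgt_list.reshape(-1)
        if predict_ids.shape == flat_targets.shape:
            print('batch accuracy: {:.4f}'.format(
                float(np.mean(predict_ids == flat_targets))))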
Example #50
    def post(self):
        upload_folder = os.path.abspath(app.config["UPLOAD_FOLDER"])
        file_name = str(uuid.uuid4())
        mp3_path = os.path.join(upload_folder, file_name)
        train_id_list_path = os.path.join(app.config["PICKLE_FOLDER"], "train_id_list.pickle")
        bird_id_map_path = os.path.join(app.config["PICKLE_FOLDER"], "bird_id_map.pickle")

        # Load pickle files
        train_id_list = None
        try:
            with open(train_id_list_path, "rb") as rf:
                train_id_list = pickle.load(rf)
        except:
            print_exc()
            return response(500, "An internal error occured", 0, None)
        # print(train_id_list)

        bird_id_map = None
        try:
            with open(bird_id_map_path, "rb") as rf:
                bird_id_map = pickle.load(rf)
        except:
            print_exc()
            return response(500, "An internal error occured", 0, None)
        # print(bird_id_map)

        # Accept the uploaded binary file
        try:
            with open(mp3_path, "wb") as wf:
                chunk_size = 4096
                while True:
                    chunk = request.stream.read(chunk_size)
                    if len(chunk) == 0:
                        break

                    wf.write(chunk)
        except:
            print_exc()
            rm_file(mp3_path)
            return response(500, "An error occured while trying to upload file", 0, None)
        
        # Check if it has been saved
        if not os.path.isfile(mp3_path) or os.path.getsize(mp3_path) <= 0:
            print("File {} doesn't exist".format(mp3_path))
            return response(500, "Uploaded file could not been saved", 0, None)
        else:
            # print("File saved to {}, size: {}".format(mp3_path, os.path.getsize(mp3_path)))
            pass

        # Transform to WAV
        # TODO: use a query string to accept different audio formats
        wav_path = mp4_to_wav(mp3_path, file_name + ".wav")
        if wav_path is None:
            rm_file(mp3_path)
            return response(500, "An error occured while trying to convert file to WAV", 0, None)

        try:
            my_input = vggish_input.wavfile_to_examples(wav_path)
        except:
            print_exc()
            rm_file(mp3_path)
            rm_file(wav_path)   
            return response(400, "Unable to extract spectogram, file seems to be corrupted", 0, None)

        # Query TF Serving instance
        try:
            host = app.config["SERVING_URL"]
            port = app.config["SERVING_PORT"]
            channel = implementations.insecure_channel(host, port)
            stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)

            start = time.time()
            req = predict_pb2.PredictRequest()
            req.model_spec.name = app.config["MODEL_NAME"]
            req.model_spec.signature_name = tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
            req.inputs['inputs'].CopyFrom(make_tensor_proto(my_input, dtype=tf.float32))
            result = stub.Predict(req, 10.0)
            end = time.time()
            # print("Finished query after {}s".format(end - start))

            pred = [x for x in result.outputs["outputs"].float_val]
            size_vector = int(result.outputs["outputs"].tensor_shape.dim[0].size)
            num_classes = int(result.outputs["outputs"].tensor_shape.dim[1].size)
            # print("len(pred): {}, size_vector: {}, num_classes: {}".format(len(pred), size_vector, num_classes))

        except:
            print_exc()
            rm_file(mp3_path)
            rm_file(wav_path)   
            return response(500, "An error occured while trying to query Serving instance", 0, None)

        # Tuning
        try:
            out_array = np.zeros(num_classes)
            for i in range(0, len(pred), num_classes):
                for j in range(num_classes):
                    out_array[j] += pred[i+j] * pred[i+j]

            out_list = out_array.tolist()
            sorted_array = sorted(out_list)

            # 1st, 2nd, 3rd accumulated values
            first, second, third = sorted_array[-1], sorted_array[-2], sorted_array[-3]

            # 1st, 2nd, 3rd Train IDs
            first_bird, second_bird, third_bird = out_list.index(first), out_list.index(second), out_list.index(third)

            # Getting confidences
            sum_acc = sum([first, second, third])
            first_conf, second_conf, third_conf = first / sum_acc, second / sum_acc, third / sum_acc

            # print(first, second, third)
            # print(first_bird, second_bird, third_bird)
            # print(first_conf, second_conf, third_conf)
        except:
            print_exc()
            rm_file(mp3_path)
            rm_file(wav_path)
            return response(500, "An error occured while trying to calculate accuracies", 0, None)

        out = []
        try:
            out = [
                {"id": bird_id_map[train_id_list[first_bird]],
                "accuracy": first_conf},
                {"id": bird_id_map[train_id_list[second_bird]],
                "accuracy": second_conf},
                {"id": bird_id_map[train_id_list[third_bird]],
                "accuracy": third_conf}
            ]
        except KeyError:
            print_exc()
            rm_file(mp3_path)
            rm_file(wav_path)
            return response(400, "No clear bird could be identified", 0, None)

        except Exception:
            print_exc()
            rm_file(mp3_path)
            rm_file(wav_path)
            return response(500, "An error occured while trying to construct response", 0, None)

        rm_file(mp3_path)
        rm_file(wav_path)
        return response(200, "Detection successful", len(out), out)
num_tests = 100
host = ''
port = 8000
work_dir = '/tmp'


def _create_rpc_callback():
  def _callback(result):
    response = numpy.array(
        result.result().outputs['y'].float_val)
    prediction = numpy.argmax(response)
    print(prediction)
  return _callback


test_data_set = mnist.test
test_image = mnist.test.images[0]

predict_request = predict_pb2.PredictRequest()
predict_request.model_spec.name = 'mnist'
predict_request.model_spec.signature_name = 'prediction'

predict_channel = implementations.insecure_channel(host, int(port))
predict_stub = prediction_service_pb2.beta_create_PredictionService_stub(predict_channel)

predict_request.inputs['x'].CopyFrom(
    tf.contrib.util.make_tensor_proto(test_image, shape=[1, test_image.size]))
result = predict_stub.Predict.future(predict_request, 3.0)
result.add_done_callback(
    _create_rpc_callback())
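Because nothing blocks after add_done_callback, the script may exit before the RPC completes; one way to wait on the same future is simply to call its result() (a sketch; the RPC already carries the 3-second deadline set above):

# Block until the async Predict call finishes so the callback above gets to run.
result.result()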
Example #52
  async def __aenter__(self):
    self._channel = insecure_channel(self.endpoint)
    self._stub = prediction_service_pb2.beta_create_PredictionService_stub(self._channel)
    return self
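A matching __aexit__ presumably closes the channel when the context exits; a minimal sketch (it assumes the channel object exposes close(), as gRPC channels do):

  async def __aexit__(self, exc_type, exc, tb):
    # Close the underlying gRPC channel; do not suppress exceptions.
    self._channel.close()
    return False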
Example #53
def main(_):
  print(time.ctime())
  host, port = FLAGS.server.split(':')
  channel = implementations.insecure_channel(host, int(port))
  stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
  # Send request
  IMAGE_NAME = '/home/rice/tensorflow1/models/research/object_detection/test_image/'

  TEST_IMAGE_PATHS = glob.glob(os.path.join(IMAGE_NAME, '*.*'))


  for image_path in TEST_IMAGE_PATHS:
    print(image_path)

    image = Image.open(image_path)
    # the array based representation of the image will be used later in order to prepare the
    # result image with boxes and labels on it.
    image_np = load_image_into_numpy_array(image)
    image_np_expanded = np.expand_dims(image_np, axis=0)
    print(image_np_expanded.shape)

    request = predict_pb2.PredictRequest()
    request.model_spec.name = 'saved_model'
    request.model_spec.signature_name = 'serving_default'
    # request.inputs['inputs'].CopyFrom(
    #     tf.contrib.util.make_tensor_proto(image_np_expanded,shape=[1]))
    # print(time.ctime())
    # request.inputs['inputs'].CopyFrom(
    #     make_tensor_proto(image_np_expanded)
    # )

    # dims = [tensor_shape_pb2.TensorShapeProto.Dim(size=1)]
    # tensor_shape_proto = tensor_shape_pb2.TensorShapeProto(dim=dims)
    tensor_proto = tensor_pb2.TensorProto(
        dtype=types_pb2.DT_UINT8,
        tensor_shape=tensor_shape.as_shape(image_np_expanded.shape).as_proto(),
        # string_val=[open(image_path, 'rb').read()]
    )
    tensor_proto.tensor_content = image_np_expanded.tostring()

    request.inputs['inputs'].CopyFrom(tensor_proto)

    print(time.ctime())
    start = time.time()
    result = stub.Predict(request, 100.0)  # 100 secs timeout
    #print(result)
    print(time.time() - start)


    boxes = (result.outputs['detection_boxes'].float_val)
    classes = (result.outputs['detection_classes'].float_val)
    scores = (result.outputs['detection_scores'].float_val)

    box_np_arr = array([boxes[x:x + 4] for x in range(0, len(boxes), 4)])
    class_np_arr = array([classes[x:x + 1] for x in range(0, len(classes), 1)])
    score_np_arr = array([scores[x:x + 1] for x in range(0, len(scores), 1)])

    # print(type(box_np_arr),len(box_np_arr))
    # print(type(class_np_arr),len(classes))
    # print(type(class_np_arr),len(scores))

    # print(result)

    label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
    categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=14, use_display_name=True)
    category_index = label_map_util.create_category_index(categories)

    image_vis,result_box = vis_util.visualize_boxes_and_labels_on_image_array(
      image_np,
      np.squeeze(box_np_arr),
      np.squeeze(class_np_arr).astype(np.int32),
      np.squeeze(score_np_arr),
      category_index,
      use_normalized_coordinates=True,
      line_thickness=8,
      min_score_thresh=0.80)

    # Save the annotated image next to the source image
    scipy.misc.imsave('%s_detected.jpg' % os.path.splitext(image_path)[0], image_vis)

    for box,label_score in result_box.items():
        print(label_score,',',box)

    break
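The three list comprehensions above that rebuild the box/class/score arrays can also be written as plain numpy reshapes over the same flat float_val lists (a sketch):

box_np_arr = np.asarray(boxes, dtype=np.float32).reshape(-1, 4)      # one row per detection
class_np_arr = np.asarray(classes, dtype=np.float32).reshape(-1, 1)
score_np_arr = np.asarray(scores, dtype=np.float32).reshape(-1, 1)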
import hashlib

from grpc.beta import implementations
from multiprocessing import Pool, Queue, Manager, Array
from tensorflow_serving.apis import prediction_service_pb2
dense_columns = []
sparse_columns = ['uid', 'snsid', 'authorid']
used_columns = ['uid', 'snsid', 'authorid']
max_value_dict = {
    "communitylist": 10000,
    "u_communitylist": 10000,
    "snsid": 100000,
    "uid": 200000,
    "authorid": 200000
}
items = [{'uid': '100', 'snsid': 367511, 'authorid': 174415}]
host = 'localhost'
port = '9000'
channel = implementations.insecure_channel(host, int(port))
stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
import pandas as pd
testLog = pd.read_csv("laosiji_tmp_log_2018-06-01",
                      usecols=['uid', 'snsid', 'authorid', 'is_click'])
testset = (testLog.loc[testLog['is_click'] == 1]).pivot_table("is_click",
                                                              'uid',
                                                              'snsid',
                                                              fill_value=0)
testset['click_count'] = testset.apply(lambda x: sum(x), axis=1)
testset.sort_values(by="click_count", ascending=False, inplace=True)
del testset['click_count']


def hash_value(text):
    # md5-hash a string and return it as an integer (avoids shadowing the built-in str)
    md5value = hashlib.md5(text.encode('utf-8')).hexdigest()
    return int(md5value, 16)
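hash_value is presumably combined with max_value_dict to bucket the raw sparse ids before they are fed to the model; a sketch of that assumption using the sample item defined above:

def bucketize(item, max_value_dict):
    # Map each raw sparse id to a bounded integer bucket via its md5 hash.
    return {col: hash_value(str(item[col])) % max_value_dict[col]
            for col in sparse_columns}

print(bucketize(items[0], max_value_dict))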
Example #55
def create_stub():
  host, port = FLAGS.server.split(":")
  channel = implementations.insecure_channel(host, int(port))
  return prediction_service_pb2.beta_create_PredictionService_stub(channel)
def index():
    #host, port, image = parse_args()
    try:
        host = "localhost"
        port = 9000
        channel = implementations.insecure_channel(host, int(port))
        stub = prediction_service_pb2.beta_create_PredictionService_stub(
            channel)

        # dataset = input_data.read_data_sets('/tmp/Mnist', one_hot=True)
        # batch = dataset.train.next_batch(1)
        # print(batch[1])
        if (request.method == 'GET'):
            #img = [[0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,0,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]]
            numpyarray = np.array(img, np.float32)
            print("Expected result: %d" % (2))
        if (request.method == 'POST'):
            modelUrl = request.form.get('modelUrl')
            imgUrl = request.form.get('imageUrl')
            modelName = request.form.get('modelName')
            imageSize = request.form.get('imageSize')
            checkClearModelPath = 1
            modelFolder = "/home/ubuntu/model"
            if (os.listdir(modelFolder) != []):
                #checkClearModelPath = subprocess.check_call(["sudo", "rm", "-r", "/home/ubuntu/model/*"])
                for the_file in os.listdir(modelFolder):
                    file_path = os.path.join(modelFolder, the_file)
                    try:
                        if os.path.isfile(file_path):
                            os.unlink(file_path)
                        elif os.path.isdir(file_path):
                            shutil.rmtree(file_path)
                    except Exception as e:
                        raise Exception(str(e))

            #if(checkClearModelPath == 0):
            downloadModel = subprocess.check_call(
                ["wget", "-O", "/home/ubuntu/model/model.zip", modelUrl])
            if (downloadModel == 0):
                unzipModel = subprocess.check_call([
                    "unzip", "/home/ubuntu/model/model.zip", "-d",
                    "/home/ubuntu/model/"
                ])
                if (unzipModel == 0):
                    try:
                        thread.start_new_thread(runServingService,
                                                (modelName, ))
                    except Exception as e:
                        raise Exception(str(e))

            checkRunningProcess = ""
            while ("tensorflow_model_server" not in checkRunningProcess):
                checkRunningProcess = subprocess.check_output(['ps', '-au'])
                print(checkRunningProcess)

            time.sleep(1.5)
            downloadImg = subprocess.check_call(
                ["wget", "-O", "/home/ubuntu/serveimg/img.jpg", imgUrl])
            if (downloadImg == 0):
                print("downloaded image")
                img = cv2.imread('/home/ubuntu/serveimg/img.jpg')
                imgarray = []
                for i in range(0, len(img)):
                    for j in range(0, len(img[i])):
                        tmp = img[i][j]
                        px = 0
                        for item in tmp:
                            if (item > 5):
                                px = 1
                                break
                        imgarray.append(px)
                numpyarray = np.array(imgarray, np.float32)
            else:
                raise Exception("cannot download image")

        start = time.time()

        servingrequest = predict_pb2.PredictRequest()

        servingrequest.model_spec.name = modelName

        servingrequest.model_spec.signature_name = 'predict_images'

        #servingrequest.inputs['images'].CopyFrom(make_tensor_proto(numpyarray, shape=[1, 784]))
        servingrequest.inputs['images'].CopyFrom(
            make_tensor_proto(numpyarray, shape=[1, int(imageSize)]))

        result = stub.Predict(servingrequest, 60.0)  # 60 secs timeout

        end = time.time()

        time_diff = end - start

        print(result.outputs['scores'].float_val)
        print(result)

        print('time elapsed: {}'.format(time_diff))

        result_list = result.outputs['scores'].float_val

        # predicted class = index of the highest score
        num_result = int(np.argmax(result_list))

        return str(num_result)
    except Exception:
        # Log the traceback; the with block closes the file, so no explicit close() is needed.
        with open('/home/ubuntu/myproject/log.txt', 'a+') as f:
            f.write("\n\n Internal Server Error \n" + str(traceback.format_exc()))
        return "Internal Server Error", 500
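The unbounded ps-polling loop earlier in index() would be safer with a timeout; a standalone sketch of the same check:

import subprocess
import time

def wait_for_model_server(timeout_secs=60, poll_interval=0.5):
    """Poll ps until tensorflow_model_server appears, or give up after timeout_secs."""
    deadline = time.time() + timeout_secs
    while time.time() < deadline:
        if b"tensorflow_model_server" in subprocess.check_output(['ps', '-au']):
            return True
        time.sleep(poll_interval)
    return False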
Example #57
def main():
    # Connect to server
    client = InfluxDBClient(host=HOST,
                            port=PORT,
                            username=USER,
                            password=PASSWORD,
                            database=DBNAME)
    print("connect to Influxdb", DBNAME, HOST, PORT)
    # Time
    dt1 = datetime.now()
    dt1 = dt1 - timedelta(hours=6)
    dt = dt1.isoformat()
    # Query to database
    query = 'SELECT "value_gasflow", "value_eta_a", "value_ngp", "value_npt" FROM "example"."autogen"."unit3" WHERE time > now() - 4h'
    data = DataFrameClient(host=HOST,
                           username=USER,
                           password=PASSWORD,
                           database=DBNAME)
    dict_query = data.query(query)
    df = pandas.DataFrame(data=dict_query['unit3'])
    index = df.index
    empty_df = pandas.DataFrame(
        columns=['gas_fuel_flow', 'hpc_eta', 'ngp', 'npt', 'prediction'],
        index=index)
    empty_df.gas_fuel_flow = df['value_gasflow']
    empty_df.hpc_eta = df['value_eta_a']
    empty_df.ngp = df['value_ngp']
    empty_df.npt = df['value_npt']
    strafe = numpy.array(empty_df.values[:, 0:3])
    out_pp = numpy.float32(strafe)
    # Prepare request
    request = predict_pb2.PredictRequest()
    request.model_spec.name = 'deka'
    request.inputs['inputs'].dtype = types_pb2.DT_FLOAT
    request.inputs['inputs'].CopyFrom(
        tf.contrib.util.make_tensor_proto(out_pp))
    request.output_filter.append('outputs')
    # Send request
    host, port = FLAGS.server.split(':')
    channel = implementations.insecure_channel(host, int(port))
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
    prediction = stub.Predict(request, 5.0)  # 5 secs timeout
    floats = prediction.outputs['outputs'].float_val
    predicted_array = numpy.array(floats)
    empty_df.prediction = predicted_array
    #print(empty_df)
    json_body = [{
        "measurement": "prediction",
        "tags": {
            "type": "npt predict"
        },
        "time": dt,
        "fields": {
            "gas_fuel_flow": empty_df.gas_fuel_flow[-1],
            "hpc_eta": empty_df.hpc_eta[-1],
            "ngp": empty_df.ngp[-1],
            "npt": empty_df.npt[-1],
            "prediction": empty_df.prediction[-1]
        }
    }]
    client.write_points(json_body, database="example")
    #print(json_body)

    client.close()