Code Example #1
def WaitForServerReady(usersettings, host, port):
  # Inspired by https://github.com/tensorflow/serving/blob/master/tensorflow_serving/model_servers/tensorflow_model_server_test.py
  """Waits for a server on the localhost to become ready.

  Returns True if the server is ready, or False on timeout.

  Args:
      host: TensorFlow server address.
      port: port of the PredictionService.
  """
  # FIXME: the following import comes from the deprecated grpc.beta API:
  from grpc import implementations

  for _ in range(0, usersettings.wait_for_server_ready_int_secs):
    request = predict_pb2.PredictRequest()
    request.model_spec.name = 'server_not_real_model_name'

    try:
      # Send an empty request for a model that does not exist; a live server
      # still answers, and its error details tell us it is up.
      print('Trying to reach tensorflow-server {srv} on port {port} for {delay} seconds'.format(
          srv=host, port=port,
          delay=usersettings.wait_for_server_ready_int_secs))
      channel = implementations.insecure_channel(host, int(port))
      stub = prediction_service_pb2.PredictionServiceStub(channel)
      stub.Predict(request, 1)
    except face.AbortionError as error:
      # A missing-model error from a live server has details containing 'Servable'
      if 'Servable' in error.details:
        print('Server is ready')
        return True
      else:
        print('Error: ' + str(error.details))
    time.sleep(1)
  return False
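The FIXME above points at the deprecated grpc.beta API. As a minimal sketch, the same readiness check can be written against the modern grpc package, which exposes a channel-readiness future directly (the function name and structure here are illustrative, not from the original):

import grpc

def wait_for_server_ready_modern(host, port, timeout_secs):
    # Block until the channel reaches the READY state, or give up on timeout.
    channel = grpc.insecure_channel('{}:{}'.format(host, port))
    try:
        grpc.channel_ready_future(channel).result(timeout=timeout_secs)
        return True
    except grpc.FutureTimeoutError:
        return False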
Code Example #2
def grpc_predict_raw(input_data):
    host = 'localhost'  # assumed address; the original snippet relied on a module-level `host`
    port = 8500
    channel = grpc.insecure_channel('{host}:{port}'.format(host=host,
                                                           port=port))
    # channel = implementations.insecure_channel(host, int(port))  # deprecated grpc.beta API

    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
    request = predict_pb2.PredictRequest()
    request.model_spec.name = 'test_dnn_raw'
    request.model_spec.signature_name = "serving_default"

    # Build one TensorProto per model input, with explicit dtypes and shapes.
    tensor_protos = {
        'last_note_creators':
        tf.make_tensor_proto(input_data['last_note_creators'],
                             dtype=tf.string,
                             shape=[1, 50]),
        'last_note_ids':
        tf.make_tensor_proto(input_data['last_note_ids'],
                             dtype=tf.int64,
                             shape=[1, 50]),
        'note_open_id':
        tf.make_tensor_proto(input_data['note_open_id'], dtype=tf.string,
                             shape=[1]),
        'note_id':
        tf.make_tensor_proto(input_data['note_id'], dtype=tf.int64, shape=[1]),
        'note_video_duration':
        tf.make_tensor_proto(input_data['note_video_duration'],
                             dtype=tf.int64,
                             shape=[1])
    }
    for k in tensor_protos:
        request.inputs[k].CopyFrom(tensor_protos[k])

    response = stub.Predict(request, 5.0)
    print(response)
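The function only prints the raw protobuf. To get an output back as a NumPy array, tf.make_ndarray converts the returned TensorProto; the key 'output' below is hypothetical, so check response.outputs for the names your signature actually exposes:

scores = tf.make_ndarray(response.outputs['output'])  # 'output' is a hypothetical key
print(scores)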
Code Example #3
def main(argv=None):
    # GPU configuration
    # gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)
    # config = tf.ConfigProto(gpu_options=gpu_options,log_device_placement=False,)
    # Class names and video/image input
    classes = load_coco_names(args.class_names)

    vid = cv2.VideoCapture(args.input_video)
    video_frame_cnt = int(vid.get(cv2.CAP_PROP_FRAME_COUNT))  # total frames in the file (AVI: 10148); RTSP streams have no total-frame-count property
    timeF = 10  # frame-sampling interval (130 ms, used together with 2)
    fpsnum = int(vid.get(cv2.CAP_PROP_POS_FRAMES))  # 0-based index of the frame about to be captured or decoded
    if (fpsnum % timeF == 0):
        for i in range(video_frame_cnt):
            ret, img_ori = vid.read()
            if not ret:  # stop once no more frames can be read
                break
            # Letterbox padding
            img_ori = cv2.cvtColor(img_ori, cv2.COLOR_BGR2RGB)
            img_ori = Image.fromarray(img_ori)  # convert OpenCV image to PIL
            img_resized = letter_box_image(img_ori, img_ori.size[1], img_ori.size[0], args.size, args.size, 128)
            img_resized = img_resized.astype(np.float32)
            # Image interpolation (alternative resize path)
            # img = cv2.resize(img_ori, (args.size, args.size))
            # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # cv2 uses BGR channel order by default
            # img_resized = np.asarray(img, np.float32)
            # Encoding option 1
            # scipy.misc.imsave(args.temp_img, img_resized)
            # _, jpeg_bytes = base64_encode_img(args.temp_img)
            # Encoding option 2
            img_encode = cv2.imencode('.jpg', img_resized)[1]
            data_encode = np.array(img_encode)
            jpeg_bytes = data_encode.tobytes()  # tostring() is deprecated in NumPy
            start_time = time.time()
            # Server connection setup (creating the channel once, outside the loop, would avoid per-frame connection overhead)
            channel = grpc.insecure_channel(args.server)
            stub = prediction_service_pb2.PredictionServiceStub(channel)
            request = predict_pb2.PredictRequest()
            request.model_spec.name = 'yolov3_2'
            request.model_spec.signature_name = 'predict_images'
            # Send the request and wait for the server's reply
            request.inputs['images'].CopyFrom(tf.contrib.util.make_tensor_proto(jpeg_bytes, shape=[1]))
            response = stub.Predict(request, 10.0)
            # Unpack the returned tensors
            results = {}
            for key in response.outputs:
                tensor_proto = response.outputs[key]
                nd_array = tf.contrib.util.make_ndarray(tensor_proto)
                results[key] = nd_array
            detected_boxes = results['scores']
            # Non-maximum suppression
            filtered_boxes = non_max_suppression(detected_boxes, confidence_threshold=args.conf_threshold, iou_threshold=args.iou_threshold)
            end_time = time.time()
            difference_time = end_time - start_time  # network round-trip time
            # Draw detection boxes
            draw_boxes(filtered_boxes, img_ori, classes, (args.size, args.size), True)
            # Display the output image
            cv2charimg = cv2.cvtColor(np.array(img_ori), cv2.COLOR_RGB2BGR)  # convert PIL image back to OpenCV
            cv2.putText(cv2charimg, '{:.2f}ms'.format((difference_time) * 1000), (40, 40), 0,
                        fontScale=1, color=(0, 255, 0), thickness=2)
            cv2.imshow('image', cv2charimg)
            if cv2.waitKey(1) & 0xFF == ord('q'):  # press 'q' to quit
                break
Code Example #4
    def predict_mobilenet_batch(self, images_batch):
        stub = prediction_service_pb2.PredictionServiceStub(
            self.channel_served)
        request = predict_pb2.PredictRequest()
        request.model_spec.name = 'mobilenet'
        request.inputs['image'].CopyFrom(tf.make_tensor_proto(images_batch))
        result = stub.Predict(request, 20.0)
        proto_result = result.outputs['prediction']
        return tf.make_ndarray(proto_result)
Code Example #5
    def __init__(self, host_list, model_name, signature='default'):
        self.stubs = []
        self.host_list = host_list
        self.model_name = model_name
        self.signature_name = signature

        for host in host_list:
            channel = grpc.insecure_channel(host)
            stub = prediction_service_pb2.PredictionServiceStub(channel)
            self.stubs.append(stub)
        self.request = predict_pb2.PredictRequest()
        self.request.model_spec.name = model_name
        self.request.model_spec.signature_name = signature
Code Example #6
def main():
    channel = grpc.insecure_channel(FLAGS.server)
    stub = prediction_service_pb2.PredictionServiceStub(channel)
    request = predict_pb2.PredictRequest()
    request.model_spec.name = 'twice_plus_three'
    # request.model_spec.signature_name = 'serving_default'

    request.inputs['x'].dtype = types_pb2.DT_FLOAT
    request.inputs['x'].float_val.append(2.0)

    result_future = stub.Predict.future(request, 5.0)
    result = result_future.result()
    print(result)
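Predict.future returns immediately, so instead of blocking on result() a completion callback can be attached; the output tensor name 'y' below is hypothetical:

def handle_result(result_future):
    # Runs on a gRPC worker thread once the RPC completes.
    if result_future.exception() is None:
        print(result_future.result().outputs['y'].float_val)  # 'y' is hypothetical
    else:
        print('RPC failed: {}'.format(result_future.exception()))

result_future = stub.Predict.future(request, 5.0)
result_future.add_done_callback(handle_result)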
Code Example #7
def toy_demo():
    host = 'localhost'  # assumed address; the original snippet relied on a module-level `host`
    port = 8500
    channel = grpc.insecure_channel('{host}:{port}'.format(host=host,
                                                           port=port))
    # channel = implementations.insecure_channel(host, int(port))  # deprecated grpc.beta API

    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
    request = predict_pb2.PredictRequest()
    request.model_spec.name = 'half_plus_two'
    request.model_spec.signature_name = "serving_default"

    request.inputs["x"].CopyFrom(tf.make_tensor_proto([1.0], shape=[1]))

    response = stub.Predict(request, 5.0)
    print(response)
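Assuming the stock half_plus_two demo model, whose serving_default signature returns its result under 'y', the response unpacks directly from float_val; for an input of 1.0 the expected value is 0.5 * 1.0 + 2 = 2.5:

y = response.outputs['y'].float_val[0]  # 'y' per the standard half_plus_two signature
print(y)  # expected: 2.5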
Code Example #8
def run():
    token = 'Bearer eyJhbGciOiJSUzI1NiIsImtpZCI6ImxlZ2FjeS10b2tlbi1rZXkiLCJ0eXAiOiJKV1QifQ.eyJqdGkiOiI4Yzk3YWZjM2I4Mzk0N2E5OWVhMjMyNzE3Y2U3ZTFjMyIsInN1YiI6Im1scHRlc3RjbGllbnQiLCJhdXRob3JpdGllcyI6WyJtbHB0ZXN0Y2xpZW50Il0sInNjb3BlIjpbIm1scHRlc3RjbGllbnQiXSwiY2xpZW50X2lkIjoibWxwdGVzdGNsaWVudCIsImNpZCI6Im1scHRlc3RjbGllbnQiLCJhenAiOiJtbHB0ZXN0Y2xpZW50IiwiZ3JhbnRfdHlwZSI6ImNsaWVudF9jcmVkZW50aWFscyIsInJldl9zaWciOiJiNThjNWQ0ZiIsImlhdCI6MTQ4NjYzNDIzMSwiZXhwIjozNjMxMDgyMjMxLCJpc3MiOiJodHRwOi8vbG9jYWxob3N0OjgwODAvdWFhL29hdXRoL3Rva2VuIiwiemlkIjoidWFhIiwiYXVkIjpbIm1scHRlc3RjbGllbnQiXX0.nIl71Dxktizfb5B870Mlh_-62kN9_Wlda8WYbiz3iFaj22LzIUkQiRIAI57g3IwPXbJnJ1tlrf5_DIJpycRxzfxIZnW_GJW56sgY5L4mdPVHSIUHjeFh5v5tGwmOG6a1mYH_H0y8G-nHNolfSejcyvc4RYvcba4kS2nm-wDKKgfqDVaspM4Ktsa15eLHYn1P0LIUEsewTDm3qL_PgbJC3WKq_qgk02B5Or1n0doLkGBtccYlQEZ9lRixmkdov7_4Nl9UNTPgaYchC0AEaxd_RRCBK78FwC6tw3v1X3xJFXoYdJlMNOnTGdbQ4CVP5-Jd7gifPnUilPPPoJmITg0HZQ'
    metadata = [('authorization', token)]
    channel = grpc.insecure_channel("127.0.0.1:9000")
    stub = tensorflow__serving_dot_apis_dot_prediction_service__pb2.PredictionServiceStub(
        channel)

    request = tensorflow__serving_dot_apis_dot_predict__pb2.PredictRequest()
    request.model_spec.name = 'sum-model'
    request.model_spec.signature_name = "predict_doubles"
    request.model_spec.version.value = 1
    request.inputs["X"].CopyFrom(
        tf.contrib.util.make_tensor_proto([1, 45, 3, 4], shape=[4]))
    response = stub.Predict(request, metadata=metadata)
    print(response)
Code Example #9
def get_results(data, server, port):
    channel = grpc.insecure_channel(':'.join([server, port]))
    stub = prediction_service_pb2.PredictionServiceStub(channel)
    processed_data = process_data(data)

    results = []
    for input_x in processed_data:
        request = predict_pb2.PredictRequest()
        request.model_spec.name = 'spam_ham'
        request.model_spec.signature_name = 'predict_spam'  # Change to predict spam
        request.inputs['texts'].CopyFrom(tf.contrib.util.make_tensor_proto(input_x, shape=[4, 20]))  # 'texts'
        # Predict(request) blocks and returns the response itself; use .future
        # to get a future that supports .result()
        prediction_future = stub.Predict.future(request, 5.0)
        prediction = prediction_future.result().outputs['scores']
        # prediction = np.array(prediction_future.result().outputs['scores'].float_val)
        results.append(prediction)
    return results
Code Example #10
def grpc_predict_example(input_datas):
    host = 'localhost'  # assumed address; the original snippet relied on a module-level `host`
    port = 9000
    channel = grpc.insecure_channel('{host}:{port}'.format(host=host,
                                                           port=port))
    # channel = implementations.insecure_channel(host, int(port))  # deprecated grpc.beta API

    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
    request = predict_pb2.PredictRequest()
    request.model_spec.name = 'test_dnn'
    request.model_spec.signature_name = "serving_default"

    feature = {
        'last_note_creators':
        tf.train.Feature(bytes_list=tf.train.BytesList(
            value=input_datas['last_note_creators'])),
        'last_note_ids':
        tf.train.Feature(int64_list=tf.train.Int64List(
            value=input_datas['last_note_ids'])),
        'note_open_id':
        tf.train.Feature(bytes_list=tf.train.BytesList(
            value=[input_datas['note_open_id']])),
        'note_id':
        tf.train.Feature(int64_list=tf.train.Int64List(
            value=[input_datas['note_id']])),
        'note_video_duration':
        tf.train.Feature(float_list=tf.train.FloatList(
            value=[input_datas['note_video_duration']])),
    }

    print(feature)

    example_proto = tf.train.Example(features=tf.train.Features(
        feature=feature))
    serialize_example = example_proto.SerializeToString()
    batch_size = 200
    serialize_examples = [serialize_example] * batch_size
    request.inputs['examples'].CopyFrom(
        tf.make_tensor_proto(serialize_examples,
                             dtype=tf.string,
                             shape=[batch_size]))
    for _ in range(10):
        begin = time.time()
        response = stub.Predict(request, 5.0)
        end = time.time()
        print(end - begin)
        print(response.outputs["output"].float_val)
Code Example #11
def main(_):
    hostport, image, image_path = parse_args()
    print("The server hostport is: ", hostport)
    print("The image is: ", image)

    #start counting time
    start = time.time()

    #preprocessing
    images_np = np.zeros((1, 224, 224, 3), dtype=np.float32)
    img = load_img(image, target_size=(224, 224))
    images_np[0] = img_to_array(img)
    images_np = preprocess_input(images_np)

    #construct grpc request to model server
    channel = grpc.insecure_channel(hostport)
    stub = prediction_service_pb2.PredictionServiceStub(channel)
    request = predict_pb2.PredictRequest()
    request.model_spec.name = 'vgg16_bottleneck-features'
    request.model_spec.signature_name = 'predict_images'
    request.inputs['images'].CopyFrom(
        tf.contrib.util.make_tensor_proto(images_np,
                                          shape=list(images_np.shape)))
    result = stub.Predict(request, 60.0)

    #get result and postprocess
    # float_val is a flat list; reshape it to the bottleneck-feature shape
    images_predictions_np_b_f = np.array(
        result.outputs['scores_1'].float_val,
        dtype=np.float32).reshape((1, 7, 7, 512))
    print("The result of bottleneck features: ", images_predictions_np_b_f)

    images_predictions_np = np.zeros((1, 1000), dtype=np.float32)
    images_predictions_np[0] = result.outputs['scores_2'].float_val
    images_predictions_np = images_predictions_np.astype(float)
    images_predictions_list = decode_predictions(images_predictions_np, top=5)
    print("The result of predictions is: ", images_predictions_list)

    #end counting time
    end = time.time()
    time_diff = end - start
    print("Time elapsed: {}".format(time_diff))
Code Example #12
def run():
    token = 'Bearer eyJhbGciOiJSUzI1NiIsImtpZCI6ImxlZ2FjeS10b2tlbi1rZXkiLCJ0eXAiOiJKV1QifQ.eyJqdGkiOiI4Yzk3YWZjM2I4Mzk0N2E5OWVhMjMyNzE3Y2U3ZTFjMyIsInN1YiI6Im1scHRlc3RjbGllbnQiLCJhdXRob3JpdGllcyI6WyJtbHB0ZXN0Y2xpZW50Il0sInNjb3BlIjpbIm1scHRlc3RjbGllbnQiXSwiY2xpZW50X2lkIjoibWxwdGVzdGNsaWVudCIsImNpZCI6Im1scHRlc3RjbGllbnQiLCJhenAiOiJtbHB0ZXN0Y2xpZW50IiwiZ3JhbnRfdHlwZSI6ImNsaWVudF9jcmVkZW50aWFscyIsInJldl9zaWciOiJiNThjNWQ0ZiIsImlhdCI6MTQ4NjYzNDIzMSwiZXhwIjozNjMxMDgyMjMxLCJpc3MiOiJodHRwOi8vbG9jYWxob3N0OjgwODAvdWFhL29hdXRoL3Rva2VuIiwiemlkIjoidWFhIiwiYXVkIjpbIm1scHRlc3RjbGllbnQiXX0.nIl71Dxktizfb5B870Mlh_-62kN9_Wlda8WYbiz3iFaj22LzIUkQiRIAI57g3IwPXbJnJ1tlrf5_DIJpycRxzfxIZnW_GJW56sgY5L4mdPVHSIUHjeFh5v5tGwmOG6a1mYH_H0y8G-nHNolfSejcyvc4RYvcba4kS2nm-wDKKgfqDVaspM4Ktsa15eLHYn1P0LIUEsewTDm3qL_PgbJC3WKq_qgk02B5Or1n0doLkGBtccYlQEZ9lRixmkdov7_4Nl9UNTPgaYchC0AEaxd_RRCBK78FwC6tw3v1X3xJFXoYdJlMNOnTGdbQ4CVP5-Jd7gifPnUilPPPoJmITg0HZQ'
    metadata = [('authorization', token)]
    channel = grpc.insecure_channel("127.0.0.1:9000")
    stub = tensorflow__serving_dot_apis_dot_prediction_service__pb2.PredictionServiceStub(
        channel)

    request = tensorflow__serving_dot_apis_dot_classification__pb2.ClassificationRequest()
    request.model_spec.name = "func.pkl"
    request.model_spec.signature_name = "classify_x_to_y"

    example = request.input.example_list.examples.add()
    example.features.feature["sepal length (cm)"].float_list.value.extend(
        [5.1, 7.0, 6.3, 4.6])
    example.features.feature["sepal width (cm)"].float_list.value.extend(
        [3.5, 3.2, 3.3, 3.1])
    example.features.feature["petal length (cm)"].float_list.value.extend(
        [1.4, 4.7, 6.0, 1.5])
    example.features.feature["petal width (cm)"].float_list.value.extend(
        [0.2, 1.4, 2.5, 0.2])

    response = stub.Classify(request)
    print(response)
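The printed ClassificationResponse nests one Classifications entry per input Example, each holding (label, score) pairs, so the results can be walked like this:

for idx, classifications in enumerate(response.result.classifications):
    for cls in classifications.classes:
        print(idx, cls.label, cls.score)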
Code Example #13
def predict_window(text_predict, number_predict, window_size):
    """
    Generate a secuence to continue text_predict of len number_predict
    param:
        text_predict .- Init text
        number_predict .- Number chars to generate
        window_size .- The same that used in trainig
   return
        text_predict + number_predict chars
    """
    # Get values used in trainig
    chars_to_indices, indices_to_chars = load_coded_dictionaries()
    number_chars = len(chars_to_indices)
    # Clean input
    input_clean = clean_text(text_predict.lower())
    # Get the stub (note: IP is assumed to already end with ':', e.g. 'localhost:')
    channel = grpc.insecure_channel(IP + str(9000))
    stub = prediction_service_pb2.PredictionServiceStub(channel)
    # Call the service n times
    for i in range(number_predict):
        d = predict_one(input_clean[i:], stub, window_size, number_chars,
                        chars_to_indices, indices_to_chars)
        input_clean += d
    return input_clean
Code Example #14
def main(argv=None):
    # GPU configuration
    # gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)
    # config = tf.ConfigProto(gpu_options=gpu_options,log_device_placement=False,)
    # Class names and video/image input
    classes = load_coco_names(args.class_names)

    # Letterbox padding (alternative input path)
    # img = Image.open(args.input_img)
    # img_resized = letter_box_image(img, img.size[1], img.size[0], args.size, args.size, 128)
    # img_resized = img_resized.astype(np.float32)
    # Image interpolation (alternative input path)
    # img_ori = cv2.imread(args.input_img)
    # height_ori, width_ori = img_ori.shape[:2]
    # img = cv2.resize(img_ori, (args.size, args.size))
    # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # cv2 uses BGR channel order by default
    # img_resized = np.asarray(img, np.float32)

    img_ori = cv2.imread(args.input_img)
    img_ori = cv2.cvtColor(img_ori, cv2.COLOR_BGR2RGB)
    img = Image.fromarray(img_ori)  # convert OpenCV image to PIL
    img_resized = letter_box_image(img, img.size[1], img.size[0], args.size,
                                   args.size, 128)
    img_resized = img_resized.astype(np.float32)
    scipy.misc.imsave(args.temp_img, img_resized)
    _, jpeg_bytes = base64_encode_img(args.temp_img)

    # Server connection setup
    channel = grpc.insecure_channel(args.server)
    stub = prediction_service_pb2.PredictionServiceStub(channel)
    request = predict_pb2.PredictRequest()
    request.model_spec.name = 'yolov3_2'
    request.model_spec.signature_name = 'predict_images'
    # Send the request and wait for the server's reply
    t0 = time.time()
    request.inputs['images'].CopyFrom(
        tf.contrib.util.make_tensor_proto(jpeg_bytes, shape=[1]))
    response = stub.Predict(request, 10.0)
    # Unpack the returned tensors
    results = {}
    for key in response.outputs:
        tensor_proto = response.outputs[key]
        nd_array = tf.contrib.util.make_ndarray(tensor_proto)
        results[key] = nd_array
    detected_boxes = results['scores']
    # Non-maximum suppression
    filtered_boxes = non_max_suppression(
        detected_boxes,
        confidence_threshold=args.conf_threshold,
        iou_threshold=args.iou_threshold)
    # Draw detection boxes
    draw_boxes(filtered_boxes, img, classes, (args.size, args.size), True)
    # Display the output image
    plt.figure('Detection result')
    font = {
        'family': 'simhei',
        'weight': 'normal',
        'size': 18,
    }
    plt.title('Inference time: {:.2f}ms'.format((time.time() - t0) * 1000), font)
    plt.imshow(img)
    plt.show()
    print('done!')
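Both YOLO examples go through tf.contrib.util, which was removed in TensorFlow 2; the same helpers now live at the top level of tf, so a self-contained drop-in sketch for the two calls would be:

import tensorflow as tf  # TF2

proto = tf.make_tensor_proto(b'example-bytes', shape=[1])  # was tf.contrib.util.make_tensor_proto
array = tf.make_ndarray(proto)                             # was tf.contrib.util.make_ndarray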
Code Example #15
    def predict(self,
                request_data,
                model_name,
                model_version,
                signature_name='',
                request_timeout=5):
        """
        :param request_data:
        :param model_name:
        :param model_version:
        :param signature_name:
        :param request_timeout:
        :return:
        """

        self.logger.info('Sending request to the model')
        self.logger.info('HOST: {} / PORT: {}'.format(
            self.host_port.split(":")[0],
            self.host_port.split(":")[-1]))
        self.logger.info('MODEL: {}'.format(model_name))
        self.logger.info('MODEL VERSION: {}'.format(model_version))

        # Create the connection
        # ---------------------------------------------------

        # Create the channel
        channel_time = time.time()
        channel = grpc.insecure_channel(self.host_port)
        self.logger.debug(
            'Established insecure channel in {}s'.format(time.time() -
                                                         channel_time))

        # Create Stub
        stub_time = time.time()
        stub = prediction_service_pb2.PredictionServiceStub(channel)
        self.logger.debug('Created stub in {}s'.format(time.time() -
                                                       stub_time))

        # Initialise a request
        # ---------------------------------------------------
        request_time = time.time()
        request = predict_pb2.PredictRequest()
        self.logger.debug('Created request in {}s'.format(time.time() -
                                                          request_time))

        # Specify request arguments
        # ---------------------------------------------------
        request.model_spec.name = model_name

        if isinstance(model_version, int) and model_version > 0:
            request.model_spec.version.value = model_version

        if signature_name != '':
            request.model_spec.signature_name = signature_name

        # Set inputs
        # ---------------------------------------------------
        input_time = time.time()
        for field in request_data:
            tensor_proto = make_tensor_proto(field['data'], field['dtype'])
            request.inputs[field['tensor_name']].CopyFrom(tensor_proto)

        self.logger.debug('Made tensor protos in {}s'.format(time.time() -
                                                             input_time))

        # Send the request
        # ---------------------------------------------------

        try:
            send_request_time = time.time()
            predict_response = stub.Predict(request, timeout=request_timeout)

            self.logger.debug(
                'Time to get response: {}'.format(time.time() -
                                                  send_request_time))

            return predict_response

        except RpcError as e:
            self.logger.error(e)
            self.logger.error('Prediction failed!')
            return {"error": "request failed!"}
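A usage sketch, assuming the surrounding class (its name and constructor are hypothetical) is built around a host:port string and a logger:

client = TFServingClient('localhost:8500')  # hypothetical class name and address
response = client.predict(
    request_data=[{'tensor_name': 'x', 'data': [1.0, 2.0], 'dtype': tf.float32}],
    model_name='my_model',  # hypothetical model
    model_version=1,
    signature_name='serving_default',
    request_timeout=5)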
Code Example #16
"""
The model is fed with the data from 2018-05-04 through 2018-06-08.
The expected result is the temperature on 2018-05-09.
"""
zeitraum_vergangenheit = 120
zeitraum_zukunft = 24
test_x = np.expand_dims(stuendlich[:zeitraum_vergangenheit], axis=0)
test_y = stuendlich[zeitraum_vergangenheit + zeitraum_zukunft][1]

#%%
"""
Die Verbindungsdaten zum TensorFlow-Serving ModelServer
"""
host = "localhost"
port = "9000"
channel = grpc.insecure_channel('{}:{}'.format(host, port))
stub = prediction_service_pb2.PredictionServiceStub(channel)

#%%
status = get_model_metadata_pb2.GetModelMetadataRequest()
status.model_spec.name = 'weather'
status.model_spec.version.value = 4
status.metadata_field.append("signature_def")
response = stub.GetModelMetadata(status, 10)
print(response)

#%%
from tensorflow_serving.apis import predict_pb2
from tensorflow.contrib.util import make_tensor_proto

request = predict_pb2.PredictRequest()
request.model_spec.name = 'weather'
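The excerpt ends before the request is populated. A sketch of how it might continue from the variables above; the input tensor name 'input' is an assumption, so read the real name from the signature_def printed by GetModelMetadata:

request.model_spec.version.value = 4
# 'input' is an assumed tensor name; take the real one from the metadata above.
request.inputs['input'].CopyFrom(
    make_tensor_proto(test_x, dtype=np.float32, shape=list(test_x.shape)))
response = stub.Predict(request, 10.0)
print(response)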
Code Example #17
    def __init__(self, hostport, model_name, timeout):
        # TODO(lamberce): look into if this needs to be secure
        channel = grpc.insecure_channel(hostport)
        self._stub = prediction_service_pb2.PredictionServiceStub(channel)
        self._model_name = model_name
        self._timeout = timeout
Code Example #18
File: server.py  Project: sellpy/stockholm-ai
def create_gprc_client(host):
    """Simple wrapper."""
    channel = grpc.insecure_channel(host)
    stub = prediction_service_pb2.PredictionServiceStub(channel)
    request = predict_pb2.PredictRequest()
    return stub, request
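A quick usage sketch; the address, model name, and input tensor name are hypothetical:

stub, request = create_gprc_client('localhost:8500')  # hypothetical address
request.model_spec.name = 'my_model'  # hypothetical model
request.inputs['x'].CopyFrom(tf.make_tensor_proto([1.0], shape=[1]))  # hypothetical input
response = stub.Predict(request, 5.0)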