Example #1
def do_inference(hostport, work_dir, concurrency, num_tests):
    """Tests PredictionService with concurrent requests.

  Args:
    hostport: Host:port address of the PredictionService.
    work_dir: The full path of working directory for test data set.
    concurrency: Maximum number of concurrent requests.
    num_tests: Number of test images to use.

  Returns:
    The classification error rate.

  Raises:
    IOError: An error occurred processing test data set.
  """

    #test_data_set = mnist_input_data.read_data_sets(work_dir).test
    test_data_set = DataReadAndNegSamp(file_input='./20190623.ID.test')
    test_data_set = test_data_set.train_data.values
    print('test_data_set.shape:', test_data_set.shape, type(test_data_set))
    channel = grpc.insecure_channel(hostport)
    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
    pred_res = []
    real_res = []
    for index in range(int(num_tests / concurrency)):
        request = predict_pb2.PredictRequest()
        request.model_spec.name = 'seq_model'
        request.model_spec.signature_name = 'pred_class'
        #request.model_spec.signature_name = 'pred_prob' ## here used 'input'
        cur_data = test_data_set[index * concurrency:(index + 1) * concurrency]
        cur_label = cur_data[:, 0]
        cur_feature = cur_data[:, 1:].reshape((-1, 13))
        #print ('cur_data.shape:', cur_data.shape, type(cur_data))
        #print ('cur_label.shape:', cur_label.shape, type(cur_label))
        #print ('cur_feature.shape:', cur_feature.shape, type(cur_feature))
        #image, label = test_data_set[index]
        #request.inputs['input'].CopyFrom(
        request.inputs['input_'].CopyFrom(
            tf.contrib.util.make_tensor_proto(values=cur_feature,
                                              dtype=tf.int32,
                                              shape=cur_feature.shape))
        #tf.contrib.util.make_tensor_proto(image[0], shape=[1, image[0].size]))
        response = stub.Predict(request, 5.0)
        #print ('respose:', response, type(response))
        results = tf.contrib.util.make_ndarray(response.outputs['output_'])
        pred_res.append(list(results))
        real_res.append(list(cur_label))
        # Note: the SavedModel can expose multiple outputs in one signature_def:
        # results = {}
        # for key in response.outputs:
        #     tensor_proto = response.outputs[key]
        #     nd_array = tf.contrib.util.make_ndarray(tensor_proto)
        #     results[key] = nd_array
        # for key, values in results.items():
        #     print('in result:', key, values)
    pred_res = np.array(pred_res).reshape((-1, 1))
    real_res = np.array(real_res).reshape((-1, 1))
    res = np.concatenate((real_res, pred_res), axis=1)
    print('real-label\t pred-label \n', res)
    return np.sum(np.equal(res[:, 0], res[:, 1])) / res.shape[0]
Example #2
def get_interactive_infer_results(model, model_in):
    fetches = [
        model.get_data_layer().input_tensors,
        model.get_output_tensors(),
    ]

    feed_dict = model.get_data_layer().create_feed_dict(model_in)

    # inputs, outputs = sess.run(fetches, feed_dict=feed_dict)

    # export_path = "/tmp/speech2text/0"
    # print('Exporting trained model to', export_path)

    # builder = tf.saved_model.builder.SavedModelBuilder(export_path)
    # # Define input tensors
    # audio = tf.saved_model.utils.build_tensor_info(
    #     model.get_data_layer().input_tensors["source_tensors"][0])
    # audio_length = tf.saved_model.utils.build_tensor_info(
    #     model.get_data_layer().input_tensors["source_tensors"][1])
    # x_id = tf.saved_model.utils.build_tensor_info(
    #     model.get_data_layer().input_tensors["source_ids"][0])

    # # Define output tensors
    # # decoded_sequence = tf.saved_model.utils.build_tensor_info(
    # #     model.get_output_tensors()[0])

    # # prediction_signature = (
    # #     tf.saved_model.signature_def_utils.build_signature_def(
    # #         inputs={'audio': audio, 'audio_length': audio_length, 'x_id': x_id},
    # #         outputs={'decoded_sequence': decoded_sequence},
    # #         method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME))

    # indices_decoded_sequence = tf.saved_model.utils.build_tensor_info(
    #     model.get_output_tensors()[0].indices)
    # values_decoded_sequence = tf.saved_model.utils.build_tensor_info(
    #     model.get_output_tensors()[0].values)
    # dense_shape_decoded_sequence = tf.saved_model.utils.build_tensor_info(
    #     model.get_output_tensors()[0].dense_shape)

    # prediction_signature = (
    #     tf.saved_model.signature_def_utils.build_signature_def(
    #         inputs={'audio': audio, 'audio_length': audio_length, 'x_id': x_id},
    #         outputs={'indices_decoded_sequence': indices_decoded_sequence,
    #                  'values_decoded_sequence': values_decoded_sequence,
    #                  'dense_shape_decoded_sequence': dense_shape_decoded_sequence},
    #         method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME))

    # builder.add_meta_graph_and_variables(
    #     sess, [tf.saved_model.tag_constants.SERVING],
    #     signature_def_map={
    #         'predict_output':
    #             prediction_signature,
    #     },
    #     main_op=tf.tables_initializer(),
    #     strip_default_attrs=True)

    # builder.save()

    audio = feed_dict[
        model.get_data_layer().input_tensors["source_tensors"][0]]
    audio_length = feed_dict[
        model.get_data_layer().input_tensors["source_tensors"][1]]
    x_id = feed_dict[model.get_data_layer().input_tensors["source_ids"][0]]

    print('audio shape: ', audio.shape)
    print('audio_length shape: ', audio_length.shape)

    # inputs, outputs = sess.run(fetches, feed_dict=feed_dict)

    channel = grpc.insecure_channel('0.0.0.0:8500')
    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
    request = predict_pb2.PredictRequest()
    request.model_spec.name = 'speech2text'
    request.model_spec.signature_name = 'predict_output'
    request.inputs['audio'].CopyFrom(
        tf.contrib.util.make_tensor_proto(audio, shape=list(audio.shape)))
    request.inputs['audio_length'].CopyFrom(
        tf.contrib.util.make_tensor_proto(audio_length,
                                          shape=list(audio_length.shape)))
    request.inputs['x_id'].CopyFrom(
        tf.contrib.util.make_tensor_proto(x_id, shape=list(x_id.shape)))

    result_future = stub.Predict.future(request, 5.0)  # 5 seconds
    exception = result_future.exception()
    if exception:
        print(exception)
    else:
        print('Result returned from rpc')

    inputs = model.get_data_layer().input_tensors
    indices_decoded_sequence = tensor_util.MakeNdarray(
        result_future.result().outputs['indices_decoded_sequence'])
    values_decoded_sequence = tensor_util.MakeNdarray(
        result_future.result().outputs['values_decoded_sequence'])
    dense_shape_decoded_sequence = tensor_util.MakeNdarray(
        result_future.result().outputs['dense_shape_decoded_sequence'])

    outputs = tf.SparseTensorValue(indices=indices_decoded_sequence,
                                   values=values_decoded_sequence,
                                   dense_shape=dense_shape_decoded_sequence)

    outputs = [outputs]

    return model.infer(inputs, outputs)
Example #3
def main(req: func.HttpRequest, context: func.Context) -> func.HttpResponse:
    _NAME = 'image'

    event_id = context.invocation_id
    logging.info(
        f"Python humanpose function start process.\nID:{event_id}\nBack-end server host: {_HOST}:{_PORT}"
    )

    try:
        method = req.method
        url = req.url
        files = req.files[_NAME]

        if method != 'POST':
            logging.warning(
                f'ID:{event_id}, the method was {method}. Refused.')
            return func.HttpResponse('only accept POST method',
                                     status_code=400)

        if files:
            if files.content_type != 'image/jpeg':
                logging.warning(
                    f'ID:{event_id}, the file type was {files.content_type}. Refused.'
                )
                return func.HttpResponse('only accept jpeg images',
                                         status_code=400)

            # pre-processing
            # get the image binary from the request
            img_bin = files.read()
            img = to_pil_image(img_bin)
            img_cv_copied = cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)
            # w,h = 256,256
            img = resize(img)
            img_np = np.array(img)
            img_np = img_np.astype(np.float32)
            # hwc > bchw [1,3,256,256]
            img_np = transpose(img_np)
            # print(img_np.shape)

            request = predict_pb2.PredictRequest()
            request.model_spec.name = 'object-detection'
            request.inputs["image"].CopyFrom(
                make_tensor_proto(img_np, shape=img_np.shape))
            # send to infer model by grpc
            start = time()
            channel = grpc.insecure_channel("{}:{}".format(_HOST, _PORT))
            stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
            result = stub.Predict(request, timeout=10.0)
            res = make_ndarray(result.outputs["detection_out"])

            logging.warning(f'OutputType:{type(result)}')

            # Reshape the results ndarray from [1, 1, N, 7] to [N, 7],
            # where N is the number of detected bounding boxes
            detections = res.reshape(-1, 7)

            img_copied = np.copy(img_cv_copied)
            image_h, image_w = img_copied.shape[:2]
            for i, detection in enumerate(detections):

                _, class_id, confidence, xmin, ymin, xmax, ymax = detection

                if confidence > 0.5:

                    xmin = int(xmin * image_w)
                    ymin = int(ymin * image_h)
                    xmax = int(xmax * image_w)
                    ymax = int(ymax * image_h)

                    #logging.info(f'Found: label = {label}, confidence = {confidence:.2f}, ' f'coords = ({xmin}, {ymin}), ({xmax}, {ymax})')

                    # Draw a bounding box on the output image
                    cv2.rectangle(img_copied, (xmin, ymin), (xmax, ymax),
                                  (0, 255, 0), 2)

            timecost = time() - start
            logging.info(f"Inference complete,Takes{timecost}")

            # post processing
            response_image = img_copied

            imgbytes = cv2.imencode('.jpg', response_image)[1].tobytes()
            MIMETYPE = 'image/jpeg'

            return func.HttpResponse(body=imgbytes,
                                     status_code=200,
                                     mimetype=MIMETYPE,
                                     charset='utf-8')

        else:
            logging.warning(f'ID:{event_id}, failed to get image.')
            return func.HttpResponse('no image files', status_code=400)

    except grpc.RpcError as e:
        status_code = e.code()
        if "DEADLINE_EXCEEDED" in status_code.name:
            logging.error(e)
            return func.HttpResponse(f'the grpc request timeout',
                                     status_code=408)
        else:
            logging.error(f"grpcError:{e}")
            return func.HttpResponse(f'Failed to get grpcResponse',
                                     status_code=500)

    except Exception as e:
        logging.error(f"Error:{e}\n\
                        url:{url}\n\
                        method:{method}\n")
        return func.HttpResponse('Service error. Check the log.',
                                 status_code=500)
Example #4
def main():

    credentials = grpc.ssl_channel_credentials(
        root_certificates=ROOT_CERT.encode())
    channel = grpc.secure_channel(
        '{}:{}'.format(MODEL_SERVER_HOST, MODEL_SERVER_PORT), credentials)
    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)

    # get the sentences of input
    sentences = globals.request.form.to_dict()

    # convert single sentence to feature
    tokenizer = tokenization.FullTokenizer(vocab_file=VOCAB_FILE_PATH,
                                           do_lower_case=True)

    # Construct the request to tensorflow serving
    request = predict_pb2.PredictRequest()
    request.model_spec.name = MODEL_NAME
    request.model_spec.signature_name = 'serving_default'

    results = {}
    for key, sentence in sentences.items():
        example = run_classifier.InputExample(
            guid="test-0",
            text_a=tokenization.convert_to_unicode(sentence),
            text_b=None,
            label=LABELS_LIST[0])
        feature = run_classifier.convert_single_example(
            0, example, LABELS_LIST, MAX_SEQ_LENGTH, tokenizer)

        # get the input of model
        input_ids = np.reshape([feature.input_ids], (1, MAX_SEQ_LENGTH))
        input_mask = np.reshape([feature.input_mask], (1, MAX_SEQ_LENGTH))
        segment_ids = np.reshape([feature.segment_ids], (1, MAX_SEQ_LENGTH))
        label_ids = [feature.label_id]

        # Package the inputs into the request. Note that the input format
        # must follow the model's signature.
        request.inputs['input_ids'].CopyFrom(
            tf.contrib.util.make_tensor_proto(input_ids,
                                              shape=[1, MAX_SEQ_LENGTH],
                                              dtype=tf.int32))
        request.inputs['input_mask'].CopyFrom(
            tf.contrib.util.make_tensor_proto(input_mask,
                                              shape=[1, MAX_SEQ_LENGTH],
                                              dtype=tf.int32))
        request.inputs['label_ids'].CopyFrom(
            tf.contrib.util.make_tensor_proto(label_ids,
                                              shape=[1],
                                              dtype=tf.int32))
        request.inputs['segment_ids'].CopyFrom(
            tf.contrib.util.make_tensor_proto(segment_ids,
                                              shape=[1, MAX_SEQ_LENGTH],
                                              dtype=tf.int32))

        # do predict
        result = stub.Predict(
            request, 100, metadata=metadata_transformer())  # 100 secs timeout

        # parse the result
        probabilities_tensor_proto = result.outputs["probabilities"]
        probabilities = list(probabilities_tensor_proto.float_val)
        probabilities_np = np.array(probabilities)
        top3_index_np = probabilities_np.argsort()[-3:][::-1]
        probabilities_top3 = probabilities_np[top3_index_np]
        label_top3 = np.array(LABELS_LIST)[top3_index_np]
        # shape = tf.TensorShape(probabilities_tensor_proto.tensor_shape)
        # probabilities = np.array(probabilities_tensor_proto.float_val).reshape(
        #     shape.as_list())
        result_list = []
        for index in range(3):
            result_list.append({
                "label": label_top3[index],
                "score": str(probabilities_top3[index])
            })
        results[key] = result_list
    return Response(json.dumps(results), mimetype='application/json')
Example #5
def do_inference(hostport, work_dir, concurrency, num_tests):
    #test_data_set = DataRead('../data/seq.train.data.20191127.part')
    #test_data_set = DataRead('../data/seq.test.data.20191128.part')
    test_data_set = DataReadRaw(
        file_input='../data/cur.day.testdata',
        seq_len=seq_len,
        feat_len=user_len + seq_len * item_len)
    test_data_set = test_data_set.data_read.values
    print(getNow(), 'test_set.shape', test_data_set.shape, type(test_data_set))
    channel = grpc.insecure_channel(hostport)
    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
    pred_res = []
    real_res = []
    pred_label = []
    pred_logits = []
    pred_logits_ = []
    atime = []
    atime1 = []

    request = predict_pb2.PredictRequest()
    request.model_spec.name = 'seq_model'  ## model name
    request.model_spec.signature_name = 'predictByRaw'  ## signature_def_map ##

    for index in range(int(num_tests / concurrency)):
        cur_data = test_data_set[index * concurrency:(index + 1) * concurrency]
        print('cur_data.shape=', cur_data.shape)
        cur_label = cur_data[:, 0:seq_len]
        cur_feature = cur_data[:, seq_len:].reshape(
            (-1, user_len + seq_len * item_len))
        #print ('index=', index, 'cur_feature.shape=', cur_feature.shape, 'cur_feature=', cur_feature)
        request.inputs['input'].CopyFrom(
            tf.contrib.util.make_tensor_proto(values=cur_feature,
                                              dtype=tf.string,
                                              shape=cur_feature.shape))
        btime = time.time()
        response = stub.Predict(request, 5.0)
        etime = time.time()
        atime.append(etime - btime)

        try:
            results = tf.contrib.util.make_ndarray(
                response.outputs['predict_prob'])
            results1 = tf.contrib.util.make_ndarray(
                response.outputs['predict_label'])
            results2 = tf.contrib.util.make_ndarray(
                response.outputs['predict_logits'])
            #results3 = tf.contrib.util.make_ndarray(response.outputs['predict_logits_'])
            results3 = tf.contrib.util.make_ndarray(
                response.outputs['predict_logits_no'])
        except Exception:
            continue
        atime1.append(etime - btime)

        #print ('index=', index, 'result=', results)
        pred_res.append(list(results))
        real_res.append(list(cur_label))
        pred_label.append(list(results1))
        pred_logits.append(list(results2))
        pred_logits_.append(list(results3))

    pred_res = np.array(pred_res).reshape((-1, 14))
    real_res = np.array(real_res).reshape((-1, 14))
    pred_label = np.array(pred_label).reshape((-1, 14))
    pred_logits = np.array(pred_logits).reshape((-1, 14))
    pred_logits_ = np.array(pred_logits_).reshape((-1, 14))

    count, e_count, p_count, n_count = 0, 0, 0, 0
    e_p_count, e_n_count = 0, 0
    index_zero_p_count = 0
    index_zero_p_pred = 0
    index_zero_n_count = 0
    index_zero_n_pred = 0

    for r in range(pred_res.shape[0]):
        for z in range(14):
            count += 1
            if real_res[r][z] == pred_label[r][z]:
                e_count += 1
            if real_res[r][z] == 1:
                p_count += 1
                if real_res[r][z] == pred_label[r][z]:
                    e_p_count += 1
            if real_res[r][z] == 0:
                n_count += 1
                if real_res[r][z] == pred_label[r][z]:
                    e_n_count += 1

        if real_res[r][0] == 1:
            index_zero_p_count += 1
            if pred_label[r][0] == 1:
                index_zero_p_pred += 1
        else:
            index_zero_n_count += 1
            if pred_label[r][0] == 0:
                index_zero_n_pred += 1

        print('real_label', '\t'.join([str(int(x)) for x in real_res[r]]))
        print('pred_label', '\t'.join([str(int(x)) for x in pred_label[r]]))
        print('pred_logits',
              '\t'.join([str(round(x, 3)) for x in pred_logits[r]]))
        print('pred_prob', '\t'.join([str(round(x, 3)) for x in pred_res[r]]))
        print('pred_logits_',
              '\t'.join([str(round(x, 3)) for x in pred_logits_[r]]))
        print(
            'pred_logits_[prob]', '\t'.join([
                str(round(1 / (1 + math.exp(-x)), 4)) for x in pred_logits_[r]
            ]))
        print('===' * 40)
    print('equal_count/count=', round(e_count / count * 1.0, 4), e_count,
          count)
    print('positive equal_count/count=', round(e_p_count / p_count * 1.0, 4),
          e_p_count, p_count)
    print('negative equal_count/count=', round(e_n_count / n_count * 1.0, 4),
          e_n_count, n_count)
    print('index-0, pred-1/label-1',
          round(index_zero_p_pred / index_zero_p_count * 1.0, 4),
          index_zero_p_pred, index_zero_p_count)
    print('index-0, pred-0/label-0',
          round(index_zero_n_pred / index_zero_n_count * 1.0, 4),
          index_zero_n_pred, index_zero_n_count)
    print('avg-time[all]:', np.mean(atime))
    print('avg-time[success]:', np.mean(atime1))
Example #6
def img_to_emb_feature(images, channel):
    # print(img.shape)
    img = []
    for i in range(len(images)):
        img.append(prewhiten(images[i]))

    img = np.stack(img)
    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
    request = predict_pb2.PredictRequest()

    request.model_spec.name = 'facenet'
    request.model_spec.signature_name = 'calculate_embeddings'
    # print('request ', request)
    request.inputs['images'].CopyFrom(
        tf.contrib.util.make_tensor_proto(img, dtype=tf.float32))
    request.inputs['phase'].CopyFrom(tf.contrib.util.make_tensor_proto(False))
    # print('========================')
    result_tmp = stub.Predict(request, 15.0)  # 15 secs timeout
    # print(result_tmp)

    result = result_tmp.outputs['embeddings'].float_val
    # request.model_spec.name = 'facenet'
    # request.model_spec.signature_name = tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
    # request.inputs['input'].CopyFrom(
    #     tf.contrib.util.make_tensor_proto(img, shape=[1, img.shape[1], img.shape[2], img.shape[3]]))
    # result = stub.Predict(request, 10.0)  # 10 secs timeout
    # print("result: ", result)

    # boxes = np.array(result.outputs['embeddings'].float_val).reshape(
    #     result.outputs['detection_boxes'].tensor_shape.dim[0].size,
    #     result.outputs['detection_boxes'].tensor_shape.dim[1].size,
    #     result.outputs['detection_boxes'].tensor_shape.dim[2].size
    # )
    #
    # scores = np.array(result.outputs['detection_scores'].float_val)
    # detection_classes = np.array(result.outputs['detection_classes'].float_val)
    #
    # # num_detections = np.array(result.outputs['num_detections'].float_val)
    # boxes = np.squeeze(boxes)
    # scores = np.squeeze(scores)
    # height, width = img.shape[:2]
    #
    # pts_box = []
    # pts = None
    # door_img = None
    # scores_max = 0
    # detection_class = None
    # for i in range(boxes.shape[0]):
    #     if (scores[i] > 0.5) and (scores[i] > scores_max):
    #         scores_max = scores[i]
    #         ymin, xmin, ymax, xmax = boxes[i]
    #         ymin = int(ymin * height)
    #         ymax = int(ymax * height)
    #         xmin = int(xmin * width)
    #         xmax = int(xmax * width)
    #
    #         pts = np.array([xmin, ymin, xmax, ymax])
    #
    #         detection_class = detection_classes[i]
    #
    # # channel.close()
    return result
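A usage note on the value returned above: float_val is a flat repeated field holding all the embeddings back to back, so callers typically reshape it to one row per image. A minimal sketch, assuming a 512-dimensional FaceNet embedding (the dimension is an assumption; verify it against the served model):

import numpy as np

EMB_SIZE = 512  # assumed embedding size; check the model's signature

flat = img_to_emb_feature(images, channel)
embeddings = np.array(flat).reshape(len(images), EMB_SIZE)
# e.g. L2 distance between the first two face embeddings
dist = np.linalg.norm(embeddings[0] - embeddings[1])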
Example #7
def transform_view():
    from flask import request
    csv = request.files['file']
    csv.save("iBeacon_RSSI_Unlabeled.csv")
    BLE_RSSI_UL = pd.read_csv("iBeacon_RSSI_Unlabeled.csv",
                              encoding='utf8')  # Unlabeled dataset
    COLUMNS = list(BLE_RSSI_UL.columns)
    FEATURES = COLUMNS[2:]
    LABEL = [COLUMNS[0]]

    # Data Preprocessing
    df_predict = BLE_RSSI_UL  # Unlabeled dataset
    df_predict = df_predict.drop(['date', 'location'], axis=1)
    df_predict_original = df_predict.copy()
    # df_predict[FEATURES] = (df_predict[FEATURES] - df_predict[FEATURES].mean()) / df_predict[FEATURES].std()
    df_predict[FEATURES] = (df_predict[FEATURES]) / (-200)

    max_rows = 2800
    dataframes = []
    dataframes_original = []
    while len(df_predict) > max_rows:
        top_original = df_predict_original[:max_rows]
        top = df_predict[:max_rows]
        dataframes.append(top)
        dataframes_original.append(top_original)
        df_predict = df_predict[max_rows:]
        df_predict_original = df_predict_original[max_rows:]
    # append the remaining rows (fewer than max_rows)
    dataframes.append(df_predict)
    dataframes_original.append(df_predict_original)

    server = str(TF_MODEL_SERVER_HOST) + ":" + str(TF_MODEL_SERVER_PORT)
    final_df = pd.DataFrame(columns=[
        'b3001', 'b3002', 'b3003', 'b3004', 'b3005', 'b3006', 'b3007', 'b3008',
        'b3009', 'b3010', 'b3011', 'b3012', 'b3013', 'Location', 'Probability'
    ])
    for i, j in zip(dataframes, dataframes_original):
        j.index = pd.RangeIndex(len(i.index))
        examples = []
        for index, row in i.iterrows():
            example = tf.train.Example()
            for col, value in row.iteritems():
                example.features.feature[col].float_list.value.append(value)
            examples.append(example)


        channel = grpc.insecure_channel(server)
        stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
        request = classification_pb2.ClassificationRequest()
        request.model_spec.name = 'Model_Blerssi'
        request.model_spec.signature_name = 'serving_default'
        request.input.example_list.examples.extend(examples)
        response = stub.Classify(request, 10.0)

        outputs = j.copy()
        for index, row in outputs.iterrows():
            max_class = max(response.result.classifications[index].classes,
                            key=lambda c: c.score)
            outputs.loc[index, 'Location'] = get_key(int(max_class.label))
            outputs.loc[index, 'Probability'] = max_class.score
        print(outputs)

        final_df = final_df.append(outputs, ignore_index=True)

    os.remove("iBeacon_RSSI_Unlabeled.csv")
    return render_template('view.html',
                           tables=[final_df.to_html()],
                           titles=['na'])
Example #8
def create_channel_for_batching_server_auto():
    channel = grpc.insecure_channel('localhost:9005')
    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
    return stub
Example #9
def create_channel_for_model_ver_pol_server():
    channel = grpc.insecure_channel('localhost:9006')
    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
    return stub
Example #10
def predict():
    # MODEL PARAMS

    logging.basicConfig(
        level=logging.DEBUG,
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
    )
    logger = logging.getLogger(__name__)
    logger.info("Sending request to tfserving model")

    # Create gRPC client and request
    t = time.time()
    channel = grpc.insecure_channel("bert-toxic:8500")
    logger.debug("Establishing insecure channel took: {}".format(time.time() -
                                                                 t))

    t = time.time()
    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
    logger.debug("Creating stub took: {}".format(time.time() - t))

    # Parse Description
    tokenizer = tokenization.FullTokenizer(vocab_file="asset/vocab.txt",
                                           do_lower_case=True)
    processor = MultiLabelTextProcessor()
    label_list = [0, 0, 0, 0, 0, 0]

    t = time.time()
    content = request.get_json()
    logger.debug("Parsing Incoming JSON took: {}".format(time.time() - t))

    max_seq_length = content["max_seq_length"]

    request_id = str(random.randint(1, 9223372036854775807))

    t = time.time()

    input_strings = content["description"]
    model_inputs = []

    for string in input_strings:
        t = time.time()
        inputExample = processor.serving_create_example([request_id, string],
                                                        "test")
        logger.debug("Create Input Example Took: {}".format(time.time() - t))

        t = time.time()
        feature = convert_single_example(0, inputExample, label_list,
                                         max_seq_length, tokenizer)
        logger.debug("Feature Creation Took: {}".format(time.time() - t))

        t = time.time()
        features = collections.OrderedDict()
        features["input_ids"] = create_int_feature(feature.input_ids)
        features["input_mask"] = create_int_feature(feature.input_mask)
        features["segment_ids"] = create_int_feature(feature.segment_ids)
        features["is_real_example"] = create_int_feature(
            [int(feature.is_real_example)])
        if isinstance(feature.label_id, list):
            label_ids = feature.label_id
        else:
            label_ids = [feature.label_id]
        features["label_ids"] = create_int_feature(label_ids)

        tf_example = tf.train.Example(features=tf.train.Features(
            feature=features))

        model_input = tf_example.SerializeToString()
        model_inputs.append(model_input)

    logger.debug("Serialize Features Took: {}".format(time.time() - t))

    t = time.time()

    # Send request
    # See prediction_service.proto for gRPC request/response details.
    model_request = predict_pb2.PredictRequest()
    model_request.model_spec.name = "bert"
    model_request.model_spec.signature_name = "serving_default"
    dims = [tensor_shape_pb2.TensorShapeProto.Dim(size=len(model_inputs))]
    tensor_shape_proto = tensor_shape_pb2.TensorShapeProto(dim=dims)
    tensor_proto = tensor_pb2.TensorProto(
        dtype=types_pb2.DT_STRING,
        tensor_shape=tensor_shape_proto,
        string_val=[tweet for tweet in model_inputs],
    )
    logger.debug("Format Tensors Took: {}".format(time.time() - t))

    t = time.time()
    model_request.inputs["examples"].CopyFrom(tensor_proto)
    logger.debug("Create Model Request Inputs: {}".format(time.time() - t))

    t = time.time()
    result = stub.Predict(model_request, 10.0)  # 10 secs timeout
    logger.debug("stub predict took: {}".format(time.time() - t))

    t = time.time()
    predict_response_dict = predict_response_to_dict(result)
    logger.debug("predict_response_dict took: {}".format(time.time() - t))

    t = time.time()
    keys = [k for k in predict_response_dict]
    logger.debug("Receive and Iterate Took: {}".format(time.time() - t))

    t = time.time()

    def format_dict(response):
        f_dict = {
            "toxic": round(response[0], 4),
            "severe_toxic": round(response[1], 4),
            "obscene": round(response[2], 4),
            "threat": round(response[3], 4),
            "insult": round(response[4], 4),
            "identity_hate": round(response[5], 4),
        }
        return f_dict

    response_list = [
        format_dict(response)
        for response in predict_response_dict["probabilities"]
    ]

    label_dict = {"results": response_list}
    logger.debug("Create Label Dict Took: {}".format(time.time() - t))

    return jsonify(label_dict)
Example #11
def create_channel_for_port_mapping_server():
    channel = grpc.insecure_channel('localhost:9002')
    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
    return stub
Example #12
def create_grpc_stub(host, port=8500):
    hostport = '{}:{}'.format(host, port)
    channel = grpc.insecure_channel(hostport)
    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)

    return stub
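For context, a stub returned by create_grpc_stub is typically driven with a PredictRequest. A minimal sketch of the call pattern follows; the model name 'my_model', the 'serving_default' signature, and the input key 'inputs' are placeholders, not values from the original source:

import numpy as np
import tensorflow as tf
from tensorflow_serving.apis import predict_pb2

stub = create_grpc_stub('localhost')
request = predict_pb2.PredictRequest()
request.model_spec.name = 'my_model'  # placeholder model name
request.model_spec.signature_name = 'serving_default'
request.inputs['inputs'].CopyFrom(    # placeholder input key
    tf.make_tensor_proto(np.zeros((1, 4), dtype=np.float32)))
response = stub.Predict(request, 10.0)  # 10 sec timeout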
Example #13
def main(_):
    with open('mscoco_complete_label_map.pbtxt', 'r') as f:
        s = f.read()
    mymap = labelmap.StringIntLabelMap()
    global _label_map
    _label_map = text_format.Parse(s, mymap)

    channel = grpc.insecure_channel(FLAGS.server)
    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)

    durationSum = 0.0
    run_num = 1
    batch_size = 1

    image, org = decode_image_opencv(FLAGS.image)
    _draw = org.copy()

    image = image.astype(np.uint8)
    inputs = image
    for i in range(batch_size - 1):
        inputs = np.append(inputs, image, axis=0)

    model_name = "ssd_inception_v2_coco"
    # model_name = "ssd_mobilenet"
    # model_name = "ssd_resnet50"
    # model_name = "faster_rcnn_resnet50"

    request = predict_pb2.PredictRequest()
    request.model_spec.name = model_name
    request.model_spec.signature_name = 'serving_default'
    request.inputs['inputs'].CopyFrom(
        tf.contrib.util.make_tensor_proto(inputs, shape=inputs.shape))

    for i in range(run_num):

        start = time.time()
        result = stub.Predict(request, 10.0)
        end = time.time()
        duration = end - start
        durationSum += duration
        print("duration = %f" % duration)
        # print(result)

        boxes = result.outputs['detection_boxes']
        scores = result.outputs['detection_scores']
        labels = result.outputs['detection_classes']
        num_detections = result.outputs['num_detections']

        boxes = tf.make_ndarray(boxes)
        scores = tf.make_ndarray(scores)
        labels = tf.make_ndarray(labels)
        num_detections = tf.make_ndarray(num_detections)

        print("boxes output", (boxes).shape)
        print("scores output", (scores).shape)
        print("labels output", (labels).shape)
        print('num_detections', num_detections[0])

        # visualize detections hints from
        # # https://github.com/tensorflow/models/blob/master/research/object_detection/object_detection_tutorial.ipynb

        for box, score, label in zip(boxes[0], scores[0], labels[0]):
            # scores are sorted so we can break
            if score < 0.5:
                break
            #dim = image.shape[0:2]
            dim = _draw.shape
            #print("Label-raw",labels_to_names[label-1]," at ",box," Score ",score)
            box = box_normal_to_pixel(box, dim)
            b = box.astype(int)
            class_label = get_label(int(label))
            print("Label", class_label, " at ", b, " Score ", score)
            # draw the image and write out
            cv2.rectangle(_draw, (b[0], b[1]), (b[2], b[3]), (0, 0, 255), 1)
            cv2.putText(_draw, class_label + "-" + str(round(score, 2)),
                        (b[0] + 2, b[1] + 8),
                        cv2.FONT_HERSHEY_SIMPLEX, .45, (0, 0, 255))

        cv2.imshow("test", _draw)
        cv2.waitKey(0)

    print("average duration for batch size of %d = %f" %
          (batch_size, durationSum / run_num))
Example #14
def benchmark(batch_size=1, num_iteration=20, warm_up_iteration=10):
    channel = grpc.insecure_channel(SERVER_URL)
    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
    request = predict_pb2.PredictRequest()
    request.model_spec.name = 'wide-deep-large-ds-fp32-training'
    request.model_spec.signature_name = 'predict'

    i = 0
    total_time = 0
    res_dataset = generate_input_fn(DATA_FILE, batch_size=batch_size, num_epochs=1)
    #res_dataset = input_fn(DATA_FILE, 1, False, batch_size)

    CONTINUOUS_COLUMNS = ["I" + str(i) for i in range(1, 14)]  # 1-13 inclusive
    CATEGORICAL_COLUMNS = ["C" + str(i) for i in range(1, 27)]  # 1-26 inclusive
    LABEL_COLUMN = ["clicked"]
    TRAIN_DATA_COLUMNS = LABEL_COLUMN + CONTINUOUS_COLUMNS + CATEGORICAL_COLUMNS
    FEATURE_COLUMNS = CONTINUOUS_COLUMNS + CATEGORICAL_COLUMNS

    iterator = tf.compat.v1.data.make_one_shot_iterator(res_dataset)
    next_element = iterator.get_next()
    with tf.compat.v1.Session() as sess:
        sess.run(tf.compat.v1.global_variables_initializer())
        try:
            for _ in range(num_iteration):
                correct_predictions = 0
                i += 1
                input_data = sess.run(next_element)

                for c in FEATURE_COLUMNS:
                    request.inputs[c].CopyFrom(
                        tf.make_tensor_proto(input_data[0][c], shape=[1 * batch_size]))

                # request.inputs['numeric'].CopyFrom(
                #     tf.make_tensor_proto(input_data[0],
                #                          shape=[1 * batch_size, 13]))
                # request.inputs['categorical'].CopyFrom(
                #     tf.make_tensor_proto(input_data[1],
                #                          shape=[26 * batch_size, 2]))


                start_time = time.time()
                result = stub.Predict(request)
                # print("-" * 20 + " result " + "-" * 20)
                # print(result.outputs['probabilities'])
                # print("-" * 20 + " result " + "-" * 20)
                time_consume = time.time() - start_time
                if i > warm_up_iteration:
                    total_time += time_consume
                for j in range(batch_size):
                    if input_data[1][j] == np.argmax(
                            result.outputs['probabilities'].float_val[
                            j * 2:(j * 2) + 2]):
                        correct_predictions += 1
                print('Iteration %d: %.3f sec (accuracy: %.2f%%)' %
                      (i, time_consume,
                       100.0 * correct_predictions / batch_size))

        except tf.errors.OutOfRangeError:
            pass

    time_average = total_time / (num_iteration - warm_up_iteration)
    print('Average time: %.3f sec' % (time_average))
    print('Batch size = %d' % batch_size)
    if batch_size == 1:
        print('Latency: %.3f ms' % (time_average * 1000))
    print('Throughput: %.3f examples/sec' % (batch_size / time_average))
Example #15
def __init__(self):
    channel = grpc.insecure_channel("localhost:8500")
    self.stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
Example #16
def create_channel_for_update_flow_specific():
    channel = grpc.insecure_channel('localhost:9008')
    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
    return stub
Example #17
def _create_stub(server):
    channel = grpc.insecure_channel(server)
    return prediction_service_pb2_grpc.PredictionServiceStub(channel)
Example #18
def __init__(self, ip_port):
    channel = grpc.insecure_channel(ip_port)
    self.stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
Example #19
    async def __connection__(self, task_idx, loop_num):
        request_signatures = self.request_signatures[task_idx]
        response_list = []

        # create channel
        if self.certificate is None:
            async with grpc.aio.insecure_channel(self.url) as channel:
                stub = prediction_service_pb2_grpc.PredictionServiceStub(
                    channel)
                if loop_num != 0:
                    format_string = 'query: {} channel, task {}, batch {}, loop_idx {}, latency(ms) {:.1f}, tps: {:.1f}'
                    for loop_idx in range(loop_num):
                        start_time = time.time()
                        response = await stub.Predict(request_signatures)
                        stop_time = time.time()
                        latency = stop_time - start_time
                        tps = self.batch_size / latency
                        response_list.append([response, latency])
                        print(
                            format_string.format('insecure', task_idx,
                                                 self.batch_size, loop_idx,
                                                 1000 * latency, tps))
                else:
                    format_string = 'query: {} channel, task {}, batch {}, latency(ms) {:.1f}, tps: {:.1f}'
                    while True:
                        start_time = time.time()
                        response = await stub.Predict(request_signatures)
                        stop_time = time.time()
                        latency = stop_time - start_time
                        tps = self.batch_size / latency
                        print(
                            format_string.format('insecure', task_idx,
                                                 self.batch_size,
                                                 1000 * latency, tps))
        else:
            creds = grpc.ssl_channel_credentials(
                root_certificates=open(self.certificate, 'rb').read())
            async with grpc.aio.secure_channel(self.url, creds) as channel:
                stub = prediction_service_pb2_grpc.PredictionServiceStub(
                    channel)
                if loop_num != 0:
                    format_string = 'query: {} channel, task {}, batch {}, loop_idx {}, latency(ms) {:.1f}, tps: {:.1f}'
                    for loop_idx in range(loop_num):
                        start_time = time.time()
                        response = await stub.Predict(request_signatures)
                        stop_time = time.time()
                        latency = stop_time - start_time
                        tps = self.batch_size / latency
                        response_list.append([response, latency])
                        print(
                            format_string.format('secure', task_idx,
                                                 self.batch_size, loop_idx,
                                                 1000 * latency, tps))
                else:
                    format_string = 'query: {} channel, task {}, batch {}, latency(ms) {:.1f}, tps: {:.1f}'
                    while True:
                        start_time = time.time()
                        response = await stub.Predict(request_signatures)
                        stop_time = time.time()
                        latency = stop_time - start_time
                        tps = self.batch_size / latency
                        try:
                            proto_msg_to_dict(response)
                        except Exception as e:
                            print('Error response:', e)
                        print(
                            format_string.format('secure', task_idx,
                                                 self.batch_size,
                                                 1000 * latency, tps))
        return response_list
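A sketch of how a coroutine like __connection__ might be fanned out across several concurrent tasks with asyncio.gather; the client object and the task count are assumptions for illustration:

import asyncio

async def run_benchmark(client, num_tasks, loop_num):
    # launch one __connection__ coroutine per task index, run them concurrently
    tasks = [client.__connection__(task_idx, loop_num)
             for task_idx in range(num_tasks)]
    return await asyncio.gather(*tasks)

# results = asyncio.run(run_benchmark(client, num_tasks=4, loop_num=10))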
Example #20
def do_inference():
    """Tests PredictionService with concurrent requests.    
    Raises:
    IOError: An error occurred processing test data set.
    """
    if FLAGS.mode == 'dna':
        CONF = DNA_CONF()
    elif FLAGS.mode == 'rna':
        CONF = RNA_CONF()
    else:
        raise ValueError("Mode has to be either rna or dna.")
    make_dirs(FLAGS.output)
    FLAGS.segment_len = CONF.SEGMENT_LEN
    FLAGS.jump = CONF.JUMP
    FLAGS.start = CONF.START
    pbars = multi_pbars(["Request Submit:", "Request finished"])
    channel = grpc.insecure_channel(FLAGS.server)
    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
    request = predict_pb2.PredictRequest()
    request.model_spec.name = 'chiron'
    #    request.model_spec.signature_name = 'predicted_sequences'
    request.model_spec.signature_name = tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
    collector = _Result_Collection(concurrency=FLAGS.concurrency)
    file_list = gen_file_list(FLAGS.input)
    batch_iterator = data_iterator(file_list)

    def submit_fn():
        for batch_x, seq_len, i, f, N, reads_n in batch_iterator:
            seq_len = np.reshape(seq_len, (seq_len.shape[0], 1))
            #            combined_input = np.concatenate((batch_x,seq_len),axis = 1).astype(np.float32)
            #            request.inputs['combined_inputs'].CopyFrom(
            #                tf.contrib.util.make_tensor_proto(combined_input, shape=[FLAGS.batch_size, CONF.SEGMENT_LEN+1]))
            request.inputs['x'].CopyFrom(
                tf.contrib.util.make_tensor_proto(
                    batch_x, shape=[FLAGS.batch_size, CONF.SEGMENT_LEN]))
            request.inputs['seq_len'].CopyFrom(
                tf.contrib.util.make_tensor_proto(seq_len,
                                                  shape=[FLAGS.batch_size]))
            collector.throttle()
            result_future = stub.Predict.future(request, 100.0)  # 100 seconds
            result_future.add_done_callback(
                _post_process(collector, i, f, N, reads_n))
            pbars.update(0, total=reads_n, progress=(i + 1) * FLAGS.batch_size)
            pbars.update_bar()

    submitter = threading.Thread(target=submit_fn, args=())
    submitter.daemon = True
    submitter.start()
    pbars.update(1, total=len(file_list))
    pbars.update_bar()
    while not collector.all_done():
        if len(collector._done) > 0:
            qs_string = None
            f_p = collector._done[0]
            reads, probs = collector.pop_out(f_p)
            bpreads = [index2base(read) for read in reads]
            consensus, qs_consensus = simple_assembly_qs(bpreads, probs)
            qs_string = qs(consensus, qs_consensus)
            c_bpread = index2base(np.argmax(consensus, axis=0))
            file_pre = os.path.basename(os.path.splitext(f_p)[0])
            write_output(bpreads,
                         c_bpread, [np.NaN] * 4,
                         file_pre,
                         concise=FLAGS.concise,
                         suffix=FLAGS.extension,
                         q_score=qs_string,
                         global_setting=FLAGS)
            pbars.update(1, progress=pbars.progress[1] + 1)
            pbars.update_bar()
Example #21
def make_grpc_api_call(context_answer,
                       batch_size,
                       server_host,
                       server_port,
                       vocab_file,
                       max_seq_length=512,
                       do_lower_case=False,
                       server_name="bert_QG",
                       timeout=60.0):

    input_examples = []
    for tup in context_answer:
        context, answer = tup
        input_example = create_example(context, answer)
        input_examples.append(input_example)

    tokenizer = tokenization.FullTokenizer(vocab_file=vocab_file,
                                           do_lower_case=do_lower_case)

    serialized_tf_examples = []
    for input_example in input_examples:
        serialized_tf_example = convert_example_to_feature(
            input_example, max_seq_length, tokenizer)
        serialized_tf_examples.append(serialized_tf_example)

    if batch_size > len(serialized_tf_examples):
        batch_size = len(serialized_tf_examples)

    num_batches = len(serialized_tf_examples) // batch_size
    if len(serialized_tf_examples) % batch_size != 0:
        num_batches = num_batches + 1

    # all examples
    pred_questions = []

    for i in range(num_batches):
        # get the batch of serialized tf example
        inp_batch = serialized_tf_examples[i * batch_size:(i + 1) * batch_size]
        if len(inp_batch) == 0:
            continue

        # update the server host:port for making grpc api call
        # create the RPC stub
        server_grpc = server_host + ':' + server_port
        channel = grpc.insecure_channel(server_grpc)
        stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)

        # create the request object and set the name and signature_name params
        request = predict_pb2.PredictRequest()
        request.model_spec.name = server_name
        request.model_spec.signature_name = 'serving_default'

        # fill in the request object with the necessary data
        request.inputs['examples'].CopyFrom(
            tf.make_tensor_proto(inp_batch,
                                 dtype=tf.string,
                                 shape=[len(inp_batch)]))

        # sync requests with 30 sec wait before termination
        # result_future = stub.Predict(request, 30.)
        # For async requests
        result_future = stub.Predict.future(request, timeout)
        result_future = result_future.result()

        # Get shape of batch and categories in prediction
        NUM_PREDICTIONS = \
            result_future.outputs['pred_ids'].tensor_shape.dim[0].size
        NUM_TOKENS = \
            result_future.outputs['pred_ids'].tensor_shape.dim[1].size

        batch_preds = np.reshape(result_future.outputs['pred_ids'].int32_val,
                                 (int(NUM_PREDICTIONS), int(NUM_TOKENS)))

        for pred_ids in batch_preds:

            pred_tokens = tokenizer.convert_ids_to_tokens(pred_ids)
            output = []
            for tok in pred_tokens:
                if tok == "[SEP]":
                    break
                output.append(tok)
            question_text = " ".join(output)

            pred_questions.append(question_text)

    return pred_questions
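A hypothetical invocation of the helper above; the host, port, and vocab path are assumptions for illustration:

questions = make_grpc_api_call(
    context_answer=[("The Eiffel Tower is in Paris.", "Paris")],
    batch_size=8,
    server_host="localhost",  # assumed host
    server_port="8500",       # assumed port (a string, since it is concatenated above)
    vocab_file="vocab.txt")   # assumed vocab path
for q in questions:
    print(q)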
Example #22
def create_grpc_stub(self, grpc_channel):
    return prediction_service_pb2_grpc.PredictionServiceStub(grpc_channel)
Example #23
def start(args):
    api = None
    try:
        ctx = Context(s3_path=args.context,
                      cache_dir=args.cache_dir,
                      workload_id=args.workload_id)
        api = ctx.apis_id_map[args.api]
        local_cache["api"] = api
        local_cache["ctx"] = ctx

        if api.get("request_handler") is not None:
            local_cache["request_handler"] = ctx.get_request_handler_impl(
                api["name"], args.project_dir)
        request_handler = local_cache.get("request_handler")

        if request_handler is not None and util.has_function(
                request_handler, "pre_inference"):
            logger.info(
                "using pre_inference request handler provided in {}".format(
                    api["request_handler"]))
        else:
            logger.info("pre_inference request handler not found")

        if request_handler is not None and util.has_function(
                request_handler, "post_inference"):
            logger.info(
                "using post_inference request handler provided in {}".format(
                    api["request_handler"]))
        else:
            logger.info("post_inference request handler not found")

    except Exception as e:
        logger.exception("failed to start api")
        sys.exit(1)

    try:
        validate_model_dir(args.model_dir)
    except Exception as e:
        logger.exception("failed to validate model")
        sys.exit(1)

    if api.get("tracker") is not None and api["tracker"].get(
            "model_type") == "classification":
        try:
            local_cache["class_set"] = api_utils.get_classes(ctx, api["name"])
        except Exception as e:
            logger.warn("an error occurred while attempting to load classes",
                        exc_info=True)

    channel = grpc.insecure_channel("localhost:" + str(args.tf_serve_port))
    local_cache["stub"] = prediction_service_pb2_grpc.PredictionServiceStub(
        channel)

    # wait a bit for tf serving to start before querying metadata
    limit = 60
    for i in range(limit):
        try:
            local_cache["model_metadata"] = run_get_model_metadata()
            break
        except Exception as e:
            if i > 6:
                logger.warning(
                    "unable to read model metadata - model is still loading. Retrying..."
                )
            if i == limit - 1:
                logger.exception("retry limit exceeded")
                sys.exit(1)

        time.sleep(5)

    signature_key, parsed_signature = extract_signature(
        local_cache["model_metadata"]["signatureDef"], api["tf_signature_key"])

    local_cache["signature_key"] = signature_key
    local_cache["parsed_signature"] = parsed_signature
    logger.info("model_signature: {}".format(local_cache["parsed_signature"]))
    serve(app, listen="*:{}".format(args.port))
Example #24
def main(req: func.HttpRequest, context: func.Context) -> func.HttpResponse:
    _NAME = 'image'

    event_id = context.invocation_id
    logging.info(
        f"Python yolo-v3 function start process.\nID:{event_id}\nBack-end server host: {_HOST}:{_PORT}")

    try:
        method = req.method
        url = req.url
        files = req.files[_NAME]

        if method != 'POST':
            logging.warning(
                f'ID:{event_id}, the method was {method}. Refused.')
            return func.HttpResponse('only accept POST method', status_code=400)

        if files:
            if files.content_type != 'image/jpeg':
                logging.warning(
                    f'ID:{event_id}, the file type was {files.content_type}. Refused.')
                return func.HttpResponse('only accept jpeg images', status_code=400)

            img_bin = files.read()

            img = prep.to_pil_image(img_bin)
            img = prep.rotate_image(img)

            frame = cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)

            # resize image to [416, 416]
            img = prep.resize(img, w=416, h=416)
            # img = prep.resize(img, w=608, h=608)
            img_np = np.array(img)
            img_np = img_np.astype(np.float32)
            # hwc > bchw [1,3,416, 416]
            img_np = prep.transpose(img_np)

            # semantic segmentation
            request = predict_pb2.PredictRequest()
            request.model_spec.name = 'yolo-v3'
            # request.model_spec.name = 'yolo-v4'
            request.inputs["input_1"].CopyFrom(make_tensor_proto(img_np))
            # request.inputs["image_input"].CopyFrom(make_tensor_proto(img_np))

            # send to infer model by grpc
            start = time()
            channel = grpc.insecure_channel("{}:{}".format(_HOST, _PORT))
            stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
            result = stub.Predict(request, timeout=10.0)

            # logging.warning(f'Output:{result}')
            logging.warning(f'OutputType:{type(result)}')

            # print(result.outputs)

            output1 = make_ndarray(result.outputs['conv2d_58/Conv2D/YoloRegion'])
            output2 = make_ndarray(result.outputs['conv2d_66/Conv2D/YoloRegion'])
            output3 = make_ndarray(result.outputs['conv2d_74/Conv2D/YoloRegion'])
            outputs = [output1, output2, output3]
            # -----------------------------------------------------------
            # output image in which detected objects are surrounded by
            # rectangles, with their labels drawn on them
            frame = postp.object_detection(frame, img_np, outputs)
            # -----------------------------------------------------------


            timecost = time() - start
            logging.info(f"Inference complete, takes {timecost}s")

            imgbytes = cv2.imencode('.jpg', frame)[1].tobytes()
            # imgbytes = prep.encode(image)
            MIMETYPE = 'image/jpeg'

            return func.HttpResponse(body=imgbytes, status_code=200, mimetype=MIMETYPE, charset='utf-8')

        else:
            logging.warning(f'ID:{event_id}, failed to get image.')
            return func.HttpResponse(f'no image files', status_code=400)

    except grpc.RpcError as e:
        status_code = e.code()
        if "DEADLINE_EXCEEDED" in status_code.name:
            logging.error(e)
            return func.HttpResponse(f'the grpc request timeout', status_code=408)
        else:
            logging.error(f"grpcError:{e}")
            return func.HttpResponse(f'Failed to get grpcResponse', status_code=500)

    except Exception as e:
        logging.error(f"Error:{e}\n\
                        url:{url}\n\
                        method:{method}\n")
        return func.HttpResponse('Service error. Check the log.', status_code=500)
Example #25
lemmatizer = WordNetLemmatizer()
stemmer = PorterStemmer()
stop = set(stopwords.words('english'))

# Map data into vocabulary

# RNN
text_path_rnn = os.path.join(FLAGS.checkpoint_dir_rnn, "..", "text_vocab")

#text_path = os.path.join(FLAGS.checkpoint_dir, "..", "text_vocab")
text_vocab_processor_rnn = tf.contrib.learn.preprocessing.VocabularyProcessor.restore(
    text_path_rnn)

channel_rnn = grpc.insecure_channel('0.0.0.0:8500')
stub_rnn = prediction_service_pb2_grpc.PredictionServiceStub(channel_rnn)

request_rnn = predict_pb2.PredictRequest()
request_rnn.model_spec.name = 'lstm'

# CNN
text_path_cnn = os.path.join(FLAGS.checkpoint_dir_cnn, "..", "vocab")

#text_path = os.path.join(FLAGS.checkpoint_dir, "..", "text_vocab")
text_vocab_processor_cnn = tf.contrib.learn.preprocessing.VocabularyProcessor.restore(
    text_path_cnn)

channel_cnn = grpc.insecure_channel('0.0.0.0:8501')
stub_cnn = prediction_service_pb2_grpc.PredictionServiceStub(channel_cnn)

request_cnn = predict_pb2.PredictRequest()
Example #26
def __init__(self):
    channel = grpc.insecure_channel(FLAGS.server)
    self.predict_stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
    self.model_name = 'segment'
    self.signature_name = 'predict_segment'
    self.timeout = 3
Example #27
import grpc
from tensorflow_serving.apis import prediction_service_pb2_grpc
from tensorflow_serving.apis import predict_pb2

import sys
import time

import cv2
sys.path.append(
    '/home/yitao/Documents/fun-project/tensorflow-related/video-captioning-serving/'
)

from modules_video_cap.video_cap_alexnet_d2 import CapAlexnet

alexnet = CapAlexnet()
alexnet.Setup()

ichannel = grpc.insecure_channel("localhost:8500")
istub = prediction_service_pb2_grpc.PredictionServiceStub(ichannel)

video_path = "/home/yitao/Documents/fun-project/tensorflow-related/video-captioning-serving/inputs/vid264.mp4"
reader = cv2.VideoCapture(video_path)

frame_id = 1
batch_size = 16

while (frame_id < 256):

    start = time.time()

    data_array = []

    for i in range(batch_size):
        _, image = reader.read()
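        # (The snippet is truncated here. A hedged sketch of the rest of the
        # loop, assuming frames are collected into a batch and the stream may
        # end early:)
        if image is None:
            break
        data_array.append(image)

    # The batch in data_array would then be handed to the captioning modules
    # (alexnet / istub); that code is not part of the snippet.
    frame_id += batch_size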
Ejemplo n.º 28
0
def do_inference(server, batch_size, num_tests, img_path):

    channel = grpc.insecure_channel(server)
    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
    request = predict_pb2.PredictRequest()
    request.model_spec.name = 'retinanet_od'
    request.model_spec.signature_name = 'serving_default'
    print("Image path", img_path)
    #post process the image
    #image,org= decode_image_opencv(img_path)
    image, org = decode_image_retinanet(img_path)
    #image,org = decode_image_tf_reader(img_path)
    global _draw
    _draw = org.copy()
    print("in image shape", image.shape)
    #('in image shape', (480, 640, 3))

    #input = np.expand_dims(image, axis=0)
    # do this once, as tf.contrib.util.make_tensor_proto is slow on first use
    #request.inputs['input_image'].CopyFrom(tf.contrib.util.make_tensor_proto \
    #  (input, shape=[1, 800, 1067, 3]))  # (input, shape=[1, 500, 567, 3]))

    global _start
    _start = time.time()
    global _response_awaiting
    _response_awaiting = True
    for i in range(num_tests):
        #print("Going to send the request")
        # batching
        # If using anything other than decode_opencv uncomment line below
        input = np.expand_dims(image, axis=0)
        #('Input shape=', (1, 480, 640, 3))
        #input = image # comment this if using anything other than retinanet
        print("Input shape=", input.shape)
        inputs = input
        for _ in range(batch_size - 1):
            inputs = np.append(inputs, input, axis=0)

        print("in tf shape", inputs.shape)
        request.inputs['input_image'].CopyFrom(
            tf.contrib.util.make_tensor_proto(inputs, shape=inputs.shape))

        # callback way - this is faster
        result_future = stub.Predict.future(request,
                                            60.25)  # initial call takes time
        result_future.add_done_callback(_callback)

        # request-response way - this is slower
        # result = stub.Predict(request, 10.25)  #  seconds
        # parse_result(result)
        _response_awaiting = True

        #print("Send the request")
        # End for loop

    while _response_awaiting:
        time.sleep(.000010)
    print("Response received, exiting")

    ################################################################################
    #  Helper functions for image preprocessing (leftover experiment; the         #
    #  decoded tensor below is never used)                                        #
    ################################################################################
    # Read the image via the TF helper
    img_raw = tf.read_file(img_path)
    start = time.time()
    img_tensor = tf.image.decode_jpeg(
        img_raw, channels=0,
        dct_method="INTEGER_FAST")  # not much effect here; decode takes ~30 ms
    print("img_tensor.shape=", img_tensor.shape)
Ejemplo n.º 29
0
### Using the gRPC API

from tensorflow_serving.apis.predict_pb2 import PredictRequest

request = PredictRequest()
request.model_spec.name = model_name
request.model_spec.signature_name = "serving_default"
input_name = model.input_names[0]
request.inputs[input_name].CopyFrom(tf.make_tensor_proto(X_new))

import grpc
from tensorflow_serving.apis import prediction_service_pb2_grpc

channel = grpc.insecure_channel('localhost:8500')
predict_service = prediction_service_pb2_grpc.PredictionServiceStub(channel)
response = predict_service.Predict(request, timeout=10.0)

response

Convert the response to a tensor:

output_name = model.output_names[0]
outputs_proto = response.outputs[output_name]
y_proba = tf.make_ndarray(outputs_proto)
y_proba.round(2)

Or to a NumPy array if your client does not include the TensorFlow library; assuming the output is returned as floats (in float_val), the shape can be recovered from the proto itself:

import numpy as np

output_name = model.output_names[0]
outputs_proto = response.outputs[output_name]
shape = [dim.size for dim in outputs_proto.tensor_shape.dim]
y_proba = np.array(outputs_proto.float_val).reshape(shape)
Ejemplo n.º 30
0
def predict(filename):

    channel = grpc.insecure_channel("localhost:8500")
    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)

    request = predict_pb2.PredictRequest()
    # model_name
    request.model_spec.name = "cropper_model"
    # signature name, default is 'serving_default'
    request.model_spec.signature_name = "serving_default"
    start = time.time()
    """
    =========================================
    ===== Crop and align id card image
    =========================================
    """
    filepath = app.config["IMAGE_UPLOADS"] + "/" + filename
    # preprocess image
    img, original_image, original_width, original_height = preprocess_image(
        filepath, Cropper.TARGET_SIZE)
    if img.ndim == 3:
        img = np.expand_dims(img, axis=0)
    # request to cropper model
    request.inputs["input_1"].CopyFrom(
        tf.make_tensor_proto(img, dtype=np.float32, shape=img.shape))
    try:
        result = stub.Predict(request, 10.0)
        result = result.outputs["tf_op_layer_concat_14"].float_val
        result = np.array(result).reshape((-1, 9))
    except Exception as e:
        # re-raise so `result` is never used unbound below
        print(e)
        raise

    cropper = Cropper()
    cropper.set_best_bboxes(result,
                            original_width=original_width,
                            original_height=original_height,
                            iou_threshold=0.5)

    # respond to the client if the image is invalid
    if not cropper.respone_client(threshold_idcard=0.8):
        return render_template('upload_image_again.html')

    cropper.set_image(original_image=original_image)

    # output of cropper part
    aligned_image = getattr(cropper, "image_output")
    cv2.imwrite('app/static/aligned_images/' + filename, aligned_image)
    aligned_image = cv2.cvtColor(aligned_image, cv2.COLOR_BGR2RGB)
    """
    ===========================================
    ==== Detect informations in aligned image
    ===========================================
    """
    # preprocess aligned image
    original_height, original_width, _ = aligned_image.shape
    img = cv2.resize(aligned_image, Detector.TARGET_SIZE)
    img = np.float32(img / 255.)
    # model_name
    request.model_spec.name = "detector_model"
    # signature name, default is 'serving_default'
    request.model_spec.signature_name = "serving_default"

    if img.ndim == 3:
        img = np.expand_dims(img, axis=0)
    # new request to detector model
    request.inputs["input_1"].CopyFrom(
        tf.make_tensor_proto(img, dtype=np.float32, shape=img.shape))

    try:
        result = stub.Predict(request, 10.0)
        result = result.outputs["tf_op_layer_concat_14"].float_val
        result = np.array(result).reshape((-1, 13))
    except Exception as e:
        # re-raise so `result` is never used unbound below
        print(e)
        raise

    detector = Detector()
    detector.set_best_bboxes(result,
                             original_width=original_width,
                             original_height=original_height,
                             iou_threshold=0.5)
    detector.set_info_images(original_image=aligned_image)
    # output of detector part
    info_images = getattr(detector, "info_images")
    """
    =====================================
    ==== Reader infors from infors image
    =====================================
    """
    keys = list(info_images.keys())
    keys.remove("thoi_han")
    keys.remove("chan_dung")
    infors = dict()

    # initialize default values for quoc_tich (nationality) and dan_toc (ethnicity)
    infors['quoc_tich'] = ""
    infors['dan_toc'] = ""

    if "quoc_tich" in keys:
        infors['quoc_tich'] = ["Việt Nam"]
        keys.remove("quoc_tich")

    if "sex" in keys:
        info_image = info_images["sex"]
        infors["sex"] = list()
        for i in range(len(info_image)):
            img = info_image[i]['image']
            s = reader.predict(img)
            if "Na" in s:
                infors["sex"].append("Nam")
            else:
                infors["sex"].append("Nữ")
        keys.remove("sex")

    if "dan_toc" in keys:
        info_image = info_images["dan_toc"]
        infors["dan_toc"] = list()
        for i in range(len(info_image)):
            img = info_image[i]['image']
            s = reader.predict(img)
            s = s.split(" ")[-1]
            infors["dan_toc"].append(s)

        keys.remove("dan_toc")

    for key in keys:
        infors[key] = list()
        info_image = info_images[key]
        for i in range(len(info_image)):
            img = info_image[i]['image']
            s = reader.predict(img)
            infors[key].append(s)
    que_quan_0 = infors['que_quan'][0]
    que_quan_1 = ''
    noi_thuong_tru_0 = infors['noi_thuong_tru'][0]
    noi_thuong_tru_1 = ''
    if len(infors['que_quan']) == 2:
        que_quan_1 = infors['que_quan'][1]
    if len(infors['noi_thuong_tru']) == 2:
        noi_thuong_tru_1 = infors['noi_thuong_tru'][1]

    print("total_time:{}".format(time.time() - start))
    return render_template('predict.html',
                           id=infors['id'][0].replace(" ", ""),
                           full_name=infors['full_name'][0],
                           date_of_birth=infors['date_of_birth'][0],
                           sex=infors['sex'][0],
                           quoc_tich=infors['quoc_tich'],
                           dan_toc=infors['dan_toc'],
                           que_quan_0=que_quan_0,
                           que_quan_1=que_quan_1,
                           noi_thuong_tru_0=noi_thuong_tru_0,
                           noi_thuong_tru_1=noi_thuong_tru_1,
                           filename=str(filename))
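A closing note, not part of the original app: predict() rebuilds the same PredictRequest twice, changing only the model name and the reshape width. A hedged helper that captures that repeated round-trip (serving_predict is a hypothetical name) might look like:

def serving_predict(stub, model_name, img, output_key, out_dim):
    # Hypothetical helper: wraps the Predict pattern used twice in predict().
    request = predict_pb2.PredictRequest()
    request.model_spec.name = model_name
    request.model_spec.signature_name = "serving_default"
    request.inputs["input_1"].CopyFrom(
        tf.make_tensor_proto(img, dtype=np.float32, shape=img.shape))
    result = stub.Predict(request, 10.0)
    return np.array(result.outputs[output_key].float_val).reshape((-1, out_dim))

# e.g. result = serving_predict(stub, "cropper_model", img, "tf_op_layer_concat_14", 9)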