Example #1
def __init__(self, config_file: str, urls: list):
    self.config_file = config_file
    self.urls = urls
    self.feed_var = 'generated_var_17144'
    self.fetch_var = 'translated_layer/scale_0.tmp_0'
    self.client = Client()
    self.connect_to_servers()
Example #2
class OCRService(WebService):
    def init_det_client(self, det_port, det_client_config):
        self.det_preprocess = Sequential([
            ResizeByFactor(32, 960),
            Div(255),
            Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
            Transpose((2, 0, 1))
        ])
        self.det_client = Client()
        self.det_client.load_client_config(det_client_config)
        self.det_client.connect(["127.0.0.1:{}".format(det_port)])
        self.ocr_reader = OCRReader()

    def preprocess(self, feed=[], fetch=[]):
        data = base64.b64decode(feed[0]["image"].encode('utf8'))
        data = np.frombuffer(data, np.uint8)
        im = cv2.imdecode(data, cv2.IMREAD_COLOR)
        ori_h, ori_w, _ = im.shape
        det_img = self.det_preprocess(im)
        det_out = self.det_client.predict(feed={"image": det_img},
                                          fetch=["concat_1.tmp_0"],
                                          batch=False)
        _, new_h, new_w = det_img.shape
        filter_func = FilterBoxes(10, 10)
        post_func = DBPostProcess({
            "thresh": 0.3,
            "box_thresh": 0.5,
            "max_candidates": 1000,
            "unclip_ratio": 1.5,
            "min_size": 3
        })
        sorted_boxes = SortedBoxes()
        ratio_list = [float(new_h) / ori_h, float(new_w) / ori_w]
        dt_boxes_list = post_func(det_out["concat_1.tmp_0"], [ratio_list])
        dt_boxes = filter_func(dt_boxes_list[0], [ori_h, ori_w])
        dt_boxes = sorted_boxes(dt_boxes)
        get_rotate_crop_image = GetRotateCropImage()
        feed_list = []
        img_list = []
        max_wh_ratio = 0
        for i, dtbox in enumerate(dt_boxes):
            boximg = get_rotate_crop_image(im, dt_boxes[i])
            img_list.append(boximg)
            h, w = boximg.shape[0:2]
            wh_ratio = w * 1.0 / h
            max_wh_ratio = max(max_wh_ratio, wh_ratio)
        for img in img_list:
            norm_img = self.ocr_reader.resize_norm_img(img, max_wh_ratio)
            feed_list.append(norm_img[np.newaxis, :])
        feed_batch = {"image": np.concatenate(feed_list, axis=0)}
        fetch = ["ctc_greedy_decoder_0.tmp_0", "softmax_0.tmp_0"]
        return feed_batch, fetch, True

    def postprocess(self, feed={}, fetch=[], fetch_map=None):
        rec_res = self.ocr_reader.postprocess(fetch_map, with_score=True)
        res_lst = []
        for res in rec_res:
            res_lst.append(res[0])
        res = {"res": res_lst}
        return res
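For context, a minimal launch sketch for a WebService subclass like OCRService, following the standard PaddleServing flow; the model directory, workdir, and ports below are illustrative assumptions, not taken from the original:

# hedged sketch: wiring up and serving OCRService (paths/ports are assumptions)
ocr_service = OCRService(name="ocr")
ocr_service.load_model_config("ocr_rec_model")            # hypothetical rec model dir
ocr_service.prepare_server(workdir="workdir", port=9292)
ocr_service.init_det_client(
    det_port=9293,
    det_client_config="ocr_det_client/serving_client_conf.prototxt")  # assumed path
ocr_service.run_rpc_service()
ocr_service.run_web_service()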
Example #3
class MainbodyDetect():
    """
    pp-shitu mainbody detect.
    include preprocess, process, postprocess
    return detect results
    Attention: Postprocess include num limit and box filter; no nms 
    """
    def __init__(self):
        self.preprocess = DetectionSequential([
            DetectionFile2Image(),
            DetectionNormalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225],
                               True),
            DetectionResize((640, 640), False, interpolation=2),
            DetectionTranspose((2, 0, 1))
        ])

        self.client = Client()
        self.client.load_client_config(
            "../../models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_client/serving_client_conf.prototxt"
        )
        self.client.connect(['127.0.0.1:9293'])

        self.max_det_result = 5
        self.conf_threshold = 0.2

    def predict(self, imgpath):
        im, im_info = self.preprocess(imgpath)
        im_shape = np.array(im.shape[1:]).reshape(-1)
        scale_factor = np.array(list(im_info['scale_factor'])).reshape(-1)

        fetch_map = self.client.predict(
            feed={
                "image": im,
                "im_shape": im_shape,
                "scale_factor": scale_factor,
            },
            fetch=["save_infer_model/scale_0.tmp_1"],
            batch=False)
        return self.postprocess(fetch_map, imgpath)

    def postprocess(self, fetch_map, imgpath):
        #1. get top max_det_result
        det_results = fetch_map["save_infer_model/scale_0.tmp_1"]
        if len(det_results) > self.max_det_result:
            boxes_reserved = fetch_map[
                "save_infer_model/scale_0.tmp_1"][:self.max_det_result]
        else:
            boxes_reserved = det_results

        #2. do conf threshold
        boxes_list = []
        for i in range(boxes_reserved.shape[0]):
            if (boxes_reserved[i, 1]) > self.conf_threshold:
                boxes_list.append(boxes_reserved[i, :])

        #3. add origin image box
        origin_img = cv2.imread(imgpath)
        boxes_list.append(
            np.array([0, 1.0, 0, 0, origin_img.shape[1], origin_img.shape[0]]))
        return np.array(boxes_list)
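A short usage sketch for the class above; the image path is an assumption. Each returned row is a 6-element box, and the full image is appended as the final candidate:

# hedged usage sketch (image path is an assumption)
detector = MainbodyDetect()
boxes = detector.predict("test.jpg")  # one [class_id, score, x1, y1, x2, y2] row per box
print(boxes)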
Example #4
def init_det_client(self, det_port, det_client_config):
    self.det_preprocess = Sequential([
        ResizeByFactor(32, 960),
        Div(255),
        Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        Transpose((2, 0, 1))
    ])
    self.det_client = Client()
    self.det_client.load_client_config(det_client_config)
    self.det_client.connect(["127.0.0.1:{}".format(det_port)])
    self.ocr_reader = OCRReader()
Example #5
class TextSystemHelper(TextSystem):
    def __init__(self, args):
        self.text_detector = TextDetectorHelper(args)
        self.text_recognizer = TextRecognizerHelper(args)
        self.use_angle_cls = args.use_angle_cls
        if self.use_angle_cls:
            self.clas_client = Client()
            self.clas_client.load_client_config(
                os.path.join(args.cls_client_dir,
                             "serving_client_conf.prototxt"))
            self.clas_client.connect(["127.0.0.1:9294"])
            self.text_classifier = TextClassifierHelper(args)
        self.det_client = Client()
        self.det_client.load_client_config(
            os.path.join(args.det_client_dir, "serving_client_conf.prototxt"))
        self.det_client.connect(["127.0.0.1:9293"])
        self.fetch = [
            "save_infer_model/scale_0.tmp_0", "save_infer_model/scale_1.tmp_0"
        ]

    def preprocess(self, img):
        feed, fetch, self.tmp_args = self.text_detector.preprocess(img)
        fetch_map = self.det_client.predict(feed, fetch)
        outputs = [fetch_map[x] for x in fetch]
        dt_boxes = self.text_detector.postprocess(outputs, self.tmp_args)
        if dt_boxes is None:
            return None, None
        img_crop_list = []
        dt_boxes = sorted_boxes(dt_boxes)
        self.dt_boxes = dt_boxes
        for bno in range(len(dt_boxes)):
            tmp_box = copy.deepcopy(dt_boxes[bno])
            img_crop = self.get_rotate_crop_image(img, tmp_box)
            img_crop_list.append(img_crop)
        if self.use_angle_cls:
            feed, fetch, self.tmp_args = self.text_classifier.preprocess(
                img_crop_list)
            fetch_map = self.clas_client.predict(feed, fetch)
            outputs = [fetch_map[x] for x in self.text_classifier.fetch]
            for x in fetch_map.keys():
                if ".lod" in x:
                    self.tmp_args[x] = fetch_map[x]
            img_crop_list, _ = self.text_classifier.postprocess(
                outputs, self.tmp_args)
        feed, fetch, self.tmp_args = self.text_recognizer.preprocess(
            img_crop_list)
        return feed, self.fetch, self.tmp_args

    def postprocess(self, outputs, args):
        return self.text_recognizer.postprocess(outputs, args)
Example #6
def __init__(self, args):
    self.text_detector = TextDetectorHelper(args)
    self.text_recognizer = TextRecognizerHelper(args)
    self.use_angle_cls = args.use_angle_cls
    if self.use_angle_cls:
        self.clas_client = Client()
        self.clas_client.load_client_config(
            "ocr_clas_client/serving_client_conf.prototxt")
        self.clas_client.connect(["127.0.0.1:9294"])
        self.text_classifier = TextClassifierHelper(args)
    self.det_client = Client()
    self.det_client.load_client_config(
        "det_db_client/serving_client_conf.prototxt")
    self.det_client.connect(["127.0.0.1:9293"])
    self.fetch = ["ctc_greedy_decoder_0.tmp_0", "softmax_0.tmp_0"]
Example #7
def __init__(self, args):
    self.text_detector = TextDetectorHelper(args)
    self.text_recognizer = TextRecognizerHelper(args)
    self.use_angle_cls = args.use_angle_cls
    if self.use_angle_cls:
        self.clas_client = Client()
        self.clas_client.load_client_config(
            "cls_infer_client/serving_client_conf.prototxt")
        self.clas_client.connect(["127.0.0.1:9294"])
        self.text_classifier = TextClassifierHelper(args)
    self.det_client = Client()
    self.det_client.load_client_config(
        "det_infer_client/serving_client_conf.prototxt")
    self.det_client.connect(["127.0.0.1:9293"])
    self.fetch = [
        "save_infer_model/scale_0.tmp_0", "save_infer_model/scale_1.tmp_0"
    ]
Example #8
def __init__(self):
    self.preprocess = DetectionSequential([
        DetectionFile2Image(),
        DetectionNormalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225],
                           True),
        DetectionResize((640, 640), False, interpolation=2),
        DetectionTranspose((2, 0, 1))
    ])

    self.client = Client()
    self.client.load_client_config(
        "../../models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_client/serving_client_conf.prototxt"
    )
    self.client.connect(['127.0.0.1:9293'])

    self.max_det_result = 5
    self.conf_threshold = 0.2
Example #9
def single_func(idx, resource):
    batch = 1
    buf_size = 100
    dataset = criteo.CriteoDataset()
    dataset.setup(1000001)
    test_filelists = [
        "./raw_data/part-%d" % x for x in range(len(os.listdir("./raw_data")))
    ]
    reader = dataset.infer_reader(test_filelists[len(test_filelists) - 40:],
                                  batch, buf_size)
    if args.request == "rpc":
        fetch = ["prob"]
        client = Client()
        client.load_client_config(args.model)
        client.connect([resource["endpoint"][idx % len(resource["endpoint"])]])

        start = time.time()
        for i in range(1000):
            if args.batch_size == 1:
                data = next(reader())
                feed_dict = {}
                for i in range(1, 27):
                    feed_dict["sparse_{}".format(i - 1)] = data[0][i]
                result = client.predict(feed=feed_dict, fetch=fetch)
            else:
                print("unsupport batch size {}".format(args.batch_size))

    elif args.request == "http":
        raise ("Not support http service.")
    end = time.time()
    return [[end - start]]
Example #10
def single_func(idx, resource):
    train_reader = paddle.batch(
        paddle.reader.shuffle(
            paddle.dataset.uci_housing.train(), buf_size=500),
        batch_size=1)
    total_number = sum(1 for _ in train_reader())
    latency_list = []

    if args.request == "rpc":
        client = Client()
        client.load_client_config(args.model)
        client.connect([args.endpoint])
        start = time.time()
        for data in train_reader():
            l_start = time.time()
            fetch_map = client.predict(feed={"x": data[0][0]}, fetch=["price"])
            l_end = time.time()
            latency_list.append(l_end * 1000 - l_start * 1000)
        end = time.time()
        return [[end - start], latency_list, [total_number]]
    elif args.request == "http":
        train_reader = paddle.batch(
            paddle.reader.shuffle(
                paddle.dataset.uci_housing.train(), buf_size=500),
            batch_size=1)
        start = time.time()
        for data in train_reader():
            l_start = time.time()
            r = requests.post(
                'http://{}/uci/prediction'.format(args.endpoint),
                data={"x": data[0]})
            l_end = time.time()
            latency_list.append(l_end * 1000 - l_start * 1000)
        end = time.time()
        return [[end - start], latency_list, [total_number]]
Example #11
def single_func(idx, resource):
    imdb_dataset = IMDBDataset()
    imdb_dataset.load_resource("./imdb.vocab")
    dataset = []
    with open("./test_data/part-0") as fin:
        for line in fin:
            dataset.append(line.strip())
    profile_flags = False
    latency_flags = False
    if os.getenv("FLAGS_profile_client"):
        profile_flags = True
    if os.getenv("FLAGS_serving_latency"):
        latency_flags = True
        latency_list = []
    start = time.time()
    if args.request == "rpc":
        client = Client()
        client.load_client_config(args.model)
        client.connect([args.endpoint])
        for i in range(1000):
            if args.batch_size >= 1:
                feed = {"words": [], "words.lod": [0]}
                for bi in range(args.batch_size):
                    word_ids, label = imdb_dataset.get_words_and_label(
                        dataset[bi])
                    feed["words.lod"].append(feed["words.lod"][-1] +
                                             len(word_ids))
                    feed["words"].extend(word_ids)
                feed["words"] = np.array(feed["words"]).reshape(
                    len(feed["words"]), 1)
                result = client.predict(feed=feed,
                                        fetch=["prediction"],
                                        batch=True)
                if result is None:
                    raise RuntimeError("predict failed.")
            else:
                print("unsupport batch size {}".format(args.batch_size))

    elif args.request == "http":
        if args.batch_size >= 1:
            feed_batch = []
            for bi in range(args.batch_size):
                feed_batch.append({"words": dataset[bi]})
            r = requests.post("http://{}/imdb/prediction".format(
                args.endpoint),
                              json={
                                  "feed": feed_batch,
                                  "fetch": ["prediction"]
                              })
            if r.status_code != 200:
                print('HTTP status code is not 200')
                raise RuntimeError("predict failed.")
        else:
            print("unsupport batch size {}".format(args.batch_size))
    end = time.time()
    return [[end - start]]
Example #12
class SentaService(WebService):
    # Initialize the LAC model prediction service
    def init_lac_client(self, lac_port, lac_client_config):
        self.lac_reader = LACReader()
        self.senta_reader = SentaReader()
        self.lac_client = Client()
        self.lac_client.load_client_config(lac_client_config)
        self.lac_client.connect(["127.0.0.1:{}".format(lac_port)])

    # Preprocessing for the Senta prediction service. Call order:
    # LAC reader -> LAC model prediction -> result postprocessing -> Senta reader
    def preprocess(self, feed=[], fetch=[]):
        feed_batch = []
        is_batch = True
        words_lod = [0]
        for ins in feed:
            if "words" not in ins:
                raise ("feed data error!")
            feed_data = self.lac_reader.process(ins["words"])
            words_lod.append(words_lod[-1] + len(feed_data))
            feed_batch.append(np.array(feed_data).reshape(len(feed_data), 1))
        words = np.concatenate(feed_batch, axis=0)

        lac_result = self.lac_client.predict(feed={
            "words": words,
            "words.lod": words_lod
        },
                                             fetch=["crf_decode"],
                                             batch=True)
        result_lod = lac_result["crf_decode.lod"]
        feed_batch = []
        words_lod = [0]
        for i in range(len(feed)):
            segs = self.lac_reader.parse_result(
                feed[i]["words"],
                lac_result["crf_decode"][result_lod[i]:result_lod[i + 1]])
            feed_data = self.senta_reader.process(segs)
            feed_batch.append(np.array(feed_data).reshape(len(feed_data), 1))
            words_lod.append(words_lod[-1] + len(feed_data))
        return {
            "words": np.concatenate(feed_batch),
            "words.lod": words_lod
        }, fetch, is_batch
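A plausible launch sequence for SentaService, mirroring the standard WebService flow; the model directories, workdir, and ports are assumptions, not from the original:

# hedged sketch: serving SentaService (paths/ports are assumptions)
senta_service = SentaService(name="senta")
senta_service.load_model_config("senta_bilstm_model")     # hypothetical senta model dir
senta_service.prepare_server(workdir="workdir", port=9292)
senta_service.init_lac_client(
    lac_port=9300,
    lac_client_config="lac_model/serving_server_conf.prototxt")  # assumed path
senta_service.run_rpc_service()
senta_service.run_web_service()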
Example #13
def run_web_client(args):
    headers = {"Content-type": "application/json"}
    url = "http://" + args.connect + "/rec/prediction"
    place = paddle.set_device('gpu' if args.use_gpu else 'cpu')
    args.place = place
    test_dataloader = create_data_loader(args)
    client = Client()
    client.load_client_config(args.client_config)
    feed_names = client.feed_names_
    fetch_names = client.fetch_names_
    start = time.time()
    while True:
        for batch_id, batch_data in enumerate(test_dataloader):
            batch_data = [tensor.numpy().tolist() for tensor in batch_data]
            feed_dict = dict(zip(feed_names, batch_data))
            data = {"feed": [feed_dict], "fetch": fetch_names}
            r = requests.post(url=url, headers=headers, data=json.dumps(data))
            print(r.json())
        if time.time() - start > 30:
            break
Example #14
def __init__(self):
    self.client = Client()
    self.client.load_client_config(
        "../../models/general_PPLCNet_x2_5_lite_v1.0_client/serving_client_conf.prototxt"
    )
    self.client.connect(["127.0.0.1:9294"])

    self.seq = Sequential([
        BGR2RGB(),
        Resize((224, 224)),
        Div(255),
        Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], False),
        Transpose((2, 0, 1))
    ])

    self.searcher, self.id_map = self.init_index()

    self.rec_nms_thresold = 0.05
    self.rec_score_thres = 0.5
    self.feature_normalize = True
    self.return_k = 1
Example #15
def predict_brpc(self, batch_size=1):
    data = np.array([
        0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583,
        -0.0584, 0.6283, 0.4919, 0.1856, 0.0795, -0.0332
    ]).astype("float32")[np.newaxis, :]
    client = Client()
    client.load_client_config(
        "uci_housing_client/serving_client_conf.prototxt")
    client.connect(["127.0.0.1:9494"])
    fetch_list = client.get_fetch_names()
    fetch_map = client.predict(feed={"x": data},
                               fetch=fetch_list,
                               batch=True)
    return fetch_map
Example #16
def predict(data, label_map, batch_size):
    """
    Args:
        sentences (list[str]): each string is a sentence. If have sentences then no need paths
        paths (list[str]): The paths of file which contain sentences. If have paths then no need sentences
    Returns:
        res (list(numpy.ndarray)): The result of sentence, indicate whether each word is replaced, same shape with sentences.
    """
    # TODO: Text tokenization which is done in the serving end not the client end may be better.
    tokenizer = ErnieTinyTokenizer.from_pretrained("ernie-tiny")
    examples = []
    for text in data:
        example = {"text": text}
        input_ids, token_type_ids = convert_example(
            example,
            tokenizer,
            max_seq_length=args.max_seq_length,
            is_test=True)
        examples.append((input_ids, token_type_ids))

    batchify_fn = lambda samples, fn=Tuple(
        Pad(axis=0, pad_val=tokenizer.pad_token_id, dtype='int64'),  # input ids
        Pad(axis=0, pad_val=tokenizer.pad_token_id, dtype='int64'),  # token type ids
    ): fn(samples)

    # Separate the data into batches.
    batches = [
        examples[idx:idx + batch_size]
        for idx in range(0, len(examples), batch_size)
    ]

    # initialize client
    client = Client()
    client.load_client_config(args.client_config_file)
    client.connect([args.server_ip_port])

    results = []
    for batch in batches:
        input_ids, token_type_ids = batchify_fn(batch)
        fetch_map = client.predict(feed={
            "input_ids": input_ids,
            "token_type_ids": token_type_ids
        },
                                   fetch=["save_infer_model/scale_0.tmp_1"],
                                   batch=True)
        output_data = np.array(fetch_map["save_infer_model/scale_0.tmp_1"])
        probs = softmax(output_data, axis=1)
        idx = np.argmax(probs, axis=1)
        idx = idx.tolist()
        labels = [label_map[i] for i in idx]
        results.extend(labels)

    return results
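A hedged call sketch for the helper above; the label map and sentence are made up for illustration, and args is assumed to be parsed elsewhere in the script:

# hypothetical inputs for predict() (label_map/data are made up; args parsed elsewhere)
if __name__ == "__main__":
    label_map = {0: "negative", 1: "positive"}  # assumed binary labels
    data = ["this movie was surprisingly good"]
    print(predict(data, label_map, batch_size=32))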
Example #17
def benckmark_worker(idx, resource):
    """
    Brief:
        benchmark single worker for unet
    Args:
        idx(int): worker idx ,use idx to select backend unet service
        resource(dict): unet serving endpoint dict 
    Returns:
        latency
    TODO:
        http benckmarks
    """
    profile_flags = False
    latency_flags = False
    postprocess = SegPostprocess(2)
    if os.getenv("FLAGS_profile_client"):
        profile_flags = True
    if os.getenv("FLAGS_serving_latency"):
        latency_flags = True
        latency_list = []
    client_handler = Client()
    client_handler.load_client_config(args.model)
    client_handler.connect(
        [resource["endpoint"][idx % len(resource["endpoint"])]])
    start = time.time()
    turns = resource["turns"]
    img_list = resource["img_list"]
    for i in range(turns):
        if args.batch_size >= 1:
            l_start = time.time()
            feed_batch = []
            b_start = time.time()
            for bi in range(args.batch_size):
                feed_batch.append({"image": img_list[bi]})
            b_end = time.time()
            if profile_flags:
                sys.stderr.write(
                    "PROFILE\tpid:{}\tunt_pre_0:{} unet_pre_1:{}\n".format(
                        os.getpid(), int(round(b_start * 1000000)),
                        int(round(b_end * 1000000))))
            result = client_handler.predict(feed=feed_batch,
                                            fetch=["output"])
            #result["filename"] = "./img_data/N0060.jpg"
            #postprocess(result)  # uncomment to measure postprocess time
            l_end = time.time()
            if latency_flags:
                latency_list.append(l_end * 1000 - l_start * 1000)
        else:
            print("unsupport batch size {}".format(args.batch_size))
    end = time.time()
    if latency_flags:
        return [[end - start], latency_list]
    else:
        return [[end - start]]
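Benchmark workers like this one are typically driven with MultiThreadRunner from paddle_serving_client.utils; a sketch under the assumption that img_list and args.thread are prepared elsewhere:

# hedged driver sketch for the worker above (img_list/args.thread are assumptions)
from paddle_serving_client.utils import MultiThreadRunner

multi_thread_runner = MultiThreadRunner()
resource = {"endpoint": ["127.0.0.1:9494"], "turns": 10, "img_list": img_list}
result = multi_thread_runner.run(benckmark_worker, args.thread, resource)
print(result)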
Example #18
def single_func(idx, resource):
    total_number = 0
    profile_flags = False
    latency_flags = False
    if os.getenv("FLAGS_profile_client"):
        profile_flags = True
    if os.getenv("FLAGS_serving_latency"):
        latency_flags = True
        latency_list = []

    if args.request == "rpc":
        client = Client()
        client.load_client_config(args.model)
        client.connect([resource["endpoint"][idx % len(resource["endpoint"])]])
        start = time.time()
        turns = resource["turns"]  # assumed to be supplied by the caller, as in the unet worker above
        for i in range(turns):
            if args.batch_size >= 1:
                l_start = time.time()
                seq = Sequential([
                    File2Image(),
                    Resize(256),
                    CenterCrop(224),
                    RGB2BGR(),
                    Transpose((2, 0, 1)),
                    Div(255),
                    Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225],
                              True)
                ])
                image_file = "daisy.jpg"
                img = seq(image_file)
                feed_data = np.array(img)
                feed_data = np.expand_dims(feed_data,
                                           0).repeat(args.batch_size, axis=0)
                result = client.predict(
                    feed={"image": feed_data},
                    fetch=["save_infer_model/scale_0.tmp_0"],
                    batch=True)
                l_end = time.time()
                if latency_flags:
                    latency_list.append(l_end * 1000 - l_start * 1000)
                total_number = total_number + 1
            else:
                print("unsupport batch size {}".format(args.batch_size))

    else:
        raise ValueError("not implemented {} request".format(args.request))
    end = time.time()
    if latency_flags:
        return [[end - start], latency_list, [total_number]]
    else:
        return [[end - start]]
Example #19
def predict(args, sentences=[], paths=[]):
    """
    Args:
        sentences (list[str]): each string is a sentence. If sentences are
            given, paths are not needed.
        paths (list[str]): paths of files that contain sentences. If paths
            are given, sentences are not needed.
    Returns:
        res (list(numpy.ndarray)): the result for each sentence, indicating
            whether each word is replaced; same shape as sentences.
    """

    # initialize client
    client = Client()
    client.load_client_config(args.client_config_file)
    #"serving_client/serving_client_conf.prototxt")
    client.connect([args.server_ip_port])

    # initialize data
    if sentences != [] and isinstance(sentences, list) and (paths == []
                                                            or paths is None):
        predicted_data = sentences
    elif (sentences == [] or sentences is None) and isinstance(
            paths, list) and paths != []:
        predicted_data = read_sentences(paths)
    else:
        raise TypeError("The input data is inconsistent with expectations.")

    tokenizer = ElectraTokenizer.from_pretrained(args.model_name)
    predicted_input, predicted_sens = get_predicted_input(
        predicted_data, tokenizer, args.max_seq_length, args.batch_size)

    start_time = time.time()
    output_datas = []
    count = 0
    for i, sen in enumerate(predicted_input):
        sen = np.array(sen).astype("int64")

        fetch_map = client.predict(feed={"input_ids": sen},
                                   fetch=["save_infer_model/scale_0.tmp_0"],
                                   batch=True)
        output_data = np.array(fetch_map["save_infer_model/scale_0.tmp_0"])
        output_res = np.argmax(output_data, axis=1)

        print("===== batch {} =====".format(i))
        for j in range(len(predicted_sens[i])):
            print("Input sentence is : {}".format(predicted_sens[i][j]))
            #print("Output logis is : {}".format(output_data[j]))
            print("Output data is : {}".format(output_res[j]))

        count += len(predicted_sens[i])
    print("inference total %s sentences done, total time : %s s" %
          (count, time.time() - start_time))
Example #20
def client2():
    from paddle_serving_client import Client
    import numpy as np
    client = Client()
    client.load_client_config("uci_housing_client/serving_client_conf.prototxt")
    client.connect(["127.0.0.1:9393"])
    data = [0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727,
            -0.1583, -0.0584, 0.6283, 0.4919, 0.1856, 0.0795, -0.0332]
    fetch_map = client.predict(feed={"x": np.array(data).reshape(1, 13, 1)},
                               fetch=["price"])
    print(fetch_map)
    return 1
Example #21
def single_func(idx, resource):
    client = Client()
    client.load_client_config(
        "./uci_housing_client/serving_client_conf.prototxt")
    client.connect(["127.0.0.1:9293", "127.0.0.1:9292"])
    x = [
        0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583, -0.0584,
        0.6283, 0.4919, 0.1856, 0.0795, -0.0332
    ]
    x = np.array(x)
    for i in range(1000):
        fetch_map = client.predict(feed={"x": x}, fetch=["price"])
        if fetch_map is None:
            return [[None]]
    return [[0]]
Example #22
def init_client(self, client_type, client_config, server_endpoints,
                fetch_names):
    if not self.with_serving:
        _LOGGER.debug("{} no client".format(self.name))
        return None
    _LOGGER.debug("{} client_config: {}".format(self.name, client_config))
    _LOGGER.debug("{} fetch_names: {}".format(self.name, fetch_names))
    if client_type == 'brpc':
        client = Client()
        client.load_client_config(client_config)
    elif client_type == 'grpc':
        client = MultiLangClient()
    else:
        raise ValueError("unknown client type: {}".format(client_type))
    client.connect(server_endpoints)
    self._fetch_names = fetch_names
    return client
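For illustration, how an op might call this helper to obtain a brpc client; the config path, endpoint, and fetch name are assumptions:

# hedged usage sketch inside the op (all values are assumptions)
client = self.init_client(
    client_type='brpc',
    client_config='serving_client/serving_client_conf.prototxt',
    server_endpoints=['127.0.0.1:9393'],
    fetch_names=['prob'])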
Example #23
def run_rpc_client(args):
    client = Client()
    client.load_client_config(args.client_config)
    client.connect([args.connect])
    place = paddle.set_device('gpu' if args.use_gpu else 'cpu')
    args.place = place
    test_dataloader = create_data_loader(args)
    feed_names = client.feed_names_
    fetch_names = client.fetch_names_

    for batch_id, batch_data in enumerate(test_dataloader):
        batch_data = [tensor.numpy() for tensor in batch_data]
        feed_dict = dict(zip(feed_names, batch_data))
        fetch_map = client.predict(feed=feed_dict,
                                   fetch=fetch_names,
                                   batch=True)
        print(fetch_map)
Example #24
def run(args):
    client = Client()
    client.load_client_config(
        os.path.join(args.serving_client_path, "serving_client_conf.prototxt"))
    client.connect([args.serving_ip_port])

    seq = Sequential([
        File2Image(),
        RGB2BGR(),
        Div(255),
        Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5], False),
        Transpose((2, 0, 1))
    ])

    img = seq(args.image_path)
    fetch_map = client.predict(
        feed={"x": img}, fetch=["save_infer_model/scale_0.tmp_1"])

    result = fetch_map["save_infer_model/scale_0.tmp_1"]
    color_img = get_pseudo_color_map(result[0])
    color_img.save("./result.png")
    print("The segmentation image is saved in ./result.png")
Example #25
def single_func(idx, resource):
    reader = LACReader()
    start = time.time()
    if args.request == "rpc":
        client = Client()
        client.load_client_config(args.model)
        client.connect([args.endpoint])
        fin = open("jieba_test.txt")
        for line in fin:
            feed_data = reader.process(line)
            fetch_map = client.predict(feed={"words": feed_data},
                                       fetch=["crf_decode"])
    elif args.request == "http":
        fin = open("jieba_test.txt")
        for line in fin:
            req_data = {"words": line.strip(), "fetch": ["crf_decode"]}
            r = requests.post("http://{}/lac/prediction".format(args.endpoint),
                              data={
                                  "words": line.strip(),
                                  "fetch": ["crf_decode"]
                              })
    end = time.time()
    return [[end - start]]
Example #26
def single_func(idx, resource):
    if args.request == "rpc":
        client = Client()
        client.load_client_config(args.model)
        client.connect([args.endpoint])
        train_reader = paddle.batch(paddle.reader.shuffle(
            paddle.dataset.uci_housing.train(), buf_size=500),
                                    batch_size=1)
        start = time.time()
        for data in train_reader():
            fetch_map = client.predict(feed={"x": data[0][0]}, fetch=["price"])
        end = time.time()
        return [[end - start]]
    elif args.request == "http":
        train_reader = paddle.batch(paddle.reader.shuffle(
            paddle.dataset.uci_housing.train(), buf_size=500),
                                    batch_size=1)
        start = time.time()
        for data in train_reader():
            r = requests.post('http://{}/uci/prediction'.format(args.endpoint),
                              data={"x": data[0]})
        end = time.time()
        return [[end - start]]
Example #27
class FeedClassificationClient(object):
    """
    Feed classification client
    """
    def __init__(self, config_file: str, urls: list):
        self.config_file = config_file
        self.urls = urls
        self.feed_var = 'generated_var_17144'
        self.fetch_var = 'translated_layer/scale_0.tmp_0'
        self.client = Client()
        self.connect_to_servers()

    def connect_to_servers(self):
        """
        连接到server
        """
        self.client.load_client_config(self.config_file)
        self.client.connect(self.urls)

    def predict(self, tokens):
        """
        预测
        """
        fetch_map = self.client.predict(feed={self.feed_var: tokens},
                                        fetch=[self.fetch_var])
        logits = fetch_map[self.fetch_var]
        return np.argmax(logits)

    def predict_batch(self, batch_tokens):
        """
        batch预测
        """
        fetch_map = self.client.predict(feed={self.feed_var: batch_tokens},
                                        fetch=[self.fetch_var],
                                        batch=True)
        logits = fetch_map[self.fetch_var]
        return np.argmax(logits, axis=1)
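A minimal usage sketch for the client above; the config path, endpoint, and token array are assumptions:

# hedged usage sketch (config path, endpoint, and tokens are assumptions)
client = FeedClassificationClient(
    "serving_client/serving_client_conf.prototxt", ["127.0.0.1:9393"])
tokens = np.zeros((128,), dtype="int64")  # hypothetical tokenized input
print(client.predict(tokens))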
Example #28
# pylint: disable=doc-string-missing

from paddle_serving_client import Client
import sys
import numpy as np
import paddle

client = Client()
client.load_client_config(sys.argv[1])
client.connect(["127.0.0.1:9393"])
fetch_list = client.get_fetch_names()

test_reader = paddle.batch(paddle.reader.shuffle(
    paddle.dataset.uci_housing.test(), buf_size=500),
                           batch_size=1)

for data in test_reader():
    new_data = np.zeros((1, 13)).astype("float32")
    new_data[0] = data[0][0]
    fetch_map = client.predict(feed={"x": new_data},
                               fetch=fetch_list,
                               batch=True)
    print(fetch_map)
Example #29
def general_ocr_port(img, det_ip_port, rec_ip_port, MyList):

    det_client = Client()
    det_client.load_client_config("./general_ocr_config/det_infer_client/serving_client_conf.prototxt")
    det_client.connect(det_ip_port)

    # start the rec Client
    rec_client = Client()
    rec_client.load_client_config("./general_ocr_config/rec_infer_client/serving_client_conf.prototxt")
    rec_client.connect(rec_ip_port)
    # preprocessing
    feed, fetch, tmp_args = det_preprocess(img)
    # inference
    fetch_map = det_client.predict(feed, fetch)
    outputs = [fetch_map[x] for x in fetch]

    # postprocessing
    dt_boxes = det_postprocess(outputs, tmp_args)
    # print(dt_boxes.shape)
    # crop out the detected boxes
    img_crop_list = []
    dt_boxes = sorted_boxes(dt_boxes)
    for bno in range(len(dt_boxes)):
        tmp_box = copy.deepcopy(dt_boxes[bno])
        img_crop = get_rotate_crop_image(img, tmp_box)
        img_crop_list.append(img_crop)

    # recognize the crops in batches
    batch_size = 8
    batch_num = len(img_crop_list) // batch_size + 1
    text_list = []
    score_list =[]
    for i in range(batch_num):
        if i == (batch_num-1):
            img_batch = img_crop_list[i*batch_size:]
        else :
            img_batch = img_crop_list[i*batch_size:(i+1)*batch_size]
        if(len(img_batch)==0):
            continue
        feed, fetch, tmp_args = rec_preprocess(img_batch)
        # inference
        fetch_map = rec_client.predict(feed, fetch)
        # print(fetch_map)
        outputs = [fetch_map[x] for x in fetch]
        for x in fetch_map.keys():
            if ".lod" in x:
                # print(x, fetch_map[x])
                tmp_args[x] = fetch_map[x]
        # postprocessing
        rec_res = rec_postprocess(outputs, tmp_args)
        for x in rec_res:
            text_list.append(x[0])
            score_list.append(x[1])
    MyList.append(text_list)
    det_client.release()
    rec_client.release()
    return
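Given the MyList result parameter and the explicit client release, this helper looks designed to run in a worker process; a hedged driver sketch follows (the image path and ports are assumptions):

# hedged driver sketch for general_ocr_port (image path and ports are assumptions)
import cv2
from multiprocessing import Manager

img = cv2.imread("test.jpg")
manager = Manager()
results = manager.list()
general_ocr_port(img, ["127.0.0.1:9293"], ["127.0.0.1:9294"], results)
print(list(results))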
Example #30
import sys
import numpy as np
from paddle_serving_client import Client
from paddle_serving_app.reader import *
import cv2

preprocess = DetectionSequential([
    DetectionFile2Image(),
    DetectionResize((300, 300), False, interpolation=cv2.INTER_LINEAR),
    DetectionNormalize([104.0, 117.0, 123.0], [1.0, 1.0, 1.0], False),
    DetectionTranspose((2, 0, 1)),
])

postprocess = RCNNPostprocess("label_list.txt", "output")
client = Client()

client.load_client_config("serving_client/serving_client_conf.prototxt")
client.connect(['127.0.0.1:9494'])

im, im_info = preprocess(sys.argv[1])
fetch_map = client.predict(
    feed={
        "image": im,
        "im_shape": np.array(list(im.shape[1:])).reshape(-1),
        "scale_factor": im_info['scale_factor'],
    },
    fetch=["save_infer_model/scale_0.tmp_1"],
    batch=False)
print(fetch_map)
fetch_map["image"] = sys.argv[1]