def general_ocr_port(img, det_ip_port, rec_ip_port, MyList):
    """Run two-stage OCR (detection then recognition) via two serving endpoints.

    Args:
        img: input image (as expected by det_preprocess) to run OCR on.
        det_ip_port: endpoint list for the text-detection server.
        rec_ip_port: endpoint list for the text-recognition server.
        MyList: output container; the recognized text list is appended to it.
    """
    # Text-detection client.
    det_client = Client()
    det_client.load_client_config(
        "./general_ocr_config/det_infer_client/serving_client_conf.prototxt")
    det_client.connect(det_ip_port)
    # Text-recognition client.
    rec_client = Client()
    rec_client.load_client_config(
        "./general_ocr_config/rec_infer_client/serving_client_conf.prototxt")
    rec_client.connect(rec_ip_port)

    # Detection: preprocess -> remote inference -> postprocess into boxes.
    feed, fetch, tmp_args = det_preprocess(img)
    fetch_map = det_client.predict(feed, fetch)
    outputs = [fetch_map[x] for x in fetch]
    dt_boxes = det_postprocess(outputs, tmp_args)

    # Crop out every detected text box, processed in reading order.
    img_crop_list = []
    dt_boxes = sorted_boxes(dt_boxes)
    for bno in range(len(dt_boxes)):
        tmp_box = copy.deepcopy(dt_boxes[bno])
        img_crop = get_rotate_crop_image(img, tmp_box)
        img_crop_list.append(img_crop)

    # Recognize the crops in mini-batches of `batch_size`.
    batch_size = 8
    # BUG FIX: the original computed this value and then immediately
    # overwrote it with len(img_crop_list), looping once per crop and
    # wasting most iterations on empty slices.
    batch_num = len(img_crop_list) // batch_size + 1
    text_list = []
    score_list = []
    for i in range(batch_num):
        if i == batch_num - 1:
            img_batch = img_crop_list[i * batch_size:]
        else:
            img_batch = img_crop_list[i * batch_size:(i + 1) * batch_size]
        if len(img_batch) == 0:
            continue
        feed, fetch, tmp_args = rec_preprocess(img_batch)
        fetch_map = rec_client.predict(feed, fetch)
        outputs = [fetch_map[x] for x in fetch]
        # Propagate LoD (variable-length) tensors needed by the postprocessor.
        for x in fetch_map.keys():
            if ".lod" in x:
                tmp_args[x] = fetch_map[x]
        rec_res = rec_postprocess(outputs, tmp_args)
        for x in rec_res:
            text_list.append(x[0])
            score_list.append(x[1])
    MyList.append(text_list)
    det_client.release()
    rec_client.release()
    return
class TextSystemHelper(TextSystem):
    """OCR pipeline helper: detection -> (optional) angle classification ->
    recognition, with detection/classification served by local Paddle Serving
    clients and recognition prepared for an external caller."""

    def __init__(self, args):
        # Helper objects handle pre/post-processing only; inference goes
        # through the serving clients created below.
        self.text_detector = TextDetectorHelper(args)
        self.text_recognizer = TextRecognizerHelper(args)
        self.use_angle_cls = args.use_angle_cls
        if self.use_angle_cls:
            # Angle-classification client (fixed local endpoint).
            self.clas_client = Client()
            self.clas_client.load_client_config(
                os.path.join(args.cls_client_dir,
                             "serving_client_conf.prototxt"))
            self.clas_client.connect(["127.0.0.1:9294"])
            self.text_classifier = TextClassifierHelper(args)
        # Detection client (fixed local endpoint).
        self.det_client = Client()
        self.det_client.load_client_config(
            os.path.join(args.det_client_dir, "serving_client_conf.prototxt"))
        self.det_client.connect(["127.0.0.1:9293"])
        # Fetch targets of the recognition model, returned by preprocess().
        self.fetch = [
            "save_infer_model/scale_0.tmp_0", "save_infer_model/scale_1.tmp_0"
        ]

    def preprocess(self, img):
        """Detect text boxes in `img`, crop/rotate them (optionally fixing
        orientation via the classifier) and build the recognition feed.

        Returns:
            (feed, fetch, tmp_args) for the recognition stage, or
            (None, None) when detection finds no boxes (note: a 2-tuple,
            unlike the normal 3-tuple — callers must handle both).
        """
        feed, fetch, self.tmp_args = self.text_detector.preprocess(img)
        fetch_map = self.det_client.predict(feed, fetch)
        outputs = [fetch_map[x] for x in fetch]
        dt_boxes = self.text_detector.postprocess(outputs, self.tmp_args)
        if dt_boxes is None:
            return None, None
        img_crop_list = []
        dt_boxes = sorted_boxes(dt_boxes)
        # Keep the sorted boxes so callers can retrieve the box geometry.
        self.dt_boxes = dt_boxes
        for bno in range(len(dt_boxes)):
            tmp_box = copy.deepcopy(dt_boxes[bno])
            img_crop = self.get_rotate_crop_image(img, tmp_box)
            img_crop_list.append(img_crop)
        if self.use_angle_cls:
            feed, fetch, self.tmp_args = self.text_classifier.preprocess(
                img_crop_list)
            fetch_map = self.clas_client.predict(feed, fetch)
            outputs = [fetch_map[x] for x in self.text_classifier.fetch]
            # Copy LoD (variable-length) tensors needed by postprocess.
            for x in fetch_map.keys():
                if ".lod" in x:
                    self.tmp_args[x] = fetch_map[x]
            img_crop_list, _ = self.text_classifier.postprocess(
                outputs, self.tmp_args)
        feed, fetch, self.tmp_args = self.text_recognizer.preprocess(
            img_crop_list)
        return feed, self.fetch, self.tmp_args

    def postprocess(self, outputs, args):
        """Decode recognition outputs via the recognizer helper."""
        return self.text_recognizer.postprocess(outputs, args)
class OCRService(WebService):
    """OCR web service: a local detection client locates text boxes and the
    hosted recognition model decodes the cropped regions."""

    def init_det_client(self, det_port, det_client_config):
        """Build the detection preprocessing pipeline and connect the
        detection serving client on 127.0.0.1:`det_port`."""
        self.det_preprocess = Sequential([
            ResizeByFactor(32, 960), Div(255), Normalize(
                [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]), Transpose(
                    (2, 0, 1))
        ])
        self.det_client = Client()
        self.det_client.load_client_config(det_client_config)
        self.det_client.connect(["127.0.0.1:{}".format(det_port)])
        self.ocr_reader = OCRReader()

    def preprocess(self, feed=[], fetch=[]):
        """Decode the base64 image, detect/sort/crop text boxes, and return
        the batched recognition feed plus fetch targets."""
        data = base64.b64decode(feed[0]["image"].encode('utf8'))
        # BUG FIX: np.fromstring is deprecated (and removed for binary input
        # in newer NumPy); np.frombuffer is the supported equivalent.
        data = np.frombuffer(data, np.uint8)
        im = cv2.imdecode(data, cv2.IMREAD_COLOR)
        ori_h, ori_w, _ = im.shape
        det_img = self.det_preprocess(im)
        det_out = self.det_client.predict(feed={"image": det_img},
                                          fetch=["concat_1.tmp_0"],
                                          batch=False)
        _, new_h, new_w = det_img.shape
        filter_func = FilterBoxes(10, 10)
        post_func = DBPostProcess({
            "thresh": 0.3,
            "box_thresh": 0.5,
            "max_candidates": 1000,
            "unclip_ratio": 1.5,
            "min_size": 3
        })
        sorted_boxes = SortedBoxes()
        # Ratios map detection coordinates back to the original image size.
        ratio_list = [float(new_h) / ori_h, float(new_w) / ori_w]
        dt_boxes_list = post_func(det_out["concat_1.tmp_0"], [ratio_list])
        dt_boxes = filter_func(dt_boxes_list[0], [ori_h, ori_w])
        dt_boxes = sorted_boxes(dt_boxes)
        get_rotate_crop_image = GetRotateCropImage()
        feed_list = []
        img_list = []
        # Track the widest aspect ratio so all crops can share one padded
        # recognition width.
        max_wh_ratio = 0
        for i, dtbox in enumerate(dt_boxes):
            boximg = get_rotate_crop_image(im, dt_boxes[i])
            img_list.append(boximg)
            h, w = boximg.shape[0:2]
            wh_ratio = w * 1.0 / h
            max_wh_ratio = max(max_wh_ratio, wh_ratio)
        for img in img_list:
            norm_img = self.ocr_reader.resize_norm_img(img, max_wh_ratio)
            feed_list.append(norm_img[np.newaxis, :])
        feed_batch = {"image": np.concatenate(feed_list, axis=0)}
        fetch = ["ctc_greedy_decoder_0.tmp_0", "softmax_0.tmp_0"]
        return feed_batch, fetch, True

    def postprocess(self, feed={}, fetch=[], fetch_map=None):
        """Decode CTC outputs into a list of recognized strings."""
        rec_res = self.ocr_reader.postprocess(fetch_map, with_score=True)
        res_lst = []
        for res in rec_res:
            res_lst.append(res[0])
        res = {"res": res_lst}
        return res
def single_func(idx, resource):
    """Benchmark one worker: 1000 single-sample CTR requests over RPC.

    Args:
        idx: worker index, used to pick an endpoint round-robin.
        resource: dict with an "endpoint" list.

    Returns:
        [[elapsed_seconds]].
    """
    batch = 1
    buf_size = 100
    dataset = criteo.CriteoDataset()
    dataset.setup(1000001)
    test_filelists = [
        "./raw_data/part-%d" % x for x in range(len(os.listdir("./raw_data")))
    ]
    reader = dataset.infer_reader(test_filelists[len(test_filelists) - 40:],
                                  batch, buf_size)
    if args.request == "rpc":
        fetch = ["prob"]
        client = Client()
        client.load_client_config(args.model)
        client.connect([resource["endpoint"][idx % len(resource["endpoint"])]])
        start = time.time()
        for i in range(1000):
            if args.batch_size == 1:
                # BUG FIX: generators have no .next() in Python 3; use the
                # next() builtin. (As in the original, a fresh generator is
                # created each turn, so the first record is always sent.)
                data = next(reader())
                feed_dict = {}
                # BUG FIX: a distinct index so the outer benchmark counter
                # `i` is not shadowed by the feature loop.
                for j in range(1, 27):
                    feed_dict["sparse_{}".format(j - 1)] = data[0][j]
                result = client.predict(feed=feed_dict, fetch=fetch)
            else:
                print("unsupport batch size {}".format(args.batch_size))
    elif args.request == "http":
        # BUG FIX: raising a plain string is a TypeError in Python 3.
        raise NotImplementedError("Not support http service.")
    end = time.time()
    return [[end - start]]
def single_func(idx, resource):
    """Benchmark one worker over the UCI housing training set.

    Sends every training sample once (batch size 1) over RPC or HTTP and
    returns [[total_seconds], per_request_latency_ms, [sample_count]].
    """
    train_reader = paddle.batch(
        paddle.reader.shuffle(
            paddle.dataset.uci_housing.train(), buf_size=500),
        batch_size=1)
    total_number = sum(1 for _ in train_reader())
    latency_list = []
    if args.request == "rpc":
        rpc_client = Client()
        rpc_client.load_client_config(args.model)
        rpc_client.connect([args.endpoint])
        start = time.time()
        for sample in train_reader():
            req_begin = time.time()
            fetch_map = rpc_client.predict(feed={"x": sample[0][0]},
                                           fetch=["price"])
            req_end = time.time()
            latency_list.append(req_end * 1000 - req_begin * 1000)
        end = time.time()
        return [[end - start], latency_list, [total_number]]
    elif args.request == "http":
        # A fresh reader: the first pass was consumed by the count above.
        train_reader = paddle.batch(
            paddle.reader.shuffle(
                paddle.dataset.uci_housing.train(), buf_size=500),
            batch_size=1)
        start = time.time()
        for sample in train_reader():
            req_begin = time.time()
            r = requests.post(
                'http://{}/uci/prediction'.format(args.endpoint),
                data={"x": sample[0]})
            req_end = time.time()
            latency_list.append(req_end * 1000 - req_begin * 1000)
        end = time.time()
        return [[end - start], latency_list, [total_number]]
class MainbodyDetect():
    """PP-ShiTu mainbody detector served by Paddle Serving.

    Wraps preprocessing, the remote inference call and postprocessing.
    Postprocessing keeps at most `max_det_result` boxes, filters them by
    confidence and always appends the full-image box; no NMS is applied.
    """

    def __init__(self):
        # Decode -> normalize -> 640x640 resize -> CHW layout.
        self.preprocess = DetectionSequential([
            DetectionFile2Image(),
            DetectionNormalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225],
                               True),
            DetectionResize((640, 640), False, interpolation=2),
            DetectionTranspose((2, 0, 1))
        ])
        self.client = Client()
        self.client.load_client_config(
            "../../models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_client/serving_client_conf.prototxt"
        )
        self.client.connect(['127.0.0.1:9293'])
        self.max_det_result = 5
        self.conf_threshold = 0.2

    def predict(self, imgpath):
        """Run detection on the image at `imgpath`; return filtered boxes."""
        image_tensor, image_info = self.preprocess(imgpath)
        shape_feed = np.array(image_tensor.shape[1:]).reshape(-1)
        scale_feed = np.array(list(image_info['scale_factor'])).reshape(-1)
        response = self.client.predict(
            feed={
                "image": image_tensor,
                "im_shape": shape_feed,
                "scale_factor": scale_feed,
            },
            fetch=["save_infer_model/scale_0.tmp_1"],
            batch=False)
        return self.postprocess(response, imgpath)

    def postprocess(self, fetch_map, imgpath):
        """Cap, threshold and augment the raw detections.

        Keeps at most `max_det_result` rows, drops rows whose score
        (column 1) is not above `conf_threshold`, then appends one box
        covering the whole original image.
        """
        detections = fetch_map["save_infer_model/scale_0.tmp_1"]
        # Step 1: cap the number of candidate boxes.
        if len(detections) > self.max_det_result:
            kept = detections[:self.max_det_result]
        else:
            kept = detections
        # Step 2: confidence filtering on the score column.
        survivors = [
            kept[i, :] for i in range(kept.shape[0])
            if kept[i, 1] > self.conf_threshold
        ]
        # Step 3: always include the full original image as a candidate.
        origin_img = cv2.imread(imgpath)
        survivors.append(
            np.array([0, 1.0, 0, 0, origin_img.shape[1], origin_img.shape[0]]))
        return np.array(survivors)
def single_func(idx, resource):
    """Benchmark one worker against the IMDB sentiment service.

    Sends 1000 LoD batches over RPC, or one batch over HTTP; returns
    [[elapsed_seconds]].
    """
    imdb_dataset = IMDBDataset()
    imdb_dataset.load_resource("./imdb.vocab")
    dataset = []
    with open("./test_data/part-0") as fin:
        for line in fin:
            dataset.append(line.strip())
    profile_flags = False
    latency_flags = False
    if os.getenv("FLAGS_profile_client"):
        profile_flags = True
    if os.getenv("FLAGS_serving_latency"):
        latency_flags = True
        latency_list = []
    start = time.time()
    if args.request == "rpc":
        client = Client()
        client.load_client_config(args.model)
        client.connect([args.endpoint])
        for i in range(1000):
            if args.batch_size >= 1:
                # Build one LoD batch: all word ids concatenated in "words",
                # cumulative offsets in "words.lod".
                feed = {"words": [], "words.lod": [0]}
                for bi in range(args.batch_size):
                    word_ids, label = imdb_dataset.get_words_and_label(
                        dataset[bi])
                    feed["words.lod"].append(feed["words.lod"][-1] +
                                             len(word_ids))
                    feed["words"].extend(word_ids)
                feed["words"] = np.array(feed["words"]).reshape(
                    len(feed["words"]), 1)
                result = client.predict(feed=feed,
                                        fetch=["prediction"],
                                        batch=True)
                if result is None:
                    # BUG FIX: raising a plain string is a TypeError in Py3.
                    raise RuntimeError("predict failed.")
            else:
                print("unsupport batch size {}".format(args.batch_size))
    elif args.request == "http":
        if args.batch_size >= 1:
            feed_batch = []
            for bi in range(args.batch_size):
                feed_batch.append({"words": dataset[bi]})
            r = requests.post("http://{}/imdb/prediction".format(args.endpoint),
                              json={
                                  "feed": feed_batch,
                                  "fetch": ["prediction"]
                              })
            if r.status_code != 200:
                print('HTTP status code -ne 200')
                # BUG FIX: string-raise replaced with a real exception.
                raise RuntimeError("predict failed.")
        else:
            print("unsupport batch size {}".format(args.batch_size))
    end = time.time()
    return [[end - start]]
def predict(data, label_map, batch_size): """ Args: sentences (list[str]): each string is a sentence. If have sentences then no need paths paths (list[str]): The paths of file which contain sentences. If have paths then no need sentences Returns: res (list(numpy.ndarray)): The result of sentence, indicate whether each word is replaced, same shape with sentences. """ # TODO: Text tokenization which is done in the serving end not the client end may be better. tokenizer = ErnieTinyTokenizer.from_pretrained("ernie-tiny") examples = [] for text in data: example = {"text": text} input_ids, token_type_ids = convert_example( example, tokenizer, max_seq_length=args.max_seq_length, is_test=True) examples.append((input_ids, token_type_ids)) batchify_fn = lambda samples, fn=Tuple( Pad(axis=0, pad_val=tokenizer.pad_token_id, dtype='int64' ), # input ids Pad(axis=0, pad_val=tokenizer.pad_token_id, dtype='int64' ), # token type ids ): fn(samples) # Seperates data into some batches. batches = [ examples[idx:idx + batch_size] for idx in range(0, len(examples), batch_size) ] # initialize client client = Client() client.load_client_config(args.client_config_file) client.connect([args.server_ip_port]) results = [] for batch in batches: input_ids, token_type_ids = batchify_fn(batch) fetch_map = client.predict(feed={ "input_ids": input_ids, "token_type_ids": token_type_ids }, fetch=["save_infer_model/scale_0.tmp_1"], batch=True) output_data = np.array(fetch_map["save_infer_model/scale_0.tmp_1"]) probs = softmax(output_data, axis=1) idx = np.argmax(probs, axis=1) idx = idx.tolist() labels = [label_map[i] for i in idx] results.extend(labels) return results
def benckmark_worker(idx, resource):
    """
    Brief: benchmark single worker for unet
    Args:
        idx(int): worker idx, used to select the backend unet endpoint
        resource(dict): unet serving resources ("endpoint", "turns",
            "img_list")
    Returns:
        [[total_seconds]] or, when FLAGS_serving_latency is set,
        [[total_seconds], latency_list]
    TODO:
        http benchmarks
    """
    profile_flags = False
    latency_flags = False
    postprocess = SegPostprocess(2)
    if os.getenv("FLAGS_profile_client"):
        profile_flags = True
    if os.getenv("FLAGS_serving_latency"):
        latency_flags = True
        latency_list = []
    client_handler = Client()
    client_handler.load_client_config(args.model)
    client_handler.connect(
        [resource["endpoint"][idx % len(resource["endpoint"])]])
    start = time.time()
    turns = resource["turns"]
    img_list = resource["img_list"]
    for i in range(turns):
        if args.batch_size >= 1:
            l_start = time.time()
            feed_batch = []
            b_start = time.time()
            for bi in range(args.batch_size):
                feed_batch.append({"image": img_list[bi]})
            b_end = time.time()
            if profile_flags:
                sys.stderr.write(
                    "PROFILE\tpid:{}\tunt_pre_0:{} unet_pre_1:{}\n".format(
                        os.getpid(),
                        int(round(b_start * 1000000)),
                        int(round(b_end * 1000000))))
            # BUG FIX: feed_batch was assembled but never sent — the original
            # predicted on only the last image, so batch_size had no effect
            # on the request actually issued.
            result = client_handler.predict(feed=feed_batch, fetch=["output"])
            # postprocess(result)  # uncomment to include post-process time
            l_end = time.time()
            if latency_flags:
                latency_list.append(l_end * 1000 - l_start * 1000)
        else:
            print("unsupport batch size {}".format(args.batch_size))
    end = time.time()
    if latency_flags:
        return [[end - start], latency_list]
    else:
        return [[end - start]]
def single_func(idx, resource):
    """Benchmark one worker: repeated image-classification RPC requests.

    Returns [[elapsed]] or, when FLAGS_serving_latency is set,
    [[elapsed], latency_list, [request_count]].
    """
    total_number = 0
    profile_flags = False
    latency_flags = False
    if os.getenv("FLAGS_profile_client"):
        profile_flags = True
    if os.getenv("FLAGS_serving_latency"):
        latency_flags = True
        latency_list = []
    if args.request == "rpc":
        client = Client()
        client.load_client_config(args.model)
        client.connect([resource["endpoint"][idx % len(resource["endpoint"])]])
        # PERF FIX: the preprocessing pipeline and the input batch are
        # loop-invariant; the original rebuilt them (and re-decoded the same
        # JPEG) inside the timed loop, so the reported latency was dominated
        # by redundant client-side work instead of serving time.
        seq = Sequential([
            File2Image(), Resize(256), CenterCrop(224), RGB2BGR(), Transpose(
                (2, 0, 1)), Div(255), Normalize(
                    [0.485, 0.456, 0.406], [0.229, 0.224, 0.225], True)
        ])
        image_file = "daisy.jpg"
        img = seq(image_file)
        feed_data = np.array(img)
        feed_data = np.expand_dims(feed_data, 0).repeat(
            args.batch_size, axis=0)
        start = time.time()
        for i in range(turns):
            if args.batch_size >= 1:
                l_start = time.time()
                result = client.predict(
                    feed={"image": feed_data},
                    fetch=["save_infer_model/scale_0.tmp_0"],
                    batch=True)
                l_end = time.time()
                if latency_flags:
                    latency_list.append(l_end * 1000 - l_start * 1000)
                total_number = total_number + 1
            else:
                print("unsupport batch size {}".format(args.batch_size))
    else:
        raise ValueError("not implemented {} request".format(args.request))
    end = time.time()
    if latency_flags:
        return [[end - start], latency_list, [total_number]]
    else:
        return [[end - start]]
def predict(args, sentences=[], paths=[]):
    """Run electra classification and print per-sentence predictions.

    Args:
        args: parsed CLI options (client config, endpoint, model name, ...).
        sentences (list[str]): each string is a sentence. If sentences are
            given then paths must be empty, and vice versa. (Defaults are
            read-only here, so the mutable-default idiom is safe.)
        paths (list[str]): paths of files containing sentences.

    Raises:
        TypeError: when neither or both of sentences/paths are provided.
    """
    # initialize client
    client = Client()
    client.load_client_config(args.client_config_file)
    client.connect([args.server_ip_port])

    # initialize data: exactly one of `sentences` / `paths` must be given.
    if sentences != [] and isinstance(sentences, list) and (paths == [] or
                                                            paths is None):
        predicted_data = sentences
    elif (sentences == [] or sentences is None) and isinstance(
            paths, list) and paths != []:
        predicted_data = read_sentences(paths)
    else:
        raise TypeError("The input data is inconsistent with expectations.")

    tokenizer = ElectraTokenizer.from_pretrained(args.model_name)
    predicted_input, predicted_sens = get_predicted_input(
        predicted_data, tokenizer, args.max_seq_length, args.batch_size)

    start_time = time.time()
    # (removed the unused `output_datas` accumulator from the original)
    count = 0
    for i, sen in enumerate(predicted_input):
        sen = np.array(sen).astype("int64")
        fetch_map = client.predict(feed={"input_ids": sen},
                                   fetch=["save_infer_model/scale_0.tmp_0"],
                                   batch=True)
        output_data = np.array(fetch_map["save_infer_model/scale_0.tmp_0"])
        # Argmax over the class axis gives the predicted label index.
        output_res = np.argmax(output_data, axis=1)
        print("===== batch {} =====".format(i))
        for j in range(len(predicted_sens[i])):
            print("Input sentence is : {}".format(predicted_sens[i][j]))
            print("Output data is : {}".format(output_res[j]))
        count += len(predicted_sens[i])
    print("inference total %s sentences done, total time : %s s" %
          (count, time.time() - start_time))
def predict_brpc(self, batch_size=1):
    """Send one UCI-housing sample over bRPC and return the fetch map."""
    sample = [
        0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583, -0.0584,
        0.6283, 0.4919, 0.1856, 0.0795, -0.0332
    ]
    # Shape (1, 13): a single-row float32 batch.
    data = np.array(sample).astype("float32")[np.newaxis, :]
    client = Client()
    client.load_client_config(
        "uci_housing_client/serving_client_conf.prototxt")
    client.connect(["127.0.0.1:9494"])
    fetch_list = client.get_fetch_names()
    return client.predict(feed={"x": data}, fetch=fetch_list, batch=True)
class FeedClassificationClient(object):
    """Feed-classification client backed by a Paddle Serving endpoint."""

    def __init__(self, config_file: str, urls: list):
        self.config_file = config_file
        self.urls = urls
        # Feed/fetch variable names exported by the serving model.
        self.feed_var = 'generated_var_17144'
        self.fetch_var = 'translated_layer/scale_0.tmp_0'
        self.client = Client()
        self.connect_to_servers()

    def connect_to_servers(self):
        """Load the client config and connect to the serving endpoints."""
        self.client.load_client_config(self.config_file)
        self.client.connect(self.urls)

    def predict(self, tokens):
        """Return the predicted class index for one token sequence."""
        response = self.client.predict(feed={self.feed_var: tokens},
                                       fetch=[self.fetch_var])
        return np.argmax(response[self.fetch_var])

    def predict_batch(self, batch_tokens):
        """Return per-sample class indices for a batch of token sequences."""
        response = self.client.predict(feed={self.feed_var: batch_tokens},
                                       fetch=[self.fetch_var],
                                       batch=True)
        return np.argmax(response[self.fetch_var], axis=1)
def single_func(idx, resource):
    """Fire 1000 single-sample requests at the two local endpoints.

    Returns [[0]] on success, or [[None]] as soon as a predict call fails.
    """
    client = Client()
    client.load_client_config(
        "./uci_housing_client/serving_client_conf.prototxt")
    client.connect(["127.0.0.1:9293", "127.0.0.1:9292"])
    sample = np.array([
        0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583, -0.0584,
        0.6283, 0.4919, 0.1856, 0.0795, -0.0332
    ])
    for _ in range(1000):
        if client.predict(feed={"x": sample}, fetch=["price"]) is None:
            return [[None]]
    return [[0]]
def run_rpc_client(args):
    """Stream every batch of the test data loader to the RPC server and
    print each returned fetch map."""
    client = Client()
    client.load_client_config(args.client_config)
    client.connect([args.connect])

    device = paddle.set_device('gpu' if args.use_gpu else 'cpu')
    args.place = device
    test_dataloader = create_data_loader(args)

    feed_names = client.feed_names_
    fetch_names = client.fetch_names_
    for batch_id, batch_data in enumerate(test_dataloader):
        # Paddle tensors -> numpy, paired with the model's feed names.
        numpy_batch = [tensor.numpy() for tensor in batch_data]
        fetch_map = client.predict(feed=dict(zip(feed_names, numpy_batch)),
                                   fetch=fetch_names,
                                   batch=True)
        print(fetch_map)
class SentaService(WebService):
    """Sentiment web service: LAC word segmentation feeds the Senta model."""

    # Initialize the LAC word-segmentation client used during preprocessing.
    def init_lac_client(self, lac_port, lac_client_config):
        self.lac_reader = LACReader()
        self.senta_reader = SentaReader()
        self.lac_client = Client()
        self.lac_client.load_client_config(lac_client_config)
        self.lac_client.connect(["127.0.0.1:{}".format(lac_port)])

    # Senta preprocessing chain: LAC reader -> LAC predict -> parse the
    # segmentation result -> Senta reader; returns the Senta LoD batch.
    def preprocess(self, feed=[], fetch=[]):
        feed_batch = []
        is_batch = True
        words_lod = [0]
        for ins in feed:
            if "words" not in ins:
                # BUG FIX: raising a plain string is a TypeError in Python 3;
                # raise a real exception with the same message.
                raise ValueError("feed data error!")
            feed_data = self.lac_reader.process(ins["words"])
            words_lod.append(words_lod[-1] + len(feed_data))
            feed_batch.append(np.array(feed_data).reshape(len(feed_data), 1))
        words = np.concatenate(feed_batch, axis=0)

        lac_result = self.lac_client.predict(feed={
            "words": words,
            "words.lod": words_lod
        },
                                             fetch=["crf_decode"],
                                             batch=True)
        # LoD offsets delimit each instance's slice of the flat result.
        result_lod = lac_result["crf_decode.lod"]
        feed_batch = []
        words_lod = [0]
        for i in range(len(feed)):
            segs = self.lac_reader.parse_result(
                feed[i]["words"],
                lac_result["crf_decode"][result_lod[i]:result_lod[i + 1]])
            feed_data = self.senta_reader.process(segs)
            feed_batch.append(np.array(feed_data).reshape(len(feed_data), 1))
            words_lod.append(words_lod[-1] + len(feed_data))
        return {
            "words": np.concatenate(feed_batch),
            "words.lod": words_lod
        }, fetch, is_batch
def run(args):
    """Segment one image via the serving endpoint and save ./result.png."""
    client = Client()
    client.load_client_config(
        os.path.join(args.serving_client_path, "serving_client_conf.prototxt"))
    client.connect([args.serving_ip_port])

    # Decode -> BGR -> [0,1] -> normalize to [-1,1] -> CHW layout.
    transform = Sequential([
        File2Image(), RGB2BGR(), Div(255),
        Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5], False),
        Transpose((2, 0, 1))
    ])
    tensor = transform(args.image_path)
    fetch_map = client.predict(feed={"x": tensor},
                               fetch=["save_infer_model/scale_0.tmp_1"])
    mask = fetch_map["save_infer_model/scale_0.tmp_1"]
    color_img = get_pseudo_color_map(mask[0])
    color_img.save("./result.png")
    print("The segmentation image is saved in ./result.png")
def single_func(idx, resource):
    """Benchmark one worker on the LAC word-segmentation service.

    Sends one request per line of jieba_test.txt over RPC or HTTP and
    returns [[elapsed_seconds]].
    """
    reader = LACReader()
    start = time.time()
    if args.request == "rpc":
        client = Client()
        client.load_client_config(args.model)
        client.connect([args.endpoint])
        # BUG FIX: the file handle was opened without ever being closed;
        # a with-block guarantees release.
        with open("jieba_test.txt") as fin:
            for line in fin:
                feed_data = reader.process(line)
                fetch_map = client.predict(feed={"words": feed_data},
                                           fetch=["crf_decode"])
    elif args.request == "http":
        # (removed the unused duplicate `req_data` dict from the original)
        with open("jieba_test.txt") as fin:
            for line in fin:
                r = requests.post(
                    "http://{}/lac/prediction".format(args.endpoint),
                    data={
                        "words": line.strip(),
                        "fetch": ["crf_decode"]
                    })
    end = time.time()
    return [[end - start]]
def single_func(idx, resource):
    """Time one full pass of the UCI housing training set (batch size 1)
    through either the RPC or the HTTP serving endpoint."""
    if args.request == "rpc":
        client = Client()
        client.load_client_config(args.model)
        client.connect([args.endpoint])
        reader = paddle.batch(
            paddle.reader.shuffle(
                paddle.dataset.uci_housing.train(), buf_size=500),
            batch_size=1)
        start = time.time()
        for sample in reader():
            fetch_map = client.predict(feed={"x": sample[0][0]},
                                       fetch=["price"])
        end = time.time()
        return [[end - start]]
    elif args.request == "http":
        reader = paddle.batch(
            paddle.reader.shuffle(
                paddle.dataset.uci_housing.train(), buf_size=500),
            batch_size=1)
        start = time.time()
        for sample in reader():
            r = requests.post(
                'http://{}/uci/prediction'.format(args.endpoint),
                data={"x": sample[0]})
        end = time.time()
        return [[end - start]]
import sys
import os
import io
import numpy as np

client = Client()
client.load_client_config(sys.argv[1])
client.connect(["127.0.0.1:9292"])
reader = LACReader()

# Segment each stdin line and print the segmentation of the first sample.
for line in sys.stdin:
    if len(line) <= 0:
        continue
    feed_data = reader.process(line)
    if len(feed_data) <= 0:
        continue
    print(feed_data)
    # The input is duplicated to exercise a two-sample LoD batch.
    doubled = np.array(feed_data + feed_data).reshape(len(feed_data) * 2, 1)
    fetch_map = client.predict(feed={
        "words": doubled,
        "words.lod": [0, len(feed_data), 2 * len(feed_data)]
    },
                               fetch=["crf_decode"],
                               batch=True)
    print(fetch_map)
    # LoD offsets delimit the first sample's slice of the flat result.
    begin = fetch_map['crf_decode.lod'][0]
    end = fetch_map['crf_decode.lod'][1]
    segs = reader.parse_result(line, fetch_map["crf_decode"][begin:end])
    print("word_seg: " + "|".join(str(words) for words in segs))
import sys
import numpy as np
from paddle_serving_client import Client
from paddle_serving_app.reader import *
import cv2

# Detection preprocessing: decode -> normalize -> keep-ratio resize to
# (800, 1333) -> CHW -> pad to a multiple of 128.
preprocess = DetectionSequential([
    DetectionFile2Image(),
    DetectionNormalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], True),
    DetectionResize((800, 1333), True, interpolation=cv2.INTER_LINEAR),
    DetectionTranspose((2, 0, 1)),
    DetectionPadStride(128)
])

postprocess = RCNNPostprocess("label_list.txt", "output")

client = Client()
client.load_client_config("serving_client/serving_client_conf.prototxt")
client.connect(['127.0.0.1:9494'])

im, im_info = preprocess(sys.argv[1])
fetch_map = client.predict(
    feed={
        "image": im,
        "scale_factor": im_info['scale_factor'],
    },
    fetch=["save_infer_model/scale_0.tmp_1"],
    batch=False)
print(fetch_map)
# The postprocessor reads the original file path to draw the boxes.
fetch_map["image"] = sys.argv[1]
postprocess(fetch_map)
img_path = sys.argv[3]
print(img_path)  # path of the input image
input_shape = (608, 608)
image = cv2.imread(img_path)
h, w = image.shape[:2]
img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# Scale factors mapping the original size to the fixed network input.
scale_x = float(input_shape[1]) / w
scale_y = float(input_shape[0]) / h
img = cv2.resize(img, None, None, fx=scale_x, fy=scale_y,
                 interpolation=cv2.INTER_CUBIC)
# Normalize to [0, 1] and switch HWC -> CHW for the model.
pimage = img.astype(np.float32) / 255.
pimage = pimage.transpose(2, 0, 1)
# NOTE(review): `client` is created earlier in this file (outside this chunk).
fetch_map = client.predict(
    feed={
        "image": pimage,
        "origin_shape": np.array([h, w]),
    }, fetch=["multiclass_nms_0.tmp_0"])
print('===============================================')
pred = fetch_map['multiclass_nms_0.tmp_0']
print(pred.shape)
print(pred)
# The else-branch implies rows are [class_id, score, x1, y1, x2, y2]; a
# negative leading value is treated as "no detections" — TODO confirm
# against the served model's NMS output contract.
if pred[0][0] < 0.0:
    boxes = np.array([])
    classes = np.array([])
    scores = np.array([])
else:
    boxes = pred[:, 2:]
    scores = pred[:, 1]
    classes = pred[:, 0].astype(np.int32)
def single_func(idx, resource):
    """Benchmark one worker on the image-classification service.

    Sends `resource["turns"]` requests built from the local ImageNet sample
    folder, over RPC or HTTP; returns [[elapsed]] (plus latency_list when
    FLAGS_serving_latency is set).
    """
    file_list = []
    turns = resource["turns"]
    latency_flags = False
    if os.getenv("FLAGS_serving_latency"):
        latency_flags = True
        latency_list = []
    for file_name in os.listdir("./image_data/n01440764"):
        file_list.append(file_name)
    img_list = []
    for i in range(1000):
        img_list.append("./image_data/n01440764/" + file_list[i])
    profile_flags = False
    if "FLAGS_profile_client" in os.environ and os.environ[
            "FLAGS_profile_client"]:
        profile_flags = True
    if args.request == "rpc":
        fetch = ["score"]
        client = Client()
        client.load_client_config(args.model)
        client.connect([resource["endpoint"][idx % len(resource["endpoint"])]])
        start = time.time()
        for i in range(turns):
            if args.batch_size >= 1:
                l_start = time.time()
                feed_batch = []
                i_start = time.time()
                for bi in range(args.batch_size):
                    img = seq_preprocess(img_list[i])
                    feed_batch.append({"image": img})
                i_end = time.time()
                if profile_flags:
                    print("PROFILE\tpid:{}\timage_pre_0:{} image_pre_1:{}".
                          format(os.getpid(),
                                 int(round(i_start * 1000000)),
                                 int(round(i_end * 1000000))))
                result = client.predict(feed=feed_batch, fetch=fetch)
                l_end = time.time()
                if latency_flags:
                    latency_list.append(l_end * 1000 - l_start * 1000)
            else:
                print("unsupport batch size {}".format(args.batch_size))
    elif args.request == "http":
        py_version = sys.version_info[0]
        server = "http://" + resource["endpoint"][idx % len(
            resource["endpoint"])] + "/image/prediction"
        start = time.time()
        for i in range(turns):
            # BUG FIX: open image files in binary mode inside a with-block;
            # the original leaked every handle and read image bytes in text
            # mode under Python 2 (corrupting data on platforms with newline
            # translation).
            image_path = "./image_data/n01440764/" + file_list[i]
            with open(image_path, "rb") as f:
                raw = f.read()
            if py_version == 2:
                image = base64.b64encode(raw)
            else:
                image = base64.b64encode(raw).decode("utf-8")
            req = json.dumps({"feed": [{"image": image}], "fetch": ["score"]})
            r = requests.post(server,
                              data=req,
                              headers={"Content-Type": "application/json"})
    end = time.time()
    if latency_flags:
        return [[end - start], latency_list]
    return [[end - start]]
client = Client()
# TODO:load_client need to load more than one client model.
# this need to figure out some details.
client.load_client_config(sys.argv[1:])
client.connect(["127.0.0.1:9293"])

import paddle

test_img_dir = "test_img/"
ocr_reader = OCRReader(char_dict_path="../../ppocr/utils/ppocr_keys_v1.txt")


def cv2_to_base64(image):
    """Base64-encode raw image bytes as a UTF-8 string."""
    return base64.b64encode(image).decode('utf8')


# Send each test image through the OCR pipeline and print the texts.
for img_file in os.listdir(test_img_dir):
    with open(os.path.join(test_img_dir, img_file), 'rb') as file:
        image_data = file.read()
    image = cv2_to_base64(image_data)
    res_list = []
    fetch_map = client.predict(feed={"x": image},
                               fetch=["save_infer_model/scale_0.tmp_1"],
                               batch=True)
    one_batch_res = ocr_reader.postprocess(fetch_map, with_score=True)
    for res in one_batch_res:
        res_list.append(res[0])
    res = {"res": str(res_list)}
    print(res)
class WebService(object):
    """Minimal Flask-fronted serving wrapper: runs a Paddle Serving RPC
    server in a subprocess and proxies HTTP JSON requests to it through a
    local Client."""

    def __init__(self, name="default_service"):
        # Service name; becomes the URL prefix /<name>/prediction.
        self.name = name

    def load_model_config(self, model_config):
        self.model_config = model_config

    def _launch_rpc_service(self):
        # Standard reader -> infer -> response op pipeline.
        op_maker = OpMaker()
        read_op = op_maker.create('general_reader')
        general_infer_op = op_maker.create('general_infer')
        general_response_op = op_maker.create('general_response')
        op_seq_maker = OpSeqMaker()
        op_seq_maker.add_op(read_op)
        op_seq_maker.add_op(general_infer_op)
        op_seq_maker.add_op(general_response_op)
        server = Server()
        server.set_op_sequence(op_seq_maker.get_op_sequence())
        server.set_num_threads(16)
        server.load_model_config(self.model_config)
        # The RPC backend listens on the free port found by prepare_server(),
        # not on the public HTTP port.
        server.prepare_server(workdir=self.workdir,
                              port=self.port_list[0],
                              device=self.device)
        server.run_server()

    def port_is_available(self, port):
        """Return True when nothing answers on `port` locally."""
        with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
            sock.settimeout(2)
            result = sock.connect_ex(('0.0.0.0', port))
        # connect_ex returns 0 when something accepted the connection.
        if result != 0:
            return True
        else:
            return False

    def prepare_server(self, workdir="", port=9393, device="cpu"):
        self.workdir = workdir
        self.port = port
        self.device = device
        default_port = 12000
        self.port_list = []
        # Find one free port (12000-12999) for the backing RPC server.
        for i in range(1000):
            if self.port_is_available(default_port + i):
                self.port_list.append(default_port + i)
                break

    def _launch_web_service(self):
        # Client used by the Flask worker to forward requests to the RPC
        # server started by _launch_rpc_service().
        self.client = Client()
        self.client.load_client_config(
            "{}/serving_server_conf.prototxt".format(self.model_config))
        self.client.connect(["0.0.0.0:{}".format(self.port_list[0])])

    def get_prediction(self, request):
        """Handle one HTTP request: validate, preprocess, predict over RPC,
        postprocess, and return a JSON-serializable result dict."""
        if not request.json:
            abort(400)
        if "fetch" not in request.json:
            abort(400)
        try:
            feed, fetch = self.preprocess(request.json["feed"],
                                          request.json["fetch"])
            if isinstance(feed, dict) and "fetch" in feed:
                del feed["fetch"]
            fetch_map = self.client.predict(feed=feed, fetch=fetch)
            for key in fetch_map:
                # ndarrays are not JSON-serializable; convert to lists.
                fetch_map[key] = fetch_map[key].tolist()
            result = self.postprocess(feed=request.json["feed"],
                                      fetch=fetch,
                                      fetch_map=fetch_map)
            result = {"result": result}
        except ValueError:
            result = {"result": "Request Value Error"}
        return result

    def run_rpc_service(self):
        import socket
        localIP = socket.gethostbyname(socket.gethostname())
        print("web service address:")
        print("http://{}:{}/{}/prediction".format(localIP, self.port,
                                                  self.name))
        # Run the RPC server in a separate process so Flask stays responsive.
        p_rpc = Process(target=self._launch_rpc_service)
        p_rpc.start()

        app_instance = Flask(__name__)

        @app_instance.before_first_request
        def init():
            self._launch_web_service()

        service_name = "/" + self.name + "/prediction"

        @app_instance.route(service_name, methods=["POST"])
        def run():
            return self.get_prediction(request)

        self.app_instance = app_instance

    def run_web_service(self):
        # Single-threaded, single-process Flask — NOTE(review): presumably
        # because the proxied Client is not safe to share; confirm.
        self.app_instance.run(host="0.0.0.0",
                              port=self.port,
                              threaded=False,
                              processes=1)

    def get_app_instance(self):
        return self.app_instance

    def preprocess(self, feed=[], fetch=[]):
        # Hook for subclasses; identity by default.
        return feed, fetch

    def postprocess(self, feed=[], fetch=[], fetch_map=None):
        # Hook for subclasses; identity by default.
        return fetch_map
import sys
import numpy as np
from paddle_serving_client import Client
from paddle_serving_app.reader import *
import cv2

# Preprocessing: decode -> RGB -> fixed 608x608 resize -> [0,1] -> CHW.
preprocess = Sequential([
    File2Image(), BGR2RGB(),
    Resize((608, 608), interpolation=cv2.INTER_LINEAR),
    Div(255.0), Transpose((2, 0, 1))
])

postprocess = RCNNPostprocess(sys.argv[1], "output", [608, 608])

client = Client()
client.load_client_config("serving_client/serving_client_conf.prototxt")
client.connect(['127.0.0.1:9393'])

im = preprocess(sys.argv[2])
fetch_map = client.predict(
    feed={
        "image": im,
        "im_shape": np.array(list(im.shape[1:])).reshape(-1),
        "scale_factor": np.array([1.0, 1.0]).reshape(-1),
    },
    fetch=["multiclass_nms3_0.tmp_0"],
    batch=False)
print(fetch_map)
# The postprocessor reads the original file path to draw the boxes.
fetch_map["image"] = sys.argv[2]
postprocess(fetch_map)
# BUG FIX: `sys` is used below (sys.argv) but was never imported, which
# makes this script fail with a NameError at runtime.
import sys

import numpy as np
from paddle_serving_client import Client
from paddle_serving_app.reader import *
import cv2

# Preprocessing for the 300x300 detector: decode -> resize -> mean-subtract
# normalize -> CHW layout.
preprocess = DetectionSequential([
    DetectionFile2Image(),
    DetectionResize((300, 300), False, interpolation=cv2.INTER_LINEAR),
    DetectionNormalize([104.0, 117.0, 123.0], [1.0, 1.0, 1.0], False),
    DetectionTranspose((2, 0, 1)),
])

postprocess = RCNNPostprocess("label_list.txt", "output")

client = Client()
client.load_client_config("serving_client/serving_client_conf.prototxt")
client.connect(['127.0.0.1:9494'])

im, im_info = preprocess(sys.argv[1])
fetch_map = client.predict(
    feed={
        "image": im,
        "im_shape": np.array(list(im.shape[1:])).reshape(-1),
        "scale_factor": im_info['scale_factor'],
    },
    fetch=["save_infer_model/scale_0.tmp_1"],
    batch=False)
print(fetch_map)
# The postprocessor reads the original file path to draw the boxes.
fetch_map["image"] = sys.argv[1]
postprocess(fetch_map)
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from paddle_serving_client import Client
from paddle_serving_app.reader import Sequential, File2Image, Resize, Transpose, BGR2RGB, SegPostprocess
import sys
import cv2

client = Client()
client.load_client_config("unet_client/serving_client_conf.prototxt")
client.connect(["127.0.0.1:9494"])

# Read the image file and resize it to the 512x512 network input.
preprocess = Sequential(
    [File2Image(), Resize(
        (512, 512), interpolation=cv2.INTER_LINEAR)])
# Two-class segmentation postprocessing.
postprocess = SegPostprocess(2)

filename = "N0060.jpg"
im = preprocess(filename)
fetch_map = client.predict(feed={"image": im}, fetch=["output"])
# The postprocessor uses the original filename to write the visualization.
fetch_map["filename"] = filename
postprocess(fetch_map)
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing

from paddle_serving_client import Client
import sys
import numpy as np

client = Client()
client.load_client_config(sys.argv[1])
client.connect(["127.0.0.1:9393"])
fetch_list = client.get_fetch_names()

import paddle

test_reader = paddle.batch(
    paddle.reader.shuffle(
        paddle.dataset.uci_housing.test(), buf_size=500),
    batch_size=1)

# Send every test sample as a (1, 13) float32 row and print the prediction.
for data in test_reader():
    row = np.zeros((1, 13)).astype("float32")
    row[0] = data[0][0]
    fetch_map = client.predict(feed={"x": row}, fetch=fetch_list, batch=True)
    print(fetch_map)
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys

from paddle_serving_client import Client
from paddle_serving_client.utils import benchmark_args
from chinese_bert_reader import ChineseBertReader
import numpy as np

args = benchmark_args()

reader = ChineseBertReader({"max_seq_len": 128})
fetch = ["save_infer_model/scale_0.tmp_1"]
endpoint_list = ['127.0.0.1:7703']

client = Client()
client.load_client_config(args.model)
client.connect(endpoint_list)

# One request per stdin line: tokenize, reshape each feed field to (1, 128),
# then fetch and print the model output.
for line in sys.stdin:
    feed_dict = reader.process(line)
    for key in feed_dict.keys():
        feed_dict[key] = np.array(feed_dict[key]).reshape((1, 128))
    result = client.predict(feed=feed_dict, fetch=fetch, batch=True)
    print(result)