def init_det_client(self, det_port, det_client_config):
    """Set up the RPC text-detection client plus its preprocessing pipeline.

    Args:
        det_port: local port the detection service listens on.
        det_client_config: path to the serving client config file.
    """
    # Detection preprocessing: pad/resize to a multiple of 32 (long side
    # capped at 960), scale to [0, 1], ImageNet normalization, HWC -> CHW.
    self.det_preprocess = Sequential([
        ResizeByFactor(32, 960),
        Div(255),
        Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        Transpose((2, 0, 1)),
    ])
    detection_client = Client()
    detection_client.load_client_config(det_client_config)
    detection_client.connect(["127.0.0.1:{}".format(det_port)])
    self.det_client = detection_client
    self.ocr_reader = OCRReader()
def init_op(self):
    """Build the classification preprocessing pipeline and the label map.

    Side effects:
        Sets ``self.seq`` (image transform) and ``self.label_dict``
        (class index -> label string), read from ``imagenet.label``
        in the working directory (one label per line).
    """
    # Standard ImageNet eval transform: resize/center-crop, RGB->BGR,
    # HWC->CHW, scale to [0, 1], then mean/std normalization.
    self.seq = Sequential([
        Resize(256), CenterCrop(224), RGB2BGR(), Transpose((2, 0, 1)),
        Div(255),
        Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], True)
    ])
    # enumerate() replaces the original manual counter — same mapping,
    # idiomatic and less error-prone.
    with open("imagenet.label") as fin:
        self.label_dict = {
            idx: line.strip() for idx, line in enumerate(fin)
        }
def init_det_debugger(self, det_model_config):
    """Load the detection model into an in-process Debugger predictor.

    Args:
        det_model_config: path to the detection model config directory.
    """
    # Same detection preprocessing as the RPC variant: multiple-of-32
    # resize (max side 960), [0, 1] scaling, normalization, HWC -> CHW.
    self.det_preprocess = Sequential([
        ResizeByFactor(32, 960),
        Div(255),
        Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        Transpose((2, 0, 1)),
    ])
    debugger = Debugger()
    debugger.load_model_config(det_model_config, gpu=True, profile=False)
    self.det_client = debugger
    self.ocr_reader = OCRReader()
def single_func(idx, resource):
    """Benchmark worker: run ``turns`` batched predictions on one endpoint.

    Args:
        idx: worker index, used to pick an endpoint round-robin.
        resource: dict with an "endpoint" list of "host:port" strings.

    Returns:
        ``[[total_seconds]]``, or — when FLAGS_serving_latency is set —
        ``[[total_seconds], per_call_latencies_ms, [call_count]]``.

    Raises:
        ValueError: when ``args.request`` is not "rpc".
    """
    total_number = 0
    # Env flags: both default to off; any non-empty value enables them.
    profile_flags = bool(os.getenv("FLAGS_profile_client"))
    latency_flags = bool(os.getenv("FLAGS_serving_latency"))
    latency_list = []
    # Guard clause: only the RPC path is implemented.
    if args.request != "rpc":
        raise ValueError("not implemented {} request".format(args.request))
    client = Client()
    client.load_client_config(args.model)
    endpoints = resource["endpoint"]
    client.connect([endpoints[idx % len(endpoints)]])
    start = time.time()
    for _ in range(turns):
        if args.batch_size >= 1:
            # Per-call latency includes preprocessing, on purpose.
            l_start = time.time()
            pipeline = Sequential([
                File2Image(), Resize(256), CenterCrop(224), RGB2BGR(),
                Transpose((2, 0, 1)), Div(255),
                Normalize([0.485, 0.456, 0.406],
                          [0.229, 0.224, 0.225], True)
            ])
            img = pipeline("daisy.jpg")
            # Tile the single image along a new batch axis.
            feed_data = np.expand_dims(np.array(img), 0).repeat(
                args.batch_size, axis=0)
            result = client.predict(
                feed={"image": feed_data},
                fetch=["save_infer_model/scale_0.tmp_0"],
                batch=True)
            l_end = time.time()
            if latency_flags:
                latency_list.append(l_end * 1000 - l_start * 1000)
            total_number += 1
        else:
            print("unsupport batch size {}".format(args.batch_size))
    end = time.time()
    if latency_flags:
        return [[end - start], latency_list, [total_number]]
    return [[end - start]]
def init_det_debugger(self, det_model_config):
    """Load the detection model into a LocalPredictor; device from argv.

    Args:
        det_model_config: path to the detection model config directory.
    """
    self.det_preprocess = Sequential([
        ResizeByFactor(32, 960),
        Div(255),
        Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        Transpose((2, 0, 1)),
    ])
    predictor = LocalPredictor()
    device = sys.argv[1]
    # NOTE(review): any argv value other than 'gpu'/'cpu' silently leaves
    # the model unloaded — confirm this is the intended behavior.
    if device == 'gpu':
        predictor.load_model_config(
            det_model_config, use_gpu=True, gpu_id=0)
    elif device == 'cpu':
        predictor.load_model_config(det_model_config)
    self.det_client = predictor
    self.ocr_reader = OCRReader(
        char_dict_path="../../../ppocr/utils/ppocr_keys_v1.txt")
def init_det(self):
    """Create the detection preprocessing, box filter, and postprocessor."""
    self.det_preprocess = Sequential([
        ResizeByFactor(32, 960),
        Div(255),
        Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        Transpose((2, 0, 1)),
    ])
    # Drop boxes smaller than 10x10 pixels.
    self.filter_func = FilterBoxes(10, 10)
    # Differentiable-binarization postprocessing thresholds.
    db_params = {
        "thresh": 0.3,
        "box_thresh": 0.5,
        "max_candidates": 1000,
        "unclip_ratio": 1.5,
        "min_size": 3,
    }
    self.post_func = DBPostProcess(db_params)
def run(args):
    """Send one image to the segmentation service and save the color map.

    Args:
        args: namespace with ``serving_client_path``, ``serving_ip_port``,
            and ``image_path`` attributes.
    """
    client = Client()
    config_path = os.path.join(args.serving_client_path,
                               "serving_client_conf.prototxt")
    client.load_client_config(config_path)
    client.connect([args.serving_ip_port])
    # Segmentation preprocessing: BGR order, [0, 1] scaling,
    # symmetric 0.5 mean/std normalization, HWC -> CHW.
    preprocess = Sequential([
        File2Image(), RGB2BGR(), Div(255),
        Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5], False),
        Transpose((2, 0, 1))
    ])
    img = preprocess(args.image_path)
    fetch_map = client.predict(
        feed={"x": img}, fetch=["save_infer_model/scale_0.tmp_1"])
    result = fetch_map["save_infer_model/scale_0.tmp_1"]
    # Render the class-index mask as a pseudo-color image.
    color_img = get_pseudo_color_map(result[0])
    color_img.save("./result.png")
    print("The segmentation image is saved in ./result.png")
def resize_norm_img(self, img, max_wh_ratio):
    """Resize a text crop to the recognizer input shape and right-pad.

    Args:
        img: HWC image array (crop of a detected text line).
        max_wh_ratio: max width/height ratio in the batch; widens the
            target for Chinese models so long lines keep their aspect.

    Returns:
        float32 array of shape (imgC, imgH, imgW), zero-padded on the
        right beyond the resized content.
    """
    imgC, imgH, imgW = self.rec_image_shape
    if self.character_type == "ch":
        imgW = int(32 * max_wh_ratio)
    h, w = img.shape[0], img.shape[1]
    aspect = w / float(h)
    # Width after scaling height to imgH, clamped to the model width.
    resized_w = min(imgW, int(math.ceil(imgH * aspect)))
    transform = Sequential([
        Resize(imgH, resized_w),
        Transpose((2, 0, 1)),
        Div(255),
        Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5], True),
    ])
    normalized = transform(img)
    padding_im = np.zeros((imgC, imgH, imgW), dtype=np.float32)
    padding_im[:, :, 0:resized_w] = normalized
    return padding_im
# client.set_http_proto(True)
client.connect(["127.0.0.1:9696"])

# Map class index -> label string, one label per line.
with open("imagenet.label") as fin:
    label_dict = {i: line.strip() for i, line in enumerate(fin)}
label_idx = len(label_dict)

# ImageNet eval transform over a URL-fetched image.
seq = Sequential([
    URL2Image(), Resize(256), CenterCrop(224), RGB2BGR(),
    Transpose((2, 0, 1)), Div(255),
    Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], True)
])

start = time.time()
image_file = "https://paddle-serving.bj.bcebos.com/imagenet-example/daisy.jpg"
for _ in range(10):
    img = seq(image_file)
    fetch_map = client.predict(
        feed={"image": img}, fetch=["score"], batch=False)
    print(fetch_map)
end = time.time()
print(end - start)
from paddle_serving_app.reader import CenterCrop, RGB2BGR, Transpose, Div, Normalize
import time

client = Client()
client.load_client_config(sys.argv[1])
client.connect(["127.0.0.1:9696"])

# Map class index -> label string, one label per line.
with open("imagenet.label") as fin:
    label_dict = {i: line.strip() for i, line in enumerate(fin)}
label_idx = len(label_dict)

# ImageNet eval transform over a URL-fetched image.
seq = Sequential([
    URL2Image(), Resize(256), CenterCrop(224), RGB2BGR(),
    Transpose((2, 0, 1)), Div(255),
    Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], True)
])

start = time.time()
image_file = "https://paddle-serving.bj.bcebos.com/imagenet-example/daisy.jpg"
for _ in range(10):
    img = seq(image_file)
    fetch_map = client.predict(
        feed={"image": img}, fetch=["score"], batch=False)
    scores = fetch_map["score"][0]
    # Top-1: highest score, mapped back through the label table.
    prob = max(scores)
    label = label_dict[scores.tolist().index(prob)].strip().replace(",", "")
    print("prediction: {}, probability: {}".format(label, prob))
end = time.time()