def test_sub(self):
    height = 4
    width = 5
    channels = 3
    img = np.arange(height * width * channels).reshape(
        [height, width, channels])

    # subtrahend is a scalar
    value = 10.0
    seq = Sequential(
        [pp.Image2Gpubuffer(), pp.Sub(value), pp.Gpubuffer2Image()])
    result = seq(img).reshape(-1)
    for i in range(0, result.size):
        self.assertEqual(i - value, result[i])

    # subtrahend is a per-channel sequence
    values = (9, 4, 2)
    seq = Sequential(
        [pp.Image2Gpubuffer(), pp.Sub(values), pp.Gpubuffer2Image()])
    result = seq(img)
    for i in range(0, result.shape[0]):
        for j in range(0, result.shape[1]):
            for k in range(0, result.shape[2]):
                self.assertEqual(result[i][j][k],
                                 img[i][j][k] - values[k])
def init_det_client(self, det_port, det_client_config):
    self.det_preprocess = Sequential([
        ResizeByFactor(32, 960), Div(255),
        Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        Transpose((2, 0, 1))
    ])
    self.det_client = Client()
    self.det_client.load_client_config(det_client_config)
    self.det_client.connect(["127.0.0.1:{}".format(det_port)])
    self.ocr_reader = OCRReader()
def init_det_debugger(self, det_model_config):
    self.det_preprocess = Sequential([
        ResizeByFactor(32, 960), Div(255),
        Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        Transpose((2, 0, 1))
    ])
    self.det_client = Debugger()
    self.det_client.load_model_config(
        det_model_config, gpu=True, profile=False)
    self.ocr_reader = OCRReader()
def init_op(self):
    self.seq = Sequential([
        Resize(256), CenterCrop(224), RGB2BGR(), Transpose((2, 0, 1)),
        Div(255),
        Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], True)
    ])
    self.label_dict = {}
    label_idx = 0
    with open("imagenet.label") as fin:
        for line in fin:
            self.label_dict[label_idx] = line.strip()
            label_idx += 1
def test_resize(self):
    height = 9
    width = 5
    channels = 3
    img = np.arange(height * width).reshape([height, width, 1]) * np.ones(
        (1, channels))

    # input size is an int
    for new_size in [3, 10]:
        seq_gpu = Sequential([
            pp.Image2Gpubuffer(), pp.Resize(new_size), pp.Gpubuffer2Image()
        ])
        seq_paddle = Sequential([Resize(new_size)])
        result_gpu = seq_gpu(img)
        result_paddle = seq_paddle(img)
        self.assertEqual(result_gpu.shape, result_paddle.shape)
        for i in range(0, result_gpu.shape[0]):
            for j in range(0, result_gpu.shape[1]):
                for k in range(0, result_gpu.shape[2]):
                    self.assertAlmostEqual(result_gpu[i][j][k],
                                           result_paddle[i][j][k], 5)

    # input size is a sequence
    for new_height, new_width in [(7, 3), (15, 10)]:
        seq_gpu = Sequential([
            pp.Image2Gpubuffer(), pp.Resize((new_width, new_height)),
            pp.Gpubuffer2Image()
        ])
        seq_paddle = Sequential([Resize((new_width, new_height))])
        result_gpu = seq_gpu(img)
        result_paddle = seq_paddle(img)
        self.assertEqual(result_gpu.shape, result_paddle.shape)
        for i in range(0, result_gpu.shape[0]):
            for j in range(0, result_gpu.shape[1]):
                for k in range(0, result_gpu.shape[2]):
                    self.assertAlmostEqual(result_gpu[i][j][k],
                                           result_paddle[i][j][k], 5)
def single_func(idx, resource):
    total_number = 0
    profile_flags = False
    latency_flags = False
    if os.getenv("FLAGS_profile_client"):
        profile_flags = True
    if os.getenv("FLAGS_serving_latency"):
        latency_flags = True
        latency_list = []
    if args.request == "rpc":
        client = Client()
        client.load_client_config(args.model)
        client.connect(
            [resource["endpoint"][idx % len(resource["endpoint"])]])
        start = time.time()
        for i in range(turns):
            if args.batch_size >= 1:
                l_start = time.time()
                # preprocess one image and repeat it to form the batch
                seq = Sequential([
                    File2Image(), Resize(256), CenterCrop(224), RGB2BGR(),
                    Transpose((2, 0, 1)), Div(255),
                    Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225],
                              True)
                ])
                image_file = "daisy.jpg"
                img = seq(image_file)
                feed_data = np.array(img)
                feed_data = np.expand_dims(feed_data, 0).repeat(
                    args.batch_size, axis=0)
                result = client.predict(
                    feed={"image": feed_data},
                    fetch=["save_infer_model/scale_0.tmp_0"],
                    batch=True)
                l_end = time.time()
                if latency_flags:
                    latency_list.append(l_end * 1000 - l_start * 1000)
                total_number = total_number + 1
            else:
                print("unsupported batch size {}".format(args.batch_size))
    else:
        raise ValueError("not implemented {} request".format(args.request))
    end = time.time()
    if latency_flags:
        return [[end - start], latency_list, [total_number]]
    else:
        return [[end - start]]
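# A hedged sketch of how a thread function like single_func is usually driven
# in Paddle Serving benchmarks: fan it out over several client threads with
# MultiThreadRunner. "args.thread" and the endpoint below are assumptions for
# illustration, not taken from the function above.
if __name__ == '__main__':
    from paddle_serving_client.utils import MultiThreadRunner

    multi_thread_runner = MultiThreadRunner()
    endpoint_list = ["127.0.0.1:9393"]  # assumed serving endpoint
    result = multi_thread_runner.run(single_func, args.thread,
                                     {"endpoint": endpoint_list})
    # result[0] holds each thread's wall-clock cost in seconds
    print("per-thread cost: {}".format(result[0]))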
def test_div(self):
    height = 4
    width = 5
    channels = 3
    value = 255.0
    img = np.arange(height * width * channels).reshape(
        [height, width, channels])
    seq = Sequential(
        [pp.Image2Gpubuffer(), pp.Div(value), pp.Gpubuffer2Image()])
    result = seq(img).reshape(-1)
    for i in range(0, result.size):
        self.assertAlmostEqual(i / value, result[i], 5)
def test_resize_fixed_point(self):
    new_height = 256
    new_width = 256 * 4 // 3  # integer division so the target size is an int
    seq = Sequential([
        File2Image(), pp.Image2Gpubuffer(),
        pp.Resize((new_width, new_height), use_fixed_point=True),
        pp.Gpubuffer2Image()
    ])
    img = seq("./capture_16.bmp")
    img = np.resize(img, (new_height, new_width * 3))
    img_vis = np.loadtxt("./cap_resize_16.raw")
    img_resize_diff = img_vis - img
    self.assertEqual(np.all(img_resize_diff == 0), True)
def init_det(self):
    self.det_preprocess = Sequential([
        ResizeByFactor(32, 960), Div(255),
        Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        Transpose((2, 0, 1))
    ])
    self.filter_func = FilterBoxes(10, 10)
    self.post_func = DBPostProcess({
        "thresh": 0.3,
        "box_thresh": 0.5,
        "max_candidates": 1000,
        "unclip_ratio": 1.5,
        "min_size": 3
    })
def init_det_debugger(self, det_model_config):
    self.det_preprocess = Sequential([
        ResizeByFactor(32, 960), Div(255),
        Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        Transpose((2, 0, 1))
    ])
    self.det_client = LocalPredictor()
    if sys.argv[1] == 'gpu':
        self.det_client.load_model_config(
            det_model_config, use_gpu=True, gpu_id=0)
    elif sys.argv[1] == 'cpu':
        self.det_client.load_model_config(det_model_config)
    self.ocr_reader = OCRReader(
        char_dict_path="../../../ppocr/utils/ppocr_keys_v1.txt")
def preprocess_img(img_list):
    """
    Brief: prepare image data for benchmark
    Args:
        img_list(list): list of image file paths
    Returns:
        list of preprocessed image arrays
    """
    preprocess = Sequential([File2Image(), Resize((512, 512))])
    result_list = []
    for img in img_list:
        img_tmp = preprocess(img)
        result_list.append(img_tmp)
    return result_list
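# Minimal usage sketch for preprocess_img, assuming a couple of local image
# files exist; the file names below are hypothetical.
if __name__ == "__main__":
    sample_imgs = ["./img1.jpg", "./img2.jpg"]  # hypothetical paths
    batch = preprocess_img(sample_imgs)
    # each element is a decoded image resized to 512x512 (H x W x C)
    print(len(batch), batch[0].shape)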
def test_center_crop(self):
    height = 9
    width = 7
    channels = 3
    img = np.arange(height * width * channels).reshape(
        [height, width, channels])
    new_size = 5
    seq = Sequential([
        pp.Image2Gpubuffer(), pp.CenterCrop(new_size), pp.Gpubuffer2Image()
    ])
    result = seq(img)
    self.assertEqual(result.shape[0], new_size)
    self.assertEqual(result.shape[1], new_size)
    self.assertEqual(result.shape[2], channels)
def test_normalize(self):
    height = 4
    width = 5
    channels = 3
    img = np.random.rand(height, width, channels)
    mean = [5.0, 5.0, 5.0]
    std = [2.0, 2.0, 2.0]
    seq = Sequential([
        pp.Image2Gpubuffer(), pp.Normalize(mean, std), pp.Gpubuffer2Image()
    ])
    result = seq(img)
    for i in range(0, height):
        for j in range(0, width):
            for k in range(0, channels):
                self.assertAlmostEqual((img[i][j][k] - mean[k]) / std[k],
                                       result[i][j][k], 5)
def run(args):
    client = Client()
    client.load_client_config(
        os.path.join(args.serving_client_path,
                     "serving_client_conf.prototxt"))
    client.connect([args.serving_ip_port])
    seq = Sequential([
        File2Image(), RGB2BGR(), Div(255),
        Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5], False),
        Transpose((2, 0, 1))
    ])
    img = seq(args.image_path)
    fetch_map = client.predict(
        feed={"x": img}, fetch=["save_infer_model/scale_0.tmp_1"])
    result = fetch_map["save_infer_model/scale_0.tmp_1"]
    color_img = get_pseudo_color_map(result[0])
    color_img.save("./result.png")
    print("The segmentation image is saved to ./result.png")
def resize_norm_img(self, img, max_wh_ratio):
    imgC, imgH, imgW = self.rec_image_shape
    if self.character_type == "ch":
        imgW = int(32 * max_wh_ratio)
    h = img.shape[0]
    w = img.shape[1]
    ratio = w / float(h)
    # resize to the fixed height, keeping the aspect ratio but capping the width
    if math.ceil(imgH * ratio) > imgW:
        resized_w = imgW
    else:
        resized_w = int(math.ceil(imgH * ratio))
    seq = Sequential([
        Resize(imgH, resized_w), Transpose((2, 0, 1)), Div(255),
        Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5], True)
    ])
    resized_image = seq(img)
    # right-pad with zeros to the recognizer's fixed input width
    padding_im = np.zeros((imgC, imgH, imgW), dtype=np.float32)
    padding_im[:, :, 0:resized_w] = resized_image
    return padding_im
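# A standalone NumPy/OpenCV sketch of the same idea resize_norm_img implements:
# resize to the recognizer height while keeping the aspect ratio, normalize,
# then right-pad to the fixed width. The shape (3, 32, 320) and the dummy crop
# below are assumptions for illustration, not values taken from the code above.
import math

import cv2
import numpy as np

imgC, imgH, imgW = 3, 32, 320
crop = np.zeros((48, 200, 3), dtype=np.uint8)        # dummy cropped text line
ratio = crop.shape[1] / float(crop.shape[0])
resized_w = min(imgW, int(math.ceil(imgH * ratio)))  # cap at the fixed width
resized = cv2.resize(crop, (resized_w, imgH)).astype(np.float32)
resized = resized.transpose((2, 0, 1)) / 255.0       # CHW, scaled to [0, 1]
resized = (resized - 0.5) / 0.5                      # Normalize(mean=0.5, std=0.5)
padding_im = np.zeros((imgC, imgH, imgW), dtype=np.float32)
padding_im[:, :, 0:resized_w] = resized              # right-pad with zeros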
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from paddle_serving_client import Client
from paddle_serving_app.reader import Sequential, File2Image, Resize, Transpose, BGR2RGB, SegPostprocess
import sys
import cv2

client = Client()
client.load_client_config("unet_client/serving_client_conf.prototxt")
client.connect(["127.0.0.1:9494"])

preprocess = Sequential(
    [File2Image(), Resize((512, 512), interpolation=cv2.INTER_LINEAR)])
postprocess = SegPostprocess(2)

filename = "N0060.jpg"
im = preprocess(filename)
fetch_map = client.predict(feed={"image": im}, fetch=["output"])
fetch_map["filename"] = filename
postprocess(fetch_map)
'''
#client.set_http_proto(True)
client.connect(["127.0.0.1:9696"])

label_dict = {}
label_idx = 0
with open("imagenet.label") as fin:
    for line in fin:
        label_dict[label_idx] = line.strip()
        label_idx += 1

seq = Sequential([
    URL2Image(), Resize(256), CenterCrop(224), RGB2BGR(), Transpose((2, 0, 1)),
    Div(255), Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], True)
])

start = time.time()
image_file = "https://paddle-serving.bj.bcebos.com/imagenet-example/daisy.jpg"
for i in range(10):
    img = seq(image_file)
    fetch_map = client.predict(
        feed={"image": img}, fetch=["score"], batch=False)
    print(fetch_map)
end = time.time()
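# The loop above only records start/end; a small follow-up (assumed, not part
# of the original snippet) to report the average latency of the 10 requests:
print("average latency: {:.3f} ms".format((end - start) * 1000 / 10))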
from paddle_serving_client import Client
from paddle_serving_app.reader import Sequential, File2Image, Resize, Transpose, BGR2RGB, Normalize, Div
import sys
import cv2
from postprocess import SegPostprocess

client = Client()
client.load_client_config("serving_client/serving_client_conf.prototxt")
client.connect(["127.0.0.1:9494"])

preprocess = Sequential([
    File2Image(), Resize((512, 512), interpolation=cv2.INTER_LINEAR),
    Div(255.0), Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5], False),
    Transpose((2, 0, 1))
])
postprocess = SegPostprocess(2)

filename = sys.argv[1]
im = preprocess(filename)
fetch_map = client.predict(feed={"image": im}, fetch=["transpose_1.tmp_0"])
fetch_map["filename"] = filename
result_png = postprocess(fetch_map)