def test(self, test_dir, out_dir):
    for data_dict in self.test_loader.get_testloader(test_dir=test_dir):
        data_dict['testing'] = True
        # Forward pass, then decode the raw loc/conf outputs into per-image detections.
        out_dict = self.det_net(data_dict)
        meta_list = DCHelper.tolist(data_dict['meta'])
        batch_detections = self.decode(out_dict['loc'], out_dict['conf'], self.configer, meta_list)
        for i in range(len(meta_list)):
            ori_img_bgr = ImageHelper.read_image(meta_list[i]['img_path'], tool='cv2', mode='BGR')
            json_dict = self.__get_info_tree(batch_detections[i])
            # Draw only the boxes above the visualization confidence threshold.
            image_canvas = self.det_parser.draw_bboxes(
                ori_img_bgr.copy(), json_dict,
                conf_threshold=self.configer.get('res', 'vis_conf_thre'))

            vis_path = os.path.join(out_dir, 'vis/{}.png'.format(meta_list[i]['filename']))
            json_path = os.path.join(out_dir, 'json/{}.json'.format(meta_list[i]['filename']))
            ImageHelper.save(image_canvas, save_path=vis_path)
            Log.info('Json Path: {}'.format(json_path))
            JsonHelper.save_file(json_dict, save_path=json_path)
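# A minimal, standalone sketch of the visualization step above: draw only the
# detections whose score clears a confidence threshold. The json_dict layout
# assumed here ('objects' entries with 'bbox', 'label', 'score') is illustrative,
# not necessarily the exact format produced by __get_info_tree.
import cv2
import numpy as np


def draw_bboxes_sketch(img_bgr, json_dict, conf_threshold=0.5):
    canvas = img_bgr.copy()
    for obj in json_dict.get('objects', []):
        if obj['score'] < conf_threshold:
            continue

        x_min, y_min, x_max, y_max = map(int, obj['bbox'])
        cv2.rectangle(canvas, (x_min, y_min), (x_max, y_max), (0, 255, 0), 2)
        cv2.putText(canvas, '{}: {:.2f}'.format(obj['label'], obj['score']),
                    (x_min, max(y_min - 5, 0)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)

    return canvas


if __name__ == '__main__':
    dummy = np.zeros((240, 320, 3), dtype=np.uint8)
    dets = {'objects': [{'bbox': [40, 40, 200, 180], 'label': 1, 'score': 0.9}]}
    cv2.imwrite('vis_sketch.png', draw_bboxes_sketch(dummy, dets, conf_threshold=0.5))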
def __test_img(self, image_path, json_path, raw_path, vis_path):
    Log.info('Image Path: {}'.format(image_path))
    ori_image = ImageHelper.read_image(image_path,
                                       tool=self.configer.get('data', 'image_tool'),
                                       mode=self.configer.get('data', 'input_mode'))
    ori_width, ori_height = ImageHelper.get_size(ori_image)
    ori_img_bgr = ImageHelper.get_cv2_bgr(ori_image, mode=self.configer.get('data', 'input_mode'))

    # Accumulators for the multi-scale averages of heatmaps and part affinity fields.
    heatmap_avg = np.zeros((ori_height, ori_width, self.configer.get('network', 'heatmap_out')))
    paf_avg = np.zeros((ori_height, ori_width, self.configer.get('network', 'paf_out')))
    multiplier = [scale * self.configer.get('test', 'input_size')[1] / ori_height
                  for scale in self.configer.get('test', 'scale_search')]
    stride = self.configer.get('network', 'stride')

    for scale in multiplier:
        image, border_hw = self._get_blob(ori_image, scale=scale)
        with torch.no_grad():
            paf_out_list, heatmap_out_list = self.pose_net(image)
            paf_out = paf_out_list[-1]
            heatmap_out = heatmap_out_list[-1]

            # extract outputs, resize, and remove padding
            heatmap = heatmap_out.squeeze(0).cpu().numpy().transpose(1, 2, 0)
            heatmap = cv2.resize(heatmap, None, fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
            heatmap = cv2.resize(heatmap[:border_hw[0], :border_hw[1]],
                                 (ori_width, ori_height), interpolation=cv2.INTER_CUBIC)

            paf = paf_out.squeeze(0).cpu().numpy().transpose(1, 2, 0)
            paf = cv2.resize(paf, None, fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
            paf = cv2.resize(paf[:border_hw[0], :border_hw[1]],
                             (ori_width, ori_height), interpolation=cv2.INTER_CUBIC)

            # Average the predictions over all tested scales.
            heatmap_avg = heatmap_avg + heatmap / len(multiplier)
            paf_avg = paf_avg + paf / len(multiplier)

    # Group peaks into persons via the part affinity fields, then visualize and dump JSON.
    all_peaks = self.__extract_heatmap_info(heatmap_avg)
    special_k, connection_all = self.__extract_paf_info(ori_img_bgr, paf_avg, all_peaks)
    subset, candidate = self.__get_subsets(connection_all, special_k, all_peaks)
    json_dict = self.__get_info_tree(ori_img_bgr, subset, candidate)
    image_canvas = self.pose_parser.draw_points(ori_img_bgr.copy(), json_dict)
    image_canvas = self.pose_parser.link_points(image_canvas, json_dict)
    ImageHelper.save(image_canvas, vis_path)
    ImageHelper.save(ori_img_bgr, raw_path)
    Log.info('Json Save Path: {}'.format(json_path))
    JsonHelper.save_file(json_dict, json_path)
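# A standalone sketch of the multi-scale averaging loop above: predictions made
# at several input scales are resized back to the original resolution and
# averaged channel-wise. The random "heatmaps" stand in for network outputs;
# shapes and scale factors are illustrative, not taken from the project's config.
import cv2
import numpy as np

ori_h, ori_w, num_channels = 368, 368, 19
scale_search = [0.5, 1.0, 1.5]
heatmap_avg = np.zeros((ori_h, ori_w, num_channels), dtype=np.float32)

for scale in scale_search:
    in_h, in_w = int(ori_h * scale), int(ori_w * scale)
    # Pretend this came from the network at the scaled resolution.
    heatmap = np.random.rand(in_h, in_w, num_channels).astype(np.float32)
    # Resize back to the original image size before accumulating.
    heatmap = cv2.resize(heatmap, (ori_w, ori_h), interpolation=cv2.INTER_CUBIC)
    heatmap_avg += heatmap / len(scale_search)

print(heatmap_avg.shape)  # (368, 368, 19)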
def __test_img(self, image_path, json_path, raw_path, vis_path):
    Log.info('Image Path: {}'.format(image_path))
    img = ImageHelper.read_image(image_path,
                                 tool=self.configer.get('data', 'image_tool'),
                                 mode=self.configer.get('data', 'input_mode'))

    trans = None
    if self.configer.get('dataset') == 'imagenet':
        if self.configer.get('data', 'image_tool') == 'cv2':
            img = Image.fromarray(img)

        # NOTE: transforms.Scale is the old name of transforms.Resize in current torchvision.
        trans = transforms.Compose([
            transforms.Scale(256),
            transforms.CenterCrop(224),
        ])

    assert trans is not None
    img = trans(img)

    ori_img_bgr = ImageHelper.get_cv2_bgr(img, mode=self.configer.get('data', 'input_mode'))
    inputs = self.blob_helper.make_input(img,
                                         input_size=self.configer.get('test', 'input_size'),
                                         scale=1.0)
    with torch.no_grad():
        outputs = self.cls_net(inputs)

    json_dict = self.__get_info_tree(outputs, image_path)
    image_canvas = self.cls_parser.draw_label(ori_img_bgr.copy(), json_dict['label'])
    cv2.imwrite(vis_path, image_canvas)
    cv2.imwrite(raw_path, ori_img_bgr)
    Log.info('Json Path: {}'.format(json_path))
    JsonHelper.save_file(json_dict, json_path)
    return json_dict
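# A hedged sketch of the 'imagenet' preprocessing branch above, written against
# current torchvision where transforms.Scale has been renamed transforms.Resize.
# The file name is a placeholder and the normalization statistics are the usual
# ImageNet values, which are an assumption rather than this project's config.
from PIL import Image
import torchvision.transforms as transforms

preprocess = transforms.Compose([
    transforms.Resize(256),          # shortest side to 256
    transforms.CenterCrop(224),      # central 224x224 crop
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])

img = Image.open('example.jpg').convert('RGB')   # placeholder path
inputs = preprocess(img).unsqueeze(0)            # add the batch dimension
print(inputs.shape)                              # torch.Size([1, 3, 224, 224])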
def __read_json(self, root_dir, json_path):
    item_list = []
    for item in JsonHelper.load_file(json_path):
        img_path = os.path.join(root_dir, item['image_path'])
        if not os.path.exists(img_path) or not ImageHelper.is_img(img_path):
            Log.error('Image Path: {} is Invalid.'.format(img_path))
            exit(1)

        # Keep (absolute path, filename without extension) pairs.
        item_list.append((img_path, '.'.join(item['image_path'].split('.')[:-1])))

    Log.info('There are {} images.'.format(len(item_list)))
    return item_list
def __read_json_file(self, json_file):
    """
    json_file: path to a JSON annotation file.
    return: two arrays: the keypoints and the bounding boxes of every object.
    """
    json_dict = JsonHelper.load_file(json_file)
    kpts = list()
    bboxes = list()
    for obj in json_dict['objects']:
        kpts.append(obj['keypoints'])
        if 'bbox' in obj:
            bboxes.append(obj['bbox'])

    return np.array(kpts).astype(np.float32), np.array(bboxes).astype(np.float32)
def __read_json_file(self, json_file):
    """
    json_file: path to a JSON annotation file.
    return: the labels, bounding boxes and segmentation polygons of every kept object.
    """
    json_dict = JsonHelper.load_file(json_file)
    labels = list()
    bboxes = list()
    polygons = list()
    for obj in json_dict['objects']:
        # Optionally skip objects flagged as difficult.
        if 'difficult' in obj and obj['difficult'] and not self.configer.get('data', 'keep_difficult'):
            continue

        labels.append(obj['label'])
        bboxes.append(obj['bbox'])
        polygons.append(obj['segm'])

    return np.array(labels), np.array(bboxes).astype(np.float32), polygons
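# A small sketch of the annotation format consumed by the two readers above:
# a top-level 'objects' list whose entries may carry 'keypoints', 'bbox',
# 'label', 'segm' and an optional 'difficult' flag. The sample record below is
# made up for illustration; field names are assumptions inferred from the code.
import numpy as np

sample = {
    'objects': [
        {'label': 0, 'bbox': [10.0, 20.0, 110.0, 220.0],
         'segm': [[10, 20, 110, 20, 110, 220, 10, 220]], 'difficult': False},
        {'label': 1, 'bbox': [5.0, 5.0, 50.0, 60.0],
         'segm': [[5, 5, 50, 5, 50, 60, 5, 60]], 'difficult': True},
    ]
}

keep_difficult = False
labels, bboxes, polygons = [], [], []
for obj in sample['objects']:
    # Mirror the filtering above: drop 'difficult' objects unless they are kept explicitly.
    if obj.get('difficult', False) and not keep_difficult:
        continue

    labels.append(obj['label'])
    bboxes.append(obj['bbox'])
    polygons.append(obj['segm'])

print(np.array(labels), np.array(bboxes, dtype=np.float32).shape, len(polygons))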