def __test_img(self, image_path, json_path, raw_path, vis_path):
    """Run SSD detection on a single image and persist json/raw/vis outputs.

    Returns the detection tree (dict) that was written to ``json_path``.
    """
    Log.info('Image Path: {}'.format(image_path))
    image_tool = self.configer.get('data', 'image_tool')
    input_mode = self.configer.get('data', 'input_mode')
    input_size = self.configer.get('test', 'input_size')
    image = ImageHelper.read_image(image_path, tool=image_tool, mode=input_mode)
    ori_img_bgr = ImageHelper.get_cv2_bgr(image, mode=input_mode)
    in_blob = self.blob_helper.make_input(image, input_size=input_size, scale=1.0)
    with torch.no_grad():
        feat_list, bbox, cls = self.det_net(in_blob)
        # Network-input width/height, used to map boxes back to pixels.
        in_wh = [in_blob.size(3), in_blob.size(2)]
        priors = self.ssd_priorbox_layer(feat_list, input_size)
        batch_detections = self.decode(bbox, cls, priors, self.configer, in_wh)
    json_dict = self.__get_info_tree(batch_detections[0], ori_img_bgr, in_wh)
    image_canvas = self.det_parser.draw_bboxes(
        ori_img_bgr.copy(), json_dict,
        conf_threshold=self.configer.get('res', 'vis_conf_thre'))
    cv2.imwrite(vis_path, image_canvas)
    cv2.imwrite(raw_path, ori_img_bgr)
    Log.info('Json Path: {}'.format(json_path))
    JsonHelper.save_file(json_dict, json_path)
    return json_dict
def __test_img(self, image_path, json_path, raw_path, vis_path):
    """Single-image detection test (PIL pipeline): forward, decode, save.

    Returns the detection tree (dict) that was written to ``json_path``.
    """
    Log.info('Image Path: {}'.format(image_path))
    ori_img_rgb = ImageHelper.img2np(ImageHelper.pil_open_rgb(image_path))
    ori_img_bgr = ImageHelper.rgb2bgr(ori_img_rgb)
    resized = ImageHelper.resize(ori_img_rgb,
                                 tuple(self.configer.get('data', 'input_size')),
                                 Image.CUBIC)
    # ToTensor then per-channel normalization, as in training.
    tensor = Normalize(mean=self.configer.get('trans_params', 'mean'),
                       std=self.configer.get('trans_params', 'std'))(ToTensor()(resized))
    with torch.no_grad():
        tensor = tensor.unsqueeze(0).to(self.device)
        bbox, cls = self.det_net(tensor)
        bbox = bbox.cpu().data.squeeze(0)
        cls = F.softmax(cls.cpu().squeeze(0), dim=-1).data
        boxes, lbls, scores = self.__decode(bbox, cls)
    json_dict = self.__get_info_tree(boxes, lbls, scores, ori_img_rgb)
    image_canvas = self.det_parser.draw_bboxes(
        ori_img_bgr.copy(), json_dict,
        conf_threshold=self.configer.get('vis', 'conf_threshold'))
    cv2.imwrite(vis_path, image_canvas)
    cv2.imwrite(raw_path, ori_img_bgr)
    Log.info('Json Path: {}'.format(json_path))
    JsonHelper.save_file(json_dict, json_path)
    return json_dict
def __test_img(self, image_path, save_path):
    """Estimate keypoints for one image and write the visualization to disk."""
    image_raw = ImageHelper.cv2_open_bgr(image_path)
    # The network consumes RGB; the raw BGR image is kept for drawing.
    rgb_input = ImageHelper.bgr2rgb(image_raw)
    heatmap_avg = self.__get_heatmap(rgb_input)
    peaks = self.__extract_heatmap_info(heatmap_avg)
    canvas = self.__draw_key_point(peaks, image_raw)
    cv2.imwrite(save_path, canvas)
def __getitem__(self, index):
    """Return one unpaired (imgA, imgB) training sample.

    imgB is drawn uniformly at random, so the A/B pairing changes every
    time the same index is requested (standard unpaired-GAN sampling).
    """
    imgA = ImageHelper.read_image(self.imgA_list[index],
                                  tool=self.configer.get('data', 'image_tool'),
                                  mode=self.configer.get('data', 'input_mode'))
    # random.randint(0, n-1) is already a valid index; the old trailing
    # "% len(self.imgB_list)" was a redundant no-op.
    indexB = random.randint(0, len(self.imgB_list) - 1)
    imgB = ImageHelper.read_image(self.imgB_list[indexB],
                                  tool=self.configer.get('data', 'image_tool'),
                                  mode=self.configer.get('data', 'input_mode'))
    if self.aug_transform is not None:
        imgA = self.aug_transform(imgA)
        imgB = self.aug_transform(imgB)
    if self.img_transform is not None:
        imgA = self.img_transform(imgA)
        imgB = self.img_transform(imgB)
    return dict(imgA=DataContainer(imgA, stack=True),
                imgB=DataContainer(imgB, stack=True),
                labelA=DataContainer(self.labelA_list[index], stack=True),
                labelB=DataContainer(self.labelB_list[indexB], stack=True))
def __test_img(self, image_path, json_path, raw_path, vis_path):
    """Single-image pose test: PAF/heatmap inference, keypoint grouping,
    rescaling back to the original resolution, then json/vis/raw dumps."""
    Log.info('Image Path: {}'.format(image_path))
    ori_img_rgb = ImageHelper.img2np(ImageHelper.pil_open_rgb(image_path))
    cur_img_rgb = ImageHelper.resize(ori_img_rgb,
                                     self.configer.get('data', 'input_size'),
                                     interpolation=Image.CUBIC)
    # FIX: the source array is RGB, so converting to BGR is rgb2bgr (the
    # helper used elsewhere in this file); the old call to bgr2rgb was
    # misleading even though both merely swap channels.
    ori_img_bgr = ImageHelper.rgb2bgr(ori_img_rgb)
    paf_avg, heatmap_avg = self.__get_paf_and_heatmap(cur_img_rgb)
    all_peaks = self.__extract_heatmap_info(heatmap_avg)
    special_k, connection_all = self.__extract_paf_info(cur_img_rgb, paf_avg, all_peaks)
    subset, candidate = self.__get_subsets(connection_all, special_k, all_peaks)
    json_dict = self.__get_info_tree(cur_img_rgb, subset, candidate)
    # Map keypoints from network-input resolution back to the original
    # image; entries with visibility flag -1 are placeholders, skip them.
    for i in range(len(json_dict['objects'])):
        for index in range(len(json_dict['objects'][i]['keypoints'])):
            if json_dict['objects'][i]['keypoints'][index][2] == -1:
                continue
            json_dict['objects'][i]['keypoints'][index][0] *= (
                ori_img_rgb.shape[1] / cur_img_rgb.shape[1])
            json_dict['objects'][i]['keypoints'][index][1] *= (
                ori_img_rgb.shape[0] / cur_img_rgb.shape[0])
    image_canvas = self.pose_parser.draw_points(ori_img_bgr.copy(), json_dict)
    image_canvas = self.pose_parser.link_points(image_canvas, json_dict)
    cv2.imwrite(vis_path, image_canvas)
    cv2.imwrite(raw_path, ori_img_bgr)
    Log.info('Json Save Path: {}'.format(json_path))
    with open(json_path, 'w') as save_stream:
        save_stream.write(json.dumps(json_dict))
def test(self, test_dir, out_dir):
    """Run detection over every batch produced for ``test_dir`` and write
    per-image visualization PNGs and json detection files under ``out_dir``."""
    for data_dict in self.test_loader.get_testloader(test_dir=test_dir):
        data_dict['testing'] = True
        loc, conf = self.det_net(data_dict)
        meta_list = DCHelper.tolist(data_dict['meta'])
        batch_detections = self.decode(loc, conf, self.configer, meta_list)
        for i in range(len(meta_list)):
            filename = meta_list[i]['filename']
            ori_img_bgr = ImageHelper.read_image(meta_list[i]['img_path'],
                                                 tool='cv2', mode='BGR')
            json_dict = self.__get_info_tree(batch_detections[i])
            image_canvas = self.det_parser.draw_bboxes(
                ori_img_bgr.copy(), json_dict,
                conf_threshold=self.configer.get('res', 'vis_conf_thre'))
            ImageHelper.save(image_canvas,
                             save_path=os.path.join(out_dir, 'vis/{}.png'.format(filename)))
            # Build the json path once instead of formatting it twice.
            json_path = os.path.join(out_dir, 'json/{}.json'.format(filename))
            Log.info('Json Path: {}'.format(json_path))
            JsonHelper.save_file(json_dict, save_path=json_path)
def __getitem__(self, index):
    """Load one detection sample: image, rescale factor, boxes and labels."""
    img = ImageHelper.read_image(self.img_list[index],
                                 tool=self.configer.get('data', 'image_tool'),
                                 mode=self.configer.get('data', 'input_mode'))
    img_size = ImageHelper.get_size(img)
    bboxes, labels = self.__read_json_file(self.json_list[index])
    if self.aug_transform is not None:
        img, bboxes, labels = self.aug_transform(img, bboxes=bboxes, labels=labels)
    labels = torch.from_numpy(labels).long()
    bboxes = torch.from_numpy(bboxes).float()
    # Faster-RCNN style bound: short side targets 600 unless that would
    # push the long side past 1000.
    scale = min(600 / min(img_size), 1000 / max(img_size))
    if self.img_transform is not None:
        img = self.img_transform(img)
    return img, scale, bboxes, labels
def __getitem__(self, index):
    """Load one (image, labelmap) pair for segmentation training."""
    image_tool = self.configer.get('data', 'image_tool')
    img = ImageHelper.read_image(self.img_list[index],
                                 tool=image_tool,
                                 mode=self.configer.get('data', 'input_mode'))
    labelmap = ImageHelper.read_image(self.label_list[index],
                                      tool=image_tool, mode='P')
    # NOTE(review): both checks test only that the config key EXISTS, not
    # its value — a key explicitly set to false would still trigger the
    # re-encoding; confirm this is intended.
    if not self.configer.is_empty('data', 'label_list'):
        labelmap = self._encode_label(labelmap)
    if not self.configer.is_empty('data', 'reduce_zero_label'):
        labelmap = self._reduce_zero_label(labelmap)
    if self.aug_transform is not None:
        img, labelmap = self.aug_transform(img, labelmap=labelmap)
    if self.img_transform is not None:
        img = self.img_transform(img)
    if self.label_transform is not None:
        labelmap = self.label_transform(labelmap)
    return img, labelmap
def __list_dirs(self, root_dir, dataset):
    """Collect matching (image, json) path lists for ``dataset`` under
    ``root_dir``; optionally folds the val split into train.

    The train and val scans were duplicated verbatim — factored into one
    local helper so the skip logic cannot drift between the two.
    """
    img_list, json_list = [], []

    def collect(image_dir, json_dir):
        # One pass over the annotation dir; skip samples whose image
        # (imgpath returns None) or json file is missing.
        for file_name in os.listdir(json_dir):
            image_name = '.'.join(file_name.split('.')[:-1])
            img_path = ImageHelper.imgpath(image_dir, image_name)
            json_path = os.path.join(json_dir, file_name)
            if not os.path.exists(json_path) or img_path is None:
                Log.warn('Json Path: {} not exists.'.format(json_path))
                continue
            json_list.append(json_path)
            img_list.append(img_path)

    collect(os.path.join(root_dir, dataset, 'image'),
            os.path.join(root_dir, dataset, 'json'))
    if dataset == 'train' and self.configer.get('data', 'include_val'):
        collect(os.path.join(root_dir, 'val/image'),
                os.path.join(root_dir, 'val/json'))
    return img_list, json_list
def __test_img(self, image_path, save_path):
    """Run detection on one image, draw the boxes, and save vis + json.

    Returns (image_raw, lbls, scores, boxes, has_obj).
    """
    Log.info('Image Path: {}'.format(image_path))
    image_raw = ImageHelper.cv2_open_bgr(image_path)
    # Network consumes RGB resized to the configured input size.
    inputs = ImageHelper.bgr2rgb(image_raw)
    inputs = ImageHelper.resize(inputs, tuple(self.configer.get('data', 'input_size')), Image.CUBIC)
    inputs = ToTensor()(inputs)
    inputs = Normalize(mean=self.configer.get('trans_params', 'mean'),
                       std=self.configer.get('trans_params', 'std'))(inputs)
    with torch.no_grad():
        inputs = inputs.unsqueeze(0).to(self.device)
        bbox, cls = self.det_net(inputs)
        bbox = bbox.cpu().data.squeeze(0)
        cls = F.softmax(cls.cpu().squeeze(0), dim=-1).data
        boxes, lbls, scores, has_obj = self.__decode(bbox, cls)
    if has_obj:
        boxes = boxes.cpu().numpy()
        # Boxes are normalized coordinates; clamp to the unit square.
        boxes = np.clip(boxes, 0, 1)
        lbls = lbls.cpu().numpy()
        scores = scores.cpu().numpy()
        img_canvas = self.__draw_box(image_raw, boxes, lbls, scores)
    else:
        # print('None obj detected!')
        img_canvas = image_raw
    Log.info('Save Path: {}'.format(save_path))
    cv2.imwrite(save_path, img_canvas)
    # Boxes is within 0-1.
    # NOTE(review): when has_obj is False, boxes/lbls/scores are still the
    # raw tensors from __decode (not clipped numpy arrays) when passed to
    # __save_json and returned — confirm downstream code tolerates that.
    self.__save_json(save_path, boxes, lbls, scores, image_raw)
    return image_raw, lbls, scores, boxes, has_obj
def __list_dirs(self, root_dir, dataset):
    """Collect paired (imgA, imgB) path lists for ``dataset`` under
    ``root_dir``; optionally folds the val split into train.

    FIX: ImageHelper.imgpath returns None when no image with a known
    extension exists, and os.path.exists(None) raises TypeError — guard
    with explicit None checks (as the json-based __list_dirs already does).
    The duplicated train/val scan is also factored into one local helper.
    """
    imgA_list, imgB_list = [], []

    def collect(imageA_dir, imageB_dir):
        for file_name in os.listdir(imageA_dir):
            image_name = '.'.join(file_name.split('.')[:-1])
            imgA_path = ImageHelper.imgpath(imageA_dir, image_name)
            imgB_path = ImageHelper.imgpath(imageB_dir, image_name)
            if imgA_path is None or imgB_path is None:
                Log.warn('Img Path: {} not exists.'.format(imgA_path))
                continue
            imgA_list.append(imgA_path)
            imgB_list.append(imgB_path)

    collect(os.path.join(root_dir, dataset, 'imageA'),
            os.path.join(root_dir, dataset, 'imageB'))
    if dataset == 'train' and self.configer.get('data', 'include_val'):
        collect(os.path.join(root_dir, 'val/imageA'),
                os.path.join(root_dir, 'val/imageB'))
    return imgA_list, imgB_list
def __getitem__(self, index):
    """Load one pose sample: image, stride-downscaled mask, and keypoints.

    FIX: ``maskmap.size`` (and ``img.size[...]``) only work for PIL images;
    with the cv2 image tool these are numpy arrays whose ``.size`` is a
    scalar element count, so the tuple unpack would raise. Use the
    tool-agnostic ``ImageHelper.get_size`` used throughout this file.
    """
    image_tool = self.configer.get('data', 'image_tool')
    img = ImageHelper.read_image(self.img_list[index],
                                 tool=image_tool,
                                 mode=self.configer.get('data', 'input_mode'))
    if os.path.exists(self.mask_list[index]):
        maskmap = ImageHelper.read_image(self.mask_list[index],
                                         tool=image_tool, mode='P')
    else:
        # No mask on disk: every pixel is valid (all ones).
        img_width, img_height = ImageHelper.get_size(img)
        maskmap = np.ones((img_height, img_width), dtype=np.uint8)
        if image_tool == 'pil':
            maskmap = ImageHelper.np2img(maskmap)
    kpts, bboxes = self.__read_json_file(self.json_list[index])
    if self.aug_transform is not None and len(bboxes) > 0:
        img, maskmap, kpts, bboxes = self.aug_transform(img, maskmap=maskmap,
                                                        kpts=kpts, bboxes=bboxes)
    elif self.aug_transform is not None:
        img, maskmap, kpts = self.aug_transform(img, maskmap=maskmap, kpts=kpts)
    # Mask is downscaled to the network's output resolution.
    width, height = ImageHelper.get_size(maskmap)
    stride = self.configer.get('network', 'stride')
    maskmap = ImageHelper.resize(maskmap, (width // stride, height // stride),
                                 interpolation='nearest')
    maskmap = torch.from_numpy(np.array(maskmap, dtype=np.float32))
    kpts = torch.from_numpy(kpts).float()
    if self.img_transform is not None:
        img = self.img_transform(img)
    return img, maskmap, kpts
def __test_img(self, image_path, save_path):
    """Multi-scale pose heatmap inference on one image; saves the drawn
    keypoints to ``save_path``."""
    Log.info('Image Path: {}'.format(image_path))
    input_mode = self.configer.get('data', 'input_mode')
    ori_image = ImageHelper.read_image(image_path,
                                       tool=self.configer.get('data', 'image_tool'),
                                       mode=input_mode)
    ori_width, ori_height = ImageHelper.get_size(ori_image)
    ori_img_bgr = ImageHelper.get_cv2_bgr(ori_image, mode=input_mode)
    scales = self.configer.get('test', 'scale_search')
    heatmap_avg = np.zeros((ori_height, ori_width,
                            self.configer.get('network', 'heatmap_out')))
    for scale in scales:
        blob = self.blob_helper.make_input(ori_image,
                                           input_size=self.configer.get('test', 'input_size'),
                                           scale=scale)
        with torch.no_grad():
            heatmap_out = self.pose_net(blob)[-1]
        # CHW -> HWC, then resize back to the original resolution so the
        # per-scale maps can be averaged pixelwise.
        heatmap = heatmap_out.squeeze(0).cpu().numpy().transpose(1, 2, 0)
        heatmap = cv2.resize(heatmap, (ori_width, ori_height),
                             interpolation=cv2.INTER_CUBIC)
        heatmap_avg += heatmap / len(scales)
    all_peaks = self.__extract_heatmap_info(heatmap_avg)
    image_canvas = self.__draw_key_point(all_peaks, ori_img_bgr)
    ImageHelper.save(image_canvas, save_path)
def test(self, test_dir, out_dir):
    """Translate every image under test_dir/imgA and test_dir/imgB through
    the GAN and save each generator output under out_dir/<output_key>/.

    FIXES: ``out_dict.item()`` raised AttributeError (dicts have ``.items()``),
    and ``meta_list['img_path']`` indexed a list with a string — the meta of
    the current sample is ``meta_list[i]``. The two identical branches are
    factored into one local helper.
    """
    def run_branch(img_dir, input_key):
        # A->B and B->A differ only in the key fed to the generator.
        for data_dict in self.test_loader.get_testloader(test_dir=img_dir):
            out_dict = self.gan_net({input_key: data_dict['img']})
            meta_list = DCHelper.tolist(data_dict['meta'])
            for key, value in out_dict.items():
                for i in range(len(value)):
                    img_bgr = self.blob_helper.tensor2bgr(value[i])
                    img_path = meta_list[i]['img_path']
                    Log.info('Image Path: {}'.format(img_path))
                    filename = img_path.rstrip().split('/')[-1]
                    ImageHelper.save(img_bgr, os.path.join(out_dir, key, filename))

    imgA_dir = os.path.join(test_dir, 'imgA')
    imgB_dir = os.path.join(test_dir, 'imgB')
    if os.path.exists(imgA_dir):
        Log.info('ImageA Dir: {}'.format(imgA_dir))
        run_branch(imgA_dir, 'imgA')
    if os.path.exists(imgB_dir):
        Log.info('ImageB Dir: {}'.format(imgB_dir))
        run_branch(imgB_dir, 'imgB')
def _get_batch_per_gpu(self, cur_index):
    """Assemble a per-GPU mini-batch of (image, labelmap) pairs whose
    members all share the anchor sample's orientation (landscape/portrait)."""
    image_tool = self.configer.get('data', 'image_tool')
    input_mode = self.configer.get('data', 'input_mode')
    img_out = [ImageHelper.read_image(self.img_list[cur_index],
                                      tool=image_tool, mode=input_mode)]
    label_out = [ImageHelper.read_image(self.label_list[cur_index],
                                        tool=image_tool, mode='P')]
    anchor_size = self.size_list[cur_index]
    # 0 == landscape, 1 == portrait; sizes are cached so no image decode
    # is needed while searching for compatible samples.
    anchor_mark = 0 if anchor_size[0] > anchor_size[1] else 1
    for _ in range(self.configer.get('train', 'batch_per_gpu') - 1):
        while True:
            cur_index = (cur_index + random.randint(1, len(self.img_list) - 1)) % len(self.img_list)
            size = self.size_list[cur_index]
            if (0 if size[0] > size[1] else 1) == anchor_mark:
                img_out.append(ImageHelper.read_image(self.img_list[cur_index],
                                                      tool=image_tool, mode=input_mode))
                label_out.append(ImageHelper.read_image(self.label_list[cur_index],
                                                        tool=image_tool, mode='P'))
                break
    return img_out, label_out
def vis_bboxes(self, image_in, bboxes_list, name='default', sub_dir='bbox'):
    """Draw every bbox in ``bboxes_list`` (x1, y1, x2, y2) on a copy of the
    image and save it under <project_dir>/DET_DIR/<sub_dir>/.

    FIX: the missing-directory branch called ``log.error`` — an undefined
    name that raised NameError; the project logger is ``Log``.
    """
    base_dir = os.path.join(self.configer.get('project_dir'), DET_DIR, sub_dir)
    if isinstance(image_in, Image.Image):
        image = ImageHelper.rgb2bgr(ImageHelper.to_np(image_in))
    else:
        image = image_in.copy()
    if not os.path.exists(base_dir):
        Log.error('Dir:{} not exists!'.format(base_dir))
        os.makedirs(base_dir)
    img_path = os.path.join(
        base_dir, name if ImageHelper.is_img(name) else '{}.jpg'.format(name))
    for bbox in bboxes_list:
        image = cv2.rectangle(image, (bbox[0], bbox[1]),
                              (bbox[2], bbox[3]), (0, 255, 0), 2)
    cv2.imwrite(img_path, image)
def __test_img(self, image_path, json_path, raw_path, vis_path):
    """Single-image Faster R-CNN test: bound-resize, forward, decode ROIs,
    then save json/raw/vis outputs. Returns the detection tree (dict)."""
    Log.info('Image Path: {}'.format(image_path))
    input_mode = self.configer.get('data', 'input_mode')
    img = ImageHelper.read_image(image_path,
                                 tool=self.configer.get('data', 'image_tool'),
                                 mode=input_mode)
    ori_img_bgr = ImageHelper.get_cv2_bgr(img, mode=input_mode)
    # Resize so the short/long sides fit the configured bounds; `scale`
    # records the applied factor for mapping boxes back.
    img, scale = BoundResize()(img)
    inputs = self.blob_helper.make_input(img, scale=1.0)
    with torch.no_grad():
        indices_and_rois, roi_locs, roi_scores, rois_num = self.det_net(inputs, scale)
        batch_detections = self.decode(roi_locs, roi_scores, indices_and_rois,
                                       rois_num, self.configer,
                                       ImageHelper.get_size(img))
    json_dict = self.__get_info_tree(batch_detections[0], ori_img_bgr, scale=scale)
    image_canvas = self.det_parser.draw_bboxes(
        ori_img_bgr.copy(), json_dict,
        conf_threshold=self.configer.get('vis', 'conf_threshold'))
    cv2.imwrite(vis_path, image_canvas)
    cv2.imwrite(raw_path, ori_img_bgr)
    Log.info('Json Path: {}'.format(json_path))
    JsonHelper.save_file(json_dict, json_path)
    return json_dict
def __getitem__(self, index):
    """Load one detection sample wrapped in DataContainers, with resize
    metadata for mapping predictions back to the original image."""
    img = ImageHelper.read_image(self.img_list[index],
                                 tool=self.configer.get('data', 'image_tool'),
                                 mode=self.configer.get('data', 'input_mode'))
    ori_size = ImageHelper.get_size(img)
    bboxes, labels = self.__read_json_file(self.json_list[index])
    if self.aug_transform is not None:
        img, bboxes, labels = self.aug_transform(img, bboxes=bboxes, labels=labels)
    aug_size = ImageHelper.get_size(img)
    meta = dict(
        ori_img_size=ori_size,
        border_size=aug_size,
        # Width ratio between the augmented and the original image.
        img_scale=aug_size[0] / ori_size[0],
    )
    labels = torch.from_numpy(labels).long()
    bboxes = torch.from_numpy(bboxes).float()
    if self.img_transform is not None:
        img = self.img_transform(img)
    return dict(img=DataContainer(img, stack=True),
                bboxes=DataContainer(bboxes, stack=False),
                labels=DataContainer(labels, stack=False),
                meta=DataContainer(meta, stack=False, cpu_only=True))
def __getitem__(self, index):
    """Load one test image plus the metadata needed to undo resizing later."""
    img = ImageHelper.read_image(self.img_list[index],
                                 tool=self.configer.get('data', 'image_tool'),
                                 mode=self.configer.get('data', 'input_mode'))
    ori_img_size = ImageHelper.get_size(img)
    if self.aug_transform is not None:
        img = self.aug_transform(img)
    # get_size is (width, height); reverse to (height, width).
    border_hw = ImageHelper.get_size(img)[::-1]
    if self.img_transform is not None:
        img = self.img_transform(img)
    meta = dict(ori_img_size=ori_img_size,
                border_hw=border_hw,
                img_path=self.img_list[index])
    return dict(img=DataContainer(img, stack=True, return_dc=True,
                                  samples_per_gpu=True),
                meta=DataContainer(meta, stack=False, cpu_only=True,
                                   return_dc=True, samples_per_gpu=True))
def __getitem__(self, index):
    """Load one pose sample and build heatmap/PAF training targets."""
    img = ImageHelper.pil_open_rgb(self.img_list[index])
    if os.path.exists(self.mask_list[index]):
        maskmap = ImageHelper.pil_open_p(self.mask_list[index])
    else:
        # Missing mask: treat every pixel as valid.
        maskmap = ImageHelper.np2img(np.ones((img.size[1], img.size[0]),
                                             dtype=np.uint8))
    kpts, bboxes = self.__read_json_file(self.json_list[index])
    if self.aug_transform is not None:
        if len(bboxes) > 0:
            img, maskmap, kpts, bboxes = self.aug_transform(img, mask=maskmap,
                                                            kpts=kpts, bboxes=bboxes)
        else:
            img, maskmap, kpts = self.aug_transform(img, mask=maskmap, kpts=kpts)
    # Mask is downscaled to the network's output stride, then given a
    # trailing channel axis so it can gate the targets.
    stride = self.configer.get('network', 'stride')
    width, height = maskmap.size
    maskmap = ImageHelper.resize(maskmap, (width // stride, height // stride),
                                 Image.NEAREST)
    maskmap = np.expand_dims(np.array(maskmap, dtype=np.float32), axis=2)
    heatmap = self.pose_data_utilizer.generate_heatmap(kpts=kpts, mask=maskmap)
    vecmap = self.pose_data_utilizer.generate_paf(kpts=kpts, mask=maskmap)
    if self.img_transform is not None:
        img = self.img_transform(img)
    if self.label_transform is not None:
        heatmap = self.label_transform(heatmap)
        vecmap = self.label_transform(vecmap)
        maskmap = self.label_transform(maskmap)
    return img, heatmap, maskmap, vecmap
def test(self, test_dir, out_dir):
    """Run segmentation inference over ``test_dir`` in the configured mode
    and write colorized visualizations plus 'P'-mode label PNGs to ``out_dir``.

    FIXES: the inner per-sample index previously shadowed the outer
    ``enumerate`` index ``i`` (the pointless enumerate is dropped); the dead
    ``total_logits = None`` initializer is removed (every branch either
    assigns it or exits); the repeated test-mode lookup is hoisted.
    """
    for data_dict in self.test_loader.get_testloader(test_dir=test_dir):
        test_mode = self.configer.get('test', 'mode')
        if test_mode == 'ss_test':
            total_logits = self.ss_test(data_dict)
        elif test_mode == 'sscrop_test':
            total_logits = self.sscrop_test(data_dict,
                                            params_dict=self.configer.get('test', 'sscrop_test'))
        elif test_mode == 'ms_test':
            total_logits = self.ms_test(data_dict,
                                        params_dict=self.configer.get('test', 'ms_test'))
        elif test_mode == 'mscrop_test':
            total_logits = self.mscrop_test(data_dict,
                                            params_dict=self.configer.get('test', 'mscrop_test'))
        else:
            Log.error('Invalid test mode:{}'.format(test_mode))
            exit(1)
        meta_list = DCHelper.tolist(data_dict['meta'])
        img_list = DCHelper.tolist(data_dict['img'])
        for i in range(len(meta_list)):
            filename = meta_list[i]['img_path'].split('/')[-1].split('.')[0]
            label_map = np.argmax(total_logits[i], axis=-1)
            label_img = np.array(label_map, dtype=np.uint8)
            ori_img_bgr = self.blob_helper.tensor2bgr(img_list[i][0])
            ori_img_bgr = ImageHelper.resize(ori_img_bgr,
                                             target_size=meta_list[i]['ori_img_size'],
                                             interpolation='linear')
            image_canvas = self.seg_parser.colorize(label_img,
                                                    image_canvas=ori_img_bgr)
            ImageHelper.save(image_canvas,
                             save_path=os.path.join(out_dir, 'vis/{}.png'.format(filename)))
            if self.configer.exists('data', 'label_list'):
                label_img = self.__relabel(label_img)
            if self.configer.exists('data', 'reduce_zero_label') and \
                    self.configer.get('data', 'reduce_zero_label'):
                # Undo the zero-label reduction applied during training.
                label_img = label_img + 1
                label_img = label_img.astype(np.uint8)
            label_img = Image.fromarray(label_img, 'P')
            label_path = os.path.join(out_dir, 'label/{}.png'.format(filename))
            Log.info('Label Path: {}'.format(label_path))
            ImageHelper.save(label_img, label_path)
def __call__(self, img):
    """Resize ``img`` so its short side fits resize_bound[0] and its long
    side fits resize_bound[1]; returns (resized_img, applied_scale)."""
    width_height = ImageHelper.get_size(img)
    # The stricter of the two bound-derived factors wins.
    scale = min(self.resize_bound[0] / min(width_height),
                self.resize_bound[1] / max(width_height))
    target_size = [int(round(side * scale)) for side in width_height]
    resized = ImageHelper.resize(img, target_size=target_size,
                                 interpolation='cubic')
    return resized, scale
def __test_img(self, image_path, save_path):
    """Full pose pipeline on one image: heatmaps + PAFs, grouping into
    skeletons, then drawing and linking the keypoints."""
    Log.info('Image Path: {}'.format(image_path))
    image_raw = ImageHelper.cv2_open_bgr(image_path)
    paf_avg, heatmap_avg = self.__get_paf_and_heatmap(ImageHelper.bgr2rgb(image_raw))
    all_peaks = self.__extract_heatmap_info(heatmap_avg)
    special_k, connection_all = self.__extract_paf_info(image_raw, paf_avg, all_peaks)
    subset, candidate = self.__get_subsets(connection_all, special_k, all_peaks)
    subset, img_canvas = self.__draw_key_point(subset, all_peaks, image_raw)
    img_canvas = self.__link_key_point(img_canvas, candidate, subset)
    cv2.imwrite(save_path, img_canvas)
def __getitem__(self, index):
    """Return the transformed (image, label) pair at ``index``."""
    image = ImageHelper.pil_open_rgb(self.img_list[index])
    labelmap = ImageHelper.pil_open_p(self.label_list[index])
    if self.aug_transform is not None:
        image, labelmap = self.aug_transform(image, label=labelmap)
    if self.img_transform is not None:
        image = self.img_transform(image)
    if self.label_transform is not None:
        labelmap = self.label_transform(labelmap)
    return image, labelmap
def make_input(self, image=None, input_size=None, min_side_length=None,
               max_side_length=None, scale=None):
    """Convert ``image`` into normalized NCHW tensor(s) ready for inference.

    At most one of input_size / min_side_length / max_side_length may be
    given (none means keep the original size); ``scale`` multiplies the
    chosen size. Pass a list/tuple of scales to get a list of tensors.

    FIX: the multi-scale branch reassigned ``image`` with each resized
    result, so every scale after the first resized an already-resized
    image (progressive quality loss). Each scale now resizes the original.
    The duplicated resize/normalize pipeline is factored into one helper.
    """
    if input_size is not None and min_side_length is None and max_side_length is None:
        in_width, in_height = input_size
    elif input_size is None and min_side_length is not None and max_side_length is None:
        width, height = ImageHelper.get_size(image)
        ratio = min_side_length / min(width, height)
        in_width, in_height = int(round(width * ratio)), int(round(height * ratio))
    elif input_size is None and min_side_length is None and max_side_length is not None:
        width, height = ImageHelper.get_size(image)
        ratio = max_side_length / max(width, height)
        in_width, in_height = int(round(width * ratio)), int(round(height * ratio))
    elif input_size is None and min_side_length is None and max_side_length is None:
        in_width, in_height = ImageHelper.get_size(image)
    else:
        Log.error('Incorrect target size setting.')
        exit(1)

    device = torch.device('cpu' if self.configer.get('gpu') is None else 'cuda')

    def to_tensor(s):
        # Always resize from the ORIGINAL image for the requested scale.
        resized = ImageHelper.resize(image,
                                     (int(in_width * s), int(in_height * s)),
                                     interpolation='linear')
        tensor = ToTensor()(resized)
        tensor = Normalize(div_value=self.configer.get('normalize', 'div_value'),
                           mean=self.configer.get('normalize', 'mean'),
                           std=self.configer.get('normalize', 'std'))(tensor)
        return tensor.unsqueeze(0).to(device)

    if not isinstance(scale, (list, tuple)):
        return to_tensor(scale)
    return [to_tensor(s) for s in scale]
def ms_test(self, ori_image):
    """Accumulate segmentation logits over the configured scales plus one
    horizontally-mirrored pass at scale 1.0; returns the summed HWC logits."""
    ori_width, ori_height = ImageHelper.get_size(ori_image)
    total_logits = np.zeros((ori_height, ori_width,
                             self.configer.get('data', 'num_classes')), np.float32)
    for scale in self.configer.get('test', 'scale_search'):
        blob, border_hw = self._get_blob(ori_image, scale=scale)
        logits = self._predict(blob)
        # Strip padding, then resize back to the original resolution.
        logits = cv2.resize(logits[:border_hw[0], :border_hw[1]],
                            (ori_width, ori_height),
                            interpolation=cv2.INTER_CUBIC)
        total_logits += logits
    # Extra mirrored pass: flip the input, predict, flip the logits back.
    if self.configer.get('data', 'image_tool') == 'cv2':
        mirror_image = cv2.flip(ori_image, 1)
    else:
        mirror_image = ori_image.transpose(Image.FLIP_LEFT_RIGHT)
    blob, border_hw = self._get_blob(mirror_image, scale=1.0)
    logits = self._predict(blob)[:border_hw[0], :border_hw[1]]
    logits = cv2.resize(logits[:, ::-1], (ori_width, ori_height),
                        interpolation=cv2.INTER_CUBIC)
    total_logits += logits
    return total_logits
def evaluate(self, pred_dir, gt_dir):
    """Score every prediction in ``pred_dir`` against the same-named ground
    truth in ``gt_dir`` and log mIOU / pixel accuracy."""
    img_cnt = 0
    for filename in os.listdir(pred_dir):
        predmap = ImageHelper.img2np(
            ImageHelper.read_image(os.path.join(pred_dir, filename),
                                   tool='pil', mode='P'))
        gtmap = ImageHelper.img2np(
            ImageHelper.read_image(os.path.join(gt_dir, filename),
                                   tool='pil', mode='P'))
        # Scorer expects batched (N, H, W) inputs; add a batch axis.
        self.seg_running_score.update(self.relabel(predmap)[np.newaxis, :, :],
                                      self.relabel(gtmap)[np.newaxis, :, :])
        img_cnt += 1
    Log.info('Evaluate {} images'.format(img_cnt))
    Log.info('mIOU: {}'.format(self.seg_running_score.get_mean_iou()))
    Log.info('Pixel ACC: {}'.format(self.seg_running_score.get_pixel_acc()))
def __init__(self, test_dir=None, aug_transform=None, img_transform=None, configer=None):
    """Index every image file under ``test_dir`` for test-time loading.

    Each entry of ``item_list`` is (absolute path, filename without extension).
    """
    super(DefaultLoader, self).__init__()
    self.configer = configer
    self.aug_transform = aug_transform
    self.img_transform = img_transform
    items = []
    for filename in FileHelper.list_dir(test_dir):
        if not ImageHelper.is_img(filename):
            continue
        stem = '.'.join(filename.split('.')[:-1])
        items.append((os.path.join(test_dir, filename), stem))
    self.item_list = items
def __list_dirs(self, root_dir, dataset):
    """Collect image/label path lists plus per-image sizes for ``dataset``
    under ``root_dir``; optionally folds the val split into train.

    The train and val scans were duplicated verbatim — factored into one
    local helper so the pairing/skip logic cannot drift between the two.
    """
    img_list, label_list, size_list = [], [], []

    def collect(image_dir, label_dir, img_extension):
        # Pairs each label file with the same-stem image; records the image
        # size up-front (requires decoding each image once).
        for file_name in os.listdir(label_dir):
            image_name = '.'.join(file_name.split('.')[:-1])
            img_path = os.path.join(image_dir,
                                    '{}.{}'.format(image_name, img_extension))
            label_path = os.path.join(label_dir, file_name)
            if not os.path.exists(label_path) or not os.path.exists(img_path):
                Log.error('Label Path: {} not exists.'.format(label_path))
                continue
            img_list.append(img_path)
            label_list.append(label_path)
            img = ImageHelper.read_image(img_path,
                                         tool=self.configer.get('data', 'image_tool'),
                                         mode=self.configer.get('data', 'input_mode'))
            size_list.append(ImageHelper.get_size(img))

    image_dir = os.path.join(root_dir, dataset, 'image')
    # NOTE(review): the first file's extension is assumed to hold for every
    # image in both the train and val dirs — confirm for mixed datasets.
    img_extension = os.listdir(image_dir)[0].split('.')[-1]
    collect(image_dir, os.path.join(root_dir, dataset, 'label'), img_extension)
    if dataset == 'train' and self.configer.get('data', 'include_val'):
        collect(os.path.join(root_dir, 'val/image'),
                os.path.join(root_dir, 'val/label'), img_extension)
    return img_list, label_list, size_list
def __test_img(self, image_path, label_path, vis_path, raw_path):
    """Multi-scale (optionally mirrored / crop-wise) segmentation test on a
    single image; writes the colorized vis, raw copy, and 'P'-mode label map.
    """
    Log.info('Image Path: {}'.format(image_path))
    ori_image = ImageHelper.read_image(image_path,
                                       tool=self.configer.get('data', 'image_tool'),
                                       mode=self.configer.get('data', 'input_mode'))
    ori_width, ori_height = ImageHelper.get_size(ori_image)
    # Logits are accumulated over all scales at the original resolution.
    total_logits = np.zeros((ori_height, ori_width,
                             self.configer.get('data', 'num_classes')), np.float32)
    for scale in self.configer.get('test', 'scale_search'):
        image = self.blob_helper.make_input(image=ori_image,
                                            input_size=self.configer.get('test', 'input_size'),
                                            scale=scale)
        if self.configer.get('test', 'crop_test'):
            crop_size = self.configer.get('test', 'crop_size')
            # Crop-wise prediction only pays off when the blob exceeds the
            # crop window in both dimensions (NCHW: size()[3]=W, size()[2]=H).
            if image.size()[3] > crop_size[0] and image.size()[2] > crop_size[1]:
                results = self._crop_predict(image, crop_size)
            else:
                results = self._predict(image)
        else:
            results = self._predict(image)
        results = cv2.resize(results, (ori_width, ori_height),
                             interpolation=cv2.INTER_LINEAR)
        total_logits += results
    if self.configer.get('test', 'mirror'):
        # Extra horizontally-flipped pass at scale 1.0; the prediction is
        # flipped back (results[:, ::-1]) before being accumulated.
        if self.configer.get('data', 'image_tool') == 'cv2':
            image = cv2.flip(ori_image, 1)
        else:
            image = ori_image.transpose(Image.FLIP_LEFT_RIGHT)
        image = self.blob_helper.make_input(image,
                                            input_size=self.configer.get('test', 'input_size'),
                                            scale=1.0)
        if self.configer.get('test', 'crop_test'):
            crop_size = self.configer.get('test', 'crop_size')
            if image.size()[3] > crop_size[0] and image.size()[2] > crop_size[1]:
                results = self._crop_predict(image, crop_size)
            else:
                results = self._predict(image)
        else:
            results = self._predict(image)
        results = cv2.resize(results[:, ::-1], (ori_width, ori_height),
                             interpolation=cv2.INTER_LINEAR)
        total_logits += results
    # Pixelwise argmax over accumulated logits yields the label map.
    label_map = np.argmax(total_logits, axis=-1)
    label_img = np.array(label_map, dtype=np.uint8)
    # NOTE(review): assumes ori_image is RGB here — confirm input_mode.
    image_bgr = cv2.cvtColor(np.array(ori_image), cv2.COLOR_RGB2BGR)
    image_canvas = self.seg_parser.colorize(label_img, image_canvas=image_bgr)
    ImageHelper.save(image_canvas, save_path=vis_path)
    ImageHelper.save(ori_image, save_path=raw_path)
    if not self.configer.is_empty('data', 'label_list'):
        # Map training ids back to the dataset's original label ids.
        label_img = self.__relabel(label_img)
    label_img = Image.fromarray(label_img, 'P')
    Log.info('Label Path: {}'.format(label_path))
    ImageHelper.save(label_img, label_path)