def __init__(self, configer):
    self.configer = configer
    self.blob_helper = BlobHelper(configer)
    self.pose_vis = PoseVisualizer(configer)
    self.pose_model_manager = PoseModelManager(configer)
    self.pose_data_loader = DataLoader(configer)
    self.heatmap_generator = HeatmapGenerator(configer)
    self.device = torch.device('cpu' if self.configer.get('gpu') is None else 'cuda')
    self.pose_net = None
    self._init_model()
class ImageTranslatorTest(object):
    def __init__(self, configer):
        self.configer = configer
        self.blob_helper = BlobHelper(configer)
        self.model_manager = ModelManager(configer)
        self.test_loader = TestDataLoader(configer)
        self.device = torch.device('cpu' if self.configer.get('gpu') is None else 'cuda')
        self.gan_net = None
        self._init_model()

    def _init_model(self):
        self.gan_net = self.model_manager.gan_model()
        self.gan_net = RunnerHelper.load_net(self, self.gan_net)
        self.gan_net.eval()

    def test(self, test_dir, out_dir):
        imgA_dir = os.path.join(test_dir, 'imageA')
        imgB_dir = os.path.join(test_dir, 'imageB')
        if os.path.exists(imgA_dir):
            Log.info('ImageA Dir: {}'.format(imgA_dir))
            for data_dict in self.test_loader.get_testloader(test_dir=imgA_dir):
                new_data_dict = dict(imgA=data_dict['img'])
                with torch.no_grad():
                    out_dict = self.gan_net(new_data_dict, testing=True)

                meta_list = DCHelper.tolist(data_dict['meta'])
                for key, value in out_dict.items():
                    for i in range(len(value)):
                        img_bgr = self.blob_helper.tensor2bgr(value[i])
                        img_path = meta_list[i]['img_path']
                        Log.info('Image Path: {}'.format(img_path))
                        filename = img_path.rstrip().split('/')[-1]
                        ImageHelper.save(img_bgr, os.path.join(out_dir, key, filename))

        if os.path.exists(imgB_dir):
            Log.info('ImageB Dir: {}'.format(imgB_dir))
            for data_dict in self.test_loader.get_testloader(test_dir=imgB_dir):
                new_data_dict = dict(imgB=data_dict['img'])
                with torch.no_grad():
                    out_dict = self.gan_net(new_data_dict, testing=True)

                meta_list = DCHelper.tolist(data_dict['meta'])
                for key, value in out_dict.items():
                    for i in range(len(value)):
                        img_bgr = self.blob_helper.tensor2bgr(value[i])
                        img_path = meta_list[i]['img_path']
                        Log.info('Image Path: {}'.format(img_path))
                        filename = img_path.rstrip().split('/')[-1]
                        ImageHelper.save(img_bgr, os.path.join(out_dir, key, filename))
def __init__(self, configer):
    self.configer = configer
    self.blob_helper = BlobHelper(configer)
    self.det_visualizer = DetVisualizer(configer)
    self.det_parser = DetParser(configer)
    self.det_model_manager = ModelManager(configer)
    self.det_data_loader = DataLoader(configer)
    self.ssd_priorbox_layer = SSDPriorBoxLayer(configer)
    self.ssd_target_generator = SSDTargetGenerator(configer)
    self.device = torch.device('cpu' if self.configer.get('gpu') is None else 'cuda')
    self.det_net = None
    self._init_model()
def __init__(self, configer):
    self.configer = configer
    self.blob_helper = BlobHelper(configer)
    self.pose_visualizer = PoseVisualizer(configer)
    self.pose_parser = PoseParser(configer)
    self.pose_model_manager = PoseModelManager(configer)
    self.pose_data_loader = PoseDataLoader(configer)
    self.module_utilizer = ModuleUtilizer(configer)
    self.heatmap_generator = HeatmapGenerator(configer)
    self.paf_generator = PafGenerator(configer)
    self.data_transformer = DataTransformer(configer)
    self.device = torch.device('cpu' if self.configer.get('gpu') is None else 'cuda')
    self.pose_net = None
    self._init_model()
def __init__(self, configer):
    self.configer = configer
    self.blob_helper = BlobHelper(configer)
    self.det_visualizer = DetVisualizer(configer)
    self.det_parser = DetParser(configer)
    self.det_model_manager = DetModelManager(configer)
    self.det_data_loader = DataLoader(configer)
    self.roi_sampler = FRROISampler(configer)
    self.rpn_target_generator = RPNTargetAssigner(configer)
    self.fr_priorbox_layer = FRPriorBoxLayer(configer)
    self.fr_roi_generator = FRROIGenerator(configer)
    self.device = torch.device('cpu' if self.configer.get('gpu') is None else 'cuda')
    self.det_net = None
    self._init_model()
def __init__(self, configer):
    self.configer = configer
    self.blob_helper = BlobHelper(configer)
    self.cls_model_manager = ClsModelManager(configer)
    self.cls_data_loader = ClsDataLoader(configer)
    self.module_utilizer = ModuleUtilizer(configer)
    self.cls_parser = ClsParser(configer)
    self.device = torch.device('cpu' if self.configer.get('gpu') is None else 'cuda')
    self.cls_net = None
    if self.configer.get('dataset') == 'imagenet':
        with open(os.path.join(self.configer.get('project_dir'),
                               'datasets/cls/imagenet/imagenet_class_index.json')) as json_stream:
            name_dict = json.load(json_stream)
            name_seq = [name_dict[str(i)][1] for i in range(self.configer.get('data', 'num_classes'))]
            self.configer.add_key_value(['details', 'name_seq'], name_seq)

    self._init_model()
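# The ImageNet branch above depends on the layout of
# imagenet_class_index.json. A minimal sketch of that mapping, assuming
# the widely used "index -> [WordNet ID, label]" format; the sample
# entries below are illustrative, not taken from the source.
import json

name_dict = json.loads('{"0": ["n01440764", "tench"], "1": ["n01443537", "goldfish"]}')
num_classes = 2
name_seq = [name_dict[str(i)][1] for i in range(num_classes)]
print(name_seq)  # ['tench', 'goldfish']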
class YOLOv3Test(object):
    def __init__(self, configer):
        self.configer = configer
        self.blob_helper = BlobHelper(configer)
        self.det_visualizer = DetVisualizer(configer)
        self.det_parser = DetParser(configer)
        self.det_model_manager = ModelManager(configer)
        self.det_data_loader = DataLoader(configer)
        self.yolo_target_generator = YOLOTargetGenerator(configer)
        self.yolo_detection_layer = YOLODetectionLayer(configer)
        self.device = torch.device('cpu' if self.configer.get('gpu') is None else 'cuda')
        self.det_net = None
        self._init_model()

    def _init_model(self):
        self.det_net = self.det_model_manager.object_detector()
        self.det_net = RunnerHelper.load_net(self, self.det_net)
        self.det_net.eval()

    def __test_img(self, image_path, json_path, raw_path, vis_path):
        Log.info('Image Path: {}'.format(image_path))
        img = ImageHelper.read_image(image_path,
                                     tool=self.configer.get('data', 'image_tool'),
                                     mode=self.configer.get('data', 'input_mode'))
        ori_img_bgr = ImageHelper.get_cv2_bgr(img, mode=self.configer.get('data', 'input_mode'))
        input_size = self.configer.get('data', 'input_size')
        inputs = self.blob_helper.make_input(img, input_size=input_size, scale=1.0)
        with torch.no_grad():
            inputs = inputs.unsqueeze(0).to(self.device)
            _, _, detections = self.det_net(inputs)

        # decode needs the network input size to map normalized boxes back to pixels.
        batch_detections = self.decode(detections, self.configer, input_size)
        json_dict = self.__get_info_tree(batch_detections[0], ori_img_bgr, input_size)
        image_canvas = self.det_parser.draw_bboxes(
            ori_img_bgr.copy(), json_dict,
            conf_threshold=self.configer.get('res', 'vis_conf_thre'))
        ImageHelper.save(ori_img_bgr, raw_path)
        ImageHelper.save(image_canvas, vis_path)
        Log.info('Json Path: {}'.format(json_path))
        JsonHelper.save_file(json_dict, json_path)
        return json_dict

    @staticmethod
    def decode(batch_pred_bboxes, configer, input_size):
        # Convert (cx, cy, w, h) predictions to corner format (x1, y1, x2, y2).
        box_corner = batch_pred_bboxes.new(batch_pred_bboxes.shape)
        box_corner[:, :, 0] = batch_pred_bboxes[:, :, 0] - batch_pred_bboxes[:, :, 2] / 2
        box_corner[:, :, 1] = batch_pred_bboxes[:, :, 1] - batch_pred_bboxes[:, :, 3] / 2
        box_corner[:, :, 2] = batch_pred_bboxes[:, :, 0] + batch_pred_bboxes[:, :, 2] / 2
        box_corner[:, :, 3] = batch_pred_bboxes[:, :, 1] + batch_pred_bboxes[:, :, 3] / 2

        # clip bounding box, then scale from normalized coords to pixels
        box_corner[:, :, 0::2] = box_corner[:, :, 0::2].clamp(min=0, max=1.0)
        box_corner[:, :, 1::2] = box_corner[:, :, 1::2].clamp(min=0, max=1.0)
        batch_pred_bboxes[:, :, :4] = box_corner[:, :, :4]
        batch_pred_bboxes[:, :, 0::2] *= input_size[0]
        batch_pred_bboxes[:, :, 1::2] *= input_size[1]

        output = [None for _ in range(len(batch_pred_bboxes))]
        for image_i, image_pred in enumerate(batch_pred_bboxes):
            # Filter out confidence scores below threshold
            conf_mask = (image_pred[:, 4] > configer.get('res', 'val_conf_thre')).squeeze()
            image_pred = image_pred[conf_mask]
            # If none are remaining => process next image
            if image_pred.numel() == 0:
                continue

            # Get score and class with highest confidence
            class_conf, class_pred = torch.max(
                image_pred[:, 5:5 + configer.get('data', 'num_classes')], 1, keepdim=True)
            # Detections ordered as (x1, y1, x2, y2, obj_conf, class_conf, class_pred)
            detections = torch.cat((image_pred[:, :5], class_conf.float(), class_pred.float()), 1)
            output[image_i] = DetHelper.cls_nms(
                detections,
                labels=class_pred.squeeze(1),
                max_threshold=configer.get('res', 'nms')['max_threshold'])

        return output

    def __get_info_tree(self, detections, image_raw, input_size):
        height, width, _ = image_raw.shape
        json_dict = dict()
        object_list = list()
        if detections is not None:
            for x1, y1, x2, y2, conf, cls_conf, cls_pred in detections:
                object_dict = dict()
                xmin = x1.cpu().item() / input_size[0] * width
                ymin = y1.cpu().item() / input_size[1] * height
                xmax = x2.cpu().item() / input_size[0] * width
                ymax = y2.cpu().item() / input_size[1] * height
                object_dict['bbox'] = [xmin, ymin, xmax, ymax]
                object_dict['label'] = int(cls_pred.cpu().item())
                object_dict['score'] = float('%.2f' % conf.cpu().item())
                object_list.append(object_dict)

        json_dict['objects'] = object_list
        return json_dict

    def debug(self, vis_dir):
        count = 0
        for i, data_dict in enumerate(self.det_data_loader.get_trainloader()):
            inputs = data_dict['img']
            batch_gt_bboxes = data_dict['bboxes']
            batch_gt_labels = data_dict['labels']
            input_size = [inputs.size(3), inputs.size(2)]
            feat_list = list()
            for stride in self.configer.get('network', 'stride_list'):
                feat_list.append(torch.zeros((inputs.size(0), 1,
                                              input_size[1] // stride,
                                              input_size[0] // stride)))

            targets, _, _ = self.yolo_target_generator(feat_list, batch_gt_bboxes,
                                                       batch_gt_labels, input_size)
            targets = targets.to(self.device)
            anchors_list = self.configer.get('gt', 'anchors_list')
            output_list = list()
            be_c = 0
            for f_index, anchors in enumerate(anchors_list):
                feat_stride = self.configer.get('network', 'stride_list')[f_index]
                fm_size = [int(round(border / feat_stride)) for border in input_size]
                num_c = len(anchors) * fm_size[0] * fm_size[1]
                output_list.append(
                    targets[:, be_c:be_c + num_c].contiguous()
                    .view(targets.size(0), len(anchors), fm_size[1], fm_size[0], -1)
                    .permute(0, 1, 4, 2, 3).contiguous()
                    .view(targets.size(0), -1, fm_size[1], fm_size[0]))
                be_c += num_c

            batch_detections = self.decode(self.yolo_detection_layer(output_list)[2],
                                           self.configer, input_size)
            for j in range(inputs.size(0)):
                count = count + 1
                if count > 20:
                    exit(1)

                ori_img_bgr = self.blob_helper.tensor2bgr(inputs[j])
                json_dict = self.__get_info_tree(batch_detections[j], ori_img_bgr, input_size)
                image_canvas = self.det_parser.draw_bboxes(
                    ori_img_bgr.copy(), json_dict,
                    conf_threshold=self.configer.get('vis', 'obj_threshold'))
                cv2.imwrite(os.path.join(vis_dir, '{}_{}_vis.png'.format(i, j)), image_canvas)
                cv2.imshow('main', image_canvas)
                cv2.waitKey()
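# A self-contained sketch of the core of YOLOv3Test.decode: convert
# center-format (cx, cy, w, h) boxes normalized to [0, 1] into
# pixel-space corners and drop low-objectness rows. The NMS step and
# config lookups are omitted; the (cx, cy, w, h, objectness) layout
# mirrors the columns used above.
import torch

def decode_boxes(pred, conf_thre, input_size):
    cx, cy, w, h = pred[:, 0], pred[:, 1], pred[:, 2], pred[:, 3]
    x1 = (cx - w / 2).clamp(0.0, 1.0) * input_size[0]
    y1 = (cy - h / 2).clamp(0.0, 1.0) * input_size[1]
    x2 = (cx + w / 2).clamp(0.0, 1.0) * input_size[0]
    y2 = (cy + h / 2).clamp(0.0, 1.0) * input_size[1]
    boxes = torch.stack([x1, y1, x2, y2, pred[:, 4]], dim=1)
    return boxes[pred[:, 4] > conf_thre]  # keep confident detections only

pred = torch.tensor([[0.5, 0.5, 0.2, 0.4, 0.9],   # survives the 0.5 threshold
                     [0.1, 0.1, 0.3, 0.3, 0.2]])  # filtered out
print(decode_boxes(pred, conf_thre=0.5, input_size=(416, 416)))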
class FCNSegmentorTest(object):
    def __init__(self, configer):
        self.configer = configer
        self.blob_helper = BlobHelper(configer)
        self.seg_visualizer = SegVisualizer(configer)
        self.seg_parser = SegParser(configer)
        self.seg_model_manager = ModelManager(configer)
        self.seg_data_loader = DataLoader(configer)
        self.test_loader = TestDataLoader(configer)
        self.device = torch.device('cpu' if self.configer.get('gpu') is None else 'cuda')
        self.seg_net = None
        self._init_model()

    def _init_model(self):
        self.seg_net = self.seg_model_manager.semantic_segmentor()
        self.seg_net = RunnerHelper.load_net(self, self.seg_net)
        self.seg_net.eval()

    def test(self, test_dir, out_dir):
        for data_dict in self.test_loader.get_testloader(test_dir=test_dir):
            total_logits = None
            if self.configer.get('test', 'mode') == 'ss_test':
                total_logits = self.ss_test(data_dict)
            elif self.configer.get('test', 'mode') == 'sscrop_test':
                total_logits = self.sscrop_test(data_dict)
            elif self.configer.get('test', 'mode') == 'ms_test':
                total_logits = self.ms_test(data_dict)
            elif self.configer.get('test', 'mode') == 'mscrop_test':
                total_logits = self.mscrop_test(data_dict)
            else:
                Log.error('Invalid test mode:{}'.format(self.configer.get('test', 'mode')))
                exit(1)

            meta_list = DCHelper.tolist(data_dict['meta'])
            img_list = DCHelper.tolist(data_dict['img'])
            for i in range(len(meta_list)):
                filename = meta_list[i]['img_path'].split('/')[-1].split('.')[0]
                label_map = np.argmax(total_logits[i], axis=-1)
                label_img = np.array(label_map, dtype=np.uint8)
                ori_img_bgr = self.blob_helper.tensor2bgr(img_list[i][0])
                ori_img_bgr = ImageHelper.resize(ori_img_bgr,
                                                 target_size=meta_list[i]['ori_img_size'],
                                                 interpolation='linear')
                image_canvas = self.seg_parser.colorize(label_img, image_canvas=ori_img_bgr)
                ImageHelper.save(image_canvas,
                                 save_path=os.path.join(out_dir, 'vis/{}.png'.format(filename)))

                if self.configer.exists('data', 'label_list'):
                    label_img = self.__relabel(label_img)

                if self.configer.exists('data', 'reduce_zero_label') \
                        and self.configer.get('data', 'reduce_zero_label'):
                    label_img = label_img + 1
                    label_img = label_img.astype(np.uint8)

                label_img = Image.fromarray(label_img, 'P')
                label_path = os.path.join(out_dir, 'label/{}.png'.format(filename))
                Log.info('Label Path: {}'.format(label_path))
                ImageHelper.save(label_img, label_path)

    def ss_test(self, in_data_dict):
        data_dict = self.blob_helper.get_blob(in_data_dict, scale=1.0)
        results = self._predict(data_dict)
        return results

    def sscrop_test(self, in_data_dict):
        data_dict = self.blob_helper.get_blob(in_data_dict, scale=1.0)
        crop_size = self.configer.get('test', 'crop_size')
        if any(image.size()[3] < crop_size[0] or image.size()[2] < crop_size[1]
               for image in DCHelper.tolist(data_dict['img'])):
            results = self._predict(data_dict)
        else:
            results = self._crop_predict(data_dict, crop_size)

        return results

    def mscrop_test(self, in_data_dict):
        total_logits = [np.zeros((meta['ori_img_size'][1], meta['ori_img_size'][0],
                                  self.configer.get('data', 'num_classes')), np.float32)
                        for meta in DCHelper.tolist(in_data_dict['meta'])]
        for scale in self.configer.get('test', 'scale_search'):
            data_dict = self.blob_helper.get_blob(in_data_dict, scale=scale)
            crop_size = self.configer.get('test', 'crop_size')
            if any(image.size()[3] < crop_size[0] or image.size()[2] < crop_size[1]
                   for image in DCHelper.tolist(data_dict['img'])):
                results = self._predict(data_dict)
            else:
                results = self._crop_predict(data_dict, crop_size)

            for i in range(len(total_logits)):
                total_logits[i] += results[i]

        for scale in self.configer.get('test', 'scale_search'):
            data_dict = self.blob_helper.get_blob(in_data_dict, scale=scale, flip=True)
            crop_size = self.configer.get('test', 'crop_size')
            if any(image.size()[3] < crop_size[0] or image.size()[2] < crop_size[1]
                   for image in DCHelper.tolist(data_dict['img'])):
                results = self._predict(data_dict)
            else:
                results = self._crop_predict(data_dict, crop_size)

            for i in range(len(total_logits)):
                total_logits[i] += results[i][:, ::-1]

        return total_logits

    def ms_test(self, in_data_dict):
        total_logits = [np.zeros((meta['ori_img_size'][1], meta['ori_img_size'][0],
                                  self.configer.get('data', 'num_classes')), np.float32)
                        for meta in DCHelper.tolist(in_data_dict['meta'])]
        for scale in self.configer.get('test', 'scale_search'):
            data_dict = self.blob_helper.get_blob(in_data_dict, scale=scale)
            results = self._predict(data_dict)
            for i in range(len(total_logits)):
                total_logits[i] += results[i]

        for scale in self.configer.get('test', 'scale_search'):
            data_dict = self.blob_helper.get_blob(in_data_dict, scale=scale, flip=True)
            results = self._predict(data_dict)
            for i in range(len(total_logits)):
                total_logits[i] += results[i][:, ::-1]

        return total_logits

    def _crop_predict(self, data_dict, crop_size):
        split_batch = list()
        height_starts_list = list()
        width_starts_list = list()
        hw_list = list()
        for image in DCHelper.tolist(data_dict['img']):
            height, width = image.size()[2:]
            hw_list.append([height, width])
            np_image = image.squeeze(0).permute(1, 2, 0).cpu().numpy()
            height_starts = self._decide_intersection(height, crop_size[1])
            width_starts = self._decide_intersection(width, crop_size[0])
            split_crops = []
            for height in height_starts:
                for width in width_starts:
                    image_crop = np_image[height:height + crop_size[1],
                                          width:width + crop_size[0]]
                    split_crops.append(image_crop[np.newaxis, :])

            height_starts_list.append(height_starts)
            width_starts_list.append(width_starts)
            split_crops = np.concatenate(split_crops, axis=0)  # (n, crop_image_size, crop_image_size, 3)
            inputs = torch.from_numpy(split_crops).permute(0, 3, 1, 2).to(self.device)
            split_batch.append(inputs)

        out_list = list()
        with torch.no_grad():
            results = self.seg_net.forward(DCHelper.todc(split_batch, stack=True, samples_per_gpu=1))
            for res in results:
                out_list.append(res[-1].permute(0, 2, 3, 1).cpu().numpy())

        total_logits = [np.zeros((hw[0], hw[1],
                                  self.configer.get('data', 'num_classes')), np.float32)
                        for hw in hw_list]
        count_predictions = [np.zeros((hw[0], hw[1],
                                       self.configer.get('data', 'num_classes')), np.float32)
                             for hw in hw_list]
        for i in range(len(height_starts_list)):
            index = 0
            for height in height_starts_list[i]:
                for width in width_starts_list[i]:
                    total_logits[i][height:height + crop_size[1],
                                    width:width + crop_size[0]] += out_list[i][index]
                    count_predictions[i][height:height + crop_size[1],
                                         width:width + crop_size[0]] += 1
                    index += 1

        for i in range(len(total_logits)):
            total_logits[i] /= count_predictions[i]

        for i, meta in enumerate(DCHelper.tolist(data_dict['meta'])):
            total_logits[i] = cv2.resize(
                total_logits[i][:meta['border_hw'][0], :meta['border_hw'][1]],
                (meta['ori_img_size'][0], meta['ori_img_size'][1]),
                interpolation=cv2.INTER_CUBIC)

        return total_logits

    def _decide_intersection(self, total_length, crop_length):
        stride = int(crop_length * self.configer.get('test', 'crop_stride_ratio'))  # set the stride as the paper does
        times = (total_length - crop_length) // stride + 1
        cropped_starting = []
        for i in range(times):
            cropped_starting.append(stride * i)

        if total_length - cropped_starting[-1] > crop_length:
            cropped_starting.append(total_length - crop_length)  # must cover the total image

        return cropped_starting

    def _predict(self, data_dict):
        with torch.no_grad():
            total_logits = list()
            results = self.seg_net.forward(data_dict['img'])
            for res in results:
                total_logits.append(res[-1].squeeze(0).permute(1, 2, 0).cpu().numpy())

            for i, meta in enumerate(DCHelper.tolist(data_dict['meta'])):
                total_logits[i] = cv2.resize(
                    total_logits[i][:meta['border_hw'][0], :meta['border_hw'][1]],
                    (meta['ori_img_size'][0], meta['ori_img_size'][1]),
                    interpolation=cv2.INTER_CUBIC)

        return total_logits

    def __relabel(self, label_map):
        height, width = label_map.shape
        label_dst = np.zeros((height, width), dtype=np.uint8)
        for i in range(self.configer.get('data', 'num_classes')):
            label_dst[label_map == i] = self.configer.get('data', 'label_list')[i]

        label_dst = np.array(label_dst, dtype=np.uint8)
        return label_dst

    def debug(self, vis_dir):
        count = 0
        for i, data_dict in enumerate(self.seg_data_loader.get_trainloader()):
            inputs = data_dict['img']
            targets = data_dict['labelmap']
            for j in range(inputs.size(0)):
                count = count + 1
                if count > 20:
                    exit(1)

                image_bgr = self.blob_helper.tensor2bgr(inputs[j])
                label_map = targets[j].numpy()
                image_canvas = self.seg_parser.colorize(label_map, image_canvas=image_bgr)
                cv2.imwrite(os.path.join(vis_dir, '{}_{}_vis.png'.format(i, j)), image_canvas)
                cv2.imshow('main', image_canvas)
                cv2.waitKey()
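# _decide_intersection is the piece that makes sliding-window inference
# cover the whole image. A standalone sketch, with the stride ratio as
# an explicit argument standing in for the 'crop_stride_ratio' config
# value (the 2/3 default here is illustrative):
def decide_intersection(total_length, crop_length, stride_ratio=2.0 / 3):
    stride = int(crop_length * stride_ratio)
    times = (total_length - crop_length) // stride + 1
    starts = [stride * i for i in range(times)]
    if total_length - starts[-1] > crop_length:
        starts.append(total_length - crop_length)  # last crop touches the border
    return starts

print(decide_intersection(1024, 512))  # [0, 341, 512]: crops cover 0..1024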
class FCNSegmentorTest(object):
    def __init__(self, configer):
        self.configer = configer
        self.blob_helper = BlobHelper(configer)
        self.seg_visualizer = SegVisualizer(configer)
        self.seg_parser = SegParser(configer)
        self.seg_model_manager = SegModelManager(configer)
        self.seg_data_loader = DataLoader(configer)
        self.device = torch.device('cpu' if self.configer.get('gpu') is None else 'cuda')
        self.seg_net = None
        self._init_model()

    def _init_model(self):
        self.seg_net = self.seg_model_manager.semantic_segmentor()
        self.seg_net = RunnerHelper.load_net(self, self.seg_net)
        self.seg_net.eval()

    def _get_blob(self, ori_image, scale=None):
        assert scale is not None
        image = None
        if self.configer.exists('test', 'input_size'):
            image = self.blob_helper.make_input(image=ori_image,
                                                input_size=self.configer.get('test', 'input_size'),
                                                scale=scale)
        elif self.configer.exists('test', 'min_side_length') and not self.configer.exists('test', 'max_side_length'):
            image = self.blob_helper.make_input(image=ori_image,
                                                min_side_length=self.configer.get('test', 'min_side_length'),
                                                scale=scale)
        elif not self.configer.exists('test', 'min_side_length') and self.configer.exists('test', 'max_side_length'):
            image = self.blob_helper.make_input(image=ori_image,
                                                max_side_length=self.configer.get('test', 'max_side_length'),
                                                scale=scale)
        elif self.configer.exists('test', 'min_side_length') and self.configer.exists('test', 'max_side_length'):
            image = self.blob_helper.make_input(image=ori_image,
                                                min_side_length=self.configer.get('test', 'min_side_length'),
                                                max_side_length=self.configer.get('test', 'max_side_length'),
                                                scale=scale)
        else:
            Log.error('Test setting error')
            exit(1)

        b, c, h, w = image.size()
        border_hw = [h, w]
        if self.configer.exists('test', 'fit_stride'):
            stride = self.configer.get('test', 'fit_stride')
            pad_w = 0 if (w % stride == 0) else stride - (w % stride)  # right
            pad_h = 0 if (h % stride == 0) else stride - (h % stride)  # down
            expand_image = torch.zeros((b, c, h + pad_h, w + pad_w)).to(image.device)
            expand_image[:, :, 0:h, 0:w] = image
            image = expand_image

        return image, border_hw

    def test_img(self, image_path, label_path, vis_path, raw_path):
        Log.info('Image Path: {}'.format(image_path))
        ori_image = ImageHelper.read_image(image_path,
                                           tool=self.configer.get('data', 'image_tool'),
                                           mode=self.configer.get('data', 'input_mode'))
        total_logits = None
        if self.configer.get('test', 'mode') == 'ss_test':
            total_logits = self.ss_test(ori_image)
        elif self.configer.get('test', 'mode') == 'sscrop_test':
            total_logits = self.sscrop_test(ori_image)
        elif self.configer.get('test', 'mode') == 'ms_test':
            total_logits = self.ms_test(ori_image)
        elif self.configer.get('test', 'mode') == 'mscrop_test':
            total_logits = self.mscrop_test(ori_image)
        else:
            Log.error('Invalid test mode:{}'.format(self.configer.get('test', 'mode')))
            exit(1)

        label_map = np.argmax(total_logits, axis=-1)
        label_img = np.array(label_map, dtype=np.uint8)
        ori_img_bgr = ImageHelper.get_cv2_bgr(ori_image, mode=self.configer.get('data', 'input_mode'))
        image_canvas = self.seg_parser.colorize(label_img, image_canvas=ori_img_bgr)
        ImageHelper.save(image_canvas, save_path=vis_path)
        ImageHelper.save(ori_image, save_path=raw_path)

        if self.configer.exists('data', 'label_list'):
            label_img = self.__relabel(label_img)

        if self.configer.exists('data', 'reduce_zero_label') and self.configer.get('data', 'reduce_zero_label'):
            label_img = label_img + 1
            label_img = label_img.astype(np.uint8)

        label_img = Image.fromarray(label_img, 'P')
        Log.info('Label Path: {}'.format(label_path))
        ImageHelper.save(label_img, label_path)

    def ss_test(self, ori_image):
        ori_width, ori_height = ImageHelper.get_size(ori_image)
        total_logits = np.zeros((ori_height, ori_width,
                                 self.configer.get('data', 'num_classes')), np.float32)
        image, border_hw = self._get_blob(ori_image, scale=1.0)
        results = self._predict(image)
        results = cv2.resize(results[:border_hw[0], :border_hw[1]],
                             (ori_width, ori_height), interpolation=cv2.INTER_CUBIC)
        total_logits += results
        return total_logits

    def sscrop_test(self, ori_image):
        ori_width, ori_height = ImageHelper.get_size(ori_image)
        total_logits = np.zeros((ori_height, ori_width,
                                 self.configer.get('data', 'num_classes')), np.float32)
        image, _ = self._get_blob(ori_image, scale=1.0)
        crop_size = self.configer.get('test', 'crop_size')
        if image.size()[3] > crop_size[0] and image.size()[2] > crop_size[1]:
            results = self._crop_predict(image, crop_size)
        else:
            results = self._predict(image)

        results = cv2.resize(results, (ori_width, ori_height), interpolation=cv2.INTER_CUBIC)
        total_logits += results
        return total_logits

    def mscrop_test(self, ori_image):
        ori_width, ori_height = ImageHelper.get_size(ori_image)
        total_logits = np.zeros((ori_height, ori_width,
                                 self.configer.get('data', 'num_classes')), np.float32)
        for scale in self.configer.get('test', 'scale_search'):
            image, _ = self._get_blob(ori_image, scale=scale)
            crop_size = self.configer.get('test', 'crop_size')
            if image.size()[3] > crop_size[0] and image.size()[2] > crop_size[1]:
                results = self._crop_predict(image, crop_size)
            else:
                results = self._predict(image)

            results = cv2.resize(results, (ori_width, ori_height), interpolation=cv2.INTER_CUBIC)
            total_logits += results

        return total_logits

    def ms_test(self, ori_image):
        ori_width, ori_height = ImageHelper.get_size(ori_image)
        total_logits = np.zeros((ori_height, ori_width,
                                 self.configer.get('data', 'num_classes')), np.float32)
        for scale in self.configer.get('test', 'scale_search'):
            image, border_hw = self._get_blob(ori_image, scale=scale)
            results = self._predict(image)
            results = cv2.resize(results[:border_hw[0], :border_hw[1]],
                                 (ori_width, ori_height), interpolation=cv2.INTER_CUBIC)
            total_logits += results

        if self.configer.get('data', 'image_tool') == 'cv2':
            mirror_image = cv2.flip(ori_image, 1)
        else:
            mirror_image = ori_image.transpose(Image.FLIP_LEFT_RIGHT)

        image, border_hw = self._get_blob(mirror_image, scale=1.0)
        results = self._predict(image)
        results = results[:border_hw[0], :border_hw[1]]
        results = cv2.resize(results[:, ::-1], (ori_width, ori_height), interpolation=cv2.INTER_CUBIC)
        total_logits += results
        return total_logits

    def _crop_predict(self, image, crop_size):
        height, width = image.size()[2:]
        np_image = image.squeeze(0).permute(1, 2, 0).cpu().numpy()
        height_starts = self._decide_intersection(height, crop_size[1])
        width_starts = self._decide_intersection(width, crop_size[0])
        split_crops = []
        for height in height_starts:
            for width in width_starts:
                image_crop = np_image[height:height + crop_size[1],
                                      width:width + crop_size[0]]
                split_crops.append(image_crop[np.newaxis, :])

        split_crops = np.concatenate(split_crops, axis=0)  # (n, crop_image_size, crop_image_size, 3)
        inputs = torch.from_numpy(split_crops).permute(0, 3, 1, 2).to(self.device)
        with torch.no_grad():
            results = self.seg_net.forward(inputs)
            results = results[-1].permute(0, 2, 3, 1).cpu().numpy()

        reassemble = np.zeros((np_image.shape[0], np_image.shape[1], results.shape[-1]), np.float32)
        index = 0
        for height in height_starts:
            for width in width_starts:
                reassemble[height:height + crop_size[1],
                           width:width + crop_size[0]] += results[index]
                index += 1

        return reassemble

    def _decide_intersection(self, total_length, crop_length):
        stride = int(crop_length * self.configer.get('test', 'crop_stride_ratio'))  # set the stride as the paper does
        times = (total_length - crop_length) // stride + 1
        cropped_starting = []
        for i in range(times):
            cropped_starting.append(stride * i)

        if total_length - cropped_starting[-1] > crop_length:
            cropped_starting.append(total_length - crop_length)  # must cover the total image

        return cropped_starting

    def _predict(self, inputs):
        with torch.no_grad():
            results = self.seg_net.forward(inputs)
            results = results[-1].squeeze(0).permute(1, 2, 0).cpu().numpy()

        return results

    def __relabel(self, label_map):
        height, width = label_map.shape
        label_dst = np.zeros((height, width), dtype=np.uint8)
        for i in range(self.configer.get('data', 'num_classes')):
            label_dst[label_map == i] = self.configer.get('data', 'label_list')[i]

        label_dst = np.array(label_dst, dtype=np.uint8)
        return label_dst

    def debug(self, vis_dir):
        count = 0
        for i, data_dict in enumerate(self.seg_data_loader.get_trainloader()):
            inputs = data_dict['img']
            targets = data_dict['labelmap']
            for j in range(inputs.size(0)):
                count = count + 1
                if count > 20:
                    exit(1)

                image_bgr = self.blob_helper.tensor2bgr(inputs[j])
                label_map = targets[j].numpy()
                image_canvas = self.seg_parser.colorize(label_map, image_canvas=image_bgr)
                cv2.imwrite(os.path.join(vis_dir, '{}_{}_vis.png'.format(i, j)), image_canvas)
                cv2.imshow('main', image_canvas)
                cv2.waitKey()
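# The fit_stride logic in _get_blob zero-pads the right and bottom so
# both sides become multiples of the network stride; border_hw records
# the unpadded size so logits can be cropped back later. A minimal sketch:
import torch

def pad_to_stride(image, stride):
    b, c, h, w = image.size()
    pad_h = 0 if h % stride == 0 else stride - h % stride
    pad_w = 0 if w % stride == 0 else stride - w % stride
    expanded = torch.zeros((b, c, h + pad_h, w + pad_w), device=image.device)
    expanded[:, :, :h, :w] = image  # original content sits in the top-left corner
    return expanded, [h, w]

padded, border_hw = pad_to_stride(torch.rand(1, 3, 500, 375), stride=8)
print(padded.shape, border_hw)  # torch.Size([1, 3, 504, 376]) [500, 375]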
class FastRCNNTest(object):
    def __init__(self, configer):
        self.configer = configer
        self.blob_helper = BlobHelper(configer)
        self.det_visualizer = DetVisualizer(configer)
        self.det_parser = DetParser(configer)
        self.det_model_manager = DetModelManager(configer)
        self.det_data_loader = DetDataLoader(configer)
        self.roi_sampler = FRRoiSampleLayer(configer)
        self.module_utilizer = ModuleUtilizer(configer)
        self.rpn_target_generator = RPNTargetGenerator(configer)
        self.fr_priorbox_layer = FRPriorBoxLayer(configer)
        self.fr_roi_generator = FRRoiGenerator(configer)
        self.data_transformer = DataTransformer(configer)
        self.device = torch.device('cpu' if self.configer.get('gpu') is None else 'cuda')
        self.det_net = None
        self._init_model()

    def _init_model(self):
        self.det_net = self.det_model_manager.object_detector()
        self.det_net = self.module_utilizer.load_net(self.det_net)
        self.det_net.eval()

    def __test_img(self, image_path, json_path, raw_path, vis_path):
        Log.info('Image Path: {}'.format(image_path))
        img = ImageHelper.read_image(image_path,
                                     tool=self.configer.get('data', 'image_tool'),
                                     mode=self.configer.get('data', 'input_mode'))
        ori_img_bgr = ImageHelper.get_cv2_bgr(img, mode=self.configer.get('data', 'input_mode'))
        img, scale = BoundResize()(img)
        inputs = self.blob_helper.make_input(img, scale=1.0)
        with torch.no_grad():
            # Forward pass.
            test_group = self.det_net(inputs, scale)
            test_indices_and_rois, test_roi_locs, test_roi_scores, test_rois_num = test_group

        batch_detections = self.decode(test_roi_locs, test_roi_scores,
                                       test_indices_and_rois, test_rois_num,
                                       self.configer, ImageHelper.get_size(img))
        json_dict = self.__get_info_tree(batch_detections[0], ori_img_bgr, scale=scale)
        image_canvas = self.det_parser.draw_bboxes(
            ori_img_bgr.copy(), json_dict,
            conf_threshold=self.configer.get('vis', 'conf_threshold'))
        cv2.imwrite(vis_path, image_canvas)
        cv2.imwrite(raw_path, ori_img_bgr)
        Log.info('Json Path: {}'.format(json_path))
        JsonHelper.save_file(json_dict, json_path)
        return json_dict

    @staticmethod
    def decode(roi_locs, roi_scores, indices_and_rois, test_rois_num, configer, input_size):
        roi_locs = roi_locs.cpu()
        roi_scores = roi_scores.cpu()
        indices_and_rois = indices_and_rois.cpu()
        num_classes = configer.get('data', 'num_classes')
        mean = torch.Tensor(configer.get('roi', 'loc_normalize_mean')).repeat(num_classes)[None]
        std = torch.Tensor(configer.get('roi', 'loc_normalize_std')).repeat(num_classes)[None]
        mean = mean.to(roi_locs.device)
        std = std.to(roi_locs.device)
        roi_locs = (roi_locs * std + mean)
        roi_locs = roi_locs.contiguous().view(-1, num_classes, 4)
        # roi_locs = roi_locs[:, :, [1, 0, 3, 2]]

        rois = indices_and_rois[:, 1:]
        rois = rois.contiguous().view(-1, 1, 4).expand_as(roi_locs)
        wh = torch.exp(roi_locs[:, :, 2:]) * (rois[:, :, 2:] - rois[:, :, :2])
        cxcy = roi_locs[:, :, :2] * (rois[:, :, 2:] - rois[:, :, :2]) \
            + (rois[:, :, :2] + rois[:, :, 2:]) / 2
        dst_bbox = torch.cat([cxcy - wh / 2, cxcy + wh / 2], 2)  # [b, 8732, 4]

        # clip bounding box
        dst_bbox[:, :, 0::2] = (dst_bbox[:, :, 0::2]).clamp(min=0, max=input_size[0] - 1)
        dst_bbox[:, :, 1::2] = (dst_bbox[:, :, 1::2]).clamp(min=0, max=input_size[1] - 1)

        if configer.get('phase') != 'debug':
            cls_prob = F.softmax(roi_scores, dim=1)
        else:
            cls_prob = roi_scores

        cls_label = torch.LongTensor([i for i in range(num_classes)])\
            .contiguous().view(1, num_classes).repeat(indices_and_rois.size(0), 1)
        output = [None for _ in range(test_rois_num.size(0))]
        start_index = 0
        for i in range(test_rois_num.size(0)):
            # batch_index = (indices_and_rois[:, 0] == i).nonzero().contiguous().view(-1,)
            # tmp_dst_bbox = dst_bbox[batch_index]
            # tmp_cls_prob = cls_prob[batch_index]
            # tmp_cls_label = cls_label[batch_index]
            tmp_dst_bbox = dst_bbox[start_index:start_index + test_rois_num[i]]
            tmp_cls_prob = cls_prob[start_index:start_index + test_rois_num[i]]
            tmp_cls_label = cls_label[start_index:start_index + test_rois_num[i]]
            start_index += test_rois_num[i]

            mask = (tmp_cls_prob > configer.get('vis', 'conf_threshold')) & (tmp_cls_label > 0)
            tmp_dst_bbox = tmp_dst_bbox[mask].contiguous().view(-1, 4)
            if tmp_dst_bbox.numel() == 0:
                continue

            tmp_cls_prob = tmp_cls_prob[mask].contiguous().view(-1,).unsqueeze(1)
            tmp_cls_label = tmp_cls_label[mask].contiguous().view(-1,).unsqueeze(1)
            valid_preds = torch.cat((tmp_dst_bbox, tmp_cls_prob.float(), tmp_cls_label.float()), 1)
            keep = DetHelper.cls_nms(valid_preds[:, :4],
                                     scores=valid_preds[:, 4],
                                     labels=valid_preds[:, 5],
                                     nms_threshold=configer.get('nms', 'overlap_threshold'),
                                     iou_mode=configer.get('nms', 'mode'))
            output[i] = valid_preds[keep]

        return output

    def __make_tensor(self, gt_bboxes, gt_labels):
        len_arr = [gt_labels[i].numel() for i in range(len(gt_bboxes))]
        batch_maxlen = max(max(len_arr), 1)
        target_bboxes = torch.zeros((len(gt_bboxes), batch_maxlen, 4)).float()
        target_labels = torch.zeros((len(gt_bboxes), batch_maxlen)).long()
        for i in range(len(gt_bboxes)):
            target_bboxes[i, :len_arr[i], :] = gt_bboxes[i]
            target_labels[i, :len_arr[i]] = gt_labels[i]

        target_bboxes_num = torch.Tensor(len_arr).long()
        return target_bboxes, target_bboxes_num, target_labels

    def __get_info_tree(self, detections, image_raw, scale=1.0):
        height, width, _ = image_raw.shape
        json_dict = dict()
        object_list = list()
        if detections is not None:
            for x1, y1, x2, y2, conf, cls_pred in detections:
                object_dict = dict()
                xmin = min(x1.cpu().item() / scale, width - 1)
                ymin = min(y1.cpu().item() / scale, height - 1)
                xmax = min(x2.cpu().item() / scale, width - 1)
                ymax = min(y2.cpu().item() / scale, height - 1)
                object_dict['bbox'] = [xmin, ymin, xmax, ymax]
                object_dict['label'] = int(cls_pred.cpu().item()) - 1
                object_dict['score'] = float('%.2f' % conf.cpu().item())
                object_list.append(object_dict)

        json_dict['objects'] = object_list
        return json_dict

    def test(self):
        base_dir = os.path.join(self.configer.get('project_dir'),
                                'val/results/det', self.configer.get('dataset'))
        test_img = self.configer.get('test_img')
        test_dir = self.configer.get('test_dir')
        if test_img is None and test_dir is None:
            Log.error('test_img & test_dir not exists.')
            exit(1)

        if test_img is not None and test_dir is not None:
            Log.error('Either test_img or test_dir.')
            exit(1)

        if test_img is not None:
            base_dir = os.path.join(base_dir, 'test_img')
            filename = test_img.rstrip().split('/')[-1]
            json_path = os.path.join(base_dir, 'json',
                                     '{}.json'.format('.'.join(filename.split('.')[:-1])))
            raw_path = os.path.join(base_dir, 'raw', filename)
            vis_path = os.path.join(base_dir, 'vis',
                                    '{}_vis.png'.format('.'.join(filename.split('.')[:-1])))
            FileHelper.make_dirs(json_path, is_file=True)
            FileHelper.make_dirs(raw_path, is_file=True)
            FileHelper.make_dirs(vis_path, is_file=True)
            self.__test_img(test_img, json_path, raw_path, vis_path)
        else:
            base_dir = os.path.join(base_dir, 'test_dir', test_dir.rstrip('/').split('/')[-1])
            FileHelper.make_dirs(base_dir)
            for filename in FileHelper.list_dir(test_dir):
                image_path = os.path.join(test_dir, filename)
                json_path = os.path.join(base_dir, 'json',
                                         '{}.json'.format('.'.join(filename.split('.')[:-1])))
                raw_path = os.path.join(base_dir, 'raw', filename)
                vis_path = os.path.join(base_dir, 'vis',
                                        '{}_vis.png'.format('.'.join(filename.split('.')[:-1])))
                FileHelper.make_dirs(json_path, is_file=True)
                FileHelper.make_dirs(raw_path, is_file=True)
                FileHelper.make_dirs(vis_path, is_file=True)
                self.__test_img(image_path, json_path, raw_path, vis_path)

    def debug(self):
        base_dir = os.path.join(self.configer.get('project_dir'),
                                'vis/results/det', self.configer.get('dataset'), 'debug')
        if not os.path.exists(base_dir):
            os.makedirs(base_dir)

        count = 0
        for i, data_dict in enumerate(self.det_data_loader.get_trainloader()):
            img_scale = data_dict['imgscale']
            inputs = data_dict['img']
            batch_gt_bboxes = data_dict['bboxes']
            # batch_gt_bboxes = ResizeBoxes()(inputs, data_dict['bboxes'])
            batch_gt_labels = data_dict['labels']
            input_size = [inputs.size(3), inputs.size(2)]
            feat_list = list()
            for stride in self.configer.get('rpn', 'stride_list'):
                feat_list.append(torch.zeros((inputs.size(0), 1,
                                              input_size[1] // stride,
                                              input_size[0] // stride)))

            gt_rpn_locs, gt_rpn_labels = self.rpn_target_generator(feat_list, batch_gt_bboxes, input_size)
            eye_matrix = torch.eye(2)
            gt_rpn_labels[gt_rpn_labels == -1] = 0
            gt_rpn_scores = eye_matrix[gt_rpn_labels.view(-1)].view(inputs.size(0), -1, 2)
            test_indices_and_rois, _ = self.fr_roi_generator(
                feat_list, gt_rpn_locs, gt_rpn_scores,
                self.configer.get('rpn', 'n_test_pre_nms'),
                self.configer.get('rpn', 'n_test_post_nms'),
                input_size, img_scale)
            gt_bboxes, gt_nums, gt_labels = self.__make_tensor(batch_gt_bboxes, batch_gt_labels)
            sample_rois, gt_roi_locs, gt_roi_labels = self.roi_sampler(
                test_indices_and_rois, gt_bboxes, gt_nums, gt_labels, input_size)
            self.det_visualizer.vis_rois(inputs, sample_rois[gt_roi_labels > 0])
            gt_cls_roi_locs = torch.zeros((gt_roi_locs.size(0),
                                           self.configer.get('data', 'num_classes'), 4))
            gt_cls_roi_locs[torch.arange(0, sample_rois.size(0)).long(),
                            gt_roi_labels.long()] = gt_roi_locs
            gt_cls_roi_locs = gt_cls_roi_locs.contiguous().view(
                -1, 4 * self.configer.get('data', 'num_classes'))
            eye_matrix = torch.eye(self.configer.get('data', 'num_classes'))
            gt_roi_scores = eye_matrix[gt_roi_labels.view(-1)].view(
                gt_roi_labels.size(0), self.configer.get('data', 'num_classes'))
            test_rois_num = torch.zeros((len(gt_bboxes),)).long()
            for batch_id in range(len(gt_bboxes)):
                batch_index = (sample_rois[:, 0] == batch_id).nonzero().contiguous().view(-1,)
                test_rois_num[batch_id] = batch_index.numel()

            batch_detections = FastRCNNTest.decode(gt_cls_roi_locs, gt_roi_scores,
                                                   sample_rois, test_rois_num,
                                                   self.configer, input_size)
            for j in range(inputs.size(0)):
                count = count + 1
                if count > 20:
                    exit(1)

                ori_img_bgr = self.blob_helper.tensor2bgr(inputs[j])
                self.det_visualizer.vis_default_bboxes(
                    ori_img_bgr, self.fr_priorbox_layer(feat_list, input_size), gt_rpn_labels[j])
                json_dict = self.__get_info_tree(batch_detections[j], ori_img_bgr)
                image_canvas = self.det_parser.draw_bboxes(
                    ori_img_bgr.copy(), json_dict,
                    conf_threshold=self.configer.get('vis', 'conf_threshold'))
                cv2.imwrite(os.path.join(base_dir, '{}_{}_vis.png'.format(i, j)), image_canvas)
                cv2.imshow('main', image_canvas)
                cv2.waitKey()
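# The box arithmetic inside FastRCNNTest.decode, isolated: each roi is
# corner-format (x1, y1, x2, y2) and each loc is a (dx, dy, dw, dh)
# offset relative to the roi's size and center. A sketch without the
# normalization, clipping, and NMS steps:
import torch

def apply_deltas(rois, locs):
    roi_wh = rois[:, 2:] - rois[:, :2]
    roi_ctr = (rois[:, :2] + rois[:, 2:]) / 2
    ctr = locs[:, :2] * roi_wh + roi_ctr  # shift the center
    wh = torch.exp(locs[:, 2:]) * roi_wh  # rescale width/height
    return torch.cat([ctr - wh / 2, ctr + wh / 2], dim=1)

rois = torch.tensor([[10.0, 10.0, 50.0, 30.0]])
locs = torch.tensor([[0.0, 0.0, 0.0, 0.0]])  # zero deltas: box unchanged
print(apply_deltas(rois, locs))              # tensor([[10., 10., 50., 30.]])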
class OpenPoseTest(object):
    def __init__(self, configer):
        self.configer = configer
        self.blob_helper = BlobHelper(configer)
        self.pose_visualizer = PoseVisualizer(configer)
        self.pose_parser = PoseParser(configer)
        self.pose_model_manager = PoseModelManager(configer)
        self.pose_data_loader = DataLoader(configer)
        self.heatmap_generator = HeatmapGenerator(configer)
        self.paf_generator = PafGenerator(configer)
        self.device = torch.device('cpu' if self.configer.get('gpu') is None else 'cuda')
        self.pose_net = None
        self._init_model()

    def _init_model(self):
        self.pose_net = self.pose_model_manager.multi_pose_detector()
        self.pose_net = RunnerHelper.load_net(self, self.pose_net)
        self.pose_net.eval()

    def _get_blob(self, ori_image, scale=None):
        assert scale is not None
        image = self.blob_helper.make_input(image=ori_image, scale=scale)
        b, c, h, w = image.size()
        border_hw = [h, w]
        if self.configer.exists('test', 'fit_stride'):
            stride = self.configer.get('test', 'fit_stride')
            pad_w = 0 if (w % stride == 0) else stride - (w % stride)  # right
            pad_h = 0 if (h % stride == 0) else stride - (h % stride)  # down
            expand_image = torch.zeros((b, c, h + pad_h, w + pad_w)).to(image.device)
            expand_image[:, :, 0:h, 0:w] = image
            image = expand_image

        return image, border_hw

    def __test_img(self, image_path, json_path, raw_path, vis_path):
        Log.info('Image Path: {}'.format(image_path))
        ori_image = ImageHelper.read_image(image_path,
                                           tool=self.configer.get('data', 'image_tool'),
                                           mode=self.configer.get('data', 'input_mode'))
        ori_width, ori_height = ImageHelper.get_size(ori_image)
        ori_img_bgr = ImageHelper.get_cv2_bgr(ori_image, mode=self.configer.get('data', 'input_mode'))
        heatmap_avg = np.zeros((ori_height, ori_width, self.configer.get('network', 'heatmap_out')))
        paf_avg = np.zeros((ori_height, ori_width, self.configer.get('network', 'paf_out')))
        multiplier = [scale * self.configer.get('test', 'input_size')[1] / ori_height
                      for scale in self.configer.get('test', 'scale_search')]
        stride = self.configer.get('network', 'stride')
        for i, scale in enumerate(multiplier):
            image, border_hw = self._get_blob(ori_image, scale=scale)
            with torch.no_grad():
                paf_out_list, heatmap_out_list = self.pose_net(image)
                paf_out = paf_out_list[-1]
                heatmap_out = heatmap_out_list[-1]

            # extract outputs, resize, and remove padding
            heatmap = heatmap_out.squeeze(0).cpu().numpy().transpose(1, 2, 0)
            heatmap = cv2.resize(heatmap, None, fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
            heatmap = cv2.resize(heatmap[:border_hw[0], :border_hw[1]],
                                 (ori_width, ori_height), interpolation=cv2.INTER_CUBIC)
            paf = paf_out.squeeze(0).cpu().numpy().transpose(1, 2, 0)
            paf = cv2.resize(paf, None, fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
            paf = cv2.resize(paf[:border_hw[0], :border_hw[1]],
                             (ori_width, ori_height), interpolation=cv2.INTER_CUBIC)
            heatmap_avg = heatmap_avg + heatmap / len(multiplier)
            paf_avg = paf_avg + paf / len(multiplier)

        all_peaks = self.__extract_heatmap_info(heatmap_avg)
        special_k, connection_all = self.__extract_paf_info(ori_img_bgr, paf_avg, all_peaks)
        subset, candidate = self.__get_subsets(connection_all, special_k, all_peaks)
        json_dict = self.__get_info_tree(ori_img_bgr, subset, candidate)
        image_canvas = self.pose_parser.draw_points(ori_img_bgr.copy(), json_dict)
        image_canvas = self.pose_parser.link_points(image_canvas, json_dict)
        ImageHelper.save(image_canvas, vis_path)
        ImageHelper.save(ori_img_bgr, raw_path)
        Log.info('Json Save Path: {}'.format(json_path))
        JsonHelper.save_file(json_dict, json_path)

    def __get_info_tree(self, image_raw, subset, candidate):
        json_dict = dict()
        height, width, _ = image_raw.shape
        json_dict['image_height'] = height
        json_dict['image_width'] = width
        object_list = list()
        for n in range(len(subset)):
            if subset[n][-1] < self.configer.get('res', 'num_threshold'):
                continue

            if subset[n][-2] / subset[n][-1] < self.configer.get('res', 'avg_threshold'):
                continue

            object_dict = dict()
            object_dict['kpts'] = np.zeros((self.configer.get('data', 'num_kpts'), 3)).tolist()
            for j in range(self.configer.get('data', 'num_kpts')):
                index = subset[n][j]
                if index == -1:
                    object_dict['kpts'][j][0] = -1
                    object_dict['kpts'][j][1] = -1
                    object_dict['kpts'][j][2] = -1
                else:
                    object_dict['kpts'][j][0] = candidate[index.astype(int)][0]
                    object_dict['kpts'][j][1] = candidate[index.astype(int)][1]
                    object_dict['kpts'][j][2] = 1

            object_dict['score'] = subset[n][-2]
            object_list.append(object_dict)

        json_dict['objects'] = object_list
        return json_dict

    def __extract_heatmap_info(self, heatmap_avg):
        all_peaks = []
        peak_counter = 0
        for part in range(self.configer.get('data', 'num_kpts')):
            map_ori = heatmap_avg[:, :, part]
            map_gau = gaussian_filter(map_ori, sigma=3)
            map_left = np.zeros(map_gau.shape)
            map_left[1:, :] = map_gau[:-1, :]
            map_right = np.zeros(map_gau.shape)
            map_right[:-1, :] = map_gau[1:, :]
            map_up = np.zeros(map_gau.shape)
            map_up[:, 1:] = map_gau[:, :-1]
            map_down = np.zeros(map_gau.shape)
            map_down[:, :-1] = map_gau[:, 1:]
            peaks_binary = np.logical_and.reduce(
                (map_gau >= map_left, map_gau >= map_right,
                 map_gau >= map_up, map_gau >= map_down,
                 map_gau > self.configer.get('res', 'part_threshold')))
            peaks = zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0])  # note reverse
            peaks = list(peaks)
            '''
            del_flag = [0 for i in range(len(peaks))]
            for i in range(len(peaks)):
                if del_flag[i] == 0:
                    for j in range(i + 1, len(peaks)):
                        if max(abs(peaks[i][0] - peaks[j][0]),
                               abs(peaks[i][1] - peaks[j][1])) <= 6:
                            del_flag[j] = 1

            new_peaks = list()
            for i in range(len(peaks)):
                if del_flag[i] == 0:
                    new_peaks.append(peaks[i])

            peaks = new_peaks
            '''
            peaks_with_score = [x + (map_ori[x[1], x[0]],) for x in peaks]
            ids = range(peak_counter, peak_counter + len(peaks))
            peaks_with_score_and_id = [peaks_with_score[i] + (ids[i],) for i in range(len(ids))]
            all_peaks.append(peaks_with_score_and_id)
            peak_counter += len(peaks)

        return all_peaks

    def __extract_paf_info(self, img_raw, paf_avg, all_peaks):
        connection_all = []
        special_k = []
        mid_num = self.configer.get('res', 'mid_point_num')
        for k in range(len(self.configer.get('details', 'limb_seq'))):
            score_mid = paf_avg[:, :, [k * 2, k * 2 + 1]]
            candA = all_peaks[self.configer.get('details', 'limb_seq')[k][0] - 1]
            candB = all_peaks[self.configer.get('details', 'limb_seq')[k][1] - 1]
            nA = len(candA)
            nB = len(candB)
            if nA != 0 and nB != 0:
                connection_candidate = []
                for i in range(nA):
                    for j in range(nB):
                        vec = np.subtract(candB[j][:2], candA[i][:2])
                        norm = math.sqrt(vec[0] * vec[0] + vec[1] * vec[1]) + 1e-9
                        vec = np.divide(vec, norm)
                        startend = zip(np.linspace(candA[i][0], candB[j][0], num=mid_num),
                                       np.linspace(candA[i][1], candB[j][1], num=mid_num))
                        startend = list(startend)
                        vec_x = np.array([score_mid[int(round(startend[I][1])),
                                                    int(round(startend[I][0])), 0]
                                          for I in range(len(startend))])
                        vec_y = np.array([score_mid[int(round(startend[I][1])),
                                                    int(round(startend[I][0])), 1]
                                          for I in range(len(startend))])
                        score_midpts = np.multiply(vec_x, vec[0]) + np.multiply(vec_y, vec[1])
                        score_with_dist_prior = sum(score_midpts) / len(score_midpts)
                        score_with_dist_prior += min(0.5 * img_raw.shape[0] / norm - 1, 0)
                        num_positive = len(np.nonzero(
                            score_midpts > self.configer.get('res', 'limb_threshold'))[0])
                        criterion1 = num_positive > int(
                            self.configer.get('res', 'limb_pos_ratio') * len(score_midpts))
                        criterion2 = score_with_dist_prior > 0
                        if criterion1 and criterion2:
                            connection_candidate.append(
                                [i, j, score_with_dist_prior,
                                 score_with_dist_prior + candA[i][2] + candB[j][2]])

                connection_candidate = sorted(connection_candidate, key=lambda x: x[2], reverse=True)
                connection = np.zeros((0, 5))
                for c in range(len(connection_candidate)):
                    i, j, s = connection_candidate[c][0:3]
                    if i not in connection[:, 3] and j not in connection[:, 4]:
                        connection = np.vstack([connection, [candA[i][3], candB[j][3], s, i, j]])
                        if len(connection) >= min(nA, nB):
                            break

                connection_all.append(connection)
            else:
                special_k.append(k)
                connection_all.append([])

        return special_k, connection_all

    def __get_subsets(self, connection_all, special_k, all_peaks):
        # last number in each row is the total parts number of that person
        # the second last number in each row is the score of the overall configuration
        subset = -1 * np.ones((0, self.configer.get('data', 'num_kpts') + 2))
        candidate = np.array([item for sublist in all_peaks for item in sublist])
        for k in self.configer.get('details', 'mini_tree'):
            if k not in special_k:
                partAs = connection_all[k][:, 0]
                partBs = connection_all[k][:, 1]
                indexA, indexB = np.array(self.configer.get('details', 'limb_seq')[k]) - 1
                for i in range(len(connection_all[k])):  # = 1:size(temp,1)
                    found = 0
                    subset_idx = [-1, -1]
                    for j in range(len(subset)):  # 1:size(subset,1):
                        if subset[j][indexA] == partAs[i] or subset[j][indexB] == partBs[i]:
                            subset_idx[found] = j
                            found += 1

                    if found == 1:
                        j = subset_idx[0]
                        if subset[j][indexB] != partBs[i]:
                            subset[j][indexB] = partBs[i]
                            subset[j][-1] += 1
                            subset[j][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]
                    elif found == 2:  # if found 2 and disjoint, merge them
                        j1, j2 = subset_idx
                        membership = ((subset[j1] >= 0).astype(int) + (subset[j2] >= 0).astype(int))[:-2]
                        if len(np.nonzero(membership == 2)[0]) == 0:  # merge
                            subset[j1][:-2] += (subset[j2][:-2] + 1)
                            subset[j1][-2:] += subset[j2][-2:]
                            subset[j1][-2] += connection_all[k][i][2]
                            subset = np.delete(subset, j2, 0)
                        else:  # as like found == 1
                            subset[j1][indexB] = partBs[i]
                            subset[j1][-1] += 1
                            subset[j1][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]
                    # if find no partA in the subset, create a new subset
                    elif not found:
                        row = -1 * np.ones(self.configer.get('data', 'num_kpts') + 2)
                        row[indexA] = partAs[i]
                        row[indexB] = partBs[i]
                        row[-1] = 2
                        row[-2] = sum(candidate[connection_all[k][i, :2].astype(int), 2]) \
                            + connection_all[k][i][2]
                        subset = np.vstack([subset, row])

        return subset, candidate

    def debug(self, vis_dir):
        count = 0
        for i, data_dict in enumerate(self.pose_data_loader.get_trainloader()):
            inputs = data_dict['img']
            maskmap = data_dict['maskmap']
            heatmap = data_dict['heatmap']
            vecmap = data_dict['vecmap']
            for j in range(inputs.size(0)):
                count = count + 1
                if count > 10:
                    exit(1)

                Log.info(heatmap.size())
                image_bgr = self.blob_helper.tensor2bgr(inputs[j])
                mask_canvas = maskmap[j].repeat(3, 1, 1).numpy().transpose(1, 2, 0)
                mask_canvas = (mask_canvas * 255).astype(np.uint8)
                mask_canvas = cv2.resize(mask_canvas, (0, 0),
                                         fx=self.configer.get('network', 'stride'),
                                         fy=self.configer.get('network', 'stride'),
                                         interpolation=cv2.INTER_CUBIC)
                image_bgr = cv2.addWeighted(image_bgr, 0.6, mask_canvas, 0.4, 0)
                heatmap_avg = heatmap[j].numpy().transpose(1, 2, 0)
                heatmap_avg = cv2.resize(heatmap_avg, (0, 0),
                                         fx=self.configer.get('network', 'stride'),
                                         fy=self.configer.get('network', 'stride'),
                                         interpolation=cv2.INTER_CUBIC)
                paf_avg = vecmap[j].numpy().transpose(1, 2, 0)
                paf_avg = cv2.resize(paf_avg, (0, 0),
                                     fx=self.configer.get('network', 'stride'),
                                     fy=self.configer.get('network', 'stride'),
                                     interpolation=cv2.INTER_CUBIC)
                self.pose_visualizer.vis_peaks(heatmap_avg, image_bgr)
                self.pose_visualizer.vis_paf(paf_avg, image_bgr)
                all_peaks = self.__extract_heatmap_info(heatmap_avg)
                special_k, connection_all = self.__extract_paf_info(image_bgr, paf_avg, all_peaks)
                subset, candidate = self.__get_subsets(connection_all, special_k, all_peaks)
                json_dict = self.__get_info_tree(image_bgr, subset, candidate)
                image_canvas = self.pose_parser.draw_points(image_bgr, json_dict)
                image_canvas = self.pose_parser.link_points(image_canvas, json_dict)
                cv2.imwrite(os.path.join(vis_dir, '{}_{}_vis.png'.format(i, j)), image_canvas)
                cv2.imshow('main', image_canvas)
                cv2.waitKey()
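# The scoring step inside __extract_paf_info, reduced to one candidate
# pair: sample points along the segment A->B and average the dot
# product between the PAF field and the unit vector A->B. The distance
# prior and the two acceptance criteria are omitted.
import numpy as np

def paf_connection_score(paf, ptA, ptB, mid_num=10):
    vec = np.subtract(ptB, ptA).astype(np.float32)
    norm = np.sqrt(vec[0] ** 2 + vec[1] ** 2) + 1e-9
    vec = vec / norm
    xs = np.linspace(ptA[0], ptB[0], num=mid_num)
    ys = np.linspace(ptA[1], ptB[1], num=mid_num)
    scores = [paf[int(round(y)), int(round(x)), 0] * vec[0] +
              paf[int(round(y)), int(round(x)), 1] * vec[1]
              for x, y in zip(xs, ys)]
    return float(np.mean(scores))

# A field pointing straight right scores ~1.0 for a horizontal limb.
paf = np.zeros((32, 32, 2), np.float32)
paf[..., 0] = 1.0
print(paf_connection_score(paf, (2, 16), (28, 16)))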
class ConvPoseMachineTest(object):
    def __init__(self, configer):
        self.configer = configer
        self.blob_helper = BlobHelper(configer)
        self.pose_vis = PoseVisualizer(configer)
        self.pose_model_manager = ModelManager(configer)
        self.pose_data_loader = DataLoader(configer)
        self.device = torch.device('cpu' if self.configer.get('gpu') is None else 'cuda')
        self.pose_net = None
        self._init_model()

    def _init_model(self):
        self.pose_net = self.pose_model_manager.multi_pose_detector()
        self.pose_net = RunnerHelper.load_net(self, self.pose_net)
        self.pose_net.eval()

    def __test_img(self, image_path, save_path):
        Log.info('Image Path: {}'.format(image_path))
        ori_image = ImageHelper.read_image(image_path,
                                           tool=self.configer.get('data', 'image_tool'),
                                           mode=self.configer.get('data', 'input_mode'))
        ori_width, ori_height = ImageHelper.get_size(ori_image)
        ori_img_bgr = ImageHelper.get_cv2_bgr(ori_image, mode=self.configer.get('data', 'input_mode'))
        heatmap_avg = np.zeros((ori_height, ori_width, self.configer.get('network', 'heatmap_out')))
        for i, scale in enumerate(self.configer.get('test', 'scale_search')):
            image = self.blob_helper.make_input(ori_image,
                                                input_size=self.configer.get('test', 'input_size'),
                                                scale=scale)
            with torch.no_grad():
                heatmap_out_list = self.pose_net(image)
                heatmap_out = heatmap_out_list[-1]

                # extract outputs, resize, and remove padding
                heatmap = heatmap_out.squeeze(0).cpu().numpy().transpose(1, 2, 0)
                heatmap = cv2.resize(heatmap, (ori_width, ori_height), interpolation=cv2.INTER_CUBIC)
                heatmap_avg = heatmap_avg + heatmap / len(self.configer.get('test', 'scale_search'))

        all_peaks = self.__extract_heatmap_info(heatmap_avg)
        image_canvas = self.__draw_key_point(all_peaks, ori_img_bgr)
        ImageHelper.save(image_canvas, save_path)

    def __extract_heatmap_info(self, heatmap_avg):
        all_peaks = []
        for part in range(self.configer.get('network', 'heatmap_out') - 1):
            map_ori = heatmap_avg[:, :, part]
            map_gau = gaussian_filter(map_ori, sigma=3)
            map_left = np.zeros(map_gau.shape)
            map_left[1:, :] = map_gau[:-1, :]
            map_right = np.zeros(map_gau.shape)
            map_right[:-1, :] = map_gau[1:, :]
            map_up = np.zeros(map_gau.shape)
            map_up[:, 1:] = map_gau[:, :-1]
            map_down = np.zeros(map_gau.shape)
            map_down[:, :-1] = map_gau[:, 1:]
            peaks_binary = np.logical_and.reduce(
                (map_gau >= map_left, map_gau >= map_right,
                 map_gau >= map_up, map_gau >= map_down,
                 map_gau > self.configer.get('vis', 'part_threshold')))
            peaks = zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0])  # note reverse
            peaks = list(peaks)
            peaks_with_score = [x + (map_ori[x[1], x[0]],) for x in peaks]
            all_peaks.append(peaks_with_score)

        return all_peaks

    def __draw_key_point(self, all_peaks, img_raw):
        img_canvas = img_raw.copy()  # B, G, R order
        for i in range(self.configer.get('network', 'heatmap_out') - 1):
            for j in range(len(all_peaks[i])):
                cv2.circle(img_canvas, all_peaks[i][j][0:2],
                           self.configer.get('vis', 'stick_width'),
                           self.configer.get('details', 'color_list')[i], thickness=-1)

        return img_canvas

    def debug(self, vis_dir):
        for i, data_dict in enumerate(self.pose_data_loader.get_trainloader()):
            inputs = data_dict['img']
            heatmap = data_dict['heatmap']
            for j in range(inputs.size(0)):
                image_bgr = self.blob_helper.tensor2bgr(inputs[j])
                heatmap_avg = heatmap[j].numpy().transpose(1, 2, 0)
                heatmap_avg = cv2.resize(heatmap_avg, (0, 0),
                                         fx=self.configer.get('network', 'stride'),
                                         fy=self.configer.get('network', 'stride'),
                                         interpolation=cv2.INTER_CUBIC)
                all_peaks = self.__extract_heatmap_info(heatmap_avg)
                image_save = self.__draw_key_point(all_peaks, image_bgr)
                cv2.imwrite(os.path.join(vis_dir, '{}_{}_result.jpg'.format(i, j)), image_save)
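# __extract_heatmap_info finds keypoints as local maxima of the
# smoothed heatmap via four shifted comparisons. A one-channel sketch;
# the threshold is illustrative, standing in for 'part_threshold':
import numpy as np
from scipy.ndimage import gaussian_filter

def extract_peaks(heatmap, threshold=0.1):
    smoothed = gaussian_filter(heatmap, sigma=3)
    left = np.zeros_like(smoothed)
    left[1:, :] = smoothed[:-1, :]
    right = np.zeros_like(smoothed)
    right[:-1, :] = smoothed[1:, :]
    up = np.zeros_like(smoothed)
    up[:, 1:] = smoothed[:, :-1]
    down = np.zeros_like(smoothed)
    down[:, :-1] = smoothed[:, 1:]
    mask = np.logical_and.reduce((smoothed >= left, smoothed >= right,
                                  smoothed >= up, smoothed >= down,
                                  smoothed > threshold))
    ys, xs = np.nonzero(mask)
    return [(x, y, heatmap[y, x]) for x, y in zip(xs, ys)]  # (col, row) -> (x, y)

hm = np.zeros((64, 64), np.float32)
hm[20, 40] = 10.0  # one keypoint at (x=40, y=20)
print(extract_peaks(hm))  # [(40, 20, 10.0)]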
class FaceGANTest(object):
    def __init__(self, configer):
        self.configer = configer
        self.blob_helper = BlobHelper(configer)
        self.model_manager = ModelManager(configer)
        self.test_loader = TestDataLoader(configer)
        self.device = torch.device('cpu' if self.configer.get('gpu') is None else 'cuda')
        self.gan_net = None
        self._init_model()

    def _init_model(self):
        self.gan_net = self.model_manager.gan_model()
        self.gan_net = RunnerHelper.load_net(self, self.gan_net)
        self.gan_net.eval()

    def test(self, test_dir, out_dir):
        gallery_file_list = glob.glob(test_dir + '/' + '*_gallery_*.txt')
        probe_file_list = glob.glob(test_dir + '/' + '*_probe_*.txt')
        # remove *_dev.txt files from both lists
        gallery_file_list = sorted(gallery_file_list)
        probe_file_list = sorted(probe_file_list)
        rank1_acc = []
        vr_acc = []
        for i in range(len(gallery_file_list)):
            probe_features = []
            gallery_features = []
            probe_names = []
            gallery_names = []
            Log.info('Gallery File: {}'.format(gallery_file_list[i]))
            for data_dict in self.test_loader.get_testloader(list_path=gallery_file_list[i]):
                new_data_dict = dict(gallery=data_dict['img'])
                out_dict = self.gan_net(new_data_dict)
                meta_list = DCHelper.tolist(data_dict['meta'])
                for idx in range(len(out_dict['feat'])):
                    gallery_features.append(out_dict['feat'][idx].cpu().numpy())
                    gallery_names.append(meta_list[idx]['img_path'].split("/")[-2])

            Log.info('Probe File: {}'.format(probe_file_list[i]))
            for data_dict in self.test_loader.get_testloader(list_path=probe_file_list[i]):
                new_data_dict = dict(probe=data_dict['img'])
                out_dict = self.gan_net(new_data_dict)
                meta_list = DCHelper.tolist(data_dict['meta'])
                for key, value in out_dict.items():
                    if 'feat' in key:
                        for idx in range(len(value)):
                            probe_features.append(value[idx].cpu().numpy())
                            probe_names.append(meta_list[idx]['img_path'].split("/")[-2])
                    else:
                        for idx in range(len(value)):
                            img_bgr = self.blob_helper.tensor2bgr(value[idx])
                            filename = meta_list[idx]['img_path'].rstrip().split('/')[-1]
                            ImageHelper.save(img_bgr, os.path.join(out_dir, key, filename))

            probe_features = np.array(probe_features)
            gallery_features = np.array(gallery_features)
            score = cosine_similarity(gallery_features, probe_features).T
            r_acc, tpr = self.compute_metric(score, probe_names, gallery_names)
            rank1_acc.append(r_acc)
            vr_acc.append(tpr)

        avg_r_a = np.mean(np.array(rank1_acc))
        std_r_a = np.std(np.array(rank1_acc))
        avg_v_a = np.mean(np.array(vr_acc))
        std_v_a = np.std(np.array(vr_acc))
        print()
        print('=====================================================')
        print('Final Rank1 accuracy is', avg_r_a * 100, "% +", std_r_a)
        print('Final VR@FAR=0.1% accuracy is', avg_v_a * 100, "% +", std_v_a)
        print('=====================================================')
        print()
        return avg_r_a, std_r_a, avg_v_a, std_v_a

    def compute_metric(self, score, probe_names, gallery_names):
        print('===> compute metrics')
        label = np.zeros_like(score)
        maxIndex = np.argmax(score, axis=1)
        count = 0
        for i in range(len(maxIndex)):
            probe_names_repeat = np.repeat([probe_names[i]], len(gallery_names), axis=0).T
            # compare the two string lists: find the index of the gallery image
            # that has the same name as the probe image
            result = np.equal(probe_names_repeat, gallery_names) * 1
            index = np.nonzero(result == 1)
            assert len(index[0]) == 1
            label[i][index[0][0]] = 1
            # check whether the max similarity score in the gallery belongs to
            # an image with the same name as the probe image
            if np.equal(int(probe_names[i]), int(gallery_names[maxIndex[i]])):
                count += 1

        r_acc = count / (len(probe_names) + 1e-5)
        fpr, tpr, thresholds = roc_curve(label.flatten(), score.flatten())
        print("In sub_experiment", label.shape[0], 'count of true label:', count)
        print('rank1 accuracy =', r_acc)
        print('VR@FAR=0.1% accuracy =', tpr[fpr <= 0.001][-1])
        return r_acc, tpr[fpr <= 0.001][-1]
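# A minimal, self-contained sketch of the verification metrics computed in
# compute_metric() above, assuming integer identity labels and row-vector
# features; `rank1_and_vr_at_far` is a hypothetical helper name, and both
# `cosine_similarity` and `roc_curve` are the sklearn functions used above.
import numpy as np
from sklearn.metrics import roc_curve
from sklearn.metrics.pairwise import cosine_similarity

def rank1_and_vr_at_far(probe_feats, gallery_feats, probe_ids, gallery_ids, far=0.001):
    """Rank-1 accuracy plus verification rate at a fixed false-accept rate."""
    score = cosine_similarity(np.asarray(probe_feats), np.asarray(gallery_feats))  # (P, G)
    probe_ids = np.asarray(probe_ids)
    gallery_ids = np.asarray(gallery_ids)
    # Rank-1: the best-scoring gallery entry must share the probe's identity.
    rank1 = np.mean(gallery_ids[score.argmax(axis=1)] == probe_ids)
    # Genuine/impostor labels for every probe-gallery pair, then a flat ROC.
    labels = (probe_ids[:, None] == gallery_ids[None, :]).astype(np.float32)
    fpr, tpr, _ = roc_curve(labels.ravel(), score.ravel())
    return rank1, tpr[fpr <= far][-1]  # last TPR whose FPR stays under `far`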
class ImageTranslatorTest(object):
    def __init__(self, configer):
        self.configer = configer
        self.blob_helper = BlobHelper(configer)
        self.model_manager = ModelManager(configer)
        self.test_loader = TestDataLoader(configer)
        self.device = torch.device('cpu' if self.configer.get('gpu') is None else 'cuda')
        self.gan_net = None
        self._init_model()

    def _init_model(self):
        self.gan_net = self.model_manager.gan_model()
        self.gan_net = RunnerHelper.load_net(self, self.gan_net)
        self.gan_net.eval()

    def test(self, test_dir, out_dir):
        if self.configer.exists('test', 'mode') and self.configer.get('test', 'mode') == 'nir2vis':
            jsonA_path = os.path.join(self.configer.get('test', 'test_dir'),
                                      'val_label{}A.json'.format(self.configer.get('data', 'tag')))
            test_loader_A = self.test_loader.get_testloader(json_path=jsonA_path) if os.path.exists(jsonA_path) else None
            jsonB_path = os.path.join(self.configer.get('test', 'test_dir'),
                                      'val_label{}B.json'.format(self.configer.get('data', 'tag')))
            test_loader_B = self.test_loader.get_testloader(json_path=jsonB_path) if os.path.exists(jsonB_path) else None
        elif self.configer.exists('test', 'mode') and self.configer.get('test', 'mode') == 'pix2pix':
            imgA_dir = os.path.join(test_dir, 'imageA')
            test_loader_A = self.test_loader.get_testloader(test_dir=imgA_dir) if os.path.exists(imgA_dir) else None
            test_loader_B = None
        else:
            imgA_dir = os.path.join(test_dir, 'imageA')
            test_loader_A = self.test_loader.get_testloader(test_dir=imgA_dir) if os.path.exists(imgA_dir) else None
            imgB_dir = os.path.join(test_dir, 'imageB')
            test_loader_B = self.test_loader.get_testloader(test_dir=imgB_dir) if os.path.exists(imgB_dir) else None

        if test_loader_A is not None:
            for data_dict in test_loader_A:
                new_data_dict = dict(imgA=data_dict['img'])
                with torch.no_grad():
                    out_dict = self.gan_net(new_data_dict, testing=True)

                meta_list = DCHelper.tolist(data_dict['meta'])
                for key, value in out_dict.items():
                    for i in range(len(value)):
                        img_bgr = self.blob_helper.tensor2bgr(value[i])
                        img_path = meta_list[i]['img_path']
                        Log.info('Image Path: {}'.format(img_path))
                        img_bgr = ImageHelper.resize(img_bgr,
                                                     target_size=self.configer.get('test', 'out_size'),
                                                     interpolation='linear')
                        ImageHelper.save(img_bgr, os.path.join(out_dir, key, meta_list[i]['filename']))

        if test_loader_B is not None:
            for data_dict in test_loader_B:
                new_data_dict = dict(imgB=data_dict['img'])
                with torch.no_grad():
                    out_dict = self.gan_net(new_data_dict, testing=True)

                meta_list = DCHelper.tolist(data_dict['meta'])
                for key, value in out_dict.items():
                    for i in range(len(value)):
                        img_bgr = self.blob_helper.tensor2bgr(value[i])
                        img_path = meta_list[i]['img_path']
                        Log.info('Image Path: {}'.format(img_path))
                        img_bgr = ImageHelper.resize(img_bgr,
                                                     target_size=self.configer.get('test', 'out_size'),
                                                     interpolation='linear')
                        ImageHelper.save(img_bgr, os.path.join(out_dir, key, meta_list[i]['filename']))
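# `BlobHelper.tensor2bgr`, used throughout these test runners, is defined
# elsewhere in the repo. A hypothetical minimal equivalent is sketched below,
# assuming a CHW float tensor normalized to [-1, 1] (the usual GAN output
# convention) in RGB channel order; the real helper may denormalize differently.
import numpy as np
import torch

def tensor_to_bgr(tensor):
    """Map a normalized CHW RGB tensor back to an HWC uint8 BGR image."""
    array = tensor.detach().cpu().float().numpy()
    array = (array + 1.0) / 2.0 * 255.0            # undo [-1, 1] normalization
    array = array.clip(0, 255).astype(np.uint8)
    array = array.transpose(1, 2, 0)               # CHW -> HWC
    return np.ascontiguousarray(array[:, :, ::-1])  # RGB -> BGR for cv2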
class FaceGANTest(object):
    def __init__(self, configer):
        self.configer = configer
        self.blob_helper = BlobHelper(configer)
        self.model_manager = ModelManager(configer)
        self.test_loader = TestDataLoader(configer)
        self.device = torch.device('cpu' if self.configer.get('gpu') is None else 'cuda')
        self.gan_net = None
        self._init_model()

    def _init_model(self):
        self.gan_net = self.model_manager.gan_model()
        self.gan_net = RunnerHelper.load_net(self, self.gan_net)
        self.gan_net.eval()

    def test(self, test_dir, out_dir):
        if self.configer.exists('test', 'mode') and self.configer.get('test', 'mode') == 'nir2vis':
            jsonA_path = os.path.join(test_dir, 'val_label{}A.json'.format(self.configer.get('data', 'tag')))
            test_loader_A = self.test_loader.get_testloader(json_path=jsonA_path) if os.path.exists(jsonA_path) else None
            jsonB_path = os.path.join(test_dir, 'val_label{}B.json'.format(self.configer.get('data', 'tag')))
            test_loader_B = self.test_loader.get_testloader(json_path=jsonB_path) if os.path.exists(jsonB_path) else None
        else:
            test_loader_A, test_loader_B = None, None
            Log.error('Test Mode not Exists!')
            exit(1)

        assert test_loader_A is not None and test_loader_B is not None
        probe_features = []
        gallery_features = []
        probe_labels = []
        gallery_labels = []
        for data_dict in test_loader_A:
            new_data_dict = dict(imgA=data_dict['img'])
            with torch.no_grad():
                out_dict = self.gan_net(new_data_dict, testing=True)

            meta_list = DCHelper.tolist(data_dict['meta'])
            for idx in range(len(meta_list)):
                probe_features.append(out_dict['featA'][idx].cpu().numpy())
                probe_labels.append(meta_list[idx]['label'])

            for key, value in out_dict.items():
                for i in range(len(value)):
                    if 'feat' in key:
                        continue

                    img_bgr = self.blob_helper.tensor2bgr(value[i])
                    img_path = meta_list[i]['img_path']
                    Log.info('Image Path: {}'.format(img_path))
                    img_bgr = ImageHelper.resize(img_bgr,
                                                 target_size=self.configer.get('test', 'out_size'),
                                                 interpolation='linear')
                    ImageHelper.save(img_bgr, os.path.join(out_dir, key, meta_list[i]['filename']))

        for data_dict in test_loader_B:
            new_data_dict = dict(imgB=data_dict['img'])
            with torch.no_grad():
                out_dict = self.gan_net(new_data_dict, testing=True)

            meta_list = DCHelper.tolist(data_dict['meta'])
            for idx in range(len(meta_list)):
                gallery_features.append(out_dict['feat'][idx].cpu().numpy())
                gallery_labels.append(meta_list[idx]['label'])

            for key, value in out_dict.items():
                for i in range(len(value)):
                    if 'feat' in key:
                        continue

                    img_bgr = self.blob_helper.tensor2bgr(value[i])
                    img_path = meta_list[i]['img_path']
                    Log.info('Image Path: {}'.format(img_path))
                    img_bgr = ImageHelper.resize(img_bgr,
                                                 target_size=self.configer.get('test', 'out_size'),
                                                 interpolation='linear')
                    ImageHelper.save(img_bgr, os.path.join(out_dir, key, meta_list[i]['filename']))

        r_acc, tpr = self.decode(probe_features, gallery_features, probe_labels, gallery_labels)
        Log.info('Final Rank1 accuracy is {}'.format(r_acc))
        Log.info('Final VR@FAR=0.1% accuracy is {}'.format(tpr))

    @staticmethod
    def decode(probe_features, gallery_features, probe_labels, gallery_labels):
        probe_features = np.array(probe_features)
        gallery_features = np.array(gallery_features)
        score = cosine_similarity(gallery_features, probe_features).T
        print('===> compute metrics')
        label = np.zeros_like(score)
        maxIndex = np.argmax(score, axis=1)
        count = 0
        for i in range(len(maxIndex)):
            probe_names_repeat = np.repeat([probe_labels[i]], len(gallery_labels), axis=0).T
            # compare the two label lists: find the index of the gallery image
            # that has the same label as the probe image
            result = np.equal(probe_names_repeat, gallery_labels) * 1
            index = np.nonzero(result == 1)
            assert len(index[0]) == 1
            label[i][index[0][0]] = 1
            # check whether the max similarity score in the gallery belongs to
            # an image with the same label as the probe image
            if np.equal(int(probe_labels[i]), int(gallery_labels[maxIndex[i]])):
                count += 1

        r_acc = count / (len(probe_labels) + 1e-5)
        fpr, tpr, thresholds = roc_curve(label.flatten(), score.flatten())
        return r_acc, tpr[fpr <= 0.001][-1]
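# The per-probe loop in decode() above can be vectorized; here is a sketch
# under the same one-match-per-probe assumption, where numpy broadcasting
# replaces the np.repeat/np.nonzero bookkeeping. `one_hot_match_matrix` is a
# hypothetical helper, not part of the repo.
import numpy as np

def one_hot_match_matrix(probe_labels, gallery_labels):
    """label[i][j] == 1 iff probe i and gallery j share an identity."""
    probe = np.asarray(probe_labels).reshape(-1, 1)      # (P, 1)
    gallery = np.asarray(gallery_labels).reshape(1, -1)  # (1, G)
    label = (probe == gallery).astype(np.float32)        # broadcast compare
    assert (label.sum(axis=1) == 1).all()  # each probe has exactly one gallery match
    return label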
class FCClassifierTest(object):
    def __init__(self, configer):
        self.configer = configer
        self.blob_helper = BlobHelper(configer)
        self.cls_model_manager = ClsModelManager(configer)
        self.cls_data_loader = DataLoader(configer)
        self.cls_parser = ClsParser(configer)
        self.device = torch.device('cpu' if self.configer.get('gpu') is None else 'cuda')
        self.cls_net = None
        if self.configer.get('dataset') == 'imagenet':
            with open(os.path.join(self.configer.get('project_dir'),
                                   'datasets/cls/imagenet/imagenet_class_index.json')) as json_stream:
                name_dict = json.load(json_stream)
                name_seq = [name_dict[str(i)][1] for i in range(self.configer.get('data', 'num_classes'))]
                self.configer.add(['details', 'name_seq'], name_seq)

        self._init_model()

    def _init_model(self):
        self.cls_net = self.cls_model_manager.image_classifier()
        self.cls_net = RunnerHelper.load_net(self, self.cls_net)
        self.cls_net.eval()

    def __test_img(self, image_path, json_path, raw_path, vis_path):
        Log.info('Image Path: {}'.format(image_path))
        img = ImageHelper.read_image(image_path,
                                     tool=self.configer.get('data', 'image_tool'),
                                     mode=self.configer.get('data', 'input_mode'))
        trans = None
        if self.configer.get('dataset') == 'imagenet':
            if self.configer.get('data', 'image_tool') == 'cv2':
                img = Image.fromarray(img)

            trans = transforms.Compose([
                transforms.Scale(256),
                transforms.CenterCrop(224),
            ])

        assert trans is not None
        img = trans(img)
        ori_img_bgr = ImageHelper.get_cv2_bgr(img, mode=self.configer.get('data', 'input_mode'))
        inputs = self.blob_helper.make_input(img,
                                             input_size=self.configer.get('test', 'input_size'),
                                             scale=1.0)
        with torch.no_grad():
            outputs = self.cls_net(inputs)

        json_dict = self.__get_info_tree(outputs, image_path)
        image_canvas = self.cls_parser.draw_label(ori_img_bgr.copy(), json_dict['label'])
        cv2.imwrite(vis_path, image_canvas)
        cv2.imwrite(raw_path, ori_img_bgr)
        Log.info('Json Path: {}'.format(json_path))
        JsonHelper.save_file(json_dict, json_path)
        return json_dict

    def __get_info_tree(self, outputs, image_path=None):
        json_dict = dict()
        if image_path is not None:
            json_dict['image_path'] = image_path

        topk = (1, 3, 5)
        maxk = max(topk)
        _, pred = outputs.topk(maxk, 0, True, True)
        for k in topk:
            if k == 1:
                json_dict['label'] = pred[0]
            else:
                json_dict['label_top{}'.format(k)] = pred[:k]

        return json_dict

    def debug(self, vis_dir):
        count = 0
        for i, data_dict in enumerate(self.cls_data_loader.get_trainloader()):
            inputs = data_dict['img']
            labels = data_dict['label']
            eye_matrix = torch.eye(self.configer.get('data', 'num_classes'))
            labels_target = eye_matrix[labels.view(-1)].view(inputs.size(0),
                                                             self.configer.get('data', 'num_classes'))
            for j in range(inputs.size(0)):
                count = count + 1
                if count > 20:
                    exit(1)

                ori_img_bgr = self.blob_helper.tensor2bgr(inputs[j])
                json_dict = self.__get_info_tree(labels_target[j])
                image_canvas = self.cls_parser.draw_label(ori_img_bgr.copy(), json_dict['label'])
                cv2.imwrite(os.path.join(vis_dir, '{}_{}_vis.png'.format(i, j)), image_canvas)
                cv2.imshow('main', image_canvas)
                cv2.waitKey()
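# `torch.Tensor.topk` drives __get_info_tree() above; a short usage sketch on
# dummy 1-D scores (matching the debug() call path of this class), showing how
# the top-1 and top-k class indices fall out of the returned index tensor:
import torch

scores = torch.tensor([0.05, 0.70, 0.10, 0.15])
_, pred = scores.topk(3, 0, True, True)   # indices of the 3 largest values, sorted descending
print(pred[0].item())     # top-1 class index -> 1
print(pred[:3].tolist())  # top-3 class indices -> [1, 3, 2]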
class FastRCNNTest(object):
    def __init__(self, configer):
        self.configer = configer
        self.blob_helper = BlobHelper(configer)
        self.det_visualizer = DetVisualizer(configer)
        self.det_parser = DetParser(configer)
        self.det_model_manager = DetModelManager(configer)
        self.det_data_loader = DataLoader(configer)
        self.roi_sampler = FRROISampler(configer)
        self.rpn_target_generator = RPNTargetAssigner(configer)
        self.fr_priorbox_layer = FRPriorBoxLayer(configer)
        self.fr_roi_generator = FRROIGenerator(configer)
        self.device = torch.device('cpu' if self.configer.get('gpu') is None else 'cuda')
        self.det_net = None
        self._init_model()

    def _init_model(self):
        self.det_net = self.det_model_manager.object_detector()
        self.det_net = RunnerHelper.load_net(self, self.det_net)
        self.det_net.eval()

    def __test_img(self, image_path, json_path, raw_path, vis_path):
        Log.info('Image Path: {}'.format(image_path))
        image = ImageHelper.read_image(image_path,
                                       tool=self.configer.get('data', 'image_tool'),
                                       mode=self.configer.get('data', 'input_mode'))
        ori_img_bgr = ImageHelper.get_cv2_bgr(image, mode=self.configer.get('data', 'input_mode'))
        width, height = ImageHelper.get_size(image)
        scale1 = self.configer.get('test', 'resize_bound')[0] / min(width, height)
        scale2 = self.configer.get('test', 'resize_bound')[1] / max(width, height)
        scale = min(scale1, scale2)
        inputs = self.blob_helper.make_input(image, scale=scale)
        b, c, h, w = inputs.size()
        border_wh = [w, h]
        if self.configer.exists('test', 'fit_stride'):
            stride = self.configer.get('test', 'fit_stride')
            pad_w = 0 if (w % stride == 0) else stride - (w % stride)  # right
            pad_h = 0 if (h % stride == 0) else stride - (h % stride)  # down
            expand_image = torch.zeros((b, c, h + pad_h, w + pad_w)).to(inputs.device)
            expand_image[:, :, 0:h, 0:w] = inputs
            inputs = expand_image

        data_dict = dict(
            img=inputs,
            meta=DataContainer([[
                dict(ori_img_size=ImageHelper.get_size(ori_img_bgr),
                     aug_img_size=border_wh,
                     img_scale=scale,
                     input_size=[inputs.size(3), inputs.size(2)])
            ]], cpu_only=True))
        with torch.no_grad():
            # Forward pass.
            test_group = self.det_net(data_dict)
            test_indices_and_rois, test_roi_locs, test_roi_scores, test_rois_num = test_group

        batch_detections = self.decode(test_roi_locs, test_roi_scores,
                                       test_indices_and_rois, test_rois_num,
                                       self.configer, DCHelper.tolist(data_dict['meta']))
        json_dict = self.__get_info_tree(batch_detections[0], ori_img_bgr, scale=scale)
        image_canvas = self.det_parser.draw_bboxes(ori_img_bgr.copy(), json_dict,
                                                   conf_threshold=self.configer.get('res', 'vis_conf_thre'))
        cv2.imwrite(vis_path, image_canvas)
        cv2.imwrite(raw_path, ori_img_bgr)
        Log.info('Json Path: {}'.format(json_path))
        JsonHelper.save_file(json_dict, json_path)
        return json_dict

    @staticmethod
    def decode(roi_locs, roi_scores, indices_and_rois, test_rois_num, configer, metas):
        num_classes = configer.get('data', 'num_classes')
        mean = torch.Tensor(configer.get('roi', 'loc_normalize_mean')).repeat(num_classes)[None]
        std = torch.Tensor(configer.get('roi', 'loc_normalize_std')).repeat(num_classes)[None]
        mean = mean.to(roi_locs.device)
        std = std.to(roi_locs.device)
        roi_locs = (roi_locs * std + mean)
        roi_locs = roi_locs.contiguous().view(-1, num_classes, 4)
        rois = indices_and_rois[:, 1:]
        rois = rois.contiguous().view(-1, 1, 4).expand_as(roi_locs)
        wh = torch.exp(roi_locs[:, :, 2:]) * (rois[:, :, 2:] - rois[:, :, :2])
        cxcy = roi_locs[:, :, :2] * (rois[:, :, 2:] - rois[:, :, :2]) + (rois[:, :, :2] + rois[:, :, 2:]) / 2
        dst_bbox = torch.cat([cxcy - wh / 2, cxcy + wh / 2], 2)  # [b, 8732, 4]
        if configer.get('phase') != 'debug':
            cls_prob = F.softmax(roi_scores, dim=1)
        else:
            cls_prob = roi_scores

        cls_label = torch.LongTensor([i for i in range(num_classes)])\
            .contiguous().view(1, num_classes).repeat(indices_and_rois.size(0), 1).to(roi_locs.device)
        output = [None for _ in range(test_rois_num.size(0))]
        start_index = 0
        for i in range(test_rois_num.size(0)):
            tmp_dst_bbox = dst_bbox[start_index:start_index + test_rois_num[i]]
            # clip bounding box
            tmp_dst_bbox[:, :, 0::2] = tmp_dst_bbox[:, :, 0::2].clamp(min=0, max=metas[i]['border_size'][0] - 1)
            tmp_dst_bbox[:, :, 1::2] = tmp_dst_bbox[:, :, 1::2].clamp(min=0, max=metas[i]['border_size'][1] - 1)
            tmp_cls_prob = cls_prob[start_index:start_index + test_rois_num[i]]
            tmp_cls_label = cls_label[start_index:start_index + test_rois_num[i]]
            start_index += test_rois_num[i]
            mask = (tmp_cls_prob > configer.get('res', 'val_conf_thre')) & (tmp_cls_label > 0)
            tmp_dst_bbox = tmp_dst_bbox[mask].contiguous().view(-1, 4)
            if tmp_dst_bbox.numel() == 0:
                continue

            tmp_cls_prob = tmp_cls_prob[mask].contiguous().view(-1,).unsqueeze(1)
            tmp_cls_label = tmp_cls_label[mask].contiguous().view(-1,).unsqueeze(1)
            valid_preds = torch.cat((tmp_dst_bbox, tmp_cls_prob.float(), tmp_cls_label.float()), 1)
            output[i] = DetHelper.cls_nms(valid_preds,
                                          labels=valid_preds[:, 5],
                                          max_threshold=configer.get('nms', 'max_threshold'))

        return output

    def __get_info_tree(self, detections, image_raw, scale=1.0):
        height, width, _ = image_raw.shape
        json_dict = dict()
        object_list = list()
        if detections is not None:
            for x1, y1, x2, y2, conf, cls_pred in detections:
                object_dict = dict()
                xmin = min(x1.cpu().item() / scale, width - 1)
                ymin = min(y1.cpu().item() / scale, height - 1)
                xmax = min(x2.cpu().item() / scale, width - 1)
                ymax = min(y2.cpu().item() / scale, height - 1)
                object_dict['bbox'] = [xmin, ymin, xmax, ymax]
                object_dict['label'] = int(cls_pred.cpu().item()) - 1
                object_dict['score'] = float('%.2f' % conf.cpu().item())
                object_list.append(object_dict)

        json_dict['objects'] = object_list
        return json_dict

    def debug(self, vis_dir):
        count = 0
        for i, data_dict in enumerate(self.det_data_loader.get_trainloader()):
            feat_list = list()
            input_size = data_dict['meta'][0]['input_size']
            for stride in self.configer.get('rpn', 'stride_list'):
                feat_list.append(torch.zeros((data_dict['img'].size(0), 1,
                                              input_size[1] // stride,
                                              input_size[0] // stride)))

            gt_rpn_locs, gt_rpn_labels = self.rpn_target_generator(feat_list,
                                                                   data_dict['bboxes'],
                                                                   data_dict['meta'])
            eye_matrix = torch.eye(2)
            gt_rpn_labels[gt_rpn_labels == -1] = 0
            gt_rpn_scores = eye_matrix[gt_rpn_labels.view(-1)].view(data_dict['img'].size(0), -1, 2)
            test_indices_and_rois, _ = self.fr_roi_generator(feat_list, gt_rpn_locs, gt_rpn_scores,
                                                             self.configer.get('rpn', 'n_test_pre_nms'),
                                                             self.configer.get('rpn', 'n_test_post_nms'),
                                                             data_dict['meta'])
            sample_rois, gt_roi_locs, gt_roi_labels = self.roi_sampler(test_indices_and_rois,
                                                                       data_dict['bboxes'],
                                                                       data_dict['labels'],
                                                                       data_dict['meta'])
            self.det_visualizer.vis_rois(data_dict['img'], sample_rois[gt_roi_labels > 0])
            gt_cls_roi_locs = torch.zeros((gt_roi_locs.size(0), self.configer.get('data', 'num_classes'), 4))
            gt_cls_roi_locs[torch.arange(0, sample_rois.size(0)).long(), gt_roi_labels.long()] = gt_roi_locs
            gt_cls_roi_locs = gt_cls_roi_locs.contiguous().view(-1, 4 * self.configer.get('data', 'num_classes'))
            eye_matrix = torch.eye(self.configer.get('data', 'num_classes'))
            gt_roi_scores = eye_matrix[gt_roi_labels.view(-1)].view(gt_roi_labels.size(0),
                                                                    self.configer.get('data', 'num_classes'))
            test_rois_num = torch.zeros((len(data_dict['bboxes']),)).long()
            for batch_id in range(len(data_dict['bboxes'])):
                batch_index = (sample_rois[:, 0] == batch_id).nonzero().contiguous().view(-1,)
                test_rois_num[batch_id] = batch_index.numel()

            batch_detections = FastRCNNTest.decode(gt_cls_roi_locs, gt_roi_scores,
                                                   sample_rois, test_rois_num,
                                                   self.configer, data_dict['meta'])
            for j in range(data_dict['img'].size(0)):
                count = count + 1
                if count > 20:
                    exit(1)

                ori_img_bgr = self.blob_helper.tensor2bgr(data_dict['img'][j])
                self.det_visualizer.vis_default_bboxes(ori_img_bgr,
                                                       self.fr_priorbox_layer(feat_list, input_size),
                                                       gt_rpn_labels[j])
                json_dict = self.__get_info_tree(batch_detections[j], ori_img_bgr)
                image_canvas = self.det_parser.draw_bboxes(ori_img_bgr.copy(), json_dict,
                                                           conf_threshold=self.configer.get('res', 'vis_conf_thre'))
                cv2.imwrite(os.path.join(vis_dir, '{}_{}_vis.png'.format(i, j)), image_canvas)
                cv2.imshow('main', image_canvas)
                cv2.waitKey()
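# The heart of FastRCNNTest.decode() is mapping (dx, dy, dw, dh) offsets back
# onto their source RoIs. A standalone sketch of that arithmetic, assuming
# corner-format RoIs and already de-normalized offsets; `decode_offsets` is a
# hypothetical name, not part of the repo.
import torch

def decode_offsets(rois, locs):
    """rois: (N, 4) corner boxes; locs: (N, 4) offsets -> (N, 4) corner boxes."""
    size = rois[:, 2:] - rois[:, :2]           # RoI width/height
    center = (rois[:, :2] + rois[:, 2:]) / 2   # RoI center
    new_center = locs[:, :2] * size + center   # shift center by (dx, dy) * size
    new_size = torch.exp(locs[:, 2:]) * size   # scale size by exp(dw), exp(dh)
    return torch.cat([new_center - new_size / 2, new_center + new_size / 2], dim=1)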
class FCNSegmentorTest(object):
    def __init__(self, configer):
        self.configer = configer
        self.blob_helper = BlobHelper(configer)
        self.seg_visualizer = SegVisualizer(configer)
        self.seg_parser = SegParser(configer)
        self.seg_model_manager = SegModelManager(configer)
        self.seg_data_loader = SegDataLoader(configer)
        self.module_utilizer = ModuleUtilizer(configer)
        self.data_transformer = DataTransformer(configer)
        self.device = torch.device('cpu' if self.configer.get('gpu') is None else 'cuda')
        self.seg_net = None
        self._init_model()

    def _init_model(self):
        self.seg_net = self.seg_model_manager.semantic_segmentor()
        self.seg_net = self.module_utilizer.load_net(self.seg_net)
        self.seg_net.eval()

    def __test_img(self, image_path, label_path, vis_path, raw_path):
        Log.info('Image Path: {}'.format(image_path))
        ori_image = ImageHelper.read_image(image_path,
                                           tool=self.configer.get('data', 'image_tool'),
                                           mode=self.configer.get('data', 'input_mode'))
        ori_width, ori_height = ImageHelper.get_size(ori_image)
        total_logits = np.zeros((ori_height, ori_width, self.configer.get('data', 'num_classes')), np.float32)
        for scale in self.configer.get('test', 'scale_search'):
            image = self.blob_helper.make_input(image=ori_image,
                                                input_size=self.configer.get('test', 'input_size'),
                                                scale=scale)
            if self.configer.get('test', 'crop_test'):
                crop_size = self.configer.get('test', 'crop_size')
                if image.size()[3] > crop_size[0] and image.size()[2] > crop_size[1]:
                    results = self._crop_predict(image, crop_size)
                else:
                    results = self._predict(image)
            else:
                results = self._predict(image)

            results = cv2.resize(results, (ori_width, ori_height), interpolation=cv2.INTER_LINEAR)
            total_logits += results

        if self.configer.get('test', 'mirror'):
            if self.configer.get('data', 'image_tool') == 'cv2':
                image = cv2.flip(ori_image, 1)
            else:
                image = ori_image.transpose(Image.FLIP_LEFT_RIGHT)

            image = self.blob_helper.make_input(image,
                                                input_size=self.configer.get('test', 'input_size'),
                                                scale=1.0)
            if self.configer.get('test', 'crop_test'):
                crop_size = self.configer.get('test', 'crop_size')
                if image.size()[3] > crop_size[0] and image.size()[2] > crop_size[1]:
                    results = self._crop_predict(image, crop_size)
                else:
                    results = self._predict(image)
            else:
                results = self._predict(image)

            results = cv2.resize(results[:, ::-1], (ori_width, ori_height), interpolation=cv2.INTER_LINEAR)
            total_logits += results

        label_map = np.argmax(total_logits, axis=-1)
        label_img = np.array(label_map, dtype=np.uint8)
        image_bgr = cv2.cvtColor(np.array(ori_image), cv2.COLOR_RGB2BGR)
        image_canvas = self.seg_parser.colorize(label_img, image_canvas=image_bgr)
        ImageHelper.save(image_canvas, save_path=vis_path)
        ImageHelper.save(ori_image, save_path=raw_path)
        if not self.configer.is_empty('data', 'label_list'):
            label_img = self.__relabel(label_img)

        label_img = Image.fromarray(label_img, 'P')
        Log.info('Label Path: {}'.format(label_path))
        ImageHelper.save(label_img, label_path)

    def _crop_predict(self, image, crop_size):
        height, width = image.size()[2:]
        np_image = image.squeeze(0).permute(1, 2, 0).cpu().numpy()
        height_starts = self._decide_intersection(height, crop_size[1])
        width_starts = self._decide_intersection(width, crop_size[0])
        split_crops = []
        for height in height_starts:
            for width in width_starts:
                image_crop = np_image[height:height + crop_size[1], width:width + crop_size[0]]
                split_crops.append(image_crop[np.newaxis, :])

        split_crops = np.concatenate(split_crops, axis=0)  # (n, crop_image_size, crop_image_size, 3)
        inputs = torch.from_numpy(split_crops).permute(0, 3, 1, 2).to(self.device)
        with torch.no_grad():
            results = self.seg_net.forward(inputs)
            results = results[0].permute(0, 2, 3, 1).cpu().numpy()

        reassemble = np.zeros((np_image.shape[0], np_image.shape[1], results.shape[-1]), np.float32)
        index = 0
        for height in height_starts:
            for width in width_starts:
                reassemble[height:height + crop_size[1], width:width + crop_size[0]] += results[index]
                index += 1

        return reassemble

    def _decide_intersection(self, total_length, crop_length):
        stride = int(crop_length * self.configer.get('test', 'crop_stride_ratio'))  # set the stride as the paper does
        times = (total_length - crop_length) // stride + 1
        cropped_starting = []
        for i in range(times):
            cropped_starting.append(stride * i)

        if total_length - cropped_starting[-1] > crop_length:
            cropped_starting.append(total_length - crop_length)  # must cover the whole image

        return cropped_starting

    def _predict(self, inputs):
        with torch.no_grad():
            results = self.seg_net.forward(inputs)
            results = results[0].squeeze().permute(1, 2, 0).cpu().numpy()

        return results

    def __relabel(self, label_map):
        height, width = label_map.shape
        label_dst = np.zeros((height, width), dtype=np.uint8)
        for i in range(self.configer.get('data', 'num_classes')):
            label_dst[label_map == i] = self.configer.get('data', 'label_list')[i]

        label_dst = np.array(label_dst, dtype=np.uint8)
        return label_dst

    def test(self):
        base_dir = os.path.join(self.configer.get('project_dir'),
                                'val/results/seg', self.configer.get('dataset'))
        test_img = self.configer.get('test_img')
        test_dir = self.configer.get('test_dir')
        if test_img is None and test_dir is None:
            Log.error('test_img & test_dir not exists.')
            exit(1)

        if test_img is not None and test_dir is not None:
            Log.error('Either test_img or test_dir.')
            exit(1)

        if test_img is not None:
            base_dir = os.path.join(base_dir, 'test_img')
            filename = test_img.rstrip().split('/')[-1]
            label_path = os.path.join(base_dir, 'label', '{}.png'.format('.'.join(filename.split('.')[:-1])))
            raw_path = os.path.join(base_dir, 'raw', filename)
            vis_path = os.path.join(base_dir, 'vis', '{}_vis.png'.format('.'.join(filename.split('.')[:-1])))
            FileHelper.make_dirs(label_path, is_file=True)
            FileHelper.make_dirs(raw_path, is_file=True)
            FileHelper.make_dirs(vis_path, is_file=True)
            self.__test_img(test_img, label_path, vis_path, raw_path)
        else:
            base_dir = os.path.join(base_dir, 'test_dir', test_dir.rstrip('/').split('/')[-1])
            FileHelper.make_dirs(base_dir)
            for filename in FileHelper.list_dir(test_dir):
                image_path = os.path.join(test_dir, filename)
                label_path = os.path.join(base_dir, 'label', '{}.png'.format('.'.join(filename.split('.')[:-1])))
                raw_path = os.path.join(base_dir, 'raw', filename)
                vis_path = os.path.join(base_dir, 'vis', '{}_vis.png'.format('.'.join(filename.split('.')[:-1])))
                FileHelper.make_dirs(label_path, is_file=True)
                FileHelper.make_dirs(raw_path, is_file=True)
                FileHelper.make_dirs(vis_path, is_file=True)
                self.__test_img(image_path, label_path, vis_path, raw_path)

    def debug(self):
        base_dir = os.path.join(self.configer.get('project_dir'),
                                'vis/results/seg', self.configer.get('dataset'), 'debug')
        if not os.path.exists(base_dir):
            os.makedirs(base_dir)

        count = 0
        for i, data_dict in enumerate(self.seg_data_loader.get_trainloader()):
            inputs = data_dict['img']
            targets = data_dict['labelmap']
            for j in range(inputs.size(0)):
                count = count + 1
                if count > 20:
                    exit(1)

                image_bgr = self.blob_helper.tensor2bgr(inputs[j])
                label_map = targets[j].numpy()
                image_canvas = self.seg_parser.colorize(label_map, image_canvas=image_bgr)
                cv2.imwrite(os.path.join(base_dir, '{}_{}_vis.png'.format(i, j)), image_canvas)
                cv2.imshow('main', image_canvas)
                cv2.waitKey()
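# _decide_intersection() above computes overlapping crop origins for the
# sliding-window test. A standalone sketch with a concrete call, assuming an
# example crop-stride ratio of 0.5; `crop_starts` is a hypothetical name.
def crop_starts(total_length, crop_length, stride_ratio=0.5):
    """Starting offsets so that strided crops cover the full axis."""
    stride = int(crop_length * stride_ratio)
    starts = [stride * i for i in range((total_length - crop_length) // stride + 1)]
    if total_length - starts[-1] > crop_length:
        starts.append(total_length - crop_length)  # final crop flush with the border
    return starts

print(crop_starts(1024, 512))  # [0, 256, 512]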
class SingleShotDetectorTest(object):
    def __init__(self, configer):
        self.configer = configer
        self.blob_helper = BlobHelper(configer)
        self.det_visualizer = DetVisualizer(configer)
        self.det_parser = DetParser(configer)
        self.det_model_manager = DetModelManager(configer)
        self.det_data_loader = DataLoader(configer)
        self.ssd_priorbox_layer = SSDPriorBoxLayer(configer)
        self.ssd_target_generator = SSDTargetGenerator(configer)
        self.device = torch.device('cpu' if self.configer.get('gpu') is None else 'cuda')
        self.det_net = None
        self._init_model()

    def _init_model(self):
        self.det_net = self.det_model_manager.object_detector()
        self.det_net = RunnerHelper.load_net(self, self.det_net)
        self.det_net.eval()

    def __test_img(self, image_path, json_path, raw_path, vis_path):
        Log.info('Image Path: {}'.format(image_path))
        img = ImageHelper.read_image(image_path,
                                     tool=self.configer.get('data', 'image_tool'),
                                     mode=self.configer.get('data', 'input_mode'))
        ori_img_bgr = ImageHelper.get_cv2_bgr(img, mode=self.configer.get('data', 'input_mode'))
        inputs = self.blob_helper.make_input(img,
                                             input_size=self.configer.get('test', 'input_size'),
                                             scale=1.0)
        with torch.no_grad():
            feat_list, bbox, cls = self.det_net(inputs)

        batch_detections = self.decode(bbox, cls,
                                       self.ssd_priorbox_layer(feat_list, self.configer.get('test', 'input_size')),
                                       self.configer, [inputs.size(3), inputs.size(2)])
        json_dict = self.__get_info_tree(batch_detections[0], ori_img_bgr, [inputs.size(3), inputs.size(2)])
        image_canvas = self.det_parser.draw_bboxes(ori_img_bgr.copy(), json_dict,
                                                   conf_threshold=self.configer.get('res', 'vis_conf_thre'))
        cv2.imwrite(vis_path, image_canvas)
        cv2.imwrite(raw_path, ori_img_bgr)
        Log.info('Json Path: {}'.format(json_path))
        JsonHelper.save_file(json_dict, json_path)
        return json_dict

    @staticmethod
    def decode(bbox, conf, default_boxes, configer, input_size):
        loc = bbox
        if configer.get('phase') != 'debug':
            conf = F.softmax(conf, dim=-1)

        default_boxes = default_boxes.unsqueeze(0).repeat(loc.size(0), 1, 1).to(bbox.device)
        variances = [0.1, 0.2]
        wh = torch.exp(loc[:, :, 2:] * variances[1]) * default_boxes[:, :, 2:]
        cxcy = loc[:, :, :2] * variances[0] * default_boxes[:, :, 2:] + default_boxes[:, :, :2]
        boxes = torch.cat([cxcy - wh / 2, cxcy + wh / 2], 2)  # [b, 8732, 4]
        batch_size, num_priors, _ = boxes.size()
        boxes = boxes.unsqueeze(2).repeat(1, 1, configer.get('data', 'num_classes'), 1)
        boxes = boxes.contiguous().view(boxes.size(0), -1, 4)
        # clip bounding box
        boxes[:, :, 0::2] = boxes[:, :, 0::2].clamp(min=0, max=input_size[0] - 1)
        boxes[:, :, 1::2] = boxes[:, :, 1::2].clamp(min=0, max=input_size[1] - 1)
        labels = torch.Tensor([i for i in range(configer.get('data', 'num_classes'))]).to(boxes.device)
        labels = labels.view(1, 1, -1, 1).repeat(batch_size, num_priors, 1, 1).contiguous().view(batch_size, -1, 1)
        max_conf = conf.contiguous().view(batch_size, -1, 1)
        predictions = torch.cat((boxes, max_conf.float(), labels.float()), 2)
        output = [None for _ in range(len(predictions))]
        for image_i, image_pred in enumerate(predictions):
            ids = labels[image_i].squeeze(1).nonzero().contiguous().view(-1,)
            if ids.numel() == 0:
                continue

            valid_preds = image_pred[ids]
            _, order = valid_preds[:, 4].sort(0, descending=True)
            order = order[:configer.get('nms', 'pre_nms')]
            valid_preds = valid_preds[order]
            valid_preds = valid_preds[valid_preds[:, 4] > configer.get('res', 'val_conf_thre')]
            if valid_preds.numel() == 0:
                continue

            valid_preds = DetHelper.cls_nms(valid_preds[:, :6],
                                            labels=valid_preds[:, 5],
                                            max_threshold=configer.get('nms', 'max_threshold'),
                                            cls_keep_num=configer.get('res', 'cls_keep_num'))
            _, order = valid_preds[:, 4].sort(0, descending=True)
            order = order[:configer.get('res', 'max_per_image')]
            output[image_i] = valid_preds[order]

        return output

    def __get_info_tree(self, detections, image_raw, input_size):
        height, width, _ = image_raw.shape
        in_width, in_height = input_size
        json_dict = dict()
        object_list = list()
        if detections is not None:
            for x1, y1, x2, y2, conf, cls_pred in detections:
                object_dict = dict()
                xmin = x1.cpu().item() / in_width * width
                ymin = y1.cpu().item() / in_height * height
                xmax = x2.cpu().item() / in_width * width
                ymax = y2.cpu().item() / in_height * height
                object_dict['bbox'] = [xmin, ymin, xmax, ymax]
                object_dict['label'] = int(cls_pred.cpu().item()) - 1
                object_dict['score'] = float('%.2f' % conf.cpu().item())
                object_list.append(object_dict)

        json_dict['objects'] = object_list
        return json_dict

    def debug(self, vis_dir):
        count = 0
        for i, data_dict in enumerate(self.det_data_loader.get_trainloader()):
            inputs = data_dict['img']
            batch_gt_bboxes = data_dict['bboxes']
            batch_gt_labels = data_dict['labels']
            input_size = [inputs.size(3), inputs.size(2)]
            feat_list = list()
            for stride in self.configer.get('network', 'stride_list'):
                feat_list.append(torch.zeros((inputs.size(0), 1,
                                              input_size[1] // stride,
                                              input_size[0] // stride)))

            bboxes, labels = self.ssd_target_generator(feat_list, batch_gt_bboxes,
                                                       batch_gt_labels, input_size)
            eye_matrix = torch.eye(self.configer.get('data', 'num_classes'))
            labels_target = eye_matrix[labels.view(-1)].view(inputs.size(0), -1,
                                                             self.configer.get('data', 'num_classes'))
            batch_detections = self.decode(bboxes, labels_target,
                                           self.ssd_priorbox_layer(feat_list, input_size),
                                           self.configer, input_size)
            for j in range(inputs.size(0)):
                count = count + 1
                if count > 20:
                    exit(1)

                ori_img_bgr = self.blob_helper.tensor2bgr(inputs[j])
                self.det_visualizer.vis_default_bboxes(ori_img_bgr,
                                                       self.ssd_priorbox_layer(feat_list, input_size),
                                                       labels[j])
                json_dict = self.__get_info_tree(batch_detections[j], ori_img_bgr, input_size)
                image_canvas = self.det_parser.draw_bboxes(ori_img_bgr.copy(), json_dict,
                                                           conf_threshold=self.configer.get('res', 'vis_conf_thre'))
                cv2.imwrite(os.path.join(vis_dir, '{}_{}_vis.png'.format(i, j)), image_canvas)
                cv2.imshow('main', image_canvas)
                cv2.waitKey()
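# SingleShotDetectorTest.decode() above applies the classic variance-scaled
# offset transform to (cx, cy, w, h) priors. A minimal per-image sketch of
# that transform; `decode_ssd` is a hypothetical name, and the (0.1, 0.2)
# variances match the constants hard-coded above.
import torch

def decode_ssd(loc, priors, variances=(0.1, 0.2)):
    """loc: (N, 4) predicted offsets; priors: (N, 4) as (cx, cy, w, h)."""
    cxcy = loc[:, :2] * variances[0] * priors[:, 2:] + priors[:, :2]  # shifted centers
    wh = torch.exp(loc[:, 2:] * variances[1]) * priors[:, 2:]          # rescaled sizes
    return torch.cat([cxcy - wh / 2, cxcy + wh / 2], dim=1)           # corner boxes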
class FCClassifierTest(object):
    def __init__(self, configer):
        self.configer = configer
        self.blob_helper = BlobHelper(configer)
        self.cls_model_manager = ClsModelManager(configer)
        self.cls_data_loader = ClsDataLoader(configer)
        self.module_utilizer = ModuleUtilizer(configer)
        self.cls_parser = ClsParser(configer)
        self.device = torch.device('cpu' if self.configer.get('gpu') is None else 'cuda')
        self.cls_net = None
        if self.configer.get('dataset') == 'imagenet':
            with open(os.path.join(self.configer.get('project_dir'),
                                   'datasets/cls/imagenet/imagenet_class_index.json')) as json_stream:
                name_dict = json.load(json_stream)
                name_seq = [name_dict[str(i)][1] for i in range(self.configer.get('data', 'num_classes'))]
                self.configer.add_key_value(['details', 'name_seq'], name_seq)

        self._init_model()

    def _init_model(self):
        self.cls_net = self.cls_model_manager.image_classifier()
        self.cls_net = self.module_utilizer.load_net(self.cls_net)
        self.cls_net.eval()

    def __test_img(self, image_path, json_path, raw_path, vis_path):
        Log.info('Image Path: {}'.format(image_path))
        img = ImageHelper.read_image(image_path,
                                     tool=self.configer.get('data', 'image_tool'),
                                     mode=self.configer.get('data', 'input_mode'))
        trans = None
        if self.configer.get('dataset') == 'imagenet':
            if self.configer.get('data', 'image_tool') == 'cv2':
                img = Image.fromarray(img)

            trans = transforms.Compose([
                transforms.Scale(256),
                transforms.CenterCrop(224),
            ])

        assert trans is not None
        img = trans(img)
        ori_img_bgr = ImageHelper.get_cv2_bgr(img, mode=self.configer.get('data', 'input_mode'))
        inputs = self.blob_helper.make_input(img,
                                             input_size=self.configer.get('test', 'input_size'),
                                             scale=1.0)
        with torch.no_grad():
            outputs = self.cls_net(inputs)

        json_dict = self.__get_info_tree(outputs, image_path)
        image_canvas = self.cls_parser.draw_label(ori_img_bgr.copy(), json_dict['label'])
        cv2.imwrite(vis_path, image_canvas)
        cv2.imwrite(raw_path, ori_img_bgr)
        Log.info('Json Path: {}'.format(json_path))
        JsonHelper.save_file(json_dict, json_path)
        return json_dict

    def __get_info_tree(self, outputs, image_path=None):
        json_dict = dict()
        if image_path is not None:
            json_dict['image_path'] = image_path

        topk = (1, 3, 5)
        maxk = max(topk)
        _, pred = outputs.topk(maxk, 1, True, True)
        pred = pred.t()  # (maxk, batch): column 0 holds sample 0's ranked predictions
        for k in topk:
            if k == 1:
                json_dict['label'] = pred[0][0]
            else:
                json_dict['label_top{}'.format(k)] = pred[:k, 0]

        return json_dict

    def test(self):
        base_dir = os.path.join(self.configer.get('project_dir'),
                                'val/results/cls', self.configer.get('dataset'))
        test_img = self.configer.get('test_img')
        test_dir = self.configer.get('test_dir')
        if test_img is None and test_dir is None:
            Log.error('test_img & test_dir not exists.')
            exit(1)

        if test_img is not None and test_dir is not None:
            Log.error('Either test_img or test_dir.')
            exit(1)

        if test_img is not None:
            base_dir = os.path.join(base_dir, 'test_img')
            filename = test_img.rstrip().split('/')[-1]
            json_path = os.path.join(base_dir, 'json', '{}.json'.format('.'.join(filename.split('.')[:-1])))
            raw_path = os.path.join(base_dir, 'raw', filename)
            vis_path = os.path.join(base_dir, 'vis', '{}_vis.png'.format('.'.join(filename.split('.')[:-1])))
            FileHelper.make_dirs(json_path, is_file=True)
            FileHelper.make_dirs(raw_path, is_file=True)
            FileHelper.make_dirs(vis_path, is_file=True)
            self.__test_img(test_img, json_path, raw_path, vis_path)
        else:
            base_dir = os.path.join(base_dir, 'test_dir', test_dir.rstrip('/').split('/')[-1])
            FileHelper.make_dirs(base_dir)
            for filename in FileHelper.list_dir(test_dir):
                image_path = os.path.join(test_dir, filename)
                json_path = os.path.join(base_dir, 'json', '{}.json'.format('.'.join(filename.split('.')[:-1])))
                raw_path = os.path.join(base_dir, 'raw', filename)
                vis_path = os.path.join(base_dir, 'vis', '{}_vis.png'.format('.'.join(filename.split('.')[:-1])))
                FileHelper.make_dirs(json_path, is_file=True)
                FileHelper.make_dirs(raw_path, is_file=True)
                FileHelper.make_dirs(vis_path, is_file=True)
                self.__test_img(image_path, json_path, raw_path, vis_path)

    def debug(self):
        base_dir = os.path.join(self.configer.get('project_dir'),
                                'vis/results/cls', self.configer.get('dataset'), 'debug')
        if not os.path.exists(base_dir):
            os.makedirs(base_dir)

        count = 0
        for i, data_dict in enumerate(self.cls_data_loader.get_trainloader()):
            inputs = data_dict['img']
            labels = data_dict['label']
            eye_matrix = torch.eye(self.configer.get('data', 'num_classes'))
            labels_target = eye_matrix[labels.view(-1)].view(inputs.size(0),
                                                             self.configer.get('data', 'num_classes'))
            for j in range(inputs.size(0)):
                count = count + 1
                if count > 20:
                    exit(1)

                ori_img_bgr = self.blob_helper.tensor2bgr(inputs[j])
                # keep a 2-D (1, num_classes) slice so __get_info_tree's dim-1 topk applies
                json_dict = self.__get_info_tree(labels_target[j:j + 1])
                image_canvas = self.cls_parser.draw_label(ori_img_bgr.copy(), json_dict['label'])
                cv2.imwrite(os.path.join(base_dir, '{}_{}_vis.png'.format(i, j)), image_canvas)
                cv2.imshow('main', image_canvas)
                cv2.waitKey()
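# The torch.eye row-indexing trick used in every debug() method above converts
# class indices into one-hot rows with a single gather; a tiny demonstration:
import torch

labels = torch.tensor([2, 0, 1])
one_hot = torch.eye(4)[labels]  # row i of eye(C) is the one-hot vector for class i
print(one_hot)
# tensor([[0., 0., 1., 0.],
#         [1., 0., 0., 0.],
#         [0., 1., 0., 0.]])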