class SingleShotDetectorTest(object):
    def __init__(self, configer):
        self.configer = configer
        self.blob_helper = BlobHelper(configer)
        self.det_visualizer = DetVisualizer(configer)
        self.det_parser = DetParser(configer)
        self.det_model_manager = DetModelManager(configer)
        self.det_data_loader = DataLoader(configer)
        self.ssd_priorbox_layer = SSDPriorBoxLayer(configer)
        self.ssd_target_generator = SSDTargetGenerator(configer)
        self.device = torch.device('cpu' if self.configer.get('gpu') is None else 'cuda')
        self.det_net = None
        self._init_model()

    def _init_model(self):
        self.det_net = self.det_model_manager.object_detector()
        self.det_net = RunnerHelper.load_net(self, self.det_net)
        self.det_net.eval()

    def __test_img(self, image_path, json_path, raw_path, vis_path):
        Log.info('Image Path: {}'.format(image_path))
        img = ImageHelper.read_image(image_path,
                                     tool=self.configer.get('data', 'image_tool'),
                                     mode=self.configer.get('data', 'input_mode'))
        ori_img_bgr = ImageHelper.get_cv2_bgr(img, mode=self.configer.get('data', 'input_mode'))
        inputs = self.blob_helper.make_input(img,
                                             input_size=self.configer.get('test', 'input_size'),
                                             scale=1.0)
        with torch.no_grad():
            feat_list, bbox, cls = self.det_net(inputs)
            batch_detections = self.decode(bbox, cls,
                                           self.ssd_priorbox_layer(feat_list, self.configer.get('test', 'input_size')),
                                           self.configer,
                                           [inputs.size(3), inputs.size(2)])
            json_dict = self.__get_info_tree(batch_detections[0], ori_img_bgr,
                                             [inputs.size(3), inputs.size(2)])

        image_canvas = self.det_parser.draw_bboxes(ori_img_bgr.copy(), json_dict,
                                                   conf_threshold=self.configer.get('res', 'vis_conf_thre'))
        cv2.imwrite(vis_path, image_canvas)
        cv2.imwrite(raw_path, ori_img_bgr)

        Log.info('Json Path: {}'.format(json_path))
        JsonHelper.save_file(json_dict, json_path)
        return json_dict

    @staticmethod
    def decode(bbox, conf, default_boxes, configer, input_size):
        loc = bbox
        if configer.get('phase') != 'debug':
            conf = F.softmax(conf, dim=-1)

        default_boxes = default_boxes.unsqueeze(0).repeat(loc.size(0), 1, 1).to(bbox.device)
        variances = [0.1, 0.2]
        wh = torch.exp(loc[:, :, 2:] * variances[1]) * default_boxes[:, :, 2:]
        cxcy = loc[:, :, :2] * variances[0] * default_boxes[:, :, 2:] + default_boxes[:, :, :2]
        boxes = torch.cat([cxcy - wh / 2, cxcy + wh / 2], 2)  # [b, 8732, 4]

        batch_size, num_priors, _ = boxes.size()
        boxes = boxes.unsqueeze(2).repeat(1, 1, configer.get('data', 'num_classes'), 1)
        boxes = boxes.contiguous().view(boxes.size(0), -1, 4)

        # clip bounding box
        boxes[:, :, 0::2] = boxes[:, :, 0::2].clamp(min=0, max=input_size[0] - 1)
        boxes[:, :, 1::2] = boxes[:, :, 1::2].clamp(min=0, max=input_size[1] - 1)

        labels = torch.Tensor([i for i in range(configer.get('data', 'num_classes'))]).to(boxes.device)
        labels = labels.view(1, 1, -1, 1).repeat(batch_size, num_priors, 1, 1)\
            .contiguous().view(batch_size, -1, 1)
        max_conf = conf.contiguous().view(batch_size, -1, 1)

        # max_conf, labels = conf.max(2, keepdim=True)  # [b, 8732, 1]
        predictions = torch.cat((boxes, max_conf.float(), labels.float()), 2)
        output = [None for _ in range(len(predictions))]
        for image_i, image_pred in enumerate(predictions):
            ids = labels[image_i].squeeze(1).nonzero().contiguous().view(-1)
            if ids.numel() == 0:
                continue

            valid_preds = image_pred[ids]
            _, order = valid_preds[:, 4].sort(0, descending=True)
            order = order[:configer.get('nms', 'pre_nms')]
            valid_preds = valid_preds[order]
            valid_preds = valid_preds[valid_preds[:, 4] > configer.get('res', 'val_conf_thre')]
            if valid_preds.numel() == 0:
                continue

            valid_preds = DetHelper.cls_nms(valid_preds[:, :6],
                                            labels=valid_preds[:, 5],
                                            max_threshold=configer.get('nms', 'max_threshold'),
                                            cls_keep_num=configer.get('res', 'cls_keep_num'))
            _, order = valid_preds[:, 4].sort(0, descending=True)
            order = order[:configer.get('res', 'max_per_image')]
            output[image_i] = valid_preds[order]

        return output

    def __get_info_tree(self, detections, image_raw, input_size):
        height, width, _ = image_raw.shape
        in_width, in_height = input_size
        json_dict = dict()
        object_list = list()
        if detections is not None:
            for x1, y1, x2, y2, conf, cls_pred in detections:
                object_dict = dict()
                xmin = x1.cpu().item() / in_width * width
                ymin = y1.cpu().item() / in_height * height
                xmax = x2.cpu().item() / in_width * width
                ymax = y2.cpu().item() / in_height * height
                object_dict['bbox'] = [xmin, ymin, xmax, ymax]
                object_dict['label'] = int(cls_pred.cpu().item()) - 1
                object_dict['score'] = float('%.2f' % conf.cpu().item())
                object_list.append(object_dict)

        json_dict['objects'] = object_list
        return json_dict

    def debug(self, vis_dir):
        count = 0
        for i, data_dict in enumerate(self.det_data_loader.get_trainloader()):
            inputs = data_dict['img']
            batch_gt_bboxes = data_dict['bboxes']
            batch_gt_labels = data_dict['labels']
            input_size = [inputs.size(3), inputs.size(2)]
            feat_list = list()
            for stride in self.configer.get('network', 'stride_list'):
                feat_list.append(torch.zeros((inputs.size(0), 1,
                                              input_size[1] // stride,
                                              input_size[0] // stride)))

            bboxes, labels = self.ssd_target_generator(feat_list, batch_gt_bboxes,
                                                       batch_gt_labels, input_size)
            eye_matrix = torch.eye(self.configer.get('data', 'num_classes'))
            labels_target = eye_matrix[labels.view(-1)].view(inputs.size(0), -1,
                                                             self.configer.get('data', 'num_classes'))
            batch_detections = self.decode(bboxes, labels_target,
                                           self.ssd_priorbox_layer(feat_list, input_size),
                                           self.configer, input_size)
            for j in range(inputs.size(0)):
                count = count + 1
                if count > 20:
                    exit(1)

                ori_img_bgr = self.blob_helper.tensor2bgr(inputs[j])
                self.det_visualizer.vis_default_bboxes(ori_img_bgr,
                                                       self.ssd_priorbox_layer(feat_list, input_size),
                                                       labels[j])
                json_dict = self.__get_info_tree(batch_detections[j], ori_img_bgr, input_size)
                image_canvas = self.det_parser.draw_bboxes(ori_img_bgr.copy(), json_dict,
                                                           conf_threshold=self.configer.get('res', 'vis_conf_thre'))
                cv2.imwrite(os.path.join(vis_dir, '{}_{}_vis.png'.format(i, j)), image_canvas)
                cv2.imshow('main', image_canvas)
                cv2.waitKey()
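
# --------------------------------------------------------------------------- #
# Illustrative sketch (not part of the pipeline above): the variance-based box
# decoding performed in SingleShotDetectorTest.decode(), reduced to a single
# prior box with hypothetical toy values. The variances [0.1, 0.2] match the
# constants hard-coded in decode(); only torch is required.
# --------------------------------------------------------------------------- #
import torch

variances = [0.1, 0.2]
loc = torch.tensor([[0.5, -0.2, 0.1, 0.3]])           # predicted offsets (toy values)
default_boxes = torch.tensor([[0.5, 0.5, 0.2, 0.2]])  # prior box in (cx, cy, w, h), normalized

# Undo the SSD encoding: offsets rescale the prior's center and size.
cxcy = loc[:, :2] * variances[0] * default_boxes[:, 2:] + default_boxes[:, :2]
wh = torch.exp(loc[:, 2:] * variances[1]) * default_boxes[:, 2:]

# Convert center form to corner form (xmin, ymin, xmax, ymax).
boxes = torch.cat([cxcy - wh / 2, cxcy + wh / 2], 1)
print(boxes)  # roughly [[0.408, 0.390, 0.612, 0.602]]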
class FastRCNNTest(object):
    def __init__(self, configer):
        self.configer = configer
        self.blob_helper = BlobHelper(configer)
        self.det_visualizer = DetVisualizer(configer)
        self.det_parser = DetParser(configer)
        self.det_model_manager = DetModelManager(configer)
        self.det_data_loader = DetDataLoader(configer)
        self.roi_sampler = FRRoiSampleLayer(configer)
        self.module_utilizer = ModuleUtilizer(configer)
        self.rpn_target_generator = RPNTargetGenerator(configer)
        self.fr_priorbox_layer = FRPriorBoxLayer(configer)
        self.fr_roi_generator = FRRoiGenerator(configer)
        self.data_transformer = DataTransformer(configer)
        self.device = torch.device('cpu' if self.configer.get('gpu') is None else 'cuda')
        self.det_net = None
        self._init_model()

    def _init_model(self):
        self.det_net = self.det_model_manager.object_detector()
        self.det_net = self.module_utilizer.load_net(self.det_net)
        self.det_net.eval()

    def __test_img(self, image_path, json_path, raw_path, vis_path):
        Log.info('Image Path: {}'.format(image_path))
        img = ImageHelper.read_image(image_path,
                                     tool=self.configer.get('data', 'image_tool'),
                                     mode=self.configer.get('data', 'input_mode'))
        ori_img_bgr = ImageHelper.get_cv2_bgr(img, mode=self.configer.get('data', 'input_mode'))
        img, scale = BoundResize()(img)
        inputs = self.blob_helper.make_input(img, scale=1.0)
        with torch.no_grad():
            # Forward pass.
            test_group = self.det_net(inputs, scale)
            test_indices_and_rois, test_roi_locs, test_roi_scores, test_rois_num = test_group

        batch_detections = self.decode(test_roi_locs, test_roi_scores,
                                       test_indices_and_rois, test_rois_num,
                                       self.configer, ImageHelper.get_size(img))
        json_dict = self.__get_info_tree(batch_detections[0], ori_img_bgr, scale=scale)
        image_canvas = self.det_parser.draw_bboxes(ori_img_bgr.copy(), json_dict,
                                                   conf_threshold=self.configer.get('vis', 'conf_threshold'))
        cv2.imwrite(vis_path, image_canvas)
        cv2.imwrite(raw_path, ori_img_bgr)

        Log.info('Json Path: {}'.format(json_path))
        JsonHelper.save_file(json_dict, json_path)
        return json_dict

    @staticmethod
    def decode(roi_locs, roi_scores, indices_and_rois, test_rois_num, configer, input_size):
        roi_locs = roi_locs.cpu()
        roi_scores = roi_scores.cpu()
        indices_and_rois = indices_and_rois.cpu()

        num_classes = configer.get('data', 'num_classes')
        mean = torch.Tensor(configer.get('roi', 'loc_normalize_mean')).repeat(num_classes)[None]
        std = torch.Tensor(configer.get('roi', 'loc_normalize_std')).repeat(num_classes)[None]
        mean = mean.to(roi_locs.device)
        std = std.to(roi_locs.device)

        roi_locs = (roi_locs * std + mean)
        roi_locs = roi_locs.contiguous().view(-1, num_classes, 4)
        # roi_locs = roi_locs[:, :, [1, 0, 3, 2]]

        rois = indices_and_rois[:, 1:]
        rois = rois.contiguous().view(-1, 1, 4).expand_as(roi_locs)
        wh = torch.exp(roi_locs[:, :, 2:]) * (rois[:, :, 2:] - rois[:, :, :2])
        cxcy = roi_locs[:, :, :2] * (rois[:, :, 2:] - rois[:, :, :2]) \
            + (rois[:, :, :2] + rois[:, :, 2:]) / 2
        dst_bbox = torch.cat([cxcy - wh / 2, cxcy + wh / 2], 2)  # [b, 8732, 4]

        # clip bounding box
        dst_bbox[:, :, 0::2] = dst_bbox[:, :, 0::2].clamp(min=0, max=input_size[0] - 1)
        dst_bbox[:, :, 1::2] = dst_bbox[:, :, 1::2].clamp(min=0, max=input_size[1] - 1)

        if configer.get('phase') != 'debug':
            cls_prob = F.softmax(roi_scores, dim=1)
        else:
            cls_prob = roi_scores

        cls_label = torch.LongTensor([i for i in range(num_classes)])\
            .contiguous().view(1, num_classes).repeat(indices_and_rois.size(0), 1)

        output = [None for _ in range(test_rois_num.size(0))]
        start_index = 0
        for i in range(test_rois_num.size(0)):
            # batch_index = (indices_and_rois[:, 0] == i).nonzero().contiguous().view(-1,)
            # tmp_dst_bbox = dst_bbox[batch_index]
            # tmp_cls_prob = cls_prob[batch_index]
            # tmp_cls_label = cls_label[batch_index]
            tmp_dst_bbox = dst_bbox[start_index:start_index + test_rois_num[i]]
            tmp_cls_prob = cls_prob[start_index:start_index + test_rois_num[i]]
            tmp_cls_label = cls_label[start_index:start_index + test_rois_num[i]]
            start_index += test_rois_num[i]

            mask = (tmp_cls_prob > configer.get('vis', 'conf_threshold')) & (tmp_cls_label > 0)
            tmp_dst_bbox = tmp_dst_bbox[mask].contiguous().view(-1, 4)
            if tmp_dst_bbox.numel() == 0:
                continue

            tmp_cls_prob = tmp_cls_prob[mask].contiguous().view(-1).unsqueeze(1)
            tmp_cls_label = tmp_cls_label[mask].contiguous().view(-1).unsqueeze(1)
            valid_preds = torch.cat((tmp_dst_bbox, tmp_cls_prob.float(), tmp_cls_label.float()), 1)

            keep = DetHelper.cls_nms(valid_preds[:, :4],
                                     scores=valid_preds[:, 4],
                                     labels=valid_preds[:, 5],
                                     nms_threshold=configer.get('nms', 'overlap_threshold'),
                                     iou_mode=configer.get('nms', 'mode'))
            output[i] = valid_preds[keep]

        return output

    def __make_tensor(self, gt_bboxes, gt_labels):
        len_arr = [gt_labels[i].numel() for i in range(len(gt_bboxes))]
        batch_maxlen = max(max(len_arr), 1)
        target_bboxes = torch.zeros((len(gt_bboxes), batch_maxlen, 4)).float()
        target_labels = torch.zeros((len(gt_bboxes), batch_maxlen)).long()
        for i in range(len(gt_bboxes)):
            target_bboxes[i, :len_arr[i], :] = gt_bboxes[i]
            target_labels[i, :len_arr[i]] = gt_labels[i]

        target_bboxes_num = torch.Tensor(len_arr).long()
        return target_bboxes, target_bboxes_num, target_labels

    def __get_info_tree(self, detections, image_raw, scale=1.0):
        height, width, _ = image_raw.shape
        json_dict = dict()
        object_list = list()
        if detections is not None:
            for x1, y1, x2, y2, conf, cls_pred in detections:
                object_dict = dict()
                xmin = min(x1.cpu().item() / scale, width - 1)
                ymin = min(y1.cpu().item() / scale, height - 1)
                xmax = min(x2.cpu().item() / scale, width - 1)
                ymax = min(y2.cpu().item() / scale, height - 1)
                object_dict['bbox'] = [xmin, ymin, xmax, ymax]
                object_dict['label'] = int(cls_pred.cpu().item()) - 1
                object_dict['score'] = float('%.2f' % conf.cpu().item())
                object_list.append(object_dict)

        json_dict['objects'] = object_list
        return json_dict

    def test(self):
        base_dir = os.path.join(self.configer.get('project_dir'),
                                'val/results/det', self.configer.get('dataset'))
        test_img = self.configer.get('test_img')
        test_dir = self.configer.get('test_dir')
        if test_img is None and test_dir is None:
            Log.error('test_img & test_dir not exists.')
            exit(1)

        if test_img is not None and test_dir is not None:
            Log.error('Either test_img or test_dir.')
            exit(1)

        if test_img is not None:
            base_dir = os.path.join(base_dir, 'test_img')
            filename = test_img.rstrip().split('/')[-1]
            json_path = os.path.join(base_dir, 'json',
                                     '{}.json'.format('.'.join(filename.split('.')[:-1])))
            raw_path = os.path.join(base_dir, 'raw', filename)
            vis_path = os.path.join(base_dir, 'vis',
                                    '{}_vis.png'.format('.'.join(filename.split('.')[:-1])))
            FileHelper.make_dirs(json_path, is_file=True)
            FileHelper.make_dirs(raw_path, is_file=True)
            FileHelper.make_dirs(vis_path, is_file=True)
            self.__test_img(test_img, json_path, raw_path, vis_path)
        else:
            base_dir = os.path.join(base_dir, 'test_dir', test_dir.rstrip('/').split('/')[-1])
            FileHelper.make_dirs(base_dir)
            for filename in FileHelper.list_dir(test_dir):
                image_path = os.path.join(test_dir, filename)
                json_path = os.path.join(base_dir, 'json',
                                         '{}.json'.format('.'.join(filename.split('.')[:-1])))
                raw_path = os.path.join(base_dir, 'raw', filename)
                vis_path = os.path.join(base_dir, 'vis',
                                        '{}_vis.png'.format('.'.join(filename.split('.')[:-1])))
                FileHelper.make_dirs(json_path, is_file=True)
                FileHelper.make_dirs(raw_path, is_file=True)
                FileHelper.make_dirs(vis_path, is_file=True)
                self.__test_img(image_path, json_path, raw_path, vis_path)

    def debug(self):
        base_dir = os.path.join(self.configer.get('project_dir'),
                                'vis/results/det', self.configer.get('dataset'), 'debug')
        if not os.path.exists(base_dir):
            os.makedirs(base_dir)

        count = 0
        for i, data_dict in enumerate(self.det_data_loader.get_trainloader()):
            img_scale = data_dict['imgscale']
            inputs = data_dict['img']
            batch_gt_bboxes = data_dict['bboxes']
            # batch_gt_bboxes = ResizeBoxes()(inputs, data_dict['bboxes'])
            batch_gt_labels = data_dict['labels']
            input_size = [inputs.size(3), inputs.size(2)]
            feat_list = list()
            for stride in self.configer.get('rpn', 'stride_list'):
                feat_list.append(torch.zeros((inputs.size(0), 1,
                                              input_size[1] // stride,
                                              input_size[0] // stride)))

            gt_rpn_locs, gt_rpn_labels = self.rpn_target_generator(feat_list,
                                                                   batch_gt_bboxes, input_size)
            eye_matrix = torch.eye(2)
            gt_rpn_labels[gt_rpn_labels == -1] = 0
            gt_rpn_scores = eye_matrix[gt_rpn_labels.view(-1)].view(inputs.size(0), -1, 2)
            test_indices_and_rois, _ = self.fr_roi_generator(feat_list, gt_rpn_locs, gt_rpn_scores,
                                                             self.configer.get('rpn', 'n_test_pre_nms'),
                                                             self.configer.get('rpn', 'n_test_post_nms'),
                                                             input_size, img_scale)
            gt_bboxes, gt_nums, gt_labels = self.__make_tensor(batch_gt_bboxes, batch_gt_labels)
            sample_rois, gt_roi_locs, gt_roi_labels = self.roi_sampler(test_indices_and_rois,
                                                                       gt_bboxes, gt_nums,
                                                                       gt_labels, input_size)
            self.det_visualizer.vis_rois(inputs, sample_rois[gt_roi_labels > 0])
            gt_cls_roi_locs = torch.zeros((gt_roi_locs.size(0),
                                           self.configer.get('data', 'num_classes'), 4))
            gt_cls_roi_locs[torch.arange(0, sample_rois.size(0)).long(),
                            gt_roi_labels.long()] = gt_roi_locs
            gt_cls_roi_locs = gt_cls_roi_locs.contiguous().view(
                -1, 4 * self.configer.get('data', 'num_classes'))
            eye_matrix = torch.eye(self.configer.get('data', 'num_classes'))
            gt_roi_scores = eye_matrix[gt_roi_labels.view(-1)].view(
                gt_roi_labels.size(0), self.configer.get('data', 'num_classes'))
            test_rois_num = torch.zeros((len(gt_bboxes),)).long()
            for batch_id in range(len(gt_bboxes)):
                batch_index = (sample_rois[:, 0] == batch_id).nonzero().contiguous().view(-1)
                test_rois_num[batch_id] = batch_index.numel()

            batch_detections = FastRCNNTest.decode(gt_cls_roi_locs, gt_roi_scores,
                                                   sample_rois, test_rois_num,
                                                   self.configer, input_size)
            for j in range(inputs.size(0)):
                count = count + 1
                if count > 20:
                    exit(1)

                ori_img_bgr = self.blob_helper.tensor2bgr(inputs[j])
                self.det_visualizer.vis_default_bboxes(ori_img_bgr,
                                                       self.fr_priorbox_layer(feat_list, input_size),
                                                       gt_rpn_labels[j])
                json_dict = self.__get_info_tree(batch_detections[j], ori_img_bgr)
                image_canvas = self.det_parser.draw_bboxes(ori_img_bgr.copy(), json_dict,
                                                           conf_threshold=self.configer.get('vis', 'conf_threshold'))
                cv2.imwrite(os.path.join(base_dir, '{}_{}_vis.png'.format(i, j)), image_canvas)
                cv2.imshow('main', image_canvas)
                cv2.waitKey()
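
# --------------------------------------------------------------------------- #
# Illustrative sketch (not part of the pipeline above): the ROI-relative box
# decoding performed in FastRCNNTest.decode(), for a single proposal with
# hypothetical values. `roi` is a proposal in corner form (x1, y1, x2, y2);
# `loc` stands for one per-class regression vector after the mean/std
# un-normalization step.
# --------------------------------------------------------------------------- #
import torch

roi = torch.tensor([10.0, 20.0, 110.0, 120.0])  # a 100x100 proposal (toy values)
loc = torch.tensor([0.1, -0.05, 0.2, 0.0])      # (dx, dy, dw, dh)

roi_wh = roi[2:] - roi[:2]            # proposal width/height
roi_center = (roi[:2] + roi[2:]) / 2  # proposal center

wh = torch.exp(loc[2:]) * roi_wh      # refined width/height
cxcy = loc[:2] * roi_wh + roi_center  # refined center

# Back to corner form (x1, y1, x2, y2).
dst_bbox = torch.cat([cxcy - wh / 2, cxcy + wh / 2])
print(dst_bbox)  # roughly [8.9, 15.0, 131.1, 115.0]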
class FastRCNNTest(object):
    def __init__(self, configer):
        self.configer = configer
        self.blob_helper = BlobHelper(configer)
        self.det_visualizer = DetVisualizer(configer)
        self.det_parser = DetParser(configer)
        self.det_model_manager = DetModelManager(configer)
        self.det_data_loader = DataLoader(configer)
        self.roi_sampler = FRROISampler(configer)
        self.rpn_target_generator = RPNTargetAssigner(configer)
        self.fr_priorbox_layer = FRPriorBoxLayer(configer)
        self.fr_roi_generator = FRROIGenerator(configer)
        self.device = torch.device('cpu' if self.configer.get('gpu') is None else 'cuda')
        self.det_net = None
        self._init_model()

    def _init_model(self):
        self.det_net = self.det_model_manager.object_detector()
        self.det_net = RunnerHelper.load_net(self, self.det_net)
        self.det_net.eval()

    def __test_img(self, image_path, json_path, raw_path, vis_path):
        Log.info('Image Path: {}'.format(image_path))
        image = ImageHelper.read_image(image_path,
                                       tool=self.configer.get('data', 'image_tool'),
                                       mode=self.configer.get('data', 'input_mode'))
        ori_img_bgr = ImageHelper.get_cv2_bgr(image, mode=self.configer.get('data', 'input_mode'))
        width, height = ImageHelper.get_size(image)
        scale1 = self.configer.get('test', 'resize_bound')[0] / min(width, height)
        scale2 = self.configer.get('test', 'resize_bound')[1] / max(width, height)
        scale = min(scale1, scale2)
        inputs = self.blob_helper.make_input(image, scale=scale)
        b, c, h, w = inputs.size()
        border_wh = [w, h]
        if self.configer.exists('test', 'fit_stride'):
            stride = self.configer.get('test', 'fit_stride')
            pad_w = 0 if (w % stride == 0) else stride - (w % stride)  # right
            pad_h = 0 if (h % stride == 0) else stride - (h % stride)  # down
            expand_image = torch.zeros((b, c, h + pad_h, w + pad_w)).to(inputs.device)
            expand_image[:, :, 0:h, 0:w] = inputs
            inputs = expand_image

        data_dict = dict(
            img=inputs,
            meta=DataContainer([[dict(ori_img_size=ImageHelper.get_size(ori_img_bgr),
                                      aug_img_size=border_wh,
                                      # decode() clips boxes with metas[i]['border_size'],
                                      # so record the unpadded border here.
                                      border_size=border_wh,
                                      img_scale=scale,
                                      input_size=[inputs.size(3), inputs.size(2)])]],
                               cpu_only=True))
        with torch.no_grad():
            # Forward pass.
            test_group = self.det_net(data_dict)
            test_indices_and_rois, test_roi_locs, test_roi_scores, test_rois_num = test_group

        batch_detections = self.decode(test_roi_locs, test_roi_scores,
                                       test_indices_and_rois, test_rois_num,
                                       self.configer, DCHelper.tolist(data_dict['meta']))
        json_dict = self.__get_info_tree(batch_detections[0], ori_img_bgr, scale=scale)
        image_canvas = self.det_parser.draw_bboxes(ori_img_bgr.copy(), json_dict,
                                                   conf_threshold=self.configer.get('res', 'vis_conf_thre'))
        cv2.imwrite(vis_path, image_canvas)
        cv2.imwrite(raw_path, ori_img_bgr)

        Log.info('Json Path: {}'.format(json_path))
        JsonHelper.save_file(json_dict, json_path)
        return json_dict

    @staticmethod
    def decode(roi_locs, roi_scores, indices_and_rois, test_rois_num, configer, metas):
        num_classes = configer.get('data', 'num_classes')
        mean = torch.Tensor(configer.get('roi', 'loc_normalize_mean')).repeat(num_classes)[None]
        std = torch.Tensor(configer.get('roi', 'loc_normalize_std')).repeat(num_classes)[None]
        mean = mean.to(roi_locs.device)
        std = std.to(roi_locs.device)

        roi_locs = (roi_locs * std + mean)
        roi_locs = roi_locs.contiguous().view(-1, num_classes, 4)
        # roi_locs = roi_locs[:, :, [1, 0, 3, 2]]

        rois = indices_and_rois[:, 1:]
        rois = rois.contiguous().view(-1, 1, 4).expand_as(roi_locs)
        wh = torch.exp(roi_locs[:, :, 2:]) * (rois[:, :, 2:] - rois[:, :, :2])
        cxcy = roi_locs[:, :, :2] * (rois[:, :, 2:] - rois[:, :, :2]) \
            + (rois[:, :, :2] + rois[:, :, 2:]) / 2
        dst_bbox = torch.cat([cxcy - wh / 2, cxcy + wh / 2], 2)  # [b, 8732, 4]

        if configer.get('phase') != 'debug':
            cls_prob = F.softmax(roi_scores, dim=1)
        else:
            cls_prob = roi_scores

        cls_label = torch.LongTensor([i for i in range(num_classes)])\
            .contiguous().view(1, num_classes)\
            .repeat(indices_and_rois.size(0), 1).to(roi_locs.device)

        output = [None for _ in range(test_rois_num.size(0))]
        start_index = 0
        for i in range(test_rois_num.size(0)):
            # batch_index = (indices_and_rois[:, 0] == i).nonzero().contiguous().view(-1,)
            # tmp_dst_bbox = dst_bbox[batch_index]
            # tmp_cls_prob = cls_prob[batch_index]
            # tmp_cls_label = cls_label[batch_index]
            tmp_dst_bbox = dst_bbox[start_index:start_index + test_rois_num[i]]
            # clip bounding box
            tmp_dst_bbox[:, :, 0::2] = tmp_dst_bbox[:, :, 0::2].clamp(
                min=0, max=metas[i]['border_size'][0] - 1)
            tmp_dst_bbox[:, :, 1::2] = tmp_dst_bbox[:, :, 1::2].clamp(
                min=0, max=metas[i]['border_size'][1] - 1)
            tmp_cls_prob = cls_prob[start_index:start_index + test_rois_num[i]]
            tmp_cls_label = cls_label[start_index:start_index + test_rois_num[i]]
            start_index += test_rois_num[i]

            mask = (tmp_cls_prob > configer.get('res', 'val_conf_thre')) & (tmp_cls_label > 0)
            tmp_dst_bbox = tmp_dst_bbox[mask].contiguous().view(-1, 4)
            if tmp_dst_bbox.numel() == 0:
                continue

            tmp_cls_prob = tmp_cls_prob[mask].contiguous().view(-1).unsqueeze(1)
            tmp_cls_label = tmp_cls_label[mask].contiguous().view(-1).unsqueeze(1)
            valid_preds = torch.cat((tmp_dst_bbox, tmp_cls_prob.float(), tmp_cls_label.float()), 1)
            output[i] = DetHelper.cls_nms(valid_preds,
                                          labels=valid_preds[:, 5],
                                          max_threshold=configer.get('nms', 'max_threshold'))

        return output

    def __get_info_tree(self, detections, image_raw, scale=1.0):
        height, width, _ = image_raw.shape
        json_dict = dict()
        object_list = list()
        if detections is not None:
            for x1, y1, x2, y2, conf, cls_pred in detections:
                object_dict = dict()
                xmin = min(x1.cpu().item() / scale, width - 1)
                ymin = min(y1.cpu().item() / scale, height - 1)
                xmax = min(x2.cpu().item() / scale, width - 1)
                ymax = min(y2.cpu().item() / scale, height - 1)
                object_dict['bbox'] = [xmin, ymin, xmax, ymax]
                object_dict['label'] = int(cls_pred.cpu().item()) - 1
                object_dict['score'] = float('%.2f' % conf.cpu().item())
                object_list.append(object_dict)

        json_dict['objects'] = object_list
        return json_dict

    def debug(self, vis_dir):
        count = 0
        for i, data_dict in enumerate(self.det_data_loader.get_trainloader()):
            feat_list = list()
            input_size = data_dict['meta'][0]['input_size']
            for stride in self.configer.get('rpn', 'stride_list'):
                feat_list.append(torch.zeros((data_dict['img'].size(0), 1,
                                              input_size[1] // stride,
                                              input_size[0] // stride)))

            gt_rpn_locs, gt_rpn_labels = self.rpn_target_generator(feat_list,
                                                                   data_dict['bboxes'],
                                                                   data_dict['meta'])
            eye_matrix = torch.eye(2)
            gt_rpn_labels[gt_rpn_labels == -1] = 0
            gt_rpn_scores = eye_matrix[gt_rpn_labels.view(-1)].view(
                data_dict['img'].size(0), -1, 2)
            test_indices_and_rois, _ = self.fr_roi_generator(feat_list, gt_rpn_locs, gt_rpn_scores,
                                                             self.configer.get('rpn', 'n_test_pre_nms'),
                                                             self.configer.get('rpn', 'n_test_post_nms'),
                                                             data_dict['meta'])
            sample_rois, gt_roi_locs, gt_roi_labels = self.roi_sampler(test_indices_and_rois,
                                                                       data_dict['bboxes'],
                                                                       data_dict['labels'],
                                                                       data_dict['meta'])
            self.det_visualizer.vis_rois(data_dict['img'], sample_rois[gt_roi_labels > 0])
            gt_cls_roi_locs = torch.zeros((gt_roi_locs.size(0),
                                           self.configer.get('data', 'num_classes'), 4))
            gt_cls_roi_locs[torch.arange(0, sample_rois.size(0)).long(),
                            gt_roi_labels.long()] = gt_roi_locs
            gt_cls_roi_locs = gt_cls_roi_locs.contiguous().view(
                -1, 4 * self.configer.get('data', 'num_classes'))
            eye_matrix = torch.eye(self.configer.get('data', 'num_classes'))
            gt_roi_scores = eye_matrix[gt_roi_labels.view(-1)].view(
                gt_roi_labels.size(0), self.configer.get('data', 'num_classes'))
            test_rois_num = torch.zeros((len(data_dict['bboxes']),)).long()
            for batch_id in range(len(data_dict['bboxes'])):
                batch_index = (sample_rois[:, 0] == batch_id).nonzero().contiguous().view(-1)
                test_rois_num[batch_id] = batch_index.numel()

            batch_detections = FastRCNNTest.decode(gt_cls_roi_locs, gt_roi_scores,
                                                   sample_rois, test_rois_num,
                                                   self.configer, data_dict['meta'])
            for j in range(data_dict['img'].size(0)):
                count = count + 1
                if count > 20:
                    exit(1)

                ori_img_bgr = self.blob_helper.tensor2bgr(data_dict['img'][j])
                self.det_visualizer.vis_default_bboxes(ori_img_bgr,
                                                       self.fr_priorbox_layer(feat_list, input_size),
                                                       gt_rpn_labels[j])
                json_dict = self.__get_info_tree(batch_detections[j], ori_img_bgr)
                image_canvas = self.det_parser.draw_bboxes(ori_img_bgr.copy(), json_dict,
                                                           conf_threshold=self.configer.get('res', 'vis_conf_thre'))
                cv2.imwrite(os.path.join(vis_dir, '{}_{}_vis.png'.format(i, j)), image_canvas)
                cv2.imshow('main', image_canvas)
                cv2.waitKey()
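
# --------------------------------------------------------------------------- #
# Illustrative sketch (not part of the pipeline above): the `fit_stride`
# zero-padding done in __test_img(), which pads the right/bottom of the input
# so both spatial sides become multiples of the network stride. Sizes here
# are hypothetical.
# --------------------------------------------------------------------------- #
import torch

stride = 32
inputs = torch.randn(1, 3, 500, 375)  # (b, c, h, w), toy input

b, c, h, w = inputs.size()
pad_w = 0 if w % stride == 0 else stride - (w % stride)  # pad right
pad_h = 0 if h % stride == 0 else stride - (h % stride)  # pad bottom

expand_image = torch.zeros((b, c, h + pad_h, w + pad_w))
expand_image[:, :, 0:h, 0:w] = inputs  # original content stays at the top-left
print(expand_image.shape)  # torch.Size([1, 3, 512, 384])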
class YOLOv3Test(object):
    def __init__(self, configer):
        self.configer = configer
        self.blob_helper = BlobHelper(configer)
        self.det_visualizer = DetVisualizer(configer)
        self.det_parser = DetParser(configer)
        self.det_model_manager = DetModelManager(configer)
        self.det_data_loader = DataLoader(configer)
        self.yolo_target_generator = YOLOTargetGenerator(configer)
        self.yolo_detection_layer = YOLODetectionLayer(configer)
        self.device = torch.device('cpu' if self.configer.get('gpu') is None else 'cuda')
        self.det_net = None
        self._init_model()

    def _init_model(self):
        self.det_net = self.det_model_manager.object_detector()
        self.det_net = RunnerHelper.load_net(self, self.det_net)
        self.det_net.eval()

    def __test_img(self, image_path, json_path, raw_path, vis_path):
        Log.info('Image Path: {}'.format(image_path))
        img = ImageHelper.read_image(image_path,
                                     tool=self.configer.get('data', 'image_tool'),
                                     mode=self.configer.get('data', 'input_mode'))
        ori_img_bgr = ImageHelper.get_cv2_bgr(img, mode=self.configer.get('data', 'input_mode'))
        input_size = self.configer.get('data', 'input_size')
        inputs = self.blob_helper.make_input(img, input_size=input_size, scale=1.0)
        with torch.no_grad():
            inputs = inputs.unsqueeze(0).to(self.device)
            _, _, detections = self.det_net(inputs)

        # decode() and __get_info_tree() both take the network input size.
        batch_detections = self.decode(detections, self.configer, input_size)
        json_dict = self.__get_info_tree(batch_detections[0], ori_img_bgr, input_size)
        image_canvas = self.det_parser.draw_bboxes(ori_img_bgr.copy(), json_dict,
                                                   conf_threshold=self.configer.get('res', 'vis_conf_thre'))
        ImageHelper.save(ori_img_bgr, raw_path)
        ImageHelper.save(image_canvas, vis_path)

        Log.info('Json Path: {}'.format(json_path))
        JsonHelper.save_file(json_dict, json_path)
        return json_dict

    @staticmethod
    def decode(batch_pred_bboxes, configer, input_size):
        box_corner = batch_pred_bboxes.new(batch_pred_bboxes.shape)
        box_corner[:, :, 0] = batch_pred_bboxes[:, :, 0] - batch_pred_bboxes[:, :, 2] / 2
        box_corner[:, :, 1] = batch_pred_bboxes[:, :, 1] - batch_pred_bboxes[:, :, 3] / 2
        box_corner[:, :, 2] = batch_pred_bboxes[:, :, 0] + batch_pred_bboxes[:, :, 2] / 2
        box_corner[:, :, 3] = batch_pred_bboxes[:, :, 1] + batch_pred_bboxes[:, :, 3] / 2

        # clip bounding box
        box_corner[:, :, 0::2] = box_corner[:, :, 0::2].clamp(min=0, max=1.0)
        box_corner[:, :, 1::2] = box_corner[:, :, 1::2].clamp(min=0, max=1.0)
        batch_pred_bboxes[:, :, :4] = box_corner[:, :, :4]
        batch_pred_bboxes[:, :, 0::2] *= input_size[0]
        batch_pred_bboxes[:, :, 1::2] *= input_size[1]

        output = [None for _ in range(len(batch_pred_bboxes))]
        for image_i, image_pred in enumerate(batch_pred_bboxes):
            # Filter out confidence scores below the threshold.
            conf_mask = (image_pred[:, 4] > configer.get('res', 'val_conf_thre')).squeeze()
            image_pred = image_pred[conf_mask]
            # If none remain, process the next image.
            if image_pred.numel() == 0:
                continue

            # Get the score and class with the highest confidence.
            class_conf, class_pred = torch.max(
                image_pred[:, 5:5 + configer.get('data', 'num_classes')], 1, keepdim=True)
            # Detections ordered as (x1, y1, x2, y2, obj_conf, class_conf, class_pred).
            detections = torch.cat((image_pred[:, :5], class_conf.float(), class_pred.float()), 1)
            output[image_i] = DetHelper.cls_nms(detections,
                                                labels=class_pred.squeeze(1),
                                                max_threshold=configer.get('nms', 'max_threshold'))

        return output

    def __get_info_tree(self, detections, image_raw, input_size):
        height, width, _ = image_raw.shape
        json_dict = dict()
        object_list = list()
        if detections is not None:
            for x1, y1, x2, y2, conf, cls_conf, cls_pred in detections:
                object_dict = dict()
                xmin = x1.cpu().item() / input_size[0] * width
                ymin = y1.cpu().item() / input_size[1] * height
                xmax = x2.cpu().item() / input_size[0] * width
                ymax = y2.cpu().item() / input_size[1] * height
                object_dict['bbox'] = [xmin, ymin, xmax, ymax]
                object_dict['label'] = int(cls_pred.cpu().item())
                object_dict['score'] = float('%.2f' % conf.cpu().item())
                object_list.append(object_dict)

        json_dict['objects'] = object_list
        return json_dict

    def debug(self, vis_dir):
        count = 0
        for i, data_dict in enumerate(self.det_data_loader.get_trainloader()):
            inputs = data_dict['img']
            batch_gt_bboxes = data_dict['bboxes']
            batch_gt_labels = data_dict['labels']
            input_size = [inputs.size(3), inputs.size(2)]
            feat_list = list()
            for stride in self.configer.get('network', 'stride_list'):
                feat_list.append(torch.zeros((inputs.size(0), 1,
                                              input_size[1] // stride,
                                              input_size[0] // stride)))

            targets, _, _ = self.yolo_target_generator(feat_list, batch_gt_bboxes,
                                                       batch_gt_labels, input_size)
            targets = targets.to(self.device)
            anchors_list = self.configer.get('gt', 'anchors_list')
            output_list = list()
            be_c = 0
            for f_index, anchors in enumerate(anchors_list):
                feat_stride = self.configer.get('network', 'stride_list')[f_index]
                fm_size = [int(round(border / feat_stride)) for border in input_size]
                num_c = len(anchors) * fm_size[0] * fm_size[1]
                output_list.append(targets[:, be_c:be_c + num_c].contiguous()
                                   .view(targets.size(0), len(anchors), fm_size[1], fm_size[0], -1)
                                   .permute(0, 1, 4, 2, 3).contiguous()
                                   .view(targets.size(0), -1, fm_size[1], fm_size[0]))
                be_c += num_c

            batch_detections = self.decode(self.yolo_detection_layer(output_list)[2],
                                           self.configer, input_size)
            for j in range(inputs.size(0)):
                count = count + 1
                if count > 20:
                    exit(1)

                ori_img_bgr = self.blob_helper.tensor2bgr(inputs[j])
                json_dict = self.__get_info_tree(batch_detections[j], ori_img_bgr, input_size)
                image_canvas = self.det_parser.draw_bboxes(ori_img_bgr.copy(), json_dict,
                                                           conf_threshold=self.configer.get('vis', 'obj_threshold'))
                cv2.imwrite(os.path.join(vis_dir, '{}_{}_vis.png'.format(i, j)), image_canvas)
                cv2.imshow('main', image_canvas)
                cv2.waitKey()
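
# --------------------------------------------------------------------------- #
# Illustrative sketch (not part of the pipeline above): the first stage of
# YOLOv3Test.decode() on two hypothetical predictions -- convert boxes from
# (cx, cy, w, h) to corner form, clip to [0, 1], then drop rows whose
# objectness score falls below the confidence threshold.
# --------------------------------------------------------------------------- #
import torch

conf_thre = 0.5
pred = torch.tensor([[0.5, 0.5, 0.2, 0.4, 0.9],   # kept    (objectness 0.9)
                     [0.1, 0.1, 0.3, 0.3, 0.2]])  # dropped (objectness 0.2)

corners = pred.clone()
corners[:, 0] = pred[:, 0] - pred[:, 2] / 2
corners[:, 1] = pred[:, 1] - pred[:, 3] / 2
corners[:, 2] = pred[:, 0] + pred[:, 2] / 2
corners[:, 3] = pred[:, 1] + pred[:, 3] / 2
corners[:, :4] = corners[:, :4].clamp(min=0, max=1.0)

kept = corners[corners[:, 4] > conf_thre]
print(kept)  # tensor([[0.4000, 0.3000, 0.6000, 0.7000, 0.9000]])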
class SingleShotDetectorTest(object):
    def __init__(self, configer):
        self.configer = configer
        self.det_visualizer = DetVisualizer(configer)
        self.det_parser = DetParser(configer)
        self.det_model_manager = DetModelManager(configer)
        self.det_data_loader = DetDataLoader(configer)
        self.module_utilizer = ModuleUtilizer(configer)
        self.default_boxes = PriorBoxLayer(configer)()
        self.device = torch.device('cpu' if self.configer.get('gpu') is None else 'cuda')
        self.det_net = None

    def init_model(self):
        self.det_net = self.det_model_manager.object_detector()
        self.det_net = self.module_utilizer.load_net(self.det_net)
        self.det_net.eval()

    def __test_img(self, image_path, json_path, raw_path, vis_path):
        Log.info('Image Path: {}'.format(image_path))
        ori_img_rgb = ImageHelper.img2np(ImageHelper.pil_open_rgb(image_path))
        ori_img_bgr = ImageHelper.rgb2bgr(ori_img_rgb)
        inputs = ImageHelper.resize(ori_img_rgb,
                                    tuple(self.configer.get('data', 'input_size')),
                                    Image.CUBIC)
        inputs = ToTensor()(inputs)
        inputs = Normalize(mean=self.configer.get('trans_params', 'mean'),
                           std=self.configer.get('trans_params', 'std'))(inputs)
        with torch.no_grad():
            inputs = inputs.unsqueeze(0).to(self.device)
            bbox, cls = self.det_net(inputs)

        bbox = bbox.cpu().data.squeeze(0)
        cls = F.softmax(cls.cpu().squeeze(0), dim=-1).data
        boxes, lbls, scores = self.__decode(bbox, cls)
        json_dict = self.__get_info_tree(boxes, lbls, scores, ori_img_rgb)
        image_canvas = self.det_parser.draw_bboxes(ori_img_bgr.copy(), json_dict,
                                                   conf_threshold=self.configer.get('vis', 'conf_threshold'))
        cv2.imwrite(vis_path, image_canvas)
        cv2.imwrite(raw_path, ori_img_bgr)

        Log.info('Json Path: {}'.format(json_path))
        JsonHelper.save_file(json_dict, json_path)
        return json_dict

    def __nms(self, bboxes, scores, mode='union'):
        """Non-maximum suppression.

        Args:
            bboxes (tensor): bounding boxes, sized [N, 4].
            scores (tensor): bbox scores, sized [N,].
            mode (str): 'union' or 'min'; the effective mode and the overlap
                threshold are read from the 'nms' section of the config.

        Returns:
            keep (tensor): selected indices.

        Ref:
            https://github.com/rbgirshick/py-faster-rcnn/blob/master/lib/nms/py_cpu_nms.py
        """
        x1 = bboxes[:, 0]
        y1 = bboxes[:, 1]
        x2 = bboxes[:, 2]
        y2 = bboxes[:, 3]
        areas = (x2 - x1) * (y2 - y1)

        _, order = scores.sort(0, descending=True)
        keep = []
        while order.numel() > 0:
            i = order[0]
            keep.append(i)
            if order.numel() == 1:
                break

            xx1 = x1[order[1:]].clamp(min=x1[i])
            yy1 = y1[order[1:]].clamp(min=y1[i])
            xx2 = x2[order[1:]].clamp(max=x2[i])
            yy2 = y2[order[1:]].clamp(max=y2[i])
            w = (xx2 - xx1).clamp(min=0)
            h = (yy2 - yy1).clamp(min=0)
            inter = w * h

            if self.configer.get('nms', 'mode') == 'union':
                ovr = inter / (areas[i] + areas[order[1:]] - inter)
            elif self.configer.get('nms', 'mode') == 'min':
                ovr = inter / areas[order[1:]].clamp(max=areas[i])
            else:
                raise TypeError('Unknown nms mode: %s.' % mode)

            ids = (ovr <= self.configer.get('nms', 'overlap_threshold')).nonzero().squeeze()
            if ids.numel() == 0:
                break

            order = order[ids + 1]

        return torch.LongTensor(keep)

    def __decode(self, loc, conf):
        """Transform predicted loc/conf back to real bbox locations and class labels.

        Args:
            loc (tensor): predicted loc, sized [8732, 4].
            conf (tensor): predicted conf, sized [8732, 21].

        Returns:
            boxes (tensor): bbox locations, sized [#obj, 4].
            labels (tensor): class labels, sized [#obj, 1].
        """
        variances = [0.1, 0.2]
        wh = torch.exp(loc[:, 2:] * variances[1]) * self.default_boxes[:, 2:]
        cxcy = loc[:, :2] * variances[0] * self.default_boxes[:, 2:] + self.default_boxes[:, :2]
        boxes = torch.cat([cxcy - wh / 2, cxcy + wh / 2], 1)  # [8732, 4]

        max_conf, labels = conf.max(1)  # [8732, 1]
        ids = labels.nonzero()
        tmp = ids.cpu().numpy()
        if tmp.__len__() > 0:
            # print('detected %d objs' % tmp.__len__())
            ids = ids.squeeze(1)  # [#boxes,]
            keep = self.__nms(boxes[ids], max_conf[ids])
            pred_bboxes = boxes[ids][keep].cpu().numpy()
            pred_bboxes = np.clip(pred_bboxes, 0, 1)
            pred_labels = labels[ids][keep].cpu().numpy()
            pred_confs = max_conf[ids][keep].cpu().numpy()
            return pred_bboxes, pred_labels, pred_confs
        else:
            Log.info('None object detected!')
            pred_bboxes = list()
            pred_labels = list()
            pred_confs = list()
            return pred_bboxes, pred_labels, pred_confs

    def __get_info_tree(self, box_list, label_list, conf, image_raw):
        height, width, _ = image_raw.shape
        json_dict = dict()
        object_list = list()
        for bbox, label, cf in zip(box_list, label_list, conf):
            if cf < self.configer.get('vis', 'conf_threshold'):
                continue

            object_dict = dict()
            xmin = bbox[0] * width
            xmax = bbox[2] * width
            ymin = bbox[1] * height
            ymax = bbox[3] * height
            object_dict['bbox'] = [xmin, ymin, xmax, ymax]
            object_dict['label'] = label - 1
            object_dict['score'] = cf
            object_list.append(object_dict)

        json_dict['objects'] = object_list
        return json_dict

    def test(self):
        base_dir = os.path.join(self.configer.get('project_dir'),
                                'val/results/det', self.configer.get('dataset'))
        test_img = self.configer.get('test_img')
        test_dir = self.configer.get('test_dir')
        if test_img is None and test_dir is None:
            Log.error('test_img & test_dir not exists.')
            exit(1)

        if test_img is not None and test_dir is not None:
            Log.error('Either test_img or test_dir.')
            exit(1)

        if test_img is not None:
            base_dir = os.path.join(base_dir, 'test_img')
            filename = test_img.rstrip().split('/')[-1]
            json_path = os.path.join(base_dir, 'json',
                                     '{}.json'.format('.'.join(filename.split('.')[:-1])))
            raw_path = os.path.join(base_dir, 'raw', filename)
            vis_path = os.path.join(base_dir, 'vis',
                                    '{}_vis.png'.format('.'.join(filename.split('.')[:-1])))
            if not os.path.exists(os.path.dirname(json_path)):
                os.makedirs(os.path.dirname(json_path))

            if not os.path.exists(os.path.dirname(raw_path)):
                os.makedirs(os.path.dirname(raw_path))

            if not os.path.exists(os.path.dirname(vis_path)):
                os.makedirs(os.path.dirname(vis_path))

            self.__test_img(test_img, json_path, raw_path, vis_path)
        else:
            base_dir = os.path.join(base_dir, 'test_dir', test_dir.rstrip('/').split('/')[-1])
            if not os.path.exists(base_dir):
                os.makedirs(base_dir)

            for filename in FileHelper.list_dir(test_dir):
                image_path = os.path.join(test_dir, filename)
                json_path = os.path.join(base_dir, 'json',
                                         '{}.json'.format('.'.join(filename.split('.')[:-1])))
                raw_path = os.path.join(base_dir, 'raw', filename)
                vis_path = os.path.join(base_dir, 'vis',
                                        '{}_vis.png'.format('.'.join(filename.split('.')[:-1])))
                if not os.path.exists(os.path.dirname(json_path)):
                    os.makedirs(os.path.dirname(json_path))

                if not os.path.exists(os.path.dirname(raw_path)):
                    os.makedirs(os.path.dirname(raw_path))

                if not os.path.exists(os.path.dirname(vis_path)):
                    os.makedirs(os.path.dirname(vis_path))

                self.__test_img(image_path, json_path, raw_path, vis_path)

    def debug(self):
        base_dir = os.path.join(self.configer.get('project_dir'),
                                'vis/results/det', self.configer.get('dataset'), 'debug')
        if not os.path.exists(base_dir):
            os.makedirs(base_dir)

        val_data_loader = self.det_data_loader.get_valloader()
        count = 0
        for i, (inputs, bboxes, labels) in enumerate(val_data_loader):
            for j in range(inputs.size(0)):
                count = count + 1
                if count > 20:
                    exit(1)

                ori_img_rgb = DeNormalize(mean=self.configer.get('trans_params', 'mean'),
                                          std=self.configer.get('trans_params', 'std'))(inputs[j])
                ori_img_rgb = ori_img_rgb.numpy().transpose(1, 2, 0).astype(np.uint8)
                ori_img_bgr = cv2.cvtColor(ori_img_rgb, cv2.COLOR_RGB2BGR)
                eye_matrix = torch.eye(self.configer.get('data', 'num_classes'))
                labels_target = eye_matrix[labels.view(-1)].view(inputs.size(0), -1,
                                                                 self.configer.get('data', 'num_classes'))
                boxes, lbls, scores = self.__decode(bboxes[j], labels_target[j])
                json_dict = self.__get_info_tree(boxes, lbls, scores, ori_img_rgb)
                image_canvas = self.det_parser.draw_bboxes(ori_img_bgr.copy(), json_dict,
                                                           conf_threshold=self.configer.get('vis', 'conf_threshold'))
                cv2.imwrite(os.path.join(base_dir, '{}_{}_vis.png'.format(i, j)), image_canvas)
                cv2.imshow('main', image_canvas)
                cv2.waitKey()
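
# --------------------------------------------------------------------------- #
# Illustrative sketch (not part of the class above): a condensed, standalone
# version of the greedy IoU-based suppression implemented in __nms(), fixed to
# 'union' mode and fed hypothetical boxes. It keeps the highest-scoring box,
# suppresses every remaining box whose IoU with it exceeds the threshold, and
# repeats until no candidates are left.
# --------------------------------------------------------------------------- #
import torch

def nms(bboxes, scores, threshold=0.5):
    x1, y1, x2, y2 = bboxes[:, 0], bboxes[:, 1], bboxes[:, 2], bboxes[:, 3]
    areas = (x2 - x1) * (y2 - y1)
    _, order = scores.sort(0, descending=True)
    keep = []
    while order.numel() > 0:
        i = order[0].item()
        keep.append(i)
        if order.numel() == 1:
            break

        # Intersection of the top box with every remaining box.
        xx1 = x1[order[1:]].clamp(min=x1[i].item())
        yy1 = y1[order[1:]].clamp(min=y1[i].item())
        xx2 = x2[order[1:]].clamp(max=x2[i].item())
        yy2 = y2[order[1:]].clamp(max=y2[i].item())
        inter = (xx2 - xx1).clamp(min=0) * (yy2 - yy1).clamp(min=0)
        iou = inter / (areas[i] + areas[order[1:]] - inter)

        ids = (iou <= threshold).nonzero().squeeze(1)
        if ids.numel() == 0:
            break

        order = order[ids + 1]

    return torch.LongTensor(keep)

boxes = torch.tensor([[0., 0., 10., 10.], [1., 1., 11., 11.], [20., 20., 30., 30.]])
scores = torch.tensor([0.9, 0.8, 0.7])
print(nms(boxes, scores))  # tensor([0, 2]): box 1 overlaps box 0 too heavily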