class FasterRCNN(object):
    """
      The class for Single Shot Detector. Include train, val, test & predict.
    """
    def __init__(self, configer):
        self.configer = configer
        self.batch_time = AverageMeter()
        self.data_time = AverageMeter()
        self.train_losses = AverageMeter()
        self.val_losses = AverageMeter()
        self.det_visualizer = DetVisualizer(configer)
        self.det_model_manager = ModelManager(configer)
        self.det_data_loader = DataLoader(configer)
        self.fr_priorbox_layer = FRPriorBoxLayer(configer)
        self.det_running_score = DetRunningScore(configer)

        self.det_net = None
        self.train_loader = None
        self.val_loader = None
        self.optimizer = None
        self.scheduler = None
        self.runner_state = dict()

        self._init_model()

    def _init_model(self):
        self.det_net = self.det_model_manager.object_detector()
        self.det_net = RunnerHelper.load_net(self, self.det_net)

        self.optimizer, self.scheduler = Trainer.init(self._get_parameters(), self.configer.get('solver'))

        self.train_loader = self.det_data_loader.get_trainloader()
        self.val_loader = self.det_data_loader.get_valloader()
        self.det_loss = self.det_model_manager.get_det_loss()

    def _get_parameters(self):
        lr_1 = []
        lr_2 = []
        params_dict = dict(self.det_net.named_parameters())
        for key, value in params_dict.items():
            if value.requires_grad:
                if 'bias' in key:
                    lr_2.append(value)
                else:
                    lr_1.append(value)

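        # Two parameter groups: weights use the base learning rate, while biases
        # get twice the base rate and no weight decay, following the common
        # Faster R-CNN training convention.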
        params = [{'params': lr_1, 'lr': self.configer.get('solver', 'lr')['base_lr']},
                  {'params': lr_2, 'lr': self.configer.get('solver', 'lr')['base_lr'] * 2., 'weight_decay': 0}]
        return params

    def train(self):
        """
          Train function of every epoch during train phase.
        """
        self.det_net.train()
        start_time = time.time()
        # Advance the epoch counter (used when the learning-rate policy is epoch-based).
        self.runner_state['epoch'] += 1

        for i, data_dict in enumerate(self.train_loader):
            Trainer.update(self, solver_dict=self.configer.get('solver'))
            self.data_time.update(time.time() - start_time)
            # Forward pass.
            data_dict = RunnerHelper.to_device(self, data_dict)
            out = self.det_net(data_dict)
            loss_dict = self.det_loss(out)
            loss = loss_dict['loss'].mean()
            self.train_losses.update(loss.item(), len(DCHelper.tolist(data_dict['meta'])))

            self.optimizer.zero_grad()
            loss.backward()
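            # Clip gradients (threshold 10 here) to guard against exploding gradients.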
            RunnerHelper.clip_grad(self.det_net, 10.)
            self.optimizer.step()

            # Update the vars of the train phase.
            self.batch_time.update(time.time() - start_time)
            start_time = time.time()
            self.runner_state['iters'] += 1

            # Print the log info & reset the states.
            if self.runner_state['iters'] % self.configer.get('solver', 'display_iter') == 0:
                Log.info('Train Epoch: {0}\tTrain Iteration: {1}\t'
                         'Time {batch_time.sum:.3f}s / {2}iters, ({batch_time.avg:.3f})\t'
                         'Data load {data_time.sum:.3f}s / {2}iters, ({data_time.avg:.3f})\n'
                         'Learning rate = {3}\tLoss = {loss.val:.8f} (ave = {loss.avg:.8f})\n'.format(
                    self.runner_state['epoch'], self.runner_state['iters'],
                    self.configer.get('solver', 'display_iter'),
                    RunnerHelper.get_lr(self.optimizer), batch_time=self.batch_time,
                    data_time=self.data_time, loss=self.train_losses))
                self.batch_time.reset()
                self.data_time.reset()
                self.train_losses.reset()

            if self.configer.get('solver', 'lr')['metric'] == 'iters' \
                    and self.runner_state['iters'] == self.configer.get('solver', 'max_iters'):
                break

            # Check to val the current model.
            if self.runner_state['iters'] % self.configer.get('solver', 'test_interval') == 0:
                self.val()

    def val(self):
        """
          Validation function during the train phase.
        """
        self.det_net.eval()
        start_time = time.time()
        with torch.no_grad():
            for j, data_dict in enumerate(self.val_loader):
                # Forward pass.
                data_dict = RunnerHelper.to_device(self, data_dict)
                out = self.det_net(data_dict)
                loss_dict = self.det_loss(out)
                # Compute the loss of the train batch & backward.
                loss = loss_dict['loss'].mean()
                out_dict, _ = RunnerHelper.gather(self, out)
                self.val_losses.update(loss.item(), len(DCHelper.tolist(data_dict['meta'])))
                test_indices_and_rois, test_roi_locs, test_roi_scores, test_rois_num = out_dict['test_group']
                batch_detections = FastRCNNTest.decode(test_roi_locs,
                                                       test_roi_scores,
                                                       test_indices_and_rois,
                                                       test_rois_num,
                                                       self.configer,
                                                       DCHelper.tolist(data_dict['meta']))
                batch_pred_bboxes = self.__get_object_list(batch_detections)
                self.det_running_score.update(batch_pred_bboxes,
                                              [item['ori_bboxes'] for item in DCHelper.tolist(data_dict['meta'])],
                                              [item['ori_labels'] for item in DCHelper.tolist(data_dict['meta'])])

                # Update the vars of the val phase.
                self.batch_time.update(time.time() - start_time)
                start_time = time.time()

            RunnerHelper.save_net(self, self.det_net, iters=self.runner_state['iters'])
            # Print the log info & reset the states.
            Log.info(
                'Test Time {batch_time.sum:.3f}s, ({batch_time.avg:.3f})\t'
                'Loss {loss.avg:.8f}\n'.format(
                    batch_time=self.batch_time, loss=self.val_losses))
            Log.info('Val mAP: {}\n'.format(self.det_running_score.get_mAP()))
            self.det_running_score.reset()
            self.batch_time.reset()
            self.val_losses.reset()
            self.det_net.train()

    def __get_object_list(self, batch_detections):
        batch_pred_bboxes = list()
        for idx, detections in enumerate(batch_detections):
            object_list = list()
            if detections is not None:
                for x1, y1, x2, y2, conf, cls_pred in detections:
                    cf = float('%.2f' % conf.item())
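                    # Shift by one so foreground classes are 0-indexed (index 0 is the background class in this pipeline).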
                    cls_pred = int(cls_pred.cpu().item()) - 1
                    object_list.append([x1.item(), y1.item(), x2.item(), y2.item(), cls_pred, cf])

            batch_pred_bboxes.append(object_list)

        return batch_pred_bboxes
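
# A minimal usage sketch, kept as comments because the surrounding wiring is
# project-specific: `Configer`, the hypes path, and the seeding of
# `runner_state['epoch']` / `runner_state['iters']` are assumptions about how
# the project's controller normally drives this runner, not verified API.
#
#     configer = Configer(hypes_file='hypes/det/faster_rcnn.json')
#     runner = FasterRCNN(configer)
#     runner.runner_state.update(epoch=0, iters=0)
#     while runner.runner_state['epoch'] < configer.get('solver', 'max_epoch'):
#         runner.train()
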
class YOLOv3Test(object):
    def __init__(self, configer):
        self.configer = configer
        self.det_visualizer = DetVisualizer(configer)
        self.det_parser = DetParser(configer)
        self.det_model_manager = ModelManager(configer)
        self.test_loader = TestDataLoader(configer)
        self.device = torch.device('cpu' if self.configer.get('gpu') is None else 'cuda')
        self.det_net = None

        self._init_model()

    def _init_model(self):
        self.det_net = self.det_model_manager.object_detector()
        self.det_net = RunnerHelper.load_net(self, self.det_net)
        self.det_net.eval()

    def test(self, test_dir, out_dir):
        for _, data_dict in enumerate(self.test_loader.get_testloader(test_dir=test_dir)):
            data_dict['testing'] = True
            detections = self.det_net(data_dict)
            meta_list = DCHelper.tolist(data_dict['meta'])
            batch_detections = self.decode(detections, self.configer, meta_list)
            for i in range(len(meta_list)):
                ori_img_bgr = ImageHelper.read_image(meta_list[i]['img_path'], tool='cv2', mode='BGR')
                json_dict = self.__get_info_tree(batch_detections[i])
                image_canvas = self.det_parser.draw_bboxes(ori_img_bgr.copy(), json_dict,
                                                           conf_threshold=self.configer.get('res', 'vis_conf_thre'))
                ImageHelper.save(image_canvas,
                                 save_path=os.path.join(out_dir, 'vis/{}.png'.format(meta_list[i]['filename'])))

                Log.info('Json Path: {}'.format(os.path.join(out_dir, 'json/{}.json'.format(meta_list[i]['filename']))))
                JsonHelper.save_file(json_dict,
                                     save_path=os.path.join(out_dir, 'json/{}.json'.format(meta_list[i]['filename'])))

    @staticmethod
    def decode(batch_detections, configer, meta):
        output = [None for _ in range(len(meta))]
        for i in range(len(meta)):
            image_pred = batch_detections[i]
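            # Predicted boxes are normalized to [0, 1]; scale x-coordinates by the
            # original image width and y-coordinates by its height.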
            image_pred[:, 0] *= meta[i]['ori_img_size'][0]
            image_pred[:, 1] *= meta[i]['ori_img_size'][1]
            image_pred[:, 2] *= meta[i]['ori_img_size'][0]
            image_pred[:, 3] *= meta[i]['ori_img_size'][1]
            # Filter out confidence scores below threshold
            image_pred = image_pred[image_pred[:, 4] > configer.get('res', 'val_conf_thre')]
            # If none are remaining => process next image
            if image_pred.numel() == 0:
                continue

            # Get score and class with highest confidence
            class_conf, class_pred = torch.max(image_pred[:, 5:5 + configer.get('data', 'num_classes')], 1, keepdim=True)
            # Detections ordered as (x1, y1, x2, y2, obj_conf, class_conf, class_pred)
            detections = torch.cat((image_pred[:, :5], class_conf.float(), class_pred.float()), 1)
            valid_ind = DetHelper.cls_nms(detections[:, :5], labels=class_pred.squeeze(1),
                                          max_threshold=configer.get('res', 'nms')['max_threshold'], return_ind=True)
            output[i] = detections[valid_ind]

        return output

    def __get_info_tree(self, detections):
        json_dict = dict()
        object_list = list()
        if detections is not None:
            for x1, y1, x2, y2, conf, cls_conf, cls_pred in detections:
                object_dict = dict()
                object_dict['bbox'] = [x1.item(), y1.item(), x2.item(), y2.item()]
                object_dict['label'] = int(cls_pred.item())
                object_dict['score'] = float('%.2f' % conf.item())
                object_list.append(object_dict)

        json_dict['objects'] = object_list

        return json_dict


class FastRCNNTest(object):
    def __init__(self, configer):
        self.configer = configer
        self.blob_helper = BlobHelper(configer)
        self.det_visualizer = DetVisualizer(configer)
        self.det_parser = DetParser(configer)
        self.det_model_manager = ModelManager(configer)
        self.test_loader = TestDataLoader(configer)
        self.roi_sampler = FRROISampler(configer)
        self.rpn_target_generator = RPNTargetAssigner(configer)
        self.fr_priorbox_layer = FRPriorBoxLayer(configer)
        self.fr_roi_generator = FRROIGenerator(configer)
        self.device = torch.device(
            'cpu' if self.configer.get('gpu') is None else 'cuda')
        self.det_net = None

        self._init_model()

    def _init_model(self):
        self.det_net = self.det_model_manager.object_detector()
        self.det_net = RunnerHelper.load_net(self, self.det_net)
        self.det_net.eval()

    def test(self, test_dir, out_dir):
        for _, data_dict in enumerate(
                self.test_loader.get_testloader(test_dir=test_dir)):
            data_dict['testing'] = True
            data_dict = RunnerHelper.to_device(self, data_dict)
            out_dict = self.det_net(data_dict)
            meta_list = DCHelper.tolist(data_dict['meta'])
            test_indices_and_rois, test_roi_locs, test_roi_scores, test_rois_num = out_dict[
                'test_group']
            batch_detections = self.decode(test_roi_locs, test_roi_scores,
                                           test_indices_and_rois,
                                           test_rois_num, self.configer,
                                           meta_list)
            for i in range(len(meta_list)):
                ori_img_bgr = ImageHelper.read_image(meta_list[i]['img_path'],
                                                     tool='cv2',
                                                     mode='BGR')
                json_dict = self.__get_info_tree(batch_detections[i])
                image_canvas = self.det_parser.draw_bboxes(
                    ori_img_bgr.copy(),
                    json_dict,
                    conf_threshold=self.configer.get('res', 'vis_conf_thre'))
                ImageHelper.save(image_canvas,
                                 save_path=os.path.join(
                                     out_dir, 'vis/{}.png'.format(
                                         meta_list[i]['filename'])))

                Log.info('Json Path: {}'.format(
                    os.path.join(
                        out_dir,
                        'json/{}.json'.format(meta_list[i]['filename']))))
                JsonHelper.save_file(json_dict,
                                     save_path=os.path.join(
                                         out_dir, 'json/{}.json'.format(
                                             meta_list[i]['filename'])))

    @staticmethod
    def decode(roi_locs, roi_scores, indices_and_rois, test_rois_num, configer,
               metas):
        num_classes = configer.get('data', 'num_classes')
        mean = torch.Tensor(configer.get(
            'roi', 'loc_normalize_mean')).repeat(num_classes)[None]
        std = torch.Tensor(configer.get(
            'roi', 'loc_normalize_std')).repeat(num_classes)[None]
        mean = mean.to(roi_locs.device)
        std = std.to(roi_locs.device)

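        # Undo the per-class loc normalization (mean/std) applied to the
        # regression targets during training.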
        roi_locs = (roi_locs * std + mean)
        roi_locs = roi_locs.contiguous().view(-1, num_classes, 4)

        rois = indices_and_rois[:, 1:]
        rois = rois.contiguous().view(-1, 1, 4).expand_as(roi_locs)
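        # Decode offsets back into corner boxes: rois are (x1, y1, x2, y2), so
        # (rois[..., 2:] - rois[..., :2]) is the ROI width/height and the term
        # below recovers the predicted box center before converting to corners.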
        wh = torch.exp(roi_locs[:, :, 2:]) * (rois[:, :, 2:] - rois[:, :, :2])
        cxcy = roi_locs[:, :, :2] * (rois[:, :, 2:] - rois[:, :, :2]) + (
            rois[:, :, :2] + rois[:, :, 2:]) / 2
        dst_bbox = torch.cat([cxcy - wh / 2, cxcy + wh / 2], 2)  # [num_rois, num_classes, 4]

        if configer.get('phase') != 'debug':
            cls_prob = F.softmax(roi_scores, dim=1)
        else:
            cls_prob = roi_scores

        cls_label = torch.LongTensor([i for i in range(num_classes)])\
            .contiguous().view(1, num_classes).repeat(indices_and_rois.size(0), 1).to(roi_locs.device)

        output = [None for _ in range(test_rois_num.size(0))]
        start_index = 0
        for i in range(test_rois_num.size(0)):
            tmp_dst_bbox = dst_bbox[start_index:start_index + test_rois_num[i]]
            tmp_dst_bbox[:, :, 0::2] = tmp_dst_bbox[:, :, 0::2].clamp(
                min=0, max=metas[i]['border_wh'][0] - 1)
            tmp_dst_bbox[:, :, 1::2] = tmp_dst_bbox[:, :, 1::2].clamp(
                min=0, max=metas[i]['border_wh'][1] - 1)
            tmp_dst_bbox *= (metas[i]['ori_img_size'][0] /
                             metas[i]['border_wh'][0])

            tmp_cls_prob = cls_prob[start_index:start_index + test_rois_num[i]]
            tmp_cls_label = cls_label[start_index:start_index +
                                      test_rois_num[i]]
            start_index += test_rois_num[i]

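            # Keep only confident predictions for foreground classes (label 0 is background).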
            mask = (tmp_cls_prob > configer.get(
                'res', 'val_conf_thre')) & (tmp_cls_label > 0)

            tmp_dst_bbox = tmp_dst_bbox[mask].contiguous().view(-1, 4)
            if tmp_dst_bbox.numel() == 0:
                continue

            tmp_cls_prob = tmp_cls_prob[mask].contiguous().view(
                -1, ).unsqueeze(1)
            tmp_cls_label = tmp_cls_label[mask].contiguous().view(
                -1, ).unsqueeze(1)

            valid_preds = torch.cat(
                (tmp_dst_bbox, tmp_cls_prob.float(), tmp_cls_label.float()), 1)

            valid_ind = DetHelper.cls_nms(valid_preds[:, :5],
                                          labels=valid_preds[:, 5],
                                          max_threshold=configer.get(
                                              'res', 'nms')['max_threshold'],
                                          return_ind=True)

            valid_preds = valid_preds[valid_ind]
            output[i] = valid_preds

        return output

    def __get_info_tree(self, detections):
        json_dict = dict()
        object_list = list()
        if detections is not None:
            for x1, y1, x2, y2, conf, cls_pred in detections:
                object_dict = dict()
                object_dict['bbox'] = [
                    x1.item(), y1.item(),
                    x2.item(), y2.item()
                ]
                object_dict['label'] = int(cls_pred.cpu().item()) - 1
                object_dict['score'] = float('%.2f' % conf.cpu().item())
                object_list.append(object_dict)

        json_dict['objects'] = object_list

        return json_dict


class SingleShotDetectorTest(object):
    def __init__(self, configer):
        self.configer = configer
        self.blob_helper = BlobHelper(configer)
        self.det_visualizer = DetVisualizer(configer)
        self.det_parser = DetParser(configer)
        self.det_model_manager = ModelManager(configer)
        self.test_loader = TestDataLoader(configer)
        self.device = torch.device(
            'cpu' if self.configer.get('gpu') is None else 'cuda')
        self.det_net = None

        self._init_model()

    def _init_model(self):
        self.det_net = self.det_model_manager.object_detector()
        self.det_net = RunnerHelper.load_net(self, self.det_net)
        self.det_net.eval()

    def test(self, test_dir, out_dir):
        for _, data_dict in enumerate(
                self.test_loader.get_testloader(test_dir=test_dir)):
            data_dict['testing'] = True
            out_dict = self.det_net(data_dict)
            meta_list = DCHelper.tolist(data_dict['meta'])
            batch_detections = self.decode(out_dict['loc'], out_dict['conf'],
                                           self.configer, meta_list)
            for i in range(len(meta_list)):
                ori_img_bgr = ImageHelper.read_image(meta_list[i]['img_path'],
                                                     tool='cv2',
                                                     mode='BGR')
                json_dict = self.__get_info_tree(batch_detections[i])
                image_canvas = self.det_parser.draw_bboxes(
                    ori_img_bgr.copy(),
                    json_dict,
                    conf_threshold=self.configer.get('res', 'vis_conf_thre'))
                ImageHelper.save(image_canvas,
                                 save_path=os.path.join(
                                     out_dir, 'vis/{}.png'.format(
                                         meta_list[i]['filename'])))

                Log.info('Json Path: {}'.format(
                    os.path.join(
                        out_dir,
                        'json/{}.json'.format(meta_list[i]['filename']))))
                JsonHelper.save_file(json_dict,
                                     save_path=os.path.join(
                                         out_dir, 'json/{}.json'.format(
                                             meta_list[i]['filename'])))

    @staticmethod
    def decode(loc, conf, configer, meta):
        batch_size, num_priors, _ = loc.size()
        loc = loc.unsqueeze(2).repeat(1, 1,
                                      configer.get('data', 'num_classes'), 1)
        loc = loc.contiguous().view(loc.size(0), -1, 4)
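        # Every prior's box is repeated once per class above so each row of `loc`
        # lines up with one (prior, class) confidence entry built below.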

        labels = torch.Tensor([
            i for i in range(configer.get('data', 'num_classes'))
        ]).to(loc.device)
        labels = labels.view(1, 1, -1,
                             1).repeat(batch_size, num_priors, 1,
                                       1).contiguous().view(batch_size, -1, 1)
        conf = conf.contiguous().view(batch_size, -1, 1)

        # max_conf, labels = conf.max(2, keepdim=True)  # [b, 8732,1]
        predictions = torch.cat((loc.float(), conf.float(), labels.float()), 2)
        output = [None for _ in range(len(predictions))]
        for i, image_pred in enumerate(predictions):
            image_pred[:, 0] *= meta[i]['ori_img_size'][0]
            image_pred[:, 1] *= meta[i]['ori_img_size'][1]
            image_pred[:, 2] *= meta[i]['ori_img_size'][0]
            image_pred[:, 3] *= meta[i]['ori_img_size'][1]
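            # Drop background predictions (label index 0 is assumed to be the background class).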
            ids = labels[i].squeeze(1).nonzero().contiguous().view(-1, )
            if ids.numel() == 0:
                continue

            valid_preds = image_pred[ids]
            _, order = valid_preds[:, 4].sort(0, descending=True)
            order = order[:configer.get('res', 'nms')['pre_nms']]
            valid_preds = valid_preds[order]
            valid_preds = valid_preds[
                valid_preds[:, 4] > configer.get('res', 'val_conf_thre')]
            if valid_preds.numel() == 0:
                continue

            valid_ind = DetHelper.cls_nms(
                valid_preds[:, :5],
                labels=valid_preds[:, 5],
                max_threshold=configer.get('res', 'nms')['max_threshold'],
                cls_keep_num=configer.get('res', 'cls_keep_num'),
                return_ind=True)

            valid_preds = valid_preds[valid_ind]
            _, order = valid_preds[:, 4].sort(0, descending=True)
            order = order[:configer.get('res', 'max_per_image')]
            output[i] = valid_preds[order]

        return output

    def __get_info_tree(self, detections):
        json_dict = dict()
        object_list = list()
        if detections is not None:
            for x1, y1, x2, y2, conf, cls_pred in detections:
                object_dict = dict()
                object_dict['bbox'] = [
                    x1.item(), y1.item(),
                    x2.item(), y2.item()
                ]
                object_dict['label'] = int(cls_pred.cpu().item()) - 1
                object_dict['score'] = float('%.2f' % conf.cpu().item())

                object_list.append(object_dict)

        json_dict['objects'] = object_list

        return json_dict
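
# A hedged usage sketch for the *Test runners above; the Configer construction
# and the directory layout are assumptions (the project's CLI normally wires
# these up), so the snippet is kept as comments:
#
#     tester = SingleShotDetectorTest(configer)
#     tester.test(test_dir='data/test_images', out_dir='out/ssd')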