Example 1
    def decode(roi_locs, roi_scores, indices_and_rois, test_rois_num, configer, metas):
        num_classes = configer.get('data', 'num_classes')
        mean = torch.Tensor(configer.get('roi', 'loc_normalize_mean')).repeat(num_classes)[None]
        std = torch.Tensor(configer.get('roi', 'loc_normalize_std')).repeat(num_classes)[None]
        mean = mean.to(roi_locs.device)
        std = std.to(roi_locs.device)

        roi_locs = (roi_locs * std + mean)
        roi_locs = roi_locs.contiguous().view(-1, num_classes, 4)

        rois = indices_and_rois[:, 1:]
        rois = rois.contiguous().view(-1, 1, 4).expand_as(roi_locs)
        wh = torch.exp(roi_locs[:, :, 2:]) * (rois[:, :, 2:] - rois[:, :, :2])
        cxcy = roi_locs[:, :, :2] * (rois[:, :, 2:] - rois[:, :, :2]) + (rois[:, :, :2] + rois[:, :, 2:]) / 2
        dst_bbox = torch.cat([cxcy - wh / 2, cxcy + wh / 2], 2)  # [N, num_classes, 4]

        if configer.get('phase') != 'debug':
            cls_prob = F.softmax(roi_scores, dim=1)
        else:
            cls_prob = roi_scores

        cls_label = torch.LongTensor([i for i in range(num_classes)])\
            .contiguous().view(1, num_classes).repeat(indices_and_rois.size(0), 1).to(roi_locs.device)

        output = [None for _ in range(test_rois_num.size(0))]
        start_index = 0
        for i in range(test_rois_num.size(0)):
            tmp_dst_bbox = dst_bbox[start_index:start_index+test_rois_num[i]]
            tmp_dst_bbox[:, :, 0::2] = tmp_dst_bbox[:, :, 0::2].clamp(min=0, max=metas[i]['border_size'][0] - 1)
            tmp_dst_bbox[:, :, 1::2] = tmp_dst_bbox[:, :, 1::2].clamp(min=0, max=metas[i]['border_size'][1] - 1)
            tmp_dst_bbox *= (metas[i]['ori_img_size'][0] / metas[i]['border_size'][0])

            tmp_cls_prob = cls_prob[start_index:start_index+test_rois_num[i]]
            tmp_cls_label = cls_label[start_index:start_index+test_rois_num[i]]
            start_index += test_rois_num[i]

            mask = (tmp_cls_prob > configer.get('res', 'val_conf_thre')) & (tmp_cls_label > 0)

            tmp_dst_bbox = tmp_dst_bbox[mask].contiguous().view(-1, 4)
            if tmp_dst_bbox.numel() == 0:
                continue

            tmp_cls_prob = tmp_cls_prob[mask].contiguous().view(-1,).unsqueeze(1)
            tmp_cls_label = tmp_cls_label[mask].contiguous().view(-1,).unsqueeze(1)

            valid_preds = torch.cat((tmp_dst_bbox, tmp_cls_prob.float(), tmp_cls_label.float()), 1)

            valid_ind = DetHelper.cls_nms(valid_preds[:, :5],
                                          labels=valid_preds[:, 5],
                                          max_threshold=configer.get('res', 'nms')['max_threshold'],
                                          return_ind=True)

            valid_preds = valid_preds[valid_ind]
            output[i] = valid_preds

        return output
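
Every example on this page delegates suppression to `DetHelper.cls_nms`, whose implementation lives in the source repository and is not shown here. As a rough, minimal sketch of what a class-aware NMS with a `return_ind`-style interface might look like (the helper's real signature and options are the repo's own; `cls_nms_sketch`, its argument layout, and the threshold are assumptions for illustration):

    import torch

    def cls_nms_sketch(preds, labels, max_threshold=0.45):
        """Per-class NMS sketch: preds is [N, 5] = (x1, y1, x2, y2, score); returns kept row indices."""
        keep_all = []
        for cls in labels.unique():
            idx = (labels == cls).nonzero().view(-1)
            boxes, scores = preds[idx, :4], preds[idx, 4]
            order = scores.argsort(descending=True)
            while order.numel() > 0:
                i = order[0]
                keep_all.append(idx[i])
                if order.numel() == 1:
                    break
                rest = order[1:]
                # IoU of the highest-scoring box against the remaining ones of the same class
                lt = torch.max(boxes[i, :2], boxes[rest, :2])
                rb = torch.min(boxes[i, 2:], boxes[rest, 2:])
                wh = (rb - lt).clamp(min=0)
                inter = wh[:, 0] * wh[:, 1]
                area_i = (boxes[i, 2] - boxes[i, 0]) * (boxes[i, 3] - boxes[i, 1])
                area_r = (boxes[rest, 2] - boxes[rest, 0]) * (boxes[rest, 3] - boxes[rest, 1])
                order = rest[inter / (area_i + area_r - inter) <= max_threshold]
        return torch.stack(keep_all) if keep_all else torch.empty(0, dtype=torch.long)

    preds = torch.tensor([[0., 0., 10., 10., 0.9],
                          [1., 1., 10., 10., 0.8],
                          [20., 20., 30., 30., 0.7]])
    labels = torch.tensor([1, 1, 2])
    print(cls_nms_sketch(preds, labels))  # tensor([0, 2]): the second box overlaps the first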
Example 2
    def __call__(self, feat_list, data_dict):
        device = feat_list[0].device
        gt_bboxes = data_dict['bboxes']
        gt_labels = data_dict['labels']
        input_size = [data_dict['img'].size(3), data_dict['img'].size(2)]
        anchor_boxes = self.fr_proirbox_layer(feat_list, input_size).to(device)
        target_bboxes = list()
        target_labels = list()
        for i in range(len(gt_bboxes)):
            if gt_bboxes[i] is None or len(gt_bboxes[i]) == 0:
                loc = torch.zeros_like(anchor_boxes)
                # keep targets on the same device so torch.stack below doesn't mix CPU/GPU tensors
                conf = torch.zeros((anchor_boxes.size(0), )).long().to(device)

            else:
                iou = DetHelper.bbox_iou(
                    gt_bboxes[i],
                    torch.cat([
                        anchor_boxes[:, :2] - anchor_boxes[:, 2:] / 2,
                        anchor_boxes[:, :2] + anchor_boxes[:, 2:] / 2
                    ], 1))  # [#obj,8732]

                prior_box_iou, max_idx = iou.max(0, keepdim=False)  # [8732,]
                boxes = gt_bboxes[i][max_idx]  # [8732,4]
                variances = [0.1, 0.2]
                cxcy = (boxes[:, :2] + boxes[:, 2:]) / 2 - anchor_boxes[:, :2]  # [8732,2]
                cxcy /= variances[0] * anchor_boxes[:, 2:]
                wh = (boxes[:, 2:] - boxes[:, :2]) / anchor_boxes[:, 2:]  # [8732,2]
                wh = torch.log(wh) / variances[1]
                loc = torch.cat([cxcy, wh], 1)  # [8732,4]
                conf = 1 + gt_labels[i][max_idx]  # [8732,], background class = 0
                if self.configer.get('anchor', 'anchor_method') == 'retina':
                    conf[prior_box_iou < self.configer.get('anchor', 'iou_threshold')] = -1
                    conf[prior_box_iou < self.configer.get('anchor', 'iou_threshold') - 0.1] = 0
                else:
                    conf[prior_box_iou < self.configer.get('anchor', 'iou_threshold')] = 0  # background

                # Every prior box gets the class label of its best-matching GT; labels below
                # the IoU threshold were zeroed out (background) above. Also force each GT's
                # best-matching prior to carry that GT's label:
                class_iou, prior_box_idx = iou.max(1, keepdim=False)
                conf_class_idx = prior_box_idx.cpu().numpy()
                conf[conf_class_idx] = gt_labels[i] + 1

            target_bboxes.append(loc)
            target_labels.append(conf)

        return torch.stack(target_bboxes, 0), torch.stack(target_labels, 0)

    def decode(bbox, conf, default_boxes, configer, input_size):
        loc = bbox
        if configer.get('phase') != 'debug':
            conf = F.softmax(conf, dim=-1)

        default_boxes = default_boxes.unsqueeze(0).repeat(loc.size(0), 1, 1).to(bbox.device)

        variances = [0.1, 0.2]
        wh = torch.exp(loc[:, :, 2:] * variances[1]) * default_boxes[:, :, 2:]
        cxcy = loc[:, :, :2] * variances[0] * default_boxes[:, :, 2:] + default_boxes[:, :, :2]
        boxes = torch.cat([cxcy - wh / 2, cxcy + wh / 2], 2)  # [b, 8732,4]

        batch_size, num_priors, _ = boxes.size()
        boxes = boxes.unsqueeze(2).repeat(1, 1, configer.get('data', 'num_classes'), 1)
        boxes = boxes.contiguous().view(boxes.size(0), -1, 4)

        # clip bounding box
        boxes[:, :, 0::2] = boxes[:, :, 0::2].clamp(min=0, max=input_size[0] - 1)
        boxes[:, :, 1::2] = boxes[:, :, 1::2].clamp(min=0, max=input_size[1] - 1)

        labels = torch.Tensor([i for i in range(configer.get('data', 'num_classes'))]).to(boxes.device)
        labels = labels.view(1, 1, -1, 1).repeat(batch_size, num_priors, 1, 1).contiguous().view(batch_size, -1, 1)
        max_conf = conf.contiguous().view(batch_size, -1, 1)

        # max_conf, labels = conf.max(2, keepdim=True)  # [b, 8732,1]
        predictions = torch.cat((boxes, max_conf.float(), labels.float()), 2)
        output = [None for _ in range(len(predictions))]
        for image_i, image_pred in enumerate(predictions):
            ids = labels[image_i].squeeze(1).nonzero().contiguous().view(-1,)
            if ids.numel() == 0:
                continue

            valid_preds = image_pred[ids]
            _, order = valid_preds[:, 4].sort(0, descending=True)
            order = order[:configer.get('res', 'nms')['pre_nms']]
            valid_preds = valid_preds[order]
            valid_preds = valid_preds[valid_preds[:, 4] > configer.get('res', 'val_conf_thre')]
            if valid_preds.numel() == 0:
                continue

            valid_preds = DetHelper.cls_nms(valid_preds[:, :6],
                                            labels=valid_preds[:, 5],
                                            max_threshold=configer.get('res', 'nms')['max_threshold'],
                                            cls_keep_num=configer.get('res', 'cls_keep_num'))

            _, order = valid_preds[:, 4].sort(0, descending=True)
            order = order[:configer.get('res', 'max_per_image')]
            output[image_i] = valid_preds[order]

        return output
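
The encoding in `__call__` and the `decode` directly above are inverses of each other up to the clipping step. A quick numeric round-trip check of the variance-scaled formulas (the box values below are made up for illustration):

    import torch

    variances = [0.1, 0.2]
    # one default box in (cx, cy, w, h) form and one GT box in corner form
    default = torch.tensor([[0.5, 0.5, 0.2, 0.2]])
    gt = torch.tensor([[0.45, 0.40, 0.65, 0.70]])

    # encode, as in __call__ above
    cxcy = ((gt[:, :2] + gt[:, 2:]) / 2 - default[:, :2]) / (variances[0] * default[:, 2:])
    wh = torch.log((gt[:, 2:] - gt[:, :2]) / default[:, 2:]) / variances[1]
    loc = torch.cat([cxcy, wh], 1)

    # decode, as in decode above
    wh2 = torch.exp(loc[:, 2:] * variances[1]) * default[:, 2:]
    cxcy2 = loc[:, :2] * variances[0] * default[:, 2:] + default[:, :2]
    recovered = torch.cat([cxcy2 - wh2 / 2, cxcy2 + wh2 / 2], 1)
    assert torch.allclose(recovered, gt, atol=1e-6)  # round-trip recovers the GT corners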
Example 4
    def decode(batch_pred_bboxes, configer):
        box_corner = batch_pred_bboxes.new(batch_pred_bboxes.shape)
        box_corner[:, :, 0] = batch_pred_bboxes[:, :, 0] - batch_pred_bboxes[:, :, 2] / 2
        box_corner[:, :, 1] = batch_pred_bboxes[:, :, 1] - batch_pred_bboxes[:, :, 3] / 2
        box_corner[:, :, 2] = batch_pred_bboxes[:, :, 0] + batch_pred_bboxes[:, :, 2] / 2
        box_corner[:, :, 3] = batch_pred_bboxes[:, :, 1] + batch_pred_bboxes[:, :, 3] / 2

        # clip bounding box
        box_corner[:, :, 0::2] = box_corner[:, :, 0::2].clamp(min=0, max=1.0)
        box_corner[:, :, 1::2] = box_corner[:, :, 1::2].clamp(min=0, max=1.0)

        batch_pred_bboxes[:, :, :4] = box_corner[:, :, :4]

        output = [None for _ in range(len(batch_pred_bboxes))]
        for image_i, image_pred in enumerate(batch_pred_bboxes):
            # Filter out confidence scores below threshold
            conf_mask = (image_pred[:, 4] > configer.get('vis', 'obj_threshold')).squeeze()
            image_pred = image_pred[conf_mask]
            # If none are remaining => process next image
            if image_pred.numel() == 0:
                continue

            # Get score and class with highest confidence
            class_conf, class_pred = torch.max(image_pred[:, 5:5 + configer.get('data', 'num_classes')], 1, keepdim=True)
            # Detections ordered as (x1, y1, x2, y2, obj_conf, class_conf, class_pred)
            detections = torch.cat(
                (image_pred[:, :5], class_conf.float(), class_pred.float()), 1)
            keep_index = DetHelper.cls_nms(image_pred[:, :4],
                                           scores=image_pred[:, 4],
                                           labels=class_pred.squeeze(1),
                                           nms_threshold=configer.get('nms', 'max_threshold'),
                                           iou_mode=configer.get('nms', 'mode'),
                                           nms_mode='cython_nms')

            output[image_i] = detections[keep_index]

        return output
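
All of these functions read hyperparameters through a `configer` object from the source repository. For poking at the snippets in isolation, a minimal stand-in that supports the nested `get(...)` calls used on this page could look like this (the keys mirror those calls, but every value is illustrative):

    class MockConfiger:
        """Minimal stand-in for the repo's configer: get('a', 'b') -> config['a']['b']."""
        def __init__(self, config):
            self.config = config

        def get(self, *keys):
            value = self.config
            for key in keys:
                value = value[key]
            return value

    configer = MockConfiger({
        'phase': 'test',
        'data': {'num_classes': 21},
        'vis': {'obj_threshold': 0.5, 'conf_threshold': 0.5},
        'nms': {'max_threshold': 0.45, 'overlap_threshold': 0.45, 'mode': 'union'},
        'res': {'val_conf_thre': 0.05, 'cls_keep_num': 100, 'max_per_image': 100,
                'nms': {'max_threshold': 0.45, 'pre_nms': 1000}},
    })
    assert configer.get('data', 'num_classes') == 21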
Example 5
    def decode(loc, conf, configer, meta):
        batch_size, num_priors, _ = loc.size()
        loc = loc.unsqueeze(2).repeat(1, 1, configer.get('data', 'num_classes'), 1)
        loc = loc.contiguous().view(loc.size(0), -1, 4)

        labels = torch.Tensor([i for i in range(configer.get('data', 'num_classes'))]).to(loc.device)
        labels = labels.view(1, 1, -1, 1).repeat(batch_size, num_priors, 1, 1).contiguous().view(batch_size, -1, 1)
        conf = conf.contiguous().view(batch_size, -1, 1)

        # max_conf, labels = conf.max(2, keepdim=True)  # [b, 8732,1]
        predictions = torch.cat((loc.float(), conf.float(), labels.float()), 2)
        output = [None for _ in range(len(predictions))]
        for i, image_pred in enumerate(predictions):
            image_pred[:, 0] *= meta[i]['ori_img_size'][0]
            image_pred[:, 1] *= meta[i]['ori_img_size'][1]
            image_pred[:, 2] *= meta[i]['ori_img_size'][0]
            image_pred[:, 3] *= meta[i]['ori_img_size'][1]
            ids = labels[i].squeeze(1).nonzero().contiguous().view(-1, )
            if ids.numel() == 0:
                continue

            valid_preds = image_pred[ids]
            _, order = valid_preds[:, 4].sort(0, descending=True)
            order = order[:configer.get('res', 'nms')['pre_nms']]
            valid_preds = valid_preds[order]
            valid_preds = valid_preds[valid_preds[:, 4] > configer.get('res', 'val_conf_thre')]
            if valid_preds.numel() == 0:
                continue

            valid_ind = DetHelper.cls_nms(
                valid_preds[:, :5],
                labels=valid_preds[:, 5],
                max_threshold=configer.get('res', 'nms')['max_threshold'],
                cls_keep_num=configer.get('res', 'cls_keep_num'),
                return_ind=True)

            valid_preds = valid_preds[valid_ind]
            _, order = valid_preds[:, 4].sort(0, descending=True)
            order = order[:configer.get('res', 'max_per_image')]
            output[i] = valid_preds[order]

        return output
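
Several of these decoders also take per-image `meta` records. Judging only from the keys accessed on this page ('ori_img_size', 'border_size', 'input_size'), a single entry for experimentation might look like this (the sizes are hypothetical (width, height) pairs):

    meta = [{
        'ori_img_size': [1280, 720],   # original image size
        'border_size': [512, 512],     # padded size the boxes were clipped against
        'input_size': [512, 512],      # network input size
    }]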
Example 6
    def decode(batch_detections, configer, meta):
        output = [None for _ in range(len(meta))]
        for i in range(len(meta)):
            image_pred = batch_detections[i]
            image_pred[:, 0] *= meta[i]['ori_img_size'][0]
            image_pred[:, 1] *= meta[i]['ori_img_size'][1]
            image_pred[:, 2] *= meta[i]['ori_img_size'][0]
            image_pred[:, 3] *= meta[i]['ori_img_size'][1]
            # Filter out confidence scores below threshold
            image_pred = image_pred[image_pred[:, 4] > configer.get('res', 'val_conf_thre')]
            # If none are remaining => process next image
            if image_pred.numel() == 0:
                continue

            # Get score and class with highest confidence
            class_conf, class_pred = torch.max(image_pred[:, 5:5 + configer.get('data', 'num_classes')], 1, keepdim=True)
            # Detections ordered as (x1, y1, x2, y2, obj_conf, class_conf, class_pred)
            detections = torch.cat((image_pred[:, :5], class_conf.float(), class_pred.float()), 1)
            valid_ind = DetHelper.cls_nms(detections[:, :5], labels=class_pred.squeeze(1),
                                          max_threshold=configer.get('res', 'nms')['max_threshold'], return_ind=True)
            output[i] = detections[valid_ind]

        return output
Example 7
    def __call__(self, indices_and_rois, gt_bboxes, gt_labels, meta, gt_polygons=None):
        n_sample = self.configer.get('roi', 'sampler')['n_sample']
        pos_iou_thresh = self.configer.get('roi', 'sampler')['pos_iou_thresh']
        neg_iou_thresh_hi = self.configer.get('roi', 'sampler')['neg_iou_thresh_hi']
        neg_iou_thresh_lo = self.configer.get('roi', 'sampler')['neg_iou_thresh_lo']
        pos_ratio = self.configer.get('roi', 'sampler')['pos_ratio']
        loc_normalize_mean = self.configer.get('roi', 'loc_normalize_mean')
        loc_normalize_std = self.configer.get('roi', 'loc_normalize_std')

        sample_roi_list = list()
        gt_roi_loc_list = list()
        gt_roi_label_list = list()
        gt_roi_mask_list = list()

        for i in range(len(gt_bboxes)):
            temp_gt_bboxes = gt_bboxes[i].to(indices_and_rois.device)
            temp_gt_labels = gt_labels[i].to(indices_and_rois.device)

            if temp_gt_bboxes.numel() == 0:
                min_size = self.configer.get('rpn', 'min_size')
                roi_size = random.randint(min_size, min(meta[i]['border_size']))
                sample_roi = torch.zeros((1, 4), requires_grad=True).float().to(indices_and_rois.device)
                sample_roi[0, 2:] = roi_size
                gt_roi_loc = torch.zeros((1, 4), requires_grad=True).float().to(sample_roi.device)
                gt_roi_label = torch.ones((1,), requires_grad=True).long().to(sample_roi.device).mul_(-1)

            else:
                pos_roi_per_image = np.round(n_sample * pos_ratio)
                if self.configer.get('phase') == 'debug':
                    rois = indices_and_rois[indices_and_rois[:, 0] == i][:, 1:]
                else:
                    if indices_and_rois.numel() == 0:
                        rois = temp_gt_bboxes
                    else:
                        rois = torch.cat((indices_and_rois[indices_and_rois[:, 0] == i][:, 1:], temp_gt_bboxes), 0)

                iou = DetHelper.bbox_iou(rois, temp_gt_bboxes)
                max_iou, gt_assignment = iou.max(1, keepdim=False)
                # Offset range of classes from [0, n_fg_class - 1] to [1, n_fg_class].
                # The label with value 0 is the background.
                gt_roi_label = temp_gt_labels[gt_assignment] + 1

                max_iou = max_iou.cpu().detach().numpy()
                # Select foreground RoIs as those with >= pos_iou_thresh IoU.
                pos_index = np.where(max_iou >= pos_iou_thresh)[0]
                pos_roi_per_this_image = int(min(pos_roi_per_image, pos_index.size))
                if pos_index.size > 0:
                    pos_index = np.random.choice(pos_index, size=pos_roi_per_this_image, replace=False)

                # Select background RoIs as those within
                # [neg_iou_thresh_lo, neg_iou_thresh_hi).
                neg_index = np.where((max_iou < neg_iou_thresh_hi) & (max_iou >= neg_iou_thresh_lo))[0]
                neg_roi_per_this_image = n_sample - pos_roi_per_this_image
                neg_roi_per_this_image = int(min(neg_roi_per_this_image, neg_index.size))
                if neg_index.size > 0:
                    neg_index = np.random.choice(neg_index, size=neg_roi_per_this_image, replace=False)

                # The indices that we're selecting (both positive and negative).
                keep_index = np.append(pos_index, neg_index)
                gt_roi_label = gt_roi_label[keep_index].detach()
                gt_roi_label[pos_roi_per_this_image:] = 0  # negative labels --> 0
                sample_roi = rois[keep_index].detach()

                if gt_polygons is not None:
                    temp_gt_polygons = gt_polygons[i]
                    target_size = [self.configer.get('roi', 'pooled_width'), self.configer.get('roi', 'pooled_height')]
                    for roi_index in range(pos_roi_per_this_image):
                        gt_index = gt_assignment[keep_index[roi_index]]
                        roi_polygons = temp_gt_polygons[gt_index]
                        roi = sample_roi[roi_index].cpu().numpy()
                        mask = MaskHelper.polys2mask_wrt_box(roi_polygons, roi, target_size)
                        mask = torch.from_numpy(mask).to(indices_and_rois.device)
                        gt_roi_mask_list.append(mask)

                # Compute offsets and scales to match sampled RoIs to the GTs.
                boxes = temp_gt_bboxes[gt_assignment][keep_index]
                cxcy = (boxes[:, :2] + boxes[:, 2:]) / 2 - (sample_roi[:, :2] + sample_roi[:, 2:]) / 2  # [#keep, 2]
                cxcy /= (sample_roi[:, 2:] - sample_roi[:, :2])
                wh = (boxes[:, 2:] - boxes[:, :2]) / (sample_roi[:, 2:] - sample_roi[:, :2])  # [#keep, 2]
                wh = torch.log(wh)
                loc = torch.cat([cxcy, wh], 1).detach()  # [#keep, 4]
                # loc = loc[:, [1, 0, 3, 2]]

                normalize_mean = torch.Tensor(loc_normalize_mean).to(loc.device)
                normalize_std = torch.Tensor(loc_normalize_std).to(loc.device)
                gt_roi_loc = (loc - normalize_mean) / normalize_std

            batch_index = i * torch.ones((len(sample_roi),)).to(sample_roi.device)
            sample_roi = torch.cat([batch_index[:, None], sample_roi], dim=1).contiguous()
            sample_roi_list.append(sample_roi)
            gt_roi_loc_list.append(gt_roi_loc)
            gt_roi_label_list.append(gt_roi_label)
            # sample_roi.register_hook(lambda g: print(g))

        sample_roi = torch.cat(sample_roi_list, 0)
        gt_roi_loc = torch.cat(gt_roi_loc_list, 0)
        gt_roi_label = torch.cat(gt_roi_label_list, 0)

        if gt_polygons is not None:
            gt_pos_roi_mask = torch.cat(gt_roi_mask_list, 0)
            return sample_roi, gt_roi_loc, gt_roi_label, gt_pos_roi_mask
        else:
            return sample_roi, gt_roi_loc, gt_roi_label
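
The matching in this sampler (and in most examples above) rests on `DetHelper.bbox_iou`, which is also not shown on this page. A minimal pairwise-IoU sketch for corner-form boxes (the (x1, y1, x2, y2) layout is assumed; the repo helper may support other modes):

    import torch

    def bbox_iou_sketch(boxes_a, boxes_b):
        """Pairwise IoU: boxes_a is [N, 4], boxes_b is [M, 4], both (x1, y1, x2, y2); returns [N, M]."""
        lt = torch.max(boxes_a[:, None, :2], boxes_b[None, :, :2])  # [N, M, 2]
        rb = torch.min(boxes_a[:, None, 2:], boxes_b[None, :, 2:])  # [N, M, 2]
        wh = (rb - lt).clamp(min=0)
        inter = wh[..., 0] * wh[..., 1]                             # [N, M]
        area_a = ((boxes_a[:, 2] - boxes_a[:, 0]) * (boxes_a[:, 3] - boxes_a[:, 1]))[:, None]
        area_b = ((boxes_b[:, 2] - boxes_b[:, 0]) * (boxes_b[:, 3] - boxes_b[:, 1]))[None, :]
        return inter / (area_a + area_b - inter)

    # identical boxes -> IoU 1, disjoint boxes -> IoU 0
    a = torch.tensor([[0., 0., 2., 2.]])
    b = torch.tensor([[0., 0., 2., 2.], [3., 3., 4., 4.]])
    print(bbox_iou_sketch(a, b))  # tensor([[1., 0.]])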
Example 8
    def __call__(self, feat_list, gt_bboxes, meta):
        anchor_boxes = self.fr_proirbox_layer(feat_list, meta[0]['input_size'])
        n_sample = self.configer.get('rpn', 'loss')['n_sample']
        pos_iou_thresh = self.configer.get('rpn', 'loss')['pos_iou_thresh']
        neg_iou_thresh = self.configer.get('rpn', 'loss')['neg_iou_thresh']
        pos_ratio = self.configer.get('rpn', 'loss')['pos_ratio']
        # Calc indices of anchors that lie completely inside the image
        # whose size is specified.
        target_bboxes = list()
        target_labels = list()
        for i in range(len(gt_bboxes)):
            index_inside = (
                ((anchor_boxes[:, 0] - anchor_boxes[:, 2] / 2) >= 0)
                & ((anchor_boxes[:, 1] - anchor_boxes[:, 3] / 2) >= 0)
                & ((anchor_boxes[:, 0] + anchor_boxes[:, 2] / 2) <
                   meta[i]['border_size'][0])
                & ((anchor_boxes[:, 1] + anchor_boxes[:, 3] / 2) <
                   meta[i]['border_size'][1]))
            index_inside = index_inside.nonzero().contiguous().view(-1, )
            default_boxes = anchor_boxes[index_inside]
            loc = torch.zeros_like(default_boxes)
            label = torch.ones((default_boxes.size(0), )).mul_(-1).long()

            if gt_bboxes[i].numel() > 0:
                # label: 1 is positive, 0 is negative, -1 is dont care
                ious = DetHelper.bbox_iou(
                    gt_bboxes[i],
                    torch.cat([
                        default_boxes[:, :2] - default_boxes[:, 2:] / 2,
                        default_boxes[:, :2] + default_boxes[:, 2:] / 2
                    ], 1))

                max_ious, argmax_ious = ious.max(0, keepdim=False)
                _, gt_argmax_ious = ious.max(1, keepdim=False)

                # assign negative labels first so that positive labels can clobber them
                label[max_ious < neg_iou_thresh] = 0

                # positive label: for each gt, anchor with highest iou
                label[gt_argmax_ious] = 1

                # positive label: above threshold IOU
                label[max_ious >= pos_iou_thresh] = 1

                # subsample positive labels if we have too many
                n_pos = int(pos_ratio * n_sample)
                pos_index = (label == 1).nonzero().contiguous().view(-1, ).numpy()
                if len(pos_index) > n_pos:
                    disable_index = np.random.choice(pos_index, size=(len(pos_index) - n_pos), replace=False)
                    label[disable_index] = -1

                # subsample negative labels if we have too many
                n_neg = n_sample - torch.sum(label == 1).item()
                neg_index = (label == 0).nonzero().contiguous().view(-1, ).numpy()

                if len(neg_index) > n_neg:
                    disable_index = np.random.choice(neg_index, size=(len(neg_index) - n_neg), replace=False)
                    label[disable_index] = -1

                boxes = gt_bboxes[i][argmax_ious]  # [#anchors_inside, 4]
                cxcy = (boxes[:, :2] + boxes[:, 2:]) / 2 - default_boxes[:, :2]  # [#anchors_inside, 2]
                cxcy /= default_boxes[:, 2:]
                wh = (boxes[:, 2:] - boxes[:, :2]) / default_boxes[:, 2:]  # [#anchors_inside, 2]
                wh = torch.log(wh)
                loc = torch.cat([cxcy, wh], 1)  # [#anchors_inside, 4]
                # loc = loc[:, [1, 0, 3, 2]]
            else:
                # No GT boxes: sample up to n_neg anchors as negatives (background);
                # the rest keep the "don't care" label -1.
                n_neg = n_sample // 2
                neg_index = (label == -1).nonzero().contiguous().view(-1, ).numpy()
                if len(neg_index) > n_neg:
                    disable_index = np.random.choice(neg_index, size=n_neg, replace=False)
                    label[disable_index] = 0

            ret_label = torch.ones((anchor_boxes.size(0), ), dtype=torch.long).mul_(-1)
            ret_label[index_inside] = label
            ret_loc = torch.zeros((anchor_boxes.size(0), 4))
            ret_loc[index_inside] = loc
            target_bboxes.append(ret_loc)
            target_labels.append(ret_label)

        return torch.stack(target_bboxes, 0), torch.stack(target_labels, 0)
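
The positive/negative subsampling above keeps the RPN mini-batch at a fixed size. A small worked example with hypothetical config values (the real n_sample and pos_ratio come from the repo's config):

    n_sample, pos_ratio = 256, 0.5
    n_pos = int(pos_ratio * n_sample)            # at most 128 positive anchors survive
    n_matched_pos = 30                           # suppose only 30 anchors matched positively
    n_neg = n_sample - min(n_pos, n_matched_pos) # 226 negatives are sampled; the rest stay -1 (ignored)
    print(n_pos, n_neg)                          # 128 226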
Example 9
    def __call__(self, feat_list, batch_gt_bboxes, batch_gt_labels,
                 input_size):
        batch_target_list = list()
        batch_objmask_list = list()
        batch_noobjmask_list = list()
        for i, ori_anchors in enumerate(self.configer.get('gt', 'anchors_list')):
            in_h, in_w = feat_list[i].size()[2:]
            w_fm_stride, h_fm_stride = input_size[0] / in_w, input_size[1] / in_h
            anchors = [(a_w / w_fm_stride, a_h / h_fm_stride) for a_w, a_h in ori_anchors]
            batch_size = len(batch_gt_bboxes)
            num_anchors = len(anchors)
            obj_mask = torch.zeros(batch_size, num_anchors, in_h, in_w)
            noobj_mask = torch.ones(batch_size, num_anchors, in_h, in_w)
            tx = torch.zeros(batch_size, num_anchors, in_h, in_w)
            ty = torch.zeros(batch_size, num_anchors, in_h, in_w)
            tw = torch.zeros(batch_size, num_anchors, in_h, in_w)
            th = torch.zeros(batch_size, num_anchors, in_h, in_w)
            tconf = torch.zeros(batch_size, num_anchors, in_h, in_w)
            tcls = torch.zeros(batch_size, num_anchors, in_h, in_w,
                               self.configer.get('data', 'num_classes'))

            for b in range(batch_size):
                for t in range(batch_gt_bboxes[b].size(0)):
                    # Convert GT corners to center/size coordinates on the feature-map grid
                    gx = (batch_gt_bboxes[b][t, 0] + batch_gt_bboxes[b][t, 2]) / (2.0 * input_size[0]) * in_w
                    gy = (batch_gt_bboxes[b][t, 1] + batch_gt_bboxes[b][t, 3]) / (2.0 * input_size[1]) * in_h
                    gw = (batch_gt_bboxes[b][t, 2] - batch_gt_bboxes[b][t, 0]) / input_size[0] * in_w
                    gh = (batch_gt_bboxes[b][t, 3] - batch_gt_bboxes[b][t, 1]) / input_size[1] * in_h
                    if gw * gh == 0 or gx >= in_w or gy >= in_h:
                        continue

                    # Get grid box indices
                    gi = int(gx)
                    gj = int(gy)
                    # Get shape of gt box
                    gt_box = torch.FloatTensor(np.array([0, 0, gw, gh])).unsqueeze(0)
                    # Get shapes of the anchor boxes
                    anchor_shapes = torch.FloatTensor(np.concatenate((np.zeros((num_anchors, 2)), np.array(anchors)), 1))
                    # Calculate iou between gt and anchor shapes
                    anch_ious = DetHelper.bbox_iou(gt_box, anchor_shapes)
                    # Where the overlap is larger than threshold set mask to zero (ignore)
                    noobj_mask[b, anch_ious[0] > self.configer.get('gt', 'iou_threshold')] = 0
                    # Find the best matching anchor box as a plain int,
                    # so it can index both the tensors and the `anchors` list below
                    best_n = int(anch_ious.argmax())

                    # Masks
                    obj_mask[b, best_n, gj, gi] = 1
                    # Coordinates
                    tx[b, best_n, gj, gi] = gx - gi
                    ty[b, best_n, gj, gi] = gy - gj
                    # Width and height
                    tw[b, best_n, gj, gi] = math.log(gw / anchors[best_n][0] + 1e-16)
                    th[b, best_n, gj, gi] = math.log(gh / anchors[best_n][1] + 1e-16)
                    # object
                    tconf[b, best_n, gj, gi] = 1
                    # One-hot encoding of label
                    tcls[b, best_n, gj, gi, int(batch_gt_labels[b][t])] = 1

            obj_mask = obj_mask.view(batch_size, -1)
            noobj_mask = noobj_mask.view(batch_size, -1)
            tx = tx.view(batch_size, -1).unsqueeze(2)
            ty = ty.view(batch_size, -1).unsqueeze(2)
            tw = tw.view(batch_size, -1).unsqueeze(2)
            th = th.view(batch_size, -1).unsqueeze(2)
            tconf = tconf.view(batch_size, -1).unsqueeze(2)
            tcls = tcls.view(batch_size, -1,
                             self.configer.get('data', 'num_classes'))
            target = torch.cat((tx, ty, tw, th, tconf, tcls), -1)
            batch_target_list.append(target)
            batch_objmask_list.append(obj_mask)
            batch_noobjmask_list.append(noobj_mask)

        batch_target = torch.cat(batch_target_list, 1)
        batch_objmask = torch.cat(batch_objmask_list, 1)
        batch_noobjmask = torch.cat(batch_noobjmask_list, 1)

        return batch_target, batch_objmask, batch_noobjmask
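
A quick worked example of the grid mapping above, with a hypothetical 416x416 input, a 13x13 feature map (stride 32), and a made-up GT box:

    import math

    input_size, in_w, in_h = (416, 416), 13, 13
    x1, y1, x2, y2 = 100., 150., 200., 250.         # GT corners in pixels

    gx = (x1 + x2) / (2.0 * input_size[0]) * in_w   # 4.6875
    gy = (y1 + y2) / (2.0 * input_size[1]) * in_h   # 6.25
    gw = (x2 - x1) / input_size[0] * in_w           # 3.125
    gh = (y2 - y1) / input_size[1] * in_h           # 3.125
    gi, gj = int(gx), int(gy)                       # responsible grid cell: (4, 6)
    tx, ty = gx - gi, gy - gj                       # offsets inside the cell: 0.6875, 0.25
    anchor_w, anchor_h = 3.625, 2.8125              # one anchor, already divided by the stride
    tw = math.log(gw / anchor_w + 1e-16)
    th = math.log(gh / anchor_h + 1e-16)
    print(gi, gj, round(tx, 4), round(ty, 4))       # 4 6 0.6875 0.25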
Example 10
    def decode(roi_locs, roi_scores, indices_and_rois, test_rois_num, configer, input_size):
        roi_locs = roi_locs.cpu()
        roi_scores = roi_scores.cpu()
        indices_and_rois = indices_and_rois.cpu()
        num_classes = configer.get('data', 'num_classes')
        mean = torch.Tensor(configer.get('roi', 'loc_normalize_mean')).repeat(num_classes)[None]
        std = torch.Tensor(configer.get('roi', 'loc_normalize_std')).repeat(num_classes)[None]
        mean = mean.to(roi_locs.device)
        std = std.to(roi_locs.device)

        roi_locs = (roi_locs * std + mean)
        roi_locs = roi_locs.contiguous().view(-1, num_classes, 4)
        # roi_locs = roi_locs[:,:, [1, 0, 3, 2]]

        rois = indices_and_rois[:, 1:]
        rois = rois.contiguous().view(-1, 1, 4).expand_as(roi_locs)
        wh = torch.exp(roi_locs[:, :, 2:]) * (rois[:, :, 2:] - rois[:, :, :2])
        cxcy = roi_locs[:, :, :2] * (rois[:, :, 2:] - rois[:, :, :2]) + (rois[:, :, :2] + rois[:, :, 2:]) / 2
        dst_bbox = torch.cat([cxcy - wh / 2, cxcy + wh / 2], 2)  # [N, num_classes, 4]

        # clip bounding box
        dst_bbox[:, :, 0::2] = dst_bbox[:, :, 0::2].clamp(min=0, max=input_size[0] - 1)
        dst_bbox[:, :, 1::2] = dst_bbox[:, :, 1::2].clamp(min=0, max=input_size[1] - 1)

        if configer.get('phase') != 'debug':
            cls_prob = F.softmax(roi_scores, dim=1)
        else:
            cls_prob = roi_scores

        cls_label = torch.LongTensor([i for i in range(num_classes)])\
            .contiguous().view(1, num_classes).repeat(indices_and_rois.size(0), 1)

        output = [None for _ in range(test_rois_num.size(0))]
        start_index = 0
        for i in range(test_rois_num.size(0)):
            # batch_index = (indices_and_rois[:, 0] == i).nonzero().contiguous().view(-1,)
            # tmp_dst_bbox = dst_bbox[batch_index]
            # tmp_cls_prob = cls_prob[batch_index]
            # tmp_cls_label = cls_label[batch_index]
            tmp_dst_bbox = dst_bbox[start_index:start_index + test_rois_num[i]]
            tmp_cls_prob = cls_prob[start_index:start_index + test_rois_num[i]]
            tmp_cls_label = cls_label[start_index:start_index + test_rois_num[i]]
            start_index += test_rois_num[i]

            mask = (tmp_cls_prob > configer.get('vis', 'conf_threshold')) & (tmp_cls_label > 0)

            tmp_dst_bbox = tmp_dst_bbox[mask].contiguous().view(-1, 4)
            if tmp_dst_bbox.numel() == 0:
                continue

            tmp_cls_prob = tmp_cls_prob[mask].contiguous().view(-1, ).unsqueeze(1)
            tmp_cls_label = tmp_cls_label[mask].contiguous().view(-1, ).unsqueeze(1)

            valid_preds = torch.cat((tmp_dst_bbox, tmp_cls_prob.float(), tmp_cls_label.float()), 1)

            keep = DetHelper.cls_nms(valid_preds[:, :4],
                                     scores=valid_preds[:, 4],
                                     labels=valid_preds[:, 5],
                                     nms_threshold=configer.get('nms', 'overlap_threshold'),
                                     iou_mode=configer.get('nms', 'mode'))

            output[i] = valid_preds[keep]

        return output
Example 11
    def ssd_batch_encode(self, gt_bboxes, gt_labels, default_boxes):
        """Transform target bounding boxes and class labels to SSD boxes and classes.

        Match each object box to all the default boxes, pick the ones with the Jaccard-Index > threshold:
        Jaccard(A,B) = AB / (A+B-AB)

        Args:
          boxes(tensor): object bounding boxes (xmin,ymin,xmax,ymax) of a image, sized [#obj, 4].
          classes(tensor): object class labels of a image, sized [#obj,].
          threshold(float): Jaccard index threshold
        Returns:
          boxes(tensor): bounding boxes, sized [#obj, 8732, 4].
          classes(tensor): class labels, sized [8732,]
        """
        target_bboxes = list()
        target_labels = list()
        for i in range(len(gt_bboxes)):
            if gt_bboxes[i] is None or len(gt_bboxes[i]) == 0:
                loc = torch.zeros_like(default_boxes)
                # keep targets on the same device so torch.stack below doesn't mix CPU/GPU tensors
                conf = torch.zeros((default_boxes.size(0), )).long().to(default_boxes.device)

            else:
                iou = DetHelper.bbox_iou(
                    gt_bboxes[i],
                    torch.cat([
                        default_boxes[:, :2] - default_boxes[:, 2:] / 2,
                        default_boxes[:, :2] + default_boxes[:, 2:] / 2
                    ], 1))  # [#obj, 8732]

                prior_box_iou, max_idx = iou.max(0, keepdim=False)  # [8732,]

                boxes = gt_bboxes[i][max_idx]  # [8732,4]
                variances = [0.1, 0.2]
                cxcy = (boxes[:, :2] + boxes[:, 2:]) / 2 - default_boxes[:, :2]  # [8732,2]
                cxcy /= variances[0] * default_boxes[:, 2:]
                wh = (boxes[:, 2:] - boxes[:, :2]) / default_boxes[:, 2:]  # [8732,2]
                wh = torch.log(wh) / variances[1]
                loc = torch.cat([cxcy, wh], 1)  # [8732,4]

                conf = 1 + gt_labels[i][max_idx]  # [8732,], background class = 0

                if self.configer.get('gt', 'anchor_method') == 'retina':
                    conf[prior_box_iou < self.configer.get('gt', 'iou_threshold')] = -1
                    conf[prior_box_iou < self.configer.get('gt', 'iou_threshold') - 0.1] = 0
                else:
                    conf[prior_box_iou < self.configer.get('gt', 'iou_threshold')] = 0  # background

                # Every prior box gets the class label of its best-matching GT; labels below
                # the IoU threshold were zeroed out (background) above. Also force each GT's
                # best-matching prior to carry that GT's label:
                class_iou, prior_box_idx = iou.max(1, keepdim=False)
                conf_class_idx = prior_box_idx.cpu().numpy()
                conf[conf_class_idx] = gt_labels[i] + 1

            target_bboxes.append(loc)
            target_labels.append(conf)

        return torch.stack(target_bboxes, 0), torch.stack(target_labels, 0)
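
The `default_boxes` consumed here are priors in (cx, cy, w, h) form, normalized to the input size. The repository builds them with its own priorbox layers; a minimal sketch of the idea for a single square feature map (the scale and aspect ratios are illustrative, not the repo's values):

    import itertools
    import math
    import torch

    def make_priors_sketch(fm_size=38, scale=0.1, aspect_ratios=(1.0, 2.0, 0.5)):
        """One prior per aspect ratio at each cell center, in normalized (cx, cy, w, h)."""
        priors = []
        for j, i in itertools.product(range(fm_size), repeat=2):
            cx, cy = (i + 0.5) / fm_size, (j + 0.5) / fm_size
            for ar in aspect_ratios:
                priors.append([cx, cy, scale * math.sqrt(ar), scale / math.sqrt(ar)])
        return torch.tensor(priors)  # [fm_size * fm_size * len(aspect_ratios), 4]

    default_boxes = make_priors_sketch()
    print(default_boxes.size())  # torch.Size([4332, 4])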
Example 12
    def roi_batch_encode(self, gt_bboxes, gt_labels, indices_and_rois):
        n_sample = self.configer.get('roi', 'loss')['n_sample']
        pos_iou_thresh = self.configer.get('roi', 'loss')['pos_iou_thresh']
        neg_iou_thresh_hi = self.configer.get('roi', 'loss')['neg_iou_thresh_hi']
        neg_iou_thresh_lo = self.configer.get('roi', 'loss')['neg_iou_thresh_lo']
        pos_ratio = self.configer.get('roi', 'loss')['pos_ratio']
        loc_normalize_mean = self.configer.get('roi', 'loc_normalize_mean')
        loc_normalize_std = self.configer.get('roi', 'loc_normalize_std')
        sample_roi_list = list()
        gt_roi_loc_list = list()
        gt_roi_label_list = list()

        for i in range(len(gt_bboxes)):
            # keep only the (x1, y1, x2, y2) columns of indices_and_rois, dropping the
            # leading batch-index column (as in the RoI sampler in Example 7)
            rois = torch.cat((indices_and_rois[indices_and_rois[:, 0] == i][:, 1:], gt_bboxes[i]), 0)
            pos_roi_per_image = np.round(n_sample * pos_ratio)
            iou = DetHelper.bbox_iou(rois, gt_bboxes[i])
            max_iou, gt_assignment = iou.max(1, keepdim=False)
            # Offset range of classes from [0, n_fg_class - 1] to [1, n_fg_class].
            # The label with value 0 is the background.
            gt_roi_label = gt_labels[i][gt_assignment] + 1

            max_iou = max_iou.cpu().detach().numpy()
            # Select foreground RoIs as those with >= pos_iou_thresh IoU.
            pos_index = np.where(max_iou >= pos_iou_thresh)[0]
            pos_roi_per_this_image = int(min(pos_roi_per_image, pos_index.size))
            if pos_index.size > 0:
                pos_index = np.random.choice(pos_index, size=pos_roi_per_this_image, replace=False)

            # Select background RoIs as those within
            # [neg_iou_thresh_lo, neg_iou_thresh_hi).
            neg_index = np.where((max_iou < neg_iou_thresh_hi) & (max_iou >= neg_iou_thresh_lo))[0]
            neg_roi_per_this_image = n_sample - pos_roi_per_this_image
            neg_roi_per_this_image = int(min(neg_roi_per_this_image, neg_index.size))
            if neg_index.size > 0:
                neg_index = np.random.choice(neg_index, size=neg_roi_per_this_image, replace=False)

            # The indices that we're selecting (both positive and negative).
            keep_index = np.append(pos_index, neg_index)
            gt_roi_label = gt_roi_label[keep_index]
            gt_roi_label[pos_roi_per_this_image:] = 0  # negative labels --> 0
            sample_roi = rois[keep_index]

            # Compute offsets and scales to match sampled RoIs to the GTs.
            boxes = gt_bboxes[i][gt_assignment][keep_index]
            cxcy = (boxes[:, :2] + boxes[:, 2:]) / 2 - (sample_roi[:, :2] + sample_roi[:, 2:]) / 2  # [#keep, 2]
            cxcy /= (sample_roi[:, 2:] - sample_roi[:, :2])
            wh = (boxes[:, 2:] - boxes[:, :2]) / (sample_roi[:, 2:] - sample_roi[:, :2])  # [#keep, 2]
            wh = torch.log(wh)
            loc = torch.cat([cxcy, wh], 1)  # [#keep, 4]
            gt_roi_loc = (loc - torch.Tensor(loc_normalize_mean)) / torch.Tensor(loc_normalize_std)

            batch_index = i * torch.ones((len(sample_roi), ))
            sample_roi = torch.cat([batch_index[:, None], sample_roi], dim=1).contiguous()
            sample_roi_list.append(sample_roi)
            gt_roi_loc_list.append(gt_roi_loc)
            gt_roi_label_list.append(gt_roi_label)

        return torch.cat(sample_roi_list, 0), torch.cat(gt_roi_loc_list, 0), torch.cat(gt_roi_label_list, 0)