Example #1
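These listings call torchvision's NMS through the alias torchvision_nms and use a global device; neither is shown in the snippets. A plausible set of module-level imports (an assumption, not taken from the original sources) is:

import torch
import torch.nn.functional as F
from typing import List
from torchvision.ops import nms as torchvision_nms

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')  # assumed global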
    def cpu_nms(self, scores: torch.Tensor, boxes: torch.Tensor,
                classes: torch.Tensor) -> List[torch.Tensor]:
        batch = scores.shape[0]
        selected_box_indx = torch.full((batch, self.nms_max_detections),
                                       -1,
                                       dtype=torch.long)
        cpu_classes = torch.full((batch, self.nms_max_detections),
                                 torch.iinfo(torch.int32).max,
                                 dtype=int)
        cpu_boxes = torch.zeros((batch, self.nms_max_detections, 4))
        cpu_scores = torch.zeros((batch, self.nms_max_detections))
        # Default to the cap so the slices below stay full-width when NMS
        # returns more than nms_max_detections candidates for an image.
        cpu_true_max_detections = torch.full((batch, ),
                                             self.nms_max_detections)

        for i, (bscores, bboxes,
                bclasses) in enumerate(zip(scores, boxes, classes)):
            nms_preds = torchvision_nms(bboxes, bscores, self.iou_threshold)

            if nms_preds.shape[0] > self.nms_max_detections:
                selected_box_indx[i] = nms_preds[:self.nms_max_detections]
            else:
                selected_box_indx[i, :nms_preds.shape[0]] = nms_preds
                cpu_true_max_detections[i] = nms_preds.shape[0]

            batch_indices = selected_box_indx[i, :cpu_true_max_detections[i]]

            cpu_classes[
                i, :cpu_true_max_detections[i]] = bclasses[batch_indices]
            cpu_boxes[i, :cpu_true_max_detections[i]] = bboxes[batch_indices]
            cpu_scores[i, :cpu_true_max_detections[i]] = bscores[batch_indices]

        return [
            selected_box_indx, cpu_scores, cpu_boxes,
            cpu_classes.int(),
            cpu_true_max_detections.int()
        ]
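For reference, a minimal sketch of the underlying torchvision.ops.nms call that cpu_nms wraps per image, with toy tensors that are purely illustrative:

boxes = torch.tensor([[0., 0., 10., 10.],
                      [1., 1., 11., 11.],
                      [50., 50., 60., 60.]])
scores = torch.tensor([0.9, 0.8, 0.7])
keep = torchvision_nms(boxes, scores, iou_threshold=0.5)
# keep holds int64 indices sorted by descending score; here tensor([0, 2]),
# because box 1 overlaps box 0 with IoU of roughly 0.68 and is suppressed.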
Example #2
def nms(scores: torch.Tensor, boxes: torch.Tensor, classes: torch.Tensor,
        iou_threshold: float, max_detections: int) -> List[torch.Tensor]:
    """
    Perform non maximum suppression on predictions
    Parameters:
        scores (torch.Tensor): objectness scores per box
        boxes (torch.Tensor): (xmin, ymin, xmax, ymax)
        classes (torch.Tensor): classes per box
        iou_threshold (float):  Predictions that overlap by more than this threshold will be discarded
        max_detections (int) : Maximum number of detections per image
    Returns:
        List[torch.Tensor]: Predictions filtered after NMS, indexes, scores, boxes, classes, and the number of detection per image
    """
    batch = scores.shape[0]
    selected_box_indx = torch.full((batch, max_detections),
                                   -1,
                                   dtype=torch.long)
    cpu_classes = torch.full((batch, max_detections),
                             torch.iinfo(torch.int32).max,
                             dtype=int)
    cpu_boxes = torch.zeros((batch, max_detections, 4))
    cpu_scores = torch.zeros((batch, max_detections))
    cpu_true_max_detections = torch.full((batch, ), max_detections)

    for i, (bscores, bboxes,
            bclasses) in enumerate(zip(scores, boxes, classes)):
        nms_preds = torchvision_nms(bboxes, bscores, iou_threshold)

        if nms_preds.shape[0] > max_detections:
            selected_box_indx[i] = nms_preds[:max_detections]
        else:
            selected_box_indx[i, :nms_preds.shape[0]] = nms_preds
            cpu_true_max_detections[i] = nms_preds.shape[0]

        batch_indices = selected_box_indx[i, :cpu_true_max_detections[i]]

        cpu_classes[i, :cpu_true_max_detections[i]] = bclasses[batch_indices]
        cpu_boxes[i, :cpu_true_max_detections[i]] = bboxes[batch_indices]
        cpu_scores[i, :cpu_true_max_detections[i]] = bscores[batch_indices]

    return [
        selected_box_indx, cpu_scores, cpu_boxes,
        cpu_classes.int(),
        cpu_true_max_detections.int()
    ]
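A hedged usage sketch for this standalone nms function, with random toy inputs whose shapes are inferred from how the function indexes its arguments:

batch, num_boxes, num_classes = 2, 300, 20
scores = torch.rand(batch, num_boxes)
xy = torch.rand(batch, num_boxes, 2) * 100
wh = torch.rand(batch, num_boxes, 2) * 50 + 1.0
boxes = torch.cat([xy, xy + wh], dim=-1)  # valid (xmin, ymin, xmax, ymax) boxes
classes = torch.randint(0, num_classes, (batch, num_boxes))

indices, out_scores, out_boxes, out_classes, true_counts = nms(
    scores, boxes, classes, iou_threshold=0.5, max_detections=100)
# indices[i] is padded with -1 beyond the true_counts[i] detections kept for image i.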
Example #3
def make_pred_bbox(preds, conf_threshold=0.35):

    # assign to cpu
    preds = preds.cpu()
    pred_targets = preds.view(-1, 13, 13, 5, 5 + 20)
    pred_xy = pred_targets[..., :2].sigmoid()  # sigmoid(tx, ty), channels 0-1
    pred_wh = pred_targets[..., 2:4].exp()  # exp(tw, th), channels 2-3

    pred_conf = pred_targets[..., 4].sigmoid()  # objectness, channel 4
    pred_cls = pred_targets[..., 5:]  # 20 class logits

    # pred_bbox
    anchors_wh = [(1.3221, 1.73145), (3.19275, 4.00944), (5.05587, 8.09892),
                  (9.47112, 4.84053), (11.2364, 10.0071)]
    anchors = make_center_anchors(anchors_wh)  # cy, cx, w, h - [845, 4]

    # assign to cpu
    cxcy_anchors = anchors.cpu()  # (cx, cy, w, h) anchors in grid units, 0~13

    anchors_xy = cxcy_anchors[..., :2]  # torch.Size([13, 13, 5, 2])
    anchors_wh = cxcy_anchors[..., 2:]  # torch.Size([13, 13, 5, 2])

    # floor() snaps each anchor center to its cell's top-left corner; the
    # sigmoid offset then places the predicted center inside that cell.
    pred_bbox_xy = anchors_xy.floor().expand_as(
        pred_xy) + pred_xy  # torch.Size([B, 13, 13, 5, 2])
    pred_bbox_wh = anchors_wh.expand_as(pred_wh) * pred_wh
    pred_bbox = torch.cat([pred_bbox_xy, pred_bbox_wh],
                          dim=-1)  # torch.Size([B, 13, 13, 5, 4])
    pred_bbox = pred_bbox.view(
        -1, 13 * 13 * 5,
        4) / 13.  # rescale 0~1   # [B, 845, 4]  # center_coord.
    pred_cls = F.softmax(pred_cls, dim=-1).view(-1, 13 * 13 * 5,
                                                20)  # [B, 845, 20]
    pred_conf = pred_conf.reshape(-1, 13 * 13 * 5)  # [B, 845]

    image_boxes = list()
    image_labels = list()
    image_scores = list()

    # per class
    for c in range(20):
        class_scores = pred_cls[..., c]
        class_scores = class_scores * pred_conf

        idx = class_scores > conf_threshold  # a lower threshold (e.g. 0.01) is typically used for evaluation
        if idx.sum() == 0:
            continue

        class_scores = class_scores[idx]  # (n_qualified,), n_qualified <= 845
        class_bboxes = pred_bbox[idx]  # (n_qualified, 4)

        sorted_scores, idx_scores = class_scores.sort(descending=True)
        sorted_boxes = class_bboxes[idx_scores]
        # convert center coords to corner coords and clamp to the 0~1 image range
        sorted_boxes = center_to_corner(sorted_boxes).clamp(0, 1)

        num_boxes = len(sorted_boxes)
        keep_idx = torchvision_nms(boxes=sorted_boxes,
                                   scores=sorted_scores,
                                   iou_threshold=0.45)
        # build a boolean keep mask from the int64 indices returned by NMS
        keep = torch.zeros(num_boxes, dtype=torch.bool)
        keep[keep_idx] = True

        image_boxes.append(sorted_boxes[keep])  # corner coords, scaled 0~1
        image_labels.append(
            torch.LongTensor(keep.sum().item() * [c]).to(device))
        image_scores.append(sorted_scores[keep])

    if len(image_boxes) == 0:
        image_boxes.append(torch.FloatTensor([[0., 0., 1., 1.]]).to(device))
        image_labels.append(torch.LongTensor([20]).to(device))
        image_scores.append(torch.FloatTensor([0.]).to(device))

    # Concatenate into single tensors
    image_boxes = torch.cat(image_boxes, dim=0)  # (n_objects, 4)
    image_labels = torch.cat(image_labels, dim=0)  # (n_objects)
    image_scores = torch.cat(image_scores, dim=0)  # (n_objects)
    n_objects = image_scores.size(0)

    # Keep only the top k objects
    top_k = 200
    if n_objects > top_k:
        image_scores, sort_ind = image_scores.sort(dim=0, descending=True)
        image_scores = image_scores[:top_k]  # (top_k)
        image_boxes = image_boxes[sort_ind][:top_k]  # (top_k, 4)
        image_labels = image_labels[sort_ind][:top_k]  # (top_k)

    return image_boxes, image_labels, image_scores
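make_center_anchors and center_to_corner are external helpers that are not shown in this listing. Judging from the comments and the clamp(0, 1) that follows it, center_to_corner presumably converts (cx, cy, w, h) boxes to corner form; a minimal sketch under that assumption:

def center_to_corner(cxcy):
    # assumed contract: (cx, cy, w, h) -> (xmin, ymin, xmax, ymax)
    return torch.cat([cxcy[..., :2] - cxcy[..., 2:] / 2,
                      cxcy[..., :2] + cxcy[..., 2:] / 2], dim=-1)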
Example #4
def detect(pred, coder, opts, max_overlap=0.5, top_k=1000, is_demo=False):
    """
    post processing of out of models
    batch 1 에 대한 prediction ([N, 8732, 4] ,[N, 8732, n])을  pred boxes pred labels 와 pred scores 로 변환하는 함수
    :param pred (loc, cls) prediction tuple
    :param coder
    """
    pred_bboxes, pred_scores = coder.post_processing(pred, is_demo)

    # Lists to store boxes and scores for this image
    image_boxes = list()
    image_labels = list()
    image_scores = list()

    # Check for each class
    for c in range(0, opts.num_classes):
        # Keep only predicted boxes and scores where scores for this class are above the minimum score
        class_scores = pred_scores[:, c]  # (8732)
        idx = class_scores > opts.conf_thres  # torch.bool tensor, used for indexing

        if idx.sum() == 0:
            continue

        class_scores = class_scores[idx]  # (n_qualified,), n_qualified <= 8732
        class_bboxes = pred_bboxes[idx]

        sorted_scores, idx_scores = class_scores.sort(descending=True)
        sorted_boxes = class_bboxes[idx_scores]

        # NMS
        num_boxes = len(sorted_boxes)
        keep_idx = torchvision_nms(boxes=sorted_boxes,
                                   scores=sorted_scores,
                                   iou_threshold=max_overlap)
        # build a boolean keep mask from the int64 indices returned by NMS
        keep = torch.zeros(num_boxes, dtype=torch.bool)
        keep[keep_idx] = True

        # Store only unsuppressed boxes for this class
        image_boxes.append(sorted_boxes[keep])
        image_labels.append(
            torch.LongTensor((keep).sum().item() * [c]).to(device))
        image_scores.append(sorted_scores[keep])

    # If no object in any class is found, store a placeholder for 'background'
    if len(image_boxes) == 0:
        image_boxes.append(torch.FloatTensor([[0., 0., 1., 1.]]).to(device))
        image_labels.append(torch.LongTensor([opts.num_classes
                                              ]).to(device))  # background
        image_scores.append(torch.FloatTensor([0.]).to(device))

    # Concatenate into single tensors
    image_boxes = torch.cat(image_boxes, dim=0)  # (n_objects, 4)
    image_labels = torch.cat(image_labels, dim=0)  # (n_objects)
    image_scores = torch.cat(image_scores, dim=0)  # (n_objects)
    n_objects = image_scores.size(0)

    # Keep only the top k objects --> isn't it slow to compute everything first and only then cut it down to 200?
    if n_objects > top_k:
        image_scores, sort_ind = image_scores.sort(dim=0, descending=True)
        image_scores = image_scores[:top_k]  # (top_k)
        image_boxes = image_boxes[sort_ind][:top_k]  # (top_k, 4)
        image_labels = image_labels[sort_ind][:top_k]  # (top_k)

    return image_boxes, image_labels, image_scores  # tensors for a single image: (n_objects, 4), (n_objects,), (n_objects,)
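Since torchvision_nms already returns the indices of the kept boxes sorted by descending score, the boolean keep mask built in Examples #3 and #4 can be replaced by direct indexing. A minimal sketch of that variant, reusing the loop variables from the function above:

keep_idx = torchvision_nms(boxes=sorted_boxes,
                           scores=sorted_scores,
                           iou_threshold=max_overlap)
image_boxes.append(sorted_boxes[keep_idx])
image_labels.append(torch.full((keep_idx.numel(), ), c, dtype=torch.long).to(device))
image_scores.append(sorted_scores[keep_idx])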