Example #1
    def loss_boxes(self, outputs, targets, indices, num_boxes):
        """Compute the losses related to the bounding boxes, the L1 regression loss and
             the GIoU loss
        targets dicts must contain the key "boxes" containing a tensor of dim
             [nb_target_boxes, 4]
        The target boxes are expected in format (center_x, center_y, h, w),
             normalized by the image size.
        """
        assert "pred_boxes" in outputs
        idx = self._get_src_permutation_idx(indices)
        src_boxes = outputs["pred_boxes"][idx]
        target_boxes = torch.cat(
            [t["boxes"][i] for t, (_, i) in zip(targets, indices)], dim=0)

        # L1 regression loss between each matched prediction and its target box
        loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction="none")

        losses = {}
        losses["loss_bbox"] = loss_bbox.sum() / num_boxes

        # GIoU loss: 1 minus the diagonal of the pairwise generalized IoU matrix,
        # i.e. each prediction scored against its own matched target
        loss_giou = 1 - torch.diag(
            box_ops.generalized_box_iou(
                box_ops.box_cxcywh_to_xyxy(src_boxes).float(),
                box_ops.box_cxcywh_to_xyxy(target_boxes),
            ))
        losses["loss_giou"] = loss_giou.sum() / num_boxes
        return losses
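For context, here is a minimal, self-contained sketch of the same L1 + GIoU box loss on toy tensors. It uses torchvision.ops.box_convert and torchvision.ops.generalized_box_iou as stand-ins for mmf.utils.box_ops (an assumption; the repo's helpers behave the same way on already-matched boxes).

    import torch
    import torch.nn.functional as F
    from torchvision.ops import box_convert, generalized_box_iou

    # Toy matched pairs: three predictions and their assigned targets,
    # both in normalized (center_x, center_y, w, h) format.
    src_boxes = torch.tensor([[0.50, 0.50, 0.20, 0.20],
                              [0.30, 0.40, 0.10, 0.30],
                              [0.70, 0.60, 0.40, 0.20]])
    target_boxes = torch.tensor([[0.50, 0.50, 0.25, 0.20],
                                 [0.35, 0.40, 0.10, 0.30],
                                 [0.70, 0.65, 0.40, 0.25]])
    num_boxes = src_boxes.shape[0]

    # L1 regression loss, summed over all coordinates, normalized by box count
    loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction="none").sum() / num_boxes

    # GIoU loss: only the diagonal of the pairwise GIoU matrix matters,
    # i.e. each prediction against its own matched target
    giou = generalized_box_iou(box_convert(src_boxes, "cxcywh", "xyxy"),
                               box_convert(target_boxes, "cxcywh", "xyxy"))
    loss_giou = (1 - torch.diag(giou)).sum() / num_boxes

    print(loss_bbox.item(), loss_giou.item())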
Example #2
    def forward(self, outputs: Dict[str, Tensor], target_sizes: Tensor):
        """Convert the model's raw outputs into per-image detection results.

        outputs must contain "pred_logits" and "pred_boxes"; target_sizes is a
        tensor of dim [batch_size, 2] holding the (height, width) of each image,
        used to rescale the normalized boxes to absolute coordinates.
        """
        out_logits, out_bbox = outputs["pred_logits"], outputs["pred_boxes"]

        assert len(out_logits) == len(target_sizes)
        assert target_sizes.shape[1] == 2

        prob = F.softmax(out_logits, -1)
        scores, labels = prob[..., :-1].max(-1)

        # convert to [x0, y0, x1, y1] format
        from mmf.utils.box_ops import box_cxcywh_to_xyxy

        boxes = box_cxcywh_to_xyxy(out_bbox)
        # and from relative [0, 1] to absolute [0, height] coordinates
        img_h, img_w = target_sizes.unbind(1)
        scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)
        boxes = boxes * scale_fct[:, None, :]

        results = [{
            "scores": s,
            "labels": l,
            "boxes": b
        } for s, l, b in zip(scores, labels, boxes)]

        if "attr_logits" in outputs:
            assert len(outputs["attr_logits"]) == len(results)
            attr_scores, attr_labels = outputs["attr_logits"].max(-1)
            for idx, r in enumerate(results):
                r["attr_scores"] = attr_scores[idx]
                r["attr_labels"] = attr_labels[idx]

        return results
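A small standalone sketch of this post-processing step on dummy outputs; box_cxcywh_to_xyxy is re-implemented inline so the snippet does not depend on mmf (an assumption about its behaviour that matches the standard DETR helper).

    import torch
    import torch.nn.functional as F

    def cxcywh_to_xyxy(b):
        # stand-in for mmf.utils.box_ops.box_cxcywh_to_xyxy
        cx, cy, w, h = b.unbind(-1)
        return torch.stack([cx - 0.5 * w, cy - 0.5 * h,
                            cx + 0.5 * w, cy + 0.5 * h], dim=-1)

    # Dummy model outputs: batch of 2 images, 5 queries, 4 classes plus the
    # trailing "no object" logit that is dropped before taking the max.
    outputs = {
        "pred_logits": torch.randn(2, 5, 4 + 1),
        "pred_boxes": torch.rand(2, 5, 4),   # normalized (cx, cy, w, h)
    }
    target_sizes = torch.tensor([[480, 640], [600, 800]])  # (height, width) per image

    prob = F.softmax(outputs["pred_logits"], -1)
    scores, labels = prob[..., :-1].max(-1)

    boxes = cxcywh_to_xyxy(outputs["pred_boxes"])
    img_h, img_w = target_sizes.unbind(1)
    scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)
    boxes = boxes * scale_fct[:, None, :]    # absolute pixel coordinates

    results = [{"scores": s, "labels": l, "boxes": b}
               for s, l, b in zip(scores, labels, boxes)]
    print(results[0]["boxes"].shape)  # torch.Size([5, 4])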
Example #3
    def forward(self, outputs: Dict[str, Tensor], targets: List[Dict[str,
                                                                     Tensor]]):
        """ Performs the matching

        Params:
            outputs: This is a dict that contains at least these entries:
                 "pred_logits": Tensor of dim [batch_size, num_queries, num_classes]
                    with the classification logits
                 "pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the
                    predicted box coordinates

            targets: This is a list of targets (len(targets) = batch_size), where each
                    target is a dict containing:
                 "labels": Tensor of dim [num_target_boxes] (where num_target_boxes is
                    the number of ground-truth objects in the target) containing the
                    class labels
                 "boxes": Tensor of dim [num_target_boxes, 4] containing the target box
                    coordinates

        Returns:
            A list of size batch_size, containing tuples of (index_i, index_j) where:
                - index_i is the indices of the selected predictions (in order)
                - index_j is the indices of the corresponding selected targets (in
                  order)
            For each batch element, it holds:
                len(index_i) = len(index_j) = min(num_queries, num_target_boxes)
        """
        bs, num_queries = outputs["pred_logits"].shape[:2]

        # We flatten to compute the cost matrices in a batch
        out_prob = self.norm(outputs["pred_logits"].flatten(
            0, 1))  # [batch_size * num_queries, num_classes]
        out_bbox = outputs["pred_boxes"].flatten(
            0, 1)  # [batch_size * num_queries, 4]

        # Also concat the target labels and boxes
        tgt_ids = torch.cat([v["labels"] for v in targets])
        tgt_bbox = torch.cat([v["boxes"] for v in targets])

        # Compute the classification cost. Contrary to the loss, we don't use the NLL,
        # but approximate it by 1 - proba[target class].
        # The 1 is a constant that doesn't change the matching, so it can be omitted.
        cost_class = -out_prob[:, tgt_ids]

        # Compute the L1 cost between boxes
        cost_bbox = torch.cdist(out_bbox, tgt_bbox, p=1)

        # Compute the GIoU cost between boxes
        cost_giou = -generalized_box_iou(
            box_cxcywh_to_xyxy(out_bbox).float(), box_cxcywh_to_xyxy(tgt_bbox))

        # Final cost matrix
        C = (self.cost_bbox * cost_bbox + self.cost_class * cost_class +
             self.cost_giou * cost_giou)
        C = C.view(bs, num_queries, -1).cpu()

        sizes = [len(v["boxes"]) for v in targets]
        indices = [
            linear_sum_assignment(c[i])
            for i, c in enumerate(C.split(sizes, -1))
        ]
        return [(
            torch.as_tensor(i, dtype=torch.int64),
            torch.as_tensor(j, dtype=torch.int64),
        ) for i, j in indices]
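The bipartite matching itself can be reproduced on toy data with scipy's linear_sum_assignment. The sketch below builds the classification and L1 box cost terms exactly as above, but uses illustrative weights of 1 and 5 and omits the GIoU term for brevity (both assumptions, not the matcher's actual configuration).

    import torch
    from scipy.optimize import linear_sum_assignment

    # Toy setup: one image, 4 queries, 3 ground-truth boxes, 5 classes.
    out_prob = torch.softmax(torch.randn(4, 5), -1)   # [num_queries, num_classes]
    out_bbox = torch.rand(4, 4)                       # normalized (cx, cy, w, h)
    tgt_ids = torch.tensor([1, 3, 2])                 # target class labels
    tgt_bbox = torch.rand(3, 4)

    # Classification cost: minus the probability assigned to each target's class
    cost_class = -out_prob[:, tgt_ids]                # [num_queries, num_targets]
    # L1 cost between every (prediction, target) box pair
    cost_bbox = torch.cdist(out_bbox, tgt_bbox, p=1)

    # Weighted cost matrix; the weights here are illustrative only
    C = 1.0 * cost_class + 5.0 * cost_bbox

    row_ind, col_ind = linear_sum_assignment(C.numpy())
    # min(num_queries, num_targets) = 3 matched (prediction, target) index pairs
    print(list(zip(row_ind, col_ind)))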