Example #1
 def get_points_single(self, featmap_size, stride, dtype, device):
     # Image-plane coordinates of every feature-map cell: scale the cell
     # indices by the stride, then shift by stride // 2 to land on cell
     # centers. `arange` behaves like torch.arange; `meshgrid` is a
     # project-level helper (see the sketch below).
     x_range = arange(0, end=featmap_size[1], dtype=dtype,
                      device=device) * stride
     y_range = arange(0, end=featmap_size[0], dtype=dtype,
                      device=device) * stride
     x, y = meshgrid(x_range, y_range, flatten=True)
     points = torch.stack((x, y), dim=-1) + stride // 2
     return points
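
The snippets on this page call an unqualified `arange` and `meshgrid`. `arange` behaves like `torch.arange`; `meshgrid` is presumably a project helper that pairs every x coordinate with every y coordinate and, judging from the call sites, returns flattened 1-D grids by default (Example #3 below calls it with no `flatten` argument and then stacks the results as 1-D tensors). A minimal sketch of such a helper, inferred from the call sites rather than taken from the project, plus a standalone run of the computation above:

    import torch

    def meshgrid(x, y, flatten=True):
        # Hypothetical stand-in for the helper used on this page: every
        # (y, x) pair. `indexing='ij'` requires torch >= 1.10.
        yy, xx = torch.meshgrid(y, x, indexing='ij')  # each (len(y), len(x))
        if flatten:
            return xx.reshape(-1), yy.reshape(-1)
        return xx, yy

    # Standalone run of get_points_single for a 2x3 feature map, stride 8:
    x_range = torch.arange(0, 3, dtype=torch.float32) * 8
    y_range = torch.arange(0, 2, dtype=torch.float32) * 8
    x, y = meshgrid(x_range, y_range, flatten=True)
    points = torch.stack((x, y), dim=-1) + 8 // 2
    # points: [[4, 4], [12, 4], [20, 4], [4, 12], [12, 12], [20, 12]]
    # i.e. the image-plane center of every feature-map cell.
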
Example #2
 def get_points(self, featmap_sizes, dtype, device, flatten=False):
     # One (y, x) pair per feature level, kept at feature-map resolution;
     # the + 0.5 moves each point to the center of its cell.
     points = []
     for featmap_size in featmap_sizes:
         x_range = arange(0, featmap_size[1], dtype=dtype,
                          device=device) + 0.5
         y_range = arange(0, featmap_size[0], dtype=dtype,
                          device=device) + 0.5
         x, y = meshgrid(x_range, y_range, flatten=flatten)
         points.append((y, x))
     return points
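
Unlike Example #1, this variant stays at feature-map resolution and collects one `(y, x)` pair per level. A quick check of a single level with made-up sizes, reusing the `meshgrid` sketch above:

    import torch

    x_range = torch.arange(0, 3, dtype=torch.float32) + 0.5   # [0.5, 1.5, 2.5]
    y_range = torch.arange(0, 2, dtype=torch.float32) + 0.5   # [0.5, 1.5]
    x, y = meshgrid(x_range, y_range, flatten=True)
    # x: [0.5, 1.5, 2.5, 0.5, 1.5, 2.5]
    # y: [0.5, 0.5, 0.5, 1.5, 1.5, 1.5]
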
Example #3
    def grid_anchors(self, featmap_size, stride=16, device='cuda'):
        base_anchors = self.base_anchors.to(device)

        shift_x = arange(
            start=0, end=featmap_size[1], dtype=torch.float32,
            device=device) * stride
        shift_y = arange(
            start=0, end=featmap_size[0], dtype=torch.float32,
            device=device) * stride
        shift_xx, shift_yy = meshgrid(shift_x, shift_y)
        shifts = torch.stack([shift_xx, shift_yy, shift_xx, shift_yy], dim=1)
        shifts = shifts.type_as(base_anchors)
        # first feat_w elements correspond to the first row of shifts
        # add A anchors (1, A, 4) to K shifts (K, 1, 4) to get
        # shifted anchors (K, A, 4), reshape to (K*A, 4)

        all_anchors = base_anchors[None, :, :] + shifts[:, None, :]
        all_anchors = all_anchors.view(-1, 4)
        # first A rows correspond to A anchors of (0, 0) in feature map,
        # then (0, 1), (0, 2), ...
        return all_anchors
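
The broadcast at the end is the key step: A base anchors of shape (1, A, 4) are added to K shifts of shape (K, 1, 4), producing all K*A anchors in one operation. A standalone check with a made-up 1x2 feature map and a single 16x16 base anchor, again reusing the `meshgrid` sketch above:

    import torch

    base_anchors = torch.tensor([[-8., -8., 8., 8.]])        # A = 1
    shift_x = torch.arange(0, 2, dtype=torch.float32) * 16   # feat width 2
    shift_y = torch.arange(0, 1, dtype=torch.float32) * 16   # feat height 1
    shift_xx, shift_yy = meshgrid(shift_x, shift_y)          # K = 2 cells
    shifts = torch.stack([shift_xx, shift_yy, shift_xx, shift_yy], dim=1)
    all_anchors = (base_anchors[None, :, :] + shifts[:, None, :]).view(-1, 4)
    # all_anchors: [[-8, -8, 8, 8], [8, -8, 24, 8]] -- one anchor per cell.
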
    def get_seg_masks(self, mask_pred, det_bboxes, det_labels, rcnn_test_cfg,
                      ori_shape, scale_factor, rescale):
        """Get segmentation masks from mask_pred and bboxes.

        Args:
            mask_pred (Tensor or ndarray): shape (n, #class, h, w).
                For single-scale testing, mask_pred is the direct output of
                model, whose type is Tensor, while for multi-scale testing,
                it will be converted to numpy array outside of this method.
            det_bboxes (Tensor): shape (n, 4/5)
            det_labels (Tensor): shape (n, )
            img_shape (Tensor): shape (3, )
            rcnn_test_cfg (dict): rcnn testing config
            ori_shape: original image size

        Returns:
            list[list]: encoded masks
        """
        # Select each detection's own class channel and map logits to
        # probabilities (`arange` behaves like torch.arange).
        segm_result = mask_pred[
            arange(end=det_labels.shape[0], device=mask_pred.device),
            det_labels].sigmoid()

        return segm_result
    def get_seg_masks(self, det_masks, det_labels):
        """Get segmentation masks from mask_pred and bboxes.

        Args:
            det_masks (Tensor): shape (n, #class+1, h, w).
                For single-scale testing, mask_pred is the direct output of
                model, whose type is Tensor, while for multi-scale testing,
                it will be converted to numpy array outside of this method.
            det_labels (Tensor): shape (n, )

        Returns:
            Tensor of shape (n, 1, h, w) with mask heatmaps
            for all detected boxes.
        """

        if not self.class_agnostic:
            # channel 0 is background, so class k lives at channel k + 1
            class_indices = det_labels + 1
        else:
            # a single class-agnostic mask channel
            class_indices = 0

        segm_result = det_masks[arange(
            end=det_labels.shape[0], device=det_masks.device
        ), class_indices].sigmoid()
        return segm_result
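
Both `get_seg_masks` variants rely on the same indexing idiom: pairing `torch.arange(n)` with a per-row class index selects exactly one mask channel per detection. A minimal sketch with made-up shapes:

    import torch

    n, num_classes, h, w = 3, 4, 2, 2
    det_masks = torch.randn(n, num_classes + 1, h, w)  # channel 0: background
    det_labels = torch.tensor([0, 2, 1])
    class_indices = det_labels + 1                     # class k -> channel k+1
    segm = det_masks[torch.arange(n), class_indices].sigmoid()
    # segm.shape == (3, 2, 2): one (h, w) probability map per detection.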