Example #1
    def __init__(
        self,
        output_size,
        scales,
        sampling_ratio,
        pooler_type,
        canonical_box_size=224,
        canonical_level=4,
    ):
        super().__init__()

        if isinstance(output_size, int):
            output_size = (output_size, output_size)
        assert len(output_size) == 2
        assert isinstance(output_size[0], int) and isinstance(
            output_size[1], int)
        self.output_size = output_size

        if pooler_type == "ROIAlign":
            self.level_poolers = nn.ModuleList(
                ROIAlign(output_size,
                         spatial_scale=scale,
                         sampling_ratio=sampling_ratio,
                         aligned=False) for scale in scales)
        elif pooler_type == "ROIAlignV2":
            self.level_poolers = nn.ModuleList(
                ROIAlign(output_size,
                         spatial_scale=scale,
                         sampling_ratio=sampling_ratio,
                         aligned=True) for scale in scales)
        elif pooler_type == "ROIPool":
            self.level_poolers = nn.ModuleList(
                RoIPool(output_size, spatial_scale=scale) for scale in scales)
        elif pooler_type == "ROIAlignRotated":
            self.level_poolers = nn.ModuleList(
                ROIAlignRotated(output_size,
                                spatial_scale=scale,
                                sampling_ratio=sampling_ratio)
                for scale in scales)
        else:
            raise ValueError("Unknown pooler type: {}".format(pooler_type))

        # Map scale (defined as 1 / stride) to its feature map level under the
        # assumption that stride is a power of 2.
        min_level = -(math.log2(scales[0]))
        max_level = -(math.log2(scales[-1]))
        assert math.isclose(min_level, int(min_level)) and math.isclose(
            max_level, int(max_level)), "Featuremap stride is not power of 2!"
        self.min_level = int(min_level)
        self.max_level = int(max_level)
        assert (
            len(scales) == self.max_level - self.min_level + 1
        ), "[ROIPooler] Sizes of input featuremaps do not form a pyramid!"
        assert 0 < self.min_level <= self.max_level
        self.canonical_level = canonical_level
        assert canonical_box_size > 0
        self.canonical_box_size = canonical_box_size
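
A minimal usage sketch (assuming this __init__ belongs to a detectron2-style ROIPooler, as the assert message suggests): four FPN levels with strides 4, 8, 16 and 32 give scales 1/4 down to 1/32, from which min_level=2 and max_level=5 are recovered via -log2(scale).

    pooler = ROIPooler(
        output_size=7,
        scales=(1.0 / 4, 1.0 / 8, 1.0 / 16, 1.0 / 32),
        sampling_ratio=0,
        pooler_type="ROIAlignV2",
    )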
Example #2
    def crop_and_resize(self, instance_mask, boxes, mask_size):
        """
        Crop each bitmask by the given box, and resize the results to (mask_size, mask_size).
        This can be used to prepare training targets for Mask R-CNN.
        It has less reconstruction error compared to rasterization with polygons.
        However, we observe no difference in accuracy, and BitMasks requires
        more memory to store all the masks.

        Args:
            instance_mask (Tensor): NxHxW tensor storing the bitmasks to crop
            boxes (Tensor): Nx4 tensor storing the boxes for each mask
            mask_size (int): the size of the rasterized mask.

        Returns:
            Tensor:
                A bool tensor of shape (N, mask_size, mask_size), where
                N is the number of predicted boxes for this image.
        """
        assert len(boxes) == len(instance_mask), "{} != {}".format(len(boxes), len(instance_mask))
        device = instance_mask.device
        batch_inds = torch.arange(len(boxes), device=device).to(dtype=boxes.dtype)[:, None]
        rois = torch.cat([batch_inds, boxes], dim=1)  # Nx5
        bit_masks = instance_mask.to(dtype=torch.float32)
        rois = rois.to(device=device)
        output = (
            ROIAlign((mask_size, mask_size), 1.0, 0, aligned=True)
            .forward(bit_masks[:, None, :, :], rois)
            .squeeze(1)
        )
        output = output >= 0.5
        return output
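
A quick usage sketch for the method above (the shapes and the holder object `bitmasks` are illustrative assumptions):

    # two 64x64 bitmasks, cropped by their boxes into 28x28 training targets
    masks = torch.zeros(2, 64, 64, dtype=torch.bool)
    masks[:, 16:48, 16:48] = True
    boxes = torch.tensor([[16.0, 16.0, 48.0, 48.0],
                          [0.0, 0.0, 32.0, 32.0]])
    targets = bitmasks.crop_and_resize(masks, boxes, mask_size=28)  # (2, 28, 28) bool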
Example #3
    def __init__(self, dim_in, temp_pool_size, resolution, scale_factor):
        super(Head_featextract_roi, self).__init__()
        self.dim_in = dim_in
        self.num_pathways = len(temp_pool_size)

        for pi in range(self.num_pathways):
            pi_temp_pool_size = temp_pool_size[pi]
            if pi_temp_pool_size is not None:
                tpool = nn.AvgPool3d(
                        [pi_temp_pool_size, 1, 1], stride=1)
                self.add_module(f's{pi}_tpool', tpool)
            roi_align = ROIAlign(
                    resolution[pi],
                    spatial_scale=1.0/scale_factor[pi],
                    sampling_ratio=0,
                    aligned=True)
            self.add_module(f's{pi}_roi', roi_align)
            spool = nn.MaxPool2d(resolution[pi], stride=1)
            self.add_module(f's{pi}_spool', spool)
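
The forward pass for this head is not shown; below is a hedged sketch of how the registered s{pi}_tpool / s{pi}_roi / s{pi}_spool modules are typically chained (the signature and input shapes are assumptions):

    def forward(self, inputs, bboxes):
        # inputs: list of p tensors of shape (N, C, T, H, W); bboxes: Nx5 rois
        pool_out = []
        for pi in range(self.num_pathways):
            x = inputs[pi]
            tpool = getattr(self, f's{pi}_tpool', None)
            if tpool is not None:
                x = tpool(x)              # average away the temporal dimension
            x = torch.squeeze(x, 2)       # (N, C, H, W); assumes T is now 1
            x = getattr(self, f's{pi}_roi')(x, bboxes)  # per-box crops
            x = getattr(self, f's{pi}_spool')(x)        # (N, C, 1, 1)
            pool_out.append(x)
        return torch.cat(pool_out, dim=1)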
Example #4
    def _add_densepose_masks_as_segmentation(
        self, annotations: Dict[str, Any], image_shape_hw: Tuple[int, int]
    ):
        for obj in annotations:
            if ("densepose" not in obj) or ("segmentation" in obj):
                continue
            # DP segmentation: torch.Tensor [S, S] of float32, S=256
            segm_dp = torch.zeros_like(obj["densepose"].segm)
            segm_dp[obj["densepose"].segm > 0] = 1
            segm_h, segm_w = segm_dp.shape
            bbox_segm_dp = torch.tensor((0, 0, segm_h - 1, segm_w - 1),
                                        dtype=torch.float32)
            # image bbox
            x0, y0, x1, y1 = (v.item() for v in BoxMode.convert(
                obj["bbox"], obj["bbox_mode"], BoxMode.XYXY_ABS))
            segm_aligned = (
                ROIAlign((y1 - y0, x1 - x0), 1.0, 0, aligned=True)
                .forward(segm_dp.view(1, 1, *segm_dp.shape), bbox_segm_dp)
                .squeeze()
            )
            image_mask = torch.zeros(*image_shape_hw, dtype=torch.float32)
            image_mask[y0:y1, x0:x1] = segm_aligned
            # segmentation for BitMask: np.array [H, W] of np.bool
            obj["segmentation"] = image_mask >= 0.5
Example #5
    def __init__(
        self,
        dim_in,
        num_classes,
        pool_size,
        resolution,
        scale_factor,
        dropout_rate=0.0,
        act_func="softmax",
        aligned=True,
    ):
        """
        The `__init__` method of any subclass should also contain these
            arguments.
        ResNetRoIHead takes p pathways as input where p in [1, infty].

        Args:
            dim_in (list): the list of channel dimensions of the p inputs to the
                ResNetHead.
            num_classes (int): the channel dimensions of the p outputs to the
                ResNetHead.
            pool_size (list): the list of kernel sizes for the p spatio-temporal
                poolings, each given in order as (temporal pool kernel size,
                spatial pool kernel size, spatial pool kernel size).
            resolution (list): the list of spatial output sizes from the ROIAlign.
            scale_factor (list): the list of strides of the p input feature maps;
                boxes are mapped to feature coordinates with
                spatial_scale = 1.0 / scale_factor.
            dropout_rate (float): dropout rate. If equal to 0.0, perform no
                dropout.
            act_func (string): activation function to use. 'softmax': applies
                softmax on the output. 'sigmoid': applies sigmoid on the output.
            aligned (bool): if False, use the legacy implementation. If True,
                align the results more perfectly.
        Note:
            Given a continuous coordinate c, its two neighboring pixel indices
            (in our pixel model) are computed by floor (c - 0.5) and ceil
            (c - 0.5). For example, c=1.3 has pixel neighbors with discrete
            indices [0] and [1] (which are sampled from the underlying signal at
            continuous coordinates 0.5 and 1.5). But the original roi_align
            (aligned=False) does not subtract the 0.5 when computing neighboring
            pixel indices and therefore it uses pixels with a slightly incorrect
            alignment (relative to our pixel model) when performing bilinear
            interpolation.
            With `aligned=True`, we first appropriately scale the ROI and then
            shift it by -0.5 prior to calling roi_align. This produces the
            correct neighbors; it makes a negligible difference to the model's
            performance when ROIAlign is used together with conv layers.
        """
        super(ResNetRoIHead, self).__init__()
        assert (
            len({len(pool_size), len(dim_in)}) == 1
        ), "pathway dimensions are not consistent."
        self.num_pathways = len(pool_size)
        for pathway in range(self.num_pathways):
            temporal_pool = nn.AvgPool3d(
                [pool_size[pathway][0], 1, 1], stride=1
            )
            self.add_module("s{}_tpool".format(pathway), temporal_pool)

            roi_align = ROIAlign(
                resolution[pathway],
                spatial_scale=1.0 / scale_factor[pathway],
                sampling_ratio=0,
                aligned=aligned,
            )
            self.add_module("s{}_roi".format(pathway), roi_align)
            spatial_pool = nn.MaxPool2d(resolution[pathway], stride=1)
            self.add_module("s{}_spool".format(pathway), spatial_pool)

        if dropout_rate > 0.0:
            self.dropout = nn.Dropout(dropout_rate)

        # Perform FC in a fully convolutional manner. The FC layer will be
        # initialized with a different std comparing to convolutional layers.
        self.projection = nn.Linear(sum(dim_in), num_classes, bias=True)

        # Softmax for evaluation and testing.
        if act_func == "softmax":
            self.act = nn.Softmax(dim=1)
        elif act_func == "sigmoid":
            self.act = nn.Sigmoid()
        else:
            raise NotImplementedError(
                "{} is not supported as an activation function.".format(act_func)
            )
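
A one-line check of the pixel model described in the Note above (not part of the original code): the neighbors of a continuous coordinate c are floor(c - 0.5) and ceil(c - 0.5).

    import math
    c = 1.3
    print(math.floor(c - 0.5), math.ceil(c - 0.5))  # 0 1, sampled at 0.5 and 1.5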
Example #6
    def __init__(
        self,
        output_size,
        scales,
        sampling_ratio,
        pooler_type,
        canonical_box_size=224,
        canonical_level=4,
    ):
        """
        Args:
            output_size (int, tuple[int] or list[int]): output size of the pooled region,
                e.g., 14 x 14. If tuple or list is given, the length must be 2.
            scales (list[float]): The scale for each low-level pooling op relative to
                the input image. For a feature map with stride s relative to the input
                image, scale is defined as a 1 / s. The stride must be power of 2.
                When there are multiple scales, they must form a pyramid, i.e. they must be
                a monotonically decreasing geometric sequence with a factor of 1/2.
            sampling_ratio (int): The `sampling_ratio` parameter for the ROIAlign op.
            pooler_type (string): Name of the type of pooling operation that should be applied.
                For instance, "ROIPool" or "ROIAlignV2".
            canonical_box_size (int): A canonical box size in pixels (sqrt(box area)). The default
                is heuristically defined as 224 pixels in the FPN paper (based on ImageNet
                pre-training).
            canonical_level (int): The feature map level index from which a canonically-sized box
                should be placed. The default is defined as level 4 (stride=16) in the FPN paper,
                i.e., a box of size 224x224 will be placed on the feature with stride=16.
                The box placement for all boxes will be determined from their sizes w.r.t
                canonical_box_size. For example, a box whose area is 4x that of a canonical box
                should be used to pool features from feature level ``canonical_level+1``.

                Note that the actual input feature maps given to this module may not have
                sufficiently many levels for the input boxes. If the boxes are too large or too
                small for the input feature maps, the closest level will be used.
        """
        super().__init__()

        if isinstance(output_size, int):
            output_size = (output_size, output_size)
        assert len(output_size) == 2
        assert isinstance(output_size[0], int) and isinstance(output_size[1], int)
        self.output_size = output_size

        if pooler_type == "ROIAlign":
            self.level_poolers = nn.ModuleList(
                ROIAlign(
                    output_size, spatial_scale=scale, sampling_ratio=sampling_ratio, aligned=False
                )
                for scale in scales
            )
        elif pooler_type == "ROIAlignV2":
            self.level_poolers = nn.ModuleList(
                ROIAlign(
                    output_size, spatial_scale=scale, sampling_ratio=sampling_ratio, aligned=True
                )
                for scale in scales
            )
        elif pooler_type == "ROIPool":
            self.level_poolers = nn.ModuleList(
                RoIPool(output_size, spatial_scale=scale) for scale in scales
            )
        elif pooler_type == "ROIAlignRotated":
            self.level_poolers = nn.ModuleList(
                ROIAlignRotated(output_size, spatial_scale=scale, sampling_ratio=sampling_ratio)
                for scale in scales
            )
        else:
            raise ValueError("Unknown pooler type: {}".format(pooler_type))

        # Map scale (defined as 1 / stride) to its feature map level under the
        # assumption that stride is a power of 2.
        min_level = -(math.log2(scales[0]))
        max_level = -(math.log2(scales[-1]))
        assert math.isclose(min_level, int(min_level)) and math.isclose(
            max_level, int(max_level)
        ), "Featuremap stride is not power of 2!"
        self.min_level = int(min_level)
        self.max_level = int(max_level)
        assert (
            len(scales) == self.max_level - self.min_level + 1
        ), "[ROIPooler] Sizes of input featuremaps do not form a pyramid!"
        assert 0 <= self.min_level <= self.max_level
        self.canonical_level = canonical_level
        assert canonical_box_size > 0
        self.canonical_box_size = canonical_box_size
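
The canonical_box_size / canonical_level arguments encode the FPN level-assignment heuristic the docstring describes. A hedged re-derivation, not the library code (detectron2 implements this inside its pooler module):

    import math

    def assign_level(box_area, min_level=2, max_level=5,
                     canonical_box_size=224, canonical_level=4):
        # the level goes up by one each time sqrt(area) doubles
        # relative to the canonical 224-pixel box
        level = math.floor(
            canonical_level + math.log2(math.sqrt(box_area) / canonical_box_size))
        return min(max(level, min_level), max_level)  # closest available level

    assign_level(224 * 224)      # -> 4: canonical box, stride-16 features
    assign_level(4 * 224 * 224)  # -> 5: 4x the area, one level up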
Example #7
    def __init__(
        self,
        output_size,
        scales,
        sampling_ratio,
        pooler_type,
        canonical_box_size=224,
        canonical_level=4,
    ):
        """
        Args:
            output_size (int, tuple[int] or list[int]): output size of the pooled region,
                e.g., 14 x 14. If tuple or list is given, the length must be 2.
            scales (list[float]): The scale for each low-level pooling op relative to
                the input image. For a feature map with stride s relative to the input
                image, scale is defined as a 1 / s.
            sampling_ratio (int): The `sampling_ratio` parameter for the ROIAlign op.
            pooler_type (string): Name of the type of pooling operation that should be applied.
                For instance, "ROIPool" or "ROIAlignV2".
            canonical_box_size (int): A canonical box size in pixels (sqrt(box area)). The default
                is heuristically defined as 224 pixels in the FPN paper (based on ImageNet
                pre-training).
            canonical_level (int): The feature map level index on which a canonically-sized box
                should be placed. The default is defined as level 4 in the FPN paper.
        """
        super().__init__()

        if isinstance(output_size, int):
            output_size = (output_size, output_size)
        assert len(output_size) == 2
        assert isinstance(output_size[0], int) and isinstance(output_size[1], int)
        self.output_size = output_size

        if pooler_type == "ROIAlign":
            self.level_poolers = nn.ModuleList(
                ROIAlign(
                    output_size, spatial_scale=scale, sampling_ratio=sampling_ratio, aligned=False
                )
                for scale in scales
            )
        elif pooler_type == "ROIAlignV2":
            self.level_poolers = nn.ModuleList(
                ROIAlign(
                    output_size, spatial_scale=scale, sampling_ratio=sampling_ratio, aligned=True
                )
                for scale in scales
            )
        elif pooler_type == "ROIPool":
            self.level_poolers = nn.ModuleList(
                RoIPool(output_size, spatial_scale=scale) for scale in scales
            )
        elif pooler_type == "ROIAlignRotated":
            self.level_poolers = nn.ModuleList(
                ROIAlignRotated(output_size, spatial_scale=scale, sampling_ratio=sampling_ratio)
                for scale in scales
            )
        else:
            raise ValueError("Unknown pooler type: {}".format(pooler_type))

        # Map scale (defined as 1 / stride) to its feature map level under the
        # assumption that stride is a power of 2.
        min_level = -math.log2(scales[0])
        max_level = -math.log2(scales[-1])
        assert math.isclose(min_level, int(min_level)) and math.isclose(max_level, int(max_level))
        self.min_level = int(min_level)
        self.max_level = int(max_level)
        assert 0 < self.min_level <= self.max_level
        assert self.min_level <= canonical_level <= self.max_level
        self.canonical_level = canonical_level
        assert canonical_box_size > 0
        self.canonical_box_size = canonical_box_size
Example #8
    def __init__(self, config, average_pool=True, final_dim=768):
        """
        :param config: network configuration node (config.NETWORK.*)
        :param average_pool: whether or not to average pool the representations
        :param final_dim: output dimension of the object feature projection
        """
        super(FastRCNN, self).__init__()
        self.average_pool = average_pool
        self.final_dim = final_dim

        # about the resnet network
        self.stride_in_1x1 = config.NETWORK.IMAGE_STRIDE_IN_1x1
        self.c5_dilated = config.NETWORK.IMAGE_C5_DILATED
        self.num_layers = config.NETWORK.IMAGE_NUM_LAYERS
        self.pretrained_model_path = '{}-{:04d}.model'.format(
            config.NETWORK.IMAGE_PRETRAINED,
            config.NETWORK.IMAGE_PRETRAINED_EPOCH
        ) if config.NETWORK.IMAGE_PRETRAINED != '' else None
        self.output_conv5 = config.NETWORK.OUTPUT_CONV5
        if self.num_layers == 18:
            self.backbone = resnet18(
                pretrained=True,
                pretrained_model_path=self.pretrained_model_path,
                expose_stages=[4])
            block = BasicBlock
        elif self.num_layers == 34:
            self.backbone = resnet34(
                pretrained=True,
                pretrained_model_path=self.pretrained_model_path,
                expose_stages=[4])
            block = BasicBlock
        elif self.num_layers == 50:
            self.backbone = resnet50(
                pretrained=True,
                pretrained_model_path=self.pretrained_model_path,
                expose_stages=[4],
                stride_in_1x1=self.stride_in_1x1)
            block = Bottleneck
        elif self.num_layers == 101:
            self.backbone = resnet101(
                pretrained=True,
                pretrained_model_path=self.pretrained_model_path,
                expose_stages=[4],
                stride_in_1x1=self.stride_in_1x1)
            block = Bottleneck
        elif self.num_layers == 152:
            self.backbone = resnet152(
                pretrained=True,
                pretrained_model_path=self.pretrained_model_path,
                expose_stages=[4],
                stride_in_1x1=self.stride_in_1x1)
            block = Bottleneck
        else:
            raise NotImplementedError(
                "unsupported number of layers: {}".format(self.num_layers))

        # for roi align
        output_size = (14, 14)
        self.roi_align = ROIAlign(output_size=output_size,
                                  spatial_scale=1.0 / 16,
                                  sampling_ratio=2)

        # if object labels are available
        if config.NETWORK.IMAGE_SEMANTIC:
            self.object_embed = torch.nn.Embedding(num_embeddings=81,
                                                   embedding_dim=128)
        else:
            self.object_embed = None
            self.mask_upsample = None

        # construct a head feature extractor
        self.roi_head_feature_extractor = self.backbone._make_layer(
            block=block,
            planes=512,
            blocks=3,
            stride=2 if not self.c5_dilated else 1,
            dilation=1 if not self.c5_dilated else 2,
            stride_in_1x1=self.stride_in_1x1)
        if average_pool:
            self.head = torch.nn.Sequential(
                self.roi_head_feature_extractor,
                nn.AvgPool2d(7 if not self.c5_dilated else 14, stride=1),
                Flattener())
        else:
            self.head = self.roi_head_feature_extractor

        # if we need to freeze some layers
        if config.NETWORK.IMAGE_FROZEN_BN:
            for module in self.roi_head_feature_extractor.modules():
                if isinstance(module, nn.BatchNorm2d):
                    for param in module.parameters():
                        param.requires_grad = False

        frozen_stages = config.NETWORK.IMAGE_FROZEN_BACKBONE_STAGES
        if 5 in frozen_stages:
            for p in self.roi_head_feature_extractor.parameters():
                p.requires_grad = False
            frozen_stages = [stage for stage in frozen_stages if stage != 5]
        self.backbone.frozen_parameters(
            frozen_stages=frozen_stages,
            frozen_bn=config.NETWORK.IMAGE_FROZEN_BN)

        # downsample the object feats
        self.obj_downsample = torch.nn.Sequential(
            torch.nn.Dropout(p=0.1),
            torch.nn.Linear(
                2 * 2048 + (128 if config.NETWORK.IMAGE_SEMANTIC else 0),
                final_dim),
            torch.nn.ReLU(inplace=True),
        )
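
The forward pass is not shown; below is a hedged sketch of how the 14x14 RoI features are typically produced and fed to the head (tensor names are assumptions, and a single-image batch is assumed for the zero batch indices):

    # img_feats: (B, C, H/16, W/16) conv4 output; boxes: (N, 4) XYXY image coords
    batch_idx = torch.zeros(boxes.size(0), 1, dtype=boxes.dtype, device=boxes.device)
    rois = torch.cat([batch_idx, boxes], dim=1)  # (N, 5): batch index + box
    roi_feats = self.roi_align(img_feats, rois)  # (N, C, 14, 14) at 1/16 scale
    obj_reps = self.head(roi_feats)              # C5 blocks (+ avg pool + flatten)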