Example #1
    def forward(self, x):
        """
        Args:
            - x: The convOut from a layer in the backbone network
                 Size: [batch_size, in_channels, conv_h, conv_w]

        Returns a dict {'loc': bbox_coords, 'conf': class_confs, 'mask': mask_output, 'priors': prior_boxes} with sizes
            - bbox_coords: [batch_size, conv_h*conv_w*num_priors, 4]
            - class_confs: [batch_size, conv_h*conv_w*num_priors, num_classes]
            - mask_output: [batch_size, conv_h*conv_w*num_priors, mask_dim]
            - prior_boxes: [conv_h*conv_w*num_priors, 4]
        """
        # In case we want to use another module's layers
        src = self if self.parent[0] is None else self.parent[0]

        conv_h = x.size(2)
        conv_w = x.size(3)

        if cfg.extra_head_net is not None:
            x = src.upfeature(x)

        if cfg.use_prediction_module:
            # The two branches of PM design (c)
            a = src.block(x)

            b = src.conv(x)
            b = src.bn(b)
            b = F.relu(b)

            # TODO: Possibly switch this out for a product
            x = a + b

        bbox_x = src.bbox_extra(x)
        conf_x = src.conf_extra(x)
        mask_x = src.mask_extra(x)

        bbox = src.bbox_layer(bbox_x).permute(0, 2, 3, 1).contiguous().view(
            x.size(0), -1, 4)
        conf = src.conf_layer(conf_x).permute(0, 2, 3, 1).contiguous().view(
            x.size(0), -1, self.num_classes)
        if cfg.eval_mask_branch:
            mask = src.mask_layer(mask_x).permute(
                0, 2, 3, 1).contiguous().view(x.size(0), -1, self.mask_dim)
        else:
            mask = torch.zeros(x.size(0),
                               bbox.size(1),
                               self.mask_dim,
                               device=bbox.device)

        if cfg.use_instance_coeff:
            inst = src.inst_layer(x).permute(0, 2, 3, 1).contiguous().view(
                x.size(0), -1, cfg.num_instance_coeffs)

        # See box_utils.decode for an explanation of this
        if cfg.use_yolo_regressors:
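            # sigmoid(x) - 0.5 bounds the predicted center offset to
            # (-0.5, 0.5) of one grid cell; dividing by conv_w / conv_h
            # then expresses it relative to the whole feature map.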
            bbox[:, :, :2] = torch.sigmoid(bbox[:, :, :2]) - 0.5
            bbox[:, :, 0] /= conv_w
            bbox[:, :, 1] /= conv_h

        if cfg.eval_mask_branch:
            if cfg.mask_type == mask_type.direct:
                mask = torch.sigmoid(mask)
            elif cfg.mask_type == mask_type.lincomb:
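                # lincomb masks are assembled downstream as a linear
                # combination of prototype masks weighted by these
                # per-prior coefficients.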
                mask = cfg.mask_proto_coeff_activation(mask)

                if cfg.mask_proto_coeff_gate:
                    gate = src.gate_layer(x).permute(0, 2, 3,
                                                     1).contiguous().view(
                                                         x.size(0), -1,
                                                         self.mask_dim)
                    mask = mask * torch.sigmoid(gate)

        priors = self.make_priors(conv_h, conv_w)

        preds = {'loc': bbox, 'conf': conf, 'mask': mask, 'priors': priors}

        if cfg.use_instance_coeff:
            preds['inst'] = inst

        return preds
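
The permute(0, 2, 3, 1).contiguous().view(...) chains above are what turn a channels-first head output into per-prior predictions. A minimal, self-contained sketch of that reshape; the sizes below are arbitrary illustration values, not YOLACT's:

import torch

batch_size, num_priors, num_classes = 2, 3, 81
conv_h, conv_w = 5, 5

# Fake conf-head output: one channel per (prior, class) pair,
# channels-first: [batch_size, num_priors*num_classes, conv_h, conv_w].
conf_out = torch.randn(batch_size, num_priors * num_classes, conv_h, conv_w)

# Move channels last, then flatten H, W, and the prior axis together:
# [N, H, W, num_priors*num_classes] -> [N, H*W*num_priors, num_classes].
conf = conf_out.permute(0, 2, 3, 1).contiguous().view(batch_size, -1, num_classes)

assert conf.shape == (batch_size, conv_h * conv_w * num_priors, num_classes)
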
Example #2
    def forward(self, x):
        """
        Args:
            - x: The convOut from a layer in the backbone network
                 Size: [batch_size, in_channels, conv_h, conv_w] -- channels-first layout

        Returns a dict {'loc': bbox_coords, 'conf': class_confs, 'mask': mask_output, 'priors': prior_boxes} with sizes
            - bbox_coords: [batch_size, conv_h*conv_w*num_priors, 4]
            - class_confs: [batch_size, conv_h*conv_w*num_priors, num_classes]
            - mask_output: [batch_size, conv_h*conv_w*num_priors, mask_dim]
            - prior_boxes: [conv_h*conv_w*num_priors, 4]
        """
        # x is one of the FPN outputs:
        #   outs[0]: (n, 256, 69, 69)
        #   outs[1]: (n, 256, 34, 34)
        #   outs[2]: (n, 256, 17, 17)
        #   outs[3]: (n, 256, 8, 8)
        #   outs[4]: (n, 256, 4, 4)

        # From the second call onward, this head has a parent.
        # In case we want to use another module's layers
        src = self if self.parent[0] is None else self.parent[0]

        conv_h = x.size(2)
        conv_w = x.size(3)

        #'extra_head_net': [(256, 3, {'padding': 1})],
        if cfg.extra_head_net is not None:
            x = src.upfeature(x)

        # False
        if cfg.use_prediction_module:
            # The two branches of PM design (c)
            a = src.block(x)

            b = src.conv(x)
            b = src.bn(b)
            b = F.relu(b)

            # TODO: Possibly switch this out for a product
            x = a + b

        # bbox_extra, conf_extra, and mask_extra are identity functions (lambda x: x) here
        bbox_x = src.bbox_extra(x)
        conf_x = src.conf_extra(x)
        mask_x = src.mask_extra(x)

        #   At each of the H*W cells, num_priors (9 here) prior boxes are
        #   generated; each one gets 4 box coordinates and num_classes
        #   confidence values.
        #   bbox_coords: [batch_size, conv_h*conv_w*num_priors, 4]
        #   class_confs: [batch_size, conv_h*conv_w*num_priors, num_classes]
        bbox = src.bbox_layer(bbox_x).permute(0, 2, 3, 1).contiguous().view(
            x.size(0), -1, 4)
        conf = src.conf_layer(conf_x).permute(0, 2, 3, 1).contiguous().view(
            x.size(0), -1, self.num_classes)

        # True
        if cfg.eval_mask_branch:
            #   mask_output: [batch_size, conv_h*conv_w*num_priors, mask_dim]
            mask = src.mask_layer(mask_x).permute(
                0, 2, 3, 1).contiguous().view(x.size(0), -1, self.mask_dim)
        else:
            mask = torch.zeros(x.size(0),
                               bbox.size(1),
                               self.mask_dim,
                               device=bbox.device)

        # False
        if cfg.use_mask_scoring:
            score = src.score_layer(x).permute(0, 2, 3, 1).contiguous().view(
                x.size(0), -1, 1)
        # False
        if cfg.use_instance_coeff:
            inst = src.inst_layer(x).permute(0, 2, 3, 1).contiguous().view(
                x.size(0), -1, cfg.num_instance_coeffs)

        # See box_utils.decode for an explanation of this
        if cfg.use_yolo_regressors:
            bbox[:, :, :2] = torch.sigmoid(bbox[:, :, :2]) - 0.5
            bbox[:, :, 0] /= conv_w
            bbox[:, :, 1] /= conv_h

        if cfg.eval_mask_branch:
            if cfg.mask_type == mask_type.direct:
                mask = torch.sigmoid(mask)
            # This is the branch taken in this config
            elif cfg.mask_type == mask_type.lincomb:
                mask = cfg.mask_proto_coeff_activation(mask)  # tanh in this config

                if cfg.mask_proto_coeff_gate:
                    gate = src.gate_layer(x).permute(0, 2, 3,
                                                     1).contiguous().view(
                                                         x.size(0), -1,
                                                         self.mask_dim)
                    mask = mask * torch.sigmoid(gate)

        # False
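        # Each head owns a mask_dim-wide slice of the full coefficient
        # vector (num_heads * mask_dim long); F.pad zero-fills the other
        # heads' slices so outputs from all heads concatenate consistently.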
        if cfg.mask_proto_split_prototypes_by_head and cfg.mask_type == mask_type.lincomb:
            mask = F.pad(mask,
                         (self.index * self.mask_dim,
                          (self.num_heads - self.index - 1) * self.mask_dim),
                         mode='constant',
                         value=0)

        priors = self.make_priors(conv_h, conv_w, x.device)

        preds = {'loc': bbox, 'conf': conf, 'mask': mask, 'priors': priors}

        # False
        if cfg.use_mask_scoring:
            preds['score'] = score
        # False
        if cfg.use_instance_coeff:
            preds['inst'] = inst

        return preds
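
Both examples end by building priors with self.make_priors(conv_h, conv_w[, device]), whose body is not shown on this page. A hedged sketch of grid-anchored prior generation in that spirit; the scale, aspect_ratios, and max_size defaults are illustrative assumptions, not YOLACT's configured values:

import torch
from itertools import product
from math import sqrt

def make_priors_sketch(conv_h, conv_w, scale=24, aspect_ratios=(1, 0.5, 2),
                       max_size=550, device='cpu'):
    """Build [conv_h*conv_w*num_priors, 4] priors as (cx, cy, w, h),
    normalized to [0, 1]. All numeric defaults are assumptions."""
    prior_data = []
    # Walk cells row-major so the prior order matches the flattened
    # [H, W, num_priors] layout produced by permute(0, 2, 3, 1).view(...).
    for j, i in product(range(conv_h), range(conv_w)):
        cx = (i + 0.5) / conv_w  # cell center x, normalized
        cy = (j + 0.5) / conv_h  # cell center y, normalized
        for ar in aspect_ratios:
            r = sqrt(ar)
            prior_data += [cx, cy, scale * r / max_size, scale / r / max_size]
    return torch.tensor(prior_data, device=device).view(-1, 4)

priors = make_priors_sketch(5, 5)
assert priors.shape == (5 * 5 * 3, 4)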