Example #1
0
    def __init__(self, in_channels, representation_size):
        """Two-layer MLP box head (fc6/fc7) with in-place activated BN.

        Arguments:
            in_channels (int): number of flattened input features
            representation_size (int): width of both hidden layers
        """
        super(TwoMLPHead, self).__init__()

        self.fc6 = nn.Linear(in_channels, representation_size)
        self.fc7 = nn.Linear(representation_size, representation_size)
        # Fix: the ABN width must follow representation_size (the output
        # width of fc6/fc7). It was hard-coded to 1024, which breaks for
        # any other representation_size.
        self.inABN1 = InPlaceABN(representation_size)
        self.inABN2 = InPlaceABN(representation_size)
Example #2
0
    def __init__(self, block_args, global_params):
        """Build one MBConv-style block: expand -> depthwise -> project.

        Arguments:
            block_args: per-block hyper-parameters (filters, kernel size,
                stride, expand ratio, se ratio, skip flag)
            global_params: network-wide settings (BN momentum/epsilon,
                image size)
        """
        super().__init__()
        self._block_args = block_args
        self._bn_mom = 1 - global_params.batch_norm_momentum
        self._bn_eps = global_params.batch_norm_epsilon
        se_ratio = block_args.se_ratio
        self.has_se = se_ratio is not None and 0 < se_ratio <= 1
        self.id_skip = block_args.id_skip  # skip connection and drop connect

        # The Conv2d flavour (static vs. dynamic same-padding) depends on
        # whether the image size is known up front.
        SamePadConv2d = get_same_padding_conv2d(
            image_size=global_params.image_size)

        n_in = block_args.input_filters    # input channel count
        n_mid = n_in * block_args.expand_ratio  # expanded channel count

        # 1x1 expansion, skipped when the block does not expand.
        if block_args.expand_ratio != 1:
            self._expand_conv = SamePadConv2d(in_channels=n_in,
                                              out_channels=n_mid,
                                              kernel_size=1,
                                              bias=False)
            self._bn0 = InPlaceABN(n_mid)

        # Depthwise conv: groups == channels makes each filter per-channel.
        self._depthwise_conv = SamePadConv2d(
            in_channels=n_mid,
            out_channels=n_mid,
            groups=n_mid,
            kernel_size=block_args.kernel_size,
            stride=block_args.stride,
            bias=False)
        self._bn1 = InPlaceABN(n_mid)

        # Squeeze-and-excitation was removed from this variant
        # (has_se is still recorded for the forward pass).

        # 1x1 projection down to the block's output width.
        n_out = block_args.output_filters
        self._project_conv = SamePadConv2d(in_channels=n_mid,
                                           out_channels=n_out,
                                           kernel_size=1,
                                           bias=False)
        self._bn2 = InPlaceABN(n_out)
Example #3
0
 def __init__(self, in_channels, dim_reduced, num_classes):
     """Mask predictor: 2x transposed-conv upsample, ABN, then 1x1 logits."""
     layers = OrderedDict()
     layers["conv5_mask"] = nn.ConvTranspose2d(in_channels, dim_reduced, 2,
                                               2, 0)
     layers["InPlaceABN"] = InPlaceABN(dim_reduced)
     layers["mask_fcn_logits"] = nn.Conv2d(dim_reduced, num_classes, 1, 1, 0)
     super(MaskRCNNPredictor, self).__init__(layers)
Example #4
0
    def __init__(self, in_channels, layers, dilation):
        """
        Arguments:
            in_channels (int): number of input channels
            layers (list): feature dimensions of each FCN layer
            dilation (int): dilation rate of kernel
        """
        modules = OrderedDict()
        prev_channels = in_channels
        # One (dilated 3x3 conv, InPlaceABN) pair per requested layer,
        # named mask_fcn1/InPlaceABN1, mask_fcn2/InPlaceABN2, ...
        for idx, channels in enumerate(layers, 1):
            modules["mask_fcn{}".format(idx)] = nn.Conv2d(prev_channels,
                                                          channels,
                                                          kernel_size=3,
                                                          stride=1,
                                                          padding=dilation,
                                                          dilation=dilation)
            modules["InPlaceABN{}".format(idx)] = InPlaceABN(channels)
            prev_channels = channels

        super(MaskRCNNHeads, self).__init__(modules)
Example #5
0
    def __init__(self, in_channels, num_anchors):
        """RPN head: shared 3x3 conv feeding objectness and box-delta branches.

        Arguments:
            in_channels (int): channels of the incoming feature map
            num_anchors (int): anchors per spatial location
        """
        super(RPNHead, self).__init__()
        self.conv = nn.Conv2d(in_channels,
                              in_channels,
                              kernel_size=3,
                              stride=1,
                              padding=1)
        self.cls_logits = nn.Conv2d(in_channels,
                                    num_anchors,
                                    kernel_size=1,
                                    stride=1)
        self.bbox_pred = nn.Conv2d(in_channels,
                                   num_anchors * 4,
                                   kernel_size=1,
                                   stride=1)

        self.inABN = InPlaceABN(in_channels)

        # Fix: initialize only the conv layers. The original loop ran over
        # *all* children, which also overwrote the InPlaceABN affine
        # parameters (weight -> N(0, 0.01^2), bias -> 0), effectively
        # disabling the normalization's scale.
        for layer in self.children():
            if isinstance(layer, nn.Conv2d):
                torch.nn.init.normal_(layer.weight, std=0.01)
                torch.nn.init.constant_(layer.bias, 0)
Example #6
0
    def __init__(self,
                 blocks,
                 blocks_args=None,
                 global_params=None,
                 out_channels=256):
        """EfficientNet-style backbone with a feature-pyramid top.

        Arguments:
            blocks: seven pre-built stages of backbone blocks
            blocks_args (list): per-block hyper-parameters; must be non-empty
            global_params: network-wide settings (image size, BN constants)
            out_channels (int): channel width reported to downstream heads
        """
        super().__init__()

        assert isinstance(blocks_args, list), 'blocks_args should be a list'
        assert len(blocks_args) > 0, 'block args must be greater than 0'
        self._global_params = global_params
        self._blocks_args = blocks_args

        # Conv2d flavour (static vs. dynamic same-padding) depends on the
        # image size.
        Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)

        # Stem: 3 -> 48 channels, stride 2.
        self._conv_stem = Conv2d(3, 48, kernel_size=3, stride=2, bias=False)
        self._bn0 = InPlaceABN(48)

        # Seven backbone stages registered as blocks0..blocks6 (same
        # attribute names and registration order as before).
        for stage_idx in range(7):
            setattr(self, 'blocks{}'.format(stage_idx), blocks[stage_idx])

        # Head: 512 -> 2048 channels.
        self._conv_head = Conv2d(512, 2048, kernel_size=1, bias=False)
        self._bn1 = InPlaceABN(2048)

        same_Conv2d = get_same_padding_conv2d(
            image_size=global_params.image_size)

        # Lateral 1x1 convs projecting tapped feature maps to 256 channels.
        # NOTE(review): the 40/64/176/2048 input widths presumably match
        # specific backbone stages — verify against forward().
        self.conv_up1 = same_Conv2d(40, 256, kernel_size=1, stride=1,
                                    bias=False)
        self.conv_up2 = same_Conv2d(64, 256, kernel_size=1, stride=1,
                                    bias=False)
        self.conv_up3 = same_Conv2d(176, 256, kernel_size=1, stride=1,
                                    bias=False)
        self.conv_up4 = same_Conv2d(2048, 256, kernel_size=1, stride=1,
                                    bias=False)

        # One ABN per lateral branch.
        self.inABNone = InPlaceABN(256)
        self.inABNtwo = InPlaceABN(256)
        self.inABNthree = InPlaceABN(256)
        self.inABNfour = InPlaceABN(256)

        # Separable 3x3 convs, one per pyramid level.
        self.separable1 = SeparableConv2d(256, 256, 3)
        self.separable2 = SeparableConv2d(256, 256, 3)
        self.separable3 = SeparableConv2d(256, 256, 3)
        self.separable4 = SeparableConv2d(256, 256, 3)

        # One ABN per separable conv.
        self.SepinABNone = InPlaceABN(256)
        self.SepinABNtwo = InPlaceABN(256)
        self.SepinABNthree = InPlaceABN(256)
        self.SepinABNfour = InPlaceABN(256)

        # Top-down 2x bilinear upsampling.
        self.up1 = nn.Upsample(scale_factor=2, mode='bilinear')
        self.up2 = nn.Upsample(scale_factor=2, mode='bilinear')
        self.up3 = nn.Upsample(scale_factor=2, mode='bilinear')

        # Bottom-up 2x max-pool downsampling.
        self.down1 = nn.MaxPool2d(2, stride=2)
        self.down2 = nn.MaxPool2d(2, stride=2)
        self.down3 = nn.MaxPool2d(2, stride=2)

        # Extra coarse level below the last pyramid output.
        self.additional_down = nn.MaxPool2d(2, stride=2)

        self.out_channels = out_channels