Example #1
    def __init__(self, cfg, in_channels):
        """
        Arguments:
            in_channels (int): number of channels of the input feature
            num_anchors (int): number of anchors to be predicted
        """
        super(RetinaNetHead, self).__init__()
        # TODO: Implement the sigmoid version first.
        num_classes = cfg.MODEL.RETINANET.NUM_CLASSES - 1
        num_anchors = len(cfg.MODEL.RETINANET.ASPECT_RATIOS) \
                        * cfg.MODEL.RETINANET.SCALES_PER_OCTAVE

        cls_tower = []
        bbox_tower = []
        for i in range(cfg.MODEL.RETINANET.NUM_CONVS):
            cls_tower.append(
                nn.Conv(
                    in_channels,
                    in_channels,
                    kernel_size=3,
                    stride=1,
                    padding=1
                )
            )
            cls_tower.append(nn.ReLU())
            bbox_tower.append(
                nn.Conv(
                    in_channels,
                    in_channels,
                    kernel_size=3,
                    stride=1,
                    padding=1
                )
            )
            bbox_tower.append(nn.ReLU())

        self.cls_tower = nn.Sequential(*cls_tower)
        self.bbox_tower = nn.Sequential(*bbox_tower)
        self.cls_logits = nn.Conv(
            in_channels, num_anchors * num_classes, kernel_size=3, stride=1,
            padding=1
        )
        self.bbox_pred = nn.Conv(
            in_channels, num_anchors * 4, kernel_size=3, stride=1,
            padding=1
        )

        # Initialization
        for modules in [self.cls_tower, self.bbox_tower, self.cls_logits,
                  self.bbox_pred]:
            for l in modules.modules():
                if isinstance(l, nn.Conv):
                    init.gauss_(l.weight, std=0.01)
                    init.constant_(l.bias, 0)


        # RetinaNet bias initialization: start the classification logits at the
        # focal-loss prior so early training is not swamped by background anchors.
        prior_prob = cfg.MODEL.RETINANET.PRIOR_PROB
        bias_value = -math.log((1 - prior_prob) / prior_prob)
        init.constant_(self.cls_logits.bias, bias_value)
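For reference, a quick self-contained check of the bias formula used above, assuming the common prior of 0.01: the classification bias is set so the initial sigmoid output equals prior_prob, i.e. nearly every anchor starts out scored as background.

import math

prior_prob = 0.01  # hypothetical value of cfg.MODEL.RETINANET.PRIOR_PROB
bias_value = -math.log((1 - prior_prob) / prior_prob)
print(bias_value)  # ~ -4.595, and sigmoid(-4.595) ~ 0.01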
Example #2
    def __init__(self, n_classes=40):
        super(DGCNN, self).__init__()
        self.k = 20
        self.knn = KNN(self.k)
        self.bn1 = nn.BatchNorm(64)
        self.bn2 = nn.BatchNorm(64)
        self.bn3 = nn.BatchNorm(128)
        self.bn4 = nn.BatchNorm(256)
        self.bn5 = nn.BatchNorm1d(1024)

        self.conv1 = nn.Sequential(nn.Conv(6, 64, kernel_size=1, bias=False),
                                   self.bn1,
                                   nn.LeakyReLU(scale=0.2))
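        # The doubled input channels below (64*2, 128*2, ...) come from DGCNN edge
        # features: each point's feature is concatenated with the (neighbor - point)
        # difference over its k-NN graph, just as conv1 above takes 6 = 3*2 xyz channels.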
        self.conv2 = nn.Sequential(nn.Conv(64*2, 64, kernel_size=1, bias=False),
                                   self.bn2,
                                   nn.LeakyReLU(scale=0.2))
        self.conv3 = nn.Sequential(nn.Conv(64*2, 128, kernel_size=1, bias=False),
                                   self.bn3,
                                   nn.LeakyReLU(scale=0.2))
        self.conv4 = nn.Sequential(nn.Conv(128*2, 256, kernel_size=1, bias=False),
                                   self.bn4,
                                   nn.LeakyReLU(scale=0.2))
        self.conv5 = nn.Sequential(nn.Conv1d(512, 1024, kernel_size=1, bias=False),
                                   self.bn5,
                                   nn.LeakyReLU(scale=0.2))
        self.linear1 = nn.Linear(1024*2, 512, bias=False)
        self.bn6 = nn.BatchNorm1d(512)
        self.dp1 = nn.Dropout(p=0.5)
        self.linear2 = nn.Linear(512, 256)
        self.bn7 = nn.BatchNorm1d(256)
        self.dp2 = nn.Dropout(p=0.5)
        self.linear3 = nn.Linear(256, n_classes)
Example #3
    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm(planes)
        self.conv2 = nn.Conv(planes,
                             planes,
                             kernel_size=3,
                             stride=stride,
                             padding=1,
                             bias=False)
        self.bn2 = nn.BatchNorm(planes)
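        # Note: self.expansion is a class attribute defined outside this snippet
        # (conventionally 4 for bottleneck blocks).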
        self.conv3 = nn.Conv(planes,
                             self.expansion * planes,
                             kernel_size=1,
                             bias=False)
        self.bn3 = nn.BatchNorm(self.expansion * planes)

        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv(in_planes,
                        self.expansion * planes,
                        kernel_size=1,
                        stride=stride,
                        bias=False), nn.BatchNorm(self.expansion * planes))
Example #4
    def __init__(self, in_channels, out_channels):
        super(DANetHead, self).__init__()
        inter_channels = in_channels // 4
        self.conv5a = nn.Sequential(
            nn.Conv(in_channels, inter_channels, 3, padding=1, bias=False),
            nn.BatchNorm(inter_channels), nn.ReLU())

        self.conv5c = nn.Sequential(
            nn.Conv(in_channels, inter_channels, 3, padding=1, bias=False),
            nn.BatchNorm(inter_channels), nn.ReLU())

        self.sa = PAM_Module(inter_channels)
        self.sc = CAM_Module(inter_channels)
        self.conv51 = nn.Sequential(
            nn.Conv(inter_channels, inter_channels, 3, padding=1, bias=False),
            nn.BatchNorm(inter_channels), nn.ReLU())
        self.conv52 = nn.Sequential(
            nn.Conv(inter_channels, inter_channels, 3, padding=1, bias=False),
            nn.BatchNorm(inter_channels), nn.ReLU())

        #        self.conv6 = nn.Sequential(nn.Dropout(0.1, False), nn.Conv(inter_channels, out_channels, 1))
        #        self.conv7 = nn.Sequential(nn.Dropout(0.1, False), nn.Conv(inter_channels, out_channels, 1))

        self.conv8 = nn.Sequential(nn.Dropout(0.1, False),
                                   nn.Conv(inter_channels, out_channels, 1))
Example #5
 def __init__(self):
     super(Generator, self).__init__()
     self.init_size = (opt.img_size // 4)
     self.l1 = nn.Sequential(nn.Linear(opt.latent_dim, (128 * (self.init_size ** 2))))
     self.conv_blocks = nn.Sequential(
         nn.BatchNorm(128), nn.Upsample(scale_factor=2),
         nn.Conv(128, 128, 3, stride=1, padding=1),
         nn.BatchNorm(128, eps=0.8), nn.LeakyReLU(scale=0.2),
         nn.Upsample(scale_factor=2),
         nn.Conv(128, 64, 3, stride=1, padding=1),
         nn.BatchNorm(64, eps=0.8), nn.LeakyReLU(scale=0.2),
         nn.Conv(64, opt.channels, 3, stride=1, padding=1), nn.Tanh())
     for m in self.conv_blocks:
         weights_init_normal(m)
Example #6
    def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes,
                          planes * block.expansion,
                          kernel_size=1,
                          stride=stride,
                          bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(
            block(self.inplanes,
                  planes,
                  stride,
                  downsample=downsample,
                  stype='stage',
                  baseWidth=self.baseWidth,
                  scale=self.scale))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(
                block(self.inplanes,
                      planes,
                      baseWidth=self.baseWidth,
                      scale=self.scale))

        return nn.Sequential(*layers)
Example #7
    def _make_MG_unit(self, block, planes, blocks, stride=1, dilation=1):
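        # Note: unlike _make_layer, `blocks` here is a sequence of multi-grid
        # dilation multipliers (e.g. [1, 2, 4]), not a block count.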
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv(self.inplanes,
                        planes * block.expansion,
                        kernel_size=1,
                        stride=stride,
                        bias=False),
                nn.BatchNorm(planes * block.expansion),
            )

        layers = []
        layers.append(
            block(self.inplanes,
                  planes,
                  stride,
                  dilation=blocks[0] * dilation,
                  downsample=downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, len(blocks)):
            layers.append(
                block(self.inplanes,
                      planes,
                      stride=1,
                      dilation=blocks[i] * dilation))

        return nn.Sequential(*layers)
Example #8
 def __init__(self,
              input_nc,
              num_plane,
              num_quat,
              biasTerms,
              activation=nn.LeakyReLU(scale=0.2)):
     super(symPred, self).__init__()
     self.num_quat = num_quat
     for i in range(self.num_quat):
         quatLayer = [
             nn.Linear(input_nc, int((input_nc / 2))), activation,
             nn.Linear(int((input_nc / 2)), int((input_nc / 4))), activation
         ]
         last = nn.Linear(int((input_nc / 4)), 4)
         last.bias.data = jt.transform.to_tensor(
             jt.array(biasTerms[('quat' + str((i + 1)))]))
         quatLayer += [last]
         setattr(self, ('quatLayer' + str((i + 1))),
                 nn.Sequential(*quatLayer))
     self.num_plane = num_plane
     for i in range(self.num_plane):
         planeLayer = [
             nn.Linear(int(input_nc), int((input_nc / 2))), activation,
             nn.Linear(int((input_nc / 2)), int((input_nc / 4))), activation
         ]
         last = nn.Linear(int((input_nc / 4)), 4)
         last.weight.data = jt.zeros((4, int(input_nc / 4)))
         last.bias.data = jt.transform.to_tensor(
             jt.array(biasTerms[('plane' + str((i + 1)))])).float()
         planeLayer += [last]
         setattr(self, ('planeLayer' + str((i + 1))),
                 nn.Sequential(*planeLayer))
Example #9
    def __init__(self, in_channel, out_channel, kernel_size, padding, downsample=False, fused=False):
        super(ConvBlock, self).__init__()

        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channel, out_channel, kernel_size, padding=padding),
            nn.LeakyReLU(0.2)
        )

        if downsample:
            if fused:
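                # Note: in this snippet the fused and non-fused downsampling paths are identical.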
                self.conv2 = nn.Sequential(
                    nn.Conv2d(out_channel, out_channel, kernel_size, padding=padding),
                    nn.Pool(2),
                    nn.LeakyReLU(0.2)
                )
            else:
                self.conv2 = nn.Sequential(
                    nn.Conv2d(out_channel, out_channel, kernel_size, padding=padding),
                    nn.Pool(2),
                    nn.LeakyReLU(0.2)
                )
        else:
            self.conv2 = nn.Sequential(
                nn.Conv2d(out_channel, out_channel, kernel_size, padding=padding),
                nn.LeakyReLU(0.2)
            )
Example #10
 def __init__(self, latent_dim, n_c, x_shape, verbose=False):
     super(Generator_CNN, self).__init__()
     self.name = 'generator'
     self.latent_dim = latent_dim
     self.n_c = n_c
     self.x_shape = x_shape
     self.ishape = (128, 7, 7)
     self.iels = int(np.prod(self.ishape))
     self.verbose = verbose
     self.model0 = nn.Sequential(
         nn.Linear((self.latent_dim + self.n_c), 1024))
     self.model1 = nn.Sequential(BatchNorm1d(1024), nn.Leaky_relu(0.2))
     self.model2 = nn.Sequential(nn.Linear(1024, self.iels),
                                 BatchNorm1d(self.iels), nn.Leaky_relu(0.2))
     self.model3 = nn.Sequential(
         Reshape(self.ishape),
         nn.ConvTranspose(128, 64, 4, stride=2, padding=1, bias=True),
         nn.BatchNorm(64), nn.Leaky_relu(0.2))
     self.model4 = nn.Sequential(
         nn.ConvTranspose(64, 1, 4, stride=2, padding=1, bias=True))
     self.sigmoid = nn.Sigmoid()
     initialize_weights(self)
     if self.verbose:
         print('Setting up {}...\n'.format(self.name))
          print(self.model0, self.model1, self.model2,
                self.model3, self.model4)
Example #11
    def __init__(self, inp, oup, stride, expand_ratio):
        super(InvertedResidual, self).__init__()
        self.stride = stride
        assert stride in [1, 2]

        hidden_dim = int(round(inp * expand_ratio))
        self.use_res_connect = self.stride == 1 and inp == oup

        if expand_ratio == 1:
            self.conv = nn.Sequential(
                # dw
                Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False),
                FrozenBatchNorm2d(hidden_dim),
                nn.ReLU6(),
                # pw-linear
                Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
                FrozenBatchNorm2d(oup),
            )
        else:
            self.conv = nn.Sequential(
                # pw
                Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),
                FrozenBatchNorm2d(hidden_dim),
                nn.ReLU6(),
                # dw
                Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False),
                FrozenBatchNorm2d(hidden_dim),
                nn.ReLU6(),
                # pw-linear
                Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
                FrozenBatchNorm2d(oup),
            )
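The snippet above only defines the layers; a minimal sketch of the matching forward pass, assuming the usual MobileNetV2 convention that the identity shortcut is added only when `use_res_connect` is true (stride 1 and equal input/output channels):

    def execute(self, x):
        # Hypothetical forward, not part of the original snippet.
        if self.use_res_connect:
            return x + self.conv(x)  # residual add: same spatial size and channel count
        return self.conv(x)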
Example #12
 def __init__(self,
              channels,
              filters=64,
              num_res_blocks=16,
              num_upsample=2):
     super(GeneratorRRDB, self).__init__()
     self.conv1 = nn.Conv(channels, filters, 3, stride=1, padding=1)
     self.res_blocks = nn.Sequential(*[
         ResidualInResidualDenseBlock(filters)
         for _ in range(num_res_blocks)
     ])
     self.conv2 = nn.Conv(filters, filters, 3, stride=1, padding=1)
     upsample_layers = []
     for _ in range(num_upsample):
         upsample_layers += [
             nn.Conv(filters, (filters * 4), 3, stride=1, padding=1),
             nn.LeakyReLU(),
             nn.PixelShuffle(upscale_factor=2)
         ]
     self.upsampling = nn.Sequential(*upsample_layers)
     self.conv3 = nn.Sequential(
         nn.Conv(filters, filters, 3, stride=1, padding=1), nn.LeakyReLU(),
         nn.Conv(filters, channels, 3, stride=1, padding=1))
     for m in self.modules():
         weights_init_normal(m)
Example #13
    def __init__(self, norm_layer, image_size, input_nc, latent_dim=512):
        super( EncoderGenerator_Res, self).__init__()
        layers_list = []
        
        latent_size = int(image_size/32)
        longsize = 512*latent_size*latent_size
        self.longsize = longsize
        # print(image_size,latent_size, longsize)

        activation = nn.ReLU()
        padding_type='reflect'
        norm_layer=nn.BatchNorm

        # encode
        layers_list.append(EncoderBlock(channel_in=input_nc, channel_out=32, kernel_size=4, padding=1, stride=2))  # 176 176 

        dim_size = 32
        for i in range(4):
            layers_list.append(ResnetBlock(dim_size, padding_type=padding_type, activation=activation, norm_layer=norm_layer)) 
            layers_list.append(EncoderBlock(channel_in=dim_size, channel_out=dim_size*2, kernel_size=4, padding=1, stride=2)) 
            dim_size *= 2

        layers_list.append(ResnetBlock(512, padding_type=padding_type, activation=activation, norm_layer=norm_layer))  

        # final shape Bx256*7*6
        self.conv = nn.Sequential(*layers_list)
        self.fc_mu = nn.Sequential(nn.Linear(in_features=longsize, out_features=latent_dim))#,

        # self.fc_var = nn.Sequential(nn.Linear(in_features=longsize, out_features=latent_dim))#,

        for m in self.modules():
            weights_init_normal(m)
Example #14
    def __init__(self, norm_layer, image_size, output_nc, latent_dim=512):
        super(DecoderGenerator_image_Res, self).__init__()
        # start from B*1024
        latent_size = int(image_size/32)
        self.latent_size = latent_size
        longsize = 512*latent_size*latent_size

        activation = nn.ReLU()
        padding_type='reflect'
        norm_layer=nn.BatchNorm

        self.fc = nn.Sequential(nn.Linear(in_features=latent_dim, out_features=longsize))
        layers_list = []

        layers_list.append(ResnetBlock(512, padding_type=padding_type, activation=activation, norm_layer=norm_layer))  # 176 176 
        
        dim_size = 256
        for i in range(4):
            layers_list.append(DecoderBlock(channel_in=dim_size*2, channel_out=dim_size, kernel_size=4, padding=1, stride=2, output_padding=0)) #latent*2
            layers_list.append(ResnetBlock(dim_size, padding_type=padding_type, activation=activation, norm_layer=norm_layer))  
            dim_size = int(dim_size/2)

        layers_list.append(DecoderBlock(channel_in=32, channel_out=32, kernel_size=4, padding=1, stride=2, output_padding=0)) #352 352
        layers_list.append(ResnetBlock(32, padding_type=padding_type, activation=activation, norm_layer=norm_layer))  # 176 176 

        # layers_list.append(DecoderBlock(channel_in=64, channel_out=64, kernel_size=4, padding=1, stride=2, output_padding=0)) #96*160
        layers_list.append(nn.ReflectionPad2d(2))
        layers_list.append(nn.Conv(32,output_nc,kernel_size=5,padding=0))

        self.conv = nn.Sequential(*layers_list)

        for m in self.modules():
            weights_init_normal(m)
Example #15
    def __init__(self):
        super(Discriminator, self).__init__()

        def discriminator_block(in_filters, out_filters, bn=True):
            'Returns layers of each discriminator block'
            block = [
                nn.Conv(in_filters, out_filters, 3, stride=2, padding=1),
                nn.LeakyReLU(scale=0.2),
                nn.Dropout(p=0.25)
            ]
            if bn:
                block.append(nn.BatchNorm(out_filters, eps=0.8))
            return block

        self.conv_blocks = nn.Sequential(
            *discriminator_block(opt.channels, 16, bn=False),
            *discriminator_block(16, 32), *discriminator_block(32, 64),
            *discriminator_block(64, 128))
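        # Four stride-2 blocks above downsample the input by a factor of 2**4.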
        ds_size = (opt.img_size // (2**4))
        self.adv_layer = nn.Sequential(nn.Linear((128 * (ds_size**2)), 1))
        self.aux_layer = nn.Sequential(
            nn.Linear((128 * (ds_size**2)), opt.n_classes), nn.Softmax())
        self.latent_layer = nn.Sequential(
            nn.Linear((128 * (ds_size**2)), opt.code_dim))

        for m in self.modules():
            weights_init_normal(m)
Example #16
    def _make_one_branch(self,
                         branch_index,
                         block,
                         num_blocks,
                         num_channels,
                         stride=1):
        downsample = None
        if stride != 1 or \
                self.num_inchannels[branch_index] != num_channels[branch_index] * block.expansion:
            downsample = nn.Sequential(
                nn.Conv(self.num_inchannels[branch_index],
                        num_channels[branch_index] * block.expansion,
                        kernel_size=1,
                        stride=stride,
                        bias=False),
                BatchNorm2d(num_channels[branch_index] * block.expansion,
                            momentum=BN_MOMENTUM),
            )

        layers = []
        layers.append(
            block(self.num_inchannels[branch_index],
                  num_channels[branch_index], stride, downsample))
        self.num_inchannels[branch_index] = \
            num_channels[branch_index] * block.expansion
        for i in range(1, num_blocks[branch_index]):
            layers.append(
                block(self.num_inchannels[branch_index],
                      num_channels[branch_index]))

        return nn.Sequential(*layers)
Example #17
 def __init__(self,
              in_channels,
              ch1x1,
              ch3x3red,
              ch3x3,
              ch5x5red,
              ch5x5,
              pool_proj,
              conv_block=None):
     super(Inception, self).__init__()
     if (conv_block is None):
         conv_block = BasicConv2d
     self.branch1 = conv_block(in_channels, ch1x1, kernel_size=1)
     self.branch2 = nn.Sequential(
         conv_block(in_channels, ch3x3red, kernel_size=1),
         conv_block(ch3x3red, ch3x3, kernel_size=3, padding=1))
     self.branch3 = nn.Sequential(
         conv_block(in_channels, ch5x5red, kernel_size=1),
         conv_block(ch5x5red, ch5x5, kernel_size=3, padding=1))
     self.branch4 = nn.Sequential(
         nn.Pool(kernel_size=3,
                 stride=1,
                 padding=1,
                 ceil_mode=True,
                 op='maximum'),
         conv_block(in_channels, pool_proj, kernel_size=1))
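Only the constructor is shown; in the standard GoogLeNet Inception block the four branch outputs are concatenated along the channel axis. A minimal sketch of that forward pass, assuming `import jittor as jt`:

 def execute(self, x):
     # Hypothetical forward, not part of the original snippet.
     outputs = [self.branch1(x), self.branch2(x), self.branch3(x), self.branch4(x)]
     return jt.concat(outputs, dim=1)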
Example #18
    def __init__(self, input_shape):
        super(MultiDiscriminator, self).__init__()

        def discriminator_block(in_filters, out_filters, normalize=True):
            'Returns downsampling layers of each discriminator block'
            layers = [nn.Conv(in_filters, out_filters, 4, stride=2, padding=1)]
            if normalize:
                layers.append(nn.BatchNorm(out_filters, 0.8))
            layers.append(nn.Leaky_relu(0.2))
            return layers
        (channels, _, _) = input_shape
        self.disc_0 = nn.Sequential(
            *discriminator_block(channels, 64, normalize=False), 
            *discriminator_block(64, 128), 
            *discriminator_block(128, 256), 
            *discriminator_block(256, 512), 
            nn.Conv(512, 1, 3, padding=1)
        )
        self.disc_1 = nn.Sequential(
            *discriminator_block(channels, 64, normalize=False), 
            *discriminator_block(64, 128), 
            *discriminator_block(128, 256), 
            *discriminator_block(256, 512), 
            nn.Conv(512, 1, 3, padding=1)
        )
        self.disc_2 = nn.Sequential(
            *discriminator_block(channels, 64, normalize=False), 
            *discriminator_block(64, 128), 
            *discriminator_block(128, 256), 
            *discriminator_block(256, 512), 
            nn.Conv(512, 1, 3, padding=1)
        )
        for m in self.modules():
            weights_init_normal(m)
Example #19
    def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None

        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            downsample = nn.Sequential(
                nn.Pool(stride, stride=stride, ceil_mode=True, op='mean'),
                nn.Conv(self.inplanes, (planes * block.expansion),
                        1,
                        stride=1,
                        bias=False), nn.BatchNorm((planes * block.expansion)))

        layers = []
        layers.append(
            block(self.inplanes,
                  planes,
                  stride,
                  downsample=downsample,
                  stype='stage',
                  baseWidth=self.baseWidth,
                  scale=self.scale))

        self.inplanes = (planes * block.expansion)
        for i in range(1, blocks):
            layers.append(
                block(self.inplanes,
                      planes,
                      baseWidth=self.baseWidth,
                      scale=self.scale))

        return nn.Sequential(*layers)
Example #20
    def __init__(self, part_num=50):
        super(Point_Transformer_partseg, self).__init__()
        self.part_num = part_num
        self.conv1 = nn.Conv1d(3, 128, kernel_size=1, bias=False)
        self.conv2 = nn.Conv1d(128, 128, kernel_size=1, bias=False)

        self.bn1 = nn.BatchNorm1d(128)
        self.bn2 = nn.BatchNorm1d(128)

        self.sa1 = SA_Layer(128)
        self.sa2 = SA_Layer(128)
        self.sa3 = SA_Layer(128)
        self.sa4 = SA_Layer(128)

        self.conv_fuse = nn.Sequential(
            nn.Conv1d(512, 1024, kernel_size=1, bias=False),
            nn.BatchNorm1d(1024), nn.LeakyReLU(scale=0.2))

        self.label_conv = nn.Sequential(
            nn.Conv1d(16, 64, kernel_size=1, bias=False), nn.BatchNorm1d(64),
            nn.LeakyReLU(scale=0.2))

        self.convs1 = nn.Conv1d(1024 * 3 + 64, 512, 1)
        self.dp1 = nn.Dropout(0.5)
        self.convs2 = nn.Conv1d(512, 256, 1)
        self.convs3 = nn.Conv1d(256, self.part_num, 1)
        self.bns1 = nn.BatchNorm1d(512)
        self.bns2 = nn.BatchNorm1d(256)

        self.relu = nn.ReLU()
Example #21
 def __init__(self, alpha, num_classes=1000, dropout=0.2):
     super(MNASNet, self).__init__()
     assert (alpha > 0.0)
     self.alpha = alpha
     self.num_classes = num_classes
     depths = _get_depths(alpha)
     layers = [
         nn.Conv(3, 32, 3, padding=1, stride=2, bias=False),
         nn.BatchNorm(32, momentum=_BN_MOMENTUM),
         nn.Relu(),
         nn.Conv(32, 32, 3, padding=1, stride=1, groups=32, bias=False),
         nn.BatchNorm(32, momentum=_BN_MOMENTUM),
         nn.Relu(),
         nn.Conv(32, 16, 1, padding=0, stride=1, bias=False),
         nn.BatchNorm(16, momentum=_BN_MOMENTUM),
         _stack(16, depths[0], 3, 2, 3, 3, _BN_MOMENTUM),
         _stack(depths[0], depths[1], 5, 2, 3, 3, _BN_MOMENTUM),
         _stack(depths[1], depths[2], 5, 2, 6, 3, _BN_MOMENTUM),
         _stack(depths[2], depths[3], 3, 1, 6, 2, _BN_MOMENTUM),
         _stack(depths[3], depths[4], 5, 2, 6, 4, _BN_MOMENTUM),
         _stack(depths[4], depths[5], 3, 1, 6, 1, _BN_MOMENTUM),
         nn.Conv(depths[5], 1280, 1, padding=0, stride=1, bias=False),
         nn.BatchNorm(1280, momentum=_BN_MOMENTUM),
         nn.Relu()
     ]
     self.layers = nn.Sequential(*layers)
     self.classifier = nn.Sequential(nn.Dropout(p=dropout),
                                     nn.Linear(1280, num_classes))
Example #22
    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv(in_planes,
                             planes,
                             kernel_size=3,
                             stride=stride,
                             padding=1,
                             bias=False)
        self.bn1 = nn.BatchNorm(planes)
        self.conv2 = nn.Conv(planes,
                             planes,
                             kernel_size=3,
                             stride=1,
                             padding=1,
                             bias=False)
        self.bn2 = nn.BatchNorm(planes)

        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != planes:
            self.shortcut = nn.Sequential(
                nn.Conv(in_planes,
                        planes,
                        kernel_size=1,
                        stride=stride,
                        bias=False), nn.BatchNorm(planes))

        # SE (squeeze-and-excitation) layers: 1x1 convs used in place of nn.Linear
        self.fc1 = nn.Conv(planes, planes // 16, kernel_size=1)
        self.fc2 = nn.Conv(planes // 16, planes, kernel_size=1)
Example #23
 def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
     norm_layer = self._norm_layer
     downsample = None
     previous_dilation = self.dilation
     if dilate:
         self.dilation *= stride
         stride = 1
     if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
         downsample = nn.Sequential(
             conv1x1(self.inplanes, (planes * block.expansion), stride),
             norm_layer((planes * block.expansion)))
     layers = []
     layers.append(
         block(self.inplanes, planes, stride, downsample, self.groups,
               self.base_width, previous_dilation, norm_layer))
     self.inplanes = (planes * block.expansion)
     for _ in range(1, blocks):
         layers.append(
             block(self.inplanes,
                   planes,
                   groups=self.groups,
                   base_width=self.base_width,
                   dilation=self.dilation,
                   norm_layer=norm_layer))
     return nn.Sequential(*layers)
Example #24
 def __init__(self, version='1_0', num_classes=1000):
     super(SqueezeNet, self).__init__()
     self.num_classes = num_classes
     if (version == '1_0'):
         self.features = nn.Sequential(
             nn.Conv(3, 96, kernel_size=7, stride=2), nn.Relu(),
             nn.Pool(kernel_size=3, stride=2, ceil_mode=True, op='maximum'),
             Fire(96, 16, 64, 64), Fire(128, 16, 64, 64),
             Fire(128, 32, 128, 128),
             nn.Pool(kernel_size=3, stride=2, ceil_mode=True, op='maximum'),
             Fire(256, 32, 128, 128), Fire(256, 48, 192, 192),
             Fire(384, 48, 192, 192), Fire(384, 64, 256, 256),
             nn.Pool(kernel_size=3, stride=2, ceil_mode=True, op='maximum'),
             Fire(512, 64, 256, 256))
     elif (version == '1_1'):
         self.features = nn.Sequential(
             nn.Conv(3, 64, kernel_size=3, stride=2), nn.Relu(),
             nn.Pool(kernel_size=3, stride=2, ceil_mode=True, op='maximum'),
             Fire(64, 16, 64, 64), Fire(128, 16, 64, 64),
             nn.Pool(kernel_size=3, stride=2, ceil_mode=True, op='maximum'),
             Fire(128, 32, 128, 128), Fire(256, 32, 128, 128),
             nn.Pool(kernel_size=3, stride=2, ceil_mode=True, op='maximum'),
             Fire(256, 48, 192, 192), Fire(384, 48, 192, 192),
             Fire(384, 64, 256, 256), Fire(512, 64, 256, 256))
     else:
         raise ValueError(
             'Unsupported SqueezeNet version {version}: 1_0 or 1_1 expected'.
             format(version=version))
     final_conv = nn.Conv(512, self.num_classes, kernel_size=1)
     self.classifier = nn.Sequential(nn.Dropout(p=0.5),
                                     final_conv, nn.Relu(),
                                     nn.AdaptiveAvgPool2d((1, 1)))
Example #25
 def __init__(self, input_dim, dim, numAngle, numRho):
     super(DHT_Layer, self).__init__()
     self.fist_conv = nn.Sequential(nn.Conv2d(input_dim, dim, 1),
                                    nn.BatchNorm2d(dim), nn.ReLU())
     self.dht = DHT(numAngle=numAngle, numRho=numRho)
     self.convs = nn.Sequential(nn.Conv2d(dim, dim, 3, 1, 1),
                                nn.BatchNorm2d(dim), nn.ReLU(),
                                nn.Conv2d(dim, dim, 3, 1, 1),
                                nn.BatchNorm2d(dim), nn.ReLU())
Example #26
    def _make_fuse_layers(self):
        if self.num_branches == 1:
            return None

        num_branches = self.num_branches
        num_inchannels = self.num_inchannels
        fuse_layers = []
        for i in range(num_branches if self.multi_scale_output else 1):
            fuse_layer = []
            for j in range(num_branches):
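                # j > i: lower-resolution branch, fused via 1x1 conv + nearest upsampling;
                # j < i: higher-resolution branch, fused via a chain of stride-2 3x3 convs.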
                if j > i:
                    fuse_layer.append(
                        nn.Sequential(
                            nn.Conv(num_inchannels[j],
                                    num_inchannels[i],
                                    1,
                                    1,
                                    0,
                                    bias=False),
                            BatchNorm2d(num_inchannels[i],
                                        momentum=BN_MOMENTUM),
                            nn.Upsample(scale_factor=2**(j - i),
                                        mode='nearest')))
                elif j == i:
                    fuse_layer.append(None)
                else:
                    conv3x3s = []
                    for k in range(i - j):
                        if k == i - j - 1:
                            num_outchannels_conv3x3 = num_inchannels[i]
                            conv3x3s.append(
                                nn.Sequential(
                                    nn.Conv(num_inchannels[j],
                                            num_outchannels_conv3x3,
                                            3,
                                            2,
                                            1,
                                            bias=False),
                                    BatchNorm2d(num_outchannels_conv3x3,
                                                momentum=BN_MOMENTUM)))
                        else:
                            num_outchannels_conv3x3 = num_inchannels[j]
                            conv3x3s.append(
                                nn.Sequential(
                                    nn.Conv(num_inchannels[j],
                                            num_outchannels_conv3x3,
                                            3,
                                            2,
                                            1,
                                            bias=False),
                                    BatchNorm2d(num_outchannels_conv3x3,
                                                momentum=BN_MOMENTUM),
                                    nn.ReLU(False)))
                    fuse_layer.append(nn.Sequential(*conv3x3s))
            fuse_layers.append(nn.ModuleList(fuse_layer))

        return nn.ModuleList(fuse_layers)
Example #27
 def __init__(self, out_features):
     super().__init__()
     self.net = nn.Sequential(
         linear_init(True, IN_FEATURES, HIDDEN_FEATURES),
         Sine(),
         *[nn.Sequential(linear_init(False, HIDDEN_FEATURES, HIDDEN_FEATURES), Sine()) for _ in
           range(HIDDEN_LAYERS)],
         linear_init(False, HIDDEN_FEATURES, out_features)
     )
Example #28
    def __init__(self,
                 output_stride=8,
                 BatchNorm=None,
                 width_mult=1.,
                 pretrained=True):
        super(MobileNetV2, self).__init__()
        block = InvertedResidual
        input_channel = 32
        current_stride = 1
        rate = 1
        interverted_residual_setting = [
            # t, c, n, s
            [1, 16, 1, 1],
            [6, 24, 2, 2],
            [6, 32, 3, 2],
            [6, 64, 4, 2],
            [6, 96, 3, 1],
            [6, 160, 3, 2],
            [6, 320, 1, 1],
        ]

        # building first layer
        input_channel = int(input_channel * width_mult)
        self.features = [conv_bn(3, input_channel, 2, BatchNorm)]
        current_stride *= 2
        # building inverted residual blocks
        for t, c, n, s in interverted_residual_setting:
            if current_stride == output_stride:
                stride = 1
                dilation = rate
                rate *= s
            else:
                stride = s
                dilation = 1
                current_stride *= s
            output_channel = int(c * width_mult)
            for i in range(n):
                if i == 0:
                    self.features.append(
                        block(input_channel, output_channel, stride, dilation,
                              t, BatchNorm))
                else:
                    self.features.append(
                        block(input_channel, output_channel, 1, dilation, t,
                              BatchNorm))
                input_channel = output_channel
        # self.features = nn.Sequential(*self.features)
        # self._initialize_weights()

        # if pretrained:
        #     self._load_pretrained_model()

        self.low_level_features = self.features[0:4]
        self.low_level_features = nn.Sequential(*self.low_level_features)
        self.high_level_features = self.features[4:]
        self.high_level_features = nn.Sequential(*self.high_level_features)
Example #29
 def __init__(self, c1, c2, k, s):
     super(GhostBottleneck, self).__init__()
     c_ = c2 // 2
     self.conv = nn.Sequential(
         GhostConv(c1, c_, 1, 1),  # pw
         DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(),  # dw
         GhostConv(c_, c2, 1, 1, act=False))  # pw-linear
     self.shortcut = nn.Sequential(DWConv(
         c1, c1, k, s, act=False), Conv(
             c1, c2, 1, 1, act=False)) if s == 2 else nn.Identity()
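As with the other blocks above, only `__init__` is shown; a minimal sketch of the usual GhostBottleneck forward, which sums the main and shortcut paths:

 def execute(self, x):
     # Hypothetical forward, not part of the original snippet.
     return self.conv(x) + self.shortcut(x)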
Example #30
    def __init__(self):
        super(CoupledGenerators, self).__init__()
        self.init_size = (opt.img_size // 4)
        self.fc = nn.Sequential(nn.Linear(opt.latent_dim, (128 * (self.init_size ** 2))))
        self.shared_conv = nn.Sequential(
            nn.BatchNorm(128), nn.Upsample(scale_factor=2),
            nn.Conv(128, 128, 3, stride=1, padding=1),
            nn.BatchNorm(128, eps=0.8), nn.LeakyReLU(0.2),
            nn.Upsample(scale_factor=2))
        self.G1 = nn.Sequential(
            nn.Conv(128, 64, 3, stride=1, padding=1),
            nn.BatchNorm(64, eps=0.8), nn.LeakyReLU(0.2),
            nn.Conv(64, opt.channels, 3, stride=1, padding=1), nn.Tanh())
        self.G2 = nn.Sequential(
            nn.Conv(128, 64, 3, stride=1, padding=1),
            nn.BatchNorm(64, eps=0.8), nn.LeakyReLU(0.2),
            nn.Conv(64, opt.channels, 3, stride=1, padding=1), nn.Tanh())

        for m in self.modules():
            weights_init_normal(m)