Example #1
    def __init__(self, block, layers, num_classes=1000,
                 channels=(16, 32, 64, 128, 256, 512, 512, 512),
                 out_map=-1, out_middle=False, pool_size=28, arch='D'):
        super(DRN, self).__init__()
        self.inplanes = channels[0]
        self.out_map = out_map
        self.out_dim = channels[-1]
        self.out_middle = out_middle
        self.arch = arch

        if arch == 'C':
            self.conv1 = nn.Conv2d(3, channels[0], kernel_size=7, stride=1,
                                   padding=3, bias=False)
            self.bn1 = nn.BatchNorm2d(channels[0])
            self.relu = nn.ReLU(inplace=True)

            self.layer1 = self._make_layer(
                BasicBlock, channels[0], layers[0], stride=1)
            self.layer2 = self._make_layer(
                BasicBlock, channels[1], layers[1], stride=2)
        elif arch == 'D':
            self.layer0 = nn.Sequential(
                nn.Conv2d(3, channels[0], kernel_size=7, stride=1, padding=3,
                          bias=False),
                nn.BatchNorm2d(channels[0]),
                nn.ReLU(inplace=True)
            )

            self.layer1 = self._make_conv_layers(
                channels[0], layers[0], stride=1)
            self.layer2 = self._make_conv_layers(
                channels[1], layers[1], stride=2)

        self.layer3 = self._make_layer(block, channels[2], layers[2], stride=2)
        self.layer4 = self._make_layer(block, channels[3], layers[3], stride=2)
        self.layer5 = self._make_layer(block, channels[4], layers[4], dilation=2,
                                       new_level=False)
        self.layer6 = None if layers[5] == 0 else \
            self._make_layer(block, channels[5], layers[5], dilation=4,
                             new_level=False)

        if arch == 'C':
            self.layer7 = None if layers[6] == 0 else \
                self._make_layer(BasicBlock, channels[6], layers[6], dilation=2,
                                 new_level=False, residual=False)
            self.layer8 = None if layers[7] == 0 else \
                self._make_layer(BasicBlock, channels[7], layers[7], dilation=1,
                                 new_level=False, residual=False)
        elif arch == 'D':
            self.layer7 = None if layers[6] == 0 else \
                self._make_conv_layers(channels[6], layers[6], dilation=2)
            self.layer8 = None if layers[7] == 0 else \
                self._make_conv_layers(channels[7], layers[7], dilation=1)

        self.num_classes = num_classes
        if self.num_classes > 0:
            self.avgpool = nn.AvgPool2d(pool_size)
            self.pred = nn.Conv2d(self.out_dim, num_classes, kernel_size=1,
                                  stride=1, padding=0, bias=True)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

        if self.out_map < 32:
            self.out_pool = nn.MaxPool2d(32 // self.out_map)
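The dilation=2 and dilation=4 stages above are what make this a dilated residual network: later stages grow the receptive field without further downsampling. A standalone illustration of that property (not part of the DRN class itself):

import torch
import torch.nn as nn

# with padding == dilation, a 3x3 dilated conv keeps the spatial size
x = torch.randn(1, 64, 28, 28)
dilated = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=2, dilation=2, bias=False)
print(dilated(x).shape)  # torch.Size([1, 64, 28, 28]) -- same resolution, larger receptive field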
Example #2
def conv3x3(in_planes, out_planes, stride=1, groups=1, bias=False):
    "3x3 convolution with padding"
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=1, groups=groups, bias=bias)
Example #3
 def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0):
     super(NINBlock, self).__init__()
     self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding)
     self.bn = nn.BatchNorm2d(out_channels)
     self.relu = nn.ReLU(inplace=True)
Example #4
def conv_bn(inp, oup, stride=1):
    return nn.Sequential(nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
                         nn.BatchNorm2d(oup), nn.ReLU(inplace=True))
Example #5
 def __init__(self):
     super().__init__()
     self.conv = nn.Conv2d(2, 2, 1)
Example #6
 def __init__(self, block, layers, first_stride=1, num_classes=1000):
     super().__init__(block, layers, num_classes)
     self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=first_stride, padding=3,
                            bias=False)
Example #7
def conv1x1(in_planes, out_planes, stride=1):
    """1x1 convolution"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
Example #8
    def conv_model(self, input_size, input_channle):
        ''' Build the convolutional and pooling layers '''
        layers = []

        # select the activation function
        if self.activate_function == 'relu':
            activate_function = nn.ReLU(True)
        if self.activate_function == 'sigmoid':
            activate_function = nn.Sigmoid()
        if self.activate_function == 'tanh':
            activate_function = nn.Tanh()
        if self.activate_function == 'LeakyReLU':
            activate_function = nn.LeakyReLU(True)

        # input layer
        padding = self.conv_padding_same(input_size)  # compute how much zero padding is needed
        conv1 = nn.Conv2d(input_channle, self.channel_numbers[0], kernel_size=self.kernel_size,
                          stride=self.conv_stride, padding=padding)
        pool1 = nn.MaxPool2d(self.pooling_size, self.pool_stride)
        layers.append(conv1)
        layers.append(nn.BatchNorm2d(self.channel_numbers[0]))
        layers.append(activate_function)
        layers.append(nn.Dropout(self.dropout))
        layers.append(pool1)
        W_in = (input_size - 1) // 2 + 1  # spatial size after the pooling layer

        # hidden layers
        hidden_layers_number = len(self.channel_numbers)
        for i in range(hidden_layers_number):
            try:
                padding = self.conv_padding_same(W_in)
                layers.append(nn.Conv2d(self.channel_numbers[i], self.channel_numbers[i + 1],
                                        kernel_size=self.kernel_size, stride=self.conv_stride, padding=padding))
                layers.append(nn.BatchNorm2d(self.channel_numbers[i + 1]))
                layers.append(activate_function)
                layers.append(nn.Dropout(self.dropout))
                layers.append(nn.MaxPool2d(self.pooling_size, self.pool_stride))
                W_in = (W_in - 1) // 2 + 1
            except IndexError:
                # channel_numbers has no further entry; stop adding hidden blocks
                break

        # Each block appended above contributes five modules
        # (Conv2d, BatchNorm2d, activation, Dropout, MaxPool2d),
        # so the whole list can be assembled directly.
        cnn = nn.Sequential(*layers)
        return cnn
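Note that conv_model relies on self.conv_padding_same, which is not included in this snippet. A hypothetical sketch of what such a helper might compute (symmetric 'same'-style padding for the class's kernel and stride; an assumption, not the original implementation):

    def conv_padding_same(self, w_in):
        # hypothetical helper: padding so the conv output size is ceil(w_in / stride)
        out = -(-w_in // self.conv_stride)  # ceiling division
        total = max((out - 1) * self.conv_stride + self.kernel_size - w_in, 0)
        return total // 2  # nn.Conv2d pads symmetrically; an odd total under-pads by one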
Example #9
def meanpoolConv(inplanes, outplanes):
    sequence = []
    sequence += [nn.AvgPool2d(kernel_size=2, stride=2)]
    sequence += [nn.Conv2d(inplanes, outplanes,
                           kernel_size=1, stride=1, padding=0, bias=True)]
    return nn.Sequential(*sequence)
Example #10
    def __init__(self,
                 input_nc,
                 ndf=64,
                 n_layers=3,
                 norm_layer=nn.BatchNorm2d):
        """Construct a PatchGAN discriminator

        Parameters:
            input_nc (int)  -- the number of channels in input images
            ndf (int)       -- the number of filters in the last conv layer
            n_layers (int)  -- the number of conv layers in the discriminator
            norm_layer      -- normalization layer
        """
        super(NLayerDiscriminator, self).__init__()
        if type(norm_layer) == functools.partial:  # no need to use bias as BatchNorm2d has affine parameters
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d

        global is_group_norm
        global number_of_groups
        global enable_Silu

        kw = 4
        padw = 1
        sequence = [
            nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
            nn.LeakyReLU(0.2, True)
        ]
        nf_mult = 1
        nf_mult_prev = 1
        for n in range(1, n_layers):  # gradually increase the number of filters
            nf_mult_prev = nf_mult
            nf_mult = min(2**n, 8)
            sequence += [
                nn.Conv2d(ndf * nf_mult_prev,
                          ndf * nf_mult,
                          kernel_size=kw,
                          stride=2,
                          padding=padw,
                          bias=use_bias)
            ]
            if (is_group_norm == 0):
                sequence += [norm_layer(ndf * nf_mult)]
            else:
                sequence += [norm_layer(num_channels=ndf * nf_mult)]
            sequence += [nn.LeakyReLU(0.2, True)]

        nf_mult_prev = nf_mult
        nf_mult = min(2**n_layers, 8)

        sequence += [
            nn.Conv2d(ndf * nf_mult_prev,
                      ndf * nf_mult,
                      kernel_size=kw,
                      stride=1,
                      padding=padw,
                      bias=use_bias)
        ]
        if (is_group_norm == 0):
            sequence += [norm_layer(ndf * nf_mult)]
        else:
            sequence += [norm_layer(num_channels=ndf * nf_mult)]
        sequence += [nn.LeakyReLU(0.2, True)]

        sequence += [
            nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)
        ]  # output 1 channel prediction map
        self.model = nn.Sequential(*sequence)
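A hedged instantiation sketch for the PatchGAN discriminator above. It assumes the module-level globals (is_group_norm, number_of_groups, enable_Silu) are defined, e.g. set to 0, and it calls self.model directly because forward() is not shown in this snippet:

import torch
netD = NLayerDiscriminator(input_nc=3, ndf=64, n_layers=3)
scores = netD.model(torch.randn(1, 3, 256, 256))
print(scores.shape)  # one-channel map of per-patch predictions, e.g. torch.Size([1, 1, 30, 30])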
Example #11
    def __init__(self, dataset, NL, NF, lr, mom) :
        """
        \Description : Build a CNN following specific rules. Those are [A. Bakshi et al. 2019] :
            "
            (1) The CNN architecture is created by an alternative combination of the convolutional and max pooling layers 
            that are followed by an averaging pooling layer and a linear fully connected layer on the top
            (2) The minimum and maximum number of consecutive convolutional layers is 2 and 4, respectively
            (3) Two pooling layers never occur directly in sequence
            (4) We keep adding a sequence of convolutional layers and pooling layer until we reach the number of layers (NL)
            (5) A subsequence of size NF is selected from the sequence {32, 64, 128, 256, 512}. The elements of the selected
            subsequence are randomly used as the feature map value of convolutional block. NF can take the values 3, 4 or 5
            (6) If the number of feature maps (NF) were less than the number of different convolutional blocks in the 
            generated network, then one/more of the selected feature maps will be repeated randomly
            (7) For each layer the same kernel size and stride size is used
            (8) Each convolutional layer is followed by batch normalization and rectifier function (ReLU)
            "
            Here, the feature map of size 512 is replaced by a feature map of size 16  
        \Args : 
            dataset : the dataset used for the CNN
            NL      : number of hidden layers
            NF      : number of feature maps  
            lr      : learning rate
            mom     : momentum
        \Outputs : None
        """
        
        seed(987)           # Set the random seed

        super(CNN, self).__init__()
        
        self.dataset    = dataset                                               # Dataset used
        self.chromosome = {"NL" : NL, "NF" : NF, "lr" : lr, "mom" : mom}        # Hyper-parameters of the network
        self.layers     = nn.ModuleList()                                       # Array of layers
        self.inaccuracy = 0.0                                                   # Inaccuracy during the test phase
        self.time       = 0.0                                                   # Computation time during the test phase
        self.fitness    = 0.0                                                   # Fitness score of the model
        
        if dataset == "MNIST" :
            in_channels = 1    # Input dimension
            
        elif dataset == "CIFAR10" :
            in_channels = 3    # Input dimension
          
        else :
            raise ValueError("Invalid dataset name. Either choose 'MNIST' or 'CIFAR10'")
            
         
        
        # Determine the feature maps subsequence from {16, 32, 64, 128, 256}
        self.feat_maps_seq = [16, 32, 64, 128, 256]
        start = randint(0, len(self.feat_maps_seq) - self.chromosome["NF"])
        end = start + NF
        self.feat_maps_seq = self.feat_maps_seq[start : end]
        ind_feat_maps = 0
        
        classes    = 10            # Number of classes
        img_h      = 32            # Input image height (MNIST images are resized to 32x32)
        img_w      = 32            # Input image width  (MNIST images are resized to 32x32)
        
        # Add the layers
        while NL > 0 :
            
            # Convolutional block cannot be of size 4 if there are 7 layers left because it would make the 
            # last block to be of size 1 and it is forbidden
            if NL == 7 :
                conv_block_size = choice([2, 3])
                
            # Convolutional blocks cannot be of size 3 or 4 if there are 6 or 3 layers left because it would
            # make the last block to be of size 1 and it is forbidden
            elif NL == 6 or NL == 3 :
                conv_block_size = 2
                
            # Convolutional block cannot be of size 2 or 3 if there are 5 layers left because it would make
            # the last block to be of size 1 and it is forbidden
            elif NL == 5 :
                conv_block_size = 4
               
            # Convolutional block cannot be of size 2 or 4 if there are 4 layers left because it would make
            # the last block to be of size 1 and it is forbidden
            elif NL == 4 :
                conv_block_size = 3
            
            # Can be whatever size
            # (We cannot have 3, 2 or 1 layer(s) left with the preceding cases)
            else :
                conv_block_size = choice([2, 3, 4])
                 
            # Select the feature maps size
            feat_maps = self.feat_maps_seq[ind_feat_maps] if ind_feat_maps < len(self.feat_maps_seq) \
                                                            else self.feat_maps_seq[len(self.feat_maps_seq) - 1]
            ind_feat_maps += 1
            
            # Add a convolutional block
            for i in range(conv_block_size) :
                # If it is the first layer
                if NL == self.chromosome["NL"] :
                    self.layers.append(
                            nn.Conv2d(in_channels=in_channels,
                                      out_channels=feat_maps,
                                      kernel_size= 3,
                                      stride=1))
                
                else :
                    # If the layer three back is the previous Conv2d (no pooling layer in between)
                    if type(self.layers[len(self.layers) - 3]).__name__ == "Conv2d" :
                        self.layers.append(
                                nn.Conv2d(in_channels=self.layers[len(self.layers) - 3].out_channels,   # out_channels in the previous convolutional layer
                                          out_channels=feat_maps,
                                          kernel_size=3,
                                          stride=1))
                        
                    else :  # If the previous layer is a MaxPool2d layer
                        self.layers.append(
                                nn.Conv2d(in_channels=self.layers[len(self.layers) - 4].out_channels,   # out_channels in the previous convolutional layer
                                          out_channels=feat_maps,
                                          kernel_size=3,
                                          stride=1))
                    # end if
                # end if

                # Initialize the weights and biases
                self.layers[len(self.layers) - 1].weight.data.fill_(0.01)
                self.layers[len(self.layers) - 1].bias.data.fill_(0.01)
                
                # Update the height and width
                img_h = (img_h - self.layers[len(self.layers) - 1].kernel_size[0]) + 1      # Not divided by the stride because it is 1
                img_w = (img_w - self.layers[len(self.layers) - 1].kernel_size[1]) + 1      # Not divided by the stride because it is 1
                
                # Add a batch normalization layer
                self.layers.append(nn.BatchNorm2d(feat_maps))      # Number of features in the  previous convolutional layer
                
                # Add a ReLU activation
                self.layers.append(nn.ReLU())
                
                NL -= 1     # Decrements the number of layers left
                
            # End for i in range(conv_block_size)
            
            # Add the max pooling layer after the convolutional block
            self.layers.append(nn.MaxPool2d(kernel_size= 2,
                                            stride=1))
            
            # Update the height and width
            img_h = (img_h - self.layers[len(self.layers) - 1].kernel_size) + 1     # Not divided by the stride because it is 1
            img_w = (img_w - self.layers[len(self.layers) - 1].kernel_size) + 1     # Not divided by the stride because it is 1
            
            NL -= 1         # Decrements the number of layers left      
        # end while NL > 0
            
        # Add an average pooling layer
        self.layers.append(nn.AvgPool2d(kernel_size=2,
                                        stride=1))  
        
        # Update the height and width
        img_h = (img_h - self.layers[len(self.layers) - 1].kernel_size) + 1      # Not divided by the stride because it is 1
        img_w = (img_w - self.layers[len(self.layers) - 1].kernel_size) + 1      # Not divided by the stride because it is 1
        
        # Add a fully connected layer
        self.layers.append(
                nn.Linear(in_features=self.layers[len(self.layers) - 5].out_channels * img_h * img_w,
                          out_features=classes,
                          bias=True))
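A hedged usage sketch for this constructor, assuming the surrounding CNN class (and its imports of seed, randint and choice from the random module) is available as in the source project:

net = CNN(dataset="CIFAR10", NL=8, NF=3, lr=0.01, mom=0.9)
print(len(net.layers))  # conv/BN/ReLU triples, pooling layers, the average pool and the final Linear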
Example #12
    def __init__(self,
                 outer_nc,
                 inner_nc,
                 input_nc=None,
                 submodule=None,
                 outermost=False,
                 innermost=False,
                 norm_layer=nn.BatchNorm2d,
                 use_dropout=False):
        """Construct a Unet submodule with skip connections.

        Parameters:
            outer_nc (int) -- the number of filters in the outer conv layer
            inner_nc (int) -- the number of filters in the inner conv layer
            input_nc (int) -- the number of channels in input images/features
            submodule (UnetSkipConnectionBlock) -- previously defined submodules
            outermost (bool)    -- if this module is the outermost module
            innermost (bool)    -- if this module is the innermost module
            norm_layer          -- normalization layer
            use_dropout (bool)  -- if use dropout layers.
        """
        super(UnetSkipConnectionBlock, self).__init__()
        self.outermost = outermost
        if type(norm_layer) == functools.partial:
            use_bias = (norm_layer.func == nn.InstanceNorm2d
                        or norm_layer.func == nn.GroupNorm)
        else:
            use_bias = (norm_layer == nn.InstanceNorm2d
                        or norm_layer == nn.GroupNorm)
        if input_nc is None:
            input_nc = outer_nc
        downconv = nn.Conv2d(input_nc,
                             inner_nc,
                             kernel_size=4,
                             stride=2,
                             padding=1,
                             bias=use_bias)

        global is_group_norm
        #global number_of_groups
        global enable_Silu
        downrelu = nn.LeakyReLU(0.2, True)
        if (is_group_norm == 0):
            downnorm = norm_layer(inner_nc)
        else:
            #downnorm = norm_layer(number_of_groups,inner_nc)
            downnorm = norm_layer(num_channels=inner_nc)
        if (enable_Silu == 0):
            uprelu = nn.ReLU(True)
        else:
            uprelu = SiLU()
        if (is_group_norm == 0):
            upnorm = norm_layer(outer_nc)
        else:
            #upnorm = norm_layer(number_of_groups,outer_nc)
            upnorm = norm_layer(num_channels=outer_nc)

        if outermost:
            upconv = nn.ConvTranspose2d(inner_nc * 2,
                                        outer_nc,
                                        kernel_size=4,
                                        stride=2,
                                        padding=1)
            down = [downconv]
            up = [uprelu, upconv, nn.Tanh()]
            model = down + [submodule] + up
        elif innermost:
            upconv = nn.ConvTranspose2d(inner_nc,
                                        outer_nc,
                                        kernel_size=4,
                                        stride=2,
                                        padding=1,
                                        bias=use_bias)
            down = [downrelu, downconv]
            up = [uprelu, upconv, upnorm]
            model = down + up
        else:
            upconv = nn.ConvTranspose2d(inner_nc * 2,
                                        outer_nc,
                                        kernel_size=4,
                                        stride=2,
                                        padding=1,
                                        bias=use_bias)
            down = [downrelu, downconv, downnorm]
            up = [uprelu, upconv, upnorm]

            if use_dropout:
                model = down + [submodule] + up + [nn.Dropout(0.5)]
            else:
                model = down + [submodule] + up

        self.model = nn.Sequential(*model)
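For context, blocks like this are usually nested recursively, innermost first, to build the full U-Net generator (the pix2pix convention). A hedged sketch, assuming the module-level globals is_group_norm and enable_Silu are defined (e.g. 0) and using illustrative values for input_nc, output_nc and ngf:

input_nc, output_nc, ngf = 3, 3, 64  # illustrative values
block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, submodule=None, innermost=True)
for _ in range(3):  # extra blocks at the lowest resolution
    block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, submodule=block, use_dropout=True)
block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, submodule=block)
block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, submodule=block)
block = UnetSkipConnectionBlock(ngf, ngf * 2, submodule=block)
unet = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=block, outermost=True)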
Example #13
    def __init__(self,
                 input_nc,
                 output_nc,
                 ngf=64,
                 norm_layer=nn.BatchNorm2d,
                 use_dropout=False,
                 n_blocks=6,
                 padding_type='reflect'):
        """Construct a Resnet-based generator

        Parameters:
            input_nc (int)      -- the number of channels in input images
            output_nc (int)     -- the number of channels in output images
            ngf (int)           -- the number of filters in the last conv layer
            norm_layer          -- normalization layer
            use_dropout (bool)  -- if use dropout layers
            n_blocks (int)      -- the number of ResNet blocks
            padding_type (str)  -- the name of padding layer in conv layers: reflect | replicate | zero
        """
        assert (n_blocks >= 0)
        super(ResnetGenerator, self).__init__()
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d

        model = [
            nn.ReflectionPad2d(3),
            nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
            norm_layer(ngf),
            nn.ReLU(True)
        ]

        n_downsampling = 2
        for i in range(n_downsampling):  # add downsampling layers
            mult = 2**i
            model += [
                nn.Conv2d(ngf * mult,
                          ngf * mult * 2,
                          kernel_size=3,
                          stride=2,
                          padding=1,
                          bias=use_bias),
                norm_layer(ngf * mult * 2),
                nn.ReLU(True)
            ]

        mult = 2**n_downsampling
        for i in range(n_blocks):  # add ResNet blocks

            model += [
                ResnetBlock(ngf * mult,
                            padding_type=padding_type,
                            norm_layer=norm_layer,
                            use_dropout=use_dropout,
                            use_bias=use_bias)
            ]

        for i in range(n_downsampling):  # add upsampling layers
            mult = 2**(n_downsampling - i)
            model += [
                nn.ConvTranspose2d(ngf * mult,
                                   int(ngf * mult / 2),
                                   kernel_size=3,
                                   stride=2,
                                   padding=1,
                                   output_padding=1,
                                   bias=use_bias),
                norm_layer(int(ngf * mult / 2)),
                nn.ReLU(True)
            ]
        model += [nn.ReflectionPad2d(3)]
        model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
        model += [nn.Tanh()]

        self.model = nn.Sequential(*model)
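A hedged shape check for the generator above, assuming the accompanying ResnetBlock class from the same codebase is importable; it calls self.model directly because forward() is not shown in this snippet:

import torch
G = ResnetGenerator(input_nc=3, output_nc=3, ngf=64, n_blocks=6)
out = G.model(torch.randn(1, 3, 256, 256))
print(out.shape)  # torch.Size([1, 3, 256, 256]); the two stride-2 downsamplings are undone by the upsampling path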
Example #14
def conv3x3(in_channels, out_channels, dilation=1):
    return nn.Conv2d(in_channels,
                     out_channels,
                     3,
                     padding=dilation,
                     dilation=dilation)
Example #15
def conv3x3(in_planes, out_planes, stride=1, padding=1, dilation=1):
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=padding, bias=False, dilation=dilation)
Example #16
 def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):
     super(Focus, self).__init__()
     # nn.Conv2d's positional order is (in, out, kernel, stride, padding, dilation, groups, bias), so g/act
     # must not be passed positionally (upstream YOLOv5 uses its own Conv wrapper with BN + activation here)
     self.conv = nn.Conv2d(c1 * 4, c2, k, s, k // 2 if p is None else p, groups=g)
Example #17
def conv3x3(in_planes, out_planes, stride=1):
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=True)
Example #18
 def _make_stage(self, features, size):
     prior = nn.AdaptiveAvgPool2d(output_size=(size, size))
     conv = nn.Conv2d(features, features, kernel_size=1, bias=False)
     return nn.Sequential(prior, conv)
Example #19
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """3x3 convolution with padding"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=dilation, groups=groups, bias=False, dilation=dilation)
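Because the padding equals the dilation, this 3x3 convolution preserves the spatial size at stride 1; a quick check:

import torch
conv = conv3x3(16, 32, stride=1, dilation=2)
print(conv(torch.randn(1, 16, 28, 28)).shape)  # torch.Size([1, 32, 28, 28])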
Example #20
 def __init__(self, features, out_features=1024, sizes=(1, 2, 3, 6)):
     super().__init__()
     self.stages = []
     self.stages = nn.ModuleList([self._make_stage(features, size) for size in sizes])
     self.bottleneck = nn.Conv2d(features * (len(sizes) + 1), out_features, kernel_size=1)
     self.relu = nn.ReLU(inplace=True)
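The __init__ above builds pyramid-pooling stages plus a 1x1 bottleneck; the forward pass is not shown. A hedged sketch of the forward such pyramid-pooling modules typically use (assuming bilinear upsampling back to the input resolution):

import torch
import torch.nn.functional as F

def forward(self, feats):
    # upsample each pooled branch to the input resolution, concatenate with the input,
    # then fuse everything with the 1x1 bottleneck defined above
    h, w = feats.size(2), feats.size(3)
    priors = [F.interpolate(stage(feats), size=(h, w), mode='bilinear', align_corners=False)
              for stage in self.stages]
    return self.relu(self.bottleneck(torch.cat(priors + [feats], dim=1)))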
Example #21
def conv3x3(in_planes, out_planes, stride=1):
    # 3x3 convolution with padding
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
Example #22
def conv_dw(inp, oup, stride):
    return nn.Sequential(
        nn.Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False),
        nn.BatchNorm2d(inp), nn.ReLU(inplace=True),
        nn.Conv2d(inp, oup, 1, 1, 0, bias=False), nn.BatchNorm2d(oup),
        nn.ReLU(inplace=True))
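This is the depthwise-separable block used in MobileNet: a per-channel 3x3 convolution followed by a 1x1 pointwise convolution. A quick shape check:

import torch
block = conv_dw(32, 64, stride=2)
print(block(torch.randn(1, 32, 56, 56)).shape)  # torch.Size([1, 64, 28, 28])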