Example #1
 def __init__(self):
     super(MNISTModel, self).__init__()
     self.conv = Conv2d(in_channels=1,
                        out_channels=5,
                        kernel_size=(5, 5),
                        padding=(2, 2),
                        bias=True)
     ## BatchNorm2d
     self.maxpool = MaxPool2d(kernel_size=(2, 2), stride=(2, 2))
     self.dense1 = Linear(5 * 14 * 14, 120)
     self.dense2 = Linear(120, 10)
     self.relu = ReLU()
     self.softmax = Softmax(dim=1)
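
The pooling arithmetic behind dense1 is easy to check: a 28x28 MNIST input keeps its spatial size through the padded 5x5 convolution and is halved to 14x14 by the 2x2 max pool, which is where 5 * 14 * 14 comes from. The forward pass is not part of this snippet; a minimal sketch, assuming 28x28 single-channel inputs, could look like this:

 def forward(self, x):
     # (N, 1, 28, 28) -> (N, 5, 28, 28): padding=(2, 2) preserves the spatial size
     x = self.relu(self.conv(x))
     # (N, 5, 28, 28) -> (N, 5, 14, 14): 2x2 max pooling halves height and width
     x = self.maxpool(x)
     # flatten to (N, 5 * 14 * 14) for the dense layers
     x = x.view(x.size(0), -1)
     x = self.relu(self.dense1(x))
     return self.softmax(self.dense2(x))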
Example #2
    def __init__(self, ResidualBlock, num_classes=10):
        super(ResNet, self).__init__()
        self.inchannel = 64
        self.conv1 = nn.Sequential(
            Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(64), nn.ReLU())

        self.layer1 = self.make_layer(ResidualBlock, 64, 2, stride=1)
        self.layer2 = self.make_layer(ResidualBlock, 128, 2, stride=2)
        self.layer3 = self.make_layer(ResidualBlock, 256, 2, stride=2)
        self.layer4 = self.make_layer(ResidualBlock, 512, 2, stride=2)
        self.maxpool = MaxPool2d(4)
        self.fc = nn.Linear(512, num_classes)
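
The make_layer helper is not included in this snippet. In CIFAR-style ResNets with this layout it is typically a short loop that stacks ResidualBlock instances, letting only the first block of each stage apply the stride; a sketch under that assumption:

    def make_layer(self, block, channels, num_blocks, stride):
        # only the first block in a stage downsamples; the remaining blocks use stride 1
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for s in strides:
            layers.append(block(self.inchannel, channels, s))
            self.inchannel = channels
        return nn.Sequential(*layers)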
Example #3
    def _create_backbone(self):
        backbone = Sequential()

        section = self._create_initial_sections(index=1,
                                                in_channels=3,
                                                out_channels=32)
        backbone.add_module('Section1', section)
        backbone.add_module('Pool1', MaxPool2d(kernel_size=2, stride=2))

        section = self._create_initial_sections(index=2,
                                                in_channels=32,
                                                out_channels=64)
        backbone.add_module('Section2', section)
        backbone.add_module('Pool2', MaxPool2d(kernel_size=2, stride=2))

        section = self._create_middle_sections(index=3,
                                               in_channels=64,
                                               out_channels=128)
        backbone.add_module('Section3', section)
        backbone.add_module('Pool3', MaxPool2d(kernel_size=2, stride=2))

        section = self._create_middle_sections(index=6,
                                               in_channels=128,
                                               out_channels=256)
        backbone.add_module('Section4', section)
        backbone.add_module('Pool4', MaxPool2d(kernel_size=2, stride=2))

        section = self._create_final_sections(index=9,
                                              in_channels=256,
                                              out_channels=512)
        backbone.add_module('Section5', section)
        backbone.add_module('Pool5', MaxPool2d(kernel_size=2, stride=2))

        section = self._create_final_sections(index=14,
                                              in_channels=512,
                                              out_channels=1024)
        backbone.add_module('Section6', section)

        return backbone
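
The _create_initial_sections, _create_middle_sections and _create_final_sections helpers are not shown; the channel progression (32 -> 64 -> 128 -> 256 -> 512 -> 1024 with five 2x2 max pools) matches a Darknet-19-style backbone, and the index argument appears to number the convolutions for module naming. Purely as an illustration, an initial section might be a single Conv-BatchNorm-LeakyReLU block; the helpers in the original source may well differ:

    def _create_initial_sections(self, index, in_channels, out_channels):
        # hypothetical helper: one 3x3 convolution + batch norm + leaky ReLU, named after its index
        section = Sequential()
        section.add_module('Conv%d' % index,
                           Conv2d(in_channels, out_channels, kernel_size=3, padding=1, bias=False))
        section.add_module('Norm%d' % index, BatchNorm2d(out_channels))
        section.add_module('Activation%d' % index, LeakyReLU(0.1, inplace=True))
        return section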
Example #4
 def create_model(use_reduce=False):
     return Sequential(
         Conv2d(3, 6, kernel_size=5),
         Reduce('b c (h h2) (w w2) -> b c h w', 'max', h2=2, w2=2)
         if use_reduce else MaxPool2d(kernel_size=2),
         Conv2d(6, 16, kernel_size=5),
         Reduce('b c (h h2) (w w2) -> b (c h w)', 'max', h2=2, w2=2),
         Linear(16 * 5 * 5, 120),
         ReLU(),
         Linear(120, 84),
         ReLU(),
         Linear(84, 10),
     )
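
Reduce comes from einops.layers.torch and stands in for pooling (and, in the final occurrence, pooling plus flatten). Assuming 32x32 RGB inputs, which is what Linear(16 * 5 * 5, 120) implies, and with the same imports the snippet itself needs, a quick shape check might look like this:

import torch

# both variants map (N, 3, 32, 32) -> (N, 10):
# 32 -> 28 (5x5 conv) -> 14 (pool) -> 10 (5x5 conv) -> 5 (pool), so 16 * 5 * 5 = 400 features
for use_reduce in (False, True):
    model = create_model(use_reduce=use_reduce)
    print(model(torch.randn(2, 3, 32, 32)).shape)  # torch.Size([2, 10])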
Example #5
 def __init__(self, n_channels):
     super(CNN, self).__init__()
     # input to first hidden layer
     self.hidden1 = Conv2d(n_channels, 32, (3, 3))
     kaiming_uniform_(self.hidden1.weight, nonlinearity='relu')
     self.act1 = ReLU()
     # first pooling layer
     self.pool1 = MaxPool2d((2, 2), stride=(2, 2))
     # second hidden layer
     self.hidden2 = Conv2d(32, 32, (3, 3))
     kaiming_uniform_(self.hidden2.weight, nonlinearity='relu')
     self.act2 = ReLU()
     # second pooling layer
     self.pool2 = MaxPool2d((2, 2), stride=(2, 2))
     # fully connected layer
     self.hidden3 = Linear(5 * 5 * 32, 100)
     kaiming_uniform_(self.hidden3.weight, nonlinearity='relu')
     self.act3 = ReLU()
     # output layer
     self.hidden4 = Linear(100, 10)
     xavier_uniform_(self.hidden4.weight)
     self.act4 = Softmax(dim=1)
Example #6
 def __init__(self):
     super(ResModel, self).__init__()
     n_labels = 12
     n_maps = 128
     self.conv0 = torch.nn.Conv2d(1, n_maps, (3, 3), padding=(1, 1), bias=False)
     self.n_layers = n_layers = 9
     self.convs = torch.nn.ModuleList([torch.nn.Conv2d(n_maps, n_maps, (3, 3), padding=1, dilation=1,
                             bias=False) for _ in range(n_layers)])
     self.pool = MaxPool2d(2, return_indices=True)
     for i, conv in enumerate(self.convs):
         self.add_module("bn{}".format(i + 1), torch.nn.BatchNorm2d(n_maps, affine=False))
         self.add_module("conv{}".format(i + 1), conv)
     self.output = torch.nn.Linear(n_maps, n_labels)
Example #7
 def __init__(self, channels_in):
     super(InceptionD, self).__init__()
     self.branch3x3 = Sequential(
         Conv2d_BN(channels_in, 192, 1, stride=1, padding=0),
         Conv2d_BN(192, 320, 3, stride=2, padding=1)
     )  # 320 channels
     self.branch7x7x3 = Sequential(
         Conv2d_BN(channels_in, 192, 1, stride=1, padding=0),
         Conv2d_BN(192, 192, (1, 7), stride=1, padding=(0, 3)),
         Conv2d_BN(192, 192, (7, 1), stride=1, padding=(3, 0)),
         Conv2d_BN(192, 192, 3, stride=2, padding=1)
     )  # 192 channels
     self.branch_pool = MaxPool2d(3, stride=2, padding=1)  # channels_in
Example #8
 def __init__(self, maskDFPath: Path=None):
     super(MaskDetector, self).__init__()
     self.maskDFPath = maskDFPath
     
     self.maskDF = None
     self.trainDF = None
     self.validateDF = None
     self.crossEntropyLoss = None
     self.learningRate = 0.00001
     
     self.convLayer1 = convLayer1 = Sequential(
         Conv2d(3, 32, kernel_size=(3, 3), padding=(1, 1)),
         ReLU(),
         MaxPool2d(kernel_size=(2, 2))
     )
     
     self.convLayer2 = convLayer2 = Sequential(
         Conv2d(32, 64, kernel_size=(3, 3), padding=(1, 1)),
         ReLU(),
         MaxPool2d(kernel_size=(2, 2))
     )
     
     self.convLayer3 = convLayer3 = Sequential(
         Conv2d(64, 128, kernel_size=(3, 3), padding=(1, 1), stride=(3,3)),
         ReLU(),
         MaxPool2d(kernel_size=(2, 2))
     )
     
     self.linearLayers = linearLayers = Sequential(
         Linear(in_features=2048, out_features=1024),
         ReLU(),
         Linear(in_features=1024, out_features=2),
     )
     
     # Initialize layers' weights
     for sequential in [convLayer1, convLayer2, convLayer3, linearLayers]:
         for layer in sequential.children():
             if isinstance(layer, (Linear, Conv2d)):
                 init.xavier_uniform_(layer.weight)
Example #9
    def __init__(self, n_layers, nclasses=5, model_dim=128, input_dim=3):
        super(cnn_attention_ocr, self).__init__()

        self.classes = nclasses + 1
        self.input_dim = input_dim
        self.n_layers = n_layers
        self.atn_blocks_0 = attention_block(19, model_dim)
        # what we can do then is whenever we reduce size we are allowed to increase dimension
        self.atn_blocks_1 = attention_block(model_dim, model_dim)
        self.atn_blocks_2 = attention_block(model_dim, model_dim)
        self.mp1 = MaxPool2d((2, 2))
        self.atn_blocks_3 = attention_block(model_dim, model_dim * 4, input_height=16)
        self.mp2 = MaxPool2d((2, 1))

        # For now we do 8 layers only
        if n_layers > 4:
            self.atn_blocks_4 = attention_block(model_dim * 4, model_dim * 8, input_height=16)

            atn_blocks = nn.ModuleList(
                [attention_block(model_dim * 8, model_dim * 8, input_height=8) for i in range(n_layers - 5)])
            self.layers = nn.Sequential(*atn_blocks)
        # For now we do 8 layers only
        if n_layers > 8:
            atn_blocks_16 = nn.ModuleList(
                [attention_block(model_dim * 8, model_dim * 8, input_height=8) for i in range(n_layers - 8)])
            self.layers_16 = nn.Sequential(*atn_blocks_16)

        self.conv1 = depthwise_separable_conv_bn(16, 16, 13, 6)

        self.reduce1 = nn.Conv2d(self.input_dim, 16, kernel_size=1)  # 1x1
        self.reduce2 = nn.Conv2d(model_dim * 8, self.classes, kernel_size=1)  # 1x1

        self.drop1 = nn.Dropout2d(0.75)
        self.drop2 = nn.Dropout2d(0.5)

        self.ln_3 = LayerNorm(self.classes)
        self.ln_1 = LayerNorm(3).cuda()
        self.ln_4 = LayerNorm(16).cuda()
        self.soft_norm = nn.Softmax2d()
Example #10
 def __init__(self):
     super(CNN, self).__init__()
     #Defining the 2d convolutional layers
     self.cnn_layers = Sequential(
         #layer 1 #28x28
         Conv2d(1, 8, kernel_size=3, stride=1,
                padding=1),  #convolution 28 to 28
         BatchNorm2d(8),
         ReLU(inplace=True),  #ReLU activation
         MaxPool2d(kernel_size=2, stride=2),  #28 to 14
     )
     self.linear_layers = Sequential(Linear(8 * 14 * 14, 100),
                                     Linear(100, 10))
Example #11
    def __init__(self,
                 kernel_size,
                 padding=0,
                 dilation=1,
                 return_indices=True,
                 ceil_mode=False):
        super(ComplexMaxPool2D, self).__init__()

        self.max_pool = MaxPool2d(kernel_size=kernel_size,
                                  padding=padding,
                                  dilation=dilation,
                                  return_indices=return_indices,
                                  ceil_mode=ceil_mode)
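
return_indices=True is what makes this wrapper useful for complex-valued inputs: the pooling is applied to a single real quantity (typically the magnitude) and the returned indices are reused to gather the matching real and imaginary entries. The forward pass is not shown; a minimal sketch of that gather, assuming separate real/imaginary tensors, could be:

    @staticmethod
    def _gather(tensor, indices):
        # pick out the entries selected by MaxPool2d's flattened spatial indices
        flat = tensor.flatten(start_dim=2)
        return flat.gather(dim=2, index=indices.flatten(start_dim=2)).view_as(indices)

    def forward(self, x_real, x_imag):
        # pool on the squared magnitude, then reuse its indices for both components
        _, indices = self.max_pool(x_real ** 2 + x_imag ** 2)
        return self._gather(x_real, indices), self._gather(x_imag, indices)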
Example #12
 def __init__(self, channels_in):
     super(InceptionB, self).__init__()
     self.branch3x3 = Conv2d_BN(channels_in, 384, 3, stride=2,
                                padding=1)  # 384 channels
     self.branch3x3dbl = Sequential(Conv2d_BN(channels_in, 64, 1,
                                              padding=0),
                                    Conv2d_BN(64, 96, 3, padding=1),
                                    Conv2d_BN(96,
                                              96,
                                              3,
                                              stride=2,
                                              padding=1))  # 96 channels
     self.branch_pool = MaxPool2d(3, stride=2, padding=1)  # channels_in
Example #13
    def __init__(self, num_classes):
        super(BasicCNN, self).__init__()

        self.pool = MaxPool2d(kernel_size=2, stride=2)
        self.drop = Dropout(0.8)

        self.cv1 = Conv2d(1, 32, kernel_size=3, stride=1)
        self.cv2 = Conv2d(32, 64, kernel_size=3, stride=1)
        self.cv3 = Conv2d(64, 64, kernel_size=3, stride=1)
        self.cv4 = Conv2d(64, 64, kernel_size=3, stride=1)

        self.fc1 = Linear(4224, 256)
        self.out = Linear(256, num_classes)
Example #14
 def __init__(self, in_channel, depth, stride):
     super(bottleneck_IR, self).__init__()
     if in_channel == depth:
         self.shortcut_layer = MaxPool2d(1, stride)
     else:
         self.shortcut_layer = Sequential(
             Conv2d(in_channel, depth, (1, 1), stride, bias=False),
             BatchNorm2d(depth))
     self.res_layer = Sequential(
         BatchNorm2d(in_channel),
         Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),
         PReLU(depth), Conv2d(depth, depth, (3, 3), stride, 1, bias=False),
         BatchNorm2d(depth))
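
The forward pass of this IR bottleneck is not included; it is simply the sum of the two branches, with MaxPool2d(1, stride) acting as a strided identity shortcut when the channel count is unchanged:

 def forward(self, x):
     # residual branch plus (possibly strided or projected) shortcut
     shortcut = self.shortcut_layer(x)
     res = self.res_layer(x)
     return res + shortcut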
Example #15
    def __init__(self):
        super(BasicAutoEncoder, self).__init__()

        self.pool = MaxPool2d(kernel_size=2, stride=2)
        self.poolt = UpsamplingNearest2d(scale_factor=2)

        self.cv1 = Conv2d(1, 64, kernel_size=4, stride=1)
        self.cv2 = Conv2d(64, 32, kernel_size=3, stride=1)
        self.cv3 = Conv2d(32, 16, kernel_size=3, stride=1)

        self.cv1t = ConvTranspose2d(16, 32, kernel_size=3, stride=1)
        self.cv2t = ConvTranspose2d(32, 64, kernel_size=3, stride=1)
        self.cv3t = ConvTranspose2d(64, 1, kernel_size=4, stride=1)
Example #16
    def __init__(self):
        super(Network_simple_cnn, self).__init__()

        self.cnn_layers = Sequential(
            # Define the 2D convolution layer
            Conv2d(3, 4, kernel_size=3, stride=3, padding=0),
            BatchNorm2d(4),
            ReLU(inplace=True),
            MaxPool2d(kernel_size=2, stride=3),
        )

        self.linear_layers = Sequential(Linear(2500, 256), Dropout(0.3),
                                        Linear(256, 1), Sigmoid())
Example #17
    def __init__(self,
                 input_size,
                 block,
                 layers,
                 zero_init_residual=True,
                 fc_input_size=2048):
        super(ResNet, self).__init__()
        assert input_size[0] in [
            112, 224
        ], "input_size should be [112, 112] or [224, 224]"
        self.inplanes = 64
        self.conv1 = Conv2d(3,
                            64,
                            kernel_size=7,
                            stride=2,
                            padding=3,
                            bias=False)
        self.bn1 = BatchNorm2d(64)
        self.relu = ReLU(inplace=True)
        self.maxpool = MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)

        self.bn_o1 = BatchNorm2d(fc_input_size)
        self.dropout = Dropout()
        if input_size[0] == 112:
            self.fc = Linear(fc_input_size * 4 * 4, 512)
        else:
            self.fc = Linear(fc_input_size * 8 * 8, 512)
        self.bn_o2 = BatchNorm1d(512)

        for m in self.modules():
            if isinstance(m, Conv2d):
                nn.init.kaiming_normal_(m.weight,
                                        mode='fan_out',
                                        nonlinearity='relu')
            elif isinstance(m, BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)
Example #18
def dcn_vgg(input_channels):

    model = nn.Sequential(

        Conv2d(input_channels, 64, kernel_size=(3, 3), padding=0),
        ReLU(),
        Conv2d(64, 64, kernel_size=(3, 3), padding=0),
        ReLU(),
        MaxPool2d(kernel_size=(2, 2), stride=(2, 2)),

        Conv2d(64, 128, kernel_size=(3, 3), padding=0),
        ReLU(),
        Conv2d(128, 128, kernel_size=(3, 3), padding=0),
        ReLU(),
        MaxPool2d(kernel_size=(2, 2), stride=(2, 2)),

        Conv2d(128, 256, kernel_size=(3, 3), padding=0),
        ReLU(),
        Conv2d(256, 256, kernel_size=(3, 3), padding=0),
        ReLU(),
        MaxPool2d(kernel_size=(2, 2), stride=(2, 2)),

        Conv2d(256, 512, kernel_size=(3, 3), padding=0),
        ReLU(),
        Conv2d(512, 512, kernel_size=(3, 3), padding=0),
        ReLU(),
        MaxPool2d(kernel_size=(2, 2), stride=(2, 2)),

        Conv2d(512, 512, kernel_size=(3, 3), padding=0),
        ReLU(),
        Conv2d(512, 512, kernel_size=(3, 3), padding=0),
        ReLU(),
        Conv2d(512, 512, kernel_size=(3, 3), padding=0),
        ReLU()

        )

    return model
Example #19
    def __init__(self):
        super(Net, self).__init__()

        ## TODO: Define all the layers of this CNN, the only requirements are:
        ## 1. This network takes in a square (same width and height), grayscale image as input
        ## 2. It ends with a linear layer that represents the keypoints
        ## it's suggested that you make this last layer output 136 values, 2 for each of the 68 keypoint (x, y) pairs

        # As an example, you've been given a convolutional layer, which you may (but don't have to) change:
        # 1 input image channel (grayscale), 32 output channels/feature maps, 5x5 square convolution kernel

        #Conv layer

        self.feature1 = Sequential(
            OrderedDict([
                ('batch_norm1', BatchNorm2d(1)),
                ('conv1_0', Conv2d(1, 32, 5)),
                ('relu1_0', ReLU()),
                ('batch_norm2', BatchNorm2d(32)),
                ('avg1_0', MaxPool2d((3, 3), stride=3)),
                ('conv1_1', Conv2d(32, 64, 5)),
                ('relu1_1', ReLU()),
                ('batch_norm3', BatchNorm2d(64)),
                ('avg1_1', MaxPool2d((2, 2), stride=2)),
                ('conv1_2', Conv2d(64, 150, 5)),
                ('relu1_2', ReLU()),
                ('batch_norm4', BatchNorm2d(150)),
                ('maxp1_2', MaxPool2d((2, 2), stride=2)),
                ('conv2_0', Conv2d(150, 148, 3)),
                ('relu2_0', ReLU()),
                ('batch_norm5', BatchNorm2d(148)),
                ('avg2_0', MaxPool2d((3, 3), stride=3)),
                ('conv2_1', Conv2d(148, 140, 3)),
                ('relu2_1', ReLU()),
                #('avg2_1',  nn.MaxPool2d((2,2), stride=1)),
                ('batch_norm6', BatchNorm2d(140)),
                ('conv2_2', Conv2d(140, 136, 2)),
            ]))
Example #20
    def __init__(self):
        super(ONet, self).__init__()

        self.features = Sequential(
            OrderedDict([
                ('conv1',
                 Conv2d(in_channels=3,
                        out_channels=32,
                        kernel_size=3,
                        stride=1)), ('relu1', ReLU()),
                ('pool1', MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)),
                ('conv2',
                 Conv2d(in_channels=32,
                        out_channels=64,
                        kernel_size=3,
                        stride=1)), ('relu2', ReLU()),
                ('pool2', MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)),
                ('conv3',
                 Conv2d(in_channels=64,
                        out_channels=64,
                        kernel_size=3,
                        stride=1)), ('relu3', ReLU()),
                ('pool3', MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)),
                ('conv4',
                 Conv2d(in_channels=64,
                        out_channels=128,
                        kernel_size=2,
                        stride=1)), ('relu4', ReLU()), ('flatten', Flatten()),
                ('dense', Linear(1152, 256)), ('relu', ReLU())
            ]))

        self.classification = Sequential(
            OrderedDict([('dense', Linear(256, 2)),
                         ('softmax', Softmax(dim=1))]))

        self.regression = Linear(256, 4)

        self.landmarks = Linear(256, 10)
Example #21
    def __init__(self):
        super(Net, self).__init__()

        self.cnn_layers = Sequential(
            # Defining one 2D convolution layer
            # Conv output is
            Conv2d(in_channels=3,
                   out_channels=40,
                   kernel_size=3,
                   stride=2,
                   padding=1),
            BatchNorm2d(40),
            ReLU(inplace=True),
            # MaxPool output is
            MaxPool2d(kernel_size=2, stride=2),
            # Defining another 2D convolution layer
            # Conv output is 94 x 94 x 32
            Conv2d(in_channels=40,
                   out_channels=20,
                   kernel_size=3,
                   stride=2,
                   padding=1),
            BatchNorm2d(20),
            ReLU(inplace=True),
            # MaxPool output is 47 x 47 x 32
            MaxPool2d(kernel_size=2, stride=2),
        )

        # Vanilla neural network perceptron layer
        self.linear_layers = Sequential(
            # (in_features, out_features = predicted labels, bias)
            Linear(20 * 12 * 12, 6),
            ReLU(inplace=True))

        self.output_layer = Sequential(
            # (dimension to compute softmax along)
            # Output shape equals input shape
            Softmax(dim=1))
Example #22
    def __init__(self):
        super(CaptchaModelCNN, self).__init__()

        # Set the hyperparameters
        self.pool = 2  # max-pooling kernel size
        self.padding = 1  # number of padding rows/columns
        self.dropout = 0.5  # dropout probability
        self.kernel_size = 3  # 3x3 convolution kernel

        # Convolution + pooling block
        self.layer1 = Sequential(
            # Sequential container: layers are applied in the order given
            # 2D convolution with kernel_size=self.kernel_size and padding=self.padding
            Conv2d(1, 32, kernel_size=self.kernel_size, padding=self.padding),
            # Batch normalization over the 4D mini-batch input
            BatchNorm2d(32),
            # Randomly zero elements of the input with probability self.dropout
            Dropout(self.dropout),
            # Apply the rectified linear unit activation
            ReLU(),
            # Max pooling
            MaxPool2d(2))

        # Convolution + pooling block
        self.layer2 = Sequential(
            Conv2d(32, 64, kernel_size=self.kernel_size, padding=self.padding),
            BatchNorm2d(64), Dropout(self.dropout), ReLU(), MaxPool2d(2))

        # Convolution + pooling block
        self.layer3 = Sequential(
            Conv2d(64, 64, kernel_size=self.kernel_size, padding=self.padding),
            BatchNorm2d(64), Dropout(self.dropout), ReLU(), MaxPool2d(2))

        # Fully connected layers
        self.fc = Sequential(
            Linear((IMAGE_WIDTH // 8) * (IMAGE_HEIGHT // 8) * 64, 1024),
            Dropout(self.dropout), ReLU())
        self.rfc = Sequential(Linear(1024, CAPTCHA_NUMBER * len(CHARACTER)))
Example #23
  def __init__(self):
    super().__init__()

    act = lambda: Mish()

    self.model = Sequential(
      Conv2d(in_channels=3, out_channels=96,
             kernel_size=(11, 11), stride=(4, 4), padding=2),
      LocalResponseNorm(size=5, k=2, alpha=1e-4, beta=0.75),
      MaxPool2d(kernel_size=(3, 3), stride=2),
      act(),

      Conv2d(in_channels=96, out_channels=256,
             kernel_size=(5, 5), padding=2),
      LocalResponseNorm(size=5, k=2, alpha=1e-4, beta=0.75),
      MaxPool2d(kernel_size=(3, 3), stride=2),
      act(),

      Conv2d(in_channels=256, out_channels=384,
             kernel_size=(3, 3), padding=1),
      act(),

      Conv2d(in_channels=384, out_channels=384,
             kernel_size=(3, 3), padding=1),
      act(),

      Conv2d(in_channels=384, out_channels=256,
             kernel_size=(3, 3), padding=1),
      LocalResponseNorm(size=5, k=2, alpha=1e-4, beta=0.75),
      MaxPool2d(kernel_size=(3, 3), stride=2),
      act(),

      Flatten(),
      Linear(in_features=9216, out_features=4096),
      act(),
      Linear(in_features=4096, out_features=4096),
      act(),
      Linear(in_features=4096, out_features=10))
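
Linear(in_features=9216, ...) pins down the expected input size: with 224x224 images the spatial path is 224 -> 55 (11x11 conv, stride 4, padding 2) -> 27 (pool) -> 27 -> 13 (pool) -> 13 -> 13 -> 13 -> 6 (pool), and 256 * 6 * 6 = 9216. A quick sanity check, using a hypothetical name for the class this __init__ belongs to:

import torch

net = AlexNetMish()  # hypothetical class name; the original snippet does not show it
print(net.model(torch.randn(1, 3, 224, 224)).shape)  # expected: torch.Size([1, 10])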
Example #24
 def __init__(self):
     super(FKPStructure, self).__init__()
     # Three convolution layers for feature extraction
     self.cnn_layers = Sequential(
         #first Convolution layer and Max pooling
         Conv2d(1,
                4,
                kernel_size=3,
                stride=1,
                padding=1,
                padding_mode='reflect'),
         BatchNorm2d(4),
         ReLU(inplace=True),
         MaxPool2d(kernel_size=2, stride=2),
         #second Convolution layer and Max pooling
         Conv2d(4,
                8,
                kernel_size=3,
                stride=1,
                padding=1,
                padding_mode='reflect'),
         BatchNorm2d(8),
         ReLU(inplace=True),
         MaxPool2d(kernel_size=2, stride=2),
         #third Convolution layer and Average pooling
         Conv2d(8,
                16,
                kernel_size=3,
                stride=1,
                padding=1,
                padding_mode='reflect'),
         BatchNorm2d(16),
         ReLU(inplace=True),
         AvgPool2d(kernel_size=2, stride=2),
         # output is 16 x 13 x 27 (the pools floor the spatial dims)
     )
     # One fully connected layer for classification
     self.linear_layers1 = Sequential(Linear(16 * 13 * 27, 100))
Example #25
    def __init__(self, num_classes=7, kernel_init=None):
        super(GoogleNet, self).__init__()
        self.kernel_init = kernel_init
        blocks = [BasicConv2d, Inception, Output]

        conv_block = blocks[0]
        inception_block = blocks[1]
        output_block = blocks[2]

        # First layers
        self.conv1 = conv_block(1, 64, kernel_size=7, stride=2, padding=3)
        self.maxpool1 = MaxPool2d(3, stride=2, ceil_mode=True)
        self.conv2 = conv_block(64, 64, kernel_size=1)
        self.conv3 = conv_block(64, 192, kernel_size=3, padding=1)
        self.maxpool2 = MaxPool2d(3, stride=2, ceil_mode=True)

        # Inception part
        self.inception3a = inception_block(192, 64, 96, 128, 16, 32, 32)
        self.inception3b = inception_block(256, 128, 128, 192, 32, 96, 64)
        self.maxpool3 = MaxPool2d(3, stride=2, ceil_mode=True)

        self.inception4a = inception_block(480, 192, 96, 208, 16, 48, 64)
        self.inception4b = inception_block(512, 160, 112, 224, 24, 64, 64)
        self.inception4c = inception_block(512, 128, 128, 256, 24, 64, 64)
        self.inception4d = inception_block(512, 112, 144, 288, 32, 64, 64)
        self.inception4e = inception_block(528, 256, 160, 320, 32, 128, 128)
        self.maxpool4 = MaxPool2d(2, stride=2, ceil_mode=True)

        self.inception5a = inception_block(832, 256, 160, 320, 32, 128, 128)
        self.inception5b = inception_block(832, 384, 192, 384, 48, 128, 128)

        self.aux1 = output_block(512, num_classes)
        self.aux2 = output_block(528, num_classes)

        # Output
        self.avgpool = AdaptiveAvgPool2d((1, 1))
        self.dropout = Dropout(0.2)
        self.fc = Linear(1024, num_classes)
Example #26
    def __init__(self, total_class, vgg_model, pretrained_state_dict):
        super().__init__()

        self.vgg_feature_extractor = vgg_model.features
        self.vgg_feature_extractor.__setattr__(
            '16', MaxPool2d(kernel_size=2, stride=2, ceil_mode=True))
        self.vgg_feature_extractor.__setattr__(
            '30', MaxPool2d(kernel_size=3, stride=1, padding=1))

        self.global_average_pooling = GlobalAvgPool2d()
        self.classifier_1 = Conv2d(512,
                                   1024,
                                   kernel_size=3,
                                   padding=6,
                                   dilation=6)
        self.classifier_2 = Conv2d(1024, 1024, kernel_size=1)
        self.classifier_3 = Conv2d(1024, total_class, kernel_size=1)

        # Convert the fully connected layers to convolution layers, following SSD's approach
        state_dict = self.state_dict()

        classifier_1_weight = pretrained_state_dict[
            'classifier.0.weight'].view(4096, 512, 7, 7)
        classifier_1_bias = pretrained_state_dict['classifier.0.bias']
        state_dict['classifier_1.weight'] = self._decimate(classifier_1_weight,
                                                           m=[4, None, 3, 3])
        state_dict['classifier_1.bias'] = self._decimate(classifier_1_bias,
                                                         m=[4])

        classifier_2_weight = pretrained_state_dict[
            'classifier.3.weight'].view(4096, 4096, 1, 1)
        classifier_2_bias = pretrained_state_dict['classifier.3.bias']
        state_dict['classifier_2.weight'] = self._decimate(
            classifier_2_weight, m=[4, 4, None, None])
        state_dict['classifier_2.bias'] = self._decimate(classifier_2_bias,
                                                         m=[4])

        self.load_state_dict(state_dict)
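
self._decimate is assumed but not shown. In this SSD-style fc-to-conv conversion it keeps every m[d]-th slice along each dimension d (None leaves a dimension untouched), which is how the 4096x512x7x7 fully-connected weight is thinned to the 1024x512x3x3 shape classifier_1 expects. A sketch under that assumption, relying on torch being imported:

    @staticmethod
    def _decimate(tensor, m):
        # keep every m[d]-th slice along dimension d; m[d] = None skips that dimension
        for d in range(tensor.dim()):
            if m[d] is not None:
                index = torch.arange(start=0, end=tensor.size(d), step=m[d]).long()
                tensor = tensor.index_select(dim=d, index=index)
        return tensor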
Example #27
    def __init__(self, input_shape):
        """
        :param input_shape: input image shape, (h, w, c)
        """
        super(Net, self).__init__()

        self.features = Sequential(Conv2d(input_shape[-1], 64, kernel_size=10),
                                   ReLU(),
                                   MaxPool2d(kernel_size=(2, 2), stride=2),
                                   Conv2d(64, 128, kernel_size=7), ReLU(),
                                   MaxPool2d(kernel_size=(2, 2), stride=2),
                                   Conv2d(128, 128, kernel_size=4), ReLU(),
                                   MaxPool2d(kernel_size=(2, 2), stride=2),
                                   Conv2d(128, 256, kernel_size=4), ReLU())

        # Compute number of input features for the last fully-connected layer
        input_shape = (1, ) + input_shape[::-1]
        x = Variable(torch.rand(input_shape), requires_grad=False)
        x = self.features(x)
        x = Flatten()(x)
        n = x.size()[1]

        self.classifier = Sequential(Flatten(), Linear(n, 4096), Sigmoid())
Example #28
 def __init__(self):
     super(VGG_Base, self).__init__()
     self.conv1_1 = Conv2d(in_channels = 3, out_channels = 64, kernel_size = 3, stride = 1, padding = 1)
     self.conv1_2 = Conv2d(in_channels = 64, out_channels = 64,  kernel_size = 3, stride = 1, padding = 1)
     self.conv2_1 = Conv2d(in_channels = 64, out_channels = 128,  kernel_size = 3, stride = 1, padding = 1)
     self.conv2_2 = Conv2d(in_channels = 128, out_channels = 128,  kernel_size = 3, stride = 1, padding = 1)
     self.conv3_1 = Conv2d(in_channels = 128, out_channels = 256,  kernel_size = 3, stride = 1, padding = 1)
     self.conv3_2 = Conv2d(in_channels = 256, out_channels = 256,  kernel_size = 3, stride = 1, padding = 1)
     self.conv3_3 = Conv2d(in_channels = 256, out_channels = 256,  kernel_size = 3, stride = 1, padding = 1)
     self.conv3_4 = Conv2d(in_channels = 256, out_channels = 256,  kernel_size = 3, stride = 1, padding = 1)
     self.conv4_1 = Conv2d(in_channels = 256, out_channels = 512,  kernel_size = 3, stride = 1, padding = 1)
     self.conv4_2 = Conv2d(in_channels = 512, out_channels = 512,  kernel_size = 3, stride = 1, padding = 1)
     self.relu = ReLU()
     self.max_pooling_2d = MaxPool2d(kernel_size = 2, stride = 2)
Example #29
def create_small_cnn(classes: int) -> Sequential:
    """Creates Small convolutional neural network.

    Parameters
    ----------
    classes : int
        The number of classes to predict.

    Returns
    -------
    Sequential
        Returns Small convolutional neural network.
    """
    model = Sequential(
        Conv2d(3, 4, 5),
        MaxPool2d(2),
        Conv2d(4, 8, 5),
        MaxPool2d(2),
        Flatten(),
        Linear(4 * 4 * 8, classes),
    )

    return model
Example #30
 def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5, k=(5, 9, 13)):
     super().__init__()
     c_ = int(2 * c2 * e)  # hidden channels
     self.cv1 = Conv(c1, c_, 1, 1)
     self.cv2 = Conv2d(c1, c_, 1, 1, bias=False)
     self.cv3 = Conv(c_, c_, 3, 1)
     self.cv4 = Conv(c_, c_, 1, 1)
     self.m = ModuleList(
         [MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])
     self.cv5 = Conv(4 * c_, c_, 1, 1)
     self.cv6 = Conv(c_, c_, 3, 1)
     self.bn = BatchNorm2d(2 * c_)
     self.act = Mish()
     self.cv7 = Conv(2 * c_, c2, 1, 1)
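
This is an SPP-CSP block in the Scaled-YOLOv4 style: the three MaxPool2d layers in self.m (kernels 5, 9, 13 with padding k // 2, stride 1) preserve the spatial size, so their outputs can be concatenated with the unpooled features along the channel axis. The forward pass is not part of the snippet; a sketch following that design:

 def forward(self, x):
     # CSP split: one path through the SPP pooling stack, one 1x1 bypass, fused at the end
     x1 = self.cv4(self.cv3(self.cv1(x)))
     y1 = self.cv6(self.cv5(torch.cat([x1] + [m(x1) for m in self.m], dim=1)))
     y2 = self.cv2(x)
     return self.cv7(self.act(self.bn(torch.cat((y1, y2), dim=1))))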