def __init__(self, inplanes, squeeze_planes, expand1x1_planes,
             expand3x3_planes):
        """Build a Fire module: a 1x1 squeeze conv feeding parallel 1x1 and
        3x3 expand convs.  Each conv is preceded by BatchNorm + Active()
        (project-defined activation, presumably binarization) and followed
        by ReLU; the convs themselves are bias-free since BN absorbs bias.
        """
        super(Fire, self).__init__()
        self.inplanes = inplanes
        # Squeeze stage: BN -> Active -> 1x1 conv -> ReLU.
        self.bn1 = nn.BatchNorm2d(inplanes)
        self.activ1 = Active()
        self.squeeze = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1, bias=False)
        self.squeeze_activation = nn.ReLU(inplace=True)

        # Expand stage, 1x1 branch.
        self.bn2 = nn.BatchNorm2d(squeeze_planes)
        self.activ2 = Active()
        self.expand1x1 = nn.Conv2d(squeeze_planes, expand1x1_planes, kernel_size=1, bias=False)
        self.expand1x1_activation = nn.ReLU(inplace=True)

        # Expand stage, 3x3 branch (padding keeps spatial size).
        self.bn3 = nn.BatchNorm2d(squeeze_planes)
        self.activ3 = Active()
        self.expand3x3 = nn.Conv2d(squeeze_planes, expand3x3_planes, kernel_size=3, padding=1, bias=False)
        self.expand3x3_activation = nn.ReLU(inplace=True)
 def __init__(self, inplanes, planes, stride=1, downsample=None):
     """Two-conv residual block with Active() (project-defined) layers.

     NOTE(review): bn1 normalizes `inplanes` channels while conv1 outputs
     `planes`; this is only consistent if forward() applies bn1 *before*
     conv1 (pre-activation ordering, as in the sibling Bottleneck and
     _make_layer's downsample path) -- confirm against forward().
     """
     super(BasicBlock, self).__init__()
     self.conv1 = conv3x3(inplanes, planes, stride)  # may downsample via stride
     self.active1 = Active()
     self.bn1 = nn.BatchNorm2d(inplanes)
     self.relu = nn.ReLU(inplace=True)
     self.conv2 = conv3x3(planes, planes)
     self.active2 = Active()
     self.bn2 = nn.BatchNorm2d(planes)
     # Optional projection for the skip connection when shape changes.
     self.downsample = downsample
     self.stride = stride
# Exemplo n.º 3  (scraper artifact: example separator and vote count)
# 0
 def __init__(self, inplanes, planes, stride=1, downsample=None):
     """1x1 -> 3x3 -> 1x1 bottleneck residual block (4x channel expansion)
     with Active() (project-defined) layers interleaved.

     NOTE(review): bn1 is sized for conv1's *input* (pre-activation style)
     while bn3 is sized for conv3's *output* (planes * 4); the exact
     BN/Active/conv ordering must be confirmed against forward().
     """
     super(Bottleneck, self).__init__()
     self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
     self.active1 = Active()
     self.bn1 = nn.BatchNorm2d(inplanes)
     # 3x3 conv carries the stride (spatial downsampling happens here).
     self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                            padding=1, bias=False)
     self.active2 = Active()
     self.bn2 = nn.BatchNorm2d(planes)
     self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
     self.active3 = Active()
     self.bn3 = nn.BatchNorm2d(planes * 4)
     self.relu = nn.ReLU(inplace=True)
     # Optional projection for the skip connection when shape changes.
     self.downsample = downsample
     self.stride = stride
    def __init__(self,nClasses):
        """AlexNet-style fully-convolutional classifier (3-channel input).

        For conv2..conv7 each BatchNorm is sized for that conv's *input*
        channels, suggesting BN -> Active -> conv (pre-activation) ordering
        in forward(); bn1 instead matches conv1's *output* (96).  Confirm
        against forward().
        """
        super(Net, self).__init__()
        self.drop = nn.Dropout2d(0.0)  # p=0.0: currently a no-op placeholder
        self.relu = nn.ReLU(inplace=True)

        # Stem: only conv with a bias; BN sized for its 96-channel output.
        self.conv1 = nn.Conv2d(3, 96, kernel_size = 11, stride = 4, padding = 2)
        self.bn1 = nn.BatchNorm2d(96)
        self.maxpool1 = nn.MaxPool2d(kernel_size = 3, stride=2)

        self.conv2 = nn.Conv2d(96, 256, kernel_size = 5, stride = 1, padding = 2, bias=False)
        self.bn2 = nn.BatchNorm2d(96)
        self.activ2 = Active()
        self.maxpool2 = nn.MaxPool2d(kernel_size = 3, stride=2)

        self.conv3 = nn.Conv2d(256, 384, kernel_size = 3, stride = 1, padding= 1, bias=False)
        self.bn3 = nn.BatchNorm2d(256)
        self.activ3 = Active()

        self.conv4 = nn.Conv2d(384, 384, kernel_size = 3, stride = 1, padding= 1, bias=False)
        self.bn4 = nn.BatchNorm2d(384)
        self.activ4 = Active()

        self.conv5 = nn.Conv2d(384, 256, kernel_size = 3, stride = 1, padding= 1, bias=False)
        self.bn5 = nn.BatchNorm2d(384)
        self.activ5 = Active()

        self.maxpool3 = nn.MaxPool2d(kernel_size = 3, stride=2)

        # FC layers realized as convs: 6x6 conv collapses the feature map.
        self.conv6 = nn.Conv2d(256, 4096, kernel_size = 6, stride = 1, padding = 0, bias=False)
        self.bn6 = nn.BatchNorm2d(256)
        self.activ6 = Active()

        self.conv7 = nn.Conv2d(4096, 4096, kernel_size = 1, stride = 1, padding = 0, bias=False)
        self.bn7 = nn.BatchNorm2d(4096)

        # 1x1 conv classifier head (has bias, not binarized).
        self.conv8 = nn.Conv2d(4096, nClasses, kernel_size = 1, stride = 1)

        self.nClasses = nClasses
    def __init__(self,nClasses):
        """GoogLeNet-style classifier for 1-channel input, built from
        Inception blocks whose convs are wrapped in BN + Active()
        (project-defined activation, presumably binarization).
        """
        super(Net, self).__init__()

        # Stem: first conv keeps its bias; later convs are BN->Active->conv.
        self.pre_layers = nn.Sequential(
            nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3),
            nn.BatchNorm2d(64),
            nn.ReLU(True),
            nn.MaxPool2d(3, stride=2, padding=1),
            nn.BatchNorm2d(64),
            Active(),
            nn.Conv2d(64, 64, kernel_size=1, stride=1, padding=0, bias=False),
            nn.ReLU(True),
            nn.BatchNorm2d(64),
            Active(),
            nn.Conv2d(64, 192, kernel_size=3, stride=1, padding=1, bias=False),
            nn.ReLU(True),
            nn.MaxPool2d(3, stride=2, padding=1)
        )

        # Inception(in, n1x1, n3x3red, n3x3, n5x5red, n5x5, pool_planes);
        # each block's output width is n1x1 + n3x3 + n5x5 + pool_planes.
        self.a3 = Inception(192,  64,  96, 128, 16, 32, 32)
        self.b3 = Inception(256, 128, 128, 192, 32, 96, 64)

        self.maxpool = nn.MaxPool2d(3, stride=2)

        self.a4 = Inception(480, 192,  96, 204, 16,  48,  64)
        self.b4 = Inception(508, 160, 112, 224, 24,  64,  64)
        self.c4 = Inception(512, 128, 128, 256, 24,  64,  64)
        self.d4 = Inception(512, 112, 144, 288, 32,  64,  64)
        self.e4 = Inception(528, 256, 160, 320, 32, 128, 128)

        self.maxpool2 = nn.MaxPool2d(3, stride=2, padding=1)

        self.a5 = Inception(832, 256, 160, 320, 48, 128, 128)
        self.b5 = Inception(832, 384, 192, 384, 48, 128, 128)

        # Global 7x7 average pool, dropout, then the linear classifier.
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.drop = nn.Dropout(0.4)
        self.linear = nn.Linear(1024, nClasses)
# Exemplo n.º 6  (scraper artifact: example separator and vote count)
# 0
    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack `blocks` instances of `block` into a Sequential stage.

        The first block may change stride and/or width; when the residual
        shape changes, a BN -> Active -> 1x1-conv projection is built for
        its skip connection.  Updates self.inplanes to the stage's output
        width as a side effect.
        """
        needs_projection = stride != 1 or self.inplanes != planes * block.expansion
        downsample = None
        if needs_projection:
            downsample = nn.Sequential(
                nn.BatchNorm2d(self.inplanes),
                Active(),
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
            )

        # First block handles the transition; the rest keep shape.
        layers = [block(self.inplanes, planes, stride, downsample)]
        self.inplanes = planes * block.expansion
        layers.extend(block(self.inplanes, planes) for _ in range(blocks - 1))

        return nn.Sequential(*layers)
    def __init__(self, nClasses):
        """VGG-style CIFAR classifier with Active() (project-defined)
        layers; convs double channels per stage with 2x2 max-pools between.

        NOTE(review): for conv2..conv7 each BatchNorm matches that conv's
        *input* channels (pre-activation ordering), but bn1 is 128 while
        conv1's input is 3 -- bn1 is presumably applied *after* conv1;
        confirm against forward().
        """
        super(Net, self).__init__()

        self.nClasses = nClasses
        self.relu = nn.ReLU()
        self.hardtanh = nn.Hardtanh()
        self.drop = nn.Dropout2d(0.4)

        self.bn1 = nn.BatchNorm2d(128)
        self.conv1 = nn.Conv2d(3, 128, kernel_size=3, stride=1, padding=1)

        self.conv2 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1)
        self.bn2 = nn.BatchNorm2d(128)
        self.activ2 = Active()

        self.maxpool1 = nn.MaxPool2d(kernel_size=2)

        self.conv3 = nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1)
        self.bn3 = nn.BatchNorm2d(128)
        self.activ3 = Active()

        self.conv4 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
        self.bn4 = nn.BatchNorm2d(256)
        self.activ4 = Active()

        self.maxpool2 = nn.MaxPool2d(kernel_size=2)

        self.conv5 = nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1)
        self.bn5 = nn.BatchNorm2d(256)
        self.activ5 = Active()

        self.conv6 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
        self.bn6 = nn.BatchNorm2d(512)
        self.activ6 = Active()

        self.maxpool3 = nn.MaxPool2d(kernel_size=2)

        # 4x4 conv collapses the 4x4 feature map (32 / 2^3) to 1x1.
        self.conv7 = nn.Conv2d(512, 1024, kernel_size=4, stride=1)
        self.bn7 = nn.BatchNorm2d(512)
        self.activ7 = Active()

        # 1x1 conv classifier head.
        self.conv8 = nn.Conv2d(1024,
                               nClasses,
                               kernel_size=1,
                               stride=1,
                               padding=0)
    def __init__(self, in_planes, n1x1, n3x3red, n3x3, n5x5red, n5x5, pool_planes):
        """Inception block with four parallel branches (1x1, 1x1->3x3,
        1x1->5x5, 3x3-pool->1x1).  Every conv is bias-free and wrapped as
        BN -> Active (project-defined activation) -> conv -> ReLU; all
        branches preserve spatial size so outputs can be concatenated.
        """
        super(Inception, self).__init__()

        def _unit(cin, cout, ksize, pad=0):
            # One BN -> Active -> conv -> ReLU group, as a module list.
            return [nn.BatchNorm2d(cin),
                    Active(),
                    nn.Conv2d(cin, cout, kernel_size=ksize, padding=pad, bias=False),
                    nn.ReLU(True)]

        # 1x1 conv branch
        self.b1 = nn.Sequential(*_unit(in_planes, n1x1, 1))

        # 1x1 conv -> 3x3 conv branch
        self.b2 = nn.Sequential(*(_unit(in_planes, n3x3red, 1)
                                  + _unit(n3x3red, n3x3, 3, pad=1)))

        # 1x1 conv -> 5x5 conv branch
        self.b3 = nn.Sequential(*(_unit(in_planes, n5x5red, 1)
                                  + _unit(n5x5red, n5x5, 5, pad=2)))

        # 3x3 pool -> 1x1 conv branch
        self.b4 = nn.Sequential(nn.MaxPool2d(3, stride=1, padding=1),
                                *_unit(in_planes, pool_planes, 1))
    def __init__(self, nClasses):
        """SqueezeNet-style classifier for 1-channel input, with the Fire
        modules flattened into explicit squeeze (convX1) / expand-1x1
        (convX2) / expand-3x3 (convX3) layers.  From block 3 onward the
        squeeze->expand convs are bias-free and paired with BN + Active()
        (project-defined activation), with BN sized for each conv's input
        channels (pre-activation ordering -- confirm against forward()).
        """
        super(SqueezeNet, self).__init__()
        self.num_classes = nClasses
        # Stem.
        self.conv1 = nn.Conv2d(1, 96, kernel_size=7, stride=2)
        self.bn1 = nn.BatchNorm2d(96)
        self.relu1 = nn.ReLU(inplace=True)
        self.maxpool1 = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)

        # Fire 2 (full-precision: biased convs, BN after each conv).
        self.conv21 = nn.Conv2d(96, 16, kernel_size=1)
        self.bn21 = nn.BatchNorm2d(16)
        self.relu21 = nn.ReLU(inplace=True)
        self.conv22 = nn.Conv2d(16, 64, kernel_size=1)
        self.bn22 = nn.BatchNorm2d(64)
        self.relu22 = nn.ReLU(inplace=True)
        self.conv23 = nn.Conv2d(16, 64, kernel_size=3, padding=1)
        self.bn23 = nn.BatchNorm2d(64)
        self.relu23 = nn.ReLU(inplace=True)

        # Fire 3 (binarized expand convs: BN + Active before each).
        self.conv31 = nn.Conv2d(128, 16, kernel_size=1, bias=False)
        self.bn31 = nn.BatchNorm2d(16)
        self.relu31 = nn.ReLU(inplace=True)
        self.bn32 = nn.BatchNorm2d(16)
        self.activ32 = Active()
        self.conv32 = nn.Conv2d(16, 64, kernel_size=1, bias=False)
        self.relu32 = nn.ReLU(inplace=True)
        self.bn33 = nn.BatchNorm2d(16)
        self.activ33 = Active()
        self.conv33 = nn.Conv2d(16, 64, kernel_size=3, padding=1, bias=False)
        self.relu33 = nn.ReLU(inplace=True)

        # Fire 4.
        self.conv41 = nn.Conv2d(128, 32, kernel_size=1, bias=False)
        self.bn41 = nn.BatchNorm2d(32)
        self.relu41 = nn.ReLU(inplace=True)
        self.bn42 = nn.BatchNorm2d(32)
        self.activ42 = Active()
        self.conv42 = nn.Conv2d(32, 128, kernel_size=1, bias=False)
        self.relu42 = nn.ReLU(inplace=True)
        self.bn43 = nn.BatchNorm2d(32)
        self.activ43 = Active()
        self.conv43 = nn.Conv2d(32, 128, kernel_size=3, padding=1, bias=False)
        self.relu43 = nn.ReLU(inplace=True)

        self.maxpool5 = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)

        # Fire 6.
        self.conv61 = nn.Conv2d(256, 32, kernel_size=1, bias=False)
        self.bn61 = nn.BatchNorm2d(32)
        self.relu61 = nn.ReLU(inplace=True)
        self.bn62 = nn.BatchNorm2d(32)
        self.activ62 = Active()
        self.conv62 = nn.Conv2d(32, 128, kernel_size=1, bias=False)
        self.relu62 = nn.ReLU(inplace=True)
        self.bn63 = nn.BatchNorm2d(32)
        self.activ63 = Active()
        self.conv63 = nn.Conv2d(32, 128, kernel_size=3, padding=1, bias=False)
        self.relu63 = nn.ReLU(inplace=True)

        # Fire 7.
        self.conv71 = nn.Conv2d(256, 48, kernel_size=1, bias=False)
        self.bn71 = nn.BatchNorm2d(48)
        self.relu71 = nn.ReLU(inplace=True)
        self.bn72 = nn.BatchNorm2d(48)
        self.activ72 = Active()
        self.conv72 = nn.Conv2d(48, 192, kernel_size=1, bias=False)
        self.relu72 = nn.ReLU(inplace=True)
        self.bn73 = nn.BatchNorm2d(48)
        self.activ73 = Active()
        self.conv73 = nn.Conv2d(48, 192, kernel_size=3, padding=1, bias=False)
        self.relu73 = nn.ReLU(inplace=True)

        # Fire 8.
        self.conv81 = nn.Conv2d(384, 48, kernel_size=1, bias=False)
        self.bn81 = nn.BatchNorm2d(48)
        self.relu81 = nn.ReLU(inplace=True)
        self.bn82 = nn.BatchNorm2d(48)
        self.activ82 = Active()
        self.conv82 = nn.Conv2d(48, 192, kernel_size=1, bias=False)
        self.relu82 = nn.ReLU(inplace=True)
        self.bn83 = nn.BatchNorm2d(48)
        self.activ83 = Active()
        self.conv83 = nn.Conv2d(48, 192, kernel_size=3, padding=1, bias=False)
        self.relu83 = nn.ReLU(inplace=True)

        # Fire 9.
        self.conv91 = nn.Conv2d(384, 64, kernel_size=1, bias=False)
        self.bn91 = nn.BatchNorm2d(64)
        self.relu91 = nn.ReLU(inplace=True)
        self.bn92 = nn.BatchNorm2d(64)
        self.activ92 = Active()
        self.conv92 = nn.Conv2d(64, 256, kernel_size=1, bias=False)
        self.relu92 = nn.ReLU(inplace=True)
        self.bn93 = nn.BatchNorm2d(64)
        self.activ93 = Active()
        self.conv93 = nn.Conv2d(64, 256, kernel_size=3, padding=1, bias=False)
        self.relu93 = nn.ReLU(inplace=True)

        self.maxpool10 = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)

        # Fire 11 (full-precision again; BN after each conv).
        self.conv111 = nn.Conv2d(512, 64, kernel_size=1, bias=False)
        self.bn111 = nn.BatchNorm2d(64)
        self.relu111 = nn.ReLU(inplace=True)
        self.conv112 = nn.Conv2d(64, 256, kernel_size=1, bias=False)
        self.bn112 = nn.BatchNorm2d(256)
        self.relu112 = nn.ReLU(inplace=True)
        self.conv113 = nn.Conv2d(64, 256, kernel_size=3, padding=1, bias=False)
        self.bn113 = nn.BatchNorm2d(256)
        self.relu113 = nn.ReLU(inplace=True)

        # Classifier head: dropout -> 1x1 conv -> ReLU -> global 13x13 avg pool.
        self.drop121 = nn.Dropout(p=0.2)
        self.conv121 = nn.Conv2d(512, self.num_classes, kernel_size=1)
        self.relu121 = nn.ReLU(inplace=True)
        self.avgpool121 = nn.AvgPool2d(13)

        # Weight init, SqueezeNet-style: N(0, 0.01) for the final classifier
        # conv, Kaiming-uniform elsewhere.  The non-underscore init.normal /
        # init.kaiming_uniform aliases were deprecated in PyTorch 0.4 and
        # removed later; use the in-place variants.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                if m is self.conv121:
                    init.normal_(m.weight.data, mean=0.0, std=0.01)
                else:
                    init.kaiming_uniform_(m.weight.data)
                if m.bias is not None:
                    m.bias.data.zero_()
# Exemplo n.º 10  (scraper artifact: example separator and vote count)
# 0
    def __init__(self, nClasses):
        """AlexNet-like fully-convolutional classifier for 1-channel input.

        For conv2..conv7 each BatchNorm matches that conv's *input*
        channels, suggesting BN -> Active -> conv (pre-activation)
        ordering in forward(); bn1 matches conv1's *output* instead.
        Confirm against forward().
        """
        super(Net, self).__init__()

        self.relu = nn.ReLU(True)
        self.drop = nn.Dropout2d(0.2)

        # Stem: only conv with a bias.
        self.conv1 = nn.Conv2d(1, 64, kernel_size=15, stride=3, padding=0)
        self.bn1 = nn.BatchNorm2d(64)
        self.maxpool1 = nn.MaxPool2d(kernel_size=3, stride=2)

        self.conv2 = nn.Conv2d(64,
                               128,
                               kernel_size=5,
                               stride=1,
                               padding=0,
                               bias=False)
        self.bn2 = nn.BatchNorm2d(64)
        self.activ2 = Active()
        self.maxpool2 = nn.MaxPool2d(kernel_size=3, stride=2)

        self.conv3 = nn.Conv2d(128,
                               256,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               bias=False)
        self.bn3 = nn.BatchNorm2d(128)
        self.activ3 = Active()

        # self.conv4 = nn.Conv2d(96, 192, 1, 1)
        self.conv4 = nn.Conv2d(256,
                               256,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               bias=False)
        self.bn4 = nn.BatchNorm2d(256)
        self.activ4 = Active()

        self.conv5 = nn.Conv2d(256,
                               256,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               bias=False)
        self.bn5 = nn.BatchNorm2d(256)
        self.activ5 = Active()

        self.maxpool3 = nn.MaxPool2d(kernel_size=3, stride=2)

        # FC layers realized as convs: 7x7 conv collapses the feature map.
        self.conv6 = nn.Conv2d(256,
                               512,
                               kernel_size=7,
                               stride=1,
                               padding=0,
                               bias=False)
        self.bn6 = nn.BatchNorm2d(256)
        self.activ6 = Active()

        self.conv7 = nn.Conv2d(512,
                               512,
                               kernel_size=1,
                               stride=1,
                               padding=0,
                               bias=False)
        self.bn7 = nn.BatchNorm2d(512)
        self.activ7 = Active()

        # 1x1 conv classifier head (has bias, not binarized).
        self.conv8 = nn.Conv2d(512,
                               nClasses,
                               kernel_size=1,
                               stride=1,
                               padding=0)

        self.nClasses = nClasses
# Exemplo n.º 11  (scraper artifact: example separator and vote count)
# 0
    def __init__(self, nClasses):
        """ResNet-18-style network for 1-channel input, written out as a
        flat list of layers instead of BasicBlock modules.

        Layer naming: bnXY/activXY/convXY is conv Y of residual block X;
        convX3 (X in {4, 6, 8}) is the 1x1 strided projection for that
        block's skip connection.  In the binarized blocks (2-8) each
        BatchNorm matches the following conv's *input* channels
        (pre-activation ordering -- confirm against forward()); the final
        block (9) uses plain conv -> BN with no Active().
        """
        super(ResNet, self).__init__()
        self.nClasses = nClasses

        # Stem: 7x7/2 conv + 3x3/2 max-pool (224 -> 56 spatial).
        self.conv1 = nn.Conv2d(1,
                               64,
                               kernel_size=7,
                               stride=2,
                               padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu1 = nn.ReLU(inplace=True)
        self.maxpool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        # Stage 1 (64 channels): blocks 2 and 3, identity skips.
        self.bn21 = nn.BatchNorm2d(64)
        self.activ21 = Active()
        self.conv21 = nn.Conv2d(64,
                                64,
                                kernel_size=3,
                                stride=1,
                                padding=1,
                                bias=False)
        self.relu21 = nn.ReLU(inplace=True)
        self.bn22 = nn.BatchNorm2d(64)
        self.activ22 = Active()
        self.conv22 = nn.Conv2d(64, 64, kernel_size=3, padding=1, bias=False)
        self.relu22 = nn.ReLU(inplace=True)

        self.bn31 = nn.BatchNorm2d(64)
        self.activ31 = Active()
        self.conv31 = nn.Conv2d(64,
                                64,
                                kernel_size=3,
                                stride=1,
                                padding=1,
                                bias=False)
        self.relu31 = nn.ReLU(inplace=True)
        self.bn32 = nn.BatchNorm2d(64)
        self.activ32 = Active()
        self.conv32 = nn.Conv2d(64, 64, kernel_size=3, padding=1, bias=False)
        self.relu32 = nn.ReLU(inplace=True)

        # Stage 2 (64 -> 128, stride 2): block 4 has a projection skip (conv43).
        self.bn41 = nn.BatchNorm2d(64)
        self.activ41 = Active()
        self.conv41 = nn.Conv2d(64,
                                128,
                                kernel_size=3,
                                stride=2,
                                padding=1,
                                bias=False)
        self.relu41 = nn.ReLU(inplace=True)
        self.bn42 = nn.BatchNorm2d(128)
        self.activ42 = Active()
        self.conv42 = nn.Conv2d(128, 128, kernel_size=3, padding=1, bias=False)
        self.conv43 = nn.Conv2d(64, 128, kernel_size=1, stride=2, bias=False)
        self.bn43 = nn.BatchNorm2d(128)
        self.relu43 = nn.ReLU(inplace=True)

        self.bn51 = nn.BatchNorm2d(128)
        self.activ51 = Active()
        self.conv51 = nn.Conv2d(128,
                                128,
                                kernel_size=3,
                                stride=1,
                                padding=1,
                                bias=False)
        self.relu51 = nn.ReLU(inplace=True)
        self.bn52 = nn.BatchNorm2d(128)
        self.activ52 = Active()
        self.conv52 = nn.Conv2d(128, 128, kernel_size=3, padding=1, bias=False)
        self.relu52 = nn.ReLU(inplace=True)

        # Stage 3 (128 -> 256, stride 2): block 6 has a projection skip (conv63).
        self.bn61 = nn.BatchNorm2d(128)
        self.activ61 = Active()
        self.conv61 = nn.Conv2d(128,
                                256,
                                kernel_size=3,
                                stride=2,
                                padding=1,
                                bias=False)
        self.relu61 = nn.ReLU(inplace=True)
        self.bn62 = nn.BatchNorm2d(256)
        self.activ62 = Active()
        self.conv62 = nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=False)
        self.conv63 = nn.Conv2d(128, 256, kernel_size=1, stride=2, bias=False)
        self.bn63 = nn.BatchNorm2d(256)
        self.relu63 = nn.ReLU(inplace=True)

        self.bn71 = nn.BatchNorm2d(256)
        self.activ71 = Active()
        self.conv71 = nn.Conv2d(256,
                                256,
                                kernel_size=3,
                                stride=1,
                                padding=1,
                                bias=False)
        self.relu71 = nn.ReLU(inplace=True)
        self.bn72 = nn.BatchNorm2d(256)
        self.activ72 = Active()
        self.conv72 = nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=False)
        self.relu72 = nn.ReLU(inplace=True)

        # Stage 4 (256 -> 512, stride 2): block 8 has a projection skip (conv83).
        self.bn81 = nn.BatchNorm2d(256)
        self.activ81 = Active()
        self.conv81 = nn.Conv2d(256,
                                512,
                                kernel_size=3,
                                stride=2,
                                padding=1,
                                bias=False)
        self.relu81 = nn.ReLU(inplace=True)
        self.bn82 = nn.BatchNorm2d(512)
        self.activ82 = Active()
        self.conv82 = nn.Conv2d(512, 512, kernel_size=3, padding=1, bias=False)
        self.conv83 = nn.Conv2d(256, 512, kernel_size=1, stride=2, bias=False)
        self.bn83 = nn.BatchNorm2d(512)
        self.relu83 = nn.ReLU(inplace=True)

        # Block 9: full-precision (no Active), conv -> BN ordering.
        self.conv91 = nn.Conv2d(512,
                                512,
                                kernel_size=3,
                                stride=1,
                                padding=1,
                                bias=False)
        self.bn91 = nn.BatchNorm2d(512)
        self.relu91 = nn.ReLU(inplace=True)
        self.conv92 = nn.Conv2d(512, 512, kernel_size=3, padding=1, bias=False)
        self.bn92 = nn.BatchNorm2d(512)
        self.relu92 = nn.ReLU(inplace=True)

        # Head: global 7x7 average pool; "linear" head is a 1x1 conv.
        self.avgpool101 = nn.AvgPool2d(7)
        self.linear111 = nn.Conv2d(512, nClasses, kernel_size=1, bias=False)

        # He initialization for convs; BN starts as identity (gamma=1, beta=0).
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()