Example #1
    def __init__(self, ninput, noutput, use_sppad=False, split=1):
        super(DownsamplerBlock, self).__init__()

        self.ninput = ninput
        self.noutput = noutput

        if self.ninput < self.noutput:
            # noutput > ninput: the strided conv adds (noutput - ninput) channels,
            # the pooled input supplies the remaining ninput
            self.conv = spConv2d(ninput,
                                 noutput - ninput,
                                 kernel_size=3,
                                 stride=2,
                                 padding=1,
                                 use_sppad=use_sppad,
                                 split=split)
            self.pool = nn.MaxPool2d(2, stride=2)
        else:
            # noutput <= ninput: a single strided conv produces all noutput channels
            self.conv = spConv2d(ninput,
                                 noutput,
                                 kernel_size=3,
                                 stride=2,
                                 padding=1,
                                 use_sppad=use_sppad,
                                 split=split)

        self.bn = nn.BatchNorm2d(noutput)
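
spConv2d itself is not defined in any of these snippets. As a minimal stand-in so the examples can be exercised, the sketch below treats spConv2d as a drop-in replacement for nn.Conv2d that merely accepts the extra use_sppad and split arguments; the real special-padding behavior is project-specific and not reproduced here, so everything inside the class body is an assumption.

import torch.nn as nn

class spConv2d(nn.Module):
    """Hypothetical stand-in: nn.Conv2d plus the use_sppad/split arguments
    used by every snippet. The actual special-padding logic is omitted."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True,
                 use_sppad=False, split=1):
        super().__init__()
        self.use_sppad = use_sppad  # special-padding flag (semantics assumed)
        self.split = split          # number of feature splits (semantics assumed)
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
                              stride=stride, padding=padding,
                              dilation=dilation, groups=groups, bias=bias)

    def forward(self, x):
        # With use_sppad=False this reduces to a plain convolution.
        return self.conv(x)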
Example #2
 def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, groups=1, bias=False, use_sppad=False, split=1):
     super(BasicBlock, self).__init__()
     self.conv1 = spConv2d(in_planes, out_planes, kernel_size, stride, padding, groups=groups, bias=bias, use_sppad=use_sppad, split=split)
     #self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size, stride, padding, groups=groups, bias=bias)
     self.bn1 = nn.BatchNorm2d(out_planes)
     self.relu = nn.ReLU(inplace=True)
     self.conv2 = spConv2d(out_planes, out_planes, kernel_size, 1, padding, groups=groups, bias=bias, use_sppad=use_sppad, split=split)
     #self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size, 1, padding, groups=groups, bias=bias)
     self.bn2 = nn.BatchNorm2d(out_planes)
     self.downsample = None
     if stride > 1:
         self.downsample = nn.Sequential(
             spConv2d(in_planes, out_planes, kernel_size=1, stride=stride,
                      bias=False, use_sppad=use_sppad, split=split),
             nn.BatchNorm2d(out_planes),
         )
Example #3
 def __init__(self, inplanes, planes, use_sppad=False, split=1):
     super(ParallelDilatedConv, self).__init__()
     #self.dilated_conv_1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=1, padding=1, dilation=1) 
     #self.dilated_conv_2 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=1, padding=2, dilation=2)
     #self.dilated_conv_3 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=1, padding=3, dilation=3)
     #self.dilated_conv_4 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=1, padding=4, dilation=4)
     self.dilated_conv_1 = spConv2d(inplanes, planes, kernel_size=3, stride=1, padding=1, dilation=1, use_sppad=use_sppad, split=split)
     self.dilated_conv_2 = spConv2d(inplanes, planes, kernel_size=3, stride=1, padding=2, dilation=2, use_sppad=use_sppad, split=split)
     self.dilated_conv_3 = spConv2d(inplanes, planes, kernel_size=3, stride=1, padding=3, dilation=3, use_sppad=use_sppad, split=split)
     self.dilated_conv_4 = spConv2d(inplanes, planes, kernel_size=3, stride=1, padding=4, dilation=4, use_sppad=use_sppad, split=split)
     self.relu1 = nn.ELU(inplace=True)
     self.relu2 = nn.ELU(inplace=True)
     self.relu3 = nn.ELU(inplace=True)
     self.relu4 = nn.ELU(inplace=True)
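
The snippet ends at the constructor. A plausible forward, assuming each dilated branch sees the same input and the four ELU outputs are summed element-wise; the sum (rather than concatenation) is consistent with Example #6, where the 512-channel output of ParallelDilatedConv(512, 512) feeds a deconvolution expecting 512 input channels:

 def forward(self, x):
     # padding == dilation keeps H x W fixed, so the branch outputs align
     out1 = self.relu1(self.dilated_conv_1(x))
     out2 = self.relu2(self.dilated_conv_2(x))
     out3 = self.relu3(self.dilated_conv_3(x))
     out4 = self.relu4(self.dilated_conv_4(x))
     return out1 + out2 + out3 + out4  # element-wise sum (assumed)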
Example #4
 def __init__(self,
              nIn,
              nOut,
              kSize,
              stride=1,
              d=1,
              use_sppad=False,
              split=1):
     """
     args:
        nIn: number of input channels
        nOut: number of output channels (usually nOut == nIn)
        kSize: kernel size
        stride: optional stride rate for down-sampling
        d: dilation rate
     """
     super().__init__()
     padding = int((kSize - 1) / 2) * d  # keeps H x W unchanged at stride 1
     self.conv = spConv2d(nIn,
                          nOut, (kSize, kSize),
                          stride=stride,
                          padding=(padding, padding),
                          groups=nIn,  # one group per input channel (depthwise-style)
                          dilation=d,
                          use_sppad=use_sppad,
                          split=split)
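
Because padding is computed as (kSize - 1) // 2 * d, the spatial size is preserved at stride 1, and groups=nIn makes the layer depthwise. A quick self-contained check of that arithmetic with plain nn.Conv2d:

import torch
import torch.nn as nn

kSize, d = 3, 2
conv = nn.Conv2d(8, 8, kSize, stride=1,
                 padding=(kSize - 1) // 2 * d,  # = 2 here, matching the formula above
                 dilation=d, groups=8)          # groups == in_channels -> depthwise
print(conv(torch.randn(1, 8, 32, 32)).shape)    # torch.Size([1, 8, 32, 32])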
Example #5
    def __init__(self, ninput, noutput):
        super().__init__()

        self.conv = spConv2d(ninput, noutput-ninput, kernel_size=3, stride=2, padding=1, use_sppad=True, split=4)
        #self.conv = nn.Conv2d(ninput, noutput-ninput, (3, 3), stride=2, padding=1, bias=True)
        self.pool = nn.MaxPool2d(2, stride=2)
        self.bn = nn.BatchNorm2d(noutput, eps=1e-3)
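
This constructor matches ERFNet's DownsamplerBlock: the strided conv produces noutput - ninput channels and the max-pool the remaining ninput, so the two paths are concatenated to noutput channels. A sketch of the usual forward (assumed here; torch and torch.nn.functional as F are taken as imported):

    def forward(self, input):
        # conv path: (noutput - ninput) channels; pool path: ninput channels
        output = torch.cat([self.conv(input), self.pool(input)], dim=1)
        output = self.bn(output)
        return F.relu(output)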
Example #6
    def __init__(self, num_classes=28, encoder=None, use_sppad=True, split=4):  # use special padding and upsampling; split the features into 4
        super(Net, self).__init__()

        self.num_classes = num_classes
        self.split = split

        #self.conv1 = nn.Conv2d(3, 96, kernel_size=3, stride=2, padding=1) # 32
        self.conv1 = spConv2d(3, 96, kernel_size=3, stride=2, padding=1, use_sppad=use_sppad, split=split) # 32
        # self.bn1 = nn.BatchNorm2d(96)
        self.relu1 = nn.ELU(inplace=True)
        self.maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2) # 16
        self.fire1_1 = Fire(96, 16, 64, use_sppad=use_sppad, split=split)
        self.fire1_2 = Fire(128, 16, 64, use_sppad=use_sppad, split=split)
        self.maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2) # 8
        self.fire2_1 = Fire(128, 32, 128, use_sppad=use_sppad, split=split)
        self.fire2_2 = Fire(256, 32, 128, use_sppad=use_sppad, split=split)
        self.maxpool3 = nn.MaxPool2d(kernel_size=2, stride=2) # 4
        self.fire3_1 = Fire(256, 64, 256, use_sppad=use_sppad, split=split)
        self.fire3_2 = Fire(512, 64, 256, use_sppad=use_sppad, split=split)
        self.fire3_3 = Fire(512, 64, 256, use_sppad=use_sppad, split=split)
        self.parallel = ParallelDilatedConv(512, 512, use_sppad=use_sppad, split=split)
        self.deconv1 = nn.ConvTranspose2d(512, 256, 3, stride=2, padding=1, output_padding=1)
        # self.bn2 = nn.BatchNorm2d(256)
        self.relu2 = nn.ELU(inplace=True)
        self.deconv2 = nn.ConvTranspose2d(512, 128, 3, stride=2, padding=1, output_padding=1)
        # self.bn3 = nn.BatchNorm2d(128)
        self.relu3 = nn.ELU(inplace=True)
        self.deconv3 = nn.ConvTranspose2d(256, 96, 3, stride=2, padding=1, output_padding=1)
        # self.bn4 = nn.BatchNorm2d(96)
        self.relu4 = nn.ELU(inplace=True)
        self.deconv4 = nn.ConvTranspose2d(192, self.num_classes, 3, stride=2, padding=1, output_padding=1)

        self.conv3_1 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1) # 32
        self.conv3_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1) # 32
        self.conv2_1 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1) # 32
        self.conv2_2 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1) # 32
        self.conv1_1 = nn.Conv2d(96, 96, kernel_size=3, stride=1, padding=1) # 32
        self.conv1_2 = nn.Conv2d(192, 192, kernel_size=3, stride=1, padding=1) # 32

        #self.conv3_1 = spConv2d(256, 256, kernel_size=3, stride=1, padding=1, use_sppad=use_sppad, split=split) # 32
        #self.conv3_2 = spConv2d(512, 512, kernel_size=3, stride=1, padding=1, use_sppad=use_sppad, split=split) # 32
        #self.conv2_1 = spConv2d(128, 128, kernel_size=3, stride=1, padding=1, use_sppad=use_sppad, split=split) # 32
        #self.conv2_2 = spConv2d(256, 256, kernel_size=3, stride=1, padding=1, use_sppad=use_sppad, split=split) # 32
        #self.conv1_1 = spConv2d(96, 96, kernel_size=3, stride=1, padding=1, use_sppad=use_sppad, split=split) # 32
        #self.conv1_2 = spConv2d(192, 192, kernel_size=3, stride=1, padding=1, use_sppad=use_sppad, split=split) # 32

        self.relu1_1 = nn.ELU(inplace=True)
        self.relu1_2 = nn.ELU(inplace=True)
        self.relu2_1 = nn.ELU(inplace=True)
        self.relu2_2 = nn.ELU(inplace=True)
        self.relu3_1 = nn.ELU(inplace=True)
        self.relu3_2 = nn.ELU(inplace=True)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.in_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
Example #7
    def __init__(self, n_classes=28, encoder=None, use_sppad=True, split=4):  # use special padding and upsampling; split the features into 4
        """
        Model initialization
        :param x_n: number of input neurons
        :type x_n: int
        """
        super(Net, self).__init__()
        self.split = split
        #self.use_sppad=use_sppad
        self.conv1 = spConv2d(3, 64, 7, 2, 3, bias=False, use_sppad=use_sppad, split=split)
        #self.conv1 = nn.Conv2d(3, 64, 7, 2, 3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(3, 2, 1)

        self.encoder1 = Encoder(64, 64, 3, 1, 1, use_sppad=use_sppad, split=split)
        self.encoder2 = Encoder(64, 128, 3, 2, 1, use_sppad=use_sppad, split=split)
        self.encoder3 = Encoder(128, 256, 3, 2, 1, use_sppad=use_sppad, split=split)
        self.encoder4 = Encoder(256, 512, 3, 2, 1, use_sppad=use_sppad, split=split)

        self.decoder1 = Decoder(64, 64, 3, 1, 1, 0)
        self.decoder2 = Decoder(128, 64, 3, 2, 1, 1)
        self.decoder3 = Decoder(256, 128, 3, 2, 1, 1)
        self.decoder4 = Decoder(512, 256, 3, 2, 1, 1)

        # Classifier
        self.tp_conv1 = nn.Sequential(nn.ConvTranspose2d(64, 32, 3, 2, 1, 1),
                                      nn.BatchNorm2d(32),
                                      nn.ReLU(inplace=True),)
        self.conv2 = nn.Sequential(nn.Conv2d(32, 32, 3, 1, 1),
                                nn.BatchNorm2d(32),
                                nn.ReLU(inplace=True),)
        self.tp_conv2 = nn.ConvTranspose2d(32, n_classes, 2, 2, 0)
        self.lsm = nn.LogSoftmax(dim=1)  # explicit dim; the no-argument form is deprecated
Example #8
    def __init__(self, chann, dropprob, dilated):       
        super().__init__()

        self.conv3x1_1 = nn.Conv2d(chann, chann, (3, 1), stride=1, padding=(1,0), bias=True)

        #self.conv1x3_1 = nn.Conv2d(chann, chann, (1,3), stride=1, padding=(0,1), bias=True)
        self.conv1x3_1 = spConv2d(chann, chann, kernel_size=(1, 3), padding=(0, 1), use_sppad=True, split=4)

        self.bn1 = nn.BatchNorm2d(chann, eps=1e-03)

        self.conv3x1_2 = nn.Conv2d(chann, chann, (3, 1), stride=1, padding=(1*dilated,0), bias=True, dilation = (dilated,1))

        #self.conv1x3_2 = nn.Conv2d(chann, chann, (1,3), stride=1, padding=(0,1*dilated), bias=True, dilation = (1, dilated))
        self.conv1x3_2 = spConv2d(chann, chann, (1, 3), stride=1, padding=(0, dilated), dilation=dilated, use_sppad=True, split=4)

        self.bn2 = nn.BatchNorm2d(chann, eps=1e-03)

        self.dropout = nn.Dropout2d(dropprob)
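
These factorized 3x1 / 1x3 pairs are ERFNet's non-bottleneck-1D layer. A sketch of the customary residual forward (assumed, since the snippet stops at the constructor; F is torch.nn.functional):

    def forward(self, input):
        output = F.relu(self.conv3x1_1(input))
        output = F.relu(self.bn1(self.conv1x3_1(output)))
        output = F.relu(self.conv3x1_2(output))
        output = self.bn2(self.conv1x3_2(output))
        if self.dropout.p != 0:
            output = self.dropout(output)
        return F.relu(output + input)  # residual shortcut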
Example #9
    def __init__(self, inplanes, squeeze_planes, expand_planes, use_sppad=False, split=1):
        super(Fire, self).__init__()
        #self.conv1 = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1, stride=1)
        self.conv1 = spConv2d(inplanes, squeeze_planes, kernel_size=1, stride=1, use_sppad=use_sppad, split=split)
        # self.bn1 = nn.BatchNorm2d(squeeze_planes)
        self.relu1 = nn.ELU(inplace=True)
        #self.conv2 = nn.Conv2d(squeeze_planes, expand_planes, kernel_size=1, stride=1)
        self.conv2 = spConv2d(squeeze_planes, expand_planes, kernel_size=1, stride=1, use_sppad=use_sppad, split=split)
        # self.bn2 = nn.BatchNorm2d(expand_planes)
        #self.conv3 = nn.Conv2d(squeeze_planes, expand_planes, kernel_size=3, stride=1, padding=1)
        self.conv3 = spConv2d(squeeze_planes, expand_planes, kernel_size=3, stride=1, padding=1, use_sppad=use_sppad, split=split)
        # self.bn3 = nn.BatchNorm2d(expand_planes)
        self.relu2 = nn.ELU(inplace=True)

        # MSR (He) initialization
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.in_channels
                m.weight.data.normal_(0, math.sqrt(2./n))
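
A plausible forward for this Fire variant, assuming the SqueezeNet wiring in which the squeeze output feeds both expand convolutions and their results are concatenated; this matches Example #6, where Fire(96, 16, 64) is followed by a block expecting 128 = 2 * 64 input channels:

    def forward(self, x):
        x = self.relu1(self.conv1(x))                       # 1x1 squeeze
        out1 = self.conv2(x)                                # 1x1 expand
        out2 = self.conv3(x)                                # 3x3 expand
        return self.relu2(torch.cat([out1, out2], dim=1))  # 2 * expand_planes channels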
Example #10
    def __init__(self,
                 ninput,
                 dilated,
                 k=40,
                 dropprob=0.02,
                 use_sppad=False,
                 split=1):
        super(EDABlock, self).__init__()

        # k: growth rate
        # dropprob: probability of the dropout layer between the last ReLU
        # and the concatenation of each module

        self.conv1x1 = nn.Conv2d(ninput, k, kernel_size=1)
        self.bn0 = nn.BatchNorm2d(k)

        self.conv3x1_1 = nn.Conv2d(k, k, kernel_size=(3, 1), padding=(1, 0))
        self.conv1x3_1 = spConv2d(k,
                                  k,
                                  kernel_size=(1, 3),
                                  padding=(0, 1),
                                  use_sppad=use_sppad,
                                  split=split)
        self.bn1 = nn.BatchNorm2d(k)

        self.conv3x1_2 = nn.Conv2d(k,
                                   k, (3, 1),
                                   stride=1,
                                   padding=(dilated, 0),
                                   dilation=dilated)
        self.conv1x3_2 = spConv2d(k,
                                  k, (1, 3),
                                  stride=1,
                                  padding=(0, dilated),
                                  dilation=dilated,
                                  use_sppad=use_sppad,
                                  split=split)
        self.bn2 = nn.BatchNorm2d(k)

        self.dropout = nn.Dropout2d(dropprob)
Example #11
 def __init__(self, nIn, nOut, kSize, stride=1, use_sppad=False, split=1):
     """
     args:
         nIn: number of input channels
         nOut: number of output channels
         kSize: kernel size
         stride: optional stride rate for down-sampling
     """
     super().__init__()
     padding = int((kSize - 1) / 2)
     self.conv = spConv2d(nIn,
                          nOut, (kSize, kSize),
                          stride=stride,
                          padding=(padding, padding),
                          use_sppad=use_sppad,
                          split=split)
Example #12
 def __init__(self, nIn, nOut, kSize, stride=1, use_sppad=False, split=1):
     """
     args:
         nIn: number of input channels
         nOut: number of output channels
         kSize: kernel size
         stride: stride rate for down-sampling. Default is 1
     """
     super().__init__()
     padding = int((kSize - 1) / 2)
     #self.conv = nn.Conv2d(nIn, nOut, (kSize, kSize), stride=stride, padding=(padding, padding), bias=False)
     self.conv = spConv2d(nIn,
                          nOut, (kSize, kSize),
                          stride=stride,
                          padding=(padding, padding),
                          use_sppad=use_sppad,
                          split=split)
     self.bn = nn.BatchNorm2d(nOut, eps=1e-03)
     self.act = nn.PReLU(nOut)
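
Examples #11 and #12 are thin conv wrappers (the latter adding BatchNorm and PReLU). For completeness, a hypothetical usage of the Example #12 block; CBR is an assumed class name for its Conv-BN-PReLU constructor, taking forward(x) = act(bn(conv(x))):

import torch

# CBR is hypothetical shorthand for the Example #12 module.
block = CBR(nIn=3, nOut=16, kSize=3, stride=2, use_sppad=True, split=4)
y = block(torch.randn(1, 3, 128, 256))
print(y.shape)  # torch.Size([1, 16, 64, 128]) -- stride 2 halves H and W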