Example #1
    def __init__(self, in_channels, out_channels, bilinear=True):
        super().__init__()

        # Upsample either with parameter-free bilinear interpolation
        # or with a learned transposed convolution.
        if bilinear:
            self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        else:
            self.up = nn.ConvTranspose2d(in_channels // 2, in_channels // 2,
                                         kernel_size=2, stride=2)

        self.conv = double_conv(in_channels, out_channels)
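A minimal sketch of the forward pass that conventionally pairs with this constructor in U-Net-style decoders; the x1/x2 naming and the padding step are assumptions, not part of the original snippet, and it presumes `torch` and `torch.nn.functional as F` are imported. The decoder map is upsampled, padded to match the skip connection, concatenated with it, and passed through double_conv.

    # Hypothetical forward pass, assuming the standard U-Net skip-connection pattern.
    def forward(self, x1, x2):
        # x1: decoder feature map to upsample, x2: encoder skip connection
        x1 = self.up(x1)
        # Pad x1 in case the encoder and decoder spatial sizes differ slightly.
        diff_y = x2.size(2) - x1.size(2)
        diff_x = x2.size(3) - x1.size(3)
        x1 = F.pad(x1, [diff_x // 2, diff_x - diff_x // 2,
                        diff_y // 2, diff_y - diff_y // 2])
        # Concatenate along the channel dimension, then apply the double convolution.
        return self.conv(torch.cat([x2, x1], dim=1))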
Example #2
    def __init__(self, in_channels, out_channels, bilinear=True):
        # Two (3x3 Conv2d -> BatchNorm -> ReLU) blocks, defined locally for convenience.
        def double_conv(in_channels, out_channels):
            return nn.Sequential(
                nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
                nn.BatchNorm2d(out_channels),
                nn.ReLU(inplace=True),
                nn.Conv2d(out_channels, out_channels,
                          kernel_size=3, padding=1),
                nn.BatchNorm2d(out_channels),
                nn.ReLU(inplace=True),
            )
        super(up, self).__init__()

        # Upsample either with bilinear interpolation or a learned transposed convolution.
        if bilinear:
            self.up = nn.Upsample(
                scale_factor=2, mode='bilinear', align_corners=True)
        else:
            self.up = nn.ConvTranspose2d(in_channels // 2, in_channels // 2,
                                         kernel_size=2, stride=2)

        self.conv = double_conv(in_channels, out_channels)
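A self-contained check (not from the original source) of the channel bookkeeping this block relies on: bilinear upsampling leaves the channel count unchanged, so after concatenating with an encoder skip connection of the same width, double_conv must accept in_channels = decoder channels + skip channels. The tensor sizes below are hypothetical.

import torch
import torch.nn as nn

# Hypothetical shapes: 128-channel decoder map at 32x32, 128-channel skip at 64x64.
up_layer = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
decoder = torch.randn(1, 128, 32, 32)
skip = torch.randn(1, 128, 64, 64)
merged = torch.cat([skip, up_layer(decoder)], dim=1)
print(merged.shape)  # torch.Size([1, 256, 64, 64]) -> in_channels for double_conv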
Example #3
    def __init__(self, nfeats, nchannels):
        super(Generator, self).__init__()
        # Input is the latent vector, the dot product of two latent vectors of size 1x1x100
        self.convt1 = nn.ConvTranspose2d(in_channels=100, out_channels=nfeats*2, kernel_size=4, stride=1, padding=0, bias=False, padding_mode='zeros')
        # Output has shape 4x4 with nfeats*2 channels (128 if nfeats = 64)
        self.convt2 = nn.ConvTranspose2d(in_channels=nfeats*2, out_channels=nfeats*8, kernel_size=4, stride=2, padding=1, bias=False, padding_mode='zeros')
        self.bn1 = nn.BatchNorm2d(nfeats*8)
        # Output has shape 8x8 with nfeats*8 channels (512 if nfeats = 64)
        self.convt3 = nn.ConvTranspose2d(in_channels=nfeats*8, out_channels=nfeats*8, kernel_size=4, stride=2, padding=1, bias=False, padding_mode='zeros')
        self.bn2 = nn.BatchNorm2d(nfeats*8)
        # Output has shape 16x16 with nfeats*8 channels
        self.convt4 = nn.ConvTranspose2d(in_channels=nfeats*8, out_channels=nfeats*8, kernel_size=4, stride=2, padding=1, bias=False, padding_mode='zeros')
        self.bn3 = nn.BatchNorm2d(nfeats*8)
        # Output has shape 32x32 with nfeats*8 channels
        self.convt5 = nn.ConvTranspose2d(in_channels=nfeats*8, out_channels=nfeats*4, kernel_size=4, stride=2, padding=1, bias=False, padding_mode='zeros')
        self.bn4 = nn.BatchNorm2d(nfeats*4)
        # Output has shape 64x64 with nfeats*4 channels (256)
        self.convt6 = nn.Conv2d(in_channels=nfeats*4, out_channels=nchannels, kernel_size=1, stride=1, padding=0, bias=False, padding_mode='zeros')
        # Output has shape 64x64 with 3 channels -- an RGB image

        # ----------------------------------------------------------------------------------------
        # Define the components for the residual blocks :)
        # This is for 4x4 to 8x8
        self.resconv1 = nn.ConvTranspose2d(in_channels=nfeats*2, out_channels=nfeats*8, kernel_size=4, stride=2, padding=1, padding_mode='zeros', bias=False)
        self.resbatch1 = nn.BatchNorm2d(nfeats*8)
        # This is for 4x4 to 16x16 via the 8x8 weights
        self.resconv2 = nn.ConvTranspose2d(in_channels=nfeats*8, out_channels=nfeats*8, kernel_size=4, stride=2, padding=1, padding_mode='zeros', bias=False)
        self.resbatch2 = nn.BatchNorm2d(nfeats*8)
        # This is for 4x4 to 32x32 via the 16x16 weights
        self.resconv3 = nn.ConvTranspose2d(in_channels=nfeats*8, out_channels=nfeats*8, kernel_size=4, stride=2, padding=1, padding_mode='zeros', bias=False)
        self.resbatch3 = nn.BatchNorm2d(nfeats*8)
        # ------------------------------ Now design the resblocks from 8x8 to 32x32 and 64x64
        self.resconv4 = nn.ConvTranspose2d(in_channels=nfeats*8, out_channels=nfeats*8, kernel_size=4, stride=2, padding=1, padding_mode='zeros', bias=False)
        self.resbatch4 = nn.BatchNorm2d(nfeats*8)
        self.resconv5 = nn.ConvTranspose2d(in_channels=nfeats*8, out_channels=nfeats*8, kernel_size=4, stride=2, padding=1, padding_mode='zeros', bias=False)
        self.resbatch5 = nn.BatchNorm2d(nfeats*8)
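A quick sanity check (not part of the original snippet) of the shape comments above, using the standard ConvTranspose2d output-size formula H_out = (H_in - 1) * stride - 2 * padding + kernel_size (with dilation = 1 and output_padding = 0):

def convtranspose2d_out(h_in, kernel_size, stride, padding):
    # H_out = (H_in - 1) * stride - 2 * padding + kernel_size
    return (h_in - 1) * stride - 2 * padding + kernel_size

h = 1                                   # latent vector starts at 1x1
h = convtranspose2d_out(h, 4, 1, 0)     # convt1 -> 4
h = convtranspose2d_out(h, 4, 2, 1)     # convt2 -> 8
h = convtranspose2d_out(h, 4, 2, 1)     # convt3 -> 16
h = convtranspose2d_out(h, 4, 2, 1)     # convt4 -> 32
h = convtranspose2d_out(h, 4, 2, 1)     # convt5 -> 64
print(h)                                # 64; convt6 is a 1x1 conv, so 64x64 is preserved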