Example 1
import torch.nn as nn

class TransformerNet(nn.Module):
    # ConvLayer, ResidualBlock and UpsampleConvLayer are custom modules defined
    # elsewhere in the project (a rough sketch of the convolution helpers follows
    # this example).
    def __init__(self):
        super(TransformerNet, self).__init__()
        # Initial convolution layers
        self.conv1 = ConvLayer(3, 32, kernel_size=9, stride=1)
        self.in1 = nn.InstanceNorm2d(32, affine=True)
        self.conv2 = ConvLayer(32, 64, kernel_size=3, stride=2)
        self.in2 = nn.InstanceNorm2d(64, affine=True)
        self.conv3 = ConvLayer(64, 128, kernel_size=3, stride=2)
        self.in3 = nn.InstanceNorm2d(128, affine=True)
        # Residual layers
        self.res1 = ResidualBlock(128)
        self.res2 = ResidualBlock(128)
        self.res3 = ResidualBlock(128)
        self.res4 = ResidualBlock(128)
        self.res5 = ResidualBlock(128)
        # Upsampling layers
        self.deconv1 = UpsampleConvLayer(128, 64, kernel_size=3, stride=1, upsample=2)
        self.in4 = nn.InstanceNorm2d(64, affine=True)
        self.deconv2 = UpsampleConvLayer(64, 32, kernel_size=3, stride=1, upsample=2)
        self.in5 = nn.InstanceNorm2d(32, affine=True)
        self.deconv3 = ConvLayer(32, 3, kernel_size=9, stride=1)
        # Non-linearities
        self.relu = nn.ReLU()
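
The ConvLayer, ResidualBlock, and UpsampleConvLayer modules referenced above are project-specific helpers, not part of torch.nn. As a rough guide only, a minimal sketch of the two convolution helpers, assuming the common fast-neural-style pattern of reflection padding plus nn.Conv2d, with nearest-neighbor upsampling before the convolution in the upsampling variant, could look like this (names and layout here are assumptions, not the project's actual code):

import torch
import torch.nn as nn
import torch.nn.functional as F

class ConvLayer(nn.Module):
    """Reflection-padded convolution (assumed helper; preserves spatial size at stride 1)."""
    def __init__(self, in_channels, out_channels, kernel_size, stride):
        super().__init__()
        self.pad = nn.ReflectionPad2d(kernel_size // 2)
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride)

    def forward(self, x):
        return self.conv(self.pad(x))

class UpsampleConvLayer(nn.Module):
    """Nearest-neighbor upsample, then a reflection-padded convolution (assumed helper)."""
    def __init__(self, in_channels, out_channels, kernel_size, stride, upsample=None):
        super().__init__()
        self.upsample = upsample
        self.pad = nn.ReflectionPad2d(kernel_size // 2)
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride)

    def forward(self, x):
        if self.upsample:
            x = F.interpolate(x, scale_factor=self.upsample, mode="nearest")
        return self.conv(self.pad(x))

if __name__ == "__main__":
    # Quick shape check: stride-1 ConvLayer keeps H x W; upsample=2 doubles it.
    print(ConvLayer(3, 32, kernel_size=9, stride=1)(torch.randn(1, 3, 256, 256)).shape)
    print(UpsampleConvLayer(128, 64, kernel_size=3, stride=1, upsample=2)(torch.randn(1, 128, 64, 64)).shape)

Because the reflection padding is kernel_size // 2, the stride-1 layers keep the spatial resolution fixed, which is what lets the snippet above stack five residual blocks at a constant 128 channels before upsampling back to the input size.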
Example 2
import torch.nn as nn

class ResidualBlock(nn.Module):
    # ConvLayer is the same custom reflection-padded convolution helper used in
    # TransformerNet above; it is defined elsewhere in the project.
    def __init__(self, channels):
        super(ResidualBlock, self).__init__()
        self.conv1 = ConvLayer(channels, channels, kernel_size=3, stride=1)
        self.in1 = nn.InstanceNorm2d(channels, affine=True)
        self.conv2 = ConvLayer(channels, channels, kernel_size=3, stride=1)
        self.in2 = nn.InstanceNorm2d(channels, affine=True)
        self.relu = nn.ReLU()
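
The example stops at __init__. For completeness, a plausible forward pass, following the standard residual pattern used in fast-neural-style implementations (conv, norm, ReLU, then conv, norm, then add the input back with no activation after the addition), is sketched below. A plain padded nn.Conv2d stands in for the project's ConvLayer helper so the sketch runs on its own; it is an illustration, not the original code:

import torch
import torch.nn as nn

class ResidualBlock(nn.Module):
    """Residual block: conv-norm-ReLU, conv-norm, then a skip connection."""
    def __init__(self, channels):
        super().__init__()
        # A plain padded Conv2d stands in for the project's ConvLayer helper here.
        self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, stride=1, padding=1)
        self.in1 = nn.InstanceNorm2d(channels, affine=True)
        self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, stride=1, padding=1)
        self.in2 = nn.InstanceNorm2d(channels, affine=True)
        self.relu = nn.ReLU()

    def forward(self, x):
        residual = x                              # keep the input for the skip connection
        out = self.relu(self.in1(self.conv1(x)))
        out = self.in2(self.conv2(out))
        return out + residual                     # no ReLU after the addition

if __name__ == "__main__":
    block = ResidualBlock(128)
    print(block(torch.randn(1, 128, 32, 32)).shape)   # torch.Size([1, 128, 32, 32])

Because every layer in the block preserves both the channel count and the spatial size, the skip connection can be a plain elementwise addition with no projection.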