def __init__(self, k=64, gpu_ids=None):
    super(VariationalEncoder, self).__init__()
    self.k = k
    # Avoid a shared mutable default argument for gpu_ids.
    self.gpu_ids = gpu_ids if gpu_ids is not None else []
    # Format: NCHW
    model = [
        layer_wrapper(
            nn.Conv2d(in_channels=1, out_channels=8,
                      kernel_size=(2, 2), stride=(2, 2), padding=2)
        ),
        layer_wrapper(
            nn.Conv2d(in_channels=8, out_channels=16,
                      kernel_size=(4, 4), stride=(2, 2), padding=1)
        ),
        layer_wrapper(
            nn.Conv2d(in_channels=16, out_channels=32,
                      kernel_size=(4, 4), stride=(2, 2), padding=1)
        ),
        # Final projection to 2*k channels; no normalization or activation
        # on the output layer.
        layer_wrapper(
            nn.Conv2d(in_channels=32, out_channels=k * 2,
                      kernel_size=(4, 4), stride=(1, 1)),
            norm_layer=None,
            activation_function=None,
        ),
    ]
    self.model = nn.Sequential(*model)
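# --- Illustrative sketch (not part of the original file) ---
# A plausible forward pass for this encoder, assuming the 2*k output channels
# are split into a mean and a log-variance and that `torch` is imported at
# module level. This is the usual VAE reparameterization pattern, offered as a
# hedged example rather than the author's actual implementation.
def forward(self, x):
    h = self.model(x)                       # (N, 2*k, H', W')
    mu, log_var = torch.chunk(h, 2, dim=1)  # split channels into k + k
    std = torch.exp(0.5 * log_var)          # log-variance -> standard deviation
    eps = torch.randn_like(std)             # reparameterization trick
    z = mu + eps * std
    return z, mu, log_var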
def __init__(self, k=64, gpu_ids=None):
    # Call nn.Module.__init__ before assigning attributes on self.
    super(VariationalDecoder, self).__init__()
    self.k = k
    self.gpu_ids = gpu_ids if gpu_ids is not None else []
    # Format: NCHW
    model = [
        layer_wrapper(nn.ConvTranspose2d(in_channels=k, out_channels=256,
                                         kernel_size=(4, 4), stride=(1, 1))),
        layer_wrapper(nn.ConvTranspose2d(in_channels=256, out_channels=128,
                                         kernel_size=(4, 4), stride=(2, 2))),
        layer_wrapper(nn.ConvTranspose2d(in_channels=128, out_channels=64,
                                         kernel_size=(4, 4), stride=(1, 1))),
        layer_wrapper(nn.ConvTranspose2d(in_channels=64, out_channels=32,
                                         kernel_size=(4, 4), stride=(2, 2))),
        layer_wrapper(nn.ConvTranspose2d(in_channels=32, out_channels=32,
                                         kernel_size=(5, 5), stride=(1, 1))),
        layer_wrapper(nn.Conv2d(in_channels=32, out_channels=32,
                                kernel_size=(1, 1), stride=(1, 1))),
        # Output layer: 3-channel image, Sigmoid activation, no normalization.
        layer_wrapper(nn.Conv2d(in_channels=32, out_channels=3,
                                kernel_size=(1, 1), stride=(1, 1)),
                      norm_layer=None,
                      activation_function=nn.Sigmoid()),
    ]
    self.model = nn.Sequential(*model)
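# --- Illustrative sketch (not part of the original file) ---
# A minimal forward pass for this decoder, assuming the latent z arrives as a
# flat (N, k) tensor and is reshaped to (N, k, 1, 1) before the transposed
# convolutions; the Sigmoid head then yields a 3-channel image in [0, 1].
# This reshaping convention is an assumption, not the author's code.
def forward(self, z):
    if z.dim() == 2:                        # (N, k) -> (N, k, 1, 1)
        z = z.view(z.size(0), self.k, 1, 1)
    return self.model(z)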
def __init__(self, k=64, gpu_ids=None):
    super(VariationalEncoder, self).__init__()
    self.k = k
    self.gpu_ids = gpu_ids if gpu_ids is not None else []
    # Partial with no bound arguments; currently equivalent to layer_wrapper itself.
    self.layer_wrapper = partial(layer_wrapper)

    # Input: x: 3x64x64   Output: z1: 64x16x16   parameters: ~0.3M
    xz1 = [
        layer_wrapper(nn.Conv2d(in_channels=3, out_channels=32,
                                kernel_size=(3, 3), stride=(1, 1), padding=1)),
        layer_wrapper(nn.Conv2d(in_channels=32, out_channels=32,
                                kernel_size=(3, 3), stride=(1, 1), padding=1)),
        layer_wrapper(nn.Conv2d(in_channels=32, out_channels=64,
                                kernel_size=(4, 4), stride=(2, 2), padding=1)),
        layer_wrapper(nn.Conv2d(in_channels=64, out_channels=64,
                                kernel_size=(3, 3), stride=(1, 1), padding=1)),
        layer_wrapper(nn.Conv2d(in_channels=64, out_channels=64,
                                kernel_size=(1, 1))),
        layer_wrapper(nn.Conv2d(in_channels=64, out_channels=64,
                                kernel_size=(3, 3), stride=(1, 1), padding=1)),
        # No normalization on the last z1 layer.
        layer_wrapper(nn.Conv2d(in_channels=64, out_channels=64,
                                kernel_size=(3, 3), stride=(1, 1), padding=1),
                      norm_layer=None),
    ]
    self.xz1 = nn.Sequential(*xz1)

    # Input: z1: 64x16x16   Output: z2: 64x1x1 (the head emits 2*k channels)   parameters: ~2.8M
    self.z1z2 = nn.Sequential(
        self.layer_wrapper(nn.Conv2d(in_channels=64, out_channels=128,
                                     kernel_size=(4, 4), stride=(2, 2), padding=1)),
        self.layer_wrapper(nn.Conv2d(in_channels=128, out_channels=256,
                                     kernel_size=(4, 4), stride=(2, 2), padding=1)),
        self.layer_wrapper(nn.Conv2d(in_channels=256, out_channels=256,
                                     kernel_size=(3, 3), stride=(1, 1), padding=1)),
        self.layer_wrapper(nn.Conv2d(in_channels=256, out_channels=256,
                                     kernel_size=(1, 1))),
        self.layer_wrapper(nn.Conv2d(in_channels=256, out_channels=256,
                                     kernel_size=(3, 3), stride=(1, 1), padding=1)),
        self.layer_wrapper(nn.Conv2d(in_channels=256, out_channels=512,
                                     kernel_size=(4, 4))),
        self.layer_wrapper(nn.Conv2d(in_channels=512, out_channels=512,
                                     kernel_size=(1, 1))),
        # Final projection to 2*k channels; no normalization or activation
        # on the output layer.
        self.layer_wrapper(nn.Conv2d(in_channels=512, out_channels=self.k * 2,
                                     kernel_size=(1, 1), stride=(1, 1)),
                           norm_layer=None,
                           activation_function=None),
    )
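# --- Illustrative sketch (not part of the original file) ---
# A hedged forward pass for this two-stage encoder: xz1 produces the
# intermediate feature map z1, and z1z2 produces 2*k channels that are assumed
# to parameterize the z2 distribution (mean and log-variance). Returning z1
# alongside the z2 statistics is an assumption about how a hierarchical VAE
# would consume these outputs; `torch` is assumed to be imported.
def forward(self, x):
    z1 = self.xz1(x)                            # intermediate features from xz1
    stats = self.z1z2(z1)                       # (N, 2*k, 1, 1)
    mu, log_var = torch.chunk(stats, 2, dim=1)  # (N, k, 1, 1) each
    return z1, mu, log_var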