def test_pad(self):
    # ***************************************************************
    # Test ReplicationPad2d Layer
    # ***************************************************************
    arr = np.random.randn(16, 3, 224, 224)
    check_equal(arr, jnn.ReplicationPad2d(10), tnn.ReplicationPad2d(10))
    check_equal(arr, jnn.ReplicationPad2d((1, 23, 4, 5)), tnn.ReplicationPad2d((1, 23, 4, 5)))
    check_equal(arr, jnn.ReplicationPad2d((1, 0, 1, 5)), tnn.ReplicationPad2d((1, 0, 1, 5)))
    check_equal(arr, jnn.ReplicationPad2d(100), tnn.ReplicationPad2d(100))

    # ***************************************************************
    # Test ConstantPad2d Layer
    # ***************************************************************
    arr = np.random.randn(16, 3, 224, 224)
    check_equal(arr, jnn.ConstantPad2d(10, -2), tnn.ConstantPad2d(10, -2))
    check_equal(arr, jnn.ConstantPad2d((2, 3, 34, 1), 10.2), tnn.ConstantPad2d((2, 3, 34, 1), 10.2))

    # ***************************************************************
    # Test ZeroPad2d Layer
    # ***************************************************************
    arr = np.random.randn(16, 3, 224, 224)
    check_equal(arr, jnn.ZeroPad2d(1), tnn.ZeroPad2d(1))
    check_equal(arr, jnn.ZeroPad2d((2, 3, 34, 1)), tnn.ZeroPad2d((2, 3, 34, 1)))

    # ***************************************************************
    # Test ReflectionPad2d Layer
    # ***************************************************************
    arr = np.random.randn(16, 3, 224, 224)
    check_equal(arr, jnn.ReflectionPad2d(20), tnn.ReflectionPad2d(20))
    check_equal(arr, jnn.ReflectionPad2d((2, 3, 34, 1)), tnn.ReflectionPad2d((2, 3, 34, 1)))
    check_equal(arr, jnn.ReflectionPad2d((10, 123, 34, 1)), tnn.ReflectionPad2d((10, 123, 34, 1)))
    check_equal(arr, jnn.ReflectionPad2d(100), tnn.ReflectionPad2d(100))
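# The tests above rely on a `check_equal` helper that is not shown in this
# section. A minimal sketch of what it presumably does (an assumption, not
# the project's actual helper): run the same array through the Jittor layer
# and the PyTorch layer, then check the outputs agree.
import numpy as np
import jittor as jt
import torch

def check_equal(arr, j_layer, t_layer):
    j_out = j_layer(jt.array(arr)).numpy()              # Jittor forward pass
    t_out = t_layer(torch.tensor(arr)).detach().numpy() # PyTorch forward pass
    assert np.allclose(j_out, t_out)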
def build_conv_block(self, dim, padding_type, norm_layer, activation, use_dropout):
    conv_block = []
    # First conv: use an explicit padding layer for 'reflect'/'replicate',
    # or fall back to the conv's own zero padding.
    p = 0
    if padding_type == 'reflect':
        conv_block += [nn.ReflectionPad2d(1)]
    elif padding_type == 'replicate':
        conv_block += [nn.ReplicationPad2d(1)]
    elif padding_type == 'zero':
        p = 1
    else:
        raise NotImplementedError('padding [%s] is not implemented' % padding_type)
    conv_block += [nn.Conv(dim, dim, 3, padding=p), norm_layer(dim), activation]
    if use_dropout:
        conv_block += [nn.Dropout(0.5)]
    # Second conv: same padding scheme, but no activation after the norm.
    p = 0
    if padding_type == 'reflect':
        conv_block += [nn.ReflectionPad2d(1)]
    elif padding_type == 'replicate':
        conv_block += [nn.ReplicationPad2d(1)]
    elif padding_type == 'zero':
        p = 1
    else:
        raise NotImplementedError('padding [%s] is not implemented' % padding_type)
    conv_block += [nn.Conv(dim, dim, 3, padding=p), norm_layer(dim)]
    return nn.Sequential(*conv_block)
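# Hypothetical usage sketch (not from the source): build_conv_block is
# typically a method of a ResnetBlock whose output is added back to its
# input. Jittor modules define execute() rather than forward().
class ResnetBlock(nn.Module):
    def __init__(self, dim, padding_type, norm_layer, activation, use_dropout):
        super(ResnetBlock, self).__init__()
        self.conv_block = self.build_conv_block(dim, padding_type, norm_layer,
                                                activation, use_dropout)

    def execute(self, x):
        # Residual connection around the conv block.
        return x + self.conv_block(x)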
def __init__(self, in_features, dropout=0.5):
    super(ResidualBlock, self).__init__()
    model = [nn.ReflectionPad2d(1),
             nn.Conv(in_features, in_features, 3, bias=False),
             nn.BatchNorm2d(in_features),
             nn.ReLU()]
    if dropout:
        model += [nn.Dropout(dropout)]
    model += [nn.ReflectionPad2d(1),
              nn.Conv(in_features, in_features, 3, bias=False),
              nn.BatchNorm2d(in_features)]
    self.conv_block = nn.Sequential(*model)
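# The forward pass is omitted above. In this style of residual block it is
# usually just the input plus the conv stack; a sketch, assuming Jittor's
# execute() convention:
def execute(self, x):
    return x + self.conv_block(x)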
def __init__(self, input_nc, output_nc, ngf=64, n_downsampling=3, n_blocks=9,
             norm_layer=nn.BatchNorm, padding_type='reflect'):
    assert n_blocks >= 0
    super(GlobalGenerator, self).__init__()
    activation = nn.ReLU()
    model = [nn.ReflectionPad2d(3),
             nn.Conv(input_nc, ngf, 7, padding=0),
             norm_layer(ngf),
             activation]
    ### downsample
    for i in range(n_downsampling):
        mult = 2 ** i
        model += [nn.Conv(ngf * mult, ngf * mult * 2, 3, stride=2, padding=1),
                  norm_layer(ngf * mult * 2),
                  activation]
    ### resnet blocks
    mult = 2 ** n_downsampling
    for i in range(n_blocks):
        model += [ResnetBlock(ngf * mult, padding_type=padding_type,
                              activation=activation, norm_layer=norm_layer)]
    ### upsample
    for i in range(n_downsampling):
        mult = 2 ** (n_downsampling - i)
        model += [nn.ConvTranspose(ngf * mult, ngf * mult // 2, 3, stride=2,
                                   padding=1, output_padding=1),
                  norm_layer(ngf * mult // 2),
                  activation]
    model += [nn.ReflectionPad2d(3),
              nn.Conv(ngf, output_nc, 7, padding=0),
              nn.Tanh()]
    self.model = nn.Sequential(*model)
    for m in self.modules():
        weights_init_normal(m)
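# Hypothetical smoke test (not from the source): the stride-2 downsampling
# stages are mirrored by the transposed convs, so the generator preserves
# spatial size.
import jittor as jt
net = GlobalGenerator(input_nc=3, output_nc=3, ngf=64, n_downsampling=3, n_blocks=9)
y = net(jt.random((1, 3, 256, 256)))  # expected shape: (1, 3, 256, 256)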
def __init__(self, in_features):
    super(ResidualBlock, self).__init__()
    self.block = nn.Sequential(
        nn.ReflectionPad2d(1),
        nn.Conv(in_features, in_features, 3),
        nn.InstanceNorm2d(in_features, affine=None),
        nn.ReLU(),
        nn.ReflectionPad2d(1),
        nn.Conv(in_features, in_features, 3),
        nn.InstanceNorm2d(in_features, affine=None))
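# The forward pass is omitted above; a sketch of the usual residual
# formulation, assuming Jittor's execute() convention:
def execute(self, x):
    return x + self.block(x)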
def test_pad(self):
    # ***************************************************************
    # Test ReplicationPad2d Layer
    # ***************************************************************
    arr = np.random.randn(16, 3, 224, 224)
    check_equal(arr, jnn.ReplicationPad2d(10), tnn.ReplicationPad2d(10))
    check_equal(arr, jnn.ReplicationPad2d((1, 23, 4, 5)), tnn.ReplicationPad2d((1, 23, 4, 5)))
    check_equal(arr, jnn.ReplicationPad2d((1, 0, 1, 5)), tnn.ReplicationPad2d((1, 0, 1, 5)))
    check_equal(arr, jnn.ReplicationPad2d(100), tnn.ReplicationPad2d(100))

    # ***************************************************************
    # Test ConstantPad2d Layer
    # ***************************************************************
    arr = np.random.randn(16, 3, 224, 224)
    check_equal(arr, jnn.ConstantPad2d(10, -2), tnn.ConstantPad2d(10, -2))
    check_equal(arr, jnn.ConstantPad2d((2, 3, 34, 1), 10.2), tnn.ConstantPad2d((2, 3, 34, 1), 10.2))

    # 5D input: ConstantPad2d still pads only the last two dims
    arr = np.random.randn(16, 3, 224, 10, 10)
    check_equal(arr, jnn.ConstantPad2d(10, -2), tnn.ConstantPad2d(10, -2))
    check_equal(arr, jnn.ConstantPad2d((2, 3, 34, 1), 10.2), tnn.ConstantPad2d((2, 3, 34, 1), 10.2))

    # ***************************************************************
    # Test ZeroPad2d Layer
    # ***************************************************************
    arr = np.random.randn(16, 3, 224, 224)
    check_equal(arr, jnn.ZeroPad2d(1), tnn.ZeroPad2d(1))
    check_equal(arr, jnn.ZeroPad2d((2, 3, 34, 1)), tnn.ZeroPad2d((2, 3, 34, 1)))

    # ***************************************************************
    # Test ReflectionPad2d Layer
    # ***************************************************************
    arr = np.random.randn(16, 3, 224, 224)
    check_equal(arr, jnn.ReflectionPad2d(20), tnn.ReflectionPad2d(20))
    check_equal(arr, jnn.ReflectionPad2d((2, 3, 34, 1)), tnn.ReflectionPad2d((2, 3, 34, 1)))
    check_equal(arr, jnn.ReflectionPad2d((10, 123, 34, 1)), tnn.ReflectionPad2d((10, 123, 34, 1)))
    check_equal(arr, jnn.ReflectionPad2d(100), tnn.ReflectionPad2d(100))

    # ***************************************************************
    # Test function pad
    # ***************************************************************
    arr = np.random.randn(16, 3, 224, 224)
    padding = (10, 11, 2, 3)
    for mode in ['constant', 'replicate', 'reflect', 'circular']:
        j_data = jt.array(arr)
        t_data = torch.tensor(arr)
        t_output = tnn.functional.pad(t_data, padding, mode=mode).detach().numpy()
        j_output = jnn.pad(j_data, padding, mode).numpy()
        assert np.allclose(t_output, j_output)
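# For reference, a 4-tuple 2D padding is interpreted as
# (left, right, top, bottom). A small PyTorch-only shape check
# illustrating the convention:
import torch
import torch.nn.functional as F

x = torch.zeros(1, 1, 4, 4)
y = F.pad(x, (1, 2, 3, 4))  # widen W by 1+2, H by 3+4
print(y.shape)              # torch.Size([1, 1, 11, 7])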
def __init__(self, input_nc, output_nc, ngf=64, n_downsampling=3, n_blocks=9,
             norm_layer=nn.InstanceNorm2d, padding_type='reflect'):
    assert n_blocks >= 0
    super(GlobalGenerator, self).__init__()
    activation = nn.ReLU()
    model = [nn.ReflectionPad2d(3),
             nn.Conv(input_nc, ngf, 7, padding=0),
             norm_layer(ngf),
             activation]
    ### downsample
    for i in range(n_downsampling):
        mult = 2 ** i
        model += [nn.Conv(ngf * mult, ngf * mult * 2, 3, stride=2, padding=1),
                  norm_layer(ngf * mult * 2),
                  activation]
    ### resnet blocks
    mult = 2 ** n_downsampling
    for i in range(n_blocks):
        model += [ResnetBlock(ngf * mult, norm_type='in', padding_type=padding_type)]
    ### upsample
    for i in range(n_downsampling):
        mult = 2 ** (n_downsampling - i)
        model += [nn.ConvTranspose(ngf * mult, ngf * mult // 2, 3, stride=2,
                                   padding=1, output_padding=1),
                  norm_layer(ngf * mult // 2),
                  activation]
    model += [nn.ReflectionPad2d(3),
              nn.Conv(ngf, output_nc, kernel_size=7, padding=0),
              nn.Tanh()]
    self.model = nn.Sequential(*model)
def __init__(self, input_nc, output_nc, ngf=64, n_downsampling=3, n_blocks=1,
             norm_layer=nn.InstanceNorm2d, padding_type='reflect'):
    assert n_blocks >= 0
    super(GeometryEncoder, self).__init__()
    activation = nn.ReLU()
    model = [nn.ReflectionPad2d(3),
             nn.Conv(input_nc, ngf, 7, padding=0),
             norm_layer(ngf),
             activation]
    ### downsample
    for i in range(n_downsampling):
        mult = 2 ** i
        model += [nn.Conv(ngf * mult, ngf * mult * 2, 3, stride=2, padding=1),
                  norm_layer(ngf * mult * 2),
                  activation]
    ### resnet blocks
    mult = 2 ** n_downsampling
    for i in range(n_blocks):
        model += [ResnetBlock(ngf * mult, norm_type='in', padding_type=padding_type)]
    self.model = nn.Sequential(*model)
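# Hypothetical smoke test (not from the source): the encoder only
# downsamples, so spatial dims shrink by 2**n_downsampling while channels
# grow to ngf * 2**n_downsampling.
import jittor as jt
enc = GeometryEncoder(input_nc=3, output_nc=3, ngf=64, n_downsampling=3, n_blocks=1)
feat = enc(jt.random((1, 3, 256, 256)))  # expected shape: (1, 512, 32, 32)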
def __init__(self, norm_layer, image_size, output_nc, latent_dim=512):
    super(DecoderGenerator_image_Res, self).__init__()
    # start from B*1024
    latent_size = int(image_size / 32)
    self.latent_size = latent_size
    longsize = 512 * latent_size * latent_size
    activation = nn.ReLU()
    padding_type = 'reflect'
    norm_layer = nn.BatchNorm  # note: overrides the norm_layer argument
    self.fc = nn.Sequential(nn.Linear(in_features=latent_dim, out_features=longsize))
    layers_list = []
    layers_list.append(ResnetBlock(512, padding_type=padding_type,
                                   activation=activation, norm_layer=norm_layer))  # 176 176
    dim_size = 256
    for i in range(4):
        layers_list.append(DecoderBlock(channel_in=dim_size * 2, channel_out=dim_size,
                                        kernel_size=4, padding=1, stride=2,
                                        output_padding=0))  # latent*2
        layers_list.append(ResnetBlock(dim_size, padding_type=padding_type,
                                       activation=activation, norm_layer=norm_layer))
        dim_size = int(dim_size / 2)
    layers_list.append(DecoderBlock(channel_in=32, channel_out=32, kernel_size=4,
                                    padding=1, stride=2, output_padding=0))  # 352 352
    layers_list.append(ResnetBlock(32, padding_type=padding_type,
                                   activation=activation, norm_layer=norm_layer))  # 176 176
    # layers_list.append(DecoderBlock(channel_in=64, channel_out=64, kernel_size=4, padding=1, stride=2, output_padding=0)) #96*160
    layers_list.append(nn.ReflectionPad2d(2))
    layers_list.append(nn.Conv(32, output_nc, kernel_size=5, padding=0))
    self.conv = nn.Sequential(*layers_list)
    for m in self.modules():
        weights_init_normal(m)
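# The forward pass is omitted above. Presumably the latent vector is
# projected through fc, reshaped to a 512-channel map, and decoded by the
# conv stack; a sketch under that assumption, using Jittor's execute()
# convention:
def execute(self, z):
    x = self.fc(z)
    x = x.reshape((z.shape[0], 512, self.latent_size, self.latent_size))
    return self.conv(x)  # decoded back to (output_nc, image_size, image_size)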
def __init__(self, in_channels=3, out_channels=1):
    super(Combiner, self).__init__()
    model = [nn.ReflectionPad2d(3),
             nn.Conv(in_channels, 64, 7, padding=0, bias=False),
             nn.BatchNorm2d(64),
             nn.ReLU()]
    for i in range(2):
        model += [ResidualBlock(64, dropout=0.5)]
    model += [nn.ReflectionPad2d(3),
              nn.Conv(64, out_channels, kernel_size=7, padding=0),
              nn.Tanh()]
    self.model = nn.Sequential(*model)
    for m in self.modules():
        weights_init_normal(m)
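# Hypothetical smoke test (not from the source): the Combiner preserves
# spatial size and maps 3 channels down to 1.
import jittor as jt
comb = Combiner(in_channels=3, out_channels=1)
out = comb(jt.random((1, 3, 128, 128)))  # expected shape: (1, 1, 128, 128)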
def __init__(self, in_channels=3, out_channels=1, num_res_blocks=9):
    super(GeneratorResNet, self).__init__()
    out_features = 64
    # Initial 7x7 conv; bias is omitted since BatchNorm follows.
    model = [nn.ReflectionPad2d(3),
             nn.Conv(in_channels, out_features, 7, bias=False),
             nn.BatchNorm2d(out_features),
             nn.ReLU()]
    in_features = out_features
    # Two stride-2 downsampling stages
    for _ in range(2):
        out_features *= 2
        model += [nn.Conv(in_features, out_features, 3, stride=2, padding=1, bias=False),
                  nn.BatchNorm2d(out_features),
                  nn.ReLU()]
        in_features = out_features
    for _ in range(num_res_blocks):
        model += [ResidualBlock(out_features)]
    # Two stride-2 upsampling stages
    for _ in range(2):
        out_features //= 2
        model += [nn.ConvTranspose(in_features, out_features, 3, stride=2,
                                   padding=1, output_padding=1, bias=False),
                  nn.BatchNorm2d(out_features),
                  nn.ReLU()]
        in_features = out_features
    model += [nn.ReflectionPad2d(3),
              nn.Conv(out_features, out_channels, 7),
              nn.Tanh()]
    self.model = nn.Sequential(*model)
    for m in self.modules():
        weights_init_normal(m)
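# Hypothetical smoke test (not from the source): the two downsampling
# stages are mirrored by the two transposed convs, so the output spatial
# size matches the input.
import jittor as jt
gen = GeneratorResNet(in_channels=3, out_channels=1, num_res_blocks=9)
fake = gen(jt.random((1, 3, 256, 256)))  # expected shape: (1, 1, 256, 256)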
def __init__(self, input_shape, num_residual_blocks):
    super(GeneratorResNet, self).__init__()
    channels = input_shape[0]
    out_features = 64
    # Initial block: the pad amount equals `channels` (3 for RGB), which
    # matches the 7x7 kernel so spatial size is preserved.
    model = [nn.ReflectionPad2d(channels),
             nn.Conv(channels, out_features, 7),
             nn.InstanceNorm2d(out_features, affine=None),
             nn.ReLU()]
    in_features = out_features
    # Downsampling
    for _ in range(2):
        out_features *= 2
        model += [nn.Conv(in_features, out_features, 3, stride=2, padding=1),
                  nn.InstanceNorm2d(out_features, affine=None),
                  nn.ReLU()]
        in_features = out_features
    # Residual blocks
    for _ in range(num_residual_blocks):
        model += [ResidualBlock(out_features)]
    # Upsampling: resize followed by a conv instead of a transposed conv
    for _ in range(2):
        out_features //= 2
        model += [nn.Upsample(scale_factor=2),
                  nn.Conv(in_features, out_features, 3, stride=1, padding=1),
                  nn.InstanceNorm2d(out_features, affine=None),
                  nn.ReLU()]
        in_features = out_features
    # Output layer
    model += [nn.ReflectionPad2d(channels),
              nn.Conv(out_features, channels, 7),
              nn.Tanh()]
    self.model = nn.Sequential(*model)
    for m in self.modules():
        weights_init_normal(m)
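# Hypothetical usage (not from the source): input_shape follows
# (channels, height, width). The Upsample + Conv pairing here is a common
# alternative to transposed convs, often chosen to reduce checkerboard
# artifacts.
import jittor as jt
gen = GeneratorResNet(input_shape=(3, 128, 128), num_residual_blocks=9)
out = gen(jt.random((1, 3, 128, 128)))  # expected shape: (1, 3, 128, 128)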
def __init__(self, input_dim, output_dim, kernel_size, stride,
             padding=0, norm='none', activation='relu', pad_type='zero'):
    super(ConvBlock, self).__init__()
    self.use_bias = True
    # initialize padding
    if pad_type == 'reflect':
        self.pad = nn.ReflectionPad2d(padding)
    elif pad_type == 'replicate':
        self.pad = nn.ReplicationPad2d(padding)
    elif pad_type == 'zero':
        self.pad = nn.ZeroPad2d(padding)
    else:
        assert 0, "Unsupported padding type: {}".format(pad_type)
    # initialize normalization
    norm_dim = output_dim
    if norm == 'bn':
        self.norm = nn.BatchNorm(norm_dim)
    elif norm == 'in':
        self.norm = nn.InstanceNorm2d(norm_dim)
    elif norm == 'adain':
        self.norm = AdaptiveInstanceNorm2d(norm_dim)
    elif norm == 'none':
        self.norm = None
    else:
        assert 0, "Unsupported normalization: {}".format(norm)
    # initialize activation
    if activation == 'relu':
        self.activation = nn.ReLU()
    elif activation == 'tanh':
        self.activation = nn.Tanh()
    elif activation == 'none':
        self.activation = None
    else:
        assert 0, "Unsupported activation: {}".format(activation)
    self.conv = nn.Conv(input_dim, output_dim, kernel_size, stride, bias=self.use_bias)
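# The forward pass is omitted above. A sketch of the usual MUNIT-style
# ordering, assuming Jittor's execute() convention: pad, conv, then the
# optional norm and activation.
def execute(self, x):
    x = self.conv(self.pad(x))
    if self.norm is not None:
        x = self.norm(x)
    if self.activation is not None:
        x = self.activation(x)
    return x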
def __init__(self, input_nc, output_nc, ngf=64, n_downsampling=3, n_blocks=9,
             norm_layer=nn.BatchNorm2d, padding_type='reflect'):
    assert n_blocks >= 0
    super(Part_Generator, self).__init__()
    activation = nn.ReLU()
    model = []
    ### resnet blocks
    mult = 2 ** n_downsampling
    for i in range(n_blocks):
        model += [ResnetBlock(ngf * mult, norm_type='adain', padding_type=padding_type)]
    ### upsample
    for i in range(n_downsampling):
        mult = 2 ** (n_downsampling - i)
        model += [nn.ConvTranspose(ngf * mult, ngf * mult // 2, 3, stride=2,
                                   padding=1, output_padding=1)]
        model += [AdaptiveInstanceNorm2d(ngf * mult // 2)]
        model += [activation]
    model += [nn.ReflectionPad2d(3),
              nn.Conv(ngf, output_nc, 7, padding=0),
              nn.Tanh()]
    self.model = nn.Sequential(*model)
    # style encoder
    self.enc_style = StyleEncoder(5, 3, 16, self.get_num_adain_params(self.model),
                                  norm='none', activ='relu', pad_type='reflect')
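# get_num_adain_params is referenced but not defined here. A sketch of the
# usual MUNIT-style implementation (an assumption; it presumes the AdaIN
# layer exposes num_features): every AdaptiveInstanceNorm2d layer consumes
# a per-channel mean and std, i.e. 2 * num_features parameters, which the
# style encoder must predict.
def get_num_adain_params(self, model):
    num_adain_params = 0
    for m in model.modules():
        if m.__class__.__name__ == 'AdaptiveInstanceNorm2d':
            num_adain_params += 2 * m.num_features
    return num_adain_params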