Example #1
File: models.py  Project: whuyyc/gan-jittor
 def __init__(self, in_features):
     super(ResidualBlock, self).__init__()
     self.block = nn.Sequential(nn.ReflectionPad2d(1),
                                nn.Conv(in_features, in_features, 3),
                                nn.InstanceNorm2d(in_features, affine=None),
                                nn.ReLU(), nn.ReflectionPad2d(1),
                                nn.Conv(in_features, in_features, 3),
                                nn.InstanceNorm2d(in_features, affine=None))
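The listing above shows only the constructor; the residual connection itself lives in the forward pass, which Jittor names execute rather than forward. A minimal sketch of what that method presumably looks like (not part of the listing above):

 def execute(self, x):
     # add the block's output back onto its input (the residual connection)
     return x + self.block(x)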
Example #2
    def test_batchnorm(self):
        # ***************************************************************
        # Test BatchNorm Layer
        # ***************************************************************
        arr = np.random.randn(16,10,224,224)
        check_equal_with_istrain(arr, jnn.BatchNorm(10, is_train=True), tnn.BatchNorm2d(10))

        class Model(tnn.Module):
            def __init__(self):
                super(Model, self).__init__()
                self.layer = tnn.BatchNorm2d(10)
            def forward(self, x):
                return self.layer(x)
        model = Model()
        model.eval()
        check_equal_with_istrain(arr, jnn.BatchNorm(10, is_train=False), model, False)

        # ***************************************************************
        # Test InstanceNorm2d Layer
        # ***************************************************************
        arr = np.random.randn(16,10,224,224)
        check_equal_without_istrain(arr, jnn.InstanceNorm2d(10, is_train=True), tnn.InstanceNorm2d(10))

        class Model(tnn.Module):
            def __init__(self):
                super(Model, self).__init__()
                self.layer = tnn.InstanceNorm2d(10)
            def forward(self, x):
                return self.layer(x)
        model = Model()
        model.eval()
        check_equal_without_istrain(arr, jnn.InstanceNorm2d(10, is_train=False), model)

        # ***************************************************************
        # Test BatchNorm1d Layer
        # ***************************************************************
        arr = np.random.randn(16,10)
        check_equal_with_istrain(arr, jnn.BatchNorm1d(10, is_train=True), tnn.BatchNorm1d(10), 1e-3)

        class Model(tnn.Module):
            def __init__(self):
                super(Model, self).__init__()
                self.layer = tnn.BatchNorm1d(10)
            def forward(self, x):
                return self.layer(x)
        model = Model()
        model.eval()
        check_equal_with_istrain(arr, jnn.BatchNorm1d(10, is_train=False), model, False)
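check_equal_with_istrain and check_equal_without_istrain are helpers from the Jittor test suite and are not reproduced here. As a purely illustrative sketch (a hypothetical compare_layers helper, not the real implementation, and ignoring the extra train-flag/tolerance arguments used above), the comparison they perform looks roughly like this:

import numpy as np
import jittor as jt
import torch

def compare_layers(arr, jt_layer, torch_layer, atol=1e-5):
    # feed the same array through the Jittor layer and the PyTorch layer
    jt_out = jt_layer(jt.array(arr.astype(np.float32))).numpy()
    torch_out = torch_layer(torch.from_numpy(arr.astype(np.float32))).detach().numpy()
    # the two framework outputs are expected to agree within a small tolerance
    assert np.allclose(jt_out, torch_out, atol=atol)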
Example #3
    def __init__(self, in_channel, style_dim):
        super(AdaptiveInstanceNorm, self).__init__()
        self.norm = nn.InstanceNorm2d(in_channel)
        self.linear = nn.Linear(style_dim, in_channel * 2)

        self.linear.bias.data[:in_channel] = 1
        self.linear.bias.data[in_channel:] = 0
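Only the constructor is listed; in the usual AdaIN formulation the style vector is projected to a per-channel scale (gamma) and shift (beta) that modulate the instance-normalized features. A sketch of the corresponding execute method, assuming that conventional implementation (not taken from the listing):

    def execute(self, input, style):
        # project the style code to a per-channel scale (gamma) and shift (beta)
        style = self.linear(style)
        batch, channel = input.shape[0], input.shape[1]
        gamma = style[:, :channel].reshape((batch, channel, 1, 1))
        beta = style[:, channel:].reshape((batch, channel, 1, 1))
        # scale and shift the instance-normalized activations
        return gamma * self.norm(input) + beta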
Example #4
File: models.py  Project: whuyyc/gan-jittor
 def discriminator_block(in_filters, out_filters, stride, normalize):
     'Returns layers of each discriminator block'
     layers = [nn.Conv(in_filters, out_filters, 3, stride=stride, padding=1)]
     if normalize:
         layers.append(nn.InstanceNorm2d(out_filters))
     layers.append(nn.Leaky_relu(scale=0.2))
     return layers
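discriminator_block returns a plain Python list, so a discriminator is typically assembled by concatenating several such lists and unpacking them into nn.Sequential. A hedged usage sketch (channel sizes and the final layer are illustrative, not taken from the project):

layers = []
layers += discriminator_block(3, 64, stride=2, normalize=False)
layers += discriminator_block(64, 128, stride=2, normalize=True)
layers += discriminator_block(128, 256, stride=2, normalize=True)
# final convolution collapses the features to a one-channel real/fake score map
layers.append(nn.Conv(256, 1, 3, stride=1, padding=1))
model = nn.Sequential(*layers)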
Example #5
 def discriminator_block(in_filters, out_filters, normalization=True):
     'Returns downsampling layers of each discriminator block'
     layers = [nn.Conv(in_filters, out_filters, 4, stride=2, padding=1)]
     if normalization:
         layers.append(nn.InstanceNorm2d(out_filters, affine=None))
     layers.append(nn.LeakyReLU(scale=0.2))
     return layers
Example #6
    def __init__(self, num_features, eps=1e-5, momentum=0.1):
        super(AdaptiveInstanceNorm2d, self).__init__()
        self.num_features = num_features

        self.norm = nn.InstanceNorm2d(num_features, affine=False)

        # weight and bias are dynamically assigned
        self.weight = None
        self.bias = None
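Here weight and bias start as None and are expected to be assigned externally (for example by a style encoder, as in MUNIT-style architectures) before the layer is called. A sketch of how the forward pass might consume them, assuming that convention:

    def execute(self, x):
        # per-channel scale and shift supplied externally (e.g. by a style MLP)
        assert self.weight is not None and self.bias is not None
        weight = self.weight.reshape((1, self.num_features, 1, 1))
        bias = self.bias.reshape((1, self.num_features, 1, 1))
        return weight * self.norm(x) + bias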
Example #7
 def block(in_features, out_features, normalization=True):
     'Classifier block'
     layers = [
         nn.Conv(in_features, out_features, 3, stride=2, padding=1),
         nn.LeakyReLU(scale=0.2)
     ]
     if normalization:
         layers.append(nn.InstanceNorm2d(out_features, affine=None))
     return layers
Example #8
 def __init__(self, in_size, out_size, normalize=True, dropout=0.0):
     super(UNetDown, self).__init__()
     layers = [
         nn.Conv(in_size, out_size, 4, stride=2, padding=1, bias=False)
     ]
     if normalize:
         layers.append(nn.InstanceNorm2d(out_size, affine=None))
     layers.append(nn.LeakyReLU(scale=0.2))
     if dropout:
         layers.append(nn.Dropout(dropout))
     self.model = nn.Sequential(*layers)
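The matching forward pass is simply a call into the assembled nn.Sequential; a minimal sketch (not part of the listing):

 def execute(self, x):
     # each block halves the spatial resolution (4x4 conv, stride 2, padding 1)
     return self.model(x)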
Example #9
File: models.py  Project: whuyyc/gan-jittor
 def __init__(self, img_shape=(3, 128, 128), res_blocks=9, c_dim=5):
     super(GeneratorResNet, self).__init__()
     (channels, img_size, _) = img_shape
     model = [
         nn.Conv((channels + c_dim), 64, 7, stride=1, padding=3,
                 bias=False),
         nn.InstanceNorm2d(64, affine=None),
         nn.ReLU()
     ]
     curr_dim = 64
     for _ in range(2):
         model += [
             nn.Conv(curr_dim, (curr_dim * 2),
                     4,
                     stride=2,
                     padding=1,
                     bias=False),
             nn.InstanceNorm2d((curr_dim * 2), affine=None),
             nn.ReLU()
         ]
         curr_dim *= 2
     for _ in range(res_blocks):
         model += [ResidualBlock(curr_dim)]
     for _ in range(2):
         model += [
             nn.ConvTranspose(curr_dim, (curr_dim // 2),
                              4,
                              stride=2,
                              padding=1,
                              bias=False),
             nn.InstanceNorm2d((curr_dim // 2), affine=None),
             nn.ReLU()
         ]
         curr_dim = (curr_dim // 2)
     model += [
         nn.Conv(curr_dim, channels, 7, stride=1, padding=3),
         nn.Tanh()
     ]
     self.model = nn.Sequential(*model)
     for m in self.model:
         weights_init_normal(m)
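Because the first convolution takes channels + c_dim input channels, the forward pass presumably tiles the target-domain label vector over the spatial dimensions and concatenates it with the image, as in StarGAN. A hedged sketch of that step, assuming import jittor as jt (not taken from the listing):

 def execute(self, x, c):
     # tile the c_dim label vector into an (N, c_dim, H, W) feature map
     c = c.reshape((c.shape[0], c.shape[1], 1, 1))
     c = c * jt.ones((1, 1, x.shape[2], x.shape[3]))
     # concatenate the labels with the image along the channel axis
     return self.model(jt.concat([x, c], dim=1))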
Example #10
File: models.py  Project: whuyyc/gan-jittor
 def __init__(self, in_features):
     super(ResidualBlock, self).__init__()
     conv_block = [
         nn.Conv(in_features,
                 in_features,
                 3,
                 stride=1,
                 padding=1,
                 bias=False),
         nn.InstanceNorm2d(in_features, affine=None),
         nn.ReLU(),
         nn.Conv(in_features,
                 in_features,
                 3,
                 stride=1,
                 padding=1,
                 bias=False),
         nn.InstanceNorm2d(in_features, affine=None)
     ]
     self.conv_block = nn.Sequential(*conv_block)
     for m in self.conv_block:
         weights_init_normal(m)
Example #11
File: models.py  Project: whuyyc/gan-jittor
    def __init__(self, input_shape, num_residual_blocks):
        super(GeneratorResNet, self).__init__()
        channels = input_shape[0]
        out_features = 64
        model = [
            nn.ReflectionPad2d(channels),
            nn.Conv(channels, out_features, 7),
            nn.InstanceNorm2d(out_features, affine=None),
            nn.ReLU()
        ]
        in_features = out_features
        for _ in range(2):
            out_features *= 2
            model += [
                nn.Conv(in_features, out_features, 3, stride=2, padding=1),
                nn.InstanceNorm2d(out_features, affine=None),
                nn.ReLU()
            ]
            in_features = out_features
        for _ in range(num_residual_blocks):
            model += [ResidualBlock(out_features)]
        for _ in range(2):
            out_features //= 2
            model += [
                nn.Upsample(scale_factor=2),
                nn.Conv(in_features, out_features, 3, stride=1, padding=1),
                nn.InstanceNorm2d(out_features, affine=None),
                nn.ReLU()
            ]
            in_features = out_features
        model += [
            nn.ReflectionPad2d(channels),
            nn.Conv(out_features, channels, 7),
            nn.Tanh()
        ]
        self.model = nn.Sequential(*model)

        for m in self.modules():
            weights_init_normal(m)
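The listing stops at the constructor; in the CycleGAN reference implementation the forward pass simply applies the assembled nn.Sequential, roughly:

    def execute(self, x):
        return self.model(x)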
Example #12
    def __init__(self,
                 input_dim,
                 output_dim,
                 kernel_size,
                 stride,
                 padding=0,
                 norm='none',
                 activation='relu',
                 pad_type='zero'):
        super(ConvBlock, self).__init__()
        self.use_bias = True
        # initialize padding
        if pad_type == 'reflect':
            self.pad = nn.ReflectionPad2d(padding)
        elif pad_type == 'replicate':
            self.pad = nn.ReplicationPad2d(padding)
        elif pad_type == 'zero':
            self.pad = nn.ZeroPad2d(padding)
        else:
            assert 0, "Unsupported padding type: {}".format(pad_type)

        # initialize normalization
        norm_dim = output_dim
        if norm == 'bn':
            self.norm = nn.BatchNorm(norm_dim)
        elif norm == 'in':
            self.norm = nn.InstanceNorm2d(norm_dim)
        elif norm == 'adain':
            self.norm = AdaptiveInstanceNorm2d(norm_dim)
        elif norm == 'none':
            self.norm = None
        else:
            assert 0, "Unsupported normalization: {}".format(norm)

        # initialize activation
        if activation == 'relu':
            self.activation = nn.ReLU()
        elif activation == 'tanh':
            self.activation = nn.Tanh()
        elif activation == 'none':
            self.activation = None
        else:
            assert 0, "Unsupported activation: {}".format(activation)

        self.conv = nn.Conv(input_dim,
                            output_dim,
                            kernel_size,
                            stride,
                            bias=self.use_bias)
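The constructor wires up padding, normalization, and activation, but the forward pass is omitted; following the usual MUNIT-style ConvBlock convention it applies them in pad -> conv -> norm -> activation order, roughly:

    def execute(self, x):
        x = self.conv(self.pad(x))
        if self.norm is not None:
            x = self.norm(x)
        if self.activation is not None:
            x = self.activation(x)
        return x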
Example #13
 def __init__(self, in_size, out_size, dropout=0.0):
     super(UNetUp, self).__init__()
     layers = [
         nn.ConvTranspose(in_size,
                          out_size,
                          4,
                          stride=2,
                          padding=1,
                          bias=False),
         nn.InstanceNorm2d(out_size, affine=None),
         nn.ReLU()
     ]
     if dropout:
         layers.append(nn.Dropout(dropout))
     self.model = nn.Sequential(*layers)
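As with UNetDown in Example #8, only the constructor is listed; in a U-Net the upsampling block also concatenates the skip connection from the matching downsampling stage. A hedged sketch of that forward pass, assuming import jittor as jt:

 def execute(self, x, skip_input):
     x = self.model(x)
     # concatenate with the skip connection from the encoder along channels
     return jt.concat([x, skip_input], dim=1)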
Example #14
    def __init__(self, channels, dlatent_size, use_wscale, use_noise,
                 use_pixel_norm, use_instance_norm, use_styles,
                 activation_layer):
        super().__init__()

        layers = []
        if use_noise:
            layers.append(('noise', NoiseLayer(channels)))
        layers.append(('activation', activation_layer))
        if use_pixel_norm:
            layers.append(('pixel_norm', PixelNormLayer()))
        if use_instance_norm:
            layers.append(
                ('instance_norm', nn.InstanceNorm2d(channels, affine=False)))

        self.top_epi = nn.Sequential(OrderedDict(layers))

        if use_styles:
            self.style_mod = StyleMod(dlatent_size,
                                      channels,
                                      use_wscale=use_wscale)
        else:
            self.style_mod = None
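This block comes from a StyleGAN-style generator, where each convolution is followed by noise, activation, optional pixel/instance normalization, and a per-layer style modulation. A sketch of the corresponding forward pass, assuming the usual layout (not taken from the listing):

    def execute(self, x, dlatents_in_slice=None):
        # noise, activation and normalization layers assembled above
        x = self.top_epi(x)
        # apply the learned per-channel style modulation, if enabled
        if self.style_mod is not None:
            x = self.style_mod(x, dlatents_in_slice)
        return x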