Example #1
    def __init__(self, step=1, if_noise=False, noise_dim=3, noise_stdv=1e-2, dim_tail=32):
        super(StepModel, self).__init__()
        self.step = step
        self.noise_dim = noise_dim
        self.noise_stdv = noise_stdv
        self.if_noise = if_noise
        self.dim_tail = dim_tail

        self.sa_module_1 = PointnetModule([3 + (self.noise_dim if self.if_noise else 0), 64, 64, 128], n_points=512, radius=0.2, n_samples=32)
        self.sa_module_2 = PointnetModule([128, 128, 128, 256], n_points=128, radius=0.4, n_samples=32)
        self.sa_module_3 = PointnetModule([256, 256, 512, 1024], n_points=None, radius=0.2, n_samples=32)

        self.fp_module_3 = PointNetFeaturePropagation(1024+256, [256, 256])
        self.fp_module_2 = PointNetFeaturePropagation(256+128, [256, 128])
        self.fp_module_1 = PointNetFeaturePropagation(128+6, [128, 128, 128])

        self.unit_3 = Unit(step=step, in_channel=256)
        self.unit_2 = Unit(step=step, in_channel=128)
        self.unit_1 = Unit(step=step, in_channel=128)

        mlp = [128, 64, 3]
        last_channel = 128 + self.dim_tail
        mlp_conv = []
        for out_channel in mlp[:-1]:
            mlp_conv.append(Conv1d(last_channel, out_channel, if_bn=True, activation_fn=nn.Relu()))
            last_channel = out_channel
        mlp_conv.append(Conv1d(last_channel, mlp[-1], if_bn=False, activation_fn=None))
        self.mlp_conv = nn.Sequential(*mlp_conv)

        self.tanh = nn.Tanh()
Example #2
 def __init__(self):
     super(Decoder, self).__init__()
     self.model = nn.Sequential(nn.Linear(opt.latent_dim, 512),
                                nn.Leaky_relu(0.2), nn.Linear(512, 512),
                                nn.BatchNorm1d(512), nn.Leaky_relu(0.2),
                                nn.Linear(512, int(np.prod(img_shape))),
                                nn.Tanh())
Example #3
    def __init__(self, input_nc, output_nc, ngf=64, n_downsampling=3, n_blocks=9, norm_layer=nn.BatchNorm, padding_type='reflect'):
        assert (n_blocks >= 0)
        super(GlobalGenerator, self).__init__()
        activation = nn.ReLU()

        model = [nn.ReflectionPad2d(3), nn.Conv(input_nc, ngf, 7, padding=0), norm_layer(ngf), activation]
        ### downsample
        for i in range(n_downsampling):
            mult = (2 ** i)
            model += [nn.Conv((ngf * mult), ((ngf * mult) * 2), 3, stride=2, padding=1), norm_layer(((ngf * mult) * 2)), activation]
        
        ### resnet blocks
        mult = (2 ** n_downsampling)
        for i in range(n_blocks):
            model += [ResnetBlock((ngf * mult), padding_type=padding_type, activation=activation, norm_layer=norm_layer)]
        
        ### upsample 
        for i in range(n_downsampling):
            mult = (2 ** (n_downsampling - i))
            model += [nn.ConvTranspose((ngf * mult), int(((ngf * mult) / 2)), 3, stride=2, padding=1, output_padding=1), norm_layer(int(((ngf * mult) / 2))), activation]
        model += [nn.ReflectionPad2d(3), nn.Conv(ngf, output_nc, 7, padding=0), nn.Tanh()]
        self.model = nn.Sequential(*model)

        for m in self.modules():
            weights_init_normal(m)
Example #4
    def __init__(self, in_size, out_size, inner_nc, dropout=0.0, innermost=False, outermost=False, submodule=None):
        super(UnetBlock, self).__init__()
        self.outermost = outermost

        downconv = nn.Conv(in_size, inner_nc, 4, stride=2, padding=1, bias=False)
        downnorm = nn.BatchNorm2d(inner_nc)
        downrelu = nn.LeakyReLU(0.2)
        upnorm = nn.BatchNorm2d(out_size)
        uprelu = nn.ReLU()

        if outermost:
            upconv = nn.ConvTranspose(2*inner_nc, out_size, 4, stride=2, padding=1)
            down = [downconv]
            up = [uprelu, upconv, nn.Tanh()]
            model = down + [submodule] + up
        elif innermost:
            upconv = nn.ConvTranspose(inner_nc, out_size, 4, stride=2, padding=1, bias=False)
            down = [downrelu, downconv]
            up = [uprelu, upconv, upnorm]
            model = down + up
        else:
            upconv = nn.ConvTranspose(2*inner_nc, out_size, 4, stride=2, padding=1, bias=False)
            down = [downrelu, downconv, downnorm]
            up = [uprelu, upconv, upnorm]
            if dropout:
                model = down + [submodule] + up + [nn.Dropout(dropout)]
            else:
                model = down + [submodule] + up
        
        self.model = nn.Sequential(*model)

        for m in self.modules():
            weights_init_normal(m)
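The excerpt above only shows the constructor; the `2 * inner_nc` input width of the outer and middle upconvs is explained by the skip connection applied in the forward pass. A minimal sketch of what that companion method typically looks like (assumed here, written Jittor-style with `execute` and `jt.concat`; a torch port would use `forward` and `torch.cat`):

    def execute(self, x):
        if self.outermost:
            return self.model(x)
        # inner blocks return their input stacked with their output along the
        # channel axis, which is why the enclosing block's upconv expects
        # 2 * inner_nc input channels
        return jt.concat([x, self.model(x)], dim=1)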
Example #5
    def __init__(self,
                 input_nc,
                 output_nc,
                 ngf=64,
                 n_downsampling=3,
                 n_blocks=9,
                 norm_layer=nn.InstanceNorm2d,
                 padding_type='reflect'):
        assert (n_blocks >= 0)
        super(GlobalGenerator, self).__init__()
        activation = nn.ReLU()

        model = [
            nn.ReflectionPad2d(3),
            nn.Conv(input_nc, ngf, 7, padding=0),
            norm_layer(ngf), activation
        ]
        ### downsample
        for i in range(n_downsampling):
            mult = 2**i
            model += [
                nn.Conv(ngf * mult, ngf * mult * 2, 3, stride=2, padding=1),
                norm_layer(ngf * mult * 2), activation
            ]

        ### resnet blocks
        mult = 2**n_downsampling
        for i in range(n_blocks):
            model += [
                ResnetBlock(ngf * mult,
                            norm_type='in',
                            padding_type=padding_type)
            ]

        ### upsample
        for i in range(n_downsampling):
            mult = 2**(n_downsampling - i)
            model += [
                nn.ConvTranspose(ngf * mult,
                                 int(ngf * mult / 2),
                                 3,
                                 stride=2,
                                 padding=1,
                                 output_padding=1),
                norm_layer(int(ngf * mult / 2)), activation
            ]
        model += [
            nn.ReflectionPad2d(3),
            nn.Conv(ngf, output_nc, kernel_size=7, padding=0),
            nn.Tanh()
        ]
        self.model = nn.Sequential(*model)
Example #6
    def __init__(self):
        super(Generator, self).__init__()

        def block(in_feat, out_feat, normalize=True):
            layers = [nn.Linear(in_feat, out_feat)]
            if normalize:
                layers.append(nn.BatchNorm1d(out_feat, 0.8))
            layers.append(nn.LeakyReLU(0.2))
            return layers

        self.model = nn.Sequential(
            *block(opt.latent_dim, 128, normalize=False), *block(128, 256),
            *block(256, 512), *block(512, 1024),
            nn.Linear(1024, int(np.prod(img_shape))), nn.Tanh())
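For context, here is a minimal, self-contained sketch of how such an MLP generator is driven, assuming a Jittor-style API (`import jittor as jt`); `latent_dim` and `img_shape` are hypothetical stand-ins for the `opt` config and module-level globals used above:

import numpy as np
import jittor as jt
from jittor import nn

latent_dim, img_shape = 100, (1, 28, 28)      # hypothetical config values

toy_gen = nn.Sequential(                      # a reduced version of the block() stack above
    nn.Linear(latent_dim, 128), nn.LeakyReLU(0.2),
    nn.Linear(128, int(np.prod(img_shape))), nn.Tanh())

z = jt.randn(16, latent_dim)                  # a batch of latent vectors
imgs = toy_gen(z).reshape((-1, *img_shape))   # Tanh keeps outputs in [-1, 1]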
Example #7
    def __init__(self):
        super(Generator, self).__init__()
        self.fc = nn.Linear(opt.latent_dim, (opt.channels * (opt.img_size**2)))
        self.l1 = nn.Sequential(
            nn.Conv((opt.channels * 2), 64, 3, stride=1, padding=1), nn.ReLU())
        resblocks = []
        for _ in range(opt.n_residual_blocks):
            resblocks.append(ResidualBlock())
        self.resblocks = nn.Sequential(*resblocks)
        self.l2 = nn.Sequential(
            nn.Conv(64, opt.channels, 3, stride=1, padding=1), nn.Tanh())

        for m in self.modules():
            weights_init_normal(m)
Example #8
    def __init__(self,
                 input_nc,
                 output_nc,
                 ngf=64,
                 n_downsampling=3,
                 n_blocks=9,
                 norm_layer=nn.BatchNorm2d,
                 padding_type='reflect'):
        assert (n_blocks >= 0)
        super(Part_Generator, self).__init__()
        activation = nn.ReLU()

        model = []
        ### resnet blocks
        mult = 2**n_downsampling
        for i in range(n_blocks):
            model += [
                ResnetBlock(ngf * mult,
                            norm_type='adain',
                            padding_type=padding_type)
            ]

        for i in range(n_downsampling):
            mult = 2**(n_downsampling - i)
            model += [
                nn.ConvTranspose(ngf * mult,
                                 int(ngf * mult / 2),
                                 3,
                                 stride=2,
                                 padding=1,
                                 output_padding=1)
            ]
            model += [AdaptiveInstanceNorm2d(int(ngf * mult / 2))]
            model += [activation]
        model += [
            nn.ReflectionPad2d(3),
            nn.Conv(ngf, output_nc, 7, padding=0),
            nn.Tanh()
        ]
        self.model = nn.Sequential(*model)

        # style encoder
        self.enc_style = StyleEncoder(5,
                                      3,
                                      16,
                                      self.get_num_adain_params(self.model),
                                      norm='none',
                                      activ='relu',
                                      pad_type='reflect')
Example #9
    def __init__(self,
                 input_dim,
                 output_dim,
                 kernel_size,
                 stride,
                 padding=0,
                 norm='none',
                 activation='relu',
                 pad_type='zero'):
        super(ConvBlock, self).__init__()
        self.use_bias = True
        # initialize padding
        if pad_type == 'reflect':
            self.pad = nn.ReflectionPad2d(padding)
        elif pad_type == 'replicate':
            self.pad = nn.ReplicationPad2d(padding)
        elif pad_type == 'zero':
            self.pad = nn.ZeroPad2d(padding)
        else:
            assert 0, "Unsupported padding type: {}".format(pad_type)

        # initialize normalization
        norm_dim = output_dim
        if norm == 'bn':
            self.norm = nn.BatchNorm(norm_dim)
        elif norm == 'in':
            self.norm = nn.InstanceNorm2d(norm_dim)
        elif norm == 'adain':
            self.norm = AdaptiveInstanceNorm2d(norm_dim)
        elif norm == 'none':
            self.norm = None
        else:
            assert 0, "Unsupported normalization: {}".format(norm)

        # initialize activation
        if activation == 'relu':
            self.activation = nn.ReLU()
        elif activation == 'tanh':
            self.activation = nn.Tanh()
        elif activation == 'none':
            self.activation = None
        else:
            assert 0, "Unsupported activation: {}".format(activation)

        self.conv = nn.Conv(input_dim,
                            output_dim,
                            kernel_size,
                            stride,
                            bias=self.use_bias)
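The constructor above only wires up the pieces; the usual companion forward pass applies them in pad -> conv -> norm -> activation order. This is an assumed sketch (the method is not part of the excerpt), written Jittor-style with `execute`:

    def execute(self, x):
        x = self.conv(self.pad(x))        # explicit padding, then convolution
        if self.norm is not None:
            x = self.norm(x)              # bn / in / adain, when configured
        if self.activation is not None:
            x = self.activation(x)        # relu / tanh, when configured
        return x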
Example #10
 def __init__(self):
     super(Generator, self).__init__()
     self.init_size = (opt.img_size // 4)
     self.l1 = nn.Sequential(
         nn.Linear(opt.latent_dim, (128 * (self.init_size**2))))
     self.conv_blocks = nn.Sequential(
         nn.BatchNorm(128), nn.Upsample(scale_factor=2),
         nn.Conv(128, 128, 3, stride=1, padding=1),
         nn.BatchNorm(128, eps=0.8), nn.LeakyReLU(scale=0.2),
         nn.Upsample(scale_factor=2),
         nn.Conv(128, 64, 3, stride=1, padding=1), nn.BatchNorm(64,
                                                                eps=0.8),
         nn.LeakyReLU(scale=0.2),
         nn.Conv(64, opt.channels, 3, stride=1, padding=1), nn.Tanh())
     for m in self.conv_blocks:
         weights_init_normal(m)
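The `l1` projection above only makes sense together with a reshape in the forward pass: the flat vector is folded back into a 128-channel feature map of side `init_size` before the upsampling convolutions. An assumed sketch of that method (not part of the excerpt):

 def execute(self, z):
     out = self.l1(z)
     # fold the flat projection into a (B, 128, init_size, init_size) map
     out = out.reshape((out.shape[0], 128, self.init_size, self.init_size))
     return self.conv_blocks(out)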
Example #11
    def __init__(self):
        super(Generator, self).__init__()
        self.label_emb = nn.Embedding(opt.n_classes, opt.n_classes)

        # nn.Linear(in_dim, out_dim) is a fully connected layer
        # in_dim: dimension of the input vector
        # out_dim: dimension of the output vector
        def block(in_feat, out_feat, normalize=True):
            layers = [nn.Linear(in_feat, out_feat)]
            if normalize:
                layers.append(nn.BatchNorm1d(out_feat, 0.8))
            layers.append(nn.LeakyReLU(0.2))
            return layers

        self.model = nn.Sequential(
            *block((opt.latent_dim + opt.n_classes), 128, normalize=False),
            *block(128, 256), *block(256, 512), *block(512, 1024),
            nn.Linear(1024, int(np.prod(img_shape))), nn.Tanh())
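In this conditional generator the label embedding is meant to be concatenated with the noise vector before it enters `self.model`. The forward pass is not part of the excerpt; a sketch of the usual pattern, assuming a Jittor-style `jt.concat` and the module-level `img_shape` used above:

    def execute(self, noise, labels):
        # condition on the class by stacking its embedding onto the latent code
        gen_input = jt.concat((self.label_emb(labels), noise), dim=1)
        img = self.model(gen_input)
        return img.reshape((img.shape[0], *img_shape))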
Example #12
 def __init__(self, dim=3):
     super(generator, self).__init__()
     self.fc = nn.Linear(1024, 7 * 7 * 256)
     self.fc_bn = nn.BatchNorm(256)
     self.deconv1 = nn.ConvTranspose(256, 256, 3, 2, 1, 1)
     self.deconv1_bn = nn.BatchNorm(256)
     self.deconv2 = nn.ConvTranspose(256, 256, 3, 1, 1)
     self.deconv2_bn = nn.BatchNorm(256)
     self.deconv3 = nn.ConvTranspose(256, 256, 3, 2, 1, 1)
     self.deconv3_bn = nn.BatchNorm(256)
     self.deconv4 = nn.ConvTranspose(256, 256, 3, 1, 1)
     self.deconv4_bn = nn.BatchNorm(256)
     self.deconv5 = nn.ConvTranspose(256, 128, 3, 2, 1, 1)
     self.deconv5_bn = nn.BatchNorm(128)
     self.deconv6 = nn.ConvTranspose(128, 64, 3, 2, 1, 1)
     self.deconv6_bn = nn.BatchNorm(64)
     self.deconv7 = nn.ConvTranspose(64, dim, 3, 1, 1)
     self.relu = nn.ReLU()
     self.tanh = nn.Tanh()
Example #13
    def __init__(self, in_channels=3, out_channels=1):
        super(Combiner, self).__init__()

        model = [nn.ReflectionPad2d(3),
                 nn.Conv(in_channels, 64, 7, padding=0, bias=False),
                 nn.BatchNorm2d(64),
                 nn.ReLU()]

        for i in range(2):
            model += [ResidualBlock(64, dropout=0.5)]

        model += [nn.ReflectionPad2d(3),
                  nn.Conv(64, out_channels, kernel_size=7, padding=0),
                  nn.Tanh()]

        self.model = nn.Sequential(*model)

        for m in self.modules():
            weights_init_normal(m)
Example #14
    def __init__(self, in_channels=3, out_channels=1, num_res_blocks=9):
        super(GeneratorResNet, self).__init__()
        out_features = 64
        model = [nn.ReflectionPad2d(3), nn.Conv(in_channels, out_features, 7, bias=False), nn.BatchNorm2d(out_features), nn.ReLU()]
        in_features = out_features
        for _ in range(2):
            out_features *= 2
            model += [nn.Conv(in_features, out_features, 3, stride=2, padding=1, bias=False), nn.BatchNorm2d(out_features), nn.ReLU()]
            in_features = out_features
        for _ in range(num_res_blocks):
            model += [ResidualBlock(out_features)]
        for _ in range(2):
            out_features //= 2
            model += [nn.ConvTranspose(in_features, out_features, 3, stride=2, padding=1, output_padding=1, bias=False), nn.BatchNorm2d(out_features), nn.ReLU()]
            in_features = out_features
        model += [nn.ReflectionPad2d(3), nn.Conv(out_features, out_channels, 7), nn.Tanh()]
        self.model = nn.Sequential(*model)

        for m in self.modules():
            weights_init_normal(m)
Example #15
 def __init__(self, img_shape=(3, 128, 128), res_blocks=9, c_dim=5):
     super(GeneratorResNet, self).__init__()
     (channels, img_size, _) = img_shape
     model = [
         nn.Conv((channels + c_dim), 64, 7, stride=1, padding=3,
                 bias=False),
         nn.InstanceNorm2d(64, affine=None),
         nn.ReLU()
     ]
     curr_dim = 64
     for _ in range(2):
         model += [
             nn.Conv(curr_dim, (curr_dim * 2),
                     4,
                     stride=2,
                     padding=1,
                     bias=False),
             nn.InstanceNorm2d((curr_dim * 2), affine=None),
             nn.ReLU()
         ]
         curr_dim *= 2
     for _ in range(res_blocks):
         model += [ResidualBlock(curr_dim)]
     for _ in range(2):
         model += [
             nn.ConvTranspose(curr_dim, (curr_dim // 2),
                              4,
                              stride=2,
                              padding=1,
                              bias=False),
             nn.InstanceNorm2d((curr_dim // 2), affine=None),
             nn.ReLU()
         ]
         curr_dim = (curr_dim // 2)
     model += [
         nn.Conv(curr_dim, channels, 7, stride=1, padding=3),
         nn.Tanh()
     ]
     self.model = nn.Sequential(*model)
     for m in self.model:
         weights_init_normal(m)
Example #16
    def __init__(self, input_shape, num_residual_blocks):
        super(GeneratorResNet, self).__init__()
        channels = input_shape[0]
        out_features = 64
        model = [
            nn.ReflectionPad2d(channels),
            nn.Conv(channels, out_features, 7),
            nn.InstanceNorm2d(out_features, affine=None),
            nn.ReLU()
        ]
        in_features = out_features
        for _ in range(2):
            out_features *= 2
            model += [
                nn.Conv(in_features, out_features, 3, stride=2, padding=1),
                nn.InstanceNorm2d(out_features, affine=None),
                nn.ReLU()
            ]
            in_features = out_features
        for _ in range(num_residual_blocks):
            model += [ResidualBlock(out_features)]
        for _ in range(2):
            out_features //= 2
            model += [
                nn.Upsample(scale_factor=2),
                nn.Conv(in_features, out_features, 3, stride=1, padding=1),
                nn.InstanceNorm2d(out_features, affine=None),
                nn.ReLU()
            ]
            in_features = out_features
        model += [
            nn.ReflectionPad2d(channels),
            nn.Conv(out_features, channels, 7),
            nn.Tanh()
        ]
        self.model = nn.Sequential(*model)

        for m in self.modules():
            weights_init_normal(m)
Example #17
    def __init__(self, input_nc, output_nc, h=96, w=96):
        super(AutoEncoderWithFC, self).__init__()
        
        out_features = 64
        model = [nn.Conv(input_nc, 64, kernel_size=4, stride=2, padding=1, bias=False)]
        in_features = out_features
        for _ in range(3):
            out_features *= 2
            model += [nn.LeakyReLU(0.2),
                      nn.Conv(in_features, out_features, 4,
                                    stride=2, padding=1, bias=False),
                      nn.BatchNorm2d(out_features)]
            in_features = out_features
        self.encoder = nn.Sequential(*model)

        self.rh = int(h/16)
        self.rw = int(w/16)
        self.feat_dim = 512 * self.rh * self.rw

        self.fc1 = nn.Linear(self.feat_dim, 1024)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(1024, self.feat_dim)
        
        model2 = []
        for _ in range(3):
            out_features //= 2
            model2 += [nn.ReLU(),
                       nn.ConvTranspose(in_features, out_features, 4, 
                                    stride=2, padding=1, bias=False),
                       nn.BatchNorm2d(out_features)]
            in_features = out_features
        model2 += [nn.ReLU(),
                    nn.ConvTranspose(out_features, output_nc, 4, stride=2, padding=1, bias=False),
                    nn.Tanh()]
        self.decoder = nn.Sequential(*model2)

        for m in self.modules():
            weights_init_normal(m)
Example #18
    def __init__(self, in_channels=3, out_channels=3):
        super(GeneratorUNet, self).__init__()
        self.down1 = UNetDown(in_channels, 64, normalize=False)
        self.down2 = UNetDown(64, 128)
        self.down3 = UNetDown(128, 256)
        self.down4 = UNetDown(256, 512, dropout=0.5)
        self.down5 = UNetDown(512, 512, dropout=0.5)
        self.down6 = UNetDown(512, 512, dropout=0.5)
        self.down7 = UNetDown(512, 512, dropout=0.5)
        self.down8 = UNetDown(512, 512, normalize=False, dropout=0.5)
        self.up1 = UNetUp(512, 512, dropout=0.5)
        self.up2 = UNetUp(1024, 512, dropout=0.5)
        self.up3 = UNetUp(1024, 512, dropout=0.5)
        self.up4 = UNetUp(1024, 512, dropout=0.5)
        self.up5 = UNetUp(1024, 256)
        self.up6 = UNetUp(512, 128)
        self.up7 = UNetUp(256, 64)
        self.final = nn.Sequential(nn.Upsample(scale_factor=2),
                                   nn.ZeroPad2d((1, 0, 1, 0)),
                                   nn.Conv(128, out_channels, 4, padding=1),
                                   nn.Tanh())

        for m in self.modules():
            weights_init_normal(m)
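The constructor above wires eight encoder blocks and seven decoder blocks; the skip connections that justify the 1024/512/256-channel inputs of `up2`..`up7` live in the forward pass, which is not shown. An assumed sketch of that method, with each `UNetUp` upsampling its input and concatenating the matching encoder feature map:

    def execute(self, x):
        d1 = self.down1(x)
        d2 = self.down2(d1)
        d3 = self.down3(d2)
        d4 = self.down4(d3)
        d5 = self.down5(d4)
        d6 = self.down6(d5)
        d7 = self.down7(d6)
        d8 = self.down8(d7)          # bottleneck
        u1 = self.up1(d8, d7)        # each up block concatenates its skip,
        u2 = self.up2(u1, d6)        # doubling the channel count seen by the
        u3 = self.up3(u2, d5)        # next block
        u4 = self.up4(u3, d4)
        u5 = self.up5(u4, d3)
        u6 = self.up6(u5, d2)
        u7 = self.up7(u6, d1)
        return self.final(u7)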
Example #19
    def __init__(self):
        super(CoupledGenerators, self).__init__()
        self.init_size = (opt.img_size // 4)
        self.fc = nn.Sequential(nn.Linear(opt.latent_dim, (128 * (self.init_size ** 2))))
        self.shared_conv = nn.Sequential(nn.BatchNorm(128), nn.Upsample(scale_factor=2), nn.Conv(128, 128, 3, stride=1, padding=1), nn.BatchNorm(128, eps=0.8), nn.LeakyReLU(0.2), nn.Upsample(scale_factor=2))
        self.G1 = nn.Sequential(nn.Conv(128, 64, 3, stride=1, padding=1), nn.BatchNorm(64, eps=0.8), nn.LeakyReLU(0.2), nn.Conv(64, opt.channels, 3, stride=1, padding=1), nn.Tanh())
        self.G2 = nn.Sequential(nn.Conv(128, 64, 3, stride=1, padding=1), nn.BatchNorm(64, eps=0.8), nn.LeakyReLU(0.2), nn.Conv(64, opt.channels, 3, stride=1, padding=1), nn.Tanh())

        for m in self.modules():
            weights_init_normal(m)
Example #20
    def __init__(self, channels=3):
        super(Generator, self).__init__()

        def downsample(in_feat, out_feat, normalize=True):
            layers = [nn.Conv(in_feat, out_feat, 4, stride=2, padding=1)]
            if normalize:
                layers.append(nn.BatchNorm(out_feat, eps=0.8))
            layers.append(nn.Leaky_relu(scale=0.2))
            return layers

        def upsample(in_feat, out_feat, normalize=True):
            layers = [nn.ConvTranspose(in_feat, out_feat, 4, stride=2, padding=1)]
            if normalize:
                layers.append(nn.BatchNorm(out_feat, eps=0.8))
            layers.append(nn.ReLU())
            return layers
        self.model = nn.Sequential(*downsample(channels, 64, normalize=False), *downsample(64, 64), *downsample(64, 128), *downsample(128, 256), *downsample(256, 512), nn.Conv(512, 4000, 1), *upsample(4000, 512), *upsample(512, 256), *upsample(256, 128), *upsample(128, 64), nn.Conv(64, channels, 3, stride=1, padding=1), nn.Tanh())
        for m in self.model:
            weights_init_normal(m)
Example #21
    def __init__(self, latent_dim, img_shape):
        super(Generator, self).__init__()
        (channels, self.h, self.w) = img_shape
        self.fc = nn.Linear(latent_dim, (self.h * self.w))
        self.down1 = UNetDown((channels + 1), 64, normalize=False)
        self.down2 = UNetDown(64, 128)
        self.down3 = UNetDown(128, 256)
        self.down4 = UNetDown(256, 512)
        self.down5 = UNetDown(512, 512)
        self.down6 = UNetDown(512, 512)
        self.down7 = UNetDown(512, 512, normalize=False)
        self.up1 = UNetUp(512, 512)
        self.up2 = UNetUp(1024, 512)
        self.up3 = UNetUp(1024, 512)
        self.up4 = UNetUp(1024, 256)
        self.up5 = UNetUp(512, 128)
        self.up6 = UNetUp(256, 64)
        self.final = nn.Sequential(nn.Upsample(scale_factor=2), nn.Conv(128, channels, 3, stride=1, padding=1), nn.Tanh())

        for m in self.modules():
            weights_init_normal(m)
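Here the latent code is injected as an extra image channel, which is why `down1` takes `channels + 1` inputs and `fc` maps the latent vector to an h x w plane. The forward pass is not in the excerpt; an assumed Jittor-style sketch of how the pieces fit together:

    def execute(self, x, z):
        # project the latent vector to a 1-channel h x w map and stack it
        # onto the input image before the encoder
        z_map = self.fc(z).reshape((z.shape[0], 1, self.h, self.w))
        d1 = self.down1(jt.concat((x, z_map), dim=1))
        d2 = self.down2(d1)
        d3 = self.down3(d2)
        d4 = self.down4(d3)
        d5 = self.down5(d4)
        d6 = self.down6(d5)
        d7 = self.down7(d6)
        u1 = self.up1(d7, d6)        # skip connections mirror the encoder
        u2 = self.up2(u1, d5)
        u3 = self.up3(u2, d4)
        u4 = self.up4(u3, d3)
        u5 = self.up5(u4, d2)
        u6 = self.up6(u5, d1)
        return self.final(u6)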