Example #1
def init_running_average_generator(self):
    # Separate generator that holds a running average of the training
    # generator's weights (used for evaluation/inference).
    self.running_average_generator = Generator(self.pose_size,
                                               self.start_channel_size,
                                               self.image_channels)
    self.running_average_generator = wrap_models(
        self.running_average_generator)
    to_cuda(self.running_average_generator)
    # No optimizer is passed: the running-average copy is never trained directly.
    self.running_average_generator = amp.initialize(
        self.running_average_generator, None, opt_level=self.opt_level)
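
For reference, a minimal sketch of the NVIDIA apex amp call this method relies on; the toy model, optimizer, and opt_level below are assumptions for illustration, not taken from the repository above.

from apex import amp
import torch

model = torch.nn.Linear(8, 8).cuda()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
# amp.initialize also accepts a single model with optimizers=None,
# which is what Example #1 does for the running-average generator.
model, optimizer = amp.initialize(model, optimizer, opt_level="O1")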
Example #2
def init_generator(config, ckpt):
    g = Generator(
        config.models.pose_size,
        config.models.start_channel_size,
        config.models.image_channels
    )
    # Restore the running-average generator weights saved in the checkpoint.
    g.load_state_dict(ckpt["running_average_generator"])
    g.eval()  # inference mode
    torch_utils.to_cuda(g)
    return g
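
A hedged usage sketch for init_generator; the checkpoint path and the numeric model sizes in the config stub are hypothetical and only illustrate the expected call shape.

import torch
from types import SimpleNamespace

# Hypothetical config stub mirroring the config.models.* fields used above.
config = SimpleNamespace(models=SimpleNamespace(
    pose_size=14, start_channel_size=512, image_channels=3))

ckpt = torch.load("path/to/checkpoint.ckpt", map_location="cpu")  # hypothetical path
generator = init_generator(config, ckpt)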
Example #3
def init_model(pose_size, start_channel_dim, image_channels,
               discriminator_model):
    # Select the discriminator architecture from the config string.
    if discriminator_model == "deep":
        d = DeepDiscriminator
    else:
        assert discriminator_model == "normal"
        d = Discriminator

    discriminator = d(image_channels, start_channel_dim, pose_size)
    generator = Generator(pose_size, start_channel_dim, image_channels)
    discriminator, generator = wrap_models([discriminator, generator])
    return discriminator, generator
Example #4
def init_model(pose_size, start_channel_dim, image_channels,
               discriminator_model):
    # Select the discriminator architecture from the config string.
    if discriminator_model == "deep":
        d = DeepDiscriminator
    else:
        assert discriminator_model == "normal"
        d = Discriminator

    discriminator = d(image_channels, start_channel_dim, pose_size)
    generator = Generator(pose_size, start_channel_dim, image_channels)
    discriminator, generator = wrap_models([discriminator, generator])

    # Freeze the discriminator so its weights are not updated during training.
    for parameter in discriminator.parameters():
        parameter.requires_grad = False

    return discriminator, generator
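
The only difference from Example #3 is the freezing step. A small self-contained illustration of that pattern, using a stand-in torch.nn module rather than the repository's Discriminator:

import torch.nn as nn

net = nn.Linear(4, 2)  # stand-in for the discriminator
for parameter in net.parameters():
    parameter.requires_grad = False

# Frozen parameters are excluded from gradient computation,
# so an optimizer built over them would make no updates.
assert all(not p.requires_grad for p in net.parameters())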