Example #1
class GANModel:
    def __init__(self, generator: Generator, loss: GANLoss, lr=0.0002):
        self.generator = generator
        self.generator.apply(weights_init)
        self.loss = loss
        self.loss.discriminator.apply(weights_init)
        params = MinMaxParameters(self.generator.parameters(),
                                  self.loss.parameters())
        # The discriminator (max player) trains at twice the generator's lr.
        self.optimizer = MinMaxOptimizer(params, lr, lr * 2)

    def discriminator_loss(self, real: Tensor, fake: Tensor) -> Loss:
        return self.loss.discriminator_loss_with_penalty([real], [fake])

    def generator_loss(self, real: Tensor, fake: Tensor) -> Loss:
        return self.loss.generator_loss([real], [fake])

    def loss_pair(self, real: Tensor, *noise: Tensor) -> MinMaxLoss:

        fake = self.generator.forward(*noise)

        return MinMaxLoss(self.generator_loss(real, fake),
                          self.discriminator_loss(real, fake))

    def parameters(self) -> MinMaxParameters:
        return MinMaxParameters(self.generator.parameters(),
                                self.loss.parameters())

    def forward(self, real: Tensor, *noise: Tensor):
        return self.loss_pair(real, *noise)

    def train(self, real: Tensor, *noise: Tensor):
        loss = self.loss_pair(real, *noise)
        self.optimizer.train_step(loss)
        return loss.min_loss.item(), loss.max_loss.item()  # (generator, discriminator)
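A minimal usage sketch for GANModel, assuming concrete Generator and GANLoss implementations are available; make_generator, make_gan_loss, real_batch, and noise below are placeholders, not part of the original code:

generator = make_generator()              # hypothetical Generator factory
gan_loss = make_gan_loss()                # hypothetical GANLoss factory
model = GANModel(generator, gan_loss, lr=0.0002)
g_loss, d_loss = model.train(real_batch, noise)   # one min-max update
print(f"g = {g_loss}, d = {d_loss}")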
Example #2
class MaskToImageComposite:
    def __init__(self,
                 image_size: int,
                 labels_list: List[int],
                 image_channels_count: int = 3,
                 noise=NormalNoise(100, ParallelConfig.MAIN_DEVICE),
                 generator_size: int = 32,
                 discriminator_size: int = 32):

        mask_nc = len(labels_list)

        gen_list = nn.ModuleList([
            UNetGenerator(noise,
                          image_size,
                          1,
                          image_channels_count,
                          generator_size // 2,
                          nc_max=256) for _ in range(mask_nc)
        ])

        netG = CompositeGenerator(noise, gen_list) \
            .to(ParallelConfig.MAIN_DEVICE)
        netD = Discriminator(discriminator_size, image_channels_count + mask_nc, image_size) \
            .to(ParallelConfig.MAIN_DEVICE)

        netG.apply(weights_init)
        netD.apply(weights_init)

        if torch.cuda.device_count() > 1:
            netD = nn.DataParallel(netD, ParallelConfig.GPU_IDS)
            netG = nn.DataParallel(netG, ParallelConfig.GPU_IDS)

        self.gan_model = ConditionalGANModel(
            netG, netD,
            WassersteinLoss(2)
            .add_penalty(AdaptiveLipschitzPenalty(0.1, 0.01))
            .add_penalty(L2Penalty(0.1))
            + VggGeneratorLoss(15, 1))

        # vgg_loss_fn = VggGeneratorLoss(ParallelConfig.MAIN_DEVICE)

        lrG = 0.0002
        lrD = 0.0002
        self.optimizer = MinMaxOptimizer(self.gan_model.parameters(), lrG, lrD)

    def train(self, images: Tensor, masks: Mask):

        loss: MinMaxLoss = self.gan_model.loss_pair(images, masks.tensor)
        self.optimizer.train_step(loss)

    def generator_loss(self, images: Tensor, masks: Mask) -> Loss:

        fake = self.gan_model.generator.forward(masks.tensor)
        return self.gan_model.generator_loss(images, fake, masks.tensor)
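A usage sketch for MaskToImageComposite, assuming `images` is a batch of real images and `masks` is a Mask whose tensor has one channel per entry in labels_list; the concrete values are placeholders:

model = MaskToImageComposite(image_size=128, labels_list=[0, 1, 2])
model.train(images, masks)                    # one adversarial update
g_loss = model.generator_loss(images, masks)  # generator-side loss only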
Example #3
class FillImageModel:
    def __init__(self,
                 image_size: int,
                 generator_size: int = 32,
                 discriminator_size: int = 32,
                 channels_count: int = 3):
        self.noise = NormalNoise(100, ParallelConfig.MAIN_DEVICE)

        self.G = FillGenerator(self.noise, image_size, channels_count, channels_count, generator_size) \
            .to(ParallelConfig.MAIN_DEVICE)
        self.D = Discriminator(discriminator_size, 2 * channels_count, image_size) \
            .to(ParallelConfig.MAIN_DEVICE)

        self.G.apply(weights_init)
        self.D.apply(weights_init)

        if len(ParallelConfig.GPU_IDS) > 1:
            self.G = nn.DataParallel(self.G, ParallelConfig.GPU_IDS)
            self.D = nn.DataParallel(self.D, ParallelConfig.GPU_IDS)

        was_loss = WassersteinLoss(2) \
            .add_penalty(AdaptiveLipschitzPenalty(0.1, 0.01)) \
            .add_penalty(L2Penalty(0.1))

        self.gan_model = ConditionalGANModel(self.G, self.D, was_loss)

        lr = 0.0002
        self.optimizer = MinMaxOptimizer(self.gan_model.parameters(), lr, lr)

    def train(self, images: Tensor, segments: Mask):

        front: Tensor = images * segments.tensor
        loss: MinMaxLoss = self.gan_model.loss_pair(images, front,
                                                    segments.tensor)
        self.optimizer.train_step(loss)

    def test(self, images: Tensor, segments: Mask) -> Tensor:
        front: Tensor = images * segments.tensor
        return self.G(front, segments.tensor)

    def generator_loss(self, images: Tensor, segments: Mask) -> Loss:
        front: Tensor = images * segments.tensor
        fake = self.G(front, segments.tensor)
        loss = self.gan_model.generator_loss(images, fake, front)

        return loss
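A usage sketch for FillImageModel, with `images` and `segments` standing in for a batch of images and the corresponding segmentation Mask prepared elsewhere:

model = FillImageModel(image_size=64)
model.train(images, segments)             # adversarial update on masked fronts
filled = model.test(images, segments)     # generator output for the masked input
g_loss = model.generator_loss(images, segments)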
Example #4
class MaskToImage:
    def __init__(self,
                 image_size: int,
                 mask_channels_count: int,
                 image_channels_count: int = 3,
                 noise=NormalNoise(50, ParallelConfig.MAIN_DEVICE),
                 generator_size: int = 32,
                 discriminator_size: int = 32):

        netG = UNetGenerator(noise, image_size, mask_channels_count, image_channels_count, generator_size) \
            .to(ParallelConfig.MAIN_DEVICE)
        netD = Discriminator(discriminator_size, image_channels_count + mask_channels_count, image_size) \
            .to(ParallelConfig.MAIN_DEVICE)

        netG.apply(weights_init)
        netD.apply(weights_init)

        if torch.cuda.device_count() > 1:
            netD = nn.DataParallel(netD, ParallelConfig.GPU_IDS)
            netG = nn.DataParallel(netG, ParallelConfig.GPU_IDS)

        self.gan_model = ConditionalGANModel(
            netG, netD,
            WassersteinLoss(10.0)
            # .add_penalty(AdaptiveLipschitzPenalty(1, 0.05))
            # .add_penalty(L2Penalty(0.01))
        )

        lrG = 0.0001
        lrD = 0.0004
        self.optimizer = MinMaxOptimizer(self.gan_model.parameters(), lrG, lrD)

    def train(self, images: Tensor, masks: Mask):

        loss: MinMaxLoss = self.gan_model.loss_pair(images, masks.tensor)
        self.optimizer.train_step(loss)

    def generator_loss(self, images: Tensor, masks: Mask) -> Loss:

        fake = self.gan_model.generator.forward(masks.tensor)
        return self.gan_model.generator_loss(images, fake, masks.tensor)
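A usage sketch for MaskToImage; mask_channels_count must match the channel count of `masks.tensor`, and `images`/`masks` are placeholders for data loaded elsewhere:

model = MaskToImage(image_size=128, mask_channels_count=10)
model.train(images, masks)                    # one adversarial update
g_loss = model.generator_loss(images, masks)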
Example #5
xs = (torch.arange(0, n, dtype=torch.float32) / 100.0).view(n, 1)
ys = torch.cat((xs.cos(), xs.sin()), dim=1)

plt.scatter(ys[:, 0].view(n).numpy(), ys[:, 1].view(n).numpy())

print("Starting Training Loop...")


def gen_batch() -> Tensor:
    i = random.randint(0, n - batch_size)
    j = i + batch_size
    return ys[i:j, :]


for step in range(3000):

    data = gen_batch().to(device)

    loss = gan_model.loss_pair(data)
    optimizer.train_step(loss)

    if step % 100 == 0:
        # print(gan_model.loss.get_penalties()[1].weight)
        print(f"d = {loss.max_loss.item()}, g = {loss.min_loss.item()}")

fake = netG.forward(3 * batch_size)
plt.scatter(fake[:, 0].cpu().view(3 * batch_size).detach().numpy(),
            fake[:, 1].cpu().view(3 * batch_size).detach().numpy())
plt.show()
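The script above assumes `n`, `batch_size`, `device`, `netG`, `gan_model`, and `optimizer` are defined earlier; a sketch of that setup, where the constants are plain assumptions and the generator/loss classes are left as placeholders since the originals are not shown:

n = 1000            # number of points on the circle (assumed)
batch_size = 64     # assumed batch size
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# netG = <a Generator producing 2-D points from an integer batch size>
# gan_model = GANModel(netG, <a GANLoss instance>)   # API as in Example #1
# optimizer = gan_model.optimizer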