# Code example #1
    def __init__(self,
                 image_size: int,
                 generator_size: int = 32,
                 discriminator_size: int = 32,
                 channels_count: int = 3):
        """Build a conditional Wasserstein GAN (fill-generator variant).

        Creates the generator/discriminator pair on the main device,
        initializes their weights, wraps them in ``DataParallel`` when
        several GPUs are configured, and prepares the min-max optimizer.

        :param image_size: spatial size of the (square) images.
        :param generator_size: base channel width of the generator.
        :param discriminator_size: base channel width of the discriminator.
        :param channels_count: channels per image (3 = RGB).
        """
        # 100-dimensional normal noise source on the main device.
        self.noise = NormalNoise(100, ParallelConfig.MAIN_DEVICE)

        self.G = FillGenerator(self.noise, image_size, channels_count, channels_count, generator_size) \
            .to(ParallelConfig.MAIN_DEVICE)
        # Discriminator sees image + condition stacked -> 2 * channels_count.
        self.D = Discriminator(discriminator_size, 2 * channels_count, image_size) \
            .to(ParallelConfig.MAIN_DEVICE)

        self.G.apply(weights_init)
        self.D.apply(weights_init)

        # len(...) instead of calling __len__() directly (idiomatic Python);
        # matches the multi-GPU guard used by the sibling model classes.
        if len(ParallelConfig.GPU_IDS) > 1:
            self.G = nn.DataParallel(self.G, ParallelConfig.GPU_IDS)
            self.D = nn.DataParallel(self.D, ParallelConfig.GPU_IDS)

        # WGAN loss with adaptive Lipschitz and L2 penalties.
        was_loss = WassersteinLoss(2) \
            .add_penalty(AdaptiveLipschitzPenalty(0.1, 0.01)) \
            .add_penalty(L2Penalty(0.1))

        self.gan_model = ConditionalGANModel(self.G, self.D, was_loss)

        # Same learning rate for generator and discriminator.
        lr = 0.0002
        self.optimizer = MinMaxOptimizer(self.gan_model.parameters(), lr, lr)
# Code example #2
    def __init__(self,
                 image_size: int,
                 mask_channels_count: int,
                 image_channels_count: int = 3,
                 noise=None,
                 generator_size: int = 32,
                 discriminator_size: int = 32):
        """Build a mask-conditioned Wasserstein GAN with a U-Net generator.

        :param image_size: spatial size of the (square) images.
        :param mask_channels_count: number of channels in the condition mask.
        :param image_channels_count: channels per image (3 = RGB).
        :param noise: noise source; defaults to ``NormalNoise(50, MAIN_DEVICE)``.
        :param generator_size: base channel width of the generator.
        :param discriminator_size: base channel width of the discriminator.
        """
        # Default created lazily: the old ``noise=NormalNoise(...)`` default
        # was evaluated once at import time (allocating on the main device
        # when the module loaded) and shared a single instance across calls.
        if noise is None:
            noise = NormalNoise(50, ParallelConfig.MAIN_DEVICE)

        netG = UNetGenerator(noise, image_size, mask_channels_count, image_channels_count, generator_size) \
            .to(ParallelConfig.MAIN_DEVICE)
        # Discriminator sees image and mask stacked along channels.
        netD = Discriminator(discriminator_size, image_channels_count + mask_channels_count, image_size) \
            .to(ParallelConfig.MAIN_DEVICE)

        netG.apply(weights_init)
        netD.apply(weights_init)

        if torch.cuda.device_count() > 1:
            netD = nn.DataParallel(netD, ParallelConfig.GPU_IDS)
            netG = nn.DataParallel(netG, ParallelConfig.GPU_IDS)

        # Plain WGAN loss; no gradient penalties here.
        self.gan_model = ConditionalGANModel(netG, netD, WassersteinLoss(10.0))

        # TTUR-style rates: discriminator learns faster than the generator.
        lrG = 0.0001
        lrD = 0.0004
        self.optimizer = MinMaxOptimizer(self.gan_model.parameters(), lrG, lrD)
# Code example #3
    def __init__(self,
                 image_size: int,
                 labels_list: List[int],
                 image_channels_count: int = 3,
                 noise=None,
                 generator_size: int = 32,
                 discriminator_size: int = 32):
        """Build a label-wise composite GAN: one U-Net generator per label.

        :param image_size: spatial size of the (square) images.
        :param labels_list: label ids; one single-channel generator is built
            per label and composed by ``CompositeGenerator``.
        :param image_channels_count: channels per image (3 = RGB).
        :param noise: noise source; defaults to ``NormalNoise(100, MAIN_DEVICE)``.
        :param generator_size: base channel width of each sub-generator.
        :param discriminator_size: base channel width of the discriminator.
        """
        # Default created lazily: the old ``noise=NormalNoise(...)`` default
        # was evaluated once at import time and shared across all instances.
        if noise is None:
            noise = NormalNoise(100, ParallelConfig.MAIN_DEVICE)

        mask_nc = len(labels_list)

        # One half-width, single-channel generator per label.
        gen_list = nn.ModuleList([
            UNetGenerator(noise,
                          image_size,
                          1,
                          image_channels_count,
                          generator_size // 2,
                          nc_max=256) for _ in range(mask_nc)
        ])

        netG = CompositeGenerator(noise, gen_list) \
            .to(ParallelConfig.MAIN_DEVICE)
        # Discriminator sees image and all label masks stacked along channels.
        netD = Discriminator(discriminator_size, image_channels_count + mask_nc, image_size) \
            .to(ParallelConfig.MAIN_DEVICE)

        netG.apply(weights_init)
        netD.apply(weights_init)

        if torch.cuda.device_count() > 1:
            netD = nn.DataParallel(netD, ParallelConfig.GPU_IDS)
            netG = nn.DataParallel(netG, ParallelConfig.GPU_IDS)

        # WGAN loss with penalties, plus a VGG perceptual term on the generator.
        self.gan_model = ConditionalGANModel(
            netG, netD,
            WassersteinLoss(2).add_penalty(AdaptiveLipschitzPenalty(
                0.1, 0.01)).add_penalty(L2Penalty(0.1)) +
            VggGeneratorLoss(15, 1))

        lrG = 0.0002
        lrD = 0.0002
        self.optimizer = MinMaxOptimizer(self.gan_model.parameters(), lrG, lrD)
# Code example #4
                           transform=transforms.Compose([
                               transforms.Resize(image_size),
                               transforms.CenterCrop(image_size),
                               transforms.ToTensor(),
                               transforms.Normalize((0.5, 0.5, 0.5),
                                                    (0.5, 0.5, 0.5)),
                           ]))

# Batched loader over `dataset` (constructed above); 12 worker processes.
dataloader = torch.utils.data.DataLoader(dataset,
                                         batch_size=batch_size,
                                         shuffle=True,
                                         num_workers=12)

# NOTE(review): device is hard-coded to the second GPU — confirm it exists
# on the target machine.
device = torch.device("cuda:1")

# DCGAN generator driven by `noise_size`-dimensional normal noise.
noise = NormalNoise(noise_size, device)
netG = DCGenerator(noise, image_size).to(device)
print(netG)

netD = DCDiscriminator().to(device)
print(netD)

# Classic DCGAN hyper-parameters (Adam lr / betas).
lr = 0.0002
betas = (0.5, 0.999)

# Hinge-loss GAN over the DC generator/discriminator pair.
gan_model = GANModel(netG, HingeLoss(netD))

# Encoder mapping images back to the noise space (presumably for a
# BiGAN/ALI-style setup — confirm against the rest of the script).
netG_back = DCEncoder(nc_out=noise_size).to(device)
print(netG_back)

# Discriminator operating on latent codes of dimension `noise_size`.
netD_z = EDiscriminator(dim=noise_size, ndf=100).to(device)
# Code example #5
from gan.gan_model import GANModel
from framework.gan.euclidean.discriminator import EDiscriminator
from framework.gan.euclidean.generator import EGenerator
from framework.gan.loss.hinge import HingeLoss
from framework.gan.noise.normal import NormalNoise
from framework.optim.min_max import MinMaxOptimizer
from framework.gan.loss.penalties.adaptive_lipschitz import AdaptiveLipschitzPenalty
from framework.gan.loss.wasserstein import WassersteinLoss

batch_size = 256
# 2-D latent space: this toy GAN operates on points in the plane.
noise_size = 2

# NOTE(review): device is hard-coded to the second GPU — confirm it exists.
device = torch.device("cuda:1")

noise = NormalNoise(noise_size, device)
netG = EGenerator(noise).to(device)
print(netG)

netD = EDiscriminator().to(device)
print(netD)

lr = 0.001
betas = (0.5, 0.999)

# Hinge-loss GAN over the Euclidean generator/discriminator pair.
gan_model = GANModel(netG, netD, HingeLoss())

# Discriminator gets twice the generator's learning rate (TTUR-style).
optimizer = MinMaxOptimizer(gan_model.parameters(), lr, 2 * lr, betas)

# Number of iterations/samples used below — confirm against later code.
n = 5000
# Code example #6
def optimization_step():
    """Run one full training experiment and return the test loss.

    Trains a measure->image GAN jointly with an image->measure encoder,
    regularized toward a precomputed barycenter via optimal transport.
    Relies on module-level globals: ``args``, ``device``, ``n_noise``,
    ``ngf``, ``ndf``, ``L1``, ``cw``, ``ca``, ``ct``, ``dataloader`` and
    ``dataset_test``.

    :return: summed ``Samples_Loss`` over the test set after training.
    """
    noise = NormalNoise(n_noise, device)
    # Conditional generator: measure channels (x, y, weight) plus noise.
    measure2image = ResMeasureToImage(args.measure_size * 3 + noise.size(),
                                      args.image_size, ngf).cuda()

    netD = DCDiscriminator(ndf=ndf).cuda()
    # Hinge GAN loss with an extra L1 reconstruction term on the generator.
    gan_model = GANModel(measure2image,
                         HingeLoss(netD).add_generator_loss(nn.L1Loss(), L1),
                         lr=0.0004)

    fabric = ProbabilityMeasureFabric(args.image_size)
    barycenter = fabric.load("barycenter").cuda().padding(args.measure_size)
    print(barycenter.coord.shape)
    # Replicate the single barycenter across the batch dimension
    # (``_``: the loop variable was unused).
    barycenter = fabric.cat([barycenter for _ in range(args.batch_size)])
    print(barycenter.coord.shape)

    image2measure = ResImageToMeasure(args.measure_size).cuda()
    image2measure_opt = optim.Adam(image2measure.parameters(), lr=0.0002)

    def test():
        # Evaluate encoder quality: OT distance between predicted measures
        # and mask-derived reference measures, summed over the test set.
        dataloader_test = torch.utils.data.DataLoader(dataset_test,
                                                      batch_size=40,
                                                      num_workers=20)

        sum_loss = 0

        for imgs, masks in dataloader_test:
            imgs = imgs.cuda().type(torch.float32)
            pred_measures: ProbabilityMeasure = image2measure(imgs)
            ref_measures: ProbabilityMeasure = fabric.from_mask(
                masks).cuda().padding(args.measure_size)
            ref_loss = Samples_Loss()(pred_measures, ref_measures)
            sum_loss += ref_loss.item()

        return sum_loss

    for epoch in range(20):

        ot_iters = 100
        print("epoch", epoch)

        for imgs in dataloader:

            imgs = imgs.cuda().type(torch.float32)
            pred_measures: ProbabilityMeasure = image2measure(imgs)
            cond = pred_measures.toChannels()
            n = cond.shape[0]
            # Last batch may be smaller than args.batch_size.
            barycenter_batch = barycenter.slice(0, n)

            z = noise.sample(n)
            cond = torch.cat((cond, z), dim=1)
            # Detach: GAN step must not backprop into the encoder.
            gan_model.train(imgs, cond.detach())

            # Linear-transform OT: affine map A and translation T toward
            # the barycenter (no gradients through the OT solve itself).
            with torch.no_grad():
                A, T = LinearTransformOT.forward(pred_measures,
                                                 barycenter_batch, ot_iters)

            bc_loss_T = Samples_Loss()(pred_measures,
                                       pred_measures.detach() + T)
            bc_loss_A = Samples_Loss()(
                pred_measures.centered(),
                pred_measures.centered().multiply(A).detach())
            bc_loss_W = Samples_Loss()(pred_measures.centered().multiply(A),
                                       barycenter_batch.centered())
            # Weighted combination of the three barycenter terms.
            bc_loss = bc_loss_W * cw + bc_loss_A * ca + bc_loss_T * ct

            # Encoder step: GAN generator loss + barycenter regularizer.
            fake = measure2image(cond)
            g_loss = gan_model.generator_loss(imgs, fake)
            (g_loss + bc_loss).minimize_step(image2measure_opt)

    return test()