Example #1
def conf():
    device = 'cuda:0'
    netG = NetG_super().to(device)
    netDlow = NetD_super('low').to(device)
    netDhigh = NetD_super('high').to(device)
    optimizerG = optim.Adam(netG.parameters(), 0.0002, betas=(0.5, 0.999))
    optimizerDlow = optim.Adam(netDlow.parameters(), 0.0001, betas=(0.5, 0.999))
    optimizerDhigh = optim.Adam(netDhigh.parameters(), 0.0001, betas=(0.5, 0.999))
    f_bruit = Sup_res2
    epoch = 100
    cuda = True
    param = None
    f = f_bruit(param)

    datasetH = CelebADataset("/net/girlschool/besnier/CelebA_dataset/multi_dataset/img_H",
                             f,
                             transforms.Compose([transforms.Resize(64),
                                                 transforms.ToTensor(),
                                                 transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
                                                 ]))

    dataloaderH = torch.utils.data.DataLoader(datasetH, batch_size=64, shuffle=True, num_workers=1, drop_last=True)

    datasetF = CelebADataset("/net/girlschool/besnier/CelebA_dataset/multi_dataset/img_F",
                             f,
                             transforms.Compose([transforms.Resize(64),
                                                 transforms.ToTensor(),
                                                 transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
                                                 ]))

    dataloaderF = torch.utils.data.DataLoader(datasetF, batch_size=64, shuffle=True, num_workers=1, drop_last=True)
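Every pipeline in these examples normalizes images with transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), mapping tensors from [0, 1] to [-1, 1]. A small helper like the one below (a sketch, not part of the original configuration) undoes that before saving or plotting batches:

def denormalize(batch):
    # Inverse of Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)): maps image
    # tensors from [-1, 1] back to [0, 1] for visualization.
    return batch.mul(0.5).add(0.5).clamp(0.0, 1.0)
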
Example #2
def conf():
    device = 'cuda:0'
    netG = NetG_srgan().to(device)
    netDlow = NetD_super().to(device)
    netDhigh = NetD_patch().to(device)
    optimizerG = optim.Adam(netG.parameters(), 0.0004, betas=(0.5, 0.999))
    optimizerDlow = optim.Adam(netDlow.parameters(),
                               0.0004,
                               betas=(0.5, 0.999))
    optimizerDhigh = optim.Adam(netDhigh.parameters(),
                                0.0005,
                                betas=(0.5, 0.999))
    f_bruit = Sup_res2
    epoch = 5
    cuda = True
    param = None
    f = f_bruit(param)

    datasetCeleb = CelebADataset(
        "/net/girlschool/besnier/CelebA_dataset/img_align_celeba/", f,
        transforms.Compose([
            transforms.Resize(64),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ]))

    dataloaderCeleb = torch.utils.data.DataLoader(datasetCeleb,
                                                  batch_size=64,
                                                  shuffle=True,
                                                  num_workers=1,
                                                  drop_last=True)

    datasetYtrain = YoutubeFacesDataset(
        "/net/girlschool/besnier/YoutubeFaces", f, 0, 80,
        transforms.Compose([
            transforms.Resize(64),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ]))

    trainloaderY = torch.utils.data.DataLoader(datasetYtrain,
                                               batch_size=64,
                                               shuffle=True,
                                               num_workers=1,
                                               drop_last=True)

    datasetYtest = YoutubeFacesDataset(
        "/net/girlschool/besnier/YoutubeFaces", f, 80, 0,
        transforms.Compose([
            transforms.Resize(64),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ]))

    testloaderY = torch.utils.data.DataLoader(datasetYtest,
                                              batch_size=64,
                                              shuffle=True,
                                              num_workers=1,
                                              drop_last=True)
Example #3
def __load_file(self, train_filepath, train_csvfile, test_filepath, test_csvfile):
    self.train_dataset = CelebADataset(train_filepath,
                                       train_csvfile,
                                       test_filepath,
                                       test_csvfile,
                                       transform=transforms.Compose([
                                           transforms.ToTensor(),
                                           transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
                                       ]))
    self.train_data_loader = DataLoader(dataset=self.train_dataset,
                                        batch_size=self.args.batch_size,
                                        shuffle=True)
Example #4
def encode(args):
    enc = Encoder()
    npz.load_npz(args.enc, enc)
    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        enc.to_gpu()
    xp = enc.xp

    image = CelebADataset([args.infile])[0]
    x = Variable(xp.asarray([image])) / 255.
    x = F.resize_images(x, (64, 64))

    with chainer.using_config('train', False):
        z = enc(x)
    return z, x.data[0]
Example #5
def get_vector(enc, image_files, args):
    # Setup dataset
    dataset = CelebADataset(paths=image_files, root=args.dataset)
    dataset_iter = chainer.iterators.SerialIterator(dataset,
                                                    args.batchsize,
                                                    repeat=False,
                                                    shuffle=False)

    # Infer
    vec_list = []
    for batch in dataset_iter:
        x_array = convert.concat_examples(batch, args.gpu) / 255.
        x_array = F.resize_images(x_array, (64, 64))
        y_array = enc(x_array).data
        if args.gpu >= 0:
            y_array = chainer.cuda.to_cpu(y_array)

        vec_list.append(y_array)

    vector = np.concatenate(vec_list, axis=0)
    vector = vector.mean(axis=0)
    return vector
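A common follow-up to get_vector is simple vector arithmetic between the mean encodings of two groups of images, e.g. to obtain an attribute direction. The sketch below is hypothetical: smiling_files and neutral_files are not defined in the original code.

# Hypothetical usage: build an attribute direction from two image groups.
smiling_vec = get_vector(enc, smiling_files, args)
neutral_vec = get_vector(enc, neutral_files, args)
smile_direction = smiling_vec - neutral_vec  # can be added to an encoded z
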
Example #6
def conf():
    device = 'cuda:0'
    netG = NetG_super().cuda()
    netD = NetD().cuda()
    optimizerG = optim.Adam(netG.parameters(), 0.0002, betas=(0.5, 0.999))
    optimizerD = optim.Adam(netD.parameters(), 0.0001, betas=(0.5, 0.999))
    epoch = 100
    cuda = True
    param = None
    f = ConvNoise(9, 0.01)

    dataset = CelebADataset(
        "/net/girlschool/besnier/CelebA_dataset/img_align_celeba", f,
        transforms.Compose([
            transforms.Resize(64),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ]))

    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=64,
                                             shuffle=True,
                                             num_workers=1,
                                             drop_last=True)
Example #7
def main():
    parser = argparse.ArgumentParser(description='Train GAN')
    parser.add_argument('--batchsize',
                        '-b',
                        type=int,
                        default=64,
                        help='Number of images in each mini-batch')
    parser.add_argument('--epoch',
                        '-e',
                        type=int,
                        default=100,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--gpu',
                        '-g',
                        type=int,
                        default=-1,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--dataset',
                        '-i',
                        default='data/celebA/',
                        help='Directory of image files.')
    parser.add_argument('--out',
                        '-o',
                        default='result',
                        help='Directory to output the result')
    parser.add_argument('--resume',
                        '-r',
                        default='',
                        help='Resume the training from snapshot')
    parser.add_argument('--seed',
                        type=int,
                        default=0,
                        help='Random seed of z at visualization stage')
    parser.add_argument('--snapshot_interval',
                        type=int,
                        default=10000,
                        help='Interval of snapshot')
    parser.add_argument('--display_interval',
                        type=int,
                        default=1000,
                        help='Interval of displaying log to console')
    parser.add_argument('--unrolling_steps', type=int, default=0)
    args = parser.parse_args()

    print('GPU: {}'.format(args.gpu))
    print('# batchsize: {}'.format(args.batchsize))
    print('# epoch: {}'.format(args.epoch))
    print('')

    # Set up a neural network to train
    gen = Generator()
    dis = Discriminator(unrolling_steps=args.unrolling_steps)

    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        gen.to_gpu()
        dis.to_gpu()

    # Setup an optimizer
    def make_optimizer(model, alpha=0.0002, beta1=0.5):
        optimizer = chainer.optimizers.Adam(alpha=alpha, beta1=beta1)
        optimizer.setup(model)
        optimizer.add_hook(chainer.optimizer.WeightDecay(0.0001), 'hook_dec')
        return optimizer

    opt_gen = make_optimizer(gen)
    opt_dis = make_optimizer(dis)

    # Setup a dataset
    all_files = os.listdir(args.dataset)
    image_files = [f for f in all_files if ('png' in f or 'jpg' in f)]
    print('{} contains {} image files'.format(args.dataset, len(image_files)))
    train = CelebADataset(paths=image_files, root=args.dataset)

    train_iter = chainer.iterators.SerialIterator(train, args.batchsize)

    # Set up a trainer
    updater = DCGANUpdater(models=(gen, dis),
                           iterator=train_iter,
                           optimizer={
                               'gen': opt_gen,
                               'dis': opt_dis
                           },
                           device=args.gpu)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)

    snapshot_interval = (args.snapshot_interval, 'iteration')
    display_interval = (args.display_interval, 'iteration')
    trainer.extend(extensions.snapshot(
        filename='snapshot_gan_iter_{.updater.iteration}.npz'),
                   trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(
        gen, 'gen_iter_{.updater.iteration}.npz'),
                   trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(
        dis, 'dis_iter_{.updater.iteration}.npz'),
                   trigger=snapshot_interval)
    trainer.extend(
        extensions.LogReport(trigger=display_interval,
                             log_name='train_gan.log'))
    trainer.extend(extensions.PrintReport([
        'epoch',
        'iteration',
        'gen/loss',
        'dis/loss',
    ]),
                   trigger=display_interval)
    trainer.extend(
        extensions.PlotReport(['gen/loss', 'dis/loss'],
                              trigger=display_interval,
                              file_name='gan-loss.png'))
    trainer.extend(extensions.ProgressBar(update_interval=10))
    trainer.extend(out_generated_image(gen, dis, 10, 10, args.seed, args.out),
                   trigger=snapshot_interval)

    if args.resume:
        chainer.serializers.load_npz(args.resume, trainer)

    # Run the training
    trainer.run()
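The gen_iter_*.npz snapshots written by this trainer can later be reloaded to draw samples. A minimal sketch, assuming the Generator exposes make_hidden(n) as in the standard Chainer DCGAN example (that helper is not shown in this snippet):

gen = Generator()
chainer.serializers.load_npz('result/gen_iter_10000.npz', gen)  # path assumed
with chainer.using_config('train', False):
    z = gen.make_hidden(10)   # assumed helper that samples latent vectors
    samples = gen(z).data     # batch of generated images
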
Example #8
def main():
    parser = argparse.ArgumentParser(description='Train Encoder')
    parser.add_argument('--batchsize', '-b', type=int, default=64,
                        help='Number of images in each mini-batch')
    parser.add_argument('--epoch', '-e', type=int, default=100,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--gpu', '-g', type=int, default=-1,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--dataset', '-i', default='data/celebA/',
                        help='Directory of image files.')
    parser.add_argument('--out', '-o', default='result',
                        help='Directory to output the result')
    parser.add_argument('--resume', '-r', default='',
                        help='Resume the training from snapshot')
    parser.add_argument('--snapshot_interval', type=int, default=10000,
                        help='Interval of snapshot')
    parser.add_argument('--display_interval', type=int, default=1000,
                        help='Interval of displaying log to console')
    parser.add_argument('--gen', default='gen.npz')
    parser.add_argument('--enc', default=None)
    args = parser.parse_args()

    print('GPU: {}'.format(args.gpu))
    print('# batchsize: {}'.format(args.batchsize))
    print('# epoch: {}'.format(args.epoch))
    print('')

    # Set up a neural network to train
    gen = Generator()
    npz.load_npz(args.gen, gen)
    enc = Encoder()
    if args.enc is not None:
        npz.load_npz(args.enc, enc)

    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        gen.to_gpu()
        enc.to_gpu()

    # Setup an optimizer
    def make_optimizer(model, alpha=0.0005, beta1=0.9):
        optimizer = chainer.optimizers.Adam(alpha=alpha, beta1=beta1)
        optimizer.setup(model)
        optimizer.add_hook(chainer.optimizer.WeightDecay(0.0001), 'hook_dec')
        return optimizer
    opt_gen = make_optimizer(gen)
    gen.disable_update()
    opt_enc = make_optimizer(enc)

    # Setup a dataset
    all_files = os.listdir(args.dataset)
    image_files = [f for f in all_files if ('png' in f or 'jpg' in f)]
    print('{} contains {} image files'.format(args.dataset, len(image_files)))
    train = CelebADataset(paths=image_files, root=args.dataset)

    train_iter = chainer.iterators.SerialIterator(train, args.batchsize)

    # Set up a trainer
    updater = EncUpdater(
        models=(gen, enc),
        iterator=train_iter,
        optimizer={'gen': opt_gen, 'enc': opt_enc},
        device=args.gpu)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)

    snapshot_interval = (args.snapshot_interval, 'iteration')
    display_interval = (args.display_interval, 'iteration')
    trainer.extend(
        extensions.snapshot(filename='snapshot_enc_iter_{.updater.iteration}.npz'),
        trigger=snapshot_interval)
    trainer.extend(extensions.ExponentialShift(
        'alpha', 0.5, optimizer=opt_enc), trigger=(10, 'epoch'))
    trainer.extend(extensions.snapshot_object(
        enc, 'enc_iter_{.updater.iteration}.npz'), trigger=snapshot_interval)
    trainer.extend(extensions.LogReport(trigger=display_interval, log_name='train_enc.log'))
    trainer.extend(extensions.PrintReport([
        'epoch', 'iteration', 'enc/loss',
    ]), trigger=display_interval)
    trainer.extend(extensions.PlotReport(
        ['enc/loss'], trigger=display_interval, file_name='enc-loss.png'))
    trainer.extend(extensions.ProgressBar(update_interval=10))

    if args.resume:
        chainer.serializers.load_npz(args.resume, trainer)

    # Run the training
    trainer.run()
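Note that gen.disable_update() freezes the generator's parameters, so only the encoder is actually optimized here; the pre-trained generator loaded from --gen presumably acts as a fixed decoder that the encoder learns to invert.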
Example #9
                return F.binary_cross_entropy_with_logits(
                    x.reshape(-1), torch.ones(x.shape[0], device=x.device))
        elif args.loss == 'mse':

            def lossDreal(x):
                return ((x - 1)**2).mean()

            def lossDfake(x):
                return (x**2).mean()

            def lossG(x):
                return ((x - 1)**2).mean()

        if args.dataset == 'celeba':
            loader = torch.utils.data.DataLoader(CelebADataset(
                torchvision.datasets.CelebA('/opt/data', 'all', download=True),
                args.size, args.zsize),
                                                 batch_size=args.batchsize,
                                                 num_workers=4,
                                                 shuffle=True)
        else:
            assert False, 'celeba is allowed only.'
        if args.optimizer == 'adam':
            optimizer = torch.optim.Adam
        if args.model == 'dcgan':
            model = DCGAN(optimizerG=optimizer,
                          optimizerD=optimizer,
                          lossDreal=lossDreal,
                          lossDfake=lossDfake,
                          lossG=lossG,
                          zsize=args.zsize,
Example #10
def train(config):
    # ******************************************************************************************************************
    # * Build logger
    # ******************************************************************************************************************

    time_now = datetime.datetime.now().strftime('%m-%d-%H%M%S')
    if not os.path.exists('logs'):
        os.makedirs('logs')
    logger = create_logger(
        logger_name='main_logger',
        log_format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        log_path='logs/train_{}.log'.format(time_now))

    log_list = {'real_loss_D': [], 'fake_loss_D': [], 'loss_G': []}

    # ******************************************************************************************************************
    # * Build dataset
    # ******************************************************************************************************************
    # Generate fixed input for debugging

    fixed_z = generate_z(config.z_dim, config.batch_size)
    dataset = CelebADataset(config)
    fixed_real_batch, fixed_pose_batch = dataset.generate_batch()
    dataset.current_index = 0  # Reset dataset index

    if torch.cuda.is_available():
        fixed_z = fixed_z.cuda()
    fixed_real_batch = to_torch(fixed_real_batch)
    fixed_pose_batch = to_torch(fixed_pose_batch)
    fixed_real_batch_pose = torch.cat(
        (fixed_real_batch, fixed_pose_batch.view(-1, 3, 1, 1).expand(
            -1, -1, config.image_size, config.image_size)),
        dim=1)
    fixed_z_pose = torch.cat((fixed_z, fixed_pose_batch), dim=1)

    fixed_target_pose = get_target_pose()
    fixed_z_pose_vary = torch.cat((fixed_z[:8].view(8, 1, config.z_dim).expand(
        -1, 8, -1).contiguous().view(64, config.z_dim), fixed_target_pose),
                                  dim=1)

    # ******************************************************************************************************************
    # * Build model and optimizer
    # ******************************************************************************************************************

    D = Discriminator()
    G = Generator(input_dim=(config.z_dim + 3), output_channel=3)
    if torch.cuda.is_available():
        D = D.cuda()
        G = G.cuda()

    D.apply(weights_init)
    G.apply(weights_init)

    optimizerD = torch.optim.Adam(D.parameters(),
                                  betas=(0.9, 0.999),
                                  lr=config.lr)
    optimizerG = torch.optim.Adam(G.parameters(),
                                  betas=(0.9, 0.999),
                                  lr=config.lr)

    # ******************************************************************************************************************
    # * Train!
    # ******************************************************************************************************************
    kt = 0.0
    step_num = (config.epoch * dataset.samples_num) // config.batch_size
    z = generate_z(config.z_dim, config.batch_size)
    checkpoint_dir = 'checkpoints'
    if torch.cuda.is_available():
        z = z.cuda()
    for step in range(step_num):
        real_batch, pose_batch = dataset.generate_batch()
        real_batch = to_torch(real_batch)
        pose_batch = to_torch(pose_batch)
        z.data.uniform_(-1, 1)
        z_pose = torch.cat((z, pose_batch), dim=1)

        # Train the discriminator
        G.zero_grad()
        D.zero_grad()
        fake_batch = G(z_pose)
        real_batch_pose = torch.cat(
            (real_batch, pose_batch.view(-1, 3, 1, 1).expand(
                -1, -1, config.image_size, config.image_size)),
            dim=1)
        fake_batch_pose = torch.cat(
            (fake_batch.detach(), pose_batch.view(-1, 3, 1, 1).expand(
                -1, -1, config.image_size, config.image_size)),
            dim=1)

        real_output_D = D(real_batch_pose)
        fake_output_D = D(fake_batch_pose)

        real_loss_D = torch.mean(torch.abs(real_output_D - real_batch_pose))
        fake_loss_D = torch.mean(torch.abs(fake_output_D - fake_batch_pose))
        loss_D = real_loss_D - kt * fake_loss_D

        loss_D.backward()
        optimizerD.step()

        log_list['real_loss_D'].append(real_loss_D.item())
        log_list['fake_loss_D'].append(fake_loss_D.item())

        # Train the generator
        G.zero_grad()
        D.zero_grad()
        fake_batch = G(z_pose)
        fake_batch_pose = torch.cat(
            (fake_batch, pose_batch.view(-1, 3, 1, 1).expand(
                -1, -1, config.image_size, config.image_size)),
            dim=1)
        fake_output_G = D(fake_batch_pose)

        loss_G = torch.mean(torch.abs(fake_output_G - fake_batch_pose))

        loss_G.backward()
        optimizerG.step()

        log_list['loss_G'].append(loss_G.item())

        balance = (config.gamma * real_loss_D - fake_loss_D).item()
        kt = kt + config.lambda_k * balance
        kt = max(min(1, kt), 0)
        measure = real_loss_D.item() + np.abs(balance)

        # Log and Save
        if step % config.verbose_steps == 0:
            logger.info(
                'It: {}\treal_loss_D: {:.4f}\tfake_loss_D: {:.4f}\tloss_G: {:.4f}\tkt: {:.4f}\tmeasure: {:.4f}'
                .format(step, np.mean(log_list['real_loss_D']),
                        np.mean(log_list['fake_loss_D']),
                        np.mean(log_list['loss_G']), kt, measure))
            for k in log_list.keys():
                log_list[k] = []

        if step % config.save_steps == 0:
            if not os.path.exists(checkpoint_dir):
                os.makedirs(checkpoint_dir)

            torch.save(
                G.state_dict(),
                os.path.join(checkpoint_dir, 'checkpoint_G_{}'.format(step)))
            torch.save(
                D.state_dict(),
                os.path.join(checkpoint_dir, 'checkpoint_D_{}'.format(step)))
            D.eval()
            G.eval()
            fixed_real_output_D = D(fixed_real_batch_pose)
            fixed_fake_batch = G(fixed_z_pose)
            fixed_fake_batch_vary = G(fixed_z_pose_vary)
            draw_debug_image(
                fixed_real_batch_pose,
                os.path.join(checkpoint_dir, '{}_debug_real'.format(step)))
            draw_debug_image(
                fixed_real_output_D,
                os.path.join(checkpoint_dir, '{}_debug_real_D'.format(step)))
            draw_debug_image(
                fixed_fake_batch,
                os.path.join(checkpoint_dir, '{}_debug_fake'.format(step)))
            draw_debug_image(
                fixed_fake_batch_vary,
                os.path.join(checkpoint_dir,
                             '{}_debug_fake_fixed'.format(step)))
            D.train()
            G.train()

    if not os.path.exists(checkpoint_dir):
        os.makedirs(checkpoint_dir)
    torch.save(G.state_dict(),
               os.path.join(checkpoint_dir, 'checkpoint_final_G'))
    torch.save(D.state_dict(),
               os.path.join(checkpoint_dir, 'checkpoint_final_D'))
    D.eval()
    G.eval()
    fixed_real_output_D = D(fixed_real_batch_pose)
    fixed_fake_batch = G(fixed_z_pose)
    fixed_fake_batch_vary = G(fixed_z_pose_vary)
    draw_debug_image(
        fixed_real_batch_pose,
        os.path.join(checkpoint_dir, '{}_debug_real'.format(step)))
    draw_debug_image(
        fixed_real_output_D,
        os.path.join(checkpoint_dir, '{}_debug_real_D'.format(step)))
    draw_debug_image(
        fixed_fake_batch,
        os.path.join(checkpoint_dir, '{}_debug_fake'.format(step)))
    draw_debug_image(
        fixed_fake_batch_vary,
        os.path.join(checkpoint_dir, '{}_debug_fake_fixed'.format(step)))
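The kt update inside the loop above is a BEGAN-style proportional control of the real/fake loss balance. Restated in isolation with the same names (a sketch, not part of the original script):

def update_kt(kt, real_loss_D, fake_loss_D, gamma, lambda_k):
    # gamma sets the target fake/real loss ratio, lambda_k the update rate;
    # kt is clamped to [0, 1] exactly as in the training loop above.
    balance = gamma * real_loss_D - fake_loss_D
    kt = max(min(1.0, kt + lambda_k * balance), 0.0)
    measure = real_loss_D + abs(balance)  # the convergence value that is logged
    return kt, measure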