Example #1
def create_generator(params):
    '''
    Define the generator and its Adam optimizer.
    '''
    gen = Generator(params).to(params['device'])
    gen_opt = torch.optim.Adam(gen.parameters(), lr=params['lr'], betas=(params['beta_1'],params['beta_2']))
    gen = gen.apply(weights_init)
    return gen, gen_opt
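
The weights_init helper is assumed to be defined elsewhere in the project. A minimal sketch of the usual DCGAN initialization it most likely performs (conv weights from N(0, 0.02), BatchNorm scale from N(1, 0.02)); the isinstance checks are an assumption about the layer types involved:

import torch.nn as nn

def weights_init(m):
    # DCGAN-style init: conv weights ~ N(0, 0.02), batch-norm scale ~ N(1, 0.02)
    if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
        nn.init.normal_(m.weight, 0.0, 0.02)
    elif isinstance(m, nn.BatchNorm2d):
        nn.init.normal_(m.weight, 1.0, 0.02)
        nn.init.constant_(m.bias, 0)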
Example #2
def main():
    seed = tf.random.normal([NUM_OF_EXAMPLES, HIDDEN_DIMS])
    train_dataset = get_data(BUFFER_SIZE, BATCH_SIZE)
    generator = Generator()
    discriminator = Discriminator()

    generator_optim = optimizers.Adam(learning_rate=LR_RATE)
    discriminator_optim = optimizers.Adam(learning_rate=LR_RATE)

    checkpoint_dir = './dcgan_training_checkpoints'
    checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
    checkpoint = tf.train.Checkpoint(
        generator_optimizer=generator_optim,
        discriminator_optimizer=discriminator_optim,
        generator=generator,
        discriminator=discriminator)

    print('Start')
    for epoch in range(EPOCHS):
        print('Epoch {} / {}'.format(epoch + 1, EPOCHS))
        start = time.time()
        for train_batch in train_dataset:
            input_noise = tf.random.normal([BATCH_SIZE, HIDDEN_DIMS])

            with tf.GradientTape() as gen_tape, tf.GradientTape() as dis_tape:
                gen_loss = generator_loss(generator,
                                          discriminator,
                                          input_noise,
                                          training=True)
                dis_loss = discriminator_loss(generator,
                                              discriminator,
                                              train_batch,
                                              input_noise,
                                              training=True)

            gradient_gen = gen_tape.gradient(gen_loss,
                                             generator.trainable_variables)
            gradient_dis = dis_tape.gradient(dis_loss,
                                             discriminator.trainable_variables)
            generator_optim.apply_gradients(
                zip(gradient_gen, generator.trainable_variables))
            discriminator_optim.apply_gradients(
                zip(gradient_dis, discriminator.trainable_variables))

        save_images(generator, seed, epoch)

        if (epoch + 1) % 15 == 0:
            checkpoint.save(file_prefix=checkpoint_prefix)

        stop = time.time()
        print('Epoch {} took {:.2f} seconds'.format(epoch + 1, stop - start))

    print('Done!')
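
generator_loss and discriminator_loss are not shown. A plausible sketch using the standard non-saturating GAN losses with from-logits binary cross-entropy; the signatures (including the training flag) are inferred from the call sites above:

import tensorflow as tf

bce = tf.keras.losses.BinaryCrossentropy(from_logits=True)

def generator_loss(generator, discriminator, input_noise, training):
    # The generator succeeds when the discriminator scores its output as real (1)
    fake = generator(input_noise, training=training)
    fake_logits = discriminator(fake, training=training)
    return bce(tf.ones_like(fake_logits), fake_logits)

def discriminator_loss(generator, discriminator, real_batch, input_noise, training):
    # The discriminator should score real images as 1 and generated ones as 0
    fake = generator(input_noise, training=training)
    real_logits = discriminator(real_batch, training=training)
    fake_logits = discriminator(fake, training=training)
    return (bce(tf.ones_like(real_logits), real_logits)
            + bce(tf.zeros_like(fake_logits), fake_logits))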
Example #3
    def create_generator(self):
        enc_filters = [64, 128, 256, 512, 512]
        enc_strides = [1, 2, 2, 2, 2]
        enc_dropout = [0, 0, 0, 0, 0]
        encode_params = (enc_filters, enc_strides, enc_dropout)

        dec_filters = [512, 256, 128, 64]
        dec_strides = [2, 2, 2, 2]
        dec_dropout = [0.5, 0, 0, 0]
        decode_params = (dec_filters, dec_strides, dec_dropout)

        return Generator(encode_params, decode_params)
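
The Generator here looks like a pix2pix-style encoder/decoder and is defined elsewhere. As a purely hypothetical sketch, such parallel (filters, strides, dropout) lists usually map one-to-one onto down- or up-sampling blocks:

def build_blocks(params, block_fn):
    # Hypothetical helper: block_fn would be a downsample or upsample block factory
    filters, strides, dropouts = params
    return [block_fn(f, s, d) for f, s, d in zip(filters, strides, dropouts)]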
Example #4
def DCGAN_run(num):
    parser = argparse.ArgumentParser(description='Chainer: generate MNIST images with a trained DCGAN')
    parser.add_argument('--n_hidden', '-n', type=int, default=100,
                        help='Number of hidden units (z)')
    parser.add_argument('--epoch', '-e', type=int, default=100,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--rows', '-r', type=int, default=1,
                        help='Number of rows in the image')
    parser.add_argument('--cols', '-c', type=int, default=1,
                        help='Number of cols in the image')
    parser.add_argument('--out', '-o', default='generate_image',
                        help='Directory to output the result')
    parser.add_argument('--seed', type=int, default=num,
                        help='Random seed of z at visualization stage')
    args = parser.parse_args()

    print('# n_hidden: {}'.format(args.n_hidden))
    print('# epoch: {}'.format(args.epoch))
    print('# Number of rows in the image: {}'.format(args.rows))
    print('# Number of cols in the image: {}'.format(args.cols))
    print('')

    gen = Generator(n_hidden=args.n_hidden)
    chainer.serializers.load_npz('gen_epoch_3.npz', gen)

    np.random.seed(args.seed)
    n_images = args.rows * args.cols
    xp = gen.xp
    z = chainer.Variable(xp.asarray(gen.make_hidden(n_images)))

    x = gen(z)
    x = chainer.cuda.to_cpu(x.data)

    # gen_output_activation_func is sigmoid (0 ~ 1)
    x = np.asarray(np.clip(x * 255, 0.0, 255.0), dtype=np.uint8)
    # gen output_activation_func is tanh (-1 ~ 1)
    # x = np.asarray(np.clip((x+1) * 0.5 * 255, 0.0, 255.0), dtype=np.uint8)
    _, _, H, W = x.shape
    x = x.reshape((args.rows, args.cols, 1, H, W))
    x = x.transpose(0, 3, 1, 4, 2)
    x = x.reshape((args.rows * H, args.cols * W))


    preview_path = os.path.join(args.out, '{}.png'.format(num))
    print(preview_path)

    Image.fromarray(x).save(preview_path)
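
gen.make_hidden is a method of the Generator class and is not shown. In the stock Chainer DCGAN example it draws uniform latent noise shaped for the first deconvolution; a sketch under that assumption:

import numpy as np

# Inside the Generator class (hypothetical reconstruction):
def make_hidden(self, batchsize):
    # Uniform latent z in [-1, 1], shaped (batch, n_hidden, 1, 1)
    return np.random.uniform(
        -1, 1, (batchsize, self.n_hidden, 1, 1)).astype(np.float32)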
Example #5
def main():
    parser = argparse.ArgumentParser(description='Chainer: DCGAN MNIST')
    parser.add_argument('--batchsize',
                        '-b',
                        type=int,
                        default=50,
                        help='Number of images in each mini-batch')
    parser.add_argument('--epoch',
                        '-e',
                        type=int,
                        default=100,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--gpu',
                        '-g',
                        type=int,
                        default=0,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--out',
                        '-o',
                        default='result',
                        help='Directory to output the result')
    parser.add_argument('--resume',
                        '-r',
                        default='',
                        help='Resume the training from snapshot')
    parser.add_argument('--n_hidden',
                        '-n',
                        type=int,
                        default=100,
                        help='Number of hidden units (z)')
    parser.add_argument('--seed',
                        type=int,
                        default=0,
                        help='Random seed of z at visualization stage')
    parser.add_argument('--snapshot_interval',
                        type=int,
                        default=1000,
                        help='Interval of snapshot')
    parser.add_argument('--display_interval',
                        type=int,
                        default=10,
                        help='Interval of displaying log to console')
    args = parser.parse_args()

    print('GPU: {}'.format(args.gpu))
    print('# Minibatch-size: {}'.format(args.batchsize))
    print('# n_hidden: {}'.format(args.n_hidden))
    print('# epoch: {}'.format(args.epoch))
    print('')

    # Set up a neural network to train
    gen = Generator(n_hidden=args.n_hidden)
    dis = Discriminator()

    if args.gpu >= 0:
        # Make a specified GPU current
        chainer.cuda.get_device_from_id(args.gpu).use()
        gen.to_gpu()  # Copy the model to the GPU
        dis.to_gpu()

    # Setup an optimizer
    def make_optimizer(model, alpha=0.0002, beta1=0.5):
        optimizer = chainer.optimizers.Adam(alpha=alpha, beta1=beta1)
        optimizer.setup(model)
        optimizer.add_hook(chainer.optimizer.WeightDecay(0.0001), 'hook_dec')
        return optimizer

    opt_gen = make_optimizer(gen)
    opt_dis = make_optimizer(dis)

    # Load the MNIST dataset
    train, _ = chainer.datasets.get_mnist(
        withlabel=False, ndim=3, scale=255.)  # ndim=3 : (ch, height, width)
    train_iter = chainer.iterators.SerialIterator(train, args.batchsize)

    # Set up a trainer
    updater = DCGANUpdater(models=(gen, dis),
                           iterator=train_iter,
                           optimizer={
                               'gen': opt_gen,
                               'dis': opt_dis
                           },
                           device=args.gpu)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)

    epoch_interval = (1, 'epoch')
    snapshot_interval = (args.snapshot_interval, 'iteration')
    display_interval = (args.display_interval, 'iteration')
    # trainer.extend(extensions.snapshot(filename='snapshot_iter_{.updater.iteration}.npz'), trigger=snapshot_interval)
    # trainer.extend(extensions.snapshot_object(gen, 'gen_iter_{.updater.iteration}.npz'), trigger=snapshot_interval)
    # trainer.extend(extensions.snapshot_object(dis, 'dis_iter_{.updater.iteration}.npz'), trigger=snapshot_interval)
    trainer.extend(
        extensions.snapshot(filename='snapshot_epoch_{.updater.epoch}.npz'),
        trigger=epoch_interval)
    trainer.extend(extensions.snapshot_object(
        gen, 'gen_epoch_{.updater.epoch}.npz'),
                   trigger=epoch_interval)
    trainer.extend(extensions.snapshot_object(
        dis, 'dis_epoch_{.updater.epoch}.npz'),
                   trigger=epoch_interval)
    trainer.extend(extensions.LogReport(trigger=display_interval))
    trainer.extend(extensions.PrintReport([
        'epoch',
        'iteration',
        'gen/loss',
        'dis/loss',
    ]),
                   trigger=display_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))
    trainer.extend(out_generated_image(gen, dis, 10, 10, args.seed, args.out),
                   trigger=epoch_interval)

    if args.resume:
        # Resume from a snapshot
        chainer.serializers.load_npz(args.resume, trainer)

    # Run the training
    trainer.run()
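
DCGANUpdater is defined elsewhere. A minimal sketch of its usual shape, modeled on the official Chainer DCGAN example; the softplus loss formulation and the [0, 1] rescaling are assumptions:

import chainer
import chainer.functions as F

class DCGANUpdater(chainer.training.StandardUpdater):
    def __init__(self, *, models, **kwargs):
        self.gen, self.dis = models
        super().__init__(**kwargs)

    def update_core(self):
        batch = self.get_iterator('main').next()
        # The dataset was loaded with scale=255., so rescale to [0, 1]
        x_real = chainer.Variable(self.converter(batch, self.device)) / 255.
        xp = chainer.cuda.get_array_module(x_real.data)

        # Non-saturating GAN losses in softplus form
        y_real = self.dis(x_real)
        z = chainer.Variable(xp.asarray(self.gen.make_hidden(len(batch))))
        x_fake = self.gen(z)
        y_fake = self.dis(x_fake)
        dis_loss = F.mean(F.softplus(-y_real)) + F.mean(F.softplus(y_fake))
        gen_loss = F.mean(F.softplus(-y_fake))

        self.dis.cleargrads()
        dis_loss.backward()
        self.get_optimizer('dis').update()

        self.gen.cleargrads()
        gen_loss.backward()
        self.get_optimizer('gen').update()

        chainer.report({'loss': gen_loss}, self.gen)
        chainer.report({'loss': dis_loss}, self.dis)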
Example #6
net.fc = torch.nn.Linear(in_features=2048, out_features=2, bias=True)

#optimizer = torch.optim.SGD(list(net.parameters())[:], lr=0.001, momentum=0.9)
optimizer = torch.optim.Adam(net.parameters(), lr=0.001, betas=(0.9, 0.99))
# criterion = torch.nn.CrossEntropyLoss()  # was overwritten below; only BCELoss is used
criterion = torch.nn.BCELoss()

def weights_init(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        m.weight.data.normal_(0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)

netG = Generator()
netG.apply(weights_init)
netD = Discriminator()
netD.apply(weights_init)
print(netG)
print(netD)

optimizerD = optim.Adam(netD.parameters(), lr=0.0001, betas=(0.9, 0.999))
optimizerG = optim.Adam(netG.parameters(), lr=0.0001, betas=(0.9, 0.999))

real_label = 1
fake_label = 0
fixed_noise = torch.randn(batch_size, nz, 1, 1)

for epoch in range(100):
    for i, data in enumerate(train_loader, 0):
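        # The loop body is truncated in this snippet. Below is a hypothetical
        # continuation showing a typical DCGAN inner step; it assumes netD ends
        # in a sigmoid (so BCELoss applies) and uses nz / real_label / fake_label
        # from the surrounding script.
        real, _ = data
        b_size = real.size(0)

        # Discriminator step: push D(real) toward 1 and D(G(z)) toward 0
        netD.zero_grad()
        label = torch.full((b_size,), real_label, dtype=torch.float)
        output = netD(real).view(-1)
        errD_real = criterion(output, label)
        errD_real.backward()

        noise = torch.randn(b_size, nz, 1, 1)
        fake = netG(noise)
        label.fill_(fake_label)
        output = netD(fake.detach()).view(-1)
        errD_fake = criterion(output, label)
        errD_fake.backward()
        optimizerD.step()

        # Generator step: push D(G(z)) toward 1
        netG.zero_grad()
        label.fill_(real_label)
        output = netD(fake).view(-1)
        errG = criterion(output, label)
        errG.backward()
        optimizerG.step()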
Example #7
def train():
    """Train DCGAN and save the generator and discrinator."""

    torch.manual_seed(7)
    epochs = 200
    z_dim = 100
    batch_size = 256

    lr = 0.0003  # A learning rate of 0.0002 works well on DCGAN
    beta_1 = 0.5
    beta_2 = 0.999

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.4914, 0.4822, 0.4465], [0.247, 0.243, 0.261])
    ])

    dataset = datasets.ImageFolder(os.path.join(DATA_DIR, "train"), transform)
    dataloader = DataLoader(dataset,
                            batch_size=batch_size,
                            shuffle=True,
                            num_workers=4,
                            drop_last=True)

    gen = Generator(z_dim).to(device)
    gen_optimizer = torch.optim.Adam(gen.parameters(),
                                     lr=lr,
                                     betas=(beta_1, beta_2))

    disc = Discriminator().to(device)
    disc_optimizer = torch.optim.Adam(disc.parameters(),
                                      lr=lr * 1.5,
                                      betas=(beta_1, beta_2))

    gen = gen.apply(weights_init)
    disc = disc.apply(weights_init)

    c_lambda = 10  # weight of gradient penalty in loss

    for epoch in range(epochs):
        print("Epoch:   ", epoch + 1, end='\n')
        total_discriminator_loss = 0
        total_generator_loss = 0
        display_fake = None

        for i, (real, _) in enumerate(dataloader):
            real = real.to(device)

            # UPDATE DISCRIMINATOR
            for _ in range(5):
                disc_optimizer.zero_grad()
                noise = torch.randn(batch_size, z_dim, device=device)
                fake = gen(noise)

                disc_fake_pred = disc(fake.detach())
                disc_real_pred = disc(real)

                # epsilon: random interpolation weight between real and fake for the gradient penalty
                epsilon = torch.rand(len(real),
                                     1,
                                     1,
                                     1,
                                     device=device,
                                     requires_grad=True)

                gradient = get_gradient(disc, real, fake.detach(), epsilon)
                gp = grad_penalty(gradient)

                # critic scores on real should rise and on fake fall, so minimize the negated difference plus the penalty
                disc_loss = -torch.mean(disc_real_pred) + torch.mean(
                    disc_fake_pred) + c_lambda * gp

                total_discriminator_loss += disc_loss.item()
                disc_loss.backward(retain_graph=True)
                # if i % 2 == 0:
                disc_optimizer.step()

            # UPDATE GENERATOR
            gen_optimizer.zero_grad()

            noise = torch.randn(batch_size, z_dim, device=device)
            fake = gen(noise)
            display_fake = fake
            disc_fake_pred = disc(fake)  # Notice no detach

            # for generator, critic prediction should be higher
            gen_loss = -torch.mean(disc_fake_pred)
            gen_loss.backward()
            gen_optimizer.step()

            total_generator_loss += gen_loss.item()

            print(
                'Discriminator Loss: {:.4f} \t Generator Loss: {:.4f} \t Done: {:.4f}'
                .format(total_discriminator_loss / (i + 1),
                        total_generator_loss / (i + 1), i / len(dataloader)),
                end='\r')

        if (epoch + 1) % 5 == 0:
            show_tensor_images(display_fake, epoch=epoch)
            torch.save(gen.state_dict(),
                       "saved_gen/wgan_gp_gen_{}.pth".format(epoch))
Example #8
transforms = transforms.Compose(
    [
        transforms.Resize(IMAGE_SIZE),
        transforms.ToTensor(),
        transforms.Normalize(
            [0.5 for _ in range(CHANNELS_IMG)], [0.5 for _ in range(CHANNELS_IMG)]
        ),
    ]
)

# If you train on MNIST, remember to set CHANNELS_IMG to 1
# dataset = datasets.MNIST(root="dataset/", train=True, transform=transforms, download=True)
# Uncomment the MNIST line above and comment out the ImageFolder line below to train on MNIST

dataset = datasets.ImageFolder(root="dataset", transform=transforms)
dataloader = DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True)
gen = Generator(NOISE_DIM, CHANNELS_IMG, FEATURES_GEN).to(device)
disc = Discriminator(CHANNELS_IMG, FEATURES_DISC).to(device)
initialize_weights(gen)
initialize_weights(disc)

opt_gen = optim.Adam(gen.parameters(), lr=LEARNING_RATE, betas=(0.5, 0.999))
opt_disc = optim.Adam(disc.parameters(), lr=LEARNING_RATE, betas=(0.5, 0.999))
criterion = nn.BCELoss()

fixed_noise = torch.randn(32, NOISE_DIM, 1, 1).to(device)
writer_real = SummaryWriter("logs/real")
writer_fake = SummaryWriter("logs/fake")
step = 0

gen.train()
disc.train()
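
initialize_weights is assumed to be the usual DCGAN initializer; a minimal sketch under that assumption:

import torch.nn as nn

def initialize_weights(model):
    # DCGAN paper: initialize all conv and batch-norm weights from N(0, 0.02)
    for m in model.modules():
        if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d, nn.BatchNorm2d)):
            nn.init.normal_(m.weight.data, 0.0, 0.02)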
Example #9
def train():
    """Train DCGAN and save the generator and discrinator."""

    torch.manual_seed(1)
    epochs = 200
    z_dim = 100
    batch_size = 128

    lr = 0.0002
    beta_1 = 0.5
    beta_2 = 0.999

    criterion = nn.BCEWithLogitsLoss()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    transform = transforms.Compose([
            transforms.Resize(64),
            transforms.ToTensor(),
            # transforms.Normalize([0.4914, 0.4822, 0.4465], [0.247, 0.243, 0.261])
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), #seems to work better
            ])

    dataset = datasets.ImageFolder(os.path.join(DATA_DIR, "train"), transform)
    dataloader = DataLoader(dataset, batch_size, shuffle=True, num_workers=4, drop_last=True)

    # initialize generator and discriminator
    gen = Generator(z_dim).to(device)
    gen_optimizer = torch.optim.Adam(gen.parameters(), lr=lr, betas=(beta_1, beta_2))

    disc = Discriminator().to(device)
    disc_optimizer = torch.optim.Adam(disc.parameters(), lr=lr, betas=(beta_1, beta_2))

    gen = gen.apply(weights_init)
    disc = disc.apply(weights_init)

    # to show generated image examples and improvement over training
    fixed_noise = torch.randn(64, z_dim, device=device)

    for epoch in range(epochs):
        print("Epoch:   ", epoch + 1, end='\n')
        total_discriminator_loss = 0
        total_generator_loss = 0
        display_fake = None

        for i, (real, _) in enumerate(dataloader):
            real = real.to(device)

            # UPDATE DISCRIMINATOR
            disc_optimizer.zero_grad()
            
            noise = torch.randn(batch_size, z_dim, device=device)
            fake = gen(noise)
            # discriminator predictions on generated images
            disc_fake_pred = disc(fake.detach())
            disc_fake_loss = criterion(disc_fake_pred, torch.zeros_like(disc_fake_pred))
            disc_fake_loss.backward(retain_graph=True)
            # discriminator predictions on real images
            disc_real_pred = disc(real)
            disc_real_loss = criterion(disc_real_pred, torch.ones_like(disc_real_pred))
            disc_real_loss.backward(retain_graph=True)

            disc_loss = disc_fake_loss + disc_real_loss
            total_discriminator_loss += disc_loss.item()            
            
            # if i % 5 == 0:
            disc_optimizer.step()

            # UPDATE GENERATOR
            gen_optimizer.zero_grad()

            noise = torch.randn(batch_size, z_dim, device=device)
            fake = gen(noise)
            display_fake = fake
            disc_fake_pred = disc(fake)   # Notice no detach

            gen_loss = criterion(disc_fake_pred, torch.ones_like(disc_fake_pred))
            gen_loss.backward()
            gen_optimizer.step()

            total_generator_loss += gen_loss.item()

            print('Discriminator Loss: {:.4f} \t Generator Loss: {:.4f} \t Done: {:.4f}'.format(total_discriminator_loss/(i+1),
                total_generator_loss/(i+1), i/len(dataloader)), end='\r')

        if (epoch + 1) % 5 == 0:
            fixed_output = gen(fixed_noise)
            show_tensor_images(fixed_output, id_num=epoch)
            torch.save(gen.state_dict(), "saved_gen/gen_{}.pth".format(epoch))
        elif (epoch + 1) % 5 == 1:
            show_tensor_images(display_fake, id_num=epoch)
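
show_tensor_images is not shown. A plausible sketch that undoes the (0.5, 0.5) normalization and saves a grid preview; the save path and grid size are assumptions:

import matplotlib.pyplot as plt
from torchvision.utils import make_grid

def show_tensor_images(image_tensor, id_num=0, num_images=25):
    # Map tanh/Normalize output from [-1, 1] back to [0, 1] and save a grid
    image_tensor = (image_tensor + 1) / 2
    image_grid = make_grid(image_tensor[:num_images].detach().cpu(), nrow=5)
    plt.imshow(image_grid.permute(1, 2, 0).squeeze())
    plt.axis('off')
    plt.savefig('gen_images_{}.png'.format(id_num))
    plt.close()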
Example #10
# mnist_train = torchvision.datasets.EMNIST('./EMNIST_data', train=True, download=True, transform=transform, split="letters")
mnist_train = torchvision.datasets.MNIST('./MNIST_data',
                                         train=True,
                                         download=True,
                                         transform=transform)
train_loader = torch.utils.data.DataLoader(mnist_train,
                                           batch_size=batch_size,
                                           shuffle=True)
# mnist_test = torchvision.datasets.EMNIST('./EMNIST_data', train=False, download=True, transform=transform, split="letters")
# test_loader = torch.utils.data.DataLoader(mnist_test, batch_size=batch_size,  shuffle=True)

pretrained_generator = ConditionalGenerator()
pretrained_generator.load_state_dict(torch.load(pretrained_generator_filepath))

generator = Generator()
discriminator = Discriminator()
pretrained_discriminator = Discriminator()
pretrained_discriminator.load_state_dict(
    torch.load(pretrained_discriminator_filepath))

generator.deconv1 = pretrained_generator.input_layer1
# generator.deconv1.requires_grad = False
generator.deconv2 = pretrained_generator.input_layer2
# generator.deconv2.requires_grad = False
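
# Note: the commented-out requires_grad lines above would not actually freeze
# the transplanted layers; requires_grad must be set on each parameter (or via
# Module.requires_grad_(False)), not assigned as a plain module attribute.
# A minimal sketch:
for layer in (generator.deconv1, generator.deconv2):
    for p in layer.parameters():
        p.requires_grad = False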

if __name__ == "__main__":
    d_filename = "testD"
    g_filename = "testG"
    filename = "control"
    filenames = []
Example #11
real_batch = next(iter(dataloader))
plt.figure(figsize=(8, 8))
plt.axis("off")
plt.title("Training Images")
plt.imshow(
    np.transpose(
        vutils.make_grid(real_batch[0].to(device)[:64],
                         padding=2,
                         normalize=True).cpu(), (1, 2, 0)))
plt.savefig(os.path.join(args.save_img, 'training_images.png'))

# Loss function
criterion = torch.nn.BCELoss()

# Initialize generator and discriminator
netG = Generator(nc, nz, ngf, ndf, ngpu).to(device)
netD = Discriminator(nc, nz, ngf, ndf, ngpu).to(device)

# Handle multi-gpu if desired
if (device.type == 'cuda') and (ngpu > 1):
    netG = nn.DataParallel(netG, list(range(ngpu)))
    netD = nn.DataParallel(netD, list(range(ngpu)))

# Apply the weights_init function to randomly initialize all weights to mean=0, stdev=0.02
netG.apply(weights_init)
netD.apply(weights_init)

# Print the model
print(netG)
print(netD)
Example #12
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, ), (0.5, ))
])

dataset = torchvision.datasets.MNIST(root='dataset/',
                                     train=True,
                                     transform=transform,
                                     download=True)
dataloader = DataLoader(dataset=dataset, shuffle=True, batch_size=batch_size)

# GPU or CPU
device = 'cuda' if torch.cuda.is_available() else 'cpu'

# Model Initialization
netD = Discriminator(channels_img, features_d).train(mode=True).to(device)
netG = Generator(channels_noise, channels_img,
                 features_g).train(mode=True).to(device)

# Optimizers
optimizerD = optim.Adam(netD.parameters(),
                        lr=learning_rate,
                        betas=(0.5, 0.999))
optimizerG = optim.Adam(netG.parameters(),
                        lr=learning_rate,
                        betas=(0.5, 0.999))

# Loss
criterion = nn.BCELoss()

# Tensorboard init
writer_real = SummaryWriter('runs/GAN-MNIST/test-real')
writer_fake = SummaryWriter('runs/GAN-MNIST/test-fake')
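
The training loop itself is not part of this snippet. A hedged sketch of how the two writers are typically fed during training; fixed_noise, real (a batch from the dataloader), and the step counter are assumptions:

# Hypothetical logging step inside the (omitted) training loop
fixed_noise = torch.randn(32, channels_noise, 1, 1).to(device)
with torch.no_grad():
    fake = netG(fixed_noise)
    img_grid_real = torchvision.utils.make_grid(real[:32], normalize=True)
    img_grid_fake = torchvision.utils.make_grid(fake[:32], normalize=True)
    writer_real.add_image('MNIST real', img_grid_real, global_step=step)
    writer_fake.add_image('MNIST fake', img_grid_fake, global_step=step)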