Example #1
0
    # NOTE(review): incomplete fragment — the enclosing function is outside
    # this view; randomDim, ganInput, discriminator, adam, X_train, np, bar,
    # KModels and build_generator are presumably defined earlier. Wires a
    # generator through a discriminator to form the combined GAN model for a
    # Keras MNIST-style training loop.
    generator = build_generator(randomDim)
    x = generator(ganInput)
    ganOutput = discriminator(x)
    gan = KModels.Model(inputs=ganInput, outputs=ganOutput)
    gan.compile(loss='binary_crossentropy', optimizer=adam)

    dLosses = []  # discriminator losses (appended to outside this view)
    gLosses = []  # generator losses (appended to outside this view)
    batchSize = 32
    epochs = 20

    # Full batches per epoch; remainder samples are dropped by //.
    batchCount = X_train.shape[0] // batchSize
    print('Epochs:', epochs)
    print('Batch size:', batchSize)
    print('Batches per epoch:', batchCount)
    progBar = bar.ProgressBarGAN(epochs, batchCount, "D Loss:%.3f,G Loss:%.3f")
    samples_image = []
    for e in range(1, (epochs+1)):
        # Get a random set of input noise and images

        for _ in range(batchCount):
            # Latent noise ~ N(0, 1); real minibatch sampled with replacement.
            noise = np.random.normal(0, 1, size=[batchSize, randomDim])
            imageBatch = X_train[np.random.randint(0, X_train.shape[0], size=batchSize)]

            # Reshape to NHWC for 28x28 single-channel (MNIST-sized) images.
            imageBatch = np.reshape(imageBatch, newshape=(batchSize, 28,28,1))

            # Generate fake MNIST images
            generatedImages = generator.predict(noise)
            # print np.shape(imageBatch), np.shape(generatedImages)
            # Fragment is truncated here; real/fake labels and the D/G train
            # steps presumably follow outside this view.
            X = np.concatenate([imageBatch, generatedImages])
Example #2
0
            # NOTE(review): dangling tail of a SimpleMLP method whose def is
            # above this fragment; squashes the final layer output to (0, 1).
            return F.sigmoid(self.map3(x))

    # Two MLPs: G maps z_dim-dim noise to DIMENSION-dim samples, D maps a
    # sample to one real/fake score. z_dim, DIMENSION, cuda, iterations,
    # sampler, lut_2d and bs come from outside this view.
    generator = SimpleMLP(input_size=z_dim,
                          hidden_size=50,
                          output_size=DIMENSION)
    discriminator = SimpleMLP(input_size=DIMENSION,
                              hidden_size=100,
                              output_size=1)
    if cuda:
        generator.cuda()
        discriminator.cuda()
    criterion = nn.BCELoss()

    d_optimizer = tOpt.Adadelta(discriminator.parameters(), lr=1)
    g_optimizer = tOpt.Adadelta(generator.parameters(), lr=1)
    progBar = bar.ProgressBarGAN(1, iterations,
                                 "D Loss:(real/fake) %.3f/%.3f,G Loss:%.3f")
    for train_iter in range(1, iterations + 1):
        # Three discriminator updates per outer iteration.
        for d_index in range(3):
            # 1. Train D on real+fake
            discriminator.zero_grad()

            #  1A: Train D on real
            real_samples = sampler.sample_2d(lut_2d, bs)
            # Variable wrapper dates this to pre-0.4 PyTorch.
            d_real_data = Variable(torch.Tensor(real_samples))
            if cuda:
                d_real_data = d_real_data.cuda()
            d_real_decision = discriminator(d_real_data)
            labels = Variable(torch.ones(bs))
            if cuda:
                labels = labels.cuda()
            # Fragment truncated; the backward/step and 1B (fake) part
            # presumably follow outside this view.
            d_real_loss = criterion(d_real_decision, labels)  # ones = true
Example #3
0
# NOTE(review): incomplete fragment of a label-conditioned GAN script;
# build_discriminator/build_generator, adam, onehot, train_set, train_epoch
# and batch_size are defined outside this view.
discriminator = build_discriminator()
# Freeze D inside the combined model so training `gan` updates G only.
discriminator.trainable = False

ganInput = KLayers.Input(shape=(100, ))      # latent noise vector
ganInputlabel = KLayers.Input(shape=(10, ))  # presumably a one-hot class label

generator = build_generator()
x = generator([ganInput, ganInputlabel])

ganOutput = discriminator([x, ganInputlabel])
gan = KModels.Model(inputs=[ganInput, ganInputlabel], outputs=ganOutput)
gan.compile(loss='binary_crossentropy', optimizer=adam)

progBar = bar.ProgressBarGAN(train_epoch,
                             len(train_set) // batch_size,
                             "D Loss:%.3f,G Loss:%.3f")
samples_image = []
for epoch in range(train_epoch):
    for iter in range(len(train_set) // batch_size):
        noise = np.random.normal(0, 1, size=[batch_size, 100])
        # NOTE(review): randint's upper bound is exclusive, so label 9 is
        # never sampled — looks like an off-by-one in the original example.
        label = np.random.randint(0, 9, size=[batch_size, 1])
        label = onehot[label.astype(np.int32)].squeeze()
        generatedImages = generator.predict([noise, label])

        randomInt = np.random.randint(0, train_set.shape[0], size=batch_size)
        imageBatch = np.reshape(train_set[randomInt],
                                newshape=(batch_size, 784))

        X = np.concatenate([imageBatch, generatedImages])
        # NOTE(review): statement truncated by the snippet boundary.
        Y = np.concatenate(
Example #4
0
# NOTE(review): incomplete fragment training a Keras GAN to match a 2D
# density image; z_dim, bs, opt, sampler, KLayers/KModels, io and torch are
# bound outside this view.
density_img = io.imread(input_path, True)  # True => read as grayscale
lut_2d = sampler.generate_lut(density_img)
visualizer = visualizer.GANDemoVisualizer(
    'GAN 2D Example Visualization of {}'.format(input_path))

generator = build_generator()
discriminator = build_discriminator()
discriminator.trainable = False

ganInput = KLayers.Input(shape=(z_dim, ))
# NOTE(review): generator is rebuilt here, discarding the instance created a
# few lines above — redundant in the original example.
generator = build_generator()
x = generator(ganInput)
ganOutput = discriminator(x)
gan = KModels.Model(inputs=ganInput, outputs=ganOutput)
gan.compile(loss='binary_crossentropy', optimizer=opt)
progBar = bar.ProgressBarGAN(1, 2000, "D Loss:%.3f,G Loss:%.3f")

for epoch_iter in range(1, 2001):
    # 20 inner steps per progress-bar tick.
    for index in range(20):
        real_samples = sampler.sample_2d(lut_2d, bs)
        # print(real_samples.shape)

        # torch.randn used purely as an RNG here; converted to numpy for Keras.
        noise = torch.randn(bs,
                            z_dim)  # np.random.normal(-1, 1, size=[bs, z_dim])
        generateImage = generator.predict(noise.numpy())

        # Unfreeze D and train it: first bs rows are real (label 1), the
        # generated rows are fake (label 0).
        discriminator.trainable = True
        yDis = np.zeros(2 * bs)
        yDis[:bs] = 1
        d_loss = discriminator.train_on_batch(
            np.concatenate((real_samples, generateImage)), yDis)
# NOTE(review): this is a separate snippet (CIFAR-10 classifier training),
# apparently missing its "Example #5" header in the scrape; model, cfg,
# EPOCH_NUM, lr_schedule and the K*/M* module aliases live outside this view.
model.compile(loss=Klosses.categorical_crossentropy,
              optimizer=Koptimizers.SGD(lr=0.0, momentum=0.9, nesterov=True),
              metrics=['acc'])

# Real-time augmentation pipeline for the training images.
train_dataGen = Kimage.ImageDataGenerator(rescale=1. / 255,
                                          shear_range=0.2,
                                          zoom_range=0.2,
                                          horizontal_flip=True)

train_generator = train_dataGen.flow_from_directory(
    directory=os.path.join(cfg.DATAPATH, "cifar10", "train"),
    target_size=(cfg.IMAGE_SIZE, cfg.IMAGE_SIZE),
    batch_size=cfg.BATCH_SIZE,
    class_mode='categorical')
probar = Mprogress.ProgressBarCallback()
# patience equal to the total epoch count effectively disables early stopping.
es = Kcallbacks.EarlyStopping(monitor='val_acc', patience=EPOCH_NUM)
checkpoint = Kcallbacks.ModelCheckpoint(filepath="cifar10_alexnet.h5",
                                        save_best_only=True,
                                        save_weights_only=True)
# lr=0.0 in compile() above is presumably a placeholder that the scheduler
# overrides each epoch via lr_schedule — confirm against lr_schedule's def.
lrate = Kcallbacks.LearningRateScheduler(lr_schedule)

reader = MclassReader.ClassificationReader(
    dataPath=os.path.join(cfg.DATAPATH, "cifar10"))
x_test, y_test = reader.readData(phrase="test")
y_test = Kutils.to_categorical(y_test, num_classes=cfg.NUM_OUTPUTS)

# NOTE(review): call truncated by the snippet boundary (callbacks/validation
# arguments presumably follow).
model.fit_generator(generator=train_generator,
                    steps_per_epoch=50000 / cfg.BATCH_SIZE,
                    epochs=EPOCH_NUM,
                    verbose=0,
Example #6
0
    # NOTE(review): incomplete fragment — the enclosing function plus
    # build_*, adam, gaussian_loss, sample_generator_input, epochs and
    # batch_size are outside this view. D has three heads (validity, class
    # label, count), which suggests an InfoGAN/ACGAN-style setup.
    y_train = y_train.reshape(-1, 1)

    discriminator = build_discriminator()
    # Freeze D inside the combined model so only G trains through `gan`.
    discriminator.trainable = False

    # 74 presumably = noise dims + label dims + count dims concatenated
    # below — confirm against sample_generator_input.
    ganInput = KLayers.Input(shape=(74, ))
    generator = build_generator()
    x = generator(ganInput)
    valid, target_label, target_count = discriminator(x)
    gan = KModels.Model(inputs=ganInput,
                        outputs=[valid, target_label, target_count])
    # One loss per discriminator head, in output order.
    gan.compile(loss=[
        'binary_crossentropy', 'categorical_crossentropy', gaussian_loss
    ],
                optimizer=adam)
    progBar = bar.ProgressBarGAN(1, epochs, "D Loss:%.3f,G Loss:%.3f")

    for epoch in range(epochs):
        sampled_noise, sampled_labels, sampled_count = sample_generator_input(
            batch_size)
        gen_input = np.concatenate(
            [sampled_noise, sampled_labels, sampled_count], axis=1)
        generateImages = generator.predict(gen_input)

        idx = np.random.randint(0, x_train.shape[0], batch_size)
        imgs = x_train[idx]

        # 0 = fake, 1 = real target vectors; the D/G train steps are
        # truncated by the snippet boundary.
        fake = np.zeros((batch_size, 1))
        valid = np.ones((batch_size, 1))

        labels = KUtils.to_categorical(y_train[idx], num_classes=10)
Example #7
0
# NOTE(review): incomplete fragment of an adversarial-autoencoder script;
# img, encoded_repr, reconstructed_img, optimizer, generator, reader and cfg
# are defined outside this view. `generator` here appears to be the
# encoder/decoder pair (predict returns a latent code and a reconstruction).
discriminator.trainable = False
validity = discriminator(encoded_repr)

# Combined model: reconstruct the image and fool the latent discriminator;
# reconstruction (mse) weighted 0.999 vs 0.001 adversarial.
adversarial_autoencoder = Model(img, [reconstructed_img, validity])
adversarial_autoencoder.compile(loss=['mse', 'binary_crossentropy'],
                                loss_weights=[0.999, 0.001],
                                optimizer=optimizer)

(x_train, y_train), (x_test, y_test) = reader.read_mnist('../data/mnist.npz')
# Scale pixels to [-1, 1] and append a trailing channel axis.
x_train = (x_train.astype(np.float32) - 127.5) / 127.5
x_train = np.expand_dims(x_train, axis=3)

half_batch = int(cfg.BATCH_SIZE / 2)

samples_image = []
progressBar = bar.ProgressBarGAN(
    1, cfg.EPOCH_NUM, "D loss: %.3f, acc: %.2f%% - G loss: %.3f, mse: %.2f")
for epoch in range(cfg.EPOCH_NUM):
    # Select a random half batch of images
    idx = np.random.randint(0, x_train.shape[0], half_batch)
    imgs = x_train[idx]

    # Generate a half batch of new images
    latent_fake, gen_imgs = generator.predict(imgs)

    # "Real" latent samples drawn from a standard normal prior.
    latent_real = np.random.normal(size=(half_batch, cfg.ENCODED_DIM))

    valid = np.ones((half_batch, 1))
    fake = np.zeros((half_batch, 1))

    # Train the discriminator
    # (the fake-side step and G step are truncated by the snippet boundary)
    d_loss_real = discriminator.train_on_batch(latent_real, valid)
Example #8
0
# NOTE(review): incomplete fragment of a conditional-GAN (PyTorch) training
# script; train_hist, img_size, train_epoch, train_loader and both
# optimizers are defined outside this view.
train_hist['per_epoch_ptimes'] = []
train_hist['total_ptime'] = []

# label preprocess
# onehot: (10, 10, 1, 1) one-hot code per digit class; fill: (10, 10, H, W)
# where plane i of entry i is all ones — presumably the G and D label
# conditioning tensors respectively (confirm against the loop below view).
onehot = torch.zeros(10, 10)
onehot = onehot.scatter_(
    1,
    torch.LongTensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]).view(10, 1),
    1).view(10, 10, 1, 1)
fill = torch.zeros([10, 10, img_size, img_size])
for i in range(10):
    fill[i, i, :, :] = 1

print('training start!')
start_time = time.time()
progBar = bar.ProgressBarGAN(train_epoch, len(train_loader),
                             "D Loss: %.3f,G Loss:%.3f")
for epoch in range(train_epoch):
    D_losses = []
    G_losses = []

    # learning rate decay
    # Divide both optimizers' lr by 10 at the start of epochs 11 and 16.
    if (epoch + 1) == 11:
        G_optimizer.param_groups[0]['lr'] /= 10
        D_optimizer.param_groups[0]['lr'] /= 10
        print("learning rate change!")

    if (epoch + 1) == 16:
        G_optimizer.param_groups[0]['lr'] /= 10
        D_optimizer.param_groups[0]['lr'] /= 10
        print("learning rate change!")