Example #1
import numpy
from tqdm import tqdm

import helpers  # project module providing plot_generated_images
import models   # project module providing create_generator / create_discriminator / create_gan


def training(epochs=1, batch_size=128):
    """This function fits a gan model.

    Parameters
    ----------
    epochs : int, optional
        Number of epochs (1 by default).
    batch_size : int, optional
        Number of steps by epoch (128 by default).

    Returns
    -------

    """

    x_train = numpy.load('../data/train_data.npy')

    generator = models.create_generator()
    discriminator = models.create_discriminator()
    gan = models.create_gan(discriminator, generator)

    for e in range(1, epochs + 1):
        for _ in tqdm(range(batch_size), desc="Epoch {}".format(e)):
            noise = numpy.random.normal(0, 1, [batch_size, 100])

            generated_images = generator.predict(noise)

            image_batch = x_train[numpy.random.randint(low=0,
                                                       high=x_train.shape[0],
                                                       size=batch_size)]

            # Train the discriminator on a mixed batch of real and generated
            # images; real labels are smoothed to 0.9.
            x = numpy.concatenate([image_batch, generated_images])

            y_dis = numpy.zeros(2 * batch_size)
            y_dis[:batch_size] = 0.9

            discriminator.trainable = True
            discriminator.train_on_batch(x, y_dis)

            # Train the generator through the combined model with the
            # discriminator frozen.
            noise = numpy.random.normal(0, 1, [batch_size, 100])
            y_gen = numpy.ones(batch_size)

            discriminator.trainable = False

            gan.train_on_batch(noise, y_gen)

        if e == 1 or e % 20 == 0:
            helpers.plot_generated_images(e, generator)
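The models.create_gan helper is not shown in this example. Below is a minimal sketch of what it might look like, assuming the usual Keras pattern of stacking the generator and a frozen discriminator; the 100-dimensional noise input matches the call above, while the loss and optimizer settings are assumptions.

from keras.layers import Input
from keras.models import Model
from keras.optimizers import Adam

def create_gan(discriminator, generator, noise_dim=100):
    # Freeze the discriminator so that only the generator is updated
    # when the combined model is trained.
    discriminator.trainable = False
    gan_input = Input(shape=(noise_dim,))
    fake_image = generator(gan_input)
    gan_output = discriminator(fake_image)
    gan = Model(inputs=gan_input, outputs=gan_output)
    gan.compile(loss='binary_crossentropy', optimizer=Adam(lr=0.0002, beta_1=0.5))
    return gan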
Example #2
import numpy as np
from IPython import display


def training(epochs, batch_size):
    # Relies on module-level globals defined elsewhere in the script:
    # learning_rate, beta_1, encoding_dims, ipython, outdir, and the
    # load_data / create_* / plot_generated_images helpers.

    X_train = load_data()
    batch_count = int(X_train.shape[0] / batch_size)

    generator = create_generator(learning_rate, beta_1, encoding_dims)
    discriminator = create_discriminator(learning_rate, beta_1)
    gan = create_gan(discriminator, generator, encoding_dims)

    valid = np.ones((batch_size, 1))
    fake = np.zeros((batch_size, 1))

    seed = np.random.normal(0, 1, [25, encoding_dims])

    for e in range(1, epochs + 1):
        print("Epoch %d" % e)
        for _ in range(batch_count):

            noise = np.random.normal(0, 1, [batch_size, encoding_dims])
            generated_images = generator.predict(noise)

            image_batch = X_train[np.random.randint(low=0, high=X_train.shape[0], size=batch_size)]

            # Train the discriminator on real and generated batches.
            discriminator.trainable = True
            d_loss_real = discriminator.train_on_batch(image_batch, valid)
            d_loss_fake = discriminator.train_on_batch(generated_images, fake)
            d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

            noise = np.random.normal(0, 1, [batch_size, encoding_dims])

            # Train the generator through the combined model with the
            # discriminator frozen.
            discriminator.trainable = False
            g_loss = gan.train_on_batch(noise, valid)

            print("%d [D loss: %f] [G loss: %f]" % (e, d_loss, g_loss))
        if ipython:
            display.clear_output(wait=True)
        plot_generated_images(e, generator, seed, outdir)
    generator.save('{}/gan_model'.format(outdir))
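The plot_generated_images helper used above is defined elsewhere in the project. A minimal sketch of what it might do, assuming MNIST-sized (28x28) samples and a 5x5 grid matching the 25-row seed; the output filename is also an assumption.

import matplotlib.pyplot as plt

def plot_generated_images(epoch, generator, seed, outdir, dim=(5, 5)):
    # Generate images from the fixed seed so progress is comparable
    # across epochs, then save them as a grid.
    generated = generator.predict(seed).reshape(-1, 28, 28)
    plt.figure(figsize=(10, 10))
    for i in range(generated.shape[0]):
        plt.subplot(dim[0], dim[1], i + 1)
        plt.imshow(generated[i], interpolation='nearest', cmap='gray')
        plt.axis('off')
    plt.tight_layout()
    plt.savefig('{}/generated_epoch_{:04d}.png'.format(outdir, epoch))
    plt.close()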
Example #3
def train():
    # Initialise the models.
    discriminator = create_discriminator()
    generator = create_gen()
    # generator.load_weights('srresnet.h5')
    joint_model = create_gan(generator, discriminator)
    joint_model.load_weights('srgan_best.h5')
    # generator.summary()
    # discriminator.summary()

    # Optional discriminator pre-training:
    # for j in range(1000):
    #     train_discriminator(generator, discriminator, j)

    # Alternate between discriminator and generator updates in each
    # adversarial epoch.
    for i in range(config.adversarial_epochs):
        for j in range(config.discriminator_epochs):
            discriminator.trainable = True
            train_discriminator(generator, discriminator, i)
            discriminator.trainable = False
        for j in range(config.generator_epochs):
            train_generator(generator, discriminator, joint_model)

        sample_images(generator, i)
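train_discriminator and train_generator are project helpers that are not shown. A minimal sketch of what train_discriminator might do in this SRGAN-style setup; the load_hr_lr_batch helper and the batch size are hypothetical.

import numpy as np

def train_discriminator(generator, discriminator, step, batch_size=16):
    # load_hr_lr_batch is a hypothetical helper returning aligned
    # high-resolution / low-resolution image batches.
    hr_images, lr_images = load_hr_lr_batch(batch_size)
    sr_images = generator.predict(lr_images)

    real_labels = np.ones((batch_size, 1))
    fake_labels = np.zeros((batch_size, 1))

    d_loss_real = discriminator.train_on_batch(hr_images, real_labels)
    d_loss_fake = discriminator.train_on_batch(sr_images, fake_labels)
    return 0.5 * np.add(d_loss_real, d_loss_fake)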
Example #4
import os
import random

import numpy as np
from keras.optimizers import Adam
from keras.utils import Progbar

import models  # project models module
import utils   # project utils module

# save_config, d_lossfun and g_lossfun are defined elsewhere in the script.


def main(args):

    # =====================================
    # Preparation (load dataset and create
    # a directory which saves results)
    # =====================================
    input_paths = utils.make_paths_from_directory(args.dataset)
    random.shuffle(input_paths)
    border = int(len(input_paths) * 0.8)
    train_paths, test_paths = input_paths[:border], input_paths[border:]

    if not os.path.exists(args.result):
        os.makedirs(args.result)
    save_config(os.path.join(args.result, 'config.txt'), args)

    # =====================================
    # Instantiate models
    # =====================================
    xgen = models.create_xgenerater()
    zgen = models.create_zgenerater()
    disc = models.create_discriminater()
    opt_d = Adam(lr=args.lr, beta_1=args.beta_1, beta_2=args.beta_2)
    opt_g = Adam(lr=args.lr, beta_1=args.beta_1, beta_2=args.beta_2)

    # Two views of the same underlying networks: gan_d is compiled with the
    # generators frozen (only the discriminator is updated), gan_g with the
    # discriminator frozen (only the generators are updated).
    xgen.trainable = False
    zgen.trainable = False
    gan_d = models.create_gan(xgen, zgen, disc)
    gan_d.compile(optimizer=opt_d, loss=d_lossfun)

    xgen.trainable = True
    zgen.trainable = True
    disc.trainable = False
    gan_g = models.create_gan(xgen, zgen, disc)
    gan_g.compile(optimizer=opt_g, loss=g_lossfun)


    # =====================================
    # Training Loop
    # =====================================
    num_train = len(train_paths)
    for epoch in range(args.epochs):
        print('Epoch %d/%d' % (epoch+1, args.epochs))
        pbar = Progbar(num_train)
        for i in range(0, num_train, args.batch_size):
            x = utils.make_arrays_from_paths(
                train_paths[i:i+args.batch_size],
                preprocess=utils.preprocess_input,
                target_size=(32,32))
            z = np.random.normal(size=(len(x), 1, 1, 64))

            # train the discriminator
            d_loss = gan_d.train_on_batch([x, z], np.zeros((len(x), 1, 1, 2)))
            # train the generators (xgen and zgen)
            g_loss = gan_g.train_on_batch([x, z], np.zeros((len(x), 1, 1, 2)))

            # update progress bar
            pbar.add(len(x), values=[
                ('d_loss', d_loss),
                ('g_loss', g_loss),
            ])

        if (epoch+1) % args.snap_freq == 0:
            # ===========================================
            # Save result
            # ===========================================
            # Make a directory which stores learning results
            # every args.snap_freq epochs
            dirname = 'epochs%d' % (epoch+1)
            path = os.path.join(args.result, dirname)
            if not os.path.exists(path):
                os.makedirs(path)

            # Save the generators' weights
            xgen.save_weights(os.path.join(path, 'xgen_weights.h5'))
            zgen.save_weights(os.path.join(path, 'zgen_weights.h5'))

            # Save generated images
            img = utils.generate_img(xgen)
            img.save(os.path.join(path, 'generated.png'))

            # Save reconstructed images
            x = utils.make_arrays_from_paths(
                test_paths,
                preprocess=None,
                target_size=(32,32))
            img = utils.reconstruct_img(x, xgen, zgen)
            img.save(os.path.join(path, 'reconstructed.png'))
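The d_lossfun and g_lossfun losses used to compile the two models are not shown. Below is a minimal sketch that is merely compatible with the (batch, 1, 1, 2) dummy targets above, under the assumption of an ALI/BiGAN-style objective where channel 0 is the discriminator score for a data pair (x, zgen(x)) and channel 1 the score for a generated pair (xgen(z), z).

from keras import backend as K

def d_lossfun(y_true, y_pred):
    # y_true is a dummy tensor of zeros; the loss depends only on y_pred.
    p_data = K.clip(y_pred[..., 0], K.epsilon(), 1.0 - K.epsilon())
    p_gen = K.clip(y_pred[..., 1], K.epsilon(), 1.0 - K.epsilon())
    # Push data pairs towards "real" and generated pairs towards "fake".
    return -K.mean(K.log(p_data) + K.log(1.0 - p_gen))

def g_lossfun(y_true, y_pred):
    # The generators optimise the opposite objective.
    p_data = K.clip(y_pred[..., 0], K.epsilon(), 1.0 - K.epsilon())
    p_gen = K.clip(y_pred[..., 1], K.epsilon(), 1.0 - K.epsilon())
    return -K.mean(K.log(1.0 - p_data) + K.log(p_gen))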