Example #1
import numpy as np
import matplotlib.pyplot as plt
from tensorflow import keras


def main(batch_size, file_dir):
    # Prepare the dataset. We use both the training & test MNIST digits.
    x = get_data(file_dir)
    
    gan = GAN(discriminator=discriminator, generator=generator, latent_dim=latent_dim)
    gan.compile(
        d_optimizer=keras.optimizers.Adam(learning_rate=0.0003),
        g_optimizer=keras.optimizers.Adam(learning_rate=0.0003),
        loss_fn=keras.losses.BinaryCrossentropy(from_logits=True)
    )
    # Train on the full dataset; about 20 epochs are needed to get nice results.
    # Model.summary() prints directly and returns None, so don't wrap it in print().
    generator.summary()
    discriminator.summary()
    history = gan.fit(x, batch_size=batch_size, epochs=20)
    g_loss, d_loss = history.history['g_loss'], history.history['d_loss']
    plt.plot(g_loss)
    plt.plot(d_loss)
    plt.xticks(np.arange(0, 20, step=1))  # Set label locations.
    plt.xlabel('epochs')
    plt.ylabel('loss')
    plt.title('Protein Structure Generation With DCGAN')
    # print(xticks(np.arange(0, 20, step=1)))
    # pred = np.stack(history.history['pred'], axis=0)
    # labels = np.stack(history.history['label'], axis=0)
    # accuracies = get_accuracies(pred, labels)
    # plt.plot(accuracies)
    plt.legend(['Generator loss', 'Discriminator loss'], loc='upper right')
    plt.show()
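
The GAN class used in Examples #1 and #2 is not shown in either snippet. A minimal sketch of what it presumably looks like, following the standard Keras pattern of a Model subclass with a custom compile and train_step (the metric names d_loss and g_loss are assumptions chosen to match the history keys read above), is:

import tensorflow as tf
from tensorflow import keras


class GAN(keras.Model):
    """Sketch of the GAN wrapper assumed by the examples above."""

    def __init__(self, discriminator, generator, latent_dim):
        super().__init__()
        self.discriminator = discriminator
        self.generator = generator
        self.latent_dim = latent_dim

    def compile(self, d_optimizer, g_optimizer, loss_fn):
        super().compile()
        self.d_optimizer = d_optimizer
        self.g_optimizer = g_optimizer
        self.loss_fn = loss_fn

    def train_step(self, real_images):
        batch_size = tf.shape(real_images)[0]

        # Train the discriminator on a half-fake, half-real batch (fakes labelled 1).
        noise = tf.random.normal(shape=(batch_size, self.latent_dim))
        fake_images = self.generator(noise)
        combined = tf.concat([fake_images, real_images], axis=0)
        labels = tf.concat(
            [tf.ones((batch_size, 1)), tf.zeros((batch_size, 1))], axis=0)
        with tf.GradientTape() as tape:
            predictions = self.discriminator(combined)
            d_loss = self.loss_fn(labels, predictions)
        grads = tape.gradient(d_loss, self.discriminator.trainable_weights)
        self.d_optimizer.apply_gradients(zip(grads, self.discriminator.trainable_weights))

        # Train the generator to fool the discriminator (targets say "real", i.e. 0).
        noise = tf.random.normal(shape=(batch_size, self.latent_dim))
        misleading_labels = tf.zeros((batch_size, 1))
        with tf.GradientTape() as tape:
            predictions = self.discriminator(self.generator(noise))
            g_loss = self.loss_fn(misleading_labels, predictions)
        grads = tape.gradient(g_loss, self.generator.trainable_weights)
        self.g_optimizer.apply_gradients(zip(grads, self.generator.trainable_weights))

        return {"d_loss": d_loss, "g_loss": g_loss}

The repositories these examples come from may differ in details such as label conventions, label smoothing, or separate real/fake discriminator passes; the sketch only shows the overall structure.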
Example #2
from tensorflow import keras


def train(dataset,
          epochs=30,
          num_images=1,
          latent_dim=256,
          learning_rate_g=0.00005,
          learning_rate_d=0.00005):
    discriminator = Discriminator().discriminator
    generator = Generator(latent_dim).generator

    gan = GAN(discriminator, generator, latent_dim)
    gan.compile(
        d_optimizer=keras.optimizers.Adam(learning_rate_d),
        g_optimizer=keras.optimizers.Adam(learning_rate_g),
        loss_function=keras.losses.BinaryCrossentropy(from_logits=True),
    )

    gan.fit(dataset,
            epochs=epochs,
            callbacks=[GANMonitor(num_images, latent_dim)])

    # gan.save("gan_model.h5")
    generator.save("generator_model.h5")
    discriminator.save("discriminator_model.h5")
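
GANMonitor is also not defined in Example #2. A plausible minimal version, assuming the generator produces image tensors with a tanh output that should be rescaled to [0, 255] before saving (the file-name pattern is illustrative, not taken from the original code), is:

import tensorflow as tf
from tensorflow import keras


class GANMonitor(keras.callbacks.Callback):
    """Sketch of a callback that writes a few generated samples after each epoch."""

    def __init__(self, num_img=1, latent_dim=256):
        super().__init__()
        self.num_img = num_img
        self.latent_dim = latent_dim

    def on_epoch_end(self, epoch, logs=None):
        noise = tf.random.normal(shape=(self.num_img, self.latent_dim))
        generated = self.model.generator(noise)
        # Assumes a tanh output in [-1, 1]; rescale to [0, 255] before saving.
        generated = (generated * 127.5) + 127.5
        for i in range(self.num_img):
            img = keras.utils.array_to_img(generated[i])
            img.save("generated_img_{epoch:03d}_{i}.png".format(epoch=epoch, i=i))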
Example #3
    images_path = glob(sys.argv[1] + "/" + sys.argv[2] + "_resized/*")

    d_model = build_discriminator()
    g_model = build_generator(latent_dim)

    d_model.summary()
    g_model.summary()

    gan = GAN(d_model, g_model, latent_dim)

    bce_loss_fn = tf.keras.losses.BinaryCrossentropy(from_logits=True,
                                                     label_smoothing=0.1)
    d_optimizer = tf.keras.optimizers.Adam(learning_rate=0.0002, beta_1=0.5)
    g_optimizer = tf.keras.optimizers.Adam(learning_rate=0.0002, beta_1=0.5)
    gan.compile(d_optimizer, g_optimizer, bce_loss_fn)

    images_dataset = tf_dataset(images_path, batch_size)

    # Launch TensorBoard once, before training, pointing at the run's log directory
    # (launching it inside the loop would start a new server every epoch).
    from tensorboard import program
    tb = program.TensorBoard()
    tb.configure(argv=[None, '--logdir', utils.LOG_DIR])
    url = tb.launch()
    print(url)

    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch + 1, num_epochs))
        gan.fit(images_dataset, epochs=1)
        g_model.save(sys.argv[1] + "/" + sys.argv[2] +
                     "_saved_models/g_model.h5")
        d_model.save(sys.argv[1] + "/" + sys.argv[2] +
                     "_saved_models/d_model.h5")

        # Sample the generator and save a grid of generated examples for this epoch.
        noise = np.random.normal(size=(n_samples, latent_dim))
        examples = g_model.predict(noise)
        save_plot(sys.argv[1] + "/" + sys.argv[2] + "_samples", examples,
                  epoch, int(np.sqrt(n_samples)), IMG_CHANNEL)
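
The tf_dataset helper called above is not shown. A minimal sketch, assuming the resized files are PNGs that should be scaled to [-1, 1] (IMG_SIZE, the decode format, and the scaling are assumptions, not taken from the original code), could be:

import tensorflow as tf

IMG_SIZE = 64  # assumed; must match the generator's output resolution


def tf_dataset(images_path, batch_size):
    def load_image(path):
        img = tf.io.read_file(path)
        img = tf.image.decode_png(img, channels=3)   # assumes PNG input
        img = tf.image.resize(img, (IMG_SIZE, IMG_SIZE))
        img = (tf.cast(img, tf.float32) - 127.5) / 127.5  # scale to [-1, 1]
        return img

    ds = tf.data.Dataset.from_tensor_slices(images_path)
    ds = ds.shuffle(buffer_size=len(images_path))
    ds = ds.map(load_image, num_parallel_calls=tf.data.AUTOTUNE)
    ds = ds.batch(batch_size, drop_remainder=True)
    ds = ds.prefetch(tf.data.AUTOTUNE)
    return ds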

    # initial scanpy train & test splits
    # (two independent sc.pp.subsample draws, so the 90% and 10% subsets may overlap)
    data = sc.read_h5ad('data/GSE144136_preprocessed.h5ad')
    train = sc.pp.subsample(data=data, fraction=0.90, copy=True, random_state=utils.RANDOM)
    test = sc.pp.subsample(data=data, fraction=0.10, copy=True, random_state=utils.RANDOM)

    # build model
    strategy = tf.distribute.MirroredStrategy()
    with strategy.scope():
        model = GAN(gex_size=train.shape[1], num_cells_generate=test.shape[0])
        model.compile()
        model.build(input_shape=(model.hyperparams.batch_size, model.hyperparams.latent_dim))  # req. for subclassed models

    # process data for training
    train_tf = (
        tf.data.Dataset.from_tensor_slices(train.X)
        .cache()
        .shuffle(buffer_size=train.shape[0], seed=utils.RANDOM)
        .batch(batch_size=model.hyperparams.batch_size * strategy.num_replicas_in_sync,
               num_parallel_calls=tf.data.AUTOTUNE)
        .prefetch(buffer_size=tf.data.AUTOTUNE)
    )
    train_tf_distributed = strategy.experimental_distribute_dataset(train_tf)

    test_tf = (
        tf.data.Dataset.from_tensor_slices(test.X)
        .cache()
        .shuffle(buffer_size=test.shape[0], seed=utils.RANDOM)
        .prefetch(buffer_size=tf.data.AUTOTUNE)
    )
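
The snippet ends before any training step runs. One common way to consume train_tf_distributed under MirroredStrategy is a custom loop built on strategy.run; the sketch below assumes the GAN subclass exposes a train_step that returns a dict of loss tensors (as in the sketch after Example #1), and the epoch count is a placeholder rather than a value from the original code.

import tensorflow as tf

num_epochs = 500  # placeholder; the real value would come from the model's hyperparameters


@tf.function
def distributed_train_step(batch):
    # Run the model's train_step on each replica and average the returned losses.
    per_replica = strategy.run(model.train_step, args=(batch,))
    return {name: strategy.reduce(tf.distribute.ReduceOp.MEAN, value, axis=None)
            for name, value in per_replica.items()}


for epoch in range(num_epochs):
    for batch in train_tf_distributed:
        metrics = distributed_train_step(batch)
    print('epoch {}: {}'.format(
        epoch + 1, {k: float(v.numpy()) for k, v in metrics.items()}))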