Example #1
def create_output_directory():
    # Create the configured output directory and a fresh result subdirectory inside it.
    os.makedirs(cfg.output_dir, exist_ok=True)
    output_subdir = create_result_subdir(cfg.output_dir)
    print('Output directory: ' + output_subdir)
    # Snapshot the files in the current working directory into the result subdirectory.
    files = [f for f in os.listdir('.') if os.path.isfile(f)]
    for f in files:
        shutil.copy(f, output_subdir)
    return output_subdir
Example #2
def train():
    result_subdir = create_result_subdir(result_dir)
    # Load the real and fake word lists from disk.
    real_words = read_words(real_words_path)
    fake_words = read_words(fake_words_path)
    # Convert each word to a character sequence and drop any empty results.
    real_words = [
        word for word in [convert_to_char_seq(word) for word in real_words]
        if word != []
    ]
    fake_words = [
        word for word in [convert_to_char_seq(word) for word in fake_words]
        if word != []
    ]
    # Pad all sequences to a common length and add a trailing feature axis.
    words = real_words + fake_words
    words = pad_words(words)
    words = np.array(words)[:, :, np.newaxis]
    print(words.shape)
    # Label real words as 1 and fake words as 0, then hold out 20% for validation.
    labels = np.concatenate(
        [np.ones(len(real_words)),
         np.zeros(len(fake_words))])
    words_train, words_val, labels_train, labels_val = train_test_split(
        words, labels, test_size=0.2, random_state=42)

    model = simple_model()
    opt = Adam(0.01)
    model.compile(loss=binary_crossentropy,
                  optimizer=opt,
                  metrics=[binary_accuracy])
    model.summary()

    # Save a checkpoint after every epoch, tagged with epoch number and validation loss.
    checkpoint = ModelCheckpoint(os.path.join(
        result_subdir, 'model.{epoch:03d}-{val_loss:.2f}.h5'),
                                 monitor='val_loss')

    model.fit(words_train,
              labels_train,
              batch_size=32,
              epochs=10,
              verbose=1,
              validation_data=(words_val, labels_val),
              callbacks=[checkpoint])
Example #3
def create_output_directory():
    # Create the 'eval' directory and a fresh result subdirectory inside it.
    os.makedirs('eval', exist_ok=True)
    output_subdir = create_result_subdir('eval')
    print('Output directory: ' + output_subdir)
    return output_subdir
Example #4
    # distribution_strategy = tf.distribute.MirroredStrategy()
    distribution_strategy = tf.distribute.get_strategy()
    num_gpus = distribution_strategy.num_replicas_in_sync
    print("Num gpus:", num_gpus)

    # Compute global batch size using number of replicas.
    global_batch_size = batch_size_per_gpu * num_gpus
    dataset = make_dataset().batch(global_batch_size)

    total_n_examples = 60_000  # number of training images in MNIST
    steps_per_epoch = total_n_examples // global_batch_size

    results_dir = "results"
    # Set resume_run_id to 0 (or None) to start a fresh run instead of resuming run 01.
    resume_run_id = 1
    if resume_run_id:
        config.log_dir = f"{results_dir}/{resume_run_id:02}-mnist"
    else:
        config.log_dir = utils.create_result_subdir(results_dir, "mnist")
    config.checkpoint_dir = config.log_dir + "/checkpoints"

    # strategy = tf.distribute.MirroredStrategy()
    strategy = tf.distribute.get_strategy()
    with strategy.scope():

        gen = DCGANGenerator()
        disc = DCGANDiscriminator()
        gan = blurred_gan.BlurredWGANGP(gen,
                                        disc,
                                        hyperparams=hyperparameters,
                                        config=config)

    checkpoint = tf.train.Checkpoint(gan=gan)
    manager = tf.train.CheckpointManager(checkpoint,
                                         directory=config.checkpoint_dir,
                                         max_to_keep=5,
                                         keep_checkpoint_every_n_hours=1)
Example #5
    # TODO: Make multi-GPU training work
    # distribution_strategy = tf.distribute.MirroredStrategy()
    distribution_strategy = tf.distribute.get_strategy()
    num_gpus = distribution_strategy.num_replicas_in_sync
    print("Num gpus:", num_gpus)

    # Compute global batch size using number of replicas.
    global_batch_size = batch_size_per_gpu * num_gpus
    dataset = make_dataset().batch(global_batch_size)

    total_n_examples = 202_599  # number of images in the CelebA dataset
    steps_per_epoch = total_n_examples // global_batch_size

    results_dir = "results"
    config.log_dir = utils.create_result_subdir(results_dir, "celeba")
    config.checkpoint_dir = config.log_dir + "/checkpoints"

    gen = DCGANGenerator()
    disc = DCGANDiscriminator()
    gan = blurred_gan.BlurredWGANGP(gen,
                                    disc,
                                    hyperparams=hyperparameters,
                                    config=config)

    checkpoint = tf.train.Checkpoint(gan=gan)
    manager = tf.train.CheckpointManager(checkpoint,
                                         directory=config.checkpoint_dir,
                                         max_to_keep=5,
                                         keep_checkpoint_every_n_hours=1)
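
Every example in this listing calls a create_result_subdir helper whose implementation is not shown. As a rough illustration only, the sketch below assumes the helper creates sequentially numbered run directories of the form results/NN-desc, a convention hinted at by the resume path f"{results_dir}/{resume_run_id:02}-mnist" in Example #4; the signature, the desc default, and the numbering logic here are assumptions, not the project's actual code.

import os
import re


def create_result_subdir(result_dir, desc="run"):
    # Hypothetical sketch only: create the next numbered subdirectory, e.g. results/03-run.
    os.makedirs(result_dir, exist_ok=True)
    # Collect the numeric prefixes of existing "NN-..." subdirectories.
    run_ids = [
        int(m.group(1))
        for m in (re.match(r"(\d+)-", name) for name in os.listdir(result_dir))
        if m is not None
    ]
    next_id = max(run_ids, default=0) + 1
    subdir = os.path.join(result_dir, f"{next_id:02d}-{desc}")
    os.makedirs(subdir)
    return subdir

Examples #1 and #3 call the helper with only the parent directory, so any description argument would have to be optional in the real implementation.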