Example #1
0
def main():
    """Train each candidate model on the GeoGuessr world dataset, record
    their loss histories, and visualize predictions on one validation batch.
    """
    loader = DatasetLoader("../geoguessrBotDatasets/geoguessrWorld/",
                           (48, 64), "coordinates")
    train_dataset = loader.load_dataset(0, 9000, "train_dataset")
    val_dataset = loader.load_dataset(9000, 10000, "validation_dataset")

    # Models competing on the same data; keyed by display name.
    models = {"CNN": CNNModel(), "noinput": NoInputModel()}

    training_loss_histories = {}
    validation_loss_histories = {}

    for name, candidate in models.items():
        # 100 = number of training epochs per model.
        candidate.train(train_dataset, val_dataset, 100)
        training_loss_histories[name] = candidate.training_loss_history
        validation_loss_histories[name] = candidate.validation_loss_history

    # Grab a single validation batch and render every model's predictions.
    sample_images, sample_labels = next(iter(val_dataset))
    predict_images(models, sample_images, sample_labels)
Example #2
0
def train(sess, ops, config):
    """Run the GAN training loop (TensorFlow 1.x graph mode).

    Alternates a discriminator step and a generator step per batch, logs
    summaries/losses every ``config.log_freq`` global steps, and checkpoints
    every ``config.checkpoint_freq`` global steps.  Epoch/batch/global-step
    counters live in TF variables on ``ops``, so a restored session resumes
    from its saved counters.

    Args:
        sess: active tf.Session; it is closed before this function returns.
        ops: namespace of graph ops/variables — train_d, train_g, summary_op,
            loss_d, loss_g, epoch_var, batch_var, global_step_var.
        config: run configuration — summary_dir, checkpoint_dir, num_epochs,
            log_freq, checkpoint_freq, plus whatever DatasetLoader reads.
    """
    writer = tf.summary.FileWriter(config.summary_dir,
                                   graph=tf.get_default_graph())
    saver = tf.train.Saver()

    # prepare data
    loader = DatasetLoader()
    dataset, num_batches = loader.load_dataset(config)
    iterator = dataset.make_initializable_iterator()
    next_batch = iterator.get_next()

    # Build the batch-counter reset op ONCE.  The previous version called
    # tf.assign(...) inside the epoch loop, which appends a new node to the
    # graph every epoch — unbounded graph growth in TF1.
    reset_batch_op = tf.assign(ops.batch_var, 0)

    # restore counters from their variables (supports resuming a run)
    epoch = sess.run(ops.epoch_var)
    batch = sess.run(ops.batch_var)
    global_step = sess.run(ops.global_step_var)

    # loop over epochs
    while epoch < config.num_epochs:

        # draw samples before each epoch for qualitative inspection
        sample_all_categories(sess, ops, config, 5, 'epoch_' + str(epoch))

        sess.run(iterator.initializer)

        # loop over batches
        while batch < num_batches:

            images, labels = sess.run(next_batch)
            _, expanded_labels = expand_labels(labels)
            M = images.shape[0]  # actual batch size (last batch may be short)
            y, y_expanded, z = random_codes(M)

            # feed placeholders by tensor name; D then G update per batch
            feed_dict = {
                'images_holder:0': images,
                'labels_holder:0': expanded_labels,
                'y_expanded_holder:0': y_expanded,
                'z_holder:0': z,
                'y_holder:0': y
            }
            sess.run(ops.train_d, feed_dict=feed_dict)
            sess.run(ops.train_g, feed_dict=feed_dict)

            # logging: fetch summary and both losses in ONE run call instead
            # of three separate graph executions over the same feed_dict
            if global_step % config.log_freq == 0:
                summary, loss_d_val, loss_g_val = sess.run(
                    [ops.summary_op, ops.loss_d, ops.loss_g],
                    feed_dict=feed_dict)
                writer.add_summary(summary, global_step=global_step)
                print("epoch: " + str(epoch) + ", batch " + str(batch))
                print("G loss: " + str(loss_g_val))
                print("D loss: " + str(loss_d_val))

            # saving
            if global_step % config.checkpoint_freq == 0:
                checkpoint_model(config.checkpoint_dir, sess, global_step,
                                 saver)

            global_step = increment(ops.global_step_var, sess)
            batch = increment(ops.batch_var, sess)

        epoch = increment(ops.epoch_var, sess)
        sess.run(reset_batch_op)
        batch = sess.run(ops.batch_var)

    # flush and close the event writer so buffered summaries reach disk
    writer.close()
    sess.close()