Example #1
    # Experiment builder
    data = dataset.MiniImageNetDataSet(batch_size=batch_size,
                                       classes_per_set=classes_per_set,
                                       samples_per_class=samples_per_class,
                                       shuffle_classes=True)
    experiment = ExperimentBuilder(data)
    one_shot_miniImagenet, losses, c_error_opt_op, init = experiment.build_experiment(
        batch_size, classes_per_set, samples_per_class)
    total_epochs = 120
    total_train_batches = 1000
    total_val_batches = int(250 * sp)
    total_test_batches = int(250 * sp)

    logs = "{}way{}shot learning problems, with {} tasks per task batch".format(
        classes_per_set, samples_per_class, batch_size)
    save_statistics(experiment_name, ["Experimental details: {}".format(logs)])
    save_statistics(experiment_name, [
        "epoch", "train_c_loss", "train_c_accuracy", "val_loss",
        "val_accuracy", "test_c_loss", "test_c_accuracy", "learning_rate"
    ])

    # Experiment initialization and running
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.gpu_options.visible_device_list = "0"
    with tf.Session(config=config) as sess:
        sess.run(init)
        saver = tf.train.Saver(max_to_keep=5)
        if continue_from_epoch != -1:  # load checkpoint if needed
            print("Loading from checkpoint")
            checkpoint = "saved_models/{}_{}.ckpt".format(
                experiment_name, continue_from_epoch)
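The snippet is cut off right after the checkpoint path is built (the format arguments above are completed from the identical pattern in Example #2). A minimal sketch of how such a run typically continues, assuming the saver, sess, and batch counts defined above; the restore call and the training loop are illustrative, not code from the original source:

            # Hypothetical continuation (assumed, not from the original source):
            saver.restore(sess, checkpoint)  # resume weights from the saved epoch

        for epoch in range(total_epochs):
            for _ in range(total_train_batches):
                # losses comes from build_experiment; tf.Session.run accepts it
                # whether it is a single tensor or a structure of tensors.
                _, batch_losses = sess.run([c_error_opt_op, losses])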
Example #2
experiment = ExperimentBuilder(data)
one_shot_omniglot, losses, c_error_opt_op, init = experiment.build_experiment(
    batch_size, classes_per_set, samples_per_class, fce)
total_train_batches = 18  # 1000
total_val_batches = 3  # 1000
total_test_batches = 3  # 1000
# total_test_c_loss_mean, total_test_c_loss_std, total_test_accuracy_mean, \
#     total_test_accuracy_std = -1, -1, -1, -1

saved_models_filepath, logs_filepath = build_experiment_folder(experiment_name)
save_statistics(logs_filepath, [
    "epoch", "total_train_c_loss_mean", "total_train_c_loss_std",
    "total_train_accuracy_mean", "total_train_accuracy_std",
    "total_val_c_loss_mean", "total_val_c_loss_std", "total_val_accuracy_mean",
    "total_val_accuracy_std", "total_test_c_loss_mean",
    "total_test_c_loss_std", "total_test_accuracy_mean",
    "total_test_accuracy_std"
], create=True)

# Experiment initialization and running
with tf.Session() as sess:
    sess.run(init)
    train_saver = tf.train.Saver()
    val_saver = tf.train.Saver()
    if continue_from_epoch != -1:  # load checkpoint if needed
        checkpoint = "saved_models/{}_{}.ckpt".format(experiment_name,
                                                      continue_from_epoch)
        variables_to_restore = []
        for var in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES):
            variables_to_restore.append(var)
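The source cuts this example off inside the loop; the body above is filled in from the parallel code in Example #3. A common way to use the collected list is a Saver restricted to those variables, sketched below; the restorer name and the restore call are assumptions, not the repository's code:

        # Hypothetical continuation: restore only the collected variables.
        restorer = tf.train.Saver(var_list=variables_to_restore)
        restorer.restore(sess, checkpoint)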
Example #3
else:
    raise ValueError("Unsupported dataset.")

experiment = ExperimentBuilder(data)
one_shot, losses, c_error_opt_op, init = experiment.build_experiment(
    batch_size, classes_train, classes_test, samples_per_class,
    queries_per_class, channels, image_size, fce, network_name)

# Training schedule
total_epochs = 300
total_train_batches = 1000
total_val_batches = 100
total_test_batches = 250

save_statistics(experiment_name, [
    "epoch", "train_c_loss", "train_c_accuracy", "val_loss", "val_accuracy",
    "test_c_loss", "test_c_accuracy"
])

# Experiment initialization and running
with tf.Session() as sess:
    sess.run(init)
    saver = tf.train.Saver()
    if continue_from_epoch != -1:  # load checkpoint if needed
        checkpoint = "{}_{}.ckpt".format(experiment_name, continue_from_epoch)
        variables_to_restore = []
        for var in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES):
            print(var)
            variables_to_restore.append(var)

        tf.logging.info('Fine-tuning from %s' % checkpoint)
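The example ends at the logging call. A plausible continuation, assuming the collected variables are restored with a dedicated Saver and a checkpoint is written once per epoch; the fine_tune_saver name and the loop body are ours, not the original author's:

        # Hypothetical continuation (assumed, not from the original source):
        fine_tune_saver = tf.train.Saver(var_list=variables_to_restore)
        fine_tune_saver.restore(sess, checkpoint)

    for epoch in range(total_epochs):
        for _ in range(total_train_batches):
            _, batch_losses = sess.run([c_error_opt_op, losses])
        # Persist weights after each epoch so training can resume later.
        saver.save(sess, "{}_{}.ckpt".format(experiment_name, epoch))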