# NOTE(review): this fragment starts mid-call — the Omniglot dataset
# constructor's opening line (and the imports / `args` parsing) is outside
# this view; the lines below are its remaining keyword arguments.
train_val_test_split=(1200 / 1622, 211 / 1622, 211 / 1622),  # fractions sum to 1 over 1622 items
    samples_per_iter=1,
    num_workers=4,  # parallel data-loading workers
    data_path="datasets/omniglot_data",
    name="omniglot_data",
    index_of_folder_indicating_class=-2,  # presumably the class comes from the parent folder in the path — confirm in dataset code
    reset_stored_filepaths=False,  # reuse cached file-path index if present
    num_samples_per_class=args.samples_per_class,  # K-shot
    num_classes_per_set=args.classes_per_set,      # N-way
    label_as_int=True)

# Build the TF graph for the one-shot Omniglot experiment: returns the model
# handle, loss tensors, the combined error/optimizer op, and the init op.
experiment = ExperimentBuilder(data)
one_shot_omniglot, losses, c_error_opt_op, init = experiment.build_experiment(
    args.batch_size,
    args.classes_per_set,
    args.samples_per_class,
    args.use_full_context_embeddings,
    full_context_unroll_k=args.full_context_unroll_k,
    args=args)
# The same iteration budget is used for the train, validation and test phases.
total_train_batches = args.total_iter_per_epoch
total_val_batches = args.total_iter_per_epoch
total_test_batches = args.total_iter_per_epoch

# Create the output folders for saved checkpoints and statistics logs.
saved_models_filepath, logs_filepath = build_experiment_folder(
    args.experiment_title)

# Write the CSV header row for the per-epoch statistics.
# NOTE(review): this call is truncated at the end of this view.
save_statistics(logs_filepath, [
    "epoch", "total_train_c_loss_mean", "total_train_c_loss_std",
    "total_train_accuracy_mean", "total_train_accuracy_std",
    "total_val_c_loss_mean", "total_val_c_loss_std", "total_val_accuracy_mean",
    "total_val_accuracy_std", "total_test_c_loss_mean",
# Exemplo n.º 2  (Example no. 2 — scraped-listing separator)
# 0
    # NOTE(review): indented body of a function whose `def` line (and the
    # definitions of `sp` and `opt`) lies outside this view. `sp` appears to
    # scale batch size down and val/test batch counts up — confirm upstream.
    batch_size = int(32 // sp)  #  default 32 for 5way1shot
    classes_per_set = opt.way  #20
    samples_per_class = opt.shot
    # N-way, K-shot
    continue_from_epoch = opt.ckp  # use -1 to start from scratch
    logs_path = "one_shot_outputs/"
    experiment_name = "LGM-Net_{}way{}shot".format(classes_per_set,
                                                   samples_per_class)

    # Experiment builder: MiniImageNet episodic dataset + TF graph construction.
    data = dataset.MiniImageNetDataSet(batch_size=batch_size,
                                       classes_per_set=classes_per_set,
                                       samples_per_class=samples_per_class,
                                       shuffle_classes=True)
    experiment = ExperimentBuilder(data)
    one_shot_miniImagenet, losses, c_error_opt_op, init = experiment.build_experiment(
        batch_size, classes_per_set, samples_per_class)
    # Fixed training schedule; eval batch counts scale with `sp`.
    total_epochs = 120
    total_train_batches = 1000
    total_val_batches = int(250 * sp)
    total_test_batches = int(250 * sp)

    # Record the experiment configuration, then the CSV header row.
    logs = "{}way{}shot learning problems, with {} tasks per task batch".format(
        classes_per_set, samples_per_class, batch_size)
    save_statistics(experiment_name, ["Experimental details: {}".format(logs)])
    save_statistics(experiment_name, [
        "epoch", "train_c_loss", "train_c_accuracy", "val_loss",
        "val_accuracy", "test_c_loss", "test_c_accuracy", "learning_rate"
    ])

    # Experiment initialization and running (TF1 session config; fragment is
    # truncated here — session creation is outside this view).
    config = tf.ConfigProto()
# Exemplo n.º 3  (Example no. 3 — scraped-listing separator)
# 0
# NOTE(review): `k`, `batch_size`, `classes_per_set`, `samples_per_class` and
# `fce` are defined above this view. `k` is presumably a cross-validation fold
# index — confirm against the ADNIDataset implementation.
logs_path = "one_shot_outputs_ADNI/"
experiment_name = "one_shot_ADNI_{}fold_{}_{}".format(k, samples_per_class,
                                                      classes_per_set)

# Experiment builder

# data = dataset.OmniglotNShotDataset(batch_size=batch_size, classes_per_set=classes_per_set,
#                                     samples_per_class=samples_per_class)

# ADNI (Alzheimer's neuroimaging) episodic dataset for the given fold.
data = dataset.ADNIDataset(k=k,
                           batch_size=batch_size,
                           classes_per_set=classes_per_set,
                           samples_per_class=samples_per_class)

# Build the TF graph; `fce` toggles full-context embeddings.
experiment = ExperimentBuilder(data)
one_shot_omniglot, losses, c_error_opt_op, init = experiment.build_experiment(
    batch_size, classes_per_set, samples_per_class, fce)

# Very small batch budget — looks like a smoke-test configuration.
total_epochs = 100
total_train_batches = 5
total_val_batches = 2
total_test_batches = 2

# CSV header row for the per-epoch statistics.
save_statistics(experiment_name, [
    "epoch", "train_c_loss", "train_c_accuracy", "val_loss", "val_accuracy",
    "test_c_loss", "test_c_accuracy"
])

# summary_path = "/summary/%d" % (int(time.time()))
# Experiment initialization and running
# NOTE(review): the session body is truncated at the end of this view.

with tf.Session() as sess:
# Exemplo n.º 4  (Example no. 4 — scraped-listing separator)
# 0
    # NOTE(review): this fragment starts inside an if/elif chain dispatching on
    # `data_name`; the preceding branch(es) and the definitions of the config
    # variables (`batch_size`, `data_name`, `fce`, `network_name`, ...) are
    # outside this view. This branch: CIFAR-100 episodic dataset.
    data = dataset.CIFAR_100(batch_size=batch_size,
                             samples_per_class=samples_per_class,
                             queries_per_class=queries_per_class)

elif data_name == "omniglot":
    # Omniglot: 28x28 grayscale, with augmentation enabled.
    channels = 1
    image_size = 28
    augment = True
    data = dataset.OmniglotNShotDataset(batch_size=batch_size,\
        classes_per_set=classes_per_set, samples_per_class=samples_per_class, queries_per_class = queries_per_class)
else:
    print("Unsupported dataset.")
    assert False  # hard stop on unknown dataset name (stripped under -O)

# Build the TF graph; train/test can use different numbers of classes.
experiment = ExperimentBuilder(data)
one_shot, losses, c_error_opt_op, init = experiment.build_experiment(batch_size,\
    classes_train, classes_test, samples_per_class, queries_per_class, channels, image_size, fce, network_name)

# Training schedule (epochs and per-phase batch counts).
total_epochs = 300
total_train_batches = 1000
total_val_batches = 100
total_test_batches = 250

# CSV header row for the per-epoch statistics.
save_statistics(experiment_name, [
    "epoch", "train_c_loss", "train_c_accuracy", "val_loss", "val_accuracy",
    "test_c_loss", "test_c_accuracy"
])

# Experiment initialization and running (TF1 session; the training loop is
# truncated at the end of this view).
with tf.Session() as sess:
    sess.run(init)  # initialize all graph variables before training