Example #1
from data import MetaLearningSystemDataLoader
from experiment_builder import ExperimentBuilder
from few_shot_learning_system import MAMLFewShotClassifier
from utils.parser_utils import get_args
from utils.dataset_tools import maybe_unzip_dataset

# Combines the arguments, model, data and experiment builders to run an experiment
args, device = get_args()
model = MAMLFewShotClassifier(args=args, device=device,
                              im_shape=(2, args.image_channels,
                                        args.image_height, args.image_width))
maybe_unzip_dataset(args=args)
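# note: the class itself is passed here (not an instance); presumably ExperimentBuilder constructs it internally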
data = MetaLearningSystemDataLoader
maml_system = ExperimentBuilder(model=model, data=data, args=args, device=device)
maml_system.run_experiment()
Example #2
import data as dataset
from experiment_builder import ExperimentBuilder
from utils.parser_util import get_args

batch_size, num_gpus, args = get_args()
#set the data provider to use for the experiment
data = dataset.VGGFaceDAGANDataset(batch_size=batch_size,
                                   last_training_class_index=1600,
                                   reverse_channels=True,
                                   num_of_gpus=num_gpus,
                                   gen_batches=10)
#init experiment
experiment = ExperimentBuilder(args, data=data)
#run experiment
experiment.run_experiment()
Example #3
train_data = torch.utils.data.DataLoader(voc_train,  # opening line truncated in the source; inferred from the parallel val/test loaders below
                                         batch_size=args.batch_size,
                                         shuffle=True,
                                         num_workers=4,
                                         drop_last=True)
val_data = torch.utils.data.DataLoader(voc_val,
                                       batch_size=args.batch_size,
                                       shuffle=False,
                                       num_workers=4,
                                       drop_last=True)
test_data = torch.utils.data.DataLoader(voc_test,
                                        batch_size=args.batch_size,
                                        shuffle=False,
                                        num_workers=4,
                                        drop_last=True)

custom_net = DeepLab(args.output_stride)

ss_experiment = ExperimentBuilder(network_model=custom_net,
                                  num_class=args.num_class,
                                  experiment_name=args.experiment_name,
                                  num_epochs=args.num_epochs,
                                  train_data=train_data,
                                  val_data=val_data,
                                  test_data=test_data,
                                  learn_rate=args.learn_rate,
                                  momentum=args.momentum,
                                  weight_decay=args.weight_decay,
                                  use_gpu=args.use_gpu,
                                  continue_from_epoch=args.continue_from_epoch)

experiment_metrics, test_metrics = ss_experiment.run_experiment()
Example #4
testset = torchvision.datasets.CIFAR100(root='./data',  # dataset class inferred from num_output_classes = 100; the snippet's opening is truncated
                                        train=False,
                                        download=True,
                                        transform=transform_test)
test_data = torch.utils.data.DataLoader(testset,
                                        batch_size=100,
                                        shuffle=False,
                                        num_workers=2)

num_output_classes = 100

custom_conv_net = ConvolutionalNetwork(  # initialize our network object, in this case a ConvNet
    input_shape=(args.batch_size, args.image_num_channels, args.image_height,
                 args.image_width),
    dim_reduction_type=args.dim_reduction_type,
    num_filters=args.num_filters,
    num_layers=args.num_layers,
    use_bias=False,
    num_output_classes=num_output_classes)

conv_experiment = ExperimentBuilder(
    network_model=custom_conv_net,
    experiment_name=args.experiment_name,
    num_epochs=args.num_epochs,
    weight_decay_coefficient=args.weight_decay_coefficient,
    gpu_id=args.gpu_id,
    use_gpu=args.use_gpu,
    continue_from_epoch=args.continue_from_epoch,
    train_data=train_data,
    val_data=val_data,
    test_data=test_data)  # build an experiment object
experiment_metrics, test_metrics = conv_experiment.run_experiment(
)  # run experiment and return experiment metrics
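
These examples come from different codebases, so each ExperimentBuilder has its own signature. As a rough sketch only (not the actual class from any repository above), the supervised examples are consistent with an interface like this:

import torch

class ExperimentBuilder(object):
    # Illustrative skeleton: store the model, data loaders and config;
    # run_experiment() trains for num_epochs and returns metric dicts.
    def __init__(self, network_model, experiment_name, num_epochs,
                 train_data, val_data, test_data,
                 use_gpu=False, continue_from_epoch=-1, **extra_config):
        self.model = network_model
        self.experiment_name = experiment_name
        self.num_epochs = num_epochs
        self.train_data, self.val_data, self.test_data = train_data, val_data, test_data
        self.device = torch.device('cuda' if use_gpu and torch.cuda.is_available() else 'cpu')
        self.continue_from_epoch = continue_from_epoch

    def run_experiment(self):
        experiment_metrics = {'train_loss': [], 'val_loss': []}
        for epoch in range(self.num_epochs):
            pass  # one pass over train_data, then val_data, each epoch
        test_metrics = {'test_loss': []}  # final pass over test_data
        return experiment_metrics, test_metrics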
Example #5
def main():

    # downsampling factor
    N = 4

    # define paths to image and mask files
    cwd = Path(os.getcwd())
    par = cwd.parent
    data_path = str(par / "data/DAVIS/JPEGImages/480p/")
    mask_path = str(par / "data/DAVIS/Annotations/480p/")

    # training, validation and test split
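    # (presumably cumulative fractions: first 50% -> train, to 70% -> validation, rest -> test)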
    tvt_split = (0.5, 0.7)

    # get datasets
    X_train, X_val, X_test, y_train, y_val, y_test = generate_dataset_static(
        data_path, mask_path, tvt_split, N)
    X_train_t, X_val_t, X_test_t, y_train_t, y_val_t, y_test_t = generate_dataset_temporal(
        data_path, mask_path, tvt_split, N)

    # reshape datasets from NHWC to NCHW to match the CNN input convention
    X_train = np.array(X_train).swapaxes(1, 3).swapaxes(2, 3)
    X_val = np.array(X_val).swapaxes(1, 3).swapaxes(2, 3)
    X_test = np.array(X_test).swapaxes(1, 3).swapaxes(2, 3)
    print(X_train.shape)
    print(X_val.shape)
    print(X_test.shape)
    y_train = np.array(y_train)
    y_val = np.array(y_val)
    y_test = np.array(y_test)
    print(y_train.shape)
    print(y_val.shape)
    print(y_test.shape)

    X_train_t = np.array(X_train_t).swapaxes(-1, -3).swapaxes(-2, -1)
    X_val_t = np.array(X_val_t).swapaxes(-1, -3).swapaxes(-2, -1)
    X_test_t = np.array(X_test_t).swapaxes(-1, -3).swapaxes(-2, -1)
    print(X_train_t.shape)
    print(X_val_t.shape)
    print(X_test_t.shape)
    y_train_t = np.array(y_train_t)
    y_val_t = np.array(y_val_t)
    y_test_t = np.array(y_test_t)
    print(y_train_t.shape)
    print(y_val_t.shape)
    print(y_test_t.shape)

    #put data into data provider objects
    batch_size = 25
    train_data = data_providers.DataProvider(X_train,
                                             y_train,
                                             batch_size,
                                             shuffle_order=True)
    val_data = data_providers.DataProvider(X_val,
                                           y_val,
                                           batch_size,
                                           shuffle_order=True)
    test_data = data_providers.DataProvider(X_test,
                                            y_test,
                                            batch_size,
                                            shuffle_order=True)

    batch_size = 25
    train_data_t = data_providers.DataProvider(X_train_t,
                                               y_train_t,
                                               batch_size,
                                               shuffle_order=True)
    val_data_t = data_providers.DataProvider(X_val_t,
                                             y_val_t,
                                             batch_size,
                                             shuffle_order=True)
    test_data_t = data_providers.DataProvider(X_test_t,
                                              y_test_t,
                                              batch_size,
                                              shuffle_order=True)

    inputs_shape = X_train[:batch_size].shape
    print(inputs_shape)

    inputs_shape_t = X_train_t[:batch_size].shape
    print(inputs_shape_t)

    print("Time to make networks!")

    # build networks of different depths for the static and temporal datasets
    static_net_shallow = ShallowNetwork(input_shape=inputs_shape)
    static_net_deeper = DeeperNetwork(input_shape=inputs_shape)
    temporal_net_shallow = ShallowNetwork(input_shape=inputs_shape_t)
    temporal_net_deeper = DeeperNetwork(input_shape=inputs_shape_t)
    static_net_deepest = DeepestNetwork(input_shape=inputs_shape)
    temporal_net_deepest = DeepestNetwork(input_shape=inputs_shape_t)

    # declare variables for experiments
    experiment_name = "static_run_shallow"
    num_epochs = 30
    use_gpu = False
    continue_from_epoch = -1

    # build experiment and run
    experiment_1 = ExperimentBuilder(
        network_model=static_net_shallow,
        experiment_name=experiment_name,
        num_epochs=num_epochs,
        use_gpu=use_gpu,
        continue_from_epoch=continue_from_epoch,
        train_data=train_data,
        val_data=val_data,
        test_data=test_data)  # build an experiment object
    experiment_metrics, test_metrics = experiment_1.run_experiment(
    )  # run experiment and return experiment metrics

    experiment_name = "static_run_deeper"
    num_epochs = 30
    use_gpu = True
    continue_from_epoch = -1

    experiment_2 = ExperimentBuilder(
        network_model=static_net_deeper,
        experiment_name=experiment_name,
        num_epochs=num_epochs,
        use_gpu=use_gpu,
        continue_from_epoch=continue_from_epoch,
        train_data=train_data,
        val_data=val_data,
        test_data=test_data)  # build an experiment object
    experiment_metrics, test_metrics = experiment_2.run_experiment(
    )  # run experiment and return experiment metrics

    experiment_name = "temporal_run_shallow"
    num_epochs = 30
    use_gpu = True
    continue_from_epoch = -1

    experiment_3 = ExperimentBuilder(
        network_model=temporal_net_shallow,
        experiment_name=experiment_name,
        num_epochs=num_epochs,
        use_gpu=use_gpu,
        continue_from_epoch=continue_from_epoch,
        train_data=train_data_t,
        val_data=val_data_t,
        test_data=test_data_t)  # build an experiment object
    experiment_metrics, test_metrics = experiment_3.run_experiment(
    )  # run experiment and return experiment metrics

    experiment_name = "temporal_run_deeper"
    num_epochs = 30
    use_gpu = True
    continue_from_epoch = -1

    experiment_4 = ExperimentBuilder(
        network_model=temporal_net_deeper,
        experiment_name=experiment_name,
        num_epochs=num_epochs,
        use_gpu=use_gpu,
        continue_from_epoch=continue_from_epoch,
        train_data=train_data_t,
        val_data=val_data_t,
        test_data=test_data_t)  # build an experiment object
    experiment_metrics, test_metrics = experiment_4.run_experiment(
    )  # run experiment and return experiment metrics

    experiment_name = "static_run_deepest"
    num_epochs = 30
    use_gpu = True
    continue_from_epoch = -1

    experiment_5 = ExperimentBuilder(
        network_model=static_net_deepest,
        experiment_name=experiment_name,
        num_epochs=num_epochs,
        use_gpu=use_gpu,
        continue_from_epoch=continue_from_epoch,
        train_data=train_data,
        val_data=val_data,
        test_data=test_data)  # build an experiment object
    experiment_metrics, test_metrics = experiment_5.run_experiment(
    )  # run experiment and return experiment metrics

    experiment_name = "temporal_run_deepest"
    num_epochs = 30
    use_gpu = True
    continue_from_epoch = -1

    experiment_6 = ExperimentBuilder(
        network_model=temporal_net_deepest,
        experiment_name=experiment_name,
        num_epochs=num_epochs,
        use_gpu=use_gpu,
        continue_from_epoch=continue_from_epoch,
        train_data=train_data_t,
        val_data=val_data_t,
        test_data=test_data_t)  # build an experiment object
    experiment_metrics, test_metrics = experiment_6.run_experiment(
    )  # run experiment and return experiment metrics
Example #6
# (snippet opening truncated: the first two lines below close an ExperimentBuilder call from the preceding branch)
        train_data_unlabeled=train_unlabeled_loader,
        lambda_u=args.loss_lambda_u)
elif args.use_fix_match:
    print('Fix Match')

    fine_list = None
    if args.fine_tune:
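        # layer-name prefixes presumably left trainable; everything else frozen during fine-tuning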
        fine_list = ['denseblock_4', 'final_bn', 'final_classifier']
        # fine_list = ['final_bn', 'final_classifier']

    bhc_experiment = ExperimentBuilderFixMatch(
        network_model=model,
        use_gpu=args.use_gpu,
        experiment_name=args.experiment_name,
        num_epochs=args.num_epochs,
        continue_from_epoch=args.continue_from_epoch,
        train_data=train_loader,
        val_data=val_loader,
        test_data=test_loader,
        optimiser=args.optim_type,
        optim_params=optimizer_params,
        scheduler=args.sched_type,
        sched_params=scheduler_params,
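        # FixMatch-specific settings: unlabeled loader, unlabeled-loss weight lambda_u, and the pseudo-label confidence threshold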
        train_data_unlabeled=train_unlabeled_loader,
        lambda_u=args.loss_lambda_u,
        threshold=args.fm_conf_threshold,
        pretrained_weights_locations=args.pretrained_weights_locations,
        fine_tune_list=fine_list)

experiment_metrics, test_metrics = bhc_experiment.run_experiment()
Example #7
experiment_builder = ExperimentBuilder(  # opening truncated in the source; names inferred from the run_experiment() call below
    model=LSTM(gpus=args.gpus,
               batch_size=args.batch_size,
               segment_size=args.segment_size,
               num_features=args.window_size**2,
               num_layers=args.num_layers,
               hidden_size=args.hidden_size,
               session_holder=session_holder,
               learning_rate=args.learning_rate),
    experiment_name=args.experiment_name,
    num_epochs=args.num_epochs,
    train_data=WindowedDataProvider(data_reader=data_reader(
        data_folder=args.data_path, which_set='train'),
                                    window_size=args.window_size,
                                    segment_size=args.segment_size,
                                    batch_size=args.batch_size,
                                    shuffle_order=True,
                                    rng=rng,
                                    fraction_of_data=args.fraction_of_data),
    val_data=WindowedDataProvider(data_reader=data_reader(
        data_folder=args.data_path, which_set='valid'),
                                  window_size=args.window_size,
                                  segment_size=args.segment_size,
                                  batch_size=args.batch_size,
                                  shuffle_order=True,
                                  rng=rng),
    continue_from_epoch=args.continue_from_epoch)

with tf.Session() as sess:
    session_holder.set_sess(sess)
    experiment_builder.run_experiment()
Example #8
    custom_net = TwoStreamNetwork(input_shape=(batch_size, num_flow_channels,
                                               image_height, image_width),
                                  dropout_rate=args.dropout_rate)

    experiment = ExperimentBuilder(
        network_model=custom_net,
        experiment_name=experiment_name,
        num_epochs=num_epochs,
        weight_decay_coefficient=weight_decay_coefficient,
        lr=args.lr,
        gpu_id=gpu_id,
        use_gpu=use_gpu,
        continue_from_epoch=continue_from_epoch,
        num_batches=num_batches,
        train_data=train_data,
        valid_data=val_data,
        test_data=test_data,
        pretrained=args.pretrained,
        schedule=args.schedule)  # build an experiment object

    if not args.only_output_results:
        total_losses, test_losses = experiment.run_experiment(
        )  # run experiment and return experiment metrics
        print(total_losses, test_losses)
    else:
        # valid_losses = experiment.output_summary(type='valid', epoch_idx=continue_from_epoch)
        test_losses = experiment.output_summary(type='test',
                                                epoch_idx=continue_from_epoch)

    print("Done")
Example #9
# (snippet opening truncated: the next two lines close an earlier constructor call)
              num_filters=pnn_hidden_dim,
              num_layers=pnn_layers)

    bridge_experiment = ExperimentBuilder(
        estimation_model=enn,
        policy_model=pnn,
        num_epochs=num_epochs,
        weight_decay_coefficient=weight_decay_coefficient,
        use_gpu=use_gpu,
        train_data=train_data_loader,
        val_data=val_data_loader,
        test_data=test_data_loader,
        lr=lr)  # build an experiment object

    if use_enn:
        enn_stats, pnn_stats = bridge_experiment.run_experiment()
        plot_result_graphs('baseline', enn_stats, pnn_stats)

        enn_accs, pnn_accs = bridge_experiment.eval_bid_length()
        plot_length_graphs('bidding_length', enn_accs, pnn_accs)
    else:
        pnn_stats = bridge_experiment.run_experiment()
        plot_pnn_graphs('only_pnn', pnn_stats)

        pnn_accs = bridge_experiment.eval_bid_length()
        plot_length_graphs('bidding_length', None, pnn_accs)

    if use_enn:
        enn_accs, enn_recalls = bridge_experiment.eval_ordered_card()
        plot_acc_recall_graphs('ordered_card', enn_accs, enn_recalls)
Example #10
# (snippet opening truncated: the keyword arguments below close an ExperimentBuilder call from a branch that trains the model from scratch)
        num_epochs=args.num_epochs,
        weight_decay_coefficient=args.weight_decay_coefficient,
        continue_from_epoch=args.continue_from_epoch,
        train_data=train_data,
        val_data=val_data,
        test_data=test_data)  # build an experiment object
    fine_tuning_on_target_conv_net = from_scratch_conv_net
else:
    fine_tuning_on_target_conv_net = torch.load(args.load_pretrained)

target_task_training = ExperimentBuilder(
    network_model=fine_tuning_on_target_conv_net,
    use_gpu=args.use_gpu,
    experiment_name=args.experiment_name,
    num_epochs=args.num_epochs,
    weight_decay_coefficient=args.weight_decay_coefficient,
    continue_from_epoch=args.continue_from_epoch,
    train_data=train_data,
    val_data=val_data,
    test_data=test_data)  # build an experiment object

experiment_metrics, test_metrics = target_task_training.run_experiment(
)  # run experiment and return experiment metrics

#1. Add a new dataset which contains our source data, consisting of a merge of the Omniglot + EMNIST datasets.
#2. Add functionality that can freeze certain layers in a network, to be fine-tuned on the target task (see the sketch below).
#3. Implement two new tasks:
#   i. Given some source data -> pretrain a randomly initialized model on that data
#   ii. Given some target data and a pretrained model -> fine-tune the network on the target data

# add new flag load_pretrained
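
# Task 2 above (freezing layers) is not shown in this snippet. A minimal
# PyTorch sketch, assuming a standard nn.Module and hypothetical layer-name
# prefixes, might look like this:
import torch.nn as nn

def freeze_except(model: nn.Module, trainable_prefixes):
    # Freeze every parameter whose name does not start with one of the given
    # prefixes; only the listed layers keep requires_grad=True.
    for name, param in model.named_parameters():
        param.requires_grad = any(name.startswith(p) for p in trainable_prefixes)

# e.g. freeze_except(fine_tuning_on_target_conv_net, ['logits_linear_layer'])
# ('logits_linear_layer' is a hypothetical layer name)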
Example #11
from data import MetaLearningSystemDataLoader
from experiment_builder import ExperimentBuilder
from meta_learning_system import SceneAdaptiveInterpolation
from config import get_args

args, _ = get_args()
print(args)
model = SceneAdaptiveInterpolation(args)
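# as in Example #1, the data loader class itself is passed rather than an instance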
data = MetaLearningSystemDataLoader
savfi_system = ExperimentBuilder(model=model, data=data, args=args)
savfi_system.run_experiment()