Example #1
def main():

    new_model = DeepestNetwork((25, 3, 120, 214))

    N = 4

    cwd = Path(os.getcwd())
    par = cwd.parent
    data_path = str(par / "data/DAVIS/JPEGImages/480p/")
    mask_path = str(par / "data/DAVIS/Annotations/480p/")

    tvt_split = (0.5, 0.7)

    X_train_t, X_val_t, X_test_t, y_train_t, y_val_t, y_test_t = generate_dataset_temporal(
        data_path, mask_path, tvt_split, N)

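    # move the trailing channel axis in front of the spatial axes (channels-first layout expected by the PyTorch models)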
    X_train_t = np.array(X_train_t).swapaxes(-1, -3).swapaxes(-2, -1)
    X_val_t = np.array(X_val_t).swapaxes(-1, -3).swapaxes(-2, -1)
    X_test_t = np.array(X_test_t).swapaxes(-1, -3).swapaxes(-2, -1)
    print(X_train_t.shape)
    print(X_val_t.shape)
    print(X_test_t.shape)
    y_train_t = np.array(y_train_t)
    y_val_t = np.array(y_val_t)
    y_test_t = np.array(y_test_t)
    print(y_train_t.shape)
    print(y_val_t.shape)
    print(y_test_t.shape)

    batch_size = 25
    train_data_t = data_providers.DataProvider(X_train_t,
                                               y_train_t,
                                               batch_size,
                                               shuffle_order=True)
    val_data_t = data_providers.DataProvider(X_val_t,
                                             y_val_t,
                                             batch_size,
                                             shuffle_order=True)
    test_data_t = data_providers.DataProvider(X_test_t,
                                              y_test_t,
                                              batch_size,
                                              shuffle_order=True)

    eb = ExperimentBuilder(new_model, "get_bear", 1, train_data_t, val_data_t,
                           test_data_t, True)

    model_path = Path(os.getcwd())
    model_path = model_path / "static_run_deepest" / "saved_models"

    bear_path = Path(
        os.getcwd()).parent / "data" / "DAVIS" / "JPEGImages" / "480p" / "bear"

    bear = np.asarray(
        Image.open(str(bear_path / "00001.jpg")).convert(mode="RGB"))

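    # downsample the frame by a factor of 4, move channels first, and add a batch dimension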
    inp = torch.Tensor(
        down_sample(np.asarray(bear),
                    4).swapaxes(0, 2).swapaxes(1, 2)).unsqueeze(0)

    out = eb.get_bear(model_path, inp)
    out = out.squeeze()

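    # threshold the sigmoid output at 0.5 to obtain a binary mask, then scale it to 0/255 for an 8-bit grayscale image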
    predicted = F.sigmoid(out) > 0.5

    mask = predicted.cpu().numpy().astype('uint8')

    mask = 255 * mask

    mask_img = Image.fromarray(mask, mode='L')

    bear = down_sample(bear, 4)
    bear = Image.fromarray(bear)

    overlay = overlay_segment(bear, mask_img)

    overlay.save("cnnbear.png")
Example #2
scheduler_params = {
    'lr_max': args.learn_rate_max,
    'lr_min': args.learn_rate_min,
    'erf_alpha': args.erf_sched_alpha,
    'erf_beta': args.erf_sched_beta
}

if not args.use_mix_match and not args.use_fix_match:
    print('No Mix Match')
    bhc_experiment = ExperimentBuilder(
        network_model=model,
        use_gpu=args.use_gpu,
        experiment_name=args.experiment_name,
        num_epochs=args.num_epochs,
        continue_from_epoch=args.continue_from_epoch,
        train_data=train_loader,
        val_data=val_loader,
        test_data=test_loader,
        optimiser=args.optim_type,
        optim_params=optimizer_params,
        scheduler=args.sched_type,
        sched_params=scheduler_params)
elif args.use_mix_match:
    print('Mix Match')
    bhc_experiment = ExperimentBuilderMixMatch(
        network_model=model,
        use_gpu=args.use_gpu,
        experiment_name=args.experiment_name,
        num_epochs=args.num_epochs,
        continue_from_epoch=args.continue_from_epoch,
        train_data=train_loader,
Example #3
                                      num_output_classes=num_output_classes,
                                      hidden_size=args.lstm_hidden_dim,
                                      attention_kernel_size=args.encoder_output_size,
                                      use_bias=True,
                                      num_att_layers=2,
                                      embedding_matrix=embedding_matrix)

    model_2 = StackedAttentionNetwork(desc_input_shape=(args.batch_size, 102),
                                      img_input_shape=(args.batch_size, 512, 14, 14),
                                      num_output_classes=2,
                                      hidden_size=args.lstm_hidden_dim,
                                      attention_kernel_size=args.encoder_output_size,
                                      use_bias=True,
                                      num_att_layers=2,
                                      embedding_matrix=embedding_matrix)

siamese_model = SiameseNetwork(item_1_model=model_1, item_2_model=model_2, encoder_output_size=args.encoder_output_size,
                               fc1_size=args.fc1_size, fc2_size=args.fc2_size, use_bias=True)

experiment = ExperimentBuilder(network_model=siamese_model,
                               experiment_name=args.experiment_name,
                               num_epochs=args.num_epochs,
                               learning_rate=args.lr,
                               weight_decay_coefficient=args.weight_decay_coefficient,
                               continue_from_epoch=args.continue_from_epoch,
                               device=device,
                               train_data=training_data_loader,
                               val_data=valid_data_loader,
                               test_data=test_data_loader)  # build an experiment object
experiment_metrics, test_metrics = experiment.run_experiment()  # run experiment and return experiment metrics
Example #4
import argparse
import data as dataset
from experiment_builder import ExperimentBuilder

parser = argparse.ArgumentParser(description='Welcome to GAN-Shot-Learning script')
parser.add_argument('--batch_size', nargs="?", type=int, default=32, help='batch_size for experiment')
parser.add_argument('--discriminator_inner_layers', nargs="?", type=int, default=1,
                    help='Number of inner layers per multi layer in the discriminator')
parser.add_argument('--generator_inner_layers', nargs="?", type=int, default=1,
                    help='Number of inner layers per multi layer in the generator')
parser.add_argument('--experiment_title', nargs="?", type=str, default="omniglot_dagan_experiment",
                    help='Experiment name')
parser.add_argument('--continue_from_epoch', nargs="?", type=int, default=-1, help='continue from checkpoint of epoch')
parser.add_argument('--num_of_gpus', nargs="?", type=int, default=1, help='Number of GPUs to use for training')
parser.add_argument('--z_dim', nargs="?", type=int, default=100, help='The dimensionality of the z input')
parser.add_argument('--dropout_rate_value', type=float, default=0.5,
                    help='A dropout rate placeholder or a scalar to use throughout the network')
parser.add_argument('--num_generations', nargs="?", type=int, default=64,
                    help='The number of samples generated for use in the spherical interpolations at the end of each epoch')

args = parser.parse_args()
batch_size = args.batch_size
num_gpus = args.num_of_gpus
# set the data provider to use for the experiment
data = dataset.PlantDAGANDataset(batch_size=batch_size, last_training_class_index=900,
                                 reverse_channels=True,
                                 num_of_gpus=num_gpus, gen_batches=10)
# init experiment
experiment = ExperimentBuilder(parser, data=data)
# run experiment
experiment.run_experiment()
Example #5
from data import MetaLearningSystemDataLoader
from experiment_builder import ExperimentBuilder
from few_shot_learning_system import MAMLFewShotClassifier
from utils.parser_utils import get_args

args, device = get_args()
model = MAMLFewShotClassifier(args=args, device=device)
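# the data loader class (not an instance) is passed in; it is presumably instantiated inside ExperimentBuilder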
data = MetaLearningSystemDataLoader
maml_system = ExperimentBuilder(model=model,
                                data=data,
                                args=args,
                                device=device)
test_loss_mean, test_loss_std, val_c_index = maml_system.test_experiment(
    args.test_model)

with open("TEST_RESULTS_11_02_2021.txt", "a+") as f:
    f.write("{},{},{},{}\n".format(args.test_path, test_loss_mean,
                                   test_loss_std, val_c_index))
Example #6
                                       batch_size=args.batch_size,
                                       shuffle=False,
                                       num_workers=4)
test_data = torch.utils.data.DataLoader(voc_test,
                                        batch_size=args.batch_size,
                                        shuffle=False,
                                        num_workers=4)

custom_net = DeepLab(args.output_stride, args.num_class)

ss_experiment = ExperimentBuilder(network_model=custom_net,
                                  num_class=args.num_class,
                                  experiment_name=args.experiment_name,
                                  num_epochs=args.num_epochs,
                                  train_data=train_data,
                                  val_data=val_data,
                                  test_data=test_data,
                                  learn_rate=args.learn_rate,
                                  mementum=args.mementum,
                                  weight_decay=args.weight_decay,
                                  use_gpu=args.use_gpu,
                                  continue_from_epoch=args.continue_from_epoch)

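# use the non-interactive 'agg' backend so matplotlib can save figures without a display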
plt.switch_backend('agg')
dirName = './image/' + args.experiment_name
if not os.path.exists(dirName):
    os.mkdir(dirName)
evaluator = Evaluator(args.num_class)
for idx, (img, tag) in enumerate(val_data):
    predicts = ss_experiment.run_predicted_iter(img, tag)
    for i in range(args.batch_size):
        image = img[i]
Example #7
from data import MetaLearningSystemDataLoader
from experiment_builder import ExperimentBuilder
from few_shot_learning_system import MAMLFewShotClassifier
from utils.parser_utils import get_args
from utils.dataset_tools import maybe_unzip_dataset

# Combines the arguments, model, data and experiment builders to run an experiment
args, device = get_args()
model = MAMLFewShotClassifier(args=args, device=device,
                              im_shape=(2, args.image_channels,
                                        args.image_height, args.image_width))
maybe_unzip_dataset(args=args)
data = MetaLearningSystemDataLoader
maml_system = ExperimentBuilder(model=model, data=data, args=args, device=device)
maml_system.run_experiment()
Example #8
                       seed=args.seed,
                       sets_are_pre_split=args.sets_are_pre_split,
                       load_into_memory=args.load_into_memory, set_name='test',
                       num_tasks_per_epoch=600,
                       num_channels=args.image_channels,
                       num_support_sets=args.num_support_sets,
                       overwrite_classes_in_each_task=args.overwrite_classes_in_each_task,
                       class_change_interval=args.class_change_interval)

train_data = FewShotLearningDatasetParallel(**train_setup_dict)

val_data = FewShotLearningDatasetParallel(**val_setup_dict)

test_data = FewShotLearningDatasetParallel(**test_setup_dict)

data_dict = {'train': DataLoader(train_data, batch_size=args.batch_size,
                                 num_workers=args.num_dataprovider_workers),
             'val': DataLoader(val_data, batch_size=args.batch_size,
                               num_workers=args.num_dataprovider_workers),
             'test': DataLoader(test_data, batch_size=args.batch_size,
                                num_workers=args.num_dataprovider_workers)}

maml_system = ExperimentBuilder(model=model, data_dict=data_dict, experiment_name=args.experiment_name,
                                continue_from_epoch=args.continue_from_epoch,
                                total_iter_per_epoch=args.total_iter_per_epoch,
                                num_evaluation_tasks=args.num_evaluation_tasks, total_epochs=args.total_epochs,
                                batch_size=args.batch_size, max_models_to_save=args.max_models_to_save,
                                evaluate_on_test_set_only=args.evaluate_on_test_set_only,
                                args=args)
maml_system.run_experiment()
Example #9
    test_data = data.DataLoader(test_dataset, **params)

    num_output_classes = 2

    custom_net = TwoStreamNetwork(input_shape=(batch_size, num_flow_channels,
                                               image_height, image_width),
                                  dropout_rate=args.dropout_rate)

    experiment = ExperimentBuilder(
        network_model=custom_net,
        experiment_name=experiment_name,
        num_epochs=num_epochs,
        weight_decay_coefficient=weight_decay_coefficient,
        lr=args.lr,
        gpu_id=gpu_id,
        use_gpu=use_gpu,
        continue_from_epoch=continue_from_epoch,
        num_batches=num_batches,
        train_data=train_data,
        valid_data=val_data,
        test_data=test_data,
        pretrained=args.pretrained,
        schedule=args.schedule)  # build an experiment object

    if not args.only_output_results:
        total_losses, test_losses = experiment.run_experiment(
        )  # run experiment and return experiment metrics
        print(total_losses, test_losses)
    else:
        # valid_losses = experiment.output_summary(type='valid', epoch_idx=continue_from_epoch)
        test_losses = experiment.output_summary(type='test',
Example #10
from data import MetaLearningSystemDataLoader
from experiment_builder import ExperimentBuilder
from few_shot_learning_system import *
from utils.parser_utils import get_args
from utils.dataset_tools import check_download_dataset

# Combines the arguments, model, data and experiment builders to run an experiment
args, device = get_args()

model = EmbeddingMAMLFewShotClassifier(args=args,
                                       device=device,
                                       im_shape=(2, args.image_channels,
                                                 args.image_height,
                                                 args.image_width))
check_download_dataset(args=args)
data = MetaLearningSystemDataLoader
maml_system = ExperimentBuilder(use_features_instead_of_images=False,
                                model=model,
                                data=data,
                                args=args,
                                device=device)
maml_system.run_experiment()
Example #11
                  num_output_classes=enn_output_dim,
                  num_filters=enn_hidden_dim,
                  num_layers=enn_layers)
    else:
        enn = None

    pnn = PNN(input_shape=(batch_size, pnn_input_dim),
              num_output_classes=pnn_output_dim,
              num_filters=pnn_hidden_dim,
              num_layers=pnn_layers)

    bridge_experiment = ExperimentBuilder(
        estimation_model=enn,
        policy_model=pnn,
        num_epochs=num_epochs,
        weight_decay_coefficient=weight_decay_coefficient,
        use_gpu=use_gpu,
        train_data=train_data_loader,
        val_data=val_data_loader,
        test_data=test_data_loader,
        lr=lr)  # build an experiment object

    if use_enn:
        enn_stats, pnn_stats = bridge_experiment.run_experiment()
        plot_result_graphs('baseline', enn_stats, pnn_stats)

        enn_accs, pnn_accs = bridge_experiment.eval_bid_length()
        plot_length_graphs('bidding_length', enn_accs, pnn_accs)
    else:
        pnn_stats = bridge_experiment.run_experiment()
        plot_pnn_graphs('only_pnn', pnn_stats)
Example #12
from experiment_builder import ExperimentBuilder
from data_providers.windowed_data_provider import WindowedDataProvider
from data_providers.data_reader import FullDataReader, MiniDataReader
import numpy as np
from arg_extractor import get_args

args = get_args()

rng = np.random.RandomState(args.seed)

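# baseline run: MeanPredictor(mean=0) is presumably a trivial model that always predicts a constant mean of zero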
experiment_builder = ExperimentBuilder(
    args=args,
    model=MeanPredictor(mean=0),
    experiment_name="mean_predictor",
    num_epochs=1,
    train_data=WindowedDataProvider(data_reader=FullDataReader(
        args.data_path, 'train'),
                                    window_size=11,
                                    segment_size=12,
                                    batch_size=args.batch_size,
                                    shuffle_order=True,
                                    rng=rng),
    val_data=WindowedDataProvider(data_reader=FullDataReader(
        args.data_path, 'valid'),
                                  window_size=11,
                                  segment_size=12,
                                  batch_size=args.batch_size,
                                  shuffle_order=True,
                                  rng=rng),
    continue_from_epoch=-1)

experiment_builder.run_experiment()
Example #13
if args.load_pretrained is None:
    from_scratch_conv_net = ConvolutionalNetwork(  # initialize our network object, in this case a ConvNet
        input_shape=(args.batch_size, args.image_num_channels,
                     args.image_height, args.image_height),
        dim_reduction_type=args.dim_reduction_type,
        num_filters=args.num_filters,
        num_layers=args.num_layers,
        use_bias=False,
        num_output_classes=num_output_classes)

    source_task_training = ExperimentBuilder(
        network_model=from_scratch_conv_net,
        use_gpu=args.use_gpu,
        experiment_name=args.experiment_name,
        num_epochs=args.num_epochs,
        weight_decay_coefficient=args.weight_decay_coefficient,
        continue_from_epoch=args.continue_from_epoch,
        train_data=train_data,
        val_data=val_data,
        test_data=test_data)  # build an experiment object
    fine_tuning_on_target_conv_net = from_scratch_conv_net
else:
    fine_tuning_on_target_conv_net = torch.load(args.load_pretrained)

target_task_training = ExperimentBuilder(
    network_model=fine_tuning_on_target_conv_net,
    use_gpu=args.use_gpu,
    experiment_name=args.experiment_name,
    num_epochs=args.num_epochs,
    weight_decay_coefficient=args.weight_decay_coefficient,
    continue_from_epoch=args.continue_from_epoch,
Example #14
    augment = False
    data = dataset.CIFAR_100(batch_size=batch_size,
                             samples_per_class=samples_per_class,
                             queries_per_class=queries_per_class)

elif data_name == "omniglot":
    channels = 1
    image_size = 28
    augment = True
    data = dataset.OmniglotNShotDataset(batch_size=batch_size,\
        classes_per_set=classes_per_set, samples_per_class=samples_per_class, queries_per_class = queries_per_class)
else:
    print("Unsupported dataset.")
    assert False

experiment = ExperimentBuilder(data)
one_shot, losses, c_error_opt_op, init = experiment.build_experiment(batch_size,\
    classes_train, classes_test, samples_per_class, queries_per_class, channels, image_size, fce, network_name)

#
total_epochs = 300
total_train_batches = 1000
total_val_batches = 100
total_test_batches = 250

save_statistics(experiment_name, [
    "epoch", "train_c_loss", "train_c_accuracy", "val_loss", "val_accuracy",
    "test_c_loss", "test_c_accuracy"
])

# Experiment initialization and running
Example #15
logs_path = "one_shot_outputs_ADNI/"
experiment_name = "one_shot_ADNI_{}fold_{}_{}".format(k, samples_per_class,
                                                      classes_per_set)

# Experiment builder

# data = dataset.OmniglotNShotDataset(batch_size=batch_size, classes_per_set=classes_per_set,
#                                     samples_per_class=samples_per_class)

data = dataset.ADNIDataset(k=k,
                           batch_size=batch_size,
                           classes_per_set=classes_per_set,
                           samples_per_class=samples_per_class)

experiment = ExperimentBuilder(data)
one_shot_omniglot, losses, c_error_opt_op, init = experiment.build_experiment(
    batch_size, classes_per_set, samples_per_class, fce)

total_epochs = 100
total_train_batches = 5
total_val_batches = 2
total_test_batches = 2

save_statistics(experiment_name, [
    "epoch", "train_c_loss", "train_c_accuracy", "val_loss", "val_accuracy",
    "test_c_loss", "test_c_accuracy"
])

# summary_path = "/summary/%d" % (int(time.time()))
# Experiment initialization and running
Example #16
                                  shuffle=True,
                                  num_workers=0)
    #
    custom_blstm = LSTMBlock(input_dim=args.input_dim,
                             dropout=args.drop_out,
                             batch_size=args.batch_size,
                             hidden_dim=args.hidden_dim,
                             output_dim=args.num_classes,
                             num_layers=args.num_layers)

    conv_experiment = ExperimentBuilder(
        network_model=custom_blstm,
        beta=args.beta,
        layer_no=args.layer_no,
        SER=args.SER,
        experiment_name=args.experiment_name,
        num_epochs=args.num_epochs,
        gender_MTL=args.genderMTL,
        experiment_no=args.experiment_no,
        weight_decay_coefficient=args.weight_decay_coefficient,
        use_gpu=args.use_gpu,
        continue_from_epoch=args.continue_from_epoch,
        train_data=train_data_loader,
        val_data=val_data_loader,
        test_data=test_data_loader,
        lr=args.learning_rate,
        batch_size=args.batch_size)  # build an experiment object

    experiment_metrics, test_metrics = conv_experiment.run_experiment(
    )  # run experiment and return experiment metrics
Example #17
                                      transform=transform_test)
    test_data = torch.utils.data.DataLoader(testset,
                                            batch_size=100,
                                            shuffle=False,
                                            num_workers=2)

    num_output_classes = 100

custom_conv_net = ConvolutionalNetwork(  # initialize our network object, in this case a ConvNet
    input_shape=(args.batch_size, args.image_num_channels, args.image_height,
                 args.image_height),
    dim_reduction_type=args.dim_reduction_type,
    num_filters=args.num_filters,
    num_layers=args.num_layers,
    use_bias=False,
    num_output_classes=num_output_classes)

conv_experiment = ExperimentBuilder(
    network_model=custom_conv_net,
    experiment_name=args.experiment_name,
    num_epochs=args.num_epochs,
    weight_decay_coefficient=args.weight_decay_coefficient,
    gpu_id=args.gpu_id,
    use_gpu=args.use_gpu,
    continue_from_epoch=args.continue_from_epoch,
    train_data=train_data,
    val_data=val_data,
    test_data=test_data)  # build an experiment object
experiment_metrics, test_metrics = conv_experiment.run_experiment(
)  # run experiment and return experiment metrics
Example #18
import os
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
import sys
from os import path
parent_folder = path.dirname(path.dirname(path.abspath(__file__)))
sys.path.append(parent_folder)
import numpy as np

from experiment_builder import ExperimentBuilder
from models.cnn_convlstm import CnnConvLSTM
from data_providers.full_grid_data_provider import FullGridDataProvider
from data_providers import data_provider_factory
from arg_extractor import get_args
import model_factory

args = get_args()
rng = np.random.RandomState(args.seed)

model = model_factory.get_model(args)
train_data, val_data = data_provider_factory.get_data_providers(args, rng)

experiment_builder = ExperimentBuilder(
    args=args,
    model=model,
    train_data=train_data,
    val_data=val_data,
    experiment_name=args.experiment_name,
    num_epochs=args.num_epochs,
    continue_from_epoch=args.continue_from_epoch)

experiment_builder.run_experiment()
Example #19
    optimizer = BertAdam(optimizer_grouped_parameters,
                         lr=args.learning_rate,
                         warmup=args.warmup_proportion,
                         t_total=num_train_optimization_steps)
    scheduler = None

# get experiment
experiment = ExperimentBuilder(
    network_model=model,
    experiment_name=args.experiment_name,
    num_epochs=1,  # hard code 1 epoch to ensure runtime limit
    gpu_id=args.gpu_id,
    use_gpu=args.use_gpu,
    device=device,
    continue_from_epoch=args.continue_from_epoch,
    criterion=criterion,
    optimizer=optimizer,
    fp16=args.fp16,
    scheduler=scheduler,
    learning_rate=args.learning_rate,
    val_metrics='loss',
    # save_optimizer=True,
    train_data=train_data,
    val_data=val_data,
    test_data=None)  # build an experiment object

experiment.run_experiment()

import json
with open("args_dict.json", 'w') as file:
    json.dump(args_dict, file, indent=True)
Example #20
session_holder = SessionHolder()

experiment_builder = ExperimentBuilder(
    args=args,
    model=LSTM(gpus=args.gpus,
               batch_size=args.batch_size,
               segment_size=args.segment_size,
               num_features=args.window_size**2,
               num_layers=args.num_layers,
               hidden_size=args.hidden_size,
               session_holder=session_holder,
               learning_rate=args.learning_rate),
    experiment_name=args.experiment_name,
    num_epochs=args.num_epochs,
    train_data=WindowedDataProvider(data_reader=data_reader(
        data_folder=args.data_path, which_set='train'),
                                    window_size=args.window_size,
                                    segment_size=args.segment_size,
                                    batch_size=args.batch_size,
                                    shuffle_order=True,
                                    rng=rng,
                                    fraction_of_data=args.fraction_of_data),
    val_data=WindowedDataProvider(data_reader=data_reader(
        data_folder=args.data_path, which_set='valid'),
                                  window_size=args.window_size,
                                  segment_size=args.segment_size,
                                  batch_size=args.batch_size,
                                  shuffle_order=True,
                                  rng=rng),
    continue_from_epoch=args.continue_from_epoch)

with tf.Session() as sess:
Example #21
    batch_size=args.batch_size,
    image_height=28,
    image_width=28,
    image_channels=1,
    train_val_test_split=(1200 / 1622, 211 / 1622, 211 / 1622),
    samples_per_iter=1,
    num_workers=4,
    data_path="datasets/omniglot_data",
    name="omniglot_data",
    index_of_folder_indicating_class=-2,
    reset_stored_filepaths=False,
    num_samples_per_class=args.samples_per_class,
    num_classes_per_set=args.classes_per_set,
    label_as_int=True)

experiment = ExperimentBuilder(data)
one_shot_omniglot, losses, c_error_opt_op, init = experiment.build_experiment(
    args.batch_size,
    args.classes_per_set,
    args.samples_per_class,
    args.use_full_context_embeddings,
    full_context_unroll_k=args.full_context_unroll_k,
    args=args)
total_train_batches = args.total_iter_per_epoch
total_val_batches = args.total_iter_per_epoch
total_test_batches = args.total_iter_per_epoch

saved_models_filepath, logs_filepath = build_experiment_folder(
    args.experiment_title)

save_statistics(logs_filepath, [
Example #22
def main():

    # down_sample factor
    N = 4

    # define paths to image and mask files
    cwd = Path(os.getcwd())
    par = cwd.parent
    data_path = str(par / "data/DAVIS/JPEGImages/480p/")
    mask_path = str(par / "data/DAVIS/Annotations/480p/")

    # training, validation and test split
    tvt_split = (0.5, 0.7)

    # get datasets
    X_train, X_val, X_test, y_train, y_val, y_test = generate_dataset_static(
        data_path, mask_path, tvt_split, N)
    X_train_t, X_val_t, X_test_t, y_train_t, y_val_t, y_test_t = generate_dataset_temporal(
        data_path, mask_path, tvt_split, N)

    # reshape datasets to match CNN shapes
    X_train = np.array(X_train).swapaxes(1, 3).swapaxes(2, 3)
    X_val = np.array(X_val).swapaxes(1, 3).swapaxes(2, 3)
    X_test = np.array(X_test).swapaxes(1, 3).swapaxes(2, 3)
    print(X_train.shape)
    print(X_val.shape)
    print(X_test.shape)
    y_train = np.array(y_train)
    y_val = np.array(y_val)
    y_test = np.array(y_test)
    print(y_train.shape)
    print(y_val.shape)
    print(y_test.shape)

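    # temporal data: move the trailing channel axis of each frame in front of the spatial axes (channels-first)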
    X_train_t = np.array(X_train_t).swapaxes(-1, -3).swapaxes(-2, -1)
    X_val_t = np.array(X_val_t).swapaxes(-1, -3).swapaxes(-2, -1)
    X_test_t = np.array(X_test_t).swapaxes(-1, -3).swapaxes(-2, -1)
    print(X_train_t.shape)
    print(X_val_t.shape)
    print(X_test_t.shape)
    y_train_t = np.array(y_train_t)
    y_val_t = np.array(y_val_t)
    y_test_t = np.array(y_test_t)
    print(y_train_t.shape)
    print(y_val_t.shape)
    print(y_test_t.shape)

    #put data into data provider objects
    batch_size = 25
    train_data = data_providers.DataProvider(X_train,
                                             y_train,
                                             batch_size,
                                             shuffle_order=True)
    val_data = data_providers.DataProvider(X_val,
                                           y_val,
                                           batch_size,
                                           shuffle_order=True)
    test_data = data_providers.DataProvider(X_test,
                                            y_test,
                                            batch_size,
                                            shuffle_order=True)

    batch_size = 25
    train_data_t = data_providers.DataProvider(X_train_t,
                                               y_train_t,
                                               batch_size,
                                               shuffle_order=True)
    val_data_t = data_providers.DataProvider(X_val_t,
                                             y_val_t,
                                             batch_size,
                                             shuffle_order=True)
    test_data_t = data_providers.DataProvider(X_test_t,
                                              y_test_t,
                                              batch_size,
                                              shuffle_order=True)

    # infer the network input shapes from a single batch of each dataset
    inputs_shape = X_train[:batch_size].shape
    inputs_shape_t = X_train_t[:batch_size].shape

    print("Time to make networks!")

    # generates networks of different depths and datasets
    static_net_shallow = ShallowNetwork(input_shape=inputs_shape)
    static_net_deeper = DeeperNetwork(input_shape=inputs_shape)
    temporal_net_shallow = ShallowNetwork(input_shape=inputs_shape_t)
    temporal_net_deeper = DeeperNetwork(input_shape=inputs_shape_t)
    static_net_deepest = DeepestNetwork(input_shape=inputs_shape)
    temporal_net_deepest = DeepestNetwork(input_shape=inputs_shape_t)

    # declare variables for experiments
    experiment_name = "static_run_shallow"
    num_epochs = 30
    use_gpu = False
    continue_from_epoch = -1

    # build experiment and run
    experiment_1 = ExperimentBuilder(
        network_model=static_net_shallow,
        experiment_name=experiment_name,
        num_epochs=num_epochs,
        use_gpu=use_gpu,
        continue_from_epoch=continue_from_epoch,
        train_data=train_data,
        val_data=val_data,
        test_data=test_data)  # build an experiment object
    experiment_metrics, test_metrics = experiment_1.run_experiment(
    )  # run experiment and return experiment metrics

    experiment_name = "static_run_deeper"
    num_epochs = 30
    use_gpu = True
    continue_from_epoch = -1

    experiment_2 = ExperimentBuilder(
        network_model=static_net_deeper,
        experiment_name=experiment_name,
        num_epochs=num_epochs,
        use_gpu=use_gpu,
        continue_from_epoch=continue_from_epoch,
        train_data=train_data,
        val_data=val_data,
        test_data=test_data)  # build an experiment object
    experiment_metrics, test_metrics = experiment_2.run_experiment(
    )  # run experiment and return experiment metrics

    experiment_name = "temporal_run_shallow"
    num_epochs = 30
    use_gpu = True
    continue_from_epoch = -1

    experiment_3 = ExperimentBuilder(
        network_model=temporal_net_shallow,
        experiment_name=experiment_name,
        num_epochs=num_epochs,
        use_gpu=use_gpu,
        continue_from_epoch=continue_from_epoch,
        train_data=train_data_t,
        val_data=val_data_t,
        test_data=test_data_t)  # build an experiment object
    experiment_metrics, test_metrics = experiment_3.run_experiment(
    )  # run experiment and return experiment metrics

    experiment_name = "temporal_run_deeper"
    num_epochs = 30
    use_gpu = True
    continue_from_epoch = -1

    experiment_4 = ExperimentBuilder(
        network_model=temporal_net_deeper,
        experiment_name=experiment_name,
        num_epochs=num_epochs,
        use_gpu=use_gpu,
        continue_from_epoch=continue_from_epoch,
        train_data=train_data_t,
        val_data=val_data_t,
        test_data=test_data_t)  # build an experiment object
    experiment_metrics, test_metrics = experiment_4.run_experiment(
    )  # run experiment and return experiment metrics

    experiment_name = "static_run_deepest"
    num_epochs = 30
    use_gpu = True
    continue_from_epoch = -1

    experiment_5 = ExperimentBuilder(
        network_model=static_net_deepest,
        experiment_name=experiment_name,
        num_epochs=num_epochs,
        use_gpu=use_gpu,
        continue_from_epoch=continue_from_epoch,
        train_data=train_data,
        val_data=val_data,
        test_data=test_data)  # build an experiment object
    experiment_metrics, test_metrics = experiment_5.run_experiment(
    )  # run experiment and return experiment metrics

    experiment_name = "temporal_run_deepest"
    num_epochs = 30
    use_gpu = True
    continue_from_epoch = -1

    experiment_6 = ExperimentBuilder(
        network_model=temporal_net_deepest,
        experiment_name=experiment_name,
        num_epochs=num_epochs,
        use_gpu=use_gpu,
        continue_from_epoch=continue_from_epoch,
        train_data=train_data_t,
        val_data=val_data_t,
        test_data=test_data_t)  # build an experiment object
    experiment_metrics, test_metrics = experiment_6.run_experiment(
    )  # run experiment and return experiment metrics
Example #23
import data as dataset
from experiment_builder import ExperimentBuilder
from utils.parser_util import get_args

batch_size, num_gpus, args = get_args()
#set the data provider to use for the experiment
data = dataset.VGGFaceDAGANDataset(batch_size=batch_size,
                                   last_training_class_index=1600,
                                   reverse_channels=True,
                                   num_of_gpus=num_gpus,
                                   gen_batches=10)
#init experiment
experiment = ExperimentBuilder(args, data=data)
#run experiment
experiment.run_experiment()
Example #24
from data import MetaLearningSystemDataLoader
from experiment_builder import ExperimentBuilder
from meta_learning_system import SceneAdaptiveInterpolation
from config import get_args

args, _ = get_args()
print(args)
model = SceneAdaptiveInterpolation(args)
data = MetaLearningSystemDataLoader
savfi_system = ExperimentBuilder(model=model, data=data, args=args)
savfi_system.run_experiment()