def main(path: str):
    _configs = load_configs(path)

    _batch_size = _configs['hyperparams']['batch_size']
    _epochs = _configs['hyperparams']['epochs']
    _eval_steps = _configs['hyperparams']['evaluation_steps']

    _model_params = _configs['hyperparams']['model']

    print('Getting dataset...')
    _data_manager = prepare_dataset(_configs)
    _vocab_size = _data_manager.get_vocabulary_size()

    _model_params['vocab_size'] = _vocab_size  # vocabulary size comes from the dataset, not the config

    print('Now build model...')
    model = build_model(_model_params)

    train_dataloader = _data_manager.get_train_data_loader(_batch_size)
    test_dataloader = _data_manager.get_test_data_loader(100)  # evaluation uses a fixed batch size of 100

    print(model)

    print('Train start!')
    train_manager = TrainManager(train_dataloader, test_dataloader, model,
                                 _epochs, _eval_steps)

    train_manager.train()
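
The nested config read by the main() above is not shown; a minimal sketch of the structure implied by its lookups (key names taken from the code, values purely illustrative) might be:

# Hypothetical config structure implied by the lookups in main() above;
# the on-disk format depends on what load_configs parses (e.g. YAML or JSON).
example_config = {
    'hyperparams': {
        'batch_size': 32,          # illustrative value
        'epochs': 10,              # illustrative value
        'evaluation_steps': 500,   # illustrative value
        'model': {
            # whatever build_model expects; 'vocab_size' is injected at runtime
        },
    },
}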
def main(path: str):
    _configs = load_configs(path)

    _batch_size = _configs['hyperparams']['batch_size']
    _epochs = _configs['hyperparams']['epochs']
    _embedding_size = _configs['hyperparams']['embedding_size']
    _conv_dim = _configs['hyperparams']['convolution_dims']
    _eval_steps = _configs['hyperparams']['evaluation_steps']

    print('Getting dataset...')
    _data_manager = prepare_dataset(_configs)
    _vocab_size = _data_manager.get_vocabulary_size()
    _pretrained_embeddings = _data_manager.get_pretrained_word_embedding()

    print('Now build model...')
    model = build_model(_vocab_size, _embedding_size, _conv_dim, embeddings=_pretrained_embeddings)

    train_dataloader = _data_manager.get_train_data_loader(_batch_size)
    test_dataloader = _data_manager.get_test_data_loader(_batch_size)

    print('Train start!')
    train_manager = TrainManager(train_dataloader,
                                 test_dataloader,
                                 model,
                                 _epochs,
                                 _eval_steps)

    train_manager.train()
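
Both variants of main() take only a config path, so a thin command-line wrapper (hypothetical, not part of the original snippet) is enough to launch training:

# Hypothetical entry-point guard; argparse is standard library.
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(description='Train a model from a config file.')
    parser.add_argument('config_path', help='path to the training configuration')
    cli_args = parser.parse_args()
    main(cli_args.config_path)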
Example #3
def get_data(experiment,
             N_per_class=None,
             N_largest=None,
             do_add_spectral_indices=True):
    assert experiment in [
        "isprs_rf_tum_23classes", "isprs_rf_gaf_23classes",
        "isprs_rf_tum_12classes", "isprs_rf_gaf_12classes"
    ]
    assert N_per_class is None or isinstance(N_per_class, int)
    assert N_largest is None or isinstance(N_largest, int)
    assert isinstance(do_add_spectral_indices, bool)

    #  args = argparse.Namespace(experiment=experiment, dataroot=../data, seed=0, batchsize=256, workers=0, mode=None, hparamset=0)
    args = argparse.Namespace(
        experiment=experiment,
        dataroot='/home/glennmoncrieff/crop-type-mapping/notebooks/data',  # machine-specific; point this at your local data directory
        seed=0,
        batchsize=256,
        workers=0,
        mode=None,
        hparamset=0)
    args = experiments(args)

    traindataloader, testdataloader = prepare_dataset(args)

    classnames = traindataloader.dataset.datasets[0].classname

    X, y, ids = dataloader_to_numpy(traindataloader)
    Xtest, ytest, idstest = dataloader_to_numpy(testdataloader)

    if N_largest is not None:
        class_idxs = get_class_idxs(np.hstack([y, ytest]), N_largest)
        X, y, ids = filter_largest(X, y, ids, class_idxs)
        Xtest, ytest, idstest = filter_largest(Xtest, ytest, idstest,
                                               class_idxs)
        classnames = classnames[class_idxs]
    else:
        class_idxs = np.arange(len(classnames))

    if N_per_class is not None:
        # make uniform class distributions
        X, y, ids = make_uniform(X, y, ids, N_per_class)
        Xtest, ytest, idstest = make_uniform(Xtest, ytest, idstest,
                                             N_per_class)

    # add spectral indices features
    if do_add_spectral_indices:
        X = add_spectral_indices(X)
        Xtest = add_spectral_indices(Xtest)

    return X, y, ids, Xtest, ytest, idstest, classnames, class_idxs
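
A call might look like the following; the argument values are illustrative, and the hard-coded dataroot inside get_data() must exist on the machine running it:

# Illustrative call: keep the 12 most frequent classes and cap each class
# at 500 samples (both values are arbitrary examples).
X, y, ids, Xtest, ytest, idstest, classnames, class_idxs = get_data(
    "isprs_rf_tum_23classes",
    N_per_class=500,
    N_largest=12,
    do_add_spectral_indices=True)
print(X.shape, Xtest.shape, classnames)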
Example #4
def setup(dataset, mode, dataroot="../data", store='/tmp/'):
    if mode == "12classes":
        classmapping = os.path.join(dataroot, "BavarianCrops",
                                    'classmapping12.csv')
    elif mode == "23classes":
        classmapping = os.path.join(dataroot, "BavarianCrops",
                                    'classmapping23.csv')
    else:
        raise ValueError(f"unknown mode {mode!r}; expected '12classes' or '23classes'")

    args = Namespace(batchsize=256,
                     epochs=1500,
                     classmapping=classmapping,
                     dataroot=dataroot,
                     dataset=dataset,
                     model='duplo',
                     mode=None,
                     weight_decay=0,
                     learning_rate=1e-3,
                     seed=0,
                     store=store,
                     workers=0)

    if dataset == "BavarianCrops":
        args = merge([args, TUM_dataset])
        exp = "isprs_tum_duplo"
    elif dataset == "GAFv2":
        args = merge([args, GAF_dataset])
        exp = "isprs_gaf_duplo"
    else:
        raise ValueError(f"unknown dataset {dataset!r}; expected 'BavarianCrops' or 'GAFv2'")
    args.experiment = exp
    args.store = os.path.join(store, mode)  # honor the store argument instead of hard-coding /tmp

    args.train_on = "train"
    args.test_on = "valid"
    traindataloader, testdataloader = prepare_dataset(args)

    input_dim = traindataloader.dataset.datasets[0].ndims
    nclasses = len(traindataloader.dataset.datasets[0].classes)

    device = torch.device("cuda")
    model = DuPLO(input_dim=input_dim,
                  nclasses=nclasses,
                  sequencelength=args.samplet,
                  dropout=0.4)

    model.to(device)

    return traindataloader, testdataloader, model, args, device
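
setup() hard-codes torch.device("cuda"), so it needs a CUDA-capable GPU. A call for the TUM dataset with the 12-class mapping (paths illustrative) looks like:

# Illustrative call; dataroot must contain the BavarianCrops data and the
# classmapping12.csv file referenced above.
traindataloader, testdataloader, model, args, device = setup(
    "BavarianCrops", "12classes", dataroot="../data", store="/tmp/")
print(model.__class__.__name__, args.experiment)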
Example #5
    def _setup(self, config):

        # one Ray Tune iteration is RAY_TEST_EVERY training epochs followed by one test epoch
        self.epochs = RAY_TEST_EVERY

        print(config)

        args = Namespace(**config)
        self.traindataloader, self.validdataloader = prepare_dataset(args)

        args.nclasses = self.traindataloader.dataset.nclasses
        args.seqlength = self.traindataloader.dataset.sequencelength
        args.input_dims = self.traindataloader.dataset.ndims

        self.model = getModel(args)

        if torch.cuda.is_available():
            self.model = self.model.cuda()

        if "model" in config.keys():
            config.pop('model', None)
        #trainer = Trainer(self.model, self.traindataloader, self.validdataloader, **config)

        if args.experiment == "transformer":
            optimizer = ScheduledOptim(
                optim.Adam(filter(lambda x: x.requires_grad,
                                  self.model.parameters()),
                           betas=(0.9, 0.98),
                           eps=1e-09,
                           weight_decay=args.weight_decay,
                           lr=args.learning_rate), self.model.d_model,
                args.warmup)
        else:
            optimizer = optim.Adam(filter(lambda x: x.requires_grad,
                                          self.model.parameters()),
                                   betas=(0.9, 0.999),
                                   eps=1e-08,
                                   weight_decay=args.weight_decay,
                                   lr=args.learning_rate)

        self.trainer = Trainer(self.model,
                               self.traindataloader,
                               self.validdataloader,
                               optimizer=optimizer,
                               **config)
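
This _setup() is the old-style ray.tune.Trainable hook; the matching _train() step is not shown above. A sketch under the assumption that Trainer exposes per-epoch train/test methods (train_epoch() and test_epoch() are hypothetical names) could look like:

    def _train(self):
        # One Ray Tune iteration: RAY_TEST_EVERY training epochs followed by
        # an evaluation pass. The epoch-level method names on Trainer are not
        # shown in the snippet above; train_epoch()/test_epoch() are assumed.
        for _ in range(self.epochs):
            self.trainer.train_epoch()
        test_metrics = self.trainer.test_epoch()
        return test_metrics  # Ray Tune expects a dict of metrics here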
Example #6
def main(args):
    """
    Run ablation study to find the best model depending on the number of samples
    in the dataset. Result can be seen in the results.png image.
    Args:
        args: Command line arguments.
    """
    args = parse_arguments(args)

    # Prepare K-Fold cross validation of the original dataset
    kf, x, y = prepare_kfold_cross_validation('./dataset.csv',
                                              num_splits=args.num_splits)

    output_dir = './ablation_study_checkpoints'
    original_output_dir = os.path.join(output_dir, 'orig')
    generated_output_dir = os.path.join(output_dir, 'gen')

    results_orig = []
    results_gen = []
    dataset_sizes = []
    for fold_id, (train_index, test_index) in enumerate(kf):
        x_train, x_val = x[train_index], x[test_index]
        y_train, y_val = y[train_index], y[test_index]
        x_moments = calculate_mean_and_std(x_train)
        y_moments = calculate_mean_and_std(y_train)

        # Create and train model on original dataset
        model_orig = create_fully_connected_nn((4, ), x_moments, y_moments,
                                               args.model_structure)
        model_orig.compile(optimizer=args.optimizer, loss=args.loss_func)

        model_orig.fit(x_train,
                       y_train,
                       batch_size=args.batch_size,
                       epochs=args.num_epochs,
                       validation_data=(x_val, y_val),
                       callbacks=[
                           tf.keras.callbacks.EarlyStopping(
                               patience=args.early_stopping_patience),
                           tf.keras.callbacks.ModelCheckpoint(
                               original_output_dir, save_best_only=True)
                       ])

        # Load best checkpoint
        model_orig = tf.keras.models.load_model(original_output_dir)
        results_orig.append(
            model_orig.evaluate(x_val, y_val, batch_size=args.batch_size))

        results_gen_fold = []
        for generated_dataset_path in natural_sort(
                glob.glob('generated_datasets/*.csv')):

            dataset_size = int(
                generated_dataset_path.split('-')[1].split('.csv')[0])

            if fold_id == 0:
                dataset_sizes.append(dataset_size)
            # Prepare generated dataset data
            x_train_gen, x_val_gen, y_train_gen, y_val_gen = \
                prepare_dataset(generated_dataset_path, val_size=args.val_size)

            x_moments_gen = calculate_mean_and_std(x_train_gen)
            y_moments_gen = calculate_mean_and_std(y_train_gen)

            # Create model for training on the generated data
            model_gen = create_fully_connected_nn(
                (4, ), x_moments_gen, y_moments_gen, args.model_structure)
            model_gen.compile(optimizer=args.optimizer, loss=args.loss_func)

            model_gen.fit(x_train_gen,
                          y_train_gen,
                          batch_size=args.batch_size,
                          epochs=args.num_epochs,
                          validation_data=(x_val_gen, y_val_gen),
                          callbacks=[
                              tf.keras.callbacks.EarlyStopping(
                                  patience=args.early_stopping_patience),
                              tf.keras.callbacks.ModelCheckpoint(
                                  generated_output_dir, save_best_only=True)
                          ])

            # Load the best model
            model_gen = tf.keras.models.load_model(generated_output_dir)

            results_gen_fold.append(
                model_gen.evaluate(x_val, y_val, batch_size=args.batch_size))
        results_gen.append(results_gen_fold)

    results_orig = np.mean(results_orig)
    results_gen = np.mean(results_gen, axis=0)

    # Visualize the data.

    plt.plot(dataset_sizes, [results_orig] * len(dataset_sizes),
             color='b',
             label='Original dataset')
    plt.plot(dataset_sizes, results_gen, color='g', label='Generated dataset')

    plt.title('MAE loss on val set depending\non the number of samples '
              'in the dataset')

    plt.xlabel('Number of samples in the generated dataset')
    plt.ylabel('MAE averaged over different validation folds')
    plt.xscale('log')

    plt.legend()
    plt.savefig(args.output_name)
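
main() forwards its argument straight to parse_arguments, so the usual entry-point guard (not part of the snippet above) would pass the raw command-line arguments through:

# Hypothetical entry point: parse_arguments is assumed to accept an argv-style
# list, as suggested by the docstring above.
if __name__ == '__main__':
    import sys
    main(sys.argv[1:])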