コード例 #1
0
def test_eeg_regressor_clonable():
    """sklearn's ``clone`` must work on EEGRegressor both before and after init."""
    predictions = np.array(
        [
            [[0.2, 0.1, 0.1, 0.1], [0.8, 0.9, 0.9, 0.9]],
            [[1.0, 1.0, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]],
            [[1.0, 1.0, 1.0, 0.2], [0.0, 0.0, 0.0, 0.8]],
            [[0.9, 0.8, 0.9, 1.0], [0.1, 0.2, 0.1, 0.0]],
        ]
    )
    scheduler_cb = ("lr_scheduler", LRScheduler('CosineAnnealingLR', T_max=1))
    regressor = EEGRegressor(
        MockModule(predictions),
        cropped=False,
        callbacks=["accuracy", scheduler_cb],
        criterion=CroppedLoss,
        criterion__loss_function=nll_loss,
        optimizer=optim.Adam,
        batch_size=32,
    )
    # Cloning must succeed on the raw estimator, then again once initialized.
    clone(regressor)
    regressor.initialize()
    clone(regressor)
コード例 #2
0
def test_trialwise_predict_and_predict_proba():
    """In trialwise mode, predict and predict_proba both pass predictions through."""
    expected = np.array(
        [
            [0.125, 0.875],
            [1., 0.],
            [0.8, 0.2],
            [0.9, 0.1],
        ]
    )
    regressor = EEGRegressor(
        MockModule(expected), optimizer=optim.Adam, batch_size=32)
    regressor.initialize()
    # Both prediction entry points should return the mocked outputs unchanged.
    for predict_fn in (regressor.predict, regressor.predict_proba):
        np.testing.assert_array_equal(expected, predict_fn(MockDataset()))
コード例 #3
0
def test_predict_trials():
    """predict_trials warns when invoked on a non-cropped regressor."""
    mock_preds = np.array(
        [
            [[0.2, 0.1, 0.1, 0.1], [0.8, 0.9, 0.9, 0.9]],
            [[1.0, 1.0, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]],
            [[1.0, 1.0, 1.0, 0.2], [0.0, 0.0, 0.0, 0.8]],
            [[0.9, 0.8, 0.9, 1.0], [0.1, 0.2, 0.1, 0.0]],
        ]
    )
    regressor = EEGRegressor(
        MockModule(mock_preds),
        cropped=False,
        criterion=CroppedLoss,
        criterion__loss_function=nll_loss,
        optimizer=optim.Adam,
        batch_size=32,
    )
    regressor.initialize()
    expected_msg = ("This method was designed to predict "
                    "trials in cropped mode.")
    with pytest.warns(UserWarning, match=expected_msg):
        regressor.predict_trials(MockDataset(), return_targets=False)
コード例 #4
0
def test_cropped_predict_and_predict_proba_not_aggregate_predictions():
    """With aggregate_predictions=False, cropped predictions keep the crop axis."""
    raw_crops = np.array(
        [
            [[0.2, 0.1, 0.1, 0.1], [0.8, 0.9, 0.9, 0.9]],
            [[1.0, 1.0, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]],
            [[1.0, 1.0, 1.0, 0.2], [0.0, 0.0, 0.0, 0.8]],
            [[0.9, 0.8, 0.9, 1.0], [0.1, 0.2, 0.1, 0.0]],
        ]
    )
    regressor = EEGRegressor(
        MockModule(raw_crops),
        cropped=True,
        criterion=CroppedLoss,
        criterion__loss_function=nll_loss,
        optimizer=optim.Adam,
        batch_size=32,
        aggregate_predictions=False,
    )
    regressor.initialize()
    # for cropped decoding regressor returns value for each trial (average over all crops)
    # — here aggregation is disabled, so the raw per-crop values come back as-is.
    for predict_fn in (regressor.predict, regressor.predict_proba):
        np.testing.assert_array_equal(raw_crops, predict_fn(MockDataset()))
コード例 #5
0
#     drop_bad_windows=True,
# )

# splits = dataset.split("session")
# train_set = splits["train"]
# valid_set = splits["eval"]

# Cropped regression setup: the model emits a prediction per receptive-field
# window, and CroppedLoss applies MSE across those windows.
# NOTE(review): `model`, `valid_set`, `train_set`, `optimizer_lr`,
# `optimizer_weight_decay`, `batch_size`, `n_epochs` and `device` are defined
# elsewhere in this script — confirm before reuse.
regressor = EEGRegressor(
    model,
    cropped=True,
    criterion=CroppedLoss,
    criterion__loss_function=torch.nn.functional.mse_loss,
    optimizer=torch.optim.AdamW,
    train_split=predefined_split(valid_set),  # fixed validation set, no shuffling
    optimizer__lr=optimizer_lr,
    optimizer__weight_decay=optimizer_weight_decay,
    iterator_train__shuffle=True,
    batch_size=batch_size,
    callbacks=[
        "neg_root_mean_squared_error",
        # seems n_epochs -1 leads to desired behavior of lr=0 after end of data?
        ("lr_scheduler", LRScheduler('CosineAnnealingLR', T_max=n_epochs - 1)),
    ],
    device=device,
)

# y=None: targets are supplied by the dataset itself.
regressor.fit(train_set, y=None, epochs=n_epochs)

if __name__ == '__main__':
    create_compatible_dataset('./data/BCICIV_4_mat/sub1_comp.mat')
コード例 #6
0
def train(data,
          dilation,
          kernel_size,
          lr,
          patient_index,
          model_string,
          correlation_monitor,
          output_dir,
          max_train_epochs=300,
          split=None,
          cropped=True,
          padding=False):
    """
    Creates and fits a model with the specified parameters onto the specified data
    :param data: dataset on which the model is to be trained
    :param dilation: dilation parameters of the model max-pool layers
    :param kernel_size: kernel sizes of the model's max-pool layers
    :param lr: learning rate
    :param patient_index: index of the patient on whose data the model is trained
    :param model_string: string specifying the setting of the data
    :param correlation_monitor: correlation monitor object calculating the correlations while fitting
    :param output_dir: where the trained model should be saved
    :param max_train_epochs: number of epochs for which to train the model
    :param split: the fold from cross-validation for which we are currently training the model
    :param cropped: if the decoding is cropped, always True in thesis experiments
    :param padding: if padding should be added, always False in thesis experiments
    :return: best correlation coefficient reached on the validation set
    """
    model, changed_model, model_name = get_model(data.in_channels,
                                                 input_time_length,
                                                 dilations=dilation,
                                                 kernel_sizes=kernel_size,
                                                 padding=padding)
    # Move the modified model to GPU when available.
    if cuda:
        device = 'cuda'
        model.model = changed_model.cuda()
    else:
        model.model = changed_model
        device = 'cpu'

    # Without padding the network produces several predictions per input
    # window; probe the model to find out how many.
    if not padding:
        n_preds_per_input = get_output_shape(model.model, model.input_channels,
                                             model.input_time_length)[1]
    else:
        n_preds_per_input = 1

    Path(home + f'/models/saved_models/{output_dir}/').mkdir(parents=True,
                                                             exist_ok=True)
    # cutting the input into batches compatible with model
    # if data.num_of_folds != -1, then also pre-whitening or filtering takes place
    # as part of the cut_input method
    data.cut_input(input_time_length=input_time_length,
                   n_preds_per_input=n_preds_per_input,
                   shuffle=False)

    print(
        f'starting cv epoch {split} out of {data.num_of_folds} for model: {model_string}_{model_name}'
    )
    correlation_monitor.step_number = 0
    if split is not None:
        correlation_monitor.split = split

    monitor = 'validation_correlation_best'

    # Callbacks: track validation correlation and checkpoint on its best value.
    monitors = [
        ('correlation monitor', correlation_monitor),
        ('checkpoint',
         Checkpoint(
             monitor=monitor,
             f_history=home +
             f'/logs/model_{model_name}/histories/{model_string}_k_{model_name}_p_{patient_index}.json',
         )),
    ]
    print('cropped:', cropped)

    # object EEGRegressor from the braindecode library suited for fitting models for regression tasks
    regressor = EEGRegressor(cropped=cropped,
                             module=model.model,
                             criterion=model.loss_function,
                             optimizer=model.optimizer,
                             max_epochs=max_train_epochs,
                             verbose=1,
                             train_split=data.cv_split,
                             callbacks=monitors,
                             lr=lr,
                             device=device,
                             batch_size=32).initialize()

    # Persist the untrained weights so the run can be reproduced/inspected.
    torch.save(
        model.model, home +
        f'/models/saved_models/{output_dir}/initial_{model_string}_{model_name}_p_{patient_index}'
    )
    regressor.max_correlation = -1000

    # BUGFIX: previously the stacked fit ran unconditionally, so with
    # padding=True the model was fitted twice. Fit exactly once per call.
    if padding:
        regressor.fit(data.train_set[0], data.train_set[1])
    else:
        regressor.fit(np.stack(data.train_set.X), np.stack(data.train_set.y))

    # best_model = load_model(
    #     f'/models/saved_models/{output_dir}/best_model_split_0')
    torch.save(model.model,
               home + f'/models/saved_models/{output_dir}/last_model_{split}')

    # Evaluate the final model on the monitor's validation set.
    if cuda:
        best_corr = get_corr_coef(correlation_monitor.validation_set,
                                  model.model.cuda(device=device))
    else:
        best_corr = get_corr_coef(correlation_monitor.validation_set,
                                  model.model)
    print(patient_index, best_corr)
    return best_corr
コード例 #7
0
        corr_coeffs.append(np.corrcoef(y[:, i], preds[:, i])[0, 1])
    return np.mean(corr_coeffs)


# Trialwise regression setup with per-epoch R^2 and Pearson-r scoring on
# both the train and validation splits.
# NOTE(review): `model`, `valid_set`, `lr`, `weight_decay`, `batch_size`,
# `pearson_r_score`, `n_epochs` and `device` are defined elsewhere in this
# script — confirm before reuse.
regressor = EEGRegressor(
    model,
    criterion=torch.nn.MSELoss,
    optimizer=torch.optim.AdamW,
    train_split=predefined_split(valid_set),  # using valid_set for validation,
    optimizer__lr=lr,
    optimizer__weight_decay=weight_decay,
    batch_size=batch_size,
    callbacks=[
        'r2',
        ('valid_pearson_r',
         EpochScoring(pearson_r_score,
                      lower_is_better=False,
                      on_train=False,
                      name='valid_pearson_r')),
        ('train_pearson_r',
         EpochScoring(pearson_r_score,
                      lower_is_better=False,
                      on_train=True,
                      name='train_pearson_r')),
        ("lr_scheduler", LRScheduler('CosineAnnealingLR', T_max=n_epochs - 1)),
    ],
    device=device,
)
# Silence library logging below WARNING during training.
set_log_level(verbose='WARNING')

######################################################################
# Model training for a specified number of epochs. ``y`` is None as it is already supplied
コード例 #8
0
# Training hyperparameters for this example.
batch_size = 64
n_epochs = 8

# Cropped time-series regression: predictions are kept per-crop
# (aggregate_predictions=False) and scored with R^2 on both splits.
# NOTE(review): `model`, `valid_set`, `lr`, `weight_decay` and `device` are
# defined elsewhere in this script — confirm before reuse.
regressor = EEGRegressor(
    model,
    cropped=True,
    aggregate_predictions=False,
    criterion=TimeSeriesLoss,
    criterion__loss_function=torch.nn.functional.mse_loss,
    optimizer=torch.optim.AdamW,
    train_split=predefined_split(valid_set),  # fixed validation split
    optimizer__lr=lr,
    optimizer__weight_decay=weight_decay,
    iterator_train__shuffle=True,
    batch_size=batch_size,
    callbacks=[("lr_scheduler",
                LRScheduler('CosineAnnealingLR', T_max=n_epochs - 1)),
               ('r2_train',
                CroppedTimeSeriesEpochScoring(sklearn.metrics.r2_score,
                                              lower_is_better=False,
                                              on_train=True,
                                              name='r2_train')),
               ('r2_valid',
                CroppedTimeSeriesEpochScoring(sklearn.metrics.r2_score,
                                              lower_is_better=False,
                                              on_train=False,
                                              name='r2_valid'))],
    device=device,
)
# Silence library logging below WARNING during training.
set_log_level(verbose='WARNING')

######################################################################
コード例 #9
0
        monitors = [
            ('correlation monitor', correlation_monitor),
            ('checkpoint',
             Checkpoint(
                 monitor=monitor,
                 f_history=home +
                 f'/logs/model_{model_name}/histories/{model_string}_k_{model_name}_p_{patient_index}.json',
             )), ('tensorboard', TensorBoard(writer, ))
        ]

        regressor = EEGRegressor(cropped=True,
                                 module=model,
                                 criterion=model.loss_function,
                                 optimizer=model.optimizer,
                                 max_epochs=max_train_epochs,
                                 verbose=1,
                                 train_split=data_full.cv_split,
                                 callbacks=monitors,
                                 lr=lr,
                                 device=device,
                                 batch_size=32).initialize()
        args, kwargs = regressor.get_params_for_optimizer(
            'optimizer', regressor.module_.named_parameters())
        print('output dir:', output_dir)
        Path(home + f'/models/double_models/{output_dir}/').mkdir(
            exist_ok=True, parents=True)
        # torch.save(model,
        #            home + f'/models/double_models/{output_dir}/initial_{model_string}_{model_name}_p_{patient_index}')
        regressor.max_correlation = -1000
        index = 0
        while index < data_full.train_set.X.shape[0]: