Example #1
class HybridNetModule(nn.Module):
    def __init__(self, in_chans, n_classes, input_time_length):
        super(HybridNetModule, self).__init__()
        deep_model = Deep4Net(
            in_chans,
            n_classes,
            n_filters_time=20,
            n_filters_spat=30,
            n_filters_2=40,
            n_filters_3=50,
            n_filters_4=60,
            input_time_length=input_time_length,
            final_conv_length=2,
        ).create_network()
        shallow_model = ShallowFBCSPNet(
            in_chans,
            n_classes,
            input_time_length=input_time_length,
            n_filters_time=30,
            n_filters_spat=40,
            filter_time_length=28,
            final_conv_length=29,
        ).create_network()

        reduced_deep_model = nn.Sequential()
        for name, module in deep_model.named_children():
            if name == "conv_classifier":
                new_conv_layer = nn.Conv2d(
                    module.in_channels,
                    60,
                    kernel_size=module.kernel_size,
                    stride=module.stride,
                )
                reduced_deep_model.add_module("deep_final_conv",
                                              new_conv_layer)
                break
            reduced_deep_model.add_module(name, module)

        reduced_shallow_model = nn.Sequential()
        for name, module in shallow_model.named_children():
            if name == "conv_classifier":
                new_conv_layer = nn.Conv2d(
                    module.in_channels,
                    40,
                    kernel_size=module.kernel_size,
                    stride=module.stride,
                )
                reduced_shallow_model.add_module("shallow_final_conv",
                                                 new_conv_layer)
                break
            reduced_shallow_model.add_module(name, module)

        to_dense_prediction_model(reduced_deep_model)
        to_dense_prediction_model(reduced_shallow_model)
        self.reduced_deep_model = reduced_deep_model
        self.reduced_shallow_model = reduced_shallow_model
        self.final_conv = nn.Conv2d(100,
                                    n_classes,
                                    kernel_size=(1, 1),
                                    stride=1)
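
The snippet ends after __init__. A minimal sketch of a matching forward pass, assuming the usual torch / torch.nn.functional (F) imports and braindecode 0.x conventions (per-crop predictions along dim 2); cropping both streams to a common length is an assumption, not code from the source project:

    def forward(self, x):
        # run both dense-prediction streams on the same input
        deep_out = self.reduced_deep_model(x)
        shallow_out = self.reduced_shallow_model(x)
        # crop to a common number of per-sample predictions (assumption)
        n_preds = min(deep_out.size(2), shallow_out.size(2))
        # 60 + 40 = 100 filters, matching final_conv's in_channels
        merged = torch.cat(
            (deep_out[:, :, :n_preds], shallow_out[:, :, :n_preds]), dim=1)
        return F.log_softmax(self.final_conv(merged), dim=1)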
Example #2
def train(config):
    cuda = True
    model = config['model']
    if model == 'deep':
        model = Deep4Net(n_chans,
                         n_classes,
                         input_time_length=input_time_length,
                         final_conv_length=2,
                         config=config).create_network()

    to_dense_prediction_model(model)
    if cuda:
        model.cuda()

    log.info("Model: \n{:s}".format(str(model)))
    dummy_input = np_to_var(train_set.X[:1, :, :, None])
    if cuda:
        dummy_input = dummy_input.cuda()
    out = model(dummy_input)

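    # the dummy forward pass reveals how many dense predictions the model emits per input window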
    n_preds_per_input = out.cpu().data.numpy().shape[2]

    optimizer = optim.Adam(model.parameters())

    iterator = CropsFromTrialsIterator(batch_size=60,
                                       input_time_length=input_time_length,
                                       n_preds_per_input=n_preds_per_input)

    stop_criterion = Or([MaxEpochs(20), NoDecrease('valid_misclass', 80)])

    monitors = [
        LossMonitor(),
        MisclassMonitor(col_suffix='sample_misclass'),
        CroppedTrialMisclassMonitor(input_time_length=input_time_length),
        RuntimeMonitor()
    ]

    model_constraint = MaxNormDefaultConstraint()

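    # cropped training: average the per-crop predictions over the time axis before the NLL loss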
    loss_function = lambda preds, targets: F.nll_loss(
        th.mean(preds, dim=2, keepdim=False), targets)

    exp = Experiment(model,
                     train_set,
                     valid_set,
                     test_set,
                     iterator=iterator,
                     loss_function=loss_function,
                     optimizer=optimizer,
                     model_constraint=model_constraint,
                     monitors=monitors,
                     stop_criterion=stop_criterion,
                     remember_best_column='valid_misclass',
                     run_after_early_stop=True,
                     cuda=cuda)
    exp.run()
    print(exp.rememberer)
    return exp.rememberer.lowest_val
Example #3
File: deep4.py  Project: kahartma/eeggan
def build_model(input_time_length, n_chans, n_classes, cropped=False):
    model = Deep4Net(n_chans,
                     n_classes,
                     input_time_length=input_time_length,
                     final_conv_length='auto')

    if cropped:
        final_conv_length = model.final_conv_length
        model = Deep4Net(n_chans,
                         n_classes,
                         input_time_length=input_time_length,
                         final_conv_length=final_conv_length)

    model = model.create_network()
    if cropped:
        to_dense_prediction_model(model)

    return model
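
A hypothetical call of build_model (argument values are illustrative, not from the eeggan project):

model = build_model(input_time_length=1000, n_chans=22, n_classes=4, cropped=True)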
Example #4
def network_model(model, train_set, test_set, valid_set, n_chans, input_time_length, cuda):
	
	max_epochs = 30 
	max_increase_epochs = 10 
	batch_size = 64 
	init_block_size = 1000

	set_random_seeds(seed=20190629, cuda=cuda)

	n_classes = 2 
	n_chans = n_chans
	input_time_length = input_time_length

	if model == 'deep':
		model = Deep4Net(n_chans, n_classes, input_time_length=input_time_length,
						 final_conv_length='auto').create_network()

	elif model == 'shallow':
		model = ShallowFBCSPNet(n_chans, n_classes, input_time_length=input_time_length,
								final_conv_length='auto').create_network()

	if cuda:
		model.cuda()

	log.info("Model: \n{:s}".format(str(model)))

	optimizer = AdamW(model.parameters(), lr=0.00625, weight_decay=0)

	iterator = BalancedBatchSizeIterator(batch_size=batch_size) 

	stop_criterion = Or([MaxEpochs(max_epochs),
						 NoDecrease('valid_misclass', max_increase_epochs)])

	monitors = [LossMonitor(), MisclassMonitor(), RuntimeMonitor()]

	model_constraint = None
	print(train_set.X.shape[0]) 

	model_test = Experiment(model, train_set, valid_set, test_set, iterator=iterator,
							loss_function=F.nll_loss, optimizer=optimizer,
							model_constraint=model_constraint, monitors=monitors,
							stop_criterion=stop_criterion, remember_best_column='valid_misclass',
							run_after_early_stop=True, cuda=cuda)

	model_test.run()
	return model_test 
Example #5
    def __init__(self, input_channels, n_classes, input_time_length,
                 final_conv_length, stride_before_pool):
        self.input_channels = input_channels
        self.n_classes = n_classes
        self.input_time_length = input_time_length
        self.final_conv_length = final_conv_length
        # .train() puts the freshly built model into training mode and returns it
        self.model = Deep4Net(in_chans=self.input_channels,
                              n_classes=self.n_classes,
                              input_window_samples=1200,
                              final_conv_length=self.final_conv_length,
                              stride_before_pool=stride_before_pool).train()
        # summary(self.model, (85, 1200, 1))
        # test_input = np_to_var(
        #     np.ones((2, 85, 1200, 1), dtype=np.float32))
        # test_out = self.model(test_input)
        # make_dot(test_out, params=dict(list(self.model.named_parameters()))).render("rnn_torchviz", format="png")
        # print(self.model)
        self.regressed = False
        self.optimizer = optim.Adam
        self.loss_function = torch.nn.MSELoss
Example #6
def build_cropped_model(model_name, n_chans, n_classes, config):
    # input_time_length:
    #   will determine how many crops are processed in parallel
    #   supercrop, number of crops taken through network together
    # final_conv_length:
    #   will determine how many crops are processed in parallel
    #   we manually set the length of the final convolution layer
    #   to some length that makes the receptive field of the
    #   ConvNet smaller than the number of samples in a trial
    cropped_input_time_length = config['cropped']['input_time_length']
    final_conv_length_shallow = config['cropped']['final_conv_length_shallow']
    final_conv_length_deep = config['cropped']['final_conv_length_deep']
    if model_name == 'shallow':
        model = ShallowFBCSPNet(n_chans, n_classes,
                                input_time_length=cropped_input_time_length,
                                final_conv_length=final_conv_length_shallow) \
            .create_network()
    elif model_name == 'deep':
        model = Deep4Net(n_chans, n_classes,
                         input_time_length=cropped_input_time_length,
                         final_conv_length=final_conv_length_deep) \
            .create_network()
    to_dense_prediction_model(model)
    return model
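
A hypothetical invocation of build_cropped_model; the config keys mirror those read inside the function, the values are illustrative:

config = {'cropped': {'input_time_length': 1000,
                      'final_conv_length_shallow': 30,
                      'final_conv_length_deep': 2}}
model = build_cropped_model('shallow', n_chans=22, n_classes=4, config=config)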
Example #7
def runModel(mode):
    cudnn.benchmark = True

    start = time.time()

    #mode = str(sys.argv[1])
    #X,y,test_X,test_y = loadSubNormData(mode='all')
    #X,y,test_X,test_y = loadNEDCdata(mode=mode)

    #data = np.load('sessionsData/data%s-sessions.npy'%mode[:3])
    #labels = np.load('sessionsData/labels%s-sessions.npy'%mode[:3])

    data = np.load('data%s.npy' % mode[:3])
    labels = np.load('labels%s.npy' % mode[:3])

    X, y, test_X, test_y = splitDataRandom_Loaded(data, labels, mode)

    print('Mode - %s Total n: %d, Test n: %d' %
          (mode, len(y) + len(test_y), len(test_y)))
    #return 0

    #X = addDataNoise(X,band=[1,4])
    #test_X = addDataNoise(test_X,band=[1,4])

    max_shape = np.max([list(x.shape) for x in X], axis=0)

    assert max_shape[1] == int(config.duration_recording_mins *
                               config.sampling_freq * 60)

    n_classes = 2
    n_recordings = None  # set to an integer, if you want to restrict the set size
    sensor_types = ["EEG"]
    n_chans = 19  #21
    max_recording_mins = 35  # exclude larger recordings from training set
    sec_to_cut = 60  # cut away at start of each recording
    duration_recording_mins = 5  #20  # how many minutes to use per recording
    test_recording_mins = 5  #20
    max_abs_val = 800  # for clipping
    sampling_freq = 100
    divisor = 10  # divide signal by this
    test_on_eval = True  # test on evaluation set or on training set
    # in case of test on eval, n_folds and i_testfold determine
    # validation fold in training set for training until first stop
    n_folds = 10
    i_test_fold = 9
    shuffle = True
    model_name = 'linear'  #'deep'#'shallow' 'linear'
    n_start_chans = 25
    n_chan_factor = 2  # relevant for deep model only
    input_time_length = 6000
    final_conv_length = 1
    model_constraint = 'defaultnorm'
    init_lr = 1e-3
    batch_size = 64
    max_epochs = 35  # until first stop, then continue training on train+valid
    cuda = True  # False

    if model_name == 'shallow':
        model = ShallowFBCSPNet(
            in_chans=n_chans,
            n_classes=n_classes,
            n_filters_time=n_start_chans,
            n_filters_spat=n_start_chans,
            input_time_length=input_time_length,
            final_conv_length=final_conv_length).create_network()
    elif model_name == 'deep':
        model = Deep4Net(n_chans,
                         n_classes,
                         n_filters_time=n_start_chans,
                         n_filters_spat=n_start_chans,
                         input_time_length=input_time_length,
                         n_filters_2=int(n_start_chans * n_chan_factor),
                         n_filters_3=int(n_start_chans * (n_chan_factor**2.0)),
                         n_filters_4=int(n_start_chans * (n_chan_factor**3.0)),
                         final_conv_length=final_conv_length,
                         stride_before_pool=True).create_network()
    elif model_name in ('deep_smac', 'deep_smac_bnorm'):
        if model_name == 'deep_smac':
            do_batch_norm = False
        else:
            assert model_name == 'deep_smac_bnorm'
            do_batch_norm = True
        double_time_convs = False
        drop_prob = 0.244445
        filter_length_2 = 12
        filter_length_3 = 14
        filter_length_4 = 12
        filter_time_length = 21
        final_conv_length = 1
        first_nonlin = elu
        first_pool_mode = 'mean'
        first_pool_nonlin = identity
        later_nonlin = elu
        later_pool_mode = 'mean'
        later_pool_nonlin = identity
        n_filters_factor = 1.679066
        n_filters_start = 32
        pool_time_length = 1
        pool_time_stride = 2
        split_first_layer = True
        n_chan_factor = n_filters_factor
        n_start_chans = n_filters_start
        model = Deep4Net(n_chans,
                         n_classes,
                         n_filters_time=n_start_chans,
                         n_filters_spat=n_start_chans,
                         input_time_length=input_time_length,
                         n_filters_2=int(n_start_chans * n_chan_factor),
                         n_filters_3=int(n_start_chans * (n_chan_factor**2.0)),
                         n_filters_4=int(n_start_chans * (n_chan_factor**3.0)),
                         final_conv_length=final_conv_length,
                         batch_norm=do_batch_norm,
                         double_time_convs=double_time_convs,
                         drop_prob=drop_prob,
                         filter_length_2=filter_length_2,
                         filter_length_3=filter_length_3,
                         filter_length_4=filter_length_4,
                         filter_time_length=filter_time_length,
                         first_nonlin=first_nonlin,
                         first_pool_mode=first_pool_mode,
                         first_pool_nonlin=first_pool_nonlin,
                         later_nonlin=later_nonlin,
                         later_pool_mode=later_pool_mode,
                         later_pool_nonlin=later_pool_nonlin,
                         pool_time_length=pool_time_length,
                         pool_time_stride=pool_time_stride,
                         split_first_layer=split_first_layer,
                         stride_before_pool=True).create_network()
    elif model_name == 'shallow_smac':
        conv_nonlin = identity
        do_batch_norm = True
        drop_prob = 0.328794
        filter_time_length = 56
        final_conv_length = 22
        n_filters_spat = 73
        n_filters_time = 24
        pool_mode = 'max'
        pool_nonlin = identity
        pool_time_length = 84
        pool_time_stride = 3
        split_first_layer = True
        model = ShallowFBCSPNet(
            in_chans=n_chans,
            n_classes=n_classes,
            n_filters_time=n_filters_time,
            n_filters_spat=n_filters_spat,
            input_time_length=input_time_length,
            final_conv_length=final_conv_length,
            conv_nonlin=conv_nonlin,
            batch_norm=do_batch_norm,
            drop_prob=drop_prob,
            filter_time_length=filter_time_length,
            pool_mode=pool_mode,
            pool_nonlin=pool_nonlin,
            pool_time_length=pool_time_length,
            pool_time_stride=pool_time_stride,
            split_first_layer=split_first_layer,
        ).create_network()
    elif model_name == 'linear':
        model = nn.Sequential()
        model.add_module("conv_classifier",
                         nn.Conv2d(n_chans, n_classes, (600, 1)))
        model.add_module('softmax', nn.LogSoftmax(dim=1))
        model.add_module('squeeze', Expression(lambda x: x.squeeze(3)))
    else:
        assert False, "unknown model name {:s}".format(model_name)

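    # replace strides by dilations so the ConvNet emits a dense prediction per time step (cropped decoding)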
    to_dense_prediction_model(model)

    if config.cuda:
        model.cuda()
    test_input = np_to_var(
        np.ones((2, config.n_chans, config.input_time_length, 1),
                dtype=np.float32))
    if config.cuda:
        test_input = test_input.cuda()

    out = model(test_input)
    n_preds_per_input = out.cpu().data.numpy().shape[2]
    iterator = CropsFromTrialsIterator(
        batch_size=config.batch_size,
        input_time_length=config.input_time_length,
        n_preds_per_input=n_preds_per_input)

    #model.add_module('softmax', nn.LogSoftmax(dim=1))

    model.eval()

    mode[2] = str(mode[2])
    mode[3] = str(mode[3])
    modelName = '-'.join(mode[:4])

    #params = th.load('sessionsData/%sModel%s-sessions.pt'%(modelName,mode[4]))
    #params = th.load('%sModel%s.pt'%(modelName,mode[4]))
    params = th.load('linear/%sModel%s.pt' % (modelName, mode[4]))

    model.load_state_dict(params)

    if config.test_on_eval:
        #test_X, test_y = test_dataset.load()
        #test_X, test_y = loadNEDCdata(mode='eval')
        max_shape = np.max([list(x.shape) for x in test_X], axis=0)
        assert max_shape[1] == int(config.test_recording_mins *
                                   config.sampling_freq * 60)
    if not config.test_on_eval:
        splitter = TrainValidTestSplitter(config.n_folds,
                                          config.i_test_fold,
                                          shuffle=config.shuffle)
        train_set, valid_set, test_set = splitter.split(X, y)
    else:
        splitter = TrainValidSplitter(config.n_folds,
                                      i_valid_fold=config.i_test_fold,
                                      shuffle=config.shuffle)
        train_set, valid_set = splitter.split(X, y)
        test_set = SignalAndTarget(test_X, test_y)
        del test_X, test_y
    del X, y  # shouldn't be necessary, but just to make sure

    datasets = OrderedDict(
        (('train', train_set), ('valid', valid_set), ('test', test_set)))

    for setname in ('train', 'valid', 'test'):
        #setname = 'test'
        #print("Compute predictions for {:s}...".format(setname))
        dataset = datasets[setname]
        if config.cuda:
            preds_per_batch = [
                var_to_np(model(np_to_var(b[0]).cuda()))
                for b in iterator.get_batches(dataset, shuffle=False)
            ]
        else:
            preds_per_batch = [
                var_to_np(model(np_to_var(b[0])))
                for b in iterator.get_batches(dataset, shuffle=False)
            ]
        preds_per_trial = compute_preds_per_trial(
            preds_per_batch,
            dataset,
            input_time_length=iterator.input_time_length,
            n_stride=iterator.n_preds_per_input)
        mean_preds_per_trial = [
            np.mean(preds, axis=(0, 2)) for preds in preds_per_trial
        ]
        mean_preds_per_trial = np.array(mean_preds_per_trial)

        all_pred_labels = np.argmax(mean_preds_per_trial, axis=1).squeeze()
        all_target_labels = dataset.y
        acc_per_class = []
        for i_class in range(n_classes):
            mask = all_target_labels == i_class
            acc = np.mean(all_pred_labels[mask] == all_target_labels[mask])
            acc_per_class.append(acc)
        misclass = 1 - np.mean(acc_per_class)
        #print('Acc:{}, Class 0:{}, Class 1:{}'.format(np.mean(acc_per_class),acc_per_class[0],acc_per_class[1]))

        if setname == 'test':
            testResult = np.mean(acc_per_class)

    return testResult
Example #8
def run_exp(data_folder, subject_id, low_cut_hz, model, cuda):
    train_filename = 'A{:02d}T.gdf'.format(subject_id)
    test_filename = 'A{:02d}E.gdf'.format(subject_id)
    train_filepath = os.path.join(data_folder, train_filename)
    test_filepath = os.path.join(data_folder, test_filename)
    train_label_filepath = train_filepath.replace('.gdf', '.mat')
    test_label_filepath = test_filepath.replace('.gdf', '.mat')

    train_loader = BCICompetition4Set2A(train_filepath,
                                        labels_filename=train_label_filepath)
    test_loader = BCICompetition4Set2A(test_filepath,
                                       labels_filename=test_label_filepath)
    train_cnt = train_loader.load()
    test_cnt = test_loader.load()

    # Preprocessing

    train_cnt = train_cnt.drop_channels(
        ['STI 014', 'EOG-left', 'EOG-central', 'EOG-right'])
    assert len(train_cnt.ch_names) == 22
    # let's convert to microvolts for numerical stability of the next operations
    train_cnt = mne_apply(lambda a: a * 1e6, train_cnt)
    train_cnt = mne_apply(
        lambda a: bandpass_cnt(
            a, low_cut_hz, 38, train_cnt.info['sfreq'], filt_order=3, axis=1),
        train_cnt)
    train_cnt = mne_apply(
        lambda a: exponential_running_standardize(
            a.T, factor_new=1e-3, init_block_size=1000, eps=1e-4).T, train_cnt)

    test_cnt = test_cnt.drop_channels(
        ['STI 014', 'EOG-left', 'EOG-central', 'EOG-right'])
    assert len(test_cnt.ch_names) == 22
    test_cnt = mne_apply(lambda a: a * 1e6, test_cnt)
    test_cnt = mne_apply(
        lambda a: bandpass_cnt(
            a, low_cut_hz, 38, test_cnt.info['sfreq'], filt_order=3, axis=1),
        test_cnt)
    test_cnt = mne_apply(
        lambda a: exponential_running_standardize(
            a.T, factor_new=1e-3, init_block_size=1000, eps=1e-4).T, test_cnt)

    marker_def = OrderedDict([('Left Hand', [1]), ('Right Hand', [2]),
                              ('Foot', [3]), ('Tongue', [4])])
    ival = [-500, 4000]

    train_set = create_signal_target_from_raw_mne(train_cnt, marker_def, ival)
    test_set = create_signal_target_from_raw_mne(test_cnt, marker_def, ival)

    train_set, valid_set = split_into_two_sets(train_set,
                                               first_set_fraction=0.8)

    set_random_seeds(seed=20190706, cuda=cuda)

    n_classes = 4
    n_chans = int(train_set.X.shape[1])
    input_time_length = train_set.X.shape[2]
    if model == 'shallow':
        model = ShallowFBCSPNet(n_chans,
                                n_classes,
                                input_time_length=input_time_length,
                                final_conv_length='auto').create_network()
    elif model == 'deep':
        model = Deep4Net(n_chans,
                         n_classes,
                         input_time_length=input_time_length,
                         final_conv_length='auto').create_network()
    if cuda:
        model.cuda()
    log.info("Model: \n{:s}".format(str(model)))

    optimizer = optim.Adam(model.parameters())

    iterator = BalancedBatchSizeIterator(batch_size=60)

    stop_criterion = Or([MaxEpochs(1600), NoDecrease('valid_misclass', 160)])

    monitors = [LossMonitor(), MisclassMonitor(), RuntimeMonitor()]

    model_constraint = MaxNormDefaultConstraint()

    exp = Experiment(model,
                     train_set,
                     valid_set,
                     test_set,
                     iterator=iterator,
                     loss_function=F.nll_loss,
                     optimizer=optimizer,
                     model_constraint=model_constraint,
                     monitors=monitors,
                     stop_criterion=stop_criterion,
                     remember_best_column='valid_misclass',
                     run_after_early_stop=True,
                     cuda=cuda)
    exp.run()
    return exp
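
A hypothetical call of run_exp (data folder and subject id are illustrative); epochs_df holds the monitored training history:

exp = run_exp('data/bci-iv-2a/', subject_id=1, low_cut_hz=4, model='shallow', cuda=True)
print(exp.epochs_df.iloc[-1])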
Example #9
def setup_exp(
    train_folder,
    n_recordings,
    n_chans,
    model_name,
    n_start_chans,
    n_chan_factor,
    input_time_length,
    final_conv_length,
    model_constraint,
    stride_before_pool,
    init_lr,
    batch_size,
    max_epochs,
    cuda,
    num_workers,
    task,
    weight_decay,
    n_folds,
    shuffle_folds,
    lazy_loading,
    eval_folder,
    result_folder,
    run_on_normals,
    run_on_abnormals,
    seed,
    l2_decay,
    gradient_clip,
):
    info_msg = "using {}, {}".format(
        os.environ["SLURM_JOB_PARTITION"],
        os.environ["SLURMD_NODENAME"],
    )
    info_msg += ", gpu {}".format(os.environ["CUDA_VISIBLE_DEVICES"])
    logging.info(info_msg)

    logging.info("Targets for this task: <{}>".format(task))

    import torch.backends.cudnn as cudnn
    cudnn.benchmark = True

    if task == "age":
        loss_function = mse_loss_on_mean
        remember_best_column = "valid_rmse"
        n_classes = 1
    else:
        loss_function = nll_loss_on_mean
        remember_best_column = "valid_misclass"
        n_classes = 2

    if model_constraint is not None:
        assert model_constraint == 'defaultnorm'
        model_constraint = MaxNormDefaultConstraint()

    stop_criterion = MaxEpochs(max_epochs)

    set_random_seeds(seed=seed, cuda=cuda)
    if model_name == 'shallow':
        model = ShallowFBCSPNet(
            in_chans=n_chans,
            n_classes=n_classes,
            n_filters_time=n_start_chans,
            n_filters_spat=n_start_chans,
            input_time_length=input_time_length,
            final_conv_length=final_conv_length).create_network()
    elif model_name == 'deep':
        model = Deep4Net(
            n_chans,
            n_classes,
            n_filters_time=n_start_chans,
            n_filters_spat=n_start_chans,
            input_time_length=input_time_length,
            n_filters_2=int(n_start_chans * n_chan_factor),
            n_filters_3=int(n_start_chans * (n_chan_factor**2.0)),
            n_filters_4=int(n_start_chans * (n_chan_factor**3.0)),
            final_conv_length=final_conv_length,
            stride_before_pool=stride_before_pool).create_network()
    elif model_name == 'eegnet':
        model = EEGNetv4(n_chans,
                         n_classes,
                         input_time_length=input_time_length,
                         final_conv_length=final_conv_length).create_network()
    elif model_name == "tcn":
        assert task != "age", "what to change to do regression with tcn?!"
        model = TemporalConvNet(input_size=n_chans,
                                output_size=n_classes,
                                context_size=0,
                                num_channels=55,
                                num_levels=5,
                                kernel_size=16,
                                dropout=0.05270154233150525,
                                skip_mode=None,
                                use_context=0,
                                lasso_selection=0.0,
                                rnn_normalization=None)
    else:
        assert False, "unknown model name {:s}".format(model_name)

    if task == "age":
        # remove softmax layer, set n_classes to 1
        model.n_classes = 1
        new_model = nn.Sequential()
        for name, module_ in model.named_children():
            if name == "softmax":
                continue
            new_model.add_module(name, module_)
        model = new_model

    # maybe check if this works and wait / re-try after some time?
    # in case of all cuda devices are busy
    if cuda:
        model.cuda()

    if model_name != "tcn":
        to_dense_prediction_model(model)
    logging.info("Model:\n{:s}".format(str(model)))

    test_input = np_to_var(
        np.ones((2, n_chans, input_time_length, 1), dtype=np.float32))
    if list(model.parameters())[0].is_cuda:
        test_input = test_input.cuda()
    out = model(test_input)
    n_preds_per_input = out.cpu().data.numpy().shape[2]

    if eval_folder is None:
        logging.info("will do validation")
        if lazy_loading:
            logging.info(
                "using lazy loading to load {} recs".format(n_recordings))
            dataset = TuhLazy(train_folder,
                              target=task,
                              n_recordings=n_recordings)
        else:
            logging.info("using traditional loading to load {} recs".format(
                n_recordings))
            dataset = Tuh(train_folder, n_recordings=n_recordings, target=task)

        assert not (run_on_normals and run_on_abnormals), (
            "decide whether to run on normal or abnormal subjects")
        # only run on normal subjects
        if run_on_normals:
            ids = [
                i for i in range(len(dataset)) if dataset.pathologicals[i] == 0
            ]  # 0 is non-pathological
            dataset = TuhSubset(dataset, ids)
            logging.info("only using {} normal subjects".format(len(dataset)))
        if run_on_abnormals:
            ids = [
                i for i in range(len(dataset)) if dataset.pathologicals[i] == 1
            ]  # 1 is pathological
            dataset = TuhSubset(dataset, ids)
            logging.info("only using {} abnormal subjects".format(
                len(dataset)))

        indices = np.arange(len(dataset))
        kf = KFold(n_splits=n_folds, shuffle=shuffle_folds)
        for i, (train_ind, test_ind) in enumerate(kf.split(indices)):
            assert len(np.intersect1d(
                train_ind, test_ind)) == 0, ("train and test set overlap!")

            # seed is in range of number of folds and was set by submit script
            if i == seed:
                break

        if lazy_loading:
            test_subset = TuhLazySubset(dataset, test_ind)
            train_subset = TuhLazySubset(dataset, train_ind)
        else:
            test_subset = TuhSubset(dataset, test_ind)
            train_subset = TuhSubset(dataset, train_ind)
    else:
        logging.info("will do final evaluation")
        if lazy_loading:
            train_subset = TuhLazy(train_folder, target=task)
            test_subset = TuhLazy(eval_folder, target=task)
        else:
            train_subset = Tuh(train_folder, target=task)
            test_subset = Tuh(eval_folder, target=task)

        # remove rec:
        # train/abnormal/01_tcp_ar/081/00008184/s001_2011_09_21/00008184_s001_t001
        # since it contains no crop without outliers (channels A1, A2 broken)
        subjects = [f.split("/")[-3] for f in train_subset.file_paths]
        if "00008184" in subjects:
            bad_id = subjects.index("00008184")
            train_subset = remove_file_from_dataset(
                train_subset,
                file_id=bad_id,
                file=("train/abnormal/01_tcp_ar/081/00008184/s001_2011_09_21/"
                      "00008184_s001_t001"))
        subjects = [f.split("/")[-3] for f in test_subset.file_paths]
        if "00008184" in subjects:
            bad_id = subjects.index("00008184")
            test_subset = remove_file_from_dataset(
                test_subset,
                file_id=bad_id,
                file=("train/abnormal/01_tcp_ar/081/00008184/s001_2011_09_21/"
                      "00008184_s001_t001"))

    if task == "age":
        # standardize ages based on train set
        y_train = train_subset.y
        y_train_mean = np.mean(y_train)
        y_train_std = np.std(y_train)
        train_subset.y = (y_train - y_train_mean) / y_train_std
        y_test = test_subset.y
        test_subset.y = (y_test - y_train_mean) / y_train_std

    if lazy_loading:
        iterator = LazyCropsFromTrialsIterator(
            input_time_length,
            n_preds_per_input,
            batch_size,
            seed=seed,
            num_workers=num_workers,
            reset_rng_after_each_batch=False,
            check_preds_smaller_trial_len=False)  # True!
    else:
        iterator = CropsFromTrialsIterator(batch_size, input_time_length,
                                           n_preds_per_input, seed)

    monitors = []
    monitors.append(LossMonitor())
    monitors.append(RAMMonitor())
    monitors.append(RuntimeMonitor())
    if task == "age":
        monitors.append(
            RMSEMonitor(input_time_length,
                        n_preds_per_input,
                        mean=y_train_mean,
                        std=y_train_std))
    else:
        monitors.append(
            CroppedDiagnosisMonitor(input_time_length, n_preds_per_input))
        monitors.append(LazyMisclassMonitor(col_suffix='sample_misclass'))

    if lazy_loading:
        n_updates_per_epoch = len(
            iterator.get_batches(train_subset, shuffle=False))
    else:
        n_updates_per_epoch = sum(
            [1 for _ in iterator.get_batches(train_subset, shuffle=False)])
    n_updates_per_period = n_updates_per_epoch * max_epochs
    logging.info("there are {} updates per epoch".format(n_updates_per_epoch))

    if model_name == "tcn":
        adamw = ExtendedAdam(model.parameters(),
                             lr=init_lr,
                             weight_decay=weight_decay,
                             l2_decay=l2_decay,
                             gradient_clip=gradient_clip)
    else:
        adamw = AdamWWithTracking(model.parameters(),
                                  init_lr,
                                  weight_decay=weight_decay)

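    # anneal the learning rate over the whole run: updates per epoch times max_epochs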
    scheduler = CosineAnnealing(n_updates_per_period)
    optimizer = ScheduledOptimizer(scheduler,
                                   adamw,
                                   schedule_weight_decay=True)

    exp = Experiment(model=model,
                     train_set=train_subset,
                     valid_set=None,
                     test_set=test_subset,
                     iterator=iterator,
                     loss_function=loss_function,
                     optimizer=optimizer,
                     model_constraint=model_constraint,
                     monitors=monitors,
                     stop_criterion=stop_criterion,
                     remember_best_column=remember_best_column,
                     run_after_early_stop=False,
                     batch_modifier=None,
                     cuda=cuda,
                     do_early_stop=False,
                     reset_after_second_run=False)
    return exp
Example #10
def run_exp(data_folder, session_id, subject_id, low_cut_hz, model, cuda):
    ival = [-500, 4000]
    max_epochs = 1600
    max_increase_epochs = 160
    batch_size = 10
    high_cut_hz = 38
    factor_new = 1e-3
    init_block_size = 1000
    valid_set_fraction = .2
    ''' # BCIcompetition
    train_filename = 'A{:02d}T.gdf'.format(subject_id)
    test_filename = 'A{:02d}E.gdf'.format(subject_id)
    train_filepath = os.path.join(data_folder, train_filename)
    test_filepath = os.path.join(data_folder, test_filename)
    train_label_filepath = train_filepath.replace('.gdf', '.mat')
    test_label_filepath = test_filepath.replace('.gdf', '.mat')

    train_loader = BCICompetition4Set2A(
        train_filepath, labels_filename=train_label_filepath)
    test_loader = BCICompetition4Set2A(
        test_filepath, labels_filename=test_label_filepath)
    train_cnt = train_loader.load()
    test_cnt = test_loader.load()
    '''

    # GIGAscience
    filename = 'sess{:02d}_subj{:02d}_EEG_MI.mat'.format(
        session_id, subject_id)
    filepath = os.path.join(data_folder, filename)
    train_variable = 'EEG_MI_train'
    test_variable = 'EEG_MI_test'

    train_loader = GIGAscience(filepath, train_variable)
    test_loader = GIGAscience(filepath, test_variable)
    train_cnt = train_loader.load()
    test_cnt = test_loader.load()

    # Preprocessing
    ''' channel
    ['Fp1', 'Fp2', 'F7', 'F3', 'Fz', 'F4', 'F8', 'FC5', 'FC1', 'FC2', 'FC6', 'T7', 'C3', 'Cz', 'C4', 'T8', 'TP9', 'CP5',
     'CP1', 'CP2', 'CP6', 'TP10', 'P7', 'P3', 'Pz', 'P4', 'P8', 'PO9', 'O1', 'Oz', 'O2', 'PO10', 'FC3', 'FC4', 'C5',
     'C1', 'C2', 'C6', 'CP3', 'CPz', 'CP4', 'P1', 'P2', 'POz', 'FT9', 'FTT9h', 'TTP7h', 'TP7', 'TPP9h', 'FT10',
     'FTT10h', 'TPP8h', 'TP8', 'TPP10h', 'F9', 'F10', 'AF7', 'AF3', 'AF4', 'AF8', 'PO3', 'PO4']
    '''

    train_cnt = train_cnt.pick_channels([
        'FC5', 'FC3', 'FC1', 'Fz', 'FC2', 'FC4', 'FC6', 'C5', 'C3', 'C1', 'Cz',
        'C2', 'C4', 'C6', 'CP5', 'CP3', 'CP1', 'CPz', 'CP2', 'CP4', 'CP6', 'Pz'
    ])
    train_cnt, train_cnt.info['events'] = train_cnt.copy().resample(
        250, npad='auto', events=train_cnt.info['events'])

    assert len(train_cnt.ch_names) == 22
    # let's convert to microvolts for numerical stability of the next operations
    train_cnt = mne_apply(lambda a: a * 1e6, train_cnt)
    train_cnt = mne_apply(
        lambda a: bandpass_cnt(a,
                               low_cut_hz,
                               high_cut_hz,
                               train_cnt.info['sfreq'],
                               filt_order=3,
                               axis=1), train_cnt)
    train_cnt = mne_apply(
        lambda a: exponential_running_standardize(
            a.T, factor_new=factor_new, init_block_size=init_block_size,
            eps=1e-4).T, train_cnt)

    test_cnt = test_cnt.pick_channels([
        'FC5', 'FC3', 'FC1', 'Fz', 'FC2', 'FC4', 'FC6', 'C5', 'C3', 'C1', 'Cz',
        'C2', 'C4', 'C6', 'CP5', 'CP3', 'CP1', 'CPz', 'CP2', 'CP4', 'CP6', 'Pz'
    ])
    test_cnt, test_cnt.info['events'] = test_cnt.copy().resample(
        250, npad='auto', events=test_cnt.info['events'])

    assert len(test_cnt.ch_names) == 22
    test_cnt = mne_apply(lambda a: a * 1e6, test_cnt)
    test_cnt = mne_apply(
        lambda a: bandpass_cnt(a,
                               low_cut_hz,
                               high_cut_hz,
                               test_cnt.info['sfreq'],
                               filt_order=3,
                               axis=1), test_cnt)
    test_cnt = mne_apply(
        lambda a: exponential_running_standardize(
            a.T, factor_new=factor_new, init_block_size=init_block_size,
            eps=1e-4).T, test_cnt)

    marker_def = OrderedDict([('Right Hand', [1]), ('Left Hand', [2])])

    train_set = create_signal_target_from_raw_mne(train_cnt, marker_def, ival)
    test_set = create_signal_target_from_raw_mne(test_cnt, marker_def, ival)

    train_set, valid_set = split_into_two_sets(
        train_set, first_set_fraction=1 - valid_set_fraction)

    set_random_seeds(seed=20190706, cuda=cuda)

    n_classes = 2
    n_chans = int(train_set.X.shape[1])
    input_time_length = train_set.X.shape[2]
    if model == 'shallow':
        model = ShallowFBCSPNet(n_chans,
                                n_classes,
                                input_time_length=input_time_length,
                                final_conv_length='auto').create_network()
    elif model == 'deep':
        model = Deep4Net(n_chans,
                         n_classes,
                         input_time_length=input_time_length,
                         final_conv_length='auto').create_network()
    if cuda:
        model.cuda()
    log.info("Model: \n{:s}".format(str(model)))

    optimizer = optim.Adam(model.parameters())

    iterator = BalancedBatchSizeIterator(batch_size=batch_size)

    stop_criterion = Or([
        MaxEpochs(max_epochs),
        NoDecrease('valid_misclass', max_increase_epochs)
    ])

    monitors = [LossMonitor(), MisclassMonitor(), RuntimeMonitor()]

    model_constraint = MaxNormDefaultConstraint()

    exp = Experiment(model,
                     train_set,
                     valid_set,
                     test_set,
                     iterator=iterator,
                     loss_function=F.nll_loss,
                     optimizer=optimizer,
                     model_constraint=model_constraint,
                     monitors=monitors,
                     stop_criterion=stop_criterion,
                     remember_best_column='valid_misclass',
                     run_after_early_stop=True,
                     cuda=cuda)
    exp.run()
    return exp
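
A hypothetical call (data folder, session and subject ids are illustrative):

exp = run_exp('data/gigascience/', session_id=1, subject_id=1, low_cut_hz=4, model='deep', cuda=True)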
Example #11
def network_model(subject_id, model_type, data_type, cropped, cuda, parameters, hyp_params):
	best_params = dict() # dictionary to store hyper-parameter values

	#####Parameters passed to function#####
	max_epochs  = parameters['max_epochs']
	max_increase_epochs = parameters['max_increase_epochs']
	batch_size = parameters['batch_size']

	#####Constant Parameters#####
	best_loss = 100.0 # instantiate starting point for loss
	iterator = BalancedBatchSizeIterator(batch_size=batch_size)
	stop_criterion = Or([MaxEpochs(max_epochs),
						 NoDecrease('valid_misclass', max_increase_epochs)])
	monitors = [LossMonitor(), MisclassMonitor(), RuntimeMonitor()]
	model_constraint = MaxNormDefaultConstraint()
	epoch = 4096

	#####Collect and format data#####
	if data_type == 'words':
		data, labels = format_data(data_type, subject_id, epoch)
		data = data[:,:,768:1280] # within-trial window selected for classification
	elif data_type == 'vowels':
		data, labels = format_data(data_type, subject_id, epoch)
		data = data[:,:,512:1024]
	elif data_type == 'all_classes':
		data, labels = format_data(data_type, subject_id, epoch)
		data = data[:,:,768:1280]
	
	x = lambda a: a * 1e6 # improves numerical stability
	data = x(data)
	
	data = normalize(data)
	data, labels = balanced_subsample(data, labels) # downsampling the data to ensure equal classes
	data, _, labels, _ = train_test_split(data, labels, test_size=0, random_state=42) # redundant shuffle of data/labels

	#####model inputs#####
	unique, counts = np.unique(labels, return_counts=True)
	n_classes = len(unique)
	n_chans   = int(data.shape[1])
	input_time_length = data.shape[2]

	#####k-fold nested cross-validation#####
	num_folds = 4
	skf = StratifiedKFold(n_splits=num_folds, shuffle=True, random_state=10)
	out_fold_num = 0 # outer-fold number
	
	cv_scores = [] # list for storing cross-validated scores
	#####Outer-Fold#####
	for inner_ind, outer_index in skf.split(data, labels):
		inner_fold, outer_fold     = data[inner_ind], data[outer_index]
		inner_labels, outer_labels = labels[inner_ind], labels[outer_index]
		out_fold_num += 1
		loss_with_params = dict()# for storing param values and losses
		in_fold_num = 0 # inner-fold number
		
		#####Inner-Fold#####
		for train_idx, valid_idx in skf.split(inner_fold, inner_labels):
			X_Train, X_val = inner_fold[train_idx], inner_fold[valid_idx]
			y_train, y_val = inner_labels[train_idx], inner_labels[valid_idx]
			in_fold_num += 1
			train_set = SignalAndTarget(X_Train, y_train)
			valid_set = SignalAndTarget(X_val, y_val)
			loss_with_params[f"Fold_{in_fold_num}"] = dict()
			
			####Nested cross-validation#####
			for drop_prob in hyp_params['drop_prob']:
				for loss_function in hyp_params['loss']:
					for i in range(len(hyp_params['lr_adam'])):
						model = None # ensure no duplication of models
						# model, learning-rate and optimizer setup according to model_type
						if model_type == 'shallow':
							model =  ShallowFBCSPNet(in_chans=n_chans, n_classes=n_classes, input_time_length=input_time_length,
										 n_filters_time=80, filter_time_length=40, n_filters_spat=80, 
										 pool_time_length=75, pool_time_stride=25, final_conv_length='auto',
										 conv_nonlin=square, pool_mode='max', pool_nonlin=safe_log, 
										 split_first_layer=True, batch_norm=True, batch_norm_alpha=0.1,
										 drop_prob=drop_prob).create_network()
							lr = hyp_params['lr_ada'][i]
							optimizer = optim.Adadelta(model.parameters(), lr=lr, rho=0.9, weight_decay=0.1, eps=1e-8)
						elif model_type == 'deep':
							model = Deep4Net(in_chans=n_chans, n_classes=n_classes, input_time_length=input_time_length,
										 final_conv_length='auto', n_filters_time=20, n_filters_spat=20, filter_time_length=10,
										 pool_time_length=3, pool_time_stride=3, n_filters_2=50, filter_length_2=15,
										 n_filters_3=100, filter_length_3=15, n_filters_4=400, filter_length_4=10,
										 first_nonlin=leaky_relu, first_pool_mode='max', first_pool_nonlin=safe_log, later_nonlin=leaky_relu,
										 later_pool_mode='max', later_pool_nonlin=safe_log, drop_prob=drop_prob, 
										 double_time_convs=False, split_first_layer=False, batch_norm=True, batch_norm_alpha=0.1,
										 stride_before_pool=False).create_network() #filter_length_4 changed from 15 to 10
							lr = hyp_params['lr_ada'][i]
							optimizer = optim.Adadelta(model.parameters(), lr=lr, weight_decay=0.1, eps=1e-8)
						elif model_type == 'eegnet':
							model = EEGNetv4(in_chans=n_chans, n_classes=n_classes, final_conv_length='auto', 
										 input_time_length=input_time_length, pool_mode='mean', F1=16, D=2, F2=32,
										 kernel_length=64, third_kernel_size=(8,4), drop_prob=drop_prob).create_network()
							lr = hyp_params['lr_adam'][i]
							optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=0, eps=1e-8, amsgrad=False)
						
						set_random_seeds(seed=20190629, cuda=cuda)
						
						if cuda:
							model.cuda()
							torch.backends.cudnn.deterministic = True
						model = torch.nn.DataParallel(model)
						log.info("Model: \n{:s}".format(str(model)))

						loss_function = loss_function
						model_loss_function = None

						#####Setup to run the selected model#####
						model_test = Experiment(model, train_set, valid_set, test_set=None, iterator=iterator,
												loss_function=loss_function, optimizer=optimizer,
												model_constraint=model_constraint, monitors=monitors,
												stop_criterion=stop_criterion, remember_best_column='valid_misclass',
												run_after_early_stop=True, model_loss_function=model_loss_function, cuda=cuda,
												data_type=data_type, subject_id=subject_id, model_type=model_type, 
												cropped=cropped, model_number=str(out_fold_num)) 

						model_test.run()
						model_loss = model_test.epochs_df['valid_loss'].astype('float')
						current_val_loss = current_loss(model_loss)
						loss_with_params[f"Fold_{in_fold_num}"][f"{drop_prob}/{loss_function}/{lr}"] = current_val_loss

		####Select and train optimized model#####
		df = pd.DataFrame(loss_with_params)
		df['mean'] = df.mean(axis=1) # compute mean loss across k-folds
		writer_df = f"results_folder\\results\\S{subject_id}\\{model_type}_parameters.xlsx"
		df.to_excel(writer_df)
		
		best_dp, best_loss, best_lr = df.loc[df['mean'].idxmin()].__dict__['_name'].split("/") # extract best param values
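		# str(F.nll_loss) starts with "<function nll_loss", so [10:13] slices out 'nll' ('cro' for cross_entropy)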
		if str(best_loss[10:13]) == 'nll':
			best_loss = F.nll_loss
		elif str(best_loss[10:13]) == 'cro':
			best_loss = F.cross_entropy
		
		print(f"Best parameters: dropout: {best_dp}, loss: {str(best_loss)[10:13]}, lr: {best_lr}")

		#####Train model on entire inner fold set#####
		torch.backends.cudnn.deterministic = True
		model = None
		#####Create outer-fold validation and test sets#####
		X_valid, X_test, y_valid, y_test = train_test_split(outer_fold, outer_labels, test_size=0.5, random_state=42, stratify=outer_labels)
		train_set = SignalAndTarget(inner_fold, inner_labels)
		valid_set = SignalAndTarget(X_valid, y_valid)
		test_set  = SignalAndTarget(X_test, y_test)


		if model_type == 'shallow':
			model =  ShallowFBCSPNet(in_chans=n_chans, n_classes=n_classes, input_time_length=input_time_length,
						 n_filters_time=60, filter_time_length=5, n_filters_spat=40, 
						 pool_time_length=50, pool_time_stride=15, final_conv_length='auto',
						 conv_nonlin=relu6, pool_mode='mean', pool_nonlin=safe_log, 
						 split_first_layer=True, batch_norm=True, batch_norm_alpha=0.1,
						 drop_prob=0.1).create_network() #50 works better than 75
			
			optimizer = optim.Adadelta(model.parameters(), lr=2.0, rho=0.9, weight_decay=0.1, eps=1e-8) 
			
		elif model_type == 'deep':
			model = Deep4Net(in_chans=n_chans, n_classes=n_classes, input_time_length=input_time_length,
						 final_conv_length='auto', n_filters_time=20, n_filters_spat=20, filter_time_length=5,
						 pool_time_length=3, pool_time_stride=3, n_filters_2=20, filter_length_2=5,
						 n_filters_3=40, filter_length_3=5, n_filters_4=1500, filter_length_4=10,
						 first_nonlin=leaky_relu, first_pool_mode='mean', first_pool_nonlin=safe_log, later_nonlin=leaky_relu,
						 later_pool_mode='mean', later_pool_nonlin=safe_log, drop_prob=0.1, 
						 double_time_convs=False, split_first_layer=True, batch_norm=True, batch_norm_alpha=0.1,
						 stride_before_pool=False).create_network()
			
			optimizer = AdamW(model.parameters(), lr=0.1, weight_decay=0)
		elif model_type == 'eegnet':
			model = EEGNetv4(in_chans=n_chans, n_classes=n_classes, final_conv_length='auto', 
						 input_time_length=input_time_length, pool_mode='mean', F1=16, D=2, F2=32,
						 kernel_length=64, third_kernel_size=(8,4), drop_prob=0.1).create_network()
			optimizer = optim.Adam(model.parameters(), lr=0.1, weight_decay=0, eps=1e-8, amsgrad=False) 
			

		if cuda:
			model.cuda()
			torch.backends.cudnn.deterministic = True
			#model = torch.nn.DataParallel(model)
		
		log.info("Optimized model")
		model_loss_function=None
		
		#####Setup to run the optimized model#####
		optimized_model = op_exp(model, train_set, valid_set, test_set=test_set, iterator=iterator,
								loss_function=best_loss, optimizer=optimizer,
								model_constraint=model_constraint, monitors=monitors,
								stop_criterion=stop_criterion, remember_best_column='valid_misclass',
								run_after_early_stop=True, model_loss_function=model_loss_function, cuda=cuda,
								data_type=data_type, subject_id=subject_id, model_type=model_type, 
								cropped=cropped, model_number=str(out_fold_num))
		optimized_model.run()

		log.info("Last 5 epochs")
		log.info("\n" + str(optimized_model.epochs_df.iloc[-5:]))
		
		writer = f"results_folder\\results\\S{subject_id}\\{data_type}_{model_type}_{str(out_fold_num)}.xlsx"
		optimized_model.epochs_df.iloc[-30:].to_excel(writer)

		accuracy = 1 - np.min(np.array(optimized_model.class_acc))
		cv_scores.append(accuracy) # k accuracy scores for this param set. 
		
	#####Print and store fold accuracies and mean accuracy#####
	
	print(f"Class Accuracy: {np.mean(np.array(cv_scores))}")
	results_df = pd.DataFrame(dict(cv_scores=cv_scores,
								   cv_mean=np.mean(np.array(cv_scores))))

	writer2 = f"results_folder\\results\\S{subject_id}\\{data_type}_{model_type}_cvscores.xlsx"
	results_df.to_excel(writer2)
	return optimized_model, np.mean(np.array(cv_scores))
Example #12
    def call_model(self):
        if self.model_type == 'shallow':
            model = ShallowFBCSPNet(in_chans=self.n_chans,
                                    n_classes=self.n_classes,
                                    input_time_length=self.input_time_length,
                                    n_filters_time=40,
                                    filter_time_length=25,
                                    n_filters_spat=40,
                                    pool_time_length=75,
                                    pool_time_stride=15,
                                    final_conv_length='auto',
                                    conv_nonlin=getattr(
                                        torch.nn.functional, self.activation),
                                    pool_mode='mean',
                                    pool_nonlin=safe_log,
                                    split_first_layer=True,
                                    batch_norm=True,
                                    batch_norm_alpha=0.1,
                                    drop_prob=0.1).create_network()

        elif self.model_type == 'deep':
            model = Deep4Net(in_chans=self.n_chans,
                             n_classes=self.n_classes,
                             input_time_length=self.input_time_length,
                             final_conv_length='auto',
                             n_filters_time=25,
                             n_filters_spat=25,
                             filter_time_length=10,
                             pool_time_length=3,
                             pool_time_stride=3,
                             n_filters_2=50,
                             filter_length_2=10,
                             n_filters_3=100,
                             filter_length_3=10,
                             n_filters_4=200,
                             filter_length_4=10,
                             first_nonlin=getattr(torch.nn.functional,
                                                  self.activation),
                             first_pool_mode='max',
                             first_pool_nonlin=safe_log,
                             later_nonlin=getattr(torch.nn.functional,
                                                  self.activation),
                             later_pool_mode='max',
                             later_pool_nonlin=safe_log,
                             drop_prob=0.1,
                             double_time_convs=False,
                             split_first_layer=False,
                             batch_norm=True,
                             batch_norm_alpha=0.1,
                             stride_before_pool=False).create_network()

        elif self.model_type == 'eegnet':
            model = EEGNetv4(in_chans=self.n_chans,
                             n_classes=self.n_classes,
                             final_conv_length='auto',
                             input_time_length=self.input_time_length,
                             pool_mode='mean',
                             F1=16,
                             D=2,
                             F2=32,
                             kernel_length=64,
                             third_kernel_size=(8, 4),
                             conv_nonlin=getattr(torch.nn.functional,
                                                 self.activation),
                             drop_prob=0.1).create_network()
        return model
Example #13
def create_model(in_channels, num_classes, cuda=True):
    def squeeze_out(x):
        assert x.size()[1] == num_classes and x.size()[3] == 1
        return x.squeeze(3).transpose(1, 2)

    if cfg.TRAINING.MODEL.lower() == 'rnn':
        model = RNNs(in_channels=in_channels)

    elif 'deep4' in cfg.TRAINING.MODEL.lower():
        if 'wide' in cfg.TRAINING.MODEL.lower():
            pool_length = 4
            pool_stride = 4
        elif 'narrow' in cfg.TRAINING.MODEL.lower():
            pool_length = 2
            pool_stride = 2
        else:
            pool_length = 3
            pool_stride = 3

        model = Deep4Net(in_chans=in_channels,
                         n_classes=num_classes,
                         input_time_length=cfg.TRAINING.CROP_LEN,
                         pool_time_length=pool_length,
                         pool_time_stride=pool_stride,
                         final_conv_length=2,
                         stride_before_pool=True).create_network()

        # remove softmax
        new_model = nn.Sequential()
        for name, module in model.named_children():
            if name == 'softmax':
                # continue
                break
            new_model.add_module(name, module)

        # remove empty final dimension and permute output shape
        new_model.add_module('squeeze', Expression(squeeze_out))
        # if num_classes > 1:
        #     def transpose_class_time(x):
        #         return x.transpose(2, 1)
        #
        #     new_model.add_module('trans', Expression(transpose_class_time))

        model = new_model

        to_dense_prediction_model(model)

    elif cfg.TRAINING.MODEL.lower() == 'deep5':
        #  pool_time_length=3
        #  pool_time_stride=3
        model = Deep5Net(in_chans=in_channels,
                         n_classes=num_classes,
                         input_time_length=cfg.TRAINING.CROP_LEN,
                         final_conv_length=2,
                         stride_before_pool=True).create_network()

        # remove softmax
        new_model = nn.Sequential()
        for name, module in model.named_children():
            if name == 'softmax':
                # continue
                break
            new_model.add_module(name, module)

        # remove empty final dimension and permute output shape
        new_model.add_module('squeeze', Expression(squeeze_out))
        # if num_classes > 1:
        #     def transpose_class_time(x):
        #         return x.transpose(2, 1)
        #
        #     new_model.add_module('trans', Expression(transpose_class_time))

        model = new_model

        to_dense_prediction_model(model)

    elif cfg.TRAINING.MODEL.lower() == 'shallow':
        model = Shallow(in_chans=in_channels,
                        n_classes=num_classes,
                        input_time_length=cfg.TRAINING.CROP_LEN,
                        final_conv_length=2).create_network()

        # remove softmax
        new_model = nn.Sequential()
        for name, module in model.named_children():
            if name == 'softmax':
                break
            new_model.add_module(name, module)

        # remove empty final dimension and permute output shape
        new_model.add_module('squeeze', Expression(squeeze_out))

        to_dense_prediction_model(model)

    elif cfg.TRAINING.MODEL.lower() == 'hybrid':
        model = Hybrid(in_channels=in_channels)

    elif cfg.TRAINING.MODEL.lower() == 'tcn':
        raise NotImplementedError
    else:
        assert False, f"Unknown Model {cfg.TRAINING.MODEL}"
    optimizer = optim.Adam(model.parameters(),
                           lr=cfg.OPTIMIZATION.BASE_LR,
                           weight_decay=cfg.OPTIMIZATION.WEIGHT_DECAY)
    scheduler = CosineAnnealingLR(optimizer, T_max=cfg.TRAINING.MAX_EPOCHS)
    if cuda:
        model.cuda()

    model.eval()

    metric = lambda targets, predictions: np.corrcoef(targets, predictions)[0, 1]
    loss_fun = mse_loss
    logger.info(model)
    return model, optimizer, scheduler, loss_fun, metric
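The squeeze_out helper at the top of this example reshapes the dense model's output from (batch, classes, time, 1) to (batch, time, classes) so it can be scored per timestep (the example trains with mse_loss). A minimal sketch of that transformation, using hypothetical shapes:

import torch

num_classes = 2
# Hypothetical dense-prediction output: batch of 4, 2 output channels,
# 100 per-timestep predictions, plus the trailing singleton dimension.
x = torch.randn(4, num_classes, 100, 1)

# Same operation as squeeze_out: drop the empty last axis,
# then swap the class and time axes.
out = x.squeeze(3).transpose(1, 2)
print(out.shape)  # torch.Size([4, 100, 2])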
Example #14
def run_exp_on_high_gamma_dataset(train_filename, test_filename, low_cut_hz,
                                  model_name, max_epochs, max_increase_epochs,
                                  np_th_seed, debug):
    train_set, valid_set, test_set = load_train_valid_test(
        train_filename=train_filename,
        test_filename=test_filename,
        low_cut_hz=low_cut_hz,
        debug=debug)
    if debug:
        max_epochs = 4

    set_random_seeds(np_th_seed, cuda=True)
    # torch.backends.cudnn.benchmark = True  # sometimes crashes?
    n_classes = int(np.max(train_set.y) + 1)
    n_chans = int(train_set.X.shape[1])
    input_time_length = 1000
    if model_name == 'deep':
        model = Deep4Net(n_chans,
                         n_classes,
                         input_time_length=input_time_length,
                         final_conv_length=2).create_network()
    elif model_name == 'shallow':
        model = ShallowFBCSPNet(n_chans,
                                n_classes,
                                input_time_length=input_time_length,
                                final_conv_length=30).create_network()

    to_dense_prediction_model(model)
    model.cuda()
    model.eval()

    out = model(np_to_var(train_set.X[:1, :, :input_time_length, None]).cuda())

    n_preds_per_input = out.cpu().data.numpy().shape[2]
    optimizer = optim.Adam(model.parameters(), weight_decay=0, lr=1e-3)

    iterator = CropsFromTrialsIterator(batch_size=60,
                                       input_time_length=input_time_length,
                                       n_preds_per_input=n_preds_per_input,
                                       seed=np_th_seed)

    monitors = [
        LossMonitor(),
        MisclassMonitor(col_suffix='sample_misclass'),
        CroppedTrialMisclassMonitor(input_time_length=input_time_length),
        RuntimeMonitor()
    ]

    model_constraint = MaxNormDefaultConstraint()

    loss_function = lambda preds, targets: F.nll_loss(th.mean(preds, dim=2),
                                                      targets)

    run_after_early_stop = True
    do_early_stop = True
    remember_best_column = 'valid_misclass'
    stop_criterion = Or([
        MaxEpochs(max_epochs),
        NoDecrease('valid_misclass', max_increase_epochs)
    ])

    exp = Experiment(model,
                     train_set,
                     valid_set,
                     test_set,
                     iterator=iterator,
                     loss_function=loss_function,
                     optimizer=optimizer,
                     model_constraint=model_constraint,
                     monitors=monitors,
                     stop_criterion=stop_criterion,
                     remember_best_column=remember_best_column,
                     run_after_early_stop=run_after_early_stop,
                     cuda=True,
                     do_early_stop=do_early_stop)
    exp.run()
    return exp
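The loss used above averages the per-crop log-probabilities over the crop axis before applying the negative log-likelihood, so every crop of a trial contributes to a single prediction. A small self-contained sketch of the same computation with hypothetical shapes:

import torch
import torch.nn.functional as F

# Hypothetical dense-prediction output: 3 trials, 4 classes,
# 125 per-crop log-probabilities each.
preds = torch.log_softmax(torch.randn(3, 4, 125), dim=1)
targets = torch.tensor([0, 2, 1])

# Mean over the crop axis, then NLL, as in loss_function above.
loss = F.nll_loss(torch.mean(preds, dim=2), targets)
print(loss.item())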
Example #15
cuda = False
set_random_seeds(seed=20170629, cuda=cuda)
n_classes = 3
in_chans = train_set.X.shape[1]
print("INFO : in_chans: {}".format(in_chans))
print("INFO : input_time_length: {}".format(train_set.X.shape[2]))

# final_conv_length = auto ensures we only get a single output in the time dimension
if model_type == 'shallow':
    model = ShallowFBCSPNet(in_chans=in_chans,
                            n_classes=n_classes,
                            input_time_length=train_set.X.shape[2],
                            final_conv_length='auto')
else:
    model = Deep4Net(in_chans=in_chans,
                     n_classes=n_classes,
                     input_time_length=train_set.X.shape[2],
                     final_conv_length='auto')
path_to_classifier = "torchModelsCrossSubjects/{}-{}-52subjects-2.5sec-800epoches-torch_model".format(
    model_type, train_type)

if cuda:
    model.cuda()

from braindecode.torch_ext.optimizers import AdamW
import torch.nn.functional as F
if model_type == 'shallow':
    optimizer = AdamW(model.parameters(), lr=0.0625 * 0.01, weight_decay=0)
else:
    optimizer = AdamW(model.parameters(),
                      lr=1 * 0.01,
                      weight_decay=0.5 * 0.001)  # these are good values for the deep model
Example #16
set_random_seeds(seed=20190706, cuda=cuda)

input_time_length = 1000
n_classes = 4
n_chans = 26  # TODO: should be 22 of course
if model_name == "shallow":
    model = ShallowFBCSPNet(
        n_chans,
        n_classes,
        input_time_length=input_time_length,
        final_conv_length=30,
    )
elif model_name == "deep":
    model = Deep4Net(
        n_chans,
        n_classes,
        input_time_length=input_time_length,
        final_conv_length=2,
    )

to_dense_prediction_model(model)

if cuda:
    model.cuda()

with torch.no_grad():
    dummy_input = torch.tensor(
        np.ones((1, n_chans, input_time_length, 1), dtype=np.float32),
        device='cuda' if cuda else 'cpu',
    )
    n_preds_per_input = model(dummy_input).shape[2]
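The probed n_preds_per_input is the value that the cropped-training examples (e.g. #14 and #19) pass to CropsFromTrialsIterator, telling the iterator how many per-timestep predictions the dense model emits for one input window so it can position successive crops within each trial.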
Example #17
def run_exp(data_folder, subject_id, low_cut_hz, model, cuda):
    ival = [-500, 4000]
    max_epochs = 1600
    max_increase_epochs = 160
    batch_size = 60
    high_cut_hz = 38
    factor_new = 1e-3
    init_block_size = 1000
    valid_set_fraction = 0.2

    train_filename = "A{:02d}T.gdf".format(subject_id)
    test_filename = "A{:02d}E.gdf".format(subject_id)
    train_filepath = os.path.join(data_folder, train_filename)
    test_filepath = os.path.join(data_folder, test_filename)
    train_label_filepath = train_filepath.replace(".gdf", ".mat")
    test_label_filepath = test_filepath.replace(".gdf", ".mat")

    train_loader = BCICompetition4Set2A(
        train_filepath, labels_filename=train_label_filepath
    )
    test_loader = BCICompetition4Set2A(
        test_filepath, labels_filename=test_label_filepath
    )
    train_cnt = train_loader.load()
    test_cnt = test_loader.load()

    # Preprocessing

    train_cnt = train_cnt.drop_channels(
        ["EOG-left", "EOG-central", "EOG-right"]
    )
    assert len(train_cnt.ch_names) == 22
    # let's convert to millivolt for numerical stability of the next operations
    train_cnt = mne_apply(lambda a: a * 1e6, train_cnt)
    train_cnt = mne_apply(
        lambda a: bandpass_cnt(
            a,
            low_cut_hz,
            high_cut_hz,
            train_cnt.info["sfreq"],
            filt_order=3,
            axis=1,
        ),
        train_cnt,
    )
    train_cnt = mne_apply(
        lambda a: exponential_running_standardize(
            a.T,
            factor_new=factor_new,
            init_block_size=init_block_size,
            eps=1e-4,
        ).T,
        train_cnt,
    )

    test_cnt = test_cnt.drop_channels(["EOG-left", "EOG-central", "EOG-right"])
    assert len(test_cnt.ch_names) == 22
    test_cnt = mne_apply(lambda a: a * 1e6, test_cnt)
    test_cnt = mne_apply(
        lambda a: bandpass_cnt(
            a,
            low_cut_hz,
            high_cut_hz,
            test_cnt.info["sfreq"],
            filt_order=3,
            axis=1,
        ),
        test_cnt,
    )
    test_cnt = mne_apply(
        lambda a: exponential_running_standardize(
            a.T,
            factor_new=factor_new,
            init_block_size=init_block_size,
            eps=1e-4,
        ).T,
        test_cnt,
    )

    marker_def = OrderedDict(
        [
            ("Left Hand", [1]),
            ("Right Hand", [2]),
            ("Foot", [3]),
            ("Tongue", [4]),
        ]
    )

    train_set = create_signal_target_from_raw_mne(train_cnt, marker_def, ival)
    test_set = create_signal_target_from_raw_mne(test_cnt, marker_def, ival)

    train_set, valid_set = split_into_two_sets(
        train_set, first_set_fraction=1 - valid_set_fraction
    )

    set_random_seeds(seed=20190706, cuda=cuda)

    n_classes = 4
    n_chans = int(train_set.X.shape[1])
    input_time_length = train_set.X.shape[2]
    if model == "shallow":
        model = ShallowFBCSPNet(
            n_chans,
            n_classes,
            input_time_length=input_time_length,
            final_conv_length="auto",
        ).create_network()
    elif model == "deep":
        model = Deep4Net(
            n_chans,
            n_classes,
            input_time_length=input_time_length,
            final_conv_length="auto",
        ).create_network()
    if cuda:
        model.cuda()
    log.info("Model: \n{:s}".format(str(model)))

    optimizer = optim.Adam(model.parameters())

    iterator = BalancedBatchSizeIterator(batch_size=batch_size)

    stop_criterion = Or(
        [
            MaxEpochs(max_epochs),
            NoDecrease("valid_misclass", max_increase_epochs),
        ]
    )

    monitors = [LossMonitor(), MisclassMonitor(), RuntimeMonitor()]

    model_constraint = MaxNormDefaultConstraint()

    exp = Experiment(
        model,
        train_set,
        valid_set,
        test_set,
        iterator=iterator,
        loss_function=F.nll_loss,
        optimizer=optimizer,
        model_constraint=model_constraint,
        monitors=monitors,
        stop_criterion=stop_criterion,
        remember_best_column="valid_misclass",
        run_after_early_stop=True,
        cuda=cuda,
    )
    exp.run()
    return exp
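A side note on the preprocessing used above: the data is transposed before and after exponential_running_standardize because that function treats the first axis as time, while the MNE arrays are channels x time. A minimal sketch, assuming the braindecode 0.4-era import path used throughout these examples:

import numpy as np
from braindecode.datautil.signalproc import exponential_running_standardize

# Toy continuous recording: 22 channels x 2000 samples.
data = np.random.randn(22, 2000).astype(np.float32)

# Transpose to time x channels, standardize, transpose back.
standardized = exponential_running_standardize(
    data.T, factor_new=1e-3, init_block_size=1000, eps=1e-4).T
print(standardized.shape)  # (22, 2000)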
Example #18
from torch import nn
from braindecode.torch_ext.util import set_random_seeds
from braindecode.models.util import to_dense_prediction_model

# Set if you want to use GPU
# You can also use torch.cuda.is_available() to determine if cuda is available on your machine.
cuda = False
set_random_seeds(seed=20170629, cuda=cuda)

# This will determine how many crops are processed in parallel
input_time_length = 450
# final_conv_length determines the size of the receptive field of the ConvNet
model = Deep4Net(in_chans=64,
                 n_classes=2,
                 input_time_length=input_time_length,
                 filter_length_3=5,
                 filter_length_4=5,
                 pool_time_stride=2,
                 final_conv_length=1).create_network()
to_dense_prediction_model(model)

if cuda:
    model.cuda()

from torch import optim
import torch

optimizer = optim.Adam(model.parameters())

from braindecode.torch_ext.util import np_to_var
import numpy as np

# determine output size by pushing a dummy input through the model
dummy_input = np_to_var(np.ones((1, 64, input_time_length, 1), dtype=np.float32))
if cuda:
    dummy_input = dummy_input.cuda()
out = model(dummy_input)
n_preds_per_input = out.cpu().data.numpy().shape[2]
Example #19
def run_exp(max_recording_mins, n_recordings, sec_to_cut,
            duration_recording_mins, max_abs_val, max_min_threshold,
            max_min_expected, shrink_val, max_min_remove, batch_set_zero_val,
            batch_set_zero_test, sampling_freq, low_cut_hz, high_cut_hz,
            exp_demean, exp_standardize, moving_demean, moving_standardize,
            channel_demean, channel_standardize, divisor, n_folds, i_test_fold,
            model_name, input_time_length, final_conv_length, batch_size,
            max_epochs, only_return_exp):
    cuda = True

    preproc_functions = []
    preproc_functions.append(lambda data, fs: (
        data[:, int(sec_to_cut * fs):-int(sec_to_cut * fs)], fs))
    preproc_functions.append(lambda data, fs: (data[:, :int(
        duration_recording_mins * 60 * fs)], fs))
    if max_abs_val is not None:
        preproc_functions.append(
            lambda data, fs: (np.clip(data, -max_abs_val, max_abs_val), fs))
    if max_min_threshold is not None:
        preproc_functions.append(lambda data, fs: (clean_jumps(
            data, 200, max_min_threshold, max_min_expected, cuda), fs))
    if max_min_remove is not None:
        window_len = 200
        preproc_functions.append(lambda data, fs: (set_jumps_to_zero(
            data,
            window_len=window_len,
            threshold=max_min_remove,
            cuda=cuda,
            clip_min_max_to_zero=True), fs))

    if shrink_val is not None:
        preproc_functions.append(lambda data, fs: (shrink_spikes(
            data,
            shrink_val,
            1,
            9,
        ), fs))

    preproc_functions.append(lambda data, fs: (resampy.resample(
        data, fs, sampling_freq, axis=1, filter='kaiser_fast'), sampling_freq))
    preproc_functions.append(lambda data, fs: (bandpass_cnt(
        data, low_cut_hz, high_cut_hz, fs, filt_order=4, axis=1), fs))

    if exp_demean:
        preproc_functions.append(lambda data, fs: (exponential_running_demean(
            data.T, factor_new=0.001, init_block_size=100).T, fs))
    if exp_standardize:
        preproc_functions.append(
            lambda data, fs: (exponential_running_standardize(
                data.T, factor_new=0.001, init_block_size=100).T, fs))
    if moving_demean:
        preproc_functions.append(lambda data, fs: (padded_moving_demean(
            data, axis=1, n_window=201), fs))
    if moving_standardize:
        preproc_functions.append(lambda data, fs: (padded_moving_standardize(
            data, axis=1, n_window=201), fs))
    if channel_demean:
        preproc_functions.append(lambda data, fs: (demean(data, axis=1), fs))
    if channel_standardize:
        preproc_functions.append(lambda data, fs:
                                 (standardize(data, axis=1), fs))
    if divisor is not None:
        preproc_functions.append(lambda data, fs: (data / divisor, fs))

    all_file_names, labels = get_all_sorted_file_names_and_labels()
    lengths = np.load(
        '/home/schirrmr/code/auto-diagnosis/sorted-recording-lengths.npy')
    mask = lengths < max_recording_mins * 60
    cleaned_file_names = np.array(all_file_names)[mask]
    cleaned_labels = labels[mask]

    diffs_per_rec = np.load(
        '/home/schirrmr/code/auto-diagnosis/diffs_per_recording.npy')

    def create_set(inds):
        X = []
        for i in inds:
            log.info("Load {:s}".format(cleaned_file_names[i]))
            x = load_data(cleaned_file_names[i], preproc_functions)
            X.append(x)
        y = cleaned_labels[inds].astype(np.int64)
        return SignalAndTarget(X, y)

    if not only_return_exp:
        folds = get_balanced_batches(n_recordings,
                                     None,
                                     False,
                                     n_batches=n_folds)
        test_inds = folds[i_test_fold]
        valid_inds = folds[i_test_fold - 1]
        all_inds = list(range(n_recordings))
        train_inds = np.setdiff1d(all_inds, np.union1d(test_inds, valid_inds))

        rec_nr_sorted_by_diff = np.argsort(diffs_per_rec)[::-1]
        train_inds = rec_nr_sorted_by_diff[train_inds]
        valid_inds = rec_nr_sorted_by_diff[valid_inds]
        test_inds = rec_nr_sorted_by_diff[test_inds]

        train_set = create_set(train_inds)
        valid_set = create_set(valid_inds)
        test_set = create_set(test_inds)
    else:
        train_set = None
        valid_set = None
        test_set = None

    set_random_seeds(seed=20170629, cuda=cuda)
    # This will determine how many crops are processed in parallel
    n_classes = 2
    in_chans = 21
    if model_name == 'shallow':
        model = ShallowFBCSPNet(
            in_chans=in_chans,
            n_classes=n_classes,
            input_time_length=input_time_length,
            final_conv_length=final_conv_length).create_network()
    elif model_name == 'deep':
        model = Deep4Net(in_chans,
                         n_classes,
                         input_time_length=input_time_length,
                         final_conv_length=final_conv_length).create_network()

    optimizer = optim.Adam(model.parameters())
    to_dense_prediction_model(model)
    log.info("Model:\n{:s}".format(str(model)))
    if cuda:
        model.cuda()
    # determine output size
    test_input = np_to_var(
        np.ones((2, in_chans, input_time_length, 1), dtype=np.float32))
    if cuda:
        test_input = test_input.cuda()
    out = model(test_input)
    n_preds_per_input = out.cpu().data.numpy().shape[2]
    log.info("{:d} predictions per input/trial".format(n_preds_per_input))
    iterator = CropsFromTrialsIterator(batch_size=batch_size,
                                       input_time_length=input_time_length,
                                       n_preds_per_input=n_preds_per_input)
    loss_function = lambda preds, targets: F.nll_loss(
        th.mean(preds, dim=2)[:, :, 0], targets)
    model_constraint = None
    monitors = [
        LossMonitor(),
        MisclassMonitor(col_suffix='sample_misclass'),
        CroppedTrialMisclassMonitor(input_time_length),
        RuntimeMonitor(),
    ]
    stop_criterion = MaxEpochs(max_epochs)
    batch_modifier = None
    if batch_set_zero_val is not None:
        batch_modifier = RemoveMinMaxDiff(batch_set_zero_val,
                                          clip_max_abs=True,
                                          set_zero=True)
    if (batch_set_zero_val is not None) and batch_set_zero_test:
        iterator = ModifiedIterator(
            iterator,
            batch_modifier,
        )
        batch_modifier = None
    exp = Experiment(model,
                     train_set,
                     valid_set,
                     test_set,
                     iterator,
                     loss_function,
                     optimizer,
                     model_constraint,
                     monitors,
                     stop_criterion,
                     remember_best_column='valid_misclass',
                     run_after_early_stop=True,
                     batch_modifier=batch_modifier,
                     cuda=cuda)
    if not only_return_exp:
        exp.run()
    else:
        exp.dataset = None
        exp.splitter = None

    return exp
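For reference, the fold logic above — test fold i, validation fold i - 1 (which wraps to the last fold when i is 0), training on the remainder — can be sketched in plain NumPy, with np.array_split standing in for get_balanced_batches:

import numpy as np

n_recordings, n_folds, i_test_fold = 10, 5, 0
folds = np.array_split(np.arange(n_recordings), n_folds)
test_inds = folds[i_test_fold]
valid_inds = folds[i_test_fold - 1]  # index -1 wraps to the last fold
all_inds = np.arange(n_recordings)
train_inds = np.setdiff1d(all_inds, np.union1d(test_inds, valid_inds))
print(train_inds, valid_inds, test_inds)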
Example #20
from braindecode.util import set_random_seeds

# Set if you want to use GPU
# You can also use torch.cuda.is_available() to determine if cuda is available on your machine.
cuda = False
set_random_seeds(seed=20170629, cuda=cuda)

# This will determine how many crops are processed in parallel
input_time_length = 450
# final_conv_length determines the size of the receptive field of the ConvNet
model = Deep4Net(
    in_chans=64,
    n_classes=2,
    input_time_length=input_time_length,
    filter_length_3=5,
    filter_length_4=5,
    pool_time_stride=2,
    stride_before_pool=True,
    final_conv_length=1,
)
if cuda:
    model.cuda()

from torch.optim import AdamW
import torch.nn.functional as F

optimizer = AdamW(
    model.parameters(), lr=0.01, weight_decay=0.5 * 0.001
)  # these are good values for the deep model
model.compile(
    loss=F.nll_loss, optimizer=optimizer, iterator_seed=1, cropped=True
)
Example #21
def run_exp(epoches, batch_size, subject_num, model_type, cuda, single_subject,
            single_subject_num):
    # ival = [-500, 4000]
    max_increase_epochs = 160

    # Preprocessing
    X, y = loadSubjects(subject_num, single_subject, single_subject_num)
    X = X.astype(np.float32)
    y = y.astype(np.int64)
    X, y = shuffle(X, y)

    trial_length = X.shape[2]
    print("trial_length " + str(trial_length))
    print("trying to run with {} sec trials ".format((trial_length - 1) / 256))
    print("y")
    print(y)
    trainingSampleSize = int(len(X) * 0.6)
    validationSampleSize = int(len(X) * 0.2)
    testSampleSize = int(len(X) * 0.2)
    print("INFO : Training sample size: {}".format(trainingSampleSize))
    print("INFO : Validation sample size: {}".format(validationSampleSize))
    print("INFO : Test sample size: {}".format(testSampleSize))

    train_set = SignalAndTarget(X[:trainingSampleSize],
                                y=y[:trainingSampleSize])
    valid_set = SignalAndTarget(
        X[trainingSampleSize:(trainingSampleSize + validationSampleSize)],
        y=y[trainingSampleSize:(trainingSampleSize + validationSampleSize)])
    test_set = SignalAndTarget(X[(trainingSampleSize + validationSampleSize):],
                               y=y[(trainingSampleSize +
                                    validationSampleSize):])

    set_random_seeds(seed=20190706, cuda=cuda)

    n_classes = 3
    n_chans = int(train_set.X.shape[1])
    input_time_length = train_set.X.shape[2]
    if model_type == 'shallow':
        model = ShallowFBCSPNet(n_chans,
                                n_classes,
                                input_time_length=input_time_length,
                                final_conv_length='auto').create_network()
    elif model_type == 'deep':
        model = Deep4Net(n_chans,
                         n_classes,
                         input_time_length=input_time_length,
                         final_conv_length='auto').create_network()
    elif model_type == 'eegnet':
        model = EEGNetv4(n_chans,
                         n_classes,
                         input_time_length=input_time_length,
                         final_conv_length='auto').create_network()
    if cuda:
        model.cuda()
    log.info("Model: \n{:s}".format(str(model)))

    optimizer = optim.Adam(model.parameters())

    iterator = BalancedBatchSizeIterator(batch_size=batch_size)

    stop_criterion = Or([
        MaxEpochs(epoches),
        NoDecrease('valid_misclass', max_increase_epochs)
    ])

    monitors = [LossMonitor(), MisclassMonitor(), RuntimeMonitor()]

    model_constraint = MaxNormDefaultConstraint()

    exp = Experiment(model,
                     train_set,
                     valid_set,
                     test_set,
                     iterator=iterator,
                     loss_function=F.nll_loss,
                     optimizer=optimizer,
                     model_constraint=model_constraint,
                     monitors=monitors,
                     stop_criterion=stop_criterion,
                     remember_best_column='valid_misclass',
                     run_after_early_stop=True,
                     cuda=cuda)
    exp.run()
    # th.save(model, "models\{}-cropped-singleSubjectNum{}-{}sec-{}epoches-torch_model".format(model_type, single_subject_num, ((trial_length - 1) / 256), epoches))
    return exp
Example #22
def run_exp(max_recording_mins, n_recordings, sec_to_cut,
            duration_recording_mins, max_abs_val, shrink_val, sampling_freq,
            divisor, n_folds, i_test_fold, final_conv_length, model_constraint,
            batch_size, max_epochs, n_filters_start, n_filters_factor,
            filter_time_length, first_nonlin, first_pool_mode,
            first_pool_nonlin, pool_time_stride, pool_time_length, drop_prob,
            filter_length_2, later_nonlin, later_pool_mode, later_pool_nonlin,
            filter_length_3, filter_length_4, double_time_convs,
            split_first_layer, do_batch_norm, stride_before_pool,
            input_time_length, only_return_exp, time_cut_off_sec, start_time):
    kwargs = locals()
    for model_param in [
            'final_conv_length', 'n_filters_start', 'n_filters_factor',
            'filter_time_length', 'first_nonlin', 'first_pool_mode',
            'first_pool_nonlin', 'pool_time_stride', 'pool_time_length',
            'drop_prob', 'filter_length_2', 'later_nonlin', 'later_pool_mode',
            'later_pool_nonlin', 'filter_length_3', 'filter_length_4',
            'double_time_convs', 'split_first_layer', 'do_batch_norm',
            'stride_before_pool'
    ]:
        kwargs.pop(model_param)
    nonlin_dict = {
        'elu': elu,
        'relu': relu,
        'relu6': relu6,
        'tanh': tanh,
        'square': square,
        'identity': identity,
        'log': safe_log,
    }
    assert input_time_length == 6000

    n_classes = 2
    in_chans = 21
    cuda = True
    set_random_seeds(seed=20170629, cuda=cuda)
    model = Deep4Net(
        in_chans=in_chans,
        n_classes=n_classes,
        input_time_length=input_time_length,
        final_conv_length=final_conv_length,
        n_filters_time=n_filters_start,
        n_filters_spat=n_filters_start,
        filter_time_length=filter_time_length,
        pool_time_length=pool_time_length,
        pool_time_stride=pool_time_stride,
        n_filters_2=int(n_filters_start * n_filters_factor),
        filter_length_2=filter_length_2,
        n_filters_3=int(n_filters_start * (n_filters_factor**2.0)),
        filter_length_3=filter_length_3,
        n_filters_4=int(n_filters_start * (n_filters_factor**3.0)),
        filter_length_4=filter_length_4,
        first_nonlin=nonlin_dict[first_nonlin],
        first_pool_mode=first_pool_mode,
        first_pool_nonlin=nonlin_dict[first_pool_nonlin],
        later_nonlin=nonlin_dict[later_nonlin],
        later_pool_mode=later_pool_mode,
        later_pool_nonlin=nonlin_dict[later_pool_nonlin],
        drop_prob=drop_prob,
        double_time_convs=double_time_convs,
        split_first_layer=split_first_layer,
        batch_norm=do_batch_norm,
        batch_norm_alpha=0.1,
        stride_before_pool=stride_before_pool).create_network()

    to_dense_prediction_model(model)
    if cuda:
        model.cuda()
    test_input = np_to_var(
        np.ones((2, in_chans, input_time_length, 1), dtype=np.float32))
    if cuda:
        test_input = test_input.cuda()
    try:
        out = model(test_input)
    except RuntimeError:
        raise ValueError("Model receptive field too large...")
    n_preds_per_input = out.cpu().data.numpy().shape[2]
    n_receptive_field = input_time_length - n_preds_per_input
    if n_receptive_field > 6000:
        raise ValueError("Model receptive field ({:d}) too large...".format(
            n_receptive_field))
        # For future, here optionally add input time length instead

    model = Deep4Net(
        in_chans=in_chans,
        n_classes=n_classes,
        input_time_length=input_time_length,
        final_conv_length=final_conv_length,
        n_filters_time=n_filters_start,
        n_filters_spat=n_filters_start,
        filter_time_length=filter_time_length,
        pool_time_length=pool_time_length,
        pool_time_stride=pool_time_stride,
        n_filters_2=int(n_filters_start * n_filters_factor),
        filter_length_2=filter_length_2,
        n_filters_3=int(n_filters_start * (n_filters_factor**2.0)),
        filter_length_3=filter_length_3,
        n_filters_4=int(n_filters_start * (n_filters_factor**3.0)),
        filter_length_4=filter_length_4,
        first_nonlin=nonlin_dict[first_nonlin],
        first_pool_mode=first_pool_mode,
        first_pool_nonlin=nonlin_dict[first_pool_nonlin],
        later_nonlin=nonlin_dict[later_nonlin],
        later_pool_mode=later_pool_mode,
        later_pool_nonlin=nonlin_dict[later_pool_nonlin],
        drop_prob=drop_prob,
        double_time_convs=double_time_convs,
        split_first_layer=split_first_layer,
        batch_norm=do_batch_norm,
        batch_norm_alpha=0.1,
        stride_before_pool=stride_before_pool).create_network()
    return common.run_exp(model=model, **kwargs)
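The dummy forward pass above turns the receptive-field constraint into a simple subtraction: with input_time_length asserted to be 6000, a configuration whose probe yields, say, n_preds_per_input = 1200 (a hypothetical value) has a receptive field of 6000 - 1200 = 4800 samples and passes, while a configuration so deep that the forward pass produces no output at all is rejected with the ValueError.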
Example #23
def run_exp(max_recording_mins, n_recordings, sec_to_cut,
            duration_recording_mins, max_abs_val, max_min_threshold,
            max_min_expected, shrink_val, max_min_remove, batch_set_zero_val,
            batch_set_zero_test, sampling_freq, low_cut_hz, high_cut_hz,
            exp_demean, exp_standardize, moving_demean, moving_standardize,
            channel_demean, channel_standardize, divisor, n_folds, i_test_fold,
            input_time_length, final_conv_length, pool_stride, n_blocks_to_add,
            sigmoid, model_constraint, batch_size, max_epochs,
            only_return_exp):
    cuda = True

    preproc_functions = []
    preproc_functions.append(lambda data, fs: (
        data[:, int(sec_to_cut * fs):-int(sec_to_cut * fs)], fs))
    preproc_functions.append(lambda data, fs: (data[:, :int(
        duration_recording_mins * 60 * fs)], fs))
    if max_abs_val is not None:
        preproc_functions.append(
            lambda data, fs: (np.clip(data, -max_abs_val, max_abs_val), fs))
    if max_min_threshold is not None:
        preproc_functions.append(lambda data, fs: (clean_jumps(
            data, 200, max_min_threshold, max_min_expected, cuda), fs))
    if max_min_remove is not None:
        window_len = 200
        preproc_functions.append(lambda data, fs: (set_jumps_to_zero(
            data,
            window_len=window_len,
            threshold=max_min_remove,
            cuda=cuda,
            clip_min_max_to_zero=True), fs))

    if shrink_val is not None:
        preproc_functions.append(lambda data, fs: (shrink_spikes(
            data,
            shrink_val,
            1,
            9,
        ), fs))

    preproc_functions.append(lambda data, fs: (resampy.resample(
        data, fs, sampling_freq, axis=1, filter='kaiser_fast'), sampling_freq))
    preproc_functions.append(lambda data, fs: (bandpass_cnt(
        data, low_cut_hz, high_cut_hz, fs, filt_order=4, axis=1), fs))

    if exp_demean:
        preproc_functions.append(lambda data, fs: (exponential_running_demean(
            data.T, factor_new=0.001, init_block_size=100).T, fs))
    if exp_standardize:
        preproc_functions.append(
            lambda data, fs: (exponential_running_standardize(
                data.T, factor_new=0.001, init_block_size=100).T, fs))
    if moving_demean:
        preproc_functions.append(lambda data, fs: (padded_moving_demean(
            data, axis=1, n_window=201), fs))
    if moving_standardize:
        preproc_functions.append(lambda data, fs: (padded_moving_standardize(
            data, axis=1, n_window=201), fs))
    if channel_demean:
        preproc_functions.append(lambda data, fs: (demean(data, axis=1), fs))
    if channel_standardize:
        preproc_functions.append(lambda data, fs:
                                 (standardize(data, axis=1), fs))
    if divisor is not None:
        preproc_functions.append(lambda data, fs: (data / divisor, fs))

    dataset = DiagnosisSet(n_recordings=n_recordings,
                           max_recording_mins=max_recording_mins,
                           preproc_functions=preproc_functions)
    if not only_return_exp:
        X, y = dataset.load()

    splitter = Splitter(
        n_folds,
        i_test_fold,
    )
    if not only_return_exp:
        train_set, valid_set, test_set = splitter.split(X, y)
        del X, y  # shouldn't be necessary, but just to make sure
    else:
        train_set = None
        valid_set = None
        test_set = None

    set_random_seeds(seed=20170629, cuda=cuda)
    if sigmoid:
        n_classes = 1
    else:
        n_classes = 2
    in_chans = 21

    net = Deep4Net(
        in_chans=in_chans,
        n_classes=n_classes,
        input_time_length=input_time_length,
        final_conv_length=final_conv_length,
        pool_time_length=pool_stride,
        pool_time_stride=pool_stride,
        n_filters_2=50,
        n_filters_3=80,
        n_filters_4=120,
    )
    model = net_with_more_layers(net, n_blocks_to_add, nn.MaxPool2d)
    if sigmoid:
        model = to_linear_plus_minus_net(model)
    optimizer = optim.Adam(model.parameters())
    to_dense_prediction_model(model)
    log.info("Model:\n{:s}".format(str(model)))
    if cuda:
        model.cuda()
    # determine output size
    test_input = np_to_var(
        np.ones((2, in_chans, input_time_length, 1), dtype=np.float32))
    if cuda:
        test_input = test_input.cuda()
    out = model(test_input)
    n_preds_per_input = out.cpu().data.numpy().shape[2]
    log.info("{:d} predictions per input/trial".format(n_preds_per_input))
    iterator = CropsFromTrialsIterator(batch_size=batch_size,
                                       input_time_length=input_time_length,
                                       n_preds_per_input=n_preds_per_input)
    if sigmoid:
        loss_function = lambda preds, targets: binary_cross_entropy_with_logits(
            th.mean(preds, dim=2)[:, 1, 0], targets.type_as(preds))
    else:
        loss_function = lambda preds, targets: F.nll_loss(
            th.mean(preds, dim=2)[:, :, 0], targets)

    if model_constraint is not None:
        model_constraint = MaxNormDefaultConstraint()
    monitors = [
        LossMonitor(),
        MisclassMonitor(col_suffix='sample_misclass'),
        CroppedTrialMisclassMonitor(input_time_length),
        RuntimeMonitor(),
    ]
    stop_criterion = MaxEpochs(max_epochs)
    batch_modifier = None
    if batch_set_zero_val is not None:
        batch_modifier = RemoveMinMaxDiff(batch_set_zero_val,
                                          clip_max_abs=True,
                                          set_zero=True)
    if (batch_set_zero_val is not None) and batch_set_zero_test:
        iterator = ModifiedIterator(
            iterator,
            batch_modifier,
        )
        batch_modifier = None
    exp = Experiment(model,
                     train_set,
                     valid_set,
                     test_set,
                     iterator,
                     loss_function,
                     optimizer,
                     model_constraint,
                     monitors,
                     stop_criterion,
                     remember_best_column='valid_misclass',
                     run_after_early_stop=True,
                     batch_modifier=batch_modifier,
                     cuda=cuda)
    if not only_return_exp:
        exp.run()
    else:
        exp.dataset = dataset
        exp.splitter = splitter

    return exp
Example #24
def run_exp(data_folder, subject_id, low_cut_hz, model, cuda):
    ival = [-500, 4000]
    input_time_length = 1000
    max_epochs = 800
    max_increase_epochs = 80
    batch_size = 60
    high_cut_hz = 38
    factor_new = 1e-3
    init_block_size = 1000
    valid_set_fraction = 0.2

    train_filename = 'A{:02d}T.gdf'.format(subject_id)
    test_filename = 'A{:02d}E.gdf'.format(subject_id)
    train_filepath = os.path.join(data_folder, train_filename)
    test_filepath = os.path.join(data_folder, test_filename)
    train_label_filepath = train_filepath.replace('.gdf', '.mat')
    test_label_filepath = test_filepath.replace('.gdf', '.mat')

    train_loader = BCICompetition4Set2A(train_filepath,
                                        labels_filename=train_label_filepath)
    test_loader = BCICompetition4Set2A(test_filepath,
                                       labels_filename=test_label_filepath)
    train_cnt = train_loader.load()
    test_cnt = test_loader.load()

    # Preprocessing

    train_cnt = train_cnt.drop_channels(
        ['STI 014', 'EOG-left', 'EOG-central', 'EOG-right'])
    assert len(train_cnt.ch_names) == 22
    # let's convert to millivolt for numerical stability of the next operations
    train_cnt = mne_apply(lambda a: a * 1e6, train_cnt)
    train_cnt = mne_apply(
        lambda a: bandpass_cnt(a,
                               low_cut_hz,
                               high_cut_hz,
                               train_cnt.info['sfreq'],
                               filt_order=3,
                               axis=1), train_cnt)
    train_cnt = mne_apply(
        lambda a: exponential_running_standardize(
            a.T,
            factor_new=factor_new,
            init_block_size=init_block_size,
            eps=1e-4).T, train_cnt)

    test_cnt = test_cnt.drop_channels(
        ['STI 014', 'EOG-left', 'EOG-central', 'EOG-right'])
    assert len(test_cnt.ch_names) == 22
    test_cnt = mne_apply(lambda a: a * 1e6, test_cnt)
    test_cnt = mne_apply(
        lambda a: bandpass_cnt(a,
                               low_cut_hz,
                               high_cut_hz,
                               test_cnt.info['sfreq'],
                               filt_order=3,
                               axis=1), test_cnt)
    test_cnt = mne_apply(
        lambda a: exponential_running_standardize(
            a.T,
            factor_new=factor_new,
            init_block_size=init_block_size,
            eps=1e-4).T, test_cnt)

    marker_def = OrderedDict([('Left Hand', [1]), ('Right Hand', [2]),
                              ('Foot', [3]), ('Tongue', [4])])

    train_set = create_signal_target_from_raw_mne(train_cnt, marker_def, ival)
    test_set = create_signal_target_from_raw_mne(test_cnt, marker_def, ival)

    train_set, valid_set = split_into_two_sets(train_set,
                                               first_set_fraction=1 -
                                               valid_set_fraction)

    set_random_seeds(seed=20190706, cuda=cuda)

    n_classes = 4
    n_chans = int(train_set.X.shape[1])
    if model == 'shallow':
        model = ShallowFBCSPNet(n_chans,
                                n_classes,
                                input_time_length=input_time_length,
                                final_conv_length=30).create_network()
    elif model == 'deep':
        model = Deep4Net(n_chans,
                         n_classes,
                         input_time_length=input_time_length,
                         final_conv_length=2).create_network()

    to_dense_prediction_model(model)
    if cuda:
        model.cuda()

    log.info("Model: \n{:s}".format(str(model)))
    dummy_input = np_to_var(train_set.X[:1, :, :, None])
    if cuda:
        dummy_input = dummy_input.cuda()
    out = model(dummy_input)

    n_preds_per_input = out.cpu().data.numpy().shape[2]

    optimizer = optim.Adam(model.parameters())

    iterator = CropsFromTrialsIterator(batch_size=batch_size,
                                       input_time_length=input_time_length,
                                       n_preds_per_input=n_preds_per_input)

    stop_criterion = Or([
        MaxEpochs(max_epochs),
        NoDecrease('valid_misclass', max_increase_epochs)
    ])

    monitors = [
        LossMonitor(),
        MisclassMonitor(col_suffix='sample_misclass'),
        CroppedTrialMisclassMonitor(input_time_length=input_time_length),
        RuntimeMonitor()
    ]

    model_constraint = MaxNormDefaultConstraint()

    loss_function = lambda preds, targets: F.nll_loss(
        th.mean(preds, dim=2, keepdim=False), targets)

    exp = Experiment(model,
                     train_set,
                     valid_set,
                     test_set,
                     iterator=iterator,
                     loss_function=loss_function,
                     optimizer=optimizer,
                     model_constraint=model_constraint,
                     monitors=monitors,
                     stop_criterion=stop_criterion,
                     remember_best_column='valid_misclass',
                     run_after_early_stop=True,
                     cuda=cuda)
    exp.run()
    return exp
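Examples #19 and #23 write th.mean(preds, dim=2)[:, :, 0], indexing away a trailing singleton axis after averaging, whereas the loss above relies on keepdim=False (the default) alone. When the model output does carry that trailing axis, the two reductions agree, as this quick check with hypothetical shapes shows:

import torch

# Hypothetical dense-prediction output: (batch, classes, crops, 1).
preds = torch.randn(3, 4, 125, 1)

a = torch.mean(preds, dim=2)[:, :, 0]     # style of examples #19 and #23
b = torch.mean(preds[:, :, :, 0], dim=2)  # keepdim=False on the squeezed tensor
assert torch.allclose(a, b)               # both are (batch, classes)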
Example #25
            train_set.y = np.float32(train_set.y[:, :cutInd])

            valid_set = None

            set_random_seeds(seed=20170629, cuda=cuda)

            # This will determine how many crops are processed in parallel
            input_time_length = int(timeWindowDuration / 1000 *
                                    samplingRate)  # train_set.X.shape[1]
            in_chans = train_set.X[0].shape[0]

            if Deep4:
                # final_conv_length determines the size of the receptive field of the ConvNet
                model = Deep4Net(in_chans=in_chans,
                                 n_classes=1,
                                 input_time_length=input_time_length,
                                 pool_time_stride=pool_time_stride,
                                 final_conv_length=2,
                                 stride_before_pool=True).create_network()
            elif ResNet:
                model_name = 'resnet-xavier-uniform'
                init_name = model_name[len('resnet-'):]  # lstrip would strip characters, not the prefix
                from torch.nn import init
                init_fn = {
                    'he-uniform': lambda w: init.kaiming_uniform(w, a=0),
                    'he-normal': lambda w: init.kaiming_normal(w, a=0),
                    'xavier-uniform': lambda w: init.xavier_uniform(w, gain=1),
                    'xavier-normal': lambda w: init.xavier_normal(w, gain=1)
                }[init_name]

                model = EEGResNet(
                    in_chans=in_chans,
Example #26
def run_exp(test_on_eval, sensor_types, n_chans, max_recording_mins,
            test_recording_mins, n_recordings, sec_to_cut_at_start,
            sec_to_cut_at_end, duration_recording_mins, max_abs_val,
            clip_before_resample, sampling_freq, divisor, n_folds, i_test_fold,
            shuffle, merge_train_valid, model_name, n_start_chans,
            n_chan_factor, input_time_length, final_conv_length,
            stride_before_pool, optimizer, learning_rate, weight_decay,
            scheduler, model_constraint, batch_size, max_epochs, log_dir,
            only_return_exp, np_th_seed):

    cuda = True
    if ('smac' in model_name) and (input_time_length is None):
        input_time_length = 12000
        fix_input_length_for_smac = True
    else:
        fix_input_length_for_smac = False
    set_random_seeds(seed=np_th_seed, cuda=cuda)
    n_classes = 2
    if model_name == 'shallow':
        model = ShallowFBCSPNet(
            in_chans=n_chans,
            n_classes=n_classes,
            n_filters_time=n_start_chans,
            n_filters_spat=n_start_chans,
            input_time_length=input_time_length,
            final_conv_length=final_conv_length).create_network()
    elif model_name == 'deep':
        model = Deep4Net(
            n_chans,
            n_classes,
            n_filters_time=n_start_chans,
            n_filters_spat=n_start_chans,
            input_time_length=input_time_length,
            n_filters_2=int(n_start_chans * n_chan_factor),
            n_filters_3=int(n_start_chans * (n_chan_factor**2.0)),
            n_filters_4=int(n_start_chans * (n_chan_factor**3.0)),
            final_conv_length=final_conv_length,
            stride_before_pool=stride_before_pool).create_network()
    elif (model_name == 'deep_smac') or (model_name == 'deep_smac_bnorm'):
        if model_name == 'deep_smac':
            do_batch_norm = False
        else:
            assert model_name == 'deep_smac_bnorm'
            do_batch_norm = True
        double_time_convs = False
        drop_prob = 0.244445
        filter_length_2 = 12
        filter_length_3 = 14
        filter_length_4 = 12
        filter_time_length = 21
        final_conv_length = 1
        first_nonlin = elu
        first_pool_mode = 'mean'
        first_pool_nonlin = identity
        later_nonlin = elu
        later_pool_mode = 'mean'
        later_pool_nonlin = identity
        n_filters_factor = 1.679066
        n_filters_start = 32
        pool_time_length = 1
        pool_time_stride = 2
        split_first_layer = True
        n_chan_factor = n_filters_factor
        n_start_chans = n_filters_start
        model = Deep4Net(n_chans,
                         n_classes,
                         n_filters_time=n_start_chans,
                         n_filters_spat=n_start_chans,
                         input_time_length=input_time_length,
                         n_filters_2=int(n_start_chans * n_chan_factor),
                         n_filters_3=int(n_start_chans * (n_chan_factor**2.0)),
                         n_filters_4=int(n_start_chans * (n_chan_factor**3.0)),
                         final_conv_length=final_conv_length,
                         batch_norm=do_batch_norm,
                         double_time_convs=double_time_convs,
                         drop_prob=drop_prob,
                         filter_length_2=filter_length_2,
                         filter_length_3=filter_length_3,
                         filter_length_4=filter_length_4,
                         filter_time_length=filter_time_length,
                         first_nonlin=first_nonlin,
                         first_pool_mode=first_pool_mode,
                         first_pool_nonlin=first_pool_nonlin,
                         later_nonlin=later_nonlin,
                         later_pool_mode=later_pool_mode,
                         later_pool_nonlin=later_pool_nonlin,
                         pool_time_length=pool_time_length,
                         pool_time_stride=pool_time_stride,
                         split_first_layer=split_first_layer,
                         stride_before_pool=True).create_network()
    elif model_name == 'shallow_smac':
        conv_nonlin = identity
        do_batch_norm = True
        drop_prob = 0.328794
        filter_time_length = 56
        final_conv_length = 22
        n_filters_spat = 73
        n_filters_time = 24
        pool_mode = 'max'
        pool_nonlin = identity
        pool_time_length = 84
        pool_time_stride = 3
        split_first_layer = True
        model = ShallowFBCSPNet(
            in_chans=n_chans,
            n_classes=n_classes,
            n_filters_time=n_filters_time,
            n_filters_spat=n_filters_spat,
            input_time_length=input_time_length,
            final_conv_length=final_conv_length,
            conv_nonlin=conv_nonlin,
            batch_norm=do_batch_norm,
            drop_prob=drop_prob,
            filter_time_length=filter_time_length,
            pool_mode=pool_mode,
            pool_nonlin=pool_nonlin,
            pool_time_length=pool_time_length,
            pool_time_stride=pool_time_stride,
            split_first_layer=split_first_layer,
        ).create_network()
    elif model_name == 'deep_smac_new':
        from torch.nn.functional import elu, relu, relu6, tanh
        from braindecode.torch_ext.functions import identity, square, safe_log
        n_filters_factor = 1.9532637176784269
        n_filters_start = 61

        deep_kwargs = {
            "batch_norm": False,
            "double_time_convs": False,
            "drop_prob": 0.3622676569047184,
            "filter_length_2": 9,
            "filter_length_3": 6,
            "filter_length_4": 10,
            "filter_time_length": 17,
            "final_conv_length": 5,
            "first_nonlin": elu,
            "first_pool_mode": "max",
            "first_pool_nonlin": identity,
            "later_nonlin": relu6,
            "later_pool_mode": "max",
            "later_pool_nonlin": identity,
            "n_filters_time": n_filters_start,
            "n_filters_spat": n_filters_start,
            "n_filters_2": int(n_filters_start * n_filters_factor),
            "n_filters_3": int(n_filters_start * (n_filters_factor**2.0)),
            "n_filters_4": int(n_filters_start * (n_filters_factor**3.0)),
            "pool_time_length": 1,
            "pool_time_stride": 4,
            "split_first_layer": True,
            "stride_before_pool": True,
        }

        model = Deep4Net(n_chans,
                         n_classes,
                         input_time_length=input_time_length,
                         **deep_kwargs).create_network()
    elif model_name == 'shallow_smac_new':
        from torch.nn.functional import elu, relu, relu6, tanh
        from braindecode.torch_ext.functions import identity, square, safe_log
        shallow_kwargs = {
            "conv_nonlin": square,
            "batch_norm": True,
            "drop_prob": 0.10198630723385381,
            "filter_time_length": 51,
            "final_conv_length": 1,
            "n_filters_spat": 200,
            "n_filters_time": 76,
            "pool_mode": "max",
            "pool_nonlin": safe_log,
            "pool_time_length": 139,
            "pool_time_stride": 49,
            "split_first_layer": True,
        }

        model = ShallowFBCSPNet(in_chans=n_chans,
                                n_classes=n_classes,
                                input_time_length=input_time_length,
                                **shallow_kwargs).create_network()
    elif model_name == 'linear':
        model = nn.Sequential()
        model.add_module("conv_classifier",
                         nn.Conv2d(n_chans, n_classes, (600, 1)))
        model.add_module('softmax', nn.LogSoftmax(dim=1))
        model.add_module('squeeze', Expression(lambda x: x.squeeze(3)))
    elif model_name == '3path':
        virtual_chan_1x1_conv = True
        mean_across_features = False
        drop_prob = 0.5
        n_start_filters = 10
        early_bnorm = False
        n_classifier_filters = 100
        later_kernel_len = 5
        extra_conv_stride = 4
        # don't forget to reset n_preds_per_blabla
        model = create_multi_start_path_net(
            in_chans=n_chans,
            virtual_chan_1x1_conv=virtual_chan_1x1_conv,
            n_start_filters=n_start_filters,
            early_bnorm=early_bnorm,
            later_kernel_len=later_kernel_len,
            extra_conv_stride=extra_conv_stride,
            mean_across_features=mean_across_features,
            n_classifier_filters=n_classifier_filters,
            drop_prob=drop_prob)
    else:
        assert False, "unknown model name {:s}".format(model_name)
    if model_name != '3path':
        to_dense_prediction_model(model)
    log.info("Model:\n{:s}".format(str(model)))
    time_cut_off_sec = np.inf
    start_time = time.time()

    # fix input time length in case of smac models
    if fix_input_length_for_smac:
        assert ('smac' in model_name) and (input_time_length == 12000)
        if cuda:
            model.cuda()
        test_input = np_to_var(
            np.ones((2, n_chans, input_time_length, 1), dtype=np.float32))
        if cuda:
            test_input = test_input.cuda()
        try:
            out = model(test_input)
        except RuntimeError:
            raise ValueError("Model receptive field too large...")
        n_preds_per_input = out.cpu().data.numpy().shape[2]
        n_receptive_field = input_time_length - n_preds_per_input
        input_time_length = 2 * n_receptive_field

    exp = common.run_exp(
        max_recording_mins,
        n_recordings,
        sec_to_cut_at_start,
        sec_to_cut_at_end,
        duration_recording_mins,
        max_abs_val,
        clip_before_resample,
        sampling_freq,
        divisor,
        n_folds,
        i_test_fold,
        shuffle,
        merge_train_valid,
        model,
        input_time_length,
        optimizer,
        learning_rate,
        weight_decay,
        scheduler,
        model_constraint,
        batch_size,
        max_epochs,
        only_return_exp,
        time_cut_off_sec,
        start_time,
        test_on_eval,
        test_recording_mins,
        sensor_types,
        log_dir,
        np_th_seed,
    )

    return exp
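The input-length fix-up for the smac models follows directly from the probe: if the dummy forward pass on the 12000-sample window returns, say, n_preds_per_input = 4000 (a hypothetical value), the receptive field is 12000 - 4000 = 8000 samples, and training continues with input_time_length = 2 * 8000 = 16000, i.e. a window twice the receptive field.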
Example #27
set_random_seeds(seed=20170629, cuda=cuda)
in_chans = train_set.X.shape[1]
print("INFO : in_chans: {}".format(in_chans))
np.set_printoptions(suppress=True, threshold=np.inf)

# final_conv_length = auto ensures we only get a single output in the time dimension
if train_type == 'trialwise':
    input_time_length = train_set.X.shape[2]
    if model_type == 'shallow':
        model = ShallowFBCSPNet(in_chans=in_chans, n_classes=n_classes,
                                input_time_length=input_time_length,
                                final_conv_length='auto')
    else:
        model = Deep4Net(in_chans=in_chans, n_classes=n_classes,
                         input_time_length=input_time_length,
                         final_conv_length='auto')
else: # cropped
    if model_type == 'shallow':
        model = ShallowFBCSPNet(in_chans=in_chans, n_classes=n_classes,
                                input_time_length=None,
                                final_conv_length=1)
    else:
        model = Deep4Net(in_chans=in_chans, n_classes=n_classes,
                         input_time_length=None,
                         final_conv_length=1)
if cuda:
    model.cuda()

from braindecode.torch_ext.optimizers import AdamW
import torch.nn.functional as F
Example #28
def run_exp(
    debug,
    subject_id,
    max_epochs,
    n_sensors,
    final_hz,
    half_before,
    start_ms,
    stop_ms,
    model,
    weight_decay,
    final_fft,
    add_bnorm,
    act_norm,
):
    model_name = model
    del model
    assert final_hz in [64, 256]

    car = not debug
    train_inputs, test_inputs = load_train_test(
        subject_id,
        car,
        n_sensors,
        final_hz,
        start_ms,
        stop_ms,
        half_before,
        only_load_given_sensors=debug,
    )

    cuda = True
    if cuda:
        train_inputs = [i.cuda() for i in train_inputs]
        test_inputs = [i.cuda() for i in test_inputs]

    from braindecode.datautil.signal_target import SignalAndTarget

    sets = []
    for inputs in (train_inputs, test_inputs):
        X = np.concatenate([var_to_np(ins) for ins in inputs]).astype(
            np.float32
        )
        y = np.concatenate(
            [np.ones(len(ins)) * i_class for i_class, ins in enumerate(inputs)]
        )
        y = y.astype(np.int64)
        signal_target = SignalAndTarget(X, y)
        sets.append(signal_target)
    train_set = sets[0]
    valid_set = sets[1]

    from braindecode.models.shallow_fbcsp import ShallowFBCSPNet
    from braindecode.models.deep4 import Deep4Net
    from torch import nn
    from braindecode.torch_ext.util import set_random_seeds

    set_random_seeds(2019011641, cuda)
    n_chans = train_inputs[0].shape[1]
    n_time = train_inputs[0].shape[2]
    n_classes = 2
    input_time_length = train_set.X.shape[2]

    if model_name == 'shallow':
        # final_conv_length = auto ensures we only get a single output in the time dimension
        model = ShallowFBCSPNet(in_chans=n_chans, n_classes=n_classes,
                                input_time_length=input_time_length,
                                final_conv_length='auto')
    elif model_name == 'deep':
        model = Deep4Net(n_chans, n_classes,
                         input_time_length=train_set.X.shape[2],
                         pool_time_length=2,
                         pool_time_stride=2,
                         final_conv_length='auto')
    elif model_name == 'invertible':
        model = InvertibleModel(n_chans, n_time, final_fft=final_fft,
                                add_bnorm=add_bnorm)
    elif model_name == 'deep_invertible':
        n_chan_pad = 0
        filter_length_time = 11
        model = deep_invertible(
            n_chans, input_time_length,  n_chan_pad,  filter_length_time)
        model.add_module("select_dims", Expression(lambda x: x[:, :2, 0]))
        model.add_module("softmax", nn.LogSoftmax(dim=1))
        model = WrappedModel(model)

        ## set scale
        if act_norm:
            model.cuda()
            for module in model.network.modules():
                if hasattr(module, 'log_factor'):
                    module._forward_hooks.clear()
                    module.register_forward_hook(scale_to_unit_var)
            model.network(train_inputs[0].cuda())
            for module in model.network.modules():
                if hasattr(module, 'log_factor'):
                    module._forward_hooks.clear()

    else:
        assert False
    if cuda:
        model.cuda()

    from braindecode.torch_ext.optimizers import AdamW
    import torch.nn.functional as F
    if model_name == 'shallow':
        assert weight_decay == 'hardcoded'
        optimizer = AdamW(model.parameters(), lr=0.0625 * 0.01, weight_decay=0)
    elif model_name == 'deep':
        assert weight_decay == 'hardcoded'
        optimizer = AdamW(model.parameters(), lr=1 * 0.01,
                          weight_decay=0.5 * 0.001)  # these are good values for the deep model
    elif model_name == 'invertible':
        optimizer = AdamW(model.parameters(), lr=1e-4,
                          weight_decay=weight_decay)
    elif model_name == 'deep_invertible':
        optimizer = AdamW(model.parameters(), lr=1 * 0.001,
                          weight_decay=weight_decay)

    else:
        assert False

    model.compile(loss=F.nll_loss, optimizer=optimizer, iterator_seed=1)
    model.fit(train_set.X, train_set.y, epochs=max_epochs, batch_size=64,
              scheduler='cosine',
              validation_data=(valid_set.X, valid_set.y), )

    return model.epochs_df, model.network
Example #29
def run_exp(
    data_folders,
    n_recordings,
    sensor_types,
    n_chans,
    max_recording_mins,
    sec_to_cut,
    duration_recording_mins,
    test_recording_mins,
    max_abs_val,
    sampling_freq,
    divisor,
    test_on_eval,
    n_folds,
    i_test_fold,
    shuffle,
    model_name,
    n_start_chans,
    n_chan_factor,
    input_time_length,
    final_conv_length,
    model_constraint,
    init_lr,
    batch_size,
    max_epochs,
    cuda,
):

    import torch.backends.cudnn as cudnn
    cudnn.benchmark = True
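    # preprocessing pipeline: each entry maps (data, fs) -> (data, fs)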
    preproc_functions = []
    preproc_functions.append(lambda data, fs: (
        data[:, int(sec_to_cut * fs):-int(sec_to_cut * fs)], fs))
    preproc_functions.append(lambda data, fs: (data[:, :int(
        duration_recording_mins * 60 * fs)], fs))
    if max_abs_val is not None:
        preproc_functions.append(
            lambda data, fs: (np.clip(data, -max_abs_val, max_abs_val), fs))

    preproc_functions.append(lambda data, fs: (resampy.resample(
        data, fs, sampling_freq, axis=1, filter='kaiser_fast'), sampling_freq))

    if divisor is not None:
        preproc_functions.append(lambda data, fs: (data / divisor, fs))

    dataset = DiagnosisSet(n_recordings=n_recordings,
                           max_recording_mins=max_recording_mins,
                           preproc_functions=preproc_functions,
                           data_folders=data_folders,
                           train_or_eval='train',
                           sensor_types=sensor_types)
    if test_on_eval:
        if test_recording_mins is None:
            test_recording_mins = duration_recording_mins
        test_preproc_functions = copy(preproc_functions)
        test_preproc_functions[1] = lambda data, fs: (data[:, :int(
            test_recording_mins * 60 * fs)], fs)
        test_dataset = DiagnosisSet(n_recordings=n_recordings,
                                    max_recording_mins=None,
                                    preproc_functions=test_preproc_functions,
                                    data_folders=data_folders,
                                    train_or_eval='eval',
                                    sensor_types=sensor_types)
    X, y = dataset.load()
    max_shape = np.max([list(x.shape) for x in X], axis=0)
    assert max_shape[1] == int(duration_recording_mins * sampling_freq * 60)
    if test_on_eval:
        test_X, test_y = test_dataset.load()
        max_shape = np.max([list(x.shape) for x in test_X], axis=0)
        assert max_shape[1] == int(test_recording_mins * sampling_freq * 60)
    if not test_on_eval:
        splitter = TrainValidTestSplitter(n_folds,
                                          i_test_fold,
                                          shuffle=shuffle)
        train_set, valid_set, test_set = splitter.split(X, y)
    else:
        splitter = TrainValidSplitter(n_folds,
                                      i_valid_fold=i_test_fold,
                                      shuffle=shuffle)
        train_set, valid_set = splitter.split(X, y)
        test_set = SignalAndTarget(test_X, test_y)
        del test_X, test_y
    del X, y  # shouldn't be necessary, but just to make sure

    set_random_seeds(seed=20170629, cuda=cuda)
    n_classes = 2
    if model_name == 'shallow':
        model = ShallowFBCSPNet(
            in_chans=n_chans,
            n_classes=n_classes,
            n_filters_time=n_start_chans,
            n_filters_spat=n_start_chans,
            input_time_length=input_time_length,
            final_conv_length=final_conv_length).create_network()
    elif model_name == 'deep':
        model = Deep4Net(n_chans,
                         n_classes,
                         n_filters_time=n_start_chans,
                         n_filters_spat=n_start_chans,
                         input_time_length=input_time_length,
                         n_filters_2=int(n_start_chans * n_chan_factor),
                         n_filters_3=int(n_start_chans * (n_chan_factor**2.0)),
                         n_filters_4=int(n_start_chans * (n_chan_factor**3.0)),
                         final_conv_length=final_conv_length,
                         stride_before_pool=True).create_network()
    elif model_name in ('deep_smac', 'deep_smac_bnorm'):
        if model_name == 'deep_smac':
            do_batch_norm = False
        else:
            assert model_name == 'deep_smac_bnorm'
            do_batch_norm = True
        double_time_convs = False
        drop_prob = 0.244445
        filter_length_2 = 12
        filter_length_3 = 14
        filter_length_4 = 12
        filter_time_length = 21
        final_conv_length = 1
        first_nonlin = elu
        first_pool_mode = 'mean'
        first_pool_nonlin = identity
        later_nonlin = elu
        later_pool_mode = 'mean'
        later_pool_nonlin = identity
        n_filters_factor = 1.679066
        n_filters_start = 32
        pool_time_length = 1
        pool_time_stride = 2
        split_first_layer = True
        n_chan_factor = n_filters_factor
        n_start_chans = n_filters_start
        model = Deep4Net(n_chans,
                         n_classes,
                         n_filters_time=n_start_chans,
                         n_filters_spat=n_start_chans,
                         input_time_length=input_time_length,
                         n_filters_2=int(n_start_chans * n_chan_factor),
                         n_filters_3=int(n_start_chans * (n_chan_factor**2.0)),
                         n_filters_4=int(n_start_chans * (n_chan_factor**3.0)),
                         final_conv_length=final_conv_length,
                         batch_norm=do_batch_norm,
                         double_time_convs=double_time_convs,
                         drop_prob=drop_prob,
                         filter_length_2=filter_length_2,
                         filter_length_3=filter_length_3,
                         filter_length_4=filter_length_4,
                         filter_time_length=filter_time_length,
                         first_nonlin=first_nonlin,
                         first_pool_mode=first_pool_mode,
                         first_pool_nonlin=first_pool_nonlin,
                         later_nonlin=later_nonlin,
                         later_pool_mode=later_pool_mode,
                         later_pool_nonlin=later_pool_nonlin,
                         pool_time_length=pool_time_length,
                         pool_time_stride=pool_time_stride,
                         split_first_layer=split_first_layer,
                         stride_before_pool=True).create_network()
    elif model_name == 'shallow_smac':
        conv_nonlin = identity
        do_batch_norm = True
        drop_prob = 0.328794
        filter_time_length = 56
        final_conv_length = 22
        n_filters_spat = 73
        n_filters_time = 24
        pool_mode = 'max'
        pool_nonlin = identity
        pool_time_length = 84
        pool_time_stride = 3
        split_first_layer = True
        model = ShallowFBCSPNet(
            in_chans=n_chans,
            n_classes=n_classes,
            n_filters_time=n_filters_time,
            n_filters_spat=n_filters_spat,
            input_time_length=input_time_length,
            final_conv_length=final_conv_length,
            conv_nonlin=conv_nonlin,
            batch_norm=do_batch_norm,
            drop_prob=drop_prob,
            filter_time_length=filter_time_length,
            pool_mode=pool_mode,
            pool_nonlin=pool_nonlin,
            pool_time_length=pool_time_length,
            pool_time_stride=pool_time_stride,
            split_first_layer=split_first_layer,
        ).create_network()
    elif model_name == 'linear':
        model = nn.Sequential()
        model.add_module("conv_classifier",
                         nn.Conv2d(n_chans, n_classes, (600, 1)))
        model.add_module('softmax', nn.LogSoftmax(dim=1))
        model.add_module('squeeze', Expression(lambda x: x.squeeze(3)))
    else:
        assert False, "unknown model name {:s}".format(model_name)
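    # replace strides with dilations so the net emits one prediction per output timestep (cropped decoding)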
    to_dense_prediction_model(model)
    log.info("Model:\n{:s}".format(str(model)))
    if cuda:
        model.cuda()
    # determine output size
    test_input = np_to_var(
        np.ones((2, n_chans, input_time_length, 1), dtype=np.float32))
    if cuda:
        test_input = test_input.cuda()
    log.info("In shape: {:s}".format(str(test_input.cpu().data.numpy().shape)))

    out = model(test_input)
    log.info("Out shape: {:s}".format(str(out.cpu().data.numpy().shape)))
    n_preds_per_input = out.cpu().data.numpy().shape[2]
    log.info("{:d} predictions per input/trial".format(n_preds_per_input))
    iterator = CropsFromTrialsIterator(batch_size=batch_size,
                                       input_time_length=input_time_length,
                                       n_preds_per_input=n_preds_per_input)
    optimizer = optim.Adam(model.parameters(), lr=init_lr)

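    # cropped loss: average per-timestep log-probabilities over time (dim 2), then NLL per trial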
    loss_function = lambda preds, targets: F.nll_loss(
        th.mean(preds, dim=2, keepdim=False), targets)

    if model_constraint is not None:
        assert model_constraint == 'defaultnorm'
        model_constraint = MaxNormDefaultConstraint()
    monitors = [
        LossMonitor(),
        MisclassMonitor(col_suffix='sample_misclass'),
        CroppedDiagnosisMonitor(input_time_length, n_preds_per_input),
        RuntimeMonitor(),
    ]
    stop_criterion = MaxEpochs(max_epochs)
    batch_modifier = None
    run_after_early_stop = True
    exp = Experiment(model,
                     train_set,
                     valid_set,
                     test_set,
                     iterator,
                     loss_function,
                     optimizer,
                     model_constraint,
                     monitors,
                     stop_criterion,
                     remember_best_column='valid_misclass',
                     run_after_early_stop=run_after_early_stop,
                     batch_modifier=batch_modifier,
                     cuda=cuda)
    exp.run()
    return exp
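
A hypothetical invocation of run_exp for reference; every value below is a placeholder of our choosing, and data_folders must point at a corpus laid out the way DiagnosisSet expects (presumably the TUH Abnormal EEG data), so this sketch will not run as-is:

exp = run_exp(
    data_folders=['/data/eeg-abnormal/'],  # placeholder path
    n_recordings=300,
    sensor_types=['EEG'],
    n_chans=21,
    max_recording_mins=35,
    sec_to_cut=60,
    duration_recording_mins=20,
    test_recording_mins=None,
    max_abs_val=800,
    sampling_freq=100,
    divisor=10,
    test_on_eval=False,
    n_folds=5,
    i_test_fold=4,
    shuffle=True,
    model_name='shallow',
    n_start_chans=25,
    n_chan_factor=2,
    input_time_length=6000,
    final_conv_length=35,
    model_constraint='defaultnorm',
    init_lr=1e-3,
    batch_size=64,
    max_epochs=35,
    cuda=True,
)
print(exp.epochs_df)
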
Example #30
def run_experiment(train_set, valid_set, test_set, model_name, optimizer_name,
                   init_lr, scheduler_name, use_norm_constraint, weight_decay,
                   schedule_weight_decay, restarts, max_epochs,
                   max_increase_epochs, np_th_seed):
    set_random_seeds(np_th_seed, cuda=True)
    # torch.backends.cudnn.benchmark = True  # sometimes crashes?
    if valid_set is not None:
        assert max_increase_epochs is not None
    assert (max_epochs is None) != (restarts is None)
    if max_epochs is None:
        max_epochs = np.sum(restarts)
    n_classes = int(np.max(train_set.y) + 1)
    n_chans = int(train_set.X.shape[1])
    input_time_length = 1000
    if model_name == 'deep':
        model = Deep4Net(n_chans,
                         n_classes,
                         input_time_length=input_time_length,
                         final_conv_length=2).create_network()
    elif model_name == 'shallow':
        model = ShallowFBCSPNet(n_chans,
                                n_classes,
                                input_time_length=input_time_length,
                                final_conv_length=30).create_network()
    elif model_name in [
            'resnet-he-uniform', 'resnet-he-normal', 'resnet-xavier-normal',
            'resnet-xavier-uniform'
    ]:
        init_name = model_name[len('resnet-'):]  # strip the 'resnet-' prefix
        from torch.nn import init
        init_fn = {
            'he-uniform': lambda w: init.kaiming_uniform_(w, a=0),
            'he-normal': lambda w: init.kaiming_normal_(w, a=0),
            'xavier-uniform': lambda w: init.xavier_uniform_(w, gain=1),
            'xavier-normal': lambda w: init.xavier_normal_(w, gain=1)
        }[init_name]
        model = EEGResNet(in_chans=n_chans,
                          n_classes=n_classes,
                          input_time_length=input_time_length,
                          final_pool_length=10,
                          n_first_filters=48,
                          conv_weight_init_fn=init_fn).create_network()
    else:
        raise ValueError("Unknown model name {:s}".format(model_name))
    if 'resnet' not in model_name:
        to_dense_prediction_model(model)
    model.cuda()
    model.eval()

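    # dummy forward pass to determine how many predictions the model emits per crop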
    out = model(np_to_var(train_set.X[:1, :, :input_time_length, None]).cuda())

    n_preds_per_input = out.cpu().data.numpy().shape[2]

    if optimizer_name == 'adam':
        optimizer = optim.Adam(model.parameters(),
                               weight_decay=weight_decay,
                               lr=init_lr)
    elif optimizer_name == 'adamw':
        optimizer = AdamW(model.parameters(),
                          weight_decay=weight_decay,
                          lr=init_lr)
    else:
        raise ValueError("Unknown optimizer name {:s}".format(optimizer_name))

    iterator = CropsFromTrialsIterator(batch_size=60,
                                       input_time_length=input_time_length,
                                       n_preds_per_input=n_preds_per_input,
                                       seed=np_th_seed)

    if scheduler_name is not None:
        assert schedule_weight_decay == (optimizer_name == 'adamw')
        if scheduler_name == 'cosine':
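            # one annealing period covers every optimizer update (epochs x batches), or one period per restart segment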
            n_updates_per_epoch = sum(
                [1 for _ in iterator.get_batches(train_set, shuffle=True)])
            if restarts is None:
                n_updates_per_period = n_updates_per_epoch * max_epochs
            else:
                n_updates_per_period = np.array(restarts) * n_updates_per_epoch
            scheduler = CosineAnnealing(n_updates_per_period)
            optimizer = ScheduledOptimizer(
                scheduler,
                optimizer,
                schedule_weight_decay=schedule_weight_decay)
        elif scheduler_name == 'cut_cosine':
            # TODO: integrate with if clause before, now just separate
            # to avoid messing with code
            n_updates_per_epoch = sum(
                [1 for _ in iterator.get_batches(train_set, shuffle=True)])
            if restarts is None:
                n_updates_per_period = n_updates_per_epoch * max_epochs
            else:
                n_updates_per_period = np.array(restarts) * n_updates_per_epoch
            scheduler = CutCosineAnnealing(n_updates_per_period)
            optimizer = ScheduledOptimizer(
                scheduler,
                optimizer,
                schedule_weight_decay=schedule_weight_decay)
        else:
            raise ValueError("Unknown scheduler")
    monitors = [
        LossMonitor(),
        MisclassMonitor(col_suffix='sample_misclass'),
        CroppedTrialMisclassMonitor(input_time_length=input_time_length),
        RuntimeMonitor()
    ]

    if use_norm_constraint:
        model_constraint = MaxNormDefaultConstraint()
    else:
        model_constraint = None
    # average predictions over the time dimension before applying NLL
    loss_function = lambda preds, targets: F.nll_loss(th.mean(preds, dim=2),
                                                      targets)

    if valid_set is not None:
        run_after_early_stop = True
        do_early_stop = True
        remember_best_column = 'valid_misclass'
        stop_criterion = Or([
            MaxEpochs(max_epochs),
            NoDecrease('valid_misclass', max_increase_epochs)
        ])
    else:
        run_after_early_stop = False
        do_early_stop = False
        remember_best_column = None
        stop_criterion = MaxEpochs(max_epochs)

    exp = Experiment(model,
                     train_set,
                     valid_set,
                     test_set,
                     iterator=iterator,
                     loss_function=loss_function,
                     optimizer=optimizer,
                     model_constraint=model_constraint,
                     monitors=monitors,
                     stop_criterion=stop_criterion,
                     remember_best_column=remember_best_column,
                     run_after_early_stop=run_after_early_stop,
                     cuda=True,
                     do_early_stop=do_early_stop)
    exp.run()
    return exp
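
Finally, a hypothetical call to run_experiment on synthetic data. CUDA is required, since the function calls model.cuda() unconditionally; the make_fake_set helper, the shapes, and the hyperparameter values are all placeholders of our choosing (note input_time_length is hard-coded to 1000 above, so trials must be at least that long):

import numpy as np
from braindecode.datautil.signal_target import SignalAndTarget

rng = np.random.RandomState(20190116)

def make_fake_set(n_trials, n_chans=22, n_times=1200):
    # synthetic trials shaped (n_trials, n_chans, n_times) with binary labels
    X = rng.randn(n_trials, n_chans, n_times).astype(np.float32)
    y = rng.randint(0, 2, size=n_trials).astype(np.int64)
    return SignalAndTarget(X, y)

exp = run_experiment(make_fake_set(40), make_fake_set(10), make_fake_set(10),
                     model_name='shallow', optimizer_name='adamw',
                     init_lr=1e-3, scheduler_name='cosine',
                     use_norm_constraint=True, weight_decay=1e-4,
                     schedule_weight_decay=True, restarts=None,
                     max_epochs=5, max_increase_epochs=3, np_th_seed=0)
print(exp.epochs_df)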