Example #1
 def __init__(self, hyp_params, parameters, data_params, subject,
              model_type, data_type):
     self.subject = subject
     self.model_type = model_type
     self.data_type = data_type
     self.best_loss = parameters["best_loss"]
     self.batch_size = parameters["batch_size"]
     self.monitors = parameters["monitors"]
     self.cuda = parameters["cuda"]
     self.model_constraint = parameters["model_constraint"]
     self.max_increase_epochs = parameters['max_increase_epochs']
     self.n_classes = data_params["n_classes"]
     self.n_chans = data_params["n_chans"]
     self.input_time_length = data_params["input_time_length"]
     self.hyp_params = hyp_params
     self.activation = "elu"
     self.learning_rate = 0.01
     self.epochs = 1
     self.loss = nll_loss
     for key in hyp_params:
         setattr(self, key, hyp_params[key])
     self.iterator = BalancedBatchSizeIterator(batch_size=self.batch_size)
     self.best_params = None
     self.model_number = 1
     self.y_pred = np.array([])
     self.probabilities = np.array([])
Example #2
def get_normal_settings():
    if global_vars.get('problem') == 'regression':
        loss_function = F.mse_loss
    else:
        loss_function = F.nll_loss
    stop_criterion = Or([MaxEpochs(global_vars.get('max_epochs')),
                         NoIncreaseDecrease(f'valid_{global_vars.get("nn_objective")}', global_vars.get('max_increase_epochs'),
                                            oper=get_oper_by_loss_function(loss_function))])
    iterator = BalancedBatchSizeIterator(batch_size=global_vars.get('batch_size'))
    monitors = [LossMonitor(), GenericMonitor('accuracy'), RuntimeMonitor()]
    for metric in global_vars.get('evaluation_metrics'):
        monitors.append(GenericMonitor(metric))
    return stop_criterion, iterator, loss_function, monitors
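The four return values plug straight into braindecode's Experiment, as the later examples show. A minimal consumption sketch, assuming a prebuilt model, optimizer, and train/valid/test sets (all hypothetical here):

stop_criterion, iterator, loss_function, monitors = get_normal_settings()
exp = Experiment(model, train_set, valid_set, test_set,
                 iterator=iterator, loss_function=loss_function,
                 optimizer=optimizer, model_constraint=None,
                 monitors=monitors, stop_criterion=stop_criterion,
                 remember_best_column='valid_misclass',
                 run_after_early_stop=True, cuda=False)
exp.run()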
Example #3
    def setUp(self):
        args = parse_args(
            ['-e', 'tests', '-c', '../configurations/config.ini'])
        init_config(args.config)
        configs = get_configurations(args.experiment)
        assert (len(configs) == 1)
        global_vars.set_config(configs[0])
        global_vars.set('eeg_chans', 22)
        global_vars.set('num_subjects', 9)
        global_vars.set('input_time_len', 1125)
        global_vars.set('n_classes', 4)
        set_params_by_dataset()
        input_shape = (50, global_vars.get('eeg_chans'),
                       global_vars.get('input_time_len'))

        class Dummy:
            def __init__(self, X, y):
                self.X = X
                self.y = y

        dummy_data = Dummy(X=np.ones(input_shape, dtype=np.float32),
                           y=np.ones(50, dtype=np.longlong))
        self.iterator = BalancedBatchSizeIterator(
            batch_size=global_vars.get('batch_size'))
        self.loss_function = F.nll_loss
        self.monitors = [
            LossMonitor(),
            MisclassMonitor(),
            GenericMonitor('accuracy', acc_func),
            RuntimeMonitor()
        ]
        self.stop_criterion = Or([
            MaxEpochs(global_vars.get('max_epochs')),
            NoDecrease('valid_misclass',
                       global_vars.get('max_increase_epochs'))
        ])
        self.naiveNAS = NaiveNAS(iterator=self.iterator,
                                 exp_folder='../tests',
                                 exp_name='',
                                 train_set=dummy_data,
                                 val_set=dummy_data,
                                 test_set=dummy_data,
                                 stop_criterion=self.stop_criterion,
                                 monitors=self.monitors,
                                 loss_function=self.loss_function,
                                 config=global_vars.config,
                                 subject_id=1,
                                 fieldnames=None,
                                 model_from_file=None)
Example #4
def network_model(model, train_set, test_set, valid_set, n_chans, input_time_length, cuda):
	
	max_epochs = 30 
	max_increase_epochs = 10 
	batch_size = 64 
	init_block_size = 1000

	set_random_seeds(seed=20190629, cuda=cuda)

	n_classes = 2

	if model == 'deep':
		model = Deep4Net(n_chans, n_classes, input_time_length=input_time_length,
						 final_conv_length='auto').create_network()

	elif model == 'shallow':
		model = ShallowFBCSPNet(n_chans, n_classes, input_time_length=input_time_length,
								final_conv_length='auto').create_network()

	if cuda:
		model.cuda()

	log.info("%s model: ".format(str(model))) 

	optimizer = AdamW(model.parameters(), lr=0.00625, weight_decay=0)

	iterator = BalancedBatchSizeIterator(batch_size=batch_size) 

	stop_criterion = Or([MaxEpochs(max_epochs),
						 NoDecrease('valid_misclass', max_increase_epochs)])

	monitors = [LossMonitor(), MisclassMonitor(), RuntimeMonitor()]

	model_constraint = None
	print(train_set.X.shape[0]) 

	model_test = Experiment(model, train_set, valid_set, test_set, iterator=iterator,
							loss_function=F.nll_loss, optimizer=optimizer,
							model_constraint=model_constraint, monitors=monitors,
							stop_criterion=stop_criterion, remember_best_column='valid_misclass',
							run_after_early_stop=True, cuda=cuda)

	model_test.run()
	return model_test 
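A hedged call sketch for this function; the SignalAndTarget train/valid/test sets and the channel/sample counts are assumed prepared upstream (values illustrative):

exp = network_model('shallow', train_set, test_set, valid_set,
                    n_chans=22, input_time_length=1125, cuda=False)
print(exp.epochs_df.tail())  # per-epoch monitor values collected by Experiment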
Example #5
File: deep4.py Project: kahartma/eeggan
def train_completetrials(train_set,
                         test_set,
                         n_classes,
                         max_epochs=100,
                         batch_size=60,
                         iterator=None,
                         cuda=True):
    model = build_model(train_set.X.shape[2],
                        int(train_set.X.shape[1]),
                        n_classes,
                        cropped=False)
    if iterator is None:
        iterator = BalancedBatchSizeIterator(batch_size=batch_size,
                                             seed=np.random.randint(9999999))
    monitors = [LossMonitor(), MisclassMonitor(), RuntimeMonitor()]
    loss_function = F.nll_loss

    return train(train_set, test_set, model, iterator, monitors, loss_function,
                 max_epochs, cuda)
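A hedged call, assuming SignalAndTarget-style sets whose X has shape (trials, channels, samples); the return value is whatever this project's train(...) yields:

result = train_completetrials(train_set, test_set, n_classes=4,
                              max_epochs=100, batch_size=60, cuda=False)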
Example #6
def get_iterator(model, dataset, config):
    # Config
    cropped_input_time_length = config['cropped']['input_time_length']
    batch_size = config['train']['batch_size']

    # Set up iterator
    if config['cropped']['use']:
        # Determine number of predictions per input/trial,
        #   used for cropped batch iterator
        dummy_input = np_to_var(dataset.train_set.X[:1, :, :, None])
        if config['cuda']:
            dummy_input = dummy_input.cuda()
        out = model(dummy_input)
        n_preds_per_input = out.cpu().data.numpy().shape[2]
        return CropsFromTrialsIterator(
            batch_size=batch_size,
            input_time_length=cropped_input_time_length,
            n_preds_per_input=n_preds_per_input)
    if config['experiment']['type'] == 'ccsa_da':
        return PairedDataBalancedBatchSizeIterator(batch_size=batch_size)
    return BalancedBatchSizeIterator(batch_size=batch_size)
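A hedged call sketch using the config keys read above; with cropped use disabled and an experiment type other than 'ccsa_da', the model and dataset arguments are never touched, so placeholders suffice:

config = {
    'cropped': {'use': False, 'input_time_length': 1000},
    'train': {'batch_size': 60},
    'cuda': False,
    'experiment': {'type': 'standard'},  # anything but 'ccsa_da'
}
iterator = get_iterator(model=None, dataset=None, config=config)
# -> BalancedBatchSizeIterator(batch_size=60)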
Example #7
 def __init__(self, model, subnet1_params, subnet2_params, hyp_params,
              parameters, data_params, model_save_path, tag):
     self.model = model
     self.subnet1_params = subnet1_params
     self.subnet2_params = subnet2_params
     self.model_save_path = model_save_path
     self.tag = tag
     self.best_loss = parameters["best_loss"]
     self.batch_size = parameters["batch_size"]
     self.monitors = parameters["monitors"]
     self.cuda = parameters["cuda"]
     self.model_constraint = parameters["model_constraint"]
     self.max_increase_epochs = parameters['max_increase_epochs']
     self.lr_scheduler = parameters['learning_rate_scheduler']
     self.lr_step = parameters['lr_step']
     self.lr_gamma = parameters['lr_gamma']
     self.n_classes = data_params["n_classes"]
     self.n_chans_d1 = data_params["n_chans_d1"]
     self.input_time_length_d1 = data_params["input_time_length_d1"]
     self.n_chans_d2 = data_params["n_chans_d2"]
     self.input_time_length_d2 = data_params["input_time_length_d2"]
     self.hyp_params = hyp_params
     self.activation = "elu"
     self.learning_rate = 0.001
     self.dropout = 0.1
     self.epochs = parameters['epochs']
     self.window = None
     self.structure = 'deep'
     self.n_filts = 10  #n_filts in n-1 filters
     self.first_pool = False
     self.loss = nll_loss
     for key in hyp_params:
         setattr(self, key, hyp_params[key])
     self.iterator = BalancedBatchSizeIterator(batch_size=self.batch_size)
     self.best_params = None
     self.model_number = 1
     self.y_pred = np.array([])
     self.y_true = np.array([])
     self.probabilities = np.array([])
Example #8
    def evaluate(self, X, y, batch_size=32):
        # Create a dummy experiment for the evaluation
        iterator = BalancedBatchSizeIterator(batch_size=batch_size,
                                             seed=0)  # seed irrelevant
        stop_criterion = MaxEpochs(0)
        train_set = SignalAndTarget(X, y)
        model_constraint = None
        valid_set = None
        test_set = None
        loss_function = self.loss
        if self.cropped:
            loss_function = lambda outputs, targets: \
                self.loss(th.mean(outputs, dim=2), targets)
        exp = Experiment(self.network,
                         train_set,
                         valid_set,
                         test_set,
                         iterator=iterator,
                         loss_function=loss_function,
                         optimizer=self.optimizer,
                         model_constraint=model_constraint,
                         monitors=self.monitors,
                         stop_criterion=stop_criterion,
                         remember_best_column=None,
                         run_after_early_stop=False,
                         cuda=self.cuda,
                         print_0_epoch=False,
                         do_early_stop=False)

        exp.monitor_epoch({'train': train_set})

        result_dict = dict([
            (key.replace('train_', ''), val)
            for key, val in dict(exp.epochs_df.iloc[0]).items()
        ])
        return result_dict
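Note that the snippet never calls exp.run(): with MaxEpochs(0), the single exp.monitor_epoch call fills one row of epochs_df, which is then flattened into a plain dict. A hypothetical call, where clf stands for whatever object defines this method:

metrics = clf.evaluate(X_test, y_test, batch_size=32)
print(metrics['loss'])  # available keys depend on the configured monitors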
Example #9
class BaseModel(object):
    def cuda(self):
        """Move underlying model to GPU."""
        self._ensure_network_exists()
        assert (
            not self.compiled
        ), "Call cuda before compiling model, otherwise optimization will not work"
        self.network = self.network.cuda()
        self.is_cuda = True
        return self

    def parameters(self):
        """
        Return parameters of underlying torch model.
    
        Returns
        -------
        parameters: list of torch tensors
        """
        self._ensure_network_exists()
        return self.network.parameters()

    def _ensure_network_exists(self):
        if not hasattr(self, "network"):
            self.network = self.create_network()
            self.is_cuda = False
            self.compiled = False

    def compile(
        self,
        loss,
        optimizer,
        extra_monitors=None,
        cropped=False,
        iterator_seed=0,
    ):
        """
        Setup training for this model.
        
        Parameters
        ----------
        loss: function (predictions, targets) -> torch scalar
        optimizer: `torch.optim.Optimizer` or string
            Either supply an optimizer or the name of the class (e.g. 'adam')
        extra_monitors: List of Braindecode monitors, optional
            In case you want to monitor additional values except for loss, misclass and runtime.
        cropped: bool
            Whether to perform cropped decoding, see cropped decoding tutorial.
        iterator_seed: int
            Seed to seed the iterator random generator.
        Returns
        -------

        """
        self.loss = loss
        self._ensure_network_exists()
        if cropped:
            model_already_dense = np.any([
                hasattr(m, "dilation") and (m.dilation != 1)
                and (m.dilation) != (1, 1) for m in self.network.modules()
            ])
            if not model_already_dense:
                to_dense_prediction_model(self.network)
            else:
                log.info("Seems model was already converted to dense model...")
        if not hasattr(optimizer, "step"):
            optimizer_class = find_optimizer(optimizer)
            optimizer = optimizer_class(self.network.parameters())
        self.optimizer = optimizer
        self.extra_monitors = extra_monitors
        # Already setting it here, so multiple calls to fit
        # will lead to different batches being drawn
        self.seed_rng = RandomState(iterator_seed)
        self.cropped = cropped
        self.compiled = True

    def fit(
        self,
        train_X,
        train_y,
        epochs,
        batch_size,
        input_time_length=None,
        validation_data=None,
        model_constraint=None,
        remember_best_column=None,
        scheduler=None,
        log_0_epoch=True,
    ):
        """
        Fit the model using the given training data.
        
        Will set `epochs_df` variable with a pandas dataframe to the history
        of the training process.
        
        Parameters
        ----------
        train_X: ndarray
            Training input data
        train_y: 1darray
            Training labels
        epochs: int
            Number of epochs to train
        batch_size: int
        input_time_length: int, optional
            Super crop size, what temporal size is pushed forward through 
            the network, see the cropped decoding tutorial.
        validation_data: (ndarray, 1darray), optional
            X and y for validation set if wanted
        model_constraint: object, optional
            You can supply :class:`.MaxNormDefaultConstraint` if wanted.
        remember_best_column: string, optional
            In case you want to do an early stopping/reset parameters to some
            "best" epoch, define here the monitored value whose minimum
            determines the best epoch.
        scheduler: 'cosine' or None, optional
            Whether to use cosine annealing (:class:`.CosineAnnealing`).
        log_0_epoch: bool
            Whether to compute the metrics once before training as well.

        Returns
        -------
        exp: 
            Underlying braindecode :class:`.Experiment`
        """
        if (not hasattr(self, "compiled")) or (not self.compiled):
            raise ValueError(
                "Compile the model first by calling model.compile(loss, optimizer, metrics)"
            )

        if self.cropped and input_time_length is None:
            raise ValueError(
                "In cropped mode, need to specify input_time_length,"
                "which is the number of timesteps that will be pushed through"
                "the network in a single pass.")

        train_X = _ensure_float32(train_X)
        if self.cropped:
            self.network.eval()
            test_input = np_to_var(
                np.ones(
                    (1, train_X[0].shape[0], input_time_length) +
                    train_X[0].shape[2:],
                    dtype=np.float32,
                ))
            while len(test_input.size()) < 4:
                test_input = test_input.unsqueeze(-1)
            if self.is_cuda:
                test_input = test_input.cuda()
            out = self.network(test_input)
            n_preds_per_input = out.cpu().data.numpy().shape[2]
            self.iterator = CropsFromTrialsIterator(
                batch_size=batch_size,
                input_time_length=input_time_length,
                n_preds_per_input=n_preds_per_input,
                seed=self.seed_rng.randint(0,
                                           np.iinfo(np.int32).max - 1),
            )
        else:
            self.iterator = BalancedBatchSizeIterator(
                batch_size=batch_size,
                seed=self.seed_rng.randint(0,
                                           np.iinfo(np.int32).max - 1),
            )
        if log_0_epoch:
            stop_criterion = MaxEpochs(epochs)
        else:
            stop_criterion = MaxEpochs(epochs - 1)
        train_set = SignalAndTarget(train_X, train_y)
        optimizer = self.optimizer
        if scheduler is not None:
            assert (scheduler == "cosine"
                    ), "Supply either 'cosine' or None as scheduler."
            n_updates_per_epoch = sum([
                1 for _ in self.iterator.get_batches(train_set, shuffle=True)
            ])
            n_updates_per_period = n_updates_per_epoch * epochs
            if scheduler == "cosine":
                scheduler = CosineAnnealing(n_updates_per_period)
            schedule_weight_decay = False
            if optimizer.__class__.__name__ == "AdamW":
                schedule_weight_decay = True
            optimizer = ScheduledOptimizer(
                scheduler,
                self.optimizer,
                schedule_weight_decay=schedule_weight_decay,
            )
        loss_function = self.loss
        if self.cropped:
            loss_function = lambda outputs, targets: self.loss(
                th.mean(outputs, dim=2), targets)
        if validation_data is not None:
            valid_X = _ensure_float32(validation_data[0])
            valid_y = validation_data[1]
            valid_set = SignalAndTarget(valid_X, valid_y)
        else:
            valid_set = None
        test_set = None
        self.monitors = [LossMonitor()]
        if self.cropped:
            self.monitors.append(
                CroppedTrialMisclassMonitor(input_time_length))
        else:
            self.monitors.append(MisclassMonitor())
        if self.extra_monitors is not None:
            self.monitors.extend(self.extra_monitors)
        self.monitors.append(RuntimeMonitor())
        exp = Experiment(
            self.network,
            train_set,
            valid_set,
            test_set,
            iterator=self.iterator,
            loss_function=loss_function,
            optimizer=optimizer,
            model_constraint=model_constraint,
            monitors=self.monitors,
            stop_criterion=stop_criterion,
            remember_best_column=remember_best_column,
            run_after_early_stop=False,
            cuda=self.is_cuda,
            log_0_epoch=log_0_epoch,
            do_early_stop=(remember_best_column is not None),
        )
        exp.run()
        self.epochs_df = exp.epochs_df
        return exp

    def evaluate(self, X, y):
        """
        Evaluate, i.e., compute metrics on given inputs and targets.
        
        Parameters
        ----------
        X: ndarray
            Input data.
        y: 1darray
            Targets.

        Returns
        -------
        result: dict
            Dictionary with result metrics.

        """
        X = _ensure_float32(X)
        stop_criterion = MaxEpochs(0)
        train_set = SignalAndTarget(X, y)
        model_constraint = None
        valid_set = None
        test_set = None
        loss_function = self.loss
        if self.cropped:
            loss_function = lambda outputs, targets: self.loss(
                th.mean(outputs, dim=2), targets)

        # reset runtime monitor if exists...
        for monitor in self.monitors:
            if hasattr(monitor, "last_call_time"):
                monitor.last_call_time = time.time()
        exp = Experiment(
            self.network,
            train_set,
            valid_set,
            test_set,
            iterator=self.iterator,
            loss_function=loss_function,
            optimizer=self.optimizer,
            model_constraint=model_constraint,
            monitors=self.monitors,
            stop_criterion=stop_criterion,
            remember_best_column=None,
            run_after_early_stop=False,
            cuda=self.is_cuda,
            log_0_epoch=True,
            do_early_stop=False,
        )

        exp.monitor_epoch({"train": train_set})

        result_dict = dict([
            (key.replace("train_", ""), val)
            for key, val in dict(exp.epochs_df.iloc[0]).items()
        ])
        return result_dict

    def predict_classes(self,
                        X,
                        threshold_for_binary_case=None,
                        individual_crops=False):
        """
        Predict the labels for given input data.
        
        Parameters
        ----------
        X: ndarray
            Input data.
        threshold_for_binary_case: float, optional
            In case of a model with single output, the threshold for assigning
            label 0 or 1, e.g. 0.5.

        Returns
        -------
        pred_labels: 1darray or list of 1darrays
            Predicted labels per trial, optionally for each crop within trial.
        """
        if individual_crops:
            assert self.cropped, "Cropped labels only for cropped decoding"
        outs_per_trial = self.predict_outs(X=X,
                                           individual_crops=individual_crops)

        pred_labels = [np.argmax(o, axis=0) for o in outs_per_trial]
        if not individual_crops:
            pred_labels = np.array(pred_labels)
        return pred_labels

    def predict_outs(self, X, individual_crops=False):
        """
        Predict raw outputs of the network for given input.

        Parameters
        ----------
        X: ndarray
            Input data.
        individual_crops: bool
            If True, return outputs for each crop instead of averaging per trial.

        Returns
        -------
            outs_per_trial: 2darray or list of 2darrays
                Network outputs for each trial, optionally for each crop within trial.
        """
        if individual_crops:
            assert self.cropped, "Cropped labels only for cropped decoding"
        X = _ensure_float32(X)
        all_preds = []
        with th.no_grad():
            dummy_y = np.ones(len(X), dtype=np.int64)
            for b_X, _ in self.iterator.get_batches(
                    SignalAndTarget(X, dummy_y), False):
                b_X_var = np_to_var(b_X)
                if self.is_cuda:
                    b_X_var = b_X_var.cuda()
                all_preds.append(var_to_np(self.network(b_X_var)))
        if self.cropped:
            outs_per_trial = compute_preds_per_trial_from_crops(
                all_preds, self.iterator.input_time_length, X)
            if not individual_crops:
                outs_per_trial = np.array(
                    [np.mean(o, axis=1) for o in outs_per_trial])
        else:
            outs_per_trial = np.concatenate(all_preds)
        return outs_per_trial
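Together, compile/fit/evaluate/predict_classes give BaseModel a Keras-style workflow. A minimal end-to-end sketch against the braindecode 0.x API shown here; the import path, shapes, and hyperparameters are illustrative, and train_X/train_y etc. are assumed to be numpy arrays:

import torch.nn.functional as F
from braindecode.models.shallow_fbcsp import ShallowFBCSPNet

model = ShallowFBCSPNet(in_chans=22, n_classes=4, input_time_length=1125,
                        final_conv_length='auto')
model.compile(loss=F.nll_loss, optimizer='adam', iterator_seed=1)
model.fit(train_X, train_y, epochs=30, batch_size=64,
          validation_data=(valid_X, valid_y))
print(model.evaluate(test_X, test_y))
pred_labels = model.predict_classes(test_X)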
Example #10
    def fit(
        self,
        train_X,
        train_y,
        epochs,
        batch_size,
        input_time_length=None,
        validation_data=None,
        model_constraint=None,
        remember_best_column=None,
        scheduler=None,
        log_0_epoch=True,
    ):
        """
        Fit the model using the given training data.
        
        Will set `epochs_df` variable with a pandas dataframe to the history
        of the training process.
        
        Parameters
        ----------
        train_X: ndarray
            Training input data
        train_y: 1darray
            Training labels
        epochs: int
            Number of epochs to train
        batch_size: int
        input_time_length: int, optional
            Super crop size, what temporal size is pushed forward through 
            the network, see the cropped decoding tutorial.
        validation_data: (ndarray, 1darray), optional
            X and y for validation set if wanted
        model_constraint: object, optional
            You can supply :class:`.MaxNormDefaultConstraint` if wanted.
        remember_best_column: string, optional
            In case you want to do an early stopping/reset parameters to some
            "best" epoch, define here the monitored value whose minimum
            determines the best epoch.
        scheduler: 'cosine' or None, optional
            Whether to use cosine annealing (:class:`.CosineAnnealing`).
        log_0_epoch: bool
            Whether to compute the metrics once before training as well.

        Returns
        -------
        exp: 
            Underlying braindecode :class:`.Experiment`
        """
        if (not hasattr(self, "compiled")) or (not self.compiled):
            raise ValueError(
                "Compile the model first by calling model.compile(loss, optimizer, metrics)"
            )

        if self.cropped and input_time_length is None:
            raise ValueError(
                "In cropped mode, need to specify input_time_length,"
                "which is the number of timesteps that will be pushed through"
                "the network in a single pass.")

        train_X = _ensure_float32(train_X)
        if self.cropped:
            self.network.eval()
            test_input = np_to_var(
                np.ones(
                    (1, train_X[0].shape[0], input_time_length) +
                    train_X[0].shape[2:],
                    dtype=np.float32,
                ))
            while len(test_input.size()) < 4:
                test_input = test_input.unsqueeze(-1)
            if self.is_cuda:
                test_input = test_input.cuda()
            out = self.network(test_input)
            n_preds_per_input = out.cpu().data.numpy().shape[2]
            self.iterator = CropsFromTrialsIterator(
                batch_size=batch_size,
                input_time_length=input_time_length,
                n_preds_per_input=n_preds_per_input,
                seed=self.seed_rng.randint(0,
                                           np.iinfo(np.int32).max - 1),
            )
        else:
            self.iterator = BalancedBatchSizeIterator(
                batch_size=batch_size,
                seed=self.seed_rng.randint(0,
                                           np.iinfo(np.int32).max - 1),
            )
        if log_0_epoch:
            stop_criterion = MaxEpochs(epochs)
        else:
            stop_criterion = MaxEpochs(epochs - 1)
        train_set = SignalAndTarget(train_X, train_y)
        optimizer = self.optimizer
        if scheduler is not None:
            assert (scheduler == "cosine"
                    ), "Supply either 'cosine' or None as scheduler."
            n_updates_per_epoch = sum([
                1 for _ in self.iterator.get_batches(train_set, shuffle=True)
            ])
            n_updates_per_period = n_updates_per_epoch * epochs
            if scheduler == "cosine":
                scheduler = CosineAnnealing(n_updates_per_period)
            schedule_weight_decay = False
            if optimizer.__class__.__name__ == "AdamW":
                schedule_weight_decay = True
            optimizer = ScheduledOptimizer(
                scheduler,
                self.optimizer,
                schedule_weight_decay=schedule_weight_decay,
            )
        loss_function = self.loss
        if self.cropped:
            loss_function = lambda outputs, targets: self.loss(
                th.mean(outputs, dim=2), targets)
        if validation_data is not None:
            valid_X = _ensure_float32(validation_data[0])
            valid_y = validation_data[1]
            valid_set = SignalAndTarget(valid_X, valid_y)
        else:
            valid_set = None
        test_set = None
        self.monitors = [LossMonitor()]
        if self.cropped:
            self.monitors.append(
                CroppedTrialMisclassMonitor(input_time_length))
        else:
            self.monitors.append(MisclassMonitor())
        if self.extra_monitors is not None:
            self.monitors.extend(self.extra_monitors)
        self.monitors.append(RuntimeMonitor())
        exp = Experiment(
            self.network,
            train_set,
            valid_set,
            test_set,
            iterator=self.iterator,
            loss_function=loss_function,
            optimizer=optimizer,
            model_constraint=model_constraint,
            monitors=self.monitors,
            stop_criterion=stop_criterion,
            remember_best_column=remember_best_column,
            run_after_early_stop=False,
            cuda=self.is_cuda,
            log_0_epoch=log_0_epoch,
            do_early_stop=(remember_best_column is not None),
        )
        exp.run()
        self.epochs_df = exp.epochs_df
        return exp
Example #11
def run_exp(data_folder, subject_id, low_cut_hz, model, cuda):
    train_filename = 'A{:02d}T.gdf'.format(subject_id)
    test_filename = 'A{:02d}E.gdf'.format(subject_id)
    train_filepath = os.path.join(data_folder, train_filename)
    test_filepath = os.path.join(data_folder, test_filename)
    train_label_filepath = train_filepath.replace('.gdf', '.mat')
    test_label_filepath = test_filepath.replace('.gdf', '.mat')

    train_loader = BCICompetition4Set2A(train_filepath,
                                        labels_filename=train_label_filepath)
    test_loader = BCICompetition4Set2A(test_filepath,
                                       labels_filename=test_label_filepath)
    train_cnt = train_loader.load()
    test_cnt = test_loader.load()

    # Preprocessing

    train_cnt = train_cnt.drop_channels(
        ['STI 014', 'EOG-left', 'EOG-central', 'EOG-right'])
    assert len(train_cnt.ch_names) == 22
    # let's convert to microvolt for numerical stability of the next operations
    train_cnt = mne_apply(lambda a: a * 1e6, train_cnt)
    train_cnt = mne_apply(
        lambda a: bandpass_cnt(
            a, low_cut_hz, 38, train_cnt.info['sfreq'], filt_order=3, axis=1),
        train_cnt)
    train_cnt = mne_apply(
        lambda a: exponential_running_standardize(
            a.T, factor_new=1e-3, init_block_size=1000, eps=1e-4).T, train_cnt)

    test_cnt = test_cnt.drop_channels(
        ['STI 014', 'EOG-left', 'EOG-central', 'EOG-right'])
    assert len(test_cnt.ch_names) == 22
    test_cnt = mne_apply(lambda a: a * 1e6, test_cnt)
    test_cnt = mne_apply(
        lambda a: bandpass_cnt(
            a, low_cut_hz, 38, test_cnt.info['sfreq'], filt_order=3, axis=1),
        test_cnt)
    test_cnt = mne_apply(
        lambda a: exponential_running_standardize(
            a.T, factor_new=1e-3, init_block_size=1000, eps=1e-4).T, test_cnt)

    marker_def = OrderedDict([('Left Hand', [1]), ('Right Hand', [2]),
                              ('Foot', [3]), ('Tongue', [4])])
    ival = [-500, 4000]

    train_set = create_signal_target_from_raw_mne(train_cnt, marker_def, ival)
    test_set = create_signal_target_from_raw_mne(test_cnt, marker_def, ival)

    train_set, valid_set = split_into_two_sets(train_set,
                                               first_set_fraction=0.8)

    set_random_seeds(seed=20190706, cuda=cuda)

    n_classes = 4
    n_chans = int(train_set.X.shape[1])
    input_time_length = train_set.X.shape[2]
    if model == 'shallow':
        model = ShallowFBCSPNet(n_chans,
                                n_classes,
                                input_time_length=input_time_length,
                                final_conv_length='auto').create_network()
    elif model == 'deep':
        model = Deep4Net(n_chans,
                         n_classes,
                         input_time_length=input_time_length,
                         final_conv_length='auto').create_network()
    if cuda:
        model.cuda()
    log.info("Model: \n{:s}".format(str(model)))

    optimizer = optim.Adam(model.parameters())

    iterator = BalancedBatchSizeIterator(batch_size=60)

    stop_criterion = Or([MaxEpochs(1600), NoDecrease('valid_misclass', 160)])

    monitors = [LossMonitor(), MisclassMonitor(), RuntimeMonitor()]

    model_constraint = MaxNormDefaultConstraint()

    exp = Experiment(model,
                     train_set,
                     valid_set,
                     test_set,
                     iterator=iterator,
                     loss_function=F.nll_loss,
                     optimizer=optimizer,
                     model_constraint=model_constraint,
                     monitors=monitors,
                     stop_criterion=stop_criterion,
                     remember_best_column='valid_misclass',
                     run_after_early_stop=True,
                     cuda=cuda)
    exp.run()
    return exp
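A hedged invocation; the data folder, subject id, and low cutoff are placeholders:

exp = run_exp(data_folder='data/bci2a/', subject_id=1,
              low_cut_hz=4, model='shallow', cuda=True)
print(exp.epochs_df.iloc[-1])  # metrics of the final epoch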
Example #12
def run_exp(data_folder, subject_id, low_cut_hz, model, cuda):
    ival = [-500, 4000]
    max_epochs = 1600
    max_increase_epochs = 160
    batch_size = 60
    high_cut_hz = 38
    factor_new = 1e-3
    init_block_size = 1000
    valid_set_fraction = 0.2

    train_filename = "A{:02d}T.gdf".format(subject_id)
    test_filename = "A{:02d}E.gdf".format(subject_id)
    train_filepath = os.path.join(data_folder, train_filename)
    test_filepath = os.path.join(data_folder, test_filename)
    train_label_filepath = train_filepath.replace(".gdf", ".mat")
    test_label_filepath = test_filepath.replace(".gdf", ".mat")

    train_loader = BCICompetition4Set2A(
        train_filepath, labels_filename=train_label_filepath
    )
    test_loader = BCICompetition4Set2A(
        test_filepath, labels_filename=test_label_filepath
    )
    train_cnt = train_loader.load()
    test_cnt = test_loader.load()

    # Preprocessing

    train_cnt = train_cnt.drop_channels(
        ["EOG-left", "EOG-central", "EOG-right"]
    )
    assert len(train_cnt.ch_names) == 22
    # let's convert to microvolt for numerical stability of the next operations
    train_cnt = mne_apply(lambda a: a * 1e6, train_cnt)
    train_cnt = mne_apply(
        lambda a: bandpass_cnt(
            a,
            low_cut_hz,
            high_cut_hz,
            train_cnt.info["sfreq"],
            filt_order=3,
            axis=1,
        ),
        train_cnt,
    )
    train_cnt = mne_apply(
        lambda a: exponential_running_standardize(
            a.T,
            factor_new=factor_new,
            init_block_size=init_block_size,
            eps=1e-4,
        ).T,
        train_cnt,
    )

    test_cnt = test_cnt.drop_channels(["EOG-left", "EOG-central", "EOG-right"])
    assert len(test_cnt.ch_names) == 22
    test_cnt = mne_apply(lambda a: a * 1e6, test_cnt)
    test_cnt = mne_apply(
        lambda a: bandpass_cnt(
            a,
            low_cut_hz,
            high_cut_hz,
            test_cnt.info["sfreq"],
            filt_order=3,
            axis=1,
        ),
        test_cnt,
    )
    test_cnt = mne_apply(
        lambda a: exponential_running_standardize(
            a.T,
            factor_new=factor_new,
            init_block_size=init_block_size,
            eps=1e-4,
        ).T,
        test_cnt,
    )

    marker_def = OrderedDict(
        [
            ("Left Hand", [1]),
            ("Right Hand", [2]),
            ("Foot", [3]),
            ("Tongue", [4]),
        ]
    )

    train_set = create_signal_target_from_raw_mne(train_cnt, marker_def, ival)
    test_set = create_signal_target_from_raw_mne(test_cnt, marker_def, ival)

    train_set, valid_set = split_into_two_sets(
        train_set, first_set_fraction=1 - valid_set_fraction
    )

    set_random_seeds(seed=20190706, cuda=cuda)

    n_classes = 4
    n_chans = int(train_set.X.shape[1])
    input_time_length = train_set.X.shape[2]
    if model == "shallow":
        model = ShallowFBCSPNet(
            n_chans,
            n_classes,
            input_time_length=input_time_length,
            final_conv_length="auto",
        ).create_network()
    elif model == "deep":
        model = Deep4Net(
            n_chans,
            n_classes,
            input_time_length=input_time_length,
            final_conv_length="auto",
        ).create_network()
    if cuda:
        model.cuda()
    log.info("Model: \n{:s}".format(str(model)))

    optimizer = optim.Adam(model.parameters())

    iterator = BalancedBatchSizeIterator(batch_size=batch_size)

    stop_criterion = Or(
        [
            MaxEpochs(max_epochs),
            NoDecrease("valid_misclass", max_increase_epochs),
        ]
    )

    monitors = [LossMonitor(), MisclassMonitor(), RuntimeMonitor()]

    model_constraint = MaxNormDefaultConstraint()

    exp = Experiment(
        model,
        train_set,
        valid_set,
        test_set,
        iterator=iterator,
        loss_function=F.nll_loss,
        optimizer=optimizer,
        model_constraint=model_constraint,
        monitors=monitors,
        stop_criterion=stop_criterion,
        remember_best_column="valid_misclass",
        run_after_early_stop=True,
        cuda=cuda,
    )
    exp.run()
    return exp
Example #13
class BaseModel(object):
    def cuda(self):
        """Move underlying model to GPU."""
        self._ensure_network_exists()
        assert not self.compiled,\
            ("Call cuda before compiling model, otherwise optimization will not work")
        self.network = self.network.cuda()
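        # note: this assignment shadows the cuda() method with a boolean flag;
        # later code reads it back via `if self.cuda:` checks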
        self.cuda = True
        return self

    def parameters(self):
        """
        Return parameters of underlying torch model.
    
        Returns
        -------
        parameters: list of torch tensors
        """
        self._ensure_network_exists()
        return self.network.parameters()

    def _ensure_network_exists(self):
        if not hasattr(self, 'network'):
            self.network = self.create_network()
            self.cuda = False
            self.compiled = False

    def compile(self,
                loss,
                optimizer,
                monitors=None,
                cropped=False,
                iterator_seed=0):
        """
        Setup training for this model.
        
        Parameters
        ----------
        loss: function (predictions, targets) -> torch scalar
        optimizer: `torch.optim.Optimizer` or string
            Either supply an optimizer or the name of the class (e.g. 'adam')
        monitors: List of Braindecode monitors, optional
            In case you want to monitor additional values except for loss, misclass and runtime.
        cropped: bool
            Whether to perform cropped decoding, see cropped decoding tutorial.
        iterator_seed: int
            Seed to seed the iterator random generator.
        Returns
        -------

        """
        self.loss = loss
        self._ensure_network_exists()
        if cropped:
            to_dense_prediction_model(self.network)
        if not hasattr(optimizer, 'step'):
            optimizer_class = find_optimizer(optimizer)
            optimizer = optimizer_class(self.network.parameters())
        self.optimizer = optimizer
        self.extra_monitors = monitors
        # Already setting it here, so multiple calls to fit
        # will lead to different batches being drawn
        self.seed_rng = RandomState(iterator_seed)
        self.cropped = cropped
        self.compiled = True

    def fit(self,
            train_X,
            train_y,
            epochs,
            batch_size,
            input_time_length=None,
            validation_data=None,
            model_constraint=None,
            remember_best_column=None,
            scheduler=None):
        """
        Fit the model using the given training data.
        
        Will set `epochs_df` variable with a pandas dataframe to the history
        of the training process.
        
        Parameters
        ----------
        train_X: ndarray
            Training input data
        train_y: 1darray
            Training labels
        epochs: int
            Number of epochs to train
        batch_size: int
        input_time_length: int, optional
            Super crop size, what temporal size is pushed forward through 
            the network, see the cropped decoding tutorial.
        validation_data: (ndarray, 1darray), optional
            X and y for validation set if wanted
        model_constraint: object, optional
            You can supply :class:`.MaxNormDefaultConstraint` if wanted.
        remember_best_column: string, optional
            In case you want to do an early stopping/reset parameters to some
            "best" epoch, define here the monitored value whose minimum
            determines the best epoch.
        scheduler: 'cosine' or None, optional
            Whether to use cosine annealing (:class:`.CosineAnnealing`).

        Returns
        -------
        exp: 
            Underlying braindecode :class:`.Experiment`
        """
        if (not hasattr(self, 'compiled')) or (not self.compiled):
            raise ValueError(
                "Compile the model first by calling model.compile(loss, optimizer, metrics)"
            )

        if self.cropped and input_time_length is None:
            raise ValueError(
                "In cropped mode, need to specify input_time_length,"
                "which is the number of timesteps that will be pushed through"
                "the network in a single pass.")
        if self.cropped:
            self.network.eval()
            test_input = np_to_var(train_X[0:1], dtype=np.float32)
            while len(test_input.size()) < 4:
                test_input = test_input.unsqueeze(-1)
            if self.cuda:
                test_input = test_input.cuda()
            out = self.network(test_input)
            n_preds_per_input = out.cpu().data.numpy().shape[2]
            self.iterator = CropsFromTrialsIterator(
                batch_size=batch_size,
                input_time_length=input_time_length,
                n_preds_per_input=n_preds_per_input,
                seed=self.seed_rng.randint(0, 4294967295))
        else:
            self.iterator = BalancedBatchSizeIterator(
                batch_size=batch_size,
                seed=self.seed_rng.randint(0, 4294967295))
        stop_criterion = MaxEpochs(
            epochs - 1
        )  # -1 since we don't print the 0 epoch, which matters for this stop criterion
        train_set = SignalAndTarget(train_X, train_y)
        optimizer = self.optimizer
        if scheduler is not None:
            assert scheduler == 'cosine', (
                "Supply either 'cosine' or None as scheduler.")
            n_updates_per_epoch = sum([
                1 for _ in self.iterator.get_batches(train_set, shuffle=True)
            ])
            n_updates_per_period = n_updates_per_epoch * epochs
            if scheduler == 'cosine':
                scheduler = CosineAnnealing(n_updates_per_period)
            schedule_weight_decay = False
            if optimizer.__class__.__name__ == 'AdamW':
                schedule_weight_decay = True
            optimizer = ScheduledOptimizer(
                scheduler,
                self.optimizer,
                schedule_weight_decay=schedule_weight_decay)
        loss_function = self.loss
        if self.cropped:
            loss_function = lambda outputs, targets:\
                self.loss(th.mean(outputs, dim=2), targets)
        if validation_data is not None:
            valid_set = SignalAndTarget(validation_data[0], validation_data[1])
        else:
            valid_set = None
        test_set = None
        self.monitors = [LossMonitor()]
        if self.cropped:
            self.monitors.append(
                CroppedTrialMisclassMonitor(input_time_length))
        else:
            self.monitors.append(MisclassMonitor())
        if self.extra_monitors is not None:
            self.monitors.extend(self.extra_monitors)
        self.monitors.append(RuntimeMonitor())
        exp = Experiment(self.network,
                         train_set,
                         valid_set,
                         test_set,
                         iterator=self.iterator,
                         loss_function=loss_function,
                         optimizer=optimizer,
                         model_constraint=model_constraint,
                         monitors=self.monitors,
                         stop_criterion=stop_criterion,
                         remember_best_column=remember_best_column,
                         run_after_early_stop=False,
                         cuda=self.cuda,
                         log_0_epoch=False,
                         do_early_stop=(remember_best_column is not None))
        exp.run()
        self.epochs_df = exp.epochs_df
        return exp

    def evaluate(self, X, y):
        """
        Evaluate, i.e., compute metrics on given inputs and targets.
        
        Parameters
        ----------
        X: ndarray
            Input data.
        y: 1darray
            Targets.

        Returns
        -------
        result: dict
            Dictionary with result metrics.

        """
        stop_criterion = MaxEpochs(0)
        train_set = SignalAndTarget(X, y)
        model_constraint = None
        valid_set = None
        test_set = None
        loss_function = self.loss
        if self.cropped:
            loss_function = lambda outputs, targets: \
                self.loss(th.mean(outputs, dim=2), targets)

        # reset runtime monitor if exists...
        for monitor in self.monitors:
            if hasattr(monitor, 'last_call_time'):
                monitor.last_call_time = time.time()
        exp = Experiment(self.network,
                         train_set,
                         valid_set,
                         test_set,
                         iterator=self.iterator,
                         loss_function=loss_function,
                         optimizer=self.optimizer,
                         model_constraint=model_constraint,
                         monitors=self.monitors,
                         stop_criterion=stop_criterion,
                         remember_best_column=None,
                         run_after_early_stop=False,
                         cuda=self.cuda,
                         log_0_epoch=False,
                         do_early_stop=False)

        exp.monitor_epoch({'train': train_set})

        result_dict = dict([
            (key.replace('train_', ''), val)
            for key, val in dict(exp.epochs_df.iloc[0]).items()
        ])
        return result_dict

    def predict(self, X, threshold_for_binary_case=None):
        """
        Predict the labels for given input data.
        
        Parameters
        ----------
        X: ndarray
            Input data.
        threshold_for_binary_case: float, optional
            In case of a model with single output, the threshold for assigning
            label 0 or 1, e.g. 0.5.

        Returns
        -------
        pred_labels: 1darray
            Predicted labels per trial. 
        """
        all_preds = []
        for b_X, _ in self.iterator.get_batches(SignalAndTarget(X, X), False):
            all_preds.append(var_to_np(self.network(np_to_var(b_X))))
        if self.cropped:
            pred_labels = compute_trial_labels_from_crop_preds(
                all_preds, self.iterator.input_time_length, X)
        else:
            pred_labels = compute_pred_labels_from_trial_preds(
                all_preds, threshold_for_binary_case)
        return pred_labels
Example #14
def network_model(subject_id, model_type, data_type, cropped, cuda, parameters, hyp_params):
	best_params = dict() # dictionary to store hyper-parameter values

	#####Parameters passed to function#####
	max_epochs  = parameters['max_epochs']
	max_increase_epochs = parameters['max_increase_epochs']
	batch_size = parameters['batch_size']

	#####Constant Parameters#####
	best_loss = 100.0 # instantiate starting point for loss
	iterator = BalancedBatchSizeIterator(batch_size=batch_size)
	stop_criterion = Or([MaxEpochs(max_epochs),
						 NoDecrease('valid_misclass', max_increase_epochs)])
	monitors = [LossMonitor(), MisclassMonitor(), RuntimeMonitor()]
	model_constraint = MaxNormDefaultConstraint()
	epoch = 4096

	#####Collect and format data#####
	if data_type == 'words':
		data, labels = format_data(data_type, subject_id, epoch)
		data = data[:,:,768:1280] # within-trial window selected for classification
	elif data_type == 'vowels':
		data, labels = format_data(data_type, subject_id, epoch)
		data = data[:,:,512:1024]
	elif data_type == 'all_classes':
		data, labels = format_data(data_type, subject_id, epoch)
		data = data[:,:,768:1280]
	
	x = lambda a: a * 1e6 # improves numerical stability
	data = x(data)
	
	data = normalize(data)
	data, labels = balanced_subsample(data, labels) # downsampling the data to ensure equal classes
	data, _, labels, _ = train_test_split(data, labels, test_size=0, random_state=42) # redundant shuffle of data/labels

	#####model inputs#####
	unique, counts = np.unique(labels, return_counts=True)
	n_classes = len(unique)
	n_chans   = int(data.shape[1])
	input_time_length = data.shape[2]

	#####k-fold nested cross-validation#####
	num_folds = 4
	skf = StratifiedKFold(n_splits=num_folds, shuffle=True, random_state=10)
	out_fold_num = 0 # outer-fold number
	
	cv_scores = [] # list for storing cross-validated scores
	#####Outer-Fold#####
	for inner_ind, outer_index in skf.split(data, labels):
		inner_fold, outer_fold     = data[inner_ind], data[outer_index]
		inner_labels, outer_labels = labels[inner_ind], labels[outer_index]
		out_fold_num += 1
		loss_with_params = dict()# for storing param values and losses
		in_fold_num = 0 # inner-fold number
		
		#####Inner-Fold#####
		for train_idx, valid_idx in skf.split(inner_fold, inner_labels):
			X_Train, X_val = inner_fold[train_idx], inner_fold[valid_idx]
			y_train, y_val = inner_labels[train_idx], inner_labels[valid_idx]
			in_fold_num += 1
			train_set = SignalAndTarget(X_Train, y_train)
			valid_set = SignalAndTarget(X_val, y_val)
			loss_with_params[f"Fold_{in_fold_num}"] = dict()
			
			####Nested cross-validation#####
			for drop_prob in hyp_params['drop_prob']:
				for loss_function in hyp_params['loss']:
					for i in range(len(hyp_params['lr_adam'])):
						model = None # ensure no duplication of models
						# model, learning-rate and optimizer setup according to model_type
						if model_type == 'shallow':
							model =  ShallowFBCSPNet(in_chans=n_chans, n_classes=n_classes, input_time_length=input_time_length,
										 n_filters_time=80, filter_time_length=40, n_filters_spat=80, 
										 pool_time_length=75, pool_time_stride=25, final_conv_length='auto',
										 conv_nonlin=square, pool_mode='max', pool_nonlin=safe_log, 
										 split_first_layer=True, batch_norm=True, batch_norm_alpha=0.1,
										 drop_prob=drop_prob).create_network()
							lr = hyp_params['lr_ada'][i]
							optimizer = optim.Adadelta(model.parameters(), lr=lr, rho=0.9, weight_decay=0.1, eps=1e-8)
						elif model_type == 'deep':
							model = Deep4Net(in_chans=n_chans, n_classes=n_classes, input_time_length=input_time_length,
										 final_conv_length='auto', n_filters_time=20, n_filters_spat=20, filter_time_length=10,
										 pool_time_length=3, pool_time_stride=3, n_filters_2=50, filter_length_2=15,
										 n_filters_3=100, filter_length_3=15, n_filters_4=400, filter_length_4=10,
										 first_nonlin=leaky_relu, first_pool_mode='max', first_pool_nonlin=safe_log, later_nonlin=leaky_relu,
										 later_pool_mode='max', later_pool_nonlin=safe_log, drop_prob=drop_prob, 
										 double_time_convs=False, split_first_layer=False, batch_norm=True, batch_norm_alpha=0.1,
										 stride_before_pool=False).create_network() #filter_length_4 changed from 15 to 10
							lr = hyp_params['lr_ada'][i]
							optimizer = optim.Adadelta(model.parameters(), lr=lr, weight_decay=0.1, eps=1e-8)
						elif model_type == 'eegnet':
							model = EEGNetv4(in_chans=n_chans, n_classes=n_classes, final_conv_length='auto', 
										 input_time_length=input_time_length, pool_mode='mean', F1=16, D=2, F2=32,
										 kernel_length=64, third_kernel_size=(8,4), drop_prob=drop_prob).create_network()
							lr = hyp_params['lr_adam'][i]
							optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=0, eps=1e-8, amsgrad=False)
						
						set_random_seeds(seed=20190629, cuda=cuda)
						
						if cuda:
							model.cuda()
							torch.backends.cudnn.deterministic = True
						model = torch.nn.DataParallel(model)
						log.info("%s model: ".format(str(model)))

						model_loss_function = None

						#####Setup to run the selected model#####
						model_test = Experiment(model, train_set, valid_set, test_set=None, iterator=iterator,
												loss_function=loss_function, optimizer=optimizer,
												model_constraint=model_constraint, monitors=monitors,
												stop_criterion=stop_criterion, remember_best_column='valid_misclass',
												run_after_early_stop=True, model_loss_function=model_loss_function, cuda=cuda,
												data_type=data_type, subject_id=subject_id, model_type=model_type, 
												cropped=cropped, model_number=str(out_fold_num)) 

						model_test.run()
						model_loss = model_test.epochs_df['valid_loss'].astype('float')
						current_val_loss = current_loss(model_loss)
						loss_with_params[f"Fold_{in_fold_num}"][f"{drop_prob}/{loss_function}/{lr}"] = current_val_loss

		####Select and train optimized model#####
		df = pd.DataFrame(loss_with_params)
		df['mean'] = df.mean(axis=1) # compute mean loss across k-folds
		writer_df = f"results_folder\\results\\S{subject_id}\\{model_type}_parameters.xlsx"
		df.to_excel(writer_df)
		
		best_dp, best_loss, best_lr = df.loc[df['mean'].idxmin()].__dict__['_name'].split("/") # extract best param values
		if str(best_loss[10:13]) == 'nll':
			best_loss = F.nll_loss
		elif str(best_loss[10:13]) == 'cro':
			best_loss = F.cross_entropy
		
		print(f"Best parameters: dropout: {best_dp}, loss: {str(best_loss)[10:13]}, lr: {best_lr}")

		#####Train model on entire inner fold set#####
		torch.backends.cudnn.deterministic = True
		model = None
		#####Create outer-fold validation and test sets#####
		X_valid, X_test, y_valid, y_test = train_test_split(outer_fold, outer_labels, test_size=0.5, random_state=42, stratify=outer_labels)
		train_set = SignalAndTarget(inner_fold, inner_labels)
		valid_set = SignalAndTarget(X_valid, y_valid)
		test_set  = SignalAndTarget(X_test, y_test)


		if model_type == 'shallow':
			model =  ShallowFBCSPNet(in_chans=n_chans, n_classes=n_classes, input_time_length=input_time_length,
						 n_filters_time=60, filter_time_length=5, n_filters_spat=40, 
						 pool_time_length=50, pool_time_stride=15, final_conv_length='auto',
						 conv_nonlin=relu6, pool_mode='mean', pool_nonlin=safe_log, 
						 split_first_layer=True, batch_norm=True, batch_norm_alpha=0.1,
						 drop_prob=0.1).create_network() #50 works better than 75
			
			optimizer = optim.Adadelta(model.parameters(), lr=2.0, rho=0.9, weight_decay=0.1, eps=1e-8) 
			
		elif model_type == 'deep':
			model = Deep4Net(in_chans=n_chans, n_classes=n_classes, input_time_length=input_time_length,
						 final_conv_length='auto', n_filters_time=20, n_filters_spat=20, filter_time_length=5,
						 pool_time_length=3, pool_time_stride=3, n_filters_2=20, filter_length_2=5,
						 n_filters_3=40, filter_length_3=5, n_filters_4=1500, filter_length_4=10,
						 first_nonlin=leaky_relu, first_pool_mode='mean', first_pool_nonlin=safe_log, later_nonlin=leaky_relu,
						 later_pool_mode='mean', later_pool_nonlin=safe_log, drop_prob=0.1, 
						 double_time_convs=False, split_first_layer=True, batch_norm=True, batch_norm_alpha=0.1,
						 stride_before_pool=False).create_network()
			
			optimizer = AdamW(model.parameters(), lr=0.1, weight_decay=0)
		elif model_type == 'eegnet':
			model = EEGNetv4(in_chans=n_chans, n_classes=n_classes, final_conv_length='auto', 
						 input_time_length=input_time_length, pool_mode='mean', F1=16, D=2, F2=32,
						 kernel_length=64, third_kernel_size=(8,4), drop_prob=0.1).create_network()
			optimizer = optim.Adam(model.parameters(), lr=0.1, weight_decay=0, eps=1e-8, amsgrad=False) 
			

		if cuda:
			model.cuda()
			torch.backends.cudnn.deterministic = True
			#model = torch.nn.DataParallel(model)
		
		log.info("Optimized model")
		model_loss_function=None
		
		#####Setup to run the optimized model#####
		optimized_model = op_exp(model, train_set, valid_set, test_set=test_set, iterator=iterator,
								loss_function=best_loss, optimizer=optimizer,
								model_constraint=model_constraint, monitors=monitors,
								stop_criterion=stop_criterion, remember_best_column='valid_misclass',
								run_after_early_stop=True, model_loss_function=model_loss_function, cuda=cuda,
								data_type=data_type, subject_id=subject_id, model_type=model_type, 
								cropped=cropped, model_number=str(out_fold_num))
		optimized_model.run()

		log.info("Last 5 epochs")
		log.info("\n" + str(optimized_model.epochs_df.iloc[-5:]))
		
		writer = f"results_folder\\results\\S{subject_id}\\{data_type}_{model_type}_{str(out_fold_num)}.xlsx"
		optimized_model.epochs_df.iloc[-30:].to_excel(writer)

		# class_acc appears to hold misclassification rates, so 1 - min()
		# recovers the best accuracy reached by this outer-fold model.
		accuracy = 1 - np.min(np.array(optimized_model.class_acc))
		cv_scores.append(accuracy)  # accuracy scores, one per outer fold
		
	#####Print and store fold accuracies and mean accuracy#####
	
	print(f"Class Accuracy: {np.mean(np.array(cv_scores))}")
	results_df = pd.DataFrame(dict(cv_scores=cv_scores,
								   cv_mean=np.mean(np.array(cv_scores))))

	writer2 = f"results_folder\\results\\S{subject_id}\\{data_type}_{model_type}_cvscores.xlsx"
	results_df.to_excel(writer2)
	return optimized_model, np.mean(np.array(cv_scores))
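The parameter-selection step above hinges on the DataFrame index encoding the hyper-parameters. A standalone sketch of that pattern with made-up loss values (the keys and numbers are illustrative only):

import pandas as pd

# Rows are "drop_prob/loss/lr" keys, columns are inner folds.
loss_with_params = {'Fold_0': {'0.1/nll/0.01': 0.52, '0.2/nll/0.01': 0.47},
                    'Fold_1': {'0.1/nll/0.01': 0.55, '0.2/nll/0.01': 0.44}}
df = pd.DataFrame(loss_with_params)
df['mean'] = df.mean(axis=1)  # mean validation loss across folds
best_dp, best_loss, best_lr = df['mean'].idxmin().split("/")
print(best_dp, best_loss, best_lr)  # -> 0.2 nll 0.01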
Example #15
    def fit(self,
            train_X,
            train_y,
            epochs,
            batch_size,
            input_time_length=None,
            validation_data=None,
            model_constraint=None,
            remember_best_column=None,
            scheduler=None):
        if not self.compiled:
            raise ValueError(
                "Compile the model first by calling model.compile(loss, optimizer, metrics)"
            )

        if self.cropped and input_time_length is None:
            raise ValueError(
                "In cropped mode, need to specify input_time_length,"
                "which is the number of timesteps that will be pushed through"
                "the network in a single pass.")
        if self.cropped:
            test_input = np_to_var(train_X[0:1], dtype=np.float32)
            while len(test_input.size()) < 4:
                test_input = test_input.unsqueeze(-1)
            if self.cuda:
                test_input = test_input.cuda()
            out = self.network(test_input)
            n_preds_per_input = out.cpu().data.numpy().shape[2]
            iterator = CropsFromTrialsIterator(
                batch_size=batch_size,
                input_time_length=input_time_length,
                n_preds_per_input=n_preds_per_input,
                seed=self.seed_rng.randint(0, 4294967295))
        else:
            iterator = BalancedBatchSizeIterator(batch_size=batch_size,
                                                 seed=self.seed_rng.randint(
                                                     0, 4294967295))
        stop_criterion = MaxEpochs(
            epochs - 1
        )  # -1 since we don't print epoch 0, which matters for this stop criterion
        train_set = SignalAndTarget(train_X, train_y)
        optimizer = self.optimizer
        if scheduler is not None:
            assert scheduler == 'cosine'
            n_updates_per_epoch = sum(
                [1 for _ in iterator.get_batches(train_set, shuffle=True)])
            n_updates_per_period = n_updates_per_epoch * epochs
            if scheduler == 'cosine':
                scheduler = CosineAnnealing(n_updates_per_period)
            schedule_weight_decay = False
            if optimizer.__class__.__name__ == 'AdamW':
                schedule_weight_decay = True
            optimizer = ScheduledOptimizer(
                scheduler,
                self.optimizer,
                schedule_weight_decay=schedule_weight_decay)
        loss_function = self.loss
        if self.cropped:
            loss_function = lambda outputs, targets:\
                self.loss(th.mean(outputs, dim=2), targets)
        if validation_data is not None:
            valid_set = SignalAndTarget(validation_data[0], validation_data[1])
        else:
            valid_set = None
        test_set = None
        if self.cropped:
            monitor_dict = {
                'acc': lambda: CroppedTrialMisclassMonitor(input_time_length)
            }
        else:
            monitor_dict = {'acc': MisclassMonitor}
        self.monitors = [LossMonitor()]
        extra_monitors = [monitor_dict[m]() for m in self.metrics]
        self.monitors += extra_monitors
        self.monitors += [RuntimeMonitor()]
        exp = Experiment(self.network,
                         train_set,
                         valid_set,
                         test_set,
                         iterator=iterator,
                         loss_function=loss_function,
                         optimizer=optimizer,
                         model_constraint=model_constraint,
                         monitors=self.monitors,
                         stop_criterion=stop_criterion,
                         remember_best_column=remember_best_column,
                         run_after_early_stop=False,
                         cuda=self.cuda,
                         print_0_epoch=False,
                         do_early_stop=(remember_best_column is not None))
        exp.run()
        self.epochs_df = exp.epochs_df
        return exp
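For orientation, a minimal usage sketch of this Keras-style wrapper. Everything named here is a placeholder (the wrapper class EEGModel, the network, and the data arrays); only compile() and fit() are taken from the snippet above, whose error message indicates compile(loss, optimizer, metrics) must be called first.

# Hypothetical usage; assumes torch.nn.functional as F and torch.optim as
# optim are imported, and EEGModel stands in for the wrapper class.
model = EEGModel(network, cuda=True)
model.compile(loss=F.nll_loss,
              optimizer=optim.Adam(network.parameters()),
              metrics=['acc'])  # 'acc' maps to MisclassMonitor above
exp = model.fit(train_X, train_y, epochs=30, batch_size=64,
                validation_data=(valid_X, valid_y),
                remember_best_column='valid_misclass')
print(exp.epochs_df.tail())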
Example #16
def run_exp(epoches, batch_size, subject_num, model_type, cuda, single_subject,
            single_subject_num):
    # ival = [-500, 4000]
    max_increase_epochs = 160

    # Preprocessing
    X, y = loadSubjects(subject_num, single_subject, single_subject_num)
    X = X.astype(np.float32)
    y = y.astype(np.int64)
    X, y = shuffle(X, y)

    trial_length = X.shape[2]
    print("trial_length " + str(trial_length))
    print("trying to run with {} sec trials ".format((trial_length - 1) / 256))
    print("y")
    print(y)
    trainingSampleSize = int(len(X) * 0.6)
    validationSampleSize = int(len(X) * 0.2)
    testSampleSize = int(len(X) * 0.2)
    print("INFO : Training sample size: {}".format(trainingSampleSize))
    print("INFO : Validation sample size: {}".format(validationSampleSize))
    print("INFO : Test sample size: {}".format(testSampleSize))

    train_set = SignalAndTarget(X[:trainingSampleSize],
                                y=y[:trainingSampleSize])
    valid_set = SignalAndTarget(
        X[trainingSampleSize:(trainingSampleSize + validationSampleSize)],
        y=y[trainingSampleSize:(trainingSampleSize + validationSampleSize)])
    test_set = SignalAndTarget(X[(trainingSampleSize + validationSampleSize):],
                               y=y[(trainingSampleSize +
                                    validationSampleSize):])

    set_random_seeds(seed=20190706, cuda=cuda)

    n_classes = 3
    n_chans = int(train_set.X.shape[1])
    input_time_length = train_set.X.shape[2]
    if model_type == 'shallow':
        model = ShallowFBCSPNet(n_chans,
                                n_classes,
                                input_time_length=input_time_length,
                                final_conv_length='auto').create_network()
    elif model_type == 'deep':
        model = Deep4Net(n_chans,
                         n_classes,
                         input_time_length=input_time_length,
                         final_conv_length='auto').create_network()
    elif model_type == 'eegnet':
        model = EEGNetv4(n_chans,
                         n_classes,
                         input_time_length=input_time_length,
                         final_conv_length='auto').create_network()
    if cuda:
        model.cuda()
    log.info("Model: \n{:s}".format(str(model)))

    optimizer = optim.Adam(model.parameters())

    iterator = BalancedBatchSizeIterator(batch_size=batch_size)

    stop_criterion = Or([
        MaxEpochs(epoches),  # use the function argument; max_epochs is not defined here
        NoDecrease('valid_misclass', max_increase_epochs)
    ])

    monitors = [LossMonitor(), MisclassMonitor(), RuntimeMonitor()]

    model_constraint = MaxNormDefaultConstraint()

    exp = Experiment(model,
                     train_set,
                     valid_set,
                     test_set,
                     iterator=iterator,
                     loss_function=F.nll_loss,
                     optimizer=optimizer,
                     model_constraint=model_constraint,
                     monitors=monitors,
                     stop_criterion=stop_criterion,
                     remember_best_column='valid_misclass',
                     run_after_early_stop=True,
                     cuda=cuda)
    exp.run()
    # th.save(model, "models\{}-cropped-singleSubjectNum{}-{}sec-{}epoches-torch_model".format(model_type, single_subject_num, ((trial_length - 1) / 256), epoches))
    return exp
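A hedged example of how this entry point might be invoked; the argument values below are illustrative and assume loadSubjects can resolve the given subject numbers:

# Illustrative call: train a shallow net on subject 1's data only.
exp = run_exp(epoches=800, batch_size=60, subject_num=9,
              model_type='shallow', cuda=True,
              single_subject=True, single_subject_num=1)
print(exp.epochs_df.tail())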
Example #17
def build_exp(model_name, cuda, data, batch_size, max_epochs, max_increase_epochs):

    log.info("==============================")
    log.info("Loading Data...")
    log.info("==============================")

    train_set = data.train_set
    valid_set = data.validation_set
    test_set = data.test_set

    log.info("==============================")
    log.info("Setting Up Model...")
    log.info("==============================")
    set_random_seeds(seed=20190706, cuda=cuda)
    n_classes = 4
    n_chans = int(train_set.X.shape[1])
    input_time_length = train_set.X.shape[2]
    if model_name == "shallow":
        model = NewShallowNet(
            n_chans, n_classes, input_time_length, final_conv_length="auto"
        )
        # model = ShallowFBCSPNet(
        #     n_chans,
        #     n_classes,
        #     input_time_length=input_time_length,
        #     final_conv_length="auto",
        # ).create_network()
    elif model_name == "deep":
        model = NewDeep4Net(n_chans, n_classes, input_time_length, "auto")
        # model = Deep4Net(
        #     n_chans,
        #     n_classes,
        #     input_time_length=input_time_length,
        #     final_conv_length="auto",
        # ).create_network()
    elif model_name == "eegnet":
        # model = EEGNet(n_chans, n_classes,
        #                input_time_length=input_time_length)
        # model = EEGNetv4(n_chans, n_classes,
        #                  input_time_length=input_time_length).create_network()
        model = NewEEGNet(n_chans, n_classes, input_time_length=input_time_length)

    if cuda:
        model.cuda()

    log.info("==============================")
    log.info("Logging Model Architecture:")
    log.info("==============================")
    log.info("Model: \n{:s}".format(str(model)))

    log.info("==============================")
    log.info("Building Experiment:")
    log.info("==============================")
    optimizer = optim.Adam(model.parameters())

    iterator = BalancedBatchSizeIterator(batch_size=batch_size)

    stop_criterion = Or(
        [MaxEpochs(max_epochs), NoDecrease("valid_misclass", max_increase_epochs)]
    )

    monitors = [LossMonitor(), MisclassMonitor(), RuntimeMonitor()]

    model_constraint = MaxNormDefaultConstraint()

    exp = Experiment(
        model,
        train_set,
        valid_set,
        test_set,
        iterator=iterator,
        loss_function=F.nll_loss,
        optimizer=optimizer,
        model_constraint=model_constraint,
        monitors=monitors,
        stop_criterion=stop_criterion,
        remember_best_column="valid_misclass",
        run_after_early_stop=True,
        cuda=cuda,
    )
    return exp
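Since build_exp only constructs the Experiment, the caller is expected to run it. A minimal sketch, assuming the data argument is any object exposing train_set, validation_set and test_set attributes (faked here with SimpleNamespace; the three sets are placeholders for real SignalAndTarget data):

from types import SimpleNamespace

# Stand-in container with the attributes build_exp reads.
data = SimpleNamespace(train_set=train_set,
                       validation_set=valid_set,
                       test_set=test_set)
exp = build_exp('shallow', cuda=True, data=data, batch_size=60,
                max_epochs=1600, max_increase_epochs=160)
exp.run()
log.info("Last 5 epochs:\n{:s}".format(str(exp.epochs_df.iloc[-5:])))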
Example #18
test_set = SignalAndTarget(test_X, y=test_y)  # pair the test inputs with the test labels

cuda = True
batch_size = 60
max_epochs = 20000
max_increase_epochs = 360

model = ShallowFBCSPNet(in_chan,
                        db.n_classes,
                        input_time_length=time_steps,
                        final_conv_length="auto").create_network()
log.info("Model: \n{:s}".format(str(model)))

optimizer = optim.Adam(model.parameters())

iterator = BalancedBatchSizeIterator(batch_size=batch_size)

stop_criterion = Or([
    MaxEpochs(max_epochs),
    NoDecrease("valid_misclass", max_increase_epochs),
])

monitors = [LossMonitor(), MisclassMonitor(), RuntimeMonitor()]

model_constraint = MaxNormDefaultConstraint()

exp = Experiment(
    model,
    train_set,
    valid_set,
    test_set,
    iterator=iterator,
    loss_function=F.nll_loss,
    optimizer=optimizer,
    model_constraint=model_constraint,
    monitors=monitors,
    stop_criterion=stop_criterion,
    remember_best_column="valid_misclass",
    run_after_early_stop=True,
    cuda=cuda,
)
exp.run()

def run_exp(data_folder, session_id, subject_id, low_cut_hz, model, cuda):
    ival = [-500, 4000]
    max_epochs = 1600
    max_increase_epochs = 160
    batch_size = 10
    high_cut_hz = 38
    factor_new = 1e-3
    init_block_size = 1000
    valid_set_fraction = .2
    ''' # BCIcompetition
    train_filename = 'A{:02d}T.gdf'.format(subject_id)
    test_filename = 'A{:02d}E.gdf'.format(subject_id)
    train_filepath = os.path.join(data_folder, train_filename)
    test_filepath = os.path.join(data_folder, test_filename)
    train_label_filepath = train_filepath.replace('.gdf', '.mat')
    test_label_filepath = test_filepath.replace('.gdf', '.mat')

    train_loader = BCICompetition4Set2A(
        train_filepath, labels_filename=train_label_filepath)
    test_loader = BCICompetition4Set2A(
        test_filepath, labels_filename=test_label_filepath)
    train_cnt = train_loader.load()
    test_cnt = test_loader.load()
    '''

    # GIGAscience
    filename = 'sess{:02d}_subj{:02d}_EEG_MI.mat'.format(
        session_id, subject_id)
    filepath = os.path.join(data_folder, filename)
    train_variable = 'EEG_MI_train'
    test_variable = 'EEG_MI_test'

    train_loader = GIGAscience(filepath, train_variable)
    test_loader = GIGAscience(filepath, test_variable)
    train_cnt = train_loader.load()
    test_cnt = test_loader.load()

    # Preprocessing
    ''' channel
    ['Fp1', 'Fp2', 'F7', 'F3', 'Fz', 'F4', 'F8', 'FC5', 'FC1', 'FC2', 'FC6', 'T7', 'C3', 'Cz', 'C4', 'T8', 'TP9', 'CP5',
     'CP1', 'CP2', 'CP6', 'TP10', 'P7', 'P3', 'Pz', 'P4', 'P8', 'PO9', 'O1', 'Oz', 'O2', 'PO10', 'FC3', 'FC4', 'C5',
     'C1', 'C2', 'C6', 'CP3', 'CPz', 'CP4', 'P1', 'P2', 'POz', 'FT9', 'FTT9h', 'TTP7h', 'TP7', 'TPP9h', 'FT10',
     'FTT10h', 'TPP8h', 'TP8', 'TPP10h', 'F9', 'F10', 'AF7', 'AF3', 'AF4', 'AF8', 'PO3', 'PO4']
    '''

    train_cnt = train_cnt.pick_channels([
        'FC5', 'FC3', 'FC1', 'Fz', 'FC2', 'FC4', 'FC6', 'C5', 'C3', 'C1', 'Cz',
        'C2', 'C4', 'C6', 'CP5', 'CP3', 'CP1', 'CPz', 'CP2', 'CP4', 'CP6', 'Pz'
    ])
    train_cnt, train_cnt.info['events'] = train_cnt.copy().resample(
        250, npad='auto', events=train_cnt.info['events'])

    assert len(train_cnt.ch_names) == 22
    # let's convert to microvolts (x1e6) for numerical stability of the next operations
    train_cnt = mne_apply(lambda a: a * 1e6, train_cnt)
    train_cnt = mne_apply(
        lambda a: bandpass_cnt(a,
                               low_cut_hz,
                               high_cut_hz,
                               train_cnt.info['sfreq'],
                               filt_order=3,
                               axis=1), train_cnt)
    train_cnt = mne_apply(
        lambda a: exponential_running_standardize(a.T,
                                                  factor_new=factor_new,
                                                  init_block_size=
                                                  init_block_size,
                                                  eps=1e-4).T, train_cnt)

    test_cnt = test_cnt.pick_channels([
        'FC5', 'FC3', 'FC1', 'Fz', 'FC2', 'FC4', 'FC6', 'C5', 'C3', 'C1', 'Cz',
        'C2', 'C4', 'C6', 'CP5', 'CP3', 'CP1', 'CPz', 'CP2', 'CP4', 'CP6', 'Pz'
    ])
    test_cnt, test_cnt.info['events'] = test_cnt.copy().resample(
        250, npad='auto', events=test_cnt.info['events'])

    assert len(test_cnt.ch_names) == 22
    test_cnt = mne_apply(lambda a: a * 1e6, test_cnt)
    test_cnt = mne_apply(
        lambda a: bandpass_cnt(a,
                               low_cut_hz,
                               high_cut_hz,
                               test_cnt.info['sfreq'],
                               filt_order=3,
                               axis=1), test_cnt)
    test_cnt = mne_apply(
        lambda a: exponential_running_standardize(a.T,
                                                  factor_new=factor_new,
                                                  init_block_size=
                                                  init_block_size,
                                                  eps=1e-4).T, test_cnt)

    marker_def = OrderedDict([('Right Hand', [1]), ('Left Hand', [2])])

    train_set = create_signal_target_from_raw_mne(train_cnt, marker_def, ival)
    test_set = create_signal_target_from_raw_mne(test_cnt, marker_def, ival)

    train_set, valid_set = split_into_two_sets(train_set,
                                               first_set_fraction=1 -
                                               valid_set_fraction)

    set_random_seeds(seed=20190706, cuda=cuda)

    n_classes = 2
    n_chans = int(train_set.X.shape[1])
    input_time_length = train_set.X.shape[2]
    if model == 'shallow':
        model = ShallowFBCSPNet(n_chans,
                                n_classes,
                                input_time_length=input_time_length,
                                final_conv_length='auto').create_network()
    elif model == 'deep':
        model = Deep4Net(n_chans,
                         n_classes,
                         input_time_length=input_time_length,
                         final_conv_length='auto').create_network()
    if cuda:
        model.cuda()
    log.info("Model: \n{:s}".format(str(model)))

    optimizer = optim.Adam(model.parameters())

    iterator = BalancedBatchSizeIterator(batch_size=batch_size)

    stop_criterion = Or([
        MaxEpochs(max_epochs),
        NoDecrease('valid_misclass', max_increase_epochs)
    ])

    monitors = [LossMonitor(), MisclassMonitor(), RuntimeMonitor()]

    model_constraint = MaxNormDefaultConstraint()

    exp = Experiment(model,
                     train_set,
                     valid_set,
                     test_set,
                     iterator=iterator,
                     loss_function=F.nll_loss,
                     optimizer=optimizer,
                     model_constraint=model_constraint,
                     monitors=monitors,
                     stop_criterion=stop_criterion,
                     remember_best_column='valid_misclass',
                     run_after_early_stop=True,
                     cuda=cuda)
    exp.run()
    return exp
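This variant might be driven as follows; the path and IDs are placeholders, and the .mat file must follow the sess{XX}_subj{XX}_EEG_MI.mat naming used above:

# Illustrative invocation for one GIGAscience subject/session.
data_folder = '/path/to/gigascience'  # placeholder path
exp = run_exp(data_folder, session_id=1, subject_id=1,
              low_cut_hz=4, model='shallow', cuda=True)
print(exp.epochs_df.tail())
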
def get_stats(model_type, data_type):
    subject_ids = [
        '01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12',
        '13', '14', '15'
    ]  # 15 subjects
    accuracies = []
    batch_size = 64
    iterator = BalancedBatchSizeIterator(batch_size=batch_size)

    #####Instantiate variables for results#####
    y_true_all = np.array([])
    y_pred_all = np.array([])
    stats = {'precision': [], 'recall': [], 'f-score': []}

    for subject_id in subject_ids:

        #####Collect and format the data correctly#####
        data, labels = data_wrangler(data_type, subject_id)
        data, labels = balanced_subsample(data, labels)
        if data_type in ('words', 'all_classes'):
            data = data[:, :, 768:1280]
        elif data_type == 'vowels':
            data = data[:, :, 512:1024]
        num_folds = 4

        # train_test_split with test_size=0 is rejected by scikit-learn, so
        # use sklearn.utils.shuffle for a reproducible shuffle of data/labels.
        from sklearn.utils import shuffle
        data, labels = shuffle(data, labels, random_state=42)
        skf = StratifiedKFold(n_splits=num_folds,
                              shuffle=True,
                              random_state=10)

        test_data = []  # list for appending all test_data folds
        y_true = []  # list for appending true class labels
        y_true_a = np.array([])
        sub_acc = []  # list for appending all of the subjects accuracies

        #####split data into inner/outer folds and extract test sets#####
        for inner_ind, outer_index in skf.split(data, labels):

            inner_fold, outer_fold = data[inner_ind], data[outer_index]
            inner_labels, outer_labels = labels[inner_ind], labels[outer_index]
            outer_fold = outer_fold.reshape(
                (outer_fold.shape[0], 6, 512, 1))  # expected format
            _, X_test, _, y_test = train_test_split(outer_fold,
                                                    outer_labels,
                                                    test_size=0.5,
                                                    random_state=42,
                                                    stratify=outer_labels)

            test_data.append(X_test)  # test data
            y_true.append(y_test)  # test labels

        models = [0, 1, 2, 3]
        y_pred = np.array([])

        #####Load pytorch model and make predictions#####
        for model_num in models:

            if data_type == 'all_classes':
                data_type1 = 'combined'
            else:
                data_type1 = data_type

            model_file = f"results_folder/stored_models/{model_type}_{data_type1}/S{subject_id}/"
            model = None  # avoids potential duplication
            model = torch.load(
                model_file +
                f"{data_type}_{model_type}model_nc_{model_num+1}.pt",
                map_location={'cuda:0': 'cpu'})  #load model
            prediction = predict(model, test_data[model_num], 64, iterator)
            accuracy = accuracy_score(prediction, y_true[model_num])
            print(f"Accuracy: {accuracy}")
            sub_acc.append(accuracy)
            accuracies.append(accuracy)
            y_pred = np.concatenate((y_pred, prediction))
        print(f"Subject accuracy: {np.mean(np.array(sub_acc)) * 100} %")

        for y in y_true:
            y_true_a = np.concatenate((y_true_a, y))

        #####Gather statistics and add to dict#####
        precision, recall, f_score, _ = precision_recall_fscore_support(
            y_true_a, y_pred)
        stats['precision'].append(np.mean(precision))
        stats['recall'].append(np.mean(recall))
        stats['f-score'].append(np.mean(f_score))
        y_true_all = np.concatenate((y_true_all, y_true_a))
        y_pred_all = np.concatenate((y_pred_all, y_pred))
    print(f"Average Accuracy: {np.mean(np.array(accuracies)) * 100} %")

    df = pd.DataFrame(stats, index=subject_ids)
    writer_df = f"results_folder\\results\\{model_type}_{data_type}_stats.xlsx"
    df.to_excel(writer_df)

    return y_true_all, y_pred_all
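The pooled labels and predictions returned here lend themselves to a per-class summary. A short sketch, assuming scikit-learn is available (the snippet above already uses its metrics):

from sklearn.metrics import confusion_matrix

# Aggregate true/predicted labels across all subjects and folds,
# then print the pooled confusion matrix.
y_true_all, y_pred_all = get_stats('shallow', 'words')
print(confusion_matrix(y_true_all, y_pred_all))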