Example #1
def test_fixed_trial_with_rounding():
    dataset = DenseDesignMatrixWrapper(topo_view=to_4d_array(range(12)),
                                       y=np.zeros(12))
    splitter = FixedTrialSplitter(n_train_trials=9, valid_set_fraction=0.2)
    sets = splitter.split_into_train_valid_test(dataset)
    assert np.array_equal(sets['train'].get_topological_view().squeeze(),
                          range(8))
    assert sets['valid'].get_topological_view().squeeze() == 8
    assert np.array_equal(sets['test'].get_topological_view().squeeze(),
                          range(9, 12))
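A minimal sketch of the split these assertions imply, assuming FixedTrialSplitter takes the validation trials from the end of the training block and truncates the fractional trial count (a plausible reading of the test, not necessarily the library's exact rule):

n_trials, n_train_trials, valid_set_fraction = 12, 9, 0.2
n_valid = int(n_train_trials * valid_set_fraction)  # 1.8 -> 1 (assumed truncation)
train_idx = list(range(n_train_trials - n_valid))                  # trials 0..7
valid_idx = list(range(n_train_trials - n_valid, n_train_trials))  # trial 8
test_idx = list(range(n_train_trials, n_trials))                   # trials 9..11
assert (train_idx, valid_idx, test_idx) == (list(range(8)), [8], [9, 10, 11])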
Example #3
def create_default_experiment(final_layer,
                              dataset,
                              n_epochs=100,
                              **overwrite_args):
    # Special-case the splitter here: dataset.X is accessed below and
    # might not exist for every dataset type
    if 'splitter' not in overwrite_args:
        n_trials = len(dataset.X)
        splitter = FixedTrialSplitter(n_train_trials=n_trials // 2,
                                      valid_set_fraction=0.2)
    else:
        splitter = overwrite_args['splitter']
    monitors = [MisclassMonitor(), LossMonitor(), RuntimeMonitor()]
    stop_criterion = MaxEpochs(n_epochs)

    exp_args = dict(
        splitter=splitter,
        preprocessor=None,
        iterator=BalancedBatchIterator(batch_size=45),
        loss_expression=lasagne.objectives.categorical_crossentropy,
        updates_expression=lasagne.updates.adam,
        updates_modifier=None,
        monitors=monitors,
        stop_criterion=stop_criterion,
        remember_best_chan='valid_misclass',
        run_after_early_stop=True,
        batch_modifier=None)
    exp_args.update(**overwrite_args)

    exp = Experiment(final_layer, dataset, **exp_args)
    return exp
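A hypothetical usage sketch for create_default_experiment, reusing names that appear in the other examples on this page (lasagne, DenseDesignMatrixWrapper, Experiment); the layer shape and random dataset are illustrative only:

import numpy as np
import lasagne
from lasagne.layers import InputLayer, DenseLayer
from numpy.random import RandomState

data_rng = RandomState(398765905)
rand_topo = data_rng.rand(200, 10, 10, 3).astype(np.float32)
rand_y = np.int32(data_rng.rand(200) > 0.5)
dataset = DenseDesignMatrixWrapper(topo_view=rand_topo, y=rand_y)

in_layer = InputLayer(shape=[None, 10, 10, 3])
final_layer = DenseLayer(in_layer, name='softmax', num_units=2,
                         nonlinearity=lasagne.nonlinearities.softmax)

# Any keyword argument overrides the corresponding default, e.g. a custom
# splitter or stop_criterion could be passed via **overwrite_args.
exp = create_default_experiment(final_layer, dataset, n_epochs=30)
exp.setup()
exp.run()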
Example #4
    def _run_experiments_with_string(self, experiment_index, train_str):
        assert experiment_index >= self._get_start_id()
        assert experiment_index < self._get_stop_id()
        lasagne.random.set_rng(RandomState(9859295))
        # Save the train string now; it will be overwritten later once the
        # input dimensions are determined. Saving it early helps debugging
        # in case of a crash.
        if not self._dry_run:
            self._save_train_string(train_str, experiment_index)
        starttime = time.time()

        train_dict = self._load_without_layers(train_str)
        log.info("With params...")
        if not self._quiet:
            pprint(train_dict['original_params'])
        if self._dry_run:
            # Do not do the loading or training...
            # Only go until here to show the train params
            return

        if self._batch_test:
            # TODO: put into function
            # load layers, load data with dimensions of the layer
            # create experiment with max epochs 2, run
            from braindecode.datasets.random import RandomSet
            train_str = train_str.replace('in_cols', '1')
            train_str = train_str.replace('in_sensors', '32')
            train_dict = yaml_parse.load(train_str)
            layers = load_layers_from_dict(train_dict)
            final_layer = layers[-1]
            n_chans = layers[0].shape[1]
            n_classes = final_layer.output_shape[1]
            n_samples = 500000
            # set n sample preds in case of cnt model
            if np.any([hasattr(l, 'n_stride') for l in layers]):
                n_sample_preds = get_n_sample_preds(final_layer)
                log.info("Setting n_sample preds automatically to {:d}".format(
                    n_sample_preds))
                for monitor in train_dict['exp_args']['monitors']:
                    if hasattr(monitor, 'n_sample_preds'):
                        monitor.n_sample_preds = n_sample_preds
                train_dict['exp_args'][
                    'iterator'].n_sample_preds = n_sample_preds
                log.info("Input window length is {:d}".format(
                    get_model_input_window(final_layer)))
                # make sure there are at least enough samples for full batches
                n_samples = int(n_sample_preds * 1.5 * 200)
            dataset = RandomSet(topo_shape=[n_samples, n_chans, 1, 1],
                                y_shape=[n_samples, n_classes])
            dataset.load()
            splitter = FixedTrialSplitter(n_train_trials=int(n_samples * 0.8),
                                          valid_set_fraction=0.1)
            train_dict['exp_args']['preprocessor'] = None
            train_dict['exp_args']['stop_criterion'] = MaxEpochs(1)
            train_dict['exp_args']['iterator'].batch_size = 1
            # TODO: set stop criterion to max epochs =1
            #  change batch_size in iterator
            exp = Experiment(final_layer, dataset, splitter,
                             **train_dict['exp_args'])
            exp.setup()
            exp.run_until_early_stop()
            datasets = exp.dataset_provider.get_train_valid_test(exp.dataset)
            for batch_size in range(32, 200, 5):
                train_dict['exp_args']['stop_criterion'].num_epochs += 2
                log.info("Running with batch size {:d}".format(batch_size))
                train_dict['exp_args']['iterator'].batch_size = batch_size
                exp.run_until_stop(datasets, remember_best=False)
            return

        dataset = train_dict['dataset']
        dataset.load()
        iterator = train_dict['exp_args']['iterator']
        splitter = train_dict['dataset_splitter']
        if dataset.__class__.__name__ == 'EpilepsySet':
            log.info("Reducing to float16 for epilepsy set...")
            dataset.seizure_topo = np.float16(dataset.seizure_topo)
            dataset.non_seizure_topo = np.float16(dataset.non_seizure_topo)
        else:
            # todo: remove this?
            log.info(
                "Determining dataset dimensions to set possible model params..."
            )
            train_set = splitter.split_into_train_valid_test(dataset)['train']
            batch_gen = iterator.get_batches(train_set, shuffle=True)
            dummy_batch_topo = batch_gen.next()[0]
            del train_set
            # not for ultrasound: assert 'in_sensors' in train_str
            # not for cnt net assert 'in_rows' in train_str
            # not for resnet: assert 'in_cols' in train_str
            train_str = train_str.replace('in_sensors',
                                          str(dummy_batch_topo.shape[1]))
            train_str = train_str.replace('in_rows',
                                          str(dummy_batch_topo.shape[2]))
            train_str = train_str.replace('in_cols',
                                          str(dummy_batch_topo.shape[3]))

        self._save_train_string(train_str, experiment_index)

        # reset rng for actual loading of layers, so you can reproduce it
        # when you load the file later
        lasagne.random.set_rng(RandomState(9859295))
        train_dict = yaml_parse.load(train_str)

        layers = load_layers_from_dict(train_dict)
        final_layer = layers[-1]
        assert len(
            np.setdiff1d(
                layers, lasagne.layers.get_all_layers(final_layer))) == 0, (
                    "All layers "
                    "should be used, unused {:s}".format(
                        str(
                            np.setdiff1d(
                                layers,
                                lasagne.layers.get_all_layers(final_layer)))))
        # Set n sample preds in case of cnt model
        if np.any([hasattr(l, 'n_stride') for l in layers]):
            # Could this be moved up so the duplication with the batch-test
            # branch above is removed?
            n_sample_preds = get_n_sample_preds(final_layer)
            log.info("Setting n_sample preds automatically to {:d}".format(
                n_sample_preds))
            for monitor in train_dict['exp_args']['monitors']:
                if hasattr(monitor, 'n_sample_preds'):
                    monitor.n_sample_preds = n_sample_preds
            train_dict['exp_args']['iterator'].n_sample_preds = n_sample_preds
            log.info("Input window length is {:d}".format(
                get_model_input_window(final_layer)))

        if not self._cross_validation:
            # For now let's not do that, current models seem fine again.
            #             if (dataset.__class__.__name__ == 'EpilepsySet') and self._pred_loss_hack:
            #                 from braindecode.epilepsy.experiment import EpilepsyExperiment
            #                 log.info("Creating epilepsy experiment with the pred loss hack")
            #                 exp = EpilepsyExperiment(final_layer, dataset, splitter,
            #                     **train_dict['exp_args'])
            #             else:
            exp = Experiment(final_layer, dataset, splitter,
                             **train_dict['exp_args'])
            exp.setup()
            exp.run()
            endtime = time.time()

            model = exp.final_layer

            # dummy predictions and targets
            predictions = [0, 3, 1, 2, 3, 4]
            targets = [3, 4, 1, 2, 3, 4]

            result_or_results = Result(
                parameters=train_dict['original_params'],
                templates={},
                training_time=endtime - starttime,
                monitor_channels=exp.monitor_chans,
                predictions=predictions,
                targets=targets)

        else:  # cross validation
            assert False, (
                "cross validation not used in long time, not up to date"
                " for example targets predictions not added")
            # default 5 folds for now
            n_folds = train_dict['num_cv_folds']
            exp_cv = ExperimentCrossValidation(final_layer,
                                               dataset,
                                               exp_args=train_dict['exp_args'],
                                               n_folds=n_folds,
                                               shuffle=self._shuffle)
            exp_cv.run()
            endtime = time.time()
            result_or_results = []
            for i_fold in xrange(n_folds):
                res = Result(parameters=train_dict['original_params'],
                             templates={},
                             training_time=endtime - starttime,
                             monitor_channels=exp_cv.all_monitor_chans[i_fold],
                             predictions=[0, 3, 1, 2, 3, 4],
                             targets=[3, 4, 1, 2, 3, 4])
                result_or_results.append(res)
            model = exp_cv.all_layers

        if not os.path.exists(self._folder_paths[experiment_index]):
            os.makedirs(self._folder_paths[experiment_index])

        result_file_name = self._get_result_save_path(experiment_index)

        log.info("Saving result to {:s}...".format(result_file_name))
        with open(result_file_name, 'w') as resultfile:
            pickle.dump(result_or_results, resultfile)

        model_file_name = self._get_model_save_path(experiment_index)
        param_file_name = model_file_name.replace('.pkl', '.npy')
        np.save(param_file_name, lasagne.layers.get_all_param_values(model))

        # Possibly make kaggle submission file
        if isinstance(dataset,
                      KaggleGraspLiftSet) and splitter.use_test_as_valid:
            experiment_save_id = int(
                self._base_save_paths[experiment_index].split("/")[-1])
            create_submission_csv_for_one_subject(
                self._folder_paths[experiment_index], exp.dataset, iterator,
                train_dict['exp_args']['preprocessor'], final_layer,
                experiment_save_id)
        elif isinstance(
                dataset,
                AllSubjectsKaggleGraspLiftSet) and splitter.use_test_as_valid:
            experiment_save_id = int(
                self._base_save_paths[experiment_index].split("/")[-1])
            create_submission_csv_for_all_subject_model(
                self._folder_paths[experiment_index], exp.dataset,
                exp.dataset_provider, iterator, final_layer,
                experiment_save_id)
        elif isinstance(splitter, SeveralSetsSplitter):
            pass  # nothing to do in this case

        # Very hacky way to create predictions/targets :)
        # Not done earlier as there were weird Theano crashes
        if exp.monitors[2].__class__.__name__ == 'CntTrialMisclassMonitor':
            del dataset
            del exp
            add_labels_to_cnt_exp_result(
                self._base_save_paths[experiment_index])
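A short, hypothetical sketch (placeholder paths, not from the repo) of reading back the artifacts this method writes, i.e. the pickled Result object(s) and the .npy dump of the model parameters:

import pickle
import numpy as np

with open('results/1.result.pkl', 'rb') as f:  # placeholder path
    result_or_results = pickle.load(f)         # a Result or a list of Results
# allow_pickle is needed on newer numpy because the saved value is a list of
# per-layer arrays stored as an object array; older numpy allowed it by default.
param_values = np.load('results/1.npy', allow_pickle=True)
# The parameters could then be restored into a freshly built network with
# lasagne.layers.set_all_param_values(final_layer, param_values).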
Example #5
def test_experiment_fixed_split():
    """ Regression test, checking that values have not changed from original run"""
    data_rng = RandomState(398765905)
    rand_topo = data_rng.rand(200, 10, 10, 3).astype(np.float32)
    rand_y = np.int32(data_rng.rand(200) > 0.5)
    rand_topo[rand_y == 1] += 0.01
    rand_set = DenseDesignMatrixWrapper(topo_view=rand_topo, y=rand_y)

    lasagne.random.set_rng(RandomState(9859295))
    in_layer = InputLayer(shape=[None, 10, 10, 3])
    network = DenseLayer(incoming=in_layer,
                         name="softmax",
                         num_units=2,
                         nonlinearity=lasagne.nonlinearities.softmax)

    updates_modifier = MaxNormConstraint({'softmax': 0.5})
    dataset = rand_set

    dataset_iterator = BalancedBatchIterator(batch_size=60)

    preprocessor = OnlineAxiswiseStandardize(axis=['c', 1])
    dataset_splitter = FixedTrialSplitter(n_train_trials=150,
                                          valid_set_fraction=0.1)
    updates_var_func = lasagne.updates.adam
    loss_var_func = lasagne.objectives.categorical_crossentropy
    monitors = [
        braindecode.veganlasagne.monitors.LossMonitor(),
        braindecode.veganlasagne.monitors.MisclassMonitor(),
        braindecode.veganlasagne.monitors.RuntimeMonitor()
    ]
    stop_criterion = braindecode.veganlasagne.stopping.MaxEpochs(num_epochs=30)

    exp = Experiment(network,
                     dataset,
                     dataset_splitter,
                     preprocessor,
                     dataset_iterator,
                     loss_var_func,
                     updates_var_func,
                     updates_modifier,
                     monitors,
                     stop_criterion,
                     remember_best_chan='valid_misclass',
                     run_after_early_stop=True)
    exp.setup()
    exp.run()
    assert np.allclose([
        0.548148, 0.540741, 0.503704, 0.451852, 0.392593, 0.370370, 0.340741,
        0.281481, 0.237037, 0.207407, 0.192593, 0.177778, 0.133333, 0.111111,
        0.111111, 0.103704, 0.096296, 0.088889, 0.088889, 0.081481, 0.074074,
        0.066667, 0.066667, 0.059259, 0.059259, 0.051852, 0.037037, 0.037037,
        0.029630, 0.029630, 0.029630, 0.053333, 0.053333, 0.053333, 0.053333,
        0.040000, 0.040000, 0.026667, 0.026667, 0.026667, 0.026667, 0.033333,
        0.033333, 0.033333, 0.033333, 0.026667, 0.020000, 0.020000, 0.020000
    ],
                       exp.monitor_chans['train_misclass'],
                       rtol=1e-4,
                       atol=1e-4)
    assert np.allclose([
        0.400000, 0.400000, 0.400000, 0.400000, 0.400000, 0.400000, 0.400000,
        0.400000, 0.333333, 0.333333, 0.333333, 0.266667, 0.266667, 0.266667,
        0.266667, 0.266667, 0.266667, 0.266667, 0.266667, 0.266667, 0.266667,
        0.266667, 0.266667, 0.333333, 0.333333, 0.333333, 0.333333, 0.266667,
        0.266667, 0.266667, 0.266667, 0.266667, 0.266667, 0.266667, 0.266667,
        0.200000, 0.200000, 0.133333, 0.133333, 0.133333, 0.133333, 0.133333,
        0.133333, 0.133333, 0.133333, 0.066667, 0.000000, 0.000000, 0.000000
    ],
                       exp.monitor_chans['valid_misclass'],
                       rtol=1e-4,
                       atol=1e-4)
    assert np.allclose([
        0.460000, 0.420000, 0.420000, 0.420000, 0.420000, 0.440000, 0.420000,
        0.420000, 0.400000, 0.400000, 0.380000, 0.400000, 0.400000, 0.400000,
        0.400000, 0.400000, 0.420000, 0.420000, 0.420000, 0.400000, 0.400000,
        0.400000, 0.380000, 0.380000, 0.380000, 0.380000, 0.400000, 0.400000,
        0.420000, 0.420000, 0.420000, 0.420000, 0.420000, 0.420000, 0.420000,
        0.420000, 0.400000, 0.400000, 0.380000, 0.400000, 0.400000, 0.400000,
        0.400000, 0.400000, 0.360000, 0.360000, 0.380000, 0.380000, 0.380000
    ],
                       exp.monitor_chans['test_misclass'],
                       rtol=1e-4,
                       atol=1e-4)
    assert np.allclose([
        1.200389, 0.777420, 0.740212, 0.705151, 0.672329, 0.641764, 0.613245,
        0.586423, 0.561397, 0.538399, 0.517073, 0.497741, 0.479949, 0.463601,
        0.448505, 0.434583, 0.421652, 0.409739, 0.398721, 0.388490, 0.378988,
        0.370121, 0.361965, 0.354295, 0.347159, 0.340496, 0.334237, 0.328328,
        0.322803, 0.317624, 0.312765, 0.340091, 0.335658, 0.330868, 0.325923,
        0.320895, 0.316027, 0.311290, 0.306683, 0.302364, 0.298264, 0.294475,
        0.290957, 0.287673, 0.284664, 0.281860, 0.279309, 0.276918, 0.274709
    ],
                       exp.monitor_chans['train_loss'],
                       rtol=1e-4,
                       atol=1e-4)
    assert np.allclose([
        0.766092, 0.642237, 0.636960, 0.629884, 0.623676, 0.618789, 0.613821,
        0.609264, 0.605430, 0.601499, 0.598178, 0.594579, 0.591720, 0.589461,
        0.587571, 0.585673, 0.583782, 0.581606, 0.580687, 0.579677, 0.579276,
        0.578903, 0.578918, 0.578901, 0.579020, 0.579575, 0.580291, 0.581120,
        0.581591, 0.582552, 0.583647, 0.585879, 0.582269, 0.571548, 0.555956,
        0.536982, 0.517474, 0.496652, 0.474400, 0.453094, 0.432208, 0.412533,
        0.394271, 0.377036, 0.361311, 0.346461, 0.333406, 0.321266, 0.310158
    ],
                       exp.monitor_chans['valid_loss'],
                       rtol=1e-4,
                       atol=1e-4)
    assert np.allclose([
        1.069603, 0.751982, 0.746711, 0.742126, 0.738055, 0.734703, 0.731921,
        0.729251, 0.727241, 0.724931, 0.723189, 0.721885, 0.720605, 0.719565,
        0.718930, 0.718664, 0.718671, 0.718747, 0.719004, 0.718935, 0.719153,
        0.719381, 0.719815, 0.720419, 0.721205, 0.721993, 0.722759, 0.723534,
        0.724298, 0.724908, 0.725497, 0.725097, 0.725950, 0.726615, 0.726953,
        0.727603, 0.728247, 0.728787, 0.729323, 0.729945, 0.730434, 0.731245,
        0.732168, 0.732949, 0.734086, 0.735250, 0.736381, 0.737502, 0.738444
    ],
                       exp.monitor_chans['test_loss'],
                       rtol=1e-4,
                       atol=1e-4)
Example #6
def test_experiment_sample_windows():
    data_rng = RandomState(398765905)
    rand_topo = data_rng.rand(200, 10, 10, 3).astype(np.float32)
    rand_y = np.int32(data_rng.rand(200) > 0.5)
    rand_topo[rand_y == 1] += 0.1
    rand_set = DenseDesignMatrixWrapper(topo_view=rand_topo, y=rand_y)

    lasagne.random.set_rng(RandomState(9859295))
    in_layer = InputLayer(shape=[None, 10, 5, 3])
    network = DenseLayer(incoming=in_layer,
                         name='softmax',
                         num_units=2,
                         nonlinearity=lasagne.nonlinearities.softmax)
    updates_modifier = MaxNormConstraint({'softmax': 0.5})

    dataset = rand_set

    dataset_iterator = WindowsIterator(n_samples_per_window=5, batch_size=60)

    preprocessor = OnlineAxiswiseStandardize(axis=['c', 1])
    dataset_splitter = FixedTrialSplitter(n_train_trials=150,
                                          valid_set_fraction=0.1)
    updates_var_func = lasagne.updates.adam
    loss_var_func = lasagne.objectives.categorical_crossentropy
    monitors = [
        braindecode.veganlasagne.monitors.LossMonitor(),
        braindecode.veganlasagne.monitors.WindowMisclassMonitor(),
        braindecode.veganlasagne.monitors.RuntimeMonitor()
    ]
    stop_criterion = braindecode.veganlasagne.stopping.MaxEpochs(num_epochs=5)

    exp = Experiment(network,
                     dataset,
                     dataset_splitter,
                     preprocessor,
                     dataset_iterator,
                     loss_var_func,
                     updates_var_func,
                     updates_modifier,
                     monitors,
                     stop_criterion,
                     remember_best_chan='valid_misclass',
                     run_after_early_stop=True)
    exp.setup()
    exp.run()

    assert np.allclose(
        [0.629630, 0.140741, 0.029630, 0.022222, 0.000000, 0.000000, 0.000000],
        exp.monitor_chans['train_misclass'],
        rtol=1e-4,
        atol=1e-4)
    assert np.allclose(
        [0.400000, 0.133333, 0.066667, 0.000000, 0.000000, 0.000000, 0.000000],
        exp.monitor_chans['valid_misclass'],
        rtol=1e-4,
        atol=1e-4)
    assert np.allclose(
        [0.560000, 0.060000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000],
        exp.monitor_chans['test_misclass'],
        rtol=1e-4,
        atol=1e-4)
    assert np.allclose(
        [1.180485, 0.574264, 0.420023, 0.330909, 0.278569, 0.245692, 0.242845],
        exp.monitor_chans['train_loss'],
        rtol=1e-4,
        atol=1e-4)
    assert np.allclose(
        [1.016782, 0.514049, 0.370485, 0.288948, 0.240913, 0.211189, 0.215967],
        exp.monitor_chans['valid_loss'],
        rtol=1e-4,
        atol=1e-4)
    assert np.allclose(
        [1.031832, 0.504570, 0.352317, 0.269810, 0.223904, 0.196681, 0.197899],
        exp.monitor_chans['test_loss'],
        rtol=1e-4,
        atol=1e-4)