Example #1
import pytest
from mock import MagicMock  # or: from unittest.mock import MagicMock

from nolearn_utils.hooks import EarlyStopping


def test_early_stopping(mock_train_history):
    early_stopper = EarlyStopping(patience=3)
    train_history = mock_train_history(valid_loss=[0.7, 0.6, 0.6, 0.6, 0.6],
                                       train_loss=[0.3, 0.3, 0.3, 0.3, 0.3],
                                       epoch=[1, 2, 3, 4, 5])
    net = MagicMock()
    net.get_all_params_values = MagicMock()  # EarlyStopping snapshots the best weights via this call
    early_stopper(net, train_history[:1])
    early_stopper(net, train_history[:2])
    early_stopper(net, train_history[:3])
    early_stopper(net, train_history[:4])
    with pytest.raises(StopIteration):
        early_stopper(net, train_history[:5])
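
# For context, here is a minimal sketch of the kind of hook this test
# exercises. It is an assumption, not nolearn_utils' exact implementation:
# nolearn calls each on_epoch_finished handler as handler(nn, train_history)
# and stops training when a handler raises StopIteration.
class MinimalEarlyStopping(object):
    def __init__(self, patience=100):
        self.patience = patience
        self.best_valid = float('inf')
        self.best_valid_epoch = 0

    def __call__(self, nn, train_history):
        current_valid = train_history[-1]['valid_loss']
        current_epoch = train_history[-1]['epoch']
        if current_valid < self.best_valid:
            # a new best validation loss resets the patience window
            self.best_valid = current_valid
            self.best_valid_epoch = current_epoch
        elif self.best_valid_epoch + self.patience <= current_epoch:
            raise StopIteration()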
Example #2
def cascade_model(options):
    """
    3D cascade model using nolearn and Lasagne.

    Inputs:
    - options: dictionary of model options, including the path where weights
      should be saved ('weight_paths') and the experiment name.

    Output:
    - nets: list of NeuralNets [CNN1, CNN2]
    """
    # (Assumed imports, not shown in this listing: os; the lasagne layers
    # InputLayer, Conv3DLayer, Pool3DLayer, DenseLayer, DropoutLayer and
    # batch_norm; lasagne nonlinearities, objectives and updates;
    # nolearn.lasagne NeuralNet and TrainSplit; the SaveWeights,
    # SaveTrainingHistory and EarlyStopping handlers; and the project-local
    # Rotate_batch_Iterator.)

    # model options
    channels = len(options['modalities'])
    train_split_perc = options['train_split']
    num_epochs = options['max_epochs']
    max_epochs_patience = options['patience']

    # create an experiment folder so the model can be saved and re-used
    if not os.path.exists(
            os.path.join(options['weight_paths'], options['experiment'])):
        os.mkdir(os.path.join(options['weight_paths'], options['experiment']))
    if not os.path.exists(
            os.path.join(options['weight_paths'], options['experiment'],
                         'nets')):
        os.mkdir(
            os.path.join(options['weight_paths'], options['experiment'],
                         'nets'))

    # --------------------------------------------------
    # first model
    # --------------------------------------------------

    layer1 = InputLayer(name='in1',
                        shape=(None, channels) + options['patch_size'])
    layer1 = batch_norm(Conv3DLayer(layer1,
                                    name='conv1_1',
                                    num_filters=32,
                                    filter_size=3,
                                    pad='same'),
                        name='BN1')
    layer1 = Pool3DLayer(layer1,
                         name='avgpool_1',  # note: this is a max pool despite the name
                         mode='max',
                         pool_size=2,
                         stride=2)
    layer1 = batch_norm(Conv3DLayer(layer1,
                                    name='conv2_1',
                                    num_filters=64,
                                    filter_size=3,
                                    pad='same'),
                        name='BN2')
    layer1 = Pool3DLayer(layer1,
                         name='avgpool_2',
                         mode='max',
                         pool_size=2,
                         stride=2)
    layer1 = DropoutLayer(layer1, name='l2drop', p=0.5)
    layer1 = DenseLayer(layer1, name='d_1', num_units=256)
    layer1 = DenseLayer(layer1,
                        name='out',
                        num_units=2,
                        nonlinearity=nonlinearities.softmax)

    # save weights
    net_model = 'model_1'
    net_weights = os.path.join(options['weight_paths'], options['experiment'],
                               'nets', net_model + '.pkl')
    net_history = os.path.join(options['weight_paths'], options['experiment'],
                               'nets', net_model + '_history.pkl')

    net1 = NeuralNet(
        layers=layer1,
        objective_loss_function=objectives.categorical_crossentropy,
        batch_iterator_train=Rotate_batch_Iterator(batch_size=128),
        update=updates.adadelta,
        on_epoch_finished=[
            SaveWeights(net_weights, only_best=True, pickle=False),
            SaveTrainingHistory(net_history),
            EarlyStopping(patience=max_epochs_patience)
        ],
        verbose=options['net_verbose'],
        max_epochs=num_epochs,
        train_split=TrainSplit(eval_size=train_split_perc),
    )

    # --------------------------------------------------
    # second model
    # --------------------------------------------------

    layer2 = InputLayer(name='in2',
                        shape=(None, channels) + options['patch_size'])
    layer2 = batch_norm(Conv3DLayer(layer2,
                                    name='conv1_1',
                                    num_filters=32,
                                    filter_size=3,
                                    pad='same'),
                        name='BN1')
    layer2 = Pool3DLayer(layer2,
                         name='avgpool_1',
                         mode='max',
                         pool_size=2,
                         stride=2)
    layer2 = batch_norm(Conv3DLayer(layer2,
                                    name='conv2_1',
                                    num_filters=64,
                                    filter_size=3,
                                    pad='same'),
                        name='BN2')
    layer2 = Pool3DLayer(layer2,
                         name='avgpool_2',
                         mode='max',
                         pool_size=2,
                         stride=2)
    layer2 = DropoutLayer(layer2, name='l2drop', p=0.5)
    layer2 = DenseLayer(layer2, name='d_1', num_units=256)
    layer2 = DenseLayer(layer2,
                        name='out',
                        num_units=2,
                        nonlinearity=nonlinearities.softmax)

    # save weights
    net_model = 'model_2'
    net_weights2 = os.path.join(options['weight_paths'], options['experiment'],
                                'nets', net_model + '.pkl')
    net_history2 = os.path.join(options['weight_paths'], options['experiment'],
                                'nets', net_model + '_history.pkl')

    net2 = NeuralNet(
        layers=layer2,
        objective_loss_function=objectives.categorical_crossentropy,
        batch_iterator_train=Rotate_batch_Iterator(batch_size=128),
        update=updates.adadelta,
        on_epoch_finished=[
            SaveWeights(net_weights2, only_best=True, pickle=False),
            SaveTrainingHistory(net_history2),
            EarlyStopping(patience=max_epochs_patience)
        ],
        verbose=options['net_verbose'],
        max_epochs=num_epochs,
        train_split=TrainSplit(eval_size=train_split_perc),
    )

    return [net1, net2]
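
# A hedged usage sketch for cascade_model (illustrative values only; the keys
# mirror those read inside the function, and X/y are hypothetical arrays):
options = {
    'modalities': ['FLAIR', 'T1'],
    'train_split': 0.25,
    'max_epochs': 200,
    'patience': 25,
    'weight_paths': '/tmp/experiments',
    'experiment': 'cascade_demo',
    'net_verbose': 1,
    'patch_size': (11, 11, 11),
}
net1, net2 = cascade_model(options)
# net1.fit(X, y) would train the first CNN; net2 is then trained on the
# candidate voxels that net1 classifies as lesion (the cascade idea).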
Example #3
train_iterator_kwargs = dict(  # assumed opening; the listing begins mid-call
    affine_p=1.,
    affine_scale_choices=np.linspace(0.5, 1.5, 11),
    # affine_shear_choices=np.linspace(-0.5, 0.5, 11),
    affine_translation_choices=np.arange(-64, 64, 1),
    # affine_rotation_choices=np.arange(0, 360, 1),
    adjust_gamma_p=0.5,
    adjust_gamma_choices=np.linspace(0.5, 1.5, 11))
train_iterator = TrainIterator(**train_iterator_kwargs)

test_iterator_kwargs = dict(batch_size=batch_size, )
test_iterator = TestIterator(**test_iterator_kwargs)

save_weights = SaveWeights(model_fname, only_best=True, pickle=False)
save_training_history = SaveTrainingHistory(model_history_fname)
plot_training_history = PlotTrainingHistory(model_graph_fname)
early_stopping = EarlyStopping(patience=100)

conv_kwargs = dict(pad='same',
                   nonlinearity=nn.nonlinearities.very_leaky_rectify)

pool_kwargs = dict(pool_size=2, )

l = nn.layers.InputLayer(name='in', shape=(None, 3, image_size, image_size))

# 256
l = conv2dbn(l,
             name='l1c1',
             num_filters=32,
             filter_size=(7, 7),
             stride=2,
             **conv_kwargs)
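
# conv2dbn is not defined in this snippet. A plausible sketch, assuming it
# simply wraps a Conv2DLayer in batch normalization, mirroring the
# batch_norm(Conv3DLayer(...)) pattern of Example #2:
def conv2dbn(incoming, name, **kwargs):
    # batch_norm inserts a BatchNormLayer after the convolution and applies
    # the nonlinearity after normalization
    return nn.layers.batch_norm(
        nn.layers.Conv2DLayer(incoming, name=name, **kwargs),
        name=name + 'bn')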
Example #4
test_iterator_kwargs = {
    'batch_size': batch_size,
    'buffer_size': 5,
}
test_iterator = TestIterator(**test_iterator_kwargs)

save_weights = SaveWeights('./examples/mnist/model_weights.pkl',
                           only_best=True,
                           pickle=False)
save_training_history = SaveTrainingHistory(
    './examples/mnist/model_history.pkl')
plot_training_history = PlotTrainingHistory(
    './examples/mnist/training_history.png')
early_stopping = EarlyStopping(metrics='valid_accuracy',
                               patience=100,
                               verbose=True,
                               higher_is_better=True)

net = NeuralNet(layers=[
    (InputLayer, dict(name='in', shape=(None, 1, image_size, image_size))),
    (Conv2DDNNLayer,
     dict(name='l1c1', num_filters=32, filter_size=(3, 3), pad='same')),
    (Conv2DDNNLayer,
     dict(name='l1c2', num_filters=32, filter_size=(3, 3), pad='same')),
    (MaxPool2DDNNLayer, dict(name='l1p', pool_size=3, stride=2)),
    (Conv2DDNNLayer,
     dict(name='l2c1', num_filters=32, filter_size=(3, 3), pad='same')),
    (Conv2DDNNLayer,
     dict(name='l2c2', num_filters=32, filter_size=(3, 3), pad='same')),
    (MaxPool2DDNNLayer, dict(name='l2p', pool_size=3, stride=2)),
    (DenseLayer, dict(name='l7', num_units=256)),
    # ... (the remaining layers and NeuralNet keyword arguments, presumably
    # including the handlers defined above, are truncated in the original
    # listing)
])
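
# With the truncated parts filled in, training follows the usual nolearn
# pattern. A hedged sketch; mnist_images and mnist_labels are hypothetical
# arrays (the snippet does not show how the data is loaded):
X = mnist_images.astype(np.float32).reshape(-1, 1, image_size, image_size)
y = mnist_labels.astype(np.int32)
net.fit(X, y)  # the handlers above save weights/history and stop training
               # once valid_accuracy fails to improve for 100 epochs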
Example #5
def build_model(weights_path, options):
    """
    Build the CNN model and return the NeuralNet object.

    Inputs:
    - weights_path: folder where the net weights are saved.
    - options: dictionary of hyper-parameters used to configure the net.

    Output:
    - net: a NeuralNet object
    """
    # (Assumed imports, not shown in this listing: os; the lasagne layers
    # InputLayer, Conv2DLayer, MaxPool2DLayer, DenseLayer, DropoutLayer,
    # ConcatLayer, batch_norm and prelu; lasagne softmax, objectives and
    # updates; nolearn.lasagne NeuralNet and TrainSplit; and the SaveWeights,
    # SaveTrainingHistory and EarlyStopping handlers.)

    net_model_name = options['experiment']

    try:
        os.mkdir(os.path.join(weights_path, net_model_name))
    except OSError:
        # the experiment folder already exists
        pass

    net_weights = os.path.join(weights_path, net_model_name,
                               net_model_name + '.pkl')
    net_history = os.path.join(weights_path, net_model_name,
                               net_model_name + '_history.pkl')

    # select hyper-parameters
    t_verbose = options['net_verbose']
    train_split_perc = options['train_split']
    num_epochs = options['max_epochs']
    max_epochs_patience = options['patience']
    early_stopping = EarlyStopping(patience=max_epochs_patience)
    save_weights = SaveWeights(net_weights, only_best=True, pickle=False)
    save_training_history = SaveTrainingHistory(net_history)

    # build the architecture
    ps = options['patch_size'][0]
    num_channels = 1
    fc_conv = 180
    fc_fc = 180
    dropout_conv = 0.5
    dropout_fc = 0.5

    # --------------------------------------------------
    # channel_1: axial
    # --------------------------------------------------

    axial_ch = InputLayer(name='in1', shape=(None, num_channels, ps, ps))
    axial_ch = prelu(batch_norm(
        Conv2DLayer(axial_ch,
                    name='axial_ch_conv1',
                    num_filters=20,
                    filter_size=3)),
                     name='axial_ch_prelu1')
    axial_ch = prelu(batch_norm(
        Conv2DLayer(axial_ch,
                    name='axial_ch_conv2',
                    num_filters=20,
                    filter_size=3)),
                     name='axial_ch_prelu2')
    axial_ch = MaxPool2DLayer(axial_ch, name='axial_max_pool_1', pool_size=2)
    axial_ch = prelu(batch_norm(
        Conv2DLayer(axial_ch,
                    name='axial_ch_conv3',
                    num_filters=40,
                    filter_size=3)),
                     name='axial_ch_prelu3')
    axial_ch = prelu(batch_norm(
        Conv2DLayer(axial_ch,
                    name='axial_ch_conv4',
                    num_filters=40,
                    filter_size=3)),
                     name='axial_ch_prelu4')
    axial_ch = MaxPool2DLayer(axial_ch, name='axial_max_pool_2', pool_size=2)
    axial_ch = prelu(batch_norm(
        Conv2DLayer(axial_ch,
                    name='axial_ch_conv5',
                    num_filters=60,
                    filter_size=3)),
                     name='axial_ch_prelu5')
    axial_ch = DropoutLayer(axial_ch, name='axial_l1drop', p=dropout_conv)
    axial_ch = DenseLayer(axial_ch, name='axial_d1', num_units=fc_conv)
    axial_ch = prelu(axial_ch, name='axial_prelu_d1')

    # --------------------------------------------------
    # channel_2: coronal
    # --------------------------------------------------

    coronal_ch = InputLayer(name='in2', shape=(None, num_channels, ps, ps))
    coronal_ch = prelu(batch_norm(
        Conv2DLayer(coronal_ch,
                    name='coronal_ch_conv1',
                    num_filters=20,
                    filter_size=3)),
                       name='coronal_ch_prelu1')
    coronal_ch = prelu(batch_norm(
        Conv2DLayer(coronal_ch,
                    name='coronal_ch_conv2',
                    num_filters=20,
                    filter_size=3)),
                       name='coronal_ch_prelu2')
    coronal_ch = MaxPool2DLayer(coronal_ch,
                                name='coronal_max_pool_1',
                                pool_size=2)
    coronal_ch = prelu(batch_norm(
        Conv2DLayer(coronal_ch,
                    name='coronal_ch_conv3',
                    num_filters=40,
                    filter_size=3)),
                       name='coronal_ch_prelu3')
    coronal_ch = prelu(batch_norm(
        Conv2DLayer(coronal_ch,
                    name='coronal_ch_conv4',
                    num_filters=40,
                    filter_size=3)),
                       name='coronal_ch_prelu4')
    coronal_ch = MaxPool2DLayer(coronal_ch,
                                name='coronal_max_pool_2',
                                pool_size=2)
    coronal_ch = prelu(batch_norm(
        Conv2DLayer(coronal_ch,
                    name='coronal_ch_conv5',
                    num_filters=60,
                    filter_size=3)),
                       name='coronal_ch_prelu5')
    coronal_ch = DropoutLayer(coronal_ch,
                              name='coronal_l1drop',
                              p=dropout_conv)
    coronal_ch = DenseLayer(coronal_ch, name='coronal_d1', num_units=fc_conv)
    coronal_ch = prelu(coronal_ch, name='coronal_prelu_d1')

    # --------------------------------------------------
    # channel_3: saggital
    # --------------------------------------------------

    saggital_ch = InputLayer(name='in3', shape=(None, num_channels, ps, ps))
    saggital_ch = prelu(batch_norm(
        Conv2DLayer(saggital_ch,
                    name='saggital_ch_conv1',
                    num_filters=20,
                    filter_size=3)),
                        name='saggital_ch_prelu1')
    saggital_ch = prelu(batch_norm(
        Conv2DLayer(saggital_ch,
                    name='saggital_ch_conv2',
                    num_filters=20,
                    filter_size=3)),
                        name='saggital_ch_prelu2')
    saggital_ch = MaxPool2DLayer(saggital_ch,
                                 name='saggital_max_pool_1',
                                 pool_size=2)
    saggital_ch = prelu(batch_norm(
        Conv2DLayer(saggital_ch,
                    name='saggital_ch_conv3',
                    num_filters=40,
                    filter_size=3)),
                        name='saggital_ch_prelu3')
    saggital_ch = prelu(batch_norm(
        Conv2DLayer(saggital_ch,
                    name='saggital_ch_conv4',
                    num_filters=40,
                    filter_size=3)),
                        name='saggital_ch_prelu4')
    saggital_ch = MaxPool2DLayer(saggital_ch,
                                 name='saggital_max_pool_2',
                                 pool_size=2)
    saggital_ch = prelu(batch_norm(
        Conv2DLayer(saggital_ch,
                    name='saggital_ch_conv5',
                    num_filters=60,
                    filter_size=3)),
                        name='saggital_ch_prelu5')
    saggital_ch = DropoutLayer(saggital_ch,
                               name='saggital_l1drop',
                               p=dropout_conv)
    saggital_ch = DenseLayer(saggital_ch,
                             name='saggital_d1',
                             num_units=fc_conv)
    saggital_ch = prelu(saggital_ch, name='saggital_prelu_d1')

    # FC layer 540
    layer = ConcatLayer(name='elem_channels',
                        incomings=[axial_ch, coronal_ch, saggital_ch])
    layer = DropoutLayer(layer, name='f1_drop', p=dropout_fc)
    layer = DenseLayer(layer, name='FC1', num_units=540)
    layer = prelu(layer, name='prelu_f1')

    # concatenate channels 540 + 15
    layer = DropoutLayer(layer, name='f2_drop', p=dropout_fc)
    atlas_layer = InputLayer(name='in4', shape=(None, 15))
    layer = ConcatLayer(name='elem_channels2', incomings=[layer, atlas_layer])

    # FC layer 270
    layer = DenseLayer(layer, name='fc_2', num_units=270)
    layer = prelu(layer, name='prelu_f2')

    # FC output 15 (softmax)
    net_layer = DenseLayer(layer,
                           name='out_layer',
                           num_units=15,
                           nonlinearity=softmax)

    net = NeuralNet(
        layers=net_layer,
        objective_loss_function=objectives.categorical_crossentropy,
        update=updates.adam,
        update_learning_rate=0.001,
        on_epoch_finished=[
            save_weights,
            save_training_history,
            early_stopping,
        ],
        verbose=t_verbose,
        max_epochs=num_epochs,
        train_split=TrainSplit(eval_size=train_split_perc),
    )

    if options['load_weights'] == 'True':
        try:
            print "    --> loading weights from ", net_weights
            net.load_params_from(net_weights)
        except IOError:
            pass

    return net
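
# Because build_model's net has four named InputLayers ('in1'..'in4'), nolearn
# accepts training data as a dict keyed by input-layer name. A hedged sketch;
# the patch/atlas arrays and y are hypothetical:
X = {
    'in1': axial_patches,     # float32, shape (N, 1, ps, ps)
    'in2': coronal_patches,   # float32, shape (N, 1, ps, ps)
    'in3': saggital_patches,  # float32, shape (N, 1, ps, ps)
    'in4': atlas_features,    # float32, shape (N, 15)
}
net = build_model('/tmp/weights', options)
net.fit(X, y)  # y: int32 labels in [0, 14], one per sample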
Example #6
def main():
    # (Assumed imports, not shown in this listing: os, numpy as np,
    # time.strftime; the lasagne dnn layers and nolearn.lasagne helpers used
    # below; and the project-local color_codes, load_patches, load_patch_batch
    # and load_nii utilities.)
    c = color_codes()
    patch_size = (15, 15, 15)
    dir_name = '/home/sergivalverde/w/CNN/images/CH16'
    patients = [
        f for f in sorted(os.listdir(dir_name))
        if os.path.isdir(os.path.join(dir_name, f))
    ]
    names = np.stack([
        name for name in
        [[
            os.path.join(dir_name, patient, 'FLAIR_preprocessed.nii.gz')
            for patient in patients
        ],
         [
             os.path.join(dir_name, patient, 'DP_preprocessed.nii.gz')
             for patient in patients
         ],
         [
             os.path.join(dir_name, patient, 'T2_preprocessed.nii.gz')
             for patient in patients
         ],
         [
             os.path.join(dir_name, patient, 'T1_preprocessed.nii.gz')
             for patient in patients
         ]] if name is not None
    ],
                     axis=1)
    seed = np.random.randint(np.iinfo(np.int32).max)
    ''' Here we create an initial net to find conflicting voxels '''
    print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
          '<Running iteration ' + c['b'] + '1>' + c['nc'])
    net_name = '/home/sergivalverde/w/CNN/code/CNN1/miccai_challenge2016/deep-challenge2016.init.'
    net = NeuralNet(
        layers=[
            (InputLayer, dict(name='in', shape=(None, 4, 15, 15, 15))),
            (Conv3DDNNLayer,
             dict(name='conv1_1',
                  num_filters=32,
                  filter_size=(5, 5, 5),
                  pad='same')),
            (Pool3DDNNLayer,
             dict(name='avgpool_1',
                  pool_size=2,
                  stride=2,
                  mode='average_inc_pad')),
            (Conv3DDNNLayer,
             dict(name='conv2_1',
                  num_filters=64,
                  filter_size=(5, 5, 5),
                  pad='same')),
            (Pool3DDNNLayer,
             dict(name='avgpool_2',
                  pool_size=2,
                  stride=2,
                  mode='average_inc_pad')),
            (DropoutLayer, dict(name='l2drop', p=0.5)),
            (DenseLayer, dict(name='l1', num_units=256)),
            (DenseLayer,
             dict(name='out', num_units=2,
                  nonlinearity=nonlinearities.softmax)),
        ],
        objective_loss_function=objectives.categorical_crossentropy,
        update=updates.adam,
        update_learning_rate=0.0001,
        on_epoch_finished=[
            SaveWeights(net_name + 'model_weights.pkl',
                        only_best=True,
                        pickle=False),
            SaveTrainingHistory(net_name + 'model_history.pkl'),
            PlotTrainingHistory(net_name + 'training_history.png'),
            EarlyStopping(patience=10)
        ],
        verbose=10,
        max_epochs=50,
        train_split=TrainSplit(eval_size=0.25),
        custom_scores=[('dsc', lambda p, t: 2 * np.sum(p * t[:, 1]) / np.sum(
            (p + t[:, 1])))],
    )

    try:
        net.load_params_from(net_name + 'model_weights.pkl')
    except IOError:
        print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
              'Loading the data for ' + c['b'] + 'iteration 1' + c['nc'])
        # Create the data
        (x, y, _) = load_patches(dir_name=dir_name,
                                 use_flair=True,
                                 use_pd=True,
                                 use_t2=True,
                                 use_t1=True,
                                 use_gado=False,
                                 flair_name='FLAIR_preprocessed.nii.gz',
                                 pd_name='DP_preprocessed.nii.gz',
                                 t2_name='T2_preprocessed.nii.gz',
                                 t1_name='T1_preprocessed.nii.gz',
                                 gado_name=None,
                                 mask_name='Consensus.nii.gz',
                                 size=patch_size)

        print('-- Permuting the data')
        np.random.seed(seed)
        x_train = np.random.permutation(
            np.concatenate(x).astype(dtype=np.float32))
        print('-- Permuting the labels')
        np.random.seed(seed)
        y_train = np.random.permutation(
            np.concatenate(y).astype(dtype=np.int32))
        # keep only the label of the (near-)central voxel of each patch
        y_train = y_train[:, y_train.shape[1] // 2 + 1,
                          y_train.shape[2] // 2 + 1, y_train.shape[3] // 2 + 1]
        print('-- Training vector shape = (' +
              ','.join([str(length) for length in x_train.shape]) + ')')
        print('-- Training labels shape = (' +
              ','.join([str(length) for length in y_train.shape]) + ')')

        print c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +\
            'Training (' + c['b'] + 'initial' + c['nc'] + c['g'] + ')' + c['nc']
        # We try to get the last weights to keep improving the net over and over
        net.fit(x_train, y_train)
    ''' Here we get the seeds '''
    print c['c'] + '[' + strftime(
        "%H:%M:%S") + '] ' + c['g'] + '<Looking for seeds>' + c['nc']
    for patient in names:
        output_name = os.path.join('/'.join(patient[0].rsplit('/')[:-1]),
                                   'test.iter1.nii.gz')
        try:
            load_nii(output_name)
            print c['c'] + '[' + strftime("%H:%M:%S") + '] ' \
                + c['g'] + '-- Patient ' + patient[0].rsplit('/')[-2] + ' already done' + c['nc']
        except IOError:
            print c['c'] + '[' + strftime("%H:%M:%S") + '] '\
                  + c['g'] + '-- Testing with patient ' + c['b'] + patient[0].rsplit('/')[-2] + c['nc']
            image_nii = load_nii(patient[0])
            image = np.zeros_like(image_nii.get_data())
            for batch, centers in load_patch_batch(patient, 100000,
                                                   patch_size):
                y_pred = net.predict_proba(batch)
                [x, y, z] = np.stack(centers, axis=1)
                image[x, y, z] = y_pred[:, 1]

            print c['g'] + '-- Saving image ' + c['b'] + output_name + c['nc']
            image_nii.get_data()[:] = image
            image_nii.to_filename(output_name)
    ''' Here we perform the last iteration '''
    print c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c[
        'g'] + '<Running iteration ' + c['b'] + '2>' + c['nc']
    net_name = '/home/sergivalverde/w/CNN/code/CNN1/miccai_challenge2016/deep-challenge2016.final.'
    net = NeuralNet(
        layers=[
            (InputLayer, dict(name='in', shape=(None, 4, 15, 15, 15))),
            (Conv3DDNNLayer,
             dict(name='conv1_1',
                  num_filters=32,
                  filter_size=(5, 5, 5),
                  pad='same')),
            (Pool3DDNNLayer,
             dict(name='avgpool_1',
                  pool_size=2,
                  stride=2,
                  mode='average_inc_pad')),
            (Conv3DDNNLayer,
             dict(name='conv2_1',
                  num_filters=64,
                  filter_size=(5, 5, 5),
                  pad='same')),
            (Pool3DDNNLayer,
             dict(name='avgpool_2',
                  pool_size=2,
                  stride=2,
                  mode='average_inc_pad')),
            (DropoutLayer, dict(name='l2drop', p=0.5)),
            (DenseLayer, dict(name='l1', num_units=256)),
            (DenseLayer,
             dict(name='out', num_units=2,
                  nonlinearity=nonlinearities.softmax)),
        ],
        objective_loss_function=objectives.categorical_crossentropy,
        update=updates.adam,
        update_learning_rate=0.0001,
        on_epoch_finished=[
            SaveWeights(net_name + 'model_weights.pkl',
                        only_best=True,
                        pickle=False),
            SaveTrainingHistory(net_name + 'model_history.pkl'),
            PlotTrainingHistory(net_name + 'training_history.png'),
        ],
        batch_iterator_train=BatchIterator(batch_size=4096),
        verbose=10,
        max_epochs=2000,
        train_split=TrainSplit(eval_size=0.25),
        custom_scores=[('dsc', lambda p, t: 2 * np.sum(p * t[:, 1]) / np.sum(
            (p + t[:, 1])))],
    )

    try:
        net.load_params_from(net_name + 'model_weights.pkl')
    except IOError:
        pass
    print c['c'] + '[' + strftime("%H:%M:%S") + '] '\
        + c['g'] + 'Loading the data for ' + c['b'] + 'iteration 2' + c['nc']
    (x, y,
     names) = load_patches(dir_name='/home/sergivalverde/w/CNN/images/CH16',
                           use_flair=True,
                           use_pd=True,
                           use_t2=True,
                           use_t1=True,
                           use_gado=False,
                           flair_name='FLAIR_preprocessed.nii.gz',
                           pd_name='DP_preprocessed.nii.gz',
                           t2_name='T2_preprocessed.nii.gz',
                           gado_name=None,
                           t1_name='T1_preprocessed.nii.gz',
                           mask_name='Consensus.nii.gz',
                           size=patch_size,
                           roi_name='test.iter1.nii.gz')

    print '-- Permuting the data'
    np.random.seed(seed)
    x_train = np.random.permutation(np.concatenate(x).astype(dtype=np.float32))
    print '-- Permuting the labels'
    np.random.seed(seed)
    y_train = np.random.permutation(np.concatenate(y).astype(dtype=np.int32))
    # keep only the label of the (near-)central voxel of each patch
    y_train = y_train[:, y_train.shape[1] // 2 + 1, y_train.shape[2] // 2 + 1,
                      y_train.shape[3] // 2 + 1]
    print '-- Training vector shape = (' + ','.join(
        [str(length) for length in x_train.shape]) + ')'
    print '-- Training labels shape = (' + ','.join(
        [str(length) for length in y_train.shape]) + ')'
    print c['c'] + '[' + strftime("%H:%M:%S") + '] '\
        + c['g'] + 'Training (' + c['b'] + 'final' + c['nc'] + c['g'] + ')' + c['nc']
    net.fit(x_train, y_train)
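
# The 'dsc' custom score used by both nets above is a soft Dice coefficient
# between the predicted probability of the lesion class and the one-hot
# targets. A standalone equivalent of that lambda, for clarity:
def soft_dice(p, t):
    # p: predicted probability of class 1, shape (N,)
    # t: one-hot targets, shape (N, 2); t[:, 1] marks the lesion class
    return 2 * np.sum(p * t[:, 1]) / np.sum(p + t[:, 1])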