Example #1
File: nn.py  Project: bmelab513/teamOo-CPU
def create_net(config, **kwargs):
    args = {
        'layers': config.layers,
        'batch_iterator_train': iterator.ResampleIterator(
            config, batch_size=config.get('batch_size_train')),
        'batch_iterator_test': iterator.SharedIterator(
            config, deterministic=True, 
            batch_size=config.get('batch_size_test')),
        'on_epoch_finished': [
            Schedule('update_learning_rate', config.get('schedule'),
                     weights_file=config.final_weights_file),
            SaveBestWeights(weights_file=config.weights_file, 
                            loss='kappa', greater_is_better=True),
            SaveWeights(config.weights_epoch, every_n_epochs=5),
            SaveWeights(config.weights_best, every_n_epochs=1, only_best=True),
        ],
        'objective': get_objective(),
        'use_label_encoder': False,
        'eval_size': 0.1,
        'regression': True,
        'max_epochs': 200,
        'verbose': 1,
        'update_learning_rate': theano.shared(
            util.float32(config.get('schedule')[0])),
        'update': nesterov_momentum,
        'update_momentum': 0.9,
        'custom_score': ('kappa', util.kappa),
    }
    args.update(kwargs)
    net = Net(**args)
    return net
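A minimal usage sketch for create_net (not part of the original file), assuming a hypothetical config object; the loader call and attribute names below merely mirror what the function reads above and are not taken from the project:

# Hypothetical driver for create_net. `config` must expose .layers,
# .final_weights_file, .weights_file, .weights_epoch, .weights_best and a
# .get() accessor for 'batch_size_train', 'batch_size_test' and 'schedule'.
config = util.load_config('configs/example.py')  # assumed loader, not the real API
net = create_net(config, max_epochs=100)         # **kwargs override the defaults
net.fit(X_train, y_train)                        # training arrays assumed in scope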
def cascade_model(options):
    """
    3D cascade model using nolearn and Lasagne

    Inputs:
    - options: dictionary of model hyper-parameters, including
      'weight_paths', the folder where the network weights are saved

    Output:
    - nets: list of NeuralNets [CNN1, CNN2]
    """

    # model options
    channels = len(options['modalities'])
    train_split_perc = options['train_split']
    num_epochs = options['max_epochs']
    max_epochs_patience = options['patience']

    # create an experiment folder to organize and re-use the saved models
    if not os.path.exists(
            os.path.join(options['weight_paths'], options['experiment'])):
        os.mkdir(os.path.join(options['weight_paths'], options['experiment']))
    if not os.path.exists(
            os.path.join(options['weight_paths'], options['experiment'],
                         'nets')):
        os.mkdir(
            os.path.join(options['weight_paths'], options['experiment'],
                         'nets'))

    # --------------------------------------------------
    # first model
    # --------------------------------------------------

    layer1 = InputLayer(name='in1',
                        shape=(None, channels) + options['patch_size'])
    layer1 = batch_norm(Conv3DLayer(layer1,
                                    name='conv1_1',
                                    num_filters=32,
                                    filter_size=3,
                                    pad='same'),
                        name='BN1')
    layer1 = Pool3DLayer(layer1,
                         name='avgpool_1',
                         mode='max',
                         pool_size=2,
                         stride=2)
    layer1 = batch_norm(Conv3DLayer(layer1,
                                    name='conv2_1',
                                    num_filters=64,
                                    filter_size=3,
                                    pad='same'),
                        name='BN2')
    layer1 = Pool3DLayer(layer1,
                         name='avgpool_2',
                         mode='max',
                         pool_size=2,
                         stride=2)
    layer1 = DropoutLayer(layer1, name='l2drop', p=0.5)
    layer1 = DenseLayer(layer1, name='d_1', num_units=256)
    layer1 = DenseLayer(layer1,
                        name='out',
                        num_units=2,
                        nonlinearity=nonlinearities.softmax)

    # paths for the saved weights and training history
    net_model = 'model_1'
    net_weights = os.path.join(options['weight_paths'], options['experiment'],
                               'nets', net_model + '.pkl')
    net_history = os.path.join(options['weight_paths'], options['experiment'],
                               'nets', net_model + '_history.pkl')

    net1 = NeuralNet(
        layers=layer1,
        objective_loss_function=objectives.categorical_crossentropy,
        batch_iterator_train=Rotate_batch_Iterator(batch_size=128),
        update=updates.adadelta,
        on_epoch_finished=[
            SaveWeights(net_weights, only_best=True, pickle=False),
            SaveTrainingHistory(net_history),
            EarlyStopping(patience=max_epochs_patience)
        ],
        verbose=options['net_verbose'],
        max_epochs=num_epochs,
        train_split=TrainSplit(eval_size=train_split_perc),
    )

    # --------------------------------------------------
    # second model
    # --------------------------------------------------

    layer2 = InputLayer(name='in2',
                        shape=(None, channels) + options['patch_size'])
    layer2 = batch_norm(Conv3DLayer(layer2,
                                    name='conv1_1',
                                    num_filters=32,
                                    filter_size=3,
                                    pad='same'),
                        name='BN1')
    layer2 = Pool3DLayer(layer2,
                         name='avgpool_1',
                         mode='max',
                         pool_size=2,
                         stride=2)
    layer2 = batch_norm(Conv3DLayer(layer2,
                                    name='conv2_1',
                                    num_filters=64,
                                    filter_size=3,
                                    pad='same'),
                        name='BN2')
    layer2 = Pool3DLayer(layer2,
                         name='avgpool_2',
                         mode='max',
                         pool_size=2,
                         stride=2)
    layer2 = DropoutLayer(layer2, name='l2drop', p=0.5)
    layer2 = DenseLayer(layer2, name='d_1', num_units=256)
    layer2 = DenseLayer(layer2,
                        name='out',
                        num_units=2,
                        nonlinearity=nonlinearities.softmax)

    # paths for the saved weights and training history
    net_model = 'model_2'
    net_weights2 = os.path.join(options['weight_paths'], options['experiment'],
                                'nets', net_model + '.pkl')
    net_history2 = os.path.join(options['weight_paths'], options['experiment'],
                                'nets', net_model + '_history.pkl')

    net2 = NeuralNet(
        layers=layer2,
        objective_loss_function=objectives.categorical_crossentropy,
        batch_iterator_train=Rotate_batch_Iterator(batch_size=128),
        update=updates.adadelta,
        on_epoch_finished=[
            SaveWeights(net_weights2, only_best=True, pickle=False),
            SaveTrainingHistory(net_history2),
            EarlyStopping(patience=max_epochs_patience)
        ],
        verbose=options['net_verbose'],
        max_epochs=num_epochs,
        train_split=TrainSplit(eval_size=train_split_perc),
    )

    return [net1, net2]
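A sketch of how cascade_model might be driven, with made-up option values (the keys mirror those read inside the function). In a cascade, the second CNN is usually trained on patches the first CNN misclassifies; that resampling step is elided here:

# Hypothetical options -- every key below is read inside cascade_model.
options = {
    'modalities': ['FLAIR', 'T1'],   # -> 2 input channels
    'train_split': 0.25,
    'max_epochs': 200,
    'patience': 25,
    'weight_paths': '/tmp/weights',
    'experiment': 'cascade_demo',
    'patch_size': (11, 11, 11),
    'net_verbose': 1,
}
net1, net2 = cascade_model(options)
net1.fit(x_train, y_train)   # x_train: (n, 2, 11, 11, 11) float32, assumed loaded
# x_hard, y_hard would be patches misclassified by net1 (resampling not shown)
net2.fit(x_hard, y_hard)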
Example #3
train_iterator_kwargs = dict(
    batch_size=batch_size,
    flip_horizontal_p=0.5,
    flip_vertical_p=0.5,
    affine_p=1.,
    affine_scale_choices=np.linspace(0.5, 1.5, 11),
    # affine_shear_choices=np.linspace(-0.5, 0.5, 11),
    affine_translation_choices=np.arange(-64, 64, 1),
    # affine_rotation_choices=np.arange(0, 360, 1),
    adjust_gamma_p=0.5,
    adjust_gamma_choices=np.linspace(0.5, 1.5, 11))
train_iterator = TrainIterator(**train_iterator_kwargs)

test_iterator_kwargs = dict(batch_size=batch_size)
test_iterator = TestIterator(**test_iterator_kwargs)

save_weights = SaveWeights(model_fname, only_best=True, pickle=False)
save_training_history = SaveTrainingHistory(model_history_fname)
plot_training_history = PlotTrainingHistory(model_graph_fname)
early_stopping = EarlyStopping(patience=100)

conv_kwargs = dict(pad='same',
                   nonlinearity=nn.nonlinearities.very_leaky_rectify)

pool_kwargs = dict(pool_size=2)

l = nn.layers.InputLayer(name='in', shape=(None, 3, image_size, image_size))

# 256
l = conv2dbn(l,
             name='l1c1',
             num_filters=32,
Example #4
train_iterator_kwargs = {
    'affine_scale_choices': np.linspace(0.85, 1.15, 9),
    'affine_translation_choices': np.arange(-12, 12, 1),
    'affine_rotation_choices': np.arange(-45, 50, 2.5),
}
train_iterator = TrainIterator(**train_iterator_kwargs)

test_iterator_kwargs = {
    'read_image_size': (image_size, image_size),
    'read_image_as_gray': False,
    'read_image_prefix_path': '',
    'batch_size': batch_size,
    'buffer_size': 2,
}
test_iterator = TestIterator(**test_iterator_kwargs)

save_weights = SaveWeights('model_weights.pkl', only_best=True, pickle=False)
save_training_history = SaveTrainingHistory('model_history.pkl')
plot_training_history = PlotTrainingHistory('training_history.png')


# TODO: implement early stopping, e.g.:
# early_stopping = EarlyStopping(metrics='valid_accuracy', patience=100,
#                                verbose=True, higher_is_better=True)
def save_to_json(nn, training_history):
    train_hist = []
    for row in training_history:
        new_row = {}
        new_row["epoch"] = row["epoch"]
        new_row["train_loss"] = row["train_loss"]
        new_row["valid_loss"] = row["valid_loss"]
        new_row["valid_accuracy"] = row["valid_accuracy"]
        new_row["max_epochs"] = nn.max_epochs
        new_row["estimate"] = (nn.max_epochs - row["epoch"]) * row["dur"]
Example #5
train_iterator_kwargs = {
    'affine_translation_choices': np.arange(-3, 4, 1),
    'affine_rotation_choices': np.arange(-45, 50, 5)
}
train_iterator = TrainIterator(**train_iterator_kwargs)

test_iterator_kwargs = {
    'buffer_size': 5,
    'batch_size': batch_size,
    'read_image_size': (image_size, image_size),
    'read_image_as_gray': False,
    'read_image_prefix_path': './examples/cifar10/data/train/',
}
test_iterator = TestIterator(**test_iterator_kwargs)

save_weights = SaveWeights('./examples/cifar10/model_weights.pkl',
                           only_best=True,
                           pickle=False)
save_training_history = SaveTrainingHistory(
    './examples/cifar10/model_history.pkl')
plot_training_history = PlotTrainingHistory(
    './examples/cifar10/training_history.png')

net = NeuralNet(
    layers=[
        (InputLayer, dict(name='in', shape=(None, 3, image_size, image_size))),
        (Conv2DDNNLayer,
         dict(name='l1c1', num_filters=16, filter_size=(3, 3), pad='same')),
        (Conv2DDNNLayer,
         dict(name='l1c2', num_filters=16, filter_size=(3, 3), pad='same')),
        (Conv2DDNNLayer,
         dict(name='l1c3', num_filters=32, filter_size=(3, 3), pad='same')),
def build_model(weights_path, options):
    """
    Build the CNN model. Create the NeuralNet object and return it.

    Inputs:
    - weights_path: path where the network weights are saved
    - options: several hyper-parameters used to configure the net

    Output:
    - net: a NeuralNet object
    """

    net_model_name = options['experiment']

    try:
        os.mkdir(os.path.join(weights_path, net_model_name))
    except OSError:
        # the experiment folder already exists
        pass

    net_weights = os.path.join(weights_path, net_model_name,
                               net_model_name + '.pkl')
    net_history = os.path.join(weights_path, net_model_name,
                               net_model_name + '_history.pkl')

    # select hyper-parameters
    t_verbose = options['net_verbose']
    train_split_perc = options['train_split']
    num_epochs = options['max_epochs']
    max_epochs_patience = options['patience']
    early_stopping = EarlyStopping(patience=max_epochs_patience)
    save_weights = SaveWeights(net_weights, only_best=True, pickle=False)
    save_training_history = SaveTrainingHistory(net_history)

    # build the architecture
    ps = options['patch_size'][0]
    num_channels = 1
    fc_conv = 180
    fc_fc = 180
    dropout_conv = 0.5
    dropout_fc = 0.5

    # --------------------------------------------------
    # channel_1: axial
    # --------------------------------------------------

    axial_ch = InputLayer(name='in1', shape=(None, num_channels, ps, ps))
    axial_ch = prelu(batch_norm(
        Conv2DLayer(axial_ch,
                    name='axial_ch_conv1',
                    num_filters=20,
                    filter_size=3)),
                     name='axial_ch_prelu1')
    axial_ch = prelu(batch_norm(
        Conv2DLayer(axial_ch,
                    name='axial_ch_conv2',
                    num_filters=20,
                    filter_size=3)),
                     name='axial_ch_prelu2')
    axial_ch = MaxPool2DLayer(axial_ch, name='axial_max_pool_1', pool_size=2)
    axial_ch = prelu(batch_norm(
        Conv2DLayer(axial_ch,
                    name='axial_ch_conv3',
                    num_filters=40,
                    filter_size=3)),
                     name='axial_ch_prelu3')
    axial_ch = prelu(batch_norm(
        Conv2DLayer(axial_ch,
                    name='axial_ch_conv4',
                    num_filters=40,
                    filter_size=3)),
                     name='axial_ch_prelu4')
    axial_ch = MaxPool2DLayer(axial_ch, name='axial_max_pool_2', pool_size=2)
    axial_ch = prelu(batch_norm(
        Conv2DLayer(axial_ch,
                    name='axial_ch_conv5',
                    num_filters=60,
                    filter_size=3)),
                     name='axial_ch_prelu5')
    axial_ch = DropoutLayer(axial_ch, name='axial_l1drop', p=dropout_conv)
    axial_ch = DenseLayer(axial_ch, name='axial_d1', num_units=fc_conv)
    axial_ch = prelu(axial_ch, name='axial_prelu_d1')

    # --------------------------------------------------
    # channel_2: coronal
    # --------------------------------------------------

    coronal_ch = InputLayer(name='in2', shape=(None, num_channels, ps, ps))
    coronal_ch = prelu(batch_norm(
        Conv2DLayer(coronal_ch,
                    name='coronal_ch_conv1',
                    num_filters=20,
                    filter_size=3)),
                       name='coronal_ch_prelu1')
    coronal_ch = prelu(batch_norm(
        Conv2DLayer(coronal_ch,
                    name='coronal_ch_conv2',
                    num_filters=20,
                    filter_size=3)),
                       name='coronal_ch_prelu2')
    coronal_ch = MaxPool2DLayer(coronal_ch,
                                name='coronal_max_pool_1',
                                pool_size=2)
    coronal_ch = prelu(batch_norm(
        Conv2DLayer(coronal_ch,
                    name='coronal_ch_conv3',
                    num_filters=40,
                    filter_size=3)),
                       name='coronal_ch_prelu3')
    coronal_ch = prelu(batch_norm(
        Conv2DLayer(coronal_ch,
                    name='coronal_ch_conv4',
                    num_filters=40,
                    filter_size=3)),
                       name='coronal_ch_prelu4')
    coronal_ch = MaxPool2DLayer(coronal_ch,
                                name='coronal_max_pool_2',
                                pool_size=2)
    coronal_ch = prelu(batch_norm(
        Conv2DLayer(coronal_ch,
                    name='coronal_ch_conv5',
                    num_filters=60,
                    filter_size=3)),
                       name='coronal_ch_prelu5')
    coronal_ch = DropoutLayer(coronal_ch,
                              name='coronal_l1drop',
                              p=dropout_conv)
    coronal_ch = DenseLayer(coronal_ch, name='coronal_d1', num_units=fc_conv)
    coronal_ch = prelu(coronal_ch, name='coronal_prelu_d1')

    # --------------------------------------------------
    # channel_3: sagittal
    # --------------------------------------------------

    saggital_ch = InputLayer(name='in3', shape=(None, num_channels, ps, ps))
    saggital_ch = prelu(batch_norm(
        Conv2DLayer(saggital_ch,
                    name='saggital_ch_conv1',
                    num_filters=20,
                    filter_size=3)),
                        name='saggital_ch_prelu1')
    saggital_ch = prelu(batch_norm(
        Conv2DLayer(saggital_ch,
                    name='saggital_ch_conv2',
                    num_filters=20,
                    filter_size=3)),
                        name='saggital_ch_prelu2')
    saggital_ch = MaxPool2DLayer(saggital_ch,
                                 name='saggital_max_pool_1',
                                 pool_size=2)
    saggital_ch = prelu(batch_norm(
        Conv2DLayer(saggital_ch,
                    name='saggital_ch_conv3',
                    num_filters=40,
                    filter_size=3)),
                        name='saggital_ch_prelu3')
    saggital_ch = prelu(batch_norm(
        Conv2DLayer(saggital_ch,
                    name='saggital_ch_conv4',
                    num_filters=40,
                    filter_size=3)),
                        name='saggital_ch_prelu4')
    saggital_ch = MaxPool2DLayer(saggital_ch,
                                 name='saggital_max_pool_2',
                                 pool_size=2)
    saggital_ch = prelu(batch_norm(
        Conv2DLayer(saggital_ch,
                    name='saggital_ch_conv5',
                    num_filters=60,
                    filter_size=3)),
                        name='saggital_ch_prelu5')
    saggital_ch = DropoutLayer(saggital_ch,
                               name='saggital_l1drop',
                               p=dropout_conv)
    saggital_ch = DenseLayer(saggital_ch,
                             name='saggital_d1',
                             num_units=fc_conv)
    saggital_ch = prelu(saggital_ch, name='saggital_prelu_d1')

    # FC layer 540
    layer = ConcatLayer(name='elem_channels',
                        incomings=[axial_ch, coronal_ch, saggital_ch])
    layer = DropoutLayer(layer, name='f1_drop', p=dropout_fc)
    layer = DenseLayer(layer, name='FC1', num_units=540)
    layer = prelu(layer, name='prelu_f1')

    # concatenate channels 540 + 15
    layer = DropoutLayer(layer, name='f2_drop', p=dropout_fc)
    # atlas priors: a 15-unit input concatenated with the image features
    # (the original also built a dropout-wrapped copy of this input that was
    # immediately overwritten and never used; that dead code is removed here)
    atlas_layer = InputLayer(name='in4', shape=(None, 15))
    layer = ConcatLayer(name='elem_channels2', incomings=[layer, atlas_layer])

    # FC layer 270
    layer = DenseLayer(layer, name='fc_2', num_units=270)
    layer = prelu(layer, name='prelu_f2')

    # FC output 15 (softmax)
    net_layer = DenseLayer(layer,
                           name='out_layer',
                           num_units=15,
                           nonlinearity=softmax)

    net = NeuralNet(
        layers=net_layer,
        objective_loss_function=objectives.categorical_crossentropy,
        update=updates.adam,
        update_learning_rate=0.001,
        on_epoch_finished=[
            save_weights,
            save_training_history,
            early_stopping,
        ],
        verbose=t_verbose,
        max_epochs=num_epochs,
        train_split=TrainSplit(eval_size=train_split_perc),
    )

    if options['load_weights'] == 'True':
        try:
            print("    --> loading weights from " + net_weights)
            net.load_params_from(net_weights)
        except IOError:
            # no previous weights to load
            pass

    return net
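A sketch of how the returned multi-input net might be trained; nolearn accepts a dict mapping InputLayer names to arrays when a network has several inputs, and the shapes below mirror the layer definitions above (all option values are hypothetical and all arrays are assumed loaded elsewhere):

# Hypothetical option values -- the keys mirror those read in build_model.
options = {'experiment': 'three_plane_cnn', 'net_verbose': 1,
           'train_split': 0.25, 'max_epochs': 200, 'patience': 25,
           'patch_size': (32, 32), 'load_weights': 'False'}
net = build_model('/tmp/weights', options)

X = {'in1': x_axial,     # (n, 1, ps, ps) float32 patches
     'in2': x_coronal,   # same shape, coronal plane
     'in3': x_sagittal,  # same shape, sagittal plane
     'in4': x_atlas}     # (n, 15) atlas priors
net.fit(X, y)            # y: int32 labels in [0, 15)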
Example #7
def main():
    c = color_codes()
    patch_size = (15, 15, 15)
    dir_name = '/home/sergivalverde/w/CNN/images/CH16'
    patients = [
        f for f in sorted(os.listdir(dir_name))
        if os.path.isdir(os.path.join(dir_name, f))
    ]
    names = np.stack([
        name for name in
        [[
            os.path.join(dir_name, patient, 'FLAIR_preprocessed.nii.gz')
            for patient in patients
        ],
         [
             os.path.join(dir_name, patient, 'DP_preprocessed.nii.gz')
             for patient in patients
         ],
         [
             os.path.join(dir_name, patient, 'T2_preprocessed.nii.gz')
             for patient in patients
         ],
         [
             os.path.join(dir_name, patient, 'T1_preprocessed.nii.gz')
             for patient in patients
         ]] if name is not None
    ],
                     axis=1)
    seed = np.random.randint(np.iinfo(np.int32).max)
    ''' Here we create an initial net to find conflicting voxels '''
    print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
          '<Running iteration ' + c['b'] + '1>' + c['nc'])
    net_name = '/home/sergivalverde/w/CNN/code/CNN1/miccai_challenge2016/deep-challenge2016.init.'
    net = NeuralNet(
        layers=[
            (InputLayer, dict(name='in', shape=(None, 4, 15, 15, 15))),
            (Conv3DDNNLayer,
             dict(name='conv1_1',
                  num_filters=32,
                  filter_size=(5, 5, 5),
                  pad='same')),
            (Pool3DDNNLayer,
             dict(name='avgpool_1',
                  pool_size=2,
                  stride=2,
                  mode='average_inc_pad')),
            (Conv3DDNNLayer,
             dict(name='conv2_1',
                  num_filters=64,
                  filter_size=(5, 5, 5),
                  pad='same')),
            (Pool3DDNNLayer,
             dict(name='avgpool_2',
                  pool_size=2,
                  stride=2,
                  mode='average_inc_pad')),
            (DropoutLayer, dict(name='l2drop', p=0.5)),
            (DenseLayer, dict(name='l1', num_units=256)),
            (DenseLayer,
             dict(name='out', num_units=2,
                  nonlinearity=nonlinearities.softmax)),
        ],
        objective_loss_function=objectives.categorical_crossentropy,
        update=updates.adam,
        update_learning_rate=0.0001,
        on_epoch_finished=[
            SaveWeights(net_name + 'model_weights.pkl',
                        only_best=True,
                        pickle=False),
            SaveTrainingHistory(net_name + 'model_history.pkl'),
            PlotTrainingHistory(net_name + 'training_history.png'),
            EarlyStopping(patience=10)
        ],
        verbose=10,
        max_epochs=50,
        train_split=TrainSplit(eval_size=0.25),
        custom_scores=[('dsc', lambda p, t: 2 * np.sum(p * t[:, 1]) / np.sum(
            (p + t[:, 1])))],
    )

    try:
        net.load_params_from(net_name + 'model_weights.pkl')
    except IOError:
        print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
              'Loading the data for ' + c['b'] + 'iteration 1' + c['nc'])
        # Create the data
        (x, y, _) = load_patches(dir_name=dir_name,
                                 use_flair=True,
                                 use_pd=True,
                                 use_t2=True,
                                 use_t1=True,
                                 use_gado=False,
                                 flair_name='FLAIR_preprocessed.nii.gz',
                                 pd_name='DP_preprocessed.nii.gz',
                                 t2_name='T2_preprocessed.nii.gz',
                                 t1_name='T1_preprocessed.nii.gz',
                                 gado_name=None,
                                 mask_name='Consensus.nii.gz',
                                 size=patch_size)

        print('-- Permuting the data')
        np.random.seed(seed)
        x_train = np.random.permutation(
            np.concatenate(x).astype(dtype=np.float32))
        print('-- Permuting the labels')
        np.random.seed(seed)
        y_train = np.random.permutation(
            np.concatenate(y).astype(dtype=np.int32))
        y_train = y_train[:, y_train.shape[1] // 2 + 1,
                          y_train.shape[2] // 2 + 1, y_train.shape[3] // 2 + 1]
        print('-- Training vector shape = (' +
              ','.join([str(length) for length in x_train.shape]) + ')')
        print('-- Training labels shape = (' +
              ','.join([str(length) for length in y_train.shape]) + ')')

        print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
              'Training (' + c['b'] + 'initial' + c['nc'] + c['g'] + ')' +
              c['nc'])
        # We try to get the last weights to keep improving the net over and over
        net.fit(x_train, y_train)
    ''' Here we get the seeds '''
    print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
          '<Looking for seeds>' + c['nc'])
    for patient in names:
        output_name = os.path.join('/'.join(patient[0].rsplit('/')[:-1]),
                                   'test.iter1.nii.gz')
        try:
            load_nii(output_name)
            print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
                  '-- Patient ' + patient[0].rsplit('/')[-2] +
                  ' already done' + c['nc'])
        except IOError:
            print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
                  '-- Testing with patient ' + c['b'] +
                  patient[0].rsplit('/')[-2] + c['nc'])
            image_nii = load_nii(patient[0])
            image = np.zeros_like(image_nii.get_data())
            for batch, centers in load_patch_batch(patient, 100000,
                                                   patch_size):
                y_pred = net.predict_proba(batch)
                [x, y, z] = np.stack(centers, axis=1)
                image[x, y, z] = y_pred[:, 1]

            print(c['g'] + '-- Saving image ' + c['b'] + output_name + c['nc'])
            image_nii.get_data()[:] = image
            image_nii.to_filename(output_name)
    ''' Here we perform the last iteration '''
    print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
          '<Running iteration ' + c['b'] + '2>' + c['nc'])
    net_name = '/home/sergivalverde/w/CNN/code/CNN1/miccai_challenge2016/deep-challenge2016.final.'
    net = NeuralNet(
        layers=[
            (InputLayer, dict(name='in', shape=(None, 4, 15, 15, 15))),
            (Conv3DDNNLayer,
             dict(name='conv1_1',
                  num_filters=32,
                  filter_size=(5, 5, 5),
                  pad='same')),
            (Pool3DDNNLayer,
             dict(name='avgpool_1',
                  pool_size=2,
                  stride=2,
                  mode='average_inc_pad')),
            (Conv3DDNNLayer,
             dict(name='conv2_1',
                  num_filters=64,
                  filter_size=(5, 5, 5),
                  pad='same')),
            (Pool3DDNNLayer,
             dict(name='avgpool_2',
                  pool_size=2,
                  stride=2,
                  mode='average_inc_pad')),
            (DropoutLayer, dict(name='l2drop', p=0.5)),
            (DenseLayer, dict(name='l1', num_units=256)),
            (DenseLayer,
             dict(name='out', num_units=2,
                  nonlinearity=nonlinearities.softmax)),
        ],
        objective_loss_function=objectives.categorical_crossentropy,
        update=updates.adam,
        update_learning_rate=0.0001,
        on_epoch_finished=[
            SaveWeights(net_name + 'model_weights.pkl',
                        only_best=True,
                        pickle=False),
            SaveTrainingHistory(net_name + 'model_history.pkl'),
            PlotTrainingHistory(net_name + 'training_history.png'),
        ],
        batch_iterator_train=BatchIterator(batch_size=4096),
        verbose=10,
        max_epochs=2000,
        train_split=TrainSplit(eval_size=0.25),
        custom_scores=[('dsc', lambda p, t: 2 * np.sum(p * t[:, 1]) / np.sum(
            (p + t[:, 1])))],
    )

    try:
        net.load_params_from(net_name + 'model_weights.pkl')
    except IOError:
        pass
    print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
          'Loading the data for ' + c['b'] + 'iteration 2' + c['nc'])
    (x, y,
     names) = load_patches(dir_name='/home/sergivalverde/w/CNN/images/CH16',
                           use_flair=True,
                           use_pd=True,
                           use_t2=True,
                           use_t1=True,
                           use_gado=False,
                           flair_name='FLAIR_preprocessed.nii.gz',
                           pd_name='DP_preprocessed.nii.gz',
                           t2_name='T2_preprocessed.nii.gz',
                           gado_name=None,
                           t1_name='T1_preprocessed.nii.gz',
                           mask_name='Consensus.nii.gz',
                           size=patch_size,
                           roi_name='test.iter1.nii.gz')

    print('-- Permuting the data')
    np.random.seed(seed)
    x_train = np.random.permutation(np.concatenate(x).astype(dtype=np.float32))
    print('-- Permuting the labels')
    np.random.seed(seed)
    y_train = np.random.permutation(np.concatenate(y).astype(dtype=np.int32))
    y_train = y_train[:, y_train.shape[1] // 2 + 1, y_train.shape[2] // 2 + 1,
                      y_train.shape[3] // 2 + 1]
    print('-- Training vector shape = (' +
          ','.join([str(length) for length in x_train.shape]) + ')')
    print('-- Training labels shape = (' +
          ','.join([str(length) for length in y_train.shape]) + ')')
    print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
          'Training (' + c['b'] + 'final' + c['nc'] + c['g'] + ')' + c['nc'])
    net.fit(x_train, y_train)
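The dsc entry passed to custom_scores above is a soft Dice coefficient between the positive-class probabilities and the one-hot targets. The same computation as a named function, with a tiny self-check (a sketch, assuming p is the positive-class probability vector handed to the scorer):

import numpy as np

def soft_dice(p, t):
    # p: positive-class probabilities, shape (n,)
    # t: one-hot targets, shape (n, 2); t[:, 1] is the positive class
    return 2 * np.sum(p * t[:, 1]) / np.sum(p + t[:, 1])

p = np.array([1.0, 0.0, 1.0])
t = np.array([[0., 1.], [1., 0.], [0., 1.]])
print(soft_dice(p, t))  # 1.0 -- perfect overlap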