Example 1
train_iterator_kwargs = dict(
    flip_vertical_p=0.5,
    affine_p=1.,
    affine_scale_choices=np.linspace(0.5, 1.5, 11),
    # affine_shear_choices=np.linspace(-0.5, 0.5, 11),
    affine_translation_choices=np.arange(-64, 64, 1),
    # affine_rotation_choices=np.arange(0, 360, 1),
    adjust_gamma_p=0.5,
    adjust_gamma_choices=np.linspace(0.5, 1.5, 11))
train_iterator = TrainIterator(**train_iterator_kwargs)

test_iterator_kwargs = dict(batch_size=batch_size)
test_iterator = TestIterator(**test_iterator_kwargs)

save_weights = SaveWeights(model_fname, only_best=True, pickle=False)
save_training_history = SaveTrainingHistory(model_history_fname)
plot_training_history = PlotTrainingHistory(model_graph_fname)
early_stopping = EarlyStopping(patience=100)

conv_kwargs = dict(pad='same',
                   nonlinearity=nn.nonlinearities.very_leaky_rectify)

pool_kwargs = dict(pool_size=2)

l = nn.layers.InputLayer(name='in', shape=(None, 3, image_size, image_size))

# 256
l = conv2dbn(l,
             name='l1c1',
             num_filters=32,
             filter_size=(7, 7),
             stride=2,
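The call breaks off here, and conv2dbn itself is not shown in the snippet. A minimal sketch of such a helper, assuming Lasagne's batch_norm wrapper (the signature is guessed from the call site; nn is the lasagne alias already used above):

def conv2dbn(incoming, name, **kwargs):
    # Hypothetical helper: 2-D convolution followed by batch normalization.
    # Extra kwargs (num_filters, filter_size, stride, pad, nonlinearity, ...)
    # are forwarded to Conv2DLayer.
    return nn.layers.batch_norm(
        nn.layers.Conv2DLayer(incoming, name=name, **kwargs))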
Example 2
train_iterator_kwargs = {
    'affine_rotation_choices': np.arange(-45, 50, 2.5),
}
train_iterator = TrainIterator(**train_iterator_kwargs)

test_iterator_kwargs = {
    'read_image_size': (image_size, image_size),
    'read_image_as_gray': False,
    'read_image_prefix_path': '',
    'batch_size': batch_size,
    'buffer_size': 2,
}
test_iterator = TestIterator(**test_iterator_kwargs)

save_weights = SaveWeights('model_weights.pkl', only_best=True, pickle=False)
save_training_history = SaveTrainingHistory('model_history.pkl')
plot_training_history = PlotTrainingHistory('training_history.png')


# TODO: implement early stopping
# early_stopping = EarlyStopping(metrics='valid_accuracy', patience=100,
#                                verbose=True, higher_is_better=True)
def save_to_json(nn, training_history):
    # Build a JSON-serializable summary of the training history
    # (`time` is assumed to be imported at module level).
    train_hist = []
    for row in training_history:
        new_row = {}
        new_row["epoch"] = row["epoch"]
        new_row["train_loss"] = row["train_loss"]
        new_row["valid_loss"] = row["valid_loss"]
        new_row["valid_accuracy"] = row["valid_accuracy"]
        new_row["max_epochs"] = nn.max_epochs
        new_row["estimate"] = (nn.max_epochs - row["epoch"]) * row["dur"]
        new_row["dur"] = row["dur"]
        new_row["t_next"] = int(row["dur"]) + time.time()
Example 3
train_iterator_kwargs = {
    'affine_rotation_choices': np.arange(-45, 50, 5),
}
train_iterator = TrainIterator(**train_iterator_kwargs)

test_iterator_kwargs = {
    'batch_size': batch_size,
    'buffer_size': 5,
}
test_iterator = TestIterator(**test_iterator_kwargs)

save_weights = SaveWeights('./examples/mnist/model_weights.pkl',
                           only_best=True,
                           pickle=False)
save_training_history = SaveTrainingHistory(
    './examples/mnist/model_history.pkl')
plot_training_history = PlotTrainingHistory(
    './examples/mnist/training_history.png')
early_stopping = EarlyStopping(metrics='valid_accuracy',
                               patience=100,
                               verbose=True,
                               higher_is_better=True)

net = NeuralNet(layers=[
    (InputLayer, dict(name='in', shape=(None, 1, image_size, image_size))),
    (Conv2DDNNLayer,
     dict(name='l1c1', num_filters=32, filter_size=(3, 3), pad='same')),
    (Conv2DDNNLayer,
     dict(name='l1c2', num_filters=32, filter_size=(3, 3), pad='same')),
    (MaxPool2DDNNLayer, dict(name='l1p', pool_size=3, stride=2)),
    (Conv2DDNNLayer,
     dict(name='l2c1', num_filters=32, filter_size=(3, 3), pad='same')),
    (Conv2DDNNLayer,
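The layer list breaks off here. On nolearn's API the callbacks built above would then be attached through the constructor, roughly as in this sketch (model_layers stands for the full (class, kwargs) list, and X_train/y_train are placeholders; this is not the original tail):

net = NeuralNet(
    layers=model_layers,
    on_epoch_finished=[save_weights, save_training_history,
                       plot_training_history, early_stopping],
    max_epochs=1000,
    verbose=1,
)
net.fit(X_train, y_train)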
Example 4
test_iterator_kwargs = {
    'buffer_size': 5,
    'batch_size': batch_size,
    'read_image_size': (image_size, image_size),
    'read_image_as_gray': False,
    'read_image_prefix_path': './examples/cifar10/data/train/',
}
test_iterator = TestIterator(**test_iterator_kwargs)

save_weights = SaveWeights('./examples/cifar10/model_weights.pkl',
                           only_best=True,
                           pickle=False)
save_training_history = SaveTrainingHistory(
    './examples/cifar10/model_history.pkl')
plot_training_history = PlotTrainingHistory(
    './examples/cifar10/training_history.png')

net = NeuralNet(
    layers=[
        (InputLayer, dict(name='in', shape=(None, 3, image_size, image_size))),
        (Conv2DDNNLayer,
         dict(name='l1c1', num_filters=16, filter_size=(3, 3), pad='same')),
        (Conv2DDNNLayer,
         dict(name='l1c2', num_filters=16, filter_size=(3, 3), pad='same')),
        (Conv2DDNNLayer,
         dict(name='l1c3', num_filters=32, filter_size=(3, 3), pad='same')),
        (Conv2DDNNLayer,
         dict(name='l1c4', num_filters=32, filter_size=(3, 3), pad='same')),
        (MaxPool2DDNNLayer, dict(name='l1p', pool_size=3, stride=2)),
        (Conv2DDNNLayer,
         dict(name='l2c1', num_filters=32, filter_size=(3, 3), pad='same')),
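Only the test-iterator setup survives at the top of this example. By analogy with Examples 1 and 2, the training counterpart plausibly mirrors these kwargs plus augmentation options, e.g. (assumed, not from the original):

train_iterator_kwargs = {
    'buffer_size': 5,
    'batch_size': batch_size,
    'read_image_size': (image_size, image_size),
    'read_image_as_gray': False,
    'read_image_prefix_path': './examples/cifar10/data/train/',
    # augmentation keys as seen in Examples 1 and 2:
    'flip_vertical_p': 0.5,
    'affine_p': 0.5,
    'affine_rotation_choices': np.arange(-45, 50, 5),
}
train_iterator = TrainIterator(**train_iterator_kwargs)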
Example 5
def main():
    c = color_codes()
    patch_size = (15, 15, 15)
    dir_name = '/home/sergivalverde/w/CNN/images/CH16'
    patients = [
        f for f in sorted(os.listdir(dir_name))
        if os.path.isdir(os.path.join(dir_name, f))
    ]
    names = np.stack([
        name for name in
        [[
            os.path.join(dir_name, patient, 'FLAIR_preprocessed.nii.gz')
            for patient in patients
        ],
         [
             os.path.join(dir_name, patient, 'DP_preprocessed.nii.gz')
             for patient in patients
         ],
         [
             os.path.join(dir_name, patient, 'T2_preprocessed.nii.gz')
             for patient in patients
         ],
         [
             os.path.join(dir_name, patient, 'T1_preprocessed.nii.gz')
             for patient in patients
         ]] if name is not None
    ],
                     axis=1)
    seed = np.random.randint(np.iinfo(np.int32).max)
    # Here we create an initial net to find conflicting voxels
    print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
          '<Running iteration ' + c['b'] + '1>' + c['nc'])
    net_name = '/home/sergivalverde/w/CNN/code/CNN1/miccai_challenge2016/deep-challenge2016.init.'
    net = NeuralNet(
        layers=[
            (InputLayer, dict(name='in', shape=(None, 4, 15, 15, 15))),
            (Conv3DDNNLayer,
             dict(name='conv1_1',
                  num_filters=32,
                  filter_size=(5, 5, 5),
                  pad='same')),
            (Pool3DDNNLayer,
             dict(name='avgpool_1',
                  pool_size=2,
                  stride=2,
                  mode='average_inc_pad')),
            (Conv3DDNNLayer,
             dict(name='conv2_1',
                  num_filters=64,
                  filter_size=(5, 5, 5),
                  pad='same')),
            (Pool3DDNNLayer,
             dict(name='avgpool_2',
                  pool_size=2,
                  stride=2,
                  mode='average_inc_pad')),
            (DropoutLayer, dict(name='l2drop', p=0.5)),
            (DenseLayer, dict(name='l1', num_units=256)),
            (DenseLayer,
             dict(name='out', num_units=2,
                  nonlinearity=nonlinearities.softmax)),
        ],
        objective_loss_function=objectives.categorical_crossentropy,
        update=updates.adam,
        update_learning_rate=0.0001,
        on_epoch_finished=[
            SaveWeights(net_name + 'model_weights.pkl',
                        only_best=True,
                        pickle=False),
            SaveTrainingHistory(net_name + 'model_history.pkl'),
            PlotTrainingHistory(net_name + 'training_history.png'),
            EarlyStopping(patience=10)
        ],
        verbose=10,
        max_epochs=50,
        train_split=TrainSplit(eval_size=0.25),
        custom_scores=[('dsc', lambda p, t: 2 * np.sum(p * t[:, 1]) / np.sum(
            (p + t[:, 1])))],
    )
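    # The 'dsc' score above is a soft Dice coefficient,
    # 2 * sum(p * t1) / (sum(p) + sum(t1)) with t1 = t[:, 1], the positive
    # (lesion) column. Toy check with assumed values: p = [0.8, 0.2, 0.6],
    # t1 = [1, 0, 1] gives 2 * 1.4 / 3.6 ~= 0.78.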

    try:
        net.load_params_from(net_name + 'model_weights.pkl')
    except IOError:
        print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
              'Loading the data for ' + c['b'] + 'iteration 1' + c['nc'])
        # Create the data
        (x, y, _) = load_patches(dir_name=dir_name,
                                 use_flair=True,
                                 use_pd=True,
                                 use_t2=True,
                                 use_t1=True,
                                 use_gado=False,
                                 flair_name='FLAIR_preprocessed.nii.gz',
                                 pd_name='DP_preprocessed.nii.gz',
                                 t2_name='T2_preprocessed.nii.gz',
                                 t1_name='T1_preprocessed.nii.gz',
                                 gado_name=None,
                                 mask_name='Consensus.nii.gz',
                                 size=patch_size)

        print('-- Permuting the data')
        # Re-seeding with the same value before each permutation shuffles
        # the data and the labels in the same order.
        np.random.seed(seed)
        x_train = np.random.permutation(
            np.concatenate(x).astype(dtype=np.float32))
        print('-- Permuting the labels')
        np.random.seed(seed)
        y_train = np.random.permutation(
            np.concatenate(y).astype(dtype=np.int32))
        # Keep only the label at the (near-)central voxel of each patch
        y_train = y_train[:, y_train.shape[1] // 2 + 1,
                          y_train.shape[2] // 2 + 1, y_train.shape[3] // 2 + 1]
        print('-- Training vector shape = (' +
              ','.join([str(length) for length in x_train.shape]) + ')')
        print('-- Training labels shape = (' +
              ','.join([str(length) for length in y_train.shape]) + ')')

        print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
              'Training (' + c['b'] + 'initial' + c['nc'] + c['g'] + ')' +
              c['nc'])
        # We try to get the last weights to keep improving the net over and over
        net.fit(x_train, y_train)
    # Here we get the seeds
    print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
          '<Looking for seeds>' + c['nc'])
    for patient in names:
        output_name = os.path.join('/'.join(patient[0].rsplit('/')[:-1]),
                                   'test.iter1.nii.gz')
        try:
            load_nii(output_name)
            print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
                  '-- Patient ' + patient[0].rsplit('/')[-2] +
                  ' already done' + c['nc'])
        except IOError:
            print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
                  '-- Testing with patient ' + c['b'] +
                  patient[0].rsplit('/')[-2] + c['nc'])
            image_nii = load_nii(patient[0])
            image = np.zeros_like(image_nii.get_data())
            for batch, centers in load_patch_batch(patient, 100000,
                                                   patch_size):
                y_pred = net.predict_proba(batch)
                [x, y, z] = np.stack(centers, axis=1)
                image[x, y, z] = y_pred[:, 1]

            print(c['g'] + '-- Saving image ' + c['b'] + output_name + c['nc'])
            # Reuse the input NIfTI header/affine for the probability map
            image_nii.get_data()[:] = image
            image_nii.to_filename(output_name)
    # Here we perform the last iteration
    print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
          '<Running iteration ' + c['b'] + '2>' + c['nc'])
    net_name = '/home/sergivalverde/w/CNN/code/CNN1/miccai_challenge2016/deep-challenge2016.final.'
    net = NeuralNet(
        layers=[
            (InputLayer, dict(name='in', shape=(None, 4, 15, 15, 15))),
            (Conv3DDNNLayer,
             dict(name='conv1_1',
                  num_filters=32,
                  filter_size=(5, 5, 5),
                  pad='same')),
            (Pool3DDNNLayer,
             dict(name='avgpool_1',
                  pool_size=2,
                  stride=2,
                  mode='average_inc_pad')),
            (Conv3DDNNLayer,
             dict(name='conv2_1',
                  num_filters=64,
                  filter_size=(5, 5, 5),
                  pad='same')),
            (Pool3DDNNLayer,
             dict(name='avgpool_2',
                  pool_size=2,
                  stride=2,
                  mode='average_inc_pad')),
            (DropoutLayer, dict(name='l2drop', p=0.5)),
            (DenseLayer, dict(name='l1', num_units=256)),
            (DenseLayer,
             dict(name='out', num_units=2,
                  nonlinearity=nonlinearities.softmax)),
        ],
        objective_loss_function=objectives.categorical_crossentropy,
        update=updates.adam,
        update_learning_rate=0.0001,
        on_epoch_finished=[
            SaveWeights(net_name + 'model_weights.pkl',
                        only_best=True,
                        pickle=False),
            SaveTrainingHistory(net_name + 'model_history.pkl'),
            PlotTrainingHistory(net_name + 'training_history.png'),
        ],
        batch_iterator_train=BatchIterator(batch_size=4096),
        verbose=10,
        max_epochs=2000,
        train_split=TrainSplit(eval_size=0.25),
        custom_scores=[('dsc', lambda p, t: 2 * np.sum(p * t[:, 1]) / np.sum(
            (p + t[:, 1])))],
    )

    try:
        net.load_params_from(net_name + 'model_weights.pkl')
    except IOError:
        pass
    print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
          'Loading the data for ' + c['b'] + 'iteration 2' + c['nc'])
    (x, y,
     names) = load_patches(dir_name='/home/sergivalverde/w/CNN/images/CH16',
                           use_flair=True,
                           use_pd=True,
                           use_t2=True,
                           use_t1=True,
                           use_gado=False,
                           flair_name='FLAIR_preprocessed.nii.gz',
                           pd_name='DP_preprocessed.nii.gz',
                           t2_name='T2_preprocessed.nii.gz',
                           gado_name=None,
                           t1_name='T1_preprocessed.nii.gz',
                           mask_name='Consensus.nii.gz',
                           size=patch_size,
                           roi_name='test.iter1.nii.gz')

    print('-- Permuting the data')
    np.random.seed(seed)
    x_train = np.random.permutation(np.concatenate(x).astype(dtype=np.float32))
    print('-- Permuting the labels')
    np.random.seed(seed)
    y_train = np.random.permutation(np.concatenate(y).astype(dtype=np.int32))
    y_train = y_train[:, y_train.shape[1] // 2 + 1, y_train.shape[2] // 2 + 1,
                      y_train.shape[3] // 2 + 1]
    print('-- Training vector shape = (' +
          ','.join([str(length) for length in x_train.shape]) + ')')
    print('-- Training labels shape = (' +
          ','.join([str(length) for length in y_train.shape]) + ')')
    print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
          'Training (' + c['b'] + 'final' + c['nc'] + c['g'] + ')' + c['nc'])
    net.fit(x_train, y_train)
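
# The snippet ends here; presumably the script is launched with the usual
# entry-point guard:
if __name__ == '__main__':
    main()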