Example #1
def load_patch_vectors(name, mask_name, dir_name, size, rois=None, random_state=42):
    # Get the names of the images and load them
    patients = [f for f in sorted(os.listdir(dir_name)) if os.path.isdir(os.path.join(dir_name, f))]
    image_names = [os.path.join(dir_name, patient, name) for patient in patients]
    # Create the masks
    brain_masks = rois if rois is not None else load_masks(image_names)
    mask_names = [os.path.join(dir_name, patient, mask_name) for patient in patients]
    lesion_masks = load_masks(mask_names)
    nolesion_masks = [np.logical_and(np.logical_not(lesion), brain) for lesion, brain in
                      izip(lesion_masks, brain_masks)]

    # Get all the centers for each image
    positive_centers = [get_mask_voxels(mask) for mask in lesion_masks]
    negative_centers = [get_mask_voxels(mask) for mask in nolesion_masks]
    positive_voxels = [len(positives) for positives in positive_centers]
    nolesion_small = subsample(negative_centers, positive_voxels, random_state)

    # Get all the patches for each image
    images = norm_image_generator(image_names)
    positive_patches = get_list_of_patches(images, positive_centers, size)
    images = norm_image_generator(image_names)
    negative_patches = get_list_of_patches(images, nolesion_small, size)

    # Prepare the mask patches for training
    positive_mask_patches = get_list_of_patches(lesion_masks, positive_centers, size)
    negative_mask_patches = get_list_of_patches(nolesion_masks, nolesion_small, size)

    # Return the patch vectors
    data = [np.concatenate([p1, p2]) for p1, p2 in izip(positive_patches, negative_patches)]
    masks = [np.concatenate([p1, p2]) for p1, p2 in izip(positive_mask_patches, negative_mask_patches)]

    return data, masks, image_names
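# A usage sketch for the loader above; the directory layout and file names are
# placeholders, and load_masks/get_mask_voxels/subsample are assumed to come
# from the surrounding codebase:
#
#     data, masks, image_names = load_patch_vectors(
#         name='flair.nii.gz',
#         mask_name='lesion.nii.gz',
#         dir_name='/data/patients',
#         size=(11, 11, 11),
#     )
#     # data[i]: positive patches of patient i followed by an equal number of
#     # randomly subsampled negative patches.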
Example #2
def get_centers_from_masks(positive_masks, negative_masks, balanced=True, random_state=42):
    positive_centers = [get_mask_voxels(mask) for mask in positive_masks]
    negative_centers = [get_mask_voxels(mask) for mask in negative_masks]
    if balanced:
        positive_voxels = [len(positives) for positives in positive_centers]
        negative_centers = list(subsample(negative_centers, positive_voxels, random_state))

    return positive_centers, negative_centers
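# A minimal usage sketch for the helper above; lesion_masks and nolesion_masks
# are assumed to be lists of boolean 3D arrays (e.g. the ones built in
# example #1):
#
#     positive_centers, negative_centers = get_centers_from_masks(
#         lesion_masks, nolesion_masks, balanced=True)
#     # With balanced=True, each negative center list is subsampled to the
#     # size of its positive counterpart.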
Example #3
def get_cnn_centers(names, labels_names, balanced=True, neigh_width=15):
    rois = list(load_masks(names))
    rois_p = list(load_masks(labels_names))
    rois_p_neigh = [log_and(log_and(imdilate(roi_p, iterations=neigh_width), log_not(roi_p)), roi)
                    for roi, roi_p in izip(rois, rois_p)]
    rois_n_global = [log_and(roi, log_not(log_or(roi_pn, roi_p)))
                     for roi, roi_pn, roi_p in izip(rois, rois_p_neigh, rois_p)]
    rois = list()
    for roi_pn, roi_ng, roi_p in izip(rois_p_neigh, rois_n_global, rois_p):
        # The goal of this loop is to randomly select the same number of non-lesion and lesion samples for each
        # image. We also want to make sure that we select the same number of boundary negatives and general
        # negatives to account for the variability in the brain.
        n_positive = np.count_nonzero(roi_p)
        if balanced:
            roi_pn[roi_pn] = np.random.permutation(xrange(np.count_nonzero(roi_pn))) < n_positive / 2
        roi_ng[roi_ng] = np.random.permutation(xrange(np.count_nonzero(roi_ng))) < n_positive / 2
        rois.append(log_or(log_or(roi_ng, roi_pn), roi_p))

    # In order to be able to permute the centers to randomly select them, or just shuffle them for training, we need
    # to keep the image reference together with each center. That is what the following lines do.
    centers_list = [get_mask_voxels(roi) for roi in rois]
    idx_lesion_centers = np.concatenate([np.array([(i, c) for c in centers], dtype=object)
                                         for i, centers in enumerate(centers_list)])

    return idx_lesion_centers
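# The balanced subsampling above relies on a numpy boolean-indexing idiom:
# assigning to mask[mask] rewrites only the True entries of the mask. A
# self-contained sketch of the trick:
import numpy as np

mask = np.array([True, False, True, True, False, True])
n_keep = 2
# Keep exactly n_keep of the True voxels, chosen uniformly at random.
mask[mask] = np.random.permutation(np.count_nonzero(mask)) < n_keep
assert np.count_nonzero(mask) == n_keep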
Example #4
def test_net(net, p, outputname):

    c = color_codes()
    options = parse_inputs()
    patch_width = options['patch_width']
    patch_size = (patch_width, patch_width, patch_width)
    batch_size = options['test_size']
    p_name = p[0].rsplit('/')[-2]
    patient_path = '/'.join(p[0].rsplit('/')[:-1])
    outputname_path = os.path.join(patient_path, outputname + '.nii.gz')
    pr_outputname_path = os.path.join(patient_path, outputname + '.pr.nii.gz')
    try:
        image = load_nii(outputname_path).get_data()
    except IOError:
        print('%s[%s]    %sTesting the network%s' % (c['c'], strftime("%H:%M:%S"), c['g'], c['nc']))
        nii = load_nii(p[0])
        roi = nii.get_data().astype(dtype=np.bool)
        centers = get_mask_voxels(roi)
        test_samples = np.count_nonzero(roi)
        image = np.zeros_like(roi).astype(dtype=np.uint8)
        pr = np.zeros_like(roi).astype(dtype=np.float32)
        print('%s[%s]    %s<Creating the probability map %s%s%s%s - %s%s%s%s (%d samples)>%s' % (
            c['c'], strftime("%H:%M:%S"),
            c['g'], c['b'], p_name, c['nc'],
            c['g'], c['b'], outputname, c['nc'],
            c['g'], test_samples, c['nc']
        ))

        n_centers = len(centers)
        image_list = [load_norm_list(p)]

        for i in range(0, n_centers, batch_size):
            print(
                '%f%% tested (step %d/%d)' % (100.0 * i / n_centers, (i / batch_size) + 1, -(-n_centers/batch_size)),
                end='\r'
            )
            sys.stdout.flush()
            centers_i = [centers[i:i + batch_size]]
            x = get_patches_list(image_list, centers_i, patch_size, True)
            x = np.concatenate(x).astype(dtype=np.float32)
            y_pr_pred = net.predict(x, batch_size=options['batch_size'])

            [x, y, z] = np.stack(centers_i[0], axis=1)

            # We store the results
            image[x, y, z] = np.argmax(y_pr_pred, axis=1).astype(dtype=np.int8)
            pr[x, y, z] = y_pr_pred[:, 1].astype(dtype=np.float32)

        print(' ' * 50, end='\r')
        sys.stdout.flush()

        # Post-processing (Basically keep the biggest connected region)
        # image = get_biggest_region(image)
        print('%s                   -- Saving image %s%s%s' % (c['g'], c['b'], outputname_path, c['nc']))

        nii.get_data()[:] = image
        nii.to_filename(outputname_path)
        nii.get_data()[:] = pr
        nii.to_filename(pr_outputname_path)
    return image
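# The expression -(-n_centers / batch_size) above is the integer
# ceiling-division idiom (this is Python 2 code, where / on ints floors):
# it computes the number of batches without math.ceil. A quick check,
# written with // so it also holds in Python 3:
n, b = 10, 3
assert -(-n // b) == 4  # ceil(10 / 3)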
Example #5
def load_patch_batch_percent(
        image_names,
        batch_size,
        size,
        defo_size=None,
        d_names=None,
        mask=None,
        datatype=np.float32
):
    images = [load_nii(name).get_data() for name in image_names]
    defos = [load_nii(name).get_data() for name in d_names] if d_names is not None else []
    images_norm = [(im - im[np.nonzero(im)].mean()) / im[np.nonzero(im)].std() for im in images]
    defos_norm = [im / np.linalg.norm(im, axis=4).std() for im in defos]
    mask = images[0].astype(np.bool) if mask is None else mask.astype(np.bool)
    lesion_centers = get_mask_voxels(mask)
    n_centers = len(lesion_centers)
    for i in range(0, n_centers, batch_size):
        centers = lesion_centers[i:i + batch_size]
        x = get_image_patches(images_norm, centers, size).astype(dtype=datatype)
        d = get_defo_patches(defos_norm, centers, size=defo_size) if defos else []
        patches = (x, d) if defos else x
        yield patches, centers, (100.0 * min((i + batch_size),  n_centers)) / n_centers
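# A usage sketch for the generator above; the file names are placeholders and
# the images are assumed to be niftis readable by load_nii:
#
#     names = ['flair.nii.gz', 't1.nii.gz']
#     for x, centers, percent in load_patch_batch_percent(names, 1024, (11, 11, 11)):
#         # x: normalized intensity patches, centers: their voxel coordinates,
#         # percent: how much of the mask has been processed so far.
#         print('%.2f%% of the patches loaded' % percent)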
Example #6
def main():
    options = parse_inputs()
    c = color_codes()

    # Prepare the net architecture parameters
    sequential = options['sequential']
    dfactor = options['dfactor']
    # Prepare the net hyperparameters
    num_classes = 5
    epochs = options['epochs']
    padding = options['padding']
    patch_width = options['patch_width']
    patch_size = (patch_width, patch_width, patch_width)
    batch_size = options['batch_size']
    dense_size = options['dense_size']
    conv_blocks = options['conv_blocks']
    n_filters = options['n_filters']
    filters_list = n_filters if len(n_filters) > 1 else n_filters * conv_blocks
    conv_width = options['conv_width']
    kernel_size_list = conv_width if isinstance(
        conv_width, list) else [conv_width] * conv_blocks
    balanced = options['balanced']
    # Data loading parameters
    preload = options['preload']
    queue = options['queue']

    # Prepare the suffix that will be added to the results for the net and images
    path = options['dir_name']
    filters_s = 'n'.join(['%d' % nf for nf in filters_list])
    conv_s = 'c'.join(['%d' % cs for cs in kernel_size_list])
    s_s = '.s' if sequential else '.f'
    ub_s = '.ub' if not balanced else ''
    params_s = (ub_s, dfactor, s_s, patch_width, conv_s, filters_s, dense_size,
                epochs, padding)
    sufix = '%s.D%d%s.p%d.c%s.n%s.d%d.e%d.pad_%s.' % params_s
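    # With a balanced run, dfactor=500, the functional ('.f') variant, patch_width=13,
    # five conv blocks of width 3 with 32 filters each, dense_size=256, epochs=1 and
    # padding='valid', sufix evaluates to
    # '.D500.f.p13.c3c3c3c3c3.n32n32n32n32n32.d256.e1.pad_valid.'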
    n_channels = np.count_nonzero([
        options['use_flair'], options['use_t2'], options['use_t1'],
        options['use_t1ce']
    ])

    print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' +
          'Starting cross-validation' + c['nc'])
    # N-fold cross validation main loop (we'll do 2 training iterations with testing for each patient)
    data_names, label_names = get_names_from_path(options)
    folds = options['folds']
    fold_generator = izip(
        nfold_cross_validation(data_names, label_names, n=folds,
                               val_data=0.25), xrange(folds))
    dsc_results = list()
    for (train_data, train_labels, val_data, val_labels, test_data,
         test_labels), i in fold_generator:
        print(
            c['c'] + '[' + strftime("%H:%M:%S") + ']  ' + c['nc'] +
            'Fold %d/%d: ' % (i + 1, folds) + c['g'] +
            'Number of training/validation/testing images (%d=%d/%d=%d/%d)' %
            (len(train_data), len(train_labels), len(val_data),
             len(val_labels), len(test_data)) + c['nc'])
        # Prepare the data relevant to the leave-one-out (remove the patient from the dataset and set the path)
        # Also, prepare the network
        net_name = os.path.join(
            path, 'baseline-brats2017.fold%d' % i + sufix + 'mdl')

        # First we check whether a model for this fold was already trained, in order to save time
        try:
            # net_name_before =  os.path.join(path,'baseline-brats2017.fold0.D500.f.p13.c3c3c3c3c3.n32n32n32n32n32.d256.e1.pad_valid.mdl')
            net = keras.models.load_model(net_name)
        except IOError:
            print('===============================================================')
            # NET definition using Keras
            train_centers = get_cnn_centers(train_data[:, 0],
                                            train_labels,
                                            balanced=balanced)
            val_centers = get_cnn_centers(val_data[:, 0],
                                          val_labels,
                                          balanced=balanced)
            train_samples = len(train_centers) / dfactor
            val_samples = len(val_centers) / dfactor
            print(c['c'] + '[' + strftime("%H:%M:%S") + ']    ' + c['g'] +
                  'Creating and compiling the model ' + c['b'] +
                  '(%d samples)' % train_samples + c['nc'])
            train_steps_per_epoch = -(-train_samples / batch_size)
            val_steps_per_epoch = -(-val_samples / batch_size)
            input_shape = (n_channels, ) + patch_size

            # The commented-out lines below belong to a functional Keras API alternative with 3 output paths
            # (whole tumor, core and enhancing segmentation), meant to let the network work on the three parts
            # to improve the multiclass segmentation. This example uses a plain Sequential stack of Conv3D
            # blocks instead.
            # merged_inputs = Input(shape=(4,) + patch_size, name='merged_inputs')
            # flair = merged_inputs

            model = Sequential()
            model.add(
                Conv3D(64, (3, 3, 3),
                       strides=1,
                       padding='same',
                       activation='relu',
                       data_format='channels_first',
                       input_shape=(4, options['patch_width'],
                                    options['patch_width'],
                                    options['patch_width'])))
            model.add(
                Conv3D(64, (3, 3, 3),
                       strides=1,
                       padding='same',
                       activation='relu',
                       data_format='channels_first'))
            model.add(
                MaxPooling3D(pool_size=(3, 3, 3),
                             strides=2,
                             data_format='channels_first'))

            model.add(
                Conv3D(128, (3, 3, 3),
                       strides=1,
                       padding='same',
                       activation='relu',
                       data_format='channels_first'))
            model.add(
                Conv3D(128, (3, 3, 3),
                       strides=1,
                       padding='same',
                       activation='relu',
                       data_format='channels_first'))
            model.add(
                MaxPooling3D(pool_size=(3, 3, 3),
                             strides=2,
                             data_format='channels_first'))

            model.add(Flatten())

            model.add(Dense(256, activation='relu'))

            model.add(Dropout(0.5))

            model.add(Dense(num_classes, activation='softmax'))

            net = model

            # net_name_before =  os.path.join(path,'baseline-brats2017.fold0.D500.f.p13.c3c3c3c3c3.n32n32n32n32n32.d256.e1.pad_valid.mdl')
            # net = keras.models.load_model(net_name_before)
            net.compile(optimizer='sgd',
                        loss='categorical_crossentropy',
                        metrics=['accuracy'])

            print(c['c'] + '[' + strftime("%H:%M:%S") + ']    ' + c['g'] +
                  'Training the model with a generator for ' + c['b'] +
                  '(%d parameters)' % net.count_params() + c['nc'])
            print(net.summary())

            net.fit_generator(
                generator=load_patch_batch_train(
                    image_names=train_data,
                    label_names=train_labels,
                    centers=train_centers,
                    batch_size=batch_size,
                    size=patch_size,
                    # fc_shape = patch_size,
                    nlabels=num_classes,
                    dfactor=dfactor,
                    preload=preload,
                    split=not sequential,
                    datatype=np.float32),
                validation_data=load_patch_batch_train(
                    image_names=val_data,
                    label_names=val_labels,
                    centers=val_centers,
                    batch_size=batch_size,
                    size=patch_size,
                    # fc_shape = patch_size,
                    nlabels=num_classes,
                    dfactor=dfactor,
                    preload=preload,
                    split=not sequential,
                    datatype=np.float32),
                # workers=queue,
                steps_per_epoch=train_steps_per_epoch,
                validation_steps=val_steps_per_epoch,
                max_q_size=queue,
                epochs=epochs)
            net.save(net_name)

        # Then we test the net.
        for p, gt_name in zip(test_data, test_labels):
            p_name = p[0].rsplit('/')[-2]
            patient_path = '/'.join(p[0].rsplit('/')[:-1])
            outputname = os.path.join(patient_path,
                                      'deep-brats17' + sufix + 'test.nii.gz')
            gt_nii = load_nii(gt_name)
            gt = np.copy(gt_nii.get_data()).astype(dtype=np.uint8)
            try:
                load_nii(outputname)
            except IOError:
                roi_nii = load_nii(p[0])
                roi = roi_nii.get_data().astype(dtype=np.bool)
                centers = get_mask_voxels(roi)
                test_samples = np.count_nonzero(roi)
                image = np.zeros_like(roi).astype(dtype=np.uint8)
                print(c['c'] + '[' + strftime("%H:%M:%S") + ']    ' + c['g'] +
                      '<Creating the probability map ' + c['b'] + p_name +
                      c['nc'] + c['g'] + ' (%d samples)>' % test_samples +
                      c['nc'])
                test_steps_per_epoch = -(-test_samples / batch_size)
                y_pr_pred = net.predict_generator(
                    generator=load_patch_batch_generator_test(
                        image_names=p,
                        centers=centers,
                        batch_size=batch_size,
                        size=patch_size,
                        preload=preload,
                    ),
                    steps=test_steps_per_epoch,
                    max_q_size=queue)
                [x, y, z] = np.stack(centers, axis=1)

                if not sequential:
                    tumor = np.argmax(y_pr_pred[0], axis=1)
                    y_pr_pred = y_pr_pred[-1]
                    roi = np.zeros_like(roi).astype(dtype=np.uint8)
                    roi[x, y, z] = tumor
                    roi_nii.get_data()[:] = roi
                    roiname = os.path.join(
                        patient_path,
                        'deep-brats17' + sufix + 'test.roi.nii.gz')
                    roi_nii.to_filename(roiname)

                y_pred = np.argmax(y_pr_pred, axis=1)

                image[x, y, z] = y_pred
                # Post-processing (Basically keep the biggest connected region)
                image = get_biggest_region(image)
                labels = np.unique(gt.flatten())
                results = (p_name, ) + tuple(
                    [dsc_seg(gt == l, image == l) for l in labels[1:]])
                text = 'Subject %s DSC: ' + '/'.join(
                    ['%f' for _ in labels[1:]])
                print(text % results)
                dsc_results.append(results)

                print(c['g'] + '                   -- Saving image ' + c['b'] +
                      outputname + c['nc'])
                roi_nii.get_data()[:] = image
                roi_nii.to_filename(outputname)
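# dsc_seg is called above but not defined in these examples. A minimal sketch
# under the standard definition of the Dice similarity coefficient,
# DSC(A, B) = 2 * |A ∩ B| / (|A| + |B|), for two boolean masks:
import numpy as np

def dsc_seg_sketch(gt_mask, pred_mask):
    intersection = np.count_nonzero(np.logical_and(gt_mask, pred_mask))
    total = np.count_nonzero(gt_mask) + np.count_nonzero(pred_mask)
    return 2.0 * intersection / total if total > 0 else 0.0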
Example #7
def centers_from_data(names):
    centers_list = [get_mask_voxels(np.squeeze(load_nii(p[0]).get_data().astype(dtype=np.bool)))
                    for p in names]
    centers = np.concatenate([np.array([(i, c) for c in centers], dtype=object)
                              for i, centers in enumerate(centers_list)])
    return centers
def get_mask_blocks(mask, dilation=2):
    return get_mask_voxels(imdilate(mask, iterations=dilation))
def main():
    options = parse_inputs()
    c = color_codes()

    # Prepare the net architecture parameters
    sequential = options['sequential']
    dfactor = options['dfactor']
    # Prepare the net hyperparameters
    num_classes = 5
    epochs = options['epochs']
    padding = options['padding']
    patch_width = options['patch_width']
    patch_size = (patch_width, patch_width, patch_width)
    batch_size = options['batch_size']
    dense_size = options['dense_size']
    conv_blocks = options['conv_blocks']
    n_filters = options['n_filters']
    filters_list = n_filters if len(n_filters) > 1 else n_filters * conv_blocks
    conv_width = options['conv_width']
    kernel_size_list = conv_width if isinstance(
        conv_width, list) else [conv_width] * conv_blocks
    balanced = options['balanced']
    recurrent = options['recurrent']
    # Data loading parameters
    preload = options['preload']
    queue = options['queue']

    # Prepare the suffix that will be added to the results for the net and images
    path = options['dir_name']
    filters_s = 'n'.join(['%d' % nf for nf in filters_list])
    conv_s = 'c'.join(['%d' % cs for cs in kernel_size_list])
    s_s = '.s' if sequential else '.f'
    ub_s = '.ub' if not balanced else ''
    params_s = (ub_s, dfactor, s_s, patch_width, conv_s, filters_s, dense_size,
                epochs, padding)
    sufix = '%s.D%d%s.p%d.c%s.n%s.d%d.e%d.pad_%s.' % params_s
    n_channels = np.count_nonzero([
        options['use_flair'], options['use_t2'], options['use_t1'],
        options['use_t1ce']
    ])

    print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' +
          'Starting cross-validation' + c['nc'])
    # N-fold cross validation main loop (we'll do 2 training iterations with testing for each patient)
    data_names, label_names = get_names_from_path(options)
    folds = options['folds']
    fold_generator = izip(
        nfold_cross_validation(data_names, label_names, n=folds,
                               val_data=0.25), xrange(folds))
    dsc_results = list()
    for (train_data, train_labels, val_data, val_labels, test_data,
         test_labels), i in fold_generator:
        print(c['c'] + '[' + strftime("%H:%M:%S") + ']  ' + c['nc'] +
              'Fold %d/%d: ' % (i + 1, folds) + c['g'] +
              'Number of training/validation/testing images (%d=%d/%d=%d/%d)' %
              (len(train_data), len(train_labels), len(val_data),
               len(val_labels), len(test_data)) + c['nc'])
        # Prepare the data relevant to the leave-one-out (remove the patient from the dataset and set the path)
        # Also, prepare the network
        net_name = os.path.join(
            path, 'baseline-brats2017.fold%d' % i + sufix + 'mdl')

        # First we check whether a model for this fold was already trained, in order to save time
        try:
            net = keras.models.load_model(net_name)
        except IOError:
            # NET definition using Keras
            train_centers = get_cnn_centers(train_data[:, 0],
                                            train_labels,
                                            balanced=balanced)
            val_centers = get_cnn_centers(val_data[:, 0],
                                          val_labels,
                                          balanced=balanced)
            train_samples = len(train_centers) / dfactor
            val_samples = len(val_centers) / dfactor
            print(c['c'] + '[' + strftime("%H:%M:%S") + ']    ' + c['g'] +
                  'Creating and compiling the model ' + c['b'] +
                  '(%d samples)' % train_samples + c['nc'])
            train_steps_per_epoch = -(-train_samples / batch_size)
            val_steps_per_epoch = -(-val_samples / batch_size)
            input_shape = (n_channels, ) + patch_size
            if sequential:
                # Sequential model that merges all 4 images. This architecture is just a set of convolutional
                # blocks that end in a dense layer, and is meant to be a simple baseline.
                net = Sequential()
                net.add(
                    Conv3D(filters_list[0],
                           kernel_size=kernel_size_list[0],
                           input_shape=input_shape,
                           activation='relu',
                           data_format='channels_first'))
                for filters, kernel_size in zip(filters_list[1:],
                                                kernel_size_list[1:]):
                    net.add(Dropout(0.5))
                    net.add(
                        Conv3D(filters,
                               kernel_size=kernel_size,
                               activation='relu',
                               data_format='channels_first'))
                net.add(Dropout(0.5))
                net.add(Flatten())
                net.add(Dense(dense_size, activation='relu'))
                net.add(Dropout(0.5))
                net.add(Dense(num_classes, activation='softmax'))
            else:
                # This architecture is based on the functional Keras API to introduce 3 output paths:
                # - Whole tumor segmentation
                # - Core segmentation (including whole tumor)
                # - Full multiclass segmentation (tumor, core and enhancing parts)
                # The idea is to let the network work on the three parts to improve the multiclass segmentation.
                merged_inputs = Input(shape=(4, ) + patch_size,
                                      name='merged_inputs')
                flair = Reshape((1, ) + patch_size)(Lambda(
                    lambda l: l[:, 0, :, :, :],
                    output_shape=(1, ) + patch_size)(merged_inputs), )
                t2 = Reshape((1, ) + patch_size)(Lambda(
                    lambda l: l[:, 1, :, :, :],
                    output_shape=(1, ) + patch_size)(merged_inputs))
                t1 = Lambda(lambda l: l[:, 2:, :, :, :],
                            output_shape=(2, ) + patch_size)(merged_inputs)
                for filters, kernel_size in zip(filters_list,
                                                kernel_size_list):
                    flair = Conv3D(filters,
                                   kernel_size=kernel_size,
                                   activation='relu',
                                   data_format='channels_first')(flair)
                    t2 = Conv3D(filters,
                                kernel_size=kernel_size,
                                activation='relu',
                                data_format='channels_first')(t2)
                    t1 = Conv3D(filters,
                                kernel_size=kernel_size,
                                activation='relu',
                                data_format='channels_first')(t1)
                    flair = Dropout(0.5)(flair)
                    t2 = Dropout(0.5)(t2)
                    t1 = Dropout(0.5)(t1)

                # We only apply the RCNN to the multioutput approach (we keep the simple one, simple)
                if recurrent:
                    flair = Conv3D(dense_size,
                                   kernel_size=(1, 1, 1),
                                   activation='relu',
                                   data_format='channels_first',
                                   name='fcn_flair')(flair)
                    flair = Dropout(0.5)(flair)
                    t2 = concatenate([flair, t2], axis=1)
                    t2 = Conv3D(dense_size,
                                kernel_size=(1, 1, 1),
                                activation='relu',
                                data_format='channels_first',
                                name='fcn_t2')(t2)
                    t2 = Dropout(0.5)(t2)
                    t1 = concatenate([t2, t1], axis=1)
                    t1 = Conv3D(dense_size,
                                kernel_size=(1, 1, 1),
                                activation='relu',
                                data_format='channels_first',
                                name='fcn_t1')(t1)
                    t1 = Dropout(0.5)(t1)
                    flair = Dropout(0.5)(flair)
                    t2 = Dropout(0.5)(t2)
                    t1 = Dropout(0.5)(t1)
                    lstm_instance = LSTM(dense_size,
                                         implementation=1,
                                         name='rf_layer')
                    flair = lstm_instance(
                        Permute((2, 1))(Reshape((dense_size, -1))(flair)))
                    t2 = lstm_instance(
                        Permute((2, 1))(Reshape((dense_size, -1))(t2)))
                    t1 = lstm_instance(
                        Permute((2, 1))(Reshape((dense_size, -1))(t1)))

                else:
                    flair = Flatten()(flair)
                    t2 = Flatten()(t2)
                    t1 = Flatten()(t1)
                    flair = Dense(dense_size, activation='relu')(flair)
                    flair = Dropout(0.5)(flair)
                    t2 = concatenate([flair, t2])
                    t2 = Dense(dense_size, activation='relu')(t2)
                    t2 = Dropout(0.5)(t2)
                    t1 = concatenate([t2, t1])
                    t1 = Dense(dense_size, activation='relu')(t1)
                    t1 = Dropout(0.5)(t1)

                tumor = Dense(2, activation='softmax', name='tumor')(flair)
                core = Dense(3, activation='softmax', name='core')(t2)
                enhancing = Dense(num_classes,
                                  activation='softmax',
                                  name='enhancing')(t1)

                net = Model(inputs=merged_inputs,
                            outputs=[tumor, core, enhancing])

            net.compile(optimizer='adadelta',
                        loss='categorical_crossentropy',
                        metrics=['accuracy'])

            print(c['c'] + '[' + strftime("%H:%M:%S") + ']    ' + c['g'] +
                  'Training the model with a generator for ' + c['b'] +
                  '(%d parameters)' % net.count_params() + c['nc'])
            print(net.summary())
            net.fit_generator(
                generator=load_patch_batch_train(image_names=train_data,
                                                 label_names=train_labels,
                                                 centers=train_centers,
                                                 batch_size=batch_size,
                                                 size=patch_size,
                                                 nlabels=num_classes,
                                                 dfactor=dfactor,
                                                 preload=preload,
                                                 split=not sequential,
                                                 datatype=np.float32),
                validation_data=load_patch_batch_train(image_names=val_data,
                                                       label_names=val_labels,
                                                       centers=val_centers,
                                                       batch_size=batch_size,
                                                       size=patch_size,
                                                       nlabels=num_classes,
                                                       dfactor=dfactor,
                                                       preload=preload,
                                                       split=not sequential,
                                                       datatype=np.float32),
                steps_per_epoch=train_steps_per_epoch,
                validation_steps=val_steps_per_epoch,
                max_q_size=queue,
                epochs=epochs)
            net.save(net_name)

        # Then we test the net.
        use_gt = options['use_gt']
        for p, gt_name in zip(test_data, test_labels):
            p_name = p[0].rsplit('/')[-2]
            patient_path = '/'.join(p[0].rsplit('/')[:-1])
            outputname = os.path.join(patient_path,
                                      'deep-brats17' + sufix + 'test.nii.gz')
            try:
                load_nii(outputname)
            except IOError:
                roi_nii = load_nii(p[0])
                roi = roi_nii.get_data().astype(dtype=np.bool)
                centers = get_mask_voxels(roi)
                test_samples = np.count_nonzero(roi)
                image = np.zeros_like(roi).astype(dtype=np.uint8)
                print(c['c'] + '[' + strftime("%H:%M:%S") + ']    ' + c['g'] +
                      '<Creating the probability map ' + c['b'] + p_name +
                      c['nc'] + c['g'] + ' (%d samples)>' % test_samples +
                      c['nc'])
                test_steps_per_epoch = -(-test_samples / batch_size)
                y_pr_pred = net.predict_generator(
                    generator=load_patch_batch_generator_test(
                        image_names=p,
                        centers=centers,
                        batch_size=batch_size,
                        size=patch_size,
                        preload=preload,
                    ),
                    steps=test_steps_per_epoch,
                    max_q_size=queue)
                [x, y, z] = np.stack(centers, axis=1)

                if not sequential:
                    tumor = np.argmax(y_pr_pred[0], axis=1)
                    y_pr_pred = y_pr_pred[-1]
                    roi = np.zeros_like(roi).astype(dtype=np.uint8)
                    roi[x, y, z] = tumor
                    roi_nii.get_data()[:] = roi
                    roiname = os.path.join(
                        patient_path,
                        'deep-brats17' + sufix + 'test.roi.nii.gz')
                    roi_nii.to_filename(roiname)

                y_pred = np.argmax(y_pr_pred, axis=1)

                image[x, y, z] = y_pred
                # Post-processing (Basically keep the biggest connected region)
                image = get_biggest_region(image)
                if use_gt:
                    gt_nii = load_nii(gt_name)
                    gt = np.copy(gt_nii.get_data()).astype(dtype=np.uint8)
                    labels = np.unique(gt.flatten())
                    results = (p_name, ) + tuple(
                        [dsc_seg(gt == l, image == l) for l in labels[1:]])
                    text = 'Subject %s DSC: ' + '/'.join(
                        ['%f' for _ in labels[1:]])
                    print(text % results)
                    dsc_results.append(results)

                print(c['g'] + '                   -- Saving image ' + c['b'] +
                      outputname + c['nc'])
                roi_nii.get_data()[:] = image
                roi_nii.to_filename(outputname)
def test_net(net, p, gt_name, options):
    # Testing hyperparameters
    patch_width = options['patch_width']
    patch_size = (patch_width, patch_width, patch_width)
    batch_size = options['batch_size']
    # Data loading parameters
    preload = options['preload']
    queue = options['queue']

    sufix = get_sufix(options)

    c = color_codes()
    p_name = '-'.join(p[0].rsplit('/')[-1].rsplit('.')[0].rsplit('-')[:-1])
    patient_path = '/'.join(p[0].rsplit('/')[:-1])
    outputname = os.path.join(patient_path,
                              'deep-' + p_name + sufix + 'brain.hdr')
    gt_nii = load_nii(gt_name)
    gt = np.copy(np.squeeze(gt_nii.get_data()))
    vals = np.unique(gt.flatten())
    try:
        image = np.squeeze(load_nii(outputname).get_data())
    except IOError:
        roi = np.squeeze(load_nii(p[0]).get_data())
        centers = get_mask_voxels(roi.astype(dtype=np.bool))
        test_samples = np.count_nonzero(roi)
        image = np.zeros_like(roi).astype(dtype=np.uint8)
        print(c['c'] + '[' + strftime("%H:%M:%S") + ']    ' + c['g'] +
              '<Creating the probability map ' + c['b'] + p_name + c['nc'] +
              c['g'] + ' (%d samples)>' % test_samples + c['nc'])
        test_steps_per_epoch = -(-test_samples / batch_size)
        y_pr_pred = net.predict_generator(
            generator=load_patch_batch_generator_test(
                image_names=p,
                centers=centers,
                batch_size=batch_size,
                size=patch_size,
                preload=preload,
            ),
            steps=test_steps_per_epoch,
            max_q_size=queue)
        [x, y, z] = np.stack(centers, axis=1)

        if options['experimental'] >= 3:
            y_pr_pred = y_pr_pred[:-1]

        for num, results in enumerate(y_pr_pred):
            brain = np.argmax(results, axis=1)
            image[x, y, z] = brain
            if num == 0:
                im = sufix + 'csf.'
                gt_nii.get_data()[:] = np.expand_dims(image, axis=3)
            elif num == 1:
                im = sufix + 'gm.'
                gt_nii.get_data()[:] = np.expand_dims(image, axis=3)
            elif num == 2:
                im = sufix + 'wm.'
                gt_nii.get_data()[:] = np.expand_dims(image, axis=3)
            elif num == 3:
                im = sufix + 'brain.'
                gt_nii.get_data()[:] = np.expand_dims(vals[image], axis=3)
            elif num == 4:
                im = sufix + 'rf.'
                gt_nii.get_data()[:] = np.expand_dims(vals[image], axis=3)
            else:
                im = sufix + 'merge.'
                gt_nii.get_data()[:] = np.expand_dims(vals[image], axis=3)
            roiname = os.path.join(patient_path,
                                   'deep-' + p_name + im + 'roi.hdr')
            print(c['g'] + '                   -- Saving image ' + c['b'] +
                  roiname + c['nc'])
            save_nii(gt_nii, roiname)

        y_pred = np.argmax(y_pr_pred[-1], axis=1)

        image[x, y, z] = y_pred
        gt_nii.get_data()[:] = np.expand_dims(image, axis=3)
        save_nii(gt_nii, outputname)

    return image, gt
Example #11
def test_network(net, p, batch_size, patch_size, queue=50, sufix='', centers=None, filename=None):

    c = color_codes()
    p_name = p[0].rsplit('/')[-2]
    patient_path = '/'.join(p[0].rsplit('/')[:-1])
    outputname = filename if filename is not None else 'deep-brats17.test.' + sufix
    outputname_path = os.path.join(patient_path, outputname + '.nii.gz')
    roiname = os.path.join(patient_path, outputname + '.roi.nii.gz')
    try:
        image = load_nii(outputname_path).get_data()
        load_nii(roiname)
    except IOError:
        print(c['c'] + '[' + strftime("%H:%M:%S") + ']    ' + c['g'] + 'Testing ' +
              c['b'] + sufix + c['nc'] + c['g'] + ' network' + c['nc'])
        roi_nii = load_nii(p[0])
        roi = roi_nii.get_data().astype(dtype=np.bool)
        centers = get_mask_voxels(roi) if centers is None else centers
        test_samples = np.count_nonzero(roi)
        image = np.zeros_like(roi).astype(dtype=np.uint8)
        print(c['c'] + '[' + strftime("%H:%M:%S") + ']    ' + c['g'] +
              '<Creating the probability map ' + c['b'] + p_name + c['nc'] + c['g'] +
              ' (%d samples)>' % test_samples + c['nc'])
        test_steps_per_epoch = -(-test_samples / batch_size)
        y_pr_pred = net.predict_generator(
            generator=load_patch_batch_generator_test(
                image_names=p,
                centers=centers,
                batch_size=batch_size,
                size=patch_size,
                preload=True,
            ),
            steps=test_steps_per_epoch,
            max_q_size=queue
        )
        print(' ' * 50, end='\r')
        sys.stdout.flush()
        [x, y, z] = np.stack(centers, axis=1)

        if isinstance(y_pr_pred, list):
            tumor = np.argmax(y_pr_pred[0], axis=1)
            y_pr_pred = y_pr_pred[-1]
            is_roi = False
        else:
            tumor = np.argmax(y_pr_pred, axis=1)
            is_roi = True

        # We save the ROI
        roi = np.zeros_like(roi).astype(dtype=np.uint8)
        roi[x, y, z] = tumor
        roi_nii.get_data()[:] = roi
        roi_nii.to_filename(roiname)

        y_pred = np.argmax(y_pr_pred, axis=1)

        # We save the results
        image[x, y, z] = tumor if is_roi else y_pred
        # Post-processing (Basically keep the biggest connected region)
        image = get_biggest_region(image, is_roi)
        print(c['g'] + '                   -- Saving image ' + c['b'] + outputname_path + c['nc'])
        roi_nii.get_data()[:] = image
        roi_nii.to_filename(outputname_path)
    return image
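# get_biggest_region is used above but not shown in these examples. A minimal
# sketch of the "keep the biggest connected region" post-processing it is
# described as performing, using scipy.ndimage on a 3D label volume (the
# is_roi flag used in example #11 is omitted here):
import numpy as np
from scipy import ndimage

def biggest_region_sketch(image):
    blobs, n_blobs = ndimage.label(image.astype(bool))
    if n_blobs < 2:
        return image
    sizes = np.bincount(blobs.ravel())[1:]  # component sizes, skipping background
    return image * (blobs == np.argmax(sizes) + 1)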