Example #1
def test_net(net, p, outputname):

    c = color_codes()
    options = parse_inputs()
    patch_width = options['patch_width']
    patch_size = (patch_width, patch_width, patch_width)
    batch_size = options['test_size']
    p_name = p[0].rsplit('/')[-2]
    patient_path = '/'.join(p[0].rsplit('/')[:-1])
    outputname_path = os.path.join(patient_path, outputname + '.nii.gz')
    pr_outputname_path = os.path.join(patient_path, outputname + '.pr.nii.gz')
    try:
        image = load_nii(outputname_path).get_data()
    except IOError:
        print('%s[%s]    %sTesting the network%s' % (c['c'], strftime("%H:%M:%S"), c['g'], c['nc']))
        nii = load_nii(p[0])
        roi = nii.get_data().astype(dtype=np.bool)
        centers = get_mask_voxels(roi)
        test_samples = np.count_nonzero(roi)
        image = np.zeros_like(roi).astype(dtype=np.uint8)
        pr = np.zeros_like(roi).astype(dtype=np.float32)
        print('%s[%s]    %s<Creating the probability map %s%s%s%s - %s%s%s%s (%d samples)>%s' % (
            c['c'], strftime("%H:%M:%S"),
            c['g'], c['b'], p_name, c['nc'],
            c['g'], c['b'], outputname, c['nc'],
            c['g'], test_samples, c['nc']
        ))

        n_centers = len(centers)
        image_list = [load_norm_list(p)]

        for i in range(0, n_centers, batch_size):
            print(
                '%f%% tested (step %d/%d)' % (100.0 * i / n_centers, (i // batch_size) + 1, -(-n_centers // batch_size)),
                end='\r'
            )
            sys.stdout.flush()
            centers_i = [centers[i:i + batch_size]]
            x = get_patches_list(image_list, centers_i, patch_size, True)
            x = np.concatenate(x).astype(dtype=np.float32)
            y_pr_pred = net.predict(x, batch_size=options['batch_size'])

            [x, y, z] = np.stack(centers_i[0], axis=1)

            # We store the results
            image[x, y, z] = np.argmax(y_pr_pred, axis=1).astype(dtype=np.int8)
            pr[x, y, z] = y_pr_pred[:, 1].astype(dtype=np.float32)

        print(' '.join([''] * 50), end='\r')
        sys.stdout.flush()

        # Post-processing (Basically keep the biggest connected region)
        # image = get_biggest_region(image)
        print('%s                   -- Saving image %s%s%s' % (c['g'], c['b'], outputname_path, c['nc']))

        nii.get_data()[:] = image
        nii.to_filename(outputname_path)
        nii.get_data()[:] = pr
        nii.to_filename(pr_outputname_path)
    return image
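The loop above visits all ROI voxels in chunks of batch_size and writes the argmax class of each prediction back into the output volume; -(-n // b) is integer ceiling division. Below is a minimal, self-contained sketch of that pattern with a dummy predictor standing in for net.predict (for illustration only):

import numpy as np

def predict_dummy(x):
    # Stand-in for net.predict: random two-class probabilities per sample.
    return np.random.rand(len(x), 2)

roi = np.random.rand(16, 16, 16) > 0.5        # binary region of interest
centers = np.stack(np.nonzero(roi), axis=1)   # (n_voxels, 3) coordinates
image = np.zeros(roi.shape, dtype=np.uint8)

batch_size = 256
n_centers = len(centers)
n_batches = -(-n_centers // batch_size)       # ceiling division
assert n_batches == int(np.ceil(float(n_centers) / batch_size))

for i in range(0, n_centers, batch_size):
    batch = centers[i:i + batch_size]
    y_pr = predict_dummy(batch)               # (batch, n_classes) probabilities
    x, y, z = batch.T
    image[x, y, z] = np.argmax(y_pr, axis=1).astype(np.uint8)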
Example #2
def vox_generator_test(all_files):

    path = options['root_path']

    while 1:
        for file in all_files:
            p = file
            coll = glob.glob(os.path.join(path, file) + '/*')
            for c in coll:
                if 'flair.' in c or 'flair_corrected.' in c:
                    flair_path = c
                if 't1.' in c or 't1_corrected.' in c:
                    t1_path = c
                if 't2.' in c or 't2_corrected.' in c:
                    t2_path = c
                if 't1ce.' in c or 't1ce_corrected.' in c:
                    t1ce_path = c
            flair = load_nii(flair_path).get_data()
            t2 = load_nii(t2_path).get_data()
            t1 = load_nii(t1_path).get_data()
            t1ce = load_nii(t1ce_path).get_data()

            data = np.array([flair, t2, t1, t1ce])
            data = np.transpose(data, axes=[1, 2, 3, 0])

            data_norm = np.array([norm(flair), norm(t2), norm(t1), norm(t1ce)])
            data_norm = np.transpose(data_norm, axes=[1, 2, 3, 0])

            labels = load_nii(os.path.join(path, p,
                                           p + '_seg.nii.gz')).get_data()

            yield data, data_norm, labels
Example #3
def vox_generator_test(all_files):

    path = options['root_path']

    while 1:
        for file in all_files:
            p = file
            if options['correction']:
                flair = load_nii(os.path.join(path, file, file + '_flair_corrected.nii.gz')).get_data()
                t2 = load_nii(os.path.join(path, file, file + '_t2_corrected.nii.gz')).get_data()
                t1 = load_nii(os.path.join(path, file, file + '_t1_corrected.nii.gz')).get_data()
                t1ce = load_nii(os.path.join(path, file, file + '_t1ce_corrected.nii.gz')).get_data()
            else:
                flair = load_nii(os.path.join(path, p, p + '_flair.nii.gz')).get_data()

                t2 = load_nii(os.path.join(path, p, p + '_t2.nii.gz')).get_data()

                t1 = load_nii(os.path.join(path, p, p + '_t1.nii.gz')).get_data()

                t1ce = load_nii(os.path.join(path, p, p + '_t1ce.nii.gz')).get_data()
            data = np.array([flair, t2, t1, t1ce])
            data = np.transpose(data, axes=[1, 2, 3, 0])

            data_norm = np.array([norm(flair), norm(t2), norm(t1), norm(t1ce)])
            data_norm = np.transpose(data_norm, axes=[1, 2, 3, 0])

            labels = load_nii(os.path.join(path, p, p + '_seg.nii.gz')).get_data()

            yield data, data_norm, labels
def get_patient_survival_features(path, p, p_features, test=False):
    # Init
    options = parse_inputs()
    roi_sufix = '_seg.nii.gz' if not test else '.nii.gz'
    roi = load_nii(os.path.join(path, p, p + roi_sufix)).get_data()
    brain = load_nii(os.path.join(path, p, p + options['t1'])).get_data()
    brain_vol = np.count_nonzero(brain)
    vol_features = [float(np.count_nonzero(roi == l)) / brain_vol for l in [1, 2, 4]]
    age_features = [float(p_features['Age']) / 100]
    features = [age_features + vol_features]

    return features
def get_atlas_vectors(options, dir_name, centers, t1_names):
    """
    Generate training data vectors from probabilistic atlases. These vectors are concatenated with fully-connected layers.
    """
    subjects = [
        f for f in sorted(os.listdir(dir_name))
        if os.path.isdir(os.path.join(dir_name, f))
    ]
    atlas_names = [
        os.path.join(dir_name, subject, 'tmp', 'MNI_sub_probabilities.nii.gz')
        for subject in subjects
    ]

    atlas_images = []
    # load atlas, register if does not exist
    for t1, atlas, subject in zip(t1_names, atlas_names, subjects):
        if os.path.exists(atlas) is False:
            print "         --> registering priors for scan {}".format(subject)
            t = register_masks(t1)
            print " (elapsed time {} min.".format(t / 60.0)
        atlas_images.append(fix_shape(load_nii(atlas).get_data()))

    # ATLAS probabilities (centered voxel)
    # convert lesion centers
    lc = map(lambda l: np.asarray(l), centers)
    atlas_vectors = [
        a[c[:, 0], c[:, 1], c[:, 2]] for a, c in zip(atlas_images, lc)
    ]

    # correct for background. if no probability exists for any class, set as background
    for index in range(len(atlas_vectors)):
        if np.sum(atlas_vectors[index]) == 0:
            atlas_vectors[index][14] = 1

    return atlas_vectors
Example #6
def eval_one(npy_path, name, save_nii, save_root, post_process):
	pred_path = os.path.join(npy_path, name)
	predict = np.load(pred_path).astype(np.float) / 255

	label_path = '/data/lbw/structseg2019/data_ori/Task3_Thoracic_OAR/{}/label.nii.gz'.format(name[5:-4])
	label = load_nii(label_path).get_data().astype(np.uint8)
	label = np.eye(7)[label].transpose([3, 0, 1, 2])[np.newaxis, :]

	if post_process:
		ori_shape = predict.shape
		predict = F.interpolate(torch.from_numpy(predict), [512, 512, 512], mode='trilinear')
		predict = F.interpolate(predict, ori_shape[2:], mode='trilinear').numpy()
		predict = postprocessing(predict)

	if save_nii:
		predict_nii = predict.copy()
		predict_nii = np.argmax(predict_nii.squeeze(), axis=0).astype(np.uint8)
		# predict_npy = predict_npy.transpose([1, 0, 2])
		predict_nii = nib.Nifti1Image(predict_nii, None)
		nib.save(predict_nii, os.path.join(save_root, name[:-4]))

	dices = compute_multi_dice(predict, label)
	print(name[:-4])
	for i in range(7):
		print(all_organs[i], float(dices[i]))

	return [dices, '{0:02d}'.format(int(name[5:-4]))]
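label = np.eye(7)[label] turns an integer label volume into a one-hot encoding with the class axis last, which is then moved to the front. compute_multi_dice is not shown here; the helper below is only an illustrative stand-in that scores each class with a Dice coefficient, not the original implementation:

import numpy as np

def dice_per_class(pred_onehot, label_onehot, eps=1e-7):
    # Both inputs: (n_classes, D, H, W) binary arrays.
    axes = (1, 2, 3)
    intersection = np.sum(pred_onehot * label_onehot, axis=axes)
    denom = np.sum(pred_onehot, axis=axes) + np.sum(label_onehot, axis=axes)
    return (2.0 * intersection + eps) / (denom + eps)

label = np.random.randint(0, 7, size=(8, 8, 8))   # integer labels 0..6
onehot = np.eye(7)[label]                         # (8, 8, 8, 7)
onehot = onehot.transpose([3, 0, 1, 2])           # (7, 8, 8, 8)

print(dice_per_class(onehot, onehot))             # identical volumes -> all ones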
Example #7
def get_isbi_data(
    d_path='data/ISBI',
    images=None,
    verbose=1,
    preload=True,
):
    """
        Function that loads the images and masks of a list of patients.
        :param d_path: Path to the ISBI data.
        :param images: Image names (prefixes).
        :param verbose: Level of verbosity
        :return: list of numpy arrays for the concatenated images, lesion
        mask and brain mask.
    """

    lesion_mask_name = 'mask1.nii'
    tmp = get_dirs(d_path)  # if p_tag in p
    p_train = sorted(tmp, key=lambda p: int(''.join(filter(str.isdigit, p))))
    if images is None:
        images = ['flair', 'mprage']

    p_trains = []

    # Finally we either load all images and normalise them (a lot of RAM) or we
    # leave that job to the dataset object.
    if verbose > 1:
        print('Loading the images')
    lesion_names = []
    data = []
    for p_path in p_train:
        tmp_stages = set()
        for file in os.listdir(os.path.join(d_path, p_path, 'preprocessed')):
            if file.endswith(".nii"):
                tmp_stages.add(file.split('_')[1])
        for stage in tmp_stages:
            lesion_names.append(
                os.path.join(d_path, p_path, 'masks',
                             '_'.join([p_path, stage, lesion_mask_name])))
            data.append(
                np.stack([
                    get_normalised_image(
                        os.path.join(d_path, p_path, 'preprocessed',
                                     '%s_%s_%s_pp.nii' % (p_path, stage, im)),
                        None,
                    ) for im in images
                ],
                         axis=0))
            p_trains.append(p_path + '_' + stage)

    # Lesion masks (we are using this function for training, so there should
    # always be a lesion mask).
    if verbose > 1:
        print('Loading the lesion masks')
    lesions = list(map(get_mask, lesion_names))
    brains = [np.full(lesions[0].shape, 1, dtype=int)] * len(lesions)
    example_nii = load_nii(
        os.path.join(d_path, p_path, 'preprocessed',
                     '%s_%s_%s_pp.nii' % (p_path, stage, images[0])))
    return data, lesions, brains, p_trains, example_nii
Example #8
def select_voxels_from_previous_model(model, train_x_data, options):
    """
    Select training voxels from image segmentation masks 
    
    """

    # get_scan names and number of modalities used
    scans = train_x_data.keys()
    modalities = train_x_data[scans[0]].keys()

    # select voxels for training. Discard CSF and darker WM in FLAIR.
    # flair_scans = [train_x_data[s]['FLAIR'] for s in scans]
    # selected_voxels = select_training_voxels(flair_scans, options['min_th'])

    # evaluate training scans using the learned model and extract voxels with probability higher than 0.5
    seg_mask = [
        test_scan(model,
                  dict(train_x_data.items()[s:s + 1]),
                  options,
                  save_nifti=False) > 0.5 for s in range(len(scans))
    ]

    # check candidate segmentations:
    # if no voxels have been selected, return candidate voxels on FLAIR modality > 2
    flair_scans = [train_x_data[s]['FLAIR'] for s in scans]
    images = [load_nii(name).get_data() for name in flair_scans]
    images_norm = [(im.astype(dtype=np.float32) - im[np.nonzero(im)].mean()) /
                   im[np.nonzero(im)].std() for im in images]
    seg_mask = [
        im > 2 if np.sum(seg) == 0 else seg
        for im, seg in zip(images_norm, seg_mask)
    ]

    return seg_mask
Example #9
def test_scan(model, test_x_data, options, save_nifti= True, candidate_mask = None):
    """
    Test data based on one model
    Input:
    - test_x_data: a nested dictionary containing the testing image paths:
            test_x_data['scan_name']['modality'] = path_to_image_modality
    - save_nifti: save the image segmentation
    - candidate_mask: a binary mask containing the voxels to classify

    Output:
    - test_scan = output image containing the probability segmentation
    - If save_nifti --> saves a nifti file at the location given by
      options['test_folder']/['test_scan']
    """

    # get_scan name and create an empty nifti image to store segmentation
    scans = test_x_data.keys()
    flair_scans = [test_x_data[s]['FLAIR'] for s in scans]
    flair_image = load_nii(flair_scans[0]).get_data()
    seg_image = np.zeros_like(flair_image)

    # get test paths
    test_folder, scan = os.path.split(flair_scans[0])
    
    # compute lesion segmentation in batches of size options['batch_size'] 
    for batch, centers in load_test_patches(test_x_data, options['patch_size'], options['batch_size'], candidate_mask):
        y_pred = model.predict_proba(np.squeeze(batch))
        [x, y, z] = np.stack(centers, axis=1)
        seg_image[x, y, z] = y_pred[:, 1]

    if save_nifti:
        out_scan = nib.Nifti1Image(seg_image, np.eye(4))
        #out_scan.to_filename(os.path.join(options['test_folder'], options['test_scan'], options['experiment'], options['test_name']))
        out_scan.to_filename(os.path.join(test_folder, options['experiment'], options['test_name']))

    return seg_image 
Example #10
def get_normalised_image(
        image_name, mask=None, dtype=np.float32, masked=False
):
    """
    Function to load an image and normalise it (0 mean / 1 standard
     deviation)
    :param image_name: Path to the image to be normalised
    :param mask: Mask defining the region of interest
    :param dtype: Data type for the final image
    :param masked: Whether to mask the image or not
    :return:
    """
    image = load_nii(image_name).get_fdata().astype(dtype)

    # If no mask is provided we use the image as a mask (all non-zero values)
    if mask is None:
        mask_bin = image.astype(np.bool)
    else:
        mask_bin = mask.astype(np.bool)

    # Parameter estimation using the mask provided
    image_mu = np.mean(image[mask_bin])
    image_sigma = np.std(image[mask_bin])
    norm_image = (image - image_mu) / image_sigma

    if masked:
        output = norm_image * mask_bin.astype(dtype)
    else:
        output = norm_image

    return output
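A quick check of what the function computes: inside the (non-zero) mask the normalised image ends up with zero mean and unit standard deviation. Sketch with a synthetic volume in place of a NIfTI file:

import numpy as np

image = np.zeros((10, 10, 10), dtype=np.float32)
image[2:8, 2:8, 2:8] = np.random.rand(6, 6, 6) * 100 + 50   # non-zero "brain" region

mask_bin = image.astype(bool)
mu, sigma = image[mask_bin].mean(), image[mask_bin].std()
norm_image = (image - mu) / sigma

print(norm_image[mask_bin].mean(), norm_image[mask_bin].std())   # ~0.0 and ~1.0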
Example #11
def test_scan(model,
              test_x_data,
              options,
              save_nifti=True,
              candidate_mask=None):
    """
    Test data based on one model
    Input:
    - test_x_data: a nested dictionary containing the testing image paths:
            test_x_data['scan_name']['modality'] = path_to_image_modality
    - save_nifti: save the image segmentation
    - candidate_mask: a binary mask containing the voxels to classify

    Output:
    - test_scan = output image containing the probability segmentation
    - If save_nifti --> Saves a nifti file at specified location
      options['test_folder']/['test_scan']
    """

    # get_scan name and create an empty nifti image to store segmentation
    scans = list(test_x_data.keys())
    flair_scans = [test_x_data[s]['FLAIR'] for s in scans]
    flair_image = load_nii(flair_scans[0])
    seg_image = np.zeros_like(flair_image.get_data().astype('float32'))

    if candidate_mask is not None:
        all_voxels = np.sum(candidate_mask)
    else:
        all_voxels = np.sum(flair_image.get_data() > 0)

    if options['debug'] is True:
        print("> DEBUG ", scans[0], "Voxels to classify:", all_voxels)

    # compute lesion segmentation in batches of size options['batch_size']
    batch, centers = load_test_patches(test_x_data, options['patch_size'],
                                       options['batch_size'], candidate_mask)
    if options['debug'] is True:
        print("> DEBUG: testing current_batch:", batch.shape)

    y_pred = model['net'].predict(np.squeeze(batch), options['batch_size'])
    [x, y, z] = np.stack(centers, axis=1)
    seg_image[x, y, z] = y_pred[:, 1]
    if options['debug'] is True:
        print("...done!")

    # check if the computed volume is lower than the minimum accuracy given
    # by the min_error parameter
    if check_min_error(seg_image, options, flair_image.header.get_zooms()):
        if options['debug']:
            print("> DEBUG ", scans[0], "lesion volume below ",
                  options['min_error'], 'ml')
        seg_image = np.zeros_like(flair_image.get_data().astype('float32'))

    if save_nifti:
        out_scan = nib.Nifti1Image(seg_image, affine=flair_image.affine)
        out_scan.to_filename(
            os.path.join(options['test_folder'], options['test_scan'],
                         options['experiment'], options['test_name']))

    return seg_image
Example #12
def get_patches_from_name(filename, centers, patch_size):
    image = load_nii(filename).get_data()
    patches = get_patches(image, centers,
                          patch_size) if len(patch_size) == 3 else [
                              get_patches2_5d(image, centers, patch_size)
                          ]
    return patches
def post_process_segmentation(image_folder, input_mask):
    """
    doc
    """
    filtered_mask = np.zeros_like(input_mask)
    atlas = load_nii(
        os.path.join(image_folder, 'tmp',
                     'MNI_subcortical_mask.nii.gz')).get_data()
    for l in range(1, 15):

        th_label = input_mask == l
        labels, num_labels = ndimage.label(th_label)
        label_list = np.unique(labels)

        # filter candidates by size. Only storing the biggest one
        num_elements_by_lesion = ndimage.labeled_comprehension(
            np.logical_and(th_label, atlas), labels, label_list, np.sum, float,
            0)
        argmax = np.argmax(num_elements_by_lesion)

        # assign voxels to output
        current_voxels = np.stack(np.where(labels == argmax), axis=1)
        filtered_mask[current_voxels[:, 0], current_voxels[:, 1],
                      current_voxels[:, 2]] = l

    return filtered_mask
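This is the same idea as the get_biggest_region call commented out in Example #1: keep only the largest connected component. A self-contained sketch with scipy.ndimage (an illustrative reimplementation, not the original helper):

import numpy as np
from scipy import ndimage

def biggest_region(binary_mask):
    # Label connected components and keep the one with the most voxels.
    labels, num_labels = ndimage.label(binary_mask)
    if num_labels == 0:
        return np.zeros_like(binary_mask, dtype=bool)
    sizes = ndimage.sum(binary_mask, labels, range(1, num_labels + 1))
    return labels == (np.argmax(sizes) + 1)

mask = np.zeros((20, 20, 20), dtype=bool)
mask[2:5, 2:5, 2:5] = True         # small blob (27 voxels)
mask[10:18, 10:18, 10:18] = True   # big blob (512 voxels)
print(biggest_region(mask).sum())  # 512: only the big blob survives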
Example #14
def generate_vfn_label():
    root = '/data/lbw/structseg2019/data_ori/Task3_Thoracic_OAR'
    # root = '/data/lbw/structseg2019/data_ori/re_labeling'
    target = '/data/lbw/structseg2019/data_ori/trachea_vfn_data/label'
    with open('./data_split/img_spacing.json', 'r') as f:
        spacings = json.load(f)
    with open('./data_split/label_range.json', 'r') as f:
        label_range = json.load(f)

    for i in range(1, 51):
        # for i in [3, 5, 28, 32]:
        print(i)
        label_path = '{}/{}/label.nii.gz'.format(root, i)
        label = np.uint8(load_nii(label_path).get_data())
        label[label != 4] = 0
        label[label == 4] = 1

        this_range = label_range[str(i)]
        # min_rl_h, min_rl_w, min_rl_t, max_rl_h, max_rl_w, max_rl_t = this_range['RightLung']
        # min_ll_h, min_ll_w, min_ll_t, max_ll_h, max_ll_w, max_ll_t = this_range['LeftLung']
        min_tr_h, min_tr_w, min_tr_t, max_tr_h, max_tr_w, max_tr_t = this_range[
            'Trachea']
        min_h, min_w, min_t = min_tr_h - 10, min_tr_w - 10, min_tr_t - 5
        max_h, max_w, max_t = max_tr_h + 11, max_tr_w + 11, max_tr_t + 6
        # min_h, min_w, max_h, max_w = min(min_rl_h, min_ll_h), min(min_rl_w, min_ll_w), max(max_rl_h, max_ll_h), max(
        #     max_rl_w, max_ll_w)
        # min_t, max_t = min_he_t - 3, max_he_t + 4

        label = label[min_h:max_h, min_w:max_w, min_t:max_t]

        spacing = spacings[str(i)]['spacing']
        label = equal_spacing(label, spacing, 1, mode='nearest')

        target_path = os.path.join(target, 'label_{}'.format(i))
        np.save(target_path, label)
def load_and_stack_iter2(names_lou, mask_names, roi_names, patch_size, old):
    if old:
        rois = load_thresholded_images_by_name(roi_names, threshold=0.5)
        print('                Loading FLAIR images')
        flair, y_train = load_patch_vectors_by_name(names_lou[0, :], mask_names, patch_size, rois)
        print('                Loading PD images')
        pd, _ = load_patch_vectors_by_name(names_lou[1, :], mask_names, patch_size, rois)
        print('                Loading T2 images')
        t2, _ = load_patch_vectors_by_name(names_lou[2, :], mask_names, patch_size, rois)
        print('                Loading T1 images')
        t1, _ = load_patch_vectors_by_name(names_lou[3, :], mask_names, patch_size, rois)
    else:
        pr_maps = [load_nii(roi_name).get_data() for roi_name in roi_names]
        print('                Loading FLAIR images')
        flair, y_train = load_patch_vectors_by_name_pr(names_lou[0, :], mask_names, patch_size, pr_maps)
        print('                Loading PD images')
        pd, _ = load_patch_vectors_by_name_pr(names_lou[1, :], mask_names, patch_size, pr_maps)
        print('                Loading T2 images')
        t2, _ = load_patch_vectors_by_name_pr(names_lou[2, :], mask_names, patch_size, pr_maps)
        print('                Loading T1 images')
        t1, _ = load_patch_vectors_by_name_pr(names_lou[3, :], mask_names, patch_size, pr_maps)

    x_train = [np.stack(images, axis=1) for images in zip(*[flair, pd, t2, t1])]

    return x_train, y_train
Example #16
def load_test_patches(test_x_data,
                      patch_size,
                      batch_size,
                      voxel_candidates=None,
                      datatype=np.float32):
    """
    Load test patches with size equal to patch_size, given a list of selected
    voxels. Although originally written as a generator (see the commented-out
    loop below), this version extracts and returns all patches at once.

    Inputs:
       - test_x_data: nested dictionary containing the image paths for each
         scan and modality
       - patch_size: tuple containing the patch size, either 2D (p1, p2, 1)
         or 3D (p1, p2, p3)
       - batch_size: batch size (only used by the commented-out generator)
       - voxel_candidates: a binary mask containing the voxels to test

    Outputs:
       - X: test data matrix for each channel [num_samples, p1, p2, p3]
       - voxel_coord: list of tuples with the voxel coordinates (x, y, z) of
         the selected patches
    """

    # get scan names and number of modalities used
    scans = list(test_x_data.keys())
    modalities = list(test_x_data[scans[0]].keys())

    # load all image modalities and normalize intensities
    images = []

    for m in modalities:
        raw_images = [load_nii(test_x_data[s][m]).get_data() for s in scans]
        images.append([normalize_data(im) for im in raw_images])

    # select voxels for testing. Discard CSF and darker WM in FLAIR.
    # If voxel_candidates is not selected, using intensity > 0.5 in FLAIR,
    # else use the binary mask to extract candidate voxels
    if voxel_candidates is None:
        flair_scans = [test_x_data[s]['FLAIR'] for s in scans]
        selected_voxels = [
            get_mask_voxels(mask)
            for mask in select_training_voxels(flair_scans, 0.5)
        ][0]
    else:
        selected_voxels = get_mask_voxels(voxel_candidates)

    # yield data for testing with size equal to batch_size
    # for i in range(0, len(selected_voxels), batch_size):
    #     c_centers = selected_voxels[i:i+batch_size]
    #     X = []
    #     for m, image_modality in zip(modalities, images):
    #         X.append(get_patches(image_modality[0], c_centers, patch_size))
    #     yield np.stack(X, axis=1), c_centers

    X = []
    for image_modality in images:
        X.append(get_patches(image_modality[0], selected_voxels, patch_size))

    Xs = np.stack(X, axis=1)
    return Xs, selected_voxels
Example #17
def dump_spacing():
    root = '/data/lbw/structseg2019/data_ori/Task3_Thoracic_OAR'
    img_range = dict()

    for i in range(1, 51):
        print(i)
        path = '{}/{}/data.nii.gz'.format(root, i)
        img_range[i] = dict()
        nii = load_nii(path)
        img = nii.get_data().astype(np.uint8)
        spacing = nii.header['pixdim'][1:4]
        img_range[i]['shape'] = (int(img.shape[0]), int(img.shape[1]),
                                 int(img.shape[2]))
        img_range[i]['spacing'] = (float(spacing[0]), float(spacing[1]),
                                   float(spacing[2]))

    with open('./data_split/img_spacing.json', 'w') as f:
        json.dump(img_range, f)
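The spacing is taken here from header['pixdim'][1:4]; nibabel exposes the same values through header.get_zooms(). A small sketch with an in-memory image (illustrative only):

import numpy as np
import nibabel as nib

affine = np.diag([1.0, 1.0, 3.0, 1.0])   # voxel sizes along the diagonal
img = nib.Nifti1Image(np.zeros((64, 64, 32), dtype=np.float32), affine)

print(img.header.get_zooms())            # (1.0, 1.0, 3.0)
print(img.header['pixdim'][1:4])         # the same values, as stored in the header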
Example #18
def test_cascaded_model(model, test_x_data, options):
    """
    Test the cascaded approach using a learned model

    inputs:

    - CNN model: a list containing the two cascaded CNN models

    - test_x_data: a nested dictionary containing testing image paths:
           test_x_data['scan_name']['modality'] = path_to_image_modality


    - options: dictionary containing general hyper-parameters:

    outputs:
        - output_segmentation
    """

    # print '> CNN: testing the model'

    # organize experiments
    exp_folder = os.path.join(options['test_folder'],
                              options['test_scan'],
                              options['experiment'])
    if not os.path.exists(exp_folder):
        os.mkdir(exp_folder)

    # first network
    options['test_name'] = options['experiment'] + '_debug_prob_0.nii.gz'

    # only save the first iteration result if debug is True
    save_nifti = True if options['debug'] is True else False
    t1 = test_scan(model[0],
                   test_x_data,
                   options,
                   save_nifti=save_nifti)

    # second network
    options['test_name'] = options['experiment'] + '_prob_1.nii.gz'
    t2 = test_scan(model[1],
                   test_x_data,
                   options,
                   save_nifti=True,
                   candidate_mask=(t1 > 0.5))

    # postprocess the output segmentation
    # obtain the orientation from the first scan used for testing
    scans = test_x_data.keys()
    flair_scans = [test_x_data[s]['FLAIR'] for s in scans]
    flair_image = load_nii(flair_scans[0])
    options['test_name'] = options['experiment'] + '_hard_seg.nii.gz'
    out_segmentation = post_process_segmentation(t2,
                                                 options,
                                                 save_nifti=True,
                                                 orientation=flair_image.affine)

    # return out_segmentation
    return out_segmentation
def vox_generator(all_files, n_pos, n_neg, correction=False):
    path = options["root_path"]

    def get_filename(prefix):
        return glob.glob(os.path.join(path, file, prefix))[0].split("/")[-1]

    while 1:
        for file in all_files:
            if correction:
                flair = load_nii(
                    os.path.join(path, file, file + "_flair_corrected.nii.gz")
                ).get_data()
                t2 = load_nii(
                    os.path.join(path, file, file + "_t2_corrected.nii.gz")
                ).get_data()
                t1 = load_nii(
                    os.path.join(path, file, file + "_t1_corrected.nii.gz")
                ).get_data()
                t1ce = load_nii(
                    os.path.join(path, file, file + "_t1ce_corrected.nii.gz")
                ).get_data()
            else:
                flair = load_nii(
                    os.path.join(path, file, file + "_flair.nii.gz")
                ).get_data()
                t2 = load_nii(os.path.join(path, file, file + "_t2.nii.gz")).get_data()
                t1 = load_nii(os.path.join(path, file, file + "_t1.nii.gz")).get_data()
                t1ce = load_nii(
                    os.path.join(path, file, file + "_t1ce.nii.gz")
                ).get_data()

            data_norm = np.array([norm(flair), norm(t2), norm(t1), norm(t1ce)])
            data_norm = np.transpose(data_norm, axes=[1, 2, 3, 0])
            labels = load_nii(os.path.join(path, file, file + "_seg.nii.gz")).get_data()

            foreground = np.array(np.where(labels > 0))
            background = np.array(np.where((labels == 0) & (flair > 0)))

            # n_pos = int(foreground.shape[1] * discount)
            foreground = foreground[
                :, np.random.permutation(foreground.shape[1])[:n_pos]
            ]
            background = background[
                :, np.random.permutation(background.shape[1])[:n_neg]
            ]

            centers = np.concatenate((foreground, background), axis=1)
            centers = centers[:, np.random.permutation(n_neg + n_pos)]

            yield data_norm, labels, centers
def get_patient_roi_slice(path, p):
    options = parse_inputs()
    n_slices = options['n_slices']

    # roi_sufix = '_seg.nii.gz' if not test else '.nii.gz'
    roi_sufix = '.nii.gz'
    roi = load_nii(os.path.join(path, p, p + roi_sufix)).get_data()
    brain = load_nii(os.path.join(path, p, p + options['t1'])).get_data()
    bounding_box_min = np.min(np.nonzero(brain), axis=1)
    bounding_box_max = np.max(np.nonzero(brain), axis=1)
    center_of_masses = np.mean(np.nonzero(roi), axis=1, dtype=int)
    slices = [[
        slice(bounding_box_min[0], bounding_box_max[0] + 1),
        slice(bounding_box_min[1], bounding_box_max[1] + 1),
        slice(center_of_masses[-1] - n_slices // 2, center_of_masses[-1] + n_slices // 2)
    ]]

    return slices
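The bounding-box part on its own: the per-axis min/max of np.nonzero give the tightest box around the brain mask, which is then turned into slice objects. Minimal sketch:

import numpy as np

brain = np.zeros((32, 32, 32), dtype=np.uint8)
brain[5:20, 8:25, 3:30] = 1

bb_min = np.min(np.nonzero(brain), axis=1)   # array([5, 8, 3])
bb_max = np.max(np.nonzero(brain), axis=1)   # array([19, 24, 29])
box = tuple(slice(lo, hi + 1) for lo, hi in zip(bb_min, bb_max))

print(brain[box].shape)   # (15, 17, 27)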
Example #21
def gen_test_data(p):
    path = options['root_path']
    flair = load_nii(os.path.join(path, p, p + '_flair.nii.gz')).get_data()

    t2 = load_nii(os.path.join(path, p, p + '_t2.nii.gz')).get_data()

    t1 = load_nii(os.path.join(path, p, p + '_t1.nii.gz')).get_data()

    t1ce = load_nii(os.path.join(path, p, p + '_t1ce.nii.gz')).get_data()
    data = np.array([flair, t2, t1, t1ce])
    data = np.transpose(data, axes=[1, 2, 3, 0])

    data_norm = np.array([norm(flair), norm(t2), norm(t1), norm(t1ce)])
    data_norm = np.transpose(data_norm, axes=[1, 2, 3, 0])

    labels = load_nii(os.path.join(path, p, p + '_seg.nii.gz')).get_data()

    return data, data_norm, labels
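The np.array / np.transpose pair above stacks the four modality volumes and moves the modality axis to the end (channels last). A quick shape check with random arrays in place of the NIfTI volumes:

import numpy as np

flair, t2, t1, t1ce = (np.random.rand(24, 24, 16) for _ in range(4))

data = np.array([flair, t2, t1, t1ce])         # (4, 24, 24, 16), channels first
data = np.transpose(data, axes=[1, 2, 3, 0])   # (24, 24, 16, 4), channels last

print(data.shape)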
Example #22
def load_train_patches(x_data, y_data, selected_voxels, patch_size, random_state = 42, datatype=np.float32):
    """
    Load train patches with size equal to patch_size, given a list of selected voxels

    Inputs: 
       - x_data: list containing all subject image paths for a single modality
       - y_data: list containing all subject image paths for the labels
       - selected_voxels: list where each element contains the subject binary mask for selected voxels [len(x), len(y), len(z)]
       - tuple containing patch size, either 2D (p1, p2, 1) or 3D (p1, p2, p3)
    
    Outputs:
       - X: Train X data matrix for the particular channel [num_samples, p1, p2, p3]
       - Y: Train Y labels [num_samples, p1, p2, p3]
    """
    
    # load images and normalize their intensities
    images = [load_nii(name).get_data() for name in x_data]
    images_norm = [(im.astype(dtype=datatype) - im[np.nonzero(im)].mean()) / im[np.nonzero(im)].std() for im in images]

    # load labels 
    lesion_masks = [load_nii(name).get_data().astype(dtype=np.bool) for name in y_data]
    nolesion_masks = [np.logical_and(np.logical_not(lesion), brain) for lesion, brain in zip(lesion_masks, selected_voxels)]

    # Get all the x,y,z coordinates for each image
    lesion_centers = [get_mask_voxels(mask) for mask in lesion_masks]
    nolesion_centers = [get_mask_voxels(mask) for mask in nolesion_masks]
   
    # load all positive samples (lesion voxels) and the same number of random negatives samples
    np.random.seed(random_state) 

    x_pos_patches = [np.array(get_patches(image, centers, patch_size)) for image, centers in zip(images_norm, lesion_centers)]
    y_pos_patches = [np.array(get_patches(image, centers, patch_size)) for image, centers in zip(lesion_masks, lesion_centers)]
    
    indices = [np.random.permutation(range(0, len(centers1))).tolist()[:len(centers2)] for centers1, centers2 in zip(nolesion_centers, lesion_centers)]
    nolesion_small = [itemgetter(*idx)(centers) for centers, idx in zip(nolesion_centers, indices)]
    x_neg_patches = [np.array(get_patches(image, centers, patch_size)) for image, centers in zip(images_norm, nolesion_small)]
    y_neg_patches = [np.array(get_patches(image, centers, patch_size)) for image, centers in zip(lesion_masks, nolesion_small)]

    # concatenate positive and negative patches for each subject
    X = np.concatenate([np.concatenate([x1, x2]) for x1, x2 in zip(x_pos_patches, x_neg_patches)], axis = 0)
    Y = np.concatenate([np.concatenate([y1, y2]) for y1, y2 in zip(y_pos_patches, y_neg_patches)], axis= 0)
    
    return X, Y
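The negative sampling above keeps only as many non-lesion centres as there are lesion centres, chosen through a random permutation of indices. That balancing step in isolation:

import numpy as np

np.random.seed(42)
lesion_centers = [(i, i, i) for i in range(50)]        # 50 positive centres
nolesion_centers = [(i, 0, 0) for i in range(5000)]    # many more negative centres

idx = np.random.permutation(len(nolesion_centers))[:len(lesion_centers)]
nolesion_small = [nolesion_centers[j] for j in idx]

print(len(nolesion_small))   # 50: one negative centre per positive one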
Example #23
def mean_list(filename):
    filelist = file_name(filename)
    # img_arr = []
    img_sum = []
    img_list = []
    for filename in filelist:
        fsi = load_nii(filename).get_data()
        img_arr = np.squeeze(fsi)
        if len(img_list) == 0:
            img_sum = img_arr.copy()  # copy so the += below does not also modify img_list[0]
            affine = load_nii(filename).affine
        else:
            img_sum += img_arr
        img_list.append(img_arr)
    img_mean = img_sum / len(img_list)
    img_list = np.array(img_list)
    print('img_list', img_list.shape)
    print(img_mean.shape)
    return img_mean, img_list, affine
def get_bounding_centers(image_names, patch_width, overlap=0, offset=0):
    list_of_centers = map(
        lambda names: get_bounding_blocks(
            load_nii(names[0]).get_data(),
            patch_width,
            overlap=overlap,
            offset=offset
        ),
        image_names
    )
    return list_of_centers
Example #25
def test():
    path = '/media/lele/DATA/brain/Brats17TrainingData/HGG_test/Brats17_2013_10_1/'
    gt = path + 'Brats17_2013_10_1_seg.nii.gz'
    roi_nii = load_nii(gt)
    roi = roi_nii.get_data().astype(dtype=np.bool)
    centers = get_mask_voxels(roi)
    test_samples = np.count_nonzero(roi)
    image = np.zeros_like(roi).astype(dtype=np.uint8)
    # print image
    print test_samples
    print image.shape
def check_image_list(patients_list, options):
    sufix = get_sufix(options)
    try:
        for p in patients_list:
            p_name, patient_path = get_patient_info(p)
            outputname = os.path.join(patient_path,
                                      'deep-' + p_name + sufix + 'brain.hdr')
            np.squeeze(load_nii(outputname))
        return True
    except IOError:
        return False
Example #27
def generate_esophagus_data():
    root = '/data/lbw/structseg2019/data_ori/Task3_Thoracic_OAR'
    target_root = '/data/ybh/PublicDataSet/StructSeg2019/single_class_data/trachea'
    target_img_root = os.path.join(target_root, 'img')
    target_label_root = os.path.join(target_root, 'label')
    if not os.path.exists(target_root):
        os.mkdir(target_root)
    if not os.path.exists(target_img_root):
        os.mkdir(target_img_root)
    if not os.path.exists(target_label_root):
        os.mkdir(target_label_root)
    for dir in os.listdir(root):
        print(dir)
        img_path = '{}/{}/data.nii.gz'.format(root, dir)
        label_path = '{}/{}/label.nii.gz'.format(root, dir)
        target_img_dir = os.path.join(target_img_root, dir)
        target_label_dir = os.path.join(target_label_root, dir)
        if not os.path.exists(target_img_dir):
            os.mkdir(target_img_dir)
        if not os.path.exists(target_label_dir):
            os.mkdir(target_label_dir)

        img = read_nii(img_path, 1180, -440)
        label = load_nii(label_path).get_data().astype(np.uint8)
        # rl_h, rl_w, rl_t = np.where(label == 1)
        # ll_h, ll_w, ll_t = np.where(label == 2)
        es_h, es_w, es_t = np.where(label == 5)
        #
        min_es_h, max_es_h, min_es_w, max_es_w, min_es_t, max_es_t = min(
            es_h), max(es_h), min(es_w), max(es_w), min(es_t), max(es_t)
        # # min_lung_h, max_lung_h = min(min(rl_h), min(ll_h)), max(max(rl_h), max(ll_h))
        # # min_lung_w, max_lung_w = min(min(rl_w), min(ll_w)), max(max(rl_w), max(ll_w))
        #
        # img_crop = img[100: 420, 140: 410, min_es_t - 5: max_es_t + 6]
        img_crop = img[min_es_h - 10:max_es_h + 11,
                       min_es_w - 10:max_es_w + 11, min_es_t - 5:max_es_t + 6]
        label_crop = label[min_es_h - 10:max_es_h + 11,
                           min_es_w - 10:max_es_w + 11,
                           min_es_t - 5:max_es_t + 6]
        # label_crop = label[100: 420, 140: 410, min_es_t - 5: max_es_t + 6]

        # img_crop = img
        # label[label != 5] = 0
        # label[label == 5] = 1
        # label_crop = label

        print(img_crop.shape)

        for i in range(img_crop.shape[2]):
            cv2.imwrite(os.path.join(target_img_dir, '{}.png'.format(i)),
                        img_crop[:, :, i:i + 1])
            cv2.imwrite(os.path.join(target_label_dir, '{}.png'.format(i)),
                        label_crop[:, :, i:i + 1])
def get_patches_list(list_of_image_names, centers_list, size):
    patch_list = [
        np.stack(
            map(
                lambda image: get_patches(norm(np.asarray(load_nii(image).dataobj)), centers, size),
                image_names
            ),
            axis=1
        )
        for image_names, centers in zip(list_of_image_names, centers_list) if centers
    ]
    return patch_list
Example #29
def load_patch_batch_percent(
        image_names,
        batch_size,
        size,
        defo_size=None,
        d_names=None,
        mask=None,
        datatype=np.float32
):
    images = [load_nii(name).get_data() for name in image_names]
    defos = [load_nii(name).get_data() for name in d_names] if d_names is not None else []
    images_norm = [(im - im[np.nonzero(im)].mean()) / im[np.nonzero(im)].std() for im in images]
    defos_norm = [im / np.linalg.norm(im, axis=4).std() for im in defos]
    mask = images[0].astype(np.bool) if mask is None else mask.astype(np.bool)
    lesion_centers = get_mask_voxels(mask)
    n_centers = len(lesion_centers)
    for i in range(0, n_centers, batch_size):
        centers = lesion_centers[i:i + batch_size]
        x = get_image_patches(images_norm, centers, size).astype(dtype=datatype)
        d = get_defo_patches(defos_norm, centers, size=defo_size) if defos else []
        patches = (x, d) if defos else x
        yield patches, centers, (100.0 * min((i + batch_size),  n_centers)) / n_centers
Example #30
def get_normalised_image(image_name, mask, dtype=np.float32, masked=False):
    mask_bin = mask.astype(np.bool)
    image = load_nii(image_name).get_data().astype(dtype)
    image_mu = np.mean(image[mask_bin])
    image_sigma = np.std(image[mask_bin])
    norm_image = (image - image_mu) / image_sigma

    if masked:
        output = norm_image * mask_bin.astype(dtype)
    else:
        output = norm_image

    return output
def vox_generator(all_files, n_pos, n_neg):
    path = options['root_path']
    while 1:
        for file in all_files:
            coll = glob.glob(os.path.join(path, file) + '/*')
            for c in coll:
                if 'flair.' in c or 'flair_corrected.' in c:
                    flair_path = c
                if 't1.' in c or 't1_corrected.' in c:
                    t1_path = c
                if 't2.' in c or 't2_corrected.' in c:
                    t2_path = c
                if 't1ce.' in c or 't1ce_corrected.' in c:
                    t1ce_path = c
            flair = load_nii(flair_path).get_data()
            t2 = load_nii(t2_path).get_data()
            t1 = load_nii(t1_path).get_data()
            t1ce = load_nii(t1ce_path).get_data()

            data_norm = np.array([norm(flair), norm(t2), norm(t1), norm(t1ce)])
            data_norm = np.transpose(data_norm, axes=[1, 2, 3, 0])
            labels = load_nii(os.path.join(path, file,
                                           file + '_seg.nii.gz')).get_data()

            foreground = np.array(np.where(labels > 0))
            background = np.array(np.where((labels == 0) & (flair > 0)))

            # n_pos = int(foreground.shape[1] * discount)
            foreground = foreground[:,
                                    np.random.permutation(foreground.shape[1]
                                                          )[:n_pos]]
            background = background[:,
                                    np.random.permutation(background.shape[1]
                                                          )[:n_neg]]

            centers = np.concatenate((foreground, background), axis=1)
            centers = centers[:, np.random.permutation(n_neg + n_pos)]

            yield data_norm, labels, centers
Example #32
def get_information_image(names, mask_name, patch_size, method='stdev'):
    # Init
    mask = load_nii(mask_name).get_data()
    patch_size = patch_size if type(patch_size) is list else (patch_size,) * len(mask.shape)
    patch_voxels = np.prod(patch_size)
    n_images =  len(names)
    # ROI stuff
    min_c = np.stack(np.nonzero(mask.astype(dtype=np.bool))).min(axis=1)
    max_c = np.stack(np.nonzero(mask.astype(dtype=np.bool))).max(axis=1)
    roi_shape = max_c - min_c
    # Slicing
    slicing = tuple(slice(i, j) for i, j in zip(min_c, max_c))  # tuple so it can be used as an index
    images = [load_nii(name).get_data() for name in names]
    clipped_images = [np.pad(im[slicing], patch_size[0]/2, 'constant') for im in images]

    n_slices = np.prod(roi_shape)
    slices = np.zeros((n_slices, n_images, patch_voxels), dtype=np.int16)
    for i, ci in enumerate(clipped_images):
        slices[:, i, :] = get_rolling_patches(ci, patch_size).reshape((-1, patch_voxels))
    slices = slices.reshape((-1, n_images * patch_voxels))

    min_s = np.amin(slices)
    max_s = np.amax(slices)

    hist = [np.histogram((x.astype(np.float32)-min_s)/(max_s-min_s), range=(0, 1))[0] for x in slices]

    methods = {
        'stdev': np.array([x.std() for x in slices]),
        'entropy': np.array([entropy(x) * 512 for x in hist])
    }

    # Final image
    im = np.zeros_like(mask, dtype=np.float32)
    im[slicing] = methods[method].reshape(roi_shape)
    im[np.logical_not(mask)] = 0

    return im
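The 'entropy' option above histograms each normalised patch (10 bins, np.histogram's default) and scores it with scipy's entropy; flat patches score 0 and heterogeneous ones score close to log(10). A tiny standalone version of that measurement:

import numpy as np
from scipy.stats import entropy

patch = np.random.rand(125)                    # a flattened 5x5x5 patch in [0, 1]
hist, _ = np.histogram(patch, range=(0, 1))    # 10 bins by default
print(entropy(hist))                           # close to log(10) ~ 2.3 for a spread-out patch

flat_patch = np.full(125, 0.5)
hist, _ = np.histogram(flat_patch, range=(0, 1))
print(entropy(hist))                           # 0.0: all mass falls in a single bin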
Example #33
def load_image_vectors(name, dir_name, min_shape, datatype=np.float32):
    # Get the names of the images and load them
    patients = [f for f in sorted(os.listdir(dir_name)) if os.path.isdir(os.path.join(dir_name, f))]
    image_names = [os.path.join(dir_name, patient, name) for patient in patients]
    images = [load_nii(image_name).get_data() for image_name in image_names]
    # Reshape everything to have data of homogenous size (important for training)
    # Also, normalize the data
    if min_shape is None:
        min_shape = min([im.shape for im in images])
    data = np.asarray(
        [nd.zoom((im - im.mean()) / im.std(),
                 [float(min_shape[0]) / im.shape[0], float(min_shape[1]) / im.shape[1],
                  float(min_shape[2]) / im.shape[2]]) for im in images]
    )

    return data.astype(datatype), image_names
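nd.zoom (scipy.ndimage.zoom) is used here to bring every normalised volume to a common shape by per-axis zoom factors. The resizing step on its own:

import numpy as np
import scipy.ndimage as nd

im = np.random.rand(64, 72, 60)
min_shape = (48, 48, 48)

factors = [float(t) / s for t, s in zip(min_shape, im.shape)]
resized = nd.zoom((im - im.mean()) / im.std(), factors)

print(resized.shape)   # (48, 48, 48)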
Example #34
def reshape_to_nifti(image, original_name):
    # Open the original nifti
    original = load_nii(original_name).get_data()
    # Reshape the image and save it
    reshaped = nd.zoom(
        image,
        [
            float(original.shape[0]) / image.shape[0],
            float(original.shape[1]) / image.shape[1],
            float(original.shape[2]) / image.shape[2]
        ]
    )
    reshaped *= original.std()
    reshaped += original.mean()
    reshaped_nii = NiftiImage(reshaped, affine=np.eye(4))

    return reshaped_nii
Example #35
def get_cnn_rois(names, mask_names, roi_names=None, pr_names=None, th=1.0, balanced=True):
    rois = load_thresholded_norm_images_by_name(
        names[0, :],
        threshold=th,
        mask_names=roi_names
    ) if roi_names is not None else load_masks(names)
    if pr_names is not None:
        pr_maps = [load_nii(name).get_data() * roi for name, roi in izip(pr_names, rois)]
        if balanced:
            idx_sorted_maps = [np.argsort(pr_map * np.logical_not(lesion_mask), axis=None)
                               for pr_map, lesion_mask in izip(pr_maps, load_masks(mask_names))]
            rois_n = [idx.reshape(lesion_mask.shape) > (idx.shape[0] - np.sum(lesion_mask) - 1)
                      for idx, lesion_mask in izip(idx_sorted_maps, load_masks(mask_names))]
        else:
            rois_n = [np.logical_and(np.logical_not(lesion_mask), pr_map > 0.5)
                      for pr_map, lesion_mask in izip(pr_maps, load_masks(mask_names))]
    else:
        rois_n = [np.logical_and(np.logical_not(lesion), brain)
                  for lesion, brain in izip(load_masks(mask_names), rois)]

    rois_p = list(load_masks(mask_names))
    return rois_p, rois_n
def main():
    options = parse_inputs()
    c = color_codes()

    # Prepare the net architecture parameters
    register = options['register']
    multi = options['multi']
    defo = options['deformation']
    layers = ''.join(options['layers'])
    greenspan = options['greenspan']
    freeze = options['freeze']
    balanced = options['balanced'] if not freeze else False

    # Prepare the net hyperparameters
    epochs = options['epochs']
    padding = options['padding']
    patch_width = options['patch_width']
    patch_size = (32, 32) if greenspan else (patch_width, patch_width, patch_width)
    pool_size = options['pool_size']
    batch_size = options['batch_size']
    dense_size = options['dense_size']
    conv_blocks = options['conv_blocks']
    n_filters = options['number_filters']
    n_filters = n_filters if len(n_filters) > 1 else n_filters*conv_blocks
    conv_width = options['conv_width']
    conv_size = conv_width if isinstance(conv_width, list) else [conv_width]*conv_blocks

    # Prepare the sufix that will be added to the results for the net and images
    use_flair = options['use_flair']
    use_pd = options['use_pd']
    use_t2 = options['use_t2']
    flair_name = 'flair' if use_flair else None
    pd_name = 'pd' if use_pd else None
    t2_name = 't2' if use_t2 else None
    images = filter(None, [flair_name, pd_name, t2_name])
    reg_s = '.reg' if register else ''
    filters_s = 'n'.join(['%d' % nf for nf in n_filters])
    conv_s = 'c'.join(['%d' % cs for cs in conv_size])
    im_s = '.'.join(images)
    mc_s = '.mc' if multi else ''
    d_s = 'd%d.' % (conv_blocks*2+defo) if defo else ''
    sufix = '.greenspan' if greenspan else '%s.%s%s%s.p%d.c%s.n%s.d%d.e%d.pad_%s' %\
        (mc_s, d_s, im_s, reg_s, patch_width, conv_s, filters_s, dense_size, epochs, padding)

    # Prepare the data names
    mask_name = options['mask']
    wm_name = options['wm_mask']
    sub_folder = options['sub_folder']
    sub_name = options['flair_sub']
    dir_name = options['dir_name']
    patients = [f for f in sorted(os.listdir(dir_name))
                if os.path.isdir(os.path.join(dir_name, f))]
    n_patients = len(patients)
    names = get_names_from_path(dir_name, options, patients)
    defo_names = get_defonames_from_path(dir_name, options, patients) if defo else None
    defo_width = conv_blocks*2+defo if defo else None
    defo_size = (defo_width, defo_width, defo_width)

    # Random initialisation
    seed = np.random.randint(np.iinfo(np.int32).max)

    # Metrics output
    metrics_file = os.path.join(dir_name, 'metrics' + sufix)

    with open(metrics_file, 'w') as f:

        print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + 'Starting leave-one-out' + c['nc'])
        # Leave-one-out main loop (we'll do 2 training iterations with testing for each patient)
        for i in range(0, n_patients):
            # Prepare the data relevant to the leave-one-out (subtract the patient from the dataset and set the path)
            # Also, prepare the network
            case = patients[i]
            path = os.path.join(dir_name, case)
            names_lou = np.concatenate([names[:, :i], names[:, i + 1:]], axis=1)
            defo_names_lou = np.concatenate([defo_names[:, :i], defo_names[:, i + 1:]], axis=1) if defo else None
            print(c['c'] + '[' + strftime("%H:%M:%S") + ']  ' + c['nc'] + 'Patient ' + c['b'] + case + c['nc'] +
                  c['g'] + ' (%d/%d)' % (i+1, n_patients))
            print(c['c'] + '[' + strftime("%H:%M:%S") + ']    ' + c['g'] +
                  '<Running iteration ' + c['b'] + '1' + c['nc'] + c['g'] + '>' + c['nc'])
            net_name = os.path.join(path, 'deep-longitudinal.init' + sufix + '.')
            if greenspan:
                net = create_cnn_greenspan(
                    input_channels=names.shape[0]/2,
                    patience=25,
                    name=net_name,
                    epochs=500
                )
                images = ['axial', 'coronal', 'sagital']
            else:
                if multi:
                    net = create_cnn3d_det_string(
                        cnn_path=layers,
                        input_shape=(None, names.shape[0], patch_width, patch_width, patch_width),
                        convo_size=conv_size,
                        padding=padding,
                        dense_size=dense_size,
                        pool_size=2,
                        number_filters=n_filters,
                        patience=10,
                        multichannel=True,
                        name=net_name,
                        epochs=100
                    )
                else:
                    net = create_cnn3d_longitudinal(
                        convo_blocks=conv_blocks,
                        input_shape=(None, names.shape[0], patch_width, patch_width, patch_width),
                        images=images,
                        convo_size=conv_size,
                        pool_size=pool_size,
                        dense_size=dense_size,
                        number_filters=n_filters,
                        padding=padding,
                        drop=0.5,
                        register=register,
                        defo=defo,
                        patience=10,
                        name=net_name,
                        epochs=100
                    )

            names_test = get_names_from_path(path, options)
            defo_names_test = get_defonames_from_path(path, options) if defo else None
            outputname1 = os.path.join(path, 't' + case + sufix + '.iter1.nii.gz') if not greenspan else os.path.join(
                path, 't' + case + sufix + '.nii.gz')

            # First we check that we did not train for that patient, in order to save time
            try:
                net.load_params_from(net_name + 'model_weights.pkl')
            except IOError:
                print(c['c'] + '[' + strftime("%H:%M:%S") + ']    ' +
                      c['g'] + 'Loading the data for ' + c['b'] + 'iteration 1' + c['nc'])
                # Data loading. Most of it is based on functions from data_creation that load the data.
                #  But we also need to prepare the name list to load the leave-one-out data.
                paths = [os.path.join(dir_name, p) for p in np.concatenate([patients[:i], patients[i+1:]])]
                mask_names = [os.path.join(p_path, mask_name) for p_path in paths]
                wm_names = [os.path.join(p_path, wm_name) for p_path in paths]
                pr_names = [os.path.join(p_path, sub_folder, sub_name) for p_path in paths]

                x_train, y_train = load_lesion_cnn_data(
                    names=names_lou,
                    mask_names=mask_names,
                    defo_names=defo_names_lou,
                    roi_names=wm_names,
                    pr_names=pr_names,
                    patch_size=patch_size,
                    defo_size=defo_size,
                    random_state=seed
                )

                # Afterwards we train. Check the relevant training function.
                if greenspan:
                    x_train = np.swapaxes(x_train, 1, 2)
                    train_greenspan(net, x_train, y_train, images)
                else:
                    train_net(net, x_train, y_train, images)
                    with open(net_name + 'layers.pkl', 'wb') as fnet:
                        pickle.dump(net.layers, fnet, -1)
            # Then we test the net. Again we save time by checking if we already tested that patient.
            try:
                image_nii = load_nii(outputname1)
                image1 = image_nii.get_data()
            except IOError:
                print(c['c'] + '[' + strftime("%H:%M:%S") + ']    ' + c['g'] +
                      '<Creating the probability map ' + c['b'] + '1' + c['nc'] + c['g'] + '>' + c['nc'])
                image_nii = load_nii(os.path.join(path, options['image_folder'], options['flair_f']))
                mask_nii = load_nii(os.path.join(path, wm_name))
                if greenspan:
                    image1 = test_greenspan(
                        net,
                        names_test,
                        mask_nii.get_data(),
                        batch_size,
                        patch_size,
                        image_nii.get_data().shape,
                        images
                    )
                else:
                    image1 = test_net(
                        net,
                        names_test,
                        mask_nii.get_data(),
                        batch_size,
                        patch_size,
                        defo_size,
                        image_nii.get_data().shape,
                        images,
                        defo_names_test
                    )
                image_nii.get_data()[:] = image1
                image_nii.to_filename(outputname1)
            if greenspan:
                # Since Greenspan did not use two iterations, we must get the final mask here.
                outputname_final = os.path.join(path, 't' + case + sufix + '.final.nii.gz')
                mask_nii.get_data()[:] = (image1 > 0.5).astype(dtype=np.int8)
                mask_nii.to_filename(outputname_final)
            else:
                # If not, we test the net with the training set to look for misclassified negative with a high
                # probability of being positives according to the net.
                # These voxels will be the input of the second training iteration.
                ''' Here we get the seeds '''
                print(c['c'] + '[' + strftime("%H:%M:%S") + ']    ' +
                      c['g'] + '<Looking for seeds for the final iteration>' + c['nc'])
                patients_names = zip(np.rollaxis(names_lou, 1), np.rollaxis(defo_names_lou, 1)) if defo\
                    else np.rollaxis(names_lou, 1)
                for patient in patients_names:
                    if defo:
                        patient, d_patient = patient
                    else:
                        d_patient = None
                    patient_path = '/'.join(patient[0].rsplit('/')[:-1])
                    outputname = os.path.join(patient_path, 't' + case + sufix + '.nii.gz')
                    mask_nii = load_nii(os.path.join('/'.join(patient[0].rsplit('/')[:-3]), wm_name))
                    try:
                        load_nii(outputname)
                        print(c['c'] + '[' + strftime("%H:%M:%S") + ']    ' +
                              c['g'] + '     Patient ' + patient[0].rsplit('/')[-4] + ' already done' + c['nc'])
                    except IOError:
                        print(c['c'] + '[' + strftime("%H:%M:%S") + ']    ' +
                              c['g'] + '     Testing with patient ' + c['b'] + patient[0].rsplit('/')[-4] + c['nc'])
                        image_nii = load_nii(patient[0])

                        image = test_net(
                            net,
                            patient,
                            mask_nii.get_data(),
                            batch_size,
                            patch_size,
                            defo_size,
                            image_nii.get_data().shape,
                            images,
                            d_patient
                        )

                        print(c['g'] + '                   -- Saving image ' + c['b'] + outputname + c['nc'])
                        image_nii.get_data()[:] = image
                        image_nii.to_filename(outputname)

                ''' Here we perform the last iteration '''
                # Finally we perform the final iteration. After refactoring the code, the code looks almost exactly
                # the same as the training of the first iteration.
                print(c['c'] + '[' + strftime("%H:%M:%S") + ']    ' + c['g'] +
                      '<Running iteration ' + c['b'] + '2' + c['nc'] + c['g'] + '>' + c['nc'])
                f_s = '.f' if freeze else ''
                ub_s = '.ub' if not balanced else ''
                final_s = f_s + ub_s
                outputname2 = os.path.join(path, 't' + case + final_s + sufix + '.iter2.nii.gz')
                net_name = os.path.join(path, 'deep-longitudinal.final' + final_s + sufix + '.')
                if multi:
                    net = create_cnn3d_det_string(
                        cnn_path=layers,
                        input_shape=(None, names.shape[0], patch_width, patch_width, patch_width),
                        convo_size=conv_size,
                        padding=padding,
                        pool_size=2,
                        dense_size=dense_size,
                        number_filters=n_filters,
                        patience=50,
                        multichannel=True,
                        name=net_name,
                        epochs=epochs
                    )
                else:
                    if not freeze:
                        net = create_cnn3d_longitudinal(
                            convo_blocks=conv_blocks,
                            input_shape=(None, names.shape[0], patch_width, patch_width, patch_width),
                            images=images,
                            convo_size=conv_size,
                            pool_size=pool_size,
                            dense_size=dense_size,
                            number_filters=n_filters,
                            padding=padding,
                            drop=0.5,
                            register=register,
                            defo=defo,
                            patience=50,
                            name=net_name,
                            epochs=epochs
                        )
                    else:
                        net.max_epochs = epochs
                        net.on_epoch_finished[0].name = net_name + 'model_weights.pkl'
                        for layer in net.get_all_layers():
                            if not isinstance(layer, DenseLayer):
                                for param in layer.params:
                                    layer.params[param].discard('trainable')

                try:
                    net.load_params_from(net_name + 'model_weights.pkl')
                except IOError:
                    print(c['c'] + '[' + strftime("%H:%M:%S") + ']    ' +
                          c['g'] + 'Loading the data for ' + c['b'] + 'iteration 2' + c['nc'])
                    roi_paths = ['/'.join(name.rsplit('/')[:-1]) for name in names_lou[0, :]]
                    paths = [os.path.join(dir_name, p) for p in np.concatenate([patients[:i], patients[i + 1:]])]
                    ipr_names = [os.path.join(p_path, sub_folder, sub_name) for p_path in paths] if freeze else None
                    pr_names = [os.path.join(p_path, 't' + case + sufix + '.nii.gz') for p_path in roi_paths]
                    mask_names = [os.path.join(p_path, mask_name) for p_path in paths]
                    wm_names = [os.path.join(p_path, wm_name) for p_path in paths]

                    x_train, y_train = load_lesion_cnn_data(
                        names=names_lou,
                        mask_names=mask_names,
                        defo_names=defo_names_lou,
                        roi_names=wm_names,
                        init_pr_names=ipr_names,
                        pr_names=pr_names,
                        patch_size=patch_size,
                        defo_size=defo_size,
                        random_state=seed,
                        balanced=balanced
                    )

                    train_net(net, x_train, y_train, images)
                    with open(net_name + 'layers.pkl', 'wb') as fnet:
                        pickle.dump(net.layers, fnet, -1)
                try:
                    image_nii = load_nii(outputname2)
                    image2 = image_nii.get_data()
                except IOError:
                    print(c['c'] + '[' + strftime("%H:%M:%S") + ']    ' + c['g'] +
                          '<Creating the probability map ' + c['b'] + '2' + c['nc'] + c['g'] + '>' + c['nc'])
                    image_nii = load_nii(os.path.join(path, options['image_folder'], options['flair_f']))
                    mask_nii = load_nii(os.path.join(path, wm_name))
                    image2 = test_net(
                        net,
                        names_test,
                        mask_nii.get_data(),
                        batch_size,
                        patch_size,
                        defo_size,
                        image_nii.get_data().shape,
                        images,
                        defo_names_test
                    )

                    image_nii.get_data()[:] = image2
                    image_nii.to_filename(outputname2)

                image = image1 * image2
                image_nii.get_data()[:] = image
                outputname_mult = os.path.join(path, 't' + case + final_s + sufix + '.iter1_x_2.nii.gz')
                image_nii.to_filename(outputname_mult)

                image = (image1 * image2) > 0.5
                image_nii.get_data()[:] = image
                outputname_final = os.path.join(path, 't' + case + final_s + sufix + '.final.nii.gz')
                image_nii.to_filename(outputname_final)

            # Finally, we compute some metrics that are stored in the metrics file defined above.
            # We plan to replicate the 2008 MS challenge's evaluation measures here.
            gt = load_nii(os.path.join(path, mask_name)).get_data().astype(dtype=np.bool)
            seg1 = image1 > 0.5
            dsc1 = dsc_seg(gt, seg1)
            tpf1 = tp_fraction_seg(gt, seg1)
            fpf1 = fp_fraction_seg(gt, seg1)
            if not greenspan:
                seg2 = image2 > 0.5
                dsc2 = dsc_seg(gt, seg2)
                tpf2 = tp_fraction_seg(gt, seg2)
                fpf2 = fp_fraction_seg(gt, seg2)
                dsc_final = dsc_seg(gt, image)
                tpf_final = tp_fraction_seg(gt, image)
                fpf_final = fp_fraction_seg(gt, image)
            else:
                dsc_final = dsc1
            print(c['c'] + '[' + strftime("%H:%M:%S") + ']    ' + c['g'] +
                  '<DSC ' + c['c'] + case + c['g'] + ' = ' + c['b'] + str(dsc_final) + c['nc'] + c['g'] + '>' + c['nc'])
            f.write('%s;Test 1; %f;%f;%f\n' % (case, dsc1, tpf1, fpf1))
            if not greenspan:
                f.write('%s;Test 2; %f;%f;%f\n' % (case, dsc2, tpf2, fpf2))
                f.write('%s;Final; %f;%f;%f\n' % (case, dsc_final, tpf_final, fpf_final))
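
# dsc_seg, tp_fraction_seg and fp_fraction_seg are project evaluation helpers that are not included
# in this snippet. As a rough, hypothetical reference, a minimal Dice similarity coefficient over two
# binary masks (the overlap measure reported above) could look like this:
import numpy as np

def dice_coefficient_sketch(gt, seg):
    # 2 * |GT & SEG| / (|GT| + |SEG|); returns 0.0 when both masks are empty.
    gt = gt.astype(bool)
    seg = seg.astype(bool)
    denominator = np.count_nonzero(gt) + np.count_nonzero(seg)
    return 2.0 * np.count_nonzero(np.logical_and(gt, seg)) / denominator if denominator else 0.0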
def main():
    # Parse command line options
    parser = argparse.ArgumentParser(description='Test different nets with 3D data.')
    parser.add_argument('-f', '--folder', dest='folder', default='/home/mariano/DATA/Challenge2016/',
                        help="read data from FOLDER")
    parser.add_argument('-v', '--verbose', action='store_true', dest='verbose', default=False)
    parser.add_argument('-p', '--patch-size', action='store', type=int, nargs='+',
                        dest='patch_size', default=(15, 15, 15))
    parser.add_argument('--use-gado', action='store_true', dest='use_gado')
    parser.add_argument('--no-gado', action='store_false', dest='use_gado', default=False)
    parser.add_argument('--gado', action='store', dest='gado', default='GADO_preprocessed.nii.gz')
    parser.add_argument('--use-flair', action='store_true', dest='use_flair')
    parser.add_argument('--no-flair', action='store_false', dest='use_flair', default=True)
    parser.add_argument('--flair', action='store', dest='flair', default='FLAIR_preprocessed.nii.gz')
    parser.add_argument('--use-pd', action='store_true', dest='use_pd')
    parser.add_argument('--no-pd', action='store_false', dest='use_pd', default=True)
    parser.add_argument('--pd', action='store', dest='pd', default='DP_preprocessed.nii.gz')
    parser.add_argument('--use-t2', action='store_true', dest='use_t2')
    parser.add_argument('--no-t2', action='store_false', dest='use_t2', default=True)
    parser.add_argument('--t2', action='store', dest='t2', default='T2_preprocessed.nii.gz')
    parser.add_argument('--use-t1', action='store_true', dest='use_t1')
    parser.add_argument('--no-t1', action='store_false', dest='use_t1', default=True)
    parser.add_argument('--t1', action='store', dest='t1', default='T1_preprocessed.nii.gz')
    parser.add_argument('--mask', action='store', dest='mask', default='Consensus.nii.gz')
    options = vars(parser.parse_args())

    dir_name = options['folder']
    files = sorted(os.listdir(dir_name))
    patients = [f for f in files if os.path.isdir(os.path.join(dir_name, f))]
    n_patients = len(patients)
    for i, patient in enumerate(patients):
        patient_folder = os.path.join(dir_name, patient)
        print('\033[36m[' + strftime("%H:%M:%S") + ']  \033[0mPatient \033[1m' + patient +
              '\033[0m\033[32m (%d/%d)\033[0m' % (i + 1, n_patients))

        mask_nii = load_nii(os.path.join(patient_folder, options['mask']))
        mask_img = mask_nii.get_data()
        lesion_centers = get_mask_voxels(mask_img)

        flair = None
        pd = None
        t1 = None
        t2 = None
        gado = None

        patch_size = tuple(options['patch_size'])
        if options['use_flair']:
            flair = get_patches_from_name(os.path.join(patient_folder, options['flair']),
                                          lesion_centers,
                                          patch_size
                                          )

        if options['use_pd']:
            pd = get_patches_from_name(os.path.join(patient_folder, options['pd']),
                                       lesion_centers,
                                       patch_size
                                       )

        if options['use_t1']:
            t1 = get_patches_from_name(os.path.join(patient_folder, options['t1']),
                                       lesion_centers,
                                       patch_size
                                       )

        if options['use_t2']:
            t2 = get_patches_from_name(os.path.join(patient_folder, options['t2']),
                                       lesion_centers,
                                       patch_size
                                       )

        if options['use_gado']:
            gado = get_patches_from_name(os.path.join(patient_folder, options['gado']),
                                         lesion_centers,
                                         patch_size
                                         )

        patches = np.stack([np.array(data) for data in [flair, pd, t2, gado, t1] if data is not None], axis=1)

        print("Our final vector's size = (" + ','.join([str(num) for num in patches.shape]) + ')')
def get_patches_from_name(filename, centers, patch_size):
    image = load_nii(filename).get_data()
    patches = get_patches(image, centers, patch_size) if len(patch_size) == 3 \
        else [get_patches2_5d(image, centers, patch_size)]
    return patches
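
# A minimal, hypothetical usage of get_patches_from_name; the paths and patch size below are
# illustrative only. get_mask_voxels and load_nii are the same helpers used elsewhere in this listing.
def example_flair_patches():
    lesion_centers = get_mask_voxels(load_nii('/data/patient01/Consensus.nii.gz').get_data())
    flair_patches = get_patches_from_name(
        '/data/patient01/FLAIR_preprocessed.nii.gz', lesion_centers, (15, 15, 15)
    )
    print('%d patches extracted' % len(flair_patches))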
Example #39
0
def load_thresholded_images(name, dir_name, threshold=2.0, datatype=np.float32):
    patients = [f for f in sorted(os.listdir(dir_name)) if os.path.isdir(os.path.join(dir_name, f))]
    image_names = [os.path.join(dir_name, patient, name) for patient in patients]
    images = [load_nii(image_name).get_data() for image_name in image_names]
    rois = [image.astype(dtype=datatype) > threshold for image in images]
    return rois
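
# Hypothetical usage of load_thresholded_images: one boolean ROI per patient folder, keeping the
# voxels whose raw intensity exceeds the threshold (the folder name below is illustrative).
def example_load_rois():
    rois = load_thresholded_images('FLAIR_preprocessed.nii.gz', '/data/CH16', threshold=2.0)
    print('%d ROIs loaded, first one has %d voxels above threshold' % (len(rois), rois[0].sum()))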
def main():
    c = color_codes()
    patch_size = (15, 15, 15)
    dir_name = '/home/sergivalverde/w/CNN/images/CH16'
    patients = [f for f in sorted(os.listdir(dir_name)) if os.path.isdir(os.path.join(dir_name, f))]
    names = np.stack([
        [os.path.join(dir_name, patient, 'FLAIR_preprocessed.nii.gz') for patient in patients],
        [os.path.join(dir_name, patient, 'DP_preprocessed.nii.gz') for patient in patients],
        [os.path.join(dir_name, patient, 'T2_preprocessed.nii.gz') for patient in patients],
        [os.path.join(dir_name, patient, 'T1_preprocessed.nii.gz') for patient in patients]
    ], axis=1)
    seed = np.random.randint(np.iinfo(np.int32).max)
    ''' Here we create an initial net to find conflictive voxels '''
    print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] + '<Running iteration ' + c['b'] + '1>' + c['nc'])
    net_name = '/home/sergivalverde/w/CNN/code/CNN1/miccai_challenge2016/deep-challenge2016.init.'
    net = NeuralNet(
        layers=[
            (InputLayer, dict(name='in', shape=(None, 4, 15, 15, 15))),
            (Conv3DDNNLayer, dict(name='conv1_1', num_filters=32, filter_size=(5, 5, 5), pad='same')),
            (Pool3DDNNLayer, dict(name='avgpool_1', pool_size=2, stride=2, mode='average_inc_pad')),
            (Conv3DDNNLayer, dict(name='conv2_1', num_filters=64, filter_size=(5, 5, 5), pad='same')),
            (Pool3DDNNLayer, dict(name='avgpool_2', pool_size=2, stride=2, mode='average_inc_pad')),
            (DropoutLayer, dict(name='l2drop', p=0.5)),
            (DenseLayer, dict(name='l1', num_units=256)),
            (DenseLayer, dict(name='out', num_units=2, nonlinearity=nonlinearities.softmax)),
        ],
        objective_loss_function=objectives.categorical_crossentropy,
        update=updates.adam,
        update_learning_rate=0.0001,
        on_epoch_finished=[
            SaveWeights(net_name + 'model_weights.pkl', only_best=True, pickle=False),
            SaveTrainingHistory(net_name + 'model_history.pkl'),
            PlotTrainingHistory(net_name + 'training_history.png'),
            EarlyStopping(patience=10)
        ],
        verbose=10,
        max_epochs=50,
        train_split=TrainSplit(eval_size=0.25),
        custom_scores=[('dsc', lambda p, t: 2 * np.sum(p * t[:, 1]) / np.sum((p + t[:, 1])))],
    )

    try:
        net.load_params_from(net_name + 'model_weights.pkl')
    except IOError:
        print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' +
              c['g'] + 'Loading the data for ' + c['b'] + 'iteration 1' + c['nc'])
        # Create the data
        (x, y, _) = load_patches(
            dir_name=dir_name,
            use_flair=True,
            use_pd=True,
            use_t2=True,
            use_t1=True,
            use_gado=False,
            flair_name='FLAIR_preprocessed.nii.gz',
            pd_name='DP_preprocessed.nii.gz',
            t2_name='T2_preprocessed.nii.gz',
            t1_name='T1_preprocessed.nii.gz',
            gado_name=None,
            mask_name='Consensus.nii.gz',
            size=patch_size
        )

        print('-- Permuting the data')
        np.random.seed(seed)
        x_train = np.random.permutation(np.concatenate(x).astype(dtype=np.float32))
        print('-- Permuting the labels')
        np.random.seed(seed)
        y_train = np.random.permutation(np.concatenate(y).astype(dtype=np.int32))
        y_train = y_train[:, y_train.shape[1] // 2 + 1, y_train.shape[2] // 2 + 1, y_train.shape[3] // 2 + 1]
        print('-- Training vector shape = (' + ','.join([str(length) for length in x_train.shape]) + ')')
        print('-- Training labels shape = (' + ','.join([str(length) for length in y_train.shape]) + ')')

        print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
              'Training (' + c['b'] + 'initial' + c['nc'] + c['g'] + ')' + c['nc'])
        # We try to get the last weights to keep improving the net over and over
        net.fit(x_train, y_train)

    ''' Here we get the seeds '''
    print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] + '<Looking for seeds>' + c['nc'])
    for patient in names:
        output_name = os.path.join('/'.join(patient[0].rsplit('/')[:-1]), 'test.iter1.nii.gz')
        try:
            load_nii(output_name)
            print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' +
                  c['g'] + '-- Patient ' + patient[0].rsplit('/')[-2] + ' already done' + c['nc'])
        except IOError:
            print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' +
                  c['g'] + '-- Testing with patient ' + c['b'] + patient[0].rsplit('/')[-2] + c['nc'])
            image_nii = load_nii(patient[0])
            image = np.zeros_like(image_nii.get_data())
            for batch, centers in load_patch_batch(patient, 100000, patch_size):
                y_pred = net.predict_proba(batch)
                [x, y, z] = np.stack(centers, axis=1)
                image[x, y, z] = y_pred[:, 1]

            print(c['g'] + '-- Saving image ' + c['b'] + output_name + c['nc'])
            image_nii.get_data()[:] = image
            image_nii.to_filename(output_name)

    ''' Here we perform the last iteration '''
    print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] + '<Running iteration ' + c['b'] + '2>' + c['nc'])
    net_name = '/home/sergivalverde/w/CNN/code/CNN1/miccai_challenge2016/deep-challenge2016.final.'
    net = NeuralNet(
        layers=[
            (InputLayer, dict(name='in', shape=(None, 4, 15, 15, 15))),
            (Conv3DDNNLayer, dict(name='conv1_1', num_filters=32, filter_size=(5, 5, 5), pad='same')),
            (Pool3DDNNLayer, dict(name='avgpool_1', pool_size=2, stride=2, mode='average_inc_pad')),
            (Conv3DDNNLayer, dict(name='conv2_1', num_filters=64, filter_size=(5, 5, 5), pad='same')),
            (Pool3DDNNLayer, dict(name='avgpool_2', pool_size=2, stride=2, mode='average_inc_pad')),
            (DropoutLayer, dict(name='l2drop', p=0.5)),
            (DenseLayer, dict(name='l1', num_units=256)),
            (DenseLayer, dict(name='out', num_units=2, nonlinearity=nonlinearities.softmax)),
        ],
        objective_loss_function=objectives.categorical_crossentropy,
        update=updates.adam,
        update_learning_rate=0.0001,
        on_epoch_finished=[
            SaveWeights(net_name + 'model_weights.pkl', only_best=True, pickle=False),
            SaveTrainingHistory(net_name + 'model_history.pkl'),
            PlotTrainingHistory(net_name + 'training_history.png'),
        ],
        batch_iterator_train=BatchIterator(batch_size=4096),
        verbose=10,
        max_epochs=2000,
        train_split=TrainSplit(eval_size=0.25),
        custom_scores=[('dsc', lambda p, t: 2 * np.sum(p * t[:, 1]) / np.sum((p + t[:, 1])))],
    )

    try:
        net.load_params_from(net_name + 'model_weights.pkl')
    except IOError:
        pass
    print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' +
          c['g'] + 'Loading the data for ' + c['b'] + 'iteration 2' + c['nc'])
    (x, y, names) = load_patches(
        dir_name='/home/sergivalverde/w/CNN/images/CH16',
        use_flair=True,
        use_pd=True,
        use_t2=True,
        use_t1=True,
        use_gado=False,
        flair_name='FLAIR_preprocessed.nii.gz',
        pd_name='DP_preprocessed.nii.gz',
        t2_name='T2_preprocessed.nii.gz',
        gado_name=None,
        t1_name='T1_preprocessed.nii.gz',
        mask_name='Consensus.nii.gz',
        size=patch_size,
        roi_name='test.iter1.nii.gz'
    )

    print('-- Permuting the data')
    np.random.seed(seed)
    x_train = np.random.permutation(np.concatenate(x).astype(dtype=np.float32))
    print('-- Permuting the labels')
    np.random.seed(seed)
    y_train = np.random.permutation(np.concatenate(y).astype(dtype=np.int32))
    y_train = y_train[:, y_train.shape[1] // 2 + 1, y_train.shape[2] // 2 + 1, y_train.shape[3] // 2 + 1]
    print('-- Training vector shape = (' + ','.join([str(length) for length in x_train.shape]) + ')')
    print('-- Training labels shape = (' + ','.join([str(length) for length in y_train.shape]) + ')')
    print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' +
          c['g'] + 'Training (' + c['b'] + 'final' + c['nc'] + c['g'] + ')' + c['nc'])
    net.fit(x_train, y_train)
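
# In this snippet the second net is only trained. In the other variants of this pipeline shown in this
# listing, both iterations' probability maps are then combined voxel-wise and thresholded at 0.5.
# A minimal sketch of that final step (the variable names are assumptions, not part of the code above):
import numpy as np

def combine_iterations_sketch(prob_map_iter1, prob_map_iter2, output_nii, output_name):
    final_mask = (prob_map_iter1 * prob_map_iter2) > 0.5
    output_nii.get_data()[:] = final_mask.astype(np.int8)
    output_nii.to_filename(output_name)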
Example #41
0
def load_thresholded_norm_images_by_name(image_names, mask_names=None, threshold=2.0):
    masks = [load_nii(mask).get_data() for mask in mask_names] if mask_names else None
    return threshold_image_list(norm_image_generator(image_names), threshold, masks)
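
# threshold_image_list is a project helper that is not shown in this listing. It is assumed here to
# binarise each image in the list (or generator) at the given threshold, optionally restricted to a
# brain mask; a hypothetical reimplementation might look like this:
import numpy as np

def threshold_image_list_sketch(images, threshold, masks=None):
    if masks is None:
        return [im > threshold for im in images]
    return [np.logical_and(im > threshold, mask.astype(bool)) for im, mask in zip(images, masks)]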
Example #42
0
def norm_image_generator(image_names):
    for name in image_names:
        im = load_nii(name).get_data()
        yield (im - im[np.nonzero(im)].mean()) / im[np.nonzero(im)].std()
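
# The generator above applies a zero-mean, unit-variance rescaling computed over the non-zero voxels
# only. A small self-contained check on a dummy volume (not project data):
import numpy as np

dummy = np.zeros((4, 4, 4), dtype=np.float32)
dummy[1:3, 1:3, 1:3] = np.random.rand(2, 2, 2) * 100 + 1  # fake foreground intensities
foreground = dummy[np.nonzero(dummy)]
normalised = (dummy - foreground.mean()) / foreground.std()
# The foreground voxels of `normalised` now have zero mean and unit standard deviation.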
def main():
    @contextlib.contextmanager
    def dummy_file():
        yield None

    # Parse command line options
    parser = argparse.ArgumentParser(description='Test different nets with 3D data.')
    group_in = parser.add_mutually_exclusive_group()
    group_out = parser.add_mutually_exclusive_group()

    folder_help = 'Folder with the files to evaluate. Remember to include an init_names.py for the evaluation pairs.'
    files_help = 'Pair of files to be compared. The first is the GT and the second the file you want to evaluate.'

    group_in.add_argument('-f', '--folder', help=folder_help)
    group_in.add_argument('--files', nargs=2, help=files_help)

    general_help = 'General evaluation. Based on the 2008 MS challenge\'s measures. ' \
                   'Volume and detection absolute measures are also included for all the images.'
    sizes_help = 'Evaluation based on region sizes. ' \
                 'Includes TPF, FPF and DSC for detection and DSC for segmentation. ' \
                 'The size of TP is determined by the GT size, while the FP size is determined by the FP lesion.'

    group_out.add_argument('-g', '--general', action='store_true', help=general_help)
    group_out.add_argument('-s', '--sizes', dest='sizes', nargs='+', type=int, default=[3, 11, 51], help=sizes_help)

    args = parser.parse_args()

    if args.folder:
        folder_name = args.folder
        sys.path = sys.path + [folder_name]
        from init_names import get_names_from_folder
        gt_names, all_names = get_names_from_folder(folder_name)

    elif args.files:
        folder_name = os.getcwd()
        gt_names = [args.files[0]]
        all_names = [[args.files[1]]]
    if args.general:
        print('\033[32;1mGeneral\033[0m\033[32m evaluation\033[0m')
        results_name = 'results.g.csv'
    else:
        print('\033[32;1mLesion size\033[0m\033[32m evaluation\033[0m')
        sizes = args.sizes
        results_name = 'results.s%s.csv' % '.'.join(['%d' % s for s in sizes])
    with open(os.path.join(folder_name, results_name), 'w') if args.folder else dummy_file() as f:
        for gt_name, names in zip(gt_names, all_names):
            print('\033[32mEvaluating with ground truth \033[32;1m' + gt_name + '\033[0m')

            gt_nii = load_nii(gt_name)
            gt = gt_nii.get_data()
            spacing = dict(gt_nii.header.items())['pixdim'][1:4]

            for name in names:
                name = ''.join(name)
                print('\033[32m-- vs \033[32;1m' + name + '\033[0m')
                lesion = load_nii(name).get_data()

                if args.general:
                    dist = average_surface_distance(gt, lesion, spacing)
                    tpfv = tp_fraction_seg(gt, lesion)
                    fpfv = fp_fraction_seg(gt, lesion)
                    dscv = dsc_seg(gt, lesion)
                    tpfl = tp_fraction_det(gt, lesion)
                    fpfl = fp_fraction_det(gt, lesion)
                    dscl = dsc_det(gt, lesion)
                    tp = true_positive_det(lesion, gt)
                    gt_d = num_regions(gt)
                    lesion_s = num_voxels(lesion)
                    gt_s = num_voxels(gt)
                    pdsc = probabilistic_dsc_seg(gt, lesion)
                    if f:
                        measures = (gt_name, name, dist, tpfv, fpfv, dscv, tpfl, fpfl, dscl, tp, gt_d, lesion_s, gt_s)
                        f.write('%s;%s;%f;%f;%f;%f;%f;%f;%f;%d;%d;%d;%d\n' % measures)
                    else:
                        measures = (dist, tpfv, fpfv, dscv, tpfl, fpfl, dscl, tp, gt_d, lesion_s, gt_s, pdsc)
                        print('SurfDist TPFV FPFV DSCV TPFL FPFL DSCL TPL GTL Voxels GTV PrDSC')
                        print('%f %f %f %f %f %f %f %d %d %d %d %f' % measures)
                else:
                    tpf, fpf, dscd, dscs = analysis_by_sizes(gt, lesion, sizes)
                    row_names = '%s;%s;' % (gt_name, name)
                    measures = ';'.join(['%f;%f;%f;%f' % (tpf_i, fpf_i, dscd_i, dscs_i)
                                         for tpf_i, fpf_i, dscd_i, dscs_i in zip(tpf, fpf, dscd, dscs)])
                    if f:
                        f.write(row_names + measures + '\n')
                    else:
                        intervals = ['\t\t[%d-%d)\t\t|' % (mins, maxs) for mins, maxs in zip(sizes[:-1], sizes[1:])]
                        intervals = ''.join(intervals) + '\t\t[%d-inf)\t|' % sizes[-1]
                        measures_s = 'TPF\tFPF\tDSCd\tDSCs\t|' * len(sizes)
                        measures = ''.join(['%.2f\t%.2f\t%.2f\t%.2f\t|' % (tpf_i, fpf_i, dscd_i, dscs_i)
                                            for tpf_i, fpf_i, dscd_i, dscs_i in zip(tpf, fpf, dscd, dscs)])
                        print(intervals)
                        print(measures_s)
                        print(measures)
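
# The general evaluation writes one semicolon-separated line per (ground truth, segmentation) pair.
# A minimal way to read results.g.csv back (column positions follow the f.write call above):
import csv

def read_general_results(results_path):
    with open(results_path) as results:
        for row in csv.reader(results, delimiter=';'):
            gt_name, seg_name = row[0], row[1]
            dist, tpfv, fpfv, dscv = [float(value) for value in row[2:6]]
            print('%s vs %s: DSCv = %.3f' % (gt_name, seg_name, dscv))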
Example #44
0
def test_all_nets(
        path,
        names_test,
        defo_names_test,
        roi_name,
        nets,
        case,
        batch_size,
        patch_sizes,
        defo_sizes,
        dense_sizes,
        n_filters,
        sufixes,
        iter_name,
        train_case=False
):
    c = color_codes()
    net_combos = itertools.product(zip(patch_sizes, defo_sizes), n_filters, dense_sizes)

    image_nii = load_nii(names_test[0])
    mask_nii = load_nii(roi_name)

    images = list()

    for net, sufix, ((patch_size, defo_size), _, _) in zip(nets, sufixes, net_combos):
        outputname = os.path.join(path, 't' + case + sufix + iter_name + '.nii.gz')
        # We save time by checking if we already tested that patient.
        try:
            image_nii = load_nii(outputname)
            image = image_nii.get_data()
            if train_case:
                print(c['c'] + '[' + strftime("%H:%M:%S") + ']      ' +
                      c['g'] + '     Patient ' + names_test[0].rsplit('/')[-4] + ' already done' + c['nc'])
        except IOError:
            if train_case:
                print(c['c'] + '[' + strftime("%H:%M:%S") + ']      ' +
                      c['g'] + '     Testing with patient ' + c['b'] + names_test[0].rsplit('/')[-4] + c['nc'])
            else:
                print(c['c'] + '[' + strftime("%H:%M:%S") + ']      ' + c['g'] +
                      '<Creating the probability map for net: ' +
                      c['b'] + sufix + c['nc'] + c['g'] + '>' + c['nc'])
            image = test_net(
                net=net,
                names=names_test,
                mask=mask_nii.get_data(),
                batch_size=batch_size,
                patch_size=patch_size,
                defo_size=defo_size,
                image_size=image_nii.get_data().shape,
                images=['flair', 'pd', 't2'],
                d_names=defo_names_test
            )

            if train_case:
                print(c['g'] + '                     -- Saving image ' + c['b'] + outputname + c['nc'])
            image_nii.get_data()[:] = image
            image_nii.to_filename(outputname)

        images.append(image)

    return images
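
# test_all_nets walks every (patch size/defo size, filter count, dense size) configuration in lockstep
# with the nets and sufixes lists. A tiny illustration of how those combinations are enumerated
# (the values below are made up):
import itertools

patch_defo_pairs = [((11, 11, 11), (5, 5, 5)), ((15, 15, 15), (9, 9, 9))]
for (patch_size, defo_size), filters, dense_size in itertools.product(patch_defo_pairs, [32, 64], [256]):
    print(patch_size, defo_size, filters, dense_size)
# 2 patch/defo pairs x 2 filter counts x 1 dense size = 4 combinations.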
def main():

    parser = argparse.ArgumentParser(description='Test different nets with 3D data.')
    parser.add_argument('-f', '--folder', dest='dir_name', default='/home/sergivalverde/w/CNN/images/CH16')
    parser.add_argument('--flair', action='store', dest='flair', default='FLAIR_preprocessed.nii.gz')
    parser.add_argument('--pd', action='store', dest='pd', default='DP_preprocessed.nii.gz')
    parser.add_argument('--t2', action='store', dest='t2', default='T2_preprocessed.nii.gz')
    parser.add_argument('--t1', action='store', dest='t1', default='T1_preprocessed.nii.gz')
    parser.add_argument('--mask', action='store', dest='mask', default='Consensus.nii.gz')
    parser.add_argument('--old', action='store_true', dest='old', default=False)
    options = vars(parser.parse_args())

    c = color_codes()
    patch_size = (15, 15, 15)
    batch_size = 100000
    # Create the data
    patients = [f for f in sorted(os.listdir(options['dir_name']))
                if os.path.isdir(os.path.join(options['dir_name'], f))]
    flair_names = [os.path.join(options['dir_name'], patient, options['flair']) for patient in patients]
    pd_names = [os.path.join(options['dir_name'], patient, options['pd']) for patient in patients]
    t2_names = [os.path.join(options['dir_name'], patient, options['t2']) for patient in patients]
    t1_names = [os.path.join(options['dir_name'], patient, options['t1']) for patient in patients]
    names = np.stack([flair_names, pd_names, t2_names, t1_names])
    seed = np.random.randint(np.iinfo(np.int32).max)

    print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + 'Starting leave-one-out' + c['nc'])

    for i in range(0, 15):
        case = names[0, i].rsplit('/')[-2]
        path = '/'.join(names[0, i].rsplit('/')[:-1])
        print(c['c'] + '[' + strftime("%H:%M:%S") + ']  ' + c['nc'] + 'Patient ' + c['b'] + case + c['nc'])
        print(c['c'] + '[' + strftime("%H:%M:%S") + ']    ' + c['g'] +
              '<Running iteration ' + c['b'] + '1' + c['nc'] + c['g'] + '>' + c['nc'])
        net_name = os.path.join(path, 'deep-challenge2016.init.')
        net = NeuralNet(
            layers=[
                (InputLayer, dict(name='in', shape=(None, 4, 15, 15, 15))),
                (Conv3DDNNLayer, dict(name='conv1_1', num_filters=32, filter_size=(5, 5, 5), pad='same')),
                (Pool3DDNNLayer, dict(name='avgpool_1', pool_size=2, stride=2, mode='average_inc_pad')),
                (Conv3DDNNLayer, dict(name='conv2_1', num_filters=64, filter_size=(5, 5, 5), pad='same')),
                (Pool3DDNNLayer, dict(name='avgpool_2', pool_size=2, stride=2, mode='average_inc_pad')),
                (DropoutLayer, dict(name='l2drop', p=0.5)),
                (DenseLayer, dict(name='l1', num_units=256)),
                (DenseLayer, dict(name='out', num_units=2, nonlinearity=nonlinearities.softmax)),
            ],
            objective_loss_function=objectives.categorical_crossentropy,
            update=updates.adam,
            update_learning_rate=0.0001,
            on_epoch_finished=[
                SaveWeights(net_name + 'model_weights.pkl', only_best=True, pickle=False),
                EarlyStopping(patience=10)
            ],
            verbose=10,
            max_epochs=50,
            train_split=TrainSplit(eval_size=0.25),
            custom_scores=[('dsc', lambda p, t: 2 * np.sum(p * t[:, 1]) / np.sum((p + t[:, 1])))],
        )
        flair_name = os.path.join(path, options['flair'])
        pd_name = os.path.join(path, options['pd'])
        t2_name = os.path.join(path, options['t2'])
        t1_name = os.path.join(path, options['t1'])
        names_test = np.array([flair_name, pd_name, t2_name, t1_name])
        outputname1 = os.path.join(path, 'test' + str(i) + '.iter1.nii.gz')
        try:
            net.load_params_from(net_name + 'model_weights.pkl')
        except IOError:
            print(c['c'] + '[' + strftime("%H:%M:%S") + ']    ' +
                  c['g'] + 'Loading the data for ' + c['b'] + 'iteration 1' + c['nc'])
            names_lou = np.concatenate([names[:, :i], names[:, i + 1:]], axis=1)
            paths = ['/'.join(name.rsplit('/')[:-1]) for name in names_lou[0, :]]
            mask_names = [os.path.join(p_path, 'Consensus.nii.gz') for p_path in paths]

            x_train, y_train = load_iter1_data(
                names_lou=names_lou,
                mask_names=mask_names,
                patch_size=patch_size,
                seed=seed
            )

            print('                Training vector shape ='
                  ' (' + ','.join([str(length) for length in x_train.shape]) + ')')
            print('                Training labels shape ='
                  ' (' + ','.join([str(length) for length in y_train.shape]) + ')')

            print(c['c'] + '[' + strftime("%H:%M:%S") + ']    ' + c['g'] +
                  'Training (' + c['b'] + 'initial' + c['nc'] + c['g'] + ')' + c['nc'])
            # We try to get the last weights to keep improving the net over and over
            net.fit(x_train, y_train)

        try:
            image_nii = load_nii(outputname1)
            image1 = image_nii.get_data()
        except IOError:
            print(c['c'] + '[' + strftime("%H:%M:%S") + ']    ' + c['g'] +
                  '<Creating the probability map ' + c['b'] + '1' + c['nc'] + c['g'] + '>' + c['nc'])
            flair_name = os.path.join(path, options['flair'])
            image_nii = load_nii(flair_name)
            image1 = np.zeros_like(image_nii.get_data())
            print('              0% of data tested', end='\r')
            sys.stdout.flush()
            for batch, centers, percent in load_patch_batch_percent(names_test, batch_size, patch_size):
                y_pred = net.predict_proba(batch)
                print('              %f%% of data tested' % percent, end='\r')
                sys.stdout.flush()
                [x, y, z] = np.stack(centers, axis=1)
                image1[x, y, z] = y_pred[:, 1]

            image_nii.get_data()[:] = image1
            image_nii.to_filename(outputname1)

        ''' Here we get the seeds '''
        print(c['c'] + '[' + strftime("%H:%M:%S") + ']    ' +
              c['g'] + '<Looking for seeds for the final iteration>' + c['nc'])
        for patient in np.rollaxis(np.concatenate([names[:, :i], names[:, i+1:]], axis=1), 1):
            outputname = os.path.join('/'.join(patient[0].rsplit('/')[:-1]), 'test' + str(i) + '.iter1.nii.gz')
            try:
                load_nii(outputname)
                print(c['c'] + '[' + strftime("%H:%M:%S") + ']    ' +
                      c['g'] + '     Patient ' + patient[0].rsplit('/')[-2] + ' already done' + c['nc'])
            except IOError:
                print(c['c'] + '[' + strftime("%H:%M:%S") + ']    ' +
                      c['g'] + '     Testing with patient ' + c['b'] + patient[0].rsplit('/')[-2] + c['nc'])
                image_nii = load_nii(patient[0])
                image = np.zeros_like(image_nii.get_data())
                print('    0% of data tested', end='\r')
                for batch, centers, percent in load_patch_batch_percent(patient, 100000, patch_size):
                    y_pred = net.predict_proba(batch)
                    print('    %f%% of data tested' % percent, end='\r')
                    [x, y, z] = np.stack(centers, axis=1)
                    image[x, y, z] = y_pred[:, 1]

                print(c['g'] + '                   -- Saving image ' + c['b'] + outputname + c['nc'])
                image_nii.get_data()[:] = image
                image_nii.to_filename(outputname)

        ''' Here we perform the last iteration '''
        print(c['c'] + '[' + strftime("%H:%M:%S") + ']    ' + c['g'] +
              '<Running iteration ' + c['b'] + '2' + c['nc'] + c['g'] + '>' + c['nc'])
        outputname2 = os.path.join(path, 'test' + str(i) + '.old.iter2.nii.gz') if options['old'] \
            else os.path.join(path, 'test' + str(i) + '.new.iter2.nii.gz')
        net_name = os.path.join(path, 'deep-challenge2016.final.old.') if options['old'] \
            else os.path.join(path, 'deep-challenge2016.final.new.')
        net = NeuralNet(
            layers=[
                (InputLayer, dict(name='in', shape=(None, 4, 15, 15, 15))),
                (Conv3DDNNLayer, dict(name='conv1_1', num_filters=32, filter_size=(5, 5, 5), pad='same')),
                (Pool3DDNNLayer, dict(name='avgpool_1', pool_size=2, stride=2, mode='average_inc_pad')),
                (Conv3DDNNLayer, dict(name='conv2_1', num_filters=64, filter_size=(5, 5, 5), pad='same')),
                (Pool3DDNNLayer, dict(name='avgpool_2', pool_size=2, stride=2, mode='average_inc_pad')),
                (DropoutLayer, dict(name='l2drop', p=0.5)),
                (DenseLayer, dict(name='l1', num_units=256)),
                (DenseLayer, dict(name='out', num_units=2, nonlinearity=nonlinearities.softmax)),
            ],
            objective_loss_function=objectives.categorical_crossentropy,
            update=updates.adam,
            update_learning_rate=0.0001,
            on_epoch_finished=[
                SaveWeights(net_name + 'model_weights.pkl', only_best=True, pickle=False),
                EarlyStopping(patience=50)
            ],
            batch_iterator_train=BatchIterator(batch_size=4096),
            verbose=10,
            max_epochs=2000,
            train_split=TrainSplit(eval_size=0.25),
            custom_scores=[('dsc', lambda p, t: 2 * np.sum(p * t[:, 1]) / np.sum((p + t[:, 1])))],
        )

        try:
            net.load_params_from(net_name + 'model_weights.pkl')
        except IOError:
            print(c['c'] + '[' + strftime("%H:%M:%S") + ']    ' +
                  c['g'] + 'Loading the data for ' + c['b'] + 'iteration 2' + c['nc'])
            names_lou = np.concatenate([names[:, :i], names[:, i + 1:]], axis=1)
            paths = ['/'.join(name.rsplit('/')[:-1]) for name in names_lou[0, :]]
            roi_names = [os.path.join(p_path, 'test' + str(i) + '.iter1.nii.gz') for p_path in paths]
            mask_names = [os.path.join(p_path, 'Consensus.nii.gz') for p_path in paths]

            x_train, y_train = load_iter2_data(
                names_lou=names_lou,
                mask_names=mask_names,
                roi_names=roi_names,
                patch_size=patch_size,
                seed=seed,
                old=options['old']
            )

            print('              Training vector shape = (' + ','.join([str(length) for length in x_train.shape]) + ')')
            print('              Training labels shape = (' + ','.join([str(length) for length in y_train.shape]) + ')')
            print(c['c'] + '[' + strftime("%H:%M:%S") + ']    ' +
                  c['g'] + 'Training (' + c['b'] + 'final' + c['nc'] + c['g'] + ')' + c['nc'])
            net.fit(x_train, y_train)
        try:
            image_nii = load_nii(outputname2)
            image2 = image_nii.get_data()
        except IOError:
            print(c['c'] + '[' + strftime("%H:%M:%S") + ']    ' + c['g'] +
                  '<Creating the probability map ' + c['b'] + '2' + c['nc'] + c['g'] + '>' + c['nc'])
            image_nii = load_nii(flair_name)
            image2 = np.zeros_like(image_nii.get_data())
            print('              0% of data tested', end='\r')
            sys.stdout.flush()
            for batch, centers, percent in load_patch_batch_percent(names_test, batch_size, patch_size):
                y_pred = net.predict_proba(batch)
                print('              %f%% of data tested' % percent, end='\r')
                sys.stdout.flush()
                [x, y, z] = np.stack(centers, axis=1)
                image2[x, y, z] = y_pred[:, 1]

            image_nii.get_data()[:] = image2
            image_nii.to_filename(outputname2)

        image = (image1 * image2) > 0.5
        seg = np.roll(np.roll(image, 1, axis=0), 1, axis=1)
        image_nii.get_data()[:] = seg
        outputname_final = os.path.join(path, 'test' + str(i) + '.old.final.nii.gz') if options['old'] \
            else os.path.join(path, 'test' + str(i) + '.new.final.nii.gz')
        image_nii.to_filename(outputname_final)

        gt = load_nii(os.path.join(path, 'Consensus.nii.gz')).get_data().astype(dtype=np.bool)
        dsc = np.sum(2.0 * np.logical_and(gt, seg)) / (np.sum(gt) + np.sum(seg))
        print(c['c'] + '[' + strftime("%H:%M:%S") + ']    ' + c['g'] +
              '<DSC value for ' + c['c'] + case + c['g'] + ' = ' + c['b'] + str(dsc) + c['nc'] + c['g'] + '>' + c['nc'])
Example #46
0
def load_thresholded_images_by_name(image_names, threshold=2.0):
    images = [load_nii(image_name).get_data() for image_name in image_names]
    return threshold_image_list(images, threshold)
def patches_network_segmentation(options, mode):
    c = color_codes()
    image_sufix = get_sufix(
        options['use_flair'],
        options['use_pd'],
        options['use_t2'],
        options['use_gado'],
        options['use_t1']
    )
    size_sufix = '.'.join([str(length) for length in tuple(options['patch_size'])])
    sufixes = image_sufix + '.' + size_sufix
    mode_write = mode + '.mc' if options['multi_channel'] else mode + '.sc'

    print(c['g'] + 'Loading the data for the patch-based ' + c['b'] + mode + c['nc'])
    # Create the data
    (x, y, names) = load_patches(
        dir_name=options['folder'],
        use_flair=options['use_flair'],
        use_pd=options['use_pd'],
        use_t2=options['use_t2'],
        use_gado=options['use_gado'],
        use_t1=options['use_t1'],
        flair_name=options['flair'],
        pd_name=options['pd'],
        t2_name=options['t2'],
        gado_name=options['gado'],
        t1_name=options['t1'],
        mask_name=options['mask'],
        size=tuple(options['patch_size'])
    )

    print(c['g'] + 'Starting leave-one-out for the patch-based ' + c['b'] + mode + c['nc'])

    n_channels = x[0].shape[1]
    channels = range(0, n_channels)
    patch_size = tuple(options['patch_size'])

    for x_train, y_train, i in leave_one_out(x, y):
        print('Running patient ' + c['c'] + names[0, i].rsplit('/')[-2] + c['nc'])
        seed = np.random.randint(np.iinfo(np.int32).max)
        print('-- Permuting the data')
        np.random.seed(seed)
        x_train = np.random.permutation(np.concatenate(x_train).astype(dtype=np.float32))
        print('-- Permuting the labels')
        np.random.seed(seed)
        y_train = np.random.permutation(np.concatenate(y_train).astype(dtype=np.int32))
        y_train = y_train.reshape([y_train.shape[0], -1])
        print('-- Training vector shape = (' + ','.join([str(length) for length in x_train.shape]) + ')')
        print('-- Training labels shape = (' + ','.join([str(length) for length in y_train.shape]) + ')')

        print(c['g'] + '-- Creating the ' + c['b'] + 'patch-based ' + c['b'] + mode + c['nc'])

        # Train the net and save it
        net_name = os.path.join(
            os.path.split(names[0, i])[0], 'patches_' + mode + '.c' + str(i) + '.' + sufixes
        )
        net_types = {
            'unet': create_unet3d_seg_string,
            'unet-short': create_unet3d_shortcuts_seg_string
        }
        net = net_types[mode](
            ''.join(options['layers']),
            x_train.shape,
            options['convo_size'],
            options['pool_size'],
            options['number_filters'],
            options['patience'],
            options['multi_channel'],
            net_name
        )

        print(c['g'] + '-- Training the ' + c['b'] + 'patch-based ' + c['b'] + mode + c['nc'])
        # We try to get the last weights to keep improving the net over and over
        try:
            net.load_params_from(net_name + 'model_weights.pkl')
        except IOError:
            pass

        if options['multi_channel']:
            net.fit(x_train, y_train)
        else:
            x_train = np.split(x_train, n_channels, axis=1)
            inputs = dict(
                [('\033[30minput_%d\033[0m' % ch, channel) for (ch, channel) in zip(channels, x_train)])
            net.fit(inputs, y_train)

        print(c['g'] + '-- Creating the test probability maps' + c['nc'])
        image_nii = load_nii(names[0, i])
        image = np.zeros_like(image_nii.get_data())
        for batch, centers, _ in load_patch_batch_percent(names[:, i], options['batch_size'], patch_size):
            if options['multi_channel']:
                y_pred = net.predict_proba(batch)
            else:
                batch = np.split(batch, n_channels, axis=1)
                inputs = dict(
                    [('\033[30minput_%d\033[0m' % ch, channel) for (ch, channel) in zip(channels, batch)])
                y_pred = net.predict_proba(inputs)

            image += sum_patches_to_image(y_pred, centers, image)

        image_nii.get_data()[:] = image
        name = mode_write + '.c' + str(i) + '.' + sufixes + '.nii.gz'
        path = '/'.join(names[0, i].rsplit('/')[:-1])
        image_nii.to_filename(os.path.join(path, name))
Example #48
0
def load_masks(mask_names):
    for image_name in mask_names:
        yield load_nii(image_name).get_data().astype(dtype=np.bool)
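
# load_masks is a generator, so it has to be materialised when random access is needed.
# Hypothetical usage (the paths below are illustrative only):
def example_load_masks():
    masks = list(load_masks(['/data/p01/Consensus.nii.gz', '/data/p02/Consensus.nii.gz']))
    print('%d masks, first one has %d positive voxels' % (len(masks), masks[0].sum()))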
Example #49
0
def norm_defo_generator(image_names):
    for name in image_names:
        im = load_nii(name).get_data()
        yield im / np.linalg.norm(im, axis=4).std()
def main():
    # Parse command line options
    parser = argparse.ArgumentParser(description='Test different nets with 3D data.')
    parser.add_argument('--flair', action='store', dest='flair', default='FLAIR_preprocessed.nii.gz')
    parser.add_argument('--pd', action='store', dest='pd', default='DP_preprocessed.nii.gz')
    parser.add_argument('--t2', action='store', dest='t2', default='T2_preprocessed.nii.gz')
    parser.add_argument('--t1', action='store', dest='t1', default='T1_preprocessed.nii.gz')
    parser.add_argument('--output', action='store', dest='output', default='output.nii.gz')
    parser.add_argument('--no-docker', action='store_false', dest='docker', default=True)

    c = color_codes()
    patch_size = (15, 15, 15)
    options = vars(parser.parse_args())
    batch_size = 10000
    min_size = 30

    print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
          '<Loading the net ' + c['b'] + '1' + c['nc'] + c['g'] + '>' + c['nc'])
    net_name = '/usr/local/nets/deep-challenge2016.init.model_weights.pkl' if options['docker'] \
        else './deep-challenge2016.init.model_weights.pkl'
    net = NeuralNet(
        layers=[
            (InputLayer, dict(name='in', shape=(None, 4, 15, 15, 15))),
            (Conv3DDNNLayer, dict(name='conv1_1', num_filters=32, filter_size=(5, 5, 5), pad='same')),
            (Pool3DDNNLayer, dict(name='avgpool_1', pool_size=2, stride=2, mode='average_inc_pad')),
            (Conv3DDNNLayer, dict(name='conv2_1', num_filters=64, filter_size=(5, 5, 5), pad='same')),
            (Pool3DDNNLayer, dict(name='avgpool_2', pool_size=2, stride=2, mode='average_inc_pad')),
            (DropoutLayer, dict(name='l2drop', p=0.5)),
            (DenseLayer, dict(name='l1', num_units=256)),
            (DenseLayer, dict(name='out', num_units=2, nonlinearity=nonlinearities.softmax)),
        ],
        objective_loss_function=objectives.categorical_crossentropy,
        update=updates.adam,
        update_learning_rate=0.0001,
        verbose=10,
        max_epochs=50,
        train_split=TrainSplit(eval_size=0.25),
        custom_scores=[('dsc', lambda p, t: 2 * np.sum(p * t[:, 1]) / np.sum((p + t[:, 1])))],
    )
    net.load_params_from(net_name)

    print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
          '<Creating the probability map ' + c['b'] + '1' + c['nc'] + c['g'] + '>' + c['nc'])
    names = np.array([options['flair'], options['pd'], options['t2'], options['t1']])
    image_nii = load_nii(options['flair'])
    image1 = np.zeros_like(image_nii.get_data())
    print('0% of data tested', end='\r')
    sys.stdout.flush()
    for batch, centers, percent in load_patch_batch_percent(names, batch_size, patch_size):
        y_pred = net.predict_proba(batch)
        print('%f%% of data tested' % percent, end='\r')
        sys.stdout.flush()
        [x, y, z] = np.stack(centers, axis=1)
        image1[x, y, z] = y_pred[:, 1]

    print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
          '<Loading the net ' + c['b'] + '2' + c['nc'] + c['g'] + '>' + c['nc'])
    net_name = '/usr/local/nets/deep-challenge2016.final.model_weights.pkl' if options['docker'] \
        else './deep-challenge2016.final.model_weights.pkl'
    net = NeuralNet(
        layers=[
            (InputLayer, dict(name='in', shape=(None, 4, 15, 15, 15))),
            (Conv3DDNNLayer, dict(name='conv1_1', num_filters=32, filter_size=(5, 5, 5), pad='same')),
            (Pool3DDNNLayer, dict(name='avgpool_1', pool_size=2, stride=2, mode='average_inc_pad')),
            (Conv3DDNNLayer, dict(name='conv2_1', num_filters=64, filter_size=(5, 5, 5), pad='same')),
            (Pool3DDNNLayer, dict(name='avgpool_2', pool_size=2, stride=2, mode='average_inc_pad')),
            (DropoutLayer, dict(name='l2drop', p=0.5)),
            (DenseLayer, dict(name='l1', num_units=256)),
            (DenseLayer, dict(name='out', num_units=2, nonlinearity=nonlinearities.softmax)),
        ],
        objective_loss_function=objectives.categorical_crossentropy,
        update=updates.adam,
        update_learning_rate=0.0001,
        batch_iterator_train=BatchIterator(batch_size=4096),
        verbose=10,
        max_epochs=2000,
        train_split=TrainSplit(eval_size=0.25),
        custom_scores=[('dsc', lambda t, p: 2 * np.sum(t * p[:, 1]) / np.sum((t + p[:, 1])))],
    )
    net.load_params_from(net_name)

    print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
          '<Creating the probability map ' + c['b'] + '2' + c['nc'] + c['g'] + '>' + c['nc'])
    image2 = np.zeros_like(image_nii.get_data())
    print('0% of data tested', end='\r')
    sys.stdout.flush()
    for batch, centers, percent in load_patch_batch_percent(names, batch_size, patch_size):
        y_pred = net.predict_proba(batch)
        print('%f%% of data tested' % percent, end='\r')
        sys.stdout.flush()
        [x, y, z] = np.stack(centers, axis=1)
        image2[x, y, z] = y_pred[:, 1]

    print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
          '<Saving to file ' + c['b'] + options['output'] + c['nc'] + c['g'] + '>' + c['nc'])
    image = (image1 * image2) > 0.5

    # Filter out candidate lesions smaller than min_size voxels
    labels, num_labels = ndimage.label(image)
    lesion_list = np.unique(labels)
    num_elements_by_lesion = ndimage.labeled_comprehension(image, labels, lesion_list, np.sum, float, 0)
    filt_min_size = num_elements_by_lesion >= min_size
    lesion_list = lesion_list[filt_min_size]
    image = reduce(np.logical_or, map(lambda lab: lab == labels, lesion_list))

    image_nii.get_data()[:] = np.roll(np.roll(image, 1, axis=0), 1, axis=1)
    path = '/'.join(options['t1'].rsplit('/')[:-1])
    outputname = options['output'].rsplit('/')[-1]
    image_nii.to_filename(os.path.join(path, outputname))

    if not options['docker']:
        path = '/'.join(options['output'].rsplit('/')[:-1])
        case = options['output'].rsplit('/')[-1]
        gt = load_nii(os.path.join(path, 'Consensus.nii.gz')).get_data().astype(dtype=np.bool)
        dsc = np.sum(2.0 * np.logical_and(gt, image)) / (np.sum(gt) + np.sum(image))
        print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
              '<DSC value for ' + c['c'] + case + c['g'] + ' = ' + c['b'] + str(dsc) + c['nc'] + c['g'] + '>' + c['nc'])
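
# The minimum-size filtering above keeps only connected components with at least min_size voxels.
# A small standalone example of the same idea with scipy.ndimage (toy 2D mask, threshold of 10 voxels):
import numpy as np
from scipy import ndimage

toy_mask = np.zeros((8, 8), dtype=bool)
toy_mask[0:2, 0:2] = True    # 4-voxel component (removed)
toy_mask[4:8, 4:8] = True    # 16-voxel component (kept)
toy_labels, toy_num = ndimage.label(toy_mask)
component_sizes = ndimage.sum(toy_mask, toy_labels, range(1, toy_num + 1))
kept_labels = np.nonzero(component_sizes >= 10)[0] + 1
filtered_mask = np.isin(toy_labels, kept_labels)
# filtered_mask now contains only the 16-voxel region.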