Code Example #1
def estimate_t2_cropping(image_dir, result_dir=None, dilation=5):
    """This function takes all the hippocampus images (with 2 channels) within the specified directory, and estimates
    the cropping dimensions around the hippocampus in the t2 channel.
    It returns the mean and std deviation for the minimal and maximal croppings, proportional to image size.
    :param image_dir: path of the folder containing hippocampus images
    :param result_dir: if not None, path of the folder where to write the computed statistics.
    :param dilation: dilation coefficient used to extract full brain mask. Default is 5.
    :returns t2_cropping_stats: numpy vector of size 4 [mean min crop, std min crop, mean max crop, std max crop]
    """

    # create result dir
    if result_dir is not None:
        if not os.path.exists(result_dir):
            os.mkdir(result_dir)

    # loop through images
    list_image_paths = utils.list_images_in_folder(image_dir)
    max_cropping_proportions = np.zeros(len(list_image_paths))
    min_cropping_proportions = np.zeros(len(list_image_paths))
    for im_idx, image_path in enumerate(list_image_paths):
        utils.print_loop_info(im_idx, len(list_image_paths), 10)

        # load t2 channel
        im = utils.load_volume(image_path)
        t2 = im[..., 1]
        shape = t2.shape
        hdim = int(np.argmax(shape))

        # mask image
        _, mask = edit_volumes.mask_volume(t2,
                                           threshold=0,
                                           dilate=dilation,
                                           return_mask=True)

        # find cropping indices
        indices = np.nonzero(mask)[hdim]
        min_cropping_proportions[im_idx] = np.maximum(
            np.min(indices) + int(dilation / 2), 0) / shape[hdim]
        max_cropping_proportions[im_idx] = np.minimum(
            np.max(indices) - int(dilation / 2), shape[hdim]) / shape[hdim]

    # compute and save stats
    t2_cropping_stats = np.array([
        np.mean(min_cropping_proportions),
        np.std(min_cropping_proportions),
        np.mean(max_cropping_proportions),
        np.std(max_cropping_proportions)
    ])

    # save stats if necessary
    if result_dir is not None:
        np.save(os.path.join(result_dir, 't2_cropping_stats.npy'),
                t2_cropping_stats)

    return t2_cropping_stats
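
A minimal usage sketch, assuming the function above is available in the current session; the directory paths are hypothetical placeholders.

# hypothetical paths, adjust to your data layout
stats = estimate_t2_cropping('/data/hippo_images', result_dir='/data/crop_stats', dilation=5)
print('min crop: mean=%.3f std=%.3f' % (stats[0], stats[1]))
print('max crop: mean=%.3f std=%.3f' % (stats[2], stats[3]))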
Code Example #2
def dice_evaluation(gt_dir,
                    seg_dir,
                    path_label_list,
                    path_result_dice_array):
    """Computes Dice scores for all labels contained in path_segmentation_label_list. Files in gt_folder and seg_folder
    are matched by sorting order.
    :param gt_dir: folder containing ground truth files.
    :param seg_dir: folder containing evaluation files.
    :param path_label_list: path of numpy vector containing all labels to compute the Dice for.
    :param path_result_dice_array: path where the resulting Dice will be written as numpy array.
    :return: numpy array containing all dice scores (labels in rows, subjects in columns).
    """

    # create result folder
    if not os.path.exists(os.path.dirname(path_result_dice_array)):
        os.mkdir(os.path.dirname(path_result_dice_array))

    # get list label maps to compare
    path_gt_labels = utils.list_images_in_folder(gt_dir)
    path_segs = utils.list_images_in_folder(seg_dir)
    if len(path_gt_labels) != len(path_segs):
        print('different number of files in data folders, had {} and {}'.format(len(path_gt_labels), len(path_segs)))

    # load labels list
    label_list, neutral_labels = utils.get_list_labels(label_list=path_label_list, FS_sort=True, labels_dir=gt_dir)
    label_list_sorted = np.sort(label_list)

    # initialise result matrix
    dice_coefs = np.zeros((label_list.shape[0], len(path_segs)))

    # loop over segmentations
    for idx, (path_gt, path_seg) in enumerate(zip(path_gt_labels, path_segs)):
        utils.print_loop_info(idx, len(path_segs), 10)

        # load gt labels and segmentation
        gt_labels = utils.load_volume(path_gt, dtype='int')
        seg = utils.load_volume(path_seg, dtype='int')
        # crop images
        gt_labels, cropping = edit_volumes.crop_volume_around_region(gt_labels, margin=10)
        seg = edit_volumes.crop_volume_with_idx(seg, cropping)
        # compute dice scores
        tmp_dice = fast_dice(gt_labels, seg, label_list_sorted)
        dice_coefs[:, idx] = tmp_dice[np.searchsorted(label_list_sorted, label_list)]

    # write dice results
    np.save(path_result_dice_array, dice_coefs)

    return dice_coefs
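
A minimal usage sketch with hypothetical paths; the label list is assumed to be a saved 1d numpy array.

dice = dice_evaluation('/data/gt_labels', '/data/segmentations',
                       '/data/segmentation_labels.npy', '/data/eval/dice.npy')
print('mean Dice per label:', dice.mean(axis=1))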
Code Example #3
def paste_lesions_on_buckner(lesion_dir,
                             buckner_dir,
                             result_dir,
                             dilate=2,
                             recompute=False):
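    """Pastes lesions (label 77) from SAMSEG label maps onto Buckner label maps.
    Lesions are kept within the white matter, and a morphological closing (dilation then erosion) is used to
    bridge gaps between lesions and the lateral ventricle (label 4) before writing the result to result_dir.
    :param lesion_dir: folder with label maps containing lesions (*.samseg_and_lesions.nii.gz files).
    :param buckner_dir: folder with Buckner label maps (files with '_seg' in their name).
    :param result_dir: folder where the combined label maps will be written.
    :param dilate: (optional) size of the structuring element used for the closing. Default is 2.
    :param recompute: (optional) whether to recompute result files even if they already exist."""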

    path_lesions = utils.list_images_in_folder(lesion_dir)
    path_buckners = utils.list_images_in_folder(buckner_dir)

    utils.mkdir(result_dir)

    # loop over buckner label maps
    for idx_buckner, path_buckner in enumerate(path_buckners):
        utils.print_loop_info(idx_buckner, len(path_buckners), 1)
        buckner_name = os.path.basename(path_buckner).replace(
            '_seg', '').replace('.nii.gz', '')
        buckner = utils.load_volume(path_buckner)
        WM = (buckner == 2) | (buckner == 7) | (buckner == 16) | (
            buckner == 41) | (buckner == 46)

        # loop over challenge data
        for path_lesion in path_lesions:
            lesion_name = os.path.basename(path_lesion).replace(
                '.samseg_and_lesions.nii.gz', '')
            path_result = os.path.join(
                result_dir, buckner_name + '_' + lesion_name + '.nii.gz')
            if (not os.path.isfile(path_result)) | recompute:

                lesion = utils.load_volume(path_lesion)
                assert lesion.shape == buckner.shape, 'lesions should have same shape as buckner labels'

                # define lesion, WM, and LV masks
                lesion = (lesion == 77) & WM
                LV_and_lesion = (buckner == 4) | lesion

                # morphological operations to bridge the gaps between lesions and LV
                morph_struct = utils.build_binary_structure(
                    dilate, len(lesion.shape))
                lesion = binary_dilation(LV_and_lesion, morph_struct)
                lesion = binary_erosion(lesion, morph_struct)
                lesion = lesion & WM
                buckner_lesions = np.where(lesion, 77, buckner)

                # save map
                utils.save_volume(buckner_lesions, None, None, path_result)
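
A minimal usage sketch, assuming the function above is available; all paths are hypothetical placeholders.

paste_lesions_on_buckner('/data/samseg_lesion_maps', '/data/buckner_label_maps',
                         '/data/buckner_with_lesions', dilate=2, recompute=False)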
Code Example #4
def validation_on_dilated_lesions(normal_validation_dir,
                                  dilated_validation_dir,
                                  gt_dir,
                                  evaluation_labels,
                                  recompute=True):
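    """Re-evaluates validation segmentations after dilating their lesions.
    Each subfolder of normal_validation_dir is processed with dilate_lesions, the dilated label maps are written
    to the corresponding subfolder of dilated_validation_dir, and new Dice scores against gt_dir are saved there.
    :param normal_validation_dir: folder containing one subfolder of predicted segmentations per validated model.
    :param dilated_validation_dir: folder where the subfolders with dilated lesions will be written.
    :param gt_dir: folder with ground truth label maps.
    :param evaluation_labels: path of the numpy array with labels to compute the Dice for.
    :param recompute: (optional) whether to recompute result files even if they already exist."""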

    utils.mkdir(dilated_validation_dir)

    list_validation_subdir = utils.list_subfolders(normal_validation_dir)
    for val_idx, validation_subdir in enumerate(list_validation_subdir):
        utils.print_loop_info(val_idx, len(list_validation_subdir), 5)

        # dilate lesion
        dilated_validation_subdir = os.path.join(
            dilated_validation_dir, os.path.basename(validation_subdir))
        dilate_lesions(validation_subdir,
                       dilated_validation_subdir,
                       recompute=recompute)

        # compute new dice scores
        path_dice = os.path.join(dilated_validation_subdir, 'dice.npy')
        if (not os.path.isfile(path_dice)) | recompute:
            dice_evaluation(gt_dir, dilated_validation_subdir,
                            evaluation_labels, path_dice)
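
A minimal usage sketch with hypothetical paths, assuming dilate_lesions and dice_evaluation are available alongside the function above.

validation_on_dilated_lesions('/data/validation', '/data/validation_dilated',
                              '/data/gt_labels', '/data/evaluation_labels.npy',
                              recompute=False)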
Code Example #5
def prepare_hippo_training_atlases(labels_dir,
                                   result_dir,
                                   image_dir=None,
                                   image_result_dir=None,
                                   smooth=True,
                                   crop_margin=50,
                                   recompute=True):
    """This function prepares training label maps from CobraLab. It first crops each atlas around the right and left
    hippocampi, with a margin. It then equalises the shape of these atlases by croppping them to the size of the
    smallest hippocampus. Finally it realigns the obtained atlases to FS orientation axes.
    :param labels_dir: path of directory with label maps to prepare
    :param result_dir: path of directory where prepared atlases will be writen
    :param image_dir: (optional) path of directory with images corresponding to the label maps to prepare.
    This can be sued to prepare a dataset of real images for supervised training.
    :param image_result_dir: (optional) path of directory where images corresponding to prepared atlases will be writen
    :param smooth: (optional) whether to smooth the final cropped label maps
    :param crop_margin: (optional) margin to add around hippocampi when cropping
    :param recompute: (optional) whether to recompute result files even if they already exists"""

    # create results dir
    if not os.path.exists(result_dir):
        os.mkdir(result_dir)
    tmp_result_dir = os.path.join(result_dir, 'first_cropping')
    if not os.path.exists(tmp_result_dir):
        os.mkdir(tmp_result_dir)
    if image_dir is not None:
        assert image_result_dir is not None, 'image_result_dir should not be None if image_dir is specified'
        if not os.path.exists(image_result_dir):
            os.mkdir(image_result_dir)
        tmp_image_result_dir = os.path.join(image_result_dir, 'first_cropping')
        if not os.path.exists(tmp_image_result_dir):
            os.mkdir(tmp_image_result_dir)
    else:
        tmp_image_result_dir = None

    # list labels and images
    labels_paths = utils.list_images_in_folder(labels_dir)
    if image_dir is not None:
        path_images = utils.list_images_in_folder(image_dir)
    else:
        path_images = [None] * len(labels_paths)

    # crop all atlases around hippo
    print('\ncropping around hippo')
    shape_array = np.zeros((len(labels_paths)*2, 3))
    for idx, (path_label, path_image) in enumerate(zip(labels_paths, path_images)):
        utils.print_loop_info(idx, len(labels_paths), 1)

        # crop left hippo first
        path_label_first_crop_l = os.path.join(tmp_result_dir,
                                               os.path.basename(path_label).replace('.nii', '_left.nii'))
        lab, aff, h = utils.load_volume(path_label, im_only=False)
        lab_l, cropping_idx, aff_l = edit_volumes.crop_volume_around_region(
            lab, crop_margin, list(range(20101, 20109)), aff=aff)
        if (not os.path.exists(path_label_first_crop_l)) | recompute:
            utils.save_volume(lab_l, aff_l, h, path_label_first_crop_l)
        else:
            lab_l = utils.load_volume(path_label_first_crop_l)
        if path_image is not None:
            path_image_first_crop_l = os.path.join(tmp_image_result_dir,
                                                   os.path.basename(path_image).replace('.nii', '_left.nii'))
            if (not os.path.exists(path_image_first_crop_l)) | recompute:
                im, aff, h = utils.load_volume(path_image, im_only=False)
                im, aff = edit_volumes.crop_volume_with_idx(im, cropping_idx, aff)
                utils.save_volume(im, aff, h, path_image_first_crop_l)
        shape_array[2*idx, :] = np.array(lab_l.shape)

        # crop right hippo and flip them
        path_label_first_crop_r = os.path.join(tmp_result_dir,
                                               os.path.basename(path_label).replace('.nii', '_right_flipped.nii'))
        lab, aff, h = utils.load_volume(path_label, im_only=False)
        lab_r, cropping_idx, aff_r = edit_volumes.crop_volume_around_region(
            lab, crop_margin, list(range(20001, 20009)), aff=aff)
        if (not os.path.exists(path_label_first_crop_r)) | recompute:
            lab_r = edit_volumes.flip_volume(lab_r, direction='rl', aff=aff_r)
            utils.save_volume(lab_r, aff_r, h, path_label_first_crop_r)
        else:
            lab_r = utils.load_volume(path_label_first_crop_r)
        if path_image is not None:
            path_image_first_crop_r = os.path.join(tmp_image_result_dir,
                                                   os.path.basename(path_image).replace('.nii', '_right.nii'))
            if (not os.path.exists(path_image_first_crop_r)) | recompute:
                im, aff, h = utils.load_volume(path_image, im_only=False)
                im, aff = edit_volumes.crop_volume_with_idx(im, cropping_idx, aff)
                im = edit_volumes.flip_volume(im, direction='rl', aff=aff)
                utils.save_volume(im, aff, h, path_image_first_crop_r)
        shape_array[2*idx+1, :] = np.array(lab_r.shape)

    # list cropped files
    path_labels_first_cropped = utils.list_images_in_folder(tmp_result_dir)
    if tmp_image_result_dir is not None:
        path_images_first_cropped = utils.list_images_in_folder(tmp_image_result_dir)
    else:
        path_images_first_cropped = [None] * len(path_labels_first_cropped)

    # crop all label maps to same size
    print('\nequalising shapes')
    new_shape = np.min(shape_array, axis=0).astype('int32')
    for i, (path_label, path_image) in enumerate(zip(path_labels_first_cropped, path_images_first_cropped)):
        utils.print_loop_info(i, len(path_labels_first_cropped), 1)

        # get cropping indices
        path_lab_cropped = os.path.join(result_dir, os.path.basename(path_label))
        lab, aff, h = utils.load_volume(path_label, im_only=False)
        lab_shape = lab.shape
        min_cropping = np.array([np.maximum(int((lab_shape[i]-new_shape[i])/2), 0) for i in range(3)])
        max_cropping = np.array([min_cropping[i] + new_shape[i] for i in range(3)])

        # crop labels and realign on adni format
        if (not os.path.exists(path_lab_cropped)) | recompute:
            lab, aff = edit_volumes.crop_volume_with_idx(lab, np.concatenate([min_cropping, max_cropping]), aff)
            # realign on adni format
            lab = np.flip(lab, axis=2)
            aff[0:3, 0:3] = np.array([[-0.6, 0, 0], [0, 0, -0.6], [0, -0.6, 0]])
            utils.save_volume(lab, aff, h, path_lab_cropped)

        # crop image and realign on adni format
        if path_image is not None:
            path_im_cropped = os.path.join(image_result_dir, os.path.basename(path_image))
            if (not os.path.exists(path_im_cropped)) | recompute:
                im, aff, h = utils.load_volume(path_image, im_only=False)
                im, aff = edit_volumes.crop_volume_with_idx(im, np.concatenate([min_cropping, max_cropping]), aff)
                im = np.flip(im, axis=2)
                aff[0:3, 0:3] = np.array([[-0.6, 0, 0], [0, 0, -0.6], [0, -0.6, 0]])
                im = edit_volumes.mask_volume(im, lab)
                utils.save_volume(im, aff, h, path_im_cropped)

    # correct all labels to left values
    print('\ncorrecting labels')
    list_incorrect_labels = [77, 80, 251, 252, 253, 254, 255, 29, 41, 42, 43, 44, 46, 47, 49, 50, 51, 52, 54, 58, 60,
                             61, 62, 63, 7012, 20001, 20002, 20004, 20005, 20006, 20007, 20008]
    list_correct_labels = [2, 3, 2, 2, 2, 2, 2, 2, 2, 3, 4, 5, 7, 8, 10, 11, 12, 13, 18, 26, 28, 2, 30, 31, 20108,
                           20101, 20102, 20104, 20105, 20106, 20107, 20108]
    edit_volumes.correct_labels_in_dir(result_dir, list_incorrect_labels, list_correct_labels, result_dir)

    # smooth labels
    if smooth:
        print('\nsmoothing labels')
        edit_volumes.smooth_labels_in_dir(result_dir, result_dir)
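
A minimal usage sketch with hypothetical paths; the CobraLab label values (20001-20008 and 20101-20108) are assumed to follow the convention used in the code above.

prepare_hippo_training_atlases('/data/cobralab_labels', '/data/hippo_atlases',
                               image_dir='/data/cobralab_images',
                               image_result_dir='/data/hippo_images',
                               smooth=True, crop_margin=50, recompute=False)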
Code Example #6
File: predict.py Project: sandeepganji/SynthSeg
def predict(path_images,
            path_model,
            segmentation_label_list,
            path_segmentations=None,
            path_posteriors=None,
            path_volumes=None,
            voxel_volume=1.,
            skip_background_volume=True,
            padding=None,
            cropping=None,
            resample=None,
            sigma_smoothing=0,
            keep_biggest_component=False,
            conv_size=3,
            n_levels=5,
            nb_conv_per_level=2,
            unet_feat_count=24,
            feat_multiplier=2,
            no_batch_norm=False,
            gt_folder=None):
    """
    This function uses trained models to segment images.
    It is crucial that the inputs match the architecture parameters of the trained model.
    :param path_images: path of the images to segment. Can be the path to a directory or the path to a single image.
    :param path_model: path of the trained model.
    :param segmentation_label_list: List of labels for which to compute Dice scores. It should contain the same values
    as the segmentation label list used for training the network.
    Can be a sequence, a 1d numpy array, or the path to a numpy 1d array.
    :param path_segmentations: (optional) path where segmentations will be written.
    Should be a dir if path_images is a dir, and a file if path_images is a file.
    Should not be None if path_posteriors is None.
    :param path_posteriors: (optional) path where posteriors will be written.
    Should be a dir if path_images is a dir, and a file if path_images is a file.
    Should not be None if path_segmentations is None.
    :param path_volumes: (optional) path of a csv file where the soft volumes of all segmented regions will be written.
    The rows of the csv file correspond to subjects, and the columns correspond to segmentation labels.
    The soft volume of a structure corresponds to the sum of its predicted probability map.
    :param voxel_volume: (optional) volume of voxel. Default is 1 (i.e. returned volumes are voxel counts).
    :param skip_background_volume: (optional) whether to skip computing the volume of the background. This assumes the
    background corresponds to the first value in the label list.
    :param padding: (optional) pad the images to the specified shape before predicting the segmentation maps.
    If padding and cropping are specified, images are padded before being cropped.
    Can be an int, a sequence or a 1d numpy array.
    :param cropping: (optional) crop the images to the specified shape before predicting the segmentation maps.
    If padding and cropping are specified, images are padded before being cropped.
    Can be an int, a sequence or a 1d numpy array.
    :param resample: (optional) resample the images to the specified resolution before predicting the segmentation maps.
    Can be an int, a sequence or a 1d numpy array.
    :param sigma_smoothing: (optional) If not None, the posteriors are smoothed with a gaussian kernel of the specified
    standard deviation.
    :param keep_biggest_component: (optional) whether to only keep the biggest component in the predicted segmentation.
    :param conv_size: (optional) size of unet's convolution masks. Default is 3.
    :param n_levels: (optional) number of levels for unet. Default is 5.
    :param nb_conv_per_level: (optional) number of convolution layers per level. Default is 2.
    :param unet_feat_count: (optional) number of features for the first layer of the unet. Default is 24.
    :param feat_multiplier: (optional) multiplicative factor for the number of features at each new level. Default is 2.
    :param no_batch_norm: (optional) whether to deactivate batch norm. Default is False.
    :param gt_folder: (optional) folder containing ground truth files for evaluation.
    A numpy array containing all dice scores (labels in rows, subjects in columns) will be written either at
    segmentations_dir (if not None), or posteriors_dir.
    """

    assert path_model, "A model file is necessary"
    assert path_segmentations or path_posteriors, "output segmentation (or posteriors) is required"

    # prepare output filepaths
    images_to_segment, path_segmentations, path_posteriors, path_volumes = prepare_output_files(
        path_images, path_segmentations, path_posteriors, path_volumes)

    # get label and classes lists
    label_list, _ = utils.get_list_labels(label_list=segmentation_label_list,
                                          FS_sort=True)

    # prepare volume file if needed
    if path_volumes is not None:
        if skip_background_volume:
            csv_header = [['subject'] + [str(lab) for lab in label_list[1:]]]
        else:
            csv_header = [['subject'] + [str(lab) for lab in label_list]]
        with open(path_volumes, 'w') as csvFile:
            writer = csv.writer(csvFile)
            writer.writerows(csv_header)

    # perform segmentation
    net = None
    previous_model_input_shape = None
    for idx, (im_path, seg_path, posteriors_path) in enumerate(
            zip(images_to_segment, path_segmentations, path_posteriors)):
        utils.print_loop_info(idx, len(images_to_segment), 10)

        # preprocess image and get information
        image, aff, h, n_channels, n_dims, shape, pad_shape, cropping, crop_idx = preprocess_image(
            im_path, n_levels, cropping, padding)
        model_input_shape = image.shape[1:]

        # prepare net for first image or if input's size has changed
        if (idx == 0) | (previous_model_input_shape != model_input_shape):

            # check for image size compatibility
            if (idx != 0) & (previous_model_input_shape != model_input_shape):
                print('image of different shape than previous ones, redefining network')
            previous_model_input_shape = model_input_shape
            net = None

            if resample is not None:
                net, resample_shape = preprocessing_model(
                    resample, model_input_shape, h, n_channels, n_dims,
                    n_levels)
            else:
                resample_shape = previous_model_input_shape
            net = prepare_unet(resample_shape,
                               len(label_list),
                               conv_size,
                               n_levels,
                               nb_conv_per_level,
                               unet_feat_count,
                               feat_multiplier,
                               no_batch_norm,
                               path_model,
                               input_model=net)
            if (resample is not None) | (sigma_smoothing != 0):
                net = postprocessing_model(net, model_input_shape, resample,
                                           sigma_smoothing, n_dims)

        # predict posteriors
        prediction_patch = net.predict(image)

        # get posteriors and segmentation
        seg, posteriors = postprocess(prediction_patch, cropping, pad_shape,
                                      shape, crop_idx, n_dims, label_list,
                                      keep_biggest_component)

        # compute volumes
        if path_volumes is not None:
            if skip_background_volume:
                volumes = np.around(
                    np.sum(posteriors[..., 1:],
                           axis=tuple(range(0,
                                            len(posteriors.shape) - 1))), 3)
            else:
                volumes = np.around(
                    np.sum(posteriors,
                           axis=tuple(range(0,
                                            len(posteriors.shape) - 1))), 3)
            volumes = voxel_volume * volumes
            row = [os.path.basename(im_path)] + [str(vol) for vol in volumes]
            with open(path_volumes, 'a') as csvFile:
                writer = csv.writer(csvFile)
                writer.writerow(row)

        # write results to disk
        if seg_path is not None:
            utils.save_volume(seg.astype('int'), aff, h, seg_path)
        if posteriors_path is not None:
            if n_channels > 1:
                new_shape = list(posteriors.shape)
                new_shape.insert(-1, 1)
                new_shape = tuple(new_shape)
                posteriors = np.reshape(posteriors, new_shape)
            utils.save_volume(posteriors.astype('float'), aff, h,
                              posteriors_path)

    # evaluate
    if gt_folder is not None:
        if path_segmentations[0] is not None:
            eval_folder = os.path.dirname(path_segmentations[0])
        else:
            eval_folder = os.path.dirname(path_posteriors[0])
        path_result_dice = os.path.join(eval_folder, 'dice.npy')
        evaluate.dice_evaluation(gt_folder, eval_folder,
                                 segmentation_label_list, path_result_dice)
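
A minimal usage sketch for this version of predict; the paths and model file name are hypothetical, and the architecture parameters are assumed to match those used for training.

predict('/data/test_images', '/models/dice_100.h5', '/data/segmentation_labels.npy',
        path_segmentations='/data/test_segmentations',
        path_volumes='/data/volumes.csv',
        cropping=160, sigma_smoothing=0.5,
        keep_biggest_component=True,
        gt_folder='/data/gt_labels')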
Code Example #7
def validate_training(image_dir,
                      gt_dir,
                      models_dir,
                      validation_main_dir,
                      segmentation_label_list,
                      evaluation_label_list=None,
                      step_eval=1,
                      aff_ref='FS',
                      sigma_smoothing=0,
                      keep_biggest_component=False,
                      cropping=None,
                      conv_size=3,
                      n_levels=5,
                      nb_conv_per_level=2,
                      feat_multiplier=2,
                      activation='elu',
                      recompute=True):
    """This function validates models saved at different epochs of the same training.
    All models to validate are assumed to be contained in models_dir.
    The results of each model are saved in a subfolder in validation_main_dir.
    :param image_dir: path of the folder with validation images.
    :param gt_dir: path of the folder with ground truth label maps.
    These are matched to the validation images by sorting order.
    :param models_dir: path of the folder with the models to validate.
    :param validation_main_dir: path of the folder where all the models validation subfolders will be saved.
    :param segmentation_label_list: path of the numpy array containing all the segmentation labels used during training.
    :param evaluation_label_list: (optional) label values to validate on. Must be a subset of the segmentation labels.
    Can be a sequence, a 1d numpy array, or the path to a numpy 1d array. Default is the same as segmentation_label_list.
    :param step_eval: (optional) If step_eval > 1 skips models when validating, by validating on models step_eval apart.
    :param aff_ref: (optional) affine matrix with which the models were trained. Can be 'FS' (default), or 'identity'.
    :param sigma_smoothing: (optional) If not None, the posteriors are smoothed with a gaussian kernel of the specified
    standard deviation.
    :param keep_biggest_component: (optional) whether to only keep the biggest component in the predicted segmentation.
    :param cropping: (optional) whether to crop the input to smaller size while being run through the network.
    The result is then given in the original image space. Can be an int, a sequence, or a 1d numpy array.
    :param n_levels: (optional) number of levels for the Unet. Default is 5.
    :param nb_conv_per_level: (optional) number of convolutional layers per level. Default is 2.
    :param conv_size: (optional) size of the convolution kernels. Default is 3.
    :param feat_multiplier: (optional) multiply the number of features by this number at each new level. Default is 2.
    :param activation: (optional) activation function. Can be 'elu', 'relu'.
    :param recompute: (optional) whether to recompute result files even if they already exist."""

    # create result folder
    utils.mkdir(validation_main_dir)

    # loop over models
    list_models = utils.list_files(models_dir,
                                   expr=['dice', 'h5'],
                                   cond_type='and')[::step_eval]
    for model_idx, path_model in enumerate(list_models):

        # build names and create folders
        model_val_dir = os.path.join(
            validation_main_dir,
            os.path.basename(path_model).replace('.h5', ''))
        dice_path = os.path.join(model_val_dir, 'dice.npy')
        utils.mkdir(model_val_dir)

        if (not os.path.isfile(dice_path)) | recompute:
            utils.print_loop_info(model_idx, len(list_models), spacing=1)
            predict(path_images=image_dir,
                    path_model=path_model,
                    segmentation_label_list=segmentation_label_list,
                    path_segmentations=model_val_dir,
                    cropping=cropping,
                    aff_ref=aff_ref,
                    sigma_smoothing=sigma_smoothing,
                    keep_biggest_component=keep_biggest_component,
                    conv_size=conv_size,
                    n_levels=n_levels,
                    nb_conv_per_level=nb_conv_per_level,
                    feat_multiplier=feat_multiplier,
                    activation=activation,
                    gt_folder=gt_dir,
                    evaluation_label_list=evaluation_label_list,
                    verbose=False)
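
A minimal usage sketch with hypothetical paths; model files in models_dir are assumed to follow the 'dice_*.h5' naming expected by utils.list_files above.

validate_training('/data/val_images', '/data/val_gt', '/models/training_1',
                  '/data/validation/training_1', '/data/segmentation_labels.npy',
                  evaluation_label_list='/data/evaluation_labels.npy',
                  step_eval=5, aff_ref='FS', recompute=False)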
Code Example #8
def dice_evaluation(gt_folder, seg_folder, path_segmentation_label_list,
                    path_result_dice_array):
    """Computes Dice scores for all labels contained in path_segmentation_label_list. Files in gt_folder and seg_folder
    are matched by sorting order.
    :param gt_folder: folder containing ground truth files.
    :param seg_folder: folder containing evaluation files.
    :param path_segmentation_label_list: path of numpy vector containing all labels to compute the Dice for.
    :param path_result_dice_array: path where the resulting Dice will be written as numpy array.
    :return: numpy array containing all dice scores (labels in rows, subjects in columns).
    """

    # get list of automated and manual segmentations
    list_path_gt_labels = utils.list_images_in_folder(gt_folder)
    list_path_segs = utils.list_images_in_folder(seg_folder)
    if len(list_path_gt_labels) != len(list_path_segs):
        warnings.warn(
            'both data folders should have the same length, had {} and {}'.
            format(len(list_path_gt_labels), len(list_path_segs)))

    # load labels list
    label_list, neutral_labels = utils.get_list_labels(
        label_list=path_segmentation_label_list,
        FS_sort=True,
        labels_dir=gt_folder)

    # create result folder
    if not os.path.exists(os.path.dirname(path_result_dice_array)):
        os.mkdir(os.path.dirname(path_result_dice_array))

    # initialise result matrix
    dice_coefs = np.zeros((label_list.shape[0], len(list_path_segs)))

    # start analysis
    for im_idx, (path_gt, path_seg) in enumerate(
            zip(list_path_gt_labels, list_path_segs)):
        utils.print_loop_info(im_idx, len(list_path_segs), 10)

        # load gt labels and segmentation
        gt_labels = utils.load_volume(path_gt, dtype='int')
        seg = utils.load_volume(path_seg, dtype='int')
        n_dims = len(gt_labels.shape)
        # crop images
        gt_labels, cropping = edit_volumes.crop_volume_around_region(gt_labels,
                                                                     margin=10)
        if n_dims == 2:
            seg = seg[cropping[0]:cropping[2], cropping[1]:cropping[3]]
        elif n_dims == 3:
            seg = seg[cropping[0]:cropping[3], cropping[1]:cropping[4],
                      cropping[2]:cropping[5]]
        else:
            raise Exception(
                'cannot evaluate images with more than 3 dimensions')
        # extract list of unique labels
        label_list_sorted = np.sort(label_list)
        tmp_dice = fast_dice(gt_labels, seg, label_list_sorted)
        dice_coefs[:,
                   im_idx] = tmp_dice[np.searchsorted(label_list_sorted,
                                                      label_list)]

    # write dice results
    np.save(path_result_dice_array, dice_coefs)

    return dice_coefs
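
A minimal usage sketch with hypothetical paths; the returned array has one row per label and one column per subject.

dice = dice_evaluation('/data/gt_labels', '/data/segmentations',
                       '/data/segmentation_labels.npy', '/data/eval/dice.npy')
print('mean Dice per subject:', dice.mean(axis=0))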
Code Example #9
def sample_intensity_stats_from_single_dataset(image_dir,
                                               labels_dir,
                                               labels_list,
                                               classes_list=None,
                                               max_channel=3,
                                               rescale=True):
    """This function aims at estimating the intensity distributions of K different structure types from a set of images.
    The distribution of each structure type is modelled as a Gaussian, parametrised by a mean and a standard deviation.
    Because the intensity distribution of structures can vary across images, we additionally use Gaussian priors for the
    parameters of each Gaussian distribution. Therefore, the intensity distribution of each structure type is described
    by 4 parameters: a mean/std for the mean intensity, and a mean/std for the std deviation.
    This function uses a set of images along with corresponding segmentations to estimate the 4*K parameters.
    Structures can share the same statistics by being regrouped into classes of similar structure types.
    Images can be multi-modal (n_channels), in which case different statistics are estimated for each modality.
    :param image_dir: path of directory with images to estimate the intensity distribution
    :param labels_dir: path of directory with segmentation of input images.
    They are matched with images by sorting order.
    :param labels_list: list of labels for which to evaluate mean and std intensity.
    Can be a sequence, a 1d numpy array, or the path to a 1d numpy array.
    :param classes_list: (optional) enables regrouping structures into classes of similar intensity statistics.
    Intensities associated with regrouped labels will thus contribute to the same Gaussian during statistics estimation.
    Can be a sequence, a 1d numpy array, or the path to a 1d numpy array.
    It should have the same length as labels_list, and contain values between 0 and K-1, where K is the total number of
    classes. Default is all labels have different classes (K=len(labels_list)).
    :param max_channel: (optional) maximum number of channels to consider if the data is multispectral. Default is 3.
    :param rescale: (optional) whether to rescale images between 0 and 255 before intensity estimation
    :return: 2 numpy arrays of size (2*n_channels, K), one with the evaluated means/std for the mean
    intensity, and one for the mean/std for the standard deviation.
    Each block of two rows corresponds to a different modality (channel). For each block of two rows, the first row
    represents the mean, and the second represents the std.
    """

    # list files
    path_images = utils.list_images_in_folder(image_dir)
    path_labels = utils.list_images_in_folder(labels_dir)
    assert len(path_images) == len(
        path_labels
    ), 'image and labels folders do not have the same number of files'

    # reformat list labels and classes
    labels_list = np.array(
        utils.reformat_to_list(labels_list, load_as_numpy=True, dtype='int'))
    if classes_list is not None:
        classes_list = np.array(
            utils.reformat_to_list(classes_list,
                                   load_as_numpy=True,
                                   dtype='int'))
    else:
        classes_list = np.arange(labels_list.shape[0])
    assert len(classes_list) == len(
        labels_list), 'labels and classes lists should have the same length'

    # get unique classes
    unique_classes, unique_indices = np.unique(classes_list, return_index=True)
    n_classes = len(unique_classes)
    if not np.array_equal(unique_classes, np.arange(n_classes)):
        raise ValueError(
            'classes_list should only contain values between 0 and K-1, '
            'where K is the total number of classes. Here K = %d' % n_classes)

    # initialise result arrays
    n_dims, n_channels = utils.get_dims(utils.load_volume(
        path_images[0]).shape,
                                        max_channels=max_channel)
    means = np.zeros((len(path_images), n_classes, n_channels))
    stds = np.zeros((len(path_images), n_classes, n_channels))

    # loop over images
    for idx, (path_im, path_la) in enumerate(zip(path_images, path_labels)):
        utils.print_loop_info(idx, len(path_images), 10)

        # load image and label map
        image = utils.load_volume(path_im)
        la = utils.load_volume(path_la)
        if n_channels == 1:
            image = utils.add_axis(image, -1)

        # loop over channels
        for channel in range(n_channels):
            im = image[..., channel]
            if rescale:
                im = edit_volumes.rescale_volume(im)
            stats = sample_intensity_stats_from_image(
                im, la, labels_list, classes_list=classes_list)
            means[idx, :, channel] = stats[0, :]
            stds[idx, :, channel] = stats[1, :]

    # compute prior parameters for mean/std
    mean_means = np.mean(means, axis=0)
    std_means = np.std(means, axis=0)
    mean_stds = np.mean(stds, axis=0)
    std_stds = np.std(stds, axis=0)

    # regroup prior parameters in two different arrays: one for the mean and one for the std
    prior_means = np.zeros((2 * n_channels, n_classes))
    prior_stds = np.zeros((2 * n_channels, n_classes))
    for channel in range(n_channels):
        prior_means[2 * channel, :] = mean_means[:, channel]
        prior_means[2 * channel + 1, :] = std_means[:, channel]
        prior_stds[2 * channel, :] = mean_stds[:, channel]
        prior_stds[2 * channel + 1, :] = std_stds[:, channel]

    return prior_means, prior_stds
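
A minimal usage sketch with hypothetical paths; the label and class lists are assumed to be saved 1d numpy arrays.

prior_means, prior_stds = sample_intensity_stats_from_single_dataset(
    '/data/t1_images', '/data/label_maps', '/data/generation_labels.npy',
    classes_list='/data/generation_classes.npy', max_channel=1, rescale=True)
# both arrays have shape (2 * n_channels, K): rows alternate the mean and std of the estimated parameters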
Code Example #10
File: predict.py Project: mu40/SynthSeg
def predict(path_images,
            path_model,
            segmentation_label_list,
            path_segmentations=None,
            path_posteriors=None,
            path_volumes=None,
            skip_background_volume=True,
            padding=None,
            cropping=None,
            resample=None,
            aff_ref='FS',
            sigma_smoothing=0,
            keep_biggest_component=False,
            conv_size=3,
            n_levels=5,
            nb_conv_per_level=2,
            unet_feat_count=24,
            feat_multiplier=2,
            no_batch_norm=False,
            activation='elu',
            gt_folder=None,
            evaluation_label_list=None,
            verbose=True):
    """
    This function uses trained models to segment images.
    It is crucial that the inputs match the architecture parameters of the trained model.
    :param path_images: path of the images to segment. Can be the path to a directory or the path to a single image.
    :param path_model: path of the trained model.
    :param segmentation_label_list: List of labels for which to compute Dice scores. It should contain the same values
    as the segmentation label list used for training the network.
    Can be a sequence, a 1d numpy array, or the path to a numpy 1d array.
    :param path_segmentations: (optional) path where segmentations will be written.
    Should be a dir if path_images is a dir, and a file if path_images is a file.
    Should not be None if path_posteriors is None.
    :param path_posteriors: (optional) path where posteriors will be written.
    Should be a dir if path_images is a dir, and a file if path_images is a file.
    Should not be None if path_segmentations is None.
    :param path_volumes: (optional) path of a csv file where the soft volumes of all segmented regions will be written.
    The rows of the csv file correspond to subjects, and the columns correspond to segmentation labels.
    The soft volume of a structure corresponds to the sum of its predicted probability map.
    :param skip_background_volume: (optional) whether to skip computing the volume of the background. This assumes the
    background corresponds to the first value in the label list.
    :param padding: (optional) pad the images to the specified shape before predicting the segmentation maps.
    If padding and cropping are specified, images are padded before being cropped.
    Can be an int, a sequence or a 1d numpy array.
    :param cropping: (optional) crop the images to the specified shape before predicting the segmentation maps.
    If padding and cropping are specified, images are padded before being cropped.
    Can be an int, a sequence or a 1d numpy array.
    :param resample: (optional) resample the images to the specified resolution before predicting the segmentation maps.
    Can be an int, a sequence or a 1d numpy array.
    :param aff_ref: (optional) type of affine matrix of the images used for training. By default this is set to the
    FreeSurfer orientation ('FS'), as it was the configuration in which SynthSeg was trained. However, the new models
    are now trained on data aligned with an identity vox2ras matrix, so aff_ref should be set to 'identity' for those models.
    :param sigma_smoothing: (optional) If not None, the posteriors are smoothed with a gaussian kernel of the specified
    standard deviation.
    :param keep_biggest_component: (optional) whether to only keep the biggest component in the predicted segmentation.
    :param conv_size: (optional) size of unet's convolution masks. Default is 3.
    :param n_levels: (optional) number of levels for unet. Default is 5.
    :param nb_conv_per_level: (optional) number of convolution layers per level. Default is 2.
    :param unet_feat_count: (optional) number of features for the first layer of the unet. Default is 24.
    :param feat_multiplier: (optional) multiplicative factor for the number of features at each new level. Default is 2.
    :param no_batch_norm: (optional) whether to deactivate batch norm. Default is False.
    :param activation: (optional) activation function. Can be 'elu', 'relu'.
    :param gt_folder: (optional) folder containing ground truth files for evaluation.
    A numpy array containing all dice scores (labels in rows, subjects in columns) will be written either at
    segmentations_dir (if not None), or posteriors_dir.
    :param evaluation_label_list: (optional) if gt_folder is provided, you can evaluate the Dice scores on a subset of
    the segmentation labels by providing another label list here. Can be a sequence, a 1d numpy array, or the path to a
    numpy 1d array. Default is the same as segmentation_label_list.
    :param verbose: (optional) whether to print out info about the remaining number of cases.
    """

    assert path_model, "A model file is necessary"
    assert path_segmentations or path_posteriors, "output segmentation (or posteriors) is required"

    # prepare output filepaths
    images_to_segment, path_segmentations, path_posteriors, path_volumes = prepare_output_files(path_images,
                                                                                                path_segmentations,
                                                                                                path_posteriors,
                                                                                                path_volumes)

    # get label and classes lists
    label_list, _ = utils.get_list_labels(label_list=segmentation_label_list, FS_sort=True)
    if evaluation_label_list is None:
        evaluation_label_list = segmentation_label_list

    # prepare volume file if needed
    if path_volumes is not None:
        if skip_background_volume:
            csv_header = [['subject'] + [str(lab) for lab in label_list[1:]]]
        else:
            csv_header = [['subject'] + [str(lab) for lab in label_list]]
        with open(path_volumes, 'w') as csvFile:
            writer = csv.writer(csvFile)
            writer.writerows(csv_header)

    # perform segmentation
    net = None
    previous_model_input_shape = None
    for idx, (path_image, path_segmentation, path_posterior) in enumerate(zip(images_to_segment,
                                                                              path_segmentations,
                                                                              path_posteriors)):
        if verbose:
            utils.print_loop_info(idx, len(images_to_segment), 10)

        # preprocess image and get information
        image, aff, h, im_res, n_channels, n_dims, shape, pad_shape, cropping, crop_idx = \
            preprocess_image(path_image, n_levels, cropping, padding, aff_ref=aff_ref)
        model_input_shape = list(image.shape[1:])

        # prepare net for first image or if input's size has changed
        if (idx == 0) | (previous_model_input_shape != model_input_shape):

            # check for image size compatibility
            if (idx != 0) & (previous_model_input_shape != model_input_shape):
                print('image of different shape than previous ones, redefining network')
            previous_model_input_shape = model_input_shape

            # build network
            net = build_model(path_model, model_input_shape, resample, im_res, n_levels, len(label_list), conv_size,
                              nb_conv_per_level, unet_feat_count, feat_multiplier, no_batch_norm, activation,
                              sigma_smoothing)

        # predict posteriors
        prediction_patch = net.predict(image)

        # get posteriors and segmentation
        seg, posteriors = postprocess(prediction_patch, cropping, pad_shape, shape, crop_idx, n_dims, label_list,
                                      keep_biggest_component, aff, aff_ref=aff_ref)

        # compute volumes
        if path_volumes is not None:
            if skip_background_volume:
                volumes = np.sum(posteriors[..., 1:], axis=tuple(range(0, len(posteriors.shape) - 1)))
            else:
                volumes = np.sum(posteriors, axis=tuple(range(0, len(posteriors.shape) - 1)))
            volumes = np.around(volumes * np.prod(im_res), 3)
            row = [os.path.basename(path_image).replace('.nii.gz', '')] + [str(vol) for vol in volumes]
            row += [np.sum(volumes[:int(len(volumes) / 2)]), np.sum(volumes[int(len(volumes) / 2):])]
            with open(path_volumes, 'a') as csvFile:
                writer = csv.writer(csvFile)
                writer.writerow(row)

        # write results to disk
        if path_segmentation is not None:
            utils.save_volume(seg.astype('int'), aff, h, path_segmentation)
        if path_posterior is not None:
            if n_channels > 1:
                new_shape = list(posteriors.shape)
                new_shape.insert(-1, 1)
                new_shape = tuple(new_shape)
                posteriors = np.reshape(posteriors, new_shape)
            utils.save_volume(posteriors.astype('float'), aff, h, path_posterior)

    # evaluate
    if gt_folder is not None:
        if path_segmentations[0] is not None:
            eval_folder = os.path.dirname(path_segmentations[0])
        else:
            eval_folder = os.path.dirname(path_posteriors[0])
        path_result_dice = os.path.join(eval_folder, 'dice.npy')
        evaluate.dice_evaluation(gt_folder, eval_folder, evaluation_label_list, path_result_dice, verbose=verbose)
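
A minimal usage sketch for this newer version of predict, which additionally exposes aff_ref, activation and evaluation_label_list; all paths and the model file name are hypothetical.

predict('/data/test_images', '/models/dice_100.h5', '/data/segmentation_labels.npy',
        path_segmentations='/data/test_segmentations',
        path_volumes='/data/volumes.csv',
        aff_ref='identity', sigma_smoothing=0.5,
        keep_biggest_component=True, activation='elu',
        gt_folder='/data/gt_labels',
        evaluation_label_list='/data/evaluation_labels.npy')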