Code example #1
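These excerpts are functions taken from a larger module and omit its module-level imports. A minimal sketch of the imports they rely on follows; the nighres-internal module paths are assumptions inferred from the helper names used in the code:

# Assumed imports for the excerpts below; the nighres-internal paths are guesses.
import os
import sys

import numpy as np
import nibabel as nb

import cbstools  # Java wrapper library used by these modules
from nighres.io import load_volume, save_volume  # assumed helper location
from nighres.utils import (_output_dir_4saving, _fname_4saving,  # assumed
                           _check_atlas_file, _check_topology_lut_dir,
                           _get_mgdm_intensity_priors, _get_mgdm_orientation)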
def recursive_ridge_diffusion(input_image,
                              ridge_intensities,
                              ridge_filter,
                              surface_levelset,
                              orientation,
                              ang_factor,
                              loc_prior,
                              min_scale,
                              max_scale,
                              propagation_model,
                              diffusion_factor,
                              similarity_scale,
                              neighborhood_size,
                              max_iter,
                              max_diff,
                              save_data=False,
                              output_dir=None,
                              file_name=None):
    """ Recursive Ridge Diffusion

    Extracts planar or tubular structures across multiple scales, with an optional directional bias.

    Parameters
    ----------
    input_image:
    ridge_intensities:
    ridge_filter:
    surface_levelset:
    orientation:
    ang_factor:
    loc_prior:
    min_scale:
    max_scale:
    propagation_model:
    diffusion_factor:
    similarity_scale:
    neighborhood_size:
    max_iter:
    max_diff:
    save_data: bool
        Save output data to file (default is False)
    output_dir: str, optional
        Path to desired output directory, will be created if it doesn't exist
    file_name: str, optional
        Desired base name for output files with file extension
        (suffixes will be added)

    Returns
    ----------
    dict
        Dictionary collecting outputs under the following keys
        (suffix of output files in brackets)

        * ridge_pv (niimg): Ridge partial volume image (_rrd_pv)
        * filter (niimg): Filter response image (_rrd_filter)
        * proba (niimg): Probability response image (_rrd_proba)
        * propagation (niimg): Propagated response image (_rrd_propag)
        * scale (niimg): Detection scale image (_rrd_scale)
        * ridge_direction (niimg): Ridge direction image (_rrd_dir)
        * correction (niimg): Directional correction image (_rrd_correct)
        * ridge_size (niimg): Ridge size image (_rrd_size)

    Notes
    ----------
    Original Java module by Pierre-Louis Bazin.

    References
    ----------

    """

    print('\n Recursive Ridge Diffusion')

    # make sure that saving related parameters are correct
    if save_data:
        output_dir = _output_dir_4saving(output_dir, input_image)

        ridge_pv_file = _fname_4saving(file_name=file_name,
                                       rootfile=input_image,
                                       suffix='rrd_pv')

        filter_file = _fname_4saving(file_name=file_name,
                                     rootfile=input_image,
                                     suffix='rrd_filter')

        proba_file = _fname_4saving(file_name=file_name,
                                    rootfile=input_image,
                                    suffix='rrd_proba')

        propagation_file = _fname_4saving(file_name=file_name,
                                          rootfile=input_image,
                                          suffix='rrd_propag')

        scale_file = _fname_4saving(file_name=file_name,
                                    rootfile=input_image,
                                    suffix='rrd_scale')

        ridge_direction_file = _fname_4saving(file_name=file_name,
                                              rootfile=input_image,
                                              suffix='rrd_dir')

        correction_file = _fname_4saving(file_name=file_name,
                                         rootfile=input_image,
                                         suffix='rrd_correct')

        ridge_size_file = _fname_4saving(file_name=file_name,
                                         rootfile=input_image,
                                         suffix='rrd_size')

    # start virtual machine, if not already running
    try:
        cbstools.initVM(initialheap='6000m', maxheap='6000m')
    except ValueError:
        pass
    # create extraction instance
    rrd = cbstools.FilterRecursiveRidgeDiffusion()

    # set parameters
    rrd.setRidgeIntensities(ridge_intensities)
    rrd.setRidgeFilter(ridge_filter)
    rrd.setOrientationToSurface(orientation)
    rrd.setAngularFactor(ang_factor)
    rrd.setMinimumScale(min_scale)
    rrd.setMaximumScale(max_scale)
    rrd.setPropagationModel(propagation_model)
    rrd.setDiffusionFactor(diffusion_factor)
    rrd.setSimilarityScale(similarity_scale)
    rrd.setNeighborhoodSize(neighborhood_size)
    rrd.setMaxIterations(max_iter)
    rrd.setMaxDifference(max_diff)

    # load input image and use it to set dimensions and resolution
    img = load_volume(input_image)
    data = img.get_data()
    affine = img.get_affine()
    header = img.get_header()
    resolution = [x.item() for x in header.get_zooms()]
    dimensions = data.shape

    rrd.setDimensions(dimensions[0], dimensions[1], dimensions[2])
    rrd.setResolutions(resolution[0], resolution[1], resolution[2])

    # input input_image
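    # (arrays are flattened in Fortran/column-major order, 'F', and reshaped
    # back with the same order below, so the flat voxel ordering matches what
    # the cbstools Java layer expects)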
    rrd.setInputImage(
        cbstools.JArray('float')((data.flatten('F')).astype(float)))

    # input surface_levelset: quick fix for the case where no surface image is given
    try:
        data = load_volume(surface_levelset).get_data()
        rrd.setSurfaceLevelSet(
            cbstools.JArray('float')((data.flatten('F')).astype(float)))
    except Exception:
        print("no surface image")

    # input location prior image : loc_prior is optional
    try:
        data = load_volume(loc_prior).get_data()
        rrd.setLocationPrior(
            cbstools.JArray('float')((data.flatten('F')).astype(float)))
    except Exception:
        print("no location prior image")

    # execute Extraction
    try:
        rrd.execute()

    except Exception:
        # if the Java module fails, reraise the error it throws
        print("\n The underlying Java code did not execute cleanly: ")
        print(sys.exc_info()[0])
        raise

    # reshape output to what nibabel likes
    ridge_pv_data = np.reshape(
        np.array(rrd.getRidgePartialVolumeImage(), dtype=np.float32),
        dimensions, 'F')

    filter_data = np.reshape(
        np.array(rrd.getFilterResponseImage(), dtype=np.float32), dimensions,
        'F')

    proba_data = np.reshape(
        np.array(rrd.getProbabilityResponseImage(), dtype=np.float32),
        dimensions, 'F')

    propagation_data = np.reshape(
        np.array(rrd.getPropagatedResponseImage(), dtype=np.float32),
        dimensions, 'F')

    scale_data = np.reshape(
        np.array(rrd.getDetectionScaleImage(), dtype=np.int32), dimensions,
        'F')

    ridge_direction_data = np.reshape(
        np.array(rrd.getRidgeDirectionImage(), dtype=np.float32),
        (dimensions[0], dimensions[1], dimensions[2], 3), 'F')

    correction_data = np.reshape(
        np.array(rrd.getDirectionalCorrectionImage(), dtype=np.float32),
        dimensions, 'F')

    ridge_size_data = np.reshape(
        np.array(rrd.getRidgeSizeImage(), dtype=np.float32), dimensions, 'F')

    # adapt header max for each image so that correct max is displayed
    # and create nifti objects
    header['cal_max'] = np.nanmax(ridge_pv_data)
    ridge_pv = nb.Nifti1Image(ridge_pv_data, affine, header)

    header['cal_max'] = np.nanmax(filter_data)
    filter_img = nb.Nifti1Image(filter_data, affine, header)

    header['cal_max'] = np.nanmax(proba_data)
    proba = nb.Nifti1Image(proba_data, affine, header)

    header['cal_max'] = np.nanmax(propagation_data)
    propagation = nb.Nifti1Image(propagation_data, affine, header)

    header['cal_max'] = np.nanmax(scale_data)
    scale = nb.Nifti1Image(scale_data, affine, header)

    header['cal_max'] = np.nanmax(ridge_direction_data)
    ridge_direction = nb.Nifti1Image(ridge_direction_data, affine, header)

    header['cal_max'] = np.nanmax(correction_data)
    correction = nb.Nifti1Image(correction_data, affine, header)

    header['cal_max'] = np.nanmax(ridge_size_data)
    ridge_size = nb.Nifti1Image(ridge_size_data, affine, header)

    if save_data:
        save_volume(os.path.join(output_dir, ridge_pv_file), ridge_pv)
        save_volume(os.path.join(output_dir, filter_file), filter_img)
        save_volume(os.path.join(output_dir, proba_file), proba)
        save_volume(os.path.join(output_dir, propagation_file), propagation)
        save_volume(os.path.join(output_dir, scale_file), scale)
        save_volume(os.path.join(output_dir, ridge_direction_file),
                    ridge_direction)
        save_volume(os.path.join(output_dir, correction_file), correction)
        save_volume(os.path.join(output_dir, ridge_size_file), ridge_size)

    return {
        'ridge_pv': ridge_pv,
        'filter': filter_img,
        'proba': proba,
        'propagation': propagation,
        'scale': scale,
        'ridge_direction': ridge_direction,
        'correction': correction,
        'ridge_size': ridge_size
    }
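A hypothetical call, for illustration only; the file names, option strings and parameter values below are assumptions, not a verified API reference:

# Illustrative usage sketch; option strings and values are assumed.
ridge = recursive_ridge_diffusion(input_image='vessels.nii.gz',
                                  ridge_intensities='bright',    # assumed label
                                  ridge_filter='1D',             # assumed label
                                  surface_levelset=None,
                                  orientation='undefined',       # assumed label
                                  ang_factor=1.0,
                                  loc_prior=None,
                                  min_scale=0,
                                  max_scale=3,
                                  propagation_model='diffusion',  # assumed label
                                  diffusion_factor=1.0,
                                  similarity_scale=0.1,
                                  neighborhood_size=4,
                                  max_iter=100,
                                  max_diff=0.001,
                                  save_data=True,
                                  output_dir='/tmp/rrd')
proba = ridge['proba']  # probability response, see the return dict above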
Code example #2
def extract_brain_region(segmentation, levelset_boundary,
                         maximum_membership, maximum_label,
                         extracted_region, atlas_file=None,
                         normalize_probabilities=False,
                         estimate_tissue_densities=False,
                         partial_volume_distance=1.0,
                         save_data=False, output_dir=None,
                         file_name=None):
    """ Extract Brain Region

    Extracts masks, probability maps and levelset surfaces for specific brain
    regions and regions from a Multiple Object Geometric Deformable Model
    (MGDM) segmentation result.

    Parameters
    ----------
    segmentation: niimg
        Segmentation result from MGDM.
    levelset_boundary: niimg
        Levelset boundary from MGDM.
    maximum_membership: niimg
        4D image of the maximum membership values from MGDM.
    maximum_label: niimg
        4D image of the maximum labels from MGDM.
    extracted_region: {'left_cerebrum', 'right_cerebrum', 'cerebrum', 'cerebellum', 'cerebellum_brainstem', 'subcortex', 'tissues(anat)', 'tissues(func)', 'brain_mask'}
        Region to be extracted from the MGDM segmentation.
    atlas_file: str, optional
        Path to plain text atlas file (default is stored in DEFAULT_ATLAS)
        or atlas name to be searched in ATLAS_DIR
    normalize_probabilities: bool
        Whether to normalize the output probabilities to sum to 1
        (default is False).
    estimate_tissue_densities: bool
        Whether to recompute partial volume densities from the probabilities
        (slow, default is False).
    partial_volume_distance: float
        Distance in mm to use for tissues densities, if recomputed
        (default is 1mm).
    save_data: bool
        Save output data to file (default is False)
    output_dir: str, optional
        Path to desired output directory, will be created if it doesn't exist
    file_name: str, optional
        Desired base name for output files with file extension
        (suffixes will be added)

    Returns
    ----------
    dict
        Dictionary collecting outputs under the following keys
        (suffix of output files in brackets, # stands for shorthand names of 
        the different extracted regions, respectively:
        rcr, lcr, cr, cb, cbs, sub, an, fn)

        * region_mask (niimg): Hard segmentation mask of the (GM) region
          of interest (_xmask_#gm)
        * inside_mask (niimg): Hard segmentation mask of the (WM) inside of
          the region of interest (_xmask_#wm)
        * background_mask (niimg): Hard segmentation mask of the (CSF) region
          background (_xmask_#bg)
        * region_proba (niimg): Probability map of the (GM) region
          of interest (_xproba_#gm)
        * inside_proba (niimg): Probability map of the (WM) inside of
          the region of interest (_xproba_#wm)
        * background_proba (niimg): Probability map of the (CSF) region
          background (_xproba_#bg)
        * region_lvl (niimg): Levelset surface of the (GM) region
          of interest (_xlvl_#gm)
        * inside_lvl (niimg): Levelset surface of the (WM) inside of
          the region of interest (_xlvl_#wm)
        * background_lvl (niimg): Levelset surface of the (CSF) region
          background (_xlvl_#bg)

    Notes
    ----------
    Original Java module by Pierre-Louis Bazin.
    """

    print('\nExtract Brain Region')

    # check atlas_file and set default if not given
    atlas_file = _check_atlas_file(atlas_file)

    # make sure that saving related parameters are correct
    if save_data:
        output_dir = _output_dir_4saving(output_dir, segmentation)

    # start virtual machine, if not already running
    try:
        cbstools.initVM(initialheap='8000m', maxheap='8000m')
    except ValueError:
        pass
    # create algorithm instance
    xbr = cbstools.BrainExtractBrainRegion()

    # set parameters
    xbr.setAtlasFile(atlas_file)
    xbr.setExtractedRegion(extracted_region)
    xbr.setNormalizeProbabilities(normalize_probabilities)
    xbr.setEstimateTissueDensities(estimate_tissue_densities)
    xbr.setPartialVolumingDistance(partial_volume_distance)

    # load images and set dimensions and resolution
    seg = load_volume(segmentation)
    data = seg.get_data()
    affine = seg.get_affine()
    header = seg.get_header()
    resolution = [x.item() for x in header.get_zooms()]
    dimensions = data.shape

    xbr.setDimensions(dimensions[0], dimensions[1], dimensions[2])
    xbr.setResolutions(resolution[0], resolution[1], resolution[2])
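    # the number of membership components is read from the 4th dimension of
    # the maximum_membership image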
    xbr.setComponents(load_volume(maximum_membership).get_header().get_data_shape()[3])

    xbr.setSegmentationImage(cbstools.JArray('int')(
        (data.flatten('F')).astype(int)))

    data = load_volume(levelset_boundary).get_data()
    xbr.setLevelsetBoundaryImage(cbstools.JArray('float')(
        (data.flatten('F')).astype(float)))

    data = load_volume(maximum_membership).get_data()
    xbr.setMaximumMembershipImage(cbstools.JArray('float')(
        (data.flatten('F')).astype(float)))

    data = load_volume(maximum_label).get_data()
    xbr.setMaximumLabelImage(cbstools.JArray('int')(
        (data.flatten('F')).astype(int)))

    # execute
    try:
        xbr.execute()

    except Exception:
        # if the Java module fails, reraise the error it throws
        print("\n The underlying Java code did not execute cleanly: ")
        print(sys.exc_info()[0])
        raise

    # build names for saving after the computations to get the proper names
    if save_data:
        reg_mask_file = _fname_4saving(file_name=file_name,
                                       rootfile=segmentation,
                                       suffix='xmask'+xbr.getStructureName())

        ins_mask_file = _fname_4saving(file_name=file_name,
                                       rootfile=segmentation,
                                       suffix='xmask'+xbr.getInsideName())

        bg_mask_file = _fname_4saving(file_name=file_name,
                                      rootfile=segmentation,
                                      suffix='xmask'+xbr.getBackgroundName())

        reg_proba_file = _fname_4saving(file_name=file_name,
                                        rootfile=segmentation,
                                        suffix='xproba'+xbr.getStructureName())

        ins_proba_file = _fname_4saving(file_name=file_name,
                                        rootfile=segmentation,
                                        suffix='xproba'+xbr.getInsideName())

        bg_proba_file = _fname_4saving(file_name=file_name,
                                       rootfile=segmentation,
                                       suffix='xproba'+xbr.getBackgroundName())

        reg_lvl_file = _fname_4saving(file_name=file_name,
                                      rootfile=segmentation,
                                      suffix='xlvl'+xbr.getStructureName())

        ins_lvl_file = _fname_4saving(file_name=file_name,
                                      rootfile=segmentation,
                                      suffix='xlvl'+xbr.getInsideName())

        bg_lvl_file = _fname_4saving(file_name=file_name,
                                     rootfile=segmentation,
                                     suffix='xlvl'+xbr.getBackgroundName())


    # inside region
    # reshape output to what nibabel likes
    mask_data = np.reshape(np.array(xbr.getInsideWMmask(),
                                    dtype=np.int32), dimensions, 'F')

    proba_data = np.reshape(np.array(xbr.getInsideWMprobability(),
                                     dtype=np.float32), dimensions, 'F')

    lvl_data = np.reshape(np.array(xbr.getInsideWMlevelset(),
                                   dtype=np.float32), dimensions, 'F')

    # adapt header max for each image so that correct max is displayed
    # and create nifti objects
    header['cal_min'] = np.nanmin(mask_data)
    header['cal_max'] = np.nanmax(mask_data)
    inside_mask = nb.Nifti1Image(mask_data, affine, header)

    header['cal_min'] = np.nanmin(proba_data)
    header['cal_max'] = np.nanmax(proba_data)
    inside_proba = nb.Nifti1Image(proba_data, affine, header)

    header['cal_min'] = np.nanmin(lvl_data)
    header['cal_max'] = np.nanmax(lvl_data)
    inside_lvl = nb.Nifti1Image(lvl_data, affine, header)

    # main region
    # reshape output to what nibabel likes
    mask_data = np.reshape(np.array(xbr.getStructureGMmask(),
                                    dtype=np.int32), dimensions, 'F')

    proba_data = np.reshape(np.array(xbr.getStructureGMprobability(),
                                     dtype=np.float32), dimensions, 'F')

    lvl_data = np.reshape(np.array(xbr.getStructureGMlevelset(),
                                   dtype=np.float32), dimensions, 'F')

    # adapt header max for each image so that correct max is displayed
    # and create nifti objects
    header['cal_min'] = np.nanmin(mask_data)
    header['cal_max'] = np.nanmax(mask_data)
    region_mask = nb.Nifti1Image(mask_data, affine, header)

    header['cal_min'] = np.nanmin(proba_data)
    header['cal_max'] = np.nanmax(proba_data)
    region_proba = nb.Nifti1Image(proba_data, affine, header)

    header['cal_min'] = np.nanmin(lvl_data)
    header['cal_max'] = np.nanmax(lvl_data)
    region_lvl = nb.Nifti1Image(lvl_data, affine, header)

    # background region
    # reshape output to what nibabel likes
    mask_data = np.reshape(np.array(xbr.getBackgroundCSFmask(),
                                    dtype=np.int32), dimensions, 'F')

    proba_data = np.reshape(np.array(xbr.getBackgroundCSFprobability(),
                                     dtype=np.float32), dimensions, 'F')

    lvl_data = np.reshape(np.array(xbr.getBackgroundCSFlevelset(),
                                   dtype=np.float32), dimensions, 'F')

    # adapt header max for each image so that correct max is displayed
    # and create nifti objects
    header['cal_min'] = np.nanmin(mask_data)
    header['cal_max'] = np.nanmax(mask_data)
    background_mask = nb.Nifti1Image(mask_data, affine, header)

    header['cal_min'] = np.nanmin(proba_data)
    header['cal_max'] = np.nanmax(proba_data)
    background_proba = nb.Nifti1Image(proba_data, affine, header)

    header['cal_min'] = np.nanmin(lvl_data)
    header['cal_max'] = np.nanmax(lvl_data)
    background_lvl = nb.Nifti1Image(lvl_data, affine, header)

    if save_data:
        save_volume(os.path.join(output_dir, ins_mask_file), inside_mask)
        save_volume(os.path.join(output_dir, ins_proba_file), inside_proba)
        save_volume(os.path.join(output_dir, ins_lvl_file), inside_lvl)
        save_volume(os.path.join(output_dir, reg_mask_file), region_mask)
        save_volume(os.path.join(output_dir, reg_proba_file), region_proba)
        save_volume(os.path.join(output_dir, reg_lvl_file), region_lvl)
        save_volume(os.path.join(output_dir, bg_mask_file), background_mask)
        save_volume(os.path.join(output_dir, bg_proba_file), background_proba)
        save_volume(os.path.join(output_dir, bg_lvl_file), background_lvl)

    return {'inside_mask': inside_mask, 'inside_proba': inside_proba,
            'inside_lvl': inside_lvl, 'region_mask': region_mask,
            'region_proba': region_proba, 'region_lvl': region_lvl,
            'background_mask': background_mask,
            'background_proba': background_proba,
            'background_lvl': background_lvl}
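extract_brain_region is designed to consume the outputs of mgdm_segmentation (code example #4 below). A hypothetical chained call using the dictionary keys documented in these excerpts; the file name and contrast label are illustrative assumptions:

# Illustrative chaining sketch; file name and contrast label are assumed.
mgdm = mgdm_segmentation(contrast_image1='t1map.nii.gz',
                         contrast_type1='T1map7T')  # assumed prior name
cerebrum = extract_brain_region(segmentation=mgdm['segmentation'],
                                levelset_boundary=mgdm['distance'],
                                maximum_membership=mgdm['memberships'],
                                maximum_label=mgdm['labels'],
                                extracted_region='left_cerebrum')
gm_mask = cerebrum['region_mask']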
Code example #3
def enhance_region_contrast(intensity_image,
                            segmentation_image,
                            levelset_boundary_image,
                            atlas_file,
                            enhanced_region,
                            contrast_background,
                            partial_voluming_distance,
                            save_data=False,
                            output_dir=None,
                            file_name=None):
    """ Enhance Region Contrast

    Enhances the contrast between selected regions from an MGDM brain segmentation.
    
    Parameters
    ----------
    intensity_image: niimg
        Intensity contrast to enhance between the chosen regions
    segmentation_image: niimg
        MGDM brain segmentation image (_mgdm_seg)
    levelset_boundary_image: niimg
        MGDM distance to closest boundary (_mgdm_dist)
    atlas_file: str
        Path to MGDM brain atlas file
    enhanced_region: str
        Region of interest to enhance (choices are: 'crwm', 'cbwm', 'csf' for
        cerebral and cerebellar WM, CSF)
    contrast_background: str
        Region to contrast as background (choices are: 'crgm', 'crwm', 'brain'
        for cerebral and cerebellar GM, brain tissues)
    partial_voluming_distance: float
        Distance in voxels for estimating partial voluming at the boundaries
    save_data: bool
        Save output data to file (default is False)
    output_dir: str, optional
        Path to desired output directory, will be created if it doesn't exist
    file_name: str, optional
        Desired base name for output files with file extension
        (suffixes will be added)

    Returns
    ----------
    dict
        Dictionary collecting outputs under the following keys
        (suffix of output files in brackets, with # the region and % the background label above)

        * region_mask (niimg): Hard segmentation mask of the (GM) region
          of interest (_emask_#)
        * background_mask (niimg): Hard segmentation mask of the (CSF) region
          background (_emask_%)
        * region_proba (niimg): Probability map of the (GM) region
          of interest (_eproba_#)
        * background_proba (niimg): Probability map of the (CSF) region
          background (_eproba_%)
        * region_pv (niimg): Levelset surface of the (GM) region
          of interest (_epv_#)
        * background_pv (niimg): Levelset surface of the (CSF) region
          background (_epv_%)

    Notes
    ----------
    Original Java module by Pierre-Louis Bazin.

    References
    ----------
    """

    print('\n Enhance Region Contrast')

    # check atlas_file and set default if not given
    #atlas_file = _check_atlas_file(atlas_file)

    # make sure that saving related parameters are correct
    if save_data:
        output_dir = _output_dir_4saving(output_dir, intensity_image)

    # start virtual machine, if not already running
    try:
        cbstools.initVM(initialheap='6000m', maxheap='6000m')
    except ValueError:
        pass
    # create EnhanceRegionContrast instance
    erc = cbstools.BrainEnhanceRegionContrast()

    # set erc parameters
    erc.setAtlasFile(atlas_file)
    erc.setEnhancedRegion(enhanced_region)
    erc.setContrastBackground(contrast_background)
    erc.setPartialVolumingDistance(partial_voluming_distance)
    erc.setComponents(3)  # not used in module

    # load intensity_image and use it to set dimensions and resolution
    img = load_volume(intensity_image)
    data = img.get_data()
    affine = img.get_affine()
    header = img.get_header()
    resolution = [x.item() for x in header.get_zooms()]
    dimensions = data.shape

    erc.setDimensions(dimensions[0], dimensions[1], dimensions[2])
    erc.setResolutions(resolution[0], resolution[1], resolution[2])

    # input intensity_image
    erc.setIntensityImage(
        cbstools.JArray('float')((data.flatten('F')).astype(float)))

    # input segmentation_image
    data = load_volume(segmentation_image).get_data()
    erc.setSegmentationImage(
        cbstools.JArray('int')((data.flatten('F')).astype(int)))

    # input levelset_boundary_image
    data = load_volume(levelset_boundary_image).get_data()
    erc.setLevelsetBoundaryImage(
        cbstools.JArray('float')((data.flatten('F')).astype(float)))

    # execute ERC
    try:
        erc.execute()

    except Exception:
        # if the Java module fails, reraise the error it throws
        print("\n The underlying Java code did not execute cleanly: ")
        print(sys.exc_info()[0])
        raise

    if save_data:
        reg_file = _fname_4saving(file_name=file_name,
                                  rootfile=intensity_image,
                                  suffix='emask' + str(erc.getRegionName()))

        back_file = _fname_4saving(file_name=file_name,
                                   rootfile=intensity_image,
                                   suffix='emask' +
                                   str(erc.getBackgroundName()))

        reg_proba_file = _fname_4saving(file_name=file_name,
                                        rootfile=intensity_image,
                                        suffix='eproba' +
                                        str(erc.getRegionName()))

        back_proba_file = _fname_4saving(file_name=file_name,
                                         rootfile=intensity_image,
                                         suffix='eproba' +
                                         str(erc.getBackgroundName()))

        reg_pv_file = _fname_4saving(file_name=file_name,
                                     rootfile=intensity_image,
                                     suffix='epv' + str(erc.getRegionName()))

        back_pv_file = _fname_4saving(file_name=file_name,
                                      rootfile=intensity_image,
                                      suffix='epv' +
                                      str(erc.getBackgroundName()))

    # reshape output to what nibabel likes
    reg_data = np.reshape(np.array(erc.getRegionMask(), dtype=np.int32),
                          dimensions, 'F')

    back_data = np.reshape(np.array(erc.getBackgroundMask(), dtype=np.int32),
                           dimensions, 'F')

    reg_proba_data = np.reshape(
        np.array(erc.getRegionProbability(), dtype=np.float32), dimensions,
        'F')

    back_proba_data = np.reshape(
        np.array(erc.getBackgroundProbability(), dtype=np.float32), dimensions,
        'F')

    reg_pv_data = np.reshape(
        np.array(erc.getRegionPartialVolume(), dtype=np.float32), dimensions,
        'F')

    back_pv_data = np.reshape(
        np.array(erc.getBackgroundPartialVolume(), dtype=np.float32),
        dimensions, 'F')


    # adapt header max for each image so that correct max is displayed
    # and create nifti objects
    header['cal_max'] = np.nanmax(reg_data)
    reg = nb.Nifti1Image(reg_data, affine, header)

    header['cal_max'] = np.nanmax(back_data)
    back = nb.Nifti1Image(back_data, affine, header)

    header['cal_max'] = np.nanmax(reg_proba_data)
    reg_proba = nb.Nifti1Image(reg_proba_data, affine, header)

    header['cal_max'] = np.nanmax(back_proba_data)
    back_proba = nb.Nifti1Image(back_proba_data, affine, header)

    header['cal_max'] = np.nanmax(reg_pv_data)
    reg_pv = nb.Nifti1Image(reg_pv_data, affine, header)

    header['cal_max'] = np.nanmax(back_pv_data)
    back_pv = nb.Nifti1Image(back_pv_data, affine, header)

    if save_data:
        save_volume(os.path.join(output_dir, reg_file), reg)
        save_volume(os.path.join(output_dir, back_file), back)
        save_volume(os.path.join(output_dir, reg_proba_file), reg_proba)
        save_volume(os.path.join(output_dir, back_proba_file), back_proba)
        save_volume(os.path.join(output_dir, reg_pv_file), reg_pv)
        save_volume(os.path.join(output_dir, back_pv_file), back_pv)

    return {
        'region_mask': reg,
        'background_mask': back,
        'region_proba': reg_proba,
        'background_proba': back_proba,
        'region_pv': reg_pv,
        'background_pv': back_pv
    }
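A hypothetical call might look like the following; the file names and atlas path are illustrative, while the region choices come from the docstring above:

# Illustrative usage sketch; paths are assumed.
erc_out = enhance_region_contrast(intensity_image='t1map.nii.gz',
                                  segmentation_image='sub01_mgdm_seg.nii.gz',
                                  levelset_boundary_image='sub01_mgdm_dist.nii.gz',
                                  atlas_file='brain-atlas.txt',  # assumed path
                                  enhanced_region='crwm',
                                  contrast_background='crgm',
                                  partial_voluming_distance=2.0)
wm_proba = erc_out['region_proba']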
Code example #4
def mgdm_segmentation(contrast_image1,
                      contrast_type1,
                      contrast_image2=None,
                      contrast_type2=None,
                      contrast_image3=None,
                      contrast_type3=None,
                      contrast_image4=None,
                      contrast_type4=None,
                      n_steps=5,
                      max_iterations=800,
                      topology='wcs',
                      atlas_file=None,
                      topology_lut_dir=None,
                      adjust_intensity_priors=False,
                      compute_posterior=False,
                      diffuse_probabilities=False,
                      save_data=False,
                      output_dir=None,
                      file_name=None):
    """ MGDM segmentation

    Estimates brain structures from an atlas for MRI data using
    a Multiple Object Geometric Deformable Model (MGDM)

    Parameters
    ----------
    contrast_image1: niimg
        First input image to perform segmentation on
    contrast_type1: str
        Contrast type of first input image, must be listed as a prior in the
        atlas used (specified in atlas_file)
    contrast_image2: niimg, optional
        Additional input image to inform segmentation, must be in the same
        space as contrast_image1, requires contrast_type2
    contrast_type2: str, optional
        Contrast type of second input image, must be listed as a prior in the
        atlas used (specified in atlas_file)
    contrast_image3: niimg, optional
        Additional input image to inform segmentation, must be in the same
        space as contrast_image1, requires contrast_type3
    contrast_type3: str, optional
        Contrast type of third input image, must be listed as a prior in the
        atlas used (specified in atlas_file)
    contrast_image4: niimg, optional
        Additional input image to inform segmentation, must be in the same
        space as contrast_image1, requires contrast_type4
    contrast_type4: str, optional
        Contrast type of fourth input image, must be listed as a prior in the
        atlas used (specified in atlas_file)
    n_steps: int, optional
        Number of steps for MGDM (default is 5, set to 0 for quick testing of
        registration of priors, which does not perform true segmentation)
    max_iterations: int, optional
        Maximum number of iterations per step for MGDM (default is 800, set
        to 1 for quick testing of registration of priors, which does not
        perform true segmentation)
    topology: {'wcs', 'no'}, optional
        Topology setting, choose 'wcs' (well-composed surfaces) for strongest
        topology constraint, 'no' for no topology constraint (default is 'wcs')
    atlas_file: str, optional
        Path to plain text atlas file (default is stored in DEFAULT_ATLAS)
        or atlas name to be searched in ATLAS_DIR
    topology_lut_dir: str, optional
        Path to directory in which topology files are stored (default is stored
        in TOPOLOGY_LUT_DIR)
    adjust_intensity_priors: bool
        Adjust intensity priors based on dataset (default is False)
    compute_posterior: bool
        Compute posterior probabilities for segmented structures
        (default is False)
    diffuse_probabilities: bool
        Regularize probability distribution with a non-linear diffusion scheme
        (default is False)
    save_data: bool
        Save output data to file (default is False)
    output_dir: str, optional
        Path to desired output directory, will be created if it doesn't exist
    file_name: str, optional
        Desired base name for output files with file extension
        (suffixes will be added)

    Returns
    ----------
    dict
        Dictionary collecting outputs under the following keys
        (suffix of output files in brackets)

        * segmentation (niimg): Hard brain segmentation with topological
          constraints (if chosen) (_mgdm_seg)
        * labels (niimg): Maximum tissue probability labels (_mgdm_lbls)
        * memberships (niimg): Maximum tissue probability values, 4D image
          where the first dimension shows each voxel's highest probability to
          belong to a specific tissue, the second dimension shows the second
          highest probability to belong to another tissue etc. (_mgdm_mems)
        * distance (niimg): Minimum distance to a segmentation boundary
          (_mgdm_dist)

    Notes
    ----------
    Original Java module by Pierre-Louis Bazin. Algorithm details can be
    found in [1]_ and [2]_

    References
    ----------
    .. [1] Bogovic, Prince and Bazin (2013). A multiple object geometric
       deformable model for image segmentation.
       doi:10.1016/j.cviu.2012.10.006
    .. [2] Fan, Bazin and Prince (2008). A multi-compartment segmentation
       framework with homeomorphic level sets. DOI: 10.1109/CVPR.2008.4587475
    """

    print('\nMGDM Segmentation')

    # check atlas_file and set default if not given
    atlas_file = _check_atlas_file(atlas_file)

    # check topology_lut_dir and set default if not given
    topology_lut_dir = _check_topology_lut_dir(topology_lut_dir)

    # find available intensity priors in selected MGDM atlas
    mgdm_intensity_priors = _get_mgdm_intensity_priors(atlas_file)

    # sanity check contrast types
    contrasts = [
        contrast_image1, contrast_image2, contrast_image3, contrast_image4
    ]
    ctypes = [contrast_type1, contrast_type2, contrast_type3, contrast_type4]
    for idx, ctype in enumerate(ctypes):
        if ctype is None and contrasts[idx] is not None:
            raise ValueError(
                ("If specifying contrast_image{0}, please also "
                 "specify contrast_type{0}").format(idx + 1))

        elif ctype is not None and ctype not in mgdm_intensity_priors:
            raise ValueError(("{0} is not a valid contrast type for "
                              "contrast_type{1}; please choose from the "
                              "following contrasts provided by the chosen "
                              "atlas: ").format(ctype, idx + 1),
                             ", ".join(mgdm_intensity_priors))

    # make sure that saving related parameters are correct
    if save_data:
        output_dir = _output_dir_4saving(output_dir, contrast_image1)

        seg_file = _fname_4saving(
            file_name=file_name,
            rootfile=contrast_image1,
            suffix='mgdm_seg',
        )

        lbl_file = _fname_4saving(file_name=file_name,
                                  rootfile=contrast_image1,
                                  suffix='mgdm_lbls')

        mems_file = _fname_4saving(file_name=file_name,
                                   rootfile=contrast_image1,
                                   suffix='mgdm_mems')

        dist_file = _fname_4saving(file_name=file_name,
                                   rootfile=contrast_image1,
                                   suffix='mgdm_dist')

    # start virtual machine, if not already running
    try:
        cbstools.initVM(initialheap='6000m', maxheap='6000m')
    except ValueError:
        pass
    # create mgdm instance
    mgdm = cbstools.BrainMgdmMultiSegmentation2()

    # set mgdm parameters
    mgdm.setAtlasFile(atlas_file)
    mgdm.setTopologyLUTdirectory(topology_lut_dir)
    mgdm.setOutputImages('label_memberships')
    mgdm.setAdjustIntensityPriors(adjust_intensity_priors)
    mgdm.setComputePosterior(compute_posterior)
    mgdm.setDiffuseProbabilities(diffuse_probabilities)
    mgdm.setSteps(n_steps)
    mgdm.setMaxIterations(max_iterations)
    mgdm.setTopology(topology)
    mgdm.setNormalizeQuantitativeMaps(True)
    # set to False for "quantitative" brain prior atlases
    # (version quant-3.0.5 and above)

    # load contrast image 1 and use it to set dimensions and resolution
    img = load_volume(contrast_image1)
    data = img.get_data()
    affine = img.get_affine()
    header = img.get_header()
    resolution = [x.item() for x in header.get_zooms()]
    dimensions = data.shape

    mgdm.setDimensions(dimensions[0], dimensions[1], dimensions[2])
    mgdm.setResolutions(resolution[0], resolution[1], resolution[2])

    # convert orientation information to mgdm slice and orientation info
    sliceorder, LR, AP, IS = _get_mgdm_orientation(affine, mgdm)
    mgdm.setOrientations(sliceorder, LR, AP, IS)

    # input image 1
    mgdm.setContrastImage1(
        cbstools.JArray('float')((data.flatten('F')).astype(float)))
    mgdm.setContrastType1(contrast_type1)

    # if further contrasts are specified, input them
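    # (note the nesting: contrast image 3 is only read if contrast image 2 was
    # given, and contrast image 4 only if images 2 and 3 were given)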
    if contrast_image2 is not None:
        data = load_volume(contrast_image2).get_data()
        mgdm.setContrastImage2(
            cbstools.JArray('float')((data.flatten('F')).astype(float)))
        mgdm.setContrastType2(contrast_type2)

        if contrast_image3 is not None:
            data = load_volume(contrast_image3).get_data()
            mgdm.setContrastImage3(
                cbstools.JArray('float')((data.flatten('F')).astype(float)))
            mgdm.setContrastType3(contrast_type3)

            if contrast_image4 is not None:
                data = load_volume(contrast_image4).get_data()
                mgdm.setContrastImage4(
                    cbstools.JArray('float')(
                        (data.flatten('F')).astype(float)))
                mgdm.setContrastType4(contrast_type4)

    # execute MGDM
    try:
        mgdm.execute()

    except Exception:
        # if the Java module fails, reraise the error it throws
        print("\n The underlying Java code did not execute cleanly: ")
        print(sys.exc_info()[0])
        raise

    # reshape output to what nibabel likes
    seg_data = np.reshape(
        np.array(mgdm.getSegmentedBrainImage(), dtype=np.int32), dimensions,
        'F')

    dist_data = np.reshape(
        np.array(mgdm.getLevelsetBoundaryImage(), dtype=np.float32),
        dimensions, 'F')

    # membership and labels output has a 4th dimension, set to 6
    dimensions4d = [dimensions[0], dimensions[1], dimensions[2], 6]
    lbl_data = np.reshape(
        np.array(mgdm.getPosteriorMaximumLabels4D(), dtype=np.int32),
        dimensions4d, 'F')
    mems_data = np.reshape(
        np.array(mgdm.getPosteriorMaximumMemberships4D(), dtype=np.float32),
        dimensions4d, 'F')

    # adapt header max for each image so that correct max is displayed
    # and create nifti objects
    header['cal_max'] = np.nanmax(seg_data)
    seg = nb.Nifti1Image(seg_data, affine, header)

    header['cal_max'] = np.nanmax(dist_data)
    dist = nb.Nifti1Image(dist_data, affine, header)

    header['cal_max'] = np.nanmax(lbl_data)
    lbls = nb.Nifti1Image(lbl_data, affine, header)

    header['cal_max'] = np.nanmax(mems_data)
    mems = nb.Nifti1Image(mems_data, affine, header)

    if save_data:
        save_volume(os.path.join(output_dir, seg_file), seg)
        save_volume(os.path.join(output_dir, dist_file), dist)
        save_volume(os.path.join(output_dir, lbl_file), lbls)
        save_volume(os.path.join(output_dir, mems_file), mems)

    return {
        'segmentation': seg,
        'labels': lbls,
        'memberships': mems,
        'distance': dist
    }
Code example #5
def define_multi_region_priors(segmentation_image, levelset_boundary_image,
                               atlas_file, #defined_region, definition_method,
                               distance_offset,
                               save_data=False, output_dir=None,
                               file_name=None):
    
    """ Define Multi-Region Priors

    Defines location priors based on combinations of regions from an MGDM
    brain segmentation. This version creates priors for the region between
    the lateral ventricles, the region above the frontal ventricular horns,
    and the internal capsule.

    Parameters
    ----------
    segmentation_image: niimg
        MGDM segmentation (_mgdm_seg) giving a labeling of the brain
    levelset_boundary_image: niimg
        MGDM boundary distance image (_mgdm_dist) giving the absolute distance
        to the closest boundary
    atlas_file: str
        Path to brain atlas file used to define segmentation labels
    distance_offset: float
        Distance offset used when computing partial voluming at the boundary
        of structures
    save_data: bool
        Save output data to file (default is False)
    output_dir: str, optional
        Path to desired output directory, will be created if it doesn't exist
    file_name: str, optional
        Desired base name for output files with file extension
        (suffixes will be added)

    Returns
    ----------
    dict
        Dictionary collecting outputs under the following keys
        (suffix of output files in brackets)

        * inter_ventricular_pv (niimg): Partial volume estimate of the
          inter-ventricular region (_mrp_ivent)
        * ventricular_horns_pv (niimg): Partial volume estimate of the region above the
          ventricular horns (_mrp_vhorns)
        * internal_capsule_pv (niimg): Partial volume estimate of the internal capsule
          (_mrp_icap)
 

    Notes
    ----------
    Original Java module by Pierre-Louis Bazin.
   
    References
    ----------
    """

    print('\n Define Multi-Region Priors')

    # check atlas_file and set default if not given
    #atlas_file = _check_atlas_file(atlas_file)

    # make sure that saving related parameters are correct
    if save_data:
        output_dir = _output_dir_4saving(output_dir, segmentation_image)

        intervent_file = _fname_4saving(file_name=file_name,
                                        rootfile=segmentation_image,
                                        suffix='mrp_ivent')

        horns_file = _fname_4saving(file_name=file_name,
                                    rootfile=segmentation_image,
                                    suffix='mrp_vhorns')

        intercap_file = _fname_4saving(file_name=file_name,
                                       rootfile=segmentation_image,
                                       suffix='mrp_icap')

    # start virtual machine, if not already running
    try:
        cbstools.initVM(initialheap='6000m', maxheap='6000m')
    except ValueError:
        pass
    # create DefineMultiRegionPriors instance
    dmrp = cbstools.BrainDefineMultiRegionPriors()
 
    # set dmrp parameters
    dmrp.setAtlasFile(atlas_file)
    #dmrp.setDefinedRegion(defined_region)
    #dmrp.setDefinitionMethod(definition_method)
    dmrp.setDistanceOffset(distance_offset)

    # load segmentation image and use it to set dimensions and resolution
    img = load_volume(segmentation_image)
    data = img.get_data()
    affine = img.get_affine()
    header = img.get_header()
    resolution = [x.item() for x in header.get_zooms()]
    dimensions = data.shape

    dmrp.setDimensions(dimensions[0], dimensions[1], dimensions[2])
    dmrp.setResolutions(resolution[0], resolution[1], resolution[2])
    
    # input segmentation_image
    dmrp.setSegmentationImage(cbstools.JArray('int')((data.flatten('F')).astype(int)))

    # input levelset_boundary_image
    data = load_volume(levelset_boundary_image).get_data()
    dmrp.setLevelsetBoundaryImage(cbstools.JArray('float')((data.flatten('F')).astype(float)))

    # execute DMRP
    try:
        dmrp.execute()

    except Exception:
        # if the Java module fails, reraise the error it throws
        print("\n The underlying Java code did not execute cleanly: ")
        print(sys.exc_info()[0])
        raise
   
    
    # reshape output to what nibabel likes
    intervent_data = np.reshape(np.array(dmrp.getInterVentricularPV(),
                                   dtype=np.float32), dimensions, 'F')

    horns_data = np.reshape(np.array(dmrp.getVentricularHornsPV(),
                                   dtype=np.float32), dimensions, 'F')
    
    intercap_data = np.reshape(np.array(dmrp.getInternalCapsulePV(),
                                   dtype=np.float32), dimensions, 'F')


    # adapt header max for each image so that correct max is displayed
    # and create nifti objects
    header['cal_max'] = np.nanmax(intervent_data)
    intervent = nb.Nifti1Image(intervent_data, affine, header)

    header['cal_max'] = np.nanmax(horns_data)
    horns = nb.Nifti1Image(horns_data, affine, header)
    
    header['cal_max'] = np.nanmax(intercap_data)
    intercap = nb.Nifti1Image(intercap_data, affine, header)

   
    if save_data:
        save_volume(os.path.join(output_dir, intervent_file), intervent)
        save_volume(os.path.join(output_dir, horns_file), horns)
        save_volume(os.path.join(output_dir, intercap_file), intercap)


    return {'inter_ventricular_pv': intervent,
            'ventricular_horns_pv': horns,
            'internal_capsule_pv': intercap}
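A hypothetical call, again chained from MGDM outputs; the file and atlas names are illustrative assumptions:

# Illustrative usage sketch; paths are assumed.
priors = define_multi_region_priors(segmentation_image='sub01_mgdm_seg.nii.gz',
                                    levelset_boundary_image='sub01_mgdm_dist.nii.gz',
                                    atlas_file='brain-atlas.txt',  # assumed path
                                    distance_offset=1.0)
icap = priors['internal_capsule_pv']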
Code example #6
def mp2rage_skullstripping(second_inversion,
                           t1_weighted=None,
                           t1_map=None,
                           skip_zero_values=True,
                           topology_lut_dir=None,
                           save_data=False,
                           output_dir=None,
                           file_name=None):
    """ MP2RAGE skull stripping

    Estimates a brain mask from MRI data acquired with the MP2RAGE sequence.
    At least a T1-weighted or a T1 map image is required

    Parameters
    ----------
    second_inversion: niimg
        Second inversion image derived from MP2RAGE sequence
    t1_weighted: niimg
        T1-weighted image derived from MP2RAGE sequence (also referred to as
        "uniform" image)
        At least one of t1_weighted and t1_map is required
    t1_map: niimg
        Quantitative T1 map image derived from MP2RAGE sequence
        At least one of t1_weighted and t1_map is required
    skip_zero_values: bool
         Ignores voxels with zero value (default is True)
    topology_lut_dir: str, optional
        Path to directory in which topology files are stored (default is stored
        in TOPOLOGY_LUT_DIR)
    save_data: bool
        Save output data to file (default is False)
    output_dir: str, optional
        Path to desired output directory, will be created if it doesn't exist
    file_name: str, optional
        Desired base name for output files with file extension
        (suffixes will be added)

    Returns
    ----------
    dict
        Dictionary collecting outputs under the following keys
        (suffix of output files in brackets)

        * brain_mask (niimg): Binary brain mask (_strip_mask)
        * inv2_masked (niimg): Masked second inversion image (_strip_inv2)
        * t1w_masked (niimg): Masked T1-weighted image (_strip_t1w)
        * t1map_masked (niimg): Masked T1 map (_strip_t1map)

    Notes
    ----------
    Original Java module by Pierre-Louis Bazin. Details on the MP2RAGE
    sequence can be found in [1]_

    References
    ----------
    .. [1] Marques et al. (2010). MP2RAGE, a self bias-field corrected sequence
       for improved segmentation and T1-mapping at high field.
       DOI: 10.1016/j.neuroimage.2009.10.002
    """

    print('\nMP2RAGE Skull Stripping')

    # check topology lut dir and set default if not given
    topology_lut_dir = _check_topology_lut_dir(topology_lut_dir)

    # make sure that saving related parameters are correct
    if save_data:
        output_dir = _output_dir_4saving(output_dir, second_inversion)

        inv2_file = _fname_4saving(file_name=file_name,
                                   rootfile=second_inversion,
                                   suffix='strip_inv2')
        mask_file = _fname_4saving(file_name=file_name,
                                   rootfile=second_inversion,
                                   suffix='strip_mask')
        if t1_weighted is not None:
            t1w_file = _fname_4saving(file_name=file_name,
                                      rootfile=t1_weighted,
                                      suffix='strip_t1w')

        if t1_map is not None:
            t1map_file = _fname_4saving(file_name=file_name,
                                        rootfile=t1_map,
                                        suffix='strip_t1map')

    # start virtual machine, if not already running
    try:
        cbstools.initVM(initialheap='6000m', maxheap='6000m')
    except ValueError:
        pass

    # create skull stripping instance
    stripper = cbstools.BrainMp2rageSkullStripping()

    # get dimensions and resolution from second inversion image
    inv2_img = load_volume(second_inversion)
    inv2_data = inv2_img.get_data()
    inv2_affine = inv2_img.get_affine()
    inv2_hdr = inv2_img.get_header()
    resolution = [x.item() for x in inv2_hdr.get_zooms()]
    dimensions = inv2_data.shape
    stripper.setDimensions(dimensions[0], dimensions[1], dimensions[2])
    stripper.setResolutions(resolution[0], resolution[1], resolution[2])
    stripper.setSecondInversionImage(
        cbstools.JArray('float')((inv2_data.flatten('F')).astype(float)))

    # pass other inputs
    if t1_weighted is None and t1_map is None:
        raise ValueError('You must specify at least one of '
                         't1_weighted and t1_map')
    if t1_weighted is not None:
        t1w_img = load_volume(t1_weighted)
        t1w_data = t1w_img.get_data()
        t1w_affine = t1w_img.get_affine()
        t1w_hdr = t1w_img.get_header()
        stripper.setT1weightedImage(
            cbstools.JArray('float')((t1w_data.flatten('F')).astype(float)))
    if t1_map is not None:
        t1map_img = load_volume(t1_map)
        t1map_data = t1map_img.get_data()
        t1map_affine = t1map_img.get_affine()
        t1map_hdr = t1map_img.get_header()
        stripper.setT1MapImage(
            cbstools.JArray('float')((t1map_data.flatten('F')).astype(float)))

    stripper.setSkipZeroValues(skip_zero_values)
    stripper.setTopologyLUTdirectory(topology_lut_dir)

    # execute skull stripping
    try:
        stripper.execute()

    except Exception:
        # if the Java module fails, reraise the error it throws
        print("\n The underlying Java code did not execute cleanly: ")
        print(sys.exc_info()[0])
        raise

    # collect outputs and potentially save
    inv2_masked_data = np.reshape(
        np.array(stripper.getMaskedSecondInversionImage(), dtype=np.float32),
        dimensions, 'F')
    inv2_hdr['cal_max'] = np.nanmax(inv2_masked_data)
    inv2_masked = nb.Nifti1Image(inv2_masked_data, inv2_affine, inv2_hdr)

    mask_data = np.reshape(
        np.array(stripper.getBrainMaskImage(), dtype=np.uint32), dimensions,
        'F')
    inv2_hdr['cal_max'] = np.nanmax(mask_data)
    mask = nb.Nifti1Image(mask_data, inv2_affine, inv2_hdr)

    outputs = {'brain_mask': mask, 'inv2_masked': inv2_masked}

    if save_data:
        save_volume(os.path.join(output_dir, inv2_file), inv2_masked)
        save_volume(os.path.join(output_dir, mask_file), mask)

    if t1_weighted is not None:
        t1w_masked_data = np.reshape(
            np.array(stripper.getMaskedT1weightedImage(), dtype=np.float32),
            dimensions, 'F')
        t1w_hdr['cal_max'] = np.nanmax(t1w_masked_data)
        t1w_masked = nb.Nifti1Image(t1w_masked_data, t1w_affine, t1w_hdr)
        outputs['t1w_masked'] = t1w_masked

        if save_data:
            save_volume(os.path.join(output_dir, t1w_file), t1w_masked)

    if t1_map is not None:
        t1map_masked_data = np.reshape(
            np.array(stripper.getMaskedT1MapImage(), dtype=np.float32),
            dimensions, 'F')
        t1map_hdr['cal_max'] = np.nanmax(t1map_masked_data)
        t1map_masked = nb.Nifti1Image(t1map_masked_data, t1map_affine,
                                      t1map_hdr)
        outputs['t1map_masked'] = t1map_masked

        if save_data:
            save_volume(os.path.join(output_dir, t1map_file), t1map_masked)

    return outputs
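A hypothetical call; the file names are illustrative, and at least one of t1_weighted and t1_map must be given, as enforced above:

# Illustrative usage sketch; file names are assumed.
strip = mp2rage_skullstripping(second_inversion='sub01_inv2.nii.gz',
                               t1_weighted='sub01_uni.nii.gz',
                               save_data=True,
                               output_dir='/tmp/strip')
brain_mask = strip['brain_mask']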
Code example #7
File: profile_sampling.py (project: nscherf/nighres)
def profile_sampling(profile_surface_image,
                     intensity_image,
                     save_data=False,
                     output_dir=None,
                     file_name=None):
    '''Sampling data on multiple intracortical layers

    Parameters
    -----------
    profile_surface_image: niimg
        4D image containing levelset representations of different intracortical
        surfaces on which data should be sampled
    intensity_image: niimg
        Image from which data should be sampled
    save_data: bool
        Save output data to file (default is False)
    output_dir: str, optional
        Path to desired output directory, will be created if it doesn't exist
    file_name: str, optional
        Desired base name for output files with file extension
        (suffixes will be added)

    Returns
    -----------
    niimg
        4D profile image, where the 4th dimension represents the
        profile for each voxel (output file suffix _profiles)

    Notes
    ----------
    Original Java module by Pierre-Louis Bazin and Juliane Dinse
    '''

    print('\nProfile sampling')

    # make sure that saving related parameters are correct
    if save_data:
        output_dir = _output_dir_4saving(output_dir, intensity_image)

        profile_file = _fname_4saving(file_name=file_name,
                                      rootfile=intensity_image,
                                      suffix='profiles')

    # start VM if not already running
    try:
        cbstools.initVM(initialheap='6000m', maxheap='6000m')
    except ValueError:
        pass

    # instantiate class
    sampler = cbstools.LaminarProfileSampling()

    # load the data
    surface_img = load_volume(profile_surface_image)
    surface_data = surface_img.get_data()
    hdr = surface_img.get_header()
    aff = surface_img.get_affine()
    resolution = [x.item() for x in hdr.get_zooms()]
    dimensions = surface_data.shape

    intensity_data = load_volume(intensity_image).get_data()

    # pass inputs
    sampler.setIntensityImage(
        cbstools.JArray('float')((intensity_data.flatten('F')).astype(float)))
    sampler.setProfileSurfaceImage(
        cbstools.JArray('float')((surface_data.flatten('F')).astype(float)))
    sampler.setResolutions(resolution[0], resolution[1], resolution[2])
    sampler.setDimensions(dimensions[0], dimensions[1], dimensions[2],
                          dimensions[3])

    # execute class
    try:
        sampler.execute()

    except Exception:
        # if the Java module fails, reraise the error it throws
        print("\n The underlying Java code did not execute cleanly: ")
        print(sys.exc_info()[0])
        raise

    # collecting outputs
    profile_data = np.reshape(
        np.array(sampler.getProfileMappedIntensityImage(), dtype=np.float32),
        dimensions, 'F')

    hdr['cal_max'] = np.nanmax(profile_data)
    profiles = nb.Nifti1Image(profile_data, aff, hdr)

    if save_data:
        save_volume(os.path.join(output_dir, profile_file), profiles)

    return profiles
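A hypothetical call; the file names are illustrative, and the surface image is a 4D stack of levelsets such as the boundary surfaces produced by a volumetric layering step:

# Illustrative usage sketch; file names are assumed.
profiles = profile_sampling(profile_surface_image='sub01_boundaries.nii.gz',
                            intensity_image='sub01_t1map.nii.gz',
                            save_data=True,
                            output_dir='/tmp/profiles')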
Code example #8
def cruise_cortex_extraction(init_image,
                             wm_image,
                             gm_image,
                             csf_image,
                             vd_image=None,
                             data_weight=0.4,
                             regularization_weight=0.1,
                             max_iterations=500,
                             normalize_probabilities=False,
                             correct_wm_pv=True,
                             wm_dropoff_dist=1.0,
                             topology='wcs',
                             topology_lut_dir=None,
                             save_data=False,
                             output_dir=None,
                             file_name=None):
    """ CRUISE cortex extraction

    Segments the cortex from a whole brain segmented data set with the CRUISE
    method (includes customized partial voluming corrections and the
    Anatomically-Consistent Enhancement (ACE) of sulcal fundi).

    Note that the main input images are generated by the nighres module
    :func:`nighres.brain.extract_brain_region`.

    Parameters
    ----------
    init_image: niimg
        Initial white matter (WM) segmentation mask (binary mask>0 inside WM)
    wm_image: niimg
        Filled WM probability map (values in [0,1], including subcortical GM
        and ventricles)
    gm_image: niimg
        Cortical gray matter (GM) probability map (values in [0,1], highest
        inside the cortex)
    csf_image: niimg
        Sulcal cerebro-spinal fluid (CSF) and background probability map
        (values in [0,1], highest in CSF and masked regions)
    vd_image: niimg, optional
        Additional probability map of vessels and dura mater to be excluded
    data_weight: float
        Weighting of probability-based balloon forces in CRUISE (default is
        0.4; the sum of data_weight and regularization_weight should not
        exceed 1)
    regularization_weight: float
        Weighting of curvature regularization forces in CRUISE (default is
        0.1; the sum of data_weight and regularization_weight should not
        exceed 1)
    max_iterations: int
        Maximum number of iterations in CRUISE (default is 500)
    normalize_probabilities: bool
        Whether to normalize the wm, gm, and csf probabilities
        (default is False)
    correct_wm_pv: bool
        Whether to correct for WM partial voluming in gyral crowns
        (default is True)
    wm_dropoff_dist: float
        Distance parameter to lower WM probabilities away from current
        segmentation (default is 1.0 voxel)
    topology: {'wcs', 'no'}
        Topology setting, choose 'wcs' (well-composed surfaces) for strongest
        topology constraint, 'no' for no topology constraint (default is 'wcs')
    topology_lut_dir: str
        Path to directory in which topology files are stored (default is stored
        in TOPOLOGY_LUT_DIR)
    save_data: bool
        Save output data to file (default is False)
    output_dir: str, optional
        Path to desired output directory, will be created if it doesn't exist
    file_name: str, optional
        Desired base name for output files with file extension
        (suffixes will be added)

    Returns
    ----------
    dict
        Dictionary collecting outputs under the following keys
        (suffix of output files in brackets)

        * cortex (niimg): Hard segmentation of the cortex with labels
          background=0, gm=1, and wm=2 (_cruise_cortex)
        * gwb (niimg): Gray-White matter Boundary (GWB) level set function
          (_cruise_gwb)
        * cgb (niimg): CSF-Gray matter Boundary (CGB) level set function
          (_cruise_cgb)
        * avg (niimg): Central level set function, obtained as geometric
          average of GWB and CGB (*not* the middle depth of the
          cortex, use volumetric_layering if you want accurate
          depth measures) (_cruise_avg)
        * thickness (niimg): Simple cortical thickness estimate: distance to
          the GWB and CGB surfaces, in mm (_cruise_thick)
        * pwm (niimg): Optimized WM probability, including partial volume and
          distant values correction (_cruise_pwm)
        * pgm (niimg): Optimized GM probability, including CSF sulcal ridges
          correction (_cruise_pgm)
        * pcsf (niimg): Optimized CSF probability, including sulcal ridges and
          vessel/dura correction (_cruise_pcsf)

    Notes
    ----------
    Original algorithm by Xiao Han. Java module by Pierre-Louis Bazin.
    Algorithm details can be found in [1]_

    References
    ----------
    .. [1] X. Han, D.L. Pham, D. Tosun, M.E. Rettmann, C. Xu, and J. L. Prince,
       CRUISE: Cortical Reconstruction Using Implicit Surface Evolution,
       NeuroImage, vol. 23, pp. 997--1012, 2004
    """

    print('\nCRUISE Cortical Extraction')

    # check topology_lut_dir and set default if not given
    topology_lut_dir = _check_topology_lut_dir(topology_lut_dir)

    # make sure that saving related parameters are correct
    if save_data:
        output_dir = _output_dir_4saving(output_dir, gm_image)

        cortex_file = _fname_4saving(
            file_name=file_name,
            rootfile=gm_image,
            suffix='cruise_cortex',
        )

        gwb_file = _fname_4saving(
            file_name=file_name,
            rootfile=gm_image,
            suffix='cruise_gwb',
        )

        cgb_file = _fname_4saving(
            file_name=file_name,
            rootfile=gm_image,
            suffix='cruise_cgb',
        )

        avg_file = _fname_4saving(
            file_name=file_name,
            rootfile=gm_image,
            suffix='cruise_avg',
        )

        thick_file = _fname_4saving(
            file_name=file_name,
            rootfile=gm_image,
            suffix='cruise_thick',
        )

        pwm_file = _fname_4saving(
            file_name=file_name,
            rootfile=gm_image,
            suffix='cruise_pwm',
        )

        pgm_file = _fname_4saving(
            file_name=file_name,
            rootfile=gm_image,
            suffix='cruise_pgm',
        )

        pcsf_file = _fname_4saving(
            file_name=file_name,
            rootfile=gm_image,
            suffix='cruise_pcsf',
        )

    # start virtual machine, if not already running
    try:
        cbstools.initVM(initialheap='6000m', maxheap='6000m')
    except ValueError:
        pass
    # create instance
    cruise = cbstools.CortexOptimCRUISE()

    # set parameters
    cruise.setDataWeight(data_weight)
    cruise.setRegularizationWeight(regularization_weight)
    cruise.setMaxIterations(max_iterations)
    cruise.setNormalizeProbabilities(normalize_probabilities)
    cruise.setCorrectForWMGMpartialVoluming(correct_wm_pv)
    cruise.setWMdropoffDistance(wm_dropoff_dist)
    cruise.setTopology(topology)
    cruise.setTopologyLUTdirectory(topology_lut_dir)

    # load images
    init = load_volume(init_image)
    init_data = init.get_data()
    affine = init.get_affine()
    header = init.get_header()
    resolution = [x.item() for x in header.get_zooms()]
    dimensions = init_data.shape
    cruise.setDimensions(dimensions[0], dimensions[1], dimensions[2])
    cruise.setResolutions(resolution[0], resolution[1], resolution[2])
    cruise.importInitialWMSegmentationImage(
        cbstools.JArray('int')((init_data.flatten('F')).astype(int)))

    wm_data = load_volume(wm_image).get_data()
    cruise.setFilledWMProbabilityImage(
        cbstools.JArray('float')((wm_data.flatten('F')).astype(float)))

    gm_data = load_volume(gm_image).get_data()
    cruise.setGMProbabilityImage(
        cbstools.JArray('float')((gm_data.flatten('F')).astype(float)))

    csf_data = load_volume(csf_image).get_data()
    cruise.setCSFandBGProbabilityImage(
        cbstools.JArray('float')((csf_data.flatten('F')).astype(float)))

    if vd_image is not None:
        vd_data = load_volume(vd_image).get_data()
        cruise.setVeinsAndDuraProbabilityImage(
            cbstools.JArray('float')((vd_data.flatten('F')).astype(float)))

    # execute
    try:
        cruise.execute()

    except:
        # if the Java module fails, reraise the error it throws
        print("\n The underlying Java code did not execute cleanly: ")
        print(sys.exc_info()[0])
        raise

    # reshape output to what nibabel likes
    cortex_data = np.reshape(np.array(cruise.getCortexMask(), dtype=np.int32),
                             dimensions, 'F')
    gwb_data = np.reshape(np.array(cruise.getWMGMLevelset(), dtype=np.float32),
                          dimensions, 'F')
    cgb_data = np.reshape(
        np.array(cruise.getGMCSFLevelset(), dtype=np.float32), dimensions, 'F')
    avg_data = np.reshape(
        np.array(cruise.getCentralLevelset(), dtype=np.float32), dimensions,
        'F')
    thick_data = np.reshape(
        np.array(cruise.getCorticalThickness(), dtype=np.float32), dimensions,
        'F')
    pwm_data = np.reshape(
        np.array(cruise.getCerebralWMprobability(), dtype=np.float32),
        dimensions, 'F')
    pgm_data = np.reshape(
        np.array(cruise.getCorticalGMprobability(), dtype=np.float32),
        dimensions, 'F')
    pcsf_data = np.reshape(
        np.array(cruise.getSulcalCSFprobability(), dtype=np.float32),
        dimensions, 'F')

    # adapt header min and max for each image so that the correct range is
    # displayed, and create nifti objects
    header['cal_min'] = np.nanmin(cortex_data)
    header['cal_max'] = np.nanmax(cortex_data)
    cortex = nb.Nifti1Image(cortex_data, affine, header)

    header['cal_min'] = np.nanmin(gwb_data)
    header['cal_max'] = np.nanmax(gwb_data)
    gwb = nb.Nifti1Image(gwb_data, affine, header)

    header['cal_min'] = np.nanmin(cgb_data)
    header['cal_max'] = np.nanmax(cgb_data)
    cgb = nb.Nifti1Image(cgb_data, affine, header)

    header['cal_min'] = np.nanmin(avg_data)
    header['cal_max'] = np.nanmax(avg_data)
    avg = nb.Nifti1Image(avg_data, affine, header)

    header['cal_min'] = np.nanmin(thick_data)
    header['cal_max'] = np.nanmax(thick_data)
    thickness = nb.Nifti1Image(thick_data, affine, header)

    header['cal_min'] = np.nanmin(pwm_data)
    header['cal_max'] = np.nanmax(pwm_data)
    pwm = nb.Nifti1Image(pwm_data, affine, header)

    header['cal_min'] = np.nanmin(pgm_data)
    header['cal_max'] = np.nanmax(pgm_data)
    pgm = nb.Nifti1Image(pgm_data, affine, header)

    header['cal_min'] = np.nanmin(pcsf_data)
    header['cal_max'] = np.nanmax(pcsf_data)
    pcsf = nb.Nifti1Image(pcsf_data, affine, header)

    if save_data:
        save_volume(os.path.join(output_dir, cortex_file), cortex)
        save_volume(os.path.join(output_dir, gwb_file), gwb)
        save_volume(os.path.join(output_dir, cgb_file), cgb)
        save_volume(os.path.join(output_dir, avg_file), avg)
        save_volume(os.path.join(output_dir, thick_file), thickness)
        save_volume(os.path.join(output_dir, pwm_file), pwm)
        save_volume(os.path.join(output_dir, pgm_file), pgm)
        save_volume(os.path.join(output_dir, pcsf_file), pcsf)

    return {
        'cortex': cortex,
        'gwb': gwb,
        'cgb': cgb,
        'avg': avg,
        'thickness': thickness,
        'pwm': pwm,
        'pgm': pgm,
        'pcsf': pcsf
    }
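
A minimal usage sketch for cruise_cortex_extraction, assuming probability maps such as those produced by nighres.brain.extract_brain_region; all file names are hypothetical:

# hypothetical inputs following the parameter descriptions above
cruise = cruise_cortex_extraction(
    init_image='wm_mask.nii.gz',      # binary WM initialization
    wm_image='wm_proba.nii.gz',       # filled WM probability map
    gm_image='gm_proba.nii.gz',       # cortical GM probability map
    csf_image='csf_proba.nii.gz',     # sulcal CSF / background map
    data_weight=0.4,
    regularization_weight=0.1,        # data + regularization weights <= 1
    save_data=True,
    output_dir='/tmp/nighres_out')
cortex_mask = cruise['cortex']        # labels: background=0, gm=1, wm=2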
Code Example #9
def filter_ridge_structures(input_image,
                            structure_intensity='bright',
                            output_type='probability',
                            use_strict_min_max_filter=True,
                            save_data=False,
                            output_dir=None,
                            file_name=None):
    """ Filter Ridge Structures
    
    Uses an image filter to make a probabilistic image of ridge
    structures.


    Parameters
    ----------
    input_image: niimg
        Image containing structure-of-interest
    structure_intensity: str
        Image intensity of the structure of interest: 'bright', 'dark',
        or 'both'
    output_type: str
        Whether the image should be normalized to reflect probabilities
        ('probability' or 'intensity')
    use_strict_min_max_filter: bool, optional (default is True)
        Choose between the more specific recursive ridge filter or a more
        sensitive bidirectional filter
    save_data: bool, optional
        Save output data to file (default is False)
    output_dir: str, optional
        Path to desired output directory, will be created if it doesn't exist
    file_name: str, optional
        Desired base name for output files with file extension
        (suffixes will be added)

    Returns
    ----------
    dict
        Dictionary collecting outputs under the following keys
        (suffix of output files in brackets)

        * ridge_structure_image: Image that reflects the presence of ridges
          in the image

    Notes
    ----------
    Original Java module by Pierre-Louis Bazin.
    """

    if save_data:
        output_dir = _output_dir_4saving(output_dir, input_image)

        ridge_file = _fname_4saving(
            file_name=file_name,
            rootfile=input_image,
            suffix='rdg',
        )
    outputs = {}

    # start virtual machine, if not already running
    try:
        cbstools.initVM(initialheap='6000m', maxheap='6000m')
    except ValueError:
        pass
    # create algorithm instance
    filter_ridge = cbstools.FilterRidgeStructures()

    # set parameters
    filter_ridge.setStructureIntensity(structure_intensity)
    filter_ridge.setOutputType(output_type)
    filter_ridge.setUseStrictMinMaxFilter(use_strict_min_max_filter)

    # load images and set dimensions and resolution
    input_image = load_volume(input_image)
    data = input_image.get_data()
    affine = input_image.get_affine()
    header = input_image.get_header()
    resolution = [x.item() for x in header.get_zooms()]
    dimensions = input_image.shape

    filter_ridge.setDimensions(dimensions[0], dimensions[1], dimensions[2])
    filter_ridge.setResolutions(resolution[0], resolution[1], resolution[2])

    filter_ridge.setInputImage(
        cbstools.JArray('float')((data.flatten('F')).astype(float)))

    # execute
    try:
        filter_ridge.execute()

    except:
        # if the Java module fails, reraise the error it throws
        print("\n The underlying Java code did not execute cleanly: ")
        print(sys.exc_info()[0])
        raise

    # Collect output
    ridge_structure_image_data = np.reshape(
        np.array(filter_ridge.getRidgeStructureImage(), dtype=np.float32),
        dimensions, 'F')

    if output_type == 'probability':
        header['cal_min'] = 0.0
        header['cal_max'] = 1.0
    else:
        header['cal_min'] = np.nanmin(ridge_structure_image_data)
        header['cal_max'] = np.nanmax(ridge_structure_image_data)

    ridge_structure_image = nb.Nifti1Image(ridge_structure_image_data, affine,
                                           header)
    outputs['ridge_structure_image'] = ridge_structure_image

    if save_data:
        save_volume(os.path.join(output_dir, ridge_file),
                    ridge_structure_image)

    return outputs
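
A minimal usage sketch for filter_ridge_structures; the input file name is hypothetical:

# hypothetical call: probabilistic map of bright tubular/planar structures
ridges = filter_ridge_structures(
    input_image='tof_angiography.nii.gz',  # hypothetical bright-vessel image
    structure_intensity='bright',
    output_type='probability',
    use_strict_min_max_filter=True,
    save_data=True)
proba = ridges['ridge_structure_image']    # values in [0, 1]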
Code Example #10
def volumetric_layering(inner_levelset,
                        outer_levelset,
                        n_layers=4,
                        topology_lut_dir=None,
                        save_data=False,
                        output_dir=None,
                        file_name=None):
    '''Equivolumetric layering of the cortical sheet.

    Parameters
    ----------
    inner_levelset: niimg
        Levelset representation of the inner surface, typically GM/WM surface
    outer_levelset : niimg
        Levelset representation of the outer surface, typically GM/CSF surface
    n_layers : int, optional
        Number of layers to be created (default is 4)
    topology_lut_dir: str, optional
        Path to directory in which topology files are stored (default is stored
        in TOPOLOGY_LUT_DIR)
    save_data: bool
        Save output data to file (default is False)
    output_dir: str, optional
        Path to desired output directory, will be created if it doesn't exist
    file_name: str, optional
        Desired base name for output files with file extension
        (suffixes will be added)

    Returns
    ----------
    dict
        Dictionary collecting outputs under the following keys
        (suffix of output files in brackets)

        * depth (niimg): Continuous depth from 0 (inner surface) to 1
          (outer surface) (_layering_depth)
        * layers (niimg): Discrete layers from 1 (bordering inner surface) to
          n_layers (bordering outer surface) (_layering_layers)
        * boundaries (niimg): Levelset representations of boundaries between
          all layers in 4D (_layering_boundaries)

    Notes
    ----------
    Original Java module by Miriam Waehnert, Pierre-Louis Bazin and
    Juliane Dinse. Algorithm details can be found in [1]_

    References
    ----------
    .. [1] Waehnert et al (2014) Anatomically motivated modeling of cortical
       laminae. DOI: 10.1016/j.neuroimage.2013.03.078
    '''

    print('\nVolumetric Layering')

    # check topology lut dir and set default if not given
    topology_lut_dir = _check_topology_lut_dir(topology_lut_dir)

    # make sure that saving related parameters are correct
    if save_data:
        output_dir = _output_dir_4saving(output_dir, inner_levelset)

        depth_file = _fname_4saving(file_name=file_name,
                                    rootfile=inner_levelset,
                                    suffix='layering_depth')

        layer_file = _fname_4saving(file_name=file_name,
                                    rootfile=inner_levelset,
                                    suffix='layering_layers')

        boundary_file = _fname_4saving(file_name=file_name,
                                       rootfile=inner_levelset,
                                       suffix='layering_boundaries')

    # start virtual machine, if not already running
    try:
        cbstools.initVM(initialheap='12000m', maxheap='12000m')
    except ValueError:
        pass

    # initiate class
    lamination = cbstools.LaminarVolumetricLayering()

    # load the data
    inner_img = load_volume(inner_levelset)
    inner_data = inner_img.get_data()
    hdr = inner_img.get_header()
    aff = inner_img.get_affine()
    resolution = [x.item() for x in hdr.get_zooms()]
    dimensions = inner_data.shape

    outer_data = load_volume(outer_levelset).get_data()

    # set parameters from input images
    lamination.setDimensions(dimensions[0], dimensions[1], dimensions[2])
    lamination.setResolutions(resolution[0], resolution[1], resolution[2])
    lamination.setInnerDistanceImage(
        cbstools.JArray('float')((inner_data.flatten('F')).astype(float)))
    lamination.setOuterDistanceImage(
        cbstools.JArray('float')((outer_data.flatten('F')).astype(float)))
    lamination.setNumberOfLayers(n_layers)
    lamination.setTopologyLUTdirectory(topology_lut_dir)

    # execute class
    try:
        lamination.execute()

    except:
        # if the Java module fails, reraise the error it throws
        print("\n The underlying Java code did not execute cleanly: ")
        print(sys.exc_info()[0])
        raise

    # collect data
    depth_data = np.reshape(
        np.array(lamination.getContinuousDepthMeasurement(), dtype=np.float32),
        dimensions, 'F')
    hdr['cal_max'] = np.nanmax(depth_data)
    depth = nb.Nifti1Image(depth_data, aff, hdr)

    layer_data = np.reshape(
        np.array(lamination.getDiscreteSampledLayers(), dtype=np.int32),
        dimensions, 'F')
    hdr['cal_max'] = np.nanmax(layer_data)
    layers = nb.Nifti1Image(layer_data, aff, hdr)

    boundary_len = lamination.getLayerBoundarySurfacesLength()
    boundary_data = np.reshape(
        np.array(lamination.getLayerBoundarySurfaces(), dtype=np.float32),
        (dimensions[0], dimensions[1], dimensions[2], boundary_len), 'F')
    hdr['cal_min'] = np.nanmin(boundary_data)
    hdr['cal_max'] = np.nanmax(boundary_data)
    boundaries = nb.Nifti1Image(boundary_data, aff, hdr)

    if save_data:
        save_volume(os.path.join(output_dir, depth_file), depth)
        save_volume(os.path.join(output_dir, layer_file), layers)
        save_volume(os.path.join(output_dir, boundary_file), boundaries)

    return {'depth': depth, 'layers': layers, 'boundaries': boundaries}
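
A minimal usage sketch chaining the CRUISE levelsets into the layering; the levelset file names are hypothetical:

# hypothetical inputs: the 'gwb' and 'cgb' outputs of cruise_cortex_extraction
layering = volumetric_layering(
    inner_levelset='cruise_gwb.nii.gz',  # GM/WM boundary levelset
    outer_levelset='cruise_cgb.nii.gz',  # GM/CSF boundary levelset
    n_layers=6,
    save_data=True)
depth = layering['depth']    # continuous cortical depth in [0, 1]
layers = layering['layers']  # discrete labels 1..n_layers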
Code Example #11
def probability_to_levelset(probability_image,
                            save_data=False, output_dir=None,
                            file_name=None):

    """Levelset from tissue classification

    Creates a levelset surface representation from a probabilistic or
    deterministic tissue classification. The levelset indicates each voxel's
    distance to the closest boundary. It takes negative values inside and
    positive values outside of the brain.

    Parameters
    ----------
    probability_image: niimg
        Tissue segmentation to be turned into levelset. Values should be in
        [0, 1], either a binary mask or defining the boundary at 0.5.
    save_data: bool
        Save output data to file (default is False)
    output_dir: str, optional
        Path to desired output directory, will be created if it doesn't exist
    file_name: str, optional
        Desired base name for output files with file extension
        (suffixes will be added)

    Returns
    ----------
    niimg
        Levelset representation of surface (output file suffix _levelset)

    Notes
    ----------
    Original Java module by Pierre-Louis Bazin
    """

    print("\nProbability to Levelset")

    # make sure that saving related parameters are correct
    if save_data:
        output_dir = _output_dir_4saving(output_dir, probability_image)

        levelset_file = _fname_4saving(file_name=file_name,
                                       rootfile=probability_image,
                                       suffix='levelset')

    # start virtual machine if not running
    try:
        cbstools.initVM(initialheap='6000m', maxheap='6000m')
    except ValueError:
        pass

    # initiate class
    prob2level = cbstools.SurfaceProbabilityToLevelset()

    # load the data
    prob_img = load_volume(probability_image)
    prob_data = prob_img.get_data()
    hdr = prob_img.get_header()
    aff = prob_img.get_affine()
    resolution = [x.item() for x in hdr.get_zooms()]
    dimensions = prob_data.shape

    # set parameters from input data
    prob2level.setProbabilityImage(cbstools.JArray('float')(
                                    (prob_data.flatten('F')).astype(float)))
    prob2level.setResolutions(resolution[0], resolution[1], resolution[2])
    prob2level.setDimensions(dimensions[0], dimensions[1], dimensions[2])

    # execute class
    try:
        prob2level.execute()

    except:
        # if the Java module fails, reraise the error it throws
        print("\n The underlying Java code did not execute cleanly: ")
        print(sys.exc_info()[0])
        raise

    # collect outputs
    levelset_data = np.reshape(np.array(prob2level.getLevelSetImage(),
                               dtype=np.float32), dimensions, 'F')

    hdr['cal_max'] = np.nanmax(levelset_data)
    levelset = nb.Nifti1Image(levelset_data, aff, hdr)

    if save_data:
        save_volume(os.path.join(output_dir, levelset_file), levelset)

    return levelset
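
A minimal usage sketch for probability_to_levelset; the input file name is hypothetical:

# hypothetical call: turn a GM probability map into a signed distance levelset
levelset = probability_to_levelset('gm_proba.nii.gz', save_data=True)
inside = levelset.get_data() < 0   # negative inside, positive outside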
Code Example #12
File: lesion_extraction.py  Project: atsuch/nighres
def lesion_extraction(probability_image, segmentation_image,
                      levelset_boundary_image, location_prior_image,
                      atlas_file,
                      gm_boundary_partial_vol_dist, csf_boundary_partial_vol_dist,
                      lesion_clust_dist, prob_min_thresh, prob_max_thresh,
                      small_lesion_size,
                      save_data=False, output_dir=None,
                      file_name=None):
    
    """ Lesion Extraction

    Extracts lesions from a probability image and a pre-segmentation with MGDM.

    Parameters
    ----------
    probability_image: niimg

    segmentation_image: niimg

    levelset_boundary_image: niimg
        MGDM distance to closest boundary (_mgdm_dist)

    location_prior_image: niimg

    atlas_file: str
        Path to MGDM brain atlas file (default is stored in DEFAULT_ATLAS)

    gm_boundary_partial_vol_dist: float

    csf_boundary_partial_vol_dist: float

    lesion_clust_dist: float

    prob_min_thresh: float

    prob_max_thresh: float

    small_lesion_size: float

    Returns
    ----------
    dict
        Dictionary collecting outputs under the following keys
        (suffix of output files in brackets)

        * lesion_prior (niimg): 
        * lesion_size (niimg): 
        * lesion_proba (niimg): 
        * lesion_pv (niimg): 
        * lesion_labels (niimg): 
        * lesion_score (niimg): 

    Notes
    ----------
    Original Java module by Pierre-Louis Bazin. 

    References
    ----------

    """

    print('\n Lesion Extraction')

    # check atlas_file and set default if not given
    #atlas_file = _check_atlas_file(atlas_file)

    # make sure that saving related parameters are correct
    if save_data:
        output_dir = _output_dir_4saving(output_dir, probability_image)

        lesion_prior_file = _fname_4saving(file_name=file_name,
                                           rootfile=probability_image,
                                           suffix='lesion_prior')

        lesion_size_file = _fname_4saving(file_name=file_name,
                                          rootfile=probability_image,
                                          suffix='lesion_size')

        lesion_proba_file = _fname_4saving(file_name=file_name,
                                           rootfile=probability_image,
                                           suffix='lesion_proba')

        lesion_pv_file = _fname_4saving(file_name=file_name,
                                        rootfile=probability_image,
                                        suffix='lesion_pv')

        lesion_labels_file = _fname_4saving(file_name=file_name,
                                            rootfile=probability_image,
                                            suffix='lesion_labels')

        lesion_score_file = _fname_4saving(file_name=file_name,
                                           rootfile=probability_image,
                                           suffix='lesion_score')

    # start virtual machine, if not already running
    try:
        cbstools.initVM(initialheap='6000m', maxheap='6000m')
    except ValueError:
        pass
    # create extraction instance
    el = cbstools.SegmentationLesionExtraction()

    # set extraction parameters
    el.setComponents(3) # not used in module
    el.setAtlasFile(atlas_file)
    el.setGMPartialVolumingDistance(gm_boundary_partial_vol_dist)
    el.setCSFPartialVolumingDistance(csf_boundary_partial_vol_dist)
    el.setLesionClusteringDistance(lesion_clust_dist)
    el.setMinProbabilityThreshold(prob_min_thresh)
    el.setMaxProbabilityThreshold(prob_max_thresh)
    el.setMinimumSize(small_lesion_size)
    

    # load segmentation image and use it to set dimensions and resolution
    img = load_volume(segmentation_image)
    data = img.get_data()
    affine = img.get_affine()
    header = img.get_header()
    resolution = [x.item() for x in header.get_zooms()]
    dimensions = data.shape

    el.setDimensions(dimensions[0], dimensions[1], dimensions[2])
    el.setResolutions(resolution[0], resolution[1], resolution[2])

    # input segmentation_image
    el.setSegmentationImage(cbstools.JArray('int')((data.flatten('F')).astype(int)))

    # input levelset_boundary_image
    data = load_volume(levelset_boundary_image).get_data()
    el.setLevelsetBoundaryImage(cbstools.JArray('float')((data.flatten('F')).astype(float)))
    
    # input probability_image
    data = load_volume(probability_image).get_data()
    el.setProbaImage(cbstools.JArray('float')((data.flatten('F')).astype(float)))
    
    # input location_prior_image
    data = load_volume(location_prior_image).get_data()
    el.setLocationPriorImage(cbstools.JArray('float')((data.flatten('F')).astype(float)))
    

    # execute Extraction
    try:
        el.execute()

    except:
        # if the Java module fails, reraise the error it throws
        print("\n The underlying Java code did not execute cleanly: ")
        print(sys.exc_info()[0])
        raise

    # reshape output to what nibabel likes
    lesion_prior_data = np.reshape(np.array(el.getRegionPrior(),
                                   dtype=np.float32), dimensions, 'F')

    lesion_size_data = np.reshape(np.array(el.getLesionSize(),
                                    dtype=np.float32), dimensions, 'F')
    
    lesion_proba_data = np.reshape(np.array(el.getLesionProba(),
                                    dtype=np.float32), dimensions, 'F')
    
    lesion_pv_data = np.reshape(np.array(el.getBoundaryPartialVolume(),
                                   dtype=np.float32), dimensions, 'F')

    lesion_labels_data = np.reshape(np.array(el.getLesionLabels(),
                                    dtype=np.int32), dimensions, 'F')
    
    lesion_score_data = np.reshape(np.array(el.getLesionScore(),
                                    dtype=np.float32), dimensions, 'F')
    

    # adapt header max for each image so that the correct max is displayed
    # and create nifti objects
    header['cal_max'] = np.nanmax(lesion_prior_data)
    lesion_prior = nb.Nifti1Image(lesion_prior_data, affine, header)

    header['cal_max'] = np.nanmax(lesion_size_data)
    lesion_size = nb.Nifti1Image(lesion_size_data, affine, header)

    header['cal_max'] = np.nanmax(lesion_proba_data)
    lesion_proba = nb.Nifti1Image(lesion_proba_data, affine, header)

    header['cal_max'] = np.nanmax(lesion_pv_data)
    lesion_pv = nb.Nifti1Image(lesion_pv_data, affine, header)
    
    header['cal_max'] = np.nanmax(lesion_labels_data)
    lesion_labels = nb.Nifti1Image(lesion_labels_data, affine, header)
    
    header['cal_max'] = np.nanmax(lesion_score_data)
    lesion_score = nb.Nifti1Image(lesion_score_data, affine, header)

    if save_data:
        save_volume(os.path.join(output_dir, lesion_prior_file), lesion_prior)
        save_volume(os.path.join(output_dir, lesion_size_file), lesion_size)
        save_volume(os.path.join(output_dir, lesion_proba_file), lesion_proba)
        save_volume(os.path.join(output_dir, lesion_pv_file), lesion_pv)
        save_volume(os.path.join(output_dir, lesion_labels_file), lesion_labels)
        save_volume(os.path.join(output_dir, lesion_score_file), lesion_score)

    return {'lesion_prior': lesion_prior, 'lesion_size': lesion_size,
            'lesion_proba': lesion_proba, 'lesion_pv': lesion_pv,
            'lesion_labels': lesion_labels, 'lesion_score': lesion_score}
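
A minimal usage sketch for lesion_extraction; all file names and parameter values below are hypothetical and would need tuning to the data:

# hypothetical call combining MGDM outputs with a lesion probability map
lesions = lesion_extraction(
    probability_image='lesion_proba.nii.gz',
    segmentation_image='mgdm_seg.nii.gz',
    levelset_boundary_image='mgdm_dist.nii.gz',
    location_prior_image='lesion_location_prior.nii.gz',
    atlas_file='/path/to/mgdm_atlas.txt',
    gm_boundary_partial_vol_dist=1.0,   # hypothetical distance values
    csf_boundary_partial_vol_dist=2.0,
    lesion_clust_dist=1.0,
    prob_min_thresh=0.1,                # hypothetical probability thresholds
    prob_max_thresh=0.9,
    small_lesion_size=4.0,
    save_data=True)
lesion_labels = lesions['lesion_labels']   # labeled lesion clusters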