Example #1
def estimate_distribution(scalar_files, scalar_range, fold_files, label_files,
                          background_value=-1, verbose=False):
    """
    Estimate sulcus label border scalar distributions from VTK files.

    Learn distributions from training data (different surface meshes).
    Estimate distribution means, sigmas (standard deviations), and weights
    for VTK surface mesh scalars (e.g., depth, curvature) along and outside
    sulcus label borders within folds.

    Note: The number of classes, k, is currently hard-coded.

    Parameters
    ----------
    scalar_files : list of strings
        names of VTK files with scalar values for all surface vertices
    scalar_range : list of floats
        range of values to estimate distribution
    fold_files : list of strings
        names of VTK files with fold numbers for scalar values
    label_files : list of strings
        names of VTK files with label numbers for scalar values
    background_value : integer or float
        background value
    verbose : bool
        print statements?

    Returns
    -------
    border_parameters : dictionary containing lists of floats
        means, sigmas, weights
    nonborder_parameters : dictionary containing lists of floats
        means, sigmas, weights

    Examples
    --------
    >>> import numpy as np
    >>> import pickle
    >>> from mindboggle.shapes.likelihood import estimate_distribution
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> # Train on a single surface mesh (using FreeSurfer vs. manual labels):
    >>> urls, fetch_data = prep_tests()
    >>> depth_file = fetch_data(urls['left_travel_depth'], '', '.vtk')
    >>> curv_file = fetch_data(urls['left_mean_curvature'], '', '.vtk')
    >>> folds_file = fetch_data(urls['left_folds'], '', '.vtk')
    >>> labels_file = fetch_data(urls['left_freesurfer_labels'], '', '.vtk')
    >>> depth_files = [depth_file]
    >>> curv_files = [curv_file]
    >>> fold_files = [folds_file]
    >>> label_files = [labels_file]
    >>> #
    >>> # # Train on many Mindboggle-101 surface meshes:
    >>> # import os
    >>> # mindboggle_path = '../../Mindboggle101_mindboggle_results'
    >>> # label_path = os.environ['SUBJECTS_DIR']
    >>> # x_path = os.path.join(os.environ['MINDBOGGLE'], 'x')
    >>> # atlas_list_file = os.path.join(x_path, 'mindboggle101_atlases.txt')
    >>> # atlas_list = [line.strip() for line in open(atlas_list_file)]
    >>> # depth_files = []
    >>> # curv_files = []
    >>> # fold_files = []
    >>> # label_files = []
    >>> # for atlas in atlas_list:
    >>> #  if 'OASIS' in atlas or 'NKI' in atlas or 'MMRR-21' in atlas:
    >>> #   print(atlas)
    >>> #   for h in ['lh','rh']:
    >>> #     depth_file = os.path.join(mindboggle_path, 'shapes',
    >>> #         '_hemi_'+h+'_subject_'+atlas, h+'.pial.travel_depth.vtk')
    >>> #     curv_file = os.path.join(mindboggle_path, 'shapes',
    >>> #         '_hemi_'+h+'_subject_'+atlas, h+'.pial.mean_curvature.vtk')
    >>> #     folds_file = os.path.join(mindboggle_path, 'features',
    >>> #         '_hemi_'+h+'_subject_'+atlas, 'folds.vtk')
    >>> #     labels_file = os.path.join(label_path, atlas, 'label',
    >>> #         h+'.labels.DKT25.manual.vtk')
    >>> #     depth_files.append(depth_file)
    >>> #     curv_files.append(curv_file)
    >>> #     fold_files.append(folds_file)
    >>> #     label_files.append(labels_file)
    >>> #
    >>> scalar_files = depth_files
    >>> scalar_range = np.linspace(0, 1, 51, endpoint=True) # (0 to 1 by 0.02)
    >>> background_value = -1
    >>> verbose = False
    >>> depth_border, depth_nonborder = estimate_distribution(scalar_files,
    ...     scalar_range, fold_files, label_files, background_value, verbose)
    >>> scalar_files = curv_files
    >>> scalar_range = np.linspace(-1, 1, 101, endpoint=True) # (-1 to 1 by 0.02)
    >>> curv_border, curv_nonborder = estimate_distribution(scalar_files,
    ...     scalar_range, fold_files, label_files, background_value, verbose)
    >>> print(np.array_str(np.array(depth_border['means']),
    ...       precision=5, suppress_small=True))
    [ 13.0869   0.       0.    ]
    >>> print(np.array_str(np.array(depth_nonborder['means']),
    ...       precision=5, suppress_small=True))
    [ 14.59311   6.16008   0.     ]
    >>> print(np.array_str(np.array(curv_border['means']),
    ...       precision=5, suppress_small=True))
    [ 3.06449 -0.76109 -3.43184]
    >>> print(np.array_str(np.array(curv_nonborder['means']),
    ...       precision=5, suppress_small=True))
    [ 0.62236 -1.55192 -5.19359]
    >>> pickle.dump([depth_border, curv_border, depth_nonborder, curv_nonborder],
    ...     open("depth_curv_border_nonborder_parameters.pkl", "wb"))

    """
    from mindboggle.shapes.likelihood import concatenate_sulcus_scalars, \
        fit_normals_to_histogram

    if not scalar_files or not fold_files or not label_files:
        raise IOError("Input file lists cannot be empty.")

    # Concatenate scalars across multiple training files:
    border_scalars, nonborder_scalars = concatenate_sulcus_scalars(scalar_files,
        fold_files, label_files, background_value)

    # Estimate distribution parameters:
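    # (fit_normals_to_histogram fits k normal distributions per class;
    # k = 3 here, matching the three entries per parameter list in the
    # docstring outputs above)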
    border_means, border_sigmas, \
        border_weights = fit_normals_to_histogram(border_scalars,
                                                  scalar_range, verbose)
    nonborder_means, nonborder_sigmas, \
        nonborder_weights = fit_normals_to_histogram(nonborder_scalars,
                                                     scalar_range, verbose)

    # Store outputs in dictionaries:
    border_parameters = {
        'means': border_means,
        'sigmas': border_sigmas,
        'weights': border_weights
    }
    nonborder_parameters = {
        'means': nonborder_means,
        'sigmas': nonborder_sigmas,
        'weights': nonborder_weights
    }

    return border_parameters, nonborder_parameters
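
The heavy lifting in every version shown here happens in fit_normals_to_histogram,
which returns the means, sigmas, and weights of k normal distributions fit to a
histogram of the scalar values (k = 3, per the hard-coded note and the
three-entry outputs in the docstring). The following is only a minimal sketch of
that idea, not mindboggle's actual implementation: fit_normals_sketch is a
hypothetical stand-in that fits a sum of three Gaussians to a normalized
histogram with scipy's curve_fit.

import numpy as np
from scipy.optimize import curve_fit

def fit_normals_sketch(scalars, scalar_range, k=3):
    """Hypothetical stand-in for fit_normals_to_histogram: fit a sum of
    k Gaussians to a histogram of the scalars; return means, sigmas, weights."""
    hist, edges = np.histogram(scalars, bins=len(scalar_range), density=True)
    centers = 0.5 * (edges[:-1] + edges[1:])

    def mixture(x, *params):
        # params is flat: [weight_i, mean_i, sigma_i] for each of k components
        total = np.zeros_like(x)
        for i in range(k):
            w, m, s = params[3 * i:3 * i + 3]
            total += w * np.exp(-0.5 * ((x - m) / s) ** 2) / (abs(s) * np.sqrt(2 * np.pi))
        return total

    # Crude initial guess: equal weights, means spread across the data range:
    p0 = []
    for m0 in np.linspace(min(scalars), max(scalars), k):
        p0 += [1.0 / k, m0, np.std(scalars)]
    params, _ = curve_fit(mixture, centers, hist, p0=p0, maxfev=10000)
    weights, means, sigmas = params[0::3], params[1::3], np.abs(params[2::3])
    return list(means), list(sigmas), list(weights)

The real routine presumably differs in initialization and in how it zeroes out
unused components (note the 0.0 means in the depth outputs), so treat this only
as an illustration of the (means, sigmas, weights) structure the function
returns.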
Example #2
def estimate_distribution(scalar_files, scalar_range, fold_files, label_files):
    """
    Estimate sulcus label border scalar distributions from VTK files.

    Estimate distribution means, sigmas (standard deviations), and weights
    for VTK surface mesh scalars (e.g., depth, curvature) along and outside
    sulcus label borders within folds.

    Note: The number of classes, k, is currently hard-coded.

    Parameters
    ----------
    scalar_files : list of strings
        names of VTK files with scalar values for all surface vertices
    scalar_range : list of floats
        range of values to estimate distribution
    fold_files : list of strings
        names of VTK files with fold numbers for scalar values
    label_files : list of strings
        names of VTK files with label numbers for scalar values

    Returns
    -------
    border_parameters : dictionary containing lists of floats
        means, sigmas, weights
    nonborder_parameters : dictionary containing lists of floats
        means, sigmas, weights

    Examples
    --------
    >>> import os
    >>> import numpy as np
    >>> from mindboggle.shapes.likelihood import estimate_distribution
    >>> from mindboggle.utils.io_table import read_columns
    >>> do_test = False
    >>> # Train on a single surface mesh:
    >>> if do_test:
    ...     path = os.environ['MINDBOGGLE_DATA']
    ...     #depth_file = os.path.join(path, 'arno', 'shapes', 'travel_depth_rescaled.vtk')
    ...     depth_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.travel_depth.vtk')
    ...     curv_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.mean_curvature.vtk')
    ...     folds_file = os.path.join(path, 'arno', 'features', 'folds.vtk')
    ...     labels_file = os.path.join(path, 'arno', 'labels', 'lh.labels.DKT25.manual.vtk')
    ...     depth_files = [depth_file]
    ...     curv_files = [curv_file]
    ...     fold_files = [folds_file]
    ...     label_files = [labels_file]
    ... # Train on many Mindboggle-101 surface meshes:
    ... else:
    ...     mindboggle_path = '../../Mindboggle101_mindboggle_results'
    ...     label_path = os.environ['SUBJECTS_DIR']
    ...     x_path = os.path.join(os.environ['MINDBOGGLE'], 'x')
    ...     atlas_list_file = os.path.join(x_path, 'mindboggle101_atlases.txt')
    ...     atlas_list = read_columns(atlas_list_file, 1)[0]
    ...     depth_files = []
    ...     curv_files = []
    ...     fold_files = []
    ...     label_files = []
    ...     for atlas in atlas_list:
    ...         if 'OASIS' in atlas or 'NKI' in atlas or 'MMRR-21' in atlas:
    ...             print(atlas)
    ...             for h in ['lh', 'rh']:
    ...                 #depth_file = os.path.join(mindboggle_path, 'shapes',
    ...                 #    '_hemi_'+h+'_subject_'+atlas, 'travel_depth_rescaled.vtk')
    ...                 depth_file = os.path.join(mindboggle_path, 'shapes',
    ...                     '_hemi_'+h+'_subject_'+atlas, h+'.pial.travel_depth.vtk')
    ...                 curv_file = os.path.join(mindboggle_path, 'shapes',
    ...                     '_hemi_'+h+'_subject_'+atlas, h+'.pial.mean_curvature.vtk')
    ...                 folds_file = os.path.join(mindboggle_path, 'features',
    ...                     '_hemi_'+h+'_subject_'+atlas, 'folds.vtk')
    ...                 labels_file = os.path.join(label_path, atlas, 'label',
    ...                     h+'.labels.DKT25.manual.vtk')
    ...                 depth_files.append(depth_file)
    ...                 curv_files.append(curv_file)
    ...                 fold_files.append(folds_file)
    ...                 label_files.append(labels_file)
    >>> scalar_range1 = np.linspace(0, 1, 51, endpoint=True) # (0 to 1 by 0.02)
    >>> scalar_range2 = np.linspace(-1, 1, 101, endpoint=True) # (-1 to 1 by 0.02)
    >>> depth_border, depth_nonborder = estimate_distribution(depth_files,
    ...     scalar_range1, fold_files, label_files)
    >>> curv_border, curv_nonborder = estimate_distribution(curv_files,
    ...     scalar_range2, fold_files, label_files)
    >>> import pickle
    >>> pickle.dump([depth_border, curv_border, depth_nonborder, curv_nonborder],
    ...     open("depth_curv_border_nonborder_parameters.pkl", "wb"))

    """
    from mindboggle.shapes.likelihood import concatenate_sulcus_scalars, \
        fit_normals_to_histogram

    if not scalar_files or not fold_files or not label_files:
        import sys
        sys.exit("Input file lists cannot be empty.")

    # Concatenate scalars across multiple training files:
    border_scalars, nonborder_scalars = concatenate_sulcus_scalars(scalar_files,
        fold_files, label_files)

    # Estimate distribution parameters:
    border_means, border_sigmas, \
        border_weights = fit_normals_to_histogram(border_scalars, scalar_range)
    nonborder_means, nonborder_sigmas, \
        nonborder_weights = fit_normals_to_histogram(nonborder_scalars, scalar_range)

    # Store outputs in dictionaries:
    border_parameters = {
        'means': border_means,
        'sigmas': border_sigmas,
        'weights': border_weights
    }
    nonborder_parameters = {
        'means': nonborder_means,
        'sigmas': nonborder_sigmas,
        'weights': nonborder_weights
    }

    return border_parameters, nonborder_parameters
Example #3
def estimate_distribution(scalar_files,
                          scalar_range,
                          fold_files,
                          label_files,
                          background_value=-1,
                          verbose=False):
    """
    Estimate sulcus label border scalar distributions from VTK files.

    Learn distributions from training data (different surface meshes).
    Estimate distribution means, sigmas (standard deviations), and weights
    for VTK surface mesh scalars (e.g., depth, curvature) along and outside
    sulcus label borders within folds.

    Note: The number of classes, k, is currently hard-coded.

    Parameters
    ----------
    scalar_files : list of strings
        names of VTK files with scalar values for all surface vertices
    scalar_range : list of floats
        range of values to estimate distribution
    fold_files : list of strings
        names of VTK files with fold numbers for scalar values
    label_files : list of strings
        names of VTK files with label numbers for scalar values
    background_value : integer or float
        background value
    verbose : bool
        print statements?

    Returns
    -------
    border_parameters : dictionary containing lists of floats
        means, sigmas, weights
    nonborder_parameters : dictionary containing lists of floats
        means, sigmas, weights

    Examples
    --------
    >>> import numpy as np
    >>> import pickle
    >>> from mindboggle.shapes.likelihood import estimate_distribution
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> # Train on a single surface mesh (using FreeSurfer vs. manual labels):
    >>> urls, fetch_data = prep_tests()
    >>> depth_file = fetch_data(urls['left_travel_depth'], '', '.vtk')
    >>> curv_file = fetch_data(urls['left_mean_curvature'], '', '.vtk')
    >>> folds_file = fetch_data(urls['left_folds'], '', '.vtk')
    >>> labels_file = fetch_data(urls['left_freesurfer_labels'], '', '.vtk')
    >>> depth_files = [depth_file]
    >>> curv_files = [curv_file]
    >>> fold_files = [folds_file]
    >>> label_files = [labels_file]
    >>> #
    >>> # # Train on many Mindboggle-101 surface meshes:
    >>> # import os
    >>> # mindboggle_path = '../../Mindboggle101_mindboggle_results'
    >>> # label_path = os.environ['SUBJECTS_DIR']
    >>> # x_path = os.path.join(os.environ['MINDBOGGLE'], 'x')
    >>> # atlas_list_file = os.path.join(x_path, 'mindboggle101_atlases.txt')
    >>> # atlas_list = [line.strip() for line in open(atlas_list_file)]
    >>> # depth_files = []
    >>> # curv_files = []
    >>> # fold_files = []
    >>> # label_files = []
    >>> # for atlas in atlas_list:
    >>> #  if 'OASIS' in atlas or 'NKI' in atlas or 'MMRR-21' in atlas:
    >>> #   print(atlas)
    >>> #   for h in ['lh','rh']:
    >>> #     depth_file = os.path.join(mindboggle_path, 'shapes',
    >>> #         '_hemi_'+h+'_subject_'+atlas, h+'.pial.travel_depth.vtk')
    >>> #     curv_file = os.path.join(mindboggle_path, 'shapes',
    >>> #         '_hemi_'+h+'_subject_'+atlas, h+'.pial.mean_curvature.vtk')
    >>> #     folds_file = os.path.join(mindboggle_path, 'features',
    >>> #         '_hemi_'+h+'_subject_'+atlas, 'folds.vtk')
    >>> #     labels_file = os.path.join(label_path, atlas, 'label',
    >>> #         h+'.labels.DKT25.manual.vtk')
    >>> #     depth_files.append(depth_file)
    >>> #     curv_files.append(curv_file)
    >>> #     fold_files.append(folds_file)
    >>> #     label_files.append(labels_file)
    >>> #
    >>> scalar_files = depth_files
    >>> scalar_range = np.linspace(0, 1, 51, endpoint=True) # (0 to 1 by 0.02)
    >>> background_value = -1
    >>> verbose = False
    >>> depth_border, depth_nonborder = estimate_distribution(scalar_files,
    ...     scalar_range, fold_files, label_files, background_value, verbose)
    >>> scalar_files = curv_files
    >>> scalar_range = np.linspace(-1, 1, 101, endpoint=True) # (-1 to 1 by 0.02)
    >>> curv_border, curv_nonborder = estimate_distribution(scalar_files,
    ...     scalar_range, fold_files, label_files, background_value, verbose)
    >>> print(np.array_str(np.array(depth_border['means']),
    ...       precision=5, suppress_small=True))
    [ 13.0869   0.       0.    ]
    >>> print(np.array_str(np.array(depth_nonborder['means']),
    ...       precision=5, suppress_small=True))
    [ 14.59311   6.16008   0.     ]
    >>> print(np.array_str(np.array(curv_border['means']),
    ...       precision=5, suppress_small=True))
    [ 3.06449 -0.76109 -3.43184]
    >>> print(np.array_str(np.array(curv_nonborder['means']),
    ...       precision=5, suppress_small=True))
    [ 0.62236 -1.55192 -5.19359]
    >>> pickle.dump([depth_border, curv_border, depth_nonborder, curv_nonborder],
    ...     open("depth_curv_border_nonborder_parameters.pkl", "wb"))

    """
    from mindboggle.shapes.likelihood import concatenate_sulcus_scalars, \
        fit_normals_to_histogram

    if not scalar_files or not fold_files or not label_files:
        raise IOError("Input file lists cannot be empty.")

    # Concatenate scalars across multiple training files:
    border_scalars, nonborder_scalars = concatenate_sulcus_scalars(
        scalar_files, fold_files, label_files, background_value)

    # Estimate distribution parameters:
    border_means, border_sigmas, \
        border_weights = fit_normals_to_histogram(border_scalars,
                                                  scalar_range, verbose)
    nonborder_means, nonborder_sigmas, \
        nonborder_weights = fit_normals_to_histogram(nonborder_scalars,
                                                     scalar_range, verbose)

    # Store outputs in dictionaries:
    border_parameters = {
        'means': border_means,
        'sigmas': border_sigmas,
        'weights': border_weights
    }
    nonborder_parameters = {
        'means': nonborder_means,
        'sigmas': nonborder_sigmas,
        'weights': nonborder_weights
    }

    return border_parameters, nonborder_parameters
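
A natural consumer of the returned dictionaries is a likelihood computation:
each dictionary describes a Gaussian mixture, so evaluating the border and
nonborder densities at a vertex's scalar value shows which class that value
favors. mixture_density below is a hypothetical helper written against the
{'means', 'sigmas', 'weights'} structure documented above; it is not part of
mindboggle's API.

import numpy as np

def mixture_density(x, parameters):
    """Hypothetical helper: evaluate the Gaussian-mixture density described
    by a {'means', 'sigmas', 'weights'} dictionary at scalar value(s) x."""
    x = np.asarray(x, dtype=float)
    density = np.zeros_like(x)
    for m, s, w in zip(parameters['means'], parameters['sigmas'],
                       parameters['weights']):
        if s > 0:  # skip degenerate (zeroed-out) components
            density += w * np.exp(-0.5 * ((x - m) / s) ** 2) / (s * np.sqrt(2.0 * np.pi))
    return density

# For a fold vertex with travel depth 12.5, compare the two class densities,
# e.g. mixture_density(12.5, depth_border) vs. mixture_density(12.5, depth_nonborder).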
Example #4
def estimate_distribution(scalar_files, scalar_range, fold_files, label_files):
    """
    Estimate sulcus label border scalar distributions from VTK files.

    Estimate distribution means, sigmas (standard deviations), and weights
    for VTK surface mesh scalars (e.g., depth, curvature) along and outside
    sulcus label borders within folds.

    Note: The number of classes, k, is currently hard-coded.

    Parameters
    ----------
    scalar_files : list of strings
        names of VTK files with scalar values for all surface vertices
    scalar_range : list of floats
        range of values to estimate distribution
    fold_files : list of strings
        names of VTK files with fold numbers for scalar values
    label_files : list of strings
        names of VTK files with label numbers for scalar values

    Returns
    -------
    border_parameters : dictionary containing lists of floats
        means, sigmas, weights
    nonborder_parameters : dictionary containing lists of floats
        means, sigmas, weights

    Examples
    --------
    >>> import os
    >>> import numpy as np
    >>> from mindboggle.shapes.likelihood import estimate_distribution
    >>> from mindboggle.utils.io_table import read_columns
    >>> do_test = False
    >>> # Train on a single surface mesh:
    >>> if do_test:
    ...     path = os.environ['MINDBOGGLE_DATA']
    ...     #depth_file = os.path.join(path, 'arno', 'shapes', 'travel_depth_rescaled.vtk')
    ...     depth_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.travel_depth.vtk')
    ...     curv_file = os.path.join(path, 'arno', 'shapes', 'lh.pial.mean_curvature.vtk')
    ...     folds_file = os.path.join(path, 'arno', 'features', 'folds.vtk')
    ...     labels_file = os.path.join(path, 'arno', 'labels', 'lh.labels.DKT25.manual.vtk')
    ...     depth_files = [depth_file]
    ...     curv_files = [curv_file]
    ...     fold_files = [folds_file]
    ...     label_files = [labels_file]
    ... # Train on many Mindboggle-101 surface meshes:
    ... else:
    ...     mindboggle_path = '../../Mindboggle101_mindboggle_results'
    ...     label_path = os.environ['SUBJECTS_DIR']
    ...     x_path = os.path.join(os.environ['MINDBOGGLE'], 'x')
    ...     atlas_list_file = os.path.join(x_path, 'mindboggle101_atlases.txt')
    ...     atlas_list = read_columns(atlas_list_file, 1)[0]
    ...     depth_files = []
    ...     curv_files = []
    ...     fold_files = []
    ...     label_files = []
    ...     for atlas in atlas_list:
    ...         if 'OASIS' in atlas or 'NKI' in atlas or 'MMRR-21' in atlas:
    ...             print(atlas)
    ...             for h in ['lh', 'rh']:
    ...                 #depth_file = os.path.join(mindboggle_path, 'shapes',
    ...                 #    '_hemi_'+h+'_subject_'+atlas, 'travel_depth_rescaled.vtk')
    ...                 depth_file = os.path.join(mindboggle_path, 'shapes',
    ...                     '_hemi_'+h+'_subject_'+atlas, h+'.pial.travel_depth.vtk')
    ...                 curv_file = os.path.join(mindboggle_path, 'shapes',
    ...                     '_hemi_'+h+'_subject_'+atlas, h+'.pial.mean_curvature.vtk')
    ...                 folds_file = os.path.join(mindboggle_path, 'features',
    ...                     '_hemi_'+h+'_subject_'+atlas, 'folds.vtk')
    ...                 labels_file = os.path.join(label_path, atlas, 'label',
    ...                     h+'.labels.DKT25.manual.vtk')
    ...                 depth_files.append(depth_file)
    ...                 curv_files.append(curv_file)
    ...                 fold_files.append(folds_file)
    ...                 label_files.append(labels_file)
    >>> scalar_range1 = np.linspace(0, 1, 51, endpoint=True) # (0 to 1 by 0.02)
    >>> scalar_range2 = np.linspace(-1, 1, 101, endpoint=True) # (-1 to 1 by 0.02)
    >>> depth_border, depth_nonborder = estimate_distribution(depth_files,
    ...     scalar_range1, fold_files, label_files)
    >>> curv_border, curv_nonborder = estimate_distribution(curv_files,
    ...     scalar_range2, fold_files, label_files)
    >>> import pickle
    >>> pickle.dump([depth_border, curv_border, depth_nonborder, curv_nonborder],
    ...     open("depth_curv_border_nonborder_parameters.pkl", "wb"))

    """
    from mindboggle.shapes.likelihood import concatenate_sulcus_scalars, \
        fit_normals_to_histogram

    if not scalar_files or not fold_files or not label_files:
        import sys
        sys.exit("Input file lists cannot be empty.")

    # Concatenate scalars across multiple training files:
    border_scalars, nonborder_scalars = concatenate_sulcus_scalars(
        scalar_files, fold_files, label_files)

    # Estimate distribution parameters:
    border_means, border_sigmas, \
        border_weights = fit_normals_to_histogram(border_scalars, scalar_range)
    nonborder_means, nonborder_sigmas, \
        nonborder_weights = fit_normals_to_histogram(nonborder_scalars, scalar_range)

    # Store outputs in dictionaries:
    border_parameters = {
        'means': border_means,
        'sigmas': border_sigmas,
        'weights': border_weights
    }
    nonborder_parameters = {
        'means': nonborder_means,
        'sigmas': nonborder_sigmas,
        'weights': nonborder_weights
    }

    return border_parameters, nonborder_parameters
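
All four versions end their examples by pickling the four parameter
dictionaries to the same file; reloading them later is the mirror image (the
file name is taken from those examples):

import pickle

with open("depth_curv_border_nonborder_parameters.pkl", "rb") as f:
    depth_border, curv_border, depth_nonborder, curv_nonborder = pickle.load(f)
print(depth_border['means'], depth_border['sigmas'], depth_border['weights'])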