Example #1
def write_shape_stats(labels_or_file=[], sulci=[], fundi=[],
        affine_transform_files=[], inverse_booleans=[], transform_format='itk',
        area_file='', normalize_by_area=False, mean_curvature_file='',
        travel_depth_file='', geodesic_depth_file='',
        freesurfer_thickness_file='', freesurfer_curvature_file='',
        freesurfer_sulc_file='',
        labels_spectra=[], labels_spectra_IDs=[],
        sulci_spectra=[], sulci_spectra_IDs=[],
        labels_zernike=[], labels_zernike_IDs=[],
        sulci_zernike=[], sulci_zernike_IDs=[],
        exclude_labels=[-1]):
    """
    Make tables of shape statistics per label, sulcus, and/or fundus.

    Note ::
        This function is tailored for Mindboggle outputs.

    Parameters
    ----------
    labels_or_file : list or string
        label number for each vertex or name of VTK file with index scalars
    sulci :  list of integers
        indices to sulci, one per vertex, with -1 indicating no sulcus
    fundi :  list of integers
        indices to fundi, one per vertex, with -1 indicating no fundus
    affine_transform_files : list of strings
        affine transform files to standard space
    inverse_booleans : list of zeros and ones
        for each transform, 1 to take the inverse, else 0
    transform_format : string
        format for transform file
        Ex: 'txt' for text, 'itk' for ITK, and 'mat' for Matlab format
    area_file :  string
        name of VTK file with surface area scalar values
    normalize_by_area : Boolean
        normalize all shape measures by area of label/feature? (UNTESTED)
    mean_curvature_file :  string
        name of VTK file with mean curvature scalar values
    travel_depth_file :  string
        name of VTK file with travel depth scalar values
    geodesic_depth_file :  string
        name of VTK file with geodesic depth scalar values
    freesurfer_thickness_file :  string
        name of VTK file with FreeSurfer thickness scalar values
    freesurfer_curvature_file :  string
        name of VTK file with FreeSurfer curvature (curv) scalar values
    freesurfer_sulc_file :  string
        name of VTK file with FreeSurfer convexity (sulc) scalar values
    labels_spectra : list of lists of floats
        Laplace-Beltrami spectra for each labeled region
    labels_spectra_IDs : list of integers
        unique labels for labels_spectra
    sulci_spectra : list of lists of floats
        Laplace-Beltrami spectra for each sulcus
    sulci_spectra_IDs : list of integers
        unique sulcus IDs for sulci_spectra
    labels_zernike : list of lists of floats
        Zernike moments for each labeled region
    labels_zernike_IDs : list of integers
        unique labels for labels_zernike
    sulci_zernike : list of lists of floats
        Zernike moments for each sulcus
    sulci_zernike_IDs : list of integers
        unique sulcus IDs for sulci_zernike
    exclude_labels : list of integers
        indices to be excluded (in addition to -1)

    Returns
    -------
    label_table :  string
        output table filename for label shapes
    sulcus_table :  string
        output table filename for sulcus shapes
    fundus_table :  string
        output table filename for fundus shapes

    Examples
    --------
    >>> import os
    >>> from mindboggle.mio.vtks import read_scalars
    >>> from mindboggle.mio.tables import write_shape_stats
    >>> path = '/homedir/mindboggled/Twins-2-1'
    >>> labels_or_file = os.path.join(path, 'labels', 'left_cortical_surface', 'freesurfer_cortex_labels.vtk')
    >>> sulci_file = os.path.join(path, 'features', 'left_cortical_surface', 'sulci.vtk')
    >>> fundi_file = os.path.join(path, 'features', 'left_cortical_surface', 'fundus_per_sulcus.vtk')
    >>> sulci, name = read_scalars(sulci_file)
    >>> fundi, name = read_scalars(fundi_file)
    >>> affine_transform_files = [] #os.path.join(path, 'arno', 'mri', 't1weighted_brain.MNI152Affine.txt')
    >>> inverse_booleans = []
    >>> #transform_format = 'mat'
    >>> transform_format = 'itk'
    >>> area_file = os.path.join(path, 'shapes', 'left_cortical_surface', 'area.vtk')
    >>> normalize_by_area = False
    >>> mean_curvature_file = os.path.join(path, 'shapes', 'left_cortical_surface', 'mean_curvature.vtk')
    >>> travel_depth_file = os.path.join(path, 'shapes', 'left_cortical_surface', 'travel_depth.vtk')
    >>> geodesic_depth_file = os.path.join(path, 'shapes', 'left_cortical_surface', 'geodesic_depth.vtk')
    >>> freesurfer_thickness_file = ''
    >>> freesurfer_curvature_file = ''
    >>> freesurfer_sulc_file = ''
    >>> #
    >>> labels, name = read_scalars(labels_or_file)
    >>> labels_spectra = []
    >>> labels_spectra_IDs = []
    >>> sulci_spectra = []
    >>> sulci_spectra_IDs = []
    >>> labels_zernike = []
    >>> labels_zernike_IDs = []
    >>> sulci_zernike = []
    >>> sulci_zernike_IDs = []
    >>> exclude_labels = [-1]
    >>> #
    >>> label_table, sulcus_table, fundus_table = write_shape_stats(
    ...     labels_or_file, sulci, fundi,
    ...     affine_transform_files, inverse_booleans, transform_format,
    ...     area_file, normalize_by_area,
    ...     mean_curvature_file, travel_depth_file, geodesic_depth_file,
    ...     freesurfer_thickness_file, freesurfer_curvature_file,
    ...     freesurfer_sulc_file,
    ...     labels_spectra, labels_spectra_IDs,
    ...     sulci_spectra, sulci_spectra_IDs,
    ...     labels_zernike, labels_zernike_IDs,
    ...     sulci_zernike, sulci_zernike_IDs,
    ...     exclude_labels)

    """
    import os
    import numpy as np
    import pandas as pd

    from mindboggle.guts.compute import means_per_label, stats_per_label, \
        sum_per_label
    from mindboggle.mio.vtks import read_scalars, read_vtk, \
        apply_affine_transforms
    from mindboggle.mio.labels import DKTprotocol

    dkt = DKTprotocol()

    # Make sure inputs are lists:
    if isinstance(labels_or_file, np.ndarray):
        labels = [int(x) for x in labels_or_file]
    elif isinstance(labels_or_file, list):
        labels = labels_or_file
    elif isinstance(labels_or_file, str):
        labels, name = read_scalars(labels_or_file)
    if isinstance(sulci, np.ndarray):
        sulci = [int(x) for x in sulci]
    if isinstance(fundi, np.ndarray):
        fundi = [int(x) for x in fundi]

    if not labels and not sulci and not fundi:
        import sys
        sys.exit('No feature data to tabulate in write_shape_stats().')

    spectrum_start = 1  # Store all columns of spectral components (0),
                        # or start from higher frequency components (>=1)

    #-------------------------------------------------------------------------
    # Feature lists, shape names, and shape files:
    #-------------------------------------------------------------------------
    # Feature lists:
    feature_lists = [labels, sulci, fundi]
    feature_names = ['label', 'sulcus', 'fundus']
    spectra_lists = [labels_spectra, sulci_spectra]
    spectra_ID_lists = [labels_spectra_IDs, sulci_spectra_IDs]
    zernike_lists = [labels_zernike, sulci_zernike]
    zernike_ID_lists = [labels_zernike_IDs, sulci_zernike_IDs]
    table_names = ['label_shapes.csv', 'sulcus_shapes.csv',
                   'fundus_shapes.csv']

    # Shape names corresponding to shape files below:
    shape_names = ['area', 'travel depth', 'geodesic depth',
                   'mean curvature', 'freesurfer curvature',
                   'freesurfer thickness', 'freesurfer convexity (sulc)']

    # Load shape files as a list of numpy arrays of per-vertex shape values:
    shape_files = [area_file, travel_depth_file, geodesic_depth_file,
                   mean_curvature_file, freesurfer_curvature_file,
                   freesurfer_thickness_file, freesurfer_sulc_file]
    shape_arrays = []
    first_pass = True
    area_array = []

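    # The first existing shape file is read with read_vtk() so that the mesh
    # points are available later for per-feature mean positions; subsequent
    # shape files only need their per-vertex scalars (read_scalars()).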
    for ishape, shape_file in enumerate(shape_files):
        if os.path.exists(shape_file):
            if first_pass:
                points, indices, lines, faces, scalars_array, scalar_names, \
                    npoints, input_vtk = read_vtk(shape_file, True, True)
                points = np.array(points)
                first_pass = False
                if affine_transform_files and transform_format:
                    affine_points, \
                        foo1 = apply_affine_transforms(affine_transform_files,
                                    inverse_booleans, transform_format,
                                    points, vtk_file_stem='')
            else:
                scalars_array, name = read_scalars(shape_file, True, True)
            if scalars_array.size:
                shape_arrays.append(scalars_array)

                # Store area array:
                if ishape == 0:
                    area_array = scalars_array.copy()

    if normalize_by_area:
        use_area = area_array
    else:
        use_area = []

    # Initialize table file names:
    label_table = ''
    sulcus_table = ''
    fundus_table = ''

    # Loop through features / tables:
    for itable, feature_list in enumerate(feature_lists):
        column_names = []

        #-----------------------------------------------------------------
        # Label names:
        #-----------------------------------------------------------------
        if itable == 0:
            label_numbers = dkt.cerebrum_cortex_DKT31_numbers
            label_names = dkt.cerebrum_cortex_DKT31_names
        elif itable in [1, 2]:
            label_numbers = dkt.sulcus_numbers
            label_names = dkt.sulcus_names
        else:
            label_numbers = []
            label_names = []
        include_labels = label_numbers
        nlabels = len(label_numbers)

        #---------------------------------------------------------------------
        # For each feature, construct a table of average shape values:
        #---------------------------------------------------------------------
        if feature_list:
            feature_name = feature_names[itable]
            columns = []

            #-----------------------------------------------------------------
            # Loop through shape measures:
            #-----------------------------------------------------------------
            for ishape, shape_array in enumerate(shape_arrays):
                shape = shape_names[ishape]
                print('  Compute statistics on {0} {1}...'.
                      format(feature_name, shape))
                #-------------------------------------------------------------
                # Append feature areas to columns:
                #-------------------------------------------------------------
                if ishape == 0 and np.size(area_array):
                    sums, label_list = sum_per_label(shape_array,
                        feature_list, include_labels, exclude_labels)
                    column_names.append(shape)
                    columns.append(sums)
                #-------------------------------------------------------------
                # Append feature shape statistics to columns:
                #-------------------------------------------------------------
                else:
                    medians, mads, means, sdevs, skews, kurts, \
                    lower_quarts, upper_quarts, \
                    label_list = stats_per_label(shape_array, feature_list,
                                        include_labels, exclude_labels,
                                        area_array, precision=1)

                    column_names.append(shape + ': median')
                    column_names.append(shape + ': MAD')
                    column_names.append(shape + ': mean')
                    column_names.append(shape + ': SD')
                    column_names.append(shape + ': skew')
                    column_names.append(shape + ': kurtosis')
                    column_names.append(shape + ': 25%')
                    column_names.append(shape + ': 75%')
                    columns.append(medians)
                    columns.append(mads)
                    columns.append(means)
                    columns.append(sdevs)
                    columns.append(skews)
                    columns.append(kurts)
                    columns.append(lower_quarts)
                    columns.append(upper_quarts)

            #-----------------------------------------------------------------
            # Mean positions in the original space:
            #-----------------------------------------------------------------
            # Compute mean position per feature:
            positions, sdevs, label_list, foo = means_per_label(points,
                feature_list, include_labels, exclude_labels, use_area)

            # Append mean x,y,z position per feature to columns:
            xyz_positions = np.asarray(positions)
            for ixyz, xyz in enumerate(['x','y','z']):
                column_names.append('mean position: {0}'.format(xyz))
                columns.append(xyz_positions[:, ixyz].tolist())

            #-----------------------------------------------------------------
            # Mean positions in standard space:
            #-----------------------------------------------------------------
            if affine_transform_files and transform_format:
                # Compute standard space mean position per feature:
                standard_positions, sdevs, label_list, \
                foo = means_per_label(affine_points,
                    feature_list, include_labels, exclude_labels, use_area)

                # Append standard space x,y,z position per feature to columns:
                xyz_std_positions = np.asarray(standard_positions)
                for ixyz, xyz in enumerate(['x','y','z']):
                    column_names.append('mean position in standard space:'
                                        ' {0}'.format(xyz))
                    columns.append(xyz_std_positions[:, ixyz].tolist())

            #-----------------------------------------------------------------
            # Laplace-Beltrami spectra:
            #-----------------------------------------------------------------
            if itable in [0, 1]:
                spectra = spectra_lists[itable]
                if spectra:
                    spectra_IDs = spectra_ID_lists[itable]

                    # Construct a matrix of spectra:
                    len_spectrum = len(spectra[0])
                    spectrum_matrix = np.zeros((nlabels, len_spectrum))
                    for ilabel, label in enumerate(include_labels):
                        if label in spectra_IDs:
                            spectrum = spectra[spectra_IDs.index(label)]
                            spectrum_matrix[ilabel, 0:len_spectrum] = spectrum

                    # Append spectral shape name and values to columns:
                    for ispec in range(spectrum_start, len_spectrum):
                        columns.append(spectrum_matrix[:, ispec].tolist())
                        column_names.append('Laplace-Beltrami spectrum:'
                                            ' component {0}'.format(ispec+1))

            #-----------------------------------------------------------------
            # Zernike moments:
            #-----------------------------------------------------------------
            if itable in [0, 1]:
                zernike = zernike_lists[itable]
                if zernike:
                    zernike_IDs = zernike_ID_lists[itable]

                    # Construct a matrix of Zernike moments:
                    len_moments = len(zernike[0])
                    moments_matrix = np.zeros((nlabels, len_moments))
                    for ilabel, label in enumerate(include_labels):
                        if label in zernike_IDs:
                            moments = zernike[zernike_IDs.index(label)]
                            moments_matrix[ilabel, 0:len_moments] = moments

                    # Append Zernike shape name and values to columns:
                    for imoment in range(0, len_moments):
                        columns.append(moments_matrix[:, imoment].tolist())
                        column_names.append('Zernike moments: component {0}'.
                                            format(imoment+1))

            #-----------------------------------------------------------------
            # Write labels/IDs and values to table:
            #-----------------------------------------------------------------
            # Write labels/IDs to table:
            output_table = os.path.join(os.getcwd(), table_names[itable])

            if columns:
                df1 = pd.DataFrame({'ID': label_numbers})
                df2 = pd.DataFrame(np.transpose(columns),
                                   columns=column_names)
                df = pd.concat([df1, df2], axis=1)
                if label_names:
                    df0 = pd.DataFrame({'name': label_names})
                    df = pd.concat([df0, df], axis=1)
                df.to_csv(output_table, index=False)

            if not os.path.exists(output_table):
                raise IOError(output_table + " not found")

            #-----------------------------------------------------------------
            # Return correct table file name:
            #-----------------------------------------------------------------
            if itable == 0:
                label_table = output_table
            elif itable == 1:
                sulcus_table = output_table
            elif itable == 2:
                fundus_table = output_table

    return label_table, sulcus_table, fundus_table
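
The eight summary statistics appended per shape measure above (median, MAD, mean, SD, skew, kurtosis, 25%, 75%) come from mindboggle.guts.compute.stats_per_label. As a rough, self-contained sketch of what that step computes, the toy example below groups a per-vertex shape array by label and derives the same eight statistics with plain NumPy; the data and formulas are illustrative assumptions, not Mindboggle's implementation (which also accepts per-vertex areas, as the area_array argument shows).

import numpy as np

# Hypothetical per-vertex inputs: one shape value and one label per vertex.
shape_array = np.array([1.2, 0.8, 1.1, 2.5, 2.7, 2.4, 0.9])
labels = np.array([3, 3, 3, 5, 5, 5, -1])  # -1 = excluded, as in exclude_labels

def eight_stats(values):
    # Median and median absolute deviation:
    median = float(np.median(values))
    mad = float(np.median(np.abs(values - median)))
    # Mean, standard deviation, and standardized 3rd/4th moments:
    mean, sd = float(values.mean()), float(values.std())
    z = (values - mean) / sd if sd else values * 0.0
    skew, kurt = float((z ** 3).mean()), float((z ** 4).mean())
    # Lower and upper quartiles:
    q25, q75 = np.percentile(values, [25, 75])
    return median, mad, mean, sd, skew, kurt, q25, q75

for label in [3, 5]:
    stats = eight_stats(shape_array[labels == label])
    print(label, [round(float(s), 3) for s in stats])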
Example #2
def write_shape_stats(labels_or_file=[],
                      sulci=[],
                      fundi=[],
                      affine_transform_files=[],
                      inverse_booleans=[],
                      transform_format='itk',
                      area_file='',
                      normalize_by_area=False,
                      mean_curvature_file='',
                      travel_depth_file='',
                      geodesic_depth_file='',
                      freesurfer_thickness_file='',
                      freesurfer_curvature_file='',
                      freesurfer_sulc_file='',
                      labels_spectra=[],
                      labels_spectra_IDs=[],
                      sulci_spectra=[],
                      sulci_spectra_IDs=[],
                      labels_zernike=[],
                      labels_zernike_IDs=[],
                      sulci_zernike=[],
                      sulci_zernike_IDs=[],
                      exclude_labels=[-1],
                      verbose=False):
    """
    Make tables of shape statistics per label, sulcus, and/or fundus.

    A single feature such as a gyrus, sulcus, or fundus can contain
    thousands of vertices, so for per-vertex shape measures it makes sense
    to characterize a feature's collective shape as a distribution of shape
    values. Mindboggle's stats_per_label function generates tables of
    summary statistical measures for these distributions; the tables also
    include the shape measures computed on whole cortical features.

    Note ::
        This function is tailored for Mindboggle outputs.

    Parameters
    ----------
    labels_or_file : list or string
        label number for each vertex or name of VTK file with index scalars
    sulci : list of integers
        indices to sulci, one per vertex, with -1 indicating no sulcus
    fundi : list of integers
        indices to fundi, one per vertex, with -1 indicating no fundus
    affine_transform_files : list of strings
        affine transform files to standard space
    inverse_booleans : list of zeros and ones
        for each transform, 1 to take the inverse, else 0
    transform_format : string
        format for transform file
        Ex: 'txt' for text, 'itk' for ITK, and 'mat' for Matlab format
    area_file :  string
        name of VTK file with surface area scalar values
    normalize_by_area : bool
        normalize all shape measures by area of label/feature? (UNTESTED)
    mean_curvature_file :  string
        name of VTK file with mean curvature scalar values
    travel_depth_file :  string
        name of VTK file with travel depth scalar values
    geodesic_depth_file :  string
        name of VTK file with geodesic depth scalar values
    freesurfer_thickness_file :  string
        name of VTK file with FreeSurfer thickness scalar values
    freesurfer_curvature_file :  string
        name of VTK file with FreeSurfer curvature (curv) scalar values
    freesurfer_sulc_file :  string
        name of VTK file with FreeSurfer convexity (sulc) scalar values
    labels_spectra : list of lists of floats
        Laplace-Beltrami spectra for each labeled region
    labels_spectra_IDs : list of integers
        unique labels for labels_spectra
    sulci_spectra : list of lists of floats
        Laplace-Beltrami spectra for each sulcus
    sulci_spectra_IDs : list of integers
        unique sulcus IDs for sulci_spectra
    labels_zernike : list of lists of floats
        Zernike moments for each labeled region
    labels_zernike_IDs : list of integers
        unique labels for labels_zernike
    sulci_zernike : list of lists of floats
        Zernike moments for each sulcus
    sulci_zernike_IDs : list of integers
        unique sulcus IDs for sulci_zernike
    exclude_labels : list of integers
        indices to be excluded (in addition to -1)
    verbose : bool
        print statements?

    Returns
    -------
    label_table :  string
        output table filename for label shapes
    sulcus_table :  string
        output table filename for sulcus shapes
    fundus_table :  string
        output table filename for fundus shapes

    Examples
    --------
    >>> from mindboggle.mio.tables import write_shape_stats
    >>> from mindboggle.mio.vtks import read_scalars
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> label_file = fetch_data(urls['left_freesurfer_labels'], '', '.vtk')
    >>> sulci_file = fetch_data(urls['left_sulci'], '', '.vtk')
    >>> fundi_file = fetch_data(urls['left_fundus_per_sulcus'], '', '.vtk')
    >>> mean_curvature_file = fetch_data(urls['left_mean_curvature'], '', '.vtk')
    >>> travel_depth_file = fetch_data(urls['left_travel_depth'], '', '.vtk')
    >>> geodesic_depth_file = fetch_data(urls['left_geodesic_depth'], '', '.vtk')
    >>> area_file = fetch_data(urls['left_area'], '', '.vtk')
    >>> freesurfer_thickness_file = ''
    >>> freesurfer_curvature_file = ''
    >>> freesurfer_sulc_file = ''
    >>> sulci, name = read_scalars(sulci_file)
    >>> fundi, name = read_scalars(fundi_file)
    >>> affine_transform_files = []
    >>> inverse_booleans = []
    >>> transform_format = 'itk'
    >>> normalize_by_area = False
    >>> labels, name = read_scalars(label_file)
    >>> labels_spectra = []
    >>> labels_spectra_IDs = []
    >>> sulci_spectra = []
    >>> sulci_spectra_IDs = []
    >>> labels_zernike = []
    >>> labels_zernike_IDs = []
    >>> sulci_zernike = []
    >>> sulci_zernike_IDs = []
    >>> exclude_labels = [-1]
    >>> verbose = False
    >>> label_table, sulcus_table, fundus_table = write_shape_stats(label_file,
    ...     sulci, fundi, affine_transform_files, inverse_booleans,
    ...     transform_format, area_file, normalize_by_area,
    ...     mean_curvature_file, travel_depth_file, geodesic_depth_file,
    ...     freesurfer_thickness_file, freesurfer_curvature_file,
    ...     freesurfer_sulc_file, labels_spectra, labels_spectra_IDs,
    ...     sulci_spectra, sulci_spectra_IDs, labels_zernike,
    ...     labels_zernike_IDs, sulci_zernike, sulci_zernike_IDs,
    ...     exclude_labels, verbose)

    """
    import os
    import numpy as np
    import pandas as pd

    from mindboggle.guts.compute import stats_per_label
    from mindboggle.guts.compute import means_per_label
    from mindboggle.guts.compute import sum_per_label
    from mindboggle.mio.vtks import read_scalars, read_vtk
    from mindboggle.mio.vtks import apply_affine_transforms
    from mindboggle.mio.labels import DKTprotocol

    dkt = DKTprotocol()

    # Make sure inputs are lists:
    if isinstance(labels_or_file, np.ndarray):
        labels = [int(x) for x in labels_or_file]
    elif isinstance(labels_or_file, list):
        labels = labels_or_file
    elif isinstance(labels_or_file, str):
        labels, name = read_scalars(labels_or_file)
    if isinstance(sulci, np.ndarray):
        sulci = [int(x) for x in sulci]
    if isinstance(fundi, np.ndarray):
        fundi = [int(x) for x in fundi]

    if not labels and not sulci and not fundi:
        raise IOError('No feature data to tabulate in write_shape_stats().')

    spectrum_start = 1  # Store all columns of spectral components (0),
                        # or start from higher frequency components (>=1)

    # ------------------------------------------------------------------------
    # Feature lists, shape names, and shape files:
    # ------------------------------------------------------------------------
    # Feature lists:
    feature_lists = [labels, sulci, fundi]
    feature_names = ['label', 'sulcus', 'fundus']
    spectra_lists = [labels_spectra, sulci_spectra]
    spectra_ID_lists = [labels_spectra_IDs, sulci_spectra_IDs]
    zernike_lists = [labels_zernike, sulci_zernike]
    zernike_ID_lists = [labels_zernike_IDs, sulci_zernike_IDs]
    table_names = [
        'label_shapes.csv', 'sulcus_shapes.csv', 'fundus_shapes.csv'
    ]

    # Shape names corresponding to shape files below:
    shape_names = [
        'area', 'travel depth', 'geodesic depth', 'mean curvature',
        'freesurfer curvature', 'freesurfer thickness',
        'freesurfer convexity (sulc)'
    ]

    # Load shape files as a list of numpy arrays of per-vertex shape values:
    shape_files = [
        area_file, travel_depth_file, geodesic_depth_file, mean_curvature_file,
        freesurfer_curvature_file, freesurfer_thickness_file,
        freesurfer_sulc_file
    ]
    shape_arrays = []
    first_pass = True
    area_array = []

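    # The first existing shape file is read with read_vtk() so that the mesh
    # points are available later for per-feature mean positions; subsequent
    # shape files only need their per-vertex scalars (read_scalars()).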
    for ishape, shape_file in enumerate(shape_files):
        if os.path.exists(shape_file):
            if first_pass:
                points, indices, lines, faces, scalars_array, scalar_names, \
                    npoints, input_vtk = read_vtk(shape_file, True, True)
                points = np.array(points)
                first_pass = False
                if affine_transform_files and transform_format:
                    affine_points, \
                        foo1 = apply_affine_transforms(affine_transform_files,
                                    inverse_booleans, transform_format,
                                    points, vtk_file_stem='')
            else:
                scalars_array, name = read_scalars(shape_file, True, True)
            if scalars_array.size:
                shape_arrays.append(scalars_array)

                # Store area array:
                if ishape == 0:
                    area_array = scalars_array.copy()

    if normalize_by_area:
        use_area = area_array
    else:
        use_area = []

    # Initialize table file names:
    label_table = ''
    sulcus_table = ''
    fundus_table = ''

    # Loop through features / tables:
    for itable, feature_list in enumerate(feature_lists):
        column_names = []

        # ----------------------------------------------------------------
        # Label names:
        # ----------------------------------------------------------------
        if itable == 0:
            label_numbers = dkt.cerebrum_cortex_DKT31_numbers
            label_names = dkt.cerebrum_cortex_DKT31_names
        elif itable in [1, 2]:
            label_numbers = dkt.sulcus_numbers
            label_names = dkt.sulcus_names
        else:
            label_numbers = []
            label_names = []
        include_labels = label_numbers
        nlabels = len(label_numbers)

        # --------------------------------------------------------------------
        # For each feature, construct a table of average shape values:
        # --------------------------------------------------------------------
        if feature_list:
            feature_name = feature_names[itable]
            columns = []

            # ----------------------------------------------------------------
            # Loop through shape measures:
            # ----------------------------------------------------------------
            for ishape, shape_array in enumerate(shape_arrays):
                shape = shape_names[ishape]
                if verbose:
                    print('  Compute statistics on {0} {1}...'.format(
                        feature_name, shape))
                # ------------------------------------------------------------
                # Append feature areas to columns:
                # ------------------------------------------------------------
                if ishape == 0 and np.size(area_array):
                    sums, label_list = sum_per_label(shape_array, feature_list,
                                                     include_labels,
                                                     exclude_labels)
                    column_names.append(shape)
                    columns.append(sums)
                # ------------------------------------------------------------
                # Append feature shape statistics to columns:
                # ------------------------------------------------------------
                else:
                    medians, mads, means, sdevs, skews, kurts, \
                    lower_quarts, upper_quarts, \
                    label_list = stats_per_label(shape_array, feature_list,
                                        include_labels, exclude_labels,
                                        area_array, precision=1)

                    column_names.append(shape + ': median')
                    column_names.append(shape + ': MAD')
                    column_names.append(shape + ': mean')
                    column_names.append(shape + ': SD')
                    column_names.append(shape + ': skew')
                    column_names.append(shape + ': kurtosis')
                    column_names.append(shape + ': 25%')
                    column_names.append(shape + ': 75%')
                    columns.append(medians)
                    columns.append(mads)
                    columns.append(means)
                    columns.append(sdevs)
                    columns.append(skews)
                    columns.append(kurts)
                    columns.append(lower_quarts)
                    columns.append(upper_quarts)

            # ----------------------------------------------------------------
            # Mean positions in the original space:
            # ----------------------------------------------------------------
            # Compute mean position per feature:
            positions, sdevs, label_list, foo = means_per_label(
                points, feature_list, include_labels, exclude_labels, use_area)

            # Append mean x,y,z position per feature to columns:
            xyz_positions = np.asarray(positions)
            for ixyz, xyz in enumerate(['x', 'y', 'z']):
                column_names.append('mean position: {0}'.format(xyz))
                columns.append(xyz_positions[:, ixyz].tolist())

            # ----------------------------------------------------------------
            # Mean positions in standard space:
            # ----------------------------------------------------------------
            if affine_transform_files and transform_format:
                # Compute standard space mean position per feature:
                standard_positions, sdevs, label_list, \
                foo = means_per_label(affine_points,
                    feature_list, include_labels, exclude_labels, use_area)

                # Append standard space x,y,z position per feature to columns:
                xyz_std_positions = np.asarray(standard_positions)
                for ixyz, xyz in enumerate(['x', 'y', 'z']):
                    column_names.append('mean position in standard space:'
                                        ' {0}'.format(xyz))
                    columns.append(xyz_std_positions[:, ixyz].tolist())

            # ----------------------------------------------------------------
            # Laplace-Beltrami spectra:
            # ----------------------------------------------------------------
            if itable in [0, 1]:
                spectra = spectra_lists[itable]
                if spectra:
                    spectra_IDs = spectra_ID_lists[itable]

                    # Construct a matrix of spectra:
                    len_spectrum = len(spectra[0])
                    spectrum_matrix = np.zeros((nlabels, len_spectrum))
                    for ilabel, label in enumerate(include_labels):
                        if label in spectra_IDs:
                            spectrum = spectra[spectra_IDs.index(label)]
                            spectrum_matrix[ilabel, 0:len_spectrum] = spectrum

                    # Append spectral shape name and values to columns:
                    for ispec in range(spectrum_start, len_spectrum):
                        columns.append(spectrum_matrix[:, ispec].tolist())
                        column_names.append('Laplace-Beltrami spectrum:'
                                            ' component {0}'.format(ispec + 1))

            # ----------------------------------------------------------------
            # Zernike moments:
            # ----------------------------------------------------------------
            if itable in [0, 1]:
                zernike = zernike_lists[itable]
                if zernike:
                    zernike_IDs = zernike_ID_lists[itable]

                    # Construct a matrix of Zernike moments:
                    len_moments = len(zernike[0])
                    moments_matrix = np.zeros((nlabels, len_moments))
                    for ilabel, label in enumerate(include_labels):
                        if label in zernike_IDs:
                            moments = zernike[zernike_IDs.index(label)]
                            moments_matrix[ilabel, 0:len_moments] = moments

                    # Append Zernike shape name and values to columns:
                    for imoment in range(0, len_moments):
                        columns.append(moments_matrix[:, imoment].tolist())
                        column_names.append('Zernike moments: component {0}'.
                                            format(imoment + 1))

            # ----------------------------------------------------------------
            # Write labels/IDs and values to table:
            # ----------------------------------------------------------------
            # Write labels/IDs to table:
            output_table = os.path.join(os.getcwd(), table_names[itable])

            if columns:
                df1 = pd.DataFrame({'ID': label_numbers})
                df2 = pd.DataFrame(np.transpose(columns), columns=column_names)
                df = pd.concat([df1, df2], axis=1)
                if label_names:
                    df0 = pd.DataFrame({'name': label_names})
                    df = pd.concat([df0, df], axis=1)
                df.to_csv(output_table, index=False, encoding='utf-8')

            if not os.path.exists(output_table):
                raise IOError(output_table + " not found")

            # ----------------------------------------------------------------
            # Return correct table file name:
            # ----------------------------------------------------------------
            if itable == 0:
                label_table = output_table
            elif itable == 1:
                sulcus_table = output_table
            elif itable == 2:
                fundus_table = output_table

    return label_table, sulcus_table, fundus_table
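
Both versions assemble each output CSV the same way: every entry of columns is one per-label list, so np.transpose(columns) yields a labels-by-measures matrix that is concatenated with the ID column and, when available, the name column. Below is a minimal sketch of that assembly and of reading the table back; the label numbers, names, and values are made-up stand-ins for illustration.

import numpy as np
import pandas as pd

# Made-up stand-ins for label_numbers, label_names, and two shape columns:
label_numbers = [1002, 1003]
label_names = ['caudal anterior cingulate', 'caudal middle frontal']
column_names = ['area', 'travel depth: median']
columns = [[1520.4, 2210.7],   # 'area' per label
           [3.1, 2.8]]         # 'travel depth: median' per label

df1 = pd.DataFrame({'ID': label_numbers})
df2 = pd.DataFrame(np.transpose(columns), columns=column_names)
df = pd.concat([pd.DataFrame({'name': label_names}), df1, df2], axis=1)
df.to_csv('label_shapes.csv', index=False, encoding='utf-8')

# Read the table back for downstream analysis:
table = pd.read_csv('label_shapes.csv')
print(table.loc[table['ID'] == 1002, 'area'])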