示例#1
0
def get_atlas_path(atlas_name=None):
    """Validates the atlas name and returns its location.

    Parameters
    ----------
    atlas_name : str or None
        Name of a packaged atlas, or a path to a Freesurfer-organized
        cortical atlas directory. None/'None'/'' default to 'fsaverage'.

    Returns
    -------
    (atlas_path, atlas_name) : tuple of str
        Resolved path to the atlas and its (possibly normalized) name.

    Raises
    ------
    NotImplementedError
        If the atlas is not recognized or its location can not be resolved.
    """

    if atlas_name in [None, 'None', '']:
        atlas_name = 'fsaverage'

    atlas_name = check_atlas(atlas_name)

    if atlas_name in cfg.atlas_list:
        # atlases shipped with this package live under <package_dir>/atlases
        this_dir = dirname(realpath(__file__))
        if atlas_name in ['glasser2016']:
            atlas_path = pjoin(this_dir, 'atlases', 'glasser2016',
                               'fsaverage_annot_figshare3498446')
        elif atlas_name in ['fsaverage', 'yeo2011_fsaverage5',
                            'yeo2011_fsaverage6',
                            'yeo2011_fsaverage_highres']:
            atlas_path = pjoin(this_dir, 'atlases', atlas_name)
        else:
            raise NotImplementedError('Requested atlas is not implemented or unreadable.')

    # cortical atlas in Freesurfer org
    elif os.path.isdir(atlas_name) and check_atlas_annot_exist(atlas_name):
        atlas_path = dirname(realpath(atlas_name))
        atlas_name = basename(atlas_name)
    else:
        raise NotImplementedError('Invalid choice for atlas!')

    return realpath(atlas_path), atlas_name
示例#2
0
def get_atlas_path(atlas_name=None):
    """Validates the atlas name and returns its location."""

    # missing / empty names fall back to the default atlas
    if atlas_name in [None, 'None', '']:
        atlas_name = 'fsaverage'

    atlas_name, _ = check_atlas(atlas_name)

    if atlas_name in cfg.atlas_list:
        # all bundled atlases live under <package_dir>/atlases
        pkg_dir = dirname(realpath(__file__))
        surface_atlases = ('fsaverage', 'yeo2011_fsaverage5',
                           'yeo2011_fsaverage6', 'yeo2011_fsaverage_highres')
        volumetric_atlases = ('cat_aal', 'cat_lpba40', 'cat_ibsr')

        if atlas_name == 'glasser2016':
            atlas_path = pjoin(pkg_dir, 'atlases', 'glasser2016',
                               'fsaverage_annot_figshare3498446')
        elif atlas_name in surface_atlases:
            atlas_path = pjoin(pkg_dir, 'atlases', atlas_name)
        elif atlas_name in volumetric_atlases:
            # TODO inconsistency: this returns a path to a FILE,
            #   whereas cortical atlases are referred to by their FS folders
            atlas_path = pjoin(pkg_dir, 'atlases', atlas_name, 'atlas.nii')
        else:
            raise NotImplementedError(
                'Requested atlas is not implemented or unreadable.')

    # cortical atlas in Freesurfer org
    elif os.path.isdir(atlas_name) and check_atlas_annot_exist(atlas_name):
        atlas_path = dirname(realpath(atlas_name))
        atlas_name = basename(atlas_name)
    else:
        raise NotImplementedError('Invalid choice for atlas!')

    return realpath(atlas_path), atlas_name
示例#3
0
def get_atlas_path(atlas_name=None):
    """Validates the atlas name and returns its location."""

    # treat missing / empty names as a request for the default atlas
    if atlas_name in [None, 'None', '']:
        atlas_name = 'fsaverage'

    atlas_name, _ = check_atlas(atlas_name)

    if atlas_name in cfg.atlas_list:
        # bundled atlases live under <package_dir>/atlases
        atlases_root = Path(__file__).resolve().parent / 'atlases'
        if atlas_name == 'glasser2016':
            atlas_path = (atlases_root / 'glasser2016'
                          / 'fsaverage_annot_figshare3498446')
        elif atlas_name in ('fsaverage', 'yeo2011_fsaverage5',
                            'yeo2011_fsaverage6',
                            'yeo2011_fsaverage_highres'):
            atlas_path = atlases_root / atlas_name
        elif atlas_name in ('cat_aal', 'cat_lpba40', 'cat_ibsr'):
            # TODO inconsistency: this returns a path to a FILE,
            #   whereas cortical atlases are referred to by their FS folders
            atlas_path = atlases_root / atlas_name / 'atlas.nii'
        else:
            raise NotImplementedError(
                'Atlas {} is not implemented / unreadable.'.format(atlas_name))

    # cortical atlas in Freesurfer org
    elif os.path.isdir(atlas_name) and check_atlas_annot_exist(atlas_name):
        resolved_dir = Path(atlas_name).resolve()
        atlas_path = resolved_dir.parent
        atlas_name = resolved_dir.name
    else:
        raise NotImplementedError('Invalid choice for atlas!')

    return atlas_path.resolve(), atlas_name
示例#4
0
def extract_multiedge(subject_id_list,
                      input_dir,
                      base_feature_list=cfg.default_features_multi_edge,
                      weight_method_list=cfg.default_weight_method,
                      summary_stats=cfg.multi_edge_summary_func_default,
                      num_bins=cfg.default_num_bins,
                      edge_range_dict=cfg.edge_range_predefined,
                      atlas=cfg.default_atlas,
                      smoothing_param=cfg.default_smoothing_param,
                      node_size=cfg.default_node_size,
                      out_dir=None,
                      return_results=False,
                      overwrite_results=False,
                      num_procs=cfg.default_num_procs):
    """
    Extracts weighted networks (matrix of pair-wise ROI distances) from multiple gray matter features based on Freesurfer processing.

    Parameters
    ----------
    subject_id_list : str or list
         must be path to a file containing subject IDs, or a list of subject IDs
    input_dir : str
        Path to the input directory where features can be read.
        For example, this can be Freesurfer's SUBJECTS_DIR, where output processing is stored.
        Or another directory with a structure that graynet can parse.
    base_feature_list : list
        Set of features that drive the different edges between the pair of ROIs.

        For example, if you choose thickness and pial_curv, each pair of ROIs will have two edges.

        This multi-edge network can be turned into a single network based on averaging weights from different individual networks.

    weight_method : string(s), optional
        Type of distance (or metric) to compute between the pair of histograms.

        It must be one of the following methods:

        - 'chebyshev'
        - 'chebyshev_neg'
        - 'chi_square'
        - 'correlate'
        - 'correlate_1'
        - 'cosine'
        - 'cosine_1'
        - 'cosine_2'
        - 'cosine_alt'
        - 'euclidean'
        - 'fidelity_based'
        - 'histogram_intersection'
        - 'histogram_intersection_1'
        - 'jensen_shannon'
        - 'kullback_leibler'
        - 'manhattan'
        - 'minowski'
        - 'noelle_1'
        - 'noelle_2'
        - 'noelle_3'
        - 'noelle_4'
        - 'noelle_5'
        - 'relative_bin_deviation'
        - 'relative_deviation'

        Note only the following are *metrics*:

        - 'manhattan'
        - 'minowski'
        - 'euclidean'
        - 'noelle_2'
        - 'noelle_4'
        - 'noelle_5'

        The following are *semi- or quasi-metrics*:

        - 'kullback_leibler'
        - 'jensen_shannon'
        - 'chi_square'
        - 'chebyshev'
        - 'cosine_1'
        - 'chebyshev_neg'
        - 'correlate_1'
        - 'histogram_intersection_1'
        - 'relative_deviation'
        - 'relative_bin_deviation'
        - 'noelle_1'
        - 'noelle_3'

        The following are  classified to be similarity functions:

        - 'histogram_intersection'
        - 'correlate'
        - 'cosine'
        - 'cosine_2'
        - 'cosine_alt'
        - 'fidelity_based'

        *Default* choice: 'manhattan'.

    summary_stats : list of str
        A string, or list of strings, each representing a method (like 'median', 'prod' or 'max'),
        to compute a summary statistic from the array of multiple weights computed.

        This must be available as a member of numpy or scipy.stats.

    num_bins : int
        Number of histogram bins to use when computing pair-wise weights based on histogram distance. Default : 25

    edge_range_dict : tuple or list
        The range of edges (two finite values) within which to build the histogram e.g. ``--edge_range 0 5``.
        This can be helpful (and important) to ensure correspondence across multiple invocations of graynet (e.g. for different subjects), in terms of range across all bins as well as individual bin edges.

        Default :

            - ( 0.0, 5.0) for ``freesurfer_thickness`` and
            - (-0.3, 0.3) for ``freesurfer_curv``.

    atlas : str
        Name of the atlas whose parcellation to be used.
        Choices for cortical parcellation: ['fsaverage', 'glasser2016'], which are primary cortical.
        Volumetric whole-brain atlases will be added soon.

    smoothing_param : scalar
        Smoothing parameter, which could be fwhm for Freesurfer cortical features,
        or another relevant for the chosen base_feature_list.
        Default: assumed as fwhm=10mm for the default feature choice 'thickness'

    node_size : scalar, optional
        Parameter to indicate the size of the ROIs, subparcels or patches, depending on type of atlas or feature.
        This feature is not implemented yet, just a placeholder and to enable default computation.

    out_dir : str, optional
        Path to output directory to store results.
        Default: None, results are returned, but not saved to disk.
        If this is None, return_results must be true.

    return_results : bool
        Flag to indicate whether to return the results to be returned.
        This flag helps to reduce the memory requirements, when the number of nodes in a parcellation or
        the number of subjects or weight methods are large, as it doesn't retain results for all combinations,
        when running from command line interface (or HPC). Default: False
        If this is False, out_dir must be specified to save the results to disk.

    overwrite_results : bool
        Flag to request overwriting of existing results, in case of reruns/failed jobs. By default, if the expected output file exists and is of non-zero size, its computation is skipped (assuming the file is complete, usable and not corrupted).

    num_procs : int
        Number of parallel processes to use to speed up computation.

    Returns
    -------
    edge_weights_all : dict, None
        If return_results is True, this will be a dictionary keyed in by a tuple: (weight method, subject_ID)
        The value of each edge_weights_all[(weight method, subject_ID)] is
        a numpy array of length p = k*(k-1)/2, with k = number of nodes in the atlas parcellation.
        If return_results is False, this will be None, which is the default.
    """

    # All the checks must happen here, as this is key function in the API
    check_params_multiedge(base_feature_list, input_dir, atlas,
                           smoothing_param, node_size, out_dir, return_results)
    atlas = check_atlas(atlas)

    subject_id_list, num_subjects, max_id_width, nd_id = check_subjects(
        subject_id_list)

    num_bins = check_num_bins(num_bins)
    edge_range_dict = check_edge_range_dict(edge_range_dict, base_feature_list)
    weight_method_list, num_weights, max_wtname_width, nd_wm = check_weights(
        weight_method_list)

    # validating the choice and getting a callable
    summary_stats, summary_stat_names, _, _, _ = check_stat_methods(
        summary_stats)

    num_procs = check_num_procs(num_procs)
    pretty_print_options = (max_id_width, nd_id, num_weights, max_wtname_width,
                            nd_wm)

    # roi_labels, ctx_annot = parcellate.freesurfer_roi_labels(atlas)
    # uniq_rois, roi_size, num_nodes = roi_info(roi_labels)
    uniq_rois, centroids, roi_labels = parcellate.roi_labels_centroids(atlas)

    print('\nProcessing {} features resampled to {} atlas,'
          ' smoothed at {} with node size {}'.format(base_feature_list, atlas,
                                                     smoothing_param,
                                                     node_size))

    if not return_results:
        if out_dir is None:
            raise ValueError(
                'When return_results=False, out_dir must be specified to be able to save the results.'
            )
        # makedirs creates missing parent dirs as well (os.mkdir would fail),
        # and exist_ok avoids the check-then-create race
        os.makedirs(out_dir, exist_ok=True)

    partial_func_extract = partial(
        per_subject_multi_edge, input_dir, base_feature_list, roi_labels,
        centroids, weight_method_list, summary_stats, summary_stat_names,
        atlas, smoothing_param, node_size, num_bins, edge_range_dict, out_dir,
        return_results, overwrite_results, pretty_print_options)
    if num_procs > 1:
        chunk_size = int(np.ceil(num_subjects / num_procs))
        with Manager():
            with Pool(processes=num_procs) as pool:
                edge_weights_list_dicts = pool.map(partial_func_extract,
                                                   subject_id_list, chunk_size)
    else:
        # reverting to sequential processing
        edge_weights_list_dicts = [
            partial_func_extract(subject=sub_id) for sub_id in subject_id_list
        ]

    if return_results:
        edge_weights_all = dict()
        for combo in edge_weights_list_dicts:
            # each element from output of parallel loop is a dict
            #   keyed in by (weight method, subject_ID)
            edge_weights_all.update(combo)
    else:
        edge_weights_all = None

    print('\ngraynet computation done.')
    return edge_weights_all
示例#5
0
def parse_args():
    """Parser/validator for the cmd line args.

    Returns the full tuple of validated parameters consumed by the CLI:
    (subject_ids_path, input_dir, feature_list, weight_method_list,
     do_multi_edge, summary_stat, multi_edge_range_out, num_bins, edge_range,
     atlas, out_dir, node_size, smoothing_param, roi_stats, num_procs,
     overwrite_results)
    """

    parser = get_parser()

    if len(sys.argv) < 2:
        parser.print_help()
        print('\nToo few arguments!')
        parser.exit(1)

    # parsing
    try:
        params = parser.parse_args()
    except Exception as exc:
        print(exc)
        raise ValueError('Unable to parse command-line arguments.')

    subject_ids_path = os.path.abspath(params.subject_ids_path)
    if not os.path.exists(subject_ids_path):
        raise IOError("Given subject IDs file doesn't exist.")

    input_dir = os.path.abspath(params.input_dir)
    if not os.path.exists(input_dir):
        raise IOError("Given input directory doesn't exist.")

    out_dir = params.out_dir
    if out_dir is not None:
        # makedirs also creates missing parents (os.mkdir would fail on them)
        # and exist_ok avoids the check-then-create race
        os.makedirs(out_dir, exist_ok=True)

    feature_list = utils.check_features(params.features)

    do_multi_edge = bool(params.do_multi_edge)
    summary_stat = params.summary_stat
    # BUG FIX: np.array(None, dtype=float) silently produces a 0-d nan array,
    #   which is never `is None` and has no len(); convert only when a range
    #   was actually specified on the command line
    multi_edge_range = params.multi_edge_range
    if multi_edge_range is not None:
        multi_edge_range = np.array(multi_edge_range, dtype=float)
    multi_edge_range_out = None
    if do_multi_edge:
        # ensure at least two features
        num_features = len(feature_list)
        if num_features < 2:
            raise ValueError(
                'To enable multi-edge computation, specify at least two valid features.'
            )

        if multi_edge_range is not None:
            nvals_per_feat = 2
            if len(multi_edge_range) != nvals_per_feat * num_features:
                raise ValueError(
                    'Insufficient specification of edge ranges for multiple features!\n'
                    'Needed : {} exactly, given : {}'.format(
                        nvals_per_feat * num_features, len(multi_edge_range)))
            # split the flat range array into one (min, max) pair per feature
            indiv_ranges = np.split(
                multi_edge_range,
                range(nvals_per_feat, len(multi_edge_range), nvals_per_feat))

            multi_edge_range_out = dict()
            for ix, feat in enumerate(feature_list):
                multi_edge_range_out[feat] = indiv_ranges[ix]

        utils.check_stat_methods(summary_stat)
    else:
        summary_stat = None
        if len(feature_list) > 1:
            raise ValueError(
                'For single edge computation, only one feature can be specified.'
            )

    # validating choices and doing only one of the two
    weight_methods = params.weight_methods
    roi_stats = params.roi_stats
    if weight_methods is not None:
        weight_method_list, _, _, _ = check_weights(weight_methods)
        if roi_stats is not None:
            print(
                'ROI stats requested with network weights computation - not allowed.'
            )
            sys.exit(1)
        roi_stats = None
    elif roi_stats is not None:
        roi_stats, _, _, _, _ = check_stat_methods(roi_stats)
        weight_method_list = None
    else:
        raise ValueError('One of weight_method and roi_stats must be chosen.')

    atlas = check_atlas(params.atlas)
    # num_procs will be validated inside in the functions using it.

    # TODO should we check atlas compatibility with data for two subjects randomly
    #       load data for subjects, and check atlas parcellation is compatible in size with data

    return subject_ids_path, input_dir, \
           feature_list, weight_method_list, \
           do_multi_edge, summary_stat, multi_edge_range_out, \
           params.num_bins, params.edge_range, \
           atlas, out_dir, params.node_size, params.smoothing_param, roi_stats, \
           params.num_procs, params.overwrite_results
示例#6
0
def parse_args():
    """Parser/validator for the cmd line args.

    Returns the full tuple of validated parameters consumed by the CLI:
    (sub_id_list_path, input_dir, feature_list, weight_method_list,
     do_multi_edge, summary_stat, multi_edge_range_out, num_bins, edge_range,
     atlas_spec, out_dir, node_size, smoothing_param, roi_stats, num_procs,
     overwrite_results)
    """

    parser = get_parser()

    if len(sys.argv) < 2:
        parser.print_help()
        print('\nToo few arguments!')
        parser.exit(1)

    # parsing
    try:
        params = parser.parse_args()
    except Exception as exc:
        print(exc)
        raise ValueError('Unable to parse command-line arguments.')

    feature_list = utils.check_features(params.features)

    input_dir = Path(params.input_dir).resolve()
    if not input_dir.exists():
        raise IOError("Given input directory doesn't exist!")

    out_dir = params.out_dir
    if out_dir is not None:
        out_dir = Path(out_dir).resolve()
    else:
        out_dir = input_dir / "graynet"

    if not out_dir.exists():
        out_dir.mkdir(exist_ok=True, parents=True)

    # allowing auto population of subject IDs for freesurfer directory
    sub_id_list_path = params.subject_ids_path
    if sub_id_list_path is None:
        # this is allowed only when all features are freesurfer-related only
        for feat in feature_list:
            if feat not in cfg.features_freesurfer:
                raise ValueError(
                    "Path to subject ID list must be specified "
                    "when non-Freesurfer features are being processed!")

        # get all IDs in Freesurfer $SUBJECTS_DIR that are folders with surf subdir
        # BUG FIX: keep plain-string folder names; Path objects can not be
        #   joined with str.join when writing the ID list below
        id_list = [
            sub_id.name for sub_id in input_dir.iterdir()
            if (sub_id.is_dir() and sub_id.joinpath('surf').is_dir())
        ]

        if len(id_list) < 1:
            raise ValueError(
                'Given Freesurfer folder does not contain any subjects:\n{}'
                ''.format(input_dir))

        # write to a file in out folder
        sub_id_list_path = input_dir / 'id_list_freesurfer_graynet.txt'
        try:
            with open(sub_id_list_path, 'w') as idlf:
                idlf.write('\n'.join(id_list))
        except OSError:
            # narrow except: a bare except here previously masked the
            #   TypeError caused by writing Path objects
            raise IOError(
                'Unable to write auto generated id list (n={}) to disk'
                ' to\n  {}'.format(len(id_list), sub_id_list_path))
    else:
        sub_id_list_path = Path(params.subject_ids_path).resolve()
        if not sub_id_list_path.exists():
            raise IOError("Given subject IDs file doesn't exist.")

    do_multi_edge = bool(params.do_multi_edge)
    summary_stat = params.summary_stat
    # BUG FIX: np.array(None, dtype=float) silently produces a 0-d nan array,
    #   which is never `is None` and has no len(); convert only when given
    multi_edge_range = params.multi_edge_range
    if multi_edge_range is not None:
        multi_edge_range = np.array(multi_edge_range, dtype=float)
    multi_edge_range_out = None
    if do_multi_edge:
        # ensure at least two features
        num_features = len(feature_list)
        if num_features < 2:
            raise ValueError(
                'To enable multi-edge computation, specify at least '
                'two valid features.')

        if multi_edge_range is not None:
            nvals_per_feat = 2
            if len(multi_edge_range) != nvals_per_feat * num_features:
                raise ValueError(
                    'Insufficient specification of edge ranges for multiple features!'
                    '\nNeeded : {} exactly, given : {}'
                    ''.format(nvals_per_feat * num_features,
                              len(multi_edge_range)))
            # split the flat range array into one (min, max) pair per feature
            indiv_ranges = np.split(
                multi_edge_range,
                range(nvals_per_feat, len(multi_edge_range), nvals_per_feat))

            multi_edge_range_out = dict()
            for ix, feat in enumerate(feature_list):
                multi_edge_range_out[feat] = indiv_ranges[ix]

        utils.check_stat_methods(summary_stat)
    else:
        summary_stat = None
        if len(feature_list) > 1:
            raise ValueError('For single edge computation, '
                             'only one feature can be specified.')

    # validating choices and doing only one of the two
    weight_methods = params.weight_methods
    roi_stats = params.roi_stats
    if weight_methods is not None:
        weight_method_list, _, _, _ = check_weights(weight_methods)
        if roi_stats is not None:
            print(
                'ROI stats requested with network weights computation - not allowed.'
            )
            sys.exit(1)
        roi_stats = None
    elif roi_stats is not None:
        roi_stats, _, _, _, _ = check_stat_methods(roi_stats)
        weight_method_list = None
    else:
        raise ValueError('One of weight_method and roi_stats must be chosen.')

    if params.node_size is not None:
        node_size = int(params.node_size)
    else:
        node_size = None

    print('\nData resampled to {} atlas, '
          ' smoothed at {} with node size {}'
          ''.format(params.atlas, params.smoothing_param, params.node_size))

    atlas_spec, _ = check_atlas(params.atlas)
    # num_procs will be validated inside in the functions using it.

    # TODO should we check atlas compatibility with data for two subjects randomly?
    #  load data for subjects, check atlas parcellation is compatible in size with data

    return sub_id_list_path, input_dir, \
           feature_list, weight_method_list, \
           do_multi_edge, summary_stat, multi_edge_range_out, \
           params.num_bins, params.edge_range, \
           atlas_spec, out_dir, node_size, params.smoothing_param, roi_stats, \
           params.num_procs, params.overwrite_results
示例#7
0
def extract(subject_id_list,
            input_dir,
            base_feature=cfg.default_feature_single_edge,
            weight_method_list=cfg.default_weight_method,
            num_bins=cfg.default_num_bins,
            edge_range=cfg.default_edge_range,
            atlas=cfg.default_atlas,
            smoothing_param=cfg.default_smoothing_param,
            node_size=cfg.default_node_size,
            out_dir=None,
            return_results=False,
            num_procs=cfg.default_num_procs):
    """
    Extracts weighted networks (matrix of pair-wise ROI distances) from gray matter features based on Freesurfer processing.

    Parameters
    ----------
    subject_id_list : str or list
         must be path to a file containing subject IDs, or a list of subject IDs
    input_dir : str
        Path to the input directory where features can be read.
        For example, this can be Freesurfer's SUBJECTS_DIR, where output processing is stored.
        Or another directory with a structure that graynet can parse.
    base_feature : str
        Specific type of feature to read for each subject from the input directory.

    weight_method : string(s), optional
        Type of distance (or metric) to compute between the pair of histograms.

        It must be one of the following methods:

        - 'chebyshev'
        - 'chebyshev_neg'
        - 'chi_square'
        - 'correlate'
        - 'correlate_1'
        - 'cosine'
        - 'cosine_1'
        - 'cosine_2'
        - 'cosine_alt'
        - 'euclidean'
        - 'fidelity_based'
        - 'histogram_intersection'
        - 'histogram_intersection_1'
        - 'jensen_shannon'
        - 'kullback_leibler'
        - 'manhattan'
        - 'minowski'
        - 'noelle_1'
        - 'noelle_2'
        - 'noelle_3'
        - 'noelle_4'
        - 'noelle_5'
        - 'relative_bin_deviation'
        - 'relative_deviation'

        Note only the following are *metrics*:

        - 'manhattan'
        - 'minowski'
        - 'euclidean'
        - 'noelle_2'
        - 'noelle_4'
        - 'noelle_5'

        The following are *semi- or quasi-metrics*:

        - 'kullback_leibler'
        - 'jensen_shannon'
        - 'chi_square'
        - 'chebyshev'
        - 'cosine_1'
        - 'chebyshev_neg'
        - 'correlate_1'
        - 'histogram_intersection_1'
        - 'relative_deviation'
        - 'relative_bin_deviation'
        - 'noelle_1'
        - 'noelle_3'

        The following are  classified to be similarity functions:

        - 'histogram_intersection'
        - 'correlate'
        - 'cosine'
        - 'cosine_2'
        - 'cosine_alt'
        - 'fidelity_based'

        *Default* choice: 'manhattan'.

    num_bins : int
        Number of histogram bins to use when computing pair-wise weights based on histogram distance. Default : 25

    edge_range : tuple or list
        The range of edges (two finite values) within which to build the histogram e.g. ``--edge_range 0 5``.
        This can be helpful (and important) to ensure correspondence across multiple invocations of graynet (e.g. for different subjects), in terms of range across all bins as well as individual bin edges.

        Default :

            - ( 0.0, 5.0) for ``freesurfer_thickness`` and
            - (-0.3, 0.3) for ``freesurfer_curv``.

    atlas : str
        Name of the atlas whose parcellation to be used.
        Choices for cortical parcellation: ['fsaverage', 'glasser2016'], which are primary cortical.
        Volumetric whole-brain atlases will be added soon.

    smoothing_param : scalar
        Smoothing parameter, which could be fwhm for Freesurfer cortical features,
        or another relevant for the chosen base_feature.
        Default: assumed as fwhm=10mm for the default feature choice 'thickness'

    node_size : scalar, optional
        Parameter to indicate the size of the ROIs, subparcels or patches, depending on type of atlas or feature.
        This feature is not implemented yet, just a placeholder and to enable default computation.

    out_dir : str, optional
        Path to output directory to store results.
        Default: None, results are returned, but not saved to disk.
        If this is None, return_results must be true.

    return_results : bool
        Flag to indicate whether to return the results to be returned.
        This flag helps to reduce the memory requirements, when the number of nodes in a parcellation or
        the number of subjects or weight methods are large, as it doesn't retain results for all combinations,
        when running from command line interface (or HPC). Default: False
        If this is False, out_dir must be specified to save the results to disk.

    num_procs : int
        Number of parallel processes to use to speed up computation.

    Returns
    -------
    edge_weights_all : dict, None
        If return_results is True, this will be a dictionary keyed in by a tuple: (weight method, subject_ID)
        The value of each edge_weights_all[(weight method, subject_ID)] is
        a numpy array of length p = k*(k-1)/2, with k = number of nodes in the atlas parcellation.
        If return_results is False, this will be None, which is the default.
    """

    # All the checks must happen here, as this is key function in the API
    check_params_single_edge(base_feature, input_dir, atlas, smoothing_param,
                             node_size, out_dir, return_results)
    atlas, atlas_name = check_atlas(atlas)

    subject_id_list, num_subjects, \
        max_id_width, nd_id = check_subjects(subject_id_list)

    num_bins, edge_range = check_weight_params(num_bins, edge_range)
    weight_method_list, num_weights, \
        max_wtname_width, nd_wm = check_weights(weight_method_list)

    num_procs = check_num_procs(num_procs)
    pretty_print_options = (max_id_width, nd_id, num_weights, max_wtname_width,
                            nd_wm)

    # roi_labels, ctx_annot = freesurfer_roi_labels(atlas)
    # uniq_rois, roi_size, num_nodes = roi_info(roi_labels)

    print('\nProcessing {} features'.format(base_feature))

    if not return_results:
        if out_dir is None:
            raise ValueError(
                'When return_results=False, out_dir must be specified '
                'to be able to save the results.')
        # accept plain-string paths too (docstring documents str),
        # not just pathlib.Path
        out_dir = Path(out_dir)
        if not out_dir.exists():
            out_dir.mkdir(exist_ok=True, parents=True)

    if base_feature in cfg.features_cortical:
        uniq_rois, centroids, roi_labels = roi_labels_centroids(
            atlas, node_size)
        partial_func_extract = partial(extract_per_subject_cortical, input_dir,
                                       base_feature, roi_labels, centroids,
                                       weight_method_list, atlas, atlas_name,
                                       smoothing_param, node_size, num_bins,
                                       edge_range, out_dir, return_results,
                                       pretty_print_options)
    elif base_feature in cfg.features_volumetric:
        uniq_rois, centroids, roi_labels = volumetric_roi_info(atlas)
        partial_func_extract = partial(extract_per_subject_volumetric,
                                       input_dir, base_feature, roi_labels,
                                       centroids, weight_method_list, atlas,
                                       atlas_name, smoothing_param, node_size,
                                       num_bins, edge_range, out_dir,
                                       return_results, pretty_print_options)
    else:
        # BUG FIX: the message had two placeholders but only one argument,
        #   which raised IndexError instead of the intended error
        raise NotImplementedError('Chosen feature {} is not recognized as '
                                  'either cortical or volumetric! Choose one '
                                  'from the following options: {}'
                                  ''.format(base_feature,
                                            cfg.base_feature_list))

    chunk_size = int(np.ceil(num_subjects / num_procs))
    with Manager():
        with Pool(processes=num_procs) as pool:
            edge_weights_list_dicts = pool.map(partial_func_extract,
                                               subject_id_list, chunk_size)

    if return_results:
        edge_weights_all = dict()
        for combo in edge_weights_list_dicts:
            # each element from output of parallel loop is a dict keyed in
            #   by (weight method, subject_ID)
            edge_weights_all.update(combo)
    else:
        edge_weights_all = None

    print('\ngraynet computation done.')
    return edge_weights_all