Example #1
def main(fname_data,
         path_label,
         method,
         slices,
         levels,
         fname_output,
         labels_user,
         append_csv,
         fname_vertebral_labeling="",
         perslice=1,
         perlevel=1,
         verbose=1,
         combine_labels=True):
    """
    Extract metrics from MRI data based on a mask (could be a single file or a folder containing an atlas)
    :param fname_data: data to extract metric from
    :param path_label: mask: could be a single file or a folder containing an atlas (which contains info_label.txt)
    :param method: {'wa', 'bin', 'ml', 'map'}
    :param slices: Slices of interest. Accepted format:
           "0,1,2,3": slices 0,1,2,3
           "0:3": slices 0,1,2,3
    :param levels: Vertebral levels to extract metrics from. Should be associated with a template
           (e.g. PAM50/template/) or a specified file: fname_vertebral_labeling. Same format as slices.
    :param fname_output:
    :param labels_user:
    :param append_csv: Append to csv file
    :param fname_vertebral_labeling: vertebral labeling to be used with levels
    :param perslice: if user selected several slices, then the function outputs a metric within each slice
           instead of a single average output.
    :param perlevel: if user selected several levels, then the function outputs a metric within each vertebral level
           instead of a single average output.
    :param verbose:
    :param combine_labels: bool: Combine labels into a single value
    :return:
    """

    # check if path_label is a file (e.g., single binary mask) instead of a folder (e.g., SCT atlas structure which
    # contains info_label.txt file)
    if os.path.isfile(path_label):
        # Label is a single file
        indiv_labels_ids = [0]
        indiv_labels_files = [path_label]
        combined_labels_ids = []
        label_struc = {
            0:
            LabelStruc(id=0,
                       name=extract_fname(path_label)[1],
                       filename=path_label)
        }
        # set path_label to empty string, because indiv_labels_files will replace it from now on
        path_label = ''
    elif os.path.isdir(path_label):
        # Labels is an SCT atlas folder structure
        # Parse labels according to the file info_label.txt
        # Note: the "combined_labels_*" variables list single labels that are defined in the section introduced by the
        # keyword "# Keyword=CombinedLabels" in info_label.txt.
        # TODO: redirect to appropriate Sphinx documentation
        # TODO: output Class instead of multiple variables.
        #   Example 1:
        #     label_struc[2].id = (2)
        #     label_struc[2].name = "left fasciculus cuneatus"
        #     label_struc[2].filename = "PAM50_atlas_02.nii.gz"
        #   Example 2:
        #     label_struc[51].id = (1, 2, 3, ..., 29)
        #     label_struc[51].name = "White Matter"
        #     label_struc[51].filename = ""  # no name because it is combined
        indiv_labels_ids, indiv_labels_names, indiv_labels_files, \
            combined_labels_ids, combined_labels_names, combined_labels_id_groups, map_clusters \
            = read_label_file(path_label, param_default.file_info_label)

        label_struc = {}
        # fill IDs for indiv labels
        for i_label in range(len(indiv_labels_ids)):
            label_struc[indiv_labels_ids[i_label]] = LabelStruc(
                id=indiv_labels_ids[i_label],
                name=indiv_labels_names[i_label],
                filename=indiv_labels_files[i_label],
                map_cluster=[
                    indiv_labels_ids[i_label] in map_cluster
                    for map_cluster in map_clusters
                ].index(True))
        # fill IDs for combined labels
        # TODO: problem for defining map_cluster: if labels overlap two regions, e.g. WM and GM (e.g. id=50),
        #  map_cluster will take value 0, which is wrong.
        for i_label in range(len(combined_labels_ids)):
            label_struc[combined_labels_ids[i_label]] = LabelStruc(
                id=combined_labels_id_groups[i_label],
                name=combined_labels_names[i_label],
                map_cluster=[
                    indiv_labels_ids[i_label] in map_cluster
                    for map_cluster in map_clusters
                ].index(True))
    else:
        raise RuntimeError(path_label + ' does not exist')

    # check syntax of labels asked by user
    labels_id_user = check_labels(indiv_labels_ids + combined_labels_ids,
                                  parse_num_list(labels_user))
    nb_labels = len(indiv_labels_files)

    # Load data and systematically reorient to RPI because we need the 3rd dimension to be z
    printv('\nLoad metric image...', verbose)
    input_im = Image(fname_data).change_orientation("RPI")

    data = Metric(data=input_im.data, label='')
    # Load labels
    labels_tmp = np.empty([nb_labels], dtype=object)
    for i_label in range(nb_labels):
        im_label = Image(os.path.join(
            path_label, indiv_labels_files[i_label])).change_orientation("RPI")
        labels_tmp[i_label] = np.expand_dims(
            im_label.data, 3)  # TODO: generalize to 2D input label
    labels = np.concatenate(labels_tmp[:], 3)  # labels: (x,y,z,label)
    # Load vertebral levels
    if levels:
        im_vertebral_labeling = Image(
            fname_vertebral_labeling).change_orientation("RPI")
    else:
        im_vertebral_labeling = None

    # Get dimensions of data and labels
    nx, ny, nz = data.data.shape
    nx_atlas, ny_atlas, nz_atlas, nt_atlas = labels.shape

    # Check dimensions consistency between atlas and data
    if (nx, ny, nz) != (nx_atlas, ny_atlas, nz_atlas):
        printv('\nERROR: Metric data and labels DO NOT HAVE SAME DIMENSIONS.',
               1,
               type='error')

    # Combine individual labels for estimation
    if combine_labels:
        # Add entry with internal ID value (99) which corresponds to combined labels
        label_struc[99] = LabelStruc(id=labels_id_user,
                                     name=','.join(
                                         [str(i) for i in labels_id_user]),
                                     map_cluster=None)
        labels_id_user = [99]

    for id_label in labels_id_user:
        printv('Estimation for label: ' + label_struc[id_label].name, verbose)
        agg_metric = extract_metric(data,
                                    labels=labels,
                                    slices=slices,
                                    levels=levels,
                                    perslice=perslice,
                                    perlevel=perlevel,
                                    vert_level=im_vertebral_labeling,
                                    method=method,
                                    label_struc=label_struc,
                                    id_label=id_label,
                                    indiv_labels_ids=indiv_labels_ids)

        save_as_csv(agg_metric,
                    fname_output,
                    fname_in=fname_data,
                    append=append_csv)
        append_csv = True  # when looping across labels, need to append results in the same file
    display_open(fname_output)
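
# --- Usage sketch (not from the original source) ---
# A minimal, hypothetical call of main() above. Every path and label ID below is an
# assumption for illustration: the metric image, the SCT atlas folder (with its
# info_label.txt) and the vertebral labeling file must exist on disk, and slices/levels
# are given in the "0:3"-style string format described in the docstring.
if __name__ == '__main__':
    main(fname_data='mtr.nii.gz',            # metric image to sample (placeholder)
         path_label='label/atlas',           # SCT atlas folder with info_label.txt (placeholder)
         method='wa',                        # weighted average
         slices='',                          # no slice restriction
         levels='2:5',                       # vertebral levels, same syntax as slices
         fname_output='mtr_in_wm.csv',
         labels_user='51',                   # label ID(s) to extract, parsed by parse_num_list
         append_csv=False,
         fname_vertebral_labeling='label/template/PAM50_levels.nii.gz',
         perslice=0,
         perlevel=1)
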
def get_fractional_volume_per_label(atlas_folder, file_label, nb_RL_labels=15):
    """This function takes as input the path to the folder containing an atlas and the name of the file gathering the
    labels' file name of this atlas.
    It returns, in the following order:
    - a list of the labels' ID,
    - a list of the labels' name,
    - a 1D-numpy array containing the fractional volume of each label in the same order as the previous lists."""

    import sct_extract_metric
    import nibabel
    import numpy

    label_id, label_name, label_file, combined_labels_ids, combined_labels_names, combined_labels_id_groups, _ = read_label_file(
        atlas_folder, file_label)
    nb_label = len(label_file)

    fract_volume_per_lab = numpy.zeros((nb_label))

    # compute fractional volume for each label
    for i_label in range(0, nb_label):
        fract_volume_per_lab[i_label] = numpy.sum(
            nibabel.load(atlas_folder + label_file[i_label]).get_data())

    # gather right and left sides
    # nb_non_RL_labels = nb_label - (2*nb_RL_labels) # number of labels that are not paired side-wise
    fract_volume_per_lab_RL_gathered = numpy.zeros((nb_RL_labels))
    label_name_RL_gathered = []

    for i_label in range(0, nb_RL_labels):
        ind_ID_first_side = label_id.index(i_label)
        ind_ID_other_side = label_id.index(i_label + nb_RL_labels)

        fract_volume_per_lab_RL_gathered[i_label] = fract_volume_per_lab[
            ind_ID_first_side] + fract_volume_per_lab[ind_ID_other_side]
        label_name_RL_gathered.append(label_name[ind_ID_first_side].replace(
            'left', '').replace('right', '').strip())

    # # add labels that are not paired side-wise
    # for i_label in range(0, nb_non_RL_labels):
    #     fract_volume_per_lab_RL_gathered[nb_RL_labels+i_label] = fract_volume_per_lab[2 * nb_RL_labels + i_label]
    #     label_name_RL_gathered.append(label_name[2 * nb_RL_labels + i_label].strip())

    return label_id, label_name, fract_volume_per_lab, label_name_RL_gathered, fract_volume_per_lab_RL_gathered
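
# --- Illustration (not from the original source) ---
# Self-contained toy sketch of the two ideas used above: the "fractional volume" of a
# label is the sum of its partial-volume values, and left/right labels whose IDs differ
# by nb_RL_labels are summed together. The arrays and the nb_RL value are made up.
import numpy as np

toy_labels = {0: np.array([0.2, 0.8, 1.0]),   # "left tract A": partial volume per voxel
              1: np.array([0.1, 0.0, 0.5]),   # "left tract B"
              2: np.array([0.3, 0.7, 0.9]),   # "right tract A" (pairs with ID 0)
              3: np.array([0.0, 0.2, 0.4])}   # "right tract B" (pairs with ID 1)
nb_RL = 2
fract_volume = np.array([toy_labels[i].sum() for i in sorted(toy_labels)])
fract_volume_RL = np.array([fract_volume[i] + fract_volume[i + nb_RL] for i in range(nb_RL)])
print(fract_volume)     # [2.  0.6 1.9 0.6]
print(fract_volume_RL)  # [3.9 1.2]
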
def get_nb_voxel_in_WM(atlas_folder, file_label):
    """This function takes as input the path to the folder containing an atlas and the name of the file gathering the
    labels' file name of this atlas. It returns the number of voxels including at least one label."""

    import sct_extract_metric
    import nibabel
    import numpy

    label_id, label_name, label_file, combined_labels_ids, combined_labels_names, combined_labels_id_groups, _ = read_label_file(
        atlas_folder, file_label)
    nb_label = len(label_file)

    # sum of all the labels
    sum_all_labels = nibabel.load(atlas_folder + label_file[0]).get_data()
    for i_label in range(1, nb_label):
        sum_all_labels = numpy.add(
            sum_all_labels,
            nibabel.load(atlas_folder + label_file[i_label]).get_data())

    # count the number of non-zero voxels
    nb_voxel_in_WM = numpy.count_nonzero(sum_all_labels)

    return nb_voxel_in_WM
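
# --- Illustration (not from the original source) ---
# Toy sketch of the counting strategy used above: add up all label volumes and count
# voxels whose total is non-zero, i.e. voxels covered by at least one label.
# The arrays are made up for illustration.
import numpy as np

mask_a = np.array([[0.0, 0.5],
                   [0.0, 0.0]])
mask_b = np.array([[0.0, 0.5],
                   [1.0, 0.0]])
print(np.count_nonzero(mask_a + mask_b))  # 2: top-right and bottom-left voxels
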
Example #4
def main(argv=None):
    parser = get_parser()
    arguments = parser.parse_args(argv)
    verbose = arguments.v
    set_loglevel(verbose=verbose)

    param_default = Param()

    overwrite = 0  # TODO: Not used. Why?
    fname_data = get_absolute_path(arguments.i)
    path_label = arguments.f
    method = arguments.method
    fname_output = arguments.o
    append_csv = arguments.append
    combine_labels = arguments.combine
    labels_user = arguments.l
    adv_param_user = arguments.param  # TODO: Not used. Why?
    slices = parse_num_list(arguments.z)
    levels = parse_num_list(arguments.vert)
    fname_vertebral_labeling = arguments.vertfile
    perslice = arguments.perslice
    perlevel = arguments.perlevel
    fname_normalizing_label = arguments.norm_file  # TODO: Not used. Why?
    normalization_method = arguments.norm_method  # TODO: Not used. Why?
    label_to_fix = arguments.fix_label  # TODO: Not used. Why?
    fname_output_metric_map = arguments.output_map  # TODO: Not used. Why?
    fname_mask_weight = arguments.mask_weighted  # TODO: Not used. Why?
    discard_negative_values = int(arguments.discard_neg_val)  # TODO: Not used. Why?

    # check if path_label is a file (e.g., single binary mask) instead of a folder (e.g., SCT atlas structure which
    # contains info_label.txt file)
    if os.path.isfile(path_label):
        # Label is a single file
        indiv_labels_ids = [0]
        indiv_labels_files = [path_label]
        combined_labels_ids = []
        label_struc = {0: LabelStruc(id=0,
                                     name=extract_fname(path_label)[1],
                                     filename=path_label)}
        # set path_label to empty string, because indiv_labels_files will replace it from now on
        path_label = ''
    elif os.path.isdir(path_label):
        # Labels is an SCT atlas folder structure
        # Parse labels according to the file info_label.txt
        # Note: the "combined_labels_*" variables list single labels that are defined in the section introduced by the
        # keyword "# Keyword=CombinedLabels" in info_label.txt.
        # TODO: redirect to appropriate Sphinx documentation
        # TODO: output Class instead of multiple variables.
        #   Example 1:
        #     label_struc[2].id = (2)
        #     label_struc[2].name = "left fasciculus cuneatus"
        #     label_struc[2].filename = "PAM50_atlas_02.nii.gz"
        #   Example 2:
        #     label_struc[51].id = (1, 2, 3, ..., 29)
        #     label_struc[51].name = "White Matter"
        #     label_struc[51].filename = ""  # no name because it is combined
        indiv_labels_ids, indiv_labels_names, indiv_labels_files, \
            combined_labels_ids, combined_labels_names, combined_labels_id_groups, map_clusters \
            = read_label_file(path_label, param_default.file_info_label)

        label_struc = {}
        # fill IDs for indiv labels
        for i_label in range(len(indiv_labels_ids)):
            label_struc[indiv_labels_ids[i_label]] = LabelStruc(id=indiv_labels_ids[i_label],
                                                                name=indiv_labels_names[i_label],
                                                                filename=indiv_labels_files[i_label],
                                                                map_cluster=[indiv_labels_ids[i_label] in map_cluster for
                                                                             map_cluster in map_clusters].index(True))
        # fill IDs for combined labels
        # TODO: problem for defining map_cluster: if labels overlap two regions, e.g. WM and GM (e.g. id=50),
        #  map_cluster will take value 0, which is wrong.
        for i_label in range(len(combined_labels_ids)):
            label_struc[combined_labels_ids[i_label]] = LabelStruc(id=combined_labels_id_groups[i_label],
                                                                   name=combined_labels_names[i_label],
                                                                   map_cluster=[indiv_labels_ids[i_label] in map_cluster for
                                                                                map_cluster in map_clusters].index(True))
    else:
        raise RuntimeError(path_label + ' does not exist')

    # check syntax of labels asked by user
    labels_id_user = check_labels(indiv_labels_ids + combined_labels_ids, parse_num_list(labels_user))
    nb_labels = len(indiv_labels_files)

    # Load data and systematically reorient to RPI because we need the 3rd dimension to be z
    printv('\nLoad metric image...', verbose)
    input_im = Image(fname_data).change_orientation("RPI")

    data = Metric(data=input_im.data, label='')
    # Load labels
    labels_tmp = np.empty([nb_labels], dtype=object)
    for i_label in range(nb_labels):
        im_label = Image(os.path.join(path_label, indiv_labels_files[i_label])).change_orientation("RPI")
        labels_tmp[i_label] = np.expand_dims(im_label.data, 3)  # TODO: generalize to 2D input label
    labels = np.concatenate(labels_tmp[:], 3)  # labels: (x,y,z,label)
    # Load vertebral levels
    if not levels:
        fname_vertebral_labeling = None

    # Get dimensions of data and labels
    nx, ny, nz = data.data.shape
    nx_atlas, ny_atlas, nz_atlas, nt_atlas = labels.shape

    # Check dimensions consistency between atlas and data
    if (nx, ny, nz) != (nx_atlas, ny_atlas, nz_atlas):
        printv('\nERROR: Metric data and labels DO NOT HAVE SAME DIMENSIONS.', 1, type='error')

    # Combine individual labels for estimation
    if combine_labels:
        # Add entry with internal ID value (99) which corresponds to combined labels
        label_struc[99] = LabelStruc(id=labels_id_user, name=','.join([str(i) for i in labels_id_user]),
                                     map_cluster=None)
        labels_id_user = [99]

    for id_label in labels_id_user:
        printv('Estimation for label: ' + label_struc[id_label].name, verbose)
        agg_metric = extract_metric(data, labels=labels, slices=slices, levels=levels, perslice=perslice,
                                    perlevel=perlevel, vert_level=fname_vertebral_labeling, method=method,
                                    label_struc=label_struc, id_label=id_label, indiv_labels_ids=indiv_labels_ids)

        save_as_csv(agg_metric, fname_output, fname_in=fname_data, append=append_csv)
        append_csv = True  # when looping across labels, need to append results in the same file
    display_open(fname_output)
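
# --- Usage sketch (not from the original source) ---
# Hypothetical call of main() above with an argv list. The attribute names used in the
# code (arguments.i, .f, .method, .l, .vert, .vertfile, .perlevel, .o) suggest the flags
# below, but the exact flag spelling is defined by get_parser(), which is not shown
# here; all paths are placeholders.
if __name__ == '__main__':
    main(['-i', 'mtr.nii.gz',
          '-f', 'label/atlas',
          '-method', 'wa',
          '-l', '51',
          '-vert', '2:5',
          '-vertfile', 'label/template/PAM50_levels.nii.gz',
          '-perlevel', '1',
          '-o', 'mtr_in_wm.csv'])
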
def get_nb_voxel_in_WM(atlas_folder, file_label):
    """This function takes as input the path to the folder containing an atlas and the name of the file gathering the
    labels' file name of this atlas. It returns the number of voxels including at least one label."""

    import sct_extract_metric
    import nibabel
    import numpy

    label_id, label_name, label_file, combined_labels_ids, combined_labels_names, combined_labels_id_groups, _ = read_label_file(atlas_folder, file_label)
    nb_label = len(label_file)

    # sum of all the labels
    sum_all_labels = nibabel.load(atlas_folder + label_file[0]).get_data()
    for i_label in range(1, nb_label):
        sum_all_labels = numpy.add(sum_all_labels, nibabel.load(atlas_folder + label_file[i_label]).get_data())

    # count the number of non-zero voxels
    nb_voxel_in_WM = numpy.count_nonzero(sum_all_labels)

    return nb_voxel_in_WM
def get_fractional_volume_per_label(atlas_folder, file_label, nb_RL_labels=15):
    """This function takes as input the path to the folder containing an atlas and the name of the file gathering the
    labels' file name of this atlas.
    It returns, in the following order:
    - a list of the labels' ID,
    - a list of the labels' name,
    - a 1D-numpy array containing the fractional volume of each label in the same order as the previous lists."""

    import sct_extract_metric
    import nibabel
    import numpy

    label_id, label_name, label_file, combined_labels_ids, combined_labels_names, combined_labels_id_groups, _ = read_label_file(atlas_folder, file_label)
    nb_label = len(label_file)

    fract_volume_per_lab = numpy.zeros((nb_label))

    # compute fractional volume for each label
    for i_label in range(0, nb_label):
        fract_volume_per_lab[i_label] = numpy.sum(nibabel.load(atlas_folder + label_file[i_label]).get_data())

    # gather right and left sides
    # nb_non_RL_labels = nb_label - (2*nb_RL_labels) # number of labels that are not paired side-wise
    fract_volume_per_lab_RL_gathered = numpy.zeros((nb_RL_labels))
    label_name_RL_gathered = []

    for i_label in range(0, nb_RL_labels):
        ind_ID_first_side = label_id.index(i_label)
        ind_ID_other_side = label_id.index(i_label + nb_RL_labels)

        fract_volume_per_lab_RL_gathered[i_label] = fract_volume_per_lab[ind_ID_first_side] + fract_volume_per_lab[ind_ID_other_side]
        label_name_RL_gathered.append(label_name[ind_ID_first_side].replace('left', '').replace('right', '').strip())

    # # add labels that are not paired side-wise
    # for i_label in range(0, nb_non_RL_labels):
    #     fract_volume_per_lab_RL_gathered[nb_RL_labels+i_label] = fract_volume_per_lab[2 * nb_RL_labels + i_label]
    #     label_name_RL_gathered.append(label_name[2 * nb_RL_labels + i_label].strip())

    return label_id, label_name, fract_volume_per_lab, label_name_RL_gathered, fract_volume_per_lab_RL_gathered
def get_tracts(folder_atlas, zslice=500, num_slice=10):
    """
    Loads tracts in an atlas folder and converts them from .nii.gz format to numpy ndarray
    :param folder_atlas: path to the atlas folder (must contain info_label.txt)
    :param zslice: slice to select for generating the phantom
    :param num_slice: number of slices to extract around zslice
    :return: ndarray (nx, ny, num_slice, nb_tracts)
    """
    # parameters
    file_info_label = 'info_label.txt'
    # read info labels
    indiv_labels_ids, indiv_labels_names, indiv_labels_files, combined_labels_ids, combined_labels_names, combined_labels_id_groups, ml_clusters = read_label_file(
        folder_atlas, file_info_label)

    # fname_tracts = glob.glob(folder_atlas + '/*' + '.nii.gz')
    nb_tracts = np.size(indiv_labels_files)
    # load first file to get dimensions
    im = Image(os.path.join(folder_atlas, indiv_labels_files[0]))
    nx, ny, nz, nt, px, py, pz, pt = im.dim
    # initialize data tracts
    data_tracts = np.zeros([nx, ny, num_slice, nb_tracts])
    # Load each partial volume of each tract
    for i in range(nb_tracts):
        sct.no_new_line_log('Load each atlas label: {}/{}'.format(
            i + 1, nb_tracts))
        # TODO: display counter
        # use integer division so the slice bounds are ints
        data_tracts[:, :, :, i] = Image(
            os.path.join(folder_atlas, indiv_labels_files[i])
        ).data[:, :, zslice - (num_slice // 2):zslice + (num_slice // 2)]
    return data_tracts
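
# --- Usage sketch (not from the original source) ---
# Hypothetical call of get_tracts() above. 'label/atlas' is a placeholder path that
# must contain info_label.txt and the atlas volumes; zslice must lie within the z-range
# of the atlas, and num_slice voxels are taken symmetrically around it, so an even
# num_slice keeps the extracted slab matching the preallocated array.
data_tracts = get_tracts('label/atlas', zslice=500, num_slice=10)
print(data_tracts.shape)  # (nx, ny, num_slice, nb_tracts)
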
def main(fname_data, path_label, method, slices, levels, fname_output, labels_user, append,
         fname_normalizing_label, normalization_method, label_to_fix, adv_param_user, fname_output_metric_map,
         fname_mask_weight, fname_vertebral_labeling="", perslice=1, perlevel=1, discard_negative_values=False,
         verbose=1):
    """
    Extract metrics from MRI data based on a mask (could be a single file or a folder containing an atlas)
    :param fname_data: data to extract metric from
    :param path_label: mask: could be a single file or a folder containing an atlas (which contains info_label.txt)
    :param method:
    :param slices: Slices of interest. Accepted format:
           "0,1,2,3": slices 0,1,2,3
           "0:3": slices 0,1,2,3
    :param levels: Vertebral levels to extract metrics from. Should be associated with a template
           (e.g. PAM50/template/) or a specified file: fname_vertebral_labeling. Same format as slices.
    :param fname_output:
    :param labels_user:
    :param append: Append to csv file
    :param fname_normalizing_label:
    :param normalization_method:
    :param label_to_fix:
    :param adv_param_user:
    :param fname_output_metric_map:
    :param fname_mask_weight:
    :param fname_vertebral_labeling: vertebral labeling to be used with levels
    :param perslice: if user selected several slices, then the function outputs a metric within each slice
           instead of a single average output.
    :param perlevel: if user selected several levels, then the function outputs a metric within each vertebral level
           instead of a single average output.
    :param discard_negative_values: Bool: Discard negative voxels when computing metrics statistics
    :param verbose:
    :return:
    """

    # check if path_label is a file (e.g., single binary mask) instead of a folder (e.g., SCT atlas structure which
    # contains info_label.txt file)
    if os.path.isfile(path_label):
        # Label is a single file
        indiv_labels_ids = [0]
        indiv_labels_names = [path_label]
        indiv_labels_files = [path_label]
        combined_labels_ids = []
        combined_labels_names = []
        combined_labels_id_groups = []
        map_clusters = []
        label_struc = {0: LabelStruc(id=0,
                                     name=sct.extract_fname(path_label)[1],
                                     filename=path_label)}
        # set path_label to empty string, because indiv_labels_files will replace it from now on
        path_label = ''
    elif os.path.isdir(path_label):
        # Labels is an SCT atlas folder structure
        # Parse labels according to the file info_label.txt
        # Note: the "combined_labels_*" variables list single labels that are defined in the section introduced by the
        # keyword "# Keyword=CombinedLabels" in info_label.txt.
        # TODO: redirect to appropriate Sphinx documentation
        # TODO: output Class instead of multiple variables.
        #   Example 1:
        #     label_struc[2].id = (2)
        #     label_struc[2].name = "left fasciculus cuneatus"
        #     label_struc[2].filename = "PAM50_atlas_02.nii.gz"
        #   Example 2:
        #     label_struc[51].id = (1, 2, 3, ..., 29)
        #     label_struc[51].name = "White Matter"
        #     label_struc[51].filename = ""  # no name because it is combined
        indiv_labels_ids, indiv_labels_names, indiv_labels_files, \
        combined_labels_ids, combined_labels_names, combined_labels_id_groups, map_clusters \
            = read_label_file(path_label, param_default.file_info_label)

        label_struc = {}
        # fill IDs for indiv labels
        for i_label in range(len(indiv_labels_ids)):
            label_struc[indiv_labels_ids[i_label]] = LabelStruc(id=indiv_labels_ids[i_label],
                                                                name=indiv_labels_names[i_label],
                                                                filename=indiv_labels_files[i_label],
                                                                map_cluster=[indiv_labels_ids[i_label] in map_cluster for
                                                                             map_cluster in map_clusters].index(True))
        # fill IDs for combined labels
        for i_label in range(len(combined_labels_ids)):
            label_struc[combined_labels_ids[i_label]] = LabelStruc(id=combined_labels_id_groups[i_label],
                                                                   name=combined_labels_names[i_label],
                                                                   map_cluster=[indiv_labels_ids[i_label] in map_cluster for
                                                                                map_cluster in map_clusters].index(True))
    else:
        sct.printv('\nERROR: ' + path_label + ' does not exist.', 1, 'error')

    # check syntax of labels asked by user
    labels_id_user = check_labels(indiv_labels_ids + combined_labels_ids, parse_num_list(labels_user))
    nb_labels = len(indiv_labels_files)

    # Load data and systematically reorient to RPI because we need the 3rd dimension to be z
    sct.printv('\nLoad metric image...', verbose)
    input_im = Image(fname_data).change_orientation("RPI")

    data = Metric(data=input_im.data, label='')
    # Load labels
    labels_tmp = np.empty([nb_labels], dtype=object)
    for i_label in range(nb_labels):
        im_label = Image(os.path.join(path_label, indiv_labels_files[i_label])).change_orientation("RPI")
        labels_tmp[i_label] = np.expand_dims(im_label.data, 3)  # TODO: generalize to 2D input label
    labels = np.concatenate(labels_tmp[:], 3)  # labels: (x,y,z,label)
    # Load vertebral levels
    if levels:
        im_vertebral_labeling = Image(fname_vertebral_labeling).change_orientation("RPI")
    else:
        im_vertebral_labeling = None

    # Get dimensions of data and labels
    nx, ny, nz = data.data.shape
    nx_atlas, ny_atlas, nz_atlas, nt_atlas = labels.shape

    # Check dimensions consistency between atlas and data
    if (nx, ny, nz) != (nx_atlas, ny_atlas, nz_atlas):
        sct.printv('\nERROR: Metric data and labels DO NOT HAVE SAME DIMENSIONS.', 1, type='error')

    for id_label in labels_id_user:
        sct.printv('Estimation for label: '+label_struc[id_label].name, verbose)
        agg_metric = extract_metric(data, labels=labels, slices=slices, levels=levels, perslice=perslice,
                                    perlevel=perlevel, vert_level=im_vertebral_labeling, method=method,
                                    label_struc=label_struc, id_label=id_label, indiv_labels_ids=indiv_labels_ids)

        save_as_csv(agg_metric, fname_output, fname_in=fname_data, append=append)
        append = True  # when looping across labels, need to append results in the same file
    sct.display_open(fname_output)
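
# --- Usage sketch (not from the original source) ---
# Hypothetical call of the older main() signature above. Every path is a placeholder
# and the optional inputs (normalization, label fixing, weighting mask, metric map
# output) are simply disabled; slices and levels use the "0:3"-style strings described
# in the docstring.
if __name__ == '__main__':
    main(fname_data='mtr.nii.gz', path_label='label/atlas', method='wa',
         slices='', levels='2:5', fname_output='mtr_in_wm.csv',
         labels_user='51', append=False,
         fname_normalizing_label=None, normalization_method=None,
         label_to_fix=None, adv_param_user=None,
         fname_output_metric_map=None, fname_mask_weight=None,
         fname_vertebral_labeling='label/template/PAM50_levels.nii.gz',
         perslice=0, perlevel=1, discard_negative_values=False, verbose=1)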