def main():

    # Get parser info
    parser = get_parser()
    arguments = parser.parse(sys.argv[1:])
    fname_data = arguments['-i']
    fname_mask = arguments['-m']
    vert_label_fname = arguments["-vertfile"]
    vert_levels = arguments["-vert"]
    slices_of_interest = arguments["-z"]
    index_vol = arguments['-vol']
    method = arguments["-method"]

    # Load data and orient to RPI
    data = Image(fname_data).change_orientation('RPI').data
    mask = Image(fname_mask).change_orientation('RPI').data

    # Fetch slices to compute SNR on
    slices_list = []
    if vert_levels != 'None':
        list_levels = parse_num_list(vert_levels)
        im_vertlevel = Image(vert_label_fname).change_orientation('RPI')
        for level in list_levels:
            slices_list.append(get_slices_from_vertebral_levels(im_vertlevel, level))
        if slices_list == []:
            sct.log.error('The specified vertebral levels are not in the vertebral labeling file.')
        else:
            slices_list = functools.reduce(operator.add, slices_list)  # flatten
            slices_list.sort()
    elif slices_of_interest != 'None':
        slices_list = parse_num_list(slices_of_interest)
    else:
        slices_list = np.arange(data.shape[2]).tolist()

    # Set to 0 all slices in the mask that are not included in slices_list
    nz_to_exclude = [i for i in range(mask.shape[2]) if i not in slices_list]
    mask[:, :, nz_to_exclude] = 0

    # if the user selected all 3D volumes from the input 4D volume ("-vol -1"), then use all volume indices
    if index_vol[0] == -1:
        index_vol = range(data.shape[3])

    # Get signal and noise (both estimators are illustrated in the sketch after this function)
    indexes_roi = np.where(mask == 1)
    if method == 'mult':
        # get voxels within the ROI, yielding a 2D matrix of shape (n_voxels_in_roi, t)
        data_in_roi = data[indexes_roi]
        # compute signal by averaging across space and time, and the temporal STD for each voxel
        signal = np.mean(data_in_roi[:, index_vol])
        std_input_temporal = np.std(data_in_roi[:, index_vol], 1)
        noise = np.mean(std_input_temporal)
    elif method == 'diff':
        # if user did not select two volumes, then exit with error
        if len(index_vol) != 2:
            sct.printv('ERROR: ' + str(len(index_vol)) + ' volumes were specified. Method "diff" should be used with '
                                                         'exactly two volumes (check flag "vol").', 1, 'error')
        data_1 = data[:, :, :, index_vol[0]]
        data_2 = data[:, :, :, index_vol[1]]
        # compute voxel-average of voxelwise sum
        signal = np.mean(np.add(data_1[indexes_roi], data_2[indexes_roi]))
        # compute voxel-STD of the voxelwise subtraction, multiplied by sqrt(2) as described in equation 7 of Dietrich et al.
        noise = np.std(np.subtract(data_1[indexes_roi], data_2[indexes_roi])) * np.sqrt(2)

    # compute SNR
    SNR = signal / noise

    # Display result
    sct.printv('\nSNR_' + method + ' = ' + str(SNR) + '\n', type='info')
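

# ----------------------------------------------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): the two SNR estimators used in main(), applied to a small
# synthetic 4D array. The array shape, ROI and noise level are assumptions made for the demonstration; only numpy
# is required.
def _demo_snr_estimators():
    import numpy as np
    rng = np.random.default_rng(0)
    # synthetic 4D data (x, y, z, t): constant signal of 100 plus Gaussian noise with STD 5
    data = 100. + rng.normal(0., 5., size=(8, 8, 4, 10))
    # binary ROI mask covering the central voxels of every slice
    mask = np.zeros((8, 8, 4))
    mask[2:6, 2:6, :] = 1
    data_in_roi = data[np.where(mask == 1)]  # shape: (n_voxels_in_roi, t)
    # 'mult' method: mean signal divided by the average temporal STD
    snr_mult = np.mean(data_in_roi) / np.mean(np.std(data_in_roi, 1))
    # 'diff' method (Dietrich et al., eq. 7): mean of the sum of two volumes divided by sqrt(2) * STD of their difference
    snr_diff = np.mean(data_in_roi[:, 0] + data_in_roi[:, 1]) / (np.sqrt(2) * np.std(data_in_roi[:, 0] - data_in_roi[:, 1]))
    return snr_mult, snr_diff
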
def aggregate_per_slice_or_level(metric,
                                 mask=None,
                                 slices=[],
                                 levels=[],
                                 perslice=None,
                                 perlevel=False,
                                 vert_level=None,
                                 group_funcs=(('MEAN', func_wa), ),
                                 map_clusters=None):
    """
    The aggregation will be performed along the last dimension of 'metric' ndarray.

    :param metric: Class Metric(): data to aggregate.
    :param mask: Class Metric(): mask to use for aggregating the data. Optional.
    :param slices: List[int]: Slices to aggregate metric from. If empty, select all slices.
    :param levels: List[int]: Vertebral levels to aggregate metric from. It has priority over "slices".
    :param Bool perslice: Aggregate per slice (True) or across slices (False)
    :param Bool perlevel: Aggregate per level (True) or across levels (False). Has priority over "perslice".
    :param vert_level: Vertebral level. Could be either an Image or a file name.
    :param tuple group_funcs: Name and function to apply on metric. Example: (('MEAN', func_wa),). Note: the function
      has special requirements in terms of i/o. See the definition of func_wa and use it as a template.
    :param map_clusters: list of list of int: See func_map()
    :return: Aggregated metric (a minimal usage sketch follows this function definition)
    """
    # If user neither specified slices nor levels, set perslice=True, otherwise, the output will likely contain nan
    # because in many cases the segmentation does not span the whole I-S dimension.
    if perslice is None:
        if not slices and not levels:
            perslice = True
        else:
            perslice = False

    # if slices is empty, select all available slices from the metric
    ndim = metric.data.ndim
    if not slices:
        slices = range(metric.data.shape[ndim - 1])

    # aggregation based on levels
    if levels:
        im_vert_level = Image(vert_level).change_orientation('RPI')
        # slicegroups = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
        slicegroups = [
            tuple(get_slices_from_vertebral_levels(im_vert_level, level))
            for level in levels
        ]
        if perlevel:
            # vertgroups = [(2,), (3,), (4,)]
            vertgroups = [tuple([level]) for level in levels]
        elif perslice:
            # slicegroups = [(0,), (1,), (2,), (3,), (4,), (5,), (6,), (7,), (8,)]
            slicegroups = [
                tuple([i])
                for i in functools.reduce(operator.concat, slicegroups)
            ]  # reduce to individual tuple
            # vertgroups = [(2,), (2,), (2,), (3,), (3,), (3,), (4,), (4,), (4,)]
            vertgroups = [
                tuple([get_vertebral_level_from_slice(im_vert_level, i[0])])
                for i in slicegroups
            ]
        # output aggregate metric across levels
        else:
            # slicegroups = [(0, 1, 2, 3, 4, 5, 6, 7, 8)]
            slicegroups = [
                tuple([val for sublist in slicegroups for val in sublist])
            ]  # flatten into single tuple
            # vertgroups = [(2, 3, 4)]
            vertgroups = [tuple([level for level in levels])]
    # aggregation based on slices
    else:
        vertgroups = None
        if perslice:
            # slicegroups = [(0,), (1,), (2,), (3,), (4,), (5,), (6,), (7,), (8,)]
            slicegroups = [tuple([i]) for i in slices]
        else:
            # slicegroups = [(0, 1, 2, 3, 4, 5, 6, 7, 8)]
            slicegroups = [tuple(slices)]
    agg_metric = dict((slicegroup, dict()) for slicegroup in slicegroups)

    # loop across slice group
    for slicegroup in slicegroups:
        # add level info
        if vertgroups is None:
            agg_metric[slicegroup]['VertLevel'] = None
        else:
            agg_metric[slicegroup]['VertLevel'] = vertgroups[slicegroups.index(
                slicegroup)]
        # Loop across functions (e.g.: MEAN, STD)
        for (name, func) in group_funcs:
            try:
                data_slicegroup = metric.data[
                    ..., slicegroup]  # selection is done in the last dimension
                if mask is not None:
                    mask_slicegroup = mask.data[..., slicegroup, :]
                    agg_metric[slicegroup]['Label'] = mask.label
                    # Add the mask size (sum of mask values, in voxels)
                    agg_metric[slicegroup]['Size [vox]'] = np.sum(
                        mask_slicegroup.flatten())
                else:
                    mask_slicegroup = np.ones(data_slicegroup.shape)
                # Ignore nonfinite values
                i_nonfinite = np.where(~np.isfinite(data_slicegroup))
                data_slicegroup[i_nonfinite] = 0.
                # TODO: the lines below could probably be done more elegantly
                if mask_slicegroup.ndim == data_slicegroup.ndim + 1:
                    arr_tmp_concat = []
                    for i in range(mask_slicegroup.shape[-1]):
                        arr_tmp = np.reshape(mask_slicegroup[..., i],
                                             data_slicegroup.shape)
                        arr_tmp[i_nonfinite] = 0.
                        arr_tmp_concat.append(
                            np.expand_dims(arr_tmp,
                                           axis=(mask_slicegroup.ndim - 1)))
                    mask_slicegroup = np.concatenate(
                        arr_tmp_concat, axis=(mask_slicegroup.ndim - 1))
                else:
                    mask_slicegroup[i_nonfinite] = 0.
                # Make sure the number of voxels to extract the metric from is not zero
                if mask_slicegroup.sum() == 0:
                    result = None
                else:
                    # Run estimation
                    result, _ = func(data_slicegroup, mask_slicegroup,
                                     map_clusters)
                    # check if nan
                    if np.isnan(result):
                        result = None
                # here we create a field with name: FUNC(METRIC_NAME). Example: MEAN(CSA)
                agg_metric[slicegroup]['{}({})'.format(name,
                                                       metric.label)] = result
            except Exception as e:
                logging.warning(e)
                agg_metric[slicegroup]['{}({})'.format(name,
                                                       metric.label)] = str(e)
    return agg_metric
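

# ----------------------------------------------------------------------------------------------------------------
# Illustrative usage sketch of aggregate_per_slice_or_level(). The real Metric class and func_wa live elsewhere in
# the codebase; the minimal stand-ins below are assumptions made for the example and only mimic the .data/.label
# attributes and the func(data, mask, map_clusters) -> (result, _) contract relied upon above.
def _demo_aggregate_per_slice_or_level():
    import numpy as np

    class _FakeMetric:
        # minimal stand-in exposing the .data and .label attributes used by aggregate_per_slice_or_level()
        def __init__(self, data, label):
            self.data = data
            self.label = label

    def _func_wa_like(data, mask, map_clusters=None):
        # weighted average, following the i/o contract expected for group_funcs
        return np.average(data, weights=mask), None

    # one metric value per slice, for 5 slices
    metric = _FakeMetric(data=np.array([10., 11., 12., 13., 14.]), label='CSA')
    # aggregate across slices 1-3 (perslice defaults to False because slices are specified)
    agg = aggregate_per_slice_or_level(metric, slices=[1, 2, 3], group_funcs=(('MEAN', _func_wa_like),))
    # agg is a dict keyed by slice group, e.g.: {(1, 2, 3): {'VertLevel': None, 'MEAN(CSA)': 12.0}}
    return agg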