def get_centrality_by_sparsity(ts_normd, method_option, threshold, block_size):
    '''
    Method to calculate degree/eigenvector centrality via sparsity threshold

    Parameters
    ----------
    ts_normd : ndarray
        timeseries of shape (ntpts x nvoxs) that is normalized; i.e. the data 
        is demeaned and divided by its L2-norm
    method_option : string
        'degree' - degree centrality calculation,
        'eigenvector' - eigenvector centrality calculation
    threshold : float
        sparsity threshold value
    block_size : integer
        the number of rows (voxels) to compute timeseries correlation over
        at any one time

    Returns
    -------
    out_list : list (string, ndarray)
        list of (string,ndarray) elements corresponding to:
        string - the name of the metric
        ndarray - the array of values to be mapped for that metric
    '''

    # Import packages
    import copy
    import numpy as np
    import scipy as sp
    import scipy.sparse  # ensure the sparse submodule is loaded for sp.sparse
    from nipype import logging

    import CPAC.network_centrality.core as core

    # Init variables
    logger = logging.getLogger('workflow')
    out_list = []
    nvoxs = ts_normd.shape[1]
    # Guard against blocks larger than the data
    block_size = min(block_size, nvoxs)

    # Init degree centrality outputs
    if method_option == 'degree':
        degree_binarize = np.zeros(nvoxs, dtype=ts_normd.dtype)
        out_list.append(('degree_centrality_binarize', degree_binarize))
        degree_weighted = np.zeros(nvoxs, dtype=ts_normd.dtype)
        out_list.append(('degree_centrality_weighted', degree_weighted))

    # Init eigenvector centrality outputs
    if method_option == 'eigenvector':
        r_matrix = np.zeros((nvoxs,nvoxs), dtype=ts_normd.dtype)
        # Init output map
        eigen_binarize = np.zeros(nvoxs, dtype=ts_normd.dtype)
        out_list.append(('eigenvector_centrality_binarize', eigen_binarize))
        # Init output map
        eigen_weighted = np.zeros(nvoxs, dtype=ts_normd.dtype)
        out_list.append(('eigenvector_centrality_weighted', eigen_weighted))

    # Get the number of connections to keep
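    # ((nvoxs**2 - nvoxs)/2 counts the unique off-diagonal voxel pairs in a
    # symmetric correlation matrix; we keep the strongest `threshold`
    # fraction of them)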
    sparse_num = int(np.round((nvoxs**2-nvoxs)*threshold/2.0))

    # Prepare to loop through and calculate correlation matrix
    n = 0
    m = block_size
    block_no = 1
    r_value = -1
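    # (r_value starts at -1 so the first block keeps every correlation; once
    # the running top-list fills up, it is raised to the weakest surviving r)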

    # Init wij list
    z = np.array([])
    wij_global = np.rec.fromarrays([z.astype(ts_normd.dtype),
                                    z.astype('int32'),
                                    z.astype('int32')])
    # Form the initial blockwise mask (to only grab upper triangle of data)
    block_triu = np.triu(np.ones((block_size,block_size)), k=1).astype('bool')
    block_rect = np.ones((block_size,nvoxs-block_size), dtype='bool')
    block_mask = np.concatenate((block_triu,block_rect), axis=1)
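    # (row k of block_mask masks out columns <= k within the diagonal block,
    # so each correlation of the symmetric matrix is counted exactly once)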

    # Delete matrices to save memory
    del block_triu, block_rect
    # Calculate correlations step - prune connections for degree
    while n <= nvoxs:
        # First, compute block of correlation matrix
        logger.info('running block %d: rows %d thru %d' % (block_no, n, m-1))
        # Calculate wij over entire matrix by block
        # Do this for both deg and eig, more efficient way to compute r_value
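        # (columns of ts_normd are demeaned and unit-norm, so this dot
        # product is the Pearson correlation of each voxel in [n, m) with
        # every voxel from n onward)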
        rmat_block = np.dot(ts_normd[:,n:m].T,
                            ts_normd[:,n:])
        # Shrink block_mask down on every iteration after the first block
        if n > 0:
            block_mask = block_mask[:,:-block_size]

        # Get elements as an array (trim mask rows in case the final block
        # is smaller than block_size)
        cur_mask = block_mask[:m-n]
        rmat_block = rmat_block[cur_mask]
        thr_idx = np.where(rmat_block >= r_value)
        rmat_block = rmat_block[thr_idx]
        logger.info('number of passing correlations is %d' % len(rmat_block))

        # Add global offset to indices
        idx = np.where(cur_mask)
        i = idx[0][thr_idx].astype('int32') + n
        j = idx[1][thr_idx].astype('int32') + n

        # Free some memory
        del idx, thr_idx
        w_global = np.concatenate([wij_global.f0,rmat_block])
        i_global = np.concatenate([wij_global.f1,i])
        j_global = np.concatenate([wij_global.f2,j])
        # Free some memory
        del i,j,rmat_block
        # Grab indices and weights that pass and combine into list
        wij_global = np.rec.fromarrays([w_global,i_global,j_global])
        # Free some memory
        del w_global, i_global, j_global

        # Pass these into the global set and sort (ascending) by correlation
        logger.info('sorting list...')
        wij_global.sort()
        # And trim list if it's greater than the number of connections we want
        if len(wij_global) > sparse_num:
            wij_global = wij_global[-sparse_num:]
        r_value = wij_global[0][0]
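        # (the weakest surviving correlation; later blocks are pre-pruned
        # against it before sorting)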

        # If we're doing eigen, store block into full matrix
        if method_option == 'eigenvector':
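            # Recompute the full dense row block: rmat_block above was
            # flattened by the upper-triangle mask, but eigenvector
            # centrality needs the complete matrix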
            r_matrix[n:m] = np.dot(ts_normd[:,n:m].T, ts_normd)

        # Move next block start point up to last block finish point
        n = m
        # If we finished at nvoxs last time, break the loop
        if n == nvoxs:
            break
        # Else, if our next block runs over nvoxs, limit it to nvoxs
        elif (m+block_size) > nvoxs:
            m = nvoxs
        # Else, just increment end of next block by block_size
        else:
            m += block_size
        # Increment block number
        block_no += 1

    # Calculate centrality step
    # Degree - use ijw list to create a sparse matrix
    if method_option == 'degree':
        # Create sparse (symmetric) matrix of all correlations that survived
        logger.info('creating sparse matrix')
        # Extract the weights and indices from the global list
        w = wij_global.f0
        i = wij_global.f1
        j = wij_global.f2
        del wij_global
        # Create sparse correlation matrix (upper tri) from wij's
        Rsp = sp.sparse.coo_matrix((w,(i,j)), shape=(nvoxs,nvoxs))
        Rsp = Rsp + Rsp.T
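        # (adding the transpose symmetrizes the upper-triangular matrix; the
        # diagonal is empty, so nothing is double-counted)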
        # And compute degree centrality on compressed row matrix
        Rcsr = Rsp.tocsr()
        del Rsp
        degree_binarize[:] = np.array((Rcsr > 0).sum(axis=0)).flatten()
        degree_weighted[:] = np.array(Rcsr.sum(axis=0)).flatten()
        del Rcsr

    # Eigenvector - compute the r value from entire matrix
    if method_option == 'eigenvector':
        del wij_global
        # Finally compute centrality using full matrix and r_value
        logger.info('...calculating binarized eigenvector')
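        # Have to deepcopy the r_matrix because thresholding overwrites
        # its values via pass-by-reference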
        eigen_binarize[:] = \
            core.eigenvector_centrality(copy.deepcopy(r_matrix), r_value,
                                        method='binarize').squeeze()
        logger.info('...calculating weighted eigenvector')
        eigen_weighted[:] = \
            core.eigenvector_centrality(r_matrix, r_value,
                                        method='weighted').squeeze()
        del r_matrix

    # Return list of outputs
    return out_list
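

# Example usage (a minimal sketch; the shapes and parameter values below are
# illustrative assumptions, not prescriptions):
#   ts_normd : (ntpts x nvoxs) array, demeaned and L2-normalized per column
#   out = get_centrality_by_sparsity(ts_normd, 'degree',
#                                    threshold=0.001, block_size=1000)
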
def get_centrality_by_rvalue(ts_normd, template, method_option, r_value, block_size):
    '''
    Method to calculate degree/eigenvector centrality and lFCD
    via correlation (r-value) threshold

    Parameters
    ----------
    ts_normd : ndarray (float)
        timeseries of shape (ntpts x nvoxs) that is normalized; i.e. the data 
        is demeaned and divided by its L2-norm
    template : ndarray
        three dimensional array with non-zero elements corresponding to the
        indices at which the lFCD metric is analyzed
    method_option : string
        'degree' - degree centrality calculation,
        'eigenvector' - eigenvector centrality calculation,
        'lfcd' - lFCD calculation
    r_value : float
        threshold (as correlation r) value
    block_size : integer
        the number of rows (voxels) to compute timeseries correlation over
        at any one time

    Returns
    -------
    out_list : list (string, ndarray)
        list of (string,ndarray) elements corresponding to:
        string - the name of the metric
        ndarray - the array of values to be mapped for that metric
    '''
    
    # Import packages
    import copy
    import numpy as np
    from nipype import logging

    from CPAC.network_centrality.utils import cluster_data
    import CPAC.network_centrality.core as core

    # Init variables
    logger = logging.getLogger('workflow')
    out_list = []
    nvoxs = ts_normd.shape[1]
    # Guard against blocks larger than the data
    block_size = min(block_size, nvoxs)

    # Init degree centrality outputs
    if method_option == 'degree':
        # Init output map
        degree_binarize = np.zeros(nvoxs, dtype=ts_normd.dtype)
        out_list.append(('degree_centrality_binarize', degree_binarize))
        # Init output map
        degree_weighted = np.zeros(nvoxs, dtype=ts_normd.dtype)
        out_list.append(('degree_centrality_weighted', degree_weighted))
    # Init eigenvector centrality outputs
    if method_option == 'eigenvector':
        r_matrix = np.zeros((nvoxs,nvoxs), dtype=ts_normd.dtype)
        # Init output map
        eigen_binarize = np.zeros(nvoxs, dtype=ts_normd.dtype)
        out_list.append(('eigenvector_centrality_binarize', eigen_binarize))
        # Init output map
        eigen_weighted = np.zeros(nvoxs, dtype=ts_normd.dtype)
        out_list.append(('eigenvector_centrality_weighted', eigen_weighted))
    # Init lFCD outputs
    if method_option == 'lfcd':
        # Voxel coordinates where lFCD is evaluated (template non-zero),
        # computed once here rather than once per block
        xyz_a = np.argwhere(template)
        # Init output map
        lfcd_binarize = np.zeros(nvoxs, dtype=ts_normd.dtype)
        out_list.append(('lfcd_binarize', lfcd_binarize))
        # Init output map
        lfcd_weighted = np.zeros(nvoxs, dtype=ts_normd.dtype)
        out_list.append(('lfcd_weighted', lfcd_weighted))

    # Prepare to loop through and calculate correlation matrix
    n = 0
    m = block_size
    block_no = 1
    # Run as long as our last row index is <= nvoxs
    while m <= nvoxs:
        # First, compute block of correlation matrix
        logger.info('running block %d: rows %d thru %d' % (block_no, n, m-1))
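        # (demeaned, unit-norm columns make this dot product the Pearson
        # correlation of each voxel in [n, m) with every voxel)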
        rmat_block = np.dot(ts_normd[:,n:m].T, ts_normd)

        # Degree centrality calculation
        if method_option == 'degree':
            core.degree_centrality(rmat_block, r_value, method='binarize',
                                   out=degree_binarize[n:m])
            core.degree_centrality(rmat_block, r_value, method='weighted',
                                   out=degree_weighted[n:m])

        # Eigenvector centrality - append global corr. matrix
        if method_option == 'eigenvector':
            r_matrix[n:m] = rmat_block

        # lFCD - perform lFCD algorithm
        if method_option == 'lfcd':
            xyz_a = np.argwhere(template)
            krange = rmat_block.shape[0]
            logger.info('...iterating through seeds in block - lfcd')
            for k in range (0,krange):
                corr_seed = rmat_block[k,:]
                labels = cluster_data(corr_seed, r_value, xyz_a)
                seed_label = labels[n+k]
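                # (cluster_data labels the spatially contiguous clusters of
                # voxels whose correlation with the seed exceeds r_value;
                # binarized lFCD is the size of the seed's own cluster,
                # weighted lFCD the sum of correlations within it)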
                # Binarized lFCD
                if seed_label > 0:
                    lfcd_bin = np.sum(labels==seed_label)
                    lfcd_wght = np.sum(corr_seed*(labels==seed_label))
                else:
                    lfcd_bin = lfcd_wght = 1
                lfcd_binarize[n+k] = lfcd_bin
                lfcd_weighted[n+k] = lfcd_wght

        # Delete block of corr matrix and increment indices
        del rmat_block

        # Move next block start point up to last block finish point
        n = m
        # If we finished at nvoxs last time, break the loop
        if n == nvoxs:
            break
        # Else, if our next block runs over nvoxs, limit it to nvoxs
        elif (m+block_size) > nvoxs:
            m = nvoxs
        # Else, just increment end of next block by block_size
        else:
            m += block_size
        # Increment block number
        block_no += 1

    # Correct for self-correlation in degree centrality
    if method_option == 'degree':
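        # (the diagonal self-correlation of 1.0 always passes the threshold,
        # so every voxel with any connections counted itself once, with
        # weight exactly 1.0)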
        idx = np.where(degree_binarize)
        degree_binarize[idx] = degree_binarize[idx]-1
        idx = np.where(degree_weighted)
        degree_weighted[idx] = degree_weighted[idx]-1

    # Perform eigenvector measures
    if method_option == 'eigenvector':
        logger.info('...calculating binarized eigenvector')
        # Have to deepcopy the r_matrix because thresh and sum overwrites
        # its values via pass-by-reference
        eigen_binarize[:] = \
            core.eigenvector_centrality(copy.deepcopy(r_matrix), r_value,
                                        method='binarize').squeeze()
        logger.info('...calculating weighted eigenvector')
        eigen_weighted[:] = \
            core.eigenvector_centrality(r_matrix, r_value,
                                        method='weighted').squeeze()
        del r_matrix

    # Return list of outputs
    return out_list
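

# Example usage (a minimal sketch; the values below are illustrative
# assumptions, not prescriptions):
#   ts_normd : (ntpts x nvoxs) array, demeaned and L2-normalized per column
#   template : 3D mask array whose non-zero voxels correspond to the columns
#   out = get_centrality_by_rvalue(ts_normd, template, 'lfcd',
#                                  r_value=0.6, block_size=1000)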