Example #1
def get_centrality_by_rvalue(ts_normd, 
                             template, 
                             method_option, 
                             weight_options, 
                             r_value, 
                             block_size):
    '''
    Method to calculate degree/eigenvector centrality and lFCD
    via correlation (r-value) threshold
    
    Parameters
    ----------
    ts_normd : ndarray (float)
        timeseries of shape (ntpts x nvoxs) that is normalized; i.e. the data 
        is demeaned and divided by its L2-norm
    template : ndarray
        three dimensional array with non-zero elements corresponding to the
        indices at which the lFCD metric is analyzed
    method_option : integer
        0 - degree centrality calculation, 
        1 - eigenvector centrality calculation, 
        2 - lFCD calculation
    weight_options : list (boolean)
        weight_options[0] - True or False to perform binary counting
        weight_options[1] - True or False to perform weighted counting
    r_value : float
        the correlation coefficient (r-value) threshold
    block_size : an integer
        the number of rows (voxels) to compute timeseries correlation over
        at any one time
    
    Returns
    -------
    out_list : list (string, ndarray)
        list of (string,ndarray) elements corresponding to:
        string - the name of the metric
        ndarray - the array of values to be mapped for that metric
    '''
    
    # Import packages
    import numpy as np

    from CPAC.network_centrality.utils import cluster_data
    from CPAC.network_centrality.core import (degree_centrality,
                                              eigenvector_centrality)
    
    # Init variables
    out_list = []
    nvoxs = ts_normd.shape[1]
    # ntpts = timeseries.shape[0]
    calc_degree = False
    calc_eigen = False
    calc_lfcd = False
    # Select which method we're going to perform
    if method_option == 0:
        calc_degree = True
    elif method_option == 1:
        calc_eigen = True
    elif method_option == 2:
        calc_lfcd = True
    # Weighting
    out_binarize = weight_options[0]
    out_weighted = weight_options[1]
    
    # Init degree centrality outputs
    if calc_degree:
        # If binary weighting, init output map
        if out_binarize:
            degree_binarize = np.zeros(nvoxs, dtype=ts_normd.dtype)
            out_list.append(('degree_centrality_binarize', degree_binarize))
        # If connection weighting, init output map
        if out_weighted:
            degree_weighted = np.zeros(nvoxs, dtype=ts_normd.dtype)
            out_list.append(('degree_centrality_weighted', degree_weighted))
    # Init eigenvector centrality outputs
    if calc_eigen:
        r_matrix = np.zeros((nvoxs,nvoxs), dtype=ts_normd.dtype)
        # If binary weighting, init output map
        if out_binarize:
            eigen_binarize = np.zeros(nvoxs, dtype=ts_normd.dtype)
            out_list.append(('eigenvector_centrality_binarize', eigen_binarize))
        # If connection weighting, init output map
        if out_weighted:
            eigen_weighted = np.zeros(nvoxs, dtype=ts_normd.dtype)
            out_list.append(('eigenvector_centrality_weighted', eigen_weighted))
    # Init lFCD outputs
    if calc_lfcd:
        # If binary weighting, init output map
        if out_binarize:
            lfcd_binarize = np.zeros(nvoxs, dtype=ts_normd.dtype)
            out_list.append(('lfcd_binarize', lfcd_binarize))
        # If connection weighting, init output map
        if out_weighted:
            lfcd_weighted = np.zeros(nvoxs, dtype=ts_normd.dtype)
            out_list.append(('lfcd_weighted', lfcd_weighted))
    
    # Prepare to loop through and calculate correlation matrix
    n = 0
    m = block_size
    block_no = 1
    
    # Run as long as our last row index is <= nvoxs
    while m <= nvoxs:
        # First, compute block of correlation matrix
        print('running block %d: rows %d thru %d' % (block_no, n, m))
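        # ts_normd is demeaned and L2-normalized along time, so this dot
        # product gives the Pearson correlations between voxels n..m and
        # every voxel in the data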
        rmat_block = np.dot(ts_normd[:,n:m].T, ts_normd)
        
        # Degree centrality calculation
        if calc_degree:
            if weight_options[0]:
                degree_centrality(rmat_block, r_value, method='binarize', 
                                  out=degree_binarize[n:m])
            if weight_options[1]:
                degree_centrality(rmat_block, r_value, method='weighted', 
                                  out=degree_weighted[n:m])
        
        # Eigenvector centrality - append global corr. matrix
        if calc_eigen:
            r_matrix[n:m] = rmat_block
        
        # lFCD - perform lFCD algorithm
        if calc_lfcd:
            xyz_a = np.argwhere(template)
            krange = rmat_block.shape[0]
            print('...iterating through seeds in block - lfcd')
            for k in range(krange):
                corr_seed = rmat_block[k,:]
                labels = cluster_data(corr_seed,r_value,xyz_a)
                seed_label = labels[n+k]
                if out_binarize:
                    if seed_label > 0:
                        lfcd = np.sum(labels==seed_label)
                    else:
                        lfcd = 1
                    lfcd_binarize[n+k] = lfcd
                if out_weighted:
                    if seed_label > 0:
                        lfcd = np.sum(corr_seed*(labels==seed_label))
                    else:
                        lfcd = 1
                    lfcd_weighted[n+k] = lfcd
        
        # Delete block of corr matrix and increment indices
        del rmat_block
        
        # Move next block start point up to last block finish point
        n = m
        # If we finished at nvoxs last time, break the loop
        if n == nvoxs:
            break
        # Else, if our next block runs over nvoxs, limit it to nvoxs
        elif (m+block_size) > nvoxs:
            m = nvoxs
        # Else, just increment end of next block by block_size
        else:
            m += block_size
        # Increment block number
        block_no += 1
    
    # Correct for self-correlation in degree centrality
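    # (each voxel's self-correlation is 1 and so passes the threshold; the
    # subtraction below removes that spurious connection from non-zero voxels)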
    if calc_degree:
        if out_binarize:
            idx = np.where(degree_binarize)
            degree_binarize[idx] = degree_binarize[idx]-1
        if out_weighted:
            idx = np.where(degree_weighted)
            degree_weighted[idx] = degree_weighted[idx]-1
    
    # Perform eigenvector measures
    try:
        if calc_eigen:
            if out_binarize:
                print('...calculating binarized eigenvector')
                eigen_binarize[:] = eigenvector_centrality(r_matrix, 
                                                           r_value, 
                                                           method='binarize').squeeze()
            if out_weighted:
                print('...calculating weighted eigenvector')
                eigen_weighted[:] = eigenvector_centrality(r_matrix, 
                                                           r_value, 
                                                           method='weighted').squeeze()
            del r_matrix
        
    except Exception:
        print('Error in calculating eigenvector centrality')
        raise
    
    # Return list of outputs
    return out_list
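
# Minimal usage sketch for the function above (synthetic data; the sizes,
# r-value and block size are illustrative assumptions, and CPAC itself must
# be importable for the imports inside the function to succeed).
if __name__ == '__main__':
    import numpy as np
    ntpts, nvoxs = 120, 400
    ts = np.random.rand(ntpts, nvoxs).astype(np.float32)
    # Demean and L2-normalize along time so block dot products are Pearson r
    ts = ts - ts.mean(axis=0)
    ts_normd = ts / np.sqrt((ts ** 2).sum(axis=0))
    template = np.ones((10, 10, 4), dtype=int)  # toy mask, only used for lFCD
    outputs = get_centrality_by_rvalue(ts_normd, template,
                                       method_option=0,          # degree
                                       weight_options=[True, True],
                                       r_value=0.6,
                                       block_size=100)
    for name, arr in outputs:
        print('%s: %d voxel values' % (name, arr.size))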
Example #2
def get_centrality_by_rvalue(ts_normd, template, method_option, r_value, block_size):
    '''
    Method to calculate degree/eigenvector centrality and lFCD
    via correlation (r-value) threshold

    Parameters
    ----------
    ts_normd : ndarray (float)
        timeseries of shape (ntpts x nvoxs) that is normalized; i.e. the data 
        is demeaned and divided by its L2-norm
    template : ndarray
        three dimensional array with non-zero elements corresponding to the
        indices at which the lFCD metric is analyzed
    method_option : string
        'degree' - degree centrality calculation, 
        'eigenvector' - eigenvector centrality calculation, 
        'lfcd' - lFCD calculation
    r_value : float
        the correlation coefficient (r-value) threshold
    block_size : an integer
        the number of rows (voxels) to compute timeseries correlation over
        at any one time

    Returns
    -------
    out_list : list (string, ndarray)
        list of (string,ndarray) elements corresponding to:
        string - the name of the metric
        ndarray - the array of values to be mapped for that metric
    '''
    
    # Import packages
    import copy
    import numpy as np
    from nipype import logging

    from CPAC.network_centrality.utils import cluster_data
    import CPAC.network_centrality.core as core

    # Init variables
    logger = logging.getLogger('workflow')
    out_list = []
    nvoxs = ts_normd.shape[1]

    # Init degree centrality outputs
    if method_option == 'degree':
        # Init output map
        degree_binarize = np.zeros(nvoxs, dtype=ts_normd.dtype)
        out_list.append(('degree_centrality_binarize', degree_binarize))
        # Init output map
        degree_weighted = np.zeros(nvoxs, dtype=ts_normd.dtype)
        out_list.append(('degree_centrality_weighted', degree_weighted))
    # Init eigenvector centrality outputs
    if method_option == 'eigenvector':
        r_matrix = np.zeros((nvoxs,nvoxs), dtype=ts_normd.dtype)
        # Init output map
        eigen_binarize = np.zeros(nvoxs, dtype=ts_normd.dtype)
        out_list.append(('eigenvector_centrality_binarize', eigen_binarize))
        # Init output map
        eigen_weighted = np.zeros(nvoxs, dtype=ts_normd.dtype)
        out_list.append(('eigenvector_centrality_weighted', eigen_weighted))
    # Init lFCD outputs
    if method_option == 'lfcd':
        # Init output map
        lfcd_binarize = np.zeros(nvoxs, dtype=ts_normd.dtype)
        out_list.append(('lfcd_binarize', lfcd_binarize))
        # Init output map
        lfcd_weighted = np.zeros(nvoxs, dtype=ts_normd.dtype)
        out_list.append(('lfcd_weighted', lfcd_weighted))

    # Prepare to loop through and calculate correlation matrix
    n = 0
    m = block_size
    block_no = 1
    # Run as long as our last row index is <= nvoxs
    while m <= nvoxs:
        # First, compute block of correlation matrix
        logger.info('running block %d: rows %d thru %d' % (block_no, n, m))
        rmat_block = np.dot(ts_normd[:,n:m].T, ts_normd)

        # Degree centrality calculation
        if method_option == 'degree':
            core.degree_centrality(rmat_block, r_value, method='binarize',
                                   out=degree_binarize[n:m])
            core.degree_centrality(rmat_block, r_value, method='weighted',
                                   out=degree_weighted[n:m])

        # Eigenvector centrality - append global corr. matrix
        if method_option == 'eigenvector':
            r_matrix[n:m] = rmat_block

        # lFCD - perform lFCD algorithm
        if method_option == 'lfcd':
            xyz_a = np.argwhere(template)
            krange = rmat_block.shape[0]
            logger.info('...iterating through seeds in block - lfcd')
            for k in range(krange):
                corr_seed = rmat_block[k,:]
                labels = cluster_data(corr_seed,r_value,xyz_a)
                seed_label = labels[n+k]
                # Binarized and weighted lFCD
                if seed_label > 0:
                    lfcd_bin = np.sum(labels==seed_label)
                    lfcd_wght = np.sum(corr_seed*(labels==seed_label))
                else:
                    lfcd_bin = lfcd_wght = 1
                lfcd_binarize[n+k] = lfcd_bin
                lfcd_weighted[n+k] = lfcd_wght

        # Delete block of corr matrix and increment indices
        del rmat_block

        # Move next block start point up to last block finish point
        n = m
        # If we finished at nvoxs last time, break the loop
        if n == nvoxs:
            break
        # Else, if our next block runs over nvoxs, limit it to nvoxs
        elif (m+block_size) > nvoxs:
            m = nvoxs
        # Else, just increment end of next block by block_size
        else:
            m += block_size
        # Increment block number
        block_no += 1

    # Correct for self-correlation in degree centrality
    if method_option == 'degree':
        idx = np.where(degree_binarize)
        degree_binarize[idx] = degree_binarize[idx]-1
        idx = np.where(degree_weighted)
        degree_weighted[idx] = degree_weighted[idx]-1

    # Perform eigenvector measures
    if method_option == 'eigenvector':
        logger.info('...calculating binarize eigenvector')
        # Have to deepcopy the r_matrix because thresh and sum overwrites
        # its values via pass-by-reference
        eigen_binarize[:] = \
            core.eigenvector_centrality(copy.deepcopy(r_matrix), r_value,
                                        method='binarize').squeeze()
        logger.info('...calculating weighted eigenvector')
        eigen_weighted[:] = \
            core.eigenvector_centrality(r_matrix, r_value,
                                        method='weighted').squeeze()
        del r_matrix

    # Return list of outputs
    return out_list
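
# Minimal usage sketch for the refactored signature above: method_option is
# now a string ('degree', 'eigenvector' or 'lfcd') and both the binarized and
# weighted maps are always produced. Sizes and threshold are illustrative
# assumptions; CPAC and nipype must be importable for the function to run.
if __name__ == '__main__':
    import numpy as np
    ts = np.random.rand(150, 300).astype(np.float32)
    ts = ts - ts.mean(axis=0)
    ts_normd = ts / np.sqrt((ts ** 2).sum(axis=0))
    template = np.ones((10, 10, 3), dtype=int)  # toy lFCD mask
    for name, arr in get_centrality_by_rvalue(ts_normd, template,
                                              method_option='degree',
                                              r_value=0.6, block_size=100):
        print('%s -> %d voxel values' % (name, arr.size))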