Example 1
def test_fast_on_real_data():
    from pandas import read_table
    from os import path as op

    k = 200
    subdir = "/home2/data/Projects/CWAS/share/nki/subinfo/40_Set1_N104"
    ffile = op.join(subdir, "short_compcor_rois_random_k%04i.txt" % k)
    fpaths = read_table(ffile, header=None)
    fpath = fpaths.iloc[0, 0]  # pandas removed .ix; .iloc keeps the positional lookup

    import nibabel as nib
    img = nib.load(fpath)
    dat = img.get_data()

    import numpy as np
    from CPAC.cwas.subdist import norm_cols
    norm_dat = norm_cols(dat)
    corr_dat = norm_dat.T.dot(norm_dat)

    # eigenvector_centrality and fast_eigenvector_centrality are assumed to
    # live in CPAC.network_centrality.core; ok_ comes from nose.tools
    from CPAC.network_centrality.core import eigenvector_centrality, \
                                             fast_eigenvector_centrality
    from nose.tools import ok_

    ref = eigenvector_centrality(corr_dat)
    comp = fast_eigenvector_centrality(norm_dat)

    diff = np.abs(ref - comp).mean()  # mean diff
    print(diff)

    ok_(diff < np.spacing(1e10))  # allow some differences
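This test, like the ones that follow, leans on the same identity: once each column of a time-series matrix is demeaned and L2-normalized, a plain dot product yields the Pearson correlation matrix. A minimal, self-contained sketch of that identity (norm_cols_demo is a hypothetical stand-in; CPAC's norm_cols is assumed to behave this way):

import numpy as np

def norm_cols_demo(x):
    # Demean and L2-normalize each column so that x.T @ x gives Pearson r
    x = x - x.mean(axis=0)
    return x / np.sqrt((x ** 2).sum(axis=0))

rng = np.random.default_rng(0)
ts = rng.standard_normal((100, 5))           # 100 timepoints x 5 voxels
norm_ts = norm_cols_demo(ts)
corr = norm_ts.T.dot(norm_ts)                # 5 x 5 correlation matrix
assert np.allclose(corr, np.corrcoef(ts.T))  # matches numpy's Pearson r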
Example 2
def test_degree_on_real_data():
    # TODO: Replace the mask and func with a standard testing one
    import nibabel as nib  # imported before first use (it was originally imported too late)
    import numpy as np
    from CPAC.cwas.subdist import norm_cols
    # degree_centrality is assumed to live in CPAC.network_centrality.core;
    # numpy.testing.assert_equal handles array comparison
    from CPAC.network_centrality.core import degree_centrality
    from numpy.testing import assert_equal

    mpath = "/home2/data/Projects/CPAC_Regression_Test/centrality_template/mask-thr50-3mm.nii.gz"
    mask_inds = nib.load(mpath).get_data().nonzero()

    fpath = "/home/data/Projects/CPAC_Regression_Test/2014-02-24_v-0-3-4/run/w/resting_preproc_0010042_session_1/_scan_rest_1_rest/_scan_rest_1_rest/_csf_threshold_0.98/_gm_threshold_0.7/_wm_threshold_0.98/_compcor_ncomponents_5_selector_pc10.linear1.wm0.global0.motion1.quadratic0.gm0.compcor1.csf0/_bandpass_freqs_0.009.0.1/_mask_mask-thr50-3mm/resample_functional_to_template_0/bandpassed_demeaned_filtered_wtsimt_flirt.nii.gz"

    img = nib.load(fpath)
    dat = img.get_data()
    dat = dat[mask_inds]

    # Column-normalize so the dot product yields Pearson correlations
    norm_dat = norm_cols(dat.T)
    corr_dat = norm_dat.T.dot(norm_dat)

    r_value = 0.2

    # Binarized degree: count of supra-threshold correlations per voxel
    ref = np.sum(corr_dat[:5, :5] > r_value, axis=1)
    comp = degree_centrality(corr_dat[:5, :5], r_value, "binarize")
    assert_equal(ref, comp)

    # Weighted degree: sum of supra-threshold correlation values per voxel
    ref = np.sum(corr_dat * (corr_dat > r_value), axis=1)
    comp = degree_centrality(corr_dat, r_value, "weighted")
    assert_equal(ref, comp)
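The two reference expressions in this test double as a specification for degree_centrality itself. A pure-numpy sketch of that presumed behavior (degree_centrality_demo is hypothetical and for illustration only; the real implementation also supports blockwise output buffers, as Example 12 shows):

import numpy as np

def degree_centrality_demo(corr, r_value, method):
    # Binarized degree counts supra-threshold correlations per voxel;
    # weighted degree sums the supra-threshold correlation values.
    if method == "binarize":
        return np.sum(corr > r_value, axis=1)
    return np.sum(corr * (corr > r_value), axis=1)

rng = np.random.default_rng(1)
ts = rng.standard_normal((50, 10))
ts -= ts.mean(axis=0)
ts /= np.sqrt((ts ** 2).sum(axis=0))   # column-normalize, as norm_cols does
corr = ts.T.dot(ts)
print(degree_centrality_demo(corr, 0.2, "binarize"))
print(degree_centrality_demo(corr, 0.2, "weighted"))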
Example 3
def test_degree_on_real_data():
    # TODO: Replace the mask and func with a standard testing one
    import nibabel as nib  # imported before first use (it was originally imported too late)
    import numpy as np
    from CPAC.cwas.subdist import norm_cols
    # degree_centrality is assumed to live in CPAC.network_centrality.core;
    # numpy.testing.assert_equal handles array comparison
    from CPAC.network_centrality.core import degree_centrality
    from numpy.testing import assert_equal

    mpath = "/home2/data/Projects/CPAC_Regression_Test/centrality_template/mask-thr50-3mm.nii.gz"
    mask_inds = nib.load(mpath).get_data().nonzero()

    fpath   = "/home/data/Projects/CPAC_Regression_Test/2014-02-24_v-0-3-4/run/w/resting_preproc_0010042_session_1/_scan_rest_1_rest/_scan_rest_1_rest/_csf_threshold_0.98/_gm_threshold_0.7/_wm_threshold_0.98/_compcor_ncomponents_5_selector_pc10.linear1.wm0.global0.motion1.quadratic0.gm0.compcor1.csf0/_bandpass_freqs_0.009.0.1/_mask_mask-thr50-3mm/resample_functional_to_template_0/bandpassed_demeaned_filtered_wtsimt_flirt.nii.gz"

    img = nib.load(fpath)
    dat = img.get_data()
    dat = dat[mask_inds]

    norm_dat = norm_cols(dat.T)
    corr_dat = norm_dat.T.dot(norm_dat)

    r_value = 0.2

    ref     = np.sum(corr_dat[:5, :5] > r_value, axis=1)
    comp    = degree_centrality(corr_dat[:5, :5], r_value, "binarize")
    assert_equal(ref, comp)

    ref     = np.sum(corr_dat * (corr_dat > r_value), axis=1)
    comp    = degree_centrality(corr_dat, r_value, "weighted")
    assert_equal(ref, comp)
Example 4
def test_fast_on_real_data():
    from pandas import read_table
    from os import path as op
    
    k       = 200
    subdir  = "/home2/data/Projects/CWAS/share/nki/subinfo/40_Set1_N104"
    ffile   = op.join(subdir, "short_compcor_rois_random_k%04i.txt" % k)
    fpaths  = read_table(ffile, header=None)
    fpath   = fpaths.iloc[0, 0]  # pandas removed .ix; .iloc keeps the positional lookup

    import nibabel as nib
    img = nib.load(fpath)
    dat = img.get_data()

    import numpy as np
    from CPAC.cwas.subdist import norm_cols
    norm_dat = norm_cols(dat)
    corr_dat = norm_dat.T.dot(norm_dat)
    
    # eigenvector_centrality and fast_eigenvector_centrality are assumed to
    # live in CPAC.network_centrality.core; ok_ comes from nose.tools
    from CPAC.network_centrality.core import eigenvector_centrality, \
                                             fast_eigenvector_centrality
    from nose.tools import ok_

    ref    = eigenvector_centrality(corr_dat)
    comp   = fast_eigenvector_centrality(norm_dat)
    
    diff = np.abs(ref-comp).mean()  # mean diff
    print(diff)
    
    ok_(diff < np.spacing(1e10)) # allow some differences
Example 5
def test_fast_eigenvector_centrality(ntpts=100, nvoxs=1000):
    print "testing fast_eigenvector_centrality"
    
    # Simulate Data
    import numpy as np
    from CPAC.cwas.subdist import norm_cols
    # Normalize Random Time-Series Data
    m = np.random.random((ntpts,nvoxs))
    m = norm_cols(m)
    # Correlation Data with Range 0-1
    mm = m.T.dot(m) # note that need to generate connectivity matrix here
    
    # Execute
    # eigenvector_centrality and fast_eigenvector_centrality are assumed to
    # live in CPAC.network_centrality.core; ok_ comes from nose.tools
    from CPAC.network_centrality.core import eigenvector_centrality, \
                                             fast_eigenvector_centrality
    from nose.tools import ok_

    ref  = eigenvector_centrality(mm, verbose=False)  # we need to transform mm to be a distance
    comp = fast_eigenvector_centrality(m, verbose=False)
    
    diff = np.abs(ref-comp).mean()  # mean diff
    print(diff)
    
    ok_(diff < np.spacing(1e2)) # allow minimal difference
Example 6
def test_fast_eigenvector_centrality(ntpts=100, nvoxs=1000):
    print "testing fast_eigenvector_centrality"

    # Simulate Data
    import numpy as np
    from CPAC.cwas.subdist import norm_cols
    # Normalize Random Time-Series Data
    m = np.random.random((ntpts, nvoxs))
    m = norm_cols(m)
    # Correlation Data with Range 0-1
    mm = m.T.dot(m)  # note that need to generate connectivity matrix here

    # Execute
    # eigenvector_centrality and fast_eigenvector_centrality are assumed to
    # live in CPAC.network_centrality.core; ok_ comes from nose.tools
    from CPAC.network_centrality.core import eigenvector_centrality, \
                                             fast_eigenvector_centrality
    from nose.tools import ok_

    ref = eigenvector_centrality(
        mm, verbose=False)  # we need to transform mm to be a distance
    comp = fast_eigenvector_centrality(m, verbose=False)

    diff = np.abs(ref - comp).mean()  # mean diff
    print(diff)

    ok_(diff < np.spacing(1e2))  # allow minimal difference
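What makes fast_eigenvector_centrality fast is that the leading eigenvector of C = m.T.dot(m) can be obtained by power iteration through m alone, so the nvoxs x nvoxs correlation matrix never has to be materialized. A sketch of that idea (an illustration of the principle, not CPAC's actual implementation):

import numpy as np

def fast_eigen_demo(m, n_iter=500):
    # Power iteration on C = m.T @ m, factored through m:
    # each step costs O(ntpts * nvoxs) instead of O(nvoxs ** 2).
    v = np.ones(m.shape[1]) / np.sqrt(m.shape[1])
    for _ in range(n_iter):
        w = m.T.dot(m.dot(v))         # equals C @ v without forming C
        v = w / np.linalg.norm(w)
    return np.abs(v)                  # report magnitudes, sign is arbitrary

rng = np.random.default_rng(2)
m = rng.standard_normal((100, 500))
m -= m.mean(axis=0)
m /= np.sqrt((m ** 2).sum(axis=0))    # column-normalize, as norm_cols does

v_fast = fast_eigen_demo(m)
_, vecs = np.linalg.eigh(m.T.dot(m))  # explicit-matrix reference
print(np.abs(v_fast - np.abs(vecs[:, -1])).mean())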
Example 7
def calc_centrality(datafile,
                    template,
                    method_option,
                    threshold_option,
                    threshold,
                    weight_options,
                    allocated_memory):
    '''
    Method to calculate centrality and map the results to a nifti file
    
    Parameters
    ----------
    datafile : string (nifti file)
        path to subject data file
    template : string (nifti file)
        path to mask/parcellation unit
    method_option : integer
        0 - degree centrality, 1 - eigenvector centrality, 2 - lFCD
    threshold_option : an integer
        0 for probability p_value, 1 for sparsity threshold, 
        2 for actual threshold value, and 3 for no threshold and fast approach
    threshold : a float
        pvalue/sparsity_threshold/threshold value
    weight_options : list (boolean)
        list of booleans, where, weight_options[0] corresponds to binary counting 
        and weight_options[1] corresponds to weighted counting (e.g. [True,False]) 
    allocated_memory : string
        amount of memory allocated to degree centrality
    
    Returns
    -------
    out_list : list
        list containing out mapped centrality images
    '''
    
    # Import packages
    from CPAC.network_centrality import load,\
                                        get_centrality_by_rvalue,\
                                        get_centrality_by_sparsity,\
                                        get_centrality_fast,\
                                        map_centrality_matrix,\
                                        calc_blocksize,\
                                        convert_pvalue_to_r
    from CPAC.cwas.subdist import norm_cols
    
    # Check for input errors
    if weight_options.count(True) == 0:
        raise Exception("Invalid values in weight options: "
                        "at least one True value is required")
    # If it's sparsity thresholding, check for (0,1]
    if threshold_option == 1:
        if threshold <= 0 or threshold > 1:
            raise Exception('Threshold value must be greater than 0 and less '
                            'than or equal to 1.\nCurrently it is set at %f'
                            % threshold)
    if method_option == 2 and threshold_option != 2:
        raise Exception('lFCD must use correlation-type thresholding. '
                        'Check that the pipeline configuration has this setting.')
    import time
    start = time.perf_counter()  # time.clock() was removed in Python 3.8
    
    # Init variables
    out_list = []
    ts, aff, mask, t_type, scans = load(datafile, template)
    
    # If we're doing degree centrality with sparsity thresholding
    if method_option == 0 and threshold_option == 1:
        block_size = calc_blocksize(ts, memory_allocated=allocated_memory,
                                    sparsity_thresh=threshold)
    # Eigenvector centrality needs the entire correlation matrix
    elif method_option == 1:
        block_size = calc_blocksize(ts, memory_allocated=allocated_memory,
                                    include_full_matrix=True)
    # Otherwise, compute blocksize with regards to available memory
    else:
        block_size = calc_blocksize(ts, memory_allocated=allocated_memory,
                                    include_full_matrix=False)
    # Normalize the timeseries for easy dot-product correlation calc.
    ts_normd = norm_cols(ts.T)
    
    # P-value threshold centrality
    if threshold_option == 0:
        r_value = convert_pvalue_to_r(scans, threshold)
        centrality_matrix = get_centrality_by_rvalue(ts_normd, 
                                                     mask, 
                                                     method_option, 
                                                     weight_options, 
                                                     r_value, 
                                                     block_size)
    # Sparsity threshold
    elif threshold_option == 1:
        centrality_matrix = get_centrality_by_sparsity(ts_normd, 
                                                       method_option, 
                                                       weight_options, 
                                                       threshold, 
                                                       block_size)
    # R-value threshold centrality
    elif threshold_option == 2:
        centrality_matrix = get_centrality_by_rvalue(ts_normd, 
                                                     mask, 
                                                     method_option, 
                                                     weight_options, 
                                                     threshold, 
                                                     block_size)
    # For fast approach (no thresholding)
    elif threshold_option == 3:
        centrality_matrix = get_centrality_fast(ts, method_option)
    # Otherwise, incorrect input for threshold_option
    else:
        raise Exception('Option must be between 0-3 and not %s, check your '\
                        'pipeline config file' % str(threshold_option))
    
    # Print timing info
    print('Timing:', time.perf_counter() - start)
 
    # Map the arrays back to images
    for mat in centrality_matrix:
        centrality_image = map_centrality_matrix(mat, aff, mask, t_type)
        out_list.append(centrality_image)
    
    # Finally return
    return out_list
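A hypothetical invocation, matching the integer-coded options documented above (the paths, memory figure, and import location are placeholders):

# assuming calc_centrality is importable from its defining module
out_images = calc_centrality('/path/to/func_preproc.nii.gz',  # datafile (placeholder)
                             '/path/to/mask.nii.gz',          # template (placeholder)
                             0,                               # 0 = degree centrality
                             2,                               # 2 = actual r-value threshold
                             0.6,                             # correlation threshold
                             [True, True],                    # binarized and weighted maps
                             '2.0')                           # allocated memory (string)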
Example 8
def get_centrality_fast(timeseries,
                        method_options):
    '''
    Method to calculate degree and eigenvector centrality.
    Relative to `get_centrality_opt`, it runs fast by not directly computing
    the correlation matrix. As a consequence, there are several differences/
    limitations from the standard approach:
    
    1. Cannot specify a correlation threshold
    2. As a consequence, the weighted dense matrix centrality is computed
    3. No memory limit is specified since it is assumed to be ok
    
    Note that the approach doesn't directly calculate the complete correlation
    matrix.
    
    Parameters
    ----------
    timeseries : numpy array
        timeseries of the input subject
    method_options : list (boolean)
        list of two booleans for the binarize and weighted options respectively
    
    Returns
    -------
    out_list : list (tuple)
        list of tuples containing the output name to be used to store the
        nifti image for centrality and the centrality matrix. This will only
        be weighted since the fast approaches are limited to this type of
        output.
    
    Raises
    ------
    Exception
    '''
    
    # Import packages
    from CPAC.network_centrality import fast_degree_centrality,\
                                        fast_eigenvector_centrality
    from CPAC.cwas.subdist import norm_cols
    
    try:
        out_list = []
        nvoxs = timeseries.shape[0]
        ntpts = timeseries.shape[1]
        
        # It's assumed that there is enough memory,
        # so a block size isn't set here
        
        calc_degree = method_options[0]
        calc_eigen = method_options[1]
        
        print("Normalize Time-series")
        timeseries = norm_cols(timeseries.T)
        
        print("Computing centrality across %i voxels" % nvoxs)
        
        if calc_degree:
            print("...calculating degree")
            degree_weighted = fast_degree_centrality(timeseries)
            out_list.append(('degree_centrality_weighted', degree_weighted))
        
        if calc_eigen:
            print("...calculating eigen")
            eigen_weighted = fast_eigenvector_centrality(timeseries)
            out_list.append(('eigenvector_centrality_weighted', eigen_weighted))
        
        return out_list
    
    except Exception:
        print("Error in calculating centrality")
        raise
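A hypothetical call on synthetic data; the shape follows the nvoxs x ntpts convention the function reads from timeseries.shape:

import numpy as np

# assuming get_centrality_fast is importable from its defining module
rng = np.random.default_rng(3)
timeseries = rng.standard_normal((1000, 100)).astype(np.float32)  # nvoxs x ntpts

# request both weighted degree and weighted eigenvector centrality
for name, centrality in get_centrality_fast(timeseries, [True, True]):
    print(name, centrality.shape)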
Example 9
ref_file = "/data/Projects/ABIDE_Initiative/CPAC/test_qp/Centrality_Working/resting_preproc_0051466_session_1/_mask_mask_abide_90percent_gm_4mm/_scan_rest_1_rest/resample_functional_to_template_0/tmp_binarize.nii.gz"
comp_file = "/data/Projects/ABIDE_Initiative/CPAC/test_qp/Centrality_Working/resting_preproc_0051466_session_1/network_centrality_0/_mask_mask_abide_90percent_gm_4mm/_scan_rest_1_rest/calculate_centrality/degree_centrality_binarize.nii.gz"

ref_img = nib.load(ref_file)
comp_img = nib.load(comp_file)

ref_data = ref_img.get_data()
comp_data = comp_img.get_data()

mask = nib.load(templ["mask"]).get_data().nonzero()
nvoxs = float(len(mask[0]))

diff_data = np.abs(ref_data[mask] - comp_data[mask])
w = diff_data.nonzero()

# We allow for some differences but the majority of voxels are assumed to be the same
ok_(w[0].shape[0] < 25)
ok_(diff_data[w].mean() <= 1)

from CPAC.cwas.subdist import norm_cols

ts_img = nib.load(templ['input'])
ts_data = ts_img.get_data()
ts = ts_data[mask]
norm_ts = norm_cols(ts.T)
corr_matrix = np.nan_to_num(norm_ts.T.dot(norm_ts))  # was ts.T.dot(ts), which is not a correlation matrix

binarize = np.sum(corr_matrix > templ['thresh'], axis=1)
# Each voxel correlates perfectly with itself, so drop the self-connection
binarize[binarize != 0] = binarize[binarize != 0] - 1
Example 10
# ref_file, comp_file, and `templ` are assumed to be defined as in the
# previous example
import numpy as np
import nibabel as nib
from nose.tools import ok_

ref_img = nib.load(ref_file)
comp_img = nib.load(comp_file)

ref_data = ref_img.get_data()
comp_data = comp_img.get_data()

mask = nib.load(templ["mask"]).get_data().nonzero()
nvoxs = float(len(mask[0]))

diff_data = np.abs(ref_data[mask] - comp_data[mask])
w = diff_data.nonzero()

# We allow for some differences but the majority of voxels are assumed to be the same
ok_(w[0].shape[0] < 25)
ok_(diff_data[w].mean() <= 1)

from CPAC.cwas.subdist import norm_cols

ts_img = nib.load(templ['input'])
ts_data = ts_img.get_data()
ts = ts_data[mask]
norm_ts = norm_cols(ts.T)
corr_matrix = np.nan_to_num(norm_ts.T.dot(norm_ts))  # was ts.T.dot(ts), which is not a correlation matrix

binarize = np.sum(corr_matrix > templ['thresh'], axis=1)
# Each voxel correlates perfectly with itself, so drop the self-connection
binarize[binarize != 0] = binarize[binarize != 0] - 1
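The final correction in both snippets exists because the diagonal of a correlation matrix is all ones: every voxel's supra-threshold count includes itself. Subtracting one from the nonzero entries removes that self-edge while leaving empty (e.g., NaN-masked) voxels at zero:

import numpy as np

counts = np.array([0, 3, 1, 7])  # supra-threshold counts, self-edge included
counts[counts != 0] -= 1         # drop the self-correlation
print(counts)                    # -> [0 2 0 6]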
Example 11
def calc_centrality(in_file, template, method_option, threshold_option,
                    threshold, allocated_memory):
    '''
    Function to calculate centrality and map the results to a nifti file
    
    Parameters
    ----------
    in_file : string (nifti file)
        path to subject data file
    template : string (nifti file)
        path to mask/parcellation unit
    method_option : string
        accepted values are 'degree centrality', 'eigenvector centrality', and
        'lfcd'
    threshold_option : string
        accepted values are: 'significance', 'sparsity', and 'correlation'
    threshold : float
        pvalue/sparsity_threshold/threshold value
    allocated_memory : string
        amount of memory allocated to degree centrality
    
    Returns
    -------
    out_list : list
        list containing out mapped centrality images
    '''

    # Import packages
    from CPAC.network_centrality import load,\
                                        get_centrality_by_rvalue,\
                                        get_centrality_by_sparsity,\
                                        get_centrality_fast,\
                                        map_centrality_matrix,\
                                        calc_blocksize,\
                                        convert_pvalue_to_r
    from CPAC.network_centrality.utils import check_centrality_params
    from CPAC.cwas.subdist import norm_cols

    # First check input parameters and get proper formatted method/thr options
    method_option, threshold_option = \
        check_centrality_params(method_option, threshold_option, threshold)

    # Init variables
    out_list = []
    ts, aff, mask, t_type, scans = load(in_file, template)

    # If we're doing degree sparsity
    if method_option == 'degree' and threshold_option == 'sparsity':
        block_size = calc_blocksize(ts, memory_allocated=allocated_memory,
                                    sparsity_thresh=threshold)
    # Otherwise
    elif method_option == 'eigenvector':
        block_size = calc_blocksize(ts, memory_allocated=allocated_memory,
                                    include_full_matrix=True)
    # Otherwise, compute blocksize with regards to available memory
    else:
        block_size = calc_blocksize(ts, memory_allocated=allocated_memory,
                                    include_full_matrix=False)
    # Normalize the timeseries for easy dot-product correlation calc.
    ts_normd = norm_cols(ts.T)

    # P-value threshold centrality
    if threshold_option == 'significance':
        r_value = convert_pvalue_to_r(in_file, threshold, two_tailed=False)
        centrality_matrix = get_centrality_by_rvalue(ts_normd,
                                                     mask,
                                                     method_option,
                                                     r_value,
                                                     block_size)
    # Sparsity threshold
    elif threshold_option == 'sparsity':
        centrality_matrix = get_centrality_by_sparsity(ts_normd,
                                                       method_option,
                                                       threshold,
                                                       block_size)
    # R-value threshold centrality
    elif threshold_option == 'correlation':
        centrality_matrix = get_centrality_by_rvalue(ts_normd,
                                                     mask,
                                                     method_option,
                                                     threshold,
                                                     block_size)
    # For fast approach (no thresholding); threshold_option is a string in
    # this version, so the old integer comparison (== 3) could never match
    # (the exact string value used here is an assumption)
    elif threshold_option == 'fast':
        centrality_matrix = get_centrality_fast(ts, method_option)
    # Otherwise, incorrect input for threshold_option
    else:
        err_msg = 'Threshold option: %s not supported for network centrality '\
                  'measure: %s; fix this in the pipeline config'\
                  % (str(threshold_option), str(method_option))
        raise Exception(err_msg)
 
    # Map the arrays back to images
    for mat in centrality_matrix:
        centrality_image = map_centrality_matrix(mat, aff, mask, t_type)
        out_list.append(centrality_image)

    # Finally return
    return out_list
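A hypothetical invocation of this string-option version (paths and the memory figure are placeholders):

# assuming calc_centrality is importable from its defining module
out_images = calc_centrality('/path/to/func_preproc.nii.gz',  # in_file (placeholder)
                             '/path/to/mask.nii.gz',          # template (placeholder)
                             'degree centrality',             # method_option
                             'correlation',                   # threshold_option
                             0.6,                             # r-value threshold
                             '2.0')                           # allocated memory (string)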
Example 12
def get_centrality_by_thresh(timeseries,
                             template,
                             method_option,
                             weight_options,
                             threshold,
                             r_value,
                             memory_allocated):
    """
    Method to calculate degree and eigen vector centrality. 
    This method takes into consideration the amount of memory
    allocated by the user to calculate degree centrality.
    
    Parameters
    ----------
    timeseries_data : numpy array
        timeseries of the input subject
    template : numpy array
        Mask/ROI template for timeseries of subject
    method_option : integer
        0 - degree centrality calculation, 1 - eigenvector centrality calculation, 2 - lFCD calculation
    weight_options : string (list of boolean)
        list of two booleans for binarize and weighted options respectively
    threshold : float
        p-value threshold for the correlation values (ignored if the r_value option is specified)
    r_value : float
        threshold value in terms of the correlation (this will override the threshold option)
    memory_allocated : a string
        amount of memory allocated to degree centrality
        
    Returns
    -------
    out_list : string (list of tuples)
        list of tuple containing output name to be used to store nifti image
        for centrality and centrality matrix 
    
    Raises
    ------
    Exception
    """
    
    
    import numpy as np
    import os
    from CPAC.network_centrality import calc_blocksize,\
                                        cluster_data,\
                                        convert_pvalue_to_r,\
                                        degree_centrality,\
                                        eigenvector_centrality
    from CPAC.cwas.subdist import norm_cols
    
    try:                         
        # Init variables for use
        out_list = []
        nvoxs = timeseries.shape[0]
        ntpts = timeseries.shape[1]
        
        r_matrix = None      # init correlation matrix
        calc_degree = False  # init degree measure flag to false
        calc_eigen = False   # init eigen measure flag to false
        calc_lfcd = False    # init lFCD measure flag to false
        
        # Select which method we're going to perform
        if method_option == 0:
            calc_degree = True
        elif method_option == 1:
            calc_eigen = True
        elif method_option == 2:
            calc_lfcd = True
        
        # Set weighting parameters
        out_binarize = weight_options[0]
        out_weighted = weight_options[1]
        
        # Calculate the block size (i.e., number of voxels) to compute part of the
        # connectivity matrix at once.
        if calc_eigen:
            # We still use a block size to calculate the whole correlation matrix
            # because of issues in numpy that lead to extra memory usage when
            # computing the dot product.
            # See https://cmi.hackpad.com/Numpy-Memory-Issues-BlV9Pg5nRDM.
            block_size = calc_blocksize(timeseries, memory_allocated, include_full_matrix=True)
        else:
            block_size = calc_blocksize(timeseries, memory_allocated)
        
        if r_value is None:
            print("Calculating threshold")
            r_value = convert_pvalue_to_r(ntpts, threshold)
            print("...%s -> %s" % (threshold, r_value))
        
        print "Setup Intermediates/Outputs"
        # Degree matrix init
        if calc_degree:
            print "...degree"
            if out_binarize:
                degree_binarize = np.zeros(nvoxs, dtype=timeseries.dtype)
                out_list.append(('degree_centrality_binarize', degree_binarize))
            if out_weighted:
                degree_weighted = np.zeros(nvoxs, dtype=timeseries.dtype)
                out_list.append(('degree_centrality_weighted', degree_weighted))
        # Eigen matrix init
        if calc_eigen:
            print "...eigen"
            r_matrix = np.zeros((nvoxs, nvoxs), dtype=timeseries.dtype)
            if out_binarize:
                eigen_binarize = np.zeros(nvoxs, dtype=timeseries.dtype)
                out_list.append(('eigenvector_centrality_binarize', eigen_binarize))
            if out_weighted:
                eigen_weighted = np.zeros(nvoxs, dtype=timeseries.dtype)
                out_list.append(('eigenvector_centrality_weighted', eigen_weighted))
        # lFCD matrix init
        if calc_lfcd:
            print "...degree"
            if out_binarize:
                lfcd_binarize = np.zeros(nvoxs, dtype=timeseries.dtype)
                out_list.append(('lFCD_binarize', lfcd_binarize))
            if out_weighted:
                lfcd_weighted = np.zeros(nvoxs, dtype=timeseries.dtype)
                out_list.append(('lFCD_weighted', lfcd_weighted))
        
        # Normalize the timeseries columns for simple correlation calc via dot product later..
        print "Normalize TimeSeries"
        timeseries = norm_cols(timeseries.T)
        
        # Init blocking indices for correlation matrix calculation
        print("Computing centrality across %i voxels" % nvoxs)
        i = block_size
        j = 0
        # Calculate the correlation matrix in blocks
        while i <= nvoxs:
            print("running block ->", i, j)
            
            try:
                print("...correlating")
                corr_matrix = np.dot(timeseries[:, j:i].T, timeseries)
            except Exception:  # a bare except would also swallow KeyboardInterrupt
                raise Exception("Error in calculating block-wise correlation "
                                "for the block %i,%i" % (j, i))
                      
            if calc_eigen:
                print "...storing correlation matrix"
                r_matrix[j:i] = corr_matrix
            
            if calc_degree:
                if out_binarize:
                    print("...calculating binarize degree")
                    degree_centrality(corr_matrix, r_value, method="binarize",
                                      out=degree_binarize[j:i])
                if out_weighted:
                    print("...calculating weighted degree")
                    degree_centrality(corr_matrix, r_value, method="weighted",
                                      out=degree_weighted[j:i])
            
            if calc_lfcd:
                xyz_a = np.argwhere(template)
                krange = corr_matrix.shape[0]
                print("...iterating through seeds in block - lfcd")
                for k in range(krange):
                    corr_seed = corr_matrix[k, :]
                    labels = cluster_data(corr_seed, r_value, xyz_a)
                    seed_label = labels[j + k]
                    if out_binarize:
                        if seed_label > 0:
                            lfcd = np.sum(labels == seed_label)
                        else:
                            lfcd = 1
                        lfcd_binarize[j + k] = lfcd
                    if out_weighted:
                        if seed_label > 0:
                            lfcd = np.sum(corr_seed * (labels == seed_label))
                        else:
                            lfcd = 1
                        lfcd_weighted[j + k] = lfcd
            
            print("...removing temporary correlation matrix")
            del corr_matrix
           
            j = i
            if i == nvoxs:
                break
            elif (i+block_size) > nvoxs:
                i = nvoxs
            else:
                i += block_size
        
        # In case there are any zeros in lfcd matrix, set them to 1
        if calc_lfcd:
            if out_binarize:
                lfcd_binarize[np.argwhere(lfcd_binarize == 0)] = 1
            if out_weighted:
                lfcd_weighted[np.argwhere(lfcd_weighted == 0)] = 1
        
        # Perform eigenvector measures if necessary
        try:
            if calc_eigen:
                if out_binarize:
                    print("...calculating binarize eigenvector")
                    eigen_binarize[:] = eigenvector_centrality(r_matrix, r_value,
                                                               method="binarize").squeeze()
                if out_weighted:
                    print("...calculating weighted eigenvector")
                    eigen_weighted[:] = eigenvector_centrality(r_matrix, r_value,
                                                               method="weighted").squeeze()
        except Exception:
            print("Error in calculating eigenvector centrality")
            raise
        
        if calc_degree:
            print("...removing effect of auto-correlation on degree")
            degree_binarize[degree_binarize != 0] = degree_binarize[degree_binarize != 0] - 1
            degree_weighted[degree_weighted != 0] = degree_weighted[degree_weighted != 0] - 1
        
        return out_list
    
    except Exception:
        print("Error in calculating centrality")
        raise
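The while loop above is a standard blocking scheme: correlate block_size voxels at a time against all voxels, consume the slab, and free it before the next iteration, so peak memory stays at one slab instead of the full nvoxs x nvoxs matrix. A self-contained numpy sketch of just that scheme for binarized degree:

import numpy as np

rng = np.random.default_rng(4)
nvoxs, ntpts, block_size, r_value = 200, 50, 64, 0.2

ts = rng.standard_normal((ntpts, nvoxs))
ts -= ts.mean(axis=0)
ts /= np.sqrt((ts ** 2).sum(axis=0))     # column-normalized, as norm_cols does

degree = np.zeros(nvoxs)
for j in range(0, nvoxs, block_size):
    i = min(j + block_size, nvoxs)
    corr_block = ts[:, j:i].T.dot(ts)    # (i - j) x nvoxs slab
    degree[j:i] = np.sum(corr_block > r_value, axis=1)
    del corr_block                       # peak memory stays at ~one slab

degree[degree != 0] -= 1                 # remove the self-correlation
print(degree[:10])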
Example 13
def get_centrality_by_sparsity(timeseries,
                               method_option,
                               weight_options,
                               threshold,
                               memory_allocated):
    
    """
    Method to calculate degree and eigen vector centrality
    
    Parameters
    ----------
    timeseries : numpy array
        timeseries of the input subject
    method_options : string (list of boolean)
        list of two booleans for degree and eigen options respectively
    weight_options : string (list of boolean)
        list of two booleans for binarize and weighted options respectively
    threshold : float
        sparsity threshold for the correlation values
    memory_allocated : a string
        amount of memory allocated to degree centrality
    
    Returns
    -------
    out_list : string (list of tuples)
        list of tuple containing output name to be used to store nifti image
        for centrality and centrality matrix
    
    Raises
    ------
    Exception
    """
    
    import os
    import numpy as np
    from CPAC.network_centrality import calc_blocksize,\
                                        convert_sparsity_to_r,\
                                        degree_centrality,\
                                        eigenvector_centrality
    from CPAC.cwas.subdist import norm_cols
    
    out_list = []
    
    try:
        # Calculate the block size (i.e., number of voxels) to compute part of the
        # connectivity matrix at once.
        # 
        # We still use a block size to calculate the whole correlation matrix
        # because of issues in numpy that lead to extra memory usage when
        # computing the dot product.
        # See https://cmi.hackpad.com/Numpy-Memory-Issues-BlV9Pg5nRDM.
        block_size = calc_blocksize(timeseries, memory_allocated,
                                    include_full_matrix=True)
        
        nvoxs = timeseries.shape[0]
        ntpts = timeseries.shape[1]
        
        calc_degree = False  # init degree measure flag to false
        calc_eigen = False   # init eigen measure flag to false
        calc_lfcd = False    # init lFCD flag (note: lFCD is never computed here)
        
        # Select which method we're going to perform
        if method_option == 0:
            calc_degree = True
        elif method_option == 1:
            calc_eigen = True
        elif method_option == 2:
            calc_lfcd = True
        
        # Set weighting parameters
        out_binarize = weight_options[0]
        out_weighted = weight_options[1]
        
        corr_matrix = np.zeros((nvoxs, nvoxs), dtype=timeseries.dtype)
        
        print("Normalize TimeSeries")
        timeseries = norm_cols(timeseries.T)
        
        
        print "Computing centrality across %i voxels" % nvoxs
        j = 0
        i = block_size
        while i <= timeseries.shape[1]:
            print "running block ->", i,j
            
            print "...correlating"
            np.dot(timeseries[:,j:i].T, timeseries, out=corr_matrix[j:i])
            
            j = i
            if i == nvoxs:
                break
            elif (i+block_size) > nvoxs:
                i = nvoxs
            else:
                i += block_size
        
        
        print "Calculating threshold"
        r_value = convert_sparsity_to_r(corr_matrix, threshold, full_matrix = True)
        print "r_value ->", r_value
        
        
        if calc_degree:
            if out_binarize:
                print("...calculating binarize degree")
                degree_binarize = degree_centrality(corr_matrix, r_value,
                                                    method="binarize")
                out_list.append(('degree_centrality_binarize', degree_binarize))
            if out_weighted:
                print("...calculating weighted degree")
                degree_weighted = degree_centrality(corr_matrix, r_value,
                                                    method="weighted")
                out_list.append(('degree_centrality_weighted', degree_weighted))
        
        if calc_eigen:
            if out_binarize:
                print("...calculating binarize eigenvector")
                eigen_binarize = eigenvector_centrality(corr_matrix, r_value,
                                                        method="binarize")
                out_list.append(('eigenvector_centrality_binarize', eigen_binarize))
            if out_weighted:
                print("...calculating weighted eigenvector")
                eigen_weighted = eigenvector_centrality(corr_matrix, r_value,
                                                        method="weighted")
                out_list.append(('eigenvector_centrality_weighted', eigen_weighted))
            
    except Exception:
        print("Error while calculating centrality")
        raise
    
    return out_list
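convert_sparsity_to_r is not shown here; it presumably picks the correlation value at which the requested fraction of the strongest connections survives thresholding. A pure-numpy sketch of that presumed behavior (an assumption about the function, not CPAC's actual implementation):

import numpy as np

def sparsity_to_r_demo(corr_matrix, sparsity):
    # Keep the top `sparsity` fraction of off-diagonal connections:
    # the r cutoff is the (1 - sparsity) quantile of their values.
    vals = corr_matrix[np.triu_indices_from(corr_matrix, k=1)]
    return np.percentile(vals, 100.0 * (1.0 - sparsity))

rng = np.random.default_rng(5)
ts = rng.standard_normal((50, 100))
ts -= ts.mean(axis=0)
ts /= np.sqrt((ts ** 2).sum(axis=0))
corr = ts.T.dot(ts)
print(sparsity_to_r_demo(corr, 0.1))  # r cutting off the top 10% of edges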