def setup_class(self):
        basedir = os.path.split(os.path.abspath(__file__))[0]
        fname_anscombe = os.path.join(basedir, 'data/anscombe.csv')
        #a, b, solution, decimal
        self.ts = {}
        anscomb = np.genfromtxt(
            fname_anscombe, delimiter=',',
            names=True)  #columns are x1, .., x4, y1, .., y4
        for ii in range(1, 5):
            anscombe_xy = [anscomb['x%s' % ii], anscomb['y%s' % ii]]
            stand_xy = [standardize(s) for s in anscombe_xy]  # list (not a map object) so it concatenates with sol
            sol = [.8165, 3]
            self.ts['anscombe%s' % ii] = anscombe_xy + sol
            self.ts['anscombe_stand%s' % ii] = stand_xy + sol

        a = np.arange(11)
        self.ts['linear'] = [a, a * 2 + 10, 1]
        #make 3 dim array
        test = ['anscombe%s' % ii for ii in range(1, 5)] + ['linear']
        #fix for order issues with dict
        self.a_3dim = np.array(
            [v[0] for k, v in self.ts.items() if k in test])
        self.b_3dim = np.array(
            [v[1] for k, v in self.ts.items() if k in test])
        self.corrs_3dim = np.array(
            [v[2] for k, v in self.ts.items() if k in test])
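For orientation, each fixture entry packs [a, b, expected correlation, decimal] (the 'linear' entry omits the decimal). A test consuming these fixtures might look like the hedged sketch below; the choice of corsubs as the function under test and the fallback precision are assumptions, since the actual test methods are not part of this excerpt (it assumes the module's usual imports: numpy, assert_almost_equal, and pycorr's corsubs).

def test_known_correlations(self):  # hypothetical test, not from the original suite
    for name, value in self.ts.items():
        a, b, expected = value[0], value[1], value[2]
        decimal = value[3] if len(value) > 3 else 6  # 'linear' carries no decimal entry
        # assumed: corsubs returns the Pearson correlation of two 1-D timecourses
        assert_almost_equal(corsubs(a, b), expected, decimal=decimal)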
Example #2
def crosscor_full(A, B=None, nan_thresh=None):
    """From data (dims sub x seg x vox) calculate sub against others pattern cor

    Parameters:
        A: sub x seg x vox matrix
        B: (optional) seg x vox matrix

    Returns:
        seg x seg correlation matrix
    """
    # standardize all along last dim, so don't need to in correlation
    A = standardize(A)

    all_cors = []
    # Within group correlations
    if B is None:
        others = sum_tc(A)
        for sub in A:
            # mask voxels with any NaN for this subject
            to_remove = np.any(np.isnan(sub), axis=0)
            if np.any(to_remove):
                # drop the NaN voxels before correlating
                tmp_sub = sub[..., ~to_remove]
                tmp_others = others[..., ~to_remove]
            else:
                tmp_sub, tmp_others = sub, others
            # cross correlate (1 x seg x seg)
            if nan_thresh is not None and to_remove.mean() > nan_thresh:
                cormat = np.empty(sub.shape[0:1] * 2) * np.nan
            else:
                cormat = crosscor(tmp_sub,
                                  standardize(tmp_others - tmp_sub),
                                  standardized=True)
            all_cors.append(cormat)
        return np.array(all_cors)
    # Between group correlations
    else:
        B = standardize(B)
        for sub in A:
            cormat = crosscor(sub, B, standardized=True)
            all_cors.append(cormat)
        return np.array(all_cors)
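A minimal usage sketch for the function above, assuming the definition and its pycorr helpers (standardize, sum_tc, crosscor) are in scope; it only illustrates the expected input and output shapes.

import numpy as np

A = np.random.random((5, 4, 100))   # 5 subs x 4 segs x 100 vox
C_within = crosscor_full(A)         # each sub vs. the summed timecourse of the others
print(C_within.shape)               # expected: (5, 4, 4), one seg x seg matrix per sub

B = np.random.random((4, 100))      # a group-level seg x vox template
C_between = crosscor_full(A, B=B)   # each sub vs. B
print(C_between.shape)              # expected: (5, 4, 4)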
Example #3
def load_mvpa(all_fnames, TRs, bad_vox, offset_TR, collapse=True):
    """Return matrix of shape (sub x seg x vox)

    Parameters:
        all_fnames:     names of nifti files to load for sub dimension
        TRs:            dataframe with cond column and order column
        bad_vox:        mask with true for voxels to be discarded
        offset_TR:      TRs to shift timecourses before subsetting (to take into account lag, etc..)
        collapse:       whether to take mean along last axis

    Notes:
        If collapse is False, then sub and seg dims are lists.

    """
    subs_list = []
    for fname in all_fnames:
        # Load Array
        if isinstance(fname, str):
            subname, ext = os.path.splitext(os.path.split(fname)[-1])
            #subkey = "_".join(subname.split('_')[:2])
            arr = np.load(fname)
        else:
            arr = fname

        # Standardize, then drop voxels flagged in bad_vox (if given)
        arr = standardize(arr)
        if bad_vox is not None:
            arr = arr[~bad_vox]
        #arr = arr[ np.isnan(arr).sum(axis=-1) == 0]     #remove any nans (from no var?)

        # Get individual segments
        # since it sorts in order of columns, will be sorted by cond first, then order
        segs_list = []
        cond_list = []
        # TODO remove hard coded conditions
        for ii, g in TRs.query("cond in ['Slumlord', 'Overview']").groupby(
            ['cond', 'order']):
            #print ii
            cond_list.append(ii)
            segarr = subset_from_TRs(arr, g, offset=offset_TR)
            segs_list.append(segarr)
        # optionally collapse time dimension
        if collapse:
            mat = np.vstack([seg.mean(axis=1) for seg in segs_list])
            subs_list.append(mat)
        else:
            subs_list.append(segs_list)
        #print cond_list

    M = np.array(
        subs_list
    ) if collapse else subs_list  # Make field array, with sub names?
    return M
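To make the expected inputs concrete, here is a hedged sketch of the TRs dataframe the function groups on. Only the cond and order columns come from the code above; the TR column (presumably consumed by subset_from_TRs to index the time axis) is an assumption.

import pandas as pd

# hypothetical layout: one row per TR belonging to a condition segment
TRs = pd.DataFrame({
    'cond':  ['Slumlord'] * 4 + ['Overview'] * 4,
    'order': [1, 1, 2, 2, 1, 1, 2, 2],
    'TR':    [10, 11, 12, 13, 40, 41, 42, 43],   # assumed timing column, not from the original
})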
Example #7
 def test_python_list(self):                         #can take python array
     el = list(self.S1)
     a = [el, el]
     M = np.array([self.S1, self.S1])
     assert_almost_equal(standardize(a), standardize(M))
 def test_axis_arg(self):  #axis arg returns arr with expected shape
     tmp_M = np.array([np.ones(3), np.zeros(3)])
     tmp_stand = standardize(tmp_M, axis=0)
     assert_almost_equal(tmp_stand.shape, tmp_M.shape)
Example #10
 def test_inplace_equals_copy(self):  #equal results for copy and inplace
     S1_inplace = standardize(self.S1, inplace=True)
     assert_almost_equal(standardize(self.S1), S1_inplace)
Example #11
 def test_standardize_out_equal_standardize_func(self):
     summed = sum_tc(self.a, standardize_out=True)
     assert_almost_equal(summed, standardize(summed))
Example #13
 def test_returns_inplace_arg(self):                  #can work inplace
     tmp_S1  = self.S1.copy()
     assert tmp_S1 is standardize(tmp_S1, inplace=True)
Example #14
 def test_returns_copy_by_default(self):              #returns copy by default
     assert self.S1 is not standardize(self.S1)
Example #15
# 1) operating on correlation matrix
# 2) subtract each sub from summed tc and take correlation
# 3) correlate with summed tc, use correction given by Wherry

from nose import with_setup
import numpy as np
from numpy.testing import assert_almost_equal
from pycorr.funcs_correlate import standardize, corsubs, crosscor, intersubcorr

np.random.seed(10)

dims = (2, 2, 10)
nsubs = 3
subs = [np.random.random(dims) for ii in range(nsubs)]
for M in subs:
    M[0, 0] = range(dims[-1])  #0,0 is 1:N
for M in subs:
    standardize(M, inplace=True)
subs[0][1, 1] = np.NAN  #1,1 sub 0 has a NaN timecourse

C_all = crosscor(subs, standardized=True)
C_all[1, 1, 0] = np.NAN
isc1 = intersubcorr(C_all)

M_ttl = np.nansum(subs, axis=0)
isc2 = np.array([corsubs(M, M_ttl - M) for M in subs]).transpose([1, 2, 0])

isc3_list = []
for M in subs:
    r_all = corsubs(M, M_ttl)
    s_all = np.std(M_ttl, axis=-1, ddof=1)
    s_i = np.std(M, axis=-1, ddof=1)
    # part-whole (Wherry) correction: corr(M, M_ttl - M) expressed via corr(M, M_ttl)
    M_cors = (r_all * s_all - s_i) / \
             np.sqrt(s_all**2 + s_i**2 - 2 * r_all * s_all * s_i)
    isc3_list.append(M_cors)
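Since isc1, isc2, and isc3 are three routes to the same intersubject correlation, a natural sanity check is that they agree. A hedged sketch of such a comparison follows; stacking isc3 to mirror isc2's sub-last layout is an assumption, and the actual assertions of this test module are not shown in the excerpt.

# stack the per-subject Wherry-corrected maps to match isc2's (x, y, sub) layout (assumed)
isc3 = np.array(isc3_list).transpose([1, 2, 0])

# isc2 and isc3 compute the same quantity algebraically, so they should match
# (with NaNs where subject 0's [1, 1] timecourse is NaN); isc1 is expected to
# agree as well, up to intersubcorr's output layout.
assert_almost_equal(isc2, isc3)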
Example #18
 def test_standardize_in_equal_standardize_func(self):
     stand_a = standardize(self.a)
     assert_almost_equal(sum_tc(self.a, standardize_subs=True),
                         sum_tc(stand_a, standardize_subs=False))
Example #21
 def test_inplace_int(self):  #int arrays never operate inplace
     tmp_S1 = self.S1.astype(int)
     assert tmp_S1 is not standardize(tmp_S1, inplace=True)
     assert_almost_equal(standardize(tmp_S1),
                         standardize(tmp_S1, inplace=True))
Example #23
 def test_no_demean(self):
     print(self.S1)
     print(standardize(self.S1, demean=False))
     assert not np.allclose(standardize(self.S1, demean=False).mean(), 0)