Example #1
def test_t_contrast_add():
    # new API
    n, p, q = 100, 80, 10
    X, Y = np.random.randn(p, q), np.random.randn(p, n)
    lab, res = session_glm(Y, X, 'ols')
    c1, c2 = np.eye(q)[0], np.eye(q)[1]
    con = compute_contrast(lab, res, c1) + compute_contrast(lab, res, c2)
    z_vals = con.z_score()
    assert_almost_equal(z_vals.mean(), 0, 0)
    assert_almost_equal(z_vals.std(), 1, 0)
Example #2
def association_glm(var1, var2):
    # we assume var1 is a numerical variable that is already encoded

    # Encode variables
    x = var2

    # Sanity check: x must not be identically zero
    if (x == 0).sum() != x.shape[0]:

        # Normalize
        x = (x - x.mean()) / x.std()
        y = (var1 - var1.mean()) / var1.std()
        # GLM: design = [intercept, x], t contrast on x
        contrast = [0, 1]
        x_ = np.vstack((np.ones_like(x), x)).T
        labels, regression_result = nsglm.session_glm(y[:, np.newaxis], x_)
        cont_results = nsglm.compute_contrast(labels,
                                              regression_result,
                                              contrast,
                                              contrast_type='t')
        pval = cont_results.p_value()[0]
        return cont_results.stat()[0], pval

    else:
        print('### Error: nothing to regress ###')
        return np.nan, np.nan
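A minimal usage sketch for association_glm with synthetic data. The variables, the seed, and the expected printout are illustrative assumptions, not part of the original code; nsglm must be importable as in the snippet above.

# Hypothetical usage of association_glm on synthetic data; assumes the
# nsglm module (providing session_glm/compute_contrast) is imported
# as in the snippet above.
import numpy as np

rng = np.random.RandomState(0)
var2 = rng.randn(200)               # predictor, already numerical
var1 = 0.5 * var2 + rng.randn(200)  # outcome with a true linear association
stat, pval = association_glm(var1, var2)
print('t = {:.2f}, p = {:.4f}'.format(stat, pval))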
Example #3
def test_F_contrast_add():
    # new API
    n, p, q = 100, 80, 10
    X, Y = np.random.randn(p, q), np.random.randn(p, n)
    lab, res = session_glm(Y, X, 'ar1')
    c1, c2 = np.eye(q)[:2], np.eye(q)[2:4]
    con = compute_contrast(lab, res, c1) + compute_contrast(lab, res, c2)
    z_vals = con.z_score()
    assert_almost_equal(z_vals.mean(), 0, 0)
    assert_almost_equal(z_vals.std(), 1, 0)

    # also test adding a contrast to itself (fully dependent contrasts)
    con1 = compute_contrast(lab, res, c1)
    con2 = compute_contrast(lab, res, c1) + compute_contrast(lab, res, c1)
    assert_almost_equal(con1.effect * 2, con2.effect)
    assert_almost_equal(con1.variance * 2, con2.variance)
    assert_almost_equal(con1.stat() * 2, con2.stat())
Example #4
def test_contrast_values():
    # new API
    # but this test is circular and should be removed
    n, p, q = 100, 80, 10
    X, Y = np.random.randn(p, q), np.random.randn(p, n)
    lab, res = session_glm(Y, X, 'ar1', bins=1)
    # t test
    cval = np.eye(q)[0]
    con = compute_contrast(lab, res, cval)
    t_ref = list(res.values())[0].Tcontrast(cval).t
    assert_almost_equal(np.ravel(con.stat()), t_ref)
    # F test
    cval = np.eye(q)[:3]
    con = compute_contrast(lab, res, cval)
    F_ref = list(res.values())[0].Fcontrast(cval).F
    # Note that the values are not strictly equal,
    # this seems to be related to a bug in Mahalanobis
    assert_almost_equal(np.ravel(con.stat()), F_ref, 3)
Example #5
def test_Tcontrast():
    # new API
    n, p, q = 100, 80, 10
    X, Y = np.random.randn(p, q), np.random.randn(p, n)
    labels, results = session_glm(Y, X, 'ar1')
    con_val = np.eye(q)[0]
    z_vals = compute_contrast(labels, results, con_val).z_score()
    assert_almost_equal(z_vals.mean(), 0, 0)
    assert_almost_equal(z_vals.std(), 1, 0)
Example #6
def test_Fcontrast():
    # new API
    n, p, q = 100, 80, 10
    X, Y = np.random.randn(p, q), np.random.randn(p, n)
    for model in ['ols', 'ar1']:
        labels, results = session_glm(Y, X, model)
        for con_val in [np.eye(q)[0], np.eye(q)[:3]]:
            z_vals = compute_contrast(
                labels, results, con_val, contrast_type='F').z_score()
            assert_almost_equal(z_vals.mean(), 0, 0)
            assert_almost_equal(z_vals.std(), 1, 0)
Example #7
def test_contrast_mul():
    # new API
    n, p, q = 100, 80, 10
    X, Y = np.random.randn(p, q), np.random.randn(p, n)
    lab, res = session_glm(Y, X, 'ar1')
    for c1 in [np.eye(q)[0], np.eye(q)[:3]]:
        con1 = compute_contrast(lab, res, c1)
        con2 = con1 * 2
        assert_almost_equal(con1.effect * 2, con2.effect)
        # assert_almost_equal(con1.variance * 2, con2.variance) FIXME
        # assert_almost_equal(con1.stat() * 2, con2.stat()) FIXME
        assert_almost_equal(con1.z_score(), con2.z_score())
Example #8
    def fit(self,
            net_data_low_main,
            y,
            confounds,
            n_subtypes,
            n_subtypes_l1=3,
            flag_feature_select=True,
            extra_var=[],
            verbose=True):
        self.verbose = verbose
        ### regress confounds from the connectomes
        self.scale_ref = net_data_low_main.mean(0).mean(1)
        self.cf_rm = prediction.ConfoundsRm(confounds, net_data_low_main)
        net_data_low = self.cf_rm.transform(confounds, net_data_low_main)

        ### compute the subtypes
        if self.verbose: start = time.time()
        st_ = subtypes.clusteringST()
        st_.fit(net_data_low, n_subtypes_l1)
        xw = st_.transform(net_data_low)
        xw = np.nan_to_num(xw)

        print('xw sub data', xw[0, :])
        self.st_l2 = subtypes.clusteringST()
        self.st_l2.fit(net_data_low, n_subtypes)
        xwl2 = self.st_l2.transform(net_data_low)
        xwl2 = np.nan_to_num(xwl2)
        if self.verbose:
            print("Compute subtypes, Time elapsed: {}s".format(
                int(time.time() - start)))

        ### feature selection
        if flag_feature_select:
            if verbose: start = time.time()
            # t contrast on y; zeros for the confound columns
            contrast = np.hstack(
                ([0, 1], np.repeat(0, confounds.shape[1])))  # e.g. [0, 1, 0, 0, 0]
            x_ = np.vstack((np.ones_like(y), y, confounds.T)).T

            labels, regression_result = nsglm.session_glm(np.array(xw), x_)
            cont_results = nsglm.compute_contrast(labels,
                                                  regression_result,
                                                  contrast,
                                                  contrast_type='t')
            pval = cont_results.p_value()
            results = smm.multipletests(pval, alpha=0.01, method='fdr_bh')
            w_select = np.where(results[0])[0]
            if len(w_select) < 10:
                w_select = np.argsort(pval)[:10]
            else:
                w_select = w_select[np.argsort(pval[np.where(results[0])])]
        else:
            # Cancel the selection
            w_select = np.where(xw[0, :] != 2)[0]

        ### Include extra covariates
        if len(extra_var) != 0:
            all_var = np.hstack((xw[:, w_select], extra_var))
        else:
            all_var = xw[:, w_select]
        if self.verbose:
            print("Feature selection, Time elapsed: {}s".format(
                int(time.time() - start)))

        ### prediction model
        if self.verbose: start = time.time()
        tlp = TwoLevelsPrediction()
        tlp.fit(all_var, xwl2, y, model_type='svm', verbose=self.verbose)
        if self.verbose:
            print("Two Levels prediction, Time elapsed: {}s".format(
                int(time.time() - start)))

        ### save parameters
        self.median_template = np.median(net_data_low, axis=0)
        self.st = st_
        self.w_select = w_select
        self.tlp = tlp
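The feature-selection step above is a reusable pattern: a mass-univariate GLM with a t contrast on y, Benjamini-Hochberg FDR correction, and a fallback to the smallest p-values when too few features survive. Here is a sketch of that pattern as a standalone helper; the name select_features_fdr is hypothetical, and nsglm is assumed to be the same GLM module (providing session_glm and compute_contrast) imported by the class above.

import numpy as np
import statsmodels.stats.multitest as smm
# nsglm is assumed to be imported elsewhere, as in the class above.

def select_features_fdr(xw, y, confounds, alpha=0.01, n_min=10):
    # design = [intercept, y, confounds], t contrast on the y column
    contrast = np.hstack(([0, 1], np.repeat(0, confounds.shape[1])))
    x_ = np.vstack((np.ones_like(y), y, confounds.T)).T
    labels, regression_result = nsglm.session_glm(np.array(xw), x_)
    cont_results = nsglm.compute_contrast(labels, regression_result,
                                          contrast, contrast_type='t')
    pval = cont_results.p_value()
    reject = smm.multipletests(pval, alpha=alpha, method='fdr_bh')[0]
    w_select = np.where(reject)[0]
    if len(w_select) < n_min:
        # too few survivors: fall back to the n_min smallest p-values
        w_select = np.argsort(pval)[:n_min]
    else:
        # order the surviving features by increasing p-value
        w_select = w_select[np.argsort(pval[reject])]
    return w_select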
Example #9
    def fit(self, net_data_low_main, y, confounds, n_subtypes, n_subtypes_l1=3,
            flag_feature_select=True, extra_var=[], verbose=True):
        self.verbose = verbose
        ### regress confounds from the connectomes
        self.scale_ref = net_data_low_main.mean(0).mean(1)
        self.cf_rm = prediction.ConfoundsRm(confounds, net_data_low_main)
        net_data_low = self.cf_rm.transform(confounds, net_data_low_main)

        ### compute the subtypes
        if self.verbose: start = time.time()
        st_ = subtypes.clusteringST()
        st_.fit_robust(net_data_low, n_subtypes_l1)
        xw = st_.transform(net_data_low)
        xw = np.nan_to_num(xw)

        self.st_l2 = subtypes.clusteringST()
        self.st_l2.fit_robust(net_data_low, n_subtypes)
        xwl2 = self.st_l2.transform(net_data_low)
        xwl2 = np.nan_to_num(xwl2)
        if self.verbose:
            print("Compute subtypes, Time elapsed: {}s".format(
                int(time.time() - start)))

        ### feature selection
        if flag_feature_select:
            if verbose: start = time.time()
            # t contrast on y; zeros for the confound columns
            contrast = np.hstack(
                ([0, 1], np.repeat(0, confounds.shape[1])))  # e.g. [0, 1, 0, 0, 0]
            x_ = np.vstack((np.ones_like(y), y, confounds.T)).T

            labels, regression_result = nsglm.session_glm(np.array(xw), x_)
            cont_results = nsglm.compute_contrast(labels,
                                                  regression_result,
                                                  contrast,
                                                  contrast_type='t')
            pval = cont_results.p_value()
            results = smm.multipletests(pval, alpha=0.01, method='fdr_bh')
            w_select = np.where(results[0])[0]
            if len(w_select) < 10:
                w_select = np.argsort(pval)[:10]
            else:
                w_select = w_select[np.argsort(pval[np.where(results[0])])]
        else:
            # Cancel the selection
            w_select = np.where(xw[0, :] != 2)[0]

        ### Include extra covariates
        if len(extra_var) != 0:
            all_var = np.hstack((xw[:, w_select], extra_var))
        else:
            all_var = xw[:, w_select]
        if self.verbose:
            print("Feature selection, Time elapsed: {}s".format(
                int(time.time() - start)))

        ### prediction model
        if self.verbose: start = time.time()
        tlp = TwoLevelsPrediction()
        tlp.fit(all_var, xwl2, y, model_type='svm', verbose=self.verbose)
        if self.verbose:
            print("Two Levels prediction, Time elapsed: {}s".format(
                int(time.time() - start)))

        ### save parameters
        self.median_template = np.median(net_data_low, axis=0)
        self.st = st_
        self.w_select = w_select
        self.tlp = tlp
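Example #10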
	# Create a masker from the EPI mask image and detrend both runs
	mask_img = nb.load(mask_file)
	masker2 = NiftiMasker(mask_img=mask_img, detrend=True)
	niimgs_detrended = masker2.fit_transform(niimgs1)
	niimgs_masked = masker2.inverse_transform(niimgs_detrended)
	niimgs_detrended_r2 = masker2.fit_transform(niimgs2)
	niimgs_masked_r2 = masker2.inverse_transform(niimgs_detrended_r2)
	np.save(op.join(folder, names[i] + '_mean_timeseries_r1.npy'), niimgs_detrended.mean(1))
	np.save(op.join(folder, names[i] + '_mean_timeseries_r2.npy'), niimgs_detrended_r2.mean(1))

	# GLM analysis
	labels, reg_results = glm.session_glm(niimgs_detrended, dm, noise_model='ols')
	contrast_map = glm.compute_contrast(labels, reg_results, contrasts[i])
	# average the time series over voxels significant at p < 0.05
	indexes = contrast_map.p_value() < 0.05
	mean_significant_voxels = niimgs_detrended[:, indexes].mean(1)
	np.save(op.join(folder, names[i] + '_mean_pvalues_005.npy'), mean_significant_voxels)

	# keep the voxel with the second-smallest p-value
	inds_mins = np.argsort(contrast_map.p_value())
	ind_min = inds_mins[1]
	v1 = niimgs_detrended[:, ind_min]
	v2 = niimgs_detrended_r2[:, ind_min]
	np.save(op.join(folder, names[i] + '_voxelmin%d.npy' % ind_min), v1)
	np.save(op.join(folder, names[i] + '_voxelmin%d_r2.npy' % ind_min), v2)
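All of the snippets above share the same two-step pattern: session_glm fits a voxel-wise GLM and returns (labels, results), and compute_contrast builds a contrast object exposing stat(), p_value() and z_score(). Below is a minimal self-contained sketch of that pattern using current nilearn, where session_glm has since been renamed run_glm; treat the exact import paths as an assumption about the installed version.

import numpy as np
# nilearn's modern equivalents of the session_glm/compute_contrast calls
# used throughout the examples above.
from nilearn.glm.first_level import run_glm
from nilearn.glm.contrasts import compute_contrast

n_scans, n_voxels, n_regressors = 80, 100, 10
X = np.random.randn(n_scans, n_regressors)  # design matrix (time x regressors)
Y = np.random.randn(n_scans, n_voxels)      # data (time x voxels)

labels, results = run_glm(Y, X, noise_model='ols')  # voxel-wise GLM fit
con_val = np.eye(n_regressors)[0]  # t contrast on the first regressor
con = compute_contrast(labels, results, con_val)
print(con.stat().shape, con.p_value().min(), con.z_score().mean())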