Example #1
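# Shared setup assumed by all of the examples below. The helper-module import
# paths are guesses from usage (`gu`, `ga`, and `go` are never defined in these
# snippets), so treat them as placeholders for the project's own modules.
# Variables such as datadir, infile, nnodes, des_file, and con_file are defined
# earlier in the full scripts these excerpts come from.
import os
from glob import glob

import numpy as np

import gift_utils as gu       # assumed to provide make_dir, split_filename, save_img, square_from_combos
import gift_analysis as ga    # assumed to provide randomise, get_results, multi_correct
import gift_output as go      # assumed to provide get_mfnc_stats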
np.savetxt(fnc_corr_outfile, fnc_corr, fmt='%1.5f', delimiter='\t')
fnc_corr_z_outfile = os.path.join(datadir, fnc_corr_z_out)
np.savetxt(fnc_corr_z_outfile, fnc_corr_z, fmt='%1.5f', delimiter='\t')
fnc_lag_outfile = os.path.join(datadir, fnc_lag_out)
np.savetxt(fnc_lag_outfile, fnc_lag, fmt='%1.2f', delimiter='\t')
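
# The *_z matrix is presumably the Fisher z-transform of the correlations (an
# assumption based on the variable names); if so, numpy computes it directly:
#     fnc_corr_z = np.arctanh(fnc_corr)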

## Run group analysis
#######################
exists, resultsdir = gu.make_dir(datadir, 'randomise')
resultsglob = os.path.join(datadir, 'FNCtb_*.csv')
result_files = glob(resultsglob)
for fnc_data_file in result_files:
    fnc_data = np.genfromtxt(fnc_data_file, dtype=float, delimiter=None)  # whitespace-delimited; handles the tab-separated files saved above
    pth, fname, ext = gu.split_filename(fnc_data_file)
    fnc_img_fname = os.path.join(resultsdir, fname + '.nii.gz')
    fnc_saveimg = gu.save_img(fnc_data, fnc_img_fname)
    rand_basename = os.path.join(resultsdir, fname)
    p_uncorr_list, p_corr_list = ga.randomise(
        fnc_saveimg, rand_basename, des_file, con_file)
    uncorr_results = ga.get_results(p_uncorr_list)
    corr_results = ga.get_results(p_corr_list)
           
    fdr_results = {}
    for conname in sorted(uncorr_results):
        fdr_corr_arr = ga.multi_correct(uncorr_results[conname])
        fdr_results[conname] = gu.square_from_combos(fdr_corr_arr, nnodes)
        
        outfile = os.path.join(resultsdir, 
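
    # The snippet is truncated above; one plausible continuation writes each
    # FDR-corrected matrix out per contrast (the filename pattern is a guess):
    #     for conname, mat in fdr_results.items():
    #         outfile = os.path.join(resultsdir, fname + '_' + conname + '_fdr.txt')
    #         np.savetxt(outfile, mat, fmt='%1.5f', delimiter='\t')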
Example #2
    modeldir = '/home/jagust/rsfmri_ica/GIFT/models/Old'
    des_file = os.path.join(modeldir, 'Covariate_Old_log_demeaned.mat')
    con_file = os.path.join(modeldir, 'Covariate_Old_log_demeaned.con')
    resultsglob = '*mancovan_preproc.csv'
    ##############################################################


    ## Run group analysis with randomise
    ####################################
    exists, resultsdir = gu.make_dir(basedir, 'randomise')
    result_files = glob(os.path.join(datadir,resultsglob))
    for data_file in result_files:
        data = np.genfromtxt(data_file, names=None, dtype=float, delimiter=',')
        pth, fname, ext = gu.split_filename(data_file)
        img_fname = os.path.join(resultsdir, fname + '.nii.gz')
        saveimg = gu.save_img(data, img_fname)
        rand_basename = os.path.join(resultsdir, fname)
        p_uncorr_list, p_corr_list = ga.randomise(
            saveimg, rand_basename, des_file, con_file)
        uncorr_results = ga.get_results(p_uncorr_list)
        corr_results = ga.get_results(p_corr_list)
               
        fdr_results = {}
        for conname in sorted(uncorr_results):
            fdr_corr_arr = ga.multi_correct(uncorr_results[conname])
            fdr_results[conname] = gu.square_from_combos(fdr_corr_arr, nnodes)
            
            outfile = os.path.join(resultsdir, 
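
# gu.save_img presumably packs the subjects-by-edges array into a 4D NIfTI so
# FSL randomise can treat each connection as a voxel; a minimal nibabel sketch
# (the exact shape convention is an assumption, not taken from the source):
import nibabel as nib

def save_img_sketch(data, fname):
    nsubs, nedges = data.shape
    vol = data.T.reshape(nedges, 1, 1, nsubs)   # x-axis = edges, t-axis = subjects
    img = nib.Nifti1Image(vol, affine=np.eye(4))
    img.to_filename(fname)
    return fname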
Example #3
# Get z-scored correlation matrix
mfnc_zcorr = go.get_mfnc_stats(infile)
# Save out a text file of the z-scored correlations
mfnc_zcorr_outfile = os.path.join(datadir, mfnc_zcorr_out)
np.savetxt(mfnc_zcorr_outfile, mfnc_zcorr, fmt='%1.5f', delimiter='\t')

## Run group analysis
#######################
exists, resultsdir = gu.make_dir(datadir, 'randomise')
resultsglob = os.path.join(datadir, 'mfnc_zcorr.csv')
result_files = glob(resultsglob)
for mfnc_data_file in result_files:
    mfnc_data = np.genfromtxt(mfnc_data_file, dtype=float, delimiter=None)  # whitespace-delimited; handles the tab-separated file saved above
    pth, fname, ext = gu.split_filename(mfnc_data_file)
    mfnc_img_fname = os.path.join(resultsdir, fname + '.nii.gz')
    mfnc_saveimg = gu.save_img(mfnc_data, mfnc_img_fname)
    rand_basename = os.path.join(resultsdir, fname)
    p_uncorr_list, p_corr_list = ga.randomise(
        mfnc_saveimg, rand_basename, des_file, con_file)
    uncorr_results = ga.get_results(p_uncorr_list)
    corr_results = ga.get_results(p_corr_list)
           
    fdr_results = {}
    for conname in sorted(uncorr_results):
        fdr_corr_arr = ga.multi_correct(uncorr_results[conname])
        fdr_results[conname] = gu.square_from_combos(fdr_corr_arr, nnodes)
        
        outfile = os.path.join(resultsdir, 
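
# ga.randomise presumably shells out to FSL's randomise with these design and
# contrast files; a plausible underlying call (the permutation count and exact
# flags are illustrative, not confirmed by the source):
import subprocess
subprocess.check_call(['randomise',
                       '-i', mfnc_img_fname,   # 4D input image
                       '-o', rand_basename,    # output basename
                       '-d', des_file,         # design matrix (.mat)
                       '-t', con_file,         # contrast file (.con)
                       '-n', '5000',           # number of permutations
                       '-x',                   # voxelwise corrected p-value images
                       '--uncorrp'])           # also write uncorrected p-value images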
Example #4
            allsub_array[i] = sub_stat
        outname = '_'.join(['dFNC', measure_name, stat_name]) + '.csv'
        outfile = os.path.join(datadir, outname)
        np.savetxt(outfile, allsub_array, fmt='%1.5f', delimiter='\t')
        

## Run group analysis with randomise
####################################
exists, resultsdir = gu.make_dir(datadir, 'randomise')
resultsglob = os.path.join(datadir, 'dFNC_*.csv')
result_files = glob(resultsglob)
for dfnc_data_file in result_files:
    dfnc_data = np.genfromtxt(dfnc_data_file, dtype=float, delimiter=None)  # whitespace-delimited; handles the tab-separated files saved above
    pth, fname, ext = gu.split_filename(dfnc_data_file)
    dfnc_img_fname = os.path.join(resultsdir, fname + '.nii.gz')
    dfnc_saveimg = gu.save_img(dfnc_data, dfnc_img_fname)
    rand_basename = os.path.join(resultsdir, fname)
    p_uncorr_list, p_corr_list = ga.randomise(
        dfnc_saveimg, rand_basename, des_file, con_file)
    uncorr_results = ga.get_results(p_uncorr_list)
    corr_results = ga.get_results(p_corr_list)
           
    fdr_results = {}
    for conname in sorted(uncorr_results):
        fdr_corr_arr = ga.multi_correct(uncorr_results[conname])
        fdr_results[conname] = gu.square_from_combos(fdr_corr_arr, nnodes)
        
        outfile = os.path.join(resultsdir,
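
# ga.multi_correct and gu.square_from_combos are presumably an FDR correction
# over the vector of node-pair p-values, followed by repacking that vector into
# a symmetric nnodes x nnodes matrix; minimal sketches under those assumptions:
from statsmodels.stats.multitest import multipletests

def multi_correct_sketch(pvals, alpha=0.05):
    # Benjamini-Hochberg FDR across all node-pair tests (the method is a guess)
    reject, p_fdr, _, _ = multipletests(pvals, alpha=alpha, method='fdr_bh')
    return p_fdr

def square_from_combos_sketch(vec, nnodes):
    # Unpack a length-C(nnodes, 2) vector of upper-triangle values into a
    # symmetric square matrix with zeros on the diagonal
    mat = np.zeros((nnodes, nnodes))
    iu = np.triu_indices(nnodes, k=1)
    mat[iu] = vec
    return mat + mat.T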