Example 1
File: parse.py Project: ctw/myhddm
def get_emp_SE(data, code_type):

    import numpy as np
    import pandas as pd
    from scipy import stats

    allcor = data[data['acc'].isin([1])]
    cor_pivot = pd.pivot_table(allcor,
                               values='rt',
                               columns=['stim', 'cue'],
                               index=['subj_idx'],
                               aggfunc=np.average)
    acc_pivot = pd.pivot_table(data,
                               values='acc',
                               columns=['stim', 'cue'],
                               index=['subj_idx'],
                               aggfunc=np.average)
    # Get empirical RT S.E.M.s
    sem_rt = []
    for img, cue in cor_pivot.columns:
        x = stats.sem(cor_pivot[img][cue])
        sem_rt.append(x)

    # Get empirical ACCURACY S.E.M.s
    sem_acc = []
    for img, cue in acc_pivot.columns:
        x = stats.sem(acc_pivot[img][cue])
        sem_acc.append(x)

    if code_type == 'HNL':
        face_emp_acc_SE = sem_acc[:3]
        house_emp_acc_SE = sem_acc[3:]
        face_emp_rts_SE = sem_rt[:3]
        house_emp_rts_SE = sem_rt[3:]
    else:
        face_emp_acc_SE = sem_acc[:5]
        house_emp_acc_SE = sem_acc[5:]
        face_emp_rts_SE = sem_rt[:5]
        house_emp_rts_SE = sem_rt[5:]

    sem_list = [face_emp_acc_SE, house_emp_acc_SE, face_emp_rts_SE, house_emp_rts_SE]

    return sem_list
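Every example on this page ultimately calls scipy.stats.sem, which by default computes the sample standard deviation (ddof=1) divided by the square root of the number of observations, along axis 0. A minimal self-contained check of that equivalence, on made-up data:

import numpy as np
from scipy import stats

x = np.array([1.0, 2.0, 3.0, 4.0])
manual = np.std(x, ddof=1) / np.sqrt(len(x))  # classic SEM formula
assert np.isclose(stats.sem(x), manual)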
Example 2
def split_mean(idx, op, bin_len=100):
    rs = np.array([op[j][idx] for j in range(len(op))])
    rs = np.dstack(np.split(rs, rs.shape[1] // bin_len, axis=1))  # floor division keeps the section count an int
    rs = rs.mean(axis=1)
    rs_mean = rs.mean(axis=0)
    rs_sem = stats.sem(rs, axis=0)
    return rs_mean, rs_sem
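split_mean appears to expect op to be a sequence of per-run records indexed by idx; it chops the time axis into bins of bin_len, averages within each bin, and returns the across-run mean and SEM per bin. A hypothetical call on synthetic data (the 'reward' key is illustrative, not from the original project):

import numpy as np
from scipy import stats

op = [{'reward': np.random.randn(300)} for _ in range(5)]  # 5 runs x 300 steps
rs_mean, rs_sem = split_mean('reward', op)  # each has shape (3,): one value per 100-step bin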
Example 3
def t_single_analysis(data, u, X, alpha=0.05):
    from scipy.stats import t
    df = int(len(data)) - 1
    ttest, pval = ttest_1samp(data, u)  # t value and p value
    sample_mean = data.mean()  # sample mean
    se = stats.sem(data)  # standard error of the mean
    cha = sample_mean - u
    t_crit = t.ppf(1 - alpha / 2, df)  # two-sided critical value
    lower = cha - t_crit * se  # lower bound of the CI of the difference
    upper = cha + t_crit * se  # upper bound of the CI of the difference
    alpha_range = "{:.0f}".format((1 - alpha) * 100)
    return {
        "title": "One-sample t-test",
        "row": X,
        "col": [
            "t value", "degrees of freedom", "p value",
            "reject null hypothesis", "mean difference",
            "lower bound of the {}% CI of the difference".format(alpha_range),
            "upper bound of the {}% CI of the difference".format(alpha_range)
        ],
        "data": [[
            "{:.4f}".format(ttest), "{}".format(df), "{:.4f}".format(pval),
            str(pval < alpha), "{:.4f}".format(sample_mean - u),
            "{:.4f}".format(lower), "{:.4f}".format(upper)
        ]],
        "remarks":
        "Note: for 'reject null hypothesis', False means the null hypothesis is not rejected, True means it is rejected."
    }
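For reference, the same interval can be obtained directly from scipy.stats.t.interval, which bundles the critical value and the margin into one call; a short sketch on made-up data:

import numpy as np
from scipy import stats

data = np.array([5.1, 4.9, 5.3, 5.0, 4.8])
u = 5.0
df = len(data) - 1
lower, upper = stats.t.interval(0.95, df, loc=data.mean() - u, scale=stats.sem(data))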
Example 5
def continue_sampling(self, matrix):
    self.matrix = matrix
    print('more sampling')
    if matrix.profile_dict == {}:
        return True
    for profile in self.target_set:
        for role, strategies in profile.items():
            for strategy in strategies.keys():
                if stats.sem(matrix.getPayoffData(profile, role, strategy)) >= self.standard_err_threshold:
                    return True
    return False
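The method above keeps sampling until the standard error of every payoff estimate falls below a threshold. The same stopping rule in isolation, stripped of the matrix/profile machinery (a sketch, not the original class):

import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
threshold = 0.05
samples = []
while len(samples) < 2 or stats.sem(samples) >= threshold:
    samples.append(rng.normal())  # draw until the SEM is small enough
print(len(samples), stats.sem(samples))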
Example 6
File: sdt.py Project: ctw/myhddm
def plot_params(data):

    from scipy import stats

    countsdf = counts(data)
    c, d = get_params(countsdf)

    sem_crit = []
    sem_dp = []
    for cond in c.columns:
        se_criterion = stats.sem(c[cond])
        se_dprime = stats.sem(d[cond])

        sem_crit.append(se_criterion)
        sem_dp.append(se_dprime)

    # the condition means do not depend on the loop variable
    cmeans = c.describe().loc['mean', :].values
    dmeans = d.describe().loc['mean', :].values

    x = np.array([1, 2, 3])
    fig_c, ax_c = plt.subplots(1)
    fig_d, ax_d = plt.subplots(1)

    plotc = ax_c.errorbar(x, cmeans, yerr=sem_crit, elinewidth=2.5, ecolor='r', color='k', lw=4.0)
    plotd = ax_d.errorbar(x, dmeans, yerr=sem_dp, elinewidth=2.5, ecolor='r', color='k', lw=4.0)

    ax_list = [ax_c, ax_d]
    for a in ax_list:
        a.set_xlim(0.5, 3.5)
        a.set_xticks([1, 2, 3])
        a.set_xticklabels(['80H', '50N', '80F'], fontsize=16)
        a.set_xlabel("Prior Probability Cue", fontsize=20)

    ax_c.set_ylabel("Criterion (c)", fontsize=20)
    ax_d.set_ylabel("Discriminability (d')", fontsize=20)

    #fig_c.savefig("criterion.jpeg", format='jpeg', dpi=400)
    #fig_d.savefig("dprime.jpeg", format='jpeg', dpi=400)
    fig_c.savefig("criterion.png", format='png', dpi=500)
    fig_d.savefig("dprime.png", format='png', dpi=500)
Example 8
def getFourMoments(sequence, ax=1):
    import numpy as np
    from scipy.stats import skew, kurtosis, sem
    from scipy.stats.mstats import mquantiles

    finalArray = [
        np.mean(sequence, axis=ax),
        np.var(sequence, axis=ax),
        skew(sequence, axis=ax),
        kurtosis(sequence, axis=ax),
        sem(sequence, axis=ax),
    ]
    if ax is not None:
        finalArray = np.array(finalArray)
        finalArray = finalArray.T
        return np.concatenate((finalArray, np.array(mquantiles(sequence, axis=ax))), axis=ax)
    finalArray.extend(mquantiles(sequence, axis=ax))
    return np.array(finalArray)
Example 10
def plot_line(fpath, rois, ylabel, n_row, n_col):
    import numpy as np
    import pandas as pd
    from scipy.stats import sem
    from matplotlib import pyplot as plt

    # inputs
    Hemis = ('L', 'R')

    # load
    df = pd.read_csv(fpath)
    ages = np.array(df['age in years'])
    age_uniq = np.unique(ages)

    rois_without_hemi = ['_'.join(i.split('_')[1:]) for i in rois]
    rois_uniq = np.unique(rois_without_hemi)
    max_row_idx = int((len(rois_uniq) - 1) / n_col)
    _, axes = plt.subplots(n_row, n_col)
    if axes.shape != (n_row, n_col):
        axes = axes.reshape((n_row, n_col))
    for i, roi_without_hemi in enumerate(rois_uniq):
        row_idx = int(i / n_col)
        col_idx = i % n_col
        ax = axes[row_idx, col_idx]
        for Hemi in Hemis:
            roi = f'{Hemi}_{roi_without_hemi}'
            if roi not in rois:
                continue
            meas_vec = np.array(df[roi])
            ys = []
            yerrs = []
            for age in age_uniq:
                meas_tmp = meas_vec[ages == age]
                ys.append(np.mean(meas_tmp))
                yerrs.append(sem(meas_tmp))
            ax.errorbar(age_uniq, ys, yerrs, label=roi)
        if col_idx == 0:
            ax.set_ylabel(ylabel)
        if row_idx == max_row_idx:
            ax.set_xlabel('age in years')
        ax.legend()
    plt.tight_layout()
    plt.show()
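Example 10 (and several of the examples below) follows the same recipe: group a measure by age, take the per-group mean and SEM, and draw errorbar lines. With pandas the grouping step can be written more compactly; a sketch on synthetic data with illustrative column names:

import numpy as np
import pandas as pd
from scipy.stats import sem
from matplotlib import pyplot as plt

df = pd.DataFrame({'age in years': np.repeat([8, 9, 10], 20),
                   'meas': np.random.randn(60)})
grouped = df.groupby('age in years')['meas'].agg(['mean', sem])  # columns: 'mean', 'sem'
plt.errorbar(grouped.index, grouped['mean'], grouped['sem'], label='meas')
plt.legend()
plt.show()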
Example 11
def plot_rsfc_line(hemi='lh'):
    """
    对于每个ROI,在每个年龄,求出ROI和targets连接的均值的
    被试间均值和SEM并画折线图
    """
    import numpy as np
    import pandas as pd
    import pickle as pkl
    from scipy.stats import sem
    from matplotlib import pyplot as plt
    from cxy_hcp_ffa.lib.predefine import roi2color

    # inputs
    rois = ('pFus-face', 'mFus-face')
    subj_info_file = pjoin(dev_dir, 'HCPD_SubjInfo.csv')
    rsfc_file = pjoin(work_dir, f'rsfc_MPM2Cole_{hemi}.pkl')

    # load
    subj_info = pd.read_csv(subj_info_file)
    age_vec = np.array(subj_info['age in years'])
    rsfc_dict = pkl.load(open(rsfc_file, 'rb'))

    # plot
    for roi in rois:
        fc_vec = np.mean(rsfc_dict[roi], 1)
        non_nan_vec = ~np.isnan(fc_vec)
        fcs = fc_vec[non_nan_vec]
        ages = age_vec[non_nan_vec]
        age_uniq = np.unique(ages)
        ys = np.zeros_like(age_uniq, np.float64)
        yerrs = np.zeros_like(age_uniq, np.float64)
        for idx, age in enumerate(age_uniq):
            sample = fcs[ages == age]
            ys[idx] = np.mean(sample)
            yerrs[idx] = sem(sample)
        plt.errorbar(age_uniq, ys, yerrs,
                     label=roi, color=roi2color[roi])
    plt.ylabel('RSFC')
    plt.xlabel('age in years')
    plt.title(hemi)
    plt.legend()
    plt.show()
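Example 11 masks NaNs by hand before averaging; note that scipy.stats.sem can also handle them itself through its nan_policy argument:

import numpy as np
from scipy import stats

a = np.array([1.0, 2.0, np.nan, 4.0])
print(stats.sem(a))                     # nan: NaNs propagate by default
print(stats.sem(a, nan_policy='omit'))  # NaNs are dropped before computing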
Example 12
def plot_line(meas_name='thickness'):
    """
    对于每个ROI,在每个年龄,求出measurement的
    被试间均值和SEM并画折线图
    """
    import pandas as pd
    from scipy.stats import sem
    from matplotlib import pyplot as plt

    # inputs
    figsize = (9, 4)
    cols = ['pFus_{hemi}', 'mFus_{hemi}']
    colors = ('limegreen', 'cornflowerblue')
    hemis = ('lh', 'rh')
    fpath = pjoin(work_dir, f'HCPD_{meas_name}_MPM1_prep_inf.csv')

    # prepare
    data = pd.read_csv(fpath)
    age_name = 'age in years'
    ages = np.array(data[age_name])
    age_uniq = np.unique(ages)

    # plot
    _, axes = plt.subplots(1, len(hemis), figsize=figsize)
    for hemi_idx, hemi in enumerate(hemis):
        ax = axes[hemi_idx]
        for col_idx, col in enumerate(cols):
            col = col.format(hemi=hemi)
            meas_vec = np.array(data[col])
            ys = np.zeros_like(age_uniq, np.float64)
            yerrs = np.zeros_like(age_uniq, np.float64)
            for age_idx, age in enumerate(age_uniq):
                sample = meas_vec[ages == age]
                ys[age_idx] = np.mean(sample)
                yerrs[age_idx] = sem(sample)
            ax.errorbar(age_uniq, ys, yerrs, label=col, color=colors[col_idx])
        ax.legend()
        ax.set_xlabel(age_name)
        if hemi_idx == 0:
            ax.set_ylabel(meas_name)
    plt.tight_layout()
    plt.show()
Example 13
def main():
    with open("../default_config.json") as f:
        default_config = json.load(f)

    rs, converge_point = run(default_config)

    fig1, ax1 = plt.subplots()
    # ax1.set_ylim(-0.2, 1.05)
    ax1.set_ylabel("Obtained reward")
    ax1.set_xlabel("Episodes")
    ys = rs.mean(axis=1)
    rs_sem = stats.sem(rs, axis=1)
    xs = np.arange(len(ys)) * default_config["log_interval"]
    ax1.plot(xs, ys, color=color_sequence[0])
    plt.fill_between(xs,
                     ys - rs_sem,
                     ys + rs_sem,
                     alpha=0.5,
                     color=color_sequence[0])

    # Plot the convergence points and save it
    fig2, ax2 = plot_utils.plot_confusion_matrix(converge_point.astype(int))
    plt.show()
Example 14
def calc_mean_sem():
    import os
    import pickle
    import numpy as np
    import pandas as pd

    from os.path import join as pjoin
    from scipy.stats import sem

    stru_name = 'thickness'
    project_dir = '/nfs/s2/userhome/chenxiayu/workingdir/study/FFA_clustering'
    stru_dir = pjoin(project_dir,
                     's2_25_zscore/HAC_ward_euclidean/2clusters/structure')
    corr_file = pjoin(stru_dir, 'acti_stru_corrs_{}.pkl'.format(stru_name))
    mean_sem_dir = pjoin(stru_dir, 'mean_sem')
    if not os.path.exists(mean_sem_dir):
        os.makedirs(mean_sem_dir)

    acti_stru_corrs = pickle.load(open(corr_file, 'rb'))
    corr_names = sorted(acti_stru_corrs.keys())
    means = [np.mean(acti_stru_corrs[name]) for name in corr_names]
    sems = [sem(acti_stru_corrs[name]) for name in corr_names]
    df = pd.DataFrame({'names': corr_names, 'means': means, 'sems': sems})
    df.to_csv(pjoin(mean_sem_dir, stru_name), index=False)
Example 15
def sfn(l, msk, myrad, bcast_var):
    # extract training and testing data
    train_data = []
    test_data = []
    d1, d2, d3, ntr = l[0].shape
    nvx = d1 * d2 * d3
    for s in l:
        train_data.append(
            np.reshape(s[:, :, :, :int(ntr / 2)], (nvx, int(ntr / 2))))
        test_data.append(
            np.reshape(s[:, :, :, int(ntr / 2):], (nvx, ntr - int(ntr / 2))))
    # train an srm model
    srm = SRM(bcast_var[0], bcast_var[1])
    srm.fit(train_data)
    # transform test data
    shared_data = srm.transform(test_data)
    for s in range(len(l)):
        shared_data[s] = np.nan_to_num(
            stats.zscore(shared_data[s], axis=1, ddof=1))
    # experiment
    accu = timesegmentmatching_accuracy(shared_data, 6)

    return np.mean(accu), stats.sem(
        accu)  # multiple outputs will be saved as tuples
Example 16
def mean_confidence_interval(data, confidence=0.95):
    a = 1.0 * numpy.array(data)
    mean, se = numpy.mean(a), stats.sem(a)
    h = se * t.ppf((1 + confidence) / 2., len(a) - 1)
    return mean, mean - h, mean + h
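The helper above builds the interval by hand from t.ppf; scipy.stats.t.interval returns both bounds in one call. A quick check that the two routes agree:

import numpy as np
from scipy import stats

data = [2.1, 2.5, 1.9, 2.3, 2.2]
mean, se = np.mean(data), stats.sem(data)
low, high = stats.t.interval(0.95, len(data) - 1, loc=mean, scale=se)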
Example 17
def plot_variogram(data, cov_data, data_bg=None, cov_data_bg=None,
                   norm='normalized', binned=False, color='b'):
    # Correlation (across subjects) between electrodes depending on distance
    # between the electrodes
    electrode_correlations = []
    electrode_correlations_bg = []
    electrode_distances = []
    [electrodes_file, pos_x, pos_y, pos_theta, pos_phi] = \
            read_electrode_locations()
    if data_bg is not None:
        color = 'r'
    for electrode1 in range(data.shape[1]):
        for electrode2 in range(data.shape[1]):
            #if electrode1 <= electrode2:
            #    break
            if norm == 'normalized':
                [coefficient, pvalue] = pearsonr(data[:,electrode1], data[:,electrode2])
                if data_bg is not None:
                    [coefficient_bg, pvalue_bg] = pearsonr(data_bg[:,electrode1], data_bg[:,electrode2])
            elif norm == 'unnormalized':
                coefficient = cov_data[electrode1][electrode2]
                if cov_data_bg is not None:
                    coefficient_bg = cov_data_bg[electrode1][electrode2]
            else:
                print(norm + ' not recognized!!!')

            electrode_correlations.append(coefficient)
            if data_bg is not None:
                electrode_correlations_bg.append(coefficient_bg)
            distance = sqrt(square(pos_x[electrode1] - pos_x[electrode2])
                            + square(pos_y[electrode1] - pos_y[electrode2]))
            electrode_distances.append(distance)

    if binned is False:
        if data_bg is not None:
            pyplot.plot(electrode_distances, electrode_correlations_bg, 'b.')
        pyplot.plot(electrode_distances, electrode_correlations, color+'.')
    elif binned is True:
        (numbers,bins) = histogram(electrode_distances,20)
        corr_means = []
        corr_sems = []
        corr_means_bg = []
        corr_sems_bg = []
        dists = []
        for i in range(len(bins[:-1])):
            corr_bin = []
            corr_bin_bg = []
            for j in range(len(electrode_correlations)):
                if electrode_distances[j] >= bins[i] and electrode_distances[j] < bins[i+1]:
                    corr_bin.append(electrode_correlations[j])
                    if data_bg is not None:
                        corr_bin_bg.append(electrode_correlations_bg[j])
            corr_means.append(mean(corr_bin))
            #corr_means.append(median(corr_bin))
            corr_sems.append(2*sem(corr_bin))
            #corr_sems.append(std(corr_bin))
            dists.append((bins[i+1] - bins[i])/2.0 + bins[i])
            if data_bg is not None:
                corr_means_bg.append(mean(corr_bin_bg))
                #corr_means_bg.append(median(corr_bin_bg))
                corr_sems_bg.append(2*sem(corr_bin_bg))
                #corr_sems_bg.append(std(corr_bin_bg))
        if data_bg is not None:
            pyplot.errorbar(dists, corr_means_bg, yerr=corr_sems_bg,fmt='bo')
        pyplot.errorbar(dists, corr_means, yerr=corr_sems,fmt=color + 'o')
        
    handle = pyplot.gca()

    pyplot.xlabel('Distance between two electrodes')
    if norm == 'normalized':
        pyplot.ylabel('Pearson\'s R Correlation between\ntwo electrodes across subjects')
        pyplot.ylim(-1.1,1.1)
    elif norm == 'unnormalized':
        pyplot.ylabel('Covariance between two\nelectrodes across subjects')

    return handle
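The inner membership loop of the binned branch can be tightened with boolean masks while keeping the same bin edges and the same 2*SEM error bars; a sketch on synthetic distances and correlations (empty bins would still need guarding, as in the original):

import numpy as np
from scipy.stats import sem

distances = np.random.rand(500)
correlations = np.random.rand(500)
numbers, bins = np.histogram(distances, 20)
corr_means, corr_sems, dists = [], [], []
for lo, hi in zip(bins[:-1], bins[1:]):
    in_bin = (distances >= lo) & (distances < hi)  # same condition as the original loop
    corr_means.append(np.mean(correlations[in_bin]))
    corr_sems.append(2 * sem(correlations[in_bin]))
    dists.append((lo + hi) / 2)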
Example 18
def plot_retest_reliability_corr():
    import numpy as np
    import pickle as pkl
    from scipy.stats import sem
    from nibrain.util.plotfig import auto_bar_width

    # inputs
    figsize = (6.4, 4.8)
    hemis = ('lh', 'rh')
    rois = ('pFus', 'mFus')
    retest_dir = pjoin(proj_dir, 'analysis/s2/1080_fROI/'
                       'refined_with_Kevin/retest')
    atlas_names = ('MPM', 'ROIv3')
    meas2file = {
        'thickness':
        pjoin(retest_dir, 'reliability/thickness_{atlas}_corr_rm-subj.pkl'),
        'myelin':
        pjoin(retest_dir, 'reliability/myelin_{atlas}_corr_rm-subj.pkl'),
        'activ':
        pjoin(retest_dir, 'reliability/activ_{atlas}_corr_rm-subj.pkl'),
        'rsfc':
        pjoin(retest_dir, 'rfMRI/{atlas}_rm-subj_corr.pkl'),
    }
    meas2title = {
        'thickness': 'thickness',
        'myelin': 'myelination',
        'activ': 'face selectivity',
        'rsfc': 'RSFC'
    }
    atlas2color = {
        'MPM': (0.33, 0.33, 0.33, 1),
        'ROIv3': (0.66, 0.66, 0.66, 1)
    }

    # outputs
    out_file = pjoin(work_dir, 'retest_reliabilty_corr.jpg')

    # prepare
    n_hemi = len(hemis)
    n_roi = len(rois)
    n_atlas = len(atlas_names)
    n_meas = len(meas2file)
    x = np.arange(n_roi)

    # plot
    _, axes = plt.subplots(n_meas, n_hemi, figsize=figsize)
    offset = -(n_atlas - 1) / 2
    width = auto_bar_width(x, n_atlas)
    for atlas_idx, atlas_name in enumerate(atlas_names):
        for meas_idx, meas_name in enumerate(meas2file.keys()):
            fpath = meas2file[meas_name].format(atlas=atlas_name)
            data = pkl.load(open(fpath, 'rb'))
            for hemi_idx, hemi in enumerate(hemis):
                ax = axes[meas_idx, hemi_idx]
                ys = np.zeros(n_roi)
                yerrs = np.zeros(n_roi)
                for roi_idx, roi in enumerate(rois):
                    k = f'{hemi}_{roi}'
                    ys[roi_idx] = np.mean(data[k])
                    yerrs[roi_idx] = sem(data[k])
                ax.bar(x + width * offset,
                       ys,
                       width,
                       yerr=yerrs,
                       label=atlas_name,
                       color=atlas2color[atlas_name])
                if atlas_idx == 1:
                    ax.set_title(meas2title[meas_name])
                    # ax.legend()
                    ax.spines['top'].set_visible(False)
                    ax.spines['right'].set_visible(False)
                    ax.set_xticks(x)
                    ax.set_xticklabels(rois)
                    if hemi_idx == 0:
                        ax.set_ylabel('pearson R')
        offset += 1
    plt.tight_layout()
    plt.savefig(out_file)
Example 19
IlaengeMM, IampliV, ItMikros, IdeltatMikros, ITGCdB = np.genfromtxt('impuls.txt', unpack=True)
DlaengeMM, DampliV, DtMikros, DTGCdB = np.genfromtxt('durchschall.txt', unpack=True)
Apeak, AtMikros, AdeltatMikros = np.genfromtxt('auge.txt', unpack=True)
Cpeak, CtMikros = np.genfromtxt('cepstrum.txt', unpack=True)

IlaengeMM = unp.uarray(IlaengeMM, 0.02)
DlaengeMM = unp.uarray(DlaengeMM, 0.02)

Ic = (IlaengeMM*10**(-3)) / (0.5*IdeltatMikros*10**(-6))
Dc = (DlaengeMM*10**(-3)) / (DtMikros*10**(-6))

print(Ic)
print(np.mean(unp.nominal_values(Ic)))
print(np.sqrt(1/7*np.sum((unp.std_devs(Ic))**2)))

Icmean = ufloat(np.mean(unp.nominal_values(Ic)), stats.sem(unp.nominal_values(Ic)))
Dcmean = ufloat(np.mean(unp.nominal_values(Dc)), stats.sem(unp.nominal_values(Dc)))

# Linear regression
def f(x, m, n):
    return x/m + n
paramsI, covarianceI = curve_fit(f, unp.nominal_values(IlaengeMM)*10**(-3), 0.5*IdeltatMikros*10**(-6))
errorsI = np.sqrt(np.diag(covarianceI))
dtI = ufloat(paramsI[1], errorsI[1])
cI = ufloat(paramsI[0], errorsI[0])

print(cI, dtI)

paramsD, covarianceD = curve_fit(f, unp.nominal_values(DlaengeMM)*10**(-3), DtMikros*10**(-6))
errorsD = np.sqrt(np.diag(covarianceD))
dtD = ufloat(paramsD[1], errorsD[1])
Example 20
def plot_bar():
    import numpy as np
    import pandas as pd
    from scipy.stats import sem
    from matplotlib import pyplot as plt
    from nibrain.util.plotfig import auto_bar_width

    # inputs
    gids = (1, 2)
    hemis = ('lh', 'rh')
    rois = ('pFus', 'mFus')
    sessions = ('test', 'retest')
    subj_file_45 = pjoin(proj_dir, 'data/HCP/wm/analysis_s2/'
                         'retest/subject_id')
    subj_file_1080 = pjoin(proj_dir, 'analysis/s2/subject_id')
    ses2gid_file = {
        'test':
        pjoin(
            proj_dir, 'analysis/s2/1080_fROI/refined_with_Kevin/'
            'grouping/group_id_{}.npy'),
        'retest':
        pjoin(
            proj_dir, 'analysis/s2/1080_fROI/refined_with_Kevin/'
            'retest/grouping/group_id_{}.npy')
    }
    data_file = pjoin(work_dir, 'activ-{ses1}_ROI-{ses2}.csv')
    gid2name = {1: 'two-C', 2: 'two-S'}

    # prepare
    n_gid = len(gids)
    n_ses = len(sessions)
    subj_ids_45 = open(subj_file_45).read().splitlines()
    subj_ids_1080 = open(subj_file_1080).read().splitlines()
    retest_idx_in_1080 = [subj_ids_1080.index(i) for i in subj_ids_45]
    ses_hemi2gid_vec = {}
    for ses in sessions:
        for hemi in hemis:
            gid_file = ses2gid_file[ses].format(hemi)
            gid_vec = np.load(gid_file)
            if ses == 'test':
                gid_vec = gid_vec[retest_idx_in_1080]
            ses_hemi2gid_vec[f'{ses}_{hemi}'] = gid_vec

    # plot
    x = np.arange(len(hemis) * len(rois))
    width = auto_bar_width(x, n_gid)
    _, axes = plt.subplots(n_ses, n_ses * n_ses)
    col_idx = -1
    for ses1 in sessions:
        for ses2 in sessions:
            col_idx += 1
            fpath = data_file.format(ses1=ses1, ses2=ses2)
            data = pd.read_csv(fpath)
            for ses3_idx, ses3 in enumerate(sessions):
                ax = axes[ses3_idx, col_idx]
                offset = -(n_gid - 1) / 2
                for gid in gids:
                    y = []
                    yerr = []
                    xticklabels = []
                    for hemi in hemis:
                        gid_vec = ses_hemi2gid_vec[f'{ses3}_{hemi}']
                        gid_idx_vec = gid_vec == gid
                        print(f'#{ses3}_{hemi}_{gid}:', np.sum(gid_idx_vec))
                        for roi in rois:
                            column = f'{hemi}_{roi}'
                            meas_vec = np.array(data[column])[gid_idx_vec]
                            print(np.sum(np.isnan(meas_vec)))
                            meas_vec = meas_vec[~np.isnan(meas_vec)]
                            y.append(np.mean(meas_vec))
                            yerr.append(sem(meas_vec))
                            xticklabels.append(column)
                    ax.bar(x + width * offset,
                           y,
                           width,
                           yerr=yerr,
                           label=gid2name[gid])
                    offset += 1
                ax.set_xticks(x)
                ax.set_xticklabels(xticklabels)
                ax.spines['top'].set_visible(False)
                ax.spines['right'].set_visible(False)
                if col_idx == 0:
                    ax.set_ylabel('face selectivity')
                    if ses3_idx == 0:
                        ax.legend()
                if ses3_idx == 0:
                    fname = os.path.basename(fpath)
                    ax.set_title(os.path.splitext(fname)[0])  # rstrip('.csv') would strip characters, not the suffix
    plt.tight_layout()
    plt.show()
Example 21
from uncertainties import ufloat
from scipy import stats

D_theta, R1, R2, R3, R4, R5 = np.genfromtxt('glas.txt', unpack=True)

T = 1 * 10**-3
lam = 633 * 10**(-9)
theta_0 = 10 * np.pi / 180
theta = D_theta * (np.pi) / 180


def n(lam, N, T, t):
    return (1 - (lam * N) / (2 * T * 0.175 * t))**(-1)


n1_mean = ufloat(np.mean(n(lam, R1, T, theta)), stats.sem(n(lam, R1, T,
                                                            theta)))
n2_mean = ufloat(np.mean(n(lam, R2, T, theta)), stats.sem(n(lam, R2, T,
                                                            theta)))
n3_mean = ufloat(np.mean(n(lam, R3, T, theta)), stats.sem(n(lam, R3, T,
                                                            theta)))
n4_mean = ufloat(np.mean(n(lam, R4, T, theta)), stats.sem(n(lam, R4, T,
                                                            theta)))
n5_mean = ufloat(np.mean(n(lam, R5, T, theta)), stats.sem(n(lam, R5, T,
                                                            theta)))

n1 = n(lam, R1, T, theta)
n2 = n(lam, R2, T, theta)
n3 = n(lam, R3, T, theta)
n4 = n(lam, R4, T, theta)
n5 = n(lam, R5, T, theta)
Example 22
import json
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats

data = json.load(open('./tally.json'))

weights = list(np.arange(1 / 3, 3.01, 0.05))
xs = list()
ys = list()
std = list()
out = list()

for i in range(len(weights)):
    weight = weights[i]
    nums = data[str(i)]
    ratios = [n[0] / n[1] for n in nums]

    out.append((weight, np.median(ratios), np.percentile(ratios, 25),
                np.percentile(ratios, 75)))

    xs.append(weight)
    ys.append(np.median(ratios))

    std.append(stats.sem(ratios))

# print(std)
plt.errorbar(np.log(xs), np.log(ys), yerr=std)
plt.xlabel('w1/w2')
plt.ylabel('# beige crate/purple circle')
plt.show()

np.savetxt('weighted_or.txt', out, fmt='%.4f')
Example 23
    #arena data input
    data2 = open("%d.cfg" % (number), 'r')
    diameter, center_x, center_y, radius, lstripex, lstripey, rstripex, rstripey = [
        int(l.split('=')[1]) for l in data2 if len(l.split('=')) > 1
    ]

    #-------------Calculate fixation index in part and add to array---------
    fixation_index_scope = []
    for j in range(26):
        coordinate_x_scope = []
        coordinate_y_scope = []
        for k in range(len(time_checked)):
            if j * 10 < time_checked[k] <= j * 10 + 20:
                coordinate_x_scope.append(coordinate_x_checked[k])
                coordinate_y_scope.append(coordinate_y_checked[k])
        fixation_index_scope.append(
            distribution_method(coordinate_x_scope, coordinate_y_scope,
                                center_x, center_y))
    if i == 0:
        fixation_variation = np.array([fixation_index_scope])
    else:
        fixation_variation = np.vstack(
            (fixation_variation, fixation_index_scope))

#-------------Calculate overall fixation index---------
fixation_index_average = np.mean(fixation_variation, axis=0)
fixation_index_std = stats.sem(fixation_variation, axis=0)

#-------------draw graph-------------------------------
variation_graph(fixation_index_average, fixation_index_std, 26, graph_name)
Example 24
# Convert to a wavelength shift
def del_lambda(del_s, delta_s, Disp):
    return 0.5 * (del_s / delta_s) * Disp


def mg(dl, B, lam):
    return const.h * const.c / (lam**2 * B * const.value("Bohr magneton")) * dl


def B(I):
    return (x_3 * I**3 + x_2 * I**2 + x_1 * I + x_0) * 10**(-3)


lam_Rot = del_lambda(dels_Rot_0, ds_Rot_0, Disp_Geb_Ro)
lam_Rot_mean = ufloat(np.mean(lam_Rot), stats.sem(lam_Rot))
lam_Blau0 = del_lambda(dels_Blau_0, ds_Blau_0, Disp_Geb_Bl)
lam_Blau0_mean = ufloat(np.mean(lam_Blau0), stats.sem(lam_Blau0))
lam_Blau90 = del_lambda(dels_Blau_90, ds_Blau_90, Disp_Geb_Bl)
lam_Blau90_mean = ufloat(np.mean(lam_Blau90), stats.sem(lam_Blau90))

print(lam_Rot_mean, lam_Blau0_mean, lam_Blau90_mean)

mg_Rot = mg(lam_Rot_mean, B(I_Rot_0), 644 * 10**(-9))
mg_blau_0 = mg(lam_Blau0_mean, B(I_Blau_0), 480 * 10**(-9))
mg_blau_90 = mg(lam_Blau90_mean, B(I_Blau_90), 480 * 10**(-9))
print(mg_Rot, mg_blau_0, mg_blau_90)
print(B(I_Rot_0), B(I_Blau_0), B(I_Blau_90))

np.savetxt('TabelleRot.txt',
           np.column_stack([ds_Rot_0, dels_Rot_0, lam_Rot * 10**12]),
Example 25
print('A =', Params2[0], '±', Errors2[0])
print('B =', Params2[1], '±', Errors2[1])
print('C =', Params2[2], '±', Errors2[2])
print('D =', Params2[3], '±', Errors2[3])
print("")
# Compute the quotients (part 5c)
Quotienten1 = f(zeiten, A1, B1, C1)
Quotienten2 = f(zeiten, A2, B2, C2)
print("Part 5c), quotients := values of the derivative at the times t")
print("Times: ", zeiten)
print("Quotients for T1: ", Quotienten1)
print("Quotients for T2: ", Quotienten2)
print("")

# Compute the coefficients of performance (part 5d)
Leistung = ufloat(np.mean(N), stats.sem(N))
Temperatur1 = f3(zeiten, A1, B1, C1, D1)
# Attempt at rounding, doesn't work... T1 = unp.around(f3(zeiten, A1, B1, C1, D1), decimals=3)
Temperatur2 = f3(zeiten, A2, B2, C2, D2)
nuemp = (cmapperat + cmwasser) * Quotienten1 * (1 / Leistung)
nuid = Temperatur1 / (Temperatur1 - Temperatur2)
print("Aufgabe 5d), Güteziffern:")
print("Mittel der Leistung: ", Leistung)
print("Zeiten: ", zeiten)
print("T1 für die zeiten: ", Temperatur1)
print("T2 für die zeiten: ", Temperatur2)
print("Real: ", nuemp)
print("Ideal: ", nuid)
print("Mittelwert von N: ", Leistung)
print("")
Example 26
plt.plot(U1, logI61, 'r.', label="Fitted values")
plt.axvline(x=U1[8], ymin=0, ymax=1, ls=':', color='k')
plt.plot(U[9:12], logI6[9:12], 'g.', label="Non-fitted values")
plt.plot(x2, f(x2, *paramsA), 'b-', label="Regression")
plt.legend(loc="best")
plt.tight_layout()
plt.savefig("anlauf.pdf")

# Part d)
Uh, Ih = np.genfromtxt('leistung.txt', unpack=True)
NWL = 0.9
sigma = 5.7 * 10**(-12)
g = 0.32
eta = 0.28
T2 = ((Uh * Ih - NWL) / (sigma * g * eta))**(1 / 4)
T2mean = ufloat(np.mean(T2), stats.sem(T2))
print("")
print("Aufgabenteil d): ")
print(T2)
print(T2mean)

# Part e)
e0Phi1 = -np.log((Is1 * (const.h)**3) /
                 (4 * g * 10**(-4) * np.pi * const.e * const.m_e *
                  (const.k)**2 * T2[0]**2)) * const.k * T2[0] / const.e
e0Phi2 = -np.log((Is2 * (const.h)**3) /
                 (4 * g * 10**(-4) * np.pi * const.e * const.m_e *
                  (const.k)**2 * T2[1]**2)) * const.k * T2[1] / const.e
e0Phi3 = -np.log((Is3 * (const.h)**3) /
                 (4 * g * 10**(-4) * np.pi * const.e * const.m_e *
                  (const.k)**2 * T2[2]**2)) * const.k * T2[2] / const.e
Example 27
deltad, z_well = np.genfromtxt('wellenlaenge.txt', unpack=True)
deltaP, z_Luft, z_CO2 = np.genfromtxt('brechnungsind.txt', unpack=True)

uebers = 5.017
b = 50 * 10**(-3)
p0 = 1.0132
T0 = 273.15
T = 293.15
l = 665 * 10**(-9)

deltaP = p0 - deltaP

deltad = deltad * 10**(-3) / uebers

wl = 2 * deltad / z_well
lambdaLaser = ufloat(np.mean(wl), stats.sem(wl))
print("Lambda:", lambdaLaser * 10**9)

deltanLuft = z_Luft * l / (2 * b)
deltanCO2 = z_CO2 * l / (2 * b)

pstrich = p0 - deltaP

nLuft = 1 + deltanLuft * (T / T0) * (p0 / pstrich)
nCO2 = 1 + deltanCO2 * (T / T0) * (p0 / pstrich)

nCO2Mean = ufloat(np.mean(unp.nominal_values(nCO2)), stats.sem(nCO2))
nLuftMean = ufloat(np.mean(unp.nominal_values(nLuft)), stats.sem(nLuft))
print(nLuftMean)
print(nCO2Mean)
Example 28
gdU = (gdU2 - gdU1) * 10**(-3)  # V

ndR = ndR1 - ndR2  # Ohm
ndU = (ndU2 - ndU1) * 10**(-3)  # V

# Compute Q_real
dyQ = dyM / (dyL * dyRho) * 100  # mm^2
gdQ = gdM / (gdL * gdRho) * 100  # mm^2
ndQ = ndM / (ndL * ndRho) * 100  # mm^2

# Using the resistance method
dyChiR = 2 * dyR / R3 * F / (dyQ)
gdChiR = 2 * gdR / R3 * F / (gdQ)
ndChiR = 2 * ndR / R3 * F / (ndQ)

dyChiRmean = ufloat(np.mean(dyChiR), stats.sem(dyChiR))
gdChiRmean = ufloat(np.mean(gdChiR), stats.sem(gdChiR))
ndChiRmean = ufloat(np.mean(ndChiR), stats.sem(ndChiR))

print("")
print("Q von Nd: ", ndQ)
print("Q von Gd: ", gdQ)
print("Q von Dy: ", dyQ)
print("")
print("Chi von Dy mit R: ", dyChiRmean)
print("Chi von Gd mit R: ", gdChiRmean)
print("Chi von Nd mit R: ", ndChiRmean)

# Using the voltage method
dyChiU = 4 * F / dyQ * dyU / 0.9
gdChiU = 4 * F / gdQ * gdU / 0.9
Example 29
def test(housing):
    # Inspect the data's basic characteristics, optionally alongside visualization
    housing.info()
    housing.describe()
    # The data follow a long-tailed distribution
    # housing.hist(bins=50, figsize=(20, 15))
    train_set, test_set = train_test_split(housing,
                                           test_size=0.2,
                                           random_state=42)

    # Convert income into categorical labels
    housing["income_cat"] = pd.cut(housing["median_income"],
                                   bins=[0., 1.5, 3.0, 4.5, 6., np.inf],
                                   labels=[1, 2, 3, 4, 5])
    # housing["income_cat"].hist()

    # Split the data
    split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
    for train_index, test_index in split.split(housing, housing["income_cat"]):
        strat_train_set = housing.loc[train_index]
        strat_test_set = housing.loc[test_index]

    for set_ in (strat_train_set, strat_test_set):
        set_.drop("income_cat", axis=1, inplace=True)

    housing = strat_train_set.copy()
    '''
    # Note: when plotting, the x and y kwargs select the corresponding columns
    # housing.plot(kind="scatter", x="longitude", y="latitude", alpha=0.1)
    # radius s: population; color c: house price
    housing.plot(kind="scatter", x="longitude", y="latitude", alpha=0.4,
                 s=housing["population"] / 100, label="population", figsize=(10, 7),
                 c="median_house_value", cmap=plt.get_cmap("jet"), colorbar=True,
                 )
    plt.legend()
    '''

    # Correlations
    corr_matrix = housing.corr()
    corr_matrix["median_house_value"].sort_values(ascending=False)

    attributes = [
        "median_house_value", "median_income", "total_rooms",
        "housing_median_age"
    ]
    # scatter_matrix(housing[attributes], figsize=(12, 8))
    # housing.plot(kind="scatter", x="median_income", y="median_house_value",alpha=0.1)

    housing = strat_train_set.drop("median_house_value", axis=1)
    housing_labels = strat_train_set["median_house_value"].copy()

    # Data cleaning
    # Options for handling NA values
    housing.dropna(subset=["total_bedrooms"])  # option 1
    housing.drop("total_bedrooms", axis=1)  # option 2
    median = housing["total_bedrooms"].median()  # option 3
    housing["total_bedrooms"].fillna(median, inplace=True)

    # sklearn's built-in way to fill missing values
    imputer = SimpleImputer(strategy="median")
    housing_num = housing.drop("ocean_proximity", axis=1)
    imputer.fit(housing_num)
    imputer.statistics_
    X = imputer.transform(housing_num)
    housing_tr = pd.DataFrame(X,
                              columns=housing_num.columns,
                              index=housing_num.index)

    # Encoding: OrdinalEncoder is the matrix version of LabelEncoder and can encode several features at once
    housing_cat = housing[["ocean_proximity"]]
    ordinal_encoder = OrdinalEncoder()
    housing_cat_encoded = ordinal_encoder.fit_transform(housing_cat)

    cat_encoder = OneHotEncoder()
    housing_cat_1hot = cat_encoder.fit_transform(housing_cat)

    attr_adder = CombinedAttributesAdder(add_bedrooms_per_room=False)
    housing_extra_attribs = attr_adder.transform(housing.values)

    num_pipeline = Pipeline([
        ('imputer', SimpleImputer(strategy="median")),
        ('attribs_adder', CombinedAttributesAdder()),
        ('std_scaler', StandardScaler()),
    ])
    housing_num_tr = num_pipeline.fit_transform(housing_num)

    # ColumnTransformer applies transformers column-by-column on a pandas DataFrame
    # Note: list(housing_num) returns housing_num's column names
    num_attribs = list(housing_num)
    cat_attribs = ["ocean_proximity"]

    # The DataFrame's column (attribute) names are passed in so each transformer is applied to the right columns
    full_pipeline = ColumnTransformer([
        ("num", num_pipeline, num_attribs),
        ("cat", OneHotEncoder(), cat_attribs),
    ])

    # The one-hot + numeric output here is a dense matrix; every category becomes its own 0/1 column.
    # The parts could also be transformed separately and assembled with sparse.hstack (as done for Titanic)
    housing_prepared = full_pipeline.fit_transform(housing)

    lin_reg = LinearRegression()
    lin_reg.fit(housing_prepared, housing_labels)
    some_data = housing.iloc[:5]
    some_labels = housing_labels.iloc[:5]
    some_data_prepared = full_pipeline.transform(some_data)
    print("Predictions:", lin_reg.predict(some_data_prepared))
    print("Labels:", list(some_labels))

    housing_predictions = lin_reg.predict(housing_prepared)
    lin_mse = mean_squared_error(housing_labels, housing_predictions)
    lin_rmse = np.sqrt(lin_mse)
    print(lin_rmse)

    tree_reg = DecisionTreeRegressor()
    tree_reg.fit(housing_prepared, housing_labels)
    housing_predictions = tree_reg.predict(housing_prepared)
    tree_mse = mean_squared_error(housing_labels, housing_predictions)
    tree_rmse = np.sqrt(tree_mse)
    # The loss here is 0: with no held-out data, the decision tree overfits severely
    print(tree_rmse)

    # With K-fold cross-validation the decision tree does worse than before; cv is the number of folds
    scores = cross_val_score(tree_reg,
                             housing_prepared,
                             housing_labels,
                             scoring="neg_mean_squared_error",
                             cv=10)
    tree_rmse_scores = np.sqrt(-scores)
    print(tree_rmse_scores)

    # cross_val_score's scoring has the opposite sign of the usual loss
    lin_scores = cross_val_score(lin_reg,
                                 housing_prepared,
                                 housing_labels,
                                 scoring="neg_mean_squared_error",
                                 cv=10)

    lin_rmse_scores = np.sqrt(-lin_scores)
    display_scores(lin_rmse_scores)

    forest_reg = RandomForestRegressor()
    forest_reg.fit(housing_prepared, housing_labels)
    forest_scores = cross_val_score(forest_reg,
                                    housing_prepared,
                                    housing_labels,
                                    scoring="neg_mean_squared_error",
                                    cv=10)

    forest_rmse_scores = np.sqrt(-forest_scores)
    display_scores(forest_rmse_scores)

    # Automatic hyperparameter search
    param_grid = [
        {
            'n_estimators': [3, 10, 30],
            'max_features': [2, 4, 6, 8]
        },
        {
            'bootstrap': [False],
            'n_estimators': [3, 10],
            'max_features': [2, 3, 4]
        },
    ]
    forest_reg = RandomForestRegressor()
    grid_search = GridSearchCV(forest_reg,
                               param_grid,
                               cv=5,
                               scoring='neg_mean_squared_error',
                               return_train_score=True)
    grid_search.fit(housing_prepared, housing_labels)

    cvres = grid_search.cv_results_
    for mean_score, params in zip(cvres["mean_test_score"], cvres["params"]):
        print(np.sqrt(-mean_score), params)

    final_model = grid_search.best_estimator_
    X_test = strat_test_set.drop("median_house_value", axis=1)
    y_test = strat_test_set["median_house_value"].copy()
    X_test_prepared = full_pipeline.transform(X_test)
    final_predictions = final_model.predict(X_test_prepared)
    final_mse = mean_squared_error(y_test, final_predictions)
    final_rmse = np.sqrt(final_mse)
    print(final_rmse)

    # 95% confidence interval
    # The usual t statistic is t = (estimate - hypothesized value) / standard error; it follows a t distribution with (n-2) degrees of freedom
    confidence = 0.95
    squared_errors = (final_predictions - y_test)**2
    np.sqrt(
        stats.t.interval(confidence,
                         len(squared_errors) - 1,
                         loc=squared_errors.mean(),
                         scale=stats.sem(squared_errors)))
Example 30
def plot_development_pattern_corr():
    import numpy as np
    import pandas as pd
    from scipy.stats import sem
    from cxy_hcp_ffa.lib.predefine import roi2color

    # inputs
    figsize = (4, 6)
    rois = ('pFus-face', 'mFus-face')
    hemis = ('lh', 'rh')
    hemi2style = {'lh': '-', 'rh': '--'}
    dev_dir = pjoin(proj_dir, 'analysis/s2/1080_fROI/refined_with_Kevin/'
                    'development')
    subj_info_file = pjoin(dev_dir, 'HCPD_SubjInfo.csv')
    meas2file = {
        'thickness': pjoin(dev_dir, 'HCPD_thickness-corr_MPM1.csv'),
        'myelin': pjoin(dev_dir, 'HCPD_myelin-corr_MPM1.csv'),
        'rsfc': pjoin(dev_dir, 'rfMRI/rsfc-corr_MPM.csv'),
    }
    meas2title = {
        'thickness': 'thickness',
        'myelin': 'myelination',
        'rsfc': 'RSFC'
    }

    # outputs
    out_file = pjoin(work_dir, 'dev-corr_line.jpg')

    # prepare
    age_name = 'age in years'
    subj_info = pd.read_csv(subj_info_file)
    subj_ids = subj_info['subID'].to_list()
    ages = np.array(subj_info[age_name])
    n_meas = len(meas2file)

    # plot
    _, axes = plt.subplots(n_meas, 1, figsize=figsize)
    for meas_idx, meas_name in enumerate(meas2file.keys()):
        ax = axes[meas_idx]
        data = pd.read_csv(meas2file[meas_name])
        assert subj_ids == data['subID'].to_list()
        for hemi in hemis:
            for roi in rois:
                roi_name = roi.split('-')[0]
                col = f"{roi_name}_{hemi}"
                meas_vec = np.array(data[col])
                non_nan_vec = ~np.isnan(meas_vec)
                meas_vec = meas_vec[non_nan_vec]
                age_vec = ages[non_nan_vec]
                print(f'{meas_name}_{hemi}_{roi}:', meas_vec.shape)
                age_uniq = np.unique(age_vec)
                ys = np.zeros_like(age_uniq, np.float64)
                yerrs = np.zeros_like(age_uniq, np.float64)
                for age_idx, age in enumerate(age_uniq):
                    sample = meas_vec[age_vec == age]
                    ys[age_idx] = np.mean(sample)
                    yerrs[age_idx] = sem(sample)
                ax.errorbar(age_uniq,
                            ys,
                            yerrs,
                            label=f'{hemi}_{roi_name}',
                            color=roi2color[roi],
                            linestyle=hemi2style[hemi])
        ax.spines['top'].set_visible(False)
        ax.spines['right'].set_visible(False)
        # ax.legend()
        if meas_name == 'thickness':
            ax.set_ylim(-0.3, 0.6)
        if meas_idx + 1 == n_meas:
            ax.set_xlabel(age_name)
        # ax.set_title(meas2title[meas_name])
        ax.set_ylabel('pearson R')
        x_ticks = np.unique(ages)[::2]
        ax.set_xticks(x_ticks)
        ax.set_xticklabels(x_ticks)
    plt.tight_layout()
    plt.savefig(out_file)
Example 31
def describe_numeric_1d(series: pd.Series, series_description: dict) -> dict:
    """Describe a numeric series.

    Args:
        series: The Series to describe.
        series_description: The dict containing the series description so far.

    Returns:
        A dict containing calculated series description values.

    Notes:
        When 'bins_type' is set to 'bayesian_blocks', astropy.stats.bayesian_blocks is used to determine the number of
        bins. Read the docs:
        https://docs.astropy.org/en/stable/visualization/histogram.html
        https://docs.astropy.org/en/stable/api/astropy.stats.bayesian_blocks.html

        This method might print warnings, which we suppress.
        https://github.com/astropy/astropy/issues/4927
    """
    quantiles = config["vars"]["num"]["quantiles"].get(list)

    stats = {
        "mean": series.mean(),
        "std": series.std(),
        "variance": series.var(),
        "min": series.min(),
        "max": series.max(),
        "kurtosis": series.kurt(),
        "skewness": series.skew(),
        "sum": series.sum(),
        "mad": series.mad(),
        "sme": sem(series),
        "n_zeros": (len(series) - np.count_nonzero(series)),
        "histogram_data": series,
        "scatter_data": series,  # For complex
    }

    chi_squared_threshold = config["vars"]["num"]["chi_squared_threshold"].get(
        float)
    if chi_squared_threshold > 0.0:
        histogram = np.histogram(series[series.notna()].values, bins="auto")[0]
        stats["chi_squared"] = chisquare(histogram)

    stats["range"] = stats["max"] - stats["min"]
    stats.update({
        f"{percentile:.0%}": value
        for percentile, value in series.quantile(quantiles).to_dict().items()
    })
    stats["iqr"] = stats["75%"] - stats["25%"]
    stats["cv"] = stats["std"] / stats["mean"] if stats["mean"] else np.NaN
    stats["p_zeros"] = float(stats["n_zeros"]) / len(series)

    bins = config["plot"]["histogram"]["bins"].get(int)
    # Bins should never be larger than the number of distinct values
    bins = min(series_description["distinct_count_with_nan"], bins)
    stats["histogram_bins"] = bins

    bayesian_blocks_bins = config["plot"]["histogram"][
        "bayesian_blocks_bins"].get(bool)
    if bayesian_blocks_bins:
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            ret = bayesian_blocks(stats["histogram_data"])

            # Sanity check
            if not np.isnan(ret).any() and ret.size > 1:
                stats["histogram_bins_bayesian_blocks"] = ret

    return stats
Example 32
def plot_development():
    import numpy as np
    import pandas as pd
    import pickle as pkl
    from scipy.stats import sem
    from cxy_hcp_ffa.lib.predefine import roi2color

    # inputs
    figsize = (4, 6)
    rois = ('pFus-face', 'mFus-face')
    hemis = ('lh', 'rh')
    hemi2style = {'lh': '-', 'rh': '--'}
    dev_dir = pjoin(proj_dir, 'analysis/s2/1080_fROI/refined_with_Kevin/'
                    'development')
    meas2file = {
        'thickness': pjoin(dev_dir, 'HCPD_thickness_MPM1_prep_inf.csv'),
        'myelin': pjoin(dev_dir, 'HCPD_myelin_MPM1_prep_inf.csv'),
        'rsfc': pjoin(dev_dir, 'rfMRI/rsfc_MPM2Cole_{hemi}.pkl'),
    }
    meas2ylabel = {
        'thickness': 'thickness',
        'myelin': 'myelination',
        'rsfc': 'RSFC'
    }

    # outputs
    out_file = pjoin(work_dir, 'dev_line.jpg')

    # prepare
    n_meas = len(meas2file)
    age_name = 'age in years'

    # plot
    _, axes = plt.subplots(n_meas, 1, figsize=figsize)
    for meas_idx, meas_name in enumerate(meas2file.keys()):
        ax = axes[meas_idx]
        fpath = meas2file[meas_name]
        if meas_name == 'rsfc':
            subj_info = pd.read_csv(pjoin(dev_dir, 'HCPD_SubjInfo.csv'))
            age_vec = np.array(subj_info[age_name])
            for hemi in hemis:
                rsfc_dict = pkl.load(open(fpath.format(hemi=hemi), 'rb'))
                for roi in rois:
                    fc_vec = np.mean(rsfc_dict[roi], 1)
                    non_nan_vec = ~np.isnan(fc_vec)
                    fcs = fc_vec[non_nan_vec]
                    ages = age_vec[non_nan_vec]
                    age_uniq = np.unique(ages)
                    ys = np.zeros_like(age_uniq, np.float64)
                    yerrs = np.zeros_like(age_uniq, np.float64)
                    for idx, age in enumerate(age_uniq):
                        sample = fcs[ages == age]
                        ys[idx] = np.mean(sample)
                        yerrs[idx] = sem(sample)
                    ax.errorbar(age_uniq,
                                ys,
                                yerrs,
                                label=f"{hemi}_{roi.split('-')[0]}",
                                color=roi2color[roi],
                                linestyle=hemi2style[hemi])
            ax.set_ylabel(meas2ylabel[meas_name])
            ax.spines['top'].set_visible(False)
            ax.spines['right'].set_visible(False)
            # ax.legend()
            x_ticks = np.unique(age_vec)[::2]
            ax.set_xticks(x_ticks)
            ax.set_xticklabels(x_ticks)
        else:
            data = pd.read_csv(fpath)
            age_vec = np.array(data[age_name])
            age_uniq = np.unique(age_vec)
            for hemi in hemis:
                for roi in rois:
                    roi_name = roi.split('-')[0]
                    col = f"{roi_name}_{hemi}"
                    meas_vec = np.array(data[col])
                    ys = np.zeros_like(age_uniq, np.float64)
                    yerrs = np.zeros_like(age_uniq, np.float64)
                    for age_idx, age in enumerate(age_uniq):
                        sample = meas_vec[age_vec == age]
                        ys[age_idx] = np.mean(sample)
                        yerrs[age_idx] = sem(sample)
                    ax.errorbar(age_uniq,
                                ys,
                                yerrs,
                                label=f'{hemi}_{roi_name}',
                                color=roi2color[roi],
                                linestyle=hemi2style[hemi])
            ax.spines['top'].set_visible(False)
            ax.spines['right'].set_visible(False)
            # ax.legend()
            ax.set_ylabel(meas2ylabel[meas_name])
            x_ticks = age_uniq[::2]
            ax.set_xticks(x_ticks)
            ax.set_xticklabels(x_ticks)
        if meas_idx + 1 == n_meas:
            ax.set_xlabel(age_name)
    plt.tight_layout()
    plt.savefig(out_file)
Example 33
def run(config):
    """
    Train the agents
    Args:
        config: Dict containing all parameters listed in default_config.json

    Returns:
        If log_summary is true,
             the mean and standard error for normalized rewards and percentage of optimal
                actions for each episode during training
             convergence points and the percentage of optimal actions with the final
                policy
        Else
            bool indicating optimal action for each step in each run
            convergence points
            numbers for plotting the Venn diagram (see compare_to_fixed.py for an example)
    """
    np.random.seed(config["seed"])
    try:
        payoff = (
            np.array(config["payoff"])
            .reshape((config["states"], config["actions"]))
            .astype(float)
        )
        payoff /= np.abs(payoff).max()
    except ValueError:
        raise ValueError(
            "There should be {} (states * actions) elements in payoff. Found {} elements".format(
                config["states"] * config["actions"], len(config["payoff"])
            )
        )

    # Logging vars
    rewards = np.zeros((config["episodes"] // config["log_interval"], config["runs"]))
    opt = np.zeros((config["episodes"] // config["log_interval"], config["runs"]))
    converge_point = np.zeros((config["states"], config["actions"]))

    alg = getattr(algs, config["algorithm"])(
        config["states"], config["messages"], config["actions"], config["runs"], **config
    )

    # Train the algorithms
    for e in range(config["episodes"]):
        s0 = np.random.randint(config["states"], size=config["runs"])
        # s0 = np.random.choice(config['states'], size=config['runs'], p=[0.1, 0.8, 0.1])
        m0, a1 = alg.act(s0)
        r_mean = payoff[s0, a1]
        r = r_mean * (1 + config["payoff_sigma"] * np.random.randn(config["runs"]))
        # r = r_mean * (2 * np.random.randint(2, size=config['runs']))
        # regret[e // config["log_interval"]] += (
        #     (payoff[s0].max(axis=1) - r_mean)
        #     / (payoff[s0].max(axis=1) - payoff[s0].min(axis=1))
        #     / config["log_interval"]
        # )
        rewards[e // config["log_interval"]] += (
            r_mean / payoff[s0].max(axis=1) / config["log_interval"]
        )
        # rewards[e // config['log_interval']] += r_mean / config['log_interval']
        opt[e // config["log_interval"]] += (
            np.isclose(r_mean, payoff[s0].max(axis=1)) / config["log_interval"]
        )
        alg.train(r)

    # Evaluate using a state sweep
    message_policy = np.zeros((config["runs"], config["states"]))
    percent_opt = 0
    for s in range(config["states"]):
        s0 = s + np.zeros(config["runs"], dtype=int)
        m0, a1 = alg.act(s0, test=True)
        message_policy[:, s] = m0
        converge_point[s] = np.bincount(a1, minlength=config["actions"])
        best_act = payoff[s].argmax()
        percent_opt += converge_point[s, best_act] / config["runs"] / config["states"]

    if config["log_summary"]:
        return (
            rewards.mean(axis=1),
            stats.sem(rewards, axis=1),
            opt.mean(axis=1),
            stats.sem(opt, axis=1),
            percent_opt,
            converge_point,
        )

    # Calculate the numbers corresponding to uniqueness of message protocol
    n_diff = np.count_nonzero(
        np.logical_and(
            np.logical_and(
                message_policy[:, 0] != message_policy[:, 1],
                message_policy[:, 1] != message_policy[:, 2],
            ),
            message_policy[:, 0] != message_policy[:, 2],
        )
    )
    n01 = np.count_nonzero(message_policy[:, 0] == message_policy[:, 1])
    n12 = np.count_nonzero(message_policy[:, 1] == message_policy[:, 2])
    n02 = np.count_nonzero(message_policy[:, 0] == message_policy[:, 2])
    n012 = np.count_nonzero(
        np.logical_and(
            message_policy[:, 0] == message_policy[:, 1],
            message_policy[:, 1] == message_policy[:, 2],
        )
    )
    ns = (n_diff, n01, n12, n02, n012)

    return opt, converge_point, ns
Example 34
plt.rcParams['figure.figsize'] = (10, 8)
plt.rcParams['font.size'] = 10

# Read in the data
DMK1mm, DMK2mm, GK1g, GK2g = np.genfromtxt('dataDurchuGew.txt', unpack=True)
RaumTK1, RaumTK2 = np.genfromtxt('Raumtemperatur.txt', unpack=True)
Temperatur, Messung1, Messung2 = np.genfromtxt('Temperaturabh.txt',
                                               unpack=True)
TK = Temperatur + 273.15
DMK1 = DMK1mm * 10**(-3)
DMK2 = DMK2mm * 10**(-3)
GK1 = GK1g * 10**(-3)
GK2 = GK2g * 10**(-3)

# Weight and diameter in mm and grams
Durchm1mm = ufloat(np.mean(DMK1mm), stats.sem(DMK1mm))
Durchm2mm = ufloat(np.mean(DMK2mm), stats.sem(DMK2mm))
Gew1g = ufloat(np.mean(GK1g), stats.sem(GK1g))
Gew2g = ufloat(np.mean(GK2g), stats.sem(GK2g))
RaumTK1Mean = ufloat(np.mean(RaumTK1), stats.sem(RaumTK1))
RaumTK2Mean = ufloat(np.mean(RaumTK2), stats.sem(RaumTK2))
print(RaumTK1Mean)
print(RaumTK2Mean)
print(Durchm1mm, Durchm2mm)
print(Gew1g, Gew2g)

# a) Determine the densities of the spheres; K1 is the smaller, lighter sphere
Durchm1 = ufloat(np.mean(DMK1), stats.sem(DMK1))
Durchm2 = ufloat(np.mean(DMK2), stats.sem(DMK2))
Gew1 = ufloat(np.mean(GK1), stats.sem(GK1))
Gew2 = ufloat(np.mean(GK2), stats.sem(GK2))
Example 35
print("")

paramsB, covarianceB = curve_fit(f, xSkalaBmm, USkalaBV)
errorsB = np.sqrt(np.diag(covarianceB))
mB = paramsB[0]
bB = paramsB[1]

print("Umrechnungen Teil b):")
print(mB, "+/-", errorsB[0])
print(bB, "+/-", errorsB[1])

Abstaende = mB * Abstaende + bB

KplusDU = mB * 22 + bB

DU = ufloat(np.mean(Abstaende), stats.sem(Abstaende))

KHertz = KplusDU - DU

print(KplusDU)
print("E1-E0 = ", DU, "eV")
print("K2= ", KHertz, "V")

lamb = const.c / (DU * const.e / const.h)
print(lamb)

print("")
print("Teil c)")
print("")

paramsC, covarianceC = curve_fit(f, xSkalaCmm, USkalaCV)