Code example #1
File: stability.py (project: jojonas/particle-lab)
def stability(ux, ug_crit, omega, label=None):
    formatter = FuncFormatter(lambda x, pos: "%g\u00B2" % np.sqrt(x))

    ux, ug_crit = _double_sort(ux, ug_crit)

    x = np.power(ux, 2)
    y = ug_crit

    line = plt.errorbar(
        unp.nominal_values(x), unp.nominal_values(y), xerr=unp.std_devs(x), yerr=unp.std_devs(y), fmt="o", label=label
    )
    color = line.lines[0].get_color()

    plt.gca().xaxis.set_major_formatter(formatter)
    plt.gca().xaxis.set_ticks(np.power(np.arange(4, 10) * 100, 2))
    plt.xlabel(r"$U_x^2$")
    plt.ylabel(r"$U_g$")

    linear = lambda x, a: a * x
    a = _uarray_fit(linear, x, y, x0=(0.0001,), epsfcn=1e-7)[0]

    x = np.linspace(0, unp.nominal_values(x).max() * 1.1, 20)
    y = linear(x, a.n)

    plt.plot(x, linear(x, a.n), color=color)
    plt.fill_between(x, linear(x, a.n + a.s), linear(x, a.n - a.s), color=color, alpha=0.1)

    a = _normalize_ufloat(a)
    print("Slope:", a * 1000, "1/kV")

    qm = -2 / 3 * a * r0 ** 2 * omega ** 2 / K

    print(label, "q/m = {:.4P} uC/kg".format(qm * 1e6))
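All of the examples collected here revolve around the same round trip through the uncertainties package: build an array with unumpy.uarray, let the arithmetic propagate the errors, then split the result apart again with unumpy.nominal_values and unumpy.std_devs. As a reference point, a minimal self-contained sketch of that pattern (the numbers are illustrative only):

from uncertainties import unumpy as unp

u = unp.uarray([1.0, 2.0, 3.0], [0.1, 0.2, 0.3])  # values with 1-sigma errors
x = u ** 2                                        # errors propagate automatically
print(unp.nominal_values(x))                      # [1. 4. 9.]
print(unp.std_devs(x))                            # |2u|*sigma -> [0.2 0.8 1.8]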
Code example #2
File: plot.py (project: Fujnky/ap)
def auswerten(name, d, n, t, z, V_mol, eps, raw):
    d *= 1e-3
    N = unp.uarray(n/t, np.sqrt(n)/t) - N_u

    if name=="Cu":
        tools.table((raw[0], raw[1], N), ("D/mm", "n", "(N-N_U)/\per\second"), "build/{}.tex".format(name), "Messdaten von {}.".format(name), "tab:daten{}".format(name), split=2, footer=r"$\Delta t = \SI{60}{s}$")#"(N-N_U)/\per\second"
    else:
        tools.table((raw[0], raw[1], raw[2], N), ("D/mm", "n", "\Delta t/s", "(N-N_U)/\per\second"), "build/{}.tex".format(name), "Messdaten von {}.".format(name), "tab:daten{}".format(name), split=2)
    mu = z * const.N_A / V_mol * 2 * np.pi * (const.e**2 / (4 * np.pi * const.epsilon_0 * const.m_e * const.c**2))**2 * ((1+eps)/eps**2 * ((2 * (1+eps))/(1+2*eps) - 1/eps * np.log(1+2*eps)) + 1/(2*eps) * np.log(1+ 2*eps) - (1+ 3*eps)/(1+2*eps)**2)

    params, pcov = curve_fit(fit, d, unp.nominal_values(N), sigma=unp.std_devs(N))
    params_ = unc.correlated_values(params, pcov)
    print("{}: N(0) = {}, µ = {}, µ_com = {}".format(name, params_[0], -params_[1], mu))

    sd = np.linspace(0, .07, 1000)

    valuesp = (fit(sd, *(unp.nominal_values(params_) + 10*unp.std_devs(params_)))).astype(float)
    valuesm = (fit(sd, *(unp.nominal_values(params_) - 10*unp.std_devs(params_)))).astype(float)

    #plt.xlim(0,7)
    plt.xlabel(r"$D/\si{mm}$")
    plt.ylabel(r"$(N-N_U)/\si{\per\second}$")
    plt.plot(1e3*sd, fit(sd, *params), 'b-', label="Fit")
    plt.fill_between(1e3*sd, valuesm, valuesp, facecolor='blue', alpha=0.125, edgecolor='none', label=r'$1\sigma$-Umgebung ($\times 10$)')
    plt.errorbar(1e3*d, unp.nominal_values(N), yerr=unp.std_devs(N), fmt='rx', label="Messdaten")
    plt.legend(loc='best')
    plt.yscale('linear')
    plt.tight_layout(pad=0)
    plt.savefig("build/{}.pdf".format(name))
    plt.yscale('log')
    plt.savefig("build/{}_log.pdf".format(name))
    plt.clf()
Code example #3
File: test_rebin.py (project: andyfaff/rebin)
def test_x2_in_x1_2():
    """
    x2 has a couple of bins, each of which span more than one original bin
    """
    # old size
    m = 10

    # bin edges
    x_old = np.linspace(0., 1., m+1)
    x_new = np.array([0.25, 0.55, 0.75])

    # some arbitrary distribution
    y_old = 1. + np.sin(x_old[:-1]*np.pi) / np.ediff1d(x_old)

    y_old = unp.uarray(y_old, 0.1*y_old*uniform((m,)))

    # rebin
    y_new = rebin.rebin(x_old, y_old, x_new, interp_kind='piecewise_constant')

    # compute answer here to check rebin
    y_new_here = unp.uarray(np.zeros(2), np.zeros(2))
    y_new_here[0] = 0.5 * y_old[2] + y_old[3] + y_old[4] + 0.5 * y_old[5]
    y_new_here[1] = 0.5 * y_old[5] + y_old[6] + 0.5 * y_old[7]

    # nominal value comparison
    assert_allclose(unp.nominal_values(y_new),
                    unp.nominal_values(y_new_here))

    # standard deviation comparison
    assert_allclose(unp.std_devs(y_new),
                    unp.std_devs(y_new_here))
Code example #4
def import_data_from_objLog(FilesList, Objects_Include, pv):
    
    
    List_Abundances     = ['OI_HI', 'NI_HI', 'SI_HI', 'SI_HI_ArCorr', 'Y_Mass_O', 'Y_Mass_S', 'Y_Inference_O', 'Y_Inference_S']
    #List_Abundances    = ['OI_HI', 'NI_HI', 'SI_HI', 'SI_HI_ArCorr', 'Y_Mass_O', 'Y_Mass_S', 'Y_inf_O', 'Y_inf_S']

    #Dictionary of dictionaries to store object abundances
    Abund_dict = OrderedDict()
    for abund in List_Abundances:
        Abund_dict[abund] = OrderedDict()

    #Loop through files
    for i in range(len(FilesList)):
        #Analyze file address
        CodeName, FileName, FileFolder  = pv.Analyze_Address(FilesList[i])  
      
        if CodeName in Objects_Include:
            #Loop through abundances in the log
            for abund in List_Abundances:
                Abund_Mag = pv.GetParameter_ObjLog(CodeName, FileFolder, Parameter = abund, Assumption = 'float')
                #If the abundance was measured, store it
                if Abund_Mag is not None:
                    Abund_dict[abund][CodeName] = Abund_Mag
        
    #Dictionary to store objects with abundances pairs for regressions. 
    #As an initial value for the keys we define the abundances we want to use for the regression
    Abundances_Pairs_dict = OrderedDict()
    Abundances_Pairs_dict['O_Regression']                   = ('OI_HI','Y_Mass_O')      
    Abundances_Pairs_dict['N_Regression']                   = ('NI_HI','Y_Mass_O')      
    Abundances_Pairs_dict['S_Regression']                   = ('SI_HI','Y_Mass_S')      
    Abundances_Pairs_dict['S_ArCorr_Regression']            = ('SI_HI_ArCorr','Y_Mass_S')
    Abundances_Pairs_dict['O_Regression_Inference']         = ('OI_HI','Y_Inference_O')      
    Abundances_Pairs_dict['N_Regression_Inference']         = ('NI_HI','Y_Inference_O')      
    Abundances_Pairs_dict['S_Regression_Inference']         = ('SI_HI','Y_Inference_S')      
    Abundances_Pairs_dict['S_ArCorr_Regression_Inference']  = ('SI_HI_ArCorr','Y_Inference_S') 
        
    #Loop through the regression lists and get objects with both abundances observed
    for Vector in list(Abundances_Pairs_dict.keys()):
        
        #Get the element keys for the regression
        Elem_X, Elem_Y = Abundances_Pairs_dict[Vector]
        
        #Determine objects with both abundances observed
        Obj_vector  = intersect1d(list(Abund_dict[Elem_X].keys()), list(Abund_dict[Elem_Y].keys()), assume_unique = True)
        X_vector    = zeros(len(Obj_vector))
        Y_vector    = zeros(len(Obj_vector))
        X_vector_E  = zeros(len(Obj_vector))
        Y_vector_E  = zeros(len(Obj_vector))
                        
        #Generate abundances vectors
        for z in range(len(Obj_vector)):  
            X_vector[z] = nominal_values(Abund_dict[Elem_X][Obj_vector[z]])
            X_vector_E[z] = std_devs(Abund_dict[Elem_X][Obj_vector[z]])            
            Y_vector[z] = nominal_values(Abund_dict[Elem_Y][Obj_vector[z]])
            Y_vector_E[z] = std_devs(Abund_dict[Elem_Y][Obj_vector[z]])
    
        Abundances_Pairs_dict[Vector] = (list(Obj_vector), uarray(X_vector, X_vector_E), uarray(Y_vector, Y_vector_E))
        
    return Abundances_Pairs_dict
Code example #5
File: halos.py (project: gogrean/InteractiveFigs)
def fitting_powerlaw_LP(lx_min, lx_max,
        const=[ufloat(0.083,0.058), ufloat(2.11,0.21)]):
    x = lx_range(lx_min, lx_max)
    y = (10**24.5) * ((x/1e45)**const[1]) * (10**const[0])
    y_nom = unumpy.nominal_values(y)
    y_min = unumpy.nominal_values(y) - unumpy.std_devs(y)
    y_max = unumpy.nominal_values(y) + unumpy.std_devs(y)
    return y_nom, y_min, y_max
Code example #6
File: halos.py (project: gogrean/InteractiveFigs)
def fitting_powerlaw_YP(sz_min, sz_max,
        const=[ufloat(-0.133,0.069), ufloat(2.03,0.30)]):
    x = lx_range(sz_min, sz_max)
    y = (10**24.5) * ((x/1e-4)**const[1]) * (10**const[0])
    y_nom = unumpy.nominal_values(y)
    y_min = unumpy.nominal_values(y) - unumpy.std_devs(y)
    y_max = unumpy.nominal_values(y) + unumpy.std_devs(y)
    return y_nom, y_min, y_max
Code example #7
def ucurve_fit(f, x, y, **kwargs):
    if np.any(unp.std_devs(y) == 0):
        sigma = None
    else:
        sigma = unp.std_devs(y)

    popt, pcov = scipy.optimize.curve_fit(f, x, unp.nominal_values(y), sigma=sigma, **kwargs)

    # note: uarray lives in uncertainties.unumpy (unp), not in the top-level module
    return unp.uarray(popt, np.sqrt(np.diag(pcov)))
Code example #8
File: loesung.py (project: Physik1516/protokolle)
def ucurve_fit(f, x, y, **kwargs):
    if np.any(unp.std_devs(y) == 0):
        sigma = None
    else:
        sigma = unp.std_devs(y)

    popt, pcov = scipy.optimize.curve_fit(f, x, unp.nominal_values(y), sigma=sigma, **kwargs)

    return unc.correlated_values(popt, pcov)
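Code examples #7 and #8 are identical except for the return value: #7 rebuilds independent ufloats from the square roots of the covariance diagonal, while #8 preserves the parameter correlations with unc.correlated_values. A small sketch of the practical difference, on synthetic straight-line data (all names below are illustrative, not from either project):

import numpy as np
import scipy.optimize
import uncertainties as unc

def line(x, a, b):
    return a * x + b

rng = np.random.default_rng(0)
x = np.linspace(0.0, 1.0, 20)
y = 2.0 * x + 1.0 + rng.normal(0.0, 0.05, x.size)

popt, pcov = scipy.optimize.curve_fit(line, x, y, sigma=np.full(x.size, 0.05))
a, b = unc.correlated_values(popt, pcov)        # keeps correlations (example #8)
a_i = unc.ufloat(popt[0], np.sqrt(pcov[0, 0]))  # drops correlations (example #7)
b_i = unc.ufloat(popt[1], np.sqrt(pcov[1, 1]))
print((a * 0.5 + b).std_dev)      # uses the full covariance
print((a_i * 0.5 + b_i).std_dev)  # larger here, since cov(a, b) < 0 is ignored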
Code example #9
File: linReg.py (project: miallo/PP15)
def ulinReg(xdata,ydata):
    """
    Performs a linear regression via linReg_iter. The parameters here,
    however, are uncertainties.ufloat values.
    """
    popt, perr = linReg_iter(unumpy.nominal_values(xdata),
                 unumpy.nominal_values(ydata), unumpy.std_devs(ydata),
                 unumpy.std_devs(xdata))
    return uc.ufloat(popt[0],perr[0]), uc.ufloat(popt[1],perr[1])
Code example #10
File: detector.py (project: vsilv/fp)
def CdTe_Am():
    detector_name = "CdTe"
    sample_name = "Am"
    suff = "_" + sample_name + "_" + detector_name 
    npy_file = npy_dir + "spectra" + suff + ".npy"
    n = np.load(npy_file)[:500]
    s_n = np.sqrt(n)
    x = np.arange(len(n))
    
    # only fit
    p0 = [250000, 250, 25]
    red = (x > 235) * (x < 270)
    n_red = n[red]
    x_red = x[red]
    s_n_red = s_n[red]
    fit, cov_fit = curve_fit(gauss, x_red, n_red, p0=p0, sigma=s_n_red)
    fit  = np.abs(fit)
    fit_corr = uc.correlated_values(fit, cov_fit)
    s_fit = un.std_devs(fit_corr)
    # chi square
    n_d = 4
    chi2 = np.sum(((gauss(x_red, *fit) - n_red) / s_n_red) ** 2 )
    chi2_test = chi2/n_d
    fit_r, s_fit_r = err_round(fit, cov_fit)
    fit_both = np.array([fit_r, s_fit_r])
    fit_both = np.reshape(fit_both.T, np.size(fit_both))
    As[2], mus[2], sigmas[2] = fit
    s_As[2], s_mus[2], s_sigmas[2] = un.std_devs(fit_corr)

    all = np.concatenate((fit_both, [chi2_test]), axis=0)
    def plot_two_gauss():
        fig1, ax1 = plt.subplots(1, 1)
        if not save_fig:
            fig1.suptitle("Detector: " + detector_name + "; sample: " + sample_name)
        plot1, = ax1.plot(x, n, '.', alpha=0.3)   # histo plot
        ax1.errorbar(x, n, yerr=s_n, fmt=',', alpha=0.99, c=plot1.get_color(), errorevery=10) # errors of t are not changed!
        ax1.plot(x_red, gauss(x_red, *fit))
        ax1.set_xlabel("channel")
        ax1.set_ylabel("counts")
        textstr = 'Results of fit:\n\
                \\begin{eqnarray*}\
                A     &=& (%.0f \pm %.0f) \\\\ \
                \mu   &=& (%.1f \pm %.1f) \\\\ \
                \sigma&=& (%.1f \pm %.1f) \\\\ \
                \chi^2 / n_d &=& %.1f\
                \end{eqnarray*}'%tuple(all)
        ax1.text(0.65, 0.95, textstr, transform=ax1.transAxes, va='top', bbox=props)
        if show_fig:
            fig1.show()
        if save_fig:
            file_name = "detector" + suff
            fig1.savefig(fig_dir + file_name + ".pdf")
            fig1.savefig(fig_dir + file_name + ".png")
        return 0
    plot_two_gauss()
    return fit, s_fit
Code example #11
def plot(x,y,*args,**kwargs):
    nominal_curve = pyplot.plot(x, unumpy.nominal_values(y), *args, **kwargs)
    pyplot.fill_between(x, 
                        unumpy.nominal_values(y)-unumpy.std_devs(y), 
                        unumpy.nominal_values(y)+unumpy.std_devs(y),
                        facecolor=nominal_curve[0].get_color(),
                        edgecolor='face',
                        alpha=0.1,
                        linewidth=0)
    return nominal_curve
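A possible call site for this helper, with the function above in scope and assuming the imports its body implies (numpy, matplotlib's pyplot, and uncertainties.unumpy):

import numpy as np
from matplotlib import pyplot
from uncertainties import unumpy

x = np.linspace(0.0, 2.0 * np.pi, 100)
y = unumpy.uarray(np.sin(x), 0.05)  # constant 1-sigma uncertainty
plot(x, y, label="sin(x)")          # nominal curve plus shaded error band
pyplot.legend()
pyplot.show()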
Code example #12
File: plot.py (project: SBU-NSL/analysis-ferromagnet)
def plot_u(cal_file, fm_file, offset_file, description, accidental_offset,
        results_file):
    M = np.genfromtxt(cal_file)
    N = np.genfromtxt(fm_file)
    O = np.genfromtxt(offset_file)

    i_helm = M[:,1] #current applied to helmholtz for calibration measurement
    b_helm = M[:,2] #field applied to helmholtz coil for calibration measurement
    p, cov = np.polyfit(i_helm, b_helm, 1,  cov=True) #fit a line to calibration measurement so that we get a calibration

    i_fm = N[:,1] #current applied to helmholtz for shielding measurement
    b_fm = unumpy.uarray(N[:,2],0.0005) - accidental_offset #field measured inside of ferromagnet shield


    B_earth = np.polyval(p,0) #We get the Earth's magnetic field from i=0 of the Helmholtz calibration
    B_fm_no_i = unumpy.uarray(np.mean(O[:,2]), np.std(O[:,2])) #Get average and error for initial magnetization

    mag = B_fm_no_i - B_earth #initial magnetization is the field inside of the ferromagnet before any field is applied minus the Earth's magnetic field

    Bin = b_fm - mag #internal magnetization is the measured internal field minus the initial magnetization. This correction might not be necessary for a soft ferromagnet
    
    Bext = unumpy.uarray(np.polyval(p,i_fm), 0.0005) #external field
    Bext_nom = unumpy.nominal_values(Bext)
    Bext_err = unumpy.std_devs(Bext)

    B = Bext/Bin
    c = a/b
    u=(-2*B + c**2 - 2*unumpy.sqrt(B**2 - B*c**2 - B + c**2) + 1)/(c**2 - 1)

    u_nom = unumpy.nominal_values(u)
    u_err = unumpy.std_devs(u)

    #calculate uerr with just point-to-point uncertainties. I define this as just
    #uncertainty from the field measurements
    u_pp=(-2*B + c.nominal_value**2 - 2*unumpy.sqrt(B**2 - B*c.nominal_value**2 - B + c.nominal_value**2) + 1)/(c.nominal_value**2 - 1)
    
    #calculate uerr from just geometry uncertainties
    u_geom=(-2*unumpy.nominal_values(B) + c**2 - 2*unumpy.sqrt(unumpy.nominal_values(B)**2 - unumpy.nominal_values(B)*c**2 - unumpy.nominal_values(B) + c**2) + 1)/(c**2 - 1)

    ##obtain uncertainties from field
    u_err_pp = unumpy.std_devs(u_pp)

    ##obtain uncertainties from geometry
    u_err_geom = unumpy.std_devs(u_geom)

    with open(results_file, "w") as myfile:
        myfile.write('#Bext, sig_Bext, ur, sig_ur, sig_ur_pp, sig_ur_corr\n')
        for j in range(0, len(u_nom)):
            myfile.write('%s\t%s\t%s\t%s\t%s\t%s\n' %(
                Bext_nom[j], Bext_err[j],
                u_nom[j], u_err[j], u_err_pp[j], u_err_geom[j]))
    

    plt.errorbar(Bext_nom, u_nom, u_err, marker = '.', label = description)
Code example #13
File: CriterialFit.py (project: romarro/DoctoratCode)
def GenExpErrorPlot(ras,figoffset,save=True,close=True,**figaspect): 
    colors=['b','g','r','c','m','y','k']
    markers=['.','*','o','v','^','<','>']
    index=0  
    Nufig=figoffset
    Ffig=Nufig+1
    for ra in ras:
        fh=Fh(ra)
        plt.figure(figoffset)
        plt.errorbar(uns.nominal_values(ra.Re.magnitude),
                 uns.nominal_values(ra.Nu.magnitude),
                 uns.std_devs(ra.Nu.magnitude),
                 uns.std_devs(ra.Re.magnitude),
                 '{:s}{:s}'.format(colors[index%len(colors)],
                                markers[index%len(markers)]),
                 label='{:.3f}'.format(fh))
        plt.figure(Ffig)
        plt.errorbar(uns.nominal_values(ra.Re.magnitude),
                 uns.nominal_values(ra.fr.magnitude),
                 uns.std_devs(ra.fr.magnitude),
                 uns.std_devs(ra.Re.magnitude),
                 '{:s}{:s}'.format(colors[index%len(colors)],
                                markers[index%len(markers)]),
                 label='{:.3f}'.format(fh))
        index += 1  # advance to the next colour/marker for the following data set
    
    plt.figure(Nufig)
    plt.legend(loc='upper left')
    plt.xlabel('Re')
    plt.ylabel('Nu')
    
    plt.figure(Ffig)
    plt.legend(loc='upper right')
    plt.xlabel('Re')
    plt.ylabel('$c_f$')

    if save:
        plt.figure(Nufig)
        plt.savefig('Nu_exp_all.png',dpi=300,
                    figsize=(166.54/2.54,81/2.54),
                    orientation='landscape',
                    facecolor='w',
                    edgecolor='k')
        plt.figure(Ffig)
        plt.savefig('cf_exp_all.png',dpi=300,
                    figsize=(166.54/2.54,81/2.54),
                    orientation='landscape',
                    facecolor='w',
                    edgecolor='k')
    if close:
        plt.close(Nufig)
        plt.close(Ffig)
        return figoffset,-1,-1
    
    return Ffig+1,Nufig,Ffig
Code example #14
def plot_u(cal_file, fm_file,  description, accidental_offset,
        results_file):
    M = np.genfromtxt(cal_file) #turn calibration file into a matrix
    N = np.genfromtxt(fm_file) #turn fm_scan file into a matrix


    i_helm = M[:,1] #current applied to helmholtz for calibration measurement
    b_helm = M[:,2] #field applied to helmholtz coil for calibration measurement
    p, cov = np.polyfit(i_helm, b_helm, 1,  cov=True) #fit a line to calibration measurement so that we get a calibration

    
    i_fm = N[:,1] #current applied to helmholtz for shielding measurement
    Bin = unumpy.uarray(N[:,2],0.0005) - accidental_offset #field measured inside of ferromagnet shield


    Bin_nom = unumpy.nominal_values(Bin) 
    Bin_err = unumpy.std_devs(Bin)

    Bext = unumpy.uarray(np.polyval(p,i_fm), 0.0005) #external field
    Bext_nom = unumpy.nominal_values(Bext)
    Bext_err = unumpy.std_devs(Bext)

    B = Bin/Bext #ratio of internal to external field

    #calculate permeability
    u = (B*c**2 + B -2 -2*unumpy.sqrt(B**2*c**2 - B*c**2 - B + 1))/(B*c**2-B) 
    print(u)
    u_nom = unumpy.nominal_values(u)
    u_err = unumpy.std_devs(u)

    #calculate uerr with just point-to-point uncertainties. I define this as just
    #uncertainty from the field measurements
    u_pp = (B*c.n**2 + B -2 -2*unumpy.sqrt(B**2*c.n**2 - B*c.n**2 - B +
        1))/(B*c.n**2-B)

    u_err_pp = unumpy.std_devs(u_pp)

    #calculate uerr from just geometry uncertainties
    u_geom = (unumpy.nominal_values(B)*c**2 + unumpy.nominal_values(B) -2 -2*unumpy.sqrt(unumpy.nominal_values(B)**2*c**2 - unumpy.nominal_values(B)*c**2 - unumpy.nominal_values(B) +
        1))/(unumpy.nominal_values(B)*c**2-unumpy.nominal_values(B))

    u_err_geom = unumpy.std_devs(u_geom)


    #write results onto a text file
    with open(results_file, "w") as myfile:
        myfile.write('#Bext, sig_Bext, Bi, sig_Bi, ur, sig_ur, sig_ur_pp, sig_ur_corr\n')
        for j in range(0, len(u_nom)):
            myfile.write('%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n' %(
                Bext_nom[j], Bext_err[j], Bin_nom[j], Bin_err[j],
                u_nom[j], u_err[j], u_err_pp[j], u_err_geom[j]))
    
    plt.errorbar(Bext_nom, u_nom, u_err, marker = '.', label = description)
Code example #15
File: papstats.py (project: knly/PAP2)
def plot_data(xdata, ydata, ax=plt, **kwargs):
    xerr = unp.std_devs(xdata)
    if np.sum(xerr)==0:
        xerr = None
    yerr = unp.std_devs(ydata)
    if np.sum(yerr)==0:
        yerr = None
    if not ('ls' in kwargs or 'linestyle' in kwargs):
        kwargs['ls'] = 'none'
    if 'marker' not in kwargs:
        kwargs['marker'] = '.'
    return ax.errorbar(unp.nominal_values(xdata), unp.nominal_values(ydata), xerr=xerr, yerr=yerr, **kwargs)
Code example #16
File: Plot_ER_Data.py (project: sgrieve/ER_Star)
def PlotPatches(Sc, PatchData, ErrorBars):
    """
    Plot E*R* data binned from hilltop patches.
    """
    e_star = E_Star(Sc, PatchData[2], PatchData[0])
    r_star = R_Star(Sc, PatchData[1], PatchData[0])
    if ErrorBars:
        plt.errorbar(unp.nominal_values(e_star), unp.nominal_values(r_star),
                     yerr=unp.std_devs(r_star), xerr=unp.std_devs(e_star),
                     fmt='ro', label='Hilltop Patch Data')
    else:
        plt.errorbar(unp.nominal_values(e_star), unp.nominal_values(r_star),
                     fmt='ro', label='Hilltop Patch Data')
Code example #17
File: Plot_ER_Data.py (project: sgrieve/ER_Star)
def PlotBasins(Sc, BasinData, ErrorBars):
    """
    Plot basin average E*R* data.
    """
    e_star = E_Star(Sc, BasinData[2], BasinData[0])
    r_star = R_Star(Sc, BasinData[1], BasinData[0])

    if ErrorBars:
        plt.errorbar(unp.nominal_values(e_star), unp.nominal_values(r_star),
                     yerr=unp.std_devs(r_star), xerr=unp.std_devs(e_star),
                     fmt='go', label='Basin Data')
    else:
        plt.errorbar(unp.nominal_values(e_star), unp.nominal_values(r_star),
                     fmt='go', label='Basin Data')
Code example #18
def calc_mu(Bin, Bout, radius_inner, radius_outer):

    # ratio of inner and outer radius
    radius_ratio = radius_inner / radius_outer
    print( radius_ratio )
    # ratio of internal to external field
    B_ratio = Bin / Bout
    # convert from Series object to Numpy Array object
    B_ratio = B_ratio.values

    # If the value under the square root becomes negative, set B_ratio to NaN
    # (this seems to happen with Argonne MRI measurements at low fields)
    B_ratio[ (B_ratio**2) * (radius_ratio**2) - B_ratio * (radius_ratio**2) - B_ratio + 1 < 0 ] = np.nan

    # Calculate permeability. Here, use both uncertainties from field measurements and uncertainties from geometry, i.e. radius measurements.
    mu = ( B_ratio * (radius_ratio**2)
           + B_ratio
           - 2
           -2 * unumpy.sqrt( (B_ratio**2) * (radius_ratio**2) - B_ratio * (radius_ratio**2) - B_ratio + 1 )
           ) / ( B_ratio * (radius_ratio**2) - B_ratio )

    # store nominal values of mu in separate array
    mu_val = unumpy.nominal_values(mu)

    # store combined uncertainties of mu in separate array
    mu_err = unumpy.std_devs(mu)

    # Calculate uncertainties of mu values from just field measurement uncertainties (= point-to-point fluctuations). Ignore geometry (=radius) uncertainties.
    mu_pp = ( B_ratio * (radius_ratio.n**2)
              + B_ratio
              - 2
              -2 * unumpy.sqrt( (B_ratio**2) * (radius_ratio.n**2) - B_ratio * (radius_ratio.n**2) - B_ratio + 1 )
              ) / ( B_ratio * (radius_ratio.n**2) - B_ratio )

    # store point-to-point uncertainties of mu in separate array
    mu_err_pp = unumpy.std_devs(mu_pp)

    # Calculate uncertainties of mu values from just geometry uncertainties (= systematic uncertainty, i.e. all points move together). Ignore field uncertainties.
    B_ratio_n = unumpy.nominal_values( B_ratio )
    mu_geom = ( B_ratio_n * (radius_ratio**2)
                + B_ratio_n
                - 2
                -2 * unumpy.sqrt( (B_ratio_n**2) * (radius_ratio**2) - B_ratio_n * (radius_ratio**2) - B_ratio_n + 1 )
                ) / ( B_ratio_n * (radius_ratio**2) - B_ratio_n )

    # store geometric uncertainties of mu in separate array
    mu_err_geom = unumpy.std_devs(mu_geom)

    return( mu_val, mu_err, mu_err_pp, mu_err_geom )
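For reference, the closed-form expression implemented above (and, in the same form, in code example #14), with $B = B_\mathrm{in}/B_\mathrm{out}$ and $c = r_\mathrm{inner}/r_\mathrm{outer}$:

$$\mu = \frac{B c^2 + B - 2 - 2\sqrt{B^2 c^2 - B c^2 - B + 1}}{B c^2 - B}$$

This is a transcription of the code, not an independent derivation; unumpy.sqrt is what carries the field and geometry uncertainties through the square root.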
Code example #19
File: test_unumpy.py (project: omdv/lmfit-py)
def test_component_extraction():
    "Extracting the nominal values and standard deviations from an array"

    arr = unumpy.uarray([1, 2], [0.1, 0.2])

    assert numpy.all(unumpy.nominal_values(arr) == [1, 2])
    assert numpy.all(unumpy.std_devs(arr) == [0.1, 0.2])

    # unumpy matrices, in addition, should have nominal_values that
    # are simply numpy matrices (not unumpy ones, because they have no
    # uncertainties):
    mat = unumpy.matrix(arr)
    assert numpy.all(unumpy.nominal_values(mat) == [1, 2])
    assert numpy.all(unumpy.std_devs(mat) == [0.1, 0.2])
    assert type(unumpy.nominal_values(mat)) == numpy.matrix
Code example #20
File: stability.py (project: jojonas/particle-lab)
def _uarray_fit(func, x, y, x0, B=1000, **kwargs):
    def _derivative(func, x, *params, h=1e-6):
        return (
            -func(x + 2 * h, *params) + 8 * func(x + h, *params) - 8 * func(x - h, *params) + func(x - 2 * h, *params)
        ) / (12 * h)

    def _chi(params, func, xn, yn, xs, ys):
        difference = yn - func(xn, *params)
        error = np.sqrt(np.power(_derivative(func, xn, *params) * xs, 2) + np.power(ys, 2))
        chi = difference / error
        return chi

    xs = unp.std_devs(x)
    ys = unp.std_devs(y)

    means = []
    for i in range(B):
        indices = np.random.randint(0, len(x), size=len(x))

        shifts1 = np.random.normal(loc=0, scale=1, size=len(x))
        x_simulated = unp.nominal_values(x) + xs * shifts1

        shifts2 = np.random.normal(loc=0, scale=1, size=len(y))
        y_simulated = unp.nominal_values(y) + ys * shifts2

        popt, pcov, infodict, mesg, ier = leastsq(
            _chi, x0=tuple(x0), args=(func, x_simulated, y_simulated, xs, ys), full_output=True, **kwargs
        )
        if ier in (1, 2, 3, 4):
            means.append(popt)

    popt, pcov, infodict, mesg, ier = leastsq(
        _chi,
        x0=tuple(x0),
        args=(func, unp.nominal_values(x), unp.nominal_values(y), xs, ys),
        full_output=True,
        **kwargs
    )

    errors = np.std(means, axis=0)
    results = tuple(ufloat(a, b) for a, b in zip(popt, errors))

    chisqndof = np.power(
        _chi(popt, func, unp.nominal_values(x), unp.nominal_values(y), unp.std_devs(x), unp.std_devs(y)), 2
    ).sum() / (len(x) - len(x0))
    print("Chi^2/ndof =", chisqndof)

    return results
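This is the helper that code example #1 calls: B Monte Carlo replicas of the data, drawn within their error bars, around an effective-variance chi fit in which the x errors are folded into the residual via a numerical derivative of the model. A minimal usage sketch on synthetic data (assuming the imports the body implies, i.e. numpy, scipy.optimize.leastsq, and ufloat/unumpy from uncertainties, plus the function above in scope):

import numpy as np
from uncertainties import unumpy as unp

xn = np.linspace(1.0, 10.0, 10)
x = unp.uarray(xn, 0.1)        # x values with errors
y = unp.uarray(3.0 * xn, 0.5)  # y values with errors
slope, = _uarray_fit(lambda x, a: a * x, x, y, x0=(1.0,))
print(slope)  # a ufloat near 3, with a bootstrap error estimate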
Code example #21
def plotRock(ax, xdat, ydat, labelstr, lc, fc, marker='o', Normalize=False):
    xdat = np.array(xdat)
    ydatval = unumpy.nominal_values(ydat) 
    ydaterr = unumpy.std_devs(ydat) 

    # We will plot how much above the norm is A2/A1
    # in units of the norm 
    if Normalize:
        ydatval = ydatval - 1.
        maxy = np.amax( ydatval)
        print "maxy =", maxy
    else:
        maxy = 1.0

    ax.errorbar( xdat, ydatval/maxy, yerr=ydaterr/maxy, \
               capsize=0., elinewidth = 1. ,\
               fmt='.', ecolor=lc, mec=lc, \
               mew=1., ms=5.,\
               marker=marker, mfc='None', \
               label=labelstr+', $B_{\mathrm{tof}}$=%.2g'%(maxy+1))
    # Fit data with a Gaussian
    fitdat = np.transpose( np.vstack( (xdat,ydat/maxy)))
    p0 = [1.0, 0., 10., 0.1]
    fun = fitlibrary.fitdict['Gaussian'].function
    ##pG, errorG = fitlibrary.fit_function( p0,  fitdat, fun)
    #print "Fitting with Gaussian:"
    #print pG
    #print errorG
    ##fitX, fitY = fitlibrary.plot_function(pG, \
    ##             np.linspace( xdat.min(), xdat.max(),120),fun) 
    ##ax.plot(  fitX, fitY, '-', c=lc, lw=1.0)
    ##return np.sqrt(2.)*pG[2], np.sqrt(2.)*errorG[2]
    return 1.,1.
Code example #22
File: h_z.py (project: drdangersimon/experements)
    def fit_summary_stats(self):
        '''Real fit of H(Z) using netwright ages'''
        z = []
        means = self.mean #* self.yr_to_hubble_units
        std = self.mean_std #* self.yr_to_hubble_units
        for t in self.points[:,0]:
            # find z that gives closest value
            if len(z) == 0:
                temp_z = 5.
            z.append(optimize.fmin(self.redshift_finder, temp_z, args=(t,)))
            temp_z = z[-1]
        z = np.concatenate(z)
        binz = np.histogram(z)[1]
        bint = []
        #binerr = []
        for i in range(len(binz)-1):
            index = np.where(np.logical_and(z >= binz[i], z < binz[i+1]))[0]
            temp = []
            for j in index:
                temp.append(ufloat(means[j], std[j]))
            bint.append(np.median(temp))

        bint = np.asarray(bint)

        # make h_z
        y = -1/(binz[:-2]+1)*np.diff(binz[:-1])/np.diff(bint)
        # convert to hubble units
        y *= self.yr_to_hubble_units
        # turn back to numpy arrays
        error = unumpy.std_devs(y)
        y = unumpy.nominal_values(y)
        # fit
        recv, recv_prob = mcmc(lnprob, binz[:-2], y, error)
        return recv, recv_prob, binz[:-2], y, error
Code example #23
File: table.py (project: HelenaCarlArne/ProtokolleFP)
def gettype(*datenarray): # determines the data type of the arrays and rounds accordingly
	global stm # swiss army knife
	stm=np.array([])
	global rundarray
	rundarray=dict()
	for ID,array in enumerate(datenarray):
		rundarray[ID]=np.array([])
		if np.any(unp.std_devs(array)):
			stm=np.append(stm,"fehlerbehaftet")
			for element in array:
				rundarray[ID]=np.append(rundarray[ID],ufloat(round(element.n,crts(element.s)),rts(element.s)))
		if not np.any(unp.std_devs(array)):
			stm=np.append(stm,"normal")
			for element in array:
				rundarray[ID]=np.append(rundarray[ID],round(element,2))
	pass
Code example #24
File: calibration.py (project: matthewjpeel/edxrd)
    def __add__(self,other):
        if not isinstance(other,Calibration):
            raise TypeError("Can only add two 'Calibration' instances together")

        vals = np.vstack([np.hstack([self.candidates,other.candidates]),
                          np.hstack([self.known,other.known])])
    
        #sort all rows by value of first to maintain relationship
        vals = vals[:,vals[0,:].argsort()] 
        
        #fit a new polynomial through the combined data sets
        calvals = polyfit(unumpy.nominal_values(vals[0,:]),
                          vals[1,:],
                          unumpy.std_devs(vals[0,:]))    
        xfit = np.linspace(0,vals[0,:].max(),200)
        yfit = np.poly1d(calvals)(xfit)   
        
        #store everything again 
        cal = Calibration(calvals,vals[0,:],vals[1,:],xfit,yfit)        
        cal.match=[]
        cal.match.extend(self.match) 
        cal.match.extend(other.match) 
        cal.search=[]
        cal.search.extend(self.search) 
        cal.search.extend(other.search) 
        return cal
Code example #25
def fit_stretched_exp(x, y, yerr, xlong):
    start = time.time()
    p0 = (-1.5, 100, 0.15, 2)
    p, cov = curve_fit(func_stretched_exp, x, y, p0=p0, sigma = yerr, maxfev = 100000)
    print(p)
    perr = np.sqrt(np.diag(cov))    

    yfit = func_stretched_exp(x, p[0], p[1], p[2], p[3])
    resid = y-yfit
    dof = y.size - len(p)
    chi2 = np.sum((resid/yerr)**2)
    chi2red = chi2/dof
    p_val = 1 - stats.chi2.cdf(chi2,dof)
    yfit = func_stretched_exp(xlong, p[0], p[1], p[2], p[3])
    p = unumpy.uarray(p,perr)
    extrapolate_val = p[0]*unumpy.exp(-(extrapolate_times/p[1])**p[2])+p[3] 

    if p[2]>0:
        extrap_inf = p[3]
    elif p[2]<0:
        extrap_inf = p[0]+p[3]

    end = time.time()
    elapsed = end-start
    return (chi2red, resid, extrapolate_val, extrap_inf, 1, unumpy.nominal_values(p),
            unumpy.std_devs(p), yfit, elapsed, p_val)
Code example #26
def calibrateGaussmeter(pars):

    print ( pars[0] )
    print ( pars[1] )
    print ( pars[2] )

    # hard-coded input folder name
    fname_in = "data/DATA_Gaussmeter/"
    fname_in += pars[0]

    # hard-coded output folder name
    fname_out = "data-calib/DATA_Gaussmeter/"
    fname_out += pars[0]
    fname_out = fname_out.replace(".txt",".csv")

    # file with calibration measurement
    fname_cal = "data/DATA_Gaussmeter/"
    fname_cal += pars[2]

    data = ld.Gaussmeter(fname_in, drop=False)

    df = pd.DataFrame(data)

    # Calculate nominal magnet field Bnom based on current and calibration measurement
    Bnom = af.calc_applied_field_lin( df['multi'].values, fname_cal)
    df['Bnom'] = unumpy.nominal_values(Bnom)
    df['Bnom_sdev'] = unumpy.std_devs(Bnom)

    # Print three lines as a crosscheck
    print(df.head(3))

    # Write csv output
    df.to_csv(fname_out, index=False)
Code example #27
def detcoefs(f,nu,re,fh,fp,pr=None):
    """
    Determines the coefficients of the fit of the function

    Nu = a*Re^b*Fh^c, or of the function
    cf = a*Re^b*Fh^c

    Parameters
    ----------
    f  : function with the prototype:
        f((x,y),*coefs)
    nu : numpy.array of ufloats
        the Nusselt values for all measurements
    re : numpy.array of ufloats
        the Reynolds values for all measurements
    fh : numpy.array of floats
        the values of the ratio h/Dh for all measurements
    fp : numpy.array of floats
        the values of the ratio p/Dh for all measurements

    Returns
    -------
    coefs : array of ufloat
        the fitted coefficients with their errors
    rchi2 : float
        reduced chi-square for assessing the quality of the fit
    dof : integer
        the degrees of freedom

    """
    nu_n=uns.nominal_values(nu)
    nu_s=uns.std_devs(nu)
    w_nu=nu_s/nu_n
    re_n=uns.nominal_values(re)
    if pr is not None:
        pr_n=uns.nominal_values(pr)
        popt,pcov=curve_fit(f,(re_n,pr_n,fh,fp),nu_n,sigma=w_nu,
                        maxfev=1500)
        chi2=sum(((f((re_n,pr_n,fh,fp),*popt)-nu_n)/nu_s)**2)
    else:
        popt,pcov=curve_fit(f,(re_n,fh,fp),nu_n,sigma=w_nu,
                            maxfev=1500)
        chi2=sum(((f((re_n,fh,fp),*popt)-nu_n)/nu_s)**2)
    
    dof=len(nu_n)-len(popt)
    rchi2=chi2/dof
    
    coefs=[]
    for i in range(len(popt)):
        coefs.append(un.ufloat(popt[i],np.sqrt(pcov[i,i])))
    if pr is not None:
        func=lambda x,y,z,k:f((x,y,z,k),*popt)
    else:
        func=lambda x,y,z:f((x,y,z),*popt)
        
    return {'coefs':np.array(coefs),
            'rchi2':rchi2,
            'DOF':dof,
            'f':func
            }
Code example #28
File: results.py (project: OlgaVorokh/gammapy)
    def butterfly(self, energy, flux_unit='TeV-1 cm-2 s-1'):
        """
        Compute butterfly.

        Parameters
        ----------
        energy : `~astropy.units.Quantity`
            Energies at which to evaluate the butterfly.
        flux_unit : str
            Flux unit for the butterfly.

        Returns
        -------
        butterfly : `~gammapy.spectrum.SpectrumButterfly`
            Butterfly object.
        """
        from uncertainties import unumpy

        flux = self.model(energy)

        butterfly = SpectrumButterfly()
        butterfly['energy'] = energy
        butterfly['flux'] = flux.to(flux_unit)

        # compute uncertainties
        umodel = self.model_with_uncertainties
        values = umodel(energy.value)

        # unit conversion factor, in case it doesn't match
        conversion_factor =  flux.to(flux_unit).value / unumpy.nominal_values(values)
        flux_err = u.Quantity(unumpy.std_devs(values), flux_unit) * conversion_factor

        butterfly['flux_lo'] = flux - flux_err
        butterfly['flux_hi'] = flux + flux_err
        return butterfly
Code example #29
File: plot.py (project: DimensionalScoop/kautschuk)
def extract_error(data):
    if(isinstance(data[0], uncertainties.UFloat)):
        error = unp.std_devs(data)
        nominal = unp.nominal_values(data)
    else:
        nominal = data
        error = None
    return nominal, error
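A quick check of both branches, with the function above in scope (illustrative values):

import numpy as np
from uncertainties import unumpy as unp

print(extract_error(unp.uarray([1.0, 2.0], [0.1, 0.2])))
# -> (array([1., 2.]), array([0.1, 0.2]))
print(extract_error(np.array([1.0, 2.0])))
# -> (array([1., 2.]), None)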
Code example #30
def _ploteff(eff, part):
    from matplotlib import pyplot as plt
    label = EFF_LABELS.get(part, part)
    scale = EFF_SCALES.get(part, 1.0)
    color = EFF_COLORS.get(part, 'black')
    x, mask = eff['slit1'], eff['mask']
    y, dy = scale*nominal_values(eff[part]), scale*std_devs(eff[part])
    plt.errorbar(x, y, dy, fmt='.', color=color, label=label, capsize=0, hold=True)
Code example #31
def peak_to_total(a, b, delta):
    peak_int = np.sum(y[a:b])
    peak_int_error = (((y[a] * y[a]) + (y[b] * y[b])) * delta**2)**(1 / 2)
    if datensatz == 'Co_H':
        peak_int = np.sum(y[a:6195]) + np.sum(y[6990:b])
        peak_int_error = (((y[a] * y[a]) +
                           (y[6195] * y[6195])) * delta**2)**(1 / 2) + (
                               ((y[6990] * y[6990]) +
                                (y[b] * y[b])) * delta**2)**(1 / 2)
    peak_to_total = peak_int / unp.nominal_values(total_counts)
    peak_to_total_error = np.sqrt(
        (peak_int_error / unp.nominal_values(total_counts))**2 +
        (peak_int / unp.nominal_values(total_counts)**2 *
         unp.std_devs(total_counts))**2)
    # return peak_to_total
    return peak_to_total, peak_to_total_error
Code example #32
File: powerlaw.py (project: jknodlseder/gammapy)
def f_with_err(I_val=1,
               I_err=0,
               g_val=g_DEFAULT,
               g_err=0,
               e=1,
               e1=1,
               e2=E_INF):
    """Wrapper for f so the user doesn't have to know about
    the uncertainties module"""
    from uncertainties import unumpy
    I = unumpy.uarray(I_val, I_err)
    g = unumpy.uarray(g_val, g_err)
    _f = power_law_flux(I, g, e, e1, e2)
    f_val = unumpy.nominal_values(_f)
    f_err = unumpy.std_devs(_f)
    return f_val, f_err
Code example #33
File: test_utils.py (project: vuillaut/gammapy)
def test_integrate_spectrum_ecpl():
    """
    Test ecpl integration. Regression test for
    https://github.com/gammapy/gammapy/issues/687
    """
    from uncertainties import unumpy
    amplitude = unumpy.uarray(1e-12, 1e-13)
    index = unumpy.uarray(2.3, 0.2)
    reference = 1
    lambda_ = 0.1
    ecpl = ExponentialCutoffPowerLaw(index, amplitude, reference, lambda_)
    emin, emax = 1, 1e10
    val = ecpl.integral(emin, emax)

    assert_allclose(unumpy.nominal_values(val), 5.956578235358054e-13)
    assert_allclose(unumpy.std_devs(val), 9.278302514378108e-14)
Code example #34
File: results.py (project: vorugantia/gammapy)
    def butterfly(self, energy=None, flux_unit='TeV-1 cm-2 s-1'):
        """
        Compute butterfly.

        Parameters
        ----------
        energy : `~astropy.units.Quantity`, optional
            Energies at which to evaluate the butterfly.
        flux_unit : str
            Flux unit for the butterfly.

        Returns
        -------
        butterfly : `~gammapy.spectrum.SpectrumButterfly`
            Butterfly object.
        """
        from uncertainties import unumpy

        if energy is None:
            energy = EnergyBounds.equal_log_spacing(self.fit_range[0],
                                                    self.fit_range[1], 100)

        flux, flux_err = self.model.evaluate_error(energy)

        butterfly = SpectrumButterfly()
        butterfly['energy'] = energy
        butterfly['flux'] = flux.to(flux_unit)

        # compute uncertainties
        umodel = self.model_with_uncertainties

        if self.model.__class__.__name__ == 'PowerLaw2':
            energy_unit = self.model.parameters['emin'].unit
        else:
            energy_unit = self.model.parameters['reference'].unit

        values = umodel(energy.to(energy_unit).value)

        # unit conversion factor, in case it doesn't match
        conversion_factor = flux.to(flux_unit).value / unumpy.nominal_values(
            values)
        flux_err = u.Quantity(unumpy.std_devs(values),
                              flux_unit) * conversion_factor

        butterfly['flux_lo'] = flux - flux_err
        butterfly['flux_hi'] = flux + flux_err
        return butterfly
Code example #35
File: dynamickeMereni.py (project: sestami/Vyzkumak)
def export_R(R, V, N):
    # export the flow rates
    n = vymena_vzduchu(R, V, N)
    R = R.append(
        pd.DataFrame([n], index=[r'n $[\si{hod^{-1}}]$'], columns=['R']))

    R.index = R.index.str.replace('R', 'k')
    R = pd.DataFrame(
        np.array([unumpy.nominal_values(R),
                  unumpy.std_devs(R)]).T[0],
        columns=['hodnota $\left[\si{m^3/hod}\right]$', r'$\sigma$'],
        index=R.index)
    R.to_latex('vysledky_prutoky.tex',
               decimal=',',
               float_format='%0.2f',
               escape=False)
    return R
Code example #36
def build_design_matrix(x, y):
    y_invsigma = 1.0 / unumpy.std_devs(y)
    dims = x.shape[1]
    n = 1 + dims + dims * (dims + 1) // 2  # integer column count for np.zeros

    A = np.zeros(shape=(len(x), n))

    A[:, 0] = 1.0 * y_invsigma
    for i in range(dims):
        A[:, 1 + i] = x[:, i] * y_invsigma

    col = 1 + dims
    for j in range(dims):
        for k in range(j, dims):
            A[:, col] = x[:, j] * x[:, k] * y_invsigma
            col += 1
    return A
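The matrix built here encodes a full quadratic model, constant plus linear plus all pairwise product terms, with every row pre-scaled by 1/sigma_y so that an ordinary least-squares solve becomes a weighted one. A sketch of how it might be used (the solve step is an assumption, not part of the original snippet):

import numpy as np
from uncertainties import unumpy

rng = np.random.default_rng(1)
x = rng.random((50, 2))
y = unumpy.uarray(1.0 + x[:, 0] + 2.0 * x[:, 1] + 0.5 * x[:, 0] * x[:, 1], 0.05)

A = build_design_matrix(x, y)
b = unumpy.nominal_values(y) / unumpy.std_devs(y)  # scale the target the same way
coeffs, *_ = np.linalg.lstsq(A, b, rcond=None)
print(coeffs)  # approx [1, 1, 2, 0, 0.5, 0] for columns [1, x0, x1, x0*x0, x0*x1, x1*x1]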
Code example #37
File: test_image.py (project: arm61/islatu)
    def test_str(self):
        """
        Test str
        """
        b = io.StringIO(EXAMPLE_FILE)
        buf = io.BytesIO()
        im = PILIm.fromarray(np.loadtxt(b).astype(np.uint32))
        im.save(buf, format="png")
        buf.seek(0)
        test_image = Image(buf)
        data = io.StringIO(EXAMPLE_FILE)
        load = np.loadtxt(data)
        expected_image_e = np.sqrt(load)
        expected_image_e[np.where(load == 0)] = 1
        assert_almost_equal(load, unp.nominal_values(test_image.__str__()))
        assert_almost_equal(expected_image_e,
                            unp.std_devs(test_image.__str__()))
Code example #38
    def _get_dspacing_center(self):
        '''Internal function for getting d-spacing position'''
        effective_values, effective_errors = self.get_effective_params()
        theta_center = unumpy.uarray(
            0.5 * np.deg2rad(effective_values['Center']),
            0.5 * np.deg2rad(effective_errors['Center']))
        sine_theta = unumpy.sin(theta_center)
        try:
            dspacing_center = 0.5 * self._wavelength / sine_theta
        except ZeroDivisionError:
            # replace zeros in the denominator with nan explicitly
            dspacing_center = np.where(
                unumpy.nominal_values(sine_theta) != 0.,
                unumpy.std_devs(0.5 * self._wavelength /
                                sine_theta.clip(1e-9)), np.nan)

        return dspacing_center
Code example #39
def mittel_und_abweichung_intervall(messreihe, intervall_laenge):
    messreihe_einheit = messreihe.units
    mittelwert_abweichung_liste = []
    for i in range(len(messreihe))[::intervall_laenge]:
        mittelwert = sum(messreihe[i:i + intervall_laenge]) / len(
            messreihe[i:i + intervall_laenge])
        abweichung_des_mittelwertes = 1 / (np.sqrt(
            len(messreihe[i:i + intervall_laenge]))) * np.std(
                messreihe[i:i + intervall_laenge])
        mittelwert_abweichung_liste.append(
            ufloat(mittelwert.magnitude,
                   abweichung_des_mittelwertes.magnitude))
    mittelwert_abweichung_u = Q_(
        unp.uarray(unp.nominal_values(mittelwert_abweichung_liste),
                   unp.std_devs(mittelwert_abweichung_liste)),
        messreihe_einheit)
    return mittelwert_abweichung_u
Code example #40
def plot_sa(data):
    """
    Plot spin asymmetry data.
    """
    from matplotlib import pyplot as plt
    from uncertainties.unumpy import uarray as U, nominal_values, std_devs
    from ..refldata import Intent
    # TODO: interp doesn't test for matching resolution
    data = dict((d.polarization, d) for d in data)
    pp, mm = data['++'], data['--']
    v_pp = U(pp.v, pp.dv)
    v_mm = interp(pp.x, mm.x, U(mm.v, mm.dv))
    sa = (v_pp - v_mm) / (v_pp + v_mm)
    v, dv = nominal_values(sa), std_devs(sa)
    plt.errorbar(pp.x, v, yerr=dv, fmt='.', label=pp.name)
    plt.xlabel("%s (%s)" % (pp.xlabel, pp.xunits) if pp.xunits else pp.xlabel)
    plt.ylabel(r'$(R^{++} -\, R^{--}) / (R^{++} +\, R^{--})$')
Code example #41
File: powerlaw.py (project: watsonjj/gammapy)
def power_law_I_with_err(f_val=1,
                         f_err=0,
                         g_val=g_DEFAULT,
                         g_err=0,
                         e=1,
                         e1=1,
                         e2=E_INF):
    """Wrapper for f so the user doesn't have to know about
    the uncertainties module"""
    from uncertainties import unumpy

    f = unumpy.uarray(f_val, f_err)
    g = unumpy.uarray(g_val, g_err)
    _I = power_law_integral_flux(f, g, e, e1, e2)
    I_val = unumpy.nominal_values(_I)
    I_err = unumpy.std_devs(_I)
    return I_val, I_err
Code example #42
File: chaincrunch.py (project: jpinedaf/pyspecnest)
def cube_K(shape, rms, data, peaks=[0, 1, 2, 3], origin=(0, 0),
           header=None, writeto=None, **kwargs):
    """
    Construct a fits HDU with ln(K) values for all xy positions in a
    cube of a given shape. Optionally, writes a fits file.

    Additional keyword args are passed to lnK_xy function.
    """

    if origin != (0, 0):
        # TODO: implement this
        raise NotImplementedError("wip")

    Zs = cube_Z(shape, rms, data, peaks=peaks, origin=origin,
                header=header, writeto=None, **kwargs).data

    # this -2 denotes that the K array is a difference of Z-array
    # layers. It's 2, and not 1, because there are error layers as well
    zsize_K = Zs.shape[0] // 2 - 1
    lnKs = np.empty(shape=(zsize_K, ) + shape)
    lnKs.fill(np.nan)
    err_lnKs = lnKs.copy()
    for i in np.arange(zsize_K) + 1:
        # for all (i, j) such that i = j + 1
        Z_i = unumpy.uarray(Zs[i], Zs[i + zsize_K + 1])
        Z_j = unumpy.uarray(Zs[i - 1], Zs[i + zsize_K])
        K_ij = Z_i - Z_j

        lnKs[i - 1] = unumpy.nominal_values(K_ij)
        err_lnKs[i - 1] = unumpy.std_devs(K_ij)

    header = _tinker_header(header, ctype3='BAYES FACTORS', bunit='ln(Zi/Zj)')

    hdu = fits.PrimaryHDU(np.vstack([lnKs, err_lnKs]), header)
    for i in np.arange(zsize_K) + 1:
        head_key = 'lnK({}/{})'.format(i, i - 1)
        hdu.header['PLANE{}'.format(i)] = head_key
    for i in np.arange(zsize_K) + zsize_K + 1:
        head_key = 'err lnK({}/{})'.format(i, i - 1)
        hdu.header['PLANE{}'.format(i)] = head_key

    if writeto:
        hdu.writeto(writeto, overwrite=True)

    return hdu
Code example #43
    def test_populations(self):
        """Test that counts are properly converted to a population."""

        processor = DataProcessor("counts")
        processor.append(Probability("00", alpha_prior=1.0))

        # Test on a single datum.
        new_data = processor(self.exp_data_lvl2.data(0))

        self.assertAlmostEqual(float(unp.nominal_values(new_data)), 0.41666667)
        self.assertAlmostEqual(float(unp.std_devs(new_data)), 0.13673544235706114)

        # Test on all the data
        new_data = processor(self.exp_data_lvl2.data())
        np.testing.assert_array_almost_equal(
            unp.nominal_values(new_data),
            np.array([0.41666667, 0.25]),
        )
Code example #44
File: rendimiento.py (project: ingridheuer/labo4)
def plotear(X,Y,sig,label):
    try:
        varvals = un.nominal_values(Y).ravel()
        varerr = un.std_devs(Y).ravel()
        print(varerr)
        X = X.ravel()
        Xerr = 0.1 * np.ones(np.shape(X))
        varmenos = varvals - varerr
        varmas = varvals + varerr
        print(medicion)
        plt.errorbar(X,varvals,xerr=Xerr,yerr=varerr,fmt=sig,label=label)

        # plt.plot(X,varvals,'ok')
        # plt.fill_between(X,varmenos.ravel(),varmas.ravel(), alpha=0.5)
    except TypeError:
        print('the errors are not working')
        plt.plot(S,var,'.')
Code example #45
File: uncertainty.py (project: cartemic/DetResearch)
def df_split_uncert(df, columns, inplace=True):
    if isinstance(columns, str):
        columns = [columns]

    if inplace:
        df_ret = df
    else:
        df_ret = df.copy()

    good_cols = list(df_ret.keys())
    for col in columns:
        if col not in good_cols:
            raise ValueError(f"{col} not a valid column. Valid: {good_cols}")

        df_ret["u_" + col] = unp.std_devs(df[col].values)
        df_ret[col] = unp.nominal_values(df[col].values)

    return df_ret
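A short usage sketch, with the function above in scope (the column name is hypothetical):

import pandas as pd
from uncertainties import ufloat

df = pd.DataFrame({"p": [ufloat(1.0, 0.1), ufloat(2.0, 0.2)]})
df_split_uncert(df, "p")  # in place by default
print(df)                 # "p" now holds the nominal values, "u_p" the std devs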
Code example #46
File: test_fdiff.py (project: swartmilan/PXL)
def test_second_order_diff_uncertainties():
    """Test that `second_order_diff` works with uncertainties."""
    # Create a non-equally spaced x vector
    x = np.append(np.linspace(0, np.pi, 50),
                  np.linspace(np.pi + 0.01, 2 * np.pi, 100))
    x_unc = unumpy.uarray(x, np.ones(len(x)) * 1e-3)
    u = unumpy.uarray(np.sin(x), np.ones(len(x)) * 1e-2)
    dudx = second_order_diff(u, x)
    print(dudx[:5])
    print(dudx[-5:])
    if plot:
        plt.errorbar(x,
                     unumpy.nominal_values(dudx),
                     yerr=unumpy.std_devs(dudx),
                     fmt="-o",
                     lw=2,
                     alpha=0.5)
        plt.plot(x, np.cos(x), "--^", lw=2, alpha=0.5)
        plt.show()
Code example #47
def LinfitLinearRegression(x_true, y):

    if (x_true is not None) and (y is not None):

        if len(x_true) > 2:

            x_mag = unumpy.nominal_values(x_true)
            y_mag = unumpy.nominal_values(y)
            y_err = unumpy.std_devs(y)

            Regression_Fit, Uncertainty_Matrix = linfit(x_mag,
                                                        y_mag,
                                                        y_err,
                                                        cov=True,
                                                        relsigma=False)
            m_n_error = [sqrt(Uncertainty_Matrix[t, t]) for t in range(2)]

            gradient, gradient_error = Regression_Fit[0], m_n_error[0]
            n, n_error = Regression_Fit[1], m_n_error[1]

            Gradient_MagErr = ufloat(gradient, gradient_error)
            n_MagError = ufloat(n, n_error)

        elif len(x_true) == 2:

            x_mag = unumpy.nominal_values(x_true)
            y_mag = unumpy.nominal_values(y)

            m = (y_mag[1] - y_mag[0]) / (x_mag[1] - x_mag[0])

            n = y_mag[0] - m * x_mag[0]

            Gradient_MagErr = ufloat(m, 1e-4)
            n_MagError = ufloat(n, 1e-4)

        else:
            print('WARNING: Only one point to do a linear regression')

    else:

        Gradient_MagErr, n_MagError = None, None

    return Gradient_MagErr, n_MagError
Code example #48
File: data.py (project: ricleal/HFIRSANSReduction)
    def plot_iq_errors(self,n_bins=50):
        '''
        IQ with error propagation
        Note: this could have been done simply with:
        bin_means, bin_edges, binnumber = stats.binned_statistic(self.df['q'].values, self.df['counts'].values, statistic='mean', bins=n_bins)
        But it wouldn't have the errors in the mean.
        I am summing all the counts and error in every bin (sum of values => sum of errors) and then use the uncertainties package for division.
        '''

        plt.figure()
        x = self.df['q'].values
        y = self.df['counts'].values
        e = self.df['errors'].values

        # Let's get the histogram detail
        logger.debug("Binning Q.")

        occurrences_per_bin, bin_edges = np.histogram(x,bins=n_bins)
        bin_width = (bin_edges[1] - bin_edges[0])
        bin_centers = bin_edges[1:] - bin_width/2

        # Return the indices of the bins to which each value in input array belongs.
        inds_x = np.digitize(x, bin_edges, right=True)
        # Don't know why but it puts a single value in the bin 0! Move it to position 1
        idx_to_remove = np.where(inds_x==0)[0]
        inds_x[idx_to_remove]=1

        # Error propagation: sum of values implies sum of errors
        values_sums_per_bin = np.bincount(inds_x, weights=y, minlength=len(bin_edges) - 1) #sums all values in every bin
        values_sums_per_bin = values_sums_per_bin[1:] # remove bin 0 (no counts!)

        error_sums_per_bin = np.bincount(inds_x, weights=e, minlength=len(bin_edges) - 1) #sums all errors in every bin
        error_sums_per_bin=error_sums_per_bin[1:] # remove bin 0 (no counts!)
        # Calculate average per bin (sum of the values divided by the occurrences) with error propagation
        average_per_bin_un = unumpy.uarray(values_sums_per_bin,error_sums_per_bin) / occurrences_per_bin
        # Separate Values and error from the 2 arrays!
        values = unumpy.nominal_values(average_per_bin_un)
        errors = unumpy.std_devs(average_per_bin_un)

        plt.errorbar(bin_centers, values, yerr=errors, fmt='-', ecolor='g', capthick=2)
        plt.semilogx()
        plt.semilogy()
        plt.show()
Code example #49
def streu(path, name, savepath, s):
    N, theta, t = np.genfromtxt(path, unpack=True)
    Nerr = np.sqrt(N)
    uN = unp.uarray(N, Nerr)
    n = uN / t
    plt.xlabel(r'$\Theta/°$')
    plt.ylabel(r'$N/\si{\becquerel}$')
    params, errors = plotfit(theta,
                             unp.nominal_values(n),
                             Rutherford,
                             savepath,
                             yerr=unp.std_devs(n),
                             slice_=(unp.nominal_values(n) <= s),
                             p0=(0.001, 2))
    param = unp.uarray(params, errors)
    print(name)
    print("C, \Theta_0")
    print(param)
    return param[0]
Code example #50
    def test_iq_averaging(self):
        """Test averaging of IQ-data."""

        # This data represents IQ data for a single quantum circuit with 10 shots and 2 slots.
        iq_data = np.array(
            [
                [
                    [[-6.20601501e14, -1.33257051e15], [-1.70921324e15, -4.05881657e15]],
                    [[-5.80546502e14, -1.33492509e15], [-1.65094637e15, -4.05926942e15]],
                    [[-4.04649069e14, -1.33191056e15], [-1.29680377e15, -4.03604815e15]],
                    [[-2.22203874e14, -1.30291309e15], [-8.57663429e14, -3.97784973e15]],
                    [[-2.92074029e13, -1.28578530e15], [-9.78824053e13, -3.92071056e15]],
                    [[1.98056981e14, -1.26883024e15], [3.77157017e14, -3.87460328e15]],
                    [[4.29955888e14, -1.25022995e15], [1.02340118e15, -3.79508679e15]],
                    [[6.38981344e14, -1.25084614e15], [1.68918514e15, -3.78961044e15]],
                    [[7.09988897e14, -1.21906634e15], [1.91914171e15, -3.73670664e15]],
                    [[7.63169115e14, -1.20797552e15], [2.03772603e15, -3.74653863e15]],
                ]
            ],
            dtype=float,
        )
        iq_std = np.full_like(iq_data, np.nan)

        self.create_experiment_data(unp.uarray(iq_data, iq_std), single_shot=True)

        avg_iq = AverageData(axis=0)
        processed_data = avg_iq(data=np.asarray(self.iq_experiment.data(0)["memory"]))

        expected_avg = np.array([[8.82943876e13, -1.27850527e15], [1.43410186e14, -3.89952402e15]])
        expected_std = np.array(
            [[5.07650185e14, 4.44664719e13], [1.40522641e15, 1.22326831e14]]
        ) / np.sqrt(10)

        np.testing.assert_array_almost_equal(
            unp.nominal_values(processed_data),
            expected_avg,
            decimal=-8,
        )
        np.testing.assert_array_almost_equal(
            unp.std_devs(processed_data),
            expected_std,
            decimal=-8,
        )
Code example #51
def infinite_training_limit(energy, start):
    step = np.arange(len(energy))
    step2 = np.arange(start, len(energy))
    E_ewm = ewm(
        step2,
        step,
        energy,
        (1 - 1 / (2 + step2 / 20))[:, None],
        with_err=True,
    )
    param = scipy.optimize.curve_fit(
        lambda x, Einf, slope: Einf + slope * x,
        1 / step2,
        unp.nominal_values(E_ewm),
        sigma=unp.std_devs(E_ewm),
        absolute_sigma=True,
    )
    param = unp.uarray(param[0], np.sqrt(np.diag(param[1])))
    return param[0], param[1], E_ewm
Code example #52
File: tess.py (project: MarcoMuellner/SMURFS)
def mag(lc: LightCurve) -> LightCurve:
    """
    Converts and normalizes a LighCurve object to magnitudes.

    :param lc: lightcurve object
    :return: reduced light curve object
    """

    lc = lc.remove_nans()

    flux = lc.flux + (np.abs(2 * np.amin(lc.flux)) if np.amin(lc.flux) < 0 else 100)
    flux = unp.uarray(flux, lc.flux_err)
    flux = -2.5 * unp.log10(flux)
    flux = flux[~unp.isnan(flux)]
    flux -= np.median(flux)

    lc.flux = unp.nominal_values(flux) * u.mag
    lc.flux_err = unp.std_devs(flux) * u.mag
    return lc
Code example #53
def config_pne(N, area, r):
    area = area / 3600

    denslog = N / area
    #denslog = np.log10(density)

    errlog = np.sqrt(N) / area
    #errlog = err/(2.3*(density))
    #errlog = np.log10(err)

    denslog10 = unp.uarray(denslog, errlog)
    denslog10 = unp.log10(denslog10)
    errlog = unp.std_devs(denslog10)

    rmin = r / 60

    rlog = rmin
    #TO DO: add binsizes

    return denslog, errlog, rlog
Code example #54
File: mostrartodas2.py (project: ingridheuer/labo4)
def plotearS(style,*args):
    for i,var in enumerate(args):
        try:
            varerr = un.std_devs(var)
            varvals = un.nominal_values(var)

            varmenos = varvals - varerr
            varmas = varvals + varerr

            # plt.errorbar(S,varvals,yerr=varerr,fmt='.',label=labels[i])
            plt.plot(S.ravel(),varvals,'.',color=style[i])
            plt.fill_between(S.ravel(),varmenos.ravel(),varmas.ravel(), color=style[i], alpha=0.3)
        except TypeError:
            print('the errors are not working')
            plt.plot(S,var,'.',label=labels[i])

    # plt.title(medicion.split('.')[0].replace('_',' '))
    # plt.legend(loc='best')
    plt.ylabel('Temperatura (C)')
    plt.xlabel('Tiempo (s)')
Code example #55
def plot_confintervals(ax_obj, optpar, covpar, xarr, offset, fcolor='grey'):
    """
    Plots 3-Sigma Confidence Intervals in Fits of SN Parameters.
    Args:
        ax_obj  : Axes object on which the confidence interval is to be plotted
        optpar  : Optimised Parameters of the Fit
        covpar  : Covariance Parameters of the Fit
        xarr    : Array of X-Values over which confidence intervals are to be plotted
        offset  : Offset epoch in JD
        fcolor  : Fill color for the confidence intervals
    Returns:
        None
    """
    a1, a2, t0, a3 = unc.correlated_values(optpar, covpar)
    func = a1 * ((xarr - t0)**1.6) / (unp.exp(a2 * (
        (xarr - t0)**0.5) - 1)) + a3 * ((xarr - t0)**2)
    fit = unp.nominal_values(func)
    sigma = unp.std_devs(func)

    fitlow = fit - 3 * sigma
    fithigh = fit + 3 * sigma

    ax_obj.plot(xarr + offset,
                fitlow,
                ls='-.',
                c='k',
                lw=0.7,
                alpha=0.5,
                label='_nolegend_')
    ax_obj.plot(xarr + offset,
                fithigh,
                ls='-.',
                c='k',
                lw=0.7,
                alpha=0.5,
                label='_nolegend_')
    ax_obj.fill_between(xarr + offset,
                        fitlow,
                        fithigh,
                        facecolor=fcolor,
                        alpha=0.2)
Code example #56
def reduced_chi_square(Residuals, Sc, DataErrs=None):
    """
    Compute a reduced chi square value for the best fit Sc value.
    """
    # if we are fitting from patches or basins, get the std err and include
    # in the chi squared
    if DataErrs:
        r_star = R_Star(Sc, DataErrs[1], DataErrs[0])

        # get rid of any divide by zero errors
        temp = ((Residuals / unp.std_devs(r_star))**2)
        temp[np.isinf(temp)] = 0
        chi_square = np.sum(temp)

    else:
        chi_square = np.sum(Residuals**2)

    # degrees of freedom, as we have 1 free parameter, Sc
    d_o_f = Residuals.size - 2

    return chi_square / d_o_f
Code example #57
    def test_normalise_ter_b(self):
        """
        Test normalisation to 1 where the gradient max is after the first point
        """
        y = [1e9, 1.29e8, 6.25e7, 3.37e7, 2.00e7]
        dy = [1e7, 1.29e6, 6.25e5, 3.37e5, 2.00e5]

        x = np.logspace(-2, -0.8, 5)

        reflected_intensity = unumpy.uarray(y, dy)

        exp_y = unumpy.uarray(y, dy) / 1e9

        reflected_intensity = stitching.normalise_ter(x, reflected_intensity)

        assert_almost_equal(unumpy.nominal_values(reflected_intensity),
                            unumpy.nominal_values(exp_y))
        assert_almost_equal(
            unumpy.std_devs(reflected_intensity),
            [0.0, 0.0018243, 0.0008839, 0.0004766, 0.0002828],
        )
Code example #58
File: contact_utils.py (project: AspirinCode/Scripts)
def getError_diffPlot(wt_maps_list, s1p_maps_list, sep_maps_list):
    """
    Use the uncertainties package to calculate the error
    propagation in the operations (averaging of the 
    two types of phosphorylated systems, and the subtraction
    to get the difference plot.)
    """
    WT = np.dstack(wt_maps_list)
    S1P = np.dstack(s1p_maps_list)
    SEP = np.dstack(sep_maps_list)

    WT_uarray = unumpy.uarray(WT.mean(2), WT.std(2))
    S1P_uarray = unumpy.uarray(S1P.mean(2), S1P.std(2))
    SEP_uarray = unumpy.uarray(SEP.mean(2), SEP.std(2))

    diff_uarray = ((S1P_uarray + SEP_uarray) / 2) - WT_uarray

    diff_val = unumpy.nominal_values(diff_uarray)
    diff_std = unumpy.std_devs(diff_uarray)

    return(diff_val, diff_std)
Code example #59
File: FitTripleGaussian.py (project: sPaMFouR/RedPipe)
def plot_confintervals(ax_obj, optpar, covpar, xarr, fcolor='orange'):
    """
    Plots 3-Sigma Confidence Intervals in Fits of SN Parameters.
    Args:
        ax_obj  : Axes object on which the confidence interval is to be plotted
        optpar  : Optimised Parameters of the Fit
        covpar  : Covariance Parameters of the Fit
        xarr    : Array of X-Values over which confidence intervals are to be plotted
        fcolor  : Fill color for the confidence intervals
    Returns:
        None
    """
    def err(xarr, h, c, w):
        return h * unp.exp(-(xarr - c)**2 / (2 * w**2))

    h1, c1, w1, h2, c2, w2, h3, c3, w3, offset = unc.correlated_values(
        optpar, covpar)
    func = err(xarr, h1, c1, w1) + err(xarr, h2, c2, w2) + err(
        xarr, h3, c3, w3) + offset
    fit = unp.nominal_values(func)
    sigma = unp.std_devs(func)

    fitlow = fit - 3 * sigma
    fithigh = fit + 3 * sigma

    ax_obj.plot(xarr,
                fitlow,
                ls='-.',
                c='k',
                lw=0.7,
                alpha=0.5,
                label='_nolegend_')
    ax_obj.plot(xarr,
                fithigh,
                ls='-.',
                c='k',
                lw=0.7,
                alpha=0.5,
                label='_nolegend_')
    ax_obj.fill_between(xarr, fitlow, fithigh, facecolor=fcolor, alpha=0.3)
Code example #60
File: refl_data.py (project: arm61/islatu)
    def resolution_function(self,
                            qz_dimension=1,
                            progress=True,
                            detector_distance=None,
                            energy=None,
                            pixel_size=172e-6):
        """
        Estimate the q-resolution function based on the reflected intensity
        on the detector and add this to the q uncertainty.

        Args:
            qz_dimension (:py:attr:`int`, optional): The dimension of q_z in
                the detector image (this should be the opposite index to that
                the summation is performed if the
                :py:func:`islatu.background.fit_gaussian_1d` background
                subtraction has been performed). Defaults to :py:attr:`1`.
            progress (:py:attr:`bool`, optional): Show a progress bar.
                Requires the :py:mod:`tqdm` package. Defaults
                to :py:attr:`True`.
            detector_distance (:py:attr:`float`): Sample detector distance in
                metres
            energy (:py:attr:`float`): X-ray energy in keV
            pixel_size (:py:attr:`float`, optional): Pixel size in metres
        """
        iterator = _get_iterator(self.images, progress)
        for i in iterator:
            self.images[i].q_resolution(qz_dimension)
        if detector_distance is None:
            detector_distance = self.metadata["diff1detdist"][0] * 1e-3
        if energy is None:
            energy = self.metadata["dcm1energy"][0]
        offset = np.arctan(pixel_size * 1.96 * self.n_pixels * 0.5 /
                           (detector_distance))
        planck = physical_constants["Planck constant in eV s"][0] * 1e-3
        speed_of_light = physical_constants["speed of light in vacuum"][
            0] * 1e10
        q_uncertainty = energy * 4 * np.pi * unp.sin(offset) / (planck *
                                                                speed_of_light)
        self.q = unp.uarray(unp.nominal_values(self.q),
                            unp.std_devs(self.q) + q_uncertainty)