def test_x2_in_x1_2():
    """x2 has a couple of bins, each of which span more than one original bin"""
    # old size
    m = 10
    # bin edges
    x_old = np.linspace(0., 1., m+1)
    x_new = np.array([0.25, 0.55, 0.75])
    # some arbitrary distribution
    y_old = 1. + np.sin(x_old[:-1]*np.pi) / np.ediff1d(x_old)
    y_old = unp.uarray(y_old, 0.1*y_old*uniform((m,)))
    # rebin
    y_new = rebin.rebin(x_old, y_old, x_new, interp_kind='piecewise_constant')
    # compute answer here to check rebin
    y_new_here = unp.uarray(np.zeros(2), np.zeros(2))
    y_new_here[0] = 0.5 * y_old[2] + y_old[3] + y_old[4] + 0.5 * y_old[5]
    y_new_here[1] = 0.5 * y_old[5] + y_old[6] + 0.5 * y_old[7]
    # mean or nominal value comparison
    assert_allclose(unp.nominal_values(y_new),
                    unp.nominal_values(y_new_here))
    assert_allclose(unp.std_devs(y_new),
                    unp.std_devs(y_new_here))
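# A minimal sketch of the piecewise-constant rebinning the test above expects:
# each new bin collects the overlapping fraction of every old bin, so the
# first new bin [0.25, 0.55] is 0.5*y_old[2] + y_old[3] + y_old[4] + 0.5*y_old[5].
# This illustrates the overlap logic only, not the rebin package's actual
# implementation; the function name rebin_piecewise_constant is hypothetical.
import numpy as np


def rebin_piecewise_constant(x_old, y_old, x_new):
    """Sum the fractional overlap of each old bin into each new bin."""
    y_new = []
    for lo, hi in zip(x_new[:-1], x_new[1:]):
        total = 0.
        for j in range(len(y_old)):
            # overlap of new bin [lo, hi] with old bin [x_old[j], x_old[j+1]]
            left, right = max(lo, x_old[j]), min(hi, x_old[j + 1])
            if right > left:
                total += y_old[j] * (right - left) / (x_old[j + 1] - x_old[j])
        y_new.append(total)
    # object dtype so ufloat entries survive; plain floats work too
    return np.array(y_new, dtype=object)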
def day3_vacuum():
    plt.clf()
    ux = unp.uarray([800, 1000, 500], ux_error)  # V
    ug_crit = unp.uarray([110, 140, 30], ug_error)  # V
    omega = 2 * math.pi * ufloat(48, 1)  # Hz
    ug_crit = _apply_additional_proportional_error(ug_crit, stability_uncertainty_vacuum)
    ux *= ux_correction
    ug_crit *= ug_correction
    stability(ux, ug_crit, omega, label="Particle V1")
    plt.title("p = 300 mbar")
    plt.legend(loc=2)
    plt.savefig("images/stability_vacuum_1.pdf")

    plt.clf()
    ux = unp.uarray([700, 800], ux_error)  # V
    ug_crit = unp.uarray([100, 140], ug_error)  # V
    omega = 2 * math.pi * ufloat(45, 1)  # Hz
    ug_crit = _apply_additional_proportional_error(ug_crit, stability_uncertainty_vacuum)
    ux *= ux_correction
    ug_crit *= ug_correction
    stability(ux, ug_crit, omega, label="Particle V2")
    plt.title("p = 180 mbar")
    plt.legend(loc=2)
    plt.savefig("images/stability_vacuum_2.pdf")
def other(g=True):
    freq = unumpy.uarray([100, 500, 1000, 5000, 10000, 50000],
                         np.array([100, 500, 1000, 5000, 10000, 50000]) * 0.01)
    vin = ufloat(1.01, 0.01)
    vout = unumpy.uarray([0.640, 3.02, 5.27, 9.2, 9.6, 6.4],
                         [0.01, 0.01, 0.01, 0.1, 0.1, 0.1])
    fase = unumpy.uarray([92, 108, 123, 166, 178, -125], [1, 2, 1, 1, 1, 1])
    Gs = vout / vin
    dB = 10 * unumpy.log10(Gs)
    if not g:
        return None

    f = plt.figure(figsize=(8, 8))
    f.suptitle("Differenziatore", fontsize=15, y=0.98)

    ax = f.add_subplot(211)
    ax.errorbar(x=unumpy.nominal_values(freq),
                y=unumpy.nominal_values(Gs), c='black', fmt='o-')
    ax.set_xlabel('Frequenza', fontsize=14)
    ax.set_ylabel('Guadagno G', fontsize=14)
    ax.set_xscale('log')
    #ax.set_ylim((-13, 1))
    #ax.set_yticklabels(('', 2, 4, 6, 8, 10, 12))
    ax.set_xticklabels(('', '100 Hz', u"1 kHz", u"10 kHz", u"100 kHz", u"1 MHz"))
    ax.minorticks_on()
    ax.grid(b=True, which='major', color='0.7', linestyle='-', zorder=-5)
    ax.grid(b=True, which='minor', color='0.9', linestyle='-', zorder=-9)
    ax.set_axisbelow(True)

    ax2 = f.add_subplot(212)
    ax2.errorbar(x=unumpy.nominal_values(freq),
                 y=unumpy.nominal_values(fase), c='black', fmt='o-')
    ax2.set_ylabel('Sfasamento [Gradi]', fontsize=14)
    ax2.set_xscale('log')
    #ax2.set_yticklabels(('', 25, 50, 75, 100))
    ax2.set_xticklabels(('', '100 Hz', u"1 kHz", u"10 kHz", u"100 kHz"))
    ax2.minorticks_on()
    ax2.grid(b=True, which='major', color='0.7', linestyle='-', zorder=-5)
    ax2.grid(b=True, which='minor', color='0.9', linestyle='-', zorder=-9)
    ax2.set_axisbelow(True)

    ax3 = ax2.twiny()
    ax3.set_xticks((0, 0.333, 0.666, 1))
    ax3.set_xticklabels(('', "1 kHz", u"10 kHz", u"100 kHz"))

    f.subplots_adjust(top=0.93, hspace=0.25, bottom=0.07, right=0.95)
    plt.savefig("../latex/diff.pdf")
    plt.show()
def g_1000hz():
    g_1000hz = np.genfromtxt("../dati/g_1000hz.csv", skip_header=1, delimiter=",")
    vin = unumpy.uarray(g_1000hz[:, 0] * 0.001, g_1000hz[:, 1] * 0.001)
    vout = unumpy.uarray(g_1000hz[:, 2], g_1000hz[:, 3])
    Gs = vout / vin
    G = sum(Gs) / float(len(Gs))
    print(Gs, "\n", G)
def usum(self, width=False):
    """
    Return the sum of the bin contents and uncertainty.

    See sum().
    """
    if width:
        return np.dot(np.diff(self.bins), uarray(self.hist, self.errs))
    else:
        return np.sum(uarray(self.hist, self.errs))
def I_with_err(f_val=1, f_err=0, g_val=g_DEFAULT, g_err=0, e=1, e1=1, e2=E_INF):
    """Wrapper for power_law_integral_flux so the user doesn't have to know
    about the uncertainties module"""
    from uncertainties import unumpy
    f = unumpy.uarray(f_val, f_err)
    g = unumpy.uarray(g_val, g_err)
    _I = power_law_integral_flux(f, g, e, e1, e2)
    I_val = unumpy.nominal_values(_I)
    I_err = unumpy.std_devs(_I)
    return I_val, I_err
def plot_u(cal_file, fm_file, offset_file, description, accidental_offset, results_file):
    M = np.genfromtxt(cal_file)
    N = np.genfromtxt(fm_file)
    O = np.genfromtxt(offset_file)

    i_helm = M[:, 1]  # current applied to helmholtz for calibration measurement
    b_helm = M[:, 2]  # field applied to helmholtz coil for calibration measurement
    p, cov = np.polyfit(i_helm, b_helm, 1, cov=True)  # fit a line to the calibration measurement so that we get a calibration

    i_fm = N[:, 1]  # current applied to helmholtz for shielding measurement
    b_fm = unumpy.uarray(N[:, 2], 0.0005) - accidental_offset  # field measured inside of ferromagnet shield

    B_earth = np.polyval(p, 0)  # we get the Earth's magnetic field from i=0 of the Helmholtz calibration
    B_fm_no_i = unumpy.uarray(np.mean(O[:, 2]), np.std(O[:, 2]))  # get average and error for initial magnetization

    mag = B_fm_no_i - B_earth  # initial magnetization is the field inside the ferromagnet before any field is applied, minus the Earth's magnetic field

    Bin = b_fm - mag  # internal field is the measured internal field minus the initial magnetization; this correction might not be necessary for a soft ferromagnet
    Bext = unumpy.uarray(np.polyval(p, i_fm), 0.0005)  # external field
    Bext_nom = unumpy.nominal_values(Bext)
    Bext_err = unumpy.std_devs(Bext)
    B = Bext / Bin
    c = a / b

    u = (-2*B + c**2 - 2*unumpy.sqrt(B**2 - B*c**2 - B + c**2) + 1) / (c**2 - 1)
    u_nom = unumpy.nominal_values(u)
    u_err = unumpy.std_devs(u)

    # calculate u_err with just point-to-point uncertainties. I define this as just
    # uncertainty from the field measurements
    u_pp = (-2*B + c.nominal_value**2 - 2*unumpy.sqrt(B**2 - B*c.nominal_value**2 - B + c.nominal_value**2) + 1) / (c.nominal_value**2 - 1)

    # calculate u_err from just geometry uncertainties
    u_geom = (-2*unumpy.nominal_values(B) + c**2 - 2*unumpy.sqrt(unumpy.nominal_values(B)**2 - unumpy.nominal_values(B)*c**2 - unumpy.nominal_values(B) + c**2) + 1) / (c**2 - 1)

    # obtain uncertainties from field
    u_err_pp = unumpy.std_devs(u_pp)
    # obtain uncertainties from geometry
    u_err_geom = unumpy.std_devs(u_geom)

    with open(results_file, "w") as myfile:
        myfile.write('#Bext, sig_Bext, ur, sig_ur, sig_ur_pp, sig_ur_corr\n')
        for j in range(0, len(u_nom)):
            myfile.write('%s\t%s\t%s\t%s\t%s\t%s\n' % (
                Bext_nom[j], Bext_err[j], u_nom[j], u_err[j], u_err_pp[j], u_err_geom[j]))

    plt.errorbar(Bext_nom, u_nom, u_err, marker='.', label=description)
def f_with_err(I_val=1, I_err=0, g_val=g_DEFAULT, g_err=0, e=1, e1=1, e2=E_INF):
    """Wrapper for power_law_flux so the user doesn't have to know about
    the uncertainties module"""
    from uncertainties import unumpy
    I = unumpy.uarray(I_val, I_err)
    g = unumpy.uarray(g_val, g_err)
    _f = power_law_flux(I, g, e, e1, e2)
    f_val = unumpy.nominal_values(_f)
    f_err = unumpy.std_devs(_f)
    return f_val, f_err
def compute_hwz(N_list, ttor, fit, plotname, title, sl=slice(None, None), Uscale=1, p0=None, eq=None, plabels=None, punits=None, Th_erw=None):
    N = np.sum(unp.uarray(N_list, np.sqrt(N_list)), axis=0)
    t = np.arange(len(N)) * ttor + ttor / 2.

    table = pt.PrettyTable()
    table.add_column('t [s]', t.astype(int), align='r')
    if len(N_list) > 1:
        for i in range(len(N_list)):
            table.add_column('N' + str(i + 1), N_list[i].astype(int), align='r')
        table.add_column('Summe', N, align='r')
    else:
        table.add_column('N', N, align='r')
    with open("Resources/table_" + plotname + ".txt", "w") as text_file:
        text_file.write(table.get_string())

    global N_U
    N_U = N_U0 * Uscale * ttor
    popt, pstats = papstats.curve_fit(fit, t[sl], N[sl], p0=p0)

    # background uncertainty: refit with the background shifted by one sigma in each direction
    N_U = (N_U0 - N_U0.s) * Uscale * ttor
    popt_min, pstats_min = papstats.curve_fit(fit, t[sl], N[sl], p0=p0)
    N_U = (N_U0 + N_U0.s) * Uscale * ttor
    popt_max, pstats_max = papstats.curve_fit(fit, t[sl], N[sl], p0=p0)
    N_U = N_U0 * Uscale * ttor
    s_U = unp.nominal_values(((np.abs(popt - popt_min) + np.abs(popt - popt_max)) / 2.))
    s_corrected = np.sqrt(unp.std_devs(popt)**2 + s_U**2)
    popt_corrected = unp.uarray(unp.nominal_values(popt), s_corrected)

    # half-life
    Th = popt_corrected[::2] * unc.umath.log(2)
    for i in range(len(Th)):
        papstats.print_rdiff(Th[i] / 60, Th_erw[i] / 60)

    # plot
    plt.clf()
    plt.title('Diagramm ' + plotname + ': ' + title)
    plt.xlabel('Messzeit $t \, [s]$')
    plt.ylabel('Ereigniszahl $N$')
    xspace = np.linspace(0, t[-1])
    papstats.plot_data(t, N, label='Messpunkte')
    papstats.plot_fit(fit, popt, pstats, xspace, eq=eq, plabels=plabels, punits=punits)
    plt.fill_between(xspace, fit(xspace, *unp.nominal_values(popt_min)),
                     fit(xspace, *unp.nominal_values(popt_max)), color='g', alpha=0.2)
    Nmin = np.amin(unp.nominal_values(N))
    for i in range(len(Th)):
        plt.hlines(popt[1::2][i].n / 2. + N_U.n, 0, Th[i].n, lw=2,
                   label='Halbwertszeit $' + papstats.pformat(Th[i], label=r'T_{\frac{1}{2}}' + ('^' + str(i + 1) if len(Th) > 1 else ''), unit='s') + '$')
    handles, labels = plt.gca().get_legend_handles_labels()
    p = plt.Rectangle((0, 0), 1, 1, color='g', alpha=0.2)
    handles.append(p)
    labels.append('Fit im ' + r'$1 \sigma$' + '-Bereich von $N_U$:' + ''.join(['\n$' + papstats.pformat(s_U[i], label='\Delta ' + plabels[i] + '^{U}', unit=punits[i]) + '$' for i in range(len(plabels))]))
    plt.legend(handles, labels)
    papstats.savefig_a4(plotname + '.png')
def evaluate_permeability(fname_data="samplemeasurement.csv", Bin="B1", Bout="B2",
                          fname_do="fm618_do_cryo.txt", fname_th="fm618_th_cryo.txt"):
    print("Evaluating permeability for: ", fname_data, Bin, Bout, fname_do, fname_th)

    # prepare result dataframe
    result = pd.DataFrame(columns=["Bout", "Bout_sdev", "mu", "mu_err_pp", "mu_err_geom"])

    # get data
    data = pd.read_csv(fname_data)
    print(data.head(10))

    # set the uncertainty for the field reading manually to 0.005 if it does not exist
    if Bin + '_sdev' not in data:
        print("Use uncertainty of 0.005 for ", Bin + '_sdev')
        data[Bin + '_sdev'] = 0.005  # Gaussmeter precision (0.01 mT above 30 mT)
    if Bout + '_sdev' not in data:
        print("Use uncertainty of 0.005 for ", Bout + '_sdev')
        data[Bout + '_sdev'] = 0.005

    # copy Bin and Bout to results
    result['Bout_c'] = unumpy.uarray(abs(data[Bout].values), data[Bout + '_sdev'].values)
    result['Bin_c'] = unumpy.uarray(abs(data[Bin].values), data[Bin + '_sdev'].values)

    # get inner and outer diameter
    diam_out = mean_from_file('diameter_files/' + fname_do)
    thickness = mean_from_file('diameter_files/' + fname_th)
    diam_in = diam_out - 2 * thickness

    # check ideal permeability
    calc_mu_cloak(diam_in, diam_out)

    # calculate permeability and store values and uncertainties in arrays
    (mu_c, mu_err, mu_err_pp, mu_err_geom) = calc_mu(Bin=result['Bin_c'], Bout=result['Bout_c'],
                                                     radius_inner=diam_in, radius_outer=diam_out)

    result["mu"] = mu_c
    result["mu_err"] = mu_err
    result["mu_err_pp"] = mu_err_pp
    result["mu_err_geom"] = mu_err_geom

    # same for Bout
    result["Bout"] = unumpy.nominal_values(result["Bout_c"])
    result["Bout_sdev"] = unumpy.std_devs(result["Bout_c"])
    #print(result.head(10))

    # drop the helper columns 'Bin_c' and 'Bout_c'
    result.drop('Bin_c', axis=1, inplace=True)
    result.drop('Bout_c', axis=1, inplace=True)

    return result
def plot_u(cal_file, fm_file, description, accidental_offset, results_file):
    M = np.genfromtxt(cal_file)  # turn calibration file into a matrix
    N = np.genfromtxt(fm_file)  # turn fm_scan file into a matrix

    i_helm = M[:, 1]  # current applied to helmholtz for calibration measurement
    b_helm = M[:, 2]  # field applied to helmholtz coil for calibration measurement
    p, cov = np.polyfit(i_helm, b_helm, 1, cov=True)  # fit a line to the calibration measurement so that we get a calibration

    i_fm = N[:, 1]  # current applied to helmholtz for shielding measurement
    Bin = unumpy.uarray(N[:, 2], 0.0005) - accidental_offset  # field measured inside of ferromagnet shield
    Bin_nom = unumpy.nominal_values(Bin)
    Bin_err = unumpy.std_devs(Bin)

    Bext = unumpy.uarray(np.polyval(p, i_fm), 0.0005)  # external field
    Bext_nom = unumpy.nominal_values(Bext)
    Bext_err = unumpy.std_devs(Bext)

    B = Bin/Bext  # ratio of internal to external field

    # calculate permeability
    u = (B*c**2 + B - 2 - 2*unumpy.sqrt(B**2*c**2 - B*c**2 - B + 1)) / (B*c**2 - B)
    print(u)
    u_nom = unumpy.nominal_values(u)
    u_err = unumpy.std_devs(u)

    # calculate u_err with just point-to-point uncertainties. I define this as just
    # uncertainty from the field measurements
    u_pp = (B*c.n**2 + B - 2 - 2*unumpy.sqrt(B**2*c.n**2 - B*c.n**2 - B + 1)) / (B*c.n**2 - B)
    u_err_pp = unumpy.std_devs(u_pp)

    # calculate u_err from just geometry uncertainties
    u_geom = (unumpy.nominal_values(B)*c**2 + unumpy.nominal_values(B) - 2
              - 2*unumpy.sqrt(unumpy.nominal_values(B)**2*c**2 - unumpy.nominal_values(B)*c**2 - unumpy.nominal_values(B) + 1)) \
             / (unumpy.nominal_values(B)*c**2 - unumpy.nominal_values(B))
    u_err_geom = unumpy.std_devs(u_geom)

    # write results to a text file
    with open(results_file, "w") as myfile:
        myfile.write('#Bext, sig_Bext, Bi, sig_Bi, ur, sig_ur, sig_ur_pp, sig_ur_corr\n')
        for j in range(0, len(u_nom)):
            myfile.write('%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n' % (
                Bext_nom[j], Bext_err[j], Bin_nom[j], Bin_err[j],
                u_nom[j], u_err[j], u_err_pp[j], u_err_geom[j]))

    plt.errorbar(Bext_nom, u_nom, u_err, marker='.', label=description)
def power_law_I_with_err(f_val=1, f_err=0, g_val=g_DEFAULT, g_err=0, e=1, e1=1, e2=E_INF):
    """Evaluate power-law flux and propagate errors."""
    from uncertainties import unumpy
    f = unumpy.uarray(f_val, f_err)
    g = unumpy.uarray(g_val, g_err)
    _I = power_law_integral_flux(f, g, e, e1, e2)
    I_val = unumpy.nominal_values(_I)
    I_err = unumpy.std_devs(_I)
    return I_val, I_err
def power_law_f_with_err(I_val=1, I_err=0, g_val=g_DEFAULT, g_err=0, e=1, e1=1, e2=E_INF):
    """Evaluate power-law ``dnde`` and propagate errors."""
    from uncertainties import unumpy
    I = unumpy.uarray(I_val, I_err)
    g = unumpy.uarray(g_val, g_err)
    _f = power_law_flux(I, g, e, e1, e2)
    f_val = unumpy.nominal_values(_f)
    f_err = unumpy.std_devs(_f)
    return f_val, f_err
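# The four wrappers above share one pattern: lift plain (value, error) inputs
# into uarrays, call the underlying numpy-compatible function, then unpack
# nominal values and standard deviations. A minimal, self-contained sketch of
# that pattern; plain_power_law is a stand-in model, not the wrapped function
# from the source.
import numpy as np
from uncertainties import unumpy


def plain_power_law(e, amplitude, index):
    # any function built from numpy/unumpy operations works here
    return amplitude * e ** (-index)


def power_law_with_err(e, amp_val, amp_err, index_val, index_err):
    amplitude = unumpy.uarray(amp_val, amp_err)
    index = unumpy.uarray(index_val, index_err)
    result = plain_power_law(np.asarray(e, dtype=float), amplitude, index)
    return unumpy.nominal_values(result), unumpy.std_devs(result)


# e.g. power_law_with_err([1., 10.], 1e-12, 1e-13, 2.3, 0.2)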
def calcGasMass(DMpc, FHI, FCO, FCOerr):
    # Calculate HI gas mass
    FHI = unumpy.uarray(FHI, (FHI * 0.2))
    MHI = (2.356E5) * ((DMpc)**2.) * (FHI)
    # Calculate H2 gas mass
    FCO = unumpy.uarray(FCO, FCOerr)
    MH2 = 7845 * FCO * ((DMpc)**2.)
    where_are_nans = unumpy.isnan(MH2)
    MH2[where_are_nans] = 0
    # Total gas mass
    Mgas = MHI + MH2
    return MHI, MH2, Mgas
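# A hedged usage sketch for calcGasMass above. The distance and flux numbers
# are made-up illustrative values, not data from the source: FHI gets a flat
# 20% uncertainty inside the function, FCO carries its own per-point errors,
# and NaN (undetected CO) is zeroed out before the total is formed.
import numpy as np
from uncertainties import unumpy

DMpc = 10.0                          # distance in Mpc (illustrative)
FHI = np.array([12.3, 5.6])          # HI fluxes (illustrative)
FCO = np.array([3.1, np.nan])        # CO fluxes, NaN where undetected
FCOerr = np.array([0.4, np.nan])

# MHI, MH2, Mgas = calcGasMass(DMpc, FHI, FCO, FCOerr)
# print(unumpy.nominal_values(Mgas), unumpy.std_devs(Mgas))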
def __init__(self, x, y, text, saveAffix, execludeLast=False):
    from ROOT import TPaveText
    self.x = x
    self.y = y
    self.text = text
    self.saveAffix = saveAffix

    error_ug = 15
    error_ux = 10 * sqrt(2)
    x = uarray(array(x), [error_ug] * len(x))
    y = uarray(array(y), [error_ux] * len(y))
    x = x**2

    self.reg = linearRegression(x, y, execludeLast)
    self.reg.func.SetParNames('a', 'b')
    self.reg.draw(";U^{2}_{i} [V^{2}];U_{g} [V]")
    self.reg.canvas.cd()
    label = TPaveText(0.1, 0.95, .86, 1, "NDC")
    label.AddText(text)
    label.SetFillStyle(0)
    label.SetBorderSize(0)
    label.Draw()
    ROOT.kPrint = 0
    '''
    ROOT.kInfo = 1000
    ROOT.kWarning = 2000;
    ROOT.kError = 3000;
    ROOT.Break = 4000;
    ROOT.kSysError = 5000;
    ROOT.kFatal = 6000;
    '''
    self.reg.canvas.SaveAs('linReg%s.pdf' % saveAffix)

    # calculate q_m
    # variable definitions:
    from math import pi
    K = 8
    w = 2. * pi * 30
    e_w = 2. * pi
    r = 0.0305 / 2
    e_r = 0.0002 / 2
    #print('"{} +/- {}",'.format(self.reg.func.GetParameter(0), self.reg.func.GetParError(0)))
    b = self.reg.func.GetParameter(1)
    if b == 0:
        return
    e_b = self.reg.func.GetParError(1)
    q_m = -2. * w**2 * r**2 * b / (3 * K) * 1e6
    stat = abs(1. * q_m * e_b / b)
    sys = abs(2. * q_m * sqrt((e_w / w)**2 + (e_r / r)**2))
    #print('{}: q/m = {:.4e} ± {:.2e} (stat) ± {:.2e} (sys) ± {:.2e} (total) μC/kg\n'.format(saveAffix, q_m, stat, sys, sqrt(stat**2 + sys**2)))
    druck = {'Luft1': 1000, 'Luft2': 1000, '375bar': 375,
             '400bar': 400, '425bar': 425, '425bar2': 425}
    print('{} & {:.3g} ± {:.2g} (stat) ± {:.2g} (sys) {:.2g}\\\\'.format(
        druck[saveAffix], -q_m, stat, sys, sqrt(stat**2 + sys**2)))
def uintegrate(self, x1, x2, width=False):
    """
    Return the integral of the bin contents from `x1` to `x2` and
    uncertainty.

    See integrate().
    """
    i1, i2 = self.findbin([x1, x2])
    if width:
        return np.dot(np.diff(self.bins[i1:i2+2]),
                      uarray(self.hist[i1:i2+1], self.errs[i1:i2+1]))
    else:
        return np.sum(uarray(self.hist[i1:i2+1], self.errs[i1:i2+1]))
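# Why uarray is used in usum/uintegrate above: summing a uarray makes the
# uncertainties package combine independent bin errors in quadrature instead
# of adding them linearly. A self-contained check with illustrative values:
import numpy as np
from uncertainties import unumpy

hist = np.array([4., 9., 16.])
errs = np.sqrt(hist)                     # Poisson-like bin errors
total = np.sum(unumpy.uarray(hist, errs))
print(total.nominal_value)               # 29.0
print(total.std_dev)                     # sqrt(4 + 9 + 16) ~ 5.39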
def efficiencies(input, ht_val, alphat_binning, mhtmet_binning):
    vals = np.zeros((len(mhtmet_binning), len(alphat_binning)))
    errs = np.zeros_like(vals)
    diff = unumpy.uarray(vals, errs)
    cumu = unumpy.uarray(vals, errs)
    for imht in range(len(mhtmet_binning)):
        mhtmet_val = mhtmet_binning[imht]
        for ialphat in range(len(alphat_binning)):
            alphat_val = alphat_binning[ialphat]
            diff[imht][ialphat] = efficiency(input, ht_val, alphat_val, mhtmet_val, True)
            cumu[imht][ialphat] = efficiency(input, ht_val, alphat_val, mhtmet_val, False)
            # if ialphat == len(alphat_binning)-1 :
            #     diff[imht][ialphat] = cumu[imht][ialphat]
    return diff, cumu
def auswerten(name, d, n, t, z, V_mol, eps, raw):
    d *= 1e-3
    N = unp.uarray(n/t, np.sqrt(n)/t) - N_u

    if name == "Cu":
        tools.table((raw[0], raw[1], N),
                    ("D/mm", "n", "(N-N_U)/\per\second"),
                    "build/{}.tex".format(name),
                    "Messdaten von {}.".format(name),
                    "tab:daten{}".format(name),
                    split=2,
                    footer=r"$\Delta t = \SI{60}{s}$")
    else:
        tools.table((raw[0], raw[1], raw[2], N),
                    ("D/mm", "n", "\Delta t/s", "(N-N_U)/\per\second"),
                    "build/{}.tex".format(name),
                    "Messdaten von {}.".format(name),
                    "tab:daten{}".format(name),
                    split=2)

    mu = z * const.N_A / V_mol * 2 * np.pi \
        * (const.e**2 / (4 * np.pi * const.epsilon_0 * const.m_e * const.c**2))**2 \
        * ((1+eps)/eps**2 * ((2 * (1+eps))/(1+2*eps) - 1/eps * np.log(1+2*eps))
           + 1/(2*eps) * np.log(1+2*eps) - (1+3*eps)/(1+2*eps)**2)

    params, pcov = curve_fit(fit, d, unp.nominal_values(N), sigma=unp.std_devs(N))
    params_ = unc.correlated_values(params, pcov)
    print("{}: N(0) = {}, µ = {}, µ_com = {}".format(name, params_[0], -params_[1], mu))

    sd = np.linspace(0, .07, 1000)
    valuesp = (fit(sd, *(unp.nominal_values(params_) + 10*unp.std_devs(params_)))).astype(float)
    valuesm = (fit(sd, *(unp.nominal_values(params_) - 10*unp.std_devs(params_)))).astype(float)

    #plt.xlim(0,7)
    plt.xlabel(r"$D/\si{mm}$")
    plt.ylabel(r"$(N-N_U)/\si{\per\second}$")
    plt.plot(1e3*sd, fit(sd, *params), 'b-', label="Fit")
    plt.fill_between(1e3*sd, valuesm, valuesp, facecolor='blue', alpha=0.125,
                     edgecolor='none', label=r'$1\sigma$-Umgebung ($\times 10$)')
    plt.errorbar(1e3*d, unp.nominal_values(N), yerr=unp.std_devs(N), fmt='rx', label="Messdaten")
    plt.legend(loc='best')
    plt.yscale('linear')
    plt.tight_layout(pad=0)
    plt.savefig("build/{}.pdf".format(name))
    plt.yscale('log')
    plt.savefig("build/{}_log.pdf".format(name))
    plt.clf()
def test_Estrainfit(self):
    # create some fake strains
    d0 = 2.34
    E0 = 6.19920965 / (d0 * np.sin(np.radians(4.5 / 2)))
    e1 = 0.002
    e2 = -0.0012
    e12 = 0.0008
    # the azimuthal angle
    phi = np.linspace(-np.pi/2, np.pi/2, 23)
    # distort the lattice as a function of phi
    edata = E0 + E0*(e1*np.cos(phi)**2 + e2*np.sin(phi)**2 + e12*np.sin(2*phi))
    edata += np.random.uniform(-0.00002, 0.00002, edata.size)
    edata = unumpy.uarray(edata, np.sqrt(edata))
    # fit
    result = strain.strain_from_E(edata, phi, E0)
    # check the results
    self.assertAlmostEqual(e1, result['strain_x'].nominal_value, 4)
    self.assertAlmostEqual(e2, result['strain_y'].nominal_value, 4)
    self.assertAlmostEqual(e12, result['strain_xy'].nominal_value, 4)
    # fit with d0
    result = strain.strain_from_E(edata, phi, d0=d0, tth=4.5)
    # check the results
    self.assertAlmostEqual(e1, result['strain_x'].nominal_value, 4)
    self.assertAlmostEqual(e2, result['strain_y'].nominal_value, 4)
    self.assertAlmostEqual(e12, result['strain_xy'].nominal_value, 4)
def fit_stretched_exp(x, y, yerr, xlong):
    start = time.time()
    p0 = (-1.5, 100, 0.15, 2)
    p, cov = curve_fit(func_stretched_exp, x, y, p0=p0, sigma=yerr, maxfev=100000)
    print(p)
    perr = np.sqrt(np.diag(cov))
    yfit = func_stretched_exp(x, p[0], p[1], p[2], p[3])
    resid = y - yfit
    dof = y.size - len(p)
    chi2 = np.sum((resid/yerr)**2)
    chi2red = chi2/dof
    p_val = 1 - stats.chi2.cdf(chi2, dof)
    yfit = func_stretched_exp(xlong, p[0], p[1], p[2], p[3])
    p = unumpy.uarray(p, perr)
    extrapolate_val = p[0]*unumpy.exp(-(extrapolate_times/p[1])**p[2]) + p[3]
    if p[2] > 0:
        extrap_inf = p[3]
    elif p[2] < 0:
        extrap_inf = p[0] + p[3]
    end = time.time()
    elapsed = end - start
    return (chi2red, resid, extrapolate_val, extrap_inf, 1,
            unumpy.nominal_values(p), unumpy.std_devs(p), yfit, elapsed, p_val)
def mean(values, axis=0):
    """Returns mean values and their mean errors of a given array.
    Return value will be a unp.uarray.

    Args:
        values: (list) Array containing numbers whose mean is desired.
        axis: (int) Axis along which the means are computed. Defaults to
            axis 0.
    """
    return unp.uarray(np.mean(noms(values), axis=axis),
                      scipy.stats.sem(noms(values), axis=axis))
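# Usage sketch for mean() above: column-wise means of repeated measurements,
# with the standard error of the mean attached as the uncertainty. The values
# are illustrative, and noms is assumed to alias unp.nominal_values as in the
# surrounding code.
from uncertainties import unumpy as unp

values = unp.uarray([[1.0, 2.0], [1.2, 2.2], [0.8, 1.8]],
                    [[0.1, 0.1], [0.1, 0.1], [0.1, 0.1]])
# m = mean(values, axis=0)   # -> uarray([~1.0+/-SEM, ~2.0+/-SEM])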
def fit_func(func, x, y, yerr, xlong, ylong):
    start = time.time()
    p, cov = curve_fit(func, x, y, p0=initial_guess(func, x, y), sigma=yerr, maxfev=1000000)
    # print(p)
    if func == func_power:
        pi = p
        p, cov = curve_fit(func, x, y, p0=initial_guess(func, x + p[2], y), sigma=yerr, maxfev=1000000)
        while np.any((pi/p - 1) > 0.001):
            pi = p
            p, cov = curve_fit(func, x, y, p0=initial_guess(func, x + p[2], y), sigma=yerr, maxfev=1000000)
    perr = np.sqrt(np.diag(cov))
    chi2red, pval = calculate_stats(x, y, yerr, p, perr, func)
    yfit = func(xlong, *p)
    resid = ylong - yfit
    p = unumpy.uarray(p, perr)
    extrapolate_val = func_err_prop(func, extrapolate_times, p)
    print(extrapolate_val)
    end = time.time()
    elapsed = end - start
    return (p, yfit, resid, extrapolate_val, chi2red, pval, elapsed)
def test_integrate_spectrum_ecpl():
    """
    Test ecpl integration. Regression test for
    https://github.com/gammapy/gammapy/issues/687
    """
    from uncertainties import unumpy
    amplitude = unumpy.uarray(1E-12, 1E-13)
    index = unumpy.uarray(2.3, 0.2)
    reference = 1
    lambda_ = 0.1
    ecpl = ExponentialCutoffPowerLaw(index, amplitude, reference, lambda_)
    emin, emax = 1, 1E10
    val = ecpl.integral(emin, emax)

    assert_allclose(unumpy.nominal_values(val), 5.956578235358054e-13)
    assert_allclose(unumpy.std_devs(val), 9.278302514378108e-14)
def estimate_sigmas(values, ableseunsicherheit):
    """Generates standard deviations for analog instruments.

    Returns a uarray.
    """
    nominal = values
    magnitude = np.floor(np.log10(nominal))
    error = [ableseunsicherheit * 10**mag for mag in magnitude]
    return uarray(nominal, error)
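# Usage sketch for estimate_sigmas above: with a reading uncertainty of one
# scale division (here 0.05), the assigned error scales with each value's
# order of magnitude. The numbers are illustrative.
import numpy as np

# estimate_sigmas(np.array([230., 48., 3.2]), 0.05)
# -> errors [5.0, 0.5, 0.05], i.e. 0.05 * 10**floor(log10(value))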
def uarray(x, errx):
    """
    With the new releases of uncertainties and astropy.io.ascii (0.2.3, the
    replacement of asciitable), if I try to create an uncertainties array with
    the column of a table imported with ascii I run into trouble. For
    instance, if I use the sequence of commands below:

    >>> import astropy.io.ascii as asciitable
    >>> raff = asciitable.read('data/rafferty06.dat')
    >>> m, errm = raff['mass'], raff['errm']
    >>> mass = unumpy.uarray(m, errm)
    >>> x = 0.2*mass

    I get the error message:

    >>> TypeError: unsupported operand type(s) for *: 'float' and 'Column'

    which I can only assume is due to the new way ascii handles tables.

    I created this method to use as a replacement for unumpy.uarray that
    handles the tables created with astropy.io.ascii. Usage is the same as
    uncertainties.unumpy.uarray.

    :type x,errx: arrays created with astropy.io.ascii.
    :returns: uncertainties array.
    """
    import uncertainties.unumpy as unumpy

    x = numpy.array(x)
    errx = numpy.array(errx)

    return unumpy.uarray(x, errx)
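# Usage sketch for the uarray wrapper above: any sequence (including the
# astropy Column objects described in the docstring) is coerced to a plain
# numpy array before the uncertainties array is built. The values here are
# illustrative.
m = [1.0, 2.0, 3.0]
errm = [0.1, 0.2, 0.3]
mass = uarray(m, errm)   # the wrapper defined above, not unumpy.uarray
x = 0.2 * mass           # scalar multiplication now works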
def propReg2(xdata, ydata):
    m = np.sum(xdata*ydata) / np.sum(xdata*xdata)
    stddev = np.sqrt(np.sum((ydata - m*xdata)**2) / (len(xdata) - 1))
    uydata = unumpy.uarray(ydata, stddev)
    #uxdata = unumpy.uarray(xdata, stddev)
    um = np.sum(xdata*uydata) / np.sum(xdata*xdata)
    return np.array([uc.nominal_value(um), 0]), np.array([uc.std_dev(um), 0])
def regression(f, x_u, y_u, beta0):
    for i, y in enumerate(y_u):
        if ustd(y) == 0:
            y_u[i] = ufloat(y_u[i].nominal_value, y_u[i].nominal_value/1e15)
    if len(x_u.shape) == 2:
        x_nom = [None]*len(x_u)
        x_error = [None]*len(x_u)
        for i, col in enumerate(x_u):
            for j, elem in enumerate(col):
                if ustd(elem) == 0:
                    x_u[i, j] = ufloat(elem.nominal_value, elem.nominal_value/1e15)
            x_nom[i] = unom(x_u[i])
            x_error[i] = ustd(x_u[i])
        x_nom = np.array(x_nom)
        x_error = np.array(x_error)
    elif np.sum(ustd(x_u)) == 0:
        x_error = None
    else:
        x_error = ustd(x_u)
    x_nom = unom(x_u)

    linear = odrpack.Model(f)
    mydata = odrpack.RealData(x_nom, unom(y_u), sy=ustd(y_u), sx=x_error)
    myodr = odrpack.ODR(mydata, linear, beta0=beta0)
    myoutput = myodr.run()
    #myoutput.pprint()
    #print("Chi-squared =", np.sum((myoutput.delta**2 + myoutput.eps**2)/(ustd(x_u)**2 + ustd(y_u)**2)))
    print("Chi-squared =", myoutput.sum_square)
    beta_u = unumpy.uarray(myoutput.beta, myoutput.sd_beta)
    print("beta =", beta_u)
    print("reduced chi-square =", myoutput.res_var)
    degrees_of_freedom = myoutput.sum_square/myoutput.res_var
    p_value = 1 - scipy.stats.chi2.cdf(myoutput.sum_square, degrees_of_freedom)
    print("p-value =", p_value)
    return beta_u
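# A minimal, self-contained scipy.odr example of the fit that regression()
# wraps above: a straight line with errors on both axes. The data points and
# uncertainties here are synthetic.
import numpy as np
import scipy.odr as odrpack


def line(beta, x):
    return beta[0] * x + beta[1]

x = np.array([1., 2., 3., 4.])
y = np.array([2.1, 3.9, 6.2, 7.8])
sx = np.full_like(x, 0.05)
sy = np.full_like(y, 0.1)

model = odrpack.Model(line)
data = odrpack.RealData(x, y, sx=sx, sy=sy)
odr = odrpack.ODR(data, model, beta0=[1., 0.])
out = odr.run()
print(out.beta, out.sd_beta, out.res_var)  # parameters, errors, reduced chi-square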
def get_contents(h):
    if h is None:
        return None
    values = np.zeros((h.GetNbinsX(), h.GetNbinsY()))
    errors = np.zeros((h.GetNbinsX(), h.GetNbinsY()))
    val_err_wei = weight(h)  # (val, err, wei)
    for i in range(h.GetNbinsX()):
        for j in range(h.GetNbinsY()):
            val = h.GetBinContent(i+1, j+1)
            err = h.GetBinError(i+1, j+1)
            values[i][j] = val
            errors[i][j] = err
            wei = err*err/val if val > 0. else val_err_wei[2]
            entries = int(abs(val/wei if wei > 0. else val))
            if entries < 10:
                max_err = pois_h[entries]
                if pois_l[entries] > pois_h[entries]:
                    max_err = pois_l[entries]
                errors[i][j] = max_err*wei
    mhtmet = []
    alphat = []
    for i in range(h.GetNbinsX()):
        mhtmet.append(h.GetXaxis().GetBinLowEdge(i+1))
    #mhtmet.append(h.GetXaxis().GetBinUpEdge(h.GetNbinsX()))
    for j in range(h.GetNbinsY()):
        alphat.append(h.GetYaxis().GetBinLowEdge(j+1))
    #alphat.append(h.GetYaxis().GetBinUpEdge(h.GetNbinsY()))
    temp = unumpy.uarray(values, errors)
    return temp  # (mhtmet, alphat, temp)
def import_data_from_objLog(FilesList, Objects_Include, pv):
    List_Abundances = ['OI_HI', 'NI_HI', 'SI_HI', 'SI_HI_ArCorr',
                       'Y_Mass_O', 'Y_Mass_S', 'Y_Inference_O', 'Y_Inference_S']
    #List_Abundances = ['OI_HI', 'NI_HI', 'SI_HI', 'SI_HI_ArCorr', 'Y_Mass_O', 'Y_Mass_S', 'Y_inf_O', 'Y_inf_S']

    # Dictionary of dictionaries to store object abundances
    Abund_dict = OrderedDict()
    for abund in List_Abundances:
        Abund_dict[abund] = OrderedDict()

    # Loop through files
    for i in range(len(FilesList)):
        # Analyze file address
        CodeName, FileName, FileFolder = pv.Analyze_Address(FilesList[i])
        if CodeName in Objects_Include:
            # Loop through abundances in the log
            for abund in List_Abundances:
                Abund_Mag = pv.GetParameter_ObjLog(CodeName, FileFolder, Parameter=abund, Assumption='float')
                # If the abundance was measured, store it
                if Abund_Mag is not None:
                    Abund_dict[abund][CodeName] = Abund_Mag

    # Dictionary to store objects with abundance pairs for regressions.
    # As initial values for the keys we define the abundances we want to use for the regression
    Abundances_Pairs_dict = OrderedDict()
    Abundances_Pairs_dict['O_Regression'] = ('OI_HI', 'Y_Mass_O')
    Abundances_Pairs_dict['N_Regression'] = ('NI_HI', 'Y_Mass_O')
    Abundances_Pairs_dict['S_Regression'] = ('SI_HI', 'Y_Mass_S')
    Abundances_Pairs_dict['S_ArCorr_Regression'] = ('SI_HI_ArCorr', 'Y_Mass_S')
    Abundances_Pairs_dict['O_Regression_Inference'] = ('OI_HI', 'Y_Inference_O')
    Abundances_Pairs_dict['N_Regression_Inference'] = ('NI_HI', 'Y_Inference_O')
    Abundances_Pairs_dict['S_Regression_Inference'] = ('SI_HI', 'Y_Inference_S')
    Abundances_Pairs_dict['S_ArCorr_Regression_Inference'] = ('SI_HI_ArCorr', 'Y_Inference_S')

    # Loop through the regression lists and get objects with both abundances observed
    for j in range(len(Abundances_Pairs_dict)):
        # Get the element keys for the regression
        Vector = list(Abundances_Pairs_dict.keys())[j]
        Elem_X, Elem_Y = Abundances_Pairs_dict[Vector]

        # Determine objects with both abundances observed
        Obj_vector = intersect1d(list(Abund_dict[Elem_X].keys()),
                                 list(Abund_dict[Elem_Y].keys()),
                                 assume_unique=True)
        X_vector = zeros(len(Obj_vector))
        Y_vector = zeros(len(Obj_vector))
        X_vector_E = zeros(len(Obj_vector))
        Y_vector_E = zeros(len(Obj_vector))

        # Generate abundance vectors
        for z in range(len(Obj_vector)):
            X_vector[z] = nominal_values(Abund_dict[Elem_X][Obj_vector[z]])
            X_vector_E[z] = std_devs(Abund_dict[Elem_X][Obj_vector[z]])
            Y_vector[z] = nominal_values(Abund_dict[Elem_Y][Obj_vector[z]])
            Y_vector_E[z] = std_devs(Abund_dict[Elem_Y][Obj_vector[z]])

        Abundances_Pairs_dict[Vector] = (list(Obj_vector),
                                         uarray(X_vector, X_vector_E),
                                         uarray(Y_vector, Y_vector_E))

    return Abundances_Pairs_dict
def test_nom_ueig():
    sA = array([[1, 2], [3, 4]])
    A = array([[0.1, 0.2], [0.1, 0.3]])
    w, v = eig(A)
    uA = uarray(A, sA)
    uw, uv = ueig(uA)
    assert (nominal_values(uw) == w).all()
    assert (nominal_values(uv) == v).all()
def setUp(self):
    self.x = numpy.array([[-1.0], [0.0], [1.0]])
    self.y = unumpy.uarray([2.0, 1.0, 6.0], [0.1, 0.1, 0.1])
    self.a = 1.0
    self.b = numpy.array([2.0])
    self.c = numpy.array([[3.0]])
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import curve_fit
from uncertainties import ufloat
import uncertainties.unumpy as unp
import scipy.constants as const
from pylab import *

# C value 3
C20 = np.array([399 * 10**(-9), 450 * 10**(-9), 750 * 10**(-9)])
R3 = np.array([203, 222, 323])
R4 = np.array([797, 778, 677])

C2 = unp.uarray(C20, 0.002 * C20)
X = unp.uarray(R4 / R3, (R4 / R3) * 0.005)  # R4/R3

Cx = C2 * X

M = np.mean(Cx)  # mean
V = np.var(Cx)  # variance
F = V**(0.5) / 3**(0.5)  # error of the mean

print('Mittelwert Cx: ', "{:.12f}".format(M))
print('Fehler des Mittelwerts Cx: ', "{:.12f}".format(F))
            differenz[i] = f_array[i]
        else:
            differenz[i] = f_array[i] - f_array[i-1]
    print(differenz)
    differenz *= 10**6
    L = c/((np.sum(differenz)/4)*2)
    print('L = ', L)
    print('Abweichung = ', abweichungen(L_vergleich, L))


if __name__ == '__main__':
    # polarisation
    I = np.genfromtxt('daten/polarisation.txt', unpack='True')
    phi = np.linspace(0, 180, 19)
    phi = unp.uarray(phi, 1)
    phi_rad = (phi/360)*2*np.pi
    I = unp.uarray(I, 0.005)
    werteZuTabelle(noms(phi).astype(int), noms(I), rundungen=[0, 2])
    plot(phi_rad, I, "Polarisation", "polarisation", r'$\phi/\si{\radian}$',
         r'$I/\si{\micro\ampere}$', fitfunktion_pol, [0.8, 2, 0],
         ["I_0", "phi_verschieb", "m"])

    # modes
    x, I = np.genfromtxt('daten/tem00.txt', unpack='True')
    x = unp.uarray(x, 0.5)
    I = unp.uarray(I, 0.01)
    werteZuTabelle(noms(x).astype(int), noms(I), rundungen=[0, 3])
    plot(x, I, r'TEM$_{00}$', 'grundmode', r'$x/\si{\milli\meter}$',
                 axis=1)
filenames = list(np.array(elements, dtype=object)[:, 0]) + legierungen
for i in range(len(filenames)):
    np.savetxt('data/' + filenames[i] + '.txt', data[:, 2 * i:2 * i + 2],
               delimiter='\t', header='E [eV]\tn [1/s]')

#####
# Combined spectrum
#####

data = np.array(
    [np.loadtxt('data/' + e[0] + '.txt', skiprows=1) for e in elements],
    dtype=object)
data[:, :, 1] = unp.uarray(data[:, :, 1], data[:, :, 1] / np.sqrt(180))

plt.clf()
plt.title(u'Diagramm 3.1: Röntgenfluoreszenzspektrum verschiedener Elemente')
plt.xlabel('Strahlungsenergie $E \, [keV]$')
plt.ylabel(u'Zählrate ' + r'$n \, [\frac{Ereignisse}{s}]$')
for i in range(data.shape[0]):
    p = papstats.plot_data(data[i, :, 0], data[i, :, 1],
                           label=elements[i][0] + ' (' + str(elements[i][1]) + ')',
                           elinewidth=0.5, capsize=4)
    plt.fill_between(data[i, :, 0], 0, unp.nominal_values(data[i, :, 1]),
F = 130 * 10**(-3)  # meter


def uncertainty_theta(theta):
    print('Delta theta in deg: ',
          np.degrees(theta * (lam_2 - lam_1) / lam_m * np.tan(theta)))
    return np.radians(1) * np.ones(len(theta)) + theta * (lam_2 - lam_1) / lam_m * np.tan(theta)


theta_1_deg = 0.5 * np.array(
    [44.5, 51.5, 75.0, 90.6, 96.6, 117.5, 135.0, 144.6])  # degrees
theta_2_deg = 0.5 * np.array([
    29.0, 47.5, 57.0, 69.6, 76.5, 88.0, 94.7, 106.5, 113.4, 126.5, 135.5, 156.0
])  # degrees

theta_1 = np.radians(theta_1_deg)
theta_1 = unumpy.uarray(theta_1, uncertainty_theta(theta_1))
theta_2 = np.radians(theta_2_deg)
theta_2 = unumpy.uarray(theta_2, uncertainty_theta(theta_2))


def gittertest(m, theta):
    return m / (unumpy.sin(theta)**2)


def gitterabstand(lam, m, theta):
    return np.sqrt(m) * (lam / 2) / unumpy.sin(theta)


def linear_func(x, a, b):
    return a * x + b


def output(n_refl, gitter, theta,
print(peak_two_3mm)
print(peak_two_9mm)
print(peak_two_12mm)
print()

Aufspaltung = [
    peak_two_3mm - peak_one_3mm, peak_two_9mm - peak_one_9mm,
    peak_two_12mm - peak_one_12mm
]

for i in Aufspaltung:
    print(i)

Dicke = [3, 9, 12]
x_plot = np.linspace(3, 12, 2)

params, covar_matrix = curve_fit(linear, Dicke, Aufspaltung)
error = np.sqrt(np.diag(covar_matrix))
uparams = unp.uarray(params, error)

for i in range(2):
    print(uparams[i])

plt.figure()
plt.plot(Dicke, Aufspaltung, "rx")
plt.plot(x_plot, linear(x_plot, *params))
plt.xlabel("d / mm")
plt.ylabel(r"$\Delta$ f / Hz")
#plt.savefig("../latex-template/figure/Peak_Aufspaltung.pdf")
plt.close()
import matplotlib.pyplot as plt
import numpy as np
import uncertainties.unumpy as unp
from scipy.optimize import curve_fit
from uncertainties import correlated_values, correlation_matrix
from uncertainties import ufloat

Uio, tio = np.genfromtxt('5awerteoben.txt', unpack=True)
Uiu, tiu = np.genfromtxt('5awerteunten.txt', unpack=True)
Uio /= 1000
Uiu /= 1000
tiu /= 1000000
tio /= 1000000
U = np.append(Uio, Uiu)
t = np.append(tio, tiu)
Uer = unp.uarray(U, 5e-5)
ter = unp.uarray(t, 1e-7)
#UUo = np.log((np.absolute(U[0:-3])))


def f1(x, a, b):
    return a * np.exp(-2 * np.pi * x * b)


oparam, ocvar = curve_fit(f1, tio, Uio)  # top
uparam, ucvar = curve_fit(f1, tiu[3:-1], Uiu[3:-1])  # bottom
oparams = correlated_values(oparam, ocvar)
uparams = correlated_values(uparam, ucvar)
d_LiF *= 10**(-12)
alpha = 7.297 * 10**(-3)
h_eV = 4.1357 * 10**(-15)

# read values
theta_zn, N_zn = np.genfromtxt('Zink.dat', unpack=True)
theta_ga, N_ga = np.genfromtxt('Gallium.dat', unpack=True)
theta_br, N_br = np.genfromtxt('Brom.dat', unpack=True)
theta_rb, N_rb = np.genfromtxt('Rubidium.dat', unpack=True)
theta_st, N_st = np.genfromtxt('Strontium.dat', unpack=True)
theta_zk, N_zk = np.genfromtxt('Zirkonium.dat', unpack=True)

names = ['Zink', 'Gallium', 'Brom', 'Rubidium', 'Strontium', 'Zirkonium']
thetas = [theta_zn, theta_ga, theta_br, theta_rb, theta_st, theta_zk]
Ns = [N_zn, N_ga, N_br, N_rb, N_st, N_zk]
theos = unp.uarray([9668.55, 10377.76, 13483.86, 15207.74, 16115.26, 18008.15],
                   [15, 16, 19, 22, 23, 26])
Zs = [30, 31, 35, 37, 38, 40]
E_K_array = np.empty(len(Zs))
i = 0

# loop over all the elements
for name, theta, N, Z, theo in zip(names, thetas, Ns, Zs, theos):
    print(f'-> {name}')
    # make plot
    plt.figure()
    plt.plot(theta, N, 'b.', label=f'{name}')
    plt.xlabel('Theta [°]')
    plt.ylabel('Anzahl Impulse')
    plt.legend()
def get_results(self):
    # planet search files
    fs = np.array(glob.glob('%s/EPIC_*/K2LC_-00099' % self.folder))
    self.fs, self.epicnums = [], np.zeros(0)
    # detected planet params
    self.Ndetected, self.params_guess = np.zeros(0), np.zeros((0, 4))
    self.params_optimized = np.zeros((0, 5))
    self.Ps, self.e_Ps = np.zeros(0), np.zeros(0)
    self.rps, self.e_rps = np.zeros(0), np.zeros(0)
    # POI params
    self.cond_vals, self.cond_free_params = np.zeros((0, 7)), np.zeros((0, 7))
    # stellar params
    self.Kepmags, self.efs = np.zeros(0), np.zeros(0)
    self.Mss, self.e_Mss = np.zeros(0), np.zeros(0)
    self.Rss, self.e_Rss = np.zeros(0), np.zeros(0)
    self.Teffs, self.e_Teffs = np.zeros(0), np.zeros(0)
    self.loggs, self.e_loggs = np.zeros(0), np.zeros(0)

    for i in range(fs.size):
        print(float(i) / fs.size, fs[i])
        d = loadpickle(fs[i])
        if d.DONE:
            for j in range(d.Ndet + 1):
                self.fs.append(fs[i])
                self.epicnums = np.append(self.epicnums, d.epicnum)
                self.Ndetected = np.append(self.Ndetected, d.Ndet)
                self.Kepmags = np.append(self.Kepmags, d.Kepmag)
                self.efs = np.append(self.efs, d.ef.mean())
                self.Mss = np.append(self.Mss, d.Ms)
                self.e_Mss = np.append(self.e_Mss, d.e_Ms)
                self.Rss = np.append(self.Rss, d.Rs)
                self.e_Rss = np.append(self.e_Rss, d.e_Rs)
                self.Teffs = np.append(self.Teffs, d.Teff)
                self.e_Teffs = np.append(self.e_Teffs, d.e_Teff)
                self.loggs = np.append(self.loggs, d.logg)
                self.e_loggs = np.append(self.e_loggs, d.e_logg)

                # save parameter guesses of detected planets
                params = d.params_guess[j-1] if j > 0 else np.repeat(np.nan, 4)
                self.params_guess = np.append(self.params_guess, params.reshape(1, 4), axis=0)

                # save optimized parameters
                params_opt = d.params_optimized[j-1] if j > 0 else np.repeat(np.nan, 5)
                params_res = d.params_results[j-1] if j > 0 else np.repeat(np.nan, 15).reshape(3, 5)
                assert params_res.shape == (3, 5)
                self.params_optimized = np.append(self.params_optimized, params_opt.reshape(1, 5), axis=0)

                # save transit vetting results
                P = params[0]
                Pss = d.params_guess_priorto_confirm[:, 0]
                if Pss.size > 0:
                    g = abs(Pss - P) == np.min(abs(Pss - P))
                    assert g.sum() in range(2)
                    cond_vals = d.transit_condition_values[g] if j > 0 else np.repeat(np.nan, 7)
                else:
                    cond_vals = np.repeat(np.nan, 7)
                self.cond_vals = np.append(self.cond_vals, cond_vals.reshape(1, 7), axis=0)
                self.cond_free_params = np.append(self.cond_free_params,
                                                  d.transit_condition_free_params.reshape(1, 7), axis=0)

                # save periods and planet radii
                self.Ps = np.append(self.Ps, params_opt[0])
                self.e_Ps = np.append(self.e_Ps, get_1sigma(params_res[:, 0]))
                rpRs = unp.uarray(params_opt[3], get_1sigma(params_res[:, 3]))
                Rs = unp.uarray(d.Rs, d.e_Rs)
                rp, e_rp = rpRs2rp(rpRs, Rs)
                self.rps = np.append(self.rps, rp)
                self.e_rps = np.append(self.e_rps, e_rp)

    _, self.unique_inds = np.unique(self.epicnums, return_index=True)
    self.Nstar = self.unique_inds.size
    assert self.unique_inds.size == self.Nstar
    self.Nplanets = self.Ndetected[self.unique_inds].sum()
    self.fs = np.array(self.fs)
abstand = abstand + hoehe_zylinder
abstand *= 1e-2
schwingungsdauer *= (1 / 5)
schwingungsdauer_hilf = []
schwingungsdauer_abweichung = []
#print(abstand[::3])
for i in range(len(schwingungsdauer))[::3]:
    schwingungsdauer_hilf.append(np.mean(schwingungsdauer[i:i + 3]))
    schwingungsdauer_abweichung.append(
        1 / (np.sqrt(len(schwingungsdauer[i:i + 3]))) *
        np.std(schwingungsdauer[i:i + 3]))
schwingungsdauer_mittel = np.array(schwingungsdauer_hilf)
schwingungsdauer_u = unp.uarray(schwingungsdauer_hilf,
                                schwingungsdauer_abweichung)
print(schwingungsdauer_u)
#print('Zeitlichesmittel_abweichung ', schwingungsdaer_abweichung_mitt)


def f(m, u, b):
    return m * u + b


params_p, covarian = curve_fit(f, abstand[::3]**2, schwingungsdauer_mittel**2)


def linregress(x, y):
    assert len(x) == len(y)

    x, y = np.array(x), np.array(y)
N_E = np.genfromtxt('scripts/RolfBlank/0.Spe', unpack=True)
x = np.linspace(0, len(N_E), len(N_E))
E = 662 / 113.48 * x
plt.cla()
plt.clf()
plt.plot(E, N_E, 'r-', label=r'Energiespektrum')
plt.xlabel(r'$E/\si{\kilo\electronvolt}$')
plt.ylabel(r'N')
plt.xlim(0, 800)
plt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)
plt.legend(loc='best')
plt.savefig('build/Energiespektrum.pdf')

# Cube 1: determination of I_0
I0_gerade = unp.uarray(50636, 254) / 300
print('I0_gerade', I0_gerade)
I0_schraeg1 = unp.uarray(16660, 146) / 100
I0_schraeg2 = unp.uarray(16417, 145) / 100
#I0_schraeg = np.mean([I0_schraeg1, I0_schraeg2])
I0_schraeg = avg_and_sem([noms(I0_schraeg1), noms(I0_schraeg2)])
I0_schraeg = unp.uarray(I0_schraeg[0], I0_schraeg[1])
print('I0_schraeg', I0_schraeg)
makeTable([
    noms([I0_gerade * 300, I0_schraeg1 * 100, I0_schraeg2 * 100]),
    stds([I0_gerade * 300, I0_schraeg1 * 100, I0_schraeg2 * 100]),
    [300, 100, 100],
    noms([I0_gerade, I0_schraeg1, I0_schraeg2]),
    stds([I0_gerade, I0_schraeg1, I0_schraeg2])
], r'\multicolumn{2}{c}{' + r'$N_0$' + r'} & {' +
f1 = '/media/naikymen/SD64/Unsam/FQ/TP4 2016/Errores/vol08.csv'
f2 = '/media/naikymen/SD64/Unsam/FQ/TP4 2016/Errores/vol008.csv'
f3 = '/media/naikymen/SD64/Unsam/FQ/TP4 2016/Errores/abs.csv'
f4 = '/media/naikymen/SD64/Unsam/FQ/TP4 2016/Errores/volAgua.csv'
f5 = '/media/naikymen/SD64/Unsam/FQ/TP4 2016/Errores/volVBC.csv'
f6 = '/media/naikymen/SD64/Unsam/FQ/TP4 2016/Errores/vol8.csv'

d1 = genfromtxt(f1, delimiter=',')
d2 = genfromtxt(f2, delimiter=',')
d3 = genfromtxt(f3, delimiter=',')
d4 = genfromtxt(f4, delimiter=',')
d5 = genfromtxt(f5, delimiter=',')
d6 = genfromtxt(f6, delimiter=',')

ar8 = unumpy.uarray(d6[:, 0], d6[:, 1])
ar08 = unumpy.uarray(d1[:, 0], d1[:, 1])
ar008 = unumpy.uarray(d2[:, 0], d2[:, 1])
arabs = unumpy.uarray(d3[:, 0], d3[:, 1])
aragua = unumpy.uarray(d4[:, 0], d4[:, 1])
arVBC = unumpy.uarray(d5[:, 0], d5[:, 1])

vol = (ar8 + ar08 + ar008 + aragua + arVBC)
mol = (ar8 * 0.8 + ar08 * 0.08 + ar008 * 0.008)
conc = mol / vol

mT = unumpy.matrix(conc)

filename = "resconc.csv"
numpy.savetxt(filename, mT, fmt='%r', delimiter='\n')
def plot_u(cal_file, fm_file, offset_file, description, accidental_offset,
           results_file):
    M = np.genfromtxt(cal_file)
    N = np.genfromtxt(fm_file)
    O = np.genfromtxt(offset_file)

    i_helm = M[:, 1]  # current applied to helmholtz for calibration measurement
    b_helm = M[:, 2]  # field applied to helmholtz coil for calibration measurement
    p, cov = np.polyfit(
        i_helm, b_helm, 1, cov=True
    )  # fit a line to the calibration measurement so that we get a calibration

    i_fm = N[:, 1]  # current applied to helmholtz for shielding measurement
    b_fm = unumpy.uarray(
        N[:, 2], 0.0005
    ) - accidental_offset  # field measured inside of ferromagnet shield

    B_earth = np.polyval(
        p, 0
    )  # we get the Earth's magnetic field from i=0 of the Helmholtz calibration
    B_fm_no_i = unumpy.uarray(np.mean(O[:, 2]), np.std(
        O[:, 2]))  # get average and error for initial magnetization

    mag = B_fm_no_i - B_earth  # initial magnetization is the field inside the ferromagnet before any field is applied, minus the Earth's magnetic field

    Bin = b_fm - mag  # internal field is the measured internal field minus the initial magnetization; this correction might not be necessary for a soft ferromagnet
    Bext = unumpy.uarray(np.polyval(p, i_fm), 0.0005)  # external field
    Bext_nom = unumpy.nominal_values(Bext)
    Bext_err = unumpy.std_devs(Bext)
    B = Bext / Bin
    c = a / b

    u = (-2 * B + c**2 - 2 * unumpy.sqrt(B**2 - B * c**2 - B + c**2) + 1) / (c**2 - 1)
    u_nom = unumpy.nominal_values(u)
    u_err = unumpy.std_devs(u)

    # calculate u_err with just point-to-point uncertainties. I define this as just
    # uncertainty from the field measurements
    u_pp = (-2 * B + c.nominal_value**2 -
            2 * unumpy.sqrt(B**2 - B * c.nominal_value**2 - B + c.nominal_value**2)
            + 1) / (c.nominal_value**2 - 1)

    # calculate u_err from just geometry uncertainties
    u_geom = (-2 * unumpy.nominal_values(B) + c**2 - 2 * unumpy.sqrt(
        unumpy.nominal_values(B)**2 - unumpy.nominal_values(B) * c**2 -
        unumpy.nominal_values(B) + c**2) + 1) / (c**2 - 1)

    # obtain uncertainties from field
    u_err_pp = unumpy.std_devs(u_pp)
    # obtain uncertainties from geometry
    u_err_geom = unumpy.std_devs(u_geom)

    with open(results_file, "w") as myfile:
        myfile.write('#Bext, sig_Bext, ur, sig_ur, sig_ur_pp, sig_ur_corr\n')
        for j in range(0, len(u_nom)):
            myfile.write('%s\t%s\t%s\t%s\t%s\t%s\n' %
                         (Bext_nom[j], Bext_err[j], u_nom[j], u_err[j],
                          u_err_pp[j], u_err_geom[j]))

    plt.errorbar(Bext_nom, u_nom, u_err, marker='.', label=description)
    plt.close()
    return (unp.uarray(params[1], np.sqrt(np.diag(errors)[1])))


# CALCULATION

# compute the corresponding angles theta
meta_winkel = winkel(camera_rad, r_meta)

# lattice constants for the metal: bcc, fcc, diamond
gk_m_bcc = gitter(bcc, wavelen, meta_winkel)
gk_m_fcc = gitter(fcc, wavelen, meta_winkel)
gk_m_dia = gitter(dia, wavelen, meta_winkel)

a_m_bcc = unp.uarray(
    gk_m_bcc,
    gk_korrektur_a(gk_m_bcc, meta_winkel, proben_rad, camera_rad) +
    gk_korrektur_v(gk_m_bcc, meta_winkel, camera_rad, v))
a_m_fcc = unp.uarray(
    gk_m_fcc,
    gk_korrektur_a(gk_m_fcc, meta_winkel, proben_rad, camera_rad) +
    gk_korrektur_v(gk_m_fcc, meta_winkel, camera_rad, v))
a_m_dia = unp.uarray(
    gk_m_dia,
    gk_korrektur_a(gk_m_dia, meta_winkel, proben_rad, camera_rad) +
    gk_korrektur_v(gk_m_dia, meta_winkel, camera_rad, v))

# print to console
#
# print("\n\n\nwinkel metall:\n")
# for element in meta_winkel:
#     print(element)
def mag2flux(mag, err_mag):
    umag = unumpy.uarray(mag, err_mag)
    uflux = 10**(-0.4 * umag)
    flux = unumpy.nominal_values(uflux)
    err_flux = unumpy.std_devs(uflux)
    return flux, err_flux
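# Sanity check for mag2flux above: for f = 10**(-0.4*m), linear error
# propagation gives err_flux = 0.4*ln(10)*flux*err_mag, which is what the
# uarray arithmetic produces. The magnitude values are illustrative.
import numpy as np

flux, err_flux = mag2flux(np.array([20.0, 21.5]), np.array([0.05, 0.08]))
assert np.allclose(err_flux, 0.4 * np.log(10) * flux * np.array([0.05, 0.08]))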
def salz_auswerten(atomfakt, fobj_out):
    lit_cae_clo = 4.119e-10  # Wikipedia

    ss = generate_miller(10, s_s, 1, atomfakt)  # atomfakt = 1 -> equal prefactors, 2 -> unequal prefactors
    ss = ss[:len(salz_winkel), :]
    # print("len winkel", len(salz_winkel))
    # print("len ss", len(ss[:, 1]))
    # print("ss=", ss)
    cc = generate_miller(10, cs_cl, 1, atomfakt)
    cc = cc[:len(salz_winkel), :]

    # ss = np.array([[1, 1, 1],   # 3; ggu and guu forbidden  # rock salt
    #                [0, 0, 2],   # 4
    #                [0, 2, 2],   # 8
    #                [1, 1, 3],   # 11
    #                [2, 2, 2],   # 12
    #                [0, 0, 4],   # 16
    #                [1, 3, 3],   # 19
    #                [0, 2, 4],   # 20
    #                [2, 2, 4],   # 24
    #                [3, 3, 3],   # 27
    #                [0, 4, 4],   # 32
    #                [1, 3, 5],   # 35
    #                [2, 4, 4],   # 36
    #                [0, 2, 6],   # 40
    #                [3, 3, 5],   # 43
    #                [4, 4, 4],   # 48
    #                [1, 5, 5],   # 51
    #                [0, 4, 6],   # 52
    #                [2, 4, 6],   # 56
    #                [3, 5, 5],   # 59
    #                [4, 4, 6],   # 68
    #                [0, 6, 6],   # 72
    #                [5, 5, 5],   # 75
    #                ])
    #
    # cc = np.array([[0, 0, 1],   # 1
    #                [0, 1, 1],   # 2
    #                [1, 1, 1],   # 3
    #                [0, 0, 2],   # 4, attenuated
    #                [0, 1, 2],   # 5
    #                [1, 1, 2],   # 6
    #                [0, 2, 2],   # 8, attenuated
    #                [0, 0, 3],   # 9
    #                [0, 1, 3],   # 10
    #                [1, 1, 3],   # 11
    #                [2, 2, 2],   # 12
    #                [0, 2, 3],   # 13
    #                [1, 2, 3],   # 14
    #                [0, 0, 4],   # 16
    #                [0, 1, 4],   # 17
    #                [1, 1, 4],   # 18
    #                [1, 3, 3],   # 19
    #                [0, 2, 4],   # 20
    #                [1, 2, 4],   # 21
    #                [2, 3, 3],   # 22
    #                [2, 2, 4],   # 24
    #                [0, 0, 5],   # 25
    #                [0, 1, 5],   # 26
    #                ])

    zb = generate_miller(10, z_b, 1, atomfakt)
    zb = zb[:len(salz_winkel), :]
    # zb = np.array([[1, 1, 1],   # 3
    #                [0, 0, 2],   # 4
    #                [0, 2, 2],   # 8
    #                [1, 1, 3],   # 11
    #                [2, 2, 2],   # 12
    #                [0, 0, 4],   # 16
    #                [1, 3, 3],   # 19
    #                [0, 2, 4]    # 20
    #                ])

    fluor = generate_miller(10, fluorit, 1, atomfakt)
    fluor = fluor[:len(salz_winkel), :]

    gk_s_ss = gitter(ss, wavelen, salz_winkel)
    gk_s_cc = gitter(cc, wavelen, salz_winkel)
    gk_s_fluor = gitter(fluor, wavelen, salz_winkel)
    gk_s_zb = gitter(zb, wavelen, salz_winkel)
    # gk_m_dia = gitter(dia, wavelen, salz_winkel)

    a_s_ss = unp.uarray(
        gk_s_ss,
        gk_korrektur_a(gk_s_ss, salz_winkel, proben_rad, camera_rad) +
        gk_korrektur_v(gk_s_ss, salz_winkel, camera_rad, v))
    a_s_cc = unp.uarray(
        gk_s_cc,
        gk_korrektur_a(gk_s_cc, salz_winkel, proben_rad, camera_rad) +
        gk_korrektur_v(gk_s_cc, salz_winkel, camera_rad, v))
    a_s_fluor = unp.uarray(
        gk_s_fluor,
        gk_korrektur_a(gk_s_fluor, salz_winkel, proben_rad, camera_rad) +
        gk_korrektur_v(gk_s_fluor, salz_winkel, camera_rad, v))
    a_s_zb = unp.uarray(
        gk_s_zb,
        gk_korrektur_a(gk_s_zb, salz_winkel, proben_rad, camera_rad) +
        gk_korrektur_v(gk_s_zb, salz_winkel, camera_rad, v))
    # a_s_dia = unp.uarray(gk_s_dia, gk_korrektur_a(gk_s_dia, salz_winkel, proben_rad, camera_rad) + gk_korrektur_v(gk_s_dia, salz_winkel, camera_rad, v))

    a_ss_end = gk_plot("ss" + str(atomfakt), salz_winkel, a_s_ss, gerade,
                       winkel_korrektur, [0, 23])
    a_cc_end = gk_plot("cc" + str(atomfakt), salz_winkel, a_s_cc, gerade,
                       winkel_korrektur, [0, 23])
    a_fluor_end = gk_plot("fluor" + str(atomfakt), salz_winkel, a_s_fluor,
                          gerade, winkel_korrektur, [0, 23])
    a_zb_end = gk_plot("zb" + str(atomfakt), salz_winkel, a_s_zb, gerade,
                       winkel_korrektur, [0, 23])

    fobj_out.write("\nFall:" + str(atomfakt) + "\n")
    fobj_out.write("Salz ss a=" + str(a_ss_end) + "\n")
    fobj_out.write("Salz Fluorit a=" + str(a_fluor_end) + "\n")
    fobj_out.write("Salz zb =" + str(a_zb_end) + "\n")
    fobj_out.write("Salz cc =" + str(a_cc_end) + "\n")

    lit_LiI = 6e-10
    lit_NH4Cl = 3.87e-10
    if atomfakt == 2:
        fobj_out.write("\n\nSteinsalz LiI ist auserwählt!!!!! Die relative Abweichung beträgt:"
                       + str((a_ss_end - lit_LiI) / lit_LiI) + "\n")
        fobj_out.write("\n\nNH4Cl ist auserwählt!!!!! Die relative Abweichung beträgt:"
                       + str((a_cc_end - lit_NH4Cl) / lit_NH4Cl) + "\n")

    tabelle_fertig(r_salz, salz_winkel, ss, a_s_ss, "ss" + str(atomfakt))
    tabelle_fertig(r_salz, salz_winkel, cc, a_s_cc, "cc" + str(atomfakt))
    tabelle_fertig(r_salz, salz_winkel, fluor, a_s_fluor, "fluor" + str(atomfakt))
    tabelle_fertig(r_salz, salz_winkel, zb, a_s_zb, "zb" + str(atomfakt))

    print("Gleich=1, Ungleich=2 =>", atomfakt)
    print(" \nSalz ss", a_ss_end)
    print(" \nSalz cc", a_cc_end)
    print(" \nSalz fluor", a_fluor_end)
    print(" \nSalz zb", a_zb_end)
# # 3. Setup data

# Read data file. Data points are stored in `csv` files.

# In[9]:

data = pd.read_csv('./data/Lundin2007.csv')

# Make error propagation possible.

# In[10]:

v = {
    'en100': unp.uarray(data['v(en100)'][~np.isnan(data['v(en100)'])],
                        data['sv(en100)'][~np.isnan(data['v(en100)'])]),
    'en91': unp.uarray(data['v(en91)'], data['sv(en91)']),
    'en85': unp.uarray(data['v(en85)'], data['sv(en85)'])
}
p_org = {
    'en100': unp.uarray(data['p(en100)'][~np.isnan(data['v(en100)'])],
                        data['sp(en100)'][~np.isnan(data['v(en100)'])]),
    'en91': unp.uarray(data['p(en91)'], data['sp(en91)']),
    'en85': unp.uarray(data['p(en85)'], data['sp(en85)'])
}

v_std = {}
def plot_on(
    self,
    ax1: plt.Axes,
    ax2,
    style="stacked",
    ylabel="Events",
    sum_color=plot_style.KITColors.kit_purple,
    draw_legend: bool = True,
    legend_inside: bool = True,
):
    bin_edges, bin_mids, bin_width = self._get_bin_edges()
    self._bin_edges = bin_edges
    self._bin_mids = bin_mids
    self._bin_width = bin_width

    sum_w = np.sum(np.array([
        binned_statistic(comp.data, comp.weights, statistic="sum", bins=bin_edges)[0]
        for comp in self._mc_components["MC"]
    ]), axis=0)
    sum_w2 = np.sum(np.array([
        binned_statistic(comp.data, comp.weights**2, statistic="sum", bins=bin_edges)[0]
        for comp in self._mc_components["MC"]
    ]), axis=0)

    hdata, _ = np.histogram(self._data_component.data, bins=bin_edges)

    if style.lower() == "stacked":
        ax1.hist(
            x=[comp.data for comp in self._mc_components['MC']],
            bins=bin_edges,
            weights=[comp.weights for comp in self._mc_components['MC']],
            stacked=True,
            edgecolor="black",
            lw=0.3,
            color=[comp.color for comp in self._mc_components['MC']],
            label=[comp.label for comp in self._mc_components['MC']],
            histtype='stepfilled')

        ax1.bar(
            x=bin_mids,
            height=2 * np.sqrt(sum_w2),
            width=self.bin_width,
            bottom=sum_w - np.sqrt(sum_w2),
            color="black",
            hatch="///////",
            fill=False,
            lw=0,
            label="MC stat. unc.")

    if style.lower() == "summed":
        ax1.bar(
            x=bin_mids,
            height=2 * np.sqrt(sum_w2),
            width=self.bin_width,
            bottom=sum_w - np.sqrt(sum_w2),
            color=sum_color,
            lw=0,
            label="MC")

    ax1.errorbar(x=bin_mids, y=hdata, yerr=np.sqrt(hdata), ls="", marker=".",
                 color="black", label=self._data_component.label)

    y_label = self._get_y_label(False, bin_width, evts_or_cand=ylabel)
    # ax1.legend(loc=0, bbox_to_anchor=(1, 1))
    ax1.set_ylabel(y_label, plot_style.ylabel_pos)

    if draw_legend:
        if legend_inside:
            ax1.legend(frameon=False)
            ylims = ax1.get_ylim()
            ax1.set_ylim(ylims[0], 1.4 * ylims[1])
        else:
            ax1.legend(frameon=False, bbox_to_anchor=(1, 1))

    ax2.set_ylabel(r"$\frac{\mathrm{Data - MC}}{\mathrm{Data}}$")
    ax2.set_xlabel(self._variable.x_label, plot_style.xlabel_pos)
    ax2.set_ylim((-1, 1))

    try:
        uhdata = unp.uarray(hdata, np.sqrt(hdata))
        uhmc = unp.uarray(sum_w, np.sqrt(sum_w2))
        ratio = (uhdata - uhmc) / uhdata

        ax2.axhline(y=0, color=plot_style.KITColors.dark_grey, alpha=0.8)
        ax2.errorbar(bin_mids, unp.nominal_values(ratio),
                     yerr=unp.std_devs(ratio), ls="", marker=".",
                     color=plot_style.KITColors.kit_black)
    except ZeroDivisionError:
        ax2.axhline(y=0, color=plot_style.KITColors.dark_grey, alpha=0.8)

    plt.subplots_adjust(hspace=0.08)
plt.savefig('plotabbeb.pdf')

Ag, Bg, rg, pg, stdg = stats.linregress(1 + (1 / V), g_abbe)
Bg_fehler = fehler_b(stdg, 1 + (1 / V))

plt.figure(4)
#plt.errorbar(noms(R), noms(N_b), xerr=stds(R), yerr=stds(N_b), fmt='cx')
plt.plot(1 + (1 / V), g_abbe, 'rx', label=r'$\mathrm{Messwerte}$')
plt.plot(x, Ag * x + Bg, 'b-', label=r'$\mathrm{Ausgleichsfunktion}$')
plt.legend(loc='best')
plt.xlabel(r'$\mathrm{1+\frac{1}{V}}$')
plt.ylabel(r'$\mathrm{Gegenstandweite \ g/mm}$')
plt.savefig('plotabbeg.pdf')

print('\n f_abbe 1+V', Ab, '+-', stdb)
print('h`', Bg, '+-', Bg_fehler)
print('\n f_abbe 1+1/V', Ag, '+-', stdg)
print('h', Bb, '+-', Bb_fehler)

f_g = unp.uarray(Ag, stdg)
f_b = unp.uarray(Ab, stdb)
print((f_g + f_b) / 2)
print(np.mean([f_g, f_b]))
    ln = (1 / T - 1 / T0) * B
    V_measured = ((np.exp(ln) + 1) / 1023)**(-1)
    V_measured_u = unp.uarray([V_measured], [0.5])
    lnR_R0_u = unp.log(1023 / V_measured_u - 1)
    return lnR_R0_u


"""Calibration -> plots"""
CalibrationFilename = 'Data/MeasurementCalibration2.p'
CalibrationData = pickle.load(open(CalibrationFilename, "rb"))
EstimateB1(CalibrationData)

"""Calibration -> Deming regression"""
CalibrationData = np.array(CalibrationData, dtype=float)
fitparam, b1_std, b0_std = EstimateB_Deming(CalibrationData)

"""Time -> plots"""
#TimeFilename = 'Data/Corrected_MeasurementCalibration2.p'
#TimeData = pickle.load(open(TimeFilename, "rb"))
#Time = TimeData[:, 0]  # in millis
#Temperature = TimeData[:, 2]  # in celsius
#PlotTemperature(Time, Temperature)

print(Tgrove_u(293.15 + 0.1,
               Bgrove_u=unp.uarray([4680], [12]),
               T0_u=unp.uarray([298.15], [0.1])))
from data_analysis_tools import *
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from scipy import optimize
from uncertainties import ufloat, unumpy

df = pd.read_csv('data.csv')
print(df)

# Preparing our data
masses = unumpy.uarray(df.iloc[1:, 0].to_numpy(), 0.004)
counts = unumpy.uarray(df.iloc[1:, 1] - 159,
                       np.sqrt(df.iloc[1:, 1]) + np.sqrt(159))
counts_per_m = counts / masses
print(counts_per_m)


def p(d, Eff, a):
    return Eff * (1 - np.exp(-a * d)) / (a * d)


xdata = unumpy.nominal_values(masses)
ydata = unumpy.nominal_values(counts_per_m)
sigma = unumpy.std_devs(counts_per_m)
print(f"{xdata = }")
print(f"{ydata = }")
print(f"{sigma = }")
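# A hedged sketch of the fit step that would naturally follow: weighted least
# squares with the efficiency model p defined above, using the propagated
# sigma as weights. The starting values in p0 are guesses, not values from
# the source.
popt, pcov = optimize.curve_fit(p, xdata, ydata, sigma=sigma,
                                absolute_sigma=True, p0=[1.0, 1.0])
perr = np.sqrt(np.diag(pcov))  # one-sigma parameter uncertainties
print(f"{popt = }")
print(f"{perr = }")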
    amplitude_of_peaks.append(amplitude)
    sigma_of_peaks.append(sigma)
    offset_of_peak.append(offset)
    area_under_peak.append(area)

# Save data in table
print(index)
l.Latexdocument(
    filename=abs_path('tabs/europium/peak_charakteristiken_eu.tex')).tabular(
        data=[
            index,
            unp.uarray(noms(amplitude_of_peaks), stds(amplitude_of_peaks)),
            unp.uarray(noms(sigma_of_peaks), stds(sigma_of_peaks)),
            unp.uarray(noms(offset_of_peak), stds(offset_of_peak))
        ],
        header=['Kanal / ', r'A / ', r'\sigma / ', r'\mu / '],
        places=[0, (2.2, 2.2), (1.2, 1.2), (4.2, 1.2)],
        caption='Bestimmte Eigenschaften der Peaks von $^{152}\ce{Eu}$.',
        label='results_peaks_eu')

# ########################################################################### #
# ## --- Linear regression to get the transformation parameters --- ##        #
# ########################################################################### #


def g(x, m, b):
    '''Linear function for the channel-to-energy transformation'''
    return m * x + b
def main(argv: list) -> int:
    # print(list(get_data(data_folder + "F0004CH1.CSV")))
    # for xdata, ydata in get_all_data(data_folder, range(4, 42 + 1), [18]):
    #     plt.plot(xdata, ydata)
    #     plt.show()
    #
    #     print(xdata)
    #     print(ydata)
    #     print()

    def loading_dgl(t, loading_rate, alpha, t0):
        return (loading_rate / alpha) * (1 - np.exp(-alpha * (t - t0)))

    mask_array = [(6.3, 10), (3.9, 9.9), (0.39, 9.8), (0.29, 10.8),
                  (0.244, 10.67), (3.634, 13.5), (0.2705, 10.4),
                  (-0.195, 10.16), (-0.0872, 10.16), (0.187, 10.7),
                  (0.176, 10.86), (0.2903, 10.48), (0.2384, 10.83),
                  (0.29, 10.0684), (0.304, 10.5282), (0.452, 10.59),
                  (0.344, 10.5), (0.3077, 9.67), (0.332, 10.31),
                  (0.332, 10.5), (0.2856, 10.79), (0.265, 10.369),
                  (0.344, 10.32), (0.288, 10.34)]

    def fitparameter_getter():
        L = []
        delta_L = []
        A = []
        delta_A = []
        N_max = []
        delta_N_max = []
        for i, (xdata, ydata) in enumerate(get_all_data(data_detuning, range(4, 28))):  # or 28 + 1
            a, b = mask_array[i]
            x, y = zip(*list(get_data("data/detuning_coil_curves/F0028CH1.CSV")))
            y = np.array(y)
            # y = np.where(y < 0.07993, 0.08505, y)
            y = np.mean(y[320:750])  # nominal background value in volts
            # print(y)
            # y = np.where(y > 0.0542, 0.05055, y)
            y = conv_volts_to_atomnumber(y, 0)
            # print([conv_volts_to_atomnumber(0.5, 0), conv_volts_to_atomnumber(0.5, 4)])
            # plt.plot(x[320:750], [y]*len(x[320:750]), marker=".", linewidth=0)
            xdata = np.array(xdata)
            ydata = np.array(ydata)
            ydata = conv_volts_to_atomnumber(ydata, 0)
            ydata = ydata - y
            # print(ydata)
            # ydata = np.where(ydata < 0, 0, ydata)
            omxdata, omydata = xdata, ydata
            # n = 5
            # omydata = rolling_mean(omydata, n)

            @np.vectorize
            def mask(x):
                nonlocal a, b
                return a <= x <= b

            # print(a, b)
            mxdata, mydata = list(omxdata), list(omydata)
            # print(len(mxdata), len(mydata))
            mxdata, mydata = tuple(mask_data(mask, mxdata, mydata))
            popt, pcov = curve_fit(loading_dgl, mxdata, unp.nominal_values(mydata), maxfev=5000)
            # plt.plot(mxdata, loading_dgl(mxdata, *popt), label=r"Loading-Fit-Function $\frac{L}{\alpha}(1-e^{-\alpha x})$")
            # plt.plot(mxdata, unp.nominal_values(mydata), marker='.', linewidth=0, label="measured atom number")
            # # plt.plot(xdata, unp.nominal_values(ydata), marker=".", linewidth=0)
            # plt.xlabel("Time [s]")
            # plt.ylabel("Number of Atoms")
            # plt.title(r"Coil Current: 10.0A and $f_{AOM}=110.75$ Hz")
            # print("L=", popt[0], " alpha=", popt[1], "N_max=", popt[0]/popt[1])
            # plt.legend()
            # plt.show()
            # print(popt[2])
            L.append(popt[0])
            delta_L.append(np.sqrt(pcov[0][0]))
            A.append(popt[1])
            delta_A.append(np.sqrt(pcov[1][1]))
            N_max.append(popt[0] / popt[1])
            delta_N_max.append(
                unp.std_devs(ufloat(popt[0], np.sqrt(pcov[0][0])) /
                             ufloat(popt[1], np.sqrt(pcov[1][1]))))
        return L, delta_L, A, delta_A, N_max, delta_N_max

    L, delta_L, A, delta_A, N_max, delta_N_max = fitparameter_getter()
    print(A)
    print(delta_A)
    print(L)
    print(delta_L)
    print(N_max)

    all_fit_params = []
    all_fit_params.append(A)
    all_fit_params.append(L)
    all_fit_params.append(N_max)
    all_fit_params = np.array(all_fit_params)

    delta_all_fit_params = []
    delta_all_fit_params.append(delta_A)
    delta_all_fit_params.append(delta_L)
    delta_all_fit_params.append(delta_N_max)
    delta_all_fit_params = np.array(delta_all_fit_params)

    def detuning_calculator(x):
        return (2 * x) - 60 - (2 * 85)  # MHz

    titles = np.array([r"$\alpha \ [\frac{1}{s}]$ vs. Detuning Frequency [MHz]",
                       r"Loss rate L $[\frac{1}{s}]$ vs. Detuning Frequency [MHz]",
                       r"$N_{max} \ [-]$ vs. Detuning Frequency [MHz]"])
Detuning Frequency [MHz]"]) ylabels = np.array([r"$\alpha \ [\frac{1}{s}]$", r"Loss rate L $[\frac{1}{s}]$", r"$N_{max} \ [-]$"]) for z in range(1, 4): i = 1 plt.errorbar(detuning_calculator(np.array([109.75, 110.25, 110.75, 111.25, 111.75, 112.25])), all_fit_params[z - 1][6 * (i - 1): (6 * i)], label="(9.0 +/- 0.1)A", yerr=delta_all_fit_params[z - 1][6 * (i - 1): (6 * i)], fmt=".") plt.xlabel("Detuning [MHz]") plt.ylabel(ylabels[z - 1]) i = 2 plt.errorbar(detuning_calculator(np.array([109.75, 110.25, 110.75, 111.25, 111.75, 112.25])), all_fit_params[z - 1][6 * (i - 1): (6 * i)], label="(9.5 +/- 0.1)A", yerr=delta_all_fit_params[z - 1][6 * (i - 1): (6 * i)], fmt=".") i = 3 plt.errorbar(detuning_calculator(np.array([109.75, 110.25, 110.75, 111.25, 111.75, 112.25])), all_fit_params[z - 1][6 * (i - 1): (6 * i)], label="(10.0 +/- 0.1)A", yerr=delta_all_fit_params[z - 1][6 * (i - 1): (6 * i)], fmt=".") i = 4 plt.errorbar(detuning_calculator(np.array([109.75, 110.25, 110.75, 111.25, 111.75, 112.25])), all_fit_params[z - 1][6 * (i - 1): (6 * i)], label="(10.35 +/- 0.1)A", yerr=delta_all_fit_params[z - 1][6 * (i - 1): (6 * i)], fmt=".") plt.title(titles[z - 1]) plt.legend() plt.show() def magnetic_field_gradient(current): return (1.1E-6 * (90 * current / (8.5 ** 2))) / (10 ** -6) # i: current in Ampere, units: mikroT/cm fig = plt.figure() ax = fig.add_subplot(111, projection='3d') ax.scatter(detuning_calculator(np.array( [109.75, 110.25, 110.75, 111.25, 111.75, 112.25, 109.75, 110.25, 110.75, 111.25, 111.75, 112.25, 109.75, 110.25, 110.75, 111.25, 111.75, 112.25, 109.75, 110.25, 110.75, 111.25, 111.75, 112.25])), magnetic_field_gradient(np.array( [9, 9, 9, 9, 9, 9, 9.5, 9.5, 9.5, 9.5, 9.5, 9.5, 10, 10, 10, 10, 10, 10, 10.35, 10.35, 10.35, 10.35, 10.35, 10.35])), N_max) ax.plot(detuning_calculator(np.array( [109.75, 110.25, 110.75, 111.25, 111.75, 112.25])), magnetic_field_gradient(np.array( [9, 9, 9, 9, 9, 9])), N_max[0:6], label= "9.0 +/- 0.1 A") ax.plot(detuning_calculator(np.array( [109.75, 110.25, 110.75, 111.25, 111.75, 112.25])), magnetic_field_gradient(np.array( [9.5, 9.5, 9.5, 9.5, 9.5, 9.5])), N_max[6:12], label= "9.5 +/- 0.1 A") ax.plot(detuning_calculator(np.array( [109.75, 110.25, 110.75, 111.25, 111.75, 112.25])), magnetic_field_gradient(np.array( [10, 10, 10, 10, 10, 10])), N_max[12:18], label= "10 +/- 0.1 A") ax.plot(detuning_calculator(np.array( [109.75, 110.25, 110.75, 111.25, 111.75, 112.25])), magnetic_field_gradient(np.array( [10.35, 10.35, 10.35, 10.35, 10.35, 10.35])), N_max[18:24], label= "10.35 +/- 0.1 A") ax.set_xlabel('Detuning Frequency [MHz]') ax.set_ylabel(r'Magnetic Field Gradient [$\frac{\mu T}{cm}$]') ax.set_zlabel(r'$N_{max}$') plt.legend() # plt.savefig("3dplot.pdf", format="pdf") plt.show() # return 0 titles_load_curve = np.array( ["10ms", "5ms", "3ms", "7ms", "9ms", "12ms", "11ms", "13ms", "14ms", "15ms", "16ms", "17ms", "18ms", "19ms", "20ms", "21ms", "22ms", "23ms", "24ms", "25ms", "26ms", "27ms", "28ms", "29ms", "30ms", "40ms", "50ms", "60ms", "70ms", "80ms", "90ms", "100ms"]) mask_array_N0 = [(-1.14, -0.204), (-1.139, -0.0259), (-1.195, -0.259), (-1.193, -0.244), (-1.138, -0.094), (-1.194, -0.204), (-1.194, -0.2046), (-1.63, -0.26), (-4.36, -0.63), (-4.47, -0.628), (-4.259, -0.408), (-4.369, -0.738), (-4.47, -0.29), (-4.259, -0.628), (-4.369, -0.628), (-4.479, -0.408), (-4.259, -0.408), (-4.479, -0.408), (-4.36, -0.628), (-4.36, -0.298), (-4.479, -0.298), (-8.73, -1.47), (-8.95, -0.59), (-8.739, -0.59), (-8.739, -0.376), (-8.739, -0.59), (-22.29, -1.39), 
(-22.84, -1.39), (-22.84, -3.04), (-23.0, -3.04), (-22.84, -3.04), (-24.5, -1.39)] mask_array_Nmax = [(10.0295, 10.5798), (5.077, 5.796), (3.041, 4.08), (7.058, 7.77), (9.04, 9.86), (12.06, 12.83), (11.075, 11.84), (12.99, 19.98), (14.22, 15.21), (15.108, 16.42), (16.2, 17.74), (17.0, 18.4), (18.18, 19.5), (19.17, 20.61), (20.17, 22.04), (21.16, 22.59), (22.04, 23.58), (22.15, 23.9), (24.13, 25.67), (25.012, 26.99), (26.11, 28.31), (27.35, 29.55), (28.45, 31.09), (29.33, 31.75), (30.43, 33.73), (40.56, 44.08), (50.88, 59.0), (60.23, 67.0), (70.68, 74.0), (81.69, 85.0), (89.9, 94.0), (100.4, 105.0)] averages_N0 = [] std_devs_N0 = [] averages_Nmax = [] std_devs_Nmax = [] for i, (xdata, ydata) in enumerate(get_all_data(data_folder, range(4, 36))): a, b = mask_array_N0[i] c, d = mask_array_Nmax[i] x, y = zip(*list(get_data("data/loading_curves/F0011CH1.CSV"))) y = np.array(y) y = np.mean(y[1000:1800]) xdata = np.array(xdata) xdata = xdata / (10 ** -3) ydata = np.array(ydata) - y @np.vectorize def mask1(x): nonlocal a, b return a <= x <= b @np.vectorize def mask2(x): nonlocal c, d return c <= x <= d xdata, ydata = list(xdata), list(ydata) m1xdata, m1ydata = tuple(mask_data(mask1, xdata, ydata)) m2xdata, m2ydata = tuple(mask_data(mask2, xdata, ydata)) plt.plot(xdata, ydata , marker=".", linewidth=0) plt.title(titles_load_curve[i] + " Down time") plt.xlabel("time [ms]") plt.ylabel("Intensity [a.u.]") plt.show() averages_N0.append(np.mean(m1ydata)) std_devs_N0.append(np.std(m1ydata)) averages_Nmax.append(np.mean(m2ydata)) std_devs_Nmax.append(np.std(m2ydata)) # print(averages_N0) # print(std_devs_N0) uarray_Nmax = unp.uarray(averages_Nmax, std_devs_Nmax) uarray_N0 = unp.uarray(averages_N0, std_devs_N0) print(uarray_Nmax, uarray_N0) down_time = np.array( [10, 5, 3, 7, 9, 12, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 40, 50, 60, 70, 80, 90, 100]) * 10 ** -3 down_time, uarray_N0, uarray_Nmax = tuple(sort_together(down_time, uarray_N0, uarray_Nmax)) uarray_N0 = np.array(uarray_N0) uarray_Nmax = np.array(uarray_Nmax) down_time = np.array(down_time) plt.errorbar(down_time, unp.nominal_values(uarray_Nmax / uarray_N0), marker=".", yerr=unp.std_devs(uarray_Nmax / uarray_N0), label="data points") def fitfunc(x, temp): M = (85.4678 * (1.6605e-27)) # kg chi = np.sqrt(M / (k * temp)) * ((1.5e-3) / (x)) return scipy.special.erf(chi) - ((2 / np.sqrt(np.pi)) * chi * np.exp(-(chi ** 2))) popt, pcov = curve_fit(fitfunc, down_time, unp.nominal_values(uarray_Nmax / uarray_N0), sigma=unp.std_devs(uarray_Nmax / uarray_N0)) plt.plot(down_time, fitfunc(down_time, *popt), label = r"Fit of $\frac{N(t)}{N_0} \ = \ erf(\chi) \ - \ \frac{2}{\sqrt{\pi}}\chi e^{-\chi^2}$") print("We get a MOT-Temperature of:", "(",popt[0] * 10 ** 6, "+-", np.sqrt(pcov[0][0]) * 10 ** 6,") micro Kelvin") plt.title("Fraction of Recaptured Atoms vs. Down Time") plt.xlabel("down time [s]") plt.ylabel(r"$\frac{N(t)}{N_0}$") plt.legend() plt.savefig("releaserecap.pdf", format="pdf") plt.show() diffs = (unp.nominal_values(uarray_Nmax / uarray_N0) - fitfunc(down_time, *popt)) / unp.std_devs(uarray_Nmax / uarray_N0) diffs_squared = diffs ** 2 chi2 = np.sum(diffs_squared) n_data_points = len(down_time) n_fit_parameters = 1 n_dof = n_data_points - n_fit_parameters print("chi2/ndf = " + str(round(chi2, 2)) + "/" + str(n_dof)) print(r"So we get chi2_red$ = ", round(chi2, 2)/n_dof) return 0
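# Hedged aside (illustration, not from the original analysis): the error bar on
# N_max = L / alpha above comes purely from uncertainty propagation with the
# uncertainties package. A self-contained check with invented numbers:
from uncertainties import ufloat

L_demo = ufloat(2.4e7, 0.3e7)    # hypothetical loading rate [atoms/s]
alpha_demo = ufloat(0.85, 0.05)  # hypothetical loss rate [1/s]
print(L_demo / alpha_demo)  # nominal value with propagated standard deviation
# For uncorrelated inputs this reproduces
# sqrt((dL/alpha)**2 + (L*dalpha/alpha**2)**2).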
import argparse

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import uncertainties as u
import uncertainties.unumpy as unp

parser = argparse.ArgumentParser()
parser.add_argument(
    "--path",
    default="./results/theta-simulation/fresnel-theta-simulation.pkl")
args = parser.parse_args()

df = pd.read_pickle(args.path)

# Bin entries by groove number.
bins = np.arange(0.0, 12.5, 1.0)
groups = df.groupby(np.digitize(df.theta, bins))

x = -groups.mean().centroid_x.values
z_r90 = unp.uarray(-groups.mean().z_r90.values, groups.std().z_r90.values)
# z_r90 = unp.uarray(-groups.mean().centroid_z.values,
#                    groups.std().centroid_z.values)

f, ax = plt.subplots()
ax.errorbar(x, unp.nominal_values(z_r90), yerr=unp.std_devs(z_r90),
            fmt='.', label="Simulated $r_{90}$")


def fit(x, y):
    from ROOT import TF1, TGraphAsymmErrors
    g = TGraphAsymmErrors(len(x))
    for i in range(len(x)):
        g.SetPoint(i, x[i], y[i].nominal_value)
        g.SetPointError(i, 0, 0, y[i].std_dev, y[i].std_dev)
    fit = TF1("fit", "-[1]*x*x+[0]", -40, 120)
    fit.SetParNames("z0", "a")
    g.Fit(fit)
    return fit
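# Hedged alternative sketch: the same parabola z(x) = z0 - a*x**2 that the ROOT
# TF1 describes can also be fitted with scipy, using the symmetric standard
# deviations of z_r90 as weights (this variant is not in the original script):
from scipy.optimize import curve_fit

def parabola(x, z0, a):
    return z0 - a * x ** 2

popt, pcov = curve_fit(parabola, x, unp.nominal_values(z_r90),
                       sigma=unp.std_devs(z_r90), absolute_sigma=True)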
print(R_1_4)

# Import the measured data.
nu_1, U_1, dU_1 = np.genfromtxt("data/data_a_1.txt", unpack=True)
nu_2, U_2, dU_2 = np.genfromtxt("data/data_a_2.txt", unpack=True)
nu_3, U_3, dU_3 = np.genfromtxt("data/data_a_3.txt", unpack=True)
nu_4, U_4, dU_4 = np.genfromtxt("data/data_a_4.txt", unpack=True)
print(len(nu_1))
print(len(nu_2))
print(len(nu_3))
print(len(nu_4))

with open('build/a_data_1.tex', 'w') as f:
    f.write(table(
        [r'$\nu / \si{\kilo\hertz}$', r'$U / \si{\volt}$',
         r'$\nu / \si{\kilo\hertz}$', r'$U / \si{\volt}$'],
        [nu_1[0:7] / 1e3, unp.uarray(U_1[0:7], dU_1[0:7]),
         nu_1[7:14] / 1e3, unp.uarray(U_1[7:14], dU_1[7:14])]))

with open('build/a_data_2.tex', 'w') as f:
    f.write(table(
        [r'$\nu / \si{\kilo\hertz}$', r'$U / \si{\volt}$',
         r'$\nu / \si{\kilo\hertz}$', r'$U / \si{\volt}$'],
        [nu_2[0:9] / 1e3, unp.uarray(U_2[0:9], dU_2[0:9]),
         nu_2[9:18] / 1e3, unp.uarray(U_2[9:18], dU_2[9:18])]))

with open('build/a_data_3.tex', 'w') as f:
    f.write(table(
        [r'$\nu / \si{\kilo\hertz}$', r'$U / \si{\milli\volt}$',
         r'$\nu / \si{\kilo\hertz}$', r'$U / \si{\milli\volt}$'],
        [nu_3[0:10] / 1e3, unp.uarray(U_3[0:10], dU_3[0:10]) * 1e3,
         nu_3[10:20] / 1e3, unp.uarray(U_3[10:20], dU_3[10:20]) * 1e3]))

with open('build/a_data_4.tex', 'w') as f:
def ab(am, at):
    '''Relative deviation of a measured value am from a reference value at.'''
    return (am - at) / at


m_g = 107.67
m_gw = 465.18
Tgw = 293.40
Tgm = 296.63
Tg = 352.19

ckz = ck(m_zw, m_z, Tzm, Tzw, Tz)
ckg = ck(m_gw, m_g, Tgm, Tgw, Tg)
# ckg = ck(465.18, 107.67, 295.63, 293.40, 352.19)
ckz = unp.uarray(np.average(ckz), np.std(ckz))
Tzm = np.average(Tzm)
print('Abweichung von ckz', ab(noms(ckz), 0.23))
print('Abweichung von ckg', ab(ckg, 0.715))
print('ckz', ckz)
print('ckg', ckg)

Cz = C(ckz, Molz, az, kz, rhoz, Tzm)
Cg = C(ckg, Molg, ag, kg, rhog, Tgm)
print('Cz', Cz)
print('Cg', Cg)
print('3R', 3 * 8.3144598)
print('Abweichung von 3R z', ab(noms(Cz), 3 * 8.3144598))
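# Hedged illustration: ab() also accepts ufloats directly, so the deviation
# from the literature value carries the measurement error (numbers invented):
from uncertainties import ufloat

C_demo = ufloat(24.4, 0.8)  # hypothetical molar heat capacity [J/(mol K)]
print('relative deviation from 3R:', ab(C_demo, 3 * 8.3144598))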
decay_rate_calculated = decay_rate(area_under_peak, angle_distribution,
                                   prohability, efficency_calculated,
                                   measurment_time)
print('Gemittelte Aktivität', decay_rate_calculated.mean())

# ########################################################################### #
# #### --- Save the peak-fit properties to a table --- ##################### #
# ########################################################################### #

l.Latexdocument(
    filename=abs_path('tabs/sb_or_ba/peak_fit_parameter.tex')).tabular(
        data=[
            peak_indexes[0],
            unp.uarray(noms(amplitude_of_peaks), stds(amplitude_of_peaks)),
            unp.uarray(noms(sigma_of_peaks), stds(sigma_of_peaks)),
            unp.uarray(noms(offset_of_peak), stds(offset_of_peak))
        ],
        header=[
            'Kanal / ', r'Amplitude / None ', r'\sigma / \kilo\eV',
            r'\mu / \kilo\eV'
        ],
        places=[0, (1.2, 1.2), (1.2, 1.2), (2.2, 1.2)],
        caption='Regressionsparameter der Peak-Anpassung.',
        label='results_peaks')

l.Latexdocument(
    filename=abs_path('tabs/sb_or_ba/peak_charakteristiken.tex')
).tabular(
    data=[
B_1 = eichfunktion(current, *params)
print(f'\tB: {B_1}')
d_lambda = wellenlaengenAenderung(del_s, delta_s, d_lambda_D)
delta_mg = g_factor(d_lambda, B_1, lambda_1)
# print(f'\tWellenlängenänderung: {d_lambda}')
# print(f'\tDelta_mg: {delta_mg}')
print(f'\tMittelwert Delta_mg: {sum(delta_mg)/len(delta_mg)}')

# Save the results.
make_table(
    header=[r'$\delta s$ / pixel', r'$\Delta s$ / pixel',
            r'$\delta\lambda$ / \pico\meter', '$g$'],
    places=[3.0, 3.0, 2.2, (1.2, 1.2)],
    data=[delta_s, del_s, d_lambda * 1e12, delta_mg],
    caption='Werte zur Bestimmung des Lande-Faktors für die rote Spektrallinie.',
    label='tab:rot_sigma',
    filename='build/rot_sigma.tex')


if __name__ == '__main__':
    if not os.path.isdir('build'):
        os.mkdir('build')
    lambda_1 = 643.8e-9  # m (red line, 643.8 nm)
    lambda_2 = 480e-9  # m (blue line, 480 nm)
    lande_factors()
    d_lambda_1, d_lambda_2 = lummer_gehrke_platte()
    p, e = eichung()
    params = unp.uarray(p, e)
    auswertung_blau(params, d_lambda_2)
    auswertung_rot(params, d_lambda_1)
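# Hedged note as a sketch: wrapping the calibration fit result as
# unp.uarray(p, e) propagates the parameter errors into every B value, but it
# keeps only the diagonal of the covariance matrix, i.e. correlations between
# the fit parameters are dropped. Standalone analogue (the quadratic form and
# all coefficients are invented for illustration):
import uncertainties.unumpy as unp

def calib_demo(i, a, b, c):
    return a + b * i + c * i ** 2  # assumed calibration shape, not the real eichfunktion

params_demo = unp.uarray([0.12, 31.0e-3, -0.4e-3], [0.01, 2.0e-3, 0.1e-3])
print(calib_demo(5.0, *params_demo))  # B with propagated parameter errors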
import math

import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from uncertainties import ufloat
import uncertainties.unumpy as unp
from pint import UnitRegistry

import latex as l
import result

r = result.Results()
u = UnitRegistry()
Q_ = u.Quantity

# Unit conversion with var.to('unit').
# pint units: dimensionless, meter, second, degC, kelvin, ...
# Examples:
a = ufloat(5, 2) * u.meter
b = Q_(unp.uarray([5, 4, 3], [0.1, 0.2, 0.3]), 'ohm')
c = Q_(0, 'degC')

# variabel_1, variabel_2 = np.genfromtxt('name.txt', unpack=True)


def mittel_und_abweichung(messreihe):
    '''Mean and standard deviation of the mean, keeping the pint unit.'''
    messreihe_einheit = messreihe.units
    mittelwert = sum(messreihe) / len(messreihe)
    abweichung_des_mittelwertes = 1 / np.sqrt(len(messreihe)) * np.std(messreihe)
    return Q_(unp.uarray(mittelwert, abweichung_des_mittelwertes),
              messreihe_einheit)
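# Hedged usage sketch for the quantities defined above: pint tracks the units
# while uncertainties tracks the errors, so Ohm's law works in one line
# (the test current is an invented value; exact output formatting depends on
# the installed pint/uncertainties versions):
current_demo = Q_(2.0, 'ampere')
print((b * current_demo).to('volt'))  # roughly [10.0+/-0.2 8.0+/-0.4 6.0+/-0.6] volt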
# Read in the measured data.
Ck_a_b_in_nF, Bäuche, fehler_bäuche, v_plus_in_kHz, v_minus_in_kHz, fehler_frequenzen = np.genfromtxt(
    'messdaten_a_b.txt', unpack=True)
Ck_c_in_nF, Delta_t_1_in_ms, Delta_t_2_in_ms, fehler_zeiten = np.genfromtxt(
    'messdaten_c.txt', unpack=True)

L_in_mH = 23.954
C_in_nF = 0.7932
Csp_in_nF = 0.028
f_start_in_kHz = 20.06
f_end_in_kHz = 68.97
f_resonanz_gemessen_in_kHz = 35.65
Fehler_Ck = 0.003
f_res_gemessen = ufloat(35.65, 0.02)

# Measured quantities with their uncertainties.
Bäuche = unp.uarray(Bäuche, fehler_bäuche)
v_plus_in_kHz = unp.uarray(v_plus_in_kHz, fehler_frequenzen)
v_minus_in_kHz = unp.uarray(v_minus_in_kHz, fehler_frequenzen)
Delta_t_1_in_ms = unp.uarray(Delta_t_1_in_ms, fehler_zeiten)
Delta_t_2_in_ms = unp.uarray(Delta_t_2_in_ms, fehler_zeiten)

# Coupling capacitances with uncertainties (errors rounded to 3 decimal places).
Ck_a_b_Fehler_nF = unp.uarray(Ck_a_b_in_nF, np.round(Ck_a_b_in_nF * Fehler_Ck, 3))
Ck_c_Fehler_nF = unp.uarray(Ck_c_in_nF, np.round(Ck_c_in_nF * Fehler_Ck, 3))

# Convert to SI units.
Ckab = Ck_a_b_Fehler_nF * 10**(-9)
Ckc = Ck_c_Fehler_nF * 10**(-9)
L = L_in_mH * 10**(-3)
C = C_in_nF * 10**(-9)
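# Hedged consistency check (not in the original script): with L, C and Csp in
# SI units, the expected resonance of a single oscillator circuit is
# f_res = 1 / (2*pi*sqrt(L*(C + Csp))) ~ 35.9 kHz, close to the measured
# f_res_gemessen = 35.65 kHz.
Csp = Csp_in_nF * 10**(-9)
f_res_theorie_kHz = 1 / (2 * np.pi * np.sqrt(L * (C + Csp))) / 1e3
print('f_res theory [kHz]:', f_res_theorie_kHz)
print('f_res measured [kHz]:', f_res_gemessen)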