Example #1
def make_table(resultPath, classifer, splitFile, op, lk, tp):
    table.make_table(resultPath=resultPath,
                     classifer=classifer,
                     splitFile=splitFile,
                     op=op,
                     lk=lk,
                     tp=tp)
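A hypothetical invocation of the wrapper above; every argument value here is invented purely for illustration, since the snippet only shows that the keywords are forwarded unchanged to table.make_table:

make_table(resultPath='results/', classifer='svm', splitFile='split_01.json',
           op='mean', lk=5, tp=0.2)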
Example #2
    def start(self):
        from table import make_table

        stbase.Strategy.start(self)
        stbase.println("Strategy : Sample Started..")

        cfgs = self.param_table
        params = cfgs.get("PARAMS")

        entries = []
        if not cfgs.get("TEST_ARRAY"):
            entries = make_table(params)
        else:
            entries = cfgs.get("TEST_ARRAY")

        for idx, bar in enumerate(self.bar_list):
            bar.index = idx

        name_ov = 'runtest_overview_{}_{}.txt'.format(SYMBOL,INDEX)
        if os.path.exists(name_ov):
            os.remove(name_ov)

        # for pandas excel

        fields = OrderedDict(params=[], open_times=[], close_times=[],
                             fee=[], profit=[], date_range=[],
                             sample_num=[], sample_unit=[], init_funds=[],
                             curr_funds=[], curr_margin=[])


        global STOP_WIN, STOP_WIN_MIN, N, TIMEPERIOD, INIT_FUNDS, LOT_PER_UNIT
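        # note (editor's reading): these module-level parameters are overwritten for each
        # parameter combination in the loop below and are presumably consumed inside
        # onBar() when the bar list is replayed.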

        self.overview_report_ready()

        for p in entries:
            STOP_WIN = p['stop_win']
            STOP_WIN_MIN = p['stop_win_min']
            TIMEPERIOD = p['timeperiod']
            N = p['n']

            print '>>', STOP_WIN, STOP_WIN_MIN, N, TIMEPERIOD
            # From Here
            self.reset()

            for bar in self.bar_list:
                self.onBar(bar)

            self.output_file = open(name_ov, 'a+')
            self.reportView(fields)

            # net-value (equity) processing
            self.data_net_report()

            self.overview_report_datarecord()


        self.overview_report_write()
        df = pd.DataFrame(fields)
        df.to_excel("runtest_overview_{}_{}.xlsx".format(SYMBOL,INDEX))
Example #3
def result(result_list):
    a_lst = []
    for client in result_list:
        lst = []
        lst.append(client["ip"])
        lst.append(client["mac"])
        lst.append(client["mac_vendor"])
        a_lst.append(lst)
    labels = ["IP", "MAC Address", "MAC Vendor"]
    a = table.make_table(rows=a_lst, labels=labels, centered=True)
    print(a)
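A minimal usage sketch for the snippet above, with hypothetical scan results; it only assumes the ip/mac/mac_vendor keys read in the loop and the rows=/labels=/centered= keywords of the call:

sample_clients = [
    {"ip": "192.168.0.10", "mac": "AA:BB:CC:DD:EE:01", "mac_vendor": "ExampleCorp"},
    {"ip": "192.168.0.11", "mac": "AA:BB:CC:DD:EE:02", "mac_vendor": "AnotherVendor"},
]
result(sample_clients)  # prints the three-column table to stdout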
Example #4
def main(args=None):
    # print('PHA debug in main')
    # parser = make_parser()
    # args=parser.parse_args(args)
    #
    # find the notebooks list
    #
    root_dir = context.curr_dir.parents[1]
    website_dir = root_dir / Path("website")
    notebook_dir = root_dir / Path("notebooks")
    print(f"found notebook_dir {notebook_dir}")
    json_path = website_dir / Path("notebooks.json")
    with open(json_path, "r") as json_file:
        notebook_order = json.load(json_file)
    print(f"notebook order: {notebook_order}")
    txt_out = website_dir / Path("index_notebooks.txt")
    rst_out = website_dir / Path("notebook_index.rst")
    # print(f'here are the output files: {txt_out},{rst_out}')
    # print(f'DEBUG: here is the noteobok order from find_notebooks.py: \n{pprint.pformat(notebook_order)}\n')
    web_notebooks_path = website_dir / Path("web_notebooks")
    web_pybooks_path = website_dir / Path("_build/python")
    os.makedirs(web_notebooks_path, exist_ok=True)
    #
    # first line of table
    #
    table_list = [["cocalc folder", "html", "python"]]
    namelist = []
    file_list = []
    git_url = "https://github.com/phaustin/eosc213_students/blob/master/python"
    for week, weeklist in notebook_order.items():
        week_head = "**{}**".format(week)
        table_list.append([week_head, " ", " "])
        for notebook in weeklist:
            py_name = f"{notebook}.py"
            nb_name = f"{notebook}.ipynb"
            try:
                nb_file = list(context.student_dir.glob(f"**/{nb_name}"))[0]
            except IndexError:
                print(f"error locating {context.student_dir} / {nb_name}")
                sys.exit(1)
            print(f"processing {nb_file} in folder {context.student_dir}")
            try:
                py_file = list(context.student_dir.glob(f"**/{py_name}"))[0]
            except IndexError:
                print(f"error locating {context.student_dir} / {py_name}")
                sys.exit(1)
            print(nb_file.is_file())
            nb_path = str(nb_file.relative_to(context.student_dir))
            shutil.copy(nb_file, web_notebooks_path)
            shutil.copy(py_file, web_pybooks_path)
            namelist.append(notebook)
            ipynb = r"{}_ipynb_"
            html = r"`{}_html`_"
            python = r"`{}_py`_"
            ipynb, html, python = [
                item.format(notebook) for item in [ipynb, html, python]
            ]
            ipynb = nb_path
            table_list.append([ipynb, html, python])
            file_list.append(nb_path)
    print(f"tablelist: {table_list}")
    # #
    # # write out the shortcuts
    # #
    with open(txt_out, "w") as notetxt:
        print(f"writing {str(txt_out.resolve())}")
        for name, filepath in zip(namelist, file_list):
            notetxt.write(f".. _{name}_ipynb: {filepath}\n")
            notetxt.write(f".. _{name}_html: web_notebooks/{name}.ipynb\n")
            notetxt.write(f".. _{name}_py: {git_url}/{name}.py\n")

    header = """
    .. include:: index_notebooks.txt

    .. _notebooks:

    E213 notebooks in order of appearance
    =====================================

    """

    header = textwrap.dedent(header)
    # pdb.set_trace()
    with open(rst_out, "w") as noterst:
        print(f"writing {str(rst_out.resolve())}")
        noterst.write(header)
        noterst.write(make_table(table_list))
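The loops above imply that notebooks.json maps a week label to a list of notebook basenames; a hypothetical illustration of that layout (all names invented):

notebook_order = {
    "week01": ["intro_notebook", "numpy_review"],
    "week02": ["finite_volume"],
}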
Example #5
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from uncertainties import ufloat
from table import (
    make_table,
    make_SI,
    write,
)

# read in voltages and currents
spannung_orange, orange = np.genfromtxt('Daten1.txt', unpack=True)
spannung, rot, grun, lila, blau, uv = np.genfromtxt('Daten2.txt', unpack=True)
matrix=np.array([rot, grun, lila, blau, uv])


# write the values to tables
write('build/tabelle_alle_wellenlangen.tex', make_table([spannung, rot, grun, lila, blau, uv],[2,0,0,0,0,0]))
write('build/tabelle_alle_wellenlangen_wurzel.tex', make_table([spannung, np.sqrt(rot), np.sqrt(grun), np.sqrt(lila), np.sqrt(blau), np.sqrt(uv)],[2,1,1,1,1,1]))
write('build/tabelle_orange_wellenlange.tex', make_table([spannung_orange, orange],[2,0]))



# convert picoamperes to amperes
matrix = matrix*10**-12
orange = orange*10**-12




# pick out the values with positive current and plot them
matrix_plus = np.ones((len(matrix[:,0]),len(matrix[0,:])))
#matrix_plus = np.ones((10,5))
Example #6
# determination of the grating constant

# known wavelengths of the helium spectral lines (the order must, of course,
# match the order in the file WinkelHelium.txt):
lambda_helium = np.array([438.8, 447.1, 471.3, 492.2, 501.6, 504.8, 587.6, 667.8, 706.5]) * 1e-9  # in m

# sine values for the plot and the fit
sin_phi_helium = np.array(np.sin(phi_helium))
# fit sin(phi) against lambda to determine g
params_gitterkonstante = ucurve_fit(reg_linear, sin_phi_helium, lambda_helium)

g, offset = params_gitterkonstante  # g in m, offset is dimensionless
write("build/gitterkonstante.tex", make_SI(g * 1e9, r"\nano\meter", figures=1))
write("build/offset.tex", make_SI(offset * 1e9, r"\nano\meter", figures=1))
write("build/Tabelle_messdaten_kalium.tex", make_table([phi_kalium * 180 / np.pi], [1]))
write("build/Tabelle_messdaten_natrium.tex", make_table([phi_natrium * 180 / np.pi], [1]))
write("build/Tabelle_messdaten_rubidium.tex", make_table([phi_rubidium * 180 / np.pi], [1]))

##### PLOT linear regression #####
t_plot = np.linspace(np.amin(sin_phi_helium), np.amax(sin_phi_helium), 2)
plt.xlim(
    t_plot[0] - 1 / np.size(sin_phi_helium) * (t_plot[-1] - t_plot[0]),
    t_plot[-1] + 1 / np.size(sin_phi_helium) * (t_plot[-1] - t_plot[0]),
)
plt.plot(t_plot, reg_linear(t_plot, *noms(params_gitterkonstante)) * 1e9, "b-", label="Fit")
plt.plot(sin_phi_helium, lambda_helium * 1e9, "rx", label="Messdaten")
plt.ylabel(r"$\lambda \:/\: \si{\nano\meter}$")
plt.xlabel(r"$\sin(\varphi)$")
plt.legend(loc="best")
plt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)
Example #7
params = ucurve_fit(reg.reg_linear, abstand_skala1_a, f1_a)
m1, b1 = params
write('build/m1_a.tex', make_SI(m1*1e3, r'\per\centi\meter', 'e-3', figures=1))     # enter the number of significant figures
write('build/b1_a.tex', make_SI(b1*1e3, r'', 'e-3', figures=1))

# part 2
f2_log_a = np.array([26800.1, 29500, 32800, 36400, 40100, 44300, 48700, 53500, 58800, 64200, 70200, 77500])
abstand_skala2_a = [0,2,4,6,8,10,12,14,16,18,20,22]
f2_a = np.log(f2_log_a)

params = ucurve_fit(reg.reg_linear, abstand_skala2_a, f2_a)
m2, b2 = params
write('build/m2_a.tex', make_SI(m2*1e3, r'\per\centi\meter', 'e-3', figures=1))     # enter the number of significant figures
write('build/b2_a.tex', make_SI(b2*1e3, r'', 'e-3', figures=1))

write('build/Tabelle_a.tex', make_table([abstand_skala1_a, f1_log_a*1e-3, f1_a, abstand_skala2_a, f2_log_a*1e-3, f2_a], [0, 1, 2, 0, 1, 2]))  # enter the number of decimal places
# FULLTABLE
write('build/Tabelle_a_texformat.tex', make_full_table(
    'Messdaten Aderlasskurven.',
    'table:A1',
    'Tabelle_a.tex',
    [],              # careful: these numbers are the resulting column indices
                     # that should be rendered as multicolumns
    ['$\\textrm{Abstand}_\\textrm{C} \\:/\\: \\si{\\cm}$',
    '$\\nu_\\textrm{C} \\:/\\: \\si{\\kilo\\hertz}$',
    '$\\log{\\abs{\\nu_\\textrm{C}}}$',
    '$\\textrm{Abstand}_\\textrm{C$_1$,C$_2$} \\:/\\: \\si{\\cm}$',
    '$\\nu_\\textrm{C$_1$,C$_2$} \\:/\\: \\si{\\kilo\\hertz}$',
    '$\\log{\\abs{\\nu_\\textrm{C$_1$,C$_2$}}}$']))

# test plot for the linear fit
Example #8
v_auf_mittel_stds=[ stds(v_2_auf_mittel), stds(v_3_auf_mittel), stds(v_4_auf_mittel), stds(v_5_auf_mittel), stds(v_6_auf_mittel), stds(v_7_auf_mittel), stds(v_8_auf_mittel), stds(v_9_auf_mittel), stds(v_10_auf_mittel)]

v_ab_mittel_nom=[ noms(v_2_ab_mittel), noms(v_3_ab_mittel), noms(v_4_ab_mittel), noms(v_5_ab_mittel), noms(v_6_ab_mittel), noms(v_7_ab_mittel), noms(v_8_ab_mittel), noms(v_9_ab_mittel), noms(v_10_ab_mittel)]

v_ab_mittel_stds=[ stds(v_2_ab_mittel), stds(v_3_ab_mittel), stds(v_4_ab_mittel), stds(v_5_ab_mittel), stds(v_6_ab_mittel), stds(v_7_ab_mittel), stds(v_8_ab_mittel), stds(v_9_ab_mittel), stds(v_10_ab_mittel)]

# v_0 = array(1/v_2_0, 1/v_3_0, 1/v_4_0, 1/v_5_0, 1/v_6_0, 1/v_7_0, 1/v_8_0, 1/v_9_0, 1/v_10_0)
v_0 = np.genfromtxt('messdaten/V_0.txt', unpack=True)
v_0 = 1/v_0
U = np.genfromtxt('messdaten/Spannung.txt', unpack=True)
v_auf = unp.uarray(v_auf_mittel_nom, v_auf_mittel_stds)
v_auf = 1/v_auf
v_ab  = unp.uarray(v_ab_mittel_nom, v_ab_mittel_stds)
v_ab = 1/v_ab
write('build/Tabelle_Geschwindigkeiten.tex', make_table([v_auf, v_ab, v_0, U], [1, 1, 3, 0]))     # every value with an uncertainty gets two columns
write('build/Tabelle_Geschwindigkeiten_texformat.tex', make_full_table(
    caption = 'Mittelwerte der Messdaten für jedes untersuchte Tröpfchen und zugehörige Spannung.',
    label = 'table:A2',
    source_table = 'build/Tabelle_Geschwindigkeiten.tex',
    stacking = [0,1],              # careful: these numbers are the resulting column indices that should be rendered as multicolumns
    units = [
    r'$v_\textrm{auf} \:/\: \si{\milli\meter\per\second}$',
    r'$v_\textrm{ab} \:/\: \si{\milli\meter\per\second}$',
    r'$v_0\:/\: \si{\milli\meter\per\second}$',
    r'$U\:/\: \si{\volt}$']))


write('build/Tabelle_Kriterium.tex', make_table([2*v_0, v_ab-v_auf],[3,3]))     # every value with an uncertainty gets two columns
write('build/Tabelle_Kriterium_texformat.tex', make_full_table(
    caption = 'Überprüfung der Bedingung.',
Example #9
def do_job_a(filename, error, j, filename_out=None):
    # read in the measurement data
    P, Delta_f_30, Delta_f_15, Delta_f_60 = np.genfromtxt(filename, unpack=True)

    # the d_i values in mm (used in the captions and labels below) and the plot marker colours
    di = [7, 10, 16]
    colors = ["rx", "bx", "gx"]

    Delta_f_30_error = Delta_f_30 * error
    Delta_f_30 = unp.uarray(Delta_f_30, Delta_f_30_error)
    Delta_f_15_error = Delta_f_15 * error
    Delta_f_15 = unp.uarray(Delta_f_15, Delta_f_15_error)
    Delta_f_60_error = Delta_f_60 * error
    Delta_f_60 = unp.uarray(Delta_f_60, Delta_f_60_error)

    v = unp.uarray(np.zeros(3), np.zeros(3))
    v[0] = c_L / 2 / nu_0 * Delta_f_30 / np.cos(alpha[0])
    v[1] = c_L / 2 / nu_0 * Delta_f_15 / np.cos(alpha[1])
    v[2] = c_L / 2 / nu_0 * Delta_f_60 / np.cos(alpha[2])

    v_mean = mean([v[0], v[1], v[2]], 0)

    # TABLES
    write(
        "build/Tabelle_a_" + str(di[j]) + ".tex",
        make_table([P, Delta_f_30, Delta_f_15, Delta_f_60, v[0], v[1], v[2], v_mean], [0, 1, 1, 1, 1, 1, 1, 1]),
    )
    write(
        "build/Tabelle_a_" + str(di[j]) + "_texformat.tex",
        make_full_table(
            r"Messdaten und daraus errechnete Geschwindigkeit für $d_i = $" + str(di[j]) + r"$\si{\milli\meter}$.",
            "table:A" + str(j),
            "build/Tabelle_a_" + str(di[j]) + ".tex",
            [1, 2, 3, 4, 5, 6, 7],
            [
                r"$\frac{P}{P_\text{max}} \:/\: \si{\percent}$",
                r"$\Delta f_{30°} \:/\: \si{\hertz}$",
                r"$\Delta f_{15°} \:/\: \si{\hertz}$",
                r"$\Delta f_{60°} \:/\: \si{\hertz}$",
                r"$v_{30°} \:/\: \si{\meter\per\second}$",
                r"$v_{15°} \:/\: \si{\meter\per\second}$",
                r"$v_{60°} \:/\: \si{\meter\per\second}$",
                r"$\overline{v} \:/\: \si{\meter\per\second}$",
            ],
        ),
    )

    # Plotting
    plt.figure(1)
    y = Delta_f_30 / np.cos(alpha[0])
    plt.errorbar(
        noms(v[0]),
        noms(y),
        fmt=colors[j],
        xerr=stds(v[0]),
        yerr=stds(y),
        label=r"$d_i = " + str(di[j]) + r"\si{\milli\meter}$",
    )

    plt.figure(2)
    y = Delta_f_15 / np.cos(alpha[1])
    plt.errorbar(
        noms(v[1]),
        noms(y),
        fmt=colors[j],
        xerr=stds(v[1]),
        yerr=stds(y),
        label=r"$d_i = " + str(di[j]) + r"\si{\milli\meter}$",
    )

    plt.figure(3)
    y = Delta_f_60 / np.cos(alpha[2])
    plt.errorbar(
        noms(v[2]),
        noms(y),
        fmt=colors[j],
        xerr=stds(v[2]),
        yerr=stds(y),
        label=r"$d_i = " + str(di[j]) + r"\si{\milli\meter}$",
    )

    i = 1
    if filename_out:
        for name in filename_out:
            plt.figure(i)
            plt.xlabel(r"$v \:/\: \si{\meter\per\second}$")
            plt.ylabel(r"$\Delta\nu / \cos{\alpha} \:/\: \si{\kilo\volt}$")
            plt.legend(loc="best")
            plt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)
            plt.savefig(name)
            i += 1
Example #10
# vacuum speed of light
c = 299792458  # metre per second
# diffraction distance
d = 201.4e-12  # metre
# elementary charge
e = 1.6e-19  # coulomb
# Rydberg constant
r = 13.6  # eV
# Sommerfeld fine-structure constant
s_k = 7.29e-3



zwei_theta, impulsrate = np.genfromtxt('messdaten/1_Messung_werte.txt', unpack=True)

write('build/Tabelle_messung_1.tex', make_table([zwei_theta,impulsrate],[1, 0]))     # every value with an uncertainty gets two columns
write('build/Tabelle_messung_1_texformat.tex', make_full_table(
    'Messdaten Bragg Bedingung.',
    'table:A2',
    'build/Tabelle_messung_1.tex',
    [],              # careful: these numbers are the resulting column indices
                     # that should be rendered as multicolumns
    [
    r'$\theta \:/\: \si{\degree}$',
    r'$Zaehlrate$']))

theta, Z = np.loadtxt("messdaten/Bremsberg_werte.txt", unpack=True)

theta = theta/2

Example #11
File: Rechnung.py, Project: Anjaaaa/AP-1516
print('eta_1:', eta_1)
print('eta_2:', eta_2)


################################################################
### compute the etas and phi
################################################################
phi = 0.5 * (phi_1 - phi_2)
phi_Mittel = ufloat(np.mean(phi), np.std(phi)/np.sqrt(len(phi)))
# eta = 180 - (360 + eta_1 - eta_2)
eta = 180 - (eta_1-eta_2) # using the values from Sonja and Saskia
print('eta:', eta)
print('phi_Mittel:', phi_Mittel)


write('build/Messwerte1.tex', make_table([phi_1, phi_2, phi],[1,1,1]))
write('build/Winkel_Prisma.tex', make_SI(phi_Mittel,r'',figures=1))
write('build/Messwerte2.tex', make_table([wavelength*10**9, eta_1, eta_2, eta],[2,1,1,1]))
################################################################


phi_Mittel = 60


################################################################
### refractive index
################################################################
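# note (editor's reading): the line below evaluates the prism formula
# n = sin((eta + phi)/2) / sin(phi/2), converting the angles from degrees
# to radians via 2*pi/360.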
n = unp.sin( 0.5 * 2 * np.pi / 360 * (eta + phi_Mittel) ) / unp.sin( 0.5 * 2 * np.pi / 360 * phi_Mittel)
print('Brechzahl:', n)

Example #12
########## DIFFERENT STUFF ##########
# R = const.physical_constants["molar gas constant"]      # Array of value, unit, error

### PRELIMINARY WORK ####

h_zylinder, t_zylinder = np.genfromtxt('messdaten/a.txt', unpack=True)

h_zylinder = h_zylinder*10**(-3)
t_zylinder = t_zylinder*10**(-6)


##### a #####

v_zylinder = 2*h_zylinder/t_zylinder

write('build/Tabelle_0.tex', make_table([h_zylinder*10**3, t_zylinder*10**6, v_zylinder],[2, 1, 2]))     # every value with an uncertainty gets two columns
write('build/Tabelle_0_texformat.tex', make_full_table(
     'Bestimmung der Schallgeschwindigkeit mittels Impuls-Echo-Verfahren.',
     'tab:0',
     'build/Tabelle_0.tex',
     [],              # careful: these numbers are the resulting column indices
                      # that should be rendered as multicolumns
     [r'$h_{\text{zylinder}} \:/\: 10^{-3} \si{\metre}$',
     r'$\increment t \:/\: 10^{-6} \si{\second} $',
     r'$c_\text{Acryl} \:/\:\si{\metre\per\second} $']))

c_arcyl_1 = ufloat(np.mean(v_zylinder), np.std(v_zylinder))
write('build/c_acryl_1.tex', make_SI(c_arcyl_1, r'\metre\per\second', figures=2))      # enter the number of significant figures

params = ucurve_fit(reg_linear, 0.5*t_zylinder, h_zylinder)             # linear fit
a, b = params
Example #13
saturation_current_4 = saturation_current(d.anode_4,1)
saturation_current_5 = saturation_current(d.anode_5,1)

saturation_current = []
saturation_current.append(saturation_current_1)
saturation_current.append(saturation_current_2)
saturation_current.append(saturation_current_3)
saturation_current.append(saturation_current_4)
saturation_current.append(saturation_current_5)

saturation_tab = copy.deepcopy(saturation_current)
d.make_it_SI2(saturation_tab,6)
#write('../tex-data/saturation.tex',
#      make_table([[1,2,3,4,5],saturation_current], [0, 6]))
write('../tex-data/saturation.tex',
      make_table([[1,2,3,4,5],saturation_tab], [0, 0]))

print("Sättigungsstrom 1:",saturation_current_1)
print("Sättigungsstrom 2:",saturation_current_2)
print("Sättigungsstrom 3:",saturation_current_3)
print("Sättigungsstrom 4:",saturation_current_4)
print("Sättigungsstrom 5:",saturation_current_5)


# calculating the Langmuir-Schottky exponent - should be 1.5

def langmuh_Reg(V, a, b):
    return a*V + b

def make_it_ln(array, k):
    """takes the logarithm"""
Example #14
from table import (
    make_table,
    make_SI,
    write,
    make_composed_table,
)



e1, e2, t1, t2 = np.genfromtxt('WerteA.txt', unpack=True)
print(e1)

# convert to SI units
e1 = e1/100
e2 = e2/100
t1 = t1*10**-6
t2 = t2*10**-6

# speed of sound in acrylic
c = 2730

s1 = c*t1/2
s2 = c*t2/2

print(np.mean(s1-e1))
print(np.mean(s2-e2))

print(s1)
print(s2[::-1])

write('build/tabelle_WerteA.txt', make_table([e1*100, s1*100, (s1-np.mean(s1-e1))*100, e2*100, s2*100, (s2-np.mean(s2-e2))*100], [3,3,3,3,3,3]))
Example #15
from table import (
    make_table,
    make_full_table,
    make_composed_table,
    make_SI,
    write,
)

######################################################################################################################################################
## Heat of vaporisation

# read in the given data
p1, T1 = np.genfromtxt('Messdaten/Verdampfungskurve.txt', unpack=True)
T1 += 273.1     # T in K
p1 += 1         # Offset 1 bar
p1 *= 1e5       # in Pa
write('build/Tabelle_Verdampfungskurve.tex', make_table([T1, p1, 1e3/(T1), np.log(p1)], [1,1,3,2]))
write('build/Tabelle_Verdampfungskurve_texformat.tex', make_full_table(
    'Abgelesene und daraus abgeleitete Werte für die Berechnung der Verdampfungswärme.',
    'table:A1',
    'build/Tabelle_Verdampfungskurve.tex',
    [],
    [r'$T \:/\: \si{\kelvin}$',
    r'$p \:/\: \si{\bar}$',
    r'$\frac{1}{T} \:/\: 10^{-3}\si{\per\kelvin}$',
    r'$\ln{(p/\si{\pascal})}$']))

# fit of the vaporisation curve
params = ucurve_fit(reg.reg_linear, 1/T1, np.log(p1), p0=[-1, 1])
m1, b1 = params
write('build/m1.tex', make_SI(m1, r'\kelvin', '', 1))   # 1 significant figure
write('build/b1.tex', make_SI(b1, r'', '', 1))   # 1 significant figure
Example #16
import scipy.constants as const
import uncertainties.unumpy as unp
from uncertainties import ufloat
from uncertainties.unumpy import (
    nominal_values as noms,
    std_devs as stds,
)
from curve_fit import ucurve_fit
from table import (
    make_table,
    make_SI,
    write,
)

p1, T1, T1_fl = np.genfromtxt('Messwerte/bis1000mBar.txt', unpack=True)
write('build/MessdatenBis1Bar.tex', make_table([T1, p1, 1e3/(T1+273.1), np.log(p1*1e2)], [0,0,3,2]))    # p1 in mbar, T in °C, 1/T in 10^-3 K^-1, ln(p/Pa)
T1 += 273.1     # T in K
p1 *= 1e2       # p in N/m^2  (Pa)

# linear regression
def f(x, m, b):
    return m * x + b
# pressure as a function of temperature, n=3
def g(x, d, c, b, a):
    return d*x**3+c*x**2+b*x+a
# pressure as a function of temperature, n=4
def h(x, e, d, c, b, a):
    return e*x**4+d*x**3+c*x**2+b*x+a

# heat of vaporisation as a function of temperature
def L(konst_a, konst_R, T, d, c, b, a):
Example #17
# STEP 3 read files

FAILED_TO_READ_FILES = [p for p, f in FILE_PATH_STR_PAIRS if f is None]
FILE_PATH_STR_PAIRS = [x for x in FILE_PATH_STR_PAIRS if x[1] is not None]
NUMBER_OF_READ_FILES = len(FILE_PATH_STR_PAIRS)
if NUMBER_OF_FILES > NUMBER_OF_READ_FILES:
    logger.log_failed_reads(
        NUMBER_OF_FILES,
        NUMBER_OF_READ_FILES,
        FAILED_TO_READ_FILES
    )

# STEP 4 remove failed to read files

print(f"\nextracting data from {NUMBER_OF_READ_FILES} files...")
DATA_FRAME, ERRORS = make_table(
    proc.SELECTORS,
    FILE_PATH_STR_PAIRS
)
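# note (editor's reading): judging from the calls below, make_table here returns a
# pandas DataFrame of extracted values together with a DataFrame of per-file errors
# (ERRORS.empty is checked before logging).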

proc.WRITER(EXCEL_PATH, DATA_FRAME)

# STEP 5 generate excel file

if not ERRORS.empty:
    logger.log_errors(ERRORS)

logger.log_success(NUMBER_OF_READ_FILES, NUMBER_OF_FILES, EXCEL_PATH)

# STEP 6 print results
Example #18
write('build/Geschwindigkeitsabweichung.tex', make_SI((c_acryl_1/c_acryl)*1e2, r'\percent', figures=1))
silikon_konst = 3  # µs
write('build/konst.tex', make_SI(silikon_konst, r'\micro\second', figures=1))
silikon_konst *= 1e-6  # seconds

Abstände_A = np.genfromtxt('messdaten/Abstände.txt', unpack=True) # in centimeter
Abstände_B = Höhe-Abstände_A
Abstand = np.genfromtxt('messdaten/Abstände.txt', unpack=True)
Abstand_B = Höhe - Abstand
 #
 # y[n - 1::n]

print('Hallo')
print(Num[0:5])
print(Num[6:10])
write('build/Tabelle_Abstaende.tex', make_table([Num,Abstand],[0,2]))     # every value with an uncertainty gets two columns
write('build/Tabelle_Abstaende_texformat.tex', make_full_table(
    'Gemessene Tiefe der Bohrungen aus Sicht der "A-Seite".',
    'table:A5',
    'build/Tabelle_Abstaende.tex',
    [],              # careful: these numbers are the resulting column indices
                     # that should be rendered as multicolumns
    [
    r'$\text{Num.} $',
    r'$\text{Tiefe} \:/\: \si{\centi\meter}$']))

#### A-Scan
#### I estimate a reading error of 0.1
Fehler_A = 0.1*1e-6  # in seconds
Werte_A_A, Werte_A_B = np.genfromtxt('messdaten/aufgabenteil_a.txt', unpack=True)
t_a_a = unp.uarray(Werte_A_A, Fehler_A)
Example #19
Nu=460
t_0=900 #in s
N_Offset_Indium = (Nu/t_0)*220
write('build/Fehler_Indium.tex', make_SI(N_Offset_Indium, r'', figures=1))
N_Offset_Silber = (Nu/t_0)*9
#Import Data
#Indium = Ind
#Silber = Si

Ind_nom, t = np.genfromtxt('messdaten/Indium.txt', unpack=True)
Ind_nom = Ind_nom -  N_Offset_Indium
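# note (editor's reading): Poisson counting statistics - each offset-corrected count
# is assigned an uncertainty of sqrt(N) in the uarray below.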
Ind = unp.uarray(Ind_nom, np.sqrt(Ind_nom))
# Ind = unp.uarray(Ind_nom, N_Offset_Indium)


write('build/Tabelle_Indium.tex', make_table([Ind,t],[1, 0]))
write('build/Tabelle_Indium_texformat.tex', make_full_table(
    caption = 'Messdaten von Indium unter Berücksichtigung des Nulleffekts.',
    label = 'table:Indium',
    source_table = 'build/Tabelle_Indium.tex',
    stacking = [0],              # careful: these numbers are the resulting column indices that should be rendered as multicolumns
    units = [
    r'$N_\textrm{\Delta t}$',
    r'$t \:/\: \si{\second}$']))         # default = '-'





Si_nom, t_Si = np.genfromtxt('messdaten/Silber.txt', unpack=True)
Si_nom = Si_nom - N_Offset_Silber
Example #20
D_mess_o = D_o
D_mess_u = D_o


D_mess_o = c*t_o/2
D_mess_u = c*t_u/2
D_loch = hoehe_mess - (D_mess_o + D_mess_u)


D_mess_o = D_mess_o*10**(2)
D_mess_u = D_mess_u*10**(2)
D_o = D_o*10**2
D_loch = D_loch*10**3
List = [1,2,3,4,5,6,7,8,9,10,11]
write('build/Tabelle_a.tex', make_table([List, D_o, D_mess_o, D_mess_u, D_loch],[0,2,2,2,2])) #cm, cm ,cm, mm
write('build/Tabelle_a_texformat.tex', make_full_table(
    'Messdaten Tiefenmessungen.',
    'table:1',
    'build/Tabelle_a.tex',
    [],              # careful: these numbers are the resulting column indices
                     # that should be rendered as multicolumns
    [
    r'$\text{Stelle}$',
    r'$D_{\text{oben}} \:/\: \si{\centi\metre}$',
    r'$D_{\text{oben,gem}} \:/\: \si{\centi\metre}$',
    r'$D_{\text{unten,gem}} \:/\: \si{\centi\metre}$',
    r'$D_{\text{loch,gem}} \:/\: \si{\milli\metre}$']))

D_o_rel_a = abs(D_mess_o-D_o)/D_o * 100
D_o_rel_a = np.mean(D_o_rel_a)
Example #21
from ErrorCalculation import (
    MeanError,
    rmse,
)

# a) Wheatstone bridge
# resistor values 14, 10
Wert_a, R_2, c    = np.genfromtxt('Messergebnisse/a.txt', unpack=True)
R_2er   = R_2*0.002
R_3     = c
R_4     = 1000-c
R_34    = R_3 / R_4
R_34er  = R_34*0.005
R2  = unp.uarray(R_2, R_2er)
R34 = unp.uarray(R_34, R_34er)
write('build/wheat1tabelle.tex', make_table([Wert_a, R2, R34], [0, 1, 1, 1, 1]))
Rx = R2*R34

Rx_mean_Wert14=np.mean(Rx[0:3])
Rx_mean_Wert10=np.mean(Rx[3:6])
Rx_mean_Wert14_err = MeanError(noms(Rx[0:3]))
Rx_mean_Wert10_err = MeanError(noms(Rx[3:6]))

# Building Arrays from this...
Werte_a = np.array([Wert_a[0], Wert_a[3]])
Rx_mean = np.array([Rx_mean_Wert14, Rx_mean_Wert10])    # mean and systematic error
Rx_mean_err = np.array([Rx_mean_Wert14_err, Rx_mean_Wert10_err])    # statistical error


write('build/Tabelle_err_a.tex', make_table([Werte_a, Rx_mean, Rx_mean_err],[0, 1, 0]))
write('build/Tabelle_a.tex', make_table([Wert_a, R2, R34, Rx], [0, 1, 1, 1]))
Example #22
slit_count = 0
for slit in data.slits:
    slit_count += 1
    count = len(slit.currents)
    midpoint = (slit.midpoint_index + 1)
    spacing = slit.spacing

    angles_left = np.linspace(-(midpoint) * spacing, -spacing, midpoint)
    angles_right = np.linspace(0, (count - midpoint) * spacing, count - midpoint)
    slit.angles = np.arctan(np.append(angles_left, angles_right) / data.screen_distance)

    slit.currents_sigma = hel.estimate_sigmas_only(slit.currents, data.analoge_abberation)

    np.savetxt("data/single_slit" + str(slit_count) + ".data", np.transpose([slit.angles, slit.currents]), header='Angle, Current')
    make_table([np.array(slit.angles) * 1e3, np.array(slit.currents) * 1e6, np.array(slit.currents_sigma) * 1e6], "../table/single_slit" + str(slit_count) + ".tex", figures=[2, 2, 3])



if __name__ == "__main__":
    slit_count = 0
    runSingleSlits = False

    if runSingleSlits:
        for slit in data.single_slits:
            slit_count += 1
            fit_params, quality = mulitopt([slit.angles, slit.currents, single_slit_fit], 1e-6, 9e-4, 100, [10, 1e-10], slit.currents_sigma)
            info("Fit Quality %s", quality)
            info("Slit Size %s", fit_params[0])
            phi_offset = fit_params[2]
Example #23
from utility import (
    constant,
)
################################################ Finish importing custom libraries #################################################

###########################   a)   ############################
Z, I = np.genfromtxt('messdaten/a.txt', unpack=True)        # I in µA
I = I*1e-6                      # in A
U = np.array(range(320, 710, 10))
delta_t = 10                    # fix for a)
Z_err = np.sqrt(Z)              # Poisson distributed
Z_unc = unp.uarray(Z, Z_err)    # attach the uncertainties
N = Z_unc / delta_t             # count rate
delta_Q = I/N/constant('elementary charge')*1e-10                   # in 10^10 e

write('build/Tabelle_a.tex', make_table([U, Z_unc, N, I*1e6, delta_Q],[0, 1, 1, 1, 1]))
write('build/Tabelle_a_texformat.tex', make_full_table(
    caption = 'Messdaten für die Charakteristik des Zählrohrs.',
    label = 'table:a',
    source_table = 'build/Tabelle_a.tex',
    stacking = [1,2,4],              # careful: these numbers are the resulting column indices that should be rendered as multicolumns
    units = [r'$U \:/\: \si{\volt}$',
    r'$Z$',
    r'$N \:/\: \si{\per\second}$',
    r'$I \:/\: \si{\micro\ampere}$',
    r'$\Delta Q \:/\: 10^{10}\,\si{\elementarycharge}$']))


##### Fit ####
no_of_first_ignored_values = 3
no_of_last_ignored_values = 5
Example #24
# table of the measured data
Tiefe, Delta_f_45, Intensity_45 = np.genfromtxt("messdaten/stroemungsprofil45.txt", unpack=True)
Tiefe, Delta_f_70, Intensity_70 = np.genfromtxt("messdaten/stroemungsprofil70.txt", unpack=True)
error = 0.07
Delta_f_45_error = Delta_f_45 * error
Delta_f_45 = unp.uarray(Delta_f_45, Delta_f_45_error)
Delta_f_70_error = Delta_f_70 * error
Delta_f_70 = unp.uarray(Delta_f_70, Delta_f_70_error)
Intensity_45_error = Intensity_45 * error
Intensity_45 = unp.uarray(Intensity_45, Intensity_45_error)
Intensity_70_error = Intensity_70 * error
Intensity_70 = unp.uarray(Intensity_70, Intensity_70_error)

write(
    "build/Tabelle_messdaten.tex",
    make_table([Tiefe, Delta_f_45, Intensity_45, Delta_f_70, Intensity_70], [0, 1, 1, 1, 1]),
)
write(
    "build/Tabelle_messdaten_texformat.tex",
    make_full_table(
        "Messdaten zum Strömungsprofil.",
        "table:messdaten_b",
        "build/Tabelle_messdaten.tex",
        [1, 2, 3, 4],  # careful: these numbers are the resulting column indices
        # that should be rendered as multicolumns
        [
            r"$\text{Laufzeit} \:/\: \si{\micro\second}$",
            r"$\Delta f_{45\si{\percent}} \:/\: \si{\hertz}$",
            r"$I_{45\si{\percent}} \:/\: \si{\kilo\square\volt\per\second}$",
            r"$\Delta f_{70\si{\percent}} \:/\: \si{\hertz}$",
            r"$I_{70\si{\percent}} \:/\: \si{\kilo\square\volt\per\second}$",
Example #25
def f_dispersion_kHz(t):
    return c.f_dispersion(d.L, d.C1, t) * 1e-3

phase = []
for i in range(len(d.dispersion.phase)):
    phase.append((i + 1) * np.pi)  # each Lissajous figure is separated from the next by pi
    phase[-1] /= d.dispersion.glieder_anzahl

plot.plot(phase, d.dispersion.frequenz * 1e-3, f_dispersion_kHz,
          r"Phasenverschiebung in Radian", r"Anregungsfrequenz in kHz", "dispersion.pdf")


make_table((
    range(1, len(d.dispersion.phase) + 1),
    d.dispersion.frequenz * 1e-3,
    phase,
), "../table/dispersion.tex",
    figures=[1, 1, 2])


print("--- Plot Frequenz gegen Phasengeschwindigkeit ---")


def phase_theorie(f):
    return c.phasengeschwindigkeit_theorie(d.C1, d.L, f * 1e3)

x = d.dispersion.frequenz  # d.eigenfrequenzen_homogen is not needed - it is identical to this
y = c.phasengeschwindigkeit(x, phase)
plot.plot(x * 1e-3, y, phase_theorie,
          r"Anregungsfrequenz in kHz", "Phasengeschwindigkeit in Gliedern/s", "phasengesch.pdf")
Example #26
# known wavelengths of the helium spectral lines (the order must, of course,
# match the order in the file WinkelHelium.txt):
lambda_helium = np.array([438.8, 447.1, 471.3, 492.2,
                          501.6, 504.8, 587.6, 667.8, 706.5]) * 1e-9    # in m

# sine values for the plot and the fit
sin_phi_helium = np.array(np.sin(phi_helium))
# fit sin(phi) against lambda to determine g
params_gitterkonstante = ucurve_fit(
    reg_linear, sin_phi_helium, lambda_helium)

g, offset = params_gitterkonstante                  # g in m, offset is dimensionless
write('build/gitterkonstante.tex', make_SI(g * 1e9, r'\nano\meter', figures=1))
write('build/offset.tex', make_SI(offset * 1e9, r'\nano\meter', figures=1))
write('build/Tabelle_messdaten_kalium.tex', make_table([phi_kalium*180/np.pi],[1]))
write('build/Tabelle_messdaten_natrium.tex', make_table([phi_natrium*180/np.pi],[1]))
write('build/Tabelle_messdaten_rubidium.tex', make_table([phi_rubidium*180/np.pi],[1]))

##### PLOT linear regression #####
t_plot = np.linspace(np.amin(sin_phi_helium), np.amax(sin_phi_helium), 2)
plt.xlim(t_plot[0] - 1 / np.size(sin_phi_helium) * (t_plot[-1] - t_plot[0]),
         t_plot[-1] + 1 / np.size(sin_phi_helium) * (t_plot[-1] - t_plot[0]))
plt.plot(t_plot,
         reg_linear(t_plot, *noms(params_gitterkonstante))* 1e9,
         'b-', label='Fit')
plt.plot(sin_phi_helium,
         lambda_helium * 1e9,
         'rx', label='Messdaten')
plt.ylabel(r'$\lambda \:/\: \si{\nano\meter}$')
plt.xlabel(r'$\sin(\varphi)$')
Example #27
T_ohneB_roh = np.genfromtxt('Messdaten/OhneB.txt', unpack=True)
T_ohneB = ufloat(np.mean(T_ohneB_roh), np.std(T_ohneB_roh))
write('build/T_ohneB.tex', make_SI(T_ohneB, r'\second', figures=1))
T_mitB_roh = np.genfromtxt('Messdaten/MitB.txt', unpack=True)
T_mitB = ufloat(np.mean(T_mitB_roh), np.std(T_mitB_roh))
write('build/T_mitB.tex', make_SI(T_mitB, r'\second', figures=1))

T_10 = np.genfromtxt('Messdaten/I10.txt', unpack=True)
T_08 = np.genfromtxt('Messdaten/I08.txt', unpack=True)
T_06 = np.genfromtxt('Messdaten/I06.txt', unpack=True)
T_04 = np.genfromtxt('Messdaten/I04.txt', unpack=True)
T_02 = np.genfromtxt('Messdaten/I02.txt', unpack=True)
I=np.array([1,0.8,0.6,0.4,0.2])

write('build/Tabelle_Strom.tex', make_table([T_10,T_08,T_06,T_04,T_02],[3, 3, 3, 3, 3]))
# FULLTABLE
write('build/MagnetfeldPerioden_texformat.tex', make_full_table(
    'Periodendauern mit eingeschalteter Helmholtzspule.',
    'table:MagnetfeldPerioden',
    'build/Tabelle_Strom.tex',
    [],              # careful: these numbers are the resulting column indices
                     # that should be rendered as multicolumns
    [r'$T_{1,0\si{\ampere}} \:/\: \si{\second}$',
    r'$T_{0,8\si{\ampere}} \:/\: \si{\second}$',
    r'$T_{0,6\si{\ampere}} \:/\: \si{\second}$',
    r'$T_{0,4\si{\ampere}} \:/\: \si{\second}$',
    r'$T_{0,2\si{\ampere}} \:/\: \si{\second}$']))

write('build/aaa.tex', make_table([T_ohneB_roh, T_mitB_roh],[3, 3]))
# FULLTABLE
Example #28
print ('Parabel')
print (a_parabel)
a_parabel *= 0.5967

# b)
n = [1,2,3,4,5,6,7,8,9]
n_ungerade = [1,3,5,7,9,11,13,15,17]
U_Dreieck, Skala_Dreieck = np.genfromtxt('Messdaten/Dreieckspannung.txt', unpack=True)
U_Dreieck_Fehler = 0.1 * Skala_Dreieck
U_Dreieck *= Skala_Dreieck
U_Dreieck_ges = unp.uarray(U_Dreieck, U_Dreieck_Fehler)
U_Dreieck_normiert = U_Dreieck_ges / U_Dreieck_ges[0]
RelFehler_Dreieck = np.abs(U_Dreieck_normiert - a_Dreieck) / a_Dreieck * 100

write('build/Tabelle_Dreieck_1.tex', '1 & 2.8 & 0.1 & 1 & 0 & 1 & 0 & 0 \\\\')
write('build/Tabelle_Dreieck_2.tex', make_table([n_ungerade[1:],U_Dreieck_ges[1:],U_Dreieck_normiert[1:],a_Dreieck[1:], RelFehler_Dreieck[1:]],[0, 1, 1, 3, 1]))
write('build/Tabelle_Dreieck.tex', make_composed_table(['build/Tabelle_Dreieck_1.tex','build/Tabelle_Dreieck_2.tex']))
## FULLTABLE
write('build/FK_Dreieck_texformat.tex', make_full_table(
    'Messdaten, Theoriewerte und relativer Messfehler $f$ der normierten Fourierkoeffizienten: Dreieckspannung.',
    'table:FK_Dreieck',
    'build/Tabelle_Dreieck.tex',
    [1,2,4],
    ['$k$',
    r'$U_{k,\text{mess}} \:/\: \si{\volt}$',
    r'$\frac{U_{k,\text{mess}}}{U_{1,\text{mess}}}$',
    r'$\Abs{\frac{U_{k,\text{theo}}}{U_{1,\text{theo}}}}$',
    r'$f \:/\: \si{\percent}$']))


U_Rechteck, Skala_Rechteck = np.genfromtxt('Messdaten/Rechteckspannung.txt', unpack=True)
Example #29
#write('../tex-data/v.tex',
#      make_table([[drops[i][0] for i in range(len(drops))], [drops[i][1] for i #in range(len(drops))], [drops[i][2] for i in range(len(drops))], #[drops[i][3] for i in range(len(drops))]], [0, 1, 1, 1]))

fuck1 = []
fuck2 = []
fuck3 = []
fuck4 = []
for i in range(len(drops)):
    fuck1.append(drops[i][0])
    fuck2.append(drops[i][1]*10**(5))
    fuck3.append(drops[i][2]*10**(5))
    fuck4.append(drops[i][3]*10**(5))

print(fuck3)
write('../tex-data/v.tex',
      make_table([fuck1,fuck2,fuck3,fuck4], [0, 2, 2, 2]))

#print(drops)
#for i in range(len(drops)):
#    diff = drops[i][2]-drops[i][3]
#    print(diff, i)
#    if drops[i][1] != 0:
#        c = 2*drops[i][1] / diff
#        print("Difference:" ,c ,"Stelle:", i)
#
#for i in range(len(drops2)):
#    diff = drops2[i][2]-drops2[i][3]
#    print(diff, i)
#    if drops2[i][1] != 0:
#        c2 = 2*drops2[i][1] / diff
#        print("Difference:" ,c2 ,"Stelle:", i)
Example #30
File: Rechung.py, Project: Anjaaaa/AP-1516
from scipy.optimize import curve_fit
from uncertainties import ufloat
from scipy.constants import e
from table import (
    make_table,
    make_full_table,
    make_SI,
    write,)

U, c = np.genfromtxt('Messung1.txt', unpack = True)

t = 10  # measuring time of 10 seconds
Z = c / t  # count rate in counts/second
Z_fehler = np.sqrt(c)/t

write('build/tabelle_charakteristik.txt', make_table([U[:15], Z[:15], Z_fehler[:15], U[15:], Z[15:], Z_fehler[15:]], [2,2,2,2,2,2]))

plt.plot(U, Z, 'ko', label='Messwerte')
plt.errorbar(U, Z, xerr=1, yerr=Z_fehler, fmt='r.', label = r'Statistischer Fehler')
plt.legend(loc='best')
plt.xlabel(r'Spannung $U \ /\  \mathrm{V}$') 
plt.ylabel(r'Zählrate $Z \ /\ {\mathrm{Counts}}/{\mathrm{s}}$')
plt.xlim(0,750)
plt.savefig('build/charakteristik_gesamt.png')
plt.show()


def linear(x, m, b):
	return m*x+b
	
Example #31
File: Rechnung.py, Project: Anjaaaa/AP-1516
            )
plt.xlabel('Effektiver Abstand zwischen Detektor und Strahler x in cm')
plt.ylabel('$10^3$ Pulse pro 120s')
plt.ylim(30000,120000)
plt.xlim(0,2.5)
plt.legend(loc='lower left')
plt.savefig('build/pulse1.png')
plt.show()




write('build/reichweite1.txt', make_SI(reichweite1, r'\centi\meter', figures=2))
write('build/m1.txt', make_SI(m1, r'\per\centi\meter', figures=1))
write('build/b1.txt', make_SI(b1, r'', figures=1))
write('build/tabelle_messung1.txt', make_table([p1, puls1, x1], [0,0,2]))


######## the same again for measurement 2

parameters2, popt2 = curve_fit(linear, x2[12:], puls2[12:])
m2 = ufloat(parameters2[0], np.sqrt(popt2[0,0]))
b2 = ufloat(parameters2[1], np.sqrt(popt2[1,1]))

# determine the mean range (solve the linear equation)
reichweite2 = (max(puls2)/2 - b2)/m2
print(reichweite2)


x = np.linspace(0,3)
plt.gca().yaxis.set_major_formatter(mpl.ticker.FuncFormatter(lambda x,_:x*10**(-3)))
Example #32
    plt.savefig('build/plot_ds.pdf')


####################### WRITING RESULT TABLES #######################
slits = ['klein', 'mittel', 'groß', 'doppel']
b = np.array([b_k, b_m, b_g, b_ds])                             # in m
b_hst = np.array([b_k_hst, b_m_hst, b_g_hst, b_ds_hst])         # in m
b_err = np.abs(((b-b_hst) / b_hst))                             # relative error
A0 = np.array([A0_k, A0_m, A0_g, A0_ds])                        # in A/m
s_err = np.abs(np.array([(s_ds - s_ds_hst) / s_ds_hst]))        # relative error
s_mic = np.array([s_ds_mic])
s = np.array([s_ds])
s_hst = np.array([s_ds_hst])


write('build/Tabelle_results.tex', make_table([slits, zeta_0*1e3, A0, b_microscope*1e3, b*1e3, b_hst*1e3, b_err*1e2], [0, 2, 1, 2, 1, 2, 1]))
write('build/Tabelle_results_texformat.tex', make_full_table(
    caption = r'Herstellerangaben, Mikroskopmessungen, Fitparameter und der Fehler zwischen Fit und Herstellerangabe für die Spaltbreite $b$.',
    label = 'table:A1',
    source_table = 'build/Tabelle_results.tex',
    stacking = [2,4,6],
    units = [
    'Spalt',
    r'$\zeta_0 \:/\: \si{\milli\meter}$',
    r'$A_0 \:/\: \si{\ampere\per\meter}$',
    r'$b_\text{mic} \:/\: \si{\milli\meter}$',
    r'$b_\text{mess} \:/\: \si{\milli\meter}$',
    r'$b_\text{hst} \:/\: \si{\milli\meter}$',
    r'$|\varepsilon_b| \:/\: \si{\percent}$']))

write('build/Tabelle_results_s.tex', make_table([s_mic*1e3, s*1e3, s_hst*1e3, s_err*1e2], [2, 1, 2, 1]))
Example #33
File: Rechnung.py, Project: Anjaaaa/AP-1516
plt.xlabel('$t / s$')
plt.ylabel('$N / 10s$ ')
plt.yscale('log')

plt.xlim([0,430])
plt.savefig('build/SilberAddiert.png')
plt.show()



write('build/Brom.tex', make_SI(ufloat(paramsBrom[0],np.sqrt(poptBrom[0,0])), r'\per\second', figures=1))
write('build/SilberKurz.tex', make_SI(ufloat(paramSK[0],np.sqrt(poptSK[0,0])), r'\per\second', figures=1))
write('build/SilberLang.tex', make_SI(ufloat(paramSL[0],np.sqrt(poptSL[0,0])), r'\per\second', figures=1))
write('build/BromB.tex', make_SI(ufloat(paramsBrom[1],np.sqrt(poptBrom[1,1])), r'', figures=1))
write('build/SilberKurzB.tex', make_SI(ufloat(paramSK[1],np.sqrt(poptSK[1,1])), r'', figures=1))
write('build/SilberLangB.tex', make_SI(ufloat(paramSL[1],np.sqrt(poptSL[1,1])), r'', figures=1))



Silber = unp.uarray(np.zeros(len(SilberAnf)), np.zeros(len(SilberAnf)))
Brom = unp.uarray(np.zeros(len(BromAnf)), np.zeros(len(BromAnf)))

for i in range(0, len(SilberAnf)):
   Silber[i] = ufloat(SilberAnf[i], np.sqrt(SilberAnf[i]))
for i in range(0, len(BromAnf)):
   Brom[i] = ufloat(BromAnf[i], np.sqrt(BromAnf[i]))


write('build/Werte.tex', make_table([Silber,Brom],[1,1]))

Example #34
                                n=Nreps,
                                processes=nproc))
        summary_table_content.update({"solve": elapsed_time_solve})

    if args.serial or run_all:
        elapsed_time = execute_n_times(solve_serial, solver_args, n=Nreps)
        summary_table_content.update({"serial": elapsed_time})

    if args.map_serial or run_all:
        solver_args_map_serial = solver_args + [True]
        elapsed_time_map_serial = execute_n_times(solve_w_pool,
                                                  solver_args_map_serial,
                                                  n=Nreps)
        summary_table_content.update({"map-serial": elapsed_time_map_serial})

    table = make_table(summary_table_content, nproc_range)
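    # note (editor's reading): summary_table_content maps a run label ("solve", "serial",
    # "map-serial", ...) to its measured times; make_table presumably renders one row per
    # label across nproc_range.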

    if args.serial or run_all:
        with open("scaling_serial.txt", "w") as f:
            f.write(" ".join((f"{numvar:.3f}" for numvar in elapsed_time)))

    if args.map_serial or run_all:
        with open("scaling_map_serial.txt", "w") as f:
            f.write(" ".join(
                (f"{numvar:.3f}" for numvar in elapsed_time_map_serial)))

    if args.sharedarray or run_all:
        with open("scaling_sharedarray.txt", "w") as f:
            np.savetxt(f,
                       np.array(elapsed_time_sharedarray),
                       fmt="%.3f",
Example #35
print(m_K)
x = []
i = 0  # loop index (not initialised in the original excerpt)
while i < np.size(m_K):
    x.append(S(c_W, m_w[i], c_g_m_g, T_M[i], T_W[i], m_K[i], T_K[i]))
    # np.append(x, S(c_W, m_w[i], c_g_m_g, T_M[i], T_W[i], m_K[i], T_K[i]))
    print(m_K[i], m_w[i], T_K[i], T_W[i], T_M[i], c_W, c_g_m_g)
    print(x)
    # write('build/Waermekapazitaeten_Blei['+str(i)+'].txt',str(x))
    i += 1
print(np.std(x))
print(np.mean(x))
print(x)
print(x[0])
c_k = ufloat(np.mean(x), np.std(x))
write("build/Waermekapazitaeten_Blei_gemittelt.tex", make_SI(c_k * 1e3, r"\joule\per\kelvin\per\kilogram"))
write("build/blei_tabelle.tex", make_table([m_K, m_w, T_K, T_W, T_M], [0, 0, 1, 1, 1]))
write("build/Waermekapazitaeten_Blei.tex", str(x[0]) + "&" + str(x[1]) + "&" + str(x[2]))
write("build/Waermekapazitaeten_Blei.txt", str(x[0]) + " " + str(x[1]) + " " + str(x[2]))

# aluminium
m_K, m_w, T_K, T_W, T_M = np.genfromtxt("Messdaten/alu_messung.txt", unpack=True)
c_W = np.genfromtxt("Messdaten/spezifischen_Waermekapazitaet.txt", unpack=True)
c_g_m_g = np.genfromtxt("Messdaten/Waermekapazitaet.txt", unpack=True)

print(m_K, m_w, T_K, T_W, T_M, c_W, c_g_m_g)
x = S(c_W, m_w, c_g_m_g, T_M, T_W, m_K, T_K)
c_k = x
print(x)
print(c_k)
# write('build/Waermekapazitaeten_Alu.tex', make_SI(c_k)*1e3, r'\joule\per\kelvin\per\gram' ))
write("build/Waermekapazitaeten_Alu.tex", str(c_k * 1e3))
Example #36
# b)
R_ap_exp = 3.3
print('Aufgabenteil b')
print(L)
print(C)


R_ap = 2*unp.sqrt(L/C)
R_ap *= 1e-3 # in kilo ohm
print('R_ap')
print(R_ap)
L *=1e3
C *=1e9
div = R_ap - R_ap_exp

write('build/Tabelle_aufgabenteil_b_gegeben.tex', make_table([ L, C, R_1],[1,1,1]))
write('build/R_ap_exp.tex', make_SI(R_ap_exp, r'\kilo\ohm', figures=2))
write('build/R_ap.tex', make_SI(R_ap[0], r'\kilo\ohm', figures=1))
write('build/R_div.tex', make_SI((R_ap[0]-R_ap_exp), r'\kilo\ohm', figures=1))


############c)


fre, fre_err, abst, abst_err, U_c, U_c_err, U_er, U_er_err  = np.genfromtxt('Messdaten/d.txt', unpack=True)

fre_ges = unp.uarray(fre, fre_err)
abst_ges = unp.uarray(abst, abst_err)
U_c_ges = unp.uarray(U_c, U_c_err)
U_er_ges = unp.uarray(U_er, U_er_err)
write('build/Tabelle_aufgabenteil_c_Messdaten.tex', make_table([ fre_ges, abst_ges, U_c_ges, U_er_ges],[1,1,1,1]))
Example #37
def main():
    print_options()
    choice = get_input()

    # global variables
    start_year = 0
    end_year = 0

    # recursively process all e statements
    if choice == '1':
        statement_pattern = r'\d+X+\d+-20\d{2}-\d{2}-\d{2}-20\d{2}-\d{2}-\d{2}.pdf$'
        statement_paths = Path('.').rglob('*.pdf')
        statements = []

        # rename statements
        for path in statement_paths:
            if search(statement_pattern, str(path.name)):
                rename_file(path)
                statements.append(path)

        if len(statements) < 1:
            print('could not find any e statements...')
            os.system("pause")
            exit(0)

        start_year = int(statements[0].name.split('-')[1])
        end_year = int(statements[len(statements) - 1].name.split('-')[1])
        years = []
        for i in range(start_year, end_year):
            years.append(i)

        data = prepare_data_json(start_year, end_year)

        # open statements and write to csv file
        csv_file = 'all-transactions.csv'
        csv_header = 'id, account , date, description, category, amount\n'
        f = open(csv_file, 'w')
        f.write(csv_header)
        for statement in progressBar(statements,
                                     prefix='Progress:',
                                     suffix='Complete',
                                     length=50):
            if len(statement.name) == 38:
                get_debit_data(statement, f, data)
            if len(statement.name) == 42:
                get_credit_data(statement, f, data)
        f.close()

        # give transactions an id
        add_transaction_id()
        # round amounts to two decimal places
        for i in data['category']:
            for j in data['category'][i]:
                data['category'][i][j] = round(data['category'][i][j], 2)

        for i in data['month']:
            for j in data['month'][i]:
                data['month'][i][j] = round(data['month'][i][j], 2)
    # process all-transactions.csv
    elif choice == '2':
        if not os.path.isfile('all-transactions.csv'):
            print('did not find all-transactions.csv')
            os.system("pause")
            exit(0)

        with open("all-transactions.csv", 'r') as csv_file:
            csv_reader = csv.reader(csv_file, delimiter=',')
            rows = list(csv_reader)
            start_year = int(rows[1][2].split('-')[0])
            end_year = int(rows[len(rows) - 1][2].split('-')[0])
            data = prepare_data_json(start_year, end_year)
            line_count = 0
            year = 0
            month = ''
            amount = 0
            for row in progressBar(rows,
                                   prefix='Progress:',
                                   suffix='Complete',
                                   length=50):
                if line_count == 0:
                    line_count += 1
                else:
                    year = row[2].split('-')[0]
                    month = get_month_name(row[2].split('-')[1])
                    amount = float(row[5])
                    data['category'][row[4]][int(year)] = amount
                    data['month'][month][int(year)] = amount
                    line_count += 1
    # save data to json file
    save_data_json(data)
    # prepare data.js variables for visualization
    make_data_js(start_year, end_year)
    # convert all-transactions.csv to html table
    make_table()