Example No. 1
def col_dens_hc3n_err(Tex, tau, dtex, dtau):
    """
       """

    #EJ is EJ/k
    Ej = 19.6484
    R = 1
    c = 2.998e10  #cm s-1 (speed of light)
    pi = 3.1415927
    k = 1.38 * 1e-16  #erg K-1
    h = 6.626 * 1e-27  #erg s
    nu = 91.199796e9
    Tbg = 2.73

    Tex = unumpy.uarray(Tex, dtex)
    tau = unumpy.uarray(tau, dtau)

    Qrot = ((k * Tex) / (h * 4.5490586e9)) + 1 / 3.
    gu = 21.0
    aul = 58.13e-6

    a1 = (8 * pi * nu**3) / ((c**3) * R)
    a2 = Qrot / (gu * aul)
    a3 = unumpy.exp(Ej / Tex) / (1 - unumpy.exp(-h * nu / (k * Tex)))
    a4 = 1 / (jota2(Tex, nu) - jota2(Tbg, nu))

    a5 = tau  #where tau=integral (tau dv)

    return a1 * a2 * a3 * a4 * a5
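A minimal usage sketch (not from the original project), assuming it runs in the same module, where uncertainties.unumpy is imported as unumpy and jota2(T, nu) is defined; the input values are purely illustrative:

import numpy as np
from uncertainties import unumpy

# excitation temperatures and integrated opacities with their 1-sigma errors (hypothetical)
Tex, dtex = np.array([10.0, 12.5]), np.array([1.0, 1.5])   # K
tau, dtau = np.array([2.0, 3.0]), np.array([0.2, 0.3])     # integral(tau dv)

N_hc3n = col_dens_hc3n_err(Tex, tau, dtex, dtau)
print(unumpy.nominal_values(N_hc3n))  # column densities
print(unumpy.std_devs(N_hc3n))        # propagated 1-sigma uncertainties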
Example No. 2
def col_dens_cch_err(Tex, tau, dtex, dtau, optthin=False):
    """
        C2H(1-0)
        see col_dens_cch
        """
    #EJ is EJ/k
    Ej = 0.00216
    nu = 87.316925e9
    R = 5 / 12.
    c = 2.998e10  #cm s-1 (speed of light)
    pi = 3.1415927
    k = 1.38 * 1e-16  #erg K-1
    h = 6.626 * 1e-27  #erg s
    Tbg = 2.73

    Tex = unumpy.uarray(Tex, dtex)
    tau = unumpy.uarray(tau, dtau)

    Qrot = ((k * Tex) / (h * 43.674518e9)) + 1 / 3.
    gu = 5.0
    aul = 1.52757e-6

    a1 = (8 * pi * (nu**3)) / ((c**3) * R)
    a2 = Qrot / (gu * aul)
    a3 = unumpy.exp(Ej / Tex) / (1 - unumpy.exp(-h * nu / (k * Tex)))
    if optthin:
        a4 = tau / (
            jota2(Tex, nu) - jota2(Tbg, nu)
        )  # assuming optically thin; probably best to compute tau from the hyperfine fit

    else:
        a4 = tau  #where tau=integral (tau dv)

    return a1 * a2 * a3 * a4
Example No. 3
def unfunc(x, a, b, c):
    lock_effect = lock * np.heaviside(
        x - lock_start, 0.5) * (x - lock_start) + amp * (unp.cos(
            (x - ph) * (1 / 365) * math.pi * 2) - 1)
    lock_effect += np.exp(-vacc_const * (x**2))
    return a * unp.exp((b) * x + lock_effect) + c * unp.exp(
        (b + mut_effect) * x + lock_effect)
Example No. 4
def col_dens_hnco_err(Tex, tau, dtex, dtau):
    """
        Returns the column density (Garden et
        al 1991) From sanhueza 2012, and references therein
        Tex: Excitation temperature
        tau=integral of the opacity (\int tau dv)
        nu=frequeancy in Hz
        Asumin R=1 which is diferent only for molecules with hyperfine structure
        """
    #EJ is EJ/k
    nu = 87.925252e9
    Ej = 6.32957
    R = 1
    c = 2.998e10  #cm s-1 (speed of light)
    pi = 3.1415927
    k = 1.38 * 1e-16  #erg K-1
    h = 6.626 * 1e-27  #erg s
    Tbg = 2.73

    Tex = unumpy.uarray(Tex, dtex)
    tau = unumpy.uarray(tau, dtau)

    Qrot = unumpy.sqrt((pi * ((Tex * k)**3)) /
                       ((h**3) * 918.417805e9 * 11.071010e9 * 10.910577e9))
    gu = 9.0
    aul = 8.78011e-6

    a1 = (8 * pi * (nu**3)) / ((c**3) * R)
    a2 = Qrot / (gu * aul)
    a3 = unumpy.exp(Ej / Tex) / (1 - unumpy.exp(-(h * nu) / (k * Tex)))
    a4 = 1 / (jota2(Tex, nu) - jota2(Tbg, nu))

    a5 = tau  #where tau=integral (tau dv)

    return a1 * a2 * a3 * a4 * a5
Example No. 5
def countRR(orig_df, mu_df, **kwargs):
    """
    Assumption: One does not measure an isotope after 10 half-lives.
    Therefore we calculate delta_t and compare it with half-lives.
    If delta_t < 10HL => select df[Half-life] > 0.1 delta_t
    """
    AVOGADRO = float(6.02214076e+23)

    orig_df["Half-life [s]"] = orig_df[["Half-life [s]", "sigm_Half-life [s]"
                                        ]].apply(uncert_series, axis=1)
    orig_df["sigm_Area"] = 0.01 * orig_df["Area"] * orig_df["%err"]
    orig_df["Area"] = orig_df[["Area", "sigm_Area"]].apply(uncert_series,
                                                           axis=1)
    orig_df["Ig"] = orig_df[["Ig", "sigm_Ig"]].apply(uncert_series, axis=1)

    irr_start = pd.to_datetime(kwargs["irradiation_start"], dayfirst=True)
    t_irr = parse_time_unc(kwargs["irradiation_time"])
    acq_started = pd.to_datetime(orig_df["Acquisition Started"][0],
                                 dayfirst=True)
    delta_t = (acq_started - irr_start).total_seconds() - t_irr
    T_LOW = 0.1 * delta_t
    T_HIGH = 6e+6

    df = orig_df[(orig_df["Half-life [s]"] > T_LOW)
                 & (orig_df["Half-life [s]"] < T_HIGH)]
    lines_df = orig_df[orig_df["E_tab"].isna()]
    df_high = orig_df[(orig_df["Half-life [s]"] >= T_HIGH)]

    mu = get_mu_col(df["Energy"], mu_df)
    mu = mu.astype(np.float64)

    rho = ufloat_fromstr(kwargs['foil_material_rho'])
    d = ufloat_fromstr(kwargs['foil_thickness'])
    k = (mu * rho * d) / (1 - unp.exp(-mu * rho * d))

    lam = np.log(2) / df["Half-life [s]"]

    mass = ufloat_fromstr(kwargs['foil_mass'])
    molar_mass = ufloat_fromstr(kwargs['foil_material_molar_mass'])

    N = mass * AVOGADRO / molar_mass

    real_time = lines_df['Real Time'][0]
    live_time = lines_df['Live Time'][0]

    nom = ((real_time / live_time) * k * lam * df["Area"])
    denom = (N * (1 - unp.exp(-lam * t_irr)) * unp.exp(-lam * delta_t) *
             (1 - unp.exp(-lam * real_time)) * df["eps"] * df["Ig"])

    df["RR"] = nom / denom

    df["RR_fiss_prod"] = (2 / df["fiss_yield"]) * df["RR"]
    print("Reaction rates counted successfully.")
    # DataFrame.append was removed in pandas 2.0; concat is the equivalent
    df = pd.concat([df, lines_df, df_high])
    df = df.sort_values(by=["Energy", "Channel", "Ig [%]"],
                        ascending=[True, True, False])
    return df
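The self-absorption factor k above combines plain floats (the mu column) with ufloats (rho, d); a standalone sketch of that step with illustrative numbers (not taken from the original analysis):

import numpy as np
from uncertainties import ufloat
from uncertainties import unumpy as unp

mu = np.array([0.07, 0.05])    # mass attenuation coefficients from the mu table (hypothetical)
rho = ufloat(8.9, 0.1)         # foil density, g cm-3 (hypothetical)
d = ufloat(0.01, 0.001)        # foil thickness, cm (hypothetical)

k = (mu * rho * d) / (1 - unp.exp(-mu * rho * d))
print(unp.nominal_values(k), unp.std_devs(k))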
Example No. 6
def Gompertz(x, xp):
    if certain:
        y_pred = np.exp(-x[2] * xp) * x[1] * -1
        y_pred = x[0] * np.exp(y_pred)
    else:
        y_pred = unp.exp(-x[2] * xp) * x[1] * -1
        y_pred = x[0] * unp.exp(y_pred)
    return y_pred
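Gompertz and the sibling growth-curve functions below (ExtremeValue, logistic_2, Weibull, etc.) pick np or unp based on a module-level flag named certain; a rough usage sketch under the assumption that the call happens in that same module (parameters are illustrative, not from the original fits):

import numpy as np
from uncertainties import ufloat

xp = np.linspace(0.0, 10.0, 5)

certain = True                                   # plain-float parameters
print(Gompertz([5.0, 2.0, 0.3], xp))

certain = False                                  # parameters that carry uncertainties
print(Gompertz([ufloat(5.0, 0.2), ufloat(2.0, 0.1), ufloat(0.3, 0.02)], xp))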
Example No. 7
def ExtremeValue(param, xp):
    if certain:
        y_pred = -np.exp(param[2] + param[1] * xp)
        y_pred = param[0] * (1 - np.exp(y_pred))
    else:
        y_pred = -unp.exp(param[2] + param[1] * xp)
        y_pred = param[0] * (1 - unp.exp(y_pred))
    return y_pred
Example No. 8
def f_unc(x, A1, x01, sig1, A2, x02, sig2, offset):
    """
    similar to the raw function call, but uses unp instead of np for uncertainties calculations.
    :return:
    """
    return offset + A1 * unp.exp(-(x - x01)**2 /
                                 (2 * sig1**2)) + A2 * unp.exp(-(x - x02)**2 /
                                                               (2 * sig2**2))
Example No. 9
def logistic_2(param, xp):
    if certain:
        y_pred = -1 + ((1 + param[0]) * np.exp(param[1] * xp)) / (
            param[0] + np.exp(param[1] * xp))
    else:
        y_pred = -1 + ((1 + param[0]) * unp.exp(param[1] * xp)) / (
            param[0] + unp.exp(param[1] * xp))
    return y_pred
Example No. 10
def OH_from_TeO3(fluxes, Te=None):
    ''' Again, translated from Miguel's code.
    '''
    if Te is None:
        Te = Te_O3(fluxes['OIII4363'], fluxes['OIII4959'],
                fluxes['OIII5007'], fluxes['Hbeta'])
    if 'Te4' not in Te.columns:
        Te['Te4'] = Te['Te'] * 1.e4
    Oplus = 7.34e-7 * (fluxes['OII3726'] + fluxes['OII3729'])/fluxes['Hbeta']\
        * unp.exp(3.9/Te['Te4'])
    O2plus = (1.3e-6 / Te['Te4']**0.38) * unp.exp(2.9/Te['Te4'])\
        * 4.2 * fluxes['OIII4959']/fluxes['Hbeta']
    return 12. + unp.log10(Oplus + O2plus)
Example No. 11
 def calculate_convoled_population(t):
     """
     Calculates the population at any given time t when a non-flash irradiation schedule is used,
     generated using irradiation duration a={} seconds
     """.format(a)
     vector_uncollapsed = ary([
         +unpy.exp(
             -ary([l * np.clip(t - a, 0, None) for l in decay_constants])),
         -unpy.exp(-ary([l * np.clip(t, 0, None) for l in decay_constants]))
     ],
                              dtype=object)
     vector = np.sum(vector_uncollapsed, axis=0)
     return premultiplying_factor * (multiplying_factors @ vector)
Example No. 12
def col_dens_gralmol_err(Qrot,
                         gu,
                         aul,
                         Ej,
                         Tex,
                         tau,
                         nu,
                         dtex,
                         dtau,
                         R=1,
                         nutb=True):
    """
        Returns the column density of a linear , rigid roto molecule (Garden et
        al 1991) From sanhueza 2012, and references therein
        Qrot: Partition function
        gu: Statistical weight of the upper level
        aul: Eistein coefficient for spontaneous emission
        Tex: Excitation temperature
        R=relative intensity of the brightest hyperfine transition, it is 1 if the moel dont have hyperfine. N2H+=5/9. ; C2H=5/12.
        tau=integral of the opacity (\int tau dv)
        nu=frequeancy in Hz
        nutb=True mean use the case of optically thin assumption .
        """

    #EJ is EJ/k; R is taken from the argument and no longer overridden here
    c = 2.998e10  #cm s-1 (speed of light)
    pi = 3.1415927
    k = 1.38 * 1e-16  #erg K-1
    h = 6.626 * 1e-27  #erg s
    Tbg = 2.73

    Tex = unumpy.uarray(Tex, dtex)
    tau = unumpy.uarray(tau, dtau)

    a1 = (8 * pi * nu**3) / ((c**3) * R)
    a2 = Qrot / (gu * aul)
    a3 = unumpy.exp(Ej / Tex) / (1 - unumpy.exp(-h * nu / (k * Tex)))

    if nutb:
        a4 = tau * (1 / (jota2(Tex, nu) - jota2(Tbg, nu))
                    )  # where tau = integral (Tb dv)

    else:
        a4 = tau  #where tau=integral (tau dv)

    return a1 * a2 * a3 * a4
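As a cross-check (not code from the original source), the general routine can reproduce Example No. 1 by passing the HC3N constants explicitly, again assuming it runs in the module where unumpy and jota2 are defined; note that with Qrot built from the nominal Tex, the Tex error is not propagated through the partition function, unlike in col_dens_hc3n_err:

import numpy as np

k_B, h_pl = 1.38e-16, 6.626e-27                  # erg K-1, erg s
Tex, dtex = np.array([10.0]), np.array([1.0])    # illustrative values
tau, dtau = np.array([2.0]), np.array([0.2])

Qrot = (k_B * Tex) / (h_pl * 4.5490586e9) + 1 / 3.   # HC3N partition function (nominal Tex)
N = col_dens_gralmol_err(Qrot, gu=21.0, aul=58.13e-6, Ej=19.6484,
                         Tex=Tex, tau=tau, nu=91.199796e9,
                         dtex=dtex, dtau=dtau, R=1, nutb=True)
print(N)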
Example No. 13
def get_oddsratio_file(datanum, iteration):
    lls, ells = scale_ll(datanum, iteration)
    lls = unp.uarray(lls, ells)

    # compute odd ratios
    numerator, denominator = np.zeros(3), np.zeros(3)
    median_ratio, sigma_ratio = np.zeros(3), np.zeros(3)
    for i in range(3):
        numerator[i], denominator[i] = i + 1, i
        try:
            log10_oddsratio = unp.log10(unp.exp(lls[i + 1] - lls[i]))
        except ValueError:
            log10_oddsratio = unp.uarray(-np.inf, np.nan)
        median_ratio[i], sigma_ratio[i] = unp.nominal_values(log10_oddsratio), \
                                          unp.std_devs(log10_oddsratio)

    # create odds ratio files
    f = open(
        'Cloutier/TimeSeriesCV/oddsratio_%.4d%s.txt' % (datanum, iteration),
        'w')
    g = '# Number of lnL evaluations, planet model denominator, planet model numerator, mode log10(odds ratio), median log10(odds ratio), minus 2 sigma (not used), minus 1 sigma, plus 1 sigma, plus 2 sigma (not used)'
    for i in range(3):
        g += '\n1.6E+07,%i,%i,%.6f,%.6f,NaN,%.6f,%.6f,NaN' % (
            denominator[i], numerator[i], median_ratio[i], median_ratio[i],
            sigma_ratio[i], sigma_ratio[i])
    g = g.replace('nan', 'NaN')
    f.write(g)
    f.close()
def fit_stretched_exp(x, y, yerr, xlong):
    start = time.time()
    p0 = (-1.5, 100, 0.15, 2)
    p, cov = curve_fit(func_stretched_exp, x, y, p0=p0, sigma=yerr, maxfev=100000)
    print(p)
    perr = np.sqrt(np.diag(cov))    

    yfit = func_stretched_exp(x, p[0], p[1], p[2], p[3])
    resid = y-yfit
    dof = y.size - len(p)
    chi2 = np.sum((resid/yerr)**2)
    chi2red = chi2/dof
    p_val = 1 - stats.chi2.cdf(chi2,dof)
    yfit = func_stretched_exp(xlong, p[0], p[1], p[2], p[3])
    p = unumpy.uarray(p,perr)
    extrapolate_val = p[0]*unumpy.exp(-(extrapolate_times/p[1])**p[2])+p[3] 

    if p[2]>0:
        extrap_inf = p[3]
    elif p[2]<0:
        extrap_inf = p[0]+p[3]

    end = time.time()
    elapsed = end-start
    return (chi2red, resid, extrapolate_val, extrap_inf, 1, unumpy.nominal_values(p),
            unumpy.std_devs(p), yfit, elapsed, p_val)
Example No. 15
def NegativeExponential(param, xp):
    if certain:
        y_pred = param[0] * (1 - np.exp(-param[1] * (-param[2] + xp)))
    else:
        y_pred = param[0] * (1 - unp.exp(-param[1] * (-param[2] + xp)))

    return y_pred
Example No. 16
def correct_attentuation(scan_list):
    """
    Correct the attentuation level between a a series of elements in lists.

    Args:
        scans (:py:attr:`list` of :py:class:`islatu.refl_data.Scan`):
            Reflectometry scans.

    Returns:
        :py:attr:`list` of :py:class:`islatu.refl_data.Scan`: Reflectometry scans with attenuation corrected.
    """
    for i in range(len(scan_list) - 1):
        overlap_start = scan_list[i + 1].q[0].n
        overlap_end = scan_list[i].q[-1].n
        if overlap_start > overlap_end:
            warnings.warn('Using extrapolation to correct attenuation between '
                          'scans {} and {}. Please double check these '
                          'results.'.format(scan_list[i].file_path,
                                            scan_list[i + 1].file_path))
            overlap_start_index = -2
            overlap_end_index = 1
        else:
            overlap_start_index = np.argmin(
                np.abs(scan_list[i].q - overlap_start))
            overlap_end_index = np.argmin(
                np.abs(scan_list[i + 1].q - overlap_end))
        res = linregress(
            unp.nominal_values(scan_list[i].q[overlap_start_index:]),
            np.log(unp.nominal_values(scan_list[i].R[overlap_start_index:])))
        target_r = unp.exp(scan_list[i + 1].q[:overlap_end_index + 1] *
                           res.slope + res.intercept)
        vary_r = scan_list[i + 1].R[:overlap_end_index + 1]
        ratio = target_r.mean() / vary_r.mean()
        scan_list[i + 1].R *= ratio
    return scan_list
Example No. 17
 def evaluate(energy, index, amplitude, reference, ecut):
     pwl = amplitude * (energy / reference) ** (-index)
     try:
         cutoff = np.exp((reference - energy) / ecut)
     except AttributeError:
         from uncertainties.unumpy import exp
         cutoff = exp((reference - energy) / ecut)
     return pwl * cutoff
Example No. 18
 def evaluate(energy, index, amplitude, reference, ecut):
     pwl = amplitude * (energy / reference)**(-index)
     try:
         cutoff = np.exp((reference - energy) / ecut)
     except AttributeError:
         from uncertainties.unumpy import exp
         cutoff = exp((reference - energy) / ecut)
     return pwl * cutoff
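Example No. 17 and Example No. 18 (and the similar evaluate functions further down) rely on np.exp raising an exception for uncertainties inputs; since the exception raised for such inputs depends on the numpy version, an explicit type check in the spirit of Example No. 24 below is a more defensive way to express the same dispatch. A sketch, not part of the original package:

import numpy as np
import uncertainties as unc
from uncertainties import unumpy as unp

def exp_any(x):
    # exp() for plain numbers/arrays and for uncertainties scalars/arrays alike
    flat = np.atleast_1d(x).tolist()
    if any(isinstance(v, unc.UFloat) for v in flat):
        return unp.exp(x)
    return np.exp(x)

# cutoff = exp_any((reference - energy) / ecut) would then replace the try/except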
Example No. 19
def Weibull(param, xp):
    if certain:
        y_pred = -param[1] * (xp**param[2])
        y_pred = param[0] * (1 - np.exp(y_pred))
    else:
        y_pred = -param[1] * (xp**param[2])
        y_pred = param[0] * (1 - unp.exp(y_pred))
    return y_pred
Example No. 20
def logistic(x, xp):
    if certain:
        y_pred = np.exp(-x[1] * (-x[2] + xp))
        y_pred = x[0] / (1 + y_pred)
    else:
        y_pred = unp.exp(-x[1] * (-x[2] + xp))
        y_pred = x[0] / (1 + y_pred)
    return y_pred
Example No. 21
def vonBertalanffy(x, xp):
    if certain:
        y_pred = 1 - np.exp(-x[1] * (-x[2] + xp))
        y_pred = x[0] * (y_pred**3)
    else:
        y_pred = 1 - unp.exp(-x[1] * (-x[2] + xp))
        y_pred = x[0] * (y_pred**3)
    return y_pred
Example No. 22
def monomolecular(x, xp):

    if certain:
        y_pred = np.exp(-x[1] * xp)
        y_pred = x[0] * (1 - x[2] * y_pred)
    else:
        y_pred = unp.exp(-x[1] * xp)
        y_pred = x[0] * (1 - x[2] * y_pred)
    return y_pred
Example No. 23
 def evaluate(energy, index, amplitude, reference, lambda_):
     """Evaluate the model (static function)."""
     pwl = amplitude * (energy / reference)**(-index)
     try:
         cutoff = np.exp(-energy * lambda_)
     except AttributeError:
         from uncertainties.unumpy import exp
         cutoff = exp(-energy * lambda_)
     return pwl * cutoff
Example No. 24
def exp_func(x, c):
    """
    An exponential decay function:
    f(x) = exp(-c*x)
    """
    if isinstance(c, unc.UFloat):
        return unp.exp(-c * x)
    else:
        return np.exp(-c * x)
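A short usage sketch (values are illustrative):

import numpy as np
from uncertainties import ufloat

x = np.linspace(0.0, 5.0, 6)
print(exp_func(x, 0.3))                # plain float -> np.exp branch
print(exp_func(x, ufloat(0.3, 0.05)))  # UFloat -> unp.exp branch with error propagation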
Example No. 25
 def calculate_population(t):
     vector = (
         unpy.exp(-ary(decay_constants) * max(0, t))
         # -unpy.exp(-ary(decay_constants)*(c-a))
         # -unpy.exp(-ary(decay_constants)*b)
         # +unpy.exp(-ary(decay_constants)*(b-a))
     )
     return premultiplying_factor * (multiplying_factors @ vector
                                     )  # sum across rows
Example No. 26
def ChapmanRichards(x, xp):

    if certain:
        y_pred = 1 - np.exp(-x[1] * xp)
        y_pred = x[0] * (y_pred**x[2])
    else:
        y_pred = 1 - unp.exp(-x[1] * xp)
        y_pred = x[0] * (y_pred**x[2])
    return y_pred
Example No. 27
 def evaluate(energy, index, amplitude, reference, lambda_):
     """Evaluate the model (static function)."""
     pwl = amplitude * (energy / reference) ** (-index)
     try:
         cutoff = np.exp(-energy * lambda_)
     except AttributeError:
         from uncertainties.unumpy import exp
         cutoff = exp(-energy * lambda_)
     return pwl * cutoff
def func_err_prop(func, x, p):
    if func == func_log:
        #        return p[0]*unumpy.log((extrapolate_times + 1 +p[1])) + p[2]
        return p[0] * unumpy.log(extrapolate_times / p[1] + 1) + p[2]
    if func == func_power:
        return p[0] * (extrapolate_times + p[2])**p[1]
    if func == func_stretched_exp:
        return p[0] * unumpy.exp(-(extrapolate_times / p[1])**p[2]) + bo
    else:
        sys.exit("Error")
Example No. 29
def f_unc(x, k, weight):
    """
    similar to the raw function call, but uses unp instead of np for uncertainties calculations.
    :return:
    """
    term = 1
    # calculate the term k^x / x!. Can't do this directly, x! is too large.
    for n in range(0, int(x)):
        term *= k / (x - n) * unp.exp(-k / int(x))
    return term * weight
Example No. 30
 def evaluate(energy, amplitude, reference, ecut, index_1, index_2):
     pwl = amplitude * (energy / reference)**(-index_1)
     try:
         cutoff = np.exp((reference / ecut)**(index_2) -
                         (energy / ecut)**(index_2))
     except AttributeError:
         from uncertainties.unumpy import exp
         cutoff = exp((reference / ecut)**(index_2) -
                      (energy / ecut)**(index_2))
     return pwl * cutoff
Example No. 31
    def disc_arm_number(self, y=1, X=1.5):
        R_d = self.baryonic_scalelength()

        a = unp.exp(2 * y) / X
        d = (y**2 / 2) * ((3 * (I(1, y) * K(0, y)) - 3 * (I(0, y) * K(1, y)) +
                           (I(1, y) * K(2, y)) - (I(2, y) * K(1, y))))
        e = (4 * y) * (I(0, y) * K(0, y) - (I(1, y) * K(1, y)))

        self.m_disc = np.array(a * (d + e))
        return self.m_disc
Example No. 32
 def hernquist_arm_number(self,y=1,X=1.5):
     M_d = self.M_disc + self.M_hi
     R_d = self.baryonic_scalelength()
     M_h = self.M_halo_hernquist()
     a_h = self.R_halo()[0]
     
     a = unp.exp(2*y)/X
     c = (M_h/M_d) * ((2*y + (3*a_h/R_d)) / (2*y + (a_h/R_d))**3)
     self.m_hernquist = a * c
     return self.m_hernquist
def func_err_prop(func, x,p):
    if func == func_log:
#        return p[0]*unumpy.log((extrapolate_times + 1 +p[1])) + p[2]
        return p[0]*unumpy.log(extrapolate_times/p[1] + 1 ) + p[2] 
    if func == func_power:
        return p[0]*(extrapolate_times+p[2])**p[1]
    if func == func_stretched_exp:
        return p[0]*unumpy.exp(-(extrapolate_times/p[1])**p[2]) + bo
    else:
        sys.exit("Error")
Example No. 34
 def bulge_arm_number(self,y=1,X=1.5):
     M_b = self.M_bulge
     a_b = self.R_bulge
     M_d = self.M_disc + self.M_hi
     R_d = self.baryonic_scalelength()
     
     a = unp.exp(2*y)/X
     b = (M_b/M_d) * ((2*y + (3*a_b/R_d)) / (2*y + (a_b/R_d))**3)
     self.m_bulge = a * b
     return self.m_bulge
def func_err_prop(func, x,p):
    if func == func_log:
        return p[0]*unumpy.log(extrapolate_times + 1 + p[1]) + p[2]
    if func == func_2log:
        return p[0]*unumpy.log(extrapolate_times + 1 + p[1]) +\
            p[2]*unumpy.log(extrapolate_times + 1 +p[1]) + p[3]
    if func == func_stretched_exp:
        return p[0]*unumpy.exp(-(extrapolate_times/p[1])**p[2])+p[3] 
    if func == func_stretched_exp_assume:
        return p[0]*unumpy.exp(-(extrapolate_times/p[1])**u)+p[2] 
    if func == func_double_log:
        return p[0]*unumpy.log(1+unumpy.log(x+1+p[1]))+p[2] 
    if func == func_2stretched_exp:
        return p[0]*unumpy.exp(-(x/p[1])**p[2])+p[3]*unumpy.exp(-(x/p[4])**p[5]) + p[6] 
#    if func == func_vortex:
#        return p[0]/((1 + p[1]*p[2]*unumpy.log(x/p[3]+p[4]+1))**(1/p[2])) 
    if func == func_vortex:
        return (p[0] + p[1]*unumpy.log(x+p[2]+1))**(-1/p[3])
    else:
        sys.exit("Error")
Example No. 36
 def evaluate(energy, amplitude, reference, ecut, index_1, index_2):
     """Evaluate the model (static function)."""
     pwl = amplitude * (energy / reference) ** (-index_1)
     try:
         cutoff = np.exp((reference / ecut) ** (index_2)
                         - (energy / ecut) ** (index_2))
     except AttributeError:
         from uncertainties.unumpy import exp
         cutoff = exp((reference / ecut) ** (index_2)
                      - (energy / ecut) ** (index_2))
     return pwl * cutoff
Example No. 37
def reaction2Keq(reaction_list):
    '''
        Calculates the equilibrium constants of a reaction, using dG0.
        
        Arguments:
            List of cobra model reaction objects
        Returns:
            Array of K-equilibrium values
    '''
    dG0_prime = reaction2dG0(reaction_list)
    Keq = unumpy.exp( -dG0_prime / (R*default_T) )

    return Keq
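The conversion itself is Keq = exp(-dG0' / (R*T)); a standalone sketch with an illustrative dG0' array (R and default_T are module-level values in the original, assumed here to be the gas constant in J mol-1 K-1 and a temperature in K):

from uncertainties import unumpy

R, default_T = 8.314, 298.15                                     # J mol-1 K-1, K
dG0_prime = unumpy.uarray([-20000.0, 5000.0], [1000.0, 800.0])   # J mol-1 (hypothetical)

Keq = unumpy.exp(-dG0_prime / (R * default_T))
print(unumpy.nominal_values(Keq), unumpy.std_devs(Keq))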
Example No. 38
File: part_a.py Project: bixel/FP14
V_grenz_1 = V_1[0] / np.sqrt(2)
V_grenz_2 = V_2[0] / np.sqrt(2)
V_grenz_3 = V_3[0] / np.sqrt(2)
V_grenz_4 = V_4[0] / np.sqrt(2)

V_grenz_1_log = unp.log(V_grenz_1) + 0
V_grenz_2_log = unp.log(V_grenz_2) + 0
V_grenz_3_log = unp.log(V_grenz_3) + 0
V_grenz_4_log = unp.log(V_grenz_4) + 0

nu_grenz_1_log = ( V_grenz_1_log - popt_1[1] ) / popt_1[0]
nu_grenz_2_log = ( V_grenz_2_log - popt_2[1] ) / popt_2[0]
nu_grenz_3_log = ( V_grenz_3_log - popt_3[1] ) / popt_3[0]
nu_grenz_4_log = ( V_grenz_4_log - popt_4[1] ) / popt_4[0]

nu_grenz_1 = unp.exp(nu_grenz_1_log) + 0
nu_grenz_2 = unp.exp(nu_grenz_2_log) + 0 
nu_grenz_3 = unp.exp(nu_grenz_3_log) + 0
nu_grenz_4 = unp.exp(nu_grenz_4_log) + 0

print("nu1 = " + str(nu_grenz_1))
print("nu2 = " + str(nu_grenz_2))
print("nu3 = " + str(nu_grenz_3))
print("nu4 = " + str(nu_grenz_4))

with open('build/a_nu_1.tex', 'w') as f:
	f.write(r'\SI{{{:L}}}{{\hertz}}'.format(nu_grenz_1))
with open('build/a_nu_2.tex', 'w') as f:
	f.write(r'\SI{{{:L}}}{{\kilo\hertz}}'.format(nu_grenz_2 / 1e3 ))
with open('build/a_nu_3.tex', 'w') as f:
	f.write(r'\SI{{{:L}}}{{\kilo\hertz}}'.format(nu_grenz_3 / 1e3 ))
Example No. 39
def main():
    data = pd.read_csv(
        'build/data_corrected.csv',
        skiprows=(1, 2, 3, 4, 5),
    )

    data = data.query('T > 255')

    func = lambda x, a, b: linear(x, a, b, x0=data['t'].mean())
    params, cov = curve_fit(func, data['t'], data['T'])
    heating_rate, _ = unc.correlated_values(params, cov)
    heating_rate *= u.kelvin / u.minute
    print('Rate = {}'.format(heating_rate))

    # fit activation energy
    T_max = data['T'][data['I_corrected'].argmax()] * u.kelvin
    print('T_max = {} '.format(T_max))

    data = data.drop_duplicates(subset=['T'])
    f = partial(diese_funktion_aus_der_anleitung, data=data, T_star=310)
    data['activation'] = data.apply(f, axis=1)

    data = data.replace([np.inf, -np.inf], np.nan).dropna()
    data['T_inv'] = 1 / data['T']
    # data = data.query('(T > 275)')

    fit_data = data.query('(0.00343 < T_inv < 0.00371)')
    ignored_data = data[~data.index.isin(fit_data.index)]
    func = partial(linear, x0=fit_data['T_inv'].mean())
    params, cov = curve_fit(
        func, fit_data['T_inv'], fit_data['activation'],
    )
    a, b = unc.correlated_values(params, cov)
    print(a, b)
    W = a * u.kelvin * u.boltzmann_constant
    print('Activation Energy {} / {}'.format(
        W.to(u.joule), W.to(u.eV))
    )

    tau_T_max = (
        ((u.boltzmann_constant * T_max**2) / (W * heating_rate)) *
        unp.exp((- W / (const.k * (u.joule / u.kelvin) * T_max)).to_base_units().magnitude)
    )

    print(tau_T_max.to(u.second))

    tau_0 = (
        tau_T_max /
        unp.exp(W.to(u.joule).magnitude / (const.k * T_max.magnitude))
    )
    print('Tau 0 : {}'.format(tau_0.to(u.second)))

    with open('build/activation_work_2.tex', 'w') as f:
        f.write('W = ')
        f.write(SI(W.to('J').magnitude, r'\joule'))
        f.write(' = ')
        f.write(SI(W.to('eV').magnitude, r'\electronvolt'))

    with open('build/tau.tex', 'w') as f:
        f.write('\\tau(T_{max}) = ')
        f.write(SI(tau_T_max.to('s').magnitude, r'\second'))

    with open('build/tau_0.tex', 'w') as f:
        f.write('\\tau_0 = ')
        f.write(SI(tau_0.to('s').magnitude, r'\second'))

    plt.figure()
    plt.plot(fit_data['T_inv'], fit_data['activation'], '+', ms=4)
    plt.plot(
        ignored_data['T_inv'], ignored_data['activation'],
        '+', ms=4, color='#626262',
    )
    plt.plot(
        fit_data['T_inv'], func(fit_data['T_inv'], *params),
        color='darkgray',
    )
    plt.xlabel(r'$T^{-1} \mathrel{/} \si{\per\kelvin}$')
    plt.ylabel(r"$\ln{\frac{\int_T^{T'}i(T)\dif{T'}}{i(T)τ_0}}$")
    plt.tight_layout(pad=0)
    plt.savefig('build/method2.pdf')


    plt.figure()

    T = np.linspace(data['T'].min(), data['T'].max(), 1000)
    plt.plot(T, tau_0.to('s').magnitude.n * np.exp((W.to('J').magnitude.n / const.k / T)))
    plt.xlabel(r'$T \mathbin{/} \si{\kelvin}$')
    plt.ylabel(r'$\tau(T) \mathbin{/} \si{\second}$')
    plt.tight_layout(pad=1)
    plt.savefig('build/tau.pdf')
x=np.linspace(20+273.15,65+273.15)
print('1/T',1/Temp)
print('ln(eha/pas)',unp.log(Kgr*(rhog-1)*T))

print('Geschwindigkeit klein=',Vk,'Zeitmittelwert=',Tk)
plt.figure(1)
plt.errorbar(noms(1/Temp),noms(unp.log(Kgr*(rhog-1)*T)),xerr=stds(1/Temp),yerr=stds(unp.log(Kgr*(rhog-1)*T)),fmt='rx')
plt.plot(noms(1/Temp),noms(unp.log(Kgr*(rhog-1)*T)),'xk',label=r'$Messwerte$')
plt.plot(1/x,m*(1/x)+b,'-b',label=r'$Ausgleichsfunktion$')
plt.legend(loc='best')
plt.grid(True)
plt.xlabel(r'$\frac{1}{T}/{K^{-1}}$')
plt.ylabel(r'$\ln(\eta / Pa \cdot s)$')
plt.savefig('plot.pdf')

def Fehlera(x,sa):
    b=sa*(sum(x**2)/len(x))**(1/2)
    return b
sa1=Fehlera(noms(1/Temp),stdsm)
lnA=unp.uarray(b,sa1)
Reg=(rhow*Dg)/(ngr*T)
Rek=(rhow*Dg)/(n*Tk)
A=unp.exp(lnA)
print('Reg',Reg)
print('Rek',Rek)
#print(Temp)
#np.savetxt('v.txt',np.column_stack((Temp,T,ngr)),fmt='%r',delimiter=' & ')
#print('B',m,stdsm)
#print('lnA',lnA)
#print('A',A)
Example No. 41
print("m3:")
print(Start)
print(End)

m3=fit(wurzel,log(deltaT[Start:End]),log(r2[Start:End]))	


plt.plot(deltaT[Start-10:End+10],np.exp(m3.n)*(deltaT[Start-10:End+10])**.5,"g--", linewidth=0.6)

plt.plot(deltaT[m1Start:m1End],np.exp(m1.n)*(deltaT[m1Start:m1End])**.5,"g-")
plt.plot(deltaT[ViertelStart:ViertelEnd],np.exp(m2.n)*(deltaT[ViertelStart:ViertelEnd])**.25,"b-", label=r"$\Delta t^{1/4}$")
plt.plot(deltaT[Start:End],np.exp(m3.n)*(deltaT[Start:End])**.5,"g-", label=r"$\Delta t^{1/2}$")
plt.plot(deltaT[GeradeStart:GeradeEnd],np.exp(m4.n)*deltaT[GeradeStart:GeradeEnd],"r-", label=r"$\Delta t^{1}$")

TauD=(unp.exp(m3-m4))**2

print(TauD)

plt.plot([TauD.n,TauD.n],[0.1,1e5],"m--",label=r"$\tau_D="+"{:.1f}".format(TauD.n) +"\pm"+"{:.1f}".format(TauD.s)+"$")

TauR=(unp.exp(m2-m3))**4
plt.plot([TauR.n,TauR.n],[0.1,1e5],"y--",label=r"$\tau_R="+"{:.1f}".format(TauR.n) +"\pm"+"{:.1f}".format(TauR.s)+"$")


#plt.plot(t,.8*t**.5, label=r"$\Delta T^{ 1/2}$")
#plt.plot(t,0.01*(t)**1, label=r"$\Delta T^{ 1}$")
#plt.plot(t,3.5*(t)**.25, label=r"$\Delta T^{1/4}$")


Example No. 42
Falldauer_unc[0] = t_gr_a
# print (Falldauer_unc)


rho_Wasser = [998.8, 996.5, 995.3, 994.0, 992.6, 991.0, 989.3, 987.5, 985.7, 983.2]
rho_K = rho_gr * np.ones(10)
eta_gr_b = K_gr * (rho_K - rho_Wasser) * Falldauer_unc
print('eta')
print(eta_gr_b)
eta_gr_b_log = unp.log(eta_gr_b)

params = ucurve_fit(reg.reg_linear, 1/T_2, noms(eta_gr_b_log))
m, b = params
write('build/Steigung_b.tex', make_SI(m, r'\kelvin', figures=1))  # this is actually B!
write('build/Offset_b.tex', make_SI(b, r'', figures=1))
A = unp.exp(b)
# print(A)
write('build/ParameterA_b.tex', make_SI(A*1e3, r'\kilogram\meter\per\second', 'e-3', figures=1))

# write('build/Tabelle_daten1.tex', make_table([T_2-273, noms(Falldauer_unc), stds(Falldauer_unc), rho_Wasser, 1/T_2*1e3, noms(eta_gr_b)*1e3, stds(eta_gr_b)*1e3, noms(eta_gr_b_log)*(-1), stds(eta_gr_b_log)],[0, 2, 2, 1, 3, 3, 2 , 2, 2]))
write('build/Tabelle_daten1.tex', make_table([T_2-273, Falldauer_unc, rho_Wasser, 1/T_2*1e3, eta_gr_b*1e3, (-1)*eta_gr_b_log],[0, 1, 1, 3, 1, 1]))
# FULLTABLE
write('build/Tabelle_daten1_texformat.tex', make_full_table(
    'Messdaten Falldauer in Abhängigkeit der Temperatur.',
    'table:daten1',
    'build/Tabelle_daten1.tex',
    [1,4,5],              # careful here: these numbers denote the resulting column numbers
#                              # that are supposed to be multicolumns
    [r'$T \:/\: \si{\celsius}$',
    r'$t \:/\: \si{\second}$',
    r'$T \:/\: \si{\kilo\gram\per\cubic\meter}$',
Example No. 43
tau = -1. / popt[0] * const.minute
t_H = np.log(2) * tau
print(papstats.pformat(tau / const.minute, label=u'Dämpfungskonstante tau', unit='min'))
print(papstats.pformat(t_H / const.minute, label=u'Halbwertszeit t_H', unit='min'))


#####
print u"# 3.3: Präzession"
#####

data = np.loadtxt('2.3.txt', skiprows=1)
w_1 = unp.uarray(data[:,0], 10) * 2 * const.pi / const.minute
T_P = unp.uarray(np.transpose(data[:,1::2]), 2)
w_2 = unp.uarray(np.transpose(data[:,2::2]), 10) * 2 * const.pi / const.minute
w_2_erw = w_1 * unp.exp(-T_P / tau)
diff = w_2 - w_2_erw
w_2 = np.mean([w_2, w_2_erw], axis=0)
w_F = unp.uarray(unp.nominal_values(w_1 + w_2) / 2., np.sqrt((unp.nominal_values(w_1 - w_2) / 2)**2 + unp.std_devs(w_1 + w_2)**2))

def fit_linear_origin(x, m):
	return m * x

n_m = np.array([1, 1, 2, 2])
d = unp.uarray([15, 20, 15, 20], 0.1) * const.centi
mlabels = [str(n_m[i]) + 'm, ' + str(int(d[i].nominal_value/const.centi)) + 'cm' for i in range(len(n_m))]
d = d + 1.1 / 2. * const.centi * n_m
s = []
plt.clf()
plt.suptitle(u'Diagramm 3.2: Präzessionszeit über der Drehfrequenz verschiedener Drehmomente')
ax = None
Example No. 44
plt.xlim(C2K(26),C2K(65))
plt.xlabel(r'$T / \si{\kelvin}$')
plt.ylabel(r'$\eta / \si{\milli\pascal\second}$')
plt.legend(loc='best')
plt.tight_layout(pad=0)
plt.savefig('build/viskosität.pdf')
plt.clf()

T_r = 1/T
T_r_ = 1/T_
lneta = unp.log(eta)
slope, intercept, r_value, p_value, std_err = linregress(T_r, unp.nominal_values(lneta))
print(slope, intercept, r_value**2)

B = unc.ufloat(slope, std_err)
A = unp.exp(unc.ufloat(intercept, std_err * T_r.mean()))
print("A = {}, B = {}".format(A, B))

valuesp = (slope + std_err) * T_r_ + intercept + std_err * T_r.mean()
valuesm = (slope - std_err) * T_r_ + intercept - std_err * T_r.mean()

plt.errorbar(T_r, unp.nominal_values(lneta), yerr=unp.std_devs(lneta), fmt='rx', label='Messdaten', markersize=3)
plt.plot(T_r_, slope*T_r_ + intercept, 'b-', label='Lineare Regression')
plt.fill_between(T_r_, valuesm, valuesp, facecolor='blue', alpha=0.125, edgecolor='none', label=r'$1\sigma$-Umgebung')
plt.xlim(0.00297, 0.00333)
plt.xlabel(r'$T^{-1} / \si{\per\kelvin}$')
plt.ylabel(r'$\ln(\eta / \si{\milli\pascal\second})$')
plt.legend(loc='best')
plt.tight_layout(pad=0)
plt.savefig('build/viskosität_linear.pdf')
plt.clf()
Example No. 45
def Eps(ntu,mu):
    """
    the two fluids are unmixed
    """
    return 1.-unp.exp(1/mu*unp.pow(ntu,0.22)*(unp.exp(-mu*unp.pow(ntu,0.78))-1))
Example No. 46
print(N_ind)
N_ind=unp.log(N_ind)

def f(t, a, b):
	return a*t+b

params, covariance = curve_fit(f, np.arange(250,(1+len(N_ind))*250,250), nom(N_ind),sigma=std(N_ind))
errors = np.sqrt(np.diag(covariance))
print("Parameter:")
a=ufloat(params[0],errors[0])
print("a={}".format(a))
b=ufloat(params[1],errors[1])
print("b={}".format(b))
print("")
print('T ={}'.format(np.log(2)/-a))
print('N0 ={}'.format(unp.exp(b)/(1-unp.exp(a*250))))
print("")
X = np.linspace(0, 4000)
plt.plot(X, f(X, *params), 'b-', label='Ausgleichsgerade')

plt.errorbar(np.arange(250,(1+len(N_ind))*250,250),nom(N_ind),yerr=std(N_ind),fmt="x",label="Indium")
plt.xlabel(r'Zeit $t$ in s')
plt.xticks(np.arange(250,(1+len(N_ind))*250,250),np.arange(250,(1+len(N_ind))*250,250))
plt.grid()
plt.ylabel(r'Logarithmierte Zerfallrate $\Delta N$ im Zeitintervall $\Delta t$')
plt.legend(loc="best")
plt.tight_layout()
#plt.yscale("log")
plt.savefig("../Bilder/indium.pdf")
plt.close()
#
Example No. 47
def u_decay_with_background(t, N0, lamb, back):
    return N0 * unp.exp(-lamb*t) + back
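A brief usage sketch (illustrative numbers), with the fitted parameters passed in as ufloats so the decay curve carries the propagated error:

import numpy as np
from uncertainties import ufloat
from uncertainties import unumpy as unp

t = np.linspace(0.0, 1000.0, 5)      # s
N0, lamb, back = ufloat(1500.0, 30.0), ufloat(2.1e-3, 1e-4), ufloat(12.0, 2.0)

N = u_decay_with_background(t, N0, lamb, back)
print(unp.nominal_values(N), unp.std_devs(N))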
Example No. 48
plt.ylim(0.5, 30)
plt.savefig('Spannung1.png')
plt.show()


U_regression = np.log(U_0-U)
print(U_regression)
def f(t, m, b):
    return m*t+b

parameters, popt = curve_fit(f, t, U_regression)
m = ufloat(parameters[0], np.sqrt(popt[0,0]))
b = ufloat(parameters[1], np.sqrt(popt[1,1]))

Zeitkonstante = (-1/m)
Ausgangsspannung = unp.exp(b)

# compute the error bars in the y direction:
U_err = unp.log(U_0 - U_gesamt)

x=np.linspace(0,0.0035)
#plt.plot(t, U_regression, 'rx')
plt.errorbar(t, U_regression, xerr=t_err, yerr = unp.std_devs(U_err), fmt='r.', label='Datenpunkte mit Messunsicherheit')
plt.plot(x, f(x, *parameters), 'b-', label='Lineare Ausgleichsgerade')
plt.ylabel('$\log(U_0-U)$')
plt.xlabel('Zeit / s')
#plt.legend(loc='best')
plt.savefig('Spannung2.png')
plt.show()

Example No. 49
errM = unp.std_devs(M)
plt.errorbar(unp.nominal_values(tau), np.log(unp.nominal_values(M)) + errM, fmt='bx', label="T2-Messung")
plt.plot(x_plot, g(x_plot, *params), 'r-', label='Linearer Fit')
plt.legend(loc="best", numpoints=1)
plt.xlim(0,1010)
plt.ylim(0, 0.8)
plt.xlabel(r'Zeitabstand $\tau$ [$\mu$s]')
plt.ylabel(r'ln(Magnetisierung M$_y$/V)')
plt.savefig('plotT2.png')

errors=np.sqrt(np.diag(covariance))

print('ln(M0) =', params[0], '+/-', errors[0])
print('-(1/T2) =', params[1], '+/-', errors[1])
m0=ufloat(params[0], errors[0])
M0=exp(m0)
print('M0 =', unp.nominal_values(M0) , '+/-', unp.std_devs(M0))
#t1/2-Messung
t= ufloat(params[1],errors[1])
T2=-(1/t)
print('T2=',T2)

t12= ufloat(282,4)
gG=8.8/(4.4*t12)
print(gG)

#Diffusionsskonstante

data2 = np.genfromtxt('DMessung.txt', unpack='True')

tau=unp.uarray(data2[0,:],0)