Example #1
def detcoefs(f,nu,re,fh,fp,pr=None):
    """
    functia derermina coeficientii fit-ului functiei
    
    Nu=a*Re^b*Fh^c sau a functiei
    cf=a*Re^b*Fh^c
    
    Parametrii
    ----------
    f  : functie cu proptotipul :
        f((x,y),*coefs)
    nu : numpy.array of ufloats
        valorile nusselt pentru toate masuratorile
    re : numpy.array of ufloats
        valorile reynolds pentru toate masuratorile
    fh : numpy.array of floats
        valorile raportului h/Dh pentru toate masuratorile
    fp : numpy.array of floats
        valorile raportului p/Dh pentru toate masuratorile
        
    Intoarce
    --------
    coefs : array of ufloat 
        coeficientii functiei fitate cu erorile lor
    rchi2 : float 
        chi redus pentru analiza corectitudinii fit-ului
    dof : integer
        gradele de libertate

    """
    nu_n=uns.nominal_values(nu)
    nu_s=uns.std_devs(nu)
    w_nu=nu_s/nu_n
    re_n=uns.nominal_values(re)
    if pr is not None:
        pr_n=uns.nominal_values(pr)
        popt,pcov=curve_fit(f,(re_n,pr_n,fh,fp),nu_n,sigma=w_nu,
                            maxfev=1500)
        chi2=sum(((f((re_n,pr_n,fh,fp),*popt)-nu_n)/nu_s)**2)
    else:
        popt,pcov=curve_fit(f,(re_n,fh,fp),nu_n,sigma=w_nu,
                            maxfev=1500)
        chi2=sum(((f((re_n,fh,fp),*popt)-nu_n)/nu_s)**2)
    
    dof=len(nu_n)-len(popt)
    rchi2=chi2/dof
    
    coefs=[]
    for i in range(len(popt)):
        coefs.append(un.ufloat(popt[i],np.sqrt(pcov[i,i])))
    if pr is not None:
        func=lambda x,y,z,k:f((x,y,z,k),*popt)
    else:
        func=lambda x,y,z:f((x,y,z),*popt)
        
    return {'coefs':np.array(coefs),
            'rchi2':rchi2,
            'DOF':dof,
            'f':func
            }
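
A minimal usage sketch for detcoefs (hypothetical data; assumes the same imports the function itself relies on: numpy as np, uncertainties as un, uncertainties.unumpy as uns, and scipy.optimize's curve_fit):

import numpy as np
from uncertainties import unumpy as uns

def nu_model(X, a, b, c):
    # Nu = a * Re^b * Fh^c; fp is accepted but unused in this model
    re, fh, fp = X
    return a * re**b * fh**c

nu = uns.uarray([40., 55., 75., 95., 120.], [2., 2., 3., 4., 5.])
re = uns.uarray([5e3, 8e3, 1.2e4, 1.6e4, 2e4], [100., 120., 150., 200., 250.])
fh = np.array([0.04, 0.04, 0.05, 0.05, 0.06])
fp = np.array([1.0, 1.0, 1.2, 1.2, 1.4])
result = detcoefs(nu_model, nu, re, fh, fp)
print(result['coefs'], result['rchi2'], result['DOF'])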
Example #2
def stability(ux, ug_crit, omega, label=None):
    formatter = FuncFormatter(lambda x, pos: "%g\u00B2" % np.sqrt(x))

    ux, ug_crit = _double_sort(ux, ug_crit)

    x = np.power(ux, 2)
    y = ug_crit

    line = plt.errorbar(
        unp.nominal_values(x), unp.nominal_values(y), xerr=unp.std_devs(x), yerr=unp.std_devs(y), fmt="o", label=label
    )
    color = line.lines[0].get_color()

    plt.gca().xaxis.set_major_formatter(formatter)
    plt.gca().xaxis.set_ticks(np.power(np.arange(4, 10) * 100, 2))
    plt.xlabel(r"$U_x^2$")
    plt.ylabel(r"$U_g$")

    linear = lambda x, a: a * x
    a = _uarray_fit(linear, x, y, x0=(0.0001,), epsfcn=1e-7)[0]

    x = np.linspace(0, unp.nominal_values(x).max() * 1.1, 20)
    y = linear(x, a.n)

    plt.plot(x, linear(x, a.n), color=color)
    plt.fill_between(x, linear(x, a.n + a.s), linear(x, a.n - a.s), color=color, alpha=0.1)

    a = _normalize_ufloat(a)
    print("Slope:", a * 1000, "1/kV")

    qm = -2 / 3 * a * r0 ** 2 * omega ** 2 / K

    print(label, "q/m = {:.4P} uC/kg".format(qm * 1e6))
Example #3
def other(g=True):
    freq = unumpy.uarray([100, 500, 1000, 5000, 10000, 50000], np.array([100, 500, 1000, 5000, 10000, 50000]) * 0.01)
    vin = ufloat(1.01, 0.01)
    vout = unumpy.uarray([0.640, 3.02, 5.27, 9.2, 9.6, 6.4], [0.01, 0.01, 0.01, 0.1, 0.1, 0.1])
    fase = unumpy.uarray([92, 108, 123, 166, 178, -125], [1, 2, 1, 1, 1, 1])

    Gs = vout / vin
    dB = 10 * unumpy.log10(Gs)

    if not g:
        return None

    f = plt.figure(figsize=(8, 8))
    f.suptitle("Differenziatore", fontsize=15, y=0.98)
    
    ax = f.add_subplot(211)

    ax.errorbar(x=unumpy.nominal_values(freq),
        y=unumpy.nominal_values(Gs),
        c='black', fmt='o-')

    ax.set_xlabel('Frequenza', fontsize=14)
    ax.set_ylabel('Guadagno G', fontsize=14)

    ax.set_xscale('log')
    #ax.set_ylim((-13, 1))
    #ax.set_yticklabels(('', 2, 4, 6, 8, 10, 12))
    ax.set_xticklabels(('', '100 Hz', u"1 kHz", u"10 kHz", u"100 kHz", u"1 MHz"))
    
    ax.minorticks_on()
    ax.grid(b=True, which='major', color='0.7', linestyle='-', zorder=-5)
    ax.grid(b=True, which='minor', color='0.9', linestyle='-', zorder=-9)
    ax.set_axisbelow(True)
    
    ax2 = f.add_subplot(212)

    ax2.errorbar(x=unumpy.nominal_values(freq),
        y=unumpy.nominal_values(fase),
        c='black', fmt='o-')

    ax2.set_ylabel('Sfasamento [Gradi]', fontsize=14)

    ax2.set_xscale('log')
    #ax2.set_yticklabels(('', 25, 50, 75, 100))
    ax2.set_xticklabels(('', '100 Hz', u"1 kHz", u"10 kHz", u"100 kHz"))
    
    ax2.minorticks_on()
    ax2.grid(b=True, which='major', color='0.7', linestyle='-', zorder=-5)
    ax2.grid(b=True, which='minor', color='0.9', linestyle='-', zorder=-9)
    ax2.set_axisbelow(True)

    ax3 = ax2.twiny()
    ax3.set_xticks((0, 0.333, 0.666, 1))
    ax3.set_xticklabels(('', "1 kHz", u"10 kHz", u"100 kHz"))

    f.subplots_adjust(top=0.93, hspace=0.25, bottom=0.07, right=0.95)

    plt.savefig("../latex/diff.pdf")

    plt.show()
Example #4
File: plot.py Project: Fujnky/ap
def auswerten(name, d, n, t, z, V_mol, eps, raw):
    d *= 1e-3
    N = unp.uarray(n/t, np.sqrt(n)/t) - N_u

    if name=="Cu":
        tools.table((raw[0], raw[1], N), ("D/mm", "n", "(N-N_U)/\per\second"), "build/{}.tex".format(name), "Messdaten von {}.".format(name), "tab:daten{}".format(name), split=2, footer=r"$\Delta t = \SI{60}{s}$")#"(N-N_U)/\per\second"
    else:
        tools.table((raw[0], raw[1], raw[2], N), ("D/mm", "n", "\Delta t/s", "(N-N_U)/\per\second"), "build/{}.tex".format(name), "Messdaten von {}.".format(name), "tab:daten{}".format(name), split=2)
    mu = z * const.N_A / V_mol * 2 * np.pi * (const.e**2 / (4 * np.pi * const.epsilon_0 * const.m_e * const.c**2))**2 * ((1+eps)/eps**2 * ((2 * (1+eps))/(1+2*eps) - 1/eps * np.log(1+2*eps)) + 1/(2*eps) * np.log(1+ 2*eps) - (1+ 3*eps)/(1+2*eps)**2)

    params, pcov = curve_fit(fit, d, unp.nominal_values(N), sigma=unp.std_devs(N))
    params_ = unc.correlated_values(params, pcov)
    print("{}: N(0) = {}, µ = {}, µ_com = {}".format(name, params_[0], -params_[1], mu))

    sd = np.linspace(0, .07, 1000)

    valuesp = (fit(sd, *(unp.nominal_values(params_) + 10*unp.std_devs(params_)))).astype(float)
    valuesm = (fit(sd, *(unp.nominal_values(params_) - 10*unp.std_devs(params_)))).astype(float)

    #plt.xlim(0,7)
    plt.xlabel(r"$D/\si{mm}$")
    plt.ylabel(r"$(N-N_U)/\si{\per\second}$")
    plt.plot(1e3*sd, fit(sd, *params), 'b-', label="Fit")
    plt.fill_between(1e3*sd, valuesm, valuesp, facecolor='blue', alpha=0.125, edgecolor='none', label=r'$1\sigma$-Umgebung ($\times 10$)')
    plt.errorbar(1e3*d, unp.nominal_values(N), yerr=unp.std_devs(N), fmt='rx', label="Messdaten")
    plt.legend(loc='best')
    plt.yscale('linear')
    plt.tight_layout(pad=0)
    plt.savefig("build/{}.pdf".format(name))
    plt.yscale('log')
    plt.savefig("build/{}_log.pdf".format(name))
    plt.clf()
Example #5
def test_x2_in_x1_2():
    """
    x2 has a couple of bins, each of which span more than one original bin
    """
    # old size
    m = 10

    # bin edges
    x_old = np.linspace(0., 1., m+1)
    x_new = np.array([0.25, 0.55, 0.75])

    # some arbitrary distribution
    y_old = 1. + np.sin(x_old[:-1]*np.pi) / np.ediff1d(x_old)

    y_old = unp.uarray(y_old, 0.1*y_old*uniform((m,)))

    # rebin
    y_new = rebin.rebin(x_old, y_old, x_new, interp_kind='piecewise_constant')

    # compute answer here to check rebin
    y_new_here = unp.uarray(np.zeros(2), np.zeros(2))
    y_new_here[0] = 0.5 * y_old[2] + y_old[3] + y_old[4] + 0.5 * y_old[5]
    y_new_here[1] = 0.5 * y_old[5] + y_old[6] + 0.5 * y_old[7]

    assert_allclose(unp.nominal_values(y_new),
                    unp.nominal_values(y_new_here))

    # standard deviation comparison
    assert_allclose(unp.std_devs(y_new),
                    unp.std_devs(y_new_here))
Example #6
def import_data_from_objLog(FilesList, Objects_Include, pv):
    
    
    List_Abundances     = ['OI_HI', 'NI_HI', 'SI_HI', 'SI_HI_ArCorr', 'Y_Mass_O', 'Y_Mass_S', 'Y_Inference_O', 'Y_Inference_S']
    #List_Abundances    = ['OI_HI', 'NI_HI', 'SI_HI', 'SI_HI_ArCorr', 'Y_Mass_O', 'Y_Mass_S', 'Y_inf_O', 'Y_inf_S']

    #Dictionary of dictionaries to store object abundances
    Abund_dict = OrderedDict()
    for abund in List_Abundances:
        Abund_dict[abund] = OrderedDict()

    #Loop through files
    for i in range(len(FilesList)):
        #Analyze file address
        CodeName, FileName, FileFolder  = pv.Analyze_Address(FilesList[i])  
      
        if CodeName in Objects_Include:
            #Loop through abundances in the log
            for abund in List_Abundances:
                Abund_Mag = pv.GetParameter_ObjLog(CodeName, FileFolder, Parameter = abund, Assumption = 'float')
                #If the abundance was measured, store it
                if Abund_Mag is not None:
                    Abund_dict[abund][CodeName] = Abund_Mag
        
    #Dictionary to store objects with abundances pairs for regressions. 
    #As an initial value for the keys we define the abundances we want to use for the regression
    Abundances_Pairs_dict = OrderedDict()
    Abundances_Pairs_dict['O_Regression']                   = ('OI_HI','Y_Mass_O')      
    Abundances_Pairs_dict['N_Regression']                   = ('NI_HI','Y_Mass_O')      
    Abundances_Pairs_dict['S_Regression']                   = ('SI_HI','Y_Mass_S')      
    Abundances_Pairs_dict['S_ArCorr_Regression']            = ('SI_HI_ArCorr','Y_Mass_S')
    Abundances_Pairs_dict['O_Regression_Inference']         = ('OI_HI','Y_Inference_O')      
    Abundances_Pairs_dict['N_Regression_Inference']         = ('NI_HI','Y_Inference_O')      
    Abundances_Pairs_dict['S_Regression_Inference']         = ('SI_HI','Y_Inference_S')      
    Abundances_Pairs_dict['S_ArCorr_Regression_Inference']  = ('SI_HI_ArCorr','Y_Inference_S') 
        
    #Loop through the regression lists and get objects with both abundances observed
    for j in range(len(Abundances_Pairs_dict)):
        
        #Get the elements keys for the regression
        Vector, Elem_X, Elem_Y = Abundances_Pairs_dict.keys()[j], Abundances_Pairs_dict.values()[j][0], Abundances_Pairs_dict.values()[j][1]
        
        #Determine objects with both abundances observed
        Obj_vector  = intersect1d(Abund_dict[Elem_X].keys(), Abund_dict[Elem_Y].keys(), assume_unique = True)
        X_vector    = zeros(len(Obj_vector))
        Y_vector    = zeros(len(Obj_vector))
        X_vector_E  = zeros(len(Obj_vector))
        Y_vector_E  = zeros(len(Obj_vector))
                        
        #Generate abundances vectors
        for z in range(len(Obj_vector)):  
            X_vector[z] = nominal_values(Abund_dict[Elem_X][Obj_vector[z]])
            X_vector_E[z] = std_devs(Abund_dict[Elem_X][Obj_vector[z]])            
            Y_vector[z] = nominal_values(Abund_dict[Elem_Y][Obj_vector[z]])
            Y_vector_E[z] = std_devs(Abund_dict[Elem_Y][Obj_vector[z]])
    
        Abundances_Pairs_dict[Vector] = (list(Obj_vector), uarray(X_vector, X_vector_E), uarray(Y_vector, Y_vector_E))
        
    return Abundances_Pairs_dict
Example #7
def fitting_powerlaw_LP(lx_min, lx_max,
        const=[ufloat(0.083,0.058), ufloat(2.11,0.21)]):
    x = lx_range(lx_min, lx_max)
    y = (10**24.5) * ((x/1e45)**const[1]) * (10**const[0])
    y_nom = unumpy.nominal_values(y)
    y_min = unumpy.nominal_values(y) - unumpy.std_devs(y)
    y_max = unumpy.nominal_values(y) + unumpy.std_devs(y)
    return y_nom, y_min, y_max
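
A plotting sketch for the band returned by fitting_powerlaw_LP (hypothetical limits; assumes matplotlib and the project's lx_range helper that the function uses internally):

import matplotlib.pyplot as plt
x = lx_range(1e44, 1e46)  # the same grid the function samples
y_nom, y_min, y_max = fitting_powerlaw_LP(1e44, 1e46)
plt.fill_between(x, y_min, y_max, alpha=0.3)  # +/- 1 sigma band
plt.loglog(x, y_nom)
plt.show()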
Example #8
def test_nom_ueig():
    sA = array([[1, 2], [3, 4]])
    A = array([[0.1, 0.2], [0.1, 0.3]])
    w, v = eig(A)
    uA = uarray((A, sA))
    uw, uv = ueig(uA)  # propagate the uncertainties in uA through the eigendecomposition
    assert (nominal_values(uw) == w).all()
    assert (nominal_values(uv) == v).all()
Example #9
def fitting_powerlaw_YP(sz_min, sz_max,
        const=[ufloat(-0.133,0.069), ufloat(2.03,0.30)]):
    x = lx_range(sz_min, sz_max)
    y = (10**24.5) * ((x/1e-4)**const[1]) * (10**const[0])
    y_nom = unumpy.nominal_values(y)
    y_min = unumpy.nominal_values(y) - unumpy.std_devs(y)
    y_max = unumpy.nominal_values(y) + unumpy.std_devs(y)
    return y_nom, y_min, y_max
Example #10
File: n.py Project: knly/PAP2
def analyze_spektrallinien(fileprefix, figindex, crstl, sl, d=None, y=None):

    data = np.append(np.loadtxt(fileprefix+'.b.1.txt', skiprows=1), np.loadtxt(fileprefix+'.b.2.txt', skiprows=1), axis=0)

    b, n = data[:,0], data[:,1]
    n = unp.uarray(n, np.sqrt(n*20)/20)
    
    sl = [ [(b >= bounds[0]) & (b <= bounds[1]) for bounds in sl_row] for sl_row in sl]

    def fit_gauss(x, m, s, A, n_0):
        return A/np.sqrt(2*const.pi)/s*np.exp(-((x-m)**2)/2/(s**2))+n_0
    
    r = []
    
    plt.clf()
    papstats.plot_data(b,n)
    papstats.savefig_a4('3.'+str(figindex)+'.a.png')

    plt.clf()
    plt.suptitle('Diagramm 3.'+str(figindex)+u': Spektrallinien von Molybdän bei Vermessung mit einem '+crstl+'-Kristall')
    for i in range(2):
        r.append([])
        # Linie
        for k in range(2):
            # Ordnung
            b_k = b[sl[i][k]]
            n_k = n[sl[i][k]]
            xspace = np.linspace(b_k[0], b_k[-1], num=1000)
            plt.subplot(2,2,i*2+k+1)
            plt.xlim(xspace[0], xspace[-1])
            if i==1:
                plt.xlabel(u'Bestrahlungswinkel '+r'$\beta \, [^\circ]$')
            if k==0:
                plt.ylabel(u'Zählrate '+r'$n \, [\frac{Ereignisse}{s}]$')
            plt.title('$K_{'+(r'\alpha' if i==0 else r'\beta')+'}$ ('+str(k+1)+'. Ordnung)')
            papstats.plot_data(b_k, n_k)
            # Gauss-Fit
            popt, pstats = papstats.curve_fit(fit_gauss, b_k, n_k, p0=[b_k[0]+(b_k[-1]-b_k[0])/2, (b_k[-1]-b_k[0])/4, np.sum(n_k).n, n_k[0].n])
            plt.fill_between(b_k, 0, unp.nominal_values(n_k), color='g', alpha=0.2)
            FWHM = popt[1]*2*unp.sqrt(2*unp.log(2))
            plt.hlines(popt[3].n+(fit_gauss(xspace, *unp.nominal_values(popt)).max()-popt[3].n)/2, popt[0].n-FWHM.n/2, popt[0].n+FWHM.n/2, color='black', lw=2, label='$'+papstats.pformat(FWHM, label='FWHM', unit=r'^\circ')+'$')
            papstats.plot_fit(fit_gauss, popt, xspace=xspace, plabels=[r'\mu', r'\sigma', 'A', 'n_0'], punits=['^\circ', '^\circ', 's^{-1}', 's^{-1}'])
            plt.ylim(unp.nominal_values(n_k).min()-n_k[unp.nominal_values(n_k).argmin()].s, unp.nominal_values(n_k).max()+(unp.nominal_values(n_k).max()-unp.nominal_values(n_k).min()))
            plt.legend(loc='upper center', prop={'size':10})

            b_S = unc.ufloat(popt[0].n, np.abs(popt[1].n))
            print "Winkel:", papstats.pformat(b_S, unit='°', format='.2u')
            if y is None:
                r[i].append(y_bragg(b_S, n=k+1))
                print "Wellenlänge der Linie:", papstats.pformat(r[i][k]/const.pico, label='y', unit='pm', format='.2u')
            if d is None:
                r[i].append((k+1)*y[i][k]/unc.umath.sin(b_S*const.degree))
                print "Gitterkonstante:", papstats.pformat(r[i][k]/const.pico, label='a', unit='pm', format='.2u')

    papstats.savefig_a4('3.'+str(figindex)+'.png')

    return r
Example #11
def ulinReg(xdata,ydata):
    """
    Führt über linReg_iter eine lineare Regression durch. Parameter sind hierbei
    allerdings uncertainties.ufloat
    """
    popt, perr = linReg_iter(unumpy.nominal_values(xdata),
                 unumpy.nominal_values(ydata), unumpy.std_devs(ydata),
                 unumpy.std_devs(xdata))
    return uc.ufloat(popt[0],perr[0]), uc.ufloat(popt[1],perr[1])
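
A quick call sketch for ulinReg (hypothetical data; assumes uncertainties.unumpy as unumpy and uncertainties as uc, plus the project's linReg_iter):

from uncertainties import unumpy
x = unumpy.uarray([1., 2., 3., 4.], 0.05)
y = unumpy.uarray([2.1, 3.9, 6.2, 7.9], 0.2)
slope, intercept = ulinReg(x, y)
print(slope, intercept)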
Example #12
    def state_space(self, speed, nominal=False):
        """
        Returns the A and B matrices for the Whipple model linearized about
        the upright constant velocity configuration.


        Parameters
        ----------
        speed : float
            The speed of the bicycle.
        nominal : boolean, optional
            The default is False, in which case uarrays with the calculated
            uncertainties are returned. If True, plain ndarrays are returned
            without uncertainties.

        Returns
        -------

        A : ndarray, shape(4,4)
            The state matrix.
        B : ndarray, shape(4,2)
            The input matrix.

        Notes
        -----
        ``A`` and ``B`` describe the Whipple model in state space form:

            x' = A * x + B * u

        where the states are

            x = [roll angle,
                 steer angle,
                 roll rate,
                 steer rate]

        and the inputs are

            u = [roll torque,
                 steer torque]

        If you have a flywheel defined, body D, it will be completely ignored
        in these results. These results are strictly for the Whipple bicycle
        model.

        """

        M, C1, K0, K2 = self.canonical()

        g = self.parameters['Benchmark']['g']

        A, B = bicycle.ab_matrix(M, C1, K0, K2, speed, g)

        if nominal is True:
            return (unumpy.nominal_values(A), unumpy.nominal_values(B))
        elif nominal is False:
            return A, B
        else:
            raise ValueError('nominal must be True or False')
Example #13
def plot(x,y,*args,**kwargs):
    nominal_curve = pyplot.plot(x, unumpy.nominal_values(y), *args, **kwargs)
    pyplot.fill_between(x, 
                        unumpy.nominal_values(y)-unumpy.std_devs(y), 
                        unumpy.nominal_values(y)+unumpy.std_devs(y),
                        facecolor=nominal_curve[0].get_color(),
                        edgecolor='face',
                        alpha=0.1,
                        linewidth=0)
    return nominal_curve
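
A minimal usage sketch for this band-plotting helper (hypothetical data; assumes matplotlib.pyplot imported as pyplot and uncertainties.unumpy as unumpy, as in the helper itself):

import numpy as np
from uncertainties import unumpy
x = np.linspace(0., 2 * np.pi, 100)
y = unumpy.uarray(np.sin(x), 0.05)  # constant 1-sigma uncertainty
plot(x, y, label='sin(x)')
pyplot.legend()
pyplot.show()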
Example #14
File: n.py Project: knly/PAP2
def compute_hwz(N_list, ttor, fit, plotname, title, sl=slice(None,None), Uscale=1, p0=None, eq=None, plabels=None, punits=None, Th_erw=None):
    
    N = np.sum(unp.uarray(N_list,np.sqrt(N_list)), axis=0)
    t = np.arange(len(N))*ttor+ttor/2.

    table = pt.PrettyTable()
    table.add_column('t [s]', t.astype(int), align='r')
    if len(N_list) > 1:
        for i in range(len(N_list)):
            table.add_column('N'+str(i+1), N_list[i].astype(int), align='r')
        table.add_column('Summe', N, align='r')
    else:
        table.add_column('N', N, align='r')
    with open("Resources/table_"+plotname+".txt", "w") as text_file:
        text_file.write(table.get_string())


    global N_U
    N_U = N_U0*Uscale*ttor
    popt, pstats = papstats.curve_fit(fit, t[sl], N[sl], p0=p0)

    # Background uncertainty
    N_U = (N_U0-N_U0.s)*Uscale*ttor
    popt_min, pstats_min = papstats.curve_fit(fit, t[sl], N[sl], p0=p0)
    N_U = (N_U0+N_U0.s)*Uscale*ttor
    popt_max, pstats_max = papstats.curve_fit(fit, t[sl], N[sl], p0=p0)
    N_U = N_U0*Uscale*ttor
    s_U = unp.nominal_values(((np.abs(popt-popt_min)+np.abs(popt-popt_max))/2.))
    s_corrected = np.sqrt(unp.std_devs(popt)**2 + s_U**2)
    popt_corrected = unp.uarray(unp.nominal_values(popt),s_corrected)
    
    # Half-life
    Th = popt_corrected[::2]*unc.umath.log(2)
    for i in range(len(Th)):
        papstats.print_rdiff(Th[i]/60, Th_erw[i]/60)

    # Plot
    plt.clf()
    plt.title('Diagramm '+plotname+': '+title)
    plt.xlabel('Messzeit $t \, [s]$')
    plt.ylabel('Ereigniszahl $N$')
    xspace = np.linspace(0, t[-1])
    papstats.plot_data(t, N, label='Messpunkte')
    papstats.plot_fit(fit, popt, pstats, xspace, eq=eq, plabels=plabels, punits=punits)
    plt.fill_between(xspace, fit(xspace, *unp.nominal_values(popt_min)), fit(xspace, *unp.nominal_values(popt_max)), color='g', alpha=0.2)
    Nmin = np.amin(unp.nominal_values(N))
    for i in range(len(Th)):
        plt.hlines(popt[1::2][i].n/2.+N_U.n, 0, Th[i].n, lw=2, label='Halbwertszeit $'+papstats.pformat(Th[i], label=r'T_{\frac{1}{2}}'+('^'+str(i+1) if len(Th)>1 else ''), unit='s')+'$')
    handles, labels = plt.gca().get_legend_handles_labels()
    p = plt.Rectangle((0, 0), 1, 1, color='g', alpha=0.2)
    handles.append(p)
    labels.append('Fit im '+r'$1 \sigma$'+'-Bereich von $N_U$:'+''.join(['\n$'+papstats.pformat(s_U[i], label='\Delta '+plabels[i]+'^{U}', unit=punits[i])+'$' for i in range(len(plabels))]))
    plt.legend(handles, labels)
    papstats.savefig_a4(plotname+'.png')
Example #15
def plot_u(cal_file, fm_file, offset_file, description, accidental_offset,
        results_file):
    M = np.genfromtxt(cal_file)
    N = np.genfromtxt(fm_file)
    O = np.genfromtxt(offset_file)

    i_helm = M[:,1] #current applied to helmholtz for calibration measurement
    b_helm = M[:,2] #field applied to helmholtz coil for calibration measurement
    p, cov = np.polyfit(i_helm, b_helm, 1,  cov=True) #fit a line to calibration measurement so that we get a calibration

    i_fm = N[:,1] #current applied to helmholtz for shielding measurement
    b_fm = unumpy.uarray(N[:,2],0.0005) - accidental_offset #field measured inside of ferromagnet shield


    B_earth = np.polyval(p,0) #We get the Earth's magnetic field from i=0 of the Helmholtz calibration
    B_fm_no_i = unumpy.uarray(np.mean(O[:,2]), np.std(O[:,2])) #Get average and error for initial magnetization

    mag = B_fm_no_i - B_earth #initial magnetization is the field inside of the ferromagnet before any field is applied minus the Earth's magnetic field

    Bin = b_fm - mag #internal magnetization is the measured internal field minus the initial magnetization. This correction might not be necessary for a soft ferromagnet
    
    Bext = unumpy.uarray(np.polyval(p,i_fm), 0.0005) #external field
    Bext_nom = unumpy.nominal_values(Bext)
    Bext_err = unumpy.std_devs(Bext)

    B = Bext/Bin
    c = a/b
    u=(-2*B + c**2 - 2*unumpy.sqrt(B**2 - B*c**2 - B + c**2) + 1)/(c**2 - 1)

    u_nom = unumpy.nominal_values(u)
    u_err = unumpy.std_devs(u)

    #calculate u_err with just point-to-point uncertainties. I define this as just
    #the uncertainty from the field measurements
    u_pp=(-2*B + c.nominal_value**2 - 2*unumpy.sqrt(B**2 - B*c.nominal_value**2 - B + c.nominal_value**2) + 1)/(c.nominal_value**2 - 1)
    
    #calculate uerr from just geometry uncertainties
    u_geom=(-2*unumpy.nominal_values(B) + c**2 - 2*unumpy.sqrt(unumpy.nominal_values(B)**2 - unumpy.nominal_values(B)*c**2 - unumpy.nominal_values(B) + c**2) + 1)/(c**2 - 1)

    ##obtain uncertainties from field
    u_err_pp = unumpy.std_devs(u_pp)

    ##obtain uncertainties from geometry
    u_err_geom = unumpy.std_devs(u_geom)

    with open(results_file, "w") as myfile:
        myfile.write('#Bext, sig_Bext, ur, sig_ur, sig_ur_pp, sig_ur_corr\n')
        for j in range(0, len(u_nom)):
            myfile.write('%s\t%s\t%s\t%s\t%s\t%s\n' %(
                Bext_nom[j], Bext_err[j],
                u_nom[j], u_err[j], u_err_pp[j], u_err_geom[j]))
    

    plt.errorbar(Bext_nom, u_nom, u_err, marker = '.', label = description)
Example #16
def GenExpErrorPlot(ras,figoffset,save=True,close=True,**figaspect): 
    colors=['b','g','r','c','m','y','k']
    markers=['.','*','o','v','^','<','>']
    index=0  
    Nufig=figoffset
    Ffig=Nufig+1
    for ra in ras:
        fh=Fh(ra)
        plt.figure(figoffset)
        plt.errorbar(uns.nominal_values(ra.Re.magnitude),
                 uns.nominal_values(ra.Nu.magnitude),
                 uns.std_devs(ra.Nu.magnitude),
                 uns.std_devs(ra.Re.magnitude),
                 '{:s}{:s}'.format(colors[index%len(colors)],
                                markers[index%len(markers)]),
                 label='{:.3f}'.format(fh))
        plt.figure(Ffig)
        plt.errorbar(uns.nominal_values(ra.Re.magnitude),
                 uns.nominal_values(ra.fr.magnitude),
                 uns.std_devs(ra.fr.magnitude),
                 uns.std_devs(ra.Re.magnitude),
                 '{:s}{:s}'.format(colors[index%len(colors)],
                                markers[index%len(markers)]),
                 label='{:.3f}'.format(fh))
        index += 1  # advance the color/marker cycle; yerr/xerr order above fixed to match the Nu plot
    
    plt.figure(Nufig)
    plt.legend(loc='upper left')
    plt.xlabel('Re')
    plt.ylabel('Nu')
    
    plt.figure(Ffig)
    plt.legend(loc='upper right')
    plt.xlabel('Re')
    plt.ylabel('$c_f$')

    if save:
        plt.figure(Nufig)
        plt.savefig('Nu_exp_all.png',dpi=300,
                    figsize=(166.54/2.54,81/2.54),
                    orientation='landscape',
                    facecolor='w',
                    edgecolor='k')
        plt.figure(Ffig)
        plt.savefig('cf_exp_all.png',dpi=300,
                    figsize=(166.54/2.54,81/2.54),
                    orientation='landscape',
                    facecolor='w',
                    edgecolor='k')
    if close:
        plt.close(Nufig)
        plt.close(Ffig)
        return figoffset,-1,-1
    
    return Ffig+1,Nufig,Ffig
Example #17
	def residuals(self):
		'''
		usually called by draw(); calculates the residuals
		'''
		from uncertainties import unumpy
		from numpy import array
		from ROOT import TGraphErrors
		residuals = array( range( len( self.__x ) ) ,'float')
		for i in range( len( unumpy.nominal_values(self.__x) ) ):
			residuals[i] = unumpy.nominal_values(self.__y)[i] - self.func.Eval( unumpy.nominal_values(self.__x)[i] )
		self.resgraph = TGraphErrors( len( self.__x ), unumpy.nominal_values(self.__x), residuals, unumpy.std_devs(self.__x), unumpy.std_devs(self.__y) )
Example #18
def curve_fit(fit, xdata, ydata, sigma=None, p0=None):
    if sigma is None:
        # TODO: Only y errors? chisquared compares only y differences; are x errors relevant for the fit?
        sigma = unp.std_devs(ydata)
        if np.sum(sigma) == 0:
            sigma = None
    xdata = unp.nominal_values(xdata)
    ydata = unp.nominal_values(ydata)
    popt, pcov = opt.curve_fit(fit, xdata, ydata, sigma=sigma, p0=p0)
    popt = unp.uarray(popt, np.sqrt(np.diagonal(pcov)))
    pstats = PAPStats(ydata, fit(xdata, *unp.nominal_values(popt)), sigma=sigma, ddof=len(popt))
    return popt, pstats
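
A call sketch for this wrapper (hypothetical data; assumes numpy as np, uncertainties.unumpy as unp, scipy.optimize as opt, and the project's PAPStats class):

from uncertainties import unumpy as unp

def line(x, a, b):
    return a * x + b

x = unp.uarray([1., 2., 3., 4., 5.], 0.)  # x errors are ignored by the wrapper
y = unp.uarray([2.2, 3.9, 6.1, 8.0, 9.8], 0.2)
popt, pstats = curve_fit(line, x, y)
print(popt)  # uarray of fitted parameters with their 1-sigma errors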
Example #19
def PlotPatches(Sc,PatchData,ErrorBars):
    """
    Plot E*R* data binned from hilltop patches.
    """
    e_star = E_Star(Sc,PatchData[2],PatchData[0])
    r_star = R_Star(Sc,PatchData[1],PatchData[0])
    if ErrorBars:
        plt.errorbar(unp.nominal_values(e_star),unp.nominal_values(r_star),yerr=unp.std_devs(r_star),xerr=unp.std_devs(e_star),
                     fmt='ro',label='Hilltop Patch Data')
    else:
        plt.errorbar(unp.nominal_values(e_star),unp.nominal_values(r_star),
                     fmt='ro',label='Hilltop Patch Data')
Example #20
def plot_data(xdata, ydata, ax=plt, **kwargs):
    xerr = unp.std_devs(xdata)
    if np.sum(xerr)==0:
        xerr = None
    yerr = unp.std_devs(ydata)
    if np.sum(yerr)==0:
        yerr = None
    if not ('ls' in kwargs or 'linestyle' in kwargs):
        kwargs['ls'] = 'none'
    if 'marker' not in kwargs:
        kwargs['marker'] = '.'
    return ax.errorbar(unp.nominal_values(xdata), unp.nominal_values(ydata), xerr=xerr, yerr=yerr, **kwargs)
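
A call sketch for plot_data (hypothetical data; assumes uncertainties.unumpy as unp and a matplotlib-compatible plt, as used above):

from uncertainties import unumpy as unp
x = unp.uarray([1., 2., 3.], 0.1)
y = unp.uarray([10., 20., 15.], [1., 2., 1.5])
plot_data(x, y, label='measurements')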
Example #21
def plotunc( ax, x, dat, lcolor, fcolor, marker, labelstr, alpha=1.0, errscale=1.0):
    print "plotting ", labelstr
    ax.errorbar( x, unumpy.nominal_values(dat),\
                yerr=errscale*unumpy.std_devs(dat),\
                capsize=0., elinewidth = 1. ,\
                fmt='.', ecolor=lcolor, mec=lcolor, \
                mew=1.0, ms=5.,\
                alpha = alpha, \
                marker=marker, mfc=fcolor, \
                label=labelstr)
    if ax == axRe:
        pdat = np.transpose( np.vstack(( x, unumpy.nominal_values(dat) )))
        np.savetxt( 'pbraggSim.dat', pdat)
Example #22
def PlotBasins(Sc,BasinData,ErrorBars):
    """
    Plot basin average E*R* data.
    """
    e_star = E_Star(Sc,BasinData[2],BasinData[0])
    r_star = R_Star(Sc,BasinData[1],BasinData[0])

    if ErrorBars:
        plt.errorbar(unp.nominal_values(e_star),unp.nominal_values(r_star),yerr=unp.std_devs(r_star),xerr=unp.std_devs(e_star),
                     fmt='go',label='Basin Data')
    else:
        plt.errorbar(unp.nominal_values(e_star),unp.nominal_values(r_star),
                     fmt='go',label='Basin Data')
Example #23
	def __init__( self, x, y, execludeLast = False ):
		from uncertainties import unumpy
		self.__x = x
		self.__y = y
		from ROOT import TGraphErrors, TF1
		self.graph = TGraphErrors( len(x), unumpy.nominal_values(x), unumpy.nominal_values(y) , unumpy.std_devs(x), unumpy.std_devs(y))
		if execludeLast:
			self.func = TF1('fitfunc', 'pol1', 0, unumpy.nominal_values(self.__x)[-1]-1 )
			self.graph.Fit('fitfunc', 'RQ')
			self.graph.Fit('fitfunc', 'RQ')
		else:
			self.graph.Fit('pol1', 'Q')
			self.graph.Fit('pol1', 'Q')
			self.func = self.graph.GetFunction('pol1')
Example #24
def calc_mu(Bin, Bout, radius_inner, radius_outer):

    # ratio of inner and outer radius
    radius_ratio = radius_inner / radius_outer
    print( radius_ratio )
    # ratio of internal to external field
    B_ratio = Bin / Bout
    # convert from Series object to Numpy Array object
    B_ratio = B_ratio.values

    # If the value under the square root becomes negative, set B_ratio to NaN
    # (this seems to happen with Argonne MRI measurements at low fields)
    B_ratio[ (B_ratio**2) * (radius_ratio**2) - B_ratio * (radius_ratio**2) - B_ratio + 1 < 0 ] = np.nan

    # Calculate permeability. Here, use both uncertainties from field measurements and uncertainties from geometry, i.e. radius measurements.
    mu = ( B_ratio * (radius_ratio**2)
           + B_ratio
           - 2
           -2 * unumpy.sqrt( (B_ratio**2) * (radius_ratio**2) - B_ratio * (radius_ratio**2) - B_ratio + 1 )
           ) / ( B_ratio * (radius_ratio**2) - B_ratio )

    # store nominal values of mu in separate array
    mu_val = unumpy.nominal_values(mu)

    # store combined uncertainties of mu in separate array
    mu_err = unumpy.std_devs(mu)

    # Calculate uncertainties of mu values from just field measurement uncertainties (= point-to-point fluctuations). Ignore geometry (=radius) uncertainties.
    mu_pp = ( B_ratio * (radius_ratio.n**2)
              + B_ratio
              - 2
              -2 * unumpy.sqrt( (B_ratio**2) * (radius_ratio.n**2) - B_ratio * (radius_ratio.n**2) - B_ratio + 1 )
              ) / ( B_ratio * (radius_ratio.n**2) - B_ratio )

    # store point-to-point uncertainties of mu in separate array
    mu_err_pp = unumpy.std_devs(mu_pp)

    # Calculate uncertainties of mu values from just geometry uncertainties (= systematic uncertainty, i.e. all points move together). Ignore field uncertainties.
    B_ratio_n = unumpy.nominal_values( B_ratio )
    mu_geom = ( B_ratio_n * (radius_ratio**2)
                + B_ratio_n
                - 2
                -2 * unumpy.sqrt( (B_ratio_n**2) * (radius_ratio**2) - B_ratio_n * (radius_ratio**2) - B_ratio_n + 1 )
                ) / ( B_ratio_n * (radius_ratio**2) - B_ratio_n )

    # store geometric uncertainties of mu in separate array
    mu_err_geom = unumpy.std_devs(mu_geom)

    return( mu_val, mu_err, mu_err_pp, mu_err_geom )
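
A call sketch for calc_mu (hypothetical numbers; assumes pandas as pd plus uncertainties' ufloat/unumpy as imported by the module; Bin and Bout are pandas Series of uncertain fields, the radii are ufloats):

import pandas as pd
from uncertainties import ufloat, unumpy
Bin = pd.Series(unumpy.uarray([0.50, 0.61, 0.72], 0.005))
Bout = pd.Series(unumpy.uarray([1.00, 1.20, 1.40], 0.005))
r_inner = ufloat(0.044, 0.001)  # m
r_outer = ufloat(0.050, 0.001)  # m
mu_val, mu_err, mu_err_pp, mu_err_geom = calc_mu(Bin, Bout, r_inner, r_outer)
print(mu_val)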
Example #25
def plotsim( ax, x, dat, lcolor, fcolor, marker, labelstr, alpha=1.0):
    #Find the value at resonance
    res =  abs(abs(x) - 6.44) < 0.1
    datres = dat[res]
    ratiores = np.mean( unumpy.nominal_values(dat[res]))
    normTOF = 0.4/0.66
    normTOF = 1.0
    ax.errorbar( x, unumpy.nominal_values(dat),\
                yerr=0.*unumpy.std_devs(dat),\
                capsize=0., elinewidth = 1. ,\
                fmt='.', ecolor=lcolor, mec=lcolor, \
                mew=1.0, ms=3.,\
                alpha = alpha, \
                marker=marker, mfc=fcolor, \
                label=labelstr) 
Example #26
def test_component_extraction():
    "Extracting the nominal values and standard deviations from an array"

    arr = unumpy.uarray(([1, 2], [0.1, 0.2]))

    assert numpy.all(unumpy.nominal_values(arr) == [1, 2])
    assert numpy.all(unumpy.std_devs(arr) == [0.1, 0.2])

    # unumpy matrices, in addition, should have nominal_values that
    # are simply numpy matrices (not unumpy ones, because they have no
    # uncertainties):
    mat = unumpy.matrix(arr)
    assert numpy.all(unumpy.nominal_values(mat) == [1, 2])
    assert numpy.all(unumpy.std_devs(mat) == [0.1, 0.2])
    assert type(unumpy.nominal_values(mat)) == numpy.matrix
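
Note: this test uses the legacy single-tuple signature unumpy.uarray((nominal_values, std_devs)); current releases of the uncertainties package take two separate arguments instead, e.g.:

from uncertainties import unumpy
arr = unumpy.uarray([1, 2], [0.1, 0.2])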
Example #27
    def butterfly(self, energy, flux_unit='TeV-1 cm-2 s-1'):
        """
        Compute butterfly.

        Parameters
        ----------
        energy : `~astropy.units.Quantity`
            Energies at which to evaluate the butterfly.
        flux_unit : str
            Flux unit for the butterfly.

        Returns
        -------
        butterfly : `~gammapy.spectrum.SpectrumButterfly`
            Butterfly object.
        """
        from uncertainties import unumpy

        flux = self.model(energy)

        butterfly = SpectrumButterfly()
        butterfly['energy'] = energy
        butterfly['flux'] = flux.to(flux_unit)

        # compute uncertainties
        umodel = self.model_with_uncertainties
        values = umodel(energy.value)

        # unit conversion factor, in case the units don't match
        conversion_factor = flux.to(flux_unit).value / unumpy.nominal_values(values)
        flux_err = u.Quantity(unumpy.std_devs(values), flux_unit) * conversion_factor

        butterfly['flux_lo'] = flux - flux_err
        butterfly['flux_hi'] = flux + flux_err
        return butterfly
Example #28
def plotRock(ax, xdat, ydat, labelstr, lc, fc, marker='o', Normalize=False):
    xdat = np.array(xdat)
    ydatval = unumpy.nominal_values(ydat) 
    ydaterr = unumpy.std_devs(ydat) 

    # We will plot how much above the norm is A2/A1
    # in units of the norm 
    if Normalize:
        ydatval = ydatval - 1.
        maxy = np.amax( ydatval)
        print "maxy =", maxy
    else:
        maxy = 1.0

    ax.errorbar( xdat, ydatval/maxy, yerr=ydaterr/maxy, \
               capsize=0., elinewidth = 1. ,\
               fmt='.', ecolor=lc, mec=lc, \
               mew=1., ms=5.,\
               marker=marker, mfc='None', \
               label=labelstr+', $B_{\mathrm{tof}}$=%.2g'%(maxy+1))
    # Fit data with a Gaussian
    fitdat = np.transpose( np.vstack( (xdat,ydat/maxy)))
    p0 = [1.0, 0., 10., 0.1]
    fun = fitlibrary.fitdict['Gaussian'].function
    ##pG, errorG = fitlibrary.fit_function( p0,  fitdat, fun)
    #print "Fitting with Gaussian:"
    #print pG
    #print errorG
    ##fitX, fitY = fitlibrary.plot_function(pG, \
    ##             np.linspace( xdat.min(), xdat.max(),120),fun) 
    ##ax.plot(  fitX, fitY, '-', c=lc, lw=1.0)
    ##return np.sqrt(2.)*pG[2], np.sqrt(2.)*errorG[2]
    return 1.,1.
Example #29
    def __add__(self,other):
        if not isinstance(other,Calibration):
            raise TypeError("Can only add two 'Calibration' instances together")

        vals = np.vstack([np.hstack([self.candidates,other.candidates]),
                          np.hstack([self.known,other.known])])
    
        #sort all rows by value of first to maintain relationship
        vals = vals[:,vals[0,:].argsort()] 
        
        #fit a new polynomial through the combined data sets
        calvals = polyfit(unumpy.nominal_values(vals[0,:]),
                          vals[1,:],
                          unumpy.std_devs(vals[0,:]))    
        xfit = np.linspace(0,vals[0,:].max(),200)
        yfit = np.poly1d(calvals)(xfit)   
        
        #store everything again 
        cal = Calibration(calvals,vals[0,:],vals[1,:],xfit,yfit)        
        cal.match=[]
        cal.match.extend(self.match) 
        cal.match.extend(other.match) 
        cal.search=[]
        cal.search.extend(self.search) 
        cal.search.extend(other.search) 
        return cal
Example #30
def clip_pypi_uncertainties(field, low, high, nanval=0.):
    """
    Clip the values to the range, returning the indices of the values
    which were clipped.  Note that this modifies field in place. NaN
    values are clipped to the nanval default.

    *field* is an array from the uncertainties package, whose values retain
    their variance even if they are forced within the bounds.  Clipping is
    performed by subtracting from the uncertain value, which will help
    preserve correlations when performing further operations on the clipped
    values.

    *low*, *high* and *nanval* are floats.
    """
    # Move value to the limit without changing the correlated errors.
    # This is probably wrong, but it is less wrong than other straightforward
    # options, such as setting x to the limit with zero uncertainty.  At least
    # the clipped points will be flagged.
    index = np.isnan(nominal_values(field))
    field[index] = [ufloat(nanval, 0.) for v in field[index]]
    reject = index

    index = field < low
    field[index] = [v+(low-v.n) for v in field[index]]
    reject |= index

    index = field > high
    field[index] = [v-(v.n-high) for v in field[index]]
    reject |= index

    return reject
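
A call sketch for clip_pypi_uncertainties (hypothetical data; assumes numpy as np and uncertainties' ufloat/nominal_values as imported by the module):

import numpy as np
from uncertainties import ufloat
from uncertainties.unumpy import nominal_values
field = np.array([ufloat(-0.3, 0.1), ufloat(0.4, 0.1), ufloat(1.7, 0.2)], dtype=object)
rejected = clip_pypi_uncertainties(field, low=0., high=1.)
print(nominal_values(field))  # first and last values moved to 0.0 and 1.0, errors kept
print(rejected)               # [ True False  True]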
Example #31
#Prepare data
O_values = catalogue_df.loc[idcs].OI_HI_emis2nd.values
N_values = catalogue_df.loc[idcs].NI_HI_emis2nd.values
HeII_HI = catalogue_df.loc[idcs].HeII_HII_from_O_emis2nd.values
HeIII_HI = catalogue_df.loc[idcs].HeIII_HII_from_O_emis2nd.values
objects = catalogue_df.loc[idcs].index.values

He_ratio = HeII_HI / HeIII_HI

print objects

N_O_ratio = N_values / O_values

for i in range(len(N_O_ratio)):
    print objects[i], O_values[i], N_values[i], N_O_ratio[i]

dz.data_plot(unumpy.nominal_values(HeII_HI),
             unumpy.nominal_values(N_O_ratio),
             label='Abundances from our sample',
             markerstyle='o',
             x_error=unumpy.std_devs(HeII_HI),
             y_error=unumpy.std_devs(N_O_ratio))
dz.plot_text(unumpy.nominal_values(HeII_HI),
             unumpy.nominal_values(N_O_ratio),
             text=objects)

dz.FigWording(r'$HeII/HeIII$', r'$N/O$', r'N/O relation for HII galaxy sample')

dz.display_fig()
Example #32
def mcllh_mean(actual_values, expected_values):
    """Compute the log-likelihood (llh) based on LMean in table 2 - https://doi.org/10.1007/JHEP06(2019)030
    accounting for finite MC statistics.
    This is the second most recommended likelihood in the paper.

    Parameters
    ----------
    actual_values, expected_values : numpy.ndarrays of same shape

    Returns
    -------
    llh : numpy.ndarray of same shape as the inputs
        llh corresponding to each pair of elements in `actual_values` and
        `expected_values`.

    Notes
    -----
    *
    """
    assert actual_values.shape == expected_values.shape

    # Convert to simple numpy arrays containing floats
    actual_values = unp.nominal_values(actual_values).ravel()
    sigma = unp.std_devs(expected_values).ravel()
    expected_values = unp.nominal_values(expected_values).ravel()

    with np.errstate(invalid='ignore'):
        # Mask off any nan expected values (these are assumed to be ok)
        actual_values = np.ma.masked_invalid(actual_values)
        expected_values = np.ma.masked_invalid(expected_values)

        # TODO: How should we handle nan / masked values in the "data"
        # (actual_values) distribution? How about negative numbers?

        # Make sure actual values (aka "data") are valid -- no infs, no nans,
        # etc.
        if np.any((actual_values < 0) | ~np.isfinite(actual_values)):
            msg = (
                '`actual_values` must be >= 0 and neither inf nor nan...\n' +
                maperror_logmsg(actual_values))
            raise ValueError(msg)

        # Check that new array contains all valid entries
        if np.any(expected_values < 0.0):
            msg = ('`expected_values` must all be >= 0...\n' +
                   maperror_logmsg(expected_values))
            raise ValueError(msg)

        # Replace 0's with small positive numbers to avoid inf in log
        np.clip(expected_values,
                a_min=SMALL_POS,
                a_max=np.inf,
                out=expected_values)

    llh_val = likelihood_functions.poisson_gamma(data=actual_values,
                                                 sum_w=expected_values,
                                                 sum_w2=sigma**2,
                                                 a=0,
                                                 b=0)

    return llh_val
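
A call sketch for mcllh_mean (hypothetical histograms; assumes numpy as np, uncertainties.unumpy as unp, and the module-level helpers the function references, such as likelihood_functions and SMALL_POS):

from uncertainties import unumpy as unp
actual = unp.uarray([12., 7., 3.], 0.)                    # observed counts
expected = unp.uarray([11.5, 7.8, 2.6], [0.8, 0.6, 0.4])  # MC prediction with stat. errors
llh = mcllh_mean(actual, expected)
print(llh)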
Example #33
 def _permittivity_iterate(self,corr=False):
     """
     Set up iteration and plot results. Corrected data currently only supported for un-shorted measurements.
     """
     number_of_fits = self.fits
     # Get electromagnetic properties
     # Note: does not currently check if using corrected data
     if self.start_freq:     #start iteration from self.start_freq
         freq = self.freq[self.freq>=self.start_freq]
     else:   #use full frequency range
         freq = self.freq
     # Get epsilon
     epsilon = -1j*unp.nominal_values(self.avg_lossfac);
     epsilon += unp.nominal_values(self.avg_dielec)
     epsilon = epsilon[self.freq>=freq[0]]
     # Uarrays for plotting
     epsilon_plot_real = self.avg_dielec[self.freq>=freq[0]]
     epsilon_plot_imag = self.avg_lossfac[self.freq>=freq[0]]
     # If iterating for mu, get mu
     if self.fit_mu:
         if self.meas.nrw:   #get epsilon and mu
             mu = -1j*unp.nominal_values(self.avg_mu_real);
             mu += unp.nominal_values(self.avg_mu_imag)
             mu = mu[self.freq>=freq[0]]
         else:   #raise exception if nrw not used
             raise Exception('permittivitycalc needs to be run with nrw=True if fit_mu=True')
         # Uarrays for plotting
         mu_plot_real = self.avg_mu_real[self.freq>=freq[0]]
         mu_plot_imag = self.avg_mu_imag[self.freq>=freq[0]]
         
     ## First, fit Cole-Cole model(s) to analytical results to get initial guess
     # If in Trial mode and number_of_poles is a list, fit for each 
     # number_of_poles (and number_of_poles_mu) in the list(s) and report statistics
     # If not in trial mode, only one value for the number of poles may be 
     # given for each of epsilon and mu
     if isinstance(self.poles,list) and not self.trial and len(self.poles) != 1:
         raise Exception('Can only have one value for number_of_poles when trial_run=False.')
     # if trial_run=False and number_of_poles is a list of length 1, make it an int
     elif isinstance(self.poles,list) and not self.trial and len(self.poles) == 1:
         self.poles = self.poles[0]
     if self.fit_mu and isinstance(self.poles_mu,list) and not self.trial and len(self.poles_mu) != 1:
         raise Exception('Can only have one value for number_of_poles_mu when trial_run=False.')
     elif self.fit_mu and isinstance(self.poles_mu,list) and not self.trial and len(self.poles_mu) == 1:
         self.poles_mu = self.poles_mu[0]
     
     # When trial_run is False, then self.poles should be an int while number_of_poles should be a list (of length 1)
     number_of_poles = self.poles
     if self.fit_mu:
         number_of_mu_poles = self.poles_mu
     if not isinstance(self.poles,list): # make sure number_of_poles is a list
         number_of_poles = [number_of_poles]
     if self.fit_mu and not isinstance(self.poles_mu,list):
         number_of_mu_poles = [number_of_mu_poles]
     if self.fit_mu and len(number_of_poles) != len(number_of_mu_poles):
             raise Exception('Number of poles must be the same for epsilon and mu (len(number_of_poles) == len(number_of_poles_mu))')
     
     # Create a set of Parameters to the Cole-Cole model
     params = []
     if self.fit_mu:
         for m in range(len(number_of_mu_poles)):
             params.append(self._iteration_parameters([number_of_poles[m],number_of_mu_poles[m]],initial_values=self.initial_parameters,mu=True))
     else:
         for n in range(len(number_of_poles)):
             params.append(self._iteration_parameters(number_of_poles[n],initial_values=self.initial_parameters))
         
     # Iterate to find parameters
     result = []
     for n in range(len(number_of_poles)):
         # if fit_mu, fix mu parameters
         if self.fit_mu:
             params[n] = self._fix_parameters(params[n],number_of_mu_poles[n],mu=True)
         miner = Minimizer(self._colecole_residuals,params[n],\
                           fcn_args=(number_of_poles[n],freq,epsilon))
         result.append(miner.minimize(method='least_squares'))
     if self.fit_mu:
         result_mu = []
         for m in range(len(number_of_mu_poles)):
             # unfix mu parameters and fix epsilon parameters
             params[m] = self._fix_parameters(params[m],number_of_mu_poles[m],unfix=True,mu=True)
             params[m] = self._fix_parameters(params[m],number_of_poles[m],unfix=False,mu=False)
             # iterate
             miner_mu = Minimizer(self._colecole_residuals,params[m],\
                           fcn_args=(number_of_mu_poles[m],freq,mu,True))
             result_mu.append(miner_mu.minimize(method='least_squares'))
 
     # Write fit report
     for n in range(len(number_of_poles)):
         print('Results for epsilon with {} poles:'.format(str(number_of_poles[n])))
         report_fit(result[n])
     if self.fit_mu:
         for m in range(len(number_of_mu_poles)):
             print('Results for mu with {} poles:'.format(str(number_of_mu_poles[m])))
             report_fit(result_mu[m])
     
     # Get parameter values
     values = []
     for n in range(len(number_of_poles)):
         values_temp = result[n].params
         values.append(values_temp.valuesdict())
     if self.fit_mu:
         values_mu = []
         for m in range(len(number_of_mu_poles)):
             values_mu_temp = result_mu[m].params
             values_mu.append(values_mu_temp.valuesdict())
         if not self.trial:    
             # Merge results into single object for initial guess of Bayesian fit
             if self.poles_mu == 0:
                 values[0]['mu_real'] = values_mu[0]['mu_real']
                 values[0]['mu_imag'] = values_mu[0]['mu_imag']
             else:
                 values[0]['mu_inf'] = values_mu[0]['mu_inf']
                 for m in range(self.poles_mu):
                     m+=1
                     values[0]['mu_dc_{}'.format(m)] = values_mu[0]['mu_dc_{}'.format(m)]
                     values[0]['mutau_{}'.format(m)] = values_mu[0]['mutau_{}'.format(m)]
                     values[0]['mualpha_{}'.format(m)] = values_mu[0]['mualpha_{}'.format(m)]
         
     # Calculate model EM parameters
     for n in range(len(number_of_poles)):
         epsilon_iter = self._colecole(number_of_poles[n],freq,values[n])
         # Plot                    
         pc.pplot.make_plot([freq,freq],[epsilon_plot_real,epsilon_iter.real],legend_label=['Analytical','Iterative ({} poles)'.format(str(number_of_poles[n]))])
         pc.pplot.make_plot([freq,freq],[epsilon_plot_imag,-epsilon_iter.imag],plot_type='lf',legend_label=['Analytical','Iterative ({} poles)'.format(str(number_of_poles[n]))])
         # Find values at 8.5 GHz by finding index where freq is closest to 8.5 GHz
         ep_real = epsilon_iter.real[np.where(freq == freq[np.abs(freq - 8.5e9).argmin()])][0]
         ep_imag = epsilon_iter.imag[np.where(freq == freq[np.abs(freq - 8.5e9).argmin()])][0]
         print(ep_real)
         print(ep_imag)
     if self.fit_mu:
         for m in range(len(number_of_mu_poles)):
             mu_iter = self._colecole(number_of_mu_poles[m],freq,values_mu[m],mu=True)
             if number_of_mu_poles[m] == 0:
                 mu_iter =  mu_iter*np.ones(len(freq))
             pc.pplot.make_plot([freq,freq],[mu_plot_real,mu_iter.real],plot_type='ur',legend_label=['Analytical mu','Iterative mu ({} poles)'.format(str(number_of_mu_poles[m]))])
             pc.pplot.make_plot([freq,freq],[mu_plot_imag,-mu_iter.imag],plot_type='ui',legend_label=['Analytical mu','Iterative mu ({} poles)'.format(str(number_of_mu_poles[m]))])
             mu_real = mu_iter.real[np.where(freq == freq[np.abs(freq - 8.5e9).argmin()])][0]
             mu_imag = mu_iter.imag[np.where(freq == freq[np.abs(freq - 8.5e9).argmin()])][0]
             print(mu_real)
             print(mu_imag)
     
     # If not in trial mode (no iterative fitting of sparams), perform iteration
     if not self.trial:
         # Check if using corrected S-params
         if corr:
             s11 = unp.nominal_values(self.s11)
             L = self.meas.Lcorr
         else:
             # Use shorted S11 if available
             if self.shorted:
                 s11 = unp.nominal_values(self.s11_short)
             else:
                 s11 = unp.nominal_values(self.s11)
             L = self.meas.L
         s21 = unp.nominal_values(self.s21)
         s12 = unp.nominal_values(self.s12)
         s22 = unp.nominal_values(self.s22)
         
         # Start arrays at start_freq
         s11 = np.array((s11[0][self.freq>=freq[0]],s11[1][self.freq>=freq[0]]))
         s21 = np.array((s21[0][self.freq>=freq[0]],s21[1][self.freq>=freq[0]]))
         s12 = np.array((s12[0][self.freq>=freq[0]],s12[1][self.freq>=freq[0]]))
         s22 = np.array((s22[0][self.freq>=freq[0]],s22[1][self.freq>=freq[0]]))
         # Cast measured sparams to complex
         s11c = 1j*s11[0]*np.sin(np.radians(s11[1]));
         s11c += s11[0]*np.cos(np.radians(s11[1]))
         s22c = 1j*s22[0]*np.sin(np.radians(s22[1]));
         s22c += s22[0]*np.cos(np.radians(s22[1]))
         s21c = 1j*s21[0]*np.sin(np.radians(s21[1]));
         s21c += s21[0]*np.cos(np.radians(s21[1]))
         s12c = 1j*s12[0]*np.sin(np.radians(s12[1]));
         s12c += s12[0]*np.cos(np.radians(s12[1]))
         
         ## Perform the fits according to number_of_fits
         values_sp = values[0] # Use Cole-Cole fit for initial values
         for n in range(number_of_fits):
             # Create a set of Parameters
             if self.initial_parameters: # Use given initial values instead of generated ones
                 initial_values = self.initial_parameters
             else:
                 initial_values = values_sp
             if self.fit_mu:
                 params = self._iteration_parameters([number_of_poles[0],number_of_mu_poles[0]],initial_values,mu=True)
             else:
                 params = self._iteration_parameters(number_of_poles,initial_values)
             # Fit data
             result_sp, time_str = self._sparam_iterator(params,L,freq[0],s11c,s21c,s12c,s22c)
             # Update initial values for next run
             values_sp = result_sp.params
             values_sp = values_sp.valuesdict()
         
         # Get final parameter values
         values_sp = result_sp.params
         values_sp = values_sp.valuesdict()
         
         # Calculate model EM parameters
         epsilon_iter_sp = self._colecole(number_of_poles[0],freq,values_sp)
         if self.fit_mu:
             mu_iter_sp = self._colecole(number_of_mu_poles[0],freq,values_sp,mu=True)
         else:
             mu_iter_sp = 1
         
         # Plot                    
         pc.pplot.make_plot([freq,freq],[epsilon_plot_real,epsilon_iter_sp.real],legend_label=['Analytical','Iterative'])
         pc.pplot.make_plot([freq,freq],[epsilon_plot_imag,-epsilon_iter_sp.imag],plot_type='lf',legend_label=['Analytical','Iterative'])
         if self.fit_mu:
             pc.pplot.make_plot([freq,freq],[mu_plot_real,mu_iter_sp.real],plot_type='ur',legend_label=['Analytical mu','Iterative mu'])
             pc.pplot.make_plot([freq,freq],[mu_plot_imag,-mu_iter_sp.imag],plot_type='ui',legend_label=['Analytical mu','Iterative mu'])
     
         # Plot s-params
         s11_predicted, s21_predicted, s12_predicted = self._model_sparams(freq,L/100,epsilon_iter_sp,mu_iter_sp)
         # Plot    
         f,ax = plt.subplots(3, 2, sharex=True, figsize=(18, 15))
         ax[0,0].plot(freq,np.absolute(s11c),label='Measured') #s11mag
         ax[0,0].plot(freq,np.absolute(s11_predicted),label='Predicted')
         ax[0,0].set_title('Magnitude of S11')
         ax[0,1].plot(freq,np.angle(s11c),label='Measured') #s11phase
         ax[0,1].plot(freq,np.angle(s11_predicted),label='Predicted')
         ax[0,1].set_title('Phase of S11')
         ax[1,0].plot(freq,np.absolute(s21c),label='Measured') #s21mag
         ax[1,0].plot(freq,np.absolute(s21_predicted),label='Predicted')
         ax[1,0].set_title('Magnitude of S21')
         ax[1,1].plot(freq,np.angle(s21c),label='Measured') #s21phase
         ax[1,1].plot(freq,np.angle(s21_predicted),label='Predicted')
         ax[1,1].set_title('Phase of S21')
         ax[2,0].plot(freq,np.absolute(s12c),label='Measured') #s12mag
         ax[2,0].plot(freq,np.absolute(s12_predicted),label='Predicted')
         ax[2,0].set_title('Magnitude of S12')
         ax[2,1].plot(freq,np.angle(s12c),label='Measured') #s12phase
         ax[2,1].plot(freq,np.angle(s12_predicted),label='Predicted')
         ax[2,1].set_title('Phase of S12')
         # Hide redundant x-axis tick marks
         plt.setp([a.get_xticklabels() for a in ax[0, :]], visible=False)
         ax[0,0].legend(loc=1)
         plt.show()
         
         #Corner plot
         corner.corner(result_sp.flatchain, labels=result_sp.var_names, \
                       truths=list(result_sp.params.valuesdict().values()))
         
         #Plot traces
         nplots = len(result_sp.var_names)
         fig, axes = plt.subplots(nplots, 1, sharex=True, figsize=(8,nplots*1.4))
         for n in range(nplots):
             axes[n].plot(result_sp.chain[:, :, n].T, color="k", alpha=0.4)
             axes[n].yaxis.set_major_locator(MaxNLocator(5))
             axes[n].set_ylabel(result_sp.var_names[n])
         axes[nplots-1].set_xlabel("step number")
         fig.tight_layout(h_pad=0.0)
         plt.show()
         
         print(time_str)
         
         # Return results
         if self.fit_mu:
             return epsilon_iter_sp, mu_iter_sp, values_sp, result_sp
         else:
             return epsilon_iter_sp, values_sp, result_sp
Example #34
        trendline_all = cHbeta_all_MagEr * ratios_dict['all_x'] + n_all_MagEr
        cHbeta_in_MagEr, n_in_MagEr = LinfitLinearRegression(
            ratios_dict['in_x'], ratios_dict['in_y'])
        trendline_in = cHbeta_in_MagEr * ratios_dict['in_x'] + n_in_MagEr

        cHbeta_all_MagEr2, n_all_MagEr2 = LinfitLinearRegression(
            ratios_dict2['all_x'], ratios_dict2['all_y'])
        trendline_all2 = cHbeta_all_MagEr2 * ratios_dict2[
            'all_x'] + n_all_MagEr2
        cHbeta_in_MagEr2, n_in_MagEr2 = LinfitLinearRegression(
            ratios_dict2['in_x'], ratios_dict2['in_y'])
        trendline_in2 = cHbeta_in_MagEr2 * ratios_dict2['in_x'] + n_in_MagEr2

        #--Blue points
        if len(ratios_dict['blue_x']) > 0:
            dz.data_plot(unumpy.nominal_values(ratios_dict['blue_x'][2:]),
                         unumpy.nominal_values(ratios_dict['blue_y'][2:]),
                         'ISIS blue arm recombination ratios',
                         markerstyle='o',
                         color='#0072B2',
                         y_error=unumpy.std_devs(ratios_dict['blue_y'][2:]))
            dz.plot_text(unumpy.nominal_values(ratios_dict['blue_x'][2:]),
                         unumpy.nominal_values(ratios_dict['blue_y'][2:]),
                         ratios_dict['blue_ions'][2:],
                         color='#0072B2',
                         fontsize=18)

        #--Red points
        if len(ratios_dict['red_x']) > 0:
            dz.data_plot(unumpy.nominal_values(ratios_dict['red_x']),
                         unumpy.nominal_values(ratios_dict['red_y']),
Example #35
INPUT = "/home/federico/Laboratorio3/relazione6/datiIntegratore.txt"
OUTPUT = "/home/federico/Laboratorio3/relazione6/datiIntegratoreEstesi.txt"

file = open(OUTPUT, "w")

f, df, Vout, dVout, t, dt = pylab.loadtxt(INPUT, unpack=True)

#In the visualizations I need to introduce the errors

F = unumpy.uarray(f, df)
T = unumpy.uarray(t, dt) / 1000
V_OUT = unumpy.uarray(Vout, dVout)
PHI = 2 * F * T
PHI = 2 - PHI

for i in range(len(f)):
    file.write(str(f[i]))
    file.write("\t")
    file.write(str(df[i]))
    file.write("\t")
    file.write(str(Vout[i]))
    file.write("\t")
    file.write(str(dVout[i]))
    file.write("\t")
    file.write(str(unumpy.nominal_values(PHI)[i]))
    file.write("\t")
    file.write(str(unumpy.std_devs(PHI)[i]))
    file.write("\n")

file.close()
Example #36
l = data_schwing[:, 0]

print "Schwingung:"
(table, w_I) = schwingung_table(*np.transpose(data_schwing))
print table
w_I_erw = (w_1 + w_2) / 2.
print vergleich_table(l, w_I, w_I_erw)
print "Schwebung:"
(table, w_II) = schwingung_table(*np.transpose(data_schweb))
print table
w_II_erw = (w_2 - w_1) / 2.
print vergleich_table(l, w_II, w_II_erw)

print u"\n# Kopplungsgrade"

V_l = l**2 / np.roll(l, 1)**2
K = (w_2**2 - w_1**2) / 2 / w_1**2
print K
V_w = K / np.roll(K, 1)
diff = V_l - V_w
K_labels = ['K_1', 'K_2', 'K_3']
K_labels = [
    K_labels[i] + '/' + np.roll(K_labels, 1)[i] for i in range(len(K_labels))
]
print papstats.table(labels=['', 'V_l', 'V_w', 'V_l-V_w', 'Sigmabereich'],
                     columns=[
                         K_labels, V_l, V_w, diff,
                         unp.nominal_values(np.abs(diff)) / unp.std_devs(diff)
                     ])
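
# Self-contained sketch of the coupling degree K = (w_2^2 - w_1^2) / (2 w_1^2)
# computed above, with assumed normal-mode frequencies.
import uncertainties.unumpy as unp

w_1_demo = unp.uarray([2.01, 1.98, 2.00], [0.02, 0.02, 0.02])  # hypothetical, rad/s
w_2_demo = unp.uarray([2.35, 2.52, 2.71], [0.02, 0.02, 0.02])
K_demo = (w_2_demo**2 - w_1_demo**2) / 2 / w_1_demo**2
print(K_demo)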
Example #37
0
def analyze_spektrallinien(fileprefix, figindex, crstl, sl, d=None, y=None):

    data = np.append(np.loadtxt(fileprefix + '.b.1.txt', skiprows=1),
                     np.loadtxt(fileprefix + '.b.2.txt', skiprows=1),
                     axis=0)

    b, n = data[:, 0], data[:, 1]
    n = unp.uarray(n, np.sqrt(n * 20) / 20)

    sl = [[(b >= bounds[0]) & (b <= bounds[1]) for bounds in sl_row]
          for sl_row in sl]

    def fit_gauss(x, m, s, A, n_0):
        return A / np.sqrt(2 * const.pi) / s * np.exp(-((x - m)**2) / 2 /
                                                      (s**2)) + n_0

    r = []

    plt.clf()
    papstats.plot_data(b, n)
    papstats.savefig_a4('3.' + str(figindex) + '.a.png')

    plt.clf()
    plt.suptitle('Diagramm 3.' + str(figindex) +
                 u': Spektrallinien von Molybdän bei Vermessung mit einem ' +
                 crstl + '-Kristall')
    for i in range(2):
        r.append([])
        # Line
        for k in range(2):
            # Order
            b_k = b[sl[i][k]]
            n_k = n[sl[i][k]]
            xspace = np.linspace(b_k[0], b_k[-1], num=1000)
            plt.subplot(2, 2, i * 2 + k + 1)
            plt.xlim(xspace[0], xspace[-1])
            if i == 1:
                plt.xlabel(u'Bestrahlungswinkel ' + r'$\beta \, [^\circ]$')
            if k == 0:
                plt.ylabel(u'Zählrate ' + r'$n \, [\frac{Ereignisse}{s}]$')
            plt.title('$K_{' + (r'\alpha' if i == 0 else r'\beta') + '}$ (' +
                      str(k + 1) + '. Ordnung)')
            papstats.plot_data(b_k, n_k)
            # Gaussian fit
            popt, pstats = papstats.curve_fit(fit_gauss,
                                              b_k,
                                              n_k,
                                              p0=[
                                                  b_k[0] +
                                                  (b_k[-1] - b_k[0]) / 2,
                                                  (b_k[-1] - b_k[0]) / 4,
                                                  np.sum(n_k).n, n_k[0].n
                                              ])
            plt.fill_between(b_k,
                             0,
                             unp.nominal_values(n_k),
                             color='g',
                             alpha=0.2)
            FWHM = popt[1] * 2 * unp.sqrt(2 * unp.log(2))
            plt.hlines(popt[3].n +
                       (fit_gauss(xspace, *unp.nominal_values(popt)).max() -
                        popt[3].n) / 2,
                       popt[0].n - FWHM.n / 2,
                       popt[0].n + FWHM.n / 2,
                       color='black',
                       lw=2,
                       label='$' +
                       papstats.pformat(FWHM, label='FWHM', unit=r'^\circ') +
                       '$')
            papstats.plot_fit(fit_gauss,
                              popt,
                              xspace=xspace,
                              plabels=[r'\mu', r'\sigma', 'A', 'n_0'],
                              punits=['^\circ', '^\circ', 's^{-1}', 's^{-1}'])
            plt.ylim(
                unp.nominal_values(n_k).min() -
                n_k[unp.nominal_values(n_k).argmin()].s,
                unp.nominal_values(n_k).max() +
                (unp.nominal_values(n_k).max() -
                 unp.nominal_values(n_k).min()))
            plt.legend(loc='upper center', prop={'size': 10})

            b_S = unc.ufloat(popt[0].n, np.abs(popt[1].n))
            print "Winkel:", papstats.pformat(b_S, unit='°', format='.2u')
            if y is None:
                r[i].append(y_bragg(b_S, n=k + 1))
                print "Wellenlänge der Linie:", papstats.pformat(r[i][k] /
                                                                 const.pico,
                                                                 label='y',
                                                                 unit='pm',
                                                                 format='.2u')
            if d is None:
                r[i].append(
                    (k + 1) * y[i][k] / unc.umath.sin(b_S * const.degree))
                print "Gitterkonstante:", papstats.pformat(r[i][k] /
                                                           const.pico,
                                                           label='a',
                                                           unit='pm',
                                                           format='.2u')

    papstats.savefig_a4('3.' + str(figindex) + '.png')

    return r
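
# Quick sketch of the FWHM relation used in the plot above,
# FWHM = 2*sigma*sqrt(2*ln 2), with an assumed Gaussian width.
import numpy as np
import uncertainties as unc

sigma_demo = unc.ufloat(0.21, 0.02)                  # hypothetical width in degrees
FWHM_demo = 2 * np.sqrt(2 * np.log(2)) * sigma_demo  # ~2.3548 * sigma
print(FWHM_demo)                                     # uncertainty propagated automatically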
Example #38
0
import sys

import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from numpy import genfromtxt
from scipy.optimize import curve_fit
import scipy
from scipy import stats
from uncertainties import unumpy
import uncertainties.unumpy as unp
from uncertainties import ufloat
import scipy.odr as sodr

file = sys.argv[1]
data = genfromtxt(file, delimiter=';')
w_B = unumpy.uarray(data[1:, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
w_G = unumpy.uarray(data[1:, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
w_B_value = unp.nominal_values(w_B)
w_G_value = unp.nominal_values(w_G)

lamda = np.array([
    600.752, 671.643, 623.440, 579.066, 576.960, 546.074, 491.607, 435.833,
    434.749, 433.922, 410.805, 407.783, 404.656
]) * 1e-9

d = lamda / (unp.sin(unp.radians(w_G)) -
             unp.sin(unp.radians(w_G) + unp.radians(w_B)))
d_no_error = unp.nominal_values(d)
d_error = unumpy.std_devs(d)
d_average = np.sum(d_no_error) / len(d_no_error)
d_average_error = 1 / np.sqrt(len(d_error)) * np.sqrt(np.sum(
    d_error * d_error))
print(
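
# Minimal sketch of the propagation above for a single line: the grating
# constant follows from d = lambda / (sin(w_G) - sin(w_G + w_B)), with
# assumed angles and the Hg green line.
import uncertainties.unumpy as unp

w_G_demo = unp.uarray([30.0], [1.0])   # hypothetical grating angle in degrees
w_B_demo = unp.uarray([-12.0], [1.0])  # hypothetical deflection angle in degrees
lamda_demo = 546.074e-9                # wavelength in metres

d_demo = lamda_demo / (unp.sin(unp.radians(w_G_demo)) -
                       unp.sin(unp.radians(w_G_demo) + unp.radians(w_B_demo)))
print(d_demo)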
Example #39
0
def chi2(actual_values, expected_values):
    """Compute the chi-square between each value in `actual_values` and
    `expected_values`.

    Parameters
    ----------
    actual_values, expected_values : numpy.ndarrays of same shape

    Returns
    -------
    chi2 : numpy.ndarray of same shape as inputs
        chi-squared values corresponding to each pair of elements in the inputs

    Notes
    -----
    * Uncertainties are not propagated through this calculation.
    * Values in each input are clipped to the range [SMALL_POS, inf] prior to
      the calculation to avoid infinities due to the divide function.

    """
    if actual_values.shape != expected_values.shape:
        raise ValueError('Shape mismatch: actual_values.shape = %s,'
                         ' expected_values.shape = %s' %
                         (actual_values.shape, expected_values.shape))

    # Convert to simple numpy arrays containing floats
    if not isbarenumeric(actual_values):
        actual_values = unp.nominal_values(actual_values)
    if not isbarenumeric(expected_values):
        expected_values = unp.nominal_values(expected_values)

    with np.errstate(invalid='ignore'):
        # Mask off any nan expected values (these are assumed to be ok)
        actual_values = np.ma.masked_invalid(actual_values)
        expected_values = np.ma.masked_invalid(expected_values)

        # TODO: this check (and the same for `actual_values`) should probably
        # be done elsewhere... maybe?
        if np.any(actual_values < 0):
            msg = ('`actual_values` must all be >= 0...\n' +
                   maperror_logmsg(actual_values))
            raise ValueError(msg)

        if np.any(expected_values < 0):
            msg = ('`expected_values` must all be >= 0...\n' +
                   maperror_logmsg(expected_values))
            raise ValueError(msg)

        # TODO: Is this okay to do? Mathematically suspect at best, and can
        #       still destroy a minimizer's hopes and dreams...

        # Replace 0's with small positive numbers to avoid inf in division
        np.clip(actual_values,
                a_min=SMALL_POS,
                a_max=np.inf,
                out=actual_values)
        np.clip(expected_values,
                a_min=SMALL_POS,
                a_max=np.inf,
                out=expected_values)

        delta = actual_values - expected_values

    if np.all(np.abs(delta) < 5 * FTYPE_PREC):
        return np.zeros_like(delta, dtype=FTYPE)

    assert np.all(actual_values > 0), str(actual_values)
    #chi2_val = np.square(delta) / actual_values
    chi2_val = np.square(delta) / expected_values
    assert np.all(chi2_val >= 0), str(chi2_val[chi2_val < 0])
    return chi2_val
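
# Standalone sketch of the per-bin calculation above on plain arrays, with
# SMALL_POS stubbed in for the project's module-level constant.
import numpy as np

SMALL_POS = 1e-10  # stand-in value

actual = np.array([12.0, 0.0, 7.5])
expected = np.array([10.0, 0.5, 7.5])

# clip both inputs away from zero, then apply (a - e)**2 / e per bin
a = np.clip(actual, SMALL_POS, np.inf)
e = np.clip(expected, SMALL_POS, np.inf)
print(np.square(a - e) / e)  # last bin is exactly 0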
Example #40
0
def llh(actual_values, expected_values):
    """Compute the log-likelihoods (llh) that each count in `actual_values`
    came from the the corresponding expected value in `expected_values`.

    Parameters
    ----------
    actual_values, expected_values : numpy.ndarrays of same shape

    Returns
    -------
    llh : numpy.ndarray of same shape as the inputs
        llh corresponding to each pair of elements in `actual_values` and
        `expected_values`.

    Notes
    -----
    * Uncertainties are not propagated through this calculation.
    * Values in `expected_values` are clipped to the range [SMALL_POS, inf]
      prior to the calculation to avoid infinities due to the log function.

    """
    assert actual_values.shape == expected_values.shape

    # Convert to simple numpy arrays containing floats
    if not isbarenumeric(actual_values):
        actual_values = unp.nominal_values(actual_values)
    if not isbarenumeric(expected_values):
        expected_values = unp.nominal_values(expected_values)

    with np.errstate(invalid='ignore'):
        # Mask off any nan expected values (these are assumed to be ok)
        actual_values = np.ma.masked_invalid(actual_values)
        expected_values = np.ma.masked_invalid(expected_values)

        # Check that new array contains all valid entries
        if np.any(actual_values < 0):
            msg = ('`actual_values` must all be >= 0...\n' +
                   maperror_logmsg(actual_values))
            raise ValueError(msg)

        # TODO: How should we handle nan / masked values in the "data"
        # (actual_values) distribution? How about negative numbers?

        # Make sure actual values (aka "data") are valid -- no infs, no nans,
        # etc.
        if np.any((actual_values < 0) | ~np.isfinite(actual_values)):
            msg = (
                '`actual_values` must be >= 0 and neither inf nor nan...\n' +
                maperror_logmsg(actual_values))
            raise ValueError(msg)

        # Check that new array contains all valid entries
        if np.any(expected_values < 0.0):
            msg = ('`expected_values` must all be >= 0...\n' +
                   maperror_logmsg(expected_values))
            raise ValueError(msg)

        # Replace 0's with small positive numbers to avoid inf in log
        np.clip(expected_values,
                a_min=SMALL_POS,
                a_max=np.inf,
                out=expected_values)

    llh_val = actual_values * np.log(expected_values) - expected_values

    # Do following to center around 0
    llh_val -= actual_values * np.log(actual_values) - actual_values

    return llh_val
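
# Quick sketch of the centered Poisson llh above on plain arrays; by
# construction a bin contributes exactly 0 when the observed count matches
# the expectation.
import numpy as np

actual = np.array([10.0, 3.0])
expected = np.array([8.0, 3.0])

llh_val = actual * np.log(expected) - expected
llh_val -= actual * np.log(actual) - actual  # center so a perfect match gives 0
print(llh_val)  # second bin is 0.0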
Example #41
0
Nu_ = np.array([129.0, 143.0, 144.0, 136.0, 126.0, 158.0])
tu = 300

Nu_ *= 10**(-1)
Nu_err = np.sqrt(Nu_)
Nu = unp.uarray(Nu_, Nu_err)

Nu_mean = np.mean(Nu)
# Vanadium
# -> Curve fit
Nvtrue = Nv - Nu_mean
#print(Nvtrue)
# --> linregress
params1, cov1 = np.polyfit(tv,
                           np.log(unp.nominal_values(Nvtrue)),
                           deg=1,
                           cov=True)
errors1 = np.sqrt(np.diag(cov1))
params1[0] *= (-1)
print(params1, errors1)

# -> Plot
x1 = np.linspace(0, 1230)

plt.plot(x1, params1[1] - params1[0] * x1, 'b-', label="Lineare Regression")
plt.errorbar(tv,
             unp.nominal_values(unp.log(Nvtrue)),
             yerr=unp.std_devs(unp.log(Nvtrue)),
             fmt='g.',
             label="Messwerte mit Fehlern")
Example #42
0
                                               curvefit_matrix[0, :], 84)
    n_Median_cf, n_16th_cf, n_84th_cf = (median(curvefit_matrix[1, :]),
                                         percentile(curvefit_matrix[1, :], 16),
                                         percentile(curvefit_matrix[1, :], 84))
    m_Median_kp, m_16th_kp, m_84th_kp = (median(kapteyn_matrix[0, :]),
                                         percentile(kapteyn_matrix[0, :], 16),
                                         percentile(kapteyn_matrix[0, :], 84))
    n_Median_kp, n_16th_kp, n_84th_kp = (median(kapteyn_matrix[1, :]),
                                         percentile(kapteyn_matrix[1, :], 16),
                                         percentile(kapteyn_matrix[1, :], 84))

    # Bootstrap BCES
    m, n, m_err, n_err, cov = bcesboot(nominal_values(x),
                                       std_devs(x),
                                       nominal_values(y),
                                       std_devs(y),
                                       cerr=zeros(len(x)),
                                       nsim=10000)

    # Saving the data
    entry_key = r'$Y_{{P,\,{elem}}}$'.format(elem=element)
    regr_dict[entry_key][0] = median(kapteyn_matrix[1, :])
    regr_dict[entry_key][1] = std(kapteyn_matrix[1, :])
    regr_dict[entry_key][2] = len(objects)

    # Linear data
    x_regression_range = linspace(0.0, max(nominal_values(x)) * 1.10, 20)
    y_regression_range = m_Median_cf * x_regression_range + n_Median_cf
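
    # Self-contained sketch of the median/16th/84th-percentile pattern above,
    # which summarizes a bootstrap distribution as a central value with
    # asymmetric 1-sigma bounds (hypothetical samples).
    import numpy as np
    from numpy import median, percentile

    samples = np.random.default_rng(0).normal(2.0, 0.3, 10000)
    m_med, m_16, m_84 = median(samples), percentile(samples, 16), percentile(samples, 84)
    print(m_med, m_med - m_16, m_84 - m_med)  # value with -/+ errors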
Example #43
0
print('Lande-Faktor g_J für rot (soll=1):', g_J)

# Plot of the differences Delta_s and var_s:
#plt.errorbar(count, unp.nominal_values(delta_s), xerr = 0, yerr = unp.std_devs(delta_s), fmt = 'rx', label = r'$\Delta s$')
#plt.errorbar(count, unp.nominal_values(var_s), xerr = 0, yerr = unp.std_devs(var_s), fmt ='kx', label = r'$\delta s$')
#plt.ylim(0,330)
#plt.xlabel(r'$n-n_0$')
#plt.ylabel(r'$d/\text{LE}$')
#plt.legend(loc = 'best')
#plt.grid()
#plt.tight_layout()
#plt.savefig('build/red_ds.pdf')

# Plot of the quotients:
plt.errorbar(count,
             unp.nominal_values(s_quot),
             xerr=0,
             yerr=unp.std_devs(s_quot),
             fmt='rx',
             label=r'Berechnete Quotienten')
t = np.linspace(-0.5, 11.5, 10)
plt.plot(t, 0 * t + s_quot_av.nominal_value, 'r-', label=r'Mittelwert')
plt.xlim(-0.5, 11.5)
plt.ylim(0.3, 0.7)
plt.xlabel(r'$n-n_0$')
plt.ylabel(r'$\delta s/ \Delta s$')
plt.legend(loc='best')
plt.grid()
plt.tight_layout()
plt.savefig('build/red_quot.pdf')
Example #44
0
def plot_single_curved_angle_dependence_summary(fitted_values):

    # rename the argument
    rvals = fitted_values

    # plot the data and the fit
    theplot = plt.figure(figsize=(10, 5.5))
    plt.rcParams.update({'axes.titlesize': 10})
    plt.rcParams.update({'axes.labelsize': 9})
    plt.rcParams.update({'xtick.labelsize': 8})
    plt.rcParams.update({'ytick.labelsize': 8})
    textxloc = 0.9
    textyloc = 0.875

    plt.subplot(221)
    plt.errorbar(rvals["angles"],
                 rvals["m1e_dats"] / 10.0,
                 yerr=rvals["m1e_errs"] / 10.0,
                 fmt='rs',
                 label='eros.')
    plt.errorbar(rvals["angles"],
                 rvals["m1r_dats"] / 10.0,
                 yerr=rvals["m1r_errs"] / 10.0,
                 fmt='bs',
                 label='redist.')
    plt.plot(rvals["finedeg"],
             unp.nominal_values(rvals["m1e_vals"]) / 10.0,
             'r-',
             linewidth=2)
    plt.plot(rvals["finedeg"],
             unp.nominal_values(rvals["m1r_vals"]) / 10.0,
             'b-',
             linewidth=2)
    plt.xlim((0, 90))
    ylimits1 = plt.ylim()
    plt.xlabel(r'angle $\theta$')
    plt.ylabel(r'$M^{(1)} \left( \theta \right)$  [atom nm / ion]')
    plt.legend(loc=3, prop={'size': 9})
    plt.title('Erosive and Redistributive First Moments')
    plt.text(textxloc,
             textyloc,
             "(a)",
             fontweight="bold",
             transform=plt.gca().transAxes)

    plt.subplot(222)
    #plt.errorbar(rvals["angles"], rvals["m0e_dats"],    yerr=rvals["m0e_errs"],    fmt='bs', label='flat')
    #plt.errorbar(rvals["angles"], rvals["m0ek1p_avg"], yerr=rvals["m0ek1p_err"], fmt='gs', label='k11=%0.3f'%(dk))
    #plt.errorbar(rvals["angles"], rvals["m0ek1m_avg"], yerr=rvals["m0ek1m_err"], fmt='rs', label='k11=%0.3f'%(-dk))
    plt.errorbar(rvals["angles"],
                 rvals["dk11_dats"] / 10.0,
                 yerr=rvals['dk11_errs'] / 10.0,
                 fmt='gs',
                 label='K11')
    plt.plot(rvals["finedeg"],
             unp.nominal_values(rvals["dk11_vals"]) / 10.0,
             'g-',
             linewidth=2)
    #plt.plot(rvals["finedeg"], rvals["m0e_vals"], 'b-', linewidth=2)
    #plt.plot(rvals["finedeg"], rvals["m0ek1p_vals"], 'g-', linewidth=2)
    #plt.plot(rvals["finedeg"], rvals["m0ek1m_vals"], 'r-', linewidth=2)

    #plt.errorbar(rvals["angles"], rvals["m0e_dats"],    yerr=rvals["m0e_errs"],    fmt='bs', label='flat')
    #plt.errorbar(rvals["angles"], rvals["m0ek2p_avg"], yerr=rvals["m0ek2p_err"], fmt='gs', label='k22=%0.3f'%(dk))
    #plt.errorbar(rvals["angles"], rvals["m0ek2m_avg"], yerr=rvals["m0ek2m_err"], fmt='rs', label='k22=%0.3f'%(-dk))
    plt.errorbar(rvals["angles"],
                 rvals["dk22_dats"] / 10.0,
                 yerr=rvals['dk22_errs'] / 10.0,
                 fmt='ys',
                 label='K22')
    plt.plot(rvals["finedeg"],
             unp.nominal_values(rvals["dk22_vals"]) / 10.0,
             'y-',
             linewidth=2)
    #plt.plot(rvals["finedeg"], rvals["m0e_vals"], 'b-', linewidth=2)
    #plt.plot(rvals["finedeg"], rvals["m0ek2p_vals"], 'g-', linewidth=2)
    #plt.plot(rvals["finedeg"], rvals["m0ek2m_vals"], 'r-', linewidth=2)

    plt.ylim(ylimits1)
    plt.xlabel(r'angle $\theta$')
    plt.ylabel(
        r'$\partial M^{(0)}_{\mathsf{eros.}} / \partial K_{ii} \left( \theta \right)$ [atom nm / ion]'
    )
    plt.legend(loc=3, prop={'size': 9})
    plt.title('Curvature-Derivatives of Zeroth Moment')
    plt.text(textxloc,
             textyloc,
             "(b)",
             fontweight="bold",
             transform=plt.gca().transAxes)

    plt.subplot(223)
    plt.plot(rvals["finedeg"],
             unp.nominal_values(rvals["sxe_coeffs"]) / 10.0,
             'r-',
             linewidth=2,
             label="from $M^{(1)}_{eros.}$")
    plt.plot(rvals["finedeg"],
             unp.nominal_values(rvals["sxr_coeffs"]) / 10.0,
             'b-',
             linewidth=2,
             label="from $M^{(1)}_{redist.}$")
    plt.plot(rvals["finedeg"],
             unp.nominal_values(rvals["sxc_coeffs"]) / 10.0,
             'g-',
             linewidth=2,
             label="from $M^{(0)}_{eros.}$")
    #plt.plot(rvals["finedeg"], rvals["sxc_coeffs_approx"], 'y-', linewidth=2, label="curv.**")
    plt.plot(rvals["finedeg"],
             unp.nominal_values(rvals["sx_coeffs"]) / 10.0,
             'k--',
             linewidth=2,
             label="total")
    plt.xlabel(r'angle $\theta$')
    plt.ylabel(r'$S_{X}=C_{11} \left( \theta \right)$\, [nm$^{4}$ / ion]')
    ylimits2 = np.array(plt.ylim())
    ylimits2[0] = -ylimits2[1]
    plt.ylim(ylimits2)
    plt.legend(loc=3, prop={'size': 8}, ncol=2)
    plt.title(r'Components of $S_X = C_{11}$ ')
    plt.text(textxloc,
             textyloc,
             "(c)",
             fontweight="bold",
             transform=plt.gca().transAxes)

    plt.subplot(224)
    plt.plot(rvals["finedeg"],
             unp.nominal_values(rvals["sye_coeffs"]) / 10.0,
             'r-',
             linewidth=2,
             label="from $M^{(1)}_{eros.}$")
    plt.plot(rvals["finedeg"],
             unp.nominal_values(rvals["syr_coeffs"]) / 10.0,
             'b-',
             linewidth=2,
             label="from $M^{(1)}_{redist.}$")
    plt.plot(rvals["finedeg"],
             unp.nominal_values(rvals["syc_coeffs"]) / 10.0,
             'y-',
             linewidth=2,
             label="from $M^{(0)}_{eros.}$")
    #plt.plot(rvals["finedeg"], rvals["syc_coeffs_approx"], 'y-', linewidth=2, label="curv.**")
    plt.plot(rvals["finedeg"],
             unp.nominal_values(rvals["sy_coeffs"]) / 10.0,
             'k--',
             linewidth=2,
             label="total")
    plt.ylim(ylimits2)
    plt.xlabel(r'angle $\theta$')
    plt.ylabel(r'$S_{Y}=C_{22} \left( \theta \right)$\, [nm$^{4}$ / ion]')
    plt.legend(loc=3, prop={'size': 8}, ncol=2)
    plt.title('Components of $S_Y = C_{22}$')
    plt.text(textxloc,
             textyloc,
             "(d)",
             fontweight="bold",
             transform=plt.gca().transAxes)

    plt.tight_layout()
    return theplot
Example #45
0
def kramer(y, ymin, K):
    y = unp.nominal_values(y)
    return K * (y / ymin - 1) / y**2
Example #46
0
Vin, dVin, Vout, dVout, t, dt = pylab.loadtxt(INPUT, unpack=True)

f = ufloat(5.00, 5.00 / 100.0)

VIN = unumpy.uarray(Vin, dVin)
VIN3 = unumpy.uarray(Vin, ((dVin)**2 + (0.03 * Vin))**0.5)
VOUT = unumpy.uarray(Vout, dVout)
VOUT3 = unumpy.uarray(Vout, ((dVout)**2 + (0.03 * Vout))**0.5)
T = unumpy.uarray(t, dt)
PHI = 2 * 3.14 * T * f
A = VOUT / VIN
mediaA = A.mean()

file = open(OUTPUT, "w")

for i in range(len(A)):
    file.write(str(unumpy.nominal_values(VIN3)[i]))
    file.write("\t")
    file.write(str(unumpy.std_devs(VIN3)[i]))
    file.write("\t")
    file.write(str(unumpy.nominal_values(VOUT3)[i]))
    file.write("\t")
    file.write(str(unumpy.std_devs(VOUT3)[i]))
    file.write("\t")
    file.write(str(unumpy.nominal_values(A)[i]))
    file.write("\t")
    file.write(str(unumpy.std_devs(A)[i]))
    file.write("\n")

file.close()
Example #47
0
                                        unit='°',
                                        format='.2u')

# Plot
plt.clf()
plt.title(
    u'Diagramm 3.1: Bremsspektrum von LiF mit Untergrund und Extrapolation am kurzwelligen Ende'
)
axmain = plt.subplot(111)
plt.xlabel(ur'Bestrahlungswinkel $\beta \, [^\circ]$')
plt.ylabel(ur'Zählrate $n \, [\frac{Ereignisse}{s}]$')
xlim = [b[0], b[-1]]
xspace = np.linspace(*xlim, num=1000)
axmain.set_xlim(*xlim)
papstats.plot_data(b, n, label='Messpunkte')
plt.fill_between(unp.nominal_values(b),
                 0,
                 unp.nominal_values(n),
                 color='g',
                 alpha=0.2)

from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset

zoomxlim = [3, 7]
zoomylim = [0, 200]
zoomxspace = np.linspace(*zoomxlim, num=100)
axzoom = zoomed_inset_axes(axmain, 2.5, loc=1)
axzoom.set_xlim(*zoomxlim)
axzoom.set_ylim(*zoomylim)
mark_inset(axmain, axzoom, loc1=4, loc2=2, fc="none", ec="0.5")
Example #48
0
# Declare data for the analysis
AbundancesFileExtension = '_' + catalogue_dict[
    'Datatype'] + '_linesLog_reduc.txt'
catalogue_df = dz.load_excel_DF(
    '/home/vital/Dropbox/Astrophysics/Data/WHT_observations/WHT_Galaxies_properties.xlsx'
)

idcs = (~catalogue_df.TeOII_from_TeOIII_emis.isnull()) & (
    ~catalogue_df.TeSIII_emis.isnull())

TeOII_array = catalogue_df.loc[idcs].TeOII_from_TeOIII_emis.values
TeSIII_array = catalogue_df.loc[idcs].TeSIII_emis.values
objects = catalogue_df.loc[idcs].index.values

#Make the plot
x_regression = linspace(0.8 * np_min(unumpy.nominal_values(TeOII_array)),
                        1.20 * np_max(unumpy.nominal_values(TeOII_array)), 10)
# y_regression_Epm2014    = (0.92 * x_regression/10000 + 0.078) * 10000
y_regression_One = 1.0 * x_regression + 0.0

#Perform the fit
regr_dict = bces_regression(unumpy.nominal_values(TeOII_array),
                            unumpy.nominal_values(TeSIII_array),
                            unumpy.std_devs(TeOII_array),
                            unumpy.std_devs(TeSIII_array))

# for i in range(len(regr_dict['m'])):
reg_code = 0
y_fit = regr_dict['m'][reg_code] * x_regression + regr_dict['n'][reg_code]
dz.data_plot(x_regression,
             y_fit,
Example #49
0
    def _compute_nominal_transforms(self):
        self.load_events(self.params.aeff_events)
        self.cut_events(self.params.transform_events_keep_criteria)

        # Units must be the following for correctly converting a sum-of-
        # OneWeights-in-bin to an average effective area across the bin.
        comp_units = dict(true_energy='GeV',
                          true_coszen=None,
                          true_azimuth='rad')

        # Select only the units in the input/output binning for conversion
        # (can't pass more than what's actually there)
        in_units = {
            dim: unit
            for dim, unit in comp_units.items() if dim in self.input_binning
        }
        #out_units = {dim: unit for dim, unit in comp_units.items()
        #             if dim in self.output_binning}

        # These will be in the computational units
        input_binning = self.input_binning.to(**in_units)

        # Account for "missing" dimension(s) (dimensions OneWeight expects for
        # computation of bin volume), and accommodate with a factor equal to
        # the full range. See IceCube wiki/documentation for OneWeight for
        # more info.
        missing_dims_vol = 1
        # TODO: currently, azimuth required to *not* be part of input binning
        if 'true_azimuth' not in input_binning:
            missing_dims_vol *= 2 * np.pi
        # TODO: Following is currently never the case, handle?
        if 'true_coszen' not in input_binning:
            missing_dims_vol *= 2

        nominal_transforms = []

        for xform_flavints in self.transform_groups:
            logging.info("Working on %s effective areas xform", xform_flavints)

            raw_hist = self.events.histogram(kinds=xform_flavints,
                                             binning=input_binning,
                                             weights_col='weighted_aeff',
                                             errors=True)
            raw_transform = unp.nominal_values(raw_hist.hist)
            raw_errors = unp.std_devs(raw_hist.hist)

            # Divide histogram by
            #   (energy bin width x coszen bin width x azimuth bin width)
            # volumes to convert from sums-of-OneWeights-in-bins to
            # effective areas. Note that volume correction factor for
            # missing dimensions is applied here.
            bin_volumes = input_binning.bin_volumes(attach_units=False)
            raw_transform /= (bin_volumes * missing_dims_vol)
            raw_errors /= (bin_volumes * missing_dims_vol)

            e_idx = input_binning.index('true_energy')
            if e_idx == 1:
                # transpose
                raw_transform = raw_transform.T
                raw_errors = raw_errors.T

            # Do the smoothing
            smooth_transform = self.smooth(raw_transform, raw_errors,
                                           input_binning['true_energy'],
                                           input_binning['true_coszen'])

            if e_idx == 1:
                # transpose back
                smooth_transform = smooth_transform.T

            nominal_transforms.extend(
                populate_transforms(service=self,
                                    xform_flavints=xform_flavints,
                                    xform_array=smooth_transform))

        return TransformSet(transforms=nominal_transforms)
Example #50
0

# In[9]:

gamma_de = eos.constq_grun(v, v0, 1.45, 0.8)
gamma_sp = eos.speziale_grun(v, v0, uct.ufloat(1.49, 0.03),
                             uct.ufloat(1.65, 0.4), uct.ufloat(11.8, 0.2))
gamma_do = eos.altshuler_grun(v, v0, 1.50, 0.75, 2.96)

# In[10]:

plt.plot(v, gamma_de, label='Dewaele2000')
plt.errorbar(v,
             unp.nominal_values(gamma_sp),
             yerr=unp.std_devs(gamma_sp),
             label='Speziale2001')
plt.plot(v, gamma_do, label='Dorogokupets2007')
plt.xlabel('Unit-cell volume ($\mathrm{\AA}^3$)')
plt.ylabel(r"$\mathrm{Gr{\"u}neisen parameter}$")
plt.legend()

# # 3. Calculate Debye temperature

# In[11]:

help(eos.constq_debyetemp)

# In[12]:
Example #51
0
File: freq.py Project: frapa/lab4
freq_high = data_high[:, 0] * 1000
va_high = unumpy.uarray(data_high[:, 1] / 2.0, data_high[:, 3])
vout_high = unumpy.uarray(data_high[:, 2] / 2.0, data_high[:, 4])

G_high = vout_high / va_high

print(G_low, G_high)
print(20 * unumpy.log10(G_low), 20 * unumpy.log10(G_high))

f1 = plt.figure()
ax1 = f1.add_subplot(111)
ax1.set_xscale('log')
ax1.set_yscale('log')

p1 = ax1.errorbar(x=freq_low,
                  y=unumpy.nominal_values(G_low),
                  yerr=unumpy.std_devs(G_low),
                  fmt="o",
                  c="black")
p2 = ax1.errorbar(x=freq_high,
                  y=unumpy.nominal_values(G_high),
                  yerr=unumpy.std_devs(G_high),
                  fmt="o",
                  c="gray")

ax1.set_title("Guadagno di un opamp in funzione della frequenza")
ax1.set_xlabel("Frequenza [Hz]")
ax1.set_ylabel("Amplificazione")
ax1.set_xlim((1, 3e6))
ax1.set_ylim((1e-1, 1e6))
ax1.grid(True)
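
# Tiny sketch of the decibel conversion printed above: 20*log10(G), with
# unumpy propagating the gain uncertainties through the logarithm.
from uncertainties import unumpy

G_demo = unumpy.uarray([9.8, 120.0], [0.3, 4.0])  # hypothetical gains
print(20 * unumpy.log10(G_demo))                  # gain in dB with errors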
Example #52
0
import argparse
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import uncertainties as u
import uncertainties.unumpy as unp

parser = argparse.ArgumentParser()
parser.add_argument("--path", default="./results/grooves-simulation/fresnel-grooves-simulation.pkl")
args = parser.parse_args()

df = pd.read_pickle(args.path)
# Bin entries by groove number.
bins = np.arange(0.5, 12.5, 1.0)
groups = df.groupby(np.digitize(df.grooves, bins))

means = []
for name, group in groups:
    means.append(group.t.values.mean())

# Plot minimum r_90 as function of the groove number.
f, ax = plt.subplots()
ax.errorbar(bins, unp.nominal_values(means) * 100.0, xerr=0.5, yerr=unp.std_devs(means) * 100.0, fmt=".")
ax.set_xlabel("grooves / mm$^{-1}$")
ax.set_ylabel("transmission / \\%")
ax.text(0.975, 0.95, "FAMOUS simulation", ha="right", va="top", transform=ax.transAxes)
ax.text(0.975, 0.90, "%d entries" % df.t.size, ha="right", va="top", transform=ax.transAxes)
ax.set_xlim(-1, 13)

plt.show()
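
# Small sketch of the binning step above with made-up data: np.digitize
# assigns each row to a groove-number bin, and groupby collects per bin.
import numpy as np
import pandas as pd

df_demo = pd.DataFrame({"grooves": [1.2, 1.8, 3.1, 3.4],  # hypothetical
                        "t": [0.91, 0.93, 0.88, 0.90]})
bins_demo = np.arange(0.5, 4.5, 1.0)
print(df_demo.groupby(np.digitize(df_demo.grooves, bins_demo)).t.mean())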
Example #53
0
def f(t,M0,T1):
    return M0*(1-2*np.exp(-t/T1))

def g(x, b, m):
    return m*x+b

def h(t, M0, D):
    T2=1.227
    G=7.09*10**6
    return M0*np.exp(-2*t/T2)*np.exp(-D*(G**2)*(2*t)**3/12)


# T1 determination
guess = [1.47, 1]
x_plot=np.linspace(0,10, num=1000)
params, covariance = curve_fit(f, unp.nominal_values(tau), -unp.nominal_values(M), p0=guess)

plt.figure()
errM = unp.std_devs(M)
plt.errorbar(unp.nominal_values(tau), -unp.nominal_values(M), yerr=errM, fmt='bx', label="T1-Messung")
plt.plot(x_plot, f(x_plot, *params), 'r-', label='Nichtlinearer Fit')
plt.legend(loc="best", numpoints=1)
plt.xlim(0,10)
plt.ylim(0, 2)
plt.xlabel(r'Zeitabstand $\tau$ [s]')
plt.ylabel(r'Magnetisierung M$_z [V]$')
plt.savefig('plotT1.png')

errors=np.sqrt(np.diag(covariance))

print('M0 =', params[0], '+/-', errors[0])
Example #54
0
dic = numpy.array([0.01 for i in range(len(ic))])
dua = numpy.array([2 for i in range(len(ua))])

U_A = unumpy.uarray(ua, dua)
I_C = unumpy.uarray(ic, dic)

pylab.figure(num=None, figsize=(12, 6), dpi=80, facecolor='w', edgecolor='k')
pylab.rc('font', size=13)
#FIXME: Fix the title
pylab.title('$I_C$ vs $U_A$ at $U_E$ = 2.9V', fontsize="16")
# How to determine the unit of measurement
pylab.xlabel('$U_A$ (V)', size="14")
# FIXME: unit of measurement
pylab.ylabel('$I_C$ (u.a.)', size="14")
pylab.grid(color="gray")
pylab.errorbar(unumpy.nominal_values(U_A),
               unumpy.nominal_values(I_C),
               yerr=unumpy.std_devs(I_C),
               xerr=unumpy.std_devs(U_A),
               fmt="o",
               color="black",
               linewidth=2.5,
               linestyle="-")

# To pick the local maximum I should interpolate with parabolas, but I don't really feel like it.

pylab.savefig("plot29.png",
              dpi=None,
              facecolor='w',
              edgecolor='w',
              orientation='portrait',
Example #55
0
# read the data and prepare suitable variables
tv, Nv = np.genfromtxt('Vanadium.dat', unpack=True)
Nu = np.array([129.0, 143.0, 144.0, 136.0, 126.0, 158.0])

Nu_temp = Nu/10
Nu_cor = unp.uarray(Nu_temp, np.sqrt(Nu_temp))  # with the timing correction applied
Nu_mean = np.mean(Nu_cor)
Nv_cor = unp.uarray(Nv, np.sqrt(Nv))
Nv_diff = Nv_cor - Nu_mean
Nv_diff_log = unp.log(Nv_diff)

def lnn(x, lamb, const):
    return - lamb * x + const

# perform the curve fit
params1, cov1 = curve_fit(lnn, tv, unp.nominal_values(Nv_diff_log), sigma=unp.std_devs(Nv_diff_log))
errors1 = np.sqrt(np.diag(cov1))
lamb1 = unp.uarray(params1[0], errors1[0])
const1 = unp.uarray(params1[1], errors1[1])

# find the half-life
Tv1 = np.log(2)/lamb1

# perform the curve fit again, but without some of the data points
params2, cov2 = curve_fit(lnn, tv[:17], unp.nominal_values(Nv_diff_log[:17]), sigma=unp.std_devs(Nv_diff_log[:17]))
errors2 = np.sqrt(np.diag(cov2))
lamb2 = unp.uarray(params2[0], errors2[0])
const2 = unp.uarray(params2[1], errors2[1])

# half-life from the second fit
Tv2 = np.log(2)/lamb2
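
# One-line sketch of the half-life relation used above, T = ln(2)/lambda,
# with an assumed decay constant; the error of lambda propagates through.
import numpy as np
import uncertainties as unc

lamb_demo = unc.ufloat(3.2e-3, 1.5e-4)  # hypothetical decay constant in 1/s
print(np.log(2) / lamb_demo)            # half-life with propagated uncertainty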
Example #56
0
def generalized_poisson_llh(actual_values,
                            expected_values=None,
                            empty_bins=None):
    '''Compute the generalized Poisson likelihood as formulated in https://arxiv.org/abs/1902.08831


    Note that unlike the other likelihood functions, expected_values
    is expected to be a distribution maker

    inputs:
    ------

    actual_values: flattened hist of a Map object

    expected_values: OrderedDict of MapSets

    empty_bins: None, list or np.ndarray (list the bin indices that are empty)

    returns:
    --------
    llh_per_bin : bin-wise llh values, in a numpy array

    '''
    from collections import OrderedDict

    assert isinstance(
        expected_values, OrderedDict
    ), 'ERROR: expected_values must be an OrderedDict of MapSet objects'
    assert 'weights' in expected_values.keys(
    ), 'ERROR: expected_values need a key named "weights"'
    assert 'llh_alphas' in expected_values.keys(
    ), 'ERROR: expected_values need a key named "llh_alphas"'
    assert 'llh_betas' in expected_values.keys(
    ), 'ERROR: expected_values need a key named "llh_betas"'

    num_bins = actual_values.flatten().shape[0]
    llh_per_bin = np.zeros(num_bins)
    actual_values = unp.nominal_values(actual_values).ravel()

    # If no empty bins are specified, we assume that all of them should be included
    if empty_bins is None:
        empty_bins = []

    for bin_i in range(num_bins):

        # TODO: sometimes the histogram spits out uncertainty objects, sometimes not.
        #       Not sure why.
        data_count = actual_values.astype(np.int64)[bin_i]

        # Automatically add a huge number if a bin has non zero data count
        # but completely empty MC
        if bin_i in empty_bins:
            if data_count > 0:
                llh_per_bin[bin_i] = np.log(SMALL_POS)
            continue

        # Make sure that no weight sum is negative. Crash if there are
        weight_sum = np.array(
            [m.hist.flatten()[bin_i] for m in expected_values['weights'].maps])
        if (weight_sum < 0).sum() > 0:
            logging.debug('\n\n\n')
            logging.debug('weights that are causing problem: ')
            logging.debug(weight_sum[weight_sum < 0])
            logging.debug((weight_sum < 0).sum())
            logging.debug('\n\n\n')
        assert np.all(weight_sum >= 0), 'ERROR: negative weights detected'

        #
        # If the number of MC events is high, compute a normal poisson probability
        #
        n_mc_events = np.array([
            m.hist.flatten()[bin_i]
            for m in expected_values['n_mc_events'].maps
        ])
        if np.all(n_mc_events > 100):

            logP = data_count * np.log(weight_sum.sum()) - weight_sum.sum() - (
                data_count * np.log(data_count) - data_count)
            llh_per_bin[bin_i] = logP

        else:
            from pisa.utils.llh_defs.poisson import fast_pgmix

            alphas = np.array([
                m.hist.flatten()[bin_i]
                for m in expected_values['llh_alphas'].maps
            ])
            betas = np.array([
                m.hist.flatten()[bin_i]
                for m in expected_values['llh_betas'].maps
            ])

            # Remove the NaN's
            mask = np.isfinite(alphas) * np.isfinite(betas)

            # Check that the alpha and betas make sense
            assert np.all(alphas[mask] > 0), 'ERROR: detected alpha values <=0'
            assert np.all(betas[mask] > 0), 'ERROR: detected beta values <=0'

            llh_of_bin = fast_pgmix(data_count, alphas[mask], betas[mask])
            llh_per_bin[bin_i] = llh_of_bin

    return llh_per_bin
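
# Standalone sketch of the high-statistics branch above: the per-bin term
# reduces to an ordinary Poisson log-likelihood centered at its maximum.
import numpy as np

data_count = 12
weight_sum = 10.4  # hypothetical summed MC weights in the bin

logP = data_count * np.log(weight_sum) - weight_sum \
       - (data_count * np.log(data_count) - data_count)
print(logP)  # <= 0, and exactly 0 when weight_sum == data_count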
Example #57
0
File: abcd.py Project: larsfu/ap
import numpy as np
import uncertainties as unc
import uncertainties.unumpy as unp

def mittel(arr):
    # TODO: error propagation?!
    return unc.ufloat(np.mean(unp.nominal_values(arr)),
                      np.std(unp.nominal_values(arr)))
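
# Hypothetical call: mittel collapses a uarray to the mean of its nominal
# values, quoting their scatter as the uncertainty (the stored per-point
# errors are discarded, hence the TODO above).
arr = unp.uarray([1.1, 0.9, 1.0], [0.1, 0.1, 0.1])
print(mittel(arr))  # e.g. 1.00+/-0.08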
Example #58
0
# Difference between the high-purity sample and the two samples
D1 = abs(rein / hd - N1 / d1)
D2 = abs(rein / hd - N2 / d2)

C1 = A * 2.8 * 10**16  # because converted to metres
C2 = A * 1.2 * 10**16


# Fit function
def fitf1(x, a, c):
    return x * a + c


print(lamda, D1, D2)
params, cov = curve_fit(fitf1, lamda**2, unp.nominal_values(D1))
params = correlated_values(params, cov)
params2, cov2 = curve_fit(fitf1, lamda**2, unp.nominal_values(D2))
params2 = correlated_values(params2, cov2)

norm_rein = (rein / hd)
norm_N1 = (N1 / d1)
norm_N2 = (N2 / d2)

# Table
np.savetxt('Tabellen/HGaAstab.txt',
           np.column_stack([(lamda * 10**(-6)),
                            unp.nominal_values(rein),
                            unp.std_devs(rein),
                            unp.nominal_values(norm_rein),
                            unp.std_devs(norm_rein)]),
Example #59
0
def get_CTOIs(self, fname_index):
    '''Given a folder with an OccurrenceRateclass object, get all the planet 
    candidates from orion with human dispositions and create a list of CTOIs using the 
    CTOI format from ExoFOP 
    (https://exofop.ipac.caltech.edu/tess/templates/params_planet_YYYYMMDD_001.txt)'''

    assert hasattr(self, 'tics')
    assert hasattr(self, 'disposition_human')
    assert np.all(np.in1d(fname_index, np.arange(1, 1e3, dtype=int)))

    # isolate planet candidates, putative PCs, and single transits
    g1 = np.in1d(self.disposition_human, [1])  # PCs and pPCs
    g2 = np.in1d(self.disposition_human, [2])  # STs and pSTs
    g = (g1 + g2).astype(bool)
    assert g.sum() == g1.sum() + g2.sum()

    # get string
    date = datetime.datetime.now()
    date_str = '%.4d%.2d%.2d' % (date.year, date.month, date.day)

    print '\nBe sure to update the list of TOIs and CTOIs from https://exofop.ipac.caltech.edu/tess/\n'

    # get columns of interest including uncertainties (NaNs where appropriate)
    # TIC,flag,disp,P,T0,Z,D,inc,b,rp/Rs,a/Rs,rp,mp,Teq,S,rho_star,sma,ecc,omega,tau_peri,K_RV,tag,group,prop_P,notes
    # for definitions see ORION_PCs/ctoi_header.txt
    f = open('ORION_PCs/ctoi_header.txt', 'r')
    outstr = f.read()
    f.close()
    for i in np.where(g)[0]:

        # check if potential CTOIs are already a TOI or a CTOI
        if (not is_TOI(self.tics[i])) and (not is_CTOI(self.tics[i])):

            print 'TIC %i: possible new CTOI' % self.tics[i]

            # get multiple transit parameters
            if self.disposition_human[i] < 2:
                P, eP = self.Ps[i], self.e_Ps[i]
                inc, einc = self.incs[i], np.mean(
                    [self.ehi_incs[i], self.elo_incs[i]])
                rpRs, erpRs = self.rpRss[i], np.mean(
                    [self.ehi_rpRss[i], self.elo_rpRss[i]])
                aRs, eaRs = self.aRss[i], np.mean(
                    [self.ehi_aRss[i], self.elo_aRss[i]])
                rp, erp = self.rps[i], np.mean(
                    [self.ehi_rps[i], self.elo_rps[i]])
                sma, esma = self.smas[i], np.mean(
                    [self.ehi_smas[i], self.elo_smas[i]])
                notes = 'new CTOI from ORION (https://arxiv.org/abs/1812.08145)'

        # get single transit parameters
            elif self.disposition_human[i] >= 2:
                P, eP = np.nan, np.nan  #self.Ps_singletransit[i], np.mean([self.ehi_Ps_singletransit[i],self.elo_Ps_singletransit[i]])
                inc, einc = self.inc_singletransit[i], np.mean([
                    self.ehi_inc_singletransit[i],
                    self.elo_inc_singletransit[i]
                ])
                rpRs, erpRs = self.rpRs_singletransit[i], np.mean([
                    self.ehi_rpRs_singletransit[i],
                    self.elo_rpRs_singletransit[i]
                ])
                aRs, eaRs = self.aRs_singletransit[i], np.mean([
                    self.ehi_aRs_singletransit[i],
                    self.elo_aRs_singletransit[i]
                ])
                rp, erp = self.rps_singletransit[i], np.mean([
                    self.ehi_rps_singletransit[i],
                    self.elo_rps_singletransit[i]
                ])
                #sampMs = np.random.randn(1000)*self.ehi_Mss[i] + self.Mss[i]
                #_,_,sampP = get_samples_from_percentiles(P, self.ehi_Ps_singletransit[i], self.elo_Ps_singletransit[i])
                #sampsma = rvs.semimajoraxis(sampP, sampMs, 0)
                #v = np.percentile(sampsma, (16,50,84))
                sma, esma = np.nan, np.nan  #rvs.semimajoraxis(P, self.Mss[i], 0), np.mean([v[2]-v[1], v[1]-v[0]])
                notes = 'new single transit CTOI from ORION (https://arxiv.org/abs/1812.08145)'

            else:
                raise ValueError('disposition %.1f is not valid.' %
                                 self.disposition_human[i])

        # get other planet params
            T0, eT0 = self.T0s[i], self.e_T0s[i]
            Z, eZ = self.depths[i], unp.std_devs(unp.uarray(rpRs, erpRs)**2)
            ub = rvs.impactparam_inc_aRs(unp.uarray(aRs, eaRs),
                                         unp.uarray(inc, einc))
            uD = rvs.transit_width_aRs(unp.uarray(P,
                                                  eP), unp.uarray(aRs, eaRs),
                                       unp.uarray(Z, eZ), ub)
            uTeq = unp.uarray(self.Teffs[i],self.ehi_Teffs[i]) * unp.sqrt(rvs.Rsun2m(unp.uarray(self.Rss[i],self.ehi_Rss[i])) \
                                                                          / rvs.AU2m(2*unp.uarray(sma,esma)))
            uS = unp.uarray(self.Rss[i], self.ehi_Rss[i])**2 * (
                unp.uarray(self.Teffs[i], self.ehi_Teffs[i]) /
                5777)**4 * (1. / unp.uarray(sma, esma))**2
            D, eD = unp.nominal_values(uD), unp.std_devs(uD)
            b, eb = unp.nominal_values(ub), unp.std_devs(ub)
            Teq, eTeq = unp.nominal_values(uTeq), unp.std_devs(uTeq)
            S, eS = unp.nominal_values(uS), unp.std_devs(uS)

            # add ancillary stuff
            flag, disp = 'newctoi', 'PC'
            tag = '%s_cloutier_orion_%.5d' % (date_str, fname_index)
            group, prop_P = '', 0

            # add planet parameters
            outstr += 'TIC%i.01|%s|%s|' % (self.tics[i], flag, disp)
            outstr += '%.6f|%.6f|%.5f|%.5f|' % (P, eP, T0, eT0)
            outstr += '%.1f|%.1f|%.3f|%.3f|%.2f|%.2f|' % (Z, eZ, D, eD, inc,
                                                          einc)
            outstr += '%.3f|%.3f|%.4f|%.4f|%.1f|%.1f|' % (b, eb, rpRs, erpRs,
                                                          aRs, eaRs)
            outstr += '%.2f|%.2f|%.2f|%.2f|%.1f|%.1f|' % (rp, erp, np.nan,
                                                          np.nan, Teq, eTeq)
            outstr += '%.1f|%.1f|%.2f|%.2f|%.4f|%.4f|' % (S, eS, np.nan,
                                                          np.nan, sma, esma)
            outstr += '%.2f|%.2f|%.2f|%.2f|%.2f|%.2f|' % (0, 0, np.nan, np.nan,
                                                          np.nan, np.nan)
            outstr += '%.2f|%.2f|' % (np.nan, np.nan)
            outstr += '%s|%s|%i|%s\n' % (tag, group, prop_P, notes)

        else:
            if is_TOI(self.tics[i]):
                print 'TIC %i: already a TOI' % self.tics[i]
            if is_CTOI(self.tics[i]):
                print 'TIC %i: already a CTOI' % self.tics[i]

    # replace NaNs
    outstr = outstr.replace('nan', '')

    # remind me to check stuff:
    # TIC name is not a duplicate
    # T0 is correct for single transit events
    warnings.warn(
        '\nThe parameters here (e.g. T0 for single transits) should be confirmed by-eye.'
    )

    # save file
    fname_out = 'ORION_PCs/params_planet_%s_%.3d.txt' % (date_str, fname_index)
    f = open(fname_out, 'w')
    f.write(outstr)
    f.close()

    return outstr
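
# Minimal sketch of the depth-error propagation above: Z = (rp/Rs)**2 with
# the radius-ratio uncertainty carried through by unumpy (assumed values).
import uncertainties.unumpy as unp

rpRs_demo = unp.uarray(0.05, 0.004)  # hypothetical radius ratio
Z_demo = rpRs_demo**2                # transit depth
print(unp.nominal_values(Z_demo), unp.std_devs(Z_demo))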
Example #60
0
    del otherSR, otherCR1, otherCR2, otherCR3

    import scipy

    a = unumpy.umatrix([[TTJ_2, WJ_2], [TTJ_3, WJ_3]],
                       [[err_8, err_1], [err_9, err_4]])

    b = unumpy.umatrix([[Data_2], [Data_3]], [[err_3], [err_5]])

    #fun = lambda x: np.linalg.norm(np.dot(a,x)-b)
    #Y = minimize(fun, np.zeros(n), method='L-BFGS-B', bounds=[(0.,None) for x in range(n)])
    #Y = Y['x']
    #Y = np.linalg.solve(a,b)
    Y_ = a.I * b
    Y = np.squeeze(np.asarray(unumpy.nominal_values(Y_)))
    Yerr = np.squeeze(np.asarray(unumpy.std_devs(Y_)))
    #print (np.allclose(np.dot(a, Y), b))
    alpha, beta = round(Y[0], 2), round(Y[1], 2)
    alphaerr, betaerr = round(Yerr[0], 2), round(Yerr[1], 2)

    QCDSR_err = ROOT.Double(0.)
    QCDCR2_err = ROOT.Double(0.)
    QCDCR3_err = ROOT.Double(0.)

    QCDSR_yields = QCDSR.IntegralAndError(0, QCDSR.GetNbinsX() + 1, QCDSR_err)
    QCDCR2_yields = QCDCR2.IntegralAndError(0,
                                            QCDCR2.GetNbinsX() + 1, QCDCR2_err)
    QCDCR3_yields = QCDCR3.IntegralAndError(0,
                                            QCDCR3.GetNbinsX() + 1, QCDCR3_err)