Example No. 1
    def do_the_fit(obs, **kwargs):

        global print_output, beta0

        func = kwargs.get('function')
        yerr = kwargs.get('yerr')
        length = len(yerr)

        xerr = kwargs.get('xerr')

        if length == len(obs):
            assert 'x_constants' in kwargs
            data = RealData(kwargs.get('x_constants'), obs, sy=yerr)
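            # fit_type=2 -> ordinary least squares: the x values are passed as exact constants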
            fit_type = 2
        elif length == len(obs) // 2:
            data = RealData(obs[:length], obs[length:], sx=xerr, sy=yerr)
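            # fit_type=0 -> explicit orthogonal distance regression, with errors in both x and y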
            fit_type = 0
        else:
            raise ValueError('x and y do not fit together.')

        model = Model(func)

        odr = ODR(data, model, beta0, partol=np.finfo(np.float64).eps)
        odr.set_job(fit_type=fit_type, deriv=1)
        output = odr.run()
        if print_output and not silent:
            print(*output.stopreason)
            print('chisquare/d.o.f.:', output.res_var)
            print_output = 0
        beta0 = output.beta
        return output.beta[kwargs.get('n')]
def chi2_iterative(k, k_nn, Foward=True):
    """
    Esta funcion agarra un eje X (k), un eje Y (k_nn) y busca los parametros para
    ajustar la mejor recta, buscando el regimen lineal de la curva. Esto lo hace
    sacando puntos de la curva, ajustando la curva resultante, y luego comparando 
    los parametros de los distintos ajustes, seleccionando el de menor chi2.
    
    Si Foward=True entonces la funcion va a ir sacando puntos del final para
    encontrar kmax. Si Foward=False, la funcion va a sacar puntos del principio para
    calcular kmin. El punto va a estar dado por k[index].
    
    Returns: m, b, chi2_stat, index
    
    m: pendiente de la recta resultante
    b: ordenada de la recta resultante
    chi2: estadistico de chi2 de la recta resultante
    index: indice del elemento donde empieza/termina el regimen lineal.
    .
    .
    """
    chi2_list = []
    m_list = []
    b_list = []
    if Foward:
        for j in range(0, len(k) - 3):
            k_nn_temp = k_nn[:len(k_nn) - j]
            k_temp = k[:len(k) - j]
            linear_model = Model(linear)
            data = RealData(k_temp, k_nn_temp)
            odr = ODR(data, linear_model, beta0=[0., 1.])
            out = odr.run()
            chi2_list.append(out.res_var)
            m_list.append(out.beta[0])
            b_list.append(out.beta[1])
    else:
        for j in range(0, len(k) - 3):
            k_nn_temp = k_nn[j:]
            k_temp = k[j:]
            linear_model = Model(linear)
            data = RealData(k_temp, k_nn_temp)
            odr = ODR(data, linear_model, beta0=[0., 1.])
            out = odr.run()
            chi2_list.append(out.res_var)
            m_list.append(out.beta[0])
            b_list.append(out.beta[1])
    #index = ClosestToOne(chi2_list)
    index = chi2_list.index(min(chi2_list))
    m = m_list[index]
    b = b_list[index]
    chi2 = chi2_list[index]

    return m, b, chi2, index
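A minimal usage sketch for chi2_iterative with synthetic data. The linear model helper and the scipy.odr imports below are assumptions; the ones referenced inside the function are not shown in the excerpt.

import numpy as np
from scipy.odr import ODR, Model, RealData

def linear(beta, x):
    # assumed ODR-style helper matching beta0=[0., 1.]: slope, then intercept
    return beta[0] * x + beta[1]

# synthetic curve: linear up to k = 10, flat afterwards
k = np.arange(1., 21.)
k_nn = np.where(k <= 10., 2. * k + 1., 21.)
m, b, chi2, index = chi2_iterative(k, k_nn, Foward=True)
print(m, b, chi2, index)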
Example No. 3
def regress_odr(x, y, sx, sy, beta0=[0., 1.]):
    """Return an ODR linear fit
    """
    linear = Model(my_linear)
    mydata = RealData(x.ravel(), y.ravel(), sx=sx.ravel(), sy=sy.ravel())
    myodr = ODR(mydata, linear, beta0=beta0)
    return myodr.run()
Example No. 4
def fit_slope(X, Y, eX=None, eY=None):
    """Will fit a Y vs X with errors using the scipy
    ODR functionalities and a linear fitting function

    Args:
        X: input array of abscissae
        Y: input array of ordinates
        eX: uncertainties on X [None]
        eY: uncertainties on Y [None]

    Returns:
        odr output
    """

    # First, filter out the NaNs in X
    good = ~np.isnan(X)
    if eX is None: eX_good = None
    else: eX_good = eX[good]
    if eY is None: eY_good = None
    else: eY_good = eY[good]

    linear = Model(linear_fit)
    odr_data = RealData(X[good], Y[good], sx=eX_good, sy=eY_good)
    odr_run = ODR(odr_data, linear, beta0=[2., 1.])
    return odr_run.run()
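A minimal usage sketch for fit_slope with synthetic data. The linear_fit helper below is an assumption, since the model function used above is not shown in the excerpt.

import numpy as np
from scipy.odr import ODR, Model, RealData

def linear_fit(beta, x):
    # assumed helper: slope, intercept ordering matches beta0=[2., 1.]
    return beta[0] * x + beta[1]

rng = np.random.default_rng(0)
X = np.linspace(0., 10., 50)
Y = 2. * X + 1. + rng.normal(0., 0.5, X.size)
X[3] = np.nan                      # this NaN is filtered out before fitting
result = fit_slope(X, Y, eX=np.full(X.size, 0.1), eY=np.full(Y.size, 0.5))
print(result.beta)                 # roughly [2., 1.]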
def voltage_current_regression():
    # read data from datafile
    df = pd.read_table(os.path.join(os.path.dirname(__file__), DATA_PATH,
                                    '1_a_soneloid.dat'),
                       delim_whitespace=True,
                       names=['current', 'voltage_min', 'voltage_max'],
                       decimal=',',
                       comment='#')

    # add columns for voltage mean and error
    df['voltage_mean'] = 0.5 * (df['voltage_min'] + df['voltage_max'])
    df['voltage_error'] = df['voltage_max'] - df['voltage_min']

    # Create a model for fitting.
    linear_model = Model(regression_func)

    x = np.array(df['current'])
    y = np.array(df['voltage_mean'])
    sy = np.array(df['voltage_error'])

    data = RealData(x, y, sy=sy)

    odr = ODR(data, linear_model, beta0=[0., 1.])
    out = odr.run()

    return out
Example No. 6
def ajuste_odr_u(x, y):
    data = RealData(unp.nominal_values(x), unp.nominal_values(y), sx=unp.std_devs(x), sy=unp.std_devs(y))
    odr = ODR(data, mlinear, beta0=[1., 1.], ndigit=20)
    ajuste = odr.run()
    a, b = ajuste.beta
    ua, ub = np.sqrt(np.diag(ajuste.cov_beta))
    return a, ua, b, ub
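A minimal usage sketch, assuming the imports below and a linear mlinear Model (the excerpt takes both from module scope). Note that the function uses sqrt(diag(cov_beta)) for the uncertainties; scipy's ajuste.sd_beta would be the same quantity scaled by sqrt(res_var).

import numpy as np
from scipy.odr import ODR, Model, RealData
from uncertainties import unumpy as unp

mlinear = Model(lambda beta, x: beta[0] * x + beta[1])   # assumed linear model

x = unp.uarray([1., 2., 3., 4., 5.], [0.05] * 5)
y = unp.uarray([2.1, 3.9, 6.2, 8.0, 9.9], [0.1] * 5)
a, ua, b, ub = ajuste_odr_u(x, y)
print(a, ua, b, ub)   # slope close to 2, intercept close to 0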
Example No. 7
    def find_c13(self):
        '''
        Find the c13 parameter by fitting a curve to
        the measured group velocities using the scipy
        ODR library. Since c13 is part of the equation
        for the curve, the c13 value corresponding to
        the best-fit curve is the result of the inversion.
        '''

        self.c13_max = np.sqrt(
            self.c33 *
            (self.c11))  #The most physically reasonable maximum for c13
        self.c13_min = np.sqrt(self.c33 * (self.c11 - 2 * self.c66) +
                               self.c66**2) - self.c66
        self.c13_0 = self.c13_min * 1.2

        model = Model(self.forward_model,
                      estimate=[self.c13_0, self.theta0, self.c11, self.c33])
        data = RealData(self.group_angles,
                        self.group_vels,
                        sy=self.group_vel_err)
        fit = ODR(data,
                  model,
                  beta0=[self.c13_0, self.theta0, self.c11, self.c33],
                  ifixb=[1, 1, 1, 1])
        output = fit.run()

        best_fit = output.beta
        errors = output.sd_beta

        self.c13 = best_fit[0]
        self.c13_err = errors[0]
        self.theta0 = best_fit[1]
Example No. 8
 def roda(self, dados):
     x, ux = Variáveis[self.x], Variáveis[self.ux]
     y, uy = Variáveis[self.y], Variáveis[self.uy]
     if self.tipo == 'odr':
         data = RealData(x, y, ux, uy)
         odr = ODR(data, linear, beta0=[1, 0], ndigit=16, maxit=100)
         DadosAjuste = odr.run()
         p = DadosAjuste.beta
         u = sqrt(diag(DadosAjuste.cov_beta))
     elif self.tipo == 'cfit':
         p, cov = curve_fit(flinear,
                            x,
                            y,
                            sigma=uy,
                            absolute_sigma=True,
                            method='trf')
         u = sqrt(diag(cov))
     elif self.tipo == 'cfitse':
         p, cov = curve_fit(flinear, x, y, method='trf')
         u = sqrt(diag(cov))
     print(p, u)
     Ajustes[self.ref] = ({
         'a': p[0],
         'ua': u[0],
         'b': p[1],
         'ub': u[1]
     }, x, ux, y, uy)
Example No. 9
def meanResiduals(pts):
    f = lambda B, x: B[0] * x + B[1]
    model = Model(f)
    data = RealData(list(p.x() for p in pts), list(p.y() for p in pts))
    odr = ODR(data, model, beta0=[0., 1.])
    out = odr.run()
    return out.sum_square / len(pts)
Example No. 10
def ajuste_odr(x, y, ux, uy):
    data = RealData(x, y, sx=ux, sy=uy)
    odr = ODR(data, mlinear, beta0=[-1000., 1.], ndigit=20)
    ajuste = odr.run()
    a, b = ajuste.beta
    ua, ub = np.sqrt(np.diag(ajuste.cov_beta))
    ajuste.pprint()
    return a, ua, b, ub
Example No. 11
def graph(a, toc):  # a: True -> ODR fit with x errors, False -> curve_fit without; toc: figure number
    plt.figure(toc,figsize=(15,10))
    for tickLabel in plt.gca().get_xticklabels()+plt.gca().get_yticklabels():
        tickLabel.set_fontsize(15)
    plt.title("Number of coincidence counts in the peak of $Na^{22}$ wrt angle",fontsize=17)
    plt.xlabel(r'Angle $\theta$ (°)',fontsize=17)
    plt.ylabel('# coincidence events',fontsize=17)
    plt.scatter(Ld,res,label='\n Data with parameters'+\
               ' \n $16$ $cm$ source-detectors'+\
               ' \n $150$ $s/points$')
    if a==False:
        par, par_va = curve_fit(simplegaussian, Ld, res, p0=[70000, 180, 10,1000],sigma=err_res,absolute_sigma=True)
        chi2=round(sum(((res - simplegaussian(Ld,*par) )/ err_res) ** 2)/(len(Ld)-3),2)
        plt.plot(Lc,simplegaussian(Lc, *par),color='gold',label='Fit with '+r'$A\exp\{\frac{-(\theta-\mu)^2}{2\sigma^2}\}+Cst$'+\
                  ' \n $A =$%s'%int(par[0])+' $ \pm$ %s'%int(np.sqrt(np.diag(par_va)[0]))+' #'+\
                  ' \n $\mu =$ %s'%round(par[1],1)+' $\pm$ %s'%round(np.sqrt(np.diag(par_va)[1]),1)+'°'+\
                  ' \n $\sigma =$ %s'%round(par[2],1)+ '$\pm$ %s'%round(np.sqrt(np.diag(par_va)[2]),1)+'°'+\
                  ' \n $Cst=$ %s'%int(par[3])+' $\pm$ %s'%int(np.sqrt(np.diag(par_va)[3]))+' #'+\
                  ' \n $\chi^2/dof = $ %s'%chi2)
        plt.errorbar(Ld, res, err_res,fmt='.',label=r'$y=\sqrt{counts}$ '+\
                     '\n $x=0°$', color='black',ecolor='lightgray', elinewidth=3, capsize=0)

    else:
        data = RealData(Ld,res,err_resx,err_res)
        model = Model(simplegaussianl)

        odr = ODR(data, model, [73021, 183, 11,1208])
        odr.set_job(fit_type=2)
        output = odr.run()
        
        xn = Lc
        yn = simplegaussianl(output.beta, xn)
        
        #pl.hold(True)
        #plot(Ld,res,'ro')
        #print(x,y)
        plt.plot(xn,yn,'k-',label=' ODR leastsq fit'+\
            ' \n $\chi^2/dof = $ %s'%round(output.sum_square/(len(Ld)-3),2)+'\n')
        
        odr.set_job(fit_type=0)
        output = odr.run()
        par,par_va=output.beta,output.cov_beta
        yn = simplegaussianl(output.beta, xn)
        plt.plot(xn,yn,color='gold',label='ODR fit '+r'$A\exp\{\frac{-(\theta-\mu)^2}{2\sigma^2}\}+Cst$'+\
          ' \n $A =$%s'%int(par[0])+' $ \pm$ %s'%int(np.sqrt(np.diag(par_va)[0]))+' #'+\
          ' \n $\mu =$ %s'%round(par[1],1)+' $\pm$ %s'%round(np.sqrt(np.diag(par_va)[1]),1)+'°'+\
          ' \n $\sigma =$ %s'%round(par[2],1)+ '$\pm$ %s'%round(np.sqrt(np.diag(par_va)[2]),1)+'°'+\
          ' \n $Cst=$ %s'%int(par[3])+' $\pm$ %s'%int(np.sqrt(np.diag(par_va)[3]))+' #'+\
          ' \n Sum of squares/dof $= $ %s'%round(output.sum_square/(len(Ld)-3),2))
        plt.legend(loc=0)
        plt.errorbar(Ld, res, err_res,err_resx,label=r'$y=\sqrt{counts}$ '+\
                     '\n $x=$%s'%errx+'°',fmt='.', color='black',ecolor='lightgray', elinewidth=3, capsize=0)


    plt.gca().set_xlim(140,220)
    plt.legend(bbox_to_anchor=(0.68, 0.58), loc=1, borderaxespad=0.,prop={'size':14})
    plt.yscale('log')
    plt.ylim(1e2,1e5)
Example No. 12
    def test_pearson(self):
        p_x = np.array([0., .9, 1.8, 2.6, 3.3, 4.4, 5.2, 6.1, 6.5, 7.4])
        p_y = np.array([5.9, 5.4, 4.4, 4.6, 3.5, 3.7, 2.8, 2.8, 2.4, 1.5])
        p_sx = np.array([.03, .03, .04, .035, .07, .11, .13, .22, .74, 1.])
        p_sy = np.array([1., .74, .5, .35, .22, .22, .12, .12, .1, .04])

        p_dat = RealData(p_x, p_y, sx=p_sx, sy=p_sy)

        # Reverse the data to test invariance of results
        pr_dat = RealData(p_y, p_x, sx=p_sy, sy=p_sx)

        p_mod = Model(self.pearson_fcn, meta=dict(name='Uni-linear Fit'))

        p_odr = ODR(p_dat, p_mod, beta0=[1., 1.])
        pr_odr = ODR(pr_dat, p_mod, beta0=[1., 1.])

        out = p_odr.run()
        assert_array_almost_equal(
            out.beta,
            np.array([5.4767400299231674, -0.4796082367610305]),
        )
        assert_array_almost_equal(
            out.sd_beta,
            np.array([0.3590121690702467, 0.0706291186037444]),
        )
        assert_array_almost_equal(
            out.cov_beta,
            np.array([[0.0854275622946333, -0.0161807025443155],
                      [-0.0161807025443155, 0.003306337993922]]),
        )

        rout = pr_odr.run()
        assert_array_almost_equal(
            rout.beta,
            np.array([11.4192022410781231, -2.0850374506165474]),
        )
        assert_array_almost_equal(
            rout.sd_beta,
            np.array([0.9820231665657161, 0.3070515616198911]),
        )
        assert_array_almost_equal(
            rout.cov_beta,
            np.array([[0.6391799462548782, -0.1955657291119177],
                      [-0.1955657291119177, 0.0624888159223392]]),
        )
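Assuming pearson_fcn is the usual straight line B[0] + B[1]*x, the reversed fit should simply be the forward line solved for x, i.e. x = -b0/b1 + y/b1. A quick check against the reference values above (agreement holds only up to the solver's stopping tolerance):

import numpy as np

beta_fwd = np.array([5.4767400299231674, -0.4796082367610305])
beta_rev = np.array([11.4192022410781231, -2.0850374506165474])
# invert y = b0 + b1*x  ->  x = -b0/b1 + (1/b1)*y
expected_rev = np.array([-beta_fwd[0] / beta_fwd[1], 1. / beta_fwd[1]])
print(np.allclose(expected_rev, beta_rev))   # True with the default tolerances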
Example No. 13
    def test_empty_data(self):
        beta0 = [0.02, 0.0]
        linear = Model(self.empty_data_func)

        empty_dat = Data([], [])
        assert_warns(OdrWarning, ODR, empty_dat, linear, beta0=beta0)

        empty_dat = RealData([], [])
        assert_warns(OdrWarning, ODR, empty_dat, linear, beta0=beta0)
Example No. 14
def sigma(X, Y, X_err, Y_err):
    linear_model = Model(Linear)
    data = RealData(X, Y, sx=X_err, sy=Y_err)
    odr = ODR(data, linear_model, beta0=[0., 1.])
    out = odr.run()

    m = out.beta[0]
    b = out.beta[1]
    recta = [Y[i] - (X[i] * m + b) for i in range(len(X))]  # residuals of the fitted line
    return len(recta), f.dispersion(recta)
Example No. 15
    def orthoregress(x, y, xerr, yerr):

        linreg = linregress(x, y)
        mod = Model(f)

        dat = RealData(x, y, sx=xerr, sy=yerr)

        od = ODR(dat, mod, beta0=linreg[0:2])
        out = od.run()
        return list(out.beta)
Example No. 16
    def test_lorentz(self):
        l_sy = np.array([.29] * 18)
        l_sx = np.array([
            .000972971, .000948268, .000707632, .000706679, .000706074,
            .000703918, .000698955, .000456856, .000455207, .000662717,
            .000654619, .000652694, .000000859202, .00106589, .00106378,
            .00125483, .00140818, .00241839
        ])

        l_dat = RealData(
            [
                3.9094, 3.85945, 3.84976, 3.84716, 3.84551, 3.83964, 3.82608,
                3.78847, 3.78163, 3.72558, 3.70274, 3.6973, 3.67373, 3.65982,
                3.6562, 3.62498, 3.55525, 3.41886
            ],
            [
                652, 910.5, 984, 1000, 1007.5, 1053, 1160.5, 1409.5, 1430,
                1122, 957.5, 920, 777.5, 709.5, 698, 578.5, 418.5, 275.5
            ],
            sx=l_sx,
            sy=l_sy,
        )
        l_mod = Model(self.lorentz, meta=dict(name='Lorentz Peak'))
        l_odr = ODR(l_dat, l_mod, beta0=(1000., .1, 3.8))

        out = l_odr.run()
        assert_array_almost_equal(
            out.beta,
            np.array([
                1.4306780846149925e+03, 1.3390509034538309e-01,
                3.7798193600109009e+00
            ]),
        )
        assert_array_almost_equal(
            out.sd_beta,
            np.array([
                7.3621186811330963e-01, 3.5068899941471650e-04,
                2.4451209281408992e-04
            ]),
        )
        assert_array_almost_equal(
            out.cov_beta,
            np.array([[
                2.4714409064597873e-01, -6.9067261911110836e-05,
                -3.1236953270424990e-05
            ],
                      [
                          -6.9067261911110836e-05, 5.6077531517333009e-08,
                          3.6133261832722601e-08
                      ],
                      [
                          -3.1236953270424990e-05, 3.6133261832722601e-08,
                          2.7261220025171730e-08
                      ]]),
        )
Example No. 17
def orth_regression(obs, model):
    linear = Model(f)
    mydata = RealData(obs, model)
    myodr = ODR(mydata, linear, beta0=[1., 0.])
    myoutput = myodr.run()
    params = myoutput.beta
    gradient = params[0]
    y_intercept = params[1]
    res_var = myoutput.res_var
    return np.around(gradient, 2), np.around(y_intercept,
                                             2), np.around(res_var, 2)
Example No. 18
 def run(self):
     fit_data = RealData(self.data['ref_spd'].values.flatten(), self.data['target_spd'].values.flatten())
     p, res = lstsq(np.nan_to_num(fit_data.x[:, np.newaxis] ** [1, 0]),
                    np.nan_to_num(np.asarray(fit_data.y)[:, np.newaxis]))[0:2]
     self._model = ODR(fit_data, Model(OrthogonalLeastSquares.linear_func), beta0=[p[0][0], p[1][0]])
     self.out = self._model.run()
     self.params = {'slope': self.out.beta[0], 'offset': self.out.beta[1]}
     self.params['r2'] = self.get_r2()
     self.params['Num data points'] = self.num_data_pts
     # print("Model output:", self.out.pprint())
     self.show_params()
Example No. 19
def ODR_fitting(xdata, ydata, fitfunction, beta, fix):
    bpl_all = Model(fitfunction)
    data_all = RealData(xdata,
                        ydata,
                        sx=np.cov([xdata, ydata])[0][1],
                        sy=np.cov([xdata, ydata])[0][1])
    odr_all = ODR(data_all, bpl_all, beta0=beta, ifixb=fix)
    odr_all.set_job(fit_type=0)
    output_all = odr_all.run()
    #output_all.pprint()
    return (output_all.beta, output_all.sd_beta)
Example No. 20
def odrlin(x, y, sx, sy):
    """
    Linear fit of 2-D data set made with Orthogonal Distance Regression
    @params x, y: data to fit
    @param sx, sy: respective errors of data to fit
    """
    model = models.unilinear  # defines model as beta[0]*x + beta[1]
    data = RealData(x, y, sx=sx, sy=sy)
    kinit = (y[-1] - y[0]) / (x[-1] - x[0])
    init = (kinit, y[0] - kinit * x[0])
    linodr = ODR(data, model, init)
    return linodr.run()
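A minimal usage sketch for odrlin with synthetic data, assuming the imports below (the excerpt takes models, RealData and ODR from module scope):

import numpy as np
from scipy.odr import ODR, RealData, models

x = np.linspace(0., 5., 20)
y = 3. * x - 1. + np.random.default_rng(1).normal(0., 0.1, x.size)
out = odrlin(x, y, sx=np.full(x.size, 0.05), sy=np.full(y.size, 0.1))
print(out.beta)   # roughly [3., -1.] for unilinear's beta[0]*x + beta[1]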
Example No. 21
def odr_fit(func, dom, mrng, srng, pg):
    ''' performs orthogonal distance regression '''
    dat = RealData(dom, mrng, EPS * np.ones(len(dom)), srng + EPS)
    mod = ODRModel(func)
    odr = ODR(dat, mod, pg)
    odr.set_job(fit_type=0)
    fit = odr.run()
    popt = fit.beta
    perr = fit.sd_beta
    ndom = 128
    fdom = np.linspace(np.min(dom), np.max(dom), ndom)
    fval = func(popt, fdom)
    return popt, perr, fdom, fval
Example No. 22
def mieze_fit(counts, asym=False, addup=False):
    # for the moment, only fit points 2 to 15
    x = arange(2, len(counts))
    y = array(counts[1:-1])

    if addup:
        x = x[0:6]
        y = (y[:6] + y[8:]) / 2

    dat = RealData(x, y, sy=sqrt(y))
    est_A = (y.max() - y.min())/2
    est_B = (y.max() + y.min())/2
    # the first maximum is at pi/2 - k*x; a value of zero seems to have
    # bad effects on fitting in some cases
    est_phi = pi/2 - 4*pi/16*y.argmin() or 0.01

    beta0 = [est_A, est_B, est_phi]
    parnames = ['A', 'B', 'phi']
    model = odr_model_miez_signal
    if asym:
        parnames += ['D', 'chi']
        beta0 += [0, 0]
        model = odr_model_miez_signal_asym

    odr = ODR(dat, model, beta0=beta0, ifixx=array([0]*len(x)))
    out = odr.run()
    params = dict(zip(parnames, out.beta))
    errors = dict(zip(parnames, out.sd_beta))

    if 'D' not in params:
        params['D'] = errors['D'] = 0
        params['chi'] = errors['chi'] = 0

    # make A always positive
    if params['A'] < 0:
        params['A'] = -params['A']
        params['phi'] += pi

    # make phases unique: in range [0...2pi]
    params['phi'] %= 2*pi  # this converts negative phase correctly too!
    params['chi'] %= 2*pi

    # append C = A/B
    params['C'] = params['A']/params['B']

    # ditto for its error: add the relative errors of A and B in quadrature
    errors['C'] = params['C'] * sqrt((errors['A']/params['A'])**2 +
                                     (errors['B']/params['B'])**2)
    #errors.append(errors[0]/params[1] + errors[1]*params[0]/params[1]**2)

    return params, errors
Example No. 23
def linear_regression(data, err):
    """Calculate a linear regression using scipy.odr and return fit output."""
    print('Fitting with')
    print(data[:, 0], data[:, 1])
    print('and Error')
    print(err[:, 0], err[:, 1])
    beta0 = get_start_values(data)
    model = Model(linear_model)
    rdata = RealData(data[:, 0], data[:, 1], sx=err[:, 0], sy=err[:, 1])
    # TODO: check different errors
    fit = ODR(rdata, model, beta0=beta0)
    output = fit.run()
    print(output.beta)
    return output
Example No. 24
def fit_xy(x, y):
    # Create a model for fitting.
    linear_model = Model(linear_func)

    # Create a RealData object using our initiated data from above.
    data = RealData(x, y)

    # Set up ODR with the model and data.
    odr = ODR(data, linear_model, beta0=[0., 1.])

    # Run the regression.
    out = odr.run()

    return out.beta
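A minimal usage sketch for fit_xy; the linear_func helper below is an assumption, since the model function used above is not shown in the excerpt.

import numpy as np
from scipy.odr import ODR, Model, RealData

def linear_func(beta, x):
    # assumed helper matching beta0=[0., 1.]: slope, then intercept
    return beta[0] * x + beta[1]

x = np.linspace(0., 10., 30)
y = 1.5 * x + 4. + np.random.default_rng(2).normal(0., 0.2, x.size)
slope, intercept = fit_xy(x, y)
print(slope, intercept)   # roughly 1.5 and 4.0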
Example No. 25
def ODR_Fit(function, x, y, x_err, y_err):
    print('Fit:\t', function.__name__, '\n')
    # Create a model for fitting.
    model = Model(function)
    # Create a RealData object using our initiated data from above.
    D = RealData(x, y, sx=x_err, sy=y_err)
    # Set up ODR with the model and data.
    odr = ODR(D, model, beta0=[0.7, 0.])
    # Run the regression.
    out = odr.run()
    # Use the in-built pprint method to give us results.
    out.pprint()
    print('\n')
    return out
Example No. 26
    def odr_inversion(self, initial_guess=None):
        '''
        Just another curve fit/inversion method
        to find c13, using scipy's ODR class. Does
        the same job as curve_fit.
        '''
        def odr_forward_model(parameters, xdata):
            c13, c11, c33, c55, c66, theta = parameters
            return self.forward_model(xdata, c13, c11, c33, c55, c66, theta)

        from scipy.odr import ODR, RealData, Model

        c13_max = np.sqrt(self.c33 * (self.c11 - self.c66))
        c13_min = np.sqrt(self.c33 *
                          (self.c11 - 2 * self.c66) + self.c66**2) - self.c66
        if not initial_guess:
            initial_guess = c13_min * 1.1
        print(self.c11, self.c33, self.c55, self.c66, self.theta0)
        print('ok', round(c13_min / 1e9), round(c13_max / 1e9))

        parameters = ['c11', 'c33', 'c55', 'c66', 'theta0']
        fixed_params = [0, 0, 0, 0, 0]
        for i, item in enumerate(parameters):
            if item in self.parameters_to_invert:
                fixed_params[i] = 1
        fixed_params = [
            1, 1, 1, 0, 0, 1
        ]  #MAKE THIS [1]+fixed_params. I'M FORCING C55 NOT TO BE INVERTED

        model = Model(odr_forward_model,
                      estimate=[
                          initial_guess, self.c11, self.c33, self.c55,
                          self.c66, self.theta0
                      ])
        data = RealData(self.group_angles,
                        self.group_vels,
                        sy=self.group_vel_err)
        fit = ODR(data,
                  model,
                  beta0=[
                      initial_guess, self.c11, self.c33, self.c55, self.c66,
                      self.theta0
                  ],
                  ifixb=fixed_params)
        output = fit.run()
        best_fit = output.beta
        errors = output.sd_beta

        return best_fit, errors
def orthoregress(x, y, xerr, yerr):
    """Por cortesia de http://blog.rtwilson.com/orthogonal-distance-regression-in-python/
	Perform an Orthogonal Distance Regression on the given data,
	using the same interface as the standard scipy.stats.linregress function.
	Arguments:
	x: x data
	y: y data
	Returns:
	[m, c, nan, nan, nan]
	Uses standard ordinary least squares to estimate the starting parameters
	then uses the scipy.odr interface to the ODRPACK Fortran code to do the
	orthogonal distance calculations.
	"""
    linreg = linregress(x, y)
    mod = Model(f)
    if xerr == 0:
        dat = RealData(x, y)
    else:
        dat = RealData(x, y, sx=xerr, sy=yerr)

    od = ODR(dat, mod, beta0=linreg[0:2])
    out = od.run()
    return list(out.beta)
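A minimal usage sketch, assuming the model helper f and the imports below (none of which are shown in the excerpt). Scalar uncertainties are passed here because the xerr == 0 check above expects a scalar, not an array.

import numpy as np
from scipy.stats import linregress
from scipy.odr import ODR, Model, RealData

def f(B, x):
    # slope/intercept ordering matches linregress, whose result seeds beta0
    return B[0] * x + B[1]

x = np.linspace(1., 10., 25)
y = 0.8 * x + 2. + np.random.default_rng(3).normal(0., 0.05, x.size)
m, c = orthoregress(x, y, xerr=0.02, yerr=0.05)
print(m, c)   # roughly 0.8 and 2.0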
Example No. 28
    def Regression_Plane(self, x, x_error, y, y_error, z, z_error):
        """
        Do the regression, here use ODR package from Scipy which use orthogonal distance regression
        """
        data = RealData([x, y], z)  #,sx=[x_error,y_error],sy=z_error)

        def func(beta, data):
            x, y = data
            a, b, c = beta
            return a * x + b * y + c

        model = Model(func)
        odr = ODR(data, model, [100, 100, 100])
        res = odr.run()
        return res.beta, res.sd_beta
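A standalone sketch of the same multidimensional-input pattern used above (RealData with an [x, y] pair as the explanatory variable), on synthetic data, since the class context is not shown:

import numpy as np
from scipy.odr import ODR, Model, RealData

rng = np.random.default_rng(4)
x = rng.uniform(0., 10., 100)
y = rng.uniform(0., 10., 100)
z = 2. * x - 3. * y + 5. + rng.normal(0., 0.1, 100)   # points near z = 2x - 3y + 5

def plane(beta, data):
    x, y = data
    a, b, c = beta
    return a * x + b * y + c

res = ODR(RealData([x, y], z), Model(plane), beta0=[1., 1., 1.]).run()
print(res.beta)   # roughly [2., -3., 5.]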
Example No. 29
def do_linear_odr(x, y, xerr, yerr):
    """ Returns linear odr fit.

    Args:
        x: Series of x data
        y: Series of y data
        xerr: Series of x data errors
        yerr: Series of y data errors

    Returns: Linear odr fit. Betas see ``linear_model()``.
    """
    lin_model = Model(linear_model)
    data = RealData(x, y, sx=xerr, sy=yerr)
    odr_fit = ODR(data, lin_model, beta0=[0., 1.]).run()
    print_odr_result(LOG.debug, odr_fit)
    return odr_fit
Example No. 30
    def __init__(self, recebe_x, recebe_y, recebe_x_erro, recebe_y_erro):  # general constructor for the classes
        for i in range(recebe_x.size):  # sort x into ascending order (reordering y and the errors to match)
            for j in range(i+1,recebe_x.size):
                if recebe_x[i]>recebe_x[j]:
                    recebe_x[i],recebe_x[j] = recebe_x[j],recebe_x[i]
                    recebe_y[i],recebe_y[j] = recebe_y[j],recebe_y[i]
                    recebe_x_erro[i],recebe_x_erro[j]=recebe_x_erro[j],recebe_x_erro[i]
                    recebe_y_erro[i],recebe_y_erro[j]=recebe_y_erro[j],recebe_y_erro[i]

        # store the values in their corresponding instance attributes
        self.xdata = recebe_x
        self.ydata = recebe_y
        self.x_erro = recebe_x_erro
        self.y_erro = recebe_y_erro
        self.model = 0
        self.data = RealData(self.xdata,self.ydata,self.x_erro,self.y_erro)