Example #1
    def test_output_file_overwrite(self):
        """
        Verify fix for gh-1892
        """
        def func(b, x):
            return b[0] + b[1] * x

        p = Model(func)
        data = Data(np.arange(10), 12 * np.arange(10))
        tmp_dir = tempfile.mkdtemp()
        error_file_path = os.path.join(tmp_dir, "error.dat")
        report_file_path = os.path.join(tmp_dir, "report.dat")
        try:
            ODR(data,
                p,
                beta0=[0.1, 13],
                errfile=error_file_path,
                rptfile=report_file_path).run()
            ODR(data,
                p,
                beta0=[0.1, 13],
                errfile=error_file_path,
                rptfile=report_file_path,
                overwrite=True).run()
        finally:
            # remove output files for clean up
            shutil.rmtree(tmp_dir)
Example #2
    def test_ifixx(self):
        x1 = [-2.01, -0.99, -0.001, 1.02, 1.98]
        x2 = [3.98, 1.01, 0.001, 0.998, 4.01]
        fix = np.vstack((np.zeros_like(x1, dtype=int), np.ones_like(x2, dtype=int)))
        data = Data(np.vstack((x1, x2)), y=1, fix=fix)
        model = Model(lambda beta, x: x[1, :] - beta[0] * x[0, :]**2., implicit=True)

        odr1 = ODR(data, model, beta0=np.array([1.]))
        sol1 = odr1.run()
        odr2 = ODR(data, model, beta0=np.array([1.]), ifixx=fix)
        sol2 = odr2.run()
        assert_equal(sol1.beta, sol2.beta)
Example #3
def chi2_iterative(k, k_nn, Foward=True):
    """
    Takes an X axis (k) and a Y axis (k_nn) and searches for the parameters of
    the best-fitting straight line by locating the linear regime of the curve.
    It does this by removing points from the curve, fitting the remaining
    points, and then comparing the parameters of the different fits, keeping
    the one with the smallest chi2.

    If Foward=True the function removes points from the end to find kmax.
    If Foward=False it removes points from the beginning to find kmin.
    The cut point is given by k[index].

    Returns: m, b, chi2, index

    m: slope of the resulting line
    b: intercept of the resulting line
    chi2: chi2 statistic of the resulting line
    index: index of the element where the linear regime starts/ends.
    """
    chi2_list = []
    m_list = []
    b_list = []
    if Foward:
        for j in range(0, len(k) - 3):
            k_nn_temp = k_nn[:len(k_nn) - j]
            k_temp = k[:len(k) - j]
            linear_model = Model(linear)
            data = RealData(k_temp, k_nn_temp)
            odr = ODR(data, linear_model, beta0=[0., 1.])
            out = odr.run()
            chi2_list.append(out.res_var)
            m_list.append(out.beta[0])
            b_list.append(out.beta[1])
    else:
        for j in range(0, len(k) - 3):
            k_nn_temp = k_nn[j:]
            k_temp = k[j:]
            linear_model = Model(linear)
            data = RealData(k_temp, k_nn_temp)
            odr = ODR(data, linear_model, beta0=[0., 1.])
            out = odr.run()
            chi2_list.append(out.res_var)
            m_list.append(out.beta[0])
            b_list.append(out.beta[1])
    #index = ClosestToOne(chi2_list)
    index = chi2_list.index(min(chi2_list))
    m = m_list[index]
    b = b_list[index]
    chi2 = chi2_list[index]

    return m, b, chi2, index
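chi2_iterative calls Model(linear) on a helper named linear that is not included in this snippet. A minimal sketch of the assumed helper in scipy.odr's f(beta, x) convention, plus a hypothetical call (the k and k_nn arrays are invented for illustration, and chi2_iterative above is assumed to be in scope):

import numpy as np
from scipy.odr import ODR, Model, RealData

def linear(beta, x):
    # assumed form of the missing helper: straight line, slope first
    return beta[0] * x + beta[1]

# invented degree / mean-neighbour-degree curve with a mildly nonlinear tail
k = np.arange(1, 31, dtype=float)
k_nn = 3.0 * k + 2.0 + 0.01 * k**2

m, b, chi2, index = chi2_iterative(k, k_nn, Foward=True)
print(m, b, chi2, "linear regime boundary near k =", k[index])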
Example #4
 def roda(self, dados):
     x, ux = Variáveis[self.x], Variáveis[self.ux]
     y, uy = Variáveis[self.y], Variáveis[self.uy]
     if self.tipo == 'odr':
         data = RealData(x, y, ux, uy)
         odr = ODR(data, linear, beta0=[1, 0], ndigit=16, maxit=100)
         DadosAjuste = odr.run()
         p = DadosAjuste.beta
         u = sqrt(diag(DadosAjuste.cov_beta))
     elif self.tipo == 'cfit':
         p, cov = curve_fit(flinear,
                            x,
                            y,
                            sigma=uy,
                            absolute_sigma=True,
                            method='trf')
         u = sqrt(diag(cov))
     elif self.tipo == 'cfitse':
         p, cov = curve_fit(flinear, x, y, method='trf')
         u = sqrt(diag(cov))
     print(p, u)
     Ajustes[self.ref] = ({
         'a': p[0],
         'ua': u[0],
         'b': p[1],
         'ub': u[1]
     }, x, ux, y, uy)
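Example #4 depends on two model callables that are not shown: linear, passed to ODR (a scipy.odr Model built from an f(beta, x) function), and flinear, passed to curve_fit (an f(x, a, b) function). A minimal sketch of such a pair, under the assumption that both describe a straight line; the name fodr is hypothetical:

import numpy as np
from scipy.odr import Model

def fodr(beta, x):
    # scipy.odr convention: a single parameter vector comes first
    return beta[0] * x + beta[1]

def flinear(x, a, b):
    # scipy.optimize.curve_fit convention: x first, parameters unpacked
    return a * x + b

linear = Model(fodr)  # the Model object that ODR(data, linear, ...) expects

# sanity check: both conventions describe the same line
x = np.linspace(0.0, 1.0, 5)
assert np.allclose(fodr([2.0, 1.0], x), flinear(x, 2.0, 1.0))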
Example #5
    def find_c13(self):
        '''
        Find the c13 parameter by fitting a curve to
        the measured group velocities using the scipy
        ODR library. Since c13 is part of the equation
        for the curve, the c13 value corresponding to
        the best-fit curve is the result of the inversion.
        '''

        self.c13_max = np.sqrt(
            self.c33 *
            (self.c11))  #The most physically reasonable maximum for c13
        self.c13_min = np.sqrt(self.c33 * (self.c11 - 2 * self.c66) +
                               self.c66**2) - self.c66
        self.c13_0 = self.c13_min * 1.2

        model = Model(self.forward_model,
                      estimate=[self.c13_0, self.theta0, self.c11, self.c33])
        data = RealData(self.group_angles,
                        self.group_vels,
                        sy=self.group_vel_err)
        fit = ODR(data,
                  model,
                  beta0=[self.c13_0, self.theta0, self.c11, self.c33],
                  ifixb=[1, 1, 1, 1])
        output = fit.run()

        best_fit = output.beta
        errors = output.sd_beta

        self.c13 = best_fit[0]
        self.c13_err = errors[0]
        self.theta0 = best_fit[1]
Example #6
def voltage_current_regression():
    # read data from datafile
    df = pd.read_table(os.path.join(os.path.dirname(__file__), DATA_PATH,
                                    '1_a_soneloid.dat'),
                       delim_whitespace=True,
                       names=['current', 'voltage_min', 'voltage_max'],
                       decimal=',',
                       comment='#')

    # add columns for voltage mean and error
    df['voltage_mean'] = 0.5 * (df['voltage_min'] + df['voltage_max'])
    df['voltage_error'] = df['voltage_max'] - df['voltage_min']

    # Create a model for fitting.
    linear_model = Model(regression_func)

    x = np.array(df['current'])
    y = np.array(df['voltage_mean'])
    sy = np.array(df['voltage_error'])

    data = RealData(x, y, sy=sy)

    odr = ODR(data, linear_model, beta0=[0., 1.])
    out = odr.run()

    return out
Example #7
 def test_multilinear_model(self):
     x = np.linspace(0.0, 5.0)
     y = 10.0 + 5.0 * x
     data = Data(x, y)
     odr_obj = ODR(data, multilinear)
     output = odr_obj.run()
     assert_array_almost_equal(output.beta, [10.0, 5.0])
Example #8
 def test_unilinear_model(self):
     x = np.linspace(0.0, 5.0)
     y = 1.0 * x + 2.0
     data = Data(x, y)
     odr_obj = ODR(data, unilinear)
     output = odr_obj.run()
     assert_array_almost_equal(output.beta, [1.0, 2.0])
Example #9
 def test_exponential_model(self):
     x = np.linspace(0.0, 5.0)
     y = -10.0 + np.exp(0.5 * x)
     data = Data(x, y)
     odr_obj = ODR(data, exponential)
     output = odr_obj.run()
     assert_array_almost_equal(output.beta, [-10.0, 0.5])
Example #10
    def test_explicit(self):
        explicit_mod = Model(
            self.explicit_fcn,
            fjacb=self.explicit_fjb,
            fjacd=self.explicit_fjd,
            meta=dict(name='Sample Explicit Model',
                      ref='ODRPACK UG, pg. 39'),
        )
        explicit_dat = Data([0.,0.,5.,7.,7.5,10.,16.,26.,30.,34.,34.5,100.],
                        [1265.,1263.6,1258.,1254.,1253.,1249.8,1237.,1218.,1220.6,
                         1213.8,1215.5,1212.])
        explicit_odr = ODR(explicit_dat, explicit_mod, beta0=[1500.0, -50.0, -0.1],
                       ifixx=[0,0,1,1,1,1,1,1,1,1,1,0])
        explicit_odr.set_job(deriv=2)
        explicit_odr.set_iprint(init=0, iter=0, final=0)

        out = explicit_odr.run()
        assert_array_almost_equal(
            out.beta,
            np.array([1.2646548050648876e+03, -5.4018409956678255e+01,
                -8.7849712165253724e-02]),
        )
        assert_array_almost_equal(
            out.sd_beta,
            np.array([1.0349270280543437, 1.583997785262061, 0.0063321988657267]),
        )
        assert_array_almost_equal(
            out.cov_beta,
            np.array([[4.4949592379003039e-01, -3.7421976890364739e-01,
                 -8.0978217468468912e-04],
               [-3.7421976890364739e-01, 1.0529686462751804e+00,
                 -1.9453521827942002e-03],
               [-8.0978217468468912e-04, -1.9453521827942002e-03,
                  1.6827336938454476e-05]]),
        )
Example #11
def orthoregress(x, y):
    """Perform an Orthogonal Distance Regression on the given data,
    using the same interface as the standard scipy.stats.linregress function.
    Adapted from https://gist.github.com/robintw/d94eb527c44966fbc8b9#file-orthoregress-py
    
    Arguments:
    x: x data
    y: y data

    Returns:
    [slope, intercept, residual variance]

    Uses standard ordinary least squares to estimate the starting parameters
    then uses the scipy.odr interface to the ODRPACK Fortran code to do the
    orthogonal distance calculations.
    """
    def f(p, x):
        """Basic linear regression 'model' for use with ODR"""
        return (p[0] * x) + p[1]

    linreg = stats.linregress(x, y)
    mod = Model(f)
    dat = Data(x, y)
    od = ODR(dat, mod, beta0=linreg[0:2])
    out = od.run()

    return list(out.beta) + [out.res_var]
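A usage sketch for orthoregress on synthetic data with noise in both coordinates (the definition above is assumed to be in scope; the noise levels and seed are arbitrary). With noisy x, ordinary least squares tends to attenuate the slope, which is the case ODR is designed to handle:

import numpy as np
from scipy import stats
from scipy.odr import ODR, Model, Data

rng = np.random.default_rng(42)
x_true = np.linspace(0.0, 10.0, 200)
y = 3.0 * x_true + 1.0 + rng.normal(scale=1.0, size=x_true.size)  # noise in y
x = x_true + rng.normal(scale=1.0, size=x_true.size)              # noise in the observed x

slope, intercept, res_var = orthoregress(x, y)
print("ODR:", slope, intercept, res_var)
print("OLS slope:", stats.linregress(x, y).slope)  # usually pulled toward zero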
Example #12
    def do_the_fit(obs, **kwargs):

        global print_output, beta0

        func = kwargs.get('function')
        yerr = kwargs.get('yerr')
        length = len(yerr)

        xerr = kwargs.get('xerr')

        if length == len(obs):
            assert 'x_constants' in kwargs
            data = RealData(kwargs.get('x_constants'), obs, sy=yerr)
            fit_type = 2
        elif length == len(obs) // 2:
            data = RealData(obs[:length], obs[length:], sx=xerr, sy=yerr)
            fit_type = 0
        else:
            raise Exception('x and y do not fit together.')

        model = Model(func)

        odr = ODR(data, model, beta0, partol=np.finfo(np.float64).eps)
        odr.set_job(fit_type=fit_type, deriv=1)
        output = odr.run()
        if print_output and not silent:
            print(*output.stopreason)
            print('chisquare/d.o.f.:', output.res_var)
            print_output = 0
        beta0 = output.beta
        return output.beta[kwargs.get('n')]
Example #13
def meanResiduals(pts):
    f = lambda B, x: B[0] * x + B[1]
    model = Model(f)
    data = RealData(list(p.x() for p in pts), list(p.y() for p in pts))
    odr = ODR(data, model, beta0=[0., 1.])
    out = odr.run()
    return out.sum_square / len(pts)
Example #14
def ajuste_odr_u(x, y):
    data = RealData(unp.nominal_values(x), unp.nominal_values(y), sx=unp.std_devs(x), sy=unp.std_devs(y))
    odr = ODR(data, mlinear, beta0=[1., 1.], ndigit=20)
    ajuste = odr.run()
    a, b = ajuste.beta
    ua, ub = np.sqrt(np.diag(ajuste.cov_beta))
    return a, ua, b, ub
Example #15
def regress_odr(x, y, sx, sy, beta0=[0., 1.]):
    """Return an ODR linear fit
    """
    linear = Model(my_linear)
    mydata = RealData(x.ravel(), y.ravel(), sx=sx.ravel(), sy=sy.ravel())
    myodr = ODR(mydata, linear, beta0=beta0)
    return myodr.run()
Example #16
 def test_quadratic_model(self):
     x = np.linspace(0.0, 5.0)
     y = 1.0 * x**2 + 2.0 * x + 3.0
     data = Data(x, y)
     odr_obj = ODR(data, quadratic)
     output = odr_obj.run()
     assert_array_almost_equal(output.beta, [1.0, 2.0, 3.0])
Example #17
def fit_slope(X, Y, eX=None, eY=None):
    """Will fit a Y vs X with errors using the scipy
    ODR functionalities and a linear fitting function

    Args:
        X:
        Y: input arrays
        eX:
        eY: input uncertaintites [None]

    Returns:
        odr output
    """

    # First filtering the Nan
    good = ~np.isnan(X)
    if eX is None: eX_good = None
    else: eX_good = eX[good]
    if eY is None: eY_good = None
    else: eY_good = eY[good]

    linear = Model(linear_fit)
    odr_data = RealData(X[good], Y[good], sx=eX_good, sy=eY_good)
    odr_run = ODR(odr_data, linear, beta0=[2., 1.])
    return odr_run.run()
Example #18
 def test_polynomial_model(self):
     x = np.linspace(0.0, 5.0)
     y = 1.0 + 2.0 * x + 3.0 * x**2 + 4.0 * x**3
     poly_model = polynomial(3)
     data = Data(x, y)
     odr_obj = ODR(data, poly_model)
     output = odr_obj.run()
     assert_array_almost_equal(output.beta, [1.0, 2.0, 3.0, 4.0])
Example #19
def ajuste_odr(x, y, ux, uy):
    data = RealData(x, y, sx=ux, sy=uy)
    odr = ODR(data, mlinear, beta0=[-1000., 1.], ndigit=20)
    ajuste = odr.run()
    a, b = ajuste.beta
    ua, ub = np.sqrt(np.diag(ajuste.cov_beta))
    ajuste.pprint()
    return a, ua, b, ub
Example #20
def graph(a, toc):  # a: True -> ODR fit with x errors, False -> curve_fit without; toc: figure number
    plt.figure(toc,figsize=(15,10))
    for tickLabel in plt.gca().get_xticklabels()+plt.gca().get_yticklabels():
        tickLabel.set_fontsize(15)
    plt.title("Number of coincidence counts in the peak of $Na^{22}$ wrt angle",fontsize=17)
    plt.xlabel(r'Angle $\theta$ (°)',fontsize=17)
    plt.ylabel('# coincidence events',fontsize=17)
    plt.scatter(Ld,res,label='\n Data with parameters'+\
               ' \n $16$ $cm$ source-detectors'+\
               ' \n $150$ $s/points$')
    if a==False:
        par, par_va = curve_fit(simplegaussian, Ld, res, p0=[70000, 180, 10,1000],sigma=err_res,absolute_sigma=True)
        chi2=round(sum(((res - simplegaussian(Ld,*par) )/ err_res) ** 2)/(len(Ld)-3),2)
        plt.plot(Lc,simplegaussian(Lc, *par),color='gold',label='Fit with '+r'$A\exp\{\frac{-(\theta-\mu)^2}{2\sigma^2}\}+Cst$'+\
                  ' \n $A =$%s'%int(par[0])+' $ \pm$ %s'%int(np.sqrt(np.diag(par_va)[0]))+' #'+\
                  ' \n $\mu =$ %s'%round(par[1],1)+' $\pm$ %s'%round(np.sqrt(np.diag(par_va)[1]),1)+'°'+\
                  ' \n $\sigma =$ %s'%round(par[2],1)+ '$\pm$ %s'%round(np.sqrt(np.diag(par_va)[2]),1)+'°'+\
                  ' \n $Cst=$ %s'%int(par[3])+' $\pm$ %s'%int(np.sqrt(np.diag(par_va)[3]))+' #'+\
                  ' \n $\chi^2/dof = $ %s'%chi2)
        plt.errorbar(Ld, res, err_res,fmt='.',label=r'$y=\sqrt{counts}$ '+\
                     '\n $x=0°$', color='black',ecolor='lightgray', elinewidth=3, capsize=0)

    else:
        data = RealData(Ld,res,err_resx,err_res)
        model = Model(simplegaussianl)

        odr = ODR(data, model, [73021, 183, 11,1208])
        odr.set_job(fit_type=2)
        output = odr.run()
        
        xn = Lc
        yn = simplegaussianl(output.beta, xn)
        
        #pl.hold(True)
        #plot(Ld,res,'ro')
        #print(x,y)
        plt.plot(xn,yn,'k-',label=' ODR leastsq fit'+\
            ' \n $\chi^2/dof = $ %s'%round(output.sum_square/(len(Ld)-3),2)+'\n')
        
        odr.set_job(fit_type=0)
        output = odr.run()
        par,par_va=output.beta,output.cov_beta
        yn = simplegaussianl(output.beta, xn)
        plt.plot(xn,yn,color='gold',label='ODR fit '+r'$A\exp\{\frac{-(\theta-\mu)^2}{2\sigma^2}\}+Cst$'+\
          ' \n $A =$%s'%int(par[0])+' $ \pm$ %s'%int(np.sqrt(np.diag(par_va)[0]))+' #'+\
          ' \n $\mu =$ %s'%round(par[1],1)+' $\pm$ %s'%round(np.sqrt(np.diag(par_va)[1]),1)+'°'+\
          ' \n $\sigma =$ %s'%round(par[2],1)+ '$\pm$ %s'%round(np.sqrt(np.diag(par_va)[2]),1)+'°'+\
          ' \n $Cst=$ %s'%int(par[3])+' $\pm$ %s'%int(np.sqrt(np.diag(par_va)[3]))+' #'+\
          ' \n Sum of squares/dof $= $ %s'%round(output.sum_square/(len(Ld)-3),2))
        plt.legend(loc=0)
        plt.errorbar(Ld, res, err_res,err_resx,label=r'$y=\sqrt{counts}$ '+\
                     '\n $x=$%s'%errx+'°',fmt='.', color='black',ecolor='lightgray', elinewidth=3, capsize=0)


    plt.gca().set_xlim(140,220)
    plt.legend(bbox_to_anchor=(0.68, 0.58), loc=1, borderaxespad=0.,prop={'size':14})
    plt.yscale('log')
    plt.ylim(1e2,1e5)
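Example #20 reuses one ODR object and switches set_job(fit_type=...) between runs: in scipy.odr, fit_type=2 requests an ordinary least-squares solution (x errors ignored) and fit_type=0 the full explicit orthogonal distance regression. A stripped-down sketch of that pattern on invented data:

import numpy as np
from scipy.odr import ODR, Model, RealData

def line(beta, x):
    return beta[0] * x + beta[1]

rng = np.random.default_rng(0)
x = np.linspace(0.0, 10.0, 50)
y = 2.0 * x + 1.0 + rng.normal(scale=0.5, size=x.size)

data = RealData(x, y, sx=np.full_like(x, 0.1), sy=np.full_like(x, 0.5))
odr = ODR(data, Model(line), beta0=[1.0, 0.0])

odr.set_job(fit_type=2)   # ordinary least squares
print(odr.run().beta)

odr.set_job(fit_type=0)   # explicit orthogonal distance regression
print(odr.run().beta)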
Example #21
    def test_pearson(self):
        p_x = np.array([0., .9, 1.8, 2.6, 3.3, 4.4, 5.2, 6.1, 6.5, 7.4])
        p_y = np.array([5.9, 5.4, 4.4, 4.6, 3.5, 3.7, 2.8, 2.8, 2.4, 1.5])
        p_sx = np.array([.03, .03, .04, .035, .07, .11, .13, .22, .74, 1.])
        p_sy = np.array([1., .74, .5, .35, .22, .22, .12, .12, .1, .04])

        p_dat = RealData(p_x, p_y, sx=p_sx, sy=p_sy)

        # Reverse the data to test invariance of results
        pr_dat = RealData(p_y, p_x, sx=p_sy, sy=p_sx)

        p_mod = Model(self.pearson_fcn, meta=dict(name='Uni-linear Fit'))

        p_odr = ODR(p_dat, p_mod, beta0=[1., 1.])
        pr_odr = ODR(pr_dat, p_mod, beta0=[1., 1.])

        out = p_odr.run()
        assert_array_almost_equal(
            out.beta,
            np.array([5.4767400299231674, -0.4796082367610305]),
        )
        assert_array_almost_equal(
            out.sd_beta,
            np.array([0.3590121690702467, 0.0706291186037444]),
        )
        assert_array_almost_equal(
            out.cov_beta,
            np.array([[0.0854275622946333, -0.0161807025443155],
                      [-0.0161807025443155, 0.003306337993922]]),
        )

        rout = pr_odr.run()
        assert_array_almost_equal(
            rout.beta,
            np.array([11.4192022410781231, -2.0850374506165474]),
        )
        assert_array_almost_equal(
            rout.sd_beta,
            np.array([0.9820231665657161, 0.3070515616198911]),
        )
        assert_array_almost_equal(
            rout.cov_beta,
            np.array([[0.6391799462548782, -0.1955657291119177],
                      [-0.1955657291119177, 0.0624888159223392]]),
        )
Example #22
def ortho_regress(x, y):
    linreg = linregress(x, y)
    mod = Model(f)
    dat = Data(x, y)
    od = ODR(dat, mod, beta0=linreg[0:2])
    out = od.run()
    #print(list(out.beta))
    #return list(out.beta) + [np.nan, np.nan, np.nan]
    return(list(out.beta))
Example #23
def orthogonal_distance_regression(func, data, err, beta0=None):
    """Fit the function using scipy.odr and return fit output."""
    if beta0 is None:
        beta0 = _start_values(data)
    model = Model(func)
    rdata = _real_data(data, err)
    fit = ODR(rdata, model, beta0=beta0)
    output = fit.run()
    print(output.beta)
    return output
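The helpers _start_values and _real_data are private functions from the same module and are not shown in this excerpt. A hypothetical sketch of what they could look like, assuming data is an (x, y) pair and err holds the y uncertainties:

import numpy as np
from scipy.odr import RealData

def _start_values(data):
    # hypothetical: crude initial guess from a first-degree polynomial fit
    x, y = data
    slope, intercept = np.polyfit(x, y, 1)
    return [slope, intercept]

def _real_data(data, err):
    # hypothetical: wrap (x, y) and the y uncertainties in a RealData object
    x, y = data
    return RealData(x, y, sy=err)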
Example #24
    def orthoregress(x, y, xerr, yerr):

        linreg = linregress(x, y)
        mod = Model(f)

        dat = RealData(x, y, sx=xerr, sy=yerr)

        od = ODR(dat, mod, beta0=linreg[0:2])
        out = od.run()
        return list(out.beta)
Example #25
    def fit(self, x, y):
        # Initial estimate of betas
        linreg = linregress(x, y)

        linear = Model(self.model)
        mydata = Data(x, y)
        myodr = ODR(mydata, linear, beta0=linreg[0:2])
        myoutput = myodr.run()

        self.betas = myoutput.beta
Example #26
def sigma(X, Y, X_err, Y_err):
    linear_model = Model(Linear)
    data = RealData(X, Y, sx=X_err, sy=Y_err)
    odr = ODR(data, linear_model, beta0=[0., 1.])
    out = odr.run()

    m = out.beta[0]
    b = out.beta[1]
    # residuals of the data about the fitted line
    recta = [(Y[i] - (X[i] * m + b)) for i in range(len(X))]
    return len(recta), f.dispersion(recta)
Example #27
    def test_lorentz(self):
        l_sy = np.array([.29] * 18)
        l_sx = np.array([
            .000972971, .000948268, .000707632, .000706679, .000706074,
            .000703918, .000698955, .000456856, .000455207, .000662717,
            .000654619, .000652694, .000000859202, .00106589, .00106378,
            .00125483, .00140818, .00241839
        ])

        l_dat = RealData(
            [
                3.9094, 3.85945, 3.84976, 3.84716, 3.84551, 3.83964, 3.82608,
                3.78847, 3.78163, 3.72558, 3.70274, 3.6973, 3.67373, 3.65982,
                3.6562, 3.62498, 3.55525, 3.41886
            ],
            [
                652, 910.5, 984, 1000, 1007.5, 1053, 1160.5, 1409.5, 1430,
                1122, 957.5, 920, 777.5, 709.5, 698, 578.5, 418.5, 275.5
            ],
            sx=l_sx,
            sy=l_sy,
        )
        l_mod = Model(self.lorentz, meta=dict(name='Lorentz Peak'))
        l_odr = ODR(l_dat, l_mod, beta0=(1000., .1, 3.8))

        out = l_odr.run()
        assert_array_almost_equal(
            out.beta,
            np.array([
                1.4306780846149925e+03, 1.3390509034538309e-01,
                3.7798193600109009e+00
            ]),
        )
        assert_array_almost_equal(
            out.sd_beta,
            np.array([
                7.3621186811330963e-01, 3.5068899941471650e-04,
                2.4451209281408992e-04
            ]),
        )
        assert_array_almost_equal(
            out.cov_beta,
            np.array([[
                2.4714409064597873e-01, -6.9067261911110836e-05,
                -3.1236953270424990e-05
            ],
                      [
                          -6.9067261911110836e-05, 5.6077531517333009e-08,
                          3.6133261832722601e-08
                      ],
                      [
                          -3.1236953270424990e-05, 3.6133261832722601e-08,
                          2.7261220025171730e-08
                      ]]),
        )
Example #28
def ODR_fitting(xdata, ydata, fitfunction, beta, fix):
    bpl_all = Model(fitfunction)
    data_all = RealData(xdata,
                        ydata,
                        sx=np.cov([xdata, ydata])[0][1],
                        sy=np.cov([xdata, ydata])[0][1])
    odr_all = ODR(data_all, bpl_all, beta0=beta, ifixb=fix)
    odr_all.set_job(fit_type=0)
    output_all = odr_all.run()
    #output_all.pprint()
    return (output_all.beta, output_all.sd_beta)
Example #29
def orth_regression(obs, model):
    linear = Model(f)
    mydata = RealData(obs, model)
    myodr = ODR(mydata, linear, beta0=[1., 0.])
    myoutput = myodr.run()
    params = myoutput.beta
    gradient = params[0]
    y_intercept = params[1]
    res_var = myoutput.res_var
    return np.around(gradient, 2), np.around(y_intercept,
                                             2), np.around(res_var, 2)
Example #30
 def run(self):
     fit_data = RealData(self.data['ref_spd'].values.flatten(), self.data['target_spd'].values.flatten())
     design = np.nan_to_num(fit_data.x[:, np.newaxis] ** [1, 0])  # columns [x, 1]
     target = np.nan_to_num(np.asarray(fit_data.y)[:, np.newaxis])
     p, res = lstsq(design, target)[0:2]  # OLS solution used as beta0 below
     self._model = ODR(fit_data, Model(OrthogonalLeastSquares.linear_func), beta0=[p[0][0], p[1][0]])
     self.out = self._model.run()
     self.params = {'slope': self.out.beta[0], 'offset': self.out.beta[1]}
     self.params['r2'] = self.get_r2()
     self.params['Num data points'] = self.num_data_pts
     # print("Model output:", self.out.pprint())
     self.show_params()