Example #1
    def test_jac(self):
        # Test that Jacobian callable is handled correctly and
        # weighted if sigma is provided.
        def f(x, a, b):
            return a * np.exp(-b*x)

        def jac(x, a, b):
            e = np.exp(-b*x)
            return np.vstack((e, -a * x * e)).T

        xdata = np.linspace(0, 1, 11)
        ydata = f(xdata, 2., 2.)

        # Test numerical options for least_squares backend.
        for method in ['trf', 'dogbox']:
            for scheme in ['2-point', '3-point', 'cs']:
                popt, pcov = curve_fit(f, xdata, ydata, jac=scheme,
                                       method=method)
                assert_allclose(popt, [2, 2])

        # Test the analytic option.
        for method in ['lm', 'trf', 'dogbox']:
            popt, pcov = curve_fit(f, xdata, ydata, method=method, jac=jac)
            assert_allclose(popt, [2, 2])

        # Now add an outlier and provide sigma.
        ydata[5] = 100
        sigma = np.ones(xdata.shape[0])
        sigma[5] = 200
        for method in ['lm', 'trf', 'dogbox']:
            popt, pcov = curve_fit(f, xdata, ydata, sigma=sigma, method=method,
                                   jac=jac)
            # The outlier is down-weighted but still influences the fit
            # slightly, so rtol=1e-3 is needed.
            assert_allclose(popt, [2, 2], rtol=1e-3)
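
A minimal standalone sketch (not the scipy internals) of what the test above relies on: with a 1-D sigma, curve_fit effectively divides both the residuals and a user-supplied Jacobian by sigma before handing them to the backend. The wrapper names below are assumptions for illustration only.

import numpy as np
from scipy.optimize import least_squares

def f(x, a, b):
    return a * np.exp(-b * x)

def jac(x, a, b):
    e = np.exp(-b * x)
    return np.vstack((e, -a * x * e)).T

xdata = np.linspace(0, 1, 11)
ydata = f(xdata, 2., 2.)
sigma = np.ones_like(xdata)
sigma[5] = 200  # down-weight one point, as in the test above

# Dividing residuals and Jacobian by sigma reproduces the weighting
# described in the comment of the test above.
def weighted_res(p):
    return (f(xdata, *p) - ydata) / sigma

def weighted_jac(p):
    return jac(xdata, *p) / sigma[:, None]

res = least_squares(weighted_res, x0=[1.0, 1.0], jac=weighted_jac)
print(res.x)  # approximately [2, 2]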
Example #2
    def test_dtypes2(self):
        # regression test for gh-7117: curve_fit fails if
        # both inputs are float32
        def hyperbola(x, s_1, s_2, o_x, o_y, c):
            b_2 = (s_1 + s_2) / 2
            b_1 = (s_2 - s_1) / 2
            return o_y + b_1*(x-o_x) + b_2*np.sqrt((x-o_x)**2 + c**2/4)

        min_fit = np.array([-3.0, 0.0, -2.0, -10.0, 0.0])
        max_fit = np.array([0.0, 3.0, 3.0, 0.0, 10.0])
        guess = np.array([-2.5/3.0, 4/3.0, 1.0, -4.0, 0.5])

        params = [-2, .4, -1, -5, 9.5]
        xdata = np.array([-32, -16, -8, 4, 4, 8, 16, 32])
        ydata = hyperbola(xdata, *params)

        # run optimization twice, with xdata being float32 and float64
        popt_64, _ = curve_fit(f=hyperbola, xdata=xdata, ydata=ydata, p0=guess,
                               bounds=(min_fit, max_fit))

        xdata = xdata.astype(np.float32)
        ydata = hyperbola(xdata, *params)

        popt_32, _ = curve_fit(f=hyperbola, xdata=xdata, ydata=ydata, p0=guess,
                               bounds=(min_fit, max_fit))

        assert_allclose(popt_32, popt_64, atol=2e-5)
Example #3
    def test_curvefit_simplecovariance(self):

        def func(x, a, b):
            return a * np.exp(-b*x)

        def jac(x, a, b):
            e = np.exp(-b*x)
            return np.vstack((e, -a * x * e)).T

        np.random.seed(0)
        xdata = np.linspace(0, 4, 50)
        y = func(xdata, 2.5, 1.3)
        ydata = y + 0.2 * np.random.normal(size=len(xdata))

        sigma = np.zeros(len(xdata)) + 0.2
        covar = np.diag(sigma**2)

        for jac1, jac2 in [(jac, jac), (None, None)]:
            for absolute_sigma in [False, True]:
                popt1, pcov1 = curve_fit(func, xdata, ydata, sigma=sigma,
                        jac=jac1, absolute_sigma=absolute_sigma)
                popt2, pcov2 = curve_fit(func, xdata, ydata, sigma=covar,
                        jac=jac2, absolute_sigma=absolute_sigma)

                assert_allclose(popt1, popt2, atol=1e-14)
                assert_allclose(pcov1, pcov2, atol=1e-14)
Example #4
    def test_maxfev_and_bounds(self):
        # gh-6340: with no bounds, curve_fit accepts parameter maxfev (via leastsq)
        # but with bounds, the parameter is `max_nfev` (via least_squares)
        x = np.arange(0, 10)
        y = 2*x
        popt1, _ = curve_fit(lambda x,p: p*x, x, y, bounds=(0, 3), maxfev=100)
        popt2, _ = curve_fit(lambda x,p: p*x, x, y, bounds=(0, 3), max_nfev=100)

        assert_allclose(popt1, 2, atol=1e-14)
        assert_allclose(popt2, 2, atol=1e-14)
Example #5
    def test_one_argument(self):
        def func(x,a):
            return x**a
        popt, pcov = curve_fit(func, self.x, self.y)
        assert_(len(popt) == 1)
        assert_(pcov.shape == (1,1))
        assert_almost_equal(popt[0], 1.9149, decimal=4)
        assert_almost_equal(pcov[0,0], 0.0016, decimal=4)

        # Test if we get the same with full_output. Regression test for #1415.
        res = curve_fit(func, self.x, self.y, full_output=1)
        (popt2, pcov2, infodict, errmsg, ier) = res
        assert_array_almost_equal(popt, popt2)
Example #6
def fitData(xdata, ydata):
    '''
    Fits the provided data to the first working function
    (first Gompertz, then Logistic, then Richards)

    Returns a tuple with plateau, slope, lag, y0 and model used
    If no fitting was possible all values are None

    Please note that the plateau may be reached outside the final time point
    '''
    retries = 2
    while retries > 0:
        params = [None, None, None, None, None]
        model = ''
        # Initial guesses for the output parameters
        p0 = [getPlateau(xdata, ydata), 4.0, getFlex(xdata, ydata), 0.1, 0]
        if retries == 1:
            p0[2] = 0
        try:
            params, pcov = curve_fit(gompertz, xdata, ydata, p0 = p0)
            model = 'gompertz'
            break
        except Exception:
            #logger.debug('Gompertz fit failed')
            try:
                params, pcov = curve_fit(logistic, xdata, ydata, p0 = p0)
                model = 'logistic'
                break
            except Exception:
                #logger.debug('Logistic fit failed')
                try:
                    params, pcov = curve_fit(richards, xdata, ydata, p0 = p0)
                    model = 'richards'
                    break
                except Exception:
                    #logger.debug('Richards fit failed')
                    retries -= 1
                    #logger.debug('%d retries left'%retries)
                    # Compress again the data
                    ydata = np.array(compress(ydata, span=2))
                    if len(ydata) <= 11:
                        window_len = len(ydata)
                    else:
                        window_len = 11
                    ydata = np.array(smooth(ydata, window_len = window_len, 
                              window = 'blackman'))
                    xdata = np.array(compress(xdata, span=2))
                    #
                    params = [None, None, None, None, None]
    
    return params, model
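
The gompertz, logistic and richards models (and the getPlateau/getFlex/compress/smooth helpers) are defined elsewhere in that project. Purely as an illustration of the kind of callable curve_fit expects here, a Zwietering-style Gompertz with five parameters matching the shape of p0 might look like this (a hypothetical stand-in, not the project's definition):

import numpy as np

def gompertz(x, plateau, slope, lag, v, y0):
    # Zwietering-style Gompertz growth curve; 'v' (the shape parameter used
    # by the Richards variant) is accepted but unused here, mirroring the
    # five-element p0 above. Illustrative stand-in only.
    A = plateau - y0
    return y0 + A * np.exp(-np.exp(slope * np.e / A * (lag - x) + 1))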
Example #7
 def test_broadcast_y(self):
     xdata = np.arange(10)
     target = 4.7 * xdata ** 2 + 3.5 * xdata + np.random.rand(len(xdata))
     fit_func = lambda x, a, b: a*x**2 + b*x - target
     for method in ['lm', 'trf', 'dogbox']:
         popt0, pcov0 = curve_fit(fit_func,
                                  xdata=xdata,
                                  ydata=np.zeros_like(xdata),
                                  method=method)
         popt1, pcov1 = curve_fit(fit_func,
                                  xdata=xdata,
                                  ydata=0,
                                  method=method)
         assert_array_equal(pcov0, pcov1)
Example #8
 def GetHist_Gauss(self):
     
     fit_gauss = lambda x, a, x0, sigma: a*exp(-np.square(x-x0)/
                                               (2*np.square(sigma)))    
     
     f1 = plt.figure(figsize=(8,12))
     ax = f1.add_subplot(211); ax3 = f1.add_subplot(212)
     ax2 = ax.twinx(); ax4 = ax3.twinx();
     ax2.set_yticklabels(("")); ax4.set_yticklabels((""))
     #ax.set_ylim((0,500))
     ax.set_title('trace'); ax3.set_title('retrace')
     ax.set_xlabel(r'$\mu_0H_{||} \ (\mathsf{T})$')
     ax3.set_xlabel(r'$\mu_0H_{||} \ (\mathsf{T})$')
      ax.set_ylabel('occurrence'); ax3.set_ylabel('occurrence')
     ax.set_xlim((-0.06,0.06)); ax3.set_xlim((-0.06,0.06))
     #trace
     hist1 = np.histogram(self.raw[nonzero(self.raw[:,0]%(2*self.dt)<self.dt)][:,1],
                                   200,range=[-0.06,0.06]) 
     #retrace
     hist2 = np.histogram(self.raw[nonzero(self.raw[:,0]%(2*self.dt)>=self.dt)][:,1],
                                   200,range=[-0.06,0.06]) 
     
     cl = ['grey','blue','green','red']
     c = [-0.04,-0.013,0.013,0.04]
     
     for i in range(4):    
         params, cov = curve_fit(fit_gauss,hist1[1][i*50:(i+1)*50],
                                 hist1[0][i*50:(i+1)*50],[1000,c[i],0.005])
         fit = fit_gauss(hist1[1][i*50:(i+1)*50],params[0],params[1],params[2])
         ax2.bar(hist1[1][i*50:(i+1)*50]-3e-4,hist1[0][i*50:(i+1)*50],
                 width=6e-4,color=cl[i],edgecolor='None')
         ax2.plot(hist1[1][i*50:(i+1)*50],fit,'k--')
         ax.bar(params[1],np.sqrt(2*pi)*params[0]*params[2],align='center',
             alpha=0.3,width=6*params[2],color='grey',edgecolor='black',
             linewidth=2)            
     
     for i in range(4):    
         params, cov = curve_fit(fit_gauss,hist2[1][i*50:(i+1)*50],
                                 hist2[0][i*50:(i+1)*50],[1000,c[i],0.005])
         fit = fit_gauss(hist2[1][i*50:(i+1)*50],params[0],params[1],params[2])
         ax4.bar(hist2[1][i*50:(i+1)*50]-3e-4,hist2[0][i*50:(i+1)*50],
                 width=6e-4,color=cl[i],edgecolor='None')
         ax4.plot(hist2[1][i*50:(i+1)*50],fit,'k--')
         ax3.bar(params[1],np.sqrt(2*pi)*params[0]*params[2],align='center',
             alpha=0.3,width=6*params[2],color='grey',edgecolor='black',
             linewidth=2)  
         
     f1.tight_layout() 
     f1.savefig('Fidelity.png',dpi=300,format='png')
     f1.savefig('Fidelity.pdf',dpi=300,format='pdf')
Example #9
    def test_bounds_p0(self):
        # This test is for issue #5719. The problem was that an initial guess
        # was ignored when 'trf' or 'dogbox' methods were invoked.
        def f(x, a):
            return np.sin(x + a)

        xdata = np.linspace(-2 * np.pi, 2 * np.pi, 40)
        ydata = np.sin(xdata)
        bounds = (-3 * np.pi, 3 * np.pi)
        for method in ["trf", "dogbox"]:
            popt_1, _ = curve_fit(f, xdata, ydata, p0=2.1 * np.pi)
            popt_2, _ = curve_fit(f, xdata, ydata, p0=2.1 * np.pi, bounds=bounds, method=method)

            # If the initial guess is ignored, then popt_2 would be close to 0.
            assert_allclose(popt_1, popt_2)
Example #10
    def test_regression_2639(self):
        # This test fails if epsfcn in leastsq is too large.
        x = [
            574.14200000000005,
            574.154,
            574.16499999999996,
            574.17700000000002,
            574.18799999999999,
            574.19899999999996,
            574.21100000000001,
            574.22199999999998,
            574.23400000000004,
            574.245,
        ]
        y = [859.0, 997.0, 1699.0, 2604.0, 2013.0, 1964.0, 2435.0, 1550.0, 949.0, 841.0]
        guess = [574.1861428571428, 574.2155714285715, 1302.0, 1302.0, 0.0035019999999983615, 859.0]
        good = [5.74177150e02, 5.74209188e02, 1.74187044e03, 1.58646166e03, 1.0068462e-02, 8.57450661e02]

        def f_double_gauss(x, x0, x1, A0, A1, sigma, c):
            return (
                A0 * np.exp(-(x - x0) ** 2 / (2.0 * sigma ** 2)) + A1 * np.exp(-(x - x1) ** 2 / (2.0 * sigma ** 2)) + c
            )

        popt, pcov = curve_fit(f_double_gauss, x, y, guess, maxfev=10000)
        assert_allclose(popt, good, rtol=1e-5)
Example #11
    def stdDev(self):
        '''
        get the standard deviation 
        from the PSF is evaluated as 2d Gaussian
        '''
        if self._corrPsf is None:
            self.psf()
        p = self._corrPsf.copy()
        mn = p.min()
        p[p<0.05*p.max()] = mn
        p-=mn
        p/=p.sum()
        
        x,y = self._psfGridCoords()
        x = x.flatten()
        y = y.flatten()

        guess = (1,1,0)

        param, _ = curve_fit(self._fn, (x,y), p.flatten(), guess)

        self._fitParam = param 
        stdx,stdy =  param[:2]
        self._std = (stdx+stdy)/2
        
        return self._std
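
self._fn is supplied elsewhere in the class; given that curve_fit is called with an (x, y) tuple, a flattened PSF and a three-element guess (stdx, stdy plus one more parameter), a plausible shape for it is a zero-centred 2-D Gaussian such as the following sketch (name and third parameter are assumptions):

import numpy as np

def gauss2d(xy, stdx, stdy, rho):
    # Zero-centred, normalised 2-D Gaussian with correlation rho,
    # evaluated on flattened x/y grids as curve_fit expects here.
    x, y = xy
    q = (x / stdx) ** 2 - 2 * rho * (x / stdx) * (y / stdy) + (y / stdy) ** 2
    g = np.exp(-q / (2 * (1 - rho ** 2)))
    return g / g.sum()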
Example #12
def fit_curve(r, n, c):
    '''Fit the data c(r) where c=concordance and r=call rate using an exponential approximation f.
    The fit is weighted using a normal approximation to a sum of binomial distributions, each representing
    a single events of a SNP being concordant for a single pair of samples at a single IBD2 segment.'''
    # print r, c
    (p, _) = curve_fit(model, r, c)  # , sigma=np.maximum(1e-5, np.sqrt(c * (1 - c) / n)))
    return model(r, *p), p
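
model is defined elsewhere; given the docstring's "exponential approximation" of concordance versus call rate, a hypothetical form (an assumption, not the original) could be:

import numpy as np

def model(r, a, b):
    # Concordance approaches 1 as call rate r increases; a and b control
    # the floor and the decay rate. Hypothetical stand-in only.
    return 1.0 - a * np.exp(-b * r)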
Example #13
def T1fit_run(x,y,guess,abs_fit):
    import numpy as np
    from scipy.optimize import curve_fit
    #t_order= x.argsort()
    #x=x[t_order]
    #y=y[t_order]
    smoothx = np.linspace(x[0], x[-1], 1000)
    #guess_a, guess_b, guess_c = y.max(), 2 * y.max(), T1_guess
    #guess = [guess_a, guess_b, guess_c]
    if abs_fit:
        exp_f = lambda x, A, B, t:abs((A - (B * np.exp(-x / t))))
    else:
        exp_f = lambda x, A, B, t:((A - (B * np.exp(-x / t))))
    params, cov = curve_fit(exp_f, x, y, p0=guess)
    A, B, t = params
    #best_fit = lambda x: abs((A - (B * np.exp(-x / t))))
    smoothy = exp_f(smoothx, A, B, t)
    T1 = t * (B / A - 1)            
    yfitting = exp_f(x, A, B, t)
    #yfitting = abs(A - (B * np.exp(-x / t)))
    residue = np.sum(abs(yfitting - y))
    result_dict = {            
            'A': A,
            'B': B,
            'T1_star':t,
            'T1':T1,
            't_val_org':x,
            'y_val_org':y,
            't_val_fit':smoothx,
            'y_val_fit':smoothy,
            'residue':residue,
            'error_status':0,
            'error_str':'OK!',
          }
    return result_dict
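
A minimal synthetic-data usage sketch for the function above (the values are assumptions chosen so the fit has an easy solution):

import numpy as np

# Synthetic inversion-recovery signal |A - B*exp(-t/T1*)| with A=1.0, B=2.0, T1*=0.9 s
t_inv = np.linspace(0.05, 5.0, 40)
signal = np.abs(1.0 - 2.0 * np.exp(-t_inv / 0.9))

result = T1fit_run(t_inv, signal, guess=[1.0, 2.0, 1.0], abs_fit=True)
print(result['T1_star'], result['T1'])  # T1_star ~ 0.9, T1 = T1_star*(B/A - 1) ~ 0.9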
Example #14
def fit_ha_and_n2(x, y, dy):
    ha = 6562.81
    n2 = 6583.41
    mask = np.logical_not(np.logical_or(np.isnan(y), np.isnan(x)))
    args = np.argsort(x[mask])
    x = x[mask][args]
    y = y[mask][args]
    if dy is not None:
        dy = dy[mask][args]
    guess = np.zeros(8)
    guess[0] = np.percentile(y, 10)
    guess[3] = x[y == np.max(y)][0]
    guess_beta = ((guess[3] / ha) ** 2 - 1) / ((guess[3] / ha) ** 2 + 1)
    guess_n2 = n2 * (1 + guess_beta) / np.sqrt(1 - guess_beta ** 2)
    haslice = np.logical_and(x > guess[3] - 10, x < guess[3] + 10)
    n2slice = np.logical_and(x > guess_n2 - 10, x < guess_n2 + 10)
    guess[1] = np.sum(0.5 * (y[haslice][1:] + y[haslice][:-1] - 2 * guess[0]) * np.diff(x[haslice]))
    guess[2] = np.sum(0.5 * (y[n2slice][1:] + y[n2slice][:-1] - 2 * guess[0]) * np.diff(x[n2slice]))
    guess[4:8] = 2
    try:
        fit = curve_fit(ha_and_n2_voigts, x, y, p0=guess, sigma=dy)[0]
        success = True
    except Exception:
        fit = guess
        success = False
    return fit, success
Example #15
 def Lifet(self,dt_max,t_max,save = False):
     delta_t = self.delta_t
     temp1 = []
     temp2 = []        
     tau = 0
     for ii in range(size(self.data[:,0])-1):           
         if (self.data[ii,1] == self.data[ii+1,1]):
             if(self.data[ii+1,0]-self.data[ii,0]<dt_max):
                 tau += self.data[ii+1,0]-self.data[ii,0]
             else:
                 if (self.data[ii,1] == +0.5): temp1.append(tau)
                 if (self.data[ii,1] == -0.5): temp2.append(tau)
                 tau = 0
         else:
             if (self.data[ii,1] == +0.5): temp1.append(tau)
             if (self.data[ii,1] == -0.5): temp2.append(tau)
             tau = 0
     
     temp = [temp1,temp2]
                         
     exp_fit = lambda t,tau,a : a*exp(-t/tau)    
     time = np.linspace(0,120,100)
     
     f1 = plt.figure()
     
     #for i in range(2):
     ax = f1.add_subplot(111)
     ax.set_yscale('log')
     ax.set_ylim(0.005,1)
     ax.set_yticks((0.01,0.1,1))
     ax.set_xlabel(r'$t \ (\mathsf{s})$')
     ax.set_ylabel(r"$\langle m_{\mathsf{I}} \ = \ -1/2 \rangle $");
     ax.set_xlim((0,t_max));
     ax.set_xticks((0,20,40,60,80,100,120))
      # create histogram of lifetime distribution
     H1 = np.histogram(np.array(temp[0]),
                       bins=np.linspace(delta_t,120,int(120/delta_t)))
 
     #extract all nonzero elements  #
     lft = (np.reshape(np.concatenate((H1[1][nonzero(H1[0]!=0)],
                                       H1[0][nonzero(H1[0]!=0)])),
                      (size(H1[1][nonzero(H1[0]!=0)]),2),order='F'))
                    
     params, cov = curve_fit(exp_fit,lft[:,0],lft[:,1],[10,100])
     
     fit = exp(-time/params[0])
 
     #normalize                     
     lft[:,1] = lft[:,1]/params[1]
     ax.scatter(lft[:,0],lft[:,1],c='k')  
     ax.plot(time,fit,'r--',linewidth=3)         
     ax.text(0.6,0.8,r'$\tau$'+' = '+str(round(params[0]*100)/100)+'s',
                                      fontsize = 'x-large',
                                      transform = ax.transAxes)      
                                          
     f1.tight_layout()
     
     if save == True:
         f1.savefig('lifetime.png',dpi=300,format='png')
         f1.savefig('lifetime.pdf',dpi=300,format='pdf')
Example #16
    def test_curvefit_covariance(self):
        def funcp(x, a, b):
            rotn = np.array(
                [[1.0 / np.sqrt(2), -1.0 / np.sqrt(2), 0], [1.0 / np.sqrt(2), 1.0 / np.sqrt(2), 0], [0, 0, 1.0]]
            )
            return rotn.dot(a * np.exp(-b * x))

        def jacp(x, a, b):
            rotn = np.array(
                [[1.0 / np.sqrt(2), -1.0 / np.sqrt(2), 0], [1.0 / np.sqrt(2), 1.0 / np.sqrt(2), 0], [0, 0, 1.0]]
            )
            e = np.exp(-b * x)
            return rotn.dot(np.vstack((e, -a * x * e)).T)

        def func(x, a, b):
            return a * np.exp(-b * x)

        def jac(x, a, b):
            e = np.exp(-b * x)
            return np.vstack((e, -a * x * e)).T

        np.random.seed(0)
        xdata = np.arange(1, 4)
        y = func(xdata, 2.5, 1.0)
        ydata = y + 0.2 * np.random.normal(size=len(xdata))
        sigma = np.zeros(len(xdata)) + 0.2
        covar = np.diag(sigma ** 2)
        # Get a rotation matrix, and obtain ydatap = R ydata
        # Chisq = ydata^T C^{-1} ydata
        #       = ydata^T R^T R C^{-1} R^T R ydata
        #       = ydatap^T Cp^{-1} ydatap
        # Cp^{-1} = R C^{-1} R^T
        # Cp      = R C R^T, since R^-1 = R^T
        rotn = np.array(
            [[1.0 / np.sqrt(2), -1.0 / np.sqrt(2), 0], [1.0 / np.sqrt(2), 1.0 / np.sqrt(2), 0], [0, 0, 1.0]]
        )
        ydatap = rotn.dot(ydata)
        covarp = rotn.dot(covar).dot(rotn.T)

        for jac1, jac2 in [(jac, jacp), (None, None)]:
            for absolute_sigma in [False, True]:
                popt1, pcov1 = curve_fit(func, xdata, ydata, sigma=sigma, jac=jac1, absolute_sigma=absolute_sigma)
                popt2, pcov2 = curve_fit(funcp, xdata, ydatap, sigma=covarp, jac=jac2, absolute_sigma=absolute_sigma)

                assert_allclose(popt1, popt2, atol=1e-14)
                assert_allclose(pcov1, pcov2, atol=1e-14)
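
A small standalone check of the rotation identity quoted in the comment above: the chi-square is unchanged when both the residual vector and the covariance are rotated by an orthogonal matrix (illustration only, with an arbitrary residual vector).

import numpy as np

rotn = np.array([[1/np.sqrt(2), -1/np.sqrt(2), 0],
                 [1/np.sqrt(2),  1/np.sqrt(2), 0],
                 [0, 0, 1.0]])
r = np.array([0.3, -0.1, 0.2])      # an arbitrary residual vector
C = np.diag([0.04, 0.04, 0.04])     # the diagonal covariance used above

chisq = r @ np.linalg.solve(C, r)
chisq_rot = (rotn @ r) @ np.linalg.solve(rotn @ C @ rotn.T, rotn @ r)

assert np.allclose(rotn @ rotn.T, np.eye(3))   # R^{-1} = R^T
assert np.allclose(chisq, chisq_rot)           # Chisq is invariant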
Example #17
    def test_array_like(self):
        # Test sequence input.  Regression test for gh-3037.
        def f_linear(x, a, b):
            return a*x + b

        x = [1, 2, 3, 4]
        y = [3, 5, 7, 9]
        assert_allclose(curve_fit(f_linear, x, y)[0], [2, 1], atol=1e-10)
Example #18
 def test_one_argument(self):
     def func(x,a):
         return x**a
     popt, pcov = curve_fit(func, self.x, self.y)
     assert_(len(popt) == 1)
     assert_(pcov.shape == (1,1))
     assert_almost_equal(popt[0], 1.9149, decimal=4)
     assert_almost_equal(pcov[0,0], 0.0016, decimal=4)
Example #19
 def test_two_argument(self):
     def func(x, a, b):
         return b*x**a
     popt, pcov = curve_fit(func, self.x, self.y)
     assert_(len(popt) == 2)
     assert_(pcov.shape == (2,2))
     assert_array_almost_equal(popt, [1.7989, 1.1642], decimal=4)
     assert_array_almost_equal(pcov, [[0.0852, -0.1260],[-0.1260, 0.1912]], decimal=4)
Example #20
def fitGaussian(x, y, guessMu, guessSigma):
    """
    Given a histogram of intensity values (x = I bin centers, y = N(I))
    and a guess for mu and sigma, returns fits for mu and sigma
    """
    g_guess = [guessMu, guessSigma]
    gf = lambda fx, mu, sigma: gaussian(fx, mu, sigma)
    params, cov = curve_fit(gf, x, y, p0=g_guess, maxfev=2000)
    return params[0], params[1]  #params = [mu, sigma]
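
gaussian is defined elsewhere; for a histogram normalised to a density, a two-parameter form consistent with the wrapper above could be (an assumption):

import numpy as np

def gaussian(x, mu, sigma):
    # Normal density; the amplitude is fixed by normalisation, leaving mu and sigma free.
    return np.exp(-(x - mu) ** 2 / (2 * sigma ** 2)) / (sigma * np.sqrt(2 * np.pi))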
Example #21
def fitPoisson(x,y,guessLambda):
    '''
    Given a histogram of intensity values (x = I bin centers, y = N(I))
    and a guess for expectation value, returns fit values for lambda.
    '''
    p_guess = [guessLambda]
    pf = lambda fx, lam: poisson(fx, lam)
    params, cov = curve_fit(pf, x, y, p0=p_guess, maxfev=2000)
    return params[0] #params = [lambda]
Example #22
def fitGaussian(x,y,guessMu,guessSigma):
    '''
    Given a histogram of intensity values (x = I bin centers, y = N(I))
    and a guess for mu and sigma, returns fits for mu and sigma
    '''
    g_guess = [guessMu,guessSigma]
    gf = lambda fx, mu, sigma: gaussian(fx, mu, sigma)
    params, cov = curve_fit(gf, x, y, p0=g_guess, maxfev=2000)
    return params[0], params[1] #params = [mu, sigma]
Example #23
 def test_two_argument(self):
     def func(x, a, b):
         return b*x**a
     popt, pcov = curve_fit(func, self.x, self.y)
     assert_(len(popt) == 2)
     assert_(pcov.shape == (2,2))
     assert_array_almost_equal(popt, [1.7989, 1.1642], decimal=4)
     assert_array_almost_equal(pcov, [[0.0852, -0.1260], [-0.1260, 0.1912]],
                               decimal=4)
Example #24
def fitMR(x, y, guessIc, guessIs):
    """
    Given a histogram of intensity values (x = I bin centers, y = N(I))
    and a guess for Ic and Is, returns fit values for Ic and Is.
    """
    mr_guess = [guessIc, guessIs]
    mrf = lambda fx, Ic, Is: modifiedRician(fx, Ic, Is)
    params, cov = curve_fit(mrf, x, y, p0=mr_guess, maxfev=2000)
    return params[0], params[1]  #params = [fitIc, fitIs]
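
modifiedRician is defined elsewhere; the standard modified-Rician intensity distribution from speckle statistics, with coherent component Ic and speckle component Is, would be a natural candidate (an assumption, not necessarily the project's exact form):

import numpy as np
from scipy.special import iv

def modifiedRician(I, Ic, Is):
    # Modified Rician: P(I) = (1/Is) * exp(-(I + Ic)/Is) * I0(2*sqrt(I*Ic)/Is)
    return np.exp(-(I + Ic) / Is) * iv(0, 2 * np.sqrt(I * Ic) / Is) / Is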
Example #25
def fitExponential(x, y, guessLam, guessTau, guessf0):
    """
    Given a histogram of intensity values (x = I bin centers, y = N(I))
    and a guess for mu and sigma, returns fits for mu and sigma
    """
    e_guess = [guessLam, guessTau, guessf0]
    ef = lambda fx, lam, tau, f0: exponential(fx, lam, tau, f0)
    params, cov = curve_fit(ef, x, y, p0=e_guess, maxfev=2000)
    return params[0], params[1], params[2]  #params = [lambda, tau, f0]
Example #26
def fitExponential(x,y,guessLam,guessTau,guessf0):
    '''
    Given a histogram of intensity values (x = I bin centers, y = N(I))
    and a guess for mu and sigma, returns fits for mu and sigma
    '''
    e_guess = [guessLam,guessTau,guessf0]
    ef = lambda fx, lam, tau, f0: exponential(fx, lam,tau,f0)
    params, cov = curve_fit(ef, x, y, p0=e_guess, maxfev=2000)
    return params[0], params[1], params[2] #params = [lambda, tau, f0]
Example #27
def fitPoisson(x, y, guessLambda):
    """
    Given a histogram of intensity values (x = I bin centers, y = N(I))
    and a guess for expectation value, returns fit values for lambda.
    """
    p_guess = [guessLambda]
    pf = lambda fx, lam: poisson(fx, lam)
    params, cov = curve_fit(pf, x, y, p0=p_guess, maxfev=2000)
    return params[0]  #params = [lambda]
Example #28
def fitMR(x, y, guessIc, guessIs):
    '''
    Given a histogram of intensity values (x = I bin centers, y = N(I))
    and a guess for Ic and Is, returns fit values for Ic and Is.
    '''
    mr_guess = [guessIc,guessIs]
    mrf = lambda fx, Ic, Is: modifiedRician(fx, Ic, Is)
    params, cov = curve_fit(mrf, x, y, p0=mr_guess, maxfev=2000)
    return params[0], params[1] #params = [fitIc, fitIs]
Example #29
    def test_maxfev_and_bounds(self):
        # gh-6340: with no bounds, curve_fit accepts parameter maxfev (via leastsq)
        # but with bounds, the parameter is `max_nfev` (via least_squares)
        x = np.arange(0, 10)
        y = 2 * x
        popt1, _ = curve_fit(lambda x, p: p * x,
                             x,
                             y,
                             bounds=(0, 3),
                             maxfev=100)
        popt2, _ = curve_fit(lambda x, p: p * x,
                             x,
                             y,
                             bounds=(0, 3),
                             max_nfev=100)

        assert_allclose(popt1, 2, atol=1e-14)
        assert_allclose(popt2, 2, atol=1e-14)
Example #30
def pvsatMat(Geff,Tmod,power,area, **kwargs):
    
    '''
    Origin: H2M
    Coefficients: four
    Result: the four coefficients

    Parameters
    ----------
    Geff:     1d array or list
              indoor irradiance measurements (Watt/m2)

    Tmod:     1d array or list
              indoor module temperature measurements

    power:    1d array or list
              power (Watt)

    area:     numpy float
              the area of the module

    Returns
    -------
    A dictionary with the four coefficients of the model.

    Note: Coefficient symbols are consistent with the source.
    '''
    
    
    Geff = np.array((Geff),dtype = float)
    Tmod = np.array((Tmod),dtype = float)
    x = np.array((Geff,Tmod),dtype = float)
    power = np.array(power)
    
    # quality control
    Geff[Geff<0.0] = 0.0001   
    power[power<0.0] = 0.0
    
    
    def _pvs(x,a,a1,a2,a3):   
        p = (a1 + a2*x[0] +a3*np.log(x[0])) * (1+a*(x[1]-25))* area * x[0]
        return p           
        
    
    params, pcov = curve_fit(_pvs, x, power)  # currently works with the Levenberg-Marquardt optimisation algorithm
    ermodule = {"a":params[0], "a1":params[1], "a2":params[2], "a3":params[3]}
    
    print(ermodule)     
    
    return ermodule
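
A minimal usage sketch with assumed toy data (irradiance in W/m2, module temperature in deg C, power in W for a 1.6 m2 module); since _pvs starts from curve_fit's default initial guess, convergence on arbitrary data is not guaranteed.

import numpy as np

Geff = np.linspace(100, 1000, 30)
Tmod = np.linspace(20, 60, 30)
area = 1.6
# Roughly 18% efficient module with a -0.4%/K temperature coefficient
power = 0.18 * area * Geff * (1 - 0.004 * (Tmod - 25))

coeffs = pvsatMat(Geff, Tmod, power, area)
print(coeffs)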
Example #31
 def Lifet(self):
    
     temp1 = []
     temp2 = []
     temp3 = []
     temp4 = []
     
     tau = 0
     for ii in range(size(self.data[:,0])-1):           
         if (self.data[ii,1] == self.data[ii+1,1]):
             tau += self.data[ii+1,0]-self.data[ii,0]
         else:
             if (self.data[ii,1] == -1.5): temp1.append(tau)
             if (self.data[ii,1] == -0.5): temp2.append(tau)
             if (self.data[ii,1] ==  0.5): temp3.append(tau)
             if (self.data[ii,1] ==  1.5): temp4.append(tau)                          
             tau = 0
     
     temp = [temp1,temp2,temp3,temp4]
                         
     exp_fit = lambda t,tau,a : a*exp(-t/tau)    
     time = np.linspace(0,120,100)
     
     f1 = plt.figure(figsize=(12,8))
     
     for i in range(4):
         ax = f1.add_subplot(2,2,i+1)
         ax.set_xlabel(r'$t \ (s)$')
         ax.set_ylabel(r'$<m_{\mathsf{I}} \ = \ $'+str(+1.5-i)+r"$>$");
         ax.set_xlim((0,120));
         ax.set_xticks((0,30,60,90,120))
         ax.set_yticks((0,0.5,1))
         ax.set_ylim((-0.1,1.1)); 
         # create histogram of lifetime distribution
         H1 = np.histogram(np.array(temp[i]),bins=np.linspace(1,48,120))
     
         #extract all nonzero elements  #
         lft = (np.reshape(np.concatenate((H1[1][nonzero(H1[0]!=0)],
                                           H1[0][nonzero(H1[0]!=0)])),
                          (size(H1[1][nonzero(H1[0]!=0)]),2),order='F'))
                        
         params, cov = curve_fit(exp_fit,lft[:,0],lft[:,1],[10,100])
         
         fit = exp(-time/params[0])
     
         #normalize                     
         lft[:,1] = lft[:,1]/params[1]
         ax.scatter(lft[:,0],lft[:,1],c='k')  
         ax.plot(time,fit,'r--',linewidth=3)         
         ax.text(0.3,0.8,r'$\tau$'+' = '+str(round(params[0]*100)/100)+'s',
                                          fontsize = 'x-large',
                                          transform = ax.transAxes)      
                                          
     f1.tight_layout()
     f1.savefig('lifetime.png',dpi=300,format='png')
     f1.savefig('lifetime.pdf',dpi=300,format='pdf')
Example #32
    def _parabola_fitter(x_data, y_data):
        tau = x_data[y_data.argmax()]
        c = np.mean(y_data)
        a = (y_data[2] - y_data[0]) / (x_data[2] - x_data[0]) / 2. / (
            x_data[1] - tau * .99)

        p0 = (a, tau, c)
        popt, pcov = curve_fit(parabola, x_data, y_data, p0)

        return popt
Example #33
    def test_curvefit_covariance(self):

        def funcp(x, a, b):
            rotn = np.array([[1./np.sqrt(2), -1./np.sqrt(2), 0], [1./np.sqrt(2), 1./np.sqrt(2), 0], [0, 0, 1.0]])
            return rotn.dot(a * np.exp(-b*x))

        def jacp(x, a, b):
            rotn = np.array([[1./np.sqrt(2), -1./np.sqrt(2), 0], [1./np.sqrt(2), 1./np.sqrt(2), 0], [0, 0, 1.0]])
            e = np.exp(-b*x)
            return rotn.dot(np.vstack((e, -a * x * e)).T)

        def func(x, a, b):
            return a * np.exp(-b*x)

        def jac(x, a, b):
            e = np.exp(-b*x)
            return np.vstack((e, -a * x * e)).T

        np.random.seed(0)
        xdata = np.arange(1, 4)
        y = func(xdata, 2.5, 1.0)
        ydata = y + 0.2 * np.random.normal(size=len(xdata))
        sigma = np.zeros(len(xdata)) + 0.2
        covar = np.diag(sigma**2)
        # Get a rotation matrix, and obtain ydatap = R ydata
        # Chisq = ydata^T C^{-1} ydata
        #       = ydata^T R^T R C^{-1} R^T R ydata
        #       = ydatap^T Cp^{-1} ydatap
        # Cp^{-1} = R C^{-1} R^T
        # Cp      = R C R^T, since R^-1 = R^T
        rotn = np.array([[1./np.sqrt(2), -1./np.sqrt(2), 0], [1./np.sqrt(2), 1./np.sqrt(2), 0], [0, 0, 1.0]])
        ydatap = rotn.dot(ydata)
        covarp = rotn.dot(covar).dot(rotn.T)

        for jac1, jac2 in [(jac, jacp), (None, None)]:
            for absolute_sigma in [False, True]:
                popt1, pcov1 = curve_fit(func, xdata, ydata, sigma=sigma,
                        jac=jac1, absolute_sigma=absolute_sigma)
                popt2, pcov2 = curve_fit(funcp, xdata, ydatap, sigma=covarp,
                        jac=jac2, absolute_sigma=absolute_sigma)

                assert_allclose(popt1, popt2, rtol=1.2e-7, atol=1e-14)
                assert_allclose(pcov1, pcov2, rtol=1.2e-7, atol=1e-14)
Example #34
    def test_ignore_nan(self):
        # Test for handling of NaNs in input data when ignore_nan flag is set: gh-11841
        f = lambda x, a, b: a*x + b

        xdata = np.array([1, 2, 3])
        ydata = np.array([3, 4, 6])
        expected = curve_fit(f, xdata, ydata)

        xdata = np.array([1, 2, np.nan, 3, 4, np.nan])
        ydata = np.array([3, 4, 5, 6, np.nan, 7])
        result = curve_fit(f, xdata, ydata, ignore_nan=True)

        assert result == expected

        xdata = np.array([np.nan, np.nan, np.nan])
        ydata = np.array([np.nan, np.nan, np.nan])
        result = curve_fit(f, xdata, ydata, ignore_nan=True)

        assert result == np.nan
Example #35
    def fit_delta_p(self):
        """
        Fit the PO2 difference between RBC and plasma using a fitting function that depends
        on linear density and the oxygen flux out of the capillary (j_t).

        Returns:
            fitted coefficient
        """
        x, delta_p = self._assemble_fit_func_xy()
        return curve_fit(self.fit_func, x, delta_p)
Example #36
    def test_one_argument(self):
        def func(x, a):
            return x**a

        popt, pcov = curve_fit(func, self.x, self.y)
        assert_(len(popt) == 1)
        assert_(pcov.shape == (1, 1))
        assert_almost_equal(popt[0], 1.9149, decimal=4)
        assert_almost_equal(pcov[0, 0], 0.0016, decimal=4)

        # Test if we get the same with full_output. Regression test for #1415.
        # Also test if check_finite can be turned off.
        res = curve_fit(func,
                        self.x,
                        self.y,
                        full_output=1,
                        check_finite=False)
        (popt2, pcov2, infodict, errmsg, ier) = res
        assert_array_almost_equal(popt, popt2)
Example #37
def fit(function, x, y):
    """Fit the provided functrion to the x and y values.

    The function parameters and the parameters covariance."""
    # Compute guesses for the parameters
    # This is necessary to get significant fits
    p0 = [guess_plateau(x, y), 4.0, guess_lag(x, y), 0.1, min(y)]

    params, pcov = curve_fit(function, x, y, p0=p0)
    return params, pcov
Example #38
    def run(self, x, y, dy):
        if leastsq is None:
            return self.result(x,
                               y,
                               dy,
                               None,
                               None,
                               msg='scipy leastsq function not available')
        if len(x) < 2:
            return self.result(x,
                               y,
                               dy,
                               None,
                               None,
                               msg='need at least two data points to fit')

        xn, yn, dyn = self._prepare(x, y, dy)
        if len(xn) < len(self.parnames):
            return self.result(xn,
                               yn,
                               dyn,
                               None,
                               None,
                               msg='need at least as many valid data points '
                               'as there are parameters')

        if not len(self.parstart):  # pylint: disable=len-as-condition
            try:
                self.parstart = self.guesspar(xn, yn)
            except Exception as e:
                return self.result(xn,
                                   yn,
                                   dyn,
                                   None,
                                   None,
                                   msg='while guessing parameters: %s' % e)

        try:
            # pylint: disable=unbalanced-tuple-unpacking
            popt, pcov = curve_fit(
                self.model,
                xn,
                yn,
                self.parstart,
                dyn,
                # default of 1000 can be too restrictive,
                # especially with automatic initial guess
                maxfev=5000)
            if isinf(pcov).all():
                parerrors = full_like(popt, 0)
            else:
                parerrors = sqrt(abs(diagonal(pcov)))
        except (RuntimeError, ValueError, TypeError) as e:
            return self.result(xn, yn, dyn, None, None, msg=str(e))
        return self.result(xn, yn, dyn, popt, parerrors)
Example #39
def fit(x):
    plimit = np.clip(x[0], 0, 100)
    gradlim = np.nanpercentile(np.abs(pder), plimit)
    idx = pder != 0
    idx &= np.abs(resid) < 5 * unc
    idx &= np.abs(pder) < gradlim

    # Sort pixels according to the change of the i
    # parameter needed to match the observations
    idx_sort = np.argsort(resid[idx] / pder[idx])
    ch_x = resid[idx][idx_sort] / pder[idx][idx_sort]
    # Weights of the individual pixels also sorted
    ch_y = np.abs(pder[idx][idx_sort]) / unc[idx][idx_sort]
    # Cumulative weights
    ch_y = np.cumsum(ch_y)
    # Normalized cumulative weights
    ch_y /= ch_y[-1]

    # Initial guess

    hmed = np.interp(0.5, ch_y, ch_x)
    interval = np.interp([0.16, 0.84], ch_y, ch_x)
    sigma_estimate = (interval[1] - interval[0]) / 2

    p0 = [hmed, sigma_estimate, 1]
    sopt = p0

    # Fit the distribution
    try:
        sopt, _ = curve_fit(cdf, ch_x, ch_y, p0=p0)
    except RuntimeError:
        # Fit failed, use dogbox instead
        try:
            sopt, _ = curve_fit(cdf, ch_x, ch_y, p0=p0, method="dogbox")
        except RuntimeError:
            sopt = p0

    sigma, interval = std(*sopt)
    hmed = (interval[0] + interval[1]) / 2

    res = ch_y - cdf(ch_x, *sopt)
    return np.sum(np.nan_to_num(res)**2) / ch_y.size
Example #40
    def test_jac(self):
        # Test that Jacobian callable is handled correctly and
        # weighted if sigma is provided.
        def f(x, a, b):
            return a * np.exp(-b * x)

        def jac(x, a, b):
            e = np.exp(-b * x)
            return np.vstack((e, -a * x * e)).T

        xdata = np.linspace(0, 1, 11)
        ydata = f(xdata, 2., 2.)

        # Test numerical options for least_squares backend.
        for method in ['trf', 'dogbox']:
            for scheme in ['2-point', '3-point', 'cs']:
                popt, pcov = curve_fit(f,
                                       xdata,
                                       ydata,
                                       jac=scheme,
                                       method=method)
                assert_allclose(popt, [2, 2])

        # Test the analytic option.
        for method in ['lm', 'trf', 'dogbox']:
            popt, pcov = curve_fit(f, xdata, ydata, method=method, jac=jac)
            assert_allclose(popt, [2, 2])

        # Now add an outlier and provide sigma.
        ydata[5] = 100
        sigma = np.ones(xdata.shape[0])
        sigma[5] = 200
        for method in ['lm', 'trf', 'dogbox']:
            popt, pcov = curve_fit(f,
                                   xdata,
                                   ydata,
                                   sigma=sigma,
                                   method=method,
                                   jac=jac)
            # The outlier is down-weighted but still influences the fit
            # slightly, so rtol=1e-3 is needed.
            assert_allclose(popt, [2, 2], rtol=1e-3)
Example #41
    def test_bounds_p0(self):
        # This test is for issue #5719. The problem was that an initial guess
        # was ignored when 'trf' or 'dogbox' methods were invoked.
        def f(x, a):
            return np.sin(x + a)

        xdata = np.linspace(-2 * np.pi, 2 * np.pi, 40)
        ydata = np.sin(xdata)
        bounds = (-3 * np.pi, 3 * np.pi)
        for method in ['trf', 'dogbox']:
            popt_1, _ = curve_fit(f, xdata, ydata, p0=2.1 * np.pi)
            popt_2, _ = curve_fit(f,
                                  xdata,
                                  ydata,
                                  p0=2.1 * np.pi,
                                  bounds=bounds,
                                  method=method)

            # If the initial guess is ignored, then popt_2 would be close to 0.
            assert_allclose(popt_1, popt_2)
Example #42
def fitBlackbody(wvls, flux, fraction=1.0, newWvls=None, tempGuess=8600):
    """
    Seth 11/13/14
    Simple blackbody fitting function that returns BB temperature and fluxes if requested.

    INPUTS:
        wvls - wavelengths of data points (in Angstroms!)
        flux - fluxes of data points (in ergs/s/cm^2/Angstrom!)
        fraction - what fraction of spectrum's red end to use for fit. Default is 1.0 (use whole spectrum).
                   for example, fraction=1.0/5.0 only fits BB to last 20% of spectrum.
        newWvls - 1D array of wavelengths in angstroms. If given, function returns blackbody fit at requested points
        tempGuess - manually adjust guess of BB temperature (in Kelvin) that fit starts with

    OUTPUTS:
        T - temperature in Kelvin of blackbody fit
        newFlux - fluxes calculated at newWvls using the BB equation generated by the fit

    """
    c = 3.00E10  # cm/s
    h = 6.626E-27  # erg*s
    k = 1.3806488E-16  # erg/K

    x = wvls
    norm = flux.max()
    y = flux / norm

    # print("BBfit using last ", fraction * 100, "% of spectrum only")
    fitx = x[int((1.0 - fraction) * len(x))::]
    fity = y[int((1.0 - fraction) * len(x))::]

    guess_a, guess_b = 1 / (2 * h * c ** 2 / 1e-9), tempGuess  # Constant, Temp
    guess = [guess_a, guess_b]

    blackbody = lambda fx, N, T: N * 2 * h * c ** 2 / (fx) ** 5 * (
            np.exp(h * c / (k * T * (fx))) - 1) ** -1  # Planck Law
    # blackbody = lambda fx, N, T: N*2*c*k*T/(fx)**4 #Rayleigh Jeans tail
    # blackbody = lambda fx, N, T: N*2*h*c**2/(fx**5) * exp(-h*c/(k*T*fx)) #Wein Approx

    params, cov = curve_fit(blackbody, fitx * 1.0e-8, fity, p0=guess, maxfev=2000)
    N, T = params
    print("BBFit:\nN = %s\nT = %s\n" % (N, T))

    if newWvls is not None:
        best_fit = lambda fx: N * 2 * h * c ** 2 / (fx) ** 5 * (
                np.exp(h * c / (k * T * (fx))) - 1) ** -1  # Planck Law
        # best_fit = lambda fx: N*2*c*k*T/(fx)**4 # Rayleigh Jeans Tail
        # best_fit = lambda fx: N*2*h*c**2/(fx**5) * exp(-h*c/(k*T*fx)) #Wein Approx

        calcx = np.array(newWvls, dtype=float)
        newFlux = best_fit(calcx * 1.0E-8)
        newFlux *= norm
        return T, newFlux
    else:
        return T
Example #43
    def fitExponentialToStdHb(self):
        """
        Fit parameters of an exponential curve to the simulated standard deviation

        Return:
            float, fitted decay length
        """
        stdSim = self.rbcDataPostProcessor.fieldStd('Hb_mean', self.sValues(),
                                                    nAverage=self.nAverage)
        popt, pcov = curve_fit(self.exponentialFitFunc, self.xValues, stdSim, p0=(100e-6, stdSim[0]))
        return popt
Example #44
    def test_bounds(self):
        def f(x, a, b):
            return a * np.exp(-b * x)

        xdata = np.linspace(0, 1, 11)
        ydata = f(xdata, 2.0, 2.0)

        # The minimum w/out bounds is at [2., 2.],
        # and with bounds it's at [1.5, smth].
        bounds = ([1.0, 0], [1.5, 3.0])
        for method in [None, "trf", "dogbox"]:
            popt, pcov = curve_fit(f, xdata, ydata, bounds=bounds, method=method)
            assert_allclose(popt[0], 1.5)

        # With bounds, the starting estimate is feasible.
        popt, pcov = curve_fit(f, xdata, ydata, method="trf", bounds=([0.0, 0], [0.6, np.inf]))
        assert_allclose(popt[0], 0.6)

        # method='lm' doesn't support bounds.
        assert_raises(ValueError, curve_fit, f, xdata, ydata, bounds=bounds, method="lm")
Example #45
def mat61853_1(Geff,Tmod,power, **kwargs):
    
    '''
    Parameters
    ----------
    Geff:     numpy float or list
              indoor irradiance measurements (Watt/m2)

    Tmod:     numpy float or list
              indoor module temperature measurements

    power:    numpy float or 1d array
              power returned for the given irradiance and module temperature (Watt)

    Returns
    -------
    ermodule: dict
              A dictionary containing the coefficients of the module.
              It should contain the six fields required for this model.
    '''
    
    Tmod = np.array(Tmod, dtype = float)
    Geff = np.array((Geff),dtype = float)
    power = np.array((power), dtype = float)
    
    Geff[Geff<0.0] = 0.0001   
    power[power<0.0] = 0.0
    
    x = np.array((Geff, Tmod), dtype = float)

    def _matiec(x,a0,a1,a2,a3,a4):
        
        #power = (a0 + a1*Geff + a2*(Geff**2))*(a4 + a5*Tmod)
        
        p = (a0 + a1*x[0] + a2*(x[0]**2))*(a3 + a4*x[1]) 
        return p
    
    params, pcov = curve_fit(_matiec,x,power)
    ermodule = {"a0":params[0], "a1":params[1], "a2":params[2], "a3":params[3], "a4":params[4]}
    
    print(ermodule)
    
    return ermodule
Example #46
def fitLorentzian(x,y,guessGam,guessX0):
    '''
    Given a histogram of intensity values (x = I bin centers, y = N(I))
    and a guess for Gamma and x0, returns fit values for Gamma and x0.
    '''
    lor_guess = [guessGam,guessX0]
    lf = lambda fx, gam, x0: lorentzian(fx, gam, x0)
    params, cov = curve_fit(lf, x, y, p0=lor_guess, maxfev=2000)
    return params[0], params[1] #params = [gamma, x0]

Example #47
    def test_method_argument(self):
        def f(x, a, b):
            return a * np.exp(-b * x)

        xdata = np.linspace(0, 1, 11)
        ydata = f(xdata, 2.0, 2.0)

        for method in ["trf", "dogbox", "lm", None]:
            popt, pcov = curve_fit(f, xdata, ydata, method=method)
            assert_allclose(popt, [2.0, 2.0])

        assert_raises(ValueError, curve_fit, f, xdata, ydata, method="unknown")
Example #48
    def test_method_argument(self):
        def f(x, a, b):
            return a * np.exp(-b*x)

        xdata = np.linspace(0, 1, 11)
        ydata = f(xdata, 2., 2.)

        for method in ['trf', 'dogbox', 'lm', None]:
            popt, pcov = curve_fit(f, xdata, ydata, method=method)
            assert_allclose(popt, [2., 2.])

        assert_raises(ValueError, curve_fit, f, xdata, ydata, method='unknown')
Example #49
def fitLorentzian(x, y, guessGam, guessX0):
    '''
    Given a histogram of intensity values (x = I bin centers, y = N(I))
    and a guess for Gamma and x0, returns fit values for Gamma and x0.
    '''
    lor_guess = [guessGam, guessX0]
    lf = lambda fx, gam, x0: lorentzian(fx, gam, x0)
    params, cov = curve_fit(lf, x, y, p0=lor_guess, maxfev=2000)
    return params[0], params[1]  #params = [gamma, x0]

Example #50
def fit2dArrayToFn(arr, fn, mask=None, down_scale_factor=None,
                   output_shape=None, guess=None,
                   outgrid=None):
    """Fit a 2d array to a 2d function

    USE ONLY MASKED VALUES
    
    * [down_scale_factor] map to speed up fitting procedure, set value smaller than 1
    * [output_shape] shape of the output array
    * [guess] must be scaled using [scale_factor]

    Returns:
        Fitted map, fitting params (scaled), error
    """
    if mask is None:
        #assert outgrid is not None
        mask = np.ones(shape=arr.shape, dtype=bool)

    if down_scale_factor is None:
        if mask.sum() > 1000:
            down_scale_factor = 0.3
        else:
            down_scale_factor = 1

    if down_scale_factor != 1:
        # SCALE TO DECREASE AMOUNT OF POINTS TO FIT:
        arr2 = zoom(arr, down_scale_factor)
        mask = zoom(mask, down_scale_factor, output=bool)
    else:
        arr2 = arr
    # USE ONLY VALID POINTS:
    x, y = np.where(mask)
    z = arr2[mask]
    # FIT:
    print (guess,111)
    parameters, cov_matrix = curve_fit(fn, (x, y), z, p0=guess)
    # ERROR:
    perr = np.sqrt(np.diag(cov_matrix))

    if outgrid is not None:
        yy,xx = outgrid
        rebuilt = fn((yy,xx), *parameters)
    else:
        if output_shape is None:
            output_shape = arr.shape
    
        fx = arr2.shape[0] / output_shape[0]
        fy = arr2.shape[1] / output_shape[1]
    
        rebuilt = np.fromfunction(lambda x, y: fn((x * fx, y * fy),
                                                  *parameters), output_shape)

    return rebuilt, parameters, perr
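
A minimal usage sketch with a simple plane model (the model and data are assumptions; down_scale_factor=1 keeps the example independent of the zoom step):

import numpy as np

def plane(xy, a, b, c):
    # z = a*x + b*y + c in the (x, y)-tuple form fit2dArrayToFn passes to fn.
    x, y = xy
    return a * x + b * y + c

arr = np.fromfunction(lambda i, j: 0.5 * i - 0.2 * j + 3.0, (40, 50))
rebuilt, params, perr = fit2dArrayToFn(arr, plane, down_scale_factor=1)
print(params)  # approximately [0.5, -0.2, 3.0]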
Example #51
    def test_method_argument(self):
        def f(x, a, b):
            return a * np.exp(-b*x)

        xdata = np.linspace(0, 1, 11)
        ydata = f(xdata, 2., 2.)

        for method in ['trf', 'dogbox', 'lm', None]:
            popt, pcov = curve_fit(f, xdata, ydata, method=method)
            assert_allclose(popt, [2., 2.])

        assert_raises(ValueError, curve_fit, f, xdata, ydata, method='unknown')
Example #52
    def fitExponentialToHbDifference(self):
        """
        Fit parameters of an exponential curve to the simulated hemoglobin saturation difference

        Return:
            float, fitted decay length
        """
        diffSim = self.hbDifferenceFromSimul(self.xValues)
        popt, pcov = curve_fit(self.exponentialFitFunc,
                               self.xValues,
                               diffSim,
                               p0=(100e-6, diffSim[0]))
        return popt
Example #53
    def test_func_is_classmethod(self):
        class test_self(object):
            """This class tests if curve_fit passes the correct number of
               arguments when the model function is a class instance method.
            """
            def func(self, x, a, b):
                return b * x**a

        test_self_inst = test_self()
        popt, pcov = curve_fit(test_self_inst.func, self.x, self.y)
        assert_(pcov.shape == (2, 2))
        assert_array_almost_equal(popt, [1.7989, 1.1642], decimal=4)
        assert_array_almost_equal(pcov, [[0.0852, -0.1260], [-0.1260, 0.1912]],
                                  decimal=4)
Example #54
    def test_bounds(self):
        def f(x, a, b):
            return a * np.exp(-b*x)

        xdata = np.linspace(0, 1, 11)
        ydata = f(xdata, 2., 2.)

        # The minimum w/out bounds is at [2., 2.],
        # and with bounds it's at [1.5, smth].
        bounds = ([1., 0], [1.5, 3.])
        for method in [None, 'trf', 'dogbox']:
            popt, pcov = curve_fit(f, xdata, ydata, bounds=bounds,
                                   method=method)
            assert_allclose(popt[0], 1.5)

        # With bounds, the starting estimate is feasible.
        popt, pcov = curve_fit(f, xdata, ydata, method='trf',
                               bounds=([0., 0], [0.6, np.inf]))
        assert_allclose(popt[0], 0.6)

        # method='lm' doesn't support bounds.
        assert_raises(ValueError, curve_fit, f, xdata, ydata, bounds=bounds,
                      method='lm')
Example #55
    def test_pcov(self):
        xdata = np.array([0, 1, 2, 3, 4, 5])
        ydata = np.array([1, 1, 5, 7, 8, 12])
        sigma = np.array([1, 2, 1, 2, 1, 2])

        def f(x, a, b):
            return a * x + b

        popt, pcov = curve_fit(f, xdata, ydata, p0=[2, 0], sigma=sigma)
        perr_scaled = np.sqrt(np.diag(pcov))
        assert_allclose(perr_scaled, [0.20659803, 0.57204404], rtol=1e-3)

        popt, pcov = curve_fit(f, xdata, ydata, p0=[2, 0], sigma=3 * sigma)
        perr_scaled = np.sqrt(np.diag(pcov))
        assert_allclose(perr_scaled, [0.20659803, 0.57204404], rtol=1e-3)

        popt, pcov = curve_fit(f,
                               xdata,
                               ydata,
                               p0=[2, 0],
                               sigma=sigma,
                               absolute_sigma=True)
        perr = np.sqrt(np.diag(pcov))
        assert_allclose(perr, [0.30714756, 0.85045308], rtol=1e-3)

        popt, pcov = curve_fit(f,
                               xdata,
                               ydata,
                               p0=[2, 0],
                               sigma=3 * sigma,
                               absolute_sigma=True)
        perr = np.sqrt(np.diag(pcov))
        assert_allclose(perr, [3 * 0.30714756, 3 * 0.85045308], rtol=1e-3)

        # infinite variances

        def f_flat(x, a, b):
            return a * x

        with warnings.catch_warnings():
            # suppress warnings when testing with inf's
            warnings.filterwarnings('ignore', category=OptimizeWarning)
            popt, pcov = curve_fit(f_flat,
                                   xdata,
                                   ydata,
                                   p0=[2, 0],
                                   sigma=sigma)
            assert_(pcov.shape == (2, 2))
            pcov_expected = np.array([np.inf] * 4).reshape(2, 2)
            assert_array_equal(pcov, pcov_expected)

            popt, pcov = curve_fit(f, xdata[:2], ydata[:2], p0=[2, 0])
            assert_(pcov.shape == (2, 2))
            assert_array_equal(pcov, pcov_expected)
Example #56
def get_tau(autocorrelation):
    """
    Parameters:
    -----------
    autocorrelation: np.darray
        autocorrelation function of a given observable

    Return:
    -------
    params: np.darray
        fitting parameters of autocorrelation onto an exponential decay
    """
    N = len(autocorrelation)//2
    x = np.arange(N)
    y = np.nan_to_num(autocorrelation[:N])
    params, errors = curve_fit(func,x, y)
    return params
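
func is defined elsewhere in that module; given the docstring ("fitting parameters of autocorrelation onto an exponential decay"), a plausible form is a single-parameter exponential decay (an assumption):

import numpy as np

def func(t, tau):
    # Exponential decay of the autocorrelation function with decay time tau.
    return np.exp(-t / tau)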
Example #57
    def test_regression_2639(self):
        # This test fails if epsfcn in leastsq is too large.
        x = [574.14200000000005, 574.154, 574.16499999999996,
             574.17700000000002, 574.18799999999999, 574.19899999999996,
             574.21100000000001, 574.22199999999998, 574.23400000000004,
             574.245]
        y = [859.0, 997.0, 1699.0, 2604.0, 2013.0, 1964.0, 2435.0,
             1550.0, 949.0, 841.0]
        guess = [574.1861428571428, 574.2155714285715, 1302.0, 1302.0,
                 0.0035019999999983615, 859.0]
        good = [5.74177150e+02, 5.74209188e+02, 1.74187044e+03, 1.58646166e+03,
                1.0068462e-02, 8.57450661e+02]

        def f_double_gauss(x, x0, x1, A0, A1, sigma, c):
            return (A0*np.exp(-(x-x0)**2/(2.*sigma**2))
                    + A1*np.exp(-(x-x1)**2/(2.*sigma**2)) + c)
        popt, pcov = curve_fit(f_double_gauss, x, y, guess, maxfev=10000)
        assert_allclose(popt, good, rtol=1e-5)
Example #58
    def test_dtypes(self):
        # regression test for gh-9581: curve_fit fails if x and y dtypes differ
        x = np.arange(-3, 5)
        y = 1.5*x + 3.0 + 0.5*np.sin(x)

        def func(x, a, b):
            return a*x + b

        for method in ['lm', 'trf', 'dogbox']:
            for dtx in [np.float32, np.float64]:
                for dty in [np.float32, np.float64]:
                    x = x.astype(dtx)
                    y = y.astype(dty)

                with warnings.catch_warnings():
                    warnings.simplefilter("error", OptimizeWarning)
                    p, cov = curve_fit(func, x, y, method=method)

                    assert np.isfinite(cov).all()
                    assert not np.allclose(p, 1)   # curve_fit's initial value
Example #59
def fitpol(x, y, guess=np.array([1, 90., 0.])):
    p, cov = curve_fit(polarized_intensity,
                       x,
                       y,
                       p0=guess,
                       Dfun=None,
                       maxfev=500000,
                       epsfcn=1.0e-9)
    if p[0] < 0:
        p[0] *= -1.0
    while p[1] > 180.0:
        p[1] -= 180.0
    while p[1] < 0.0:
        p[1] += 180.0

    a = np.isinf(cov)
    if a.all():
        print('Failed to fit a function')

    return p, cov
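
polarized_intensity is defined elsewhere; the parameter handling above (positive amplitude p[0], phase p[1] folded into 0-180 degrees, plus an offset p[2]) suggests a Malus-law style modulation such as this sketch (an assumption, not the original definition):

import numpy as np

def polarized_intensity(theta, amp, theta0, offset):
    # Malus-law modulation with a 180-degree period; angles in degrees.
    return offset + amp * np.cos(np.radians(theta - theta0)) ** 2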