Example #1
    def test_grid_fitting(self):
        xdata = np.arange(-5, 5, 1)
        ydata = np.arange(-5, 5, 1)
        xx, yy = np.meshgrid(xdata, ydata, sparse=False)
        xdata_coor = np.dstack((xx, yy))

        zdata = (2.5*xx**2 + 3.0*yy**2)

        a = Parameter(2.5, max=2.75)
        b = Parameter(3.0, min=2.75)
        x = Variable()
        y = Variable()
        new = (a*x**2 + b*y**2)

        fit = Fit(new, xdata_coor, zdata)

        # Test the flatten function for consistency.
        xdata_coor_flat, zdata_flat = fit._flatten(xdata_coor, zdata)
        # _flatten transposes such arrays because the variables are in the deepest dimension instead of the first.
        # This is normally not a problem because all we want from the fit is the correct parameters.
        self.assertFalse(np.array_equal(zdata, zdata_flat.reshape((10,10))))
        self.assertTrue(np.array_equal(zdata, zdata_flat.reshape((10,10)).T))
        self.assertFalse(np.array_equal(xdata_coor, xdata_coor_flat.reshape((10,10,2))))
        new_xdata = xdata_coor_flat.reshape((2,10,10)).T
        self.assertTrue(np.array_equal(xdata_coor, new_xdata))


        results = fit.execute()
        self.assertAlmostEqual(results.params.a, 2.5)
        self.assertAlmostEqual(results.params.b, 3.)
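
A numpy-only sketch (no symfit involved) of the shape bookkeeping behind the transpose comment above: np.dstack puts the coordinate pair in the last axis, while a variables-first layout would put it in the first axis.

import numpy as np

xdata = np.arange(-5, 5, 1)
ydata = np.arange(-5, 5, 1)
xx, yy = np.meshgrid(xdata, ydata, sparse=False)

coords = np.dstack((xx, yy))
print(coords.shape)                      # (10, 10, 2): variables in the deepest axis
print(np.moveaxis(coords, -1, 0).shape)  # (2, 10, 10): variables in the first axis
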
Example #2
    def test_gaussian_2d_fitting(self):
        mean = (0.6,0.4) # x, y mean 0.6, 0.4
        cov = [[0.2**2,0],[0,0.1**2]]

        data = np.random.multivariate_normal(mean, cov, 1000000)

        # Insert them as y, x here because np.histogram2d does not follow Cartesian conventions.
        ydata, xedges, yedges = np.histogram2d(data[:,1], data[:,0], bins=100, range=[[0.0, 1.0], [0.0, 1.0]])
        xcentres = (xedges[:-1] + xedges[1:]) / 2
        ycentres = (yedges[:-1] + yedges[1:]) / 2

        # Make a valid grid to match ydata
        xx, yy = np.meshgrid(xcentres, ycentres, sparse=False)

        x0 = Parameter()
        sig_x = Parameter(min=0.0)
        x = Variable()
        y0 = Parameter()
        sig_y = Parameter(min=0.0)
        A = Parameter()
        y = Variable()
        g = A * Gaussian(x, x0, sig_x) * Gaussian(y, y0, sig_y)

        fit = Fit(g, xx, yy, ydata)
        fit_result = fit.execute()

        # Again, the order seems to be swapped for py3k
        self.assertAlmostEqual(fit_result.params.x0, np.mean(data[:,0]), 1)
        self.assertAlmostEqual(fit_result.params.y0, np.mean(data[:,1]), 1)
        self.assertAlmostEqual(np.abs(fit_result.params.sig_x), np.std(data[:,0]), 1)
        self.assertAlmostEqual(np.abs(fit_result.params.sig_y), np.std(data[:,1]), 1)
        self.assertGreaterEqual(fit_result.r_squared, 0.99)
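
A note on the y, x swap in the np.histogram2d call above, with a tiny numpy-only sketch: np.histogram2d(x, y) returns a matrix whose first axis runs over the x bins, which is transposed with respect to the usual image/Cartesian plotting convention, so the example compensates by feeding the samples in reversed order.

import numpy as np

x = np.array([0.1, 0.1, 0.9])
y = np.array([0.8, 0.8, 0.2])
H, xedges, yedges = np.histogram2d(x, y, bins=2, range=[[0, 1], [0, 1]])
print(H)    # [[0., 2.], [1., 0.]] -- H[i, j] counts x in bin i and y in bin j
print(H.T)  # transpose (or swap the inputs, as above) to put y on the first axis
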
Example #3
    def test_gaussian_fitting(self):
        xdata = 2*np.random.rand(10000) - 1 # random between [-1, 1]
        ydata = 5.0 * scipy.stats.norm.pdf(xdata, loc=0.0, scale=1.0)

        x0 = Parameter()
        sig = Parameter()
        A = Parameter()
        x = Variable()
        g = A * Gaussian(x, x0, sig)

        fit = Fit(g, xdata, ydata)
        fit_result = fit.execute()

        self.assertAlmostEqual(fit_result.params.A, 5.0)
        self.assertAlmostEqual(np.abs(fit_result.params.sig), 1.0)
        self.assertAlmostEqual(fit_result.params.x0, 0.0)
        # raise Exception([i for i in fit_result.params])
        sexy = g(x=2.0, **fit_result.params)
        ugly = g(
            x=2.0,
            x0=fit_result.params.x0,
            A=fit_result.params.A,
            sig=fit_result.params.sig,
        )
        self.assertEqual(sexy, ugly)
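
A plain-Python sketch of why the two calls above are equivalent: the params object behaves like a mapping of parameter names to fitted values, and **-unpacking a mapping in a call is the same as spelling the keyword arguments out by hand. The function and values below are made up purely for illustration.

def g(x, A, x0, sig):            # hypothetical stand-in for the Gaussian model
    return A * (x - x0) / sig

params = {'A': 5.0, 'x0': 0.0, 'sig': 1.0}   # assumed values, illustration only
print(g(x=2.0, **params) == g(x=2.0, A=5.0, x0=0.0, sig=1.0))   # True
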
Example #4
    def test_read_only_results(self):
        """
        Fit results should be read-only. Let's try to break this!
        """
        xdata = np.linspace(1,10,10)
        ydata = 3*xdata**2

        a = Parameter(3.0, min=2.75)
        b = Parameter(2.0, max=2.75)
        x = Variable('x')
        new = a*x**b

        fit = Fit(new, xdata, ydata)
        fit_result = fit.execute()

        # Break it!
        try:
            fit_result.params = 'hello'
        except AttributeError:
            self.assertTrue(True) # desired result
        else:
            self.assertNotEqual(fit_result.params, 'hello')

        try:
            # Bypass the property. This works because name mangling only applies inside
            # the class body, so this simply creates a new instance attribute named __params.
            fit_result.__params = 'hello'
        except AttributeError as foo:
            self.assertTrue(False) # undesired result
        else:
            self.assertNotEqual(fit_result.params, 'hello')
            # The assignment succeeded, but only on a new instance attribute: because of
            # name mangling, the private attribute behind the params property is untouched.
            # The tests below show that it did not influence the fitting machinery, so
            # fitting still works fine.
            self.assertEqual(fit_result.__params, 'hello')

        # Do a second fit and double-check that we do not overwrite anything crucial.
        xdata = np.arange(-5, 5, 1)
        ydata = np.arange(-5, 5, 1)
        xx, yy = np.meshgrid(xdata, ydata, sparse=False)
        xdata_coor = np.dstack((xx, yy))

        zdata = (2.5*xx**2 + 3.0*yy**2)

        a = Parameter(2.5, max=2.75)
        b = Parameter(3.0, min=2.75)
        x = Variable()
        y = Variable()
        new = (a*x**2 + b*y**2)

        fit_2 = Fit(new, xdata_coor, zdata)
        fit_result_2 = fit_2.execute()
        self.assertNotAlmostEqual(fit_result.params.a, fit_result_2.params.a)
        self.assertAlmostEqual(fit_result.params.a, 3.0)
        self.assertAlmostEqual(fit_result_2.params.a, 2.5)
        self.assertNotAlmostEqual(fit_result.params.b, fit_result_2.params.b)
        self.assertAlmostEqual(fit_result.params.b, 2.0)
        self.assertAlmostEqual(fit_result_2.params.b, 3.0)
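
A minimal plain-Python sketch (independent of symfit) of the name-mangling behaviour the comments above rely on: inside a class body, __params is rewritten to _ClassName__params, so an assignment made from the outside only creates a new, unrelated attribute and never reaches the attribute behind the property.

class Results:
    def __init__(self):
        self.__params = {'a': 3.0}   # stored as _Results__params

    @property
    def params(self):
        return self.__params

r = Results()
r.__params = 'hello'             # no mangling outside the class: new attribute '__params'
print(r.params)                  # {'a': 3.0} -- the property is untouched
print(r.__params)                # 'hello'
print(r._Results__params)        # {'a': 3.0} -- the original, mangled attribute
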
Example #5
    def test_2_gaussian_2d_fitting(self):
        np.random.seed(4242)
        mean = (0.3, 0.3) # x, y mean 0.3, 0.3
        cov = [[0.01**2,0],[0,0.01**2]]
        data = np.random.multivariate_normal(mean, cov, 1000000)
        mean = (0.7, 0.7) # x, y mean 0.7, 0.7
        cov = [[0.01**2,0],[0,0.01**2]]
        data_2 = np.random.multivariate_normal(mean, cov, 1000000)
        data = np.vstack((data, data_2))

        # Insert them as y, x here because np.histogram2d does not follow Cartesian conventions.
        ydata, xedges, yedges = np.histogram2d(data[:,1], data[:,0], bins=100, range=[[0.0, 1.0], [0.0, 1.0]])
        xcentres = (xedges[:-1] + xedges[1:]) / 2
        ycentres = (yedges[:-1] + yedges[1:]) / 2

        # Make a valid grid to match ydata
        xx, yy = np.meshgrid(xcentres, ycentres, sparse=False)
        # xdata = np.dstack((xx, yy)).T

        x = Variable()
        y = Variable()

        x0_1 = Parameter(0.7, min=0.6, max=0.8)
        sig_x_1 = Parameter(0.1, min=0.0, max=0.2)
        y0_1 = Parameter(0.7, min=0.6, max=0.8)
        sig_y_1 = Parameter(0.1, min=0.0, max=0.2)
        A_1 = Parameter()
        g_1 = A_1 * Gaussian(x, x0_1, sig_x_1) * Gaussian(y, y0_1, sig_y_1)

        x0_2 = Parameter(0.3, min=0.2, max=0.4)
        sig_x_2 = Parameter(0.1, min=0.0, max=0.2)
        y0_2 = Parameter(0.3, min=0.2, max=0.4)
        sig_y_2 = Parameter(0.1, min=0.0, max=0.2)
        A_2 = Parameter()
        g_2 = A_2 * Gaussian(x, x0_2, sig_x_2) * Gaussian(y, y0_2, sig_y_2)

        model = g_1 + g_2
        fit = Fit(model, xx, yy, ydata)
        fit_result = fit.execute()

        img = model(x=xx, y=yy, **fit_result.params)
        img_g_1 = g_1(x=xx, y=yy, **fit_result.params)

        # Equal only up to limited precision, obviously.
        self.assertAlmostEqual(fit_result.params.x0_1, 0.7, 3)
        self.assertAlmostEqual(fit_result.params.y0_1, 0.7, 3)
        self.assertAlmostEqual(fit_result.params.x0_2, 0.3, 3)
        self.assertAlmostEqual(fit_result.params.y0_2, 0.3, 3)
Example #6
    def test_straight_line_analytical(self):
        """
        Test symfit against a straight line, for which the parameters and their
        uncertainties are known analytically. Assuming equal weights.
        :return:
        """
        data = [[0, 1], [1, 0], [3, 2], [5, 4]]
        x, y = (np.array(i, dtype='float64') for i in zip(*data))
        # x = np.arange(0, 100, 0.1)
        # np.random.seed(10)
        # y = 3.0*x + 105.0 + np.random.normal(size=x.shape)

        dx = x - x.mean()
        dy = y - y.mean()
        mean_squared_x = np.mean(x**2) - np.mean(x)**2
        mean_xy = np.mean(x * y) - np.mean(x)*np.mean(y)
        a = mean_xy/mean_squared_x
        b = y.mean() - a * x.mean()
        self.assertAlmostEqual(a, 0.694915, 6) # values from Mathematica
        self.assertAlmostEqual(b, 0.186441, 6)
        print(a, b)

        S = np.sum((y - (a*x + b))**2)
        var_a_exact = S/(len(x) * (len(x) - 2) * mean_squared_x)
        var_b_exact = var_a_exact*np.mean(x ** 2)
        a_exact = a
        b_exact = b

        # We will now compare these exact results with values from symfit
        a, b, x_var = Parameter(name='a', value=3.0), Parameter(name='b'), Variable(name='x')
        model = a*x_var + b
        fit = Fit(model, x, y, absolute_sigma=False)
        fit_result = fit.execute()

        popt, pcov = curve_fit(lambda z, c, d: c * z + d, x, y,
                               Dfun=lambda p, x, y, func: np.transpose([x, np.ones_like(x)]))
                                # Dfun=lambda p, x, y, func: print(p, func, x, y))

        # curve_fit
        self.assertAlmostEqual(a_exact, popt[0], 4)
        self.assertAlmostEqual(b_exact, popt[1], 4)
        self.assertAlmostEqual(var_a_exact, pcov[0][0], 6)
        self.assertAlmostEqual(var_b_exact, pcov[1][1], 6)

        self.assertAlmostEqual(a_exact, fit_result.params.a, 4)
        self.assertAlmostEqual(b_exact, fit_result.params.b, 4)
        self.assertAlmostEqual(var_a_exact**0.5, fit_result.params.a_stdev, 6)
        self.assertAlmostEqual(var_b_exact**0.5, fit_result.params.b_stdev, 6)
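
A numpy-only cross-check of the analytical slope and intercept used above; this is ordinary least squares, so np.polyfit must reproduce the same numbers.

import numpy as np

data = [[0, 1], [1, 0], [3, 2], [5, 4]]
x, y = (np.array(i, dtype='float64') for i in zip(*data))

mean_squared_x = np.mean(x**2) - np.mean(x)**2       # variance of x
mean_xy = np.mean(x * y) - np.mean(x) * np.mean(y)   # covariance of x and y
a = mean_xy / mean_squared_x
b = y.mean() - a * x.mean()

slope, intercept = np.polyfit(x, y, 1)
print(np.isclose(a, slope), np.isclose(b, intercept))   # True True
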
Example #7
    def test_gaussian_fitting(self):
        xdata = 2*np.random.rand(10000) - 1 # random between [-1, 1]
        ydata = scipy.stats.norm.pdf(xdata, loc=0.0, scale=1.0)

        x0 = Parameter()
        sig = Parameter()
        A = Parameter()
        x = Variable()
        g = A * Gaussian(x, x0, sig)

        fit = Fit(g, xdata, ydata)
        fit_result = fit.execute()
        print(fit_result)
        self.assertAlmostEqual(fit_result.params.A, 0.3989423)
        self.assertAlmostEqual(np.abs(fit_result.params.sig), 1.0)
        self.assertAlmostEqual(fit_result.params.x0, 0.0)
Example #8
    def test_error_analytical(self):
        """
        Test using a case where the analytical answer is known.
        Modeled after:
        http://nbviewer.ipython.org/urls/gist.github.com/taldcroft/5014170/raw/31e29e235407e4913dc0ec403af7ed524372b612/curve_fit.ipynb
        """
        N = 10000
        sigma = 10.0
        xn = np.arange(N, dtype=float)
        # yn = np.zeros_like(xn)
        np.random.seed(10)
        yn = np.random.normal(size=len(xn), scale=sigma)

        a = Parameter()
        y = Variable()
        model = {y: a}

        fit = Fit(model, y=yn, sigma_y=sigma)
        fit_result = fit.execute()


        popt, pcov = curve_fit(lambda x, a: a * np.ones_like(x), xn, yn, sigma=sigma, absolute_sigma=True)
        self.assertAlmostEqual(fit_result.params.a, popt[0], 5)
        self.assertAlmostEqual(fit_result.params.a_stdev, np.sqrt(np.diag(pcov))[0], 2)

        fit_no_sigma = Fit(model, yn)
        fit_result_no_sigma = fit_no_sigma.execute()

        popt, pcov = curve_fit(lambda x, a: a * np.ones_like(x), xn, yn,)
        # With or without sigma, the best-fit params should agree in the case of equal weights
        self.assertAlmostEqual(fit_result.params.a, fit_result_no_sigma.params.a, 5)
        # Since symfit is all about absolute errors, the sigma will not be in agreement
        self.assertNotEqual(fit_result.params.a_stdev, fit_result_no_sigma.params.a_stdev, 5)
        self.assertAlmostEqual(fit_result_no_sigma.params.a, popt[0], 5)
        self.assertAlmostEqual(fit_result_no_sigma.params.a_stdev, pcov[0][0]**0.5, 5)

        # Analytical answer for mean of N(0,1):
        mu = 0.0
        sigma_mu = sigma/N**0.5

        # self.assertAlmostEqual(fit_result.params.a, mu, 5)
        self.assertAlmostEqual(fit_result.params.a_stdev, sigma_mu, 5)
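
A plain numpy sketch of the analytical value this test compares against: fitting a constant to N points with known noise sigma is just taking the mean, and the standard error of that mean is sigma / sqrt(N).

import numpy as np

N = 10000
sigma = 10.0
np.random.seed(10)
yn = np.random.normal(size=N, scale=sigma)

a_hat = yn.mean()                # best-fit constant
a_stdev = sigma / np.sqrt(N)     # analytical standard error, 0.1 here
print(a_hat, a_stdev)
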
Example #9
    def test_2D_fitting(self):
        xdata = np.random.randint(-10, 11, size=(2, 400))
        zdata = 2.5*xdata[0]**2 + 7.0*xdata[1]**2

        a = Parameter()
        b = Parameter()
        x = Variable()
        y = Variable()
        new = a*x**2 + b*y**2

        fit = Fit(new, xdata[0], xdata[1], zdata)

        # result = fit.scipy_func(fit.xdata, [2, 3])
        result = fit.model(xdata[0], xdata[1], 2, 3)

        for arg_name, name in zip(('x', 'y', 'a', 'b'), inspect_sig.signature(fit.model).parameters):
            self.assertEqual(arg_name, name)

        fit_result = fit.execute()
        self.assertIsInstance(fit_result, FitResults)
Example #10
    def test_2D_fitting(self):
        xdata = np.random.randint(-10, 11, size=(2, 400))
        zdata = 2.5*xdata[0]**2 + 7.0*xdata[1]**2

        a = Parameter('a')
        b = Parameter('b')
        x = Variable('x')
        y = Variable('y')
        new = a*x**2 + b*y**2

        fit = Fit(new, xdata, zdata)

        result = fit.scipy_func(fit.xdata, [2, 3])

        import inspect
        args, varargs, keywords, defaults = inspect.getargspec(fit.scipy_func)
        self.assertEqual(args, ['x', 'p'])

        fit_result = fit.execute()
        self.assertIsInstance(fit_result, FitResults)
Example #11
    def test_grid_fitting(self):
        """
        This fit seems to fail occasionally. WTF? I'm not in the randomness generation business.
        """
        xdata = np.arange(-5, 5, 1)
        ydata = np.arange(5, 15, 1)
        xx, yy = np.meshgrid(xdata, ydata, sparse=True)

        zdata = (2.5*xx**2 + 3.0*yy**2)

        a = Parameter(2.5, max=2.75)
        b = Parameter(3.0, min=2.75)
        x = Variable()
        y = Variable()
        z = Variable()
        new = {z: a*x**2 + b*y**2}

        fit = Fit(new, x=xx, y=yy, z=zdata)
        results = fit.execute()

        self.assertAlmostEqual(results.params.a, 2.5)
        self.assertAlmostEqual(results.params.b, 3.)
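
A numpy-only sketch of what sparse=True changes in the grid above: the grids come back as (1, n) and (m, 1) arrays that broadcast against each other, so the model expression still evaluates on the full grid.

import numpy as np

xdata = np.arange(-5, 5, 1)
ydata = np.arange(5, 15, 1)
xx, yy = np.meshgrid(xdata, ydata, sparse=True)
print(xx.shape, yy.shape)           # (1, 10) (10, 1)
zdata = 2.5 * xx**2 + 3.0 * yy**2
print(zdata.shape)                  # (10, 10) after broadcasting
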
Example #12
    def test_fitting(self):
        xdata = np.linspace(1,10,10)
        ydata = 3*xdata**2

        a = Parameter('a')
        b = Parameter('b')
        x = Variable('x')
        new = a*x**b

        fit = Fit(new, xdata, ydata)

        func = sympy_to_py(new, [x], [a, b])
        result = func(xdata, 3, 2)
        self.assertTrue(np.array_equal(result, ydata))

        result = fit.scipy_func(fit.xdata, [3, 2])
        self.assertTrue(np.array_equal(result, ydata))

        import inspect
        args, varargs, keywords, defaults = inspect.getargspec(fit.scipy_func)

        # self.assertEqual(args, ['x', 'a', 'b'])
        fit_result = fit.execute()
        self.assertIsInstance(fit_result, FitResults)
        print(fit_result)
        self.assertAlmostEqual(fit_result.params.a, 3.0)
        self.assertAlmostEqual(fit_result.params.b, 2.0)

        self.assertIsInstance(fit_result.params.a_stdev, float)
        self.assertIsInstance(fit_result.params.b_stdev, float)

        self.assertIsInstance(fit_result.r_squared, float)

        # Test several false ways to access the data.
        self.assertRaises(AttributeError, getattr, *[fit_result.params, 'a_fdska'])
        self.assertRaises(AttributeError, getattr, *[fit_result.params, 'c'])
        self.assertRaises(AttributeError, getattr, *[fit_result.params, 'a_stdev_stdev'])
        self.assertRaises(AttributeError, getattr, *[fit_result.params, 'a_stdev_'])
        self.assertRaises(AttributeError, getattr, *[fit_result.params, 'a__stdev'])
Example #13
        def __get_grad(ar,n): # grad at different scales, see test_symfit_0707.ipynb
            m = n - 1
            (M,N) = ar.shape
    
            # output grad matrix with size (M-m)x(N-m)
            gd180 = np.zeros((M-m,N-m))
            gd360 = np.zeros((M-m,N-m))
    
            # initial values for fitting parameters
            ## Goodman et al. 1993 (doi:10.1086/172465)
            v0 = Parameter(value=5.)
            al = Parameter(value=0.)
            b1 = Parameter(value=0.) 

            v_0, a, b = parameters('v0, al, bl')
            x, y, z   = variables('x, y, z')      

            md = {z: v_0 + a * x + b * y}
    
            for (x,y),i in np.ndenumerate(ar):
                if x >= ar.shape[0]-m or y >= ar.shape[1]-m:
                    # fit grad from (x,y) (to (x+n, y+n)), so right/bottom edges are neglected
                    continue
                else:
                    ap = ar[slice(x,x+n),slice(y,y+n)]
                    # array of indices
                    xx,yy = np.where(~np.isnan(ap))
                    zz    = ap.flatten()
                    zz    = zz[~np.isnan(zz)]
                    
                    ft = Fit(md, x=xx, y=yy, z=zz)
                    ft_result = ft.execute()
            
                    (a,b) = (ft_result.params.al,ft_result.params.bl)
            
                    gd180[x,y] = np.mod(np.mod(360-np.degrees(np.arctan(b/a)), 360),180)
                    gd360[x,y] = np.mod(360-np.degrees(np.arctan(b/a)), 360)
                
            return gd180,gd360
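
A small numpy aside, not part of the snippet above: np.arctan(b / a) only returns angles in (-90, 90) degrees and discards the sign of a, whereas np.arctan2(b, a) keeps the quadrant and also handles a == 0.

import numpy as np

a, b = -1.0, 1.0
print(np.degrees(np.arctan(b / a)))   # -45.0
print(np.degrees(np.arctan2(b, a)))   # 135.0
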
Example #14
    def test_simple_sigma(self):
        from symfit.api import Variable, Parameter, Fit

        t_data = np.array([1.4, 2.1, 2.6, 3.0, 3.3])
        y_data = np.array([10, 20, 30, 40, 50])

        sigma = 0.2
        n = np.array([5, 3, 8, 15, 30])
        sigma_t = sigma / np.sqrt(n)

        # We now define our model
        y = Variable()
        g = Parameter()
        t_model = (2 * y / g)**0.5

        fit = Fit(t_model, y_data, t_data)#, sigma=sigma_t)
        fit_result = fit.execute()

        # h_smooth = np.linspace(0,60,100)
        # t_smooth = t_model(y=h_smooth, **fit_result.params)

        # Let's compare with the results from curve_fit, no weights
        popt_noweights, pcov_noweights = curve_fit(lambda y, p: (2 * y / p)**0.5, y_data, t_data)

        self.assertAlmostEqual(fit_result.params.g, popt_noweights[0])
        self.assertAlmostEqual(fit_result.params.g_stdev, np.sqrt(pcov_noweights[0, 0]))

        # Same sigma everywhere
        fit = Fit(t_model, y_data, t_data, 0.0031, absolute_sigma=False)
        fit_result = fit.execute()
        popt_sameweights, pcov_sameweights = curve_fit(lambda y, p: (2 * y / p)**0.5, y_data, t_data, sigma=0.0031, absolute_sigma=False)
        self.assertAlmostEqual(fit_result.params.g, popt_sameweights[0], 4)
        self.assertAlmostEqual(fit_result.params.g_stdev, np.sqrt(pcov_sameweights[0, 0]), 4)
        # The same weight everywhere should give the same result as no weight when absolute_sigma=False
        self.assertAlmostEqual(fit_result.params.g, popt_noweights[0], 4)
        self.assertAlmostEqual(fit_result.params.g_stdev, np.sqrt(pcov_noweights[0, 0]), 4)

        # Different sigma for every point
        fit = Fit(t_model, y_data, t_data, 0.1*sigma_t, absolute_sigma=False)
        fit_result = fit.execute()
        popt, pcov = curve_fit(lambda y, p: (2 * y / p)**0.5, y_data, t_data, sigma=.1*sigma_t)

        self.assertAlmostEqual(fit_result.params.g, popt[0])
        self.assertAlmostEqual(fit_result.params.g_stdev, np.sqrt(pcov[0, 0]))

        self.assertAlmostEqual(fit_result.params.g, 9.095, 3)
        self.assertAlmostEqual(fit_result.params.g_stdev, 0.102, 3) # according to Mathematica
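
A scipy-only sketch of the absolute_sigma behaviour the test above leans on: with absolute_sigma=False a constant sigma acts only as a relative weight, so both the best-fit value and its estimated standard deviation match the unweighted fit.

import numpy as np
from scipy.optimize import curve_fit

t_data = np.array([1.4, 2.1, 2.6, 3.0, 3.3])
y_data = np.array([10, 20, 30, 40, 50], dtype=float)

model = lambda y, g: (2 * y / g)**0.5

popt_now, pcov_now = curve_fit(model, y_data, t_data)
popt_w, pcov_w = curve_fit(model, y_data, t_data,
                           sigma=np.full_like(t_data, 0.0031), absolute_sigma=False)
print(popt_now[0], popt_w[0])                            # same best-fit g
print(np.sqrt(pcov_now[0, 0]), np.sqrt(pcov_w[0, 0]))    # same standard deviation
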
Example #15
class Fit1D(FitObject):
    'class for fitting 1d datasets'

    parent = ForwardInstance(lambda: ap.data.XYDataObject)
    plot = ForwardInstance(lambda: ap.plot.Plot1D)

    fitted = Bool(
        default=False
    )  # boolean which indicates if current model and data are fitted

    _model = Value()
    result = Value()

    _fit = Typed(Fit)

    def __init__(self, parent, *args, **kwargs):
        self.parent = parent
        super(Fit1D, self).__init__(*args, **kwargs)

        self.result = None

    def add_model(self, model):
        if self._model: del self._model

        if isinstance(model, str):
            self._model = get_model(model, self.parent.x, self.parent.y)
        else:
            self._model = model

        self.fitted = False

    @observe('parent.x', 'parent.x_updated', 'parent.y', 'parent.y_updated')
    def _data_updated(self, change):
        self.fitted = False

    def execute(self, *options, **kwoptions):
        self._fit = Fit(self._model, self.parent.x, self.parent.y)
        self.result = self._fit.execute(*options, **kwoptions)
Example #16
    def test_fitting(self):
        np.random.seed(4242)
        mean = (0.3, 0.3) # x, y mean 0.3, 0.3
        cov = [
            [0.01**2, 0.4],
            [0.4, 0.01**2]
        ]
        data = np.random.multivariate_normal(mean, cov, 1000000)
        mean = (0.7, 0.7) # x, y mean 0.7, 0.7
        cov = [[0.01**2,0],[0,0.01**2]]
        data_2 = np.random.multivariate_normal(mean, cov, 1000000)
        data = np.vstack((data, data_2))

        # Insert them as y, x here because np.histogram2d does not follow Cartesian conventions.
        ydata, xedges, yedges = np.histogram2d(data[:,1], data[:,0], bins=100, range=[[0.0, 1.0], [0.0, 1.0]])
        xcentres = (xedges[:-1] + xedges[1:]) / 2
        ycentres = (yedges[:-1] + yedges[1:]) / 2

        # Make a valid grid to match ydata
        xx, yy = np.meshgrid(xcentres, ycentres, sparse=False)
        # xdata = np.dstack((xx, yy)).T

        x = Variable()
        y = Variable()

        x0_1 = Parameter(0.7, min=0.6, max=0.8)
        sig_x_1 = Parameter(0.1, min=0.0, max=0.2)
        y0_1 = Parameter(0.7, min=0.6, max=0.8)
        sig_y_1 = Parameter(0.1, min=0.0, max=0.2)
        A_1 = Parameter()
        g_1 = A_1 * Gaussian(x, x0_1, sig_x_1) * Gaussian(y, y0_1, sig_y_1)

        x0_2 = Parameter(0.3, min=0.2, max=0.4)
        sig_x_2 = Parameter(0.1, min=0.0, max=0.2)
        y0_2 = Parameter(0.3, min=0.2, max=0.4)
        sig_y_2 = Parameter(0.1, min=0.0, max=0.2)
        A_2 = Parameter()
        g_2 = A_2 * Gaussian(x, x0_2, sig_x_2) * Gaussian(y, y0_2, sig_y_2)

        model = g_1 + g_2
        fit = Fit(model, xx, yy, ydata)
        fit_result = fit.execute()

        for param in fit_result.params:
            self.assertAlmostEqual(fit_result.stdev(param)**2, fit_result.variance(param))
            self.assertEqual(fit_result.stdev(param), fit_result.params.stdev(param))
            self.assertEqual(fit_result.value(param), fit_result.params.value(param))

        # Covariance matrix should be symmetric
        for param_1 in fit_result.params:
            for param_2 in fit_result.params:
                self.assertAlmostEqual(fit_result.covariance(param_1, param_2), fit_result.covariance(param_2, param_1))
        print(fit_result.params.covariance_matrix)
        print(fit_result.covariance(x0_1, x0_2))

        with warnings.catch_warnings(record=True) as w:
            # Cause all warnings to always be triggered.
            warnings.simplefilter("always")
            # Trigger DeprecationWarning
            fit_result.params.get_stdev(x0_1)
            fit_result.params.get_value(x0_1)
            self.assertTrue(len(w) == 2)
            for warning in w:
                self.assertTrue(issubclass(warning.category, DeprecationWarning))
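
A tiny numpy-only sketch of the two properties the loop above checks on the fit result: a covariance matrix is symmetric, and each parameter's standard deviation is the square root of the matching diagonal element. The matrix below is an assumed example, not taken from the fit.

import numpy as np

pcov = np.array([[0.04, 0.01],
                 [0.01, 0.09]])                # assumed example covariance matrix
stdevs = np.sqrt(np.diag(pcov))                # [0.2, 0.3]
print(np.allclose(pcov, pcov.T))               # True: symmetric
print(np.allclose(stdevs**2, np.diag(pcov)))   # True: variance = stdev**2
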
Example #17
 def execute(self, *options, **kwoptions):
     self._fit = Fit(self._model, self.parent.x, self.parent.y)
     self.result = self._fit.execute(*options, **kwoptions)
Example #18
from __future__ import print_function
from symfit.api import Parameter, Variable, Fit, exp
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

palette = sns.color_palette()


x = Variable()
A = Parameter()
sig = Parameter(name='sig', value=1.4, min=1.0, max=2.0)
x0 = Parameter(name='x0', value=15.0, min=0.0)
# Gaussian distribution
model = A*exp(-((x - x0)**2/(2 * sig**2)))

# Sample 10000 points from a N(15.0, 1.5) distribution
sample = np.random.normal(loc=15.0, scale=1.5, size=(10000,))
ydata, bin_edges = np.histogram(sample, 100)
xdata = (bin_edges[1:] + bin_edges[:-1])/2

fit = Fit(model, xdata, ydata)
fit_result = fit.execute()
print(fit_result)
print(model)

y = model(x=xdata, **fit_result.params)
sns.regplot(xdata, ydata, fit_reg=False)
plt.plot(xdata, y, color=palette[2])
plt.ylim(0, 400)
plt.show()
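
A numpy-only sketch of what amplitude to expect from the histogram fit above: for N samples of N(15.0, 1.5) spread over 100 bins, the count in the peak bin is roughly N * bin_width / (sigma * sqrt(2 * pi)), which is why A ends up in the few-hundreds range.

import numpy as np

N = 10000
sigma = 1.5
sample = np.random.normal(loc=15.0, scale=sigma, size=N)
ydata, bin_edges = np.histogram(sample, 100)
bin_width = bin_edges[1] - bin_edges[0]
expected_peak = N * bin_width / (sigma * np.sqrt(2 * np.pi))
print(ydata.max(), expected_peak)        # same order of magnitude
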
Example #19
argument_dict['x'] = x
argument_dict['x0'] = x0
argument_dict['k'] = k
if hasattr(args, 'n'):
    n = Parameter(value=args.n, min=0, max=args.n * 2, fixed=True)
    argument_dict['n'] = n

model = harmonic_distribution(**argument_dict)
#fit = fitter.InteractiveFit2D(model, xs, ys)
guess = symfit.contrib.interactive_guess.interactive_guess.InteractiveGuess2D(
    model, xs, ys)
#fit.visual_guess(1000)
#result = fit.execute(maxfev=1000)
result = guess.execute()

fit = Fit(model, xs, ys)
result = fit.execute()

print(result)

plt.scatter(xs, ys, color='b')
plt.plot(xs, model(x=xs, **result.params), color='r')
plt.show()
#
#angs = np.array(range(-180, 181), dtype=np.float64)
#probs = model(th=angs, **result.params)
#
#potentials = -kb*T*np.log(probs)
#print(potentials)
#
#potentials[potentials == np.inf] = 50
Example #20
    def test_error_advanced(self):
        """
        Models an example from the Mathematica docs and tries to replicate it:
        http://reference.wolfram.com/language/howto/FitModelsWithMeasurementErrors.html
        """
        data = [
            [0.9, 6.1, 9.5], [3.9, 6., 9.7], [0.3, 2.8, 6.6],
            [1., 2.2, 5.9], [1.8, 2.4, 7.2], [9., 1.7, 7.],
            [7.9, 8., 10.4], [4.9, 3.9, 9.], [2.3, 2.6, 7.4],
            [4.7, 8.4, 10.]
        ]
        xdata, ydata, zdata = [np.array(data) for data in zip(*data)]
        xy = np.vstack((xdata, ydata))
        # z = np.array(z)
        errors = np.array([.4, .4, .2, .4, .1, .3, .1, .2, .2, .2])

        # raise Exception(xy, z)
        a = Parameter(3.0)
        b = Parameter(0.9)
        c = Parameter(5)
        x = Variable()
        y = Variable()
        z = Variable()
        model = {z: a * log(b * x + c * y)}

        # fit = Fit(model, xy, z, absolute_sigma=False)
        fit = Fit(model, xdata, ydata, zdata, absolute_sigma=False)
        # fit = Fit(model, x=xdata, y=ydata, z=zdata, absolute_sigma=False)
        fit_result = fit.execute()

        # Same as Mathematica default behavior.
        self.assertAlmostEqual(fit_result.params.a, 2.9956, 4)
        self.assertAlmostEqual(fit_result.params.b, 0.563212, 4)
        self.assertAlmostEqual(fit_result.params.c, 3.59732, 4)
        self.assertAlmostEqual(fit_result.params.a_stdev, 0.278304, 4)
        self.assertAlmostEqual(fit_result.params.b_stdev, 0.224107, 4)
        self.assertAlmostEqual(fit_result.params.c_stdev, 0.980352, 4)

        fit = Fit(model, xdata, ydata, zdata, absolute_sigma=True)
        fit_result = fit.execute()
        # Same as Mathematica in measurement-error mode, but without supplying
        # any errors.
        self.assertAlmostEqual(fit_result.params.a, 2.9956, 4)
        self.assertAlmostEqual(fit_result.params.b, 0.563212, 4)
        self.assertAlmostEqual(fit_result.params.c, 3.59732, 4)
        self.assertAlmostEqual(fit_result.params.a_stdev, 0.643259, 4)
        self.assertAlmostEqual(fit_result.params.b_stdev, 0.517992, 4)
        self.assertAlmostEqual(fit_result.params.c_stdev, 2.26594, 4)

        fit = Fit(model, xdata, ydata, zdata, sigma_z=errors)
        fit_result = fit.execute()

        popt, pcov, infodict, errmsg, ier = curve_fit(lambda x_vec, a, b, c: a * np.log(b * x_vec[0] + c * x_vec[1]), xy, zdata, sigma=errors, absolute_sigma=True, full_output=True)

        # Same as curve_fit?
        self.assertAlmostEqual(fit_result.params.a, popt[0], 4)
        self.assertAlmostEqual(fit_result.params.b, popt[1], 4)
        self.assertAlmostEqual(fit_result.params.c, popt[2], 4)
        self.assertAlmostEqual(fit_result.params.a_stdev, np.sqrt(pcov[0,0]), 4)
        self.assertAlmostEqual(fit_result.params.b_stdev, np.sqrt(pcov[1,1]), 4)
        self.assertAlmostEqual(fit_result.params.c_stdev, np.sqrt(pcov[2,2]), 4)

        # Same as Mathematica with MEASUREMENT ERROR
        self.assertAlmostEqual(fit_result.params.a, 2.68807, 4)
        self.assertAlmostEqual(fit_result.params.b, 0.941344, 4)
        self.assertAlmostEqual(fit_result.params.c, 5.01541, 4)
        self.assertAlmostEqual(fit_result.params.a_stdev, 0.0974628, 4)
        self.assertAlmostEqual(fit_result.params.b_stdev, 0.247018, 4)
        self.assertAlmostEqual(fit_result.params.c_stdev, 0.597661, 4)
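
A scipy-only sketch of the trick used in the curve_fit cross-check above: curve_fit accepts a single xdata argument, so the two independent variables are stacked into one (2, N) array and unpacked again inside the model function. The synthetic data below are made up for illustration.

import numpy as np
from scipy.optimize import curve_fit

xdata = np.linspace(1.0, 10.0, 10)
ydata = np.linspace(1.0, 5.0, 10)
zdata = 3.0 * np.log(0.9 * xdata + 5.0 * ydata)   # synthetic, noise-free data

def model(x_vec, a, b, c):
    x, y = x_vec                  # unpack the stacked independent variables
    return a * np.log(b * x + c * y)

xy = np.vstack((xdata, ydata))
popt, pcov = curve_fit(model, xy, zdata, p0=[3.0, 0.9, 5.0])
print(popt)                       # close to [3.0, 0.9, 5.0]
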