Example No. 1
    def test_rdistmodel_fit(self):
        psf = PSF(sigma=1.59146972e+00)
        rm = RDistModel(psf, mem=self.memory, r='equal')
        x, y = self.cells[0].r_dist(20, 1)
        y -= y.min()

        fit = Fit(rm, x, y, minimizer=Powell, sigma_y=1 / np.sqrt(y))
        res = fit.execute()

        par_dict = {
            'a1': 75984.78344557587,
            'a2': 170938.0835695505,
            'r': 7.186390052694122
        }
        for k, v in par_dict.items():
            self.assertAlmostEqual(v, res.params[k], 2)
        self.assertAlmostEqual(21834555979.09033, res.objective_value, 3)

        fit = Fit(rm, x, y, minimizer=Powell)
        res = fit.execute()

        par_dict = {
            'a1': 86129.37542153012,
            'a2': 163073.91919617794,
            'r': 7.372535479080642
        }
        for k, v in par_dict.items():
            self.assertAlmostEqual(v, res.params[k], 2)
        self.assertAlmostEqual(7129232.534842306, res.objective_value, 3)
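PSF, RDistModel, and the cell data come from the surrounding test fixture and are not shown here. The weighting pattern itself is generic symfit: passing sigma_y to Fit turns it into a weighted least-squares problem. A minimal self-contained sketch of that pattern, with a toy linear model standing in for RDistModel (the data and the sqrt(y) weighting below are made up for illustration):

import numpy as np
from symfit import Fit, parameters, variables
from symfit.core.minimizers import Powell

a, b = parameters('a, b')
x, y = variables('x, y')
model = {y: a * x + b}

xdata = np.linspace(1, 10, 50)
ydata = 2.0 * xdata + 1.0 + np.random.normal(0, 0.1, xdata.shape)

# sigma_y gives each point its own standard deviation; a Poisson-like
# sqrt(y) scaling is assumed here purely for illustration.
fit = Fit(model, x=xdata, y=ydata, minimizer=Powell, sigma_y=np.sqrt(ydata))
res = fit.execute()
print(res.params, res.objective_value)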
Example No. 2
    def calc_drift(self,
                   step_size=1,
                   interpolation='poly',
                   upsample_factor=100):
        f = np.arange(len(self.data_arr))

        data = self.data_arr[::step_size]
        shifts = list(
            self.gen_drift_shift(data, upsample_factor=upsample_factor))
        self.x_shift, self.y_shift = np.array(shifts).T
        indices = np.arange(0, len(self.data_arr), step_size)

        if interpolation == 'poly':
            model = self._get_poly(6)
            x_result = Fit(model, indices, self.x_shift).execute()
            y_result = Fit(model, indices, self.y_shift).execute()
            self.x = model(f, **x_result.params)
            self.y = model(f, **y_result.params)
        elif interpolation == 'linear':
            # TODO: always include last frame
            self.x = np.interp(f, indices, self.x_shift)
            self.y = np.interp(f, indices, self.y_shift)

        return self.x, self.y
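The helper _get_poly is not shown in the source. A hypothetical sketch of what such a polynomial-model builder might look like in symfit (the name, signature, and variable names are assumptions):

from symfit import Model, parameters, variables

def get_poly(degree):
    # Build a symfit polynomial model y = c0 + c1*x + ... + c_degree*x**degree.
    x, y = variables('x, y')
    coeffs = parameters(', '.join('c{}'.format(i) for i in range(degree + 1)))
    return Model({y: sum(c * x**i for i, c in enumerate(coeffs))})

Fit(model, indices, self.x_shift) then pairs the positional data with the model's independent and dependent variables in order.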
Example No. 3
    def test_known_solution(self):
        p, c1 = parameters('p, c1')
        y, t = variables('y, t')
        p.value = 3.0

        model_dict = {
            D(y, t): - p * y,
        }

        # Let's say we know the exact solution to this problem:
        # y(t) = exp(-p*t) indeed solves dy/dt = -p*y.
        sol = Model({y: exp(- p * t)})

        # Generate some data
        tdata = np.linspace(0, 3, 10001)
        ydata = sol(t=tdata, p=3.22)[0]
        ydata += np.random.normal(0, 0.005, ydata.shape)

        ode_model = ODEModel(model_dict, initial={t: 0.0, y: ydata[0]})
        fit = Fit(ode_model, t=tdata, y=ydata)
        ode_result = fit.execute()

        c1.value = ydata[0]
        fit = Fit(sol, t=tdata, y=ydata)
        fit_result = fit.execute()

        self.assertAlmostEqual(ode_result.value(p) / fit_result.value(p), 1, 2)
        self.assertAlmostEqual(ode_result.r_squared / fit_result.r_squared, 1, 4)
        self.assertAlmostEqual(ode_result.stdev(p) / fit_result.stdev(p), 1, 3)
Example No. 4
    def test_simple_kinetics(self):
        """
        Simple kinetics data to test fitting
        """
        tdata = np.array([10, 26, 44, 70, 120])
        adata = 10e-4 * np.array([44, 34, 27, 20, 14])
        a, b, t = variables('a, b, t')
        k, a0 = parameters('k, a0')
        k.value = 0.01
        # a0.value, a0.min, a0.max = 54 * 10e-4, 40e-4, 60e-4
        # Rebinding a0 to a float fixes it as a constant; it is not fitted.
        a0 = 54 * 10e-4

        model_dict = {
            D(a, t): - k * a**2,
            D(b, t): k * a**2,
        }

        ode_model = ODEModel(model_dict, initial={t: 0.0, a: a0, b: 0.0})

        # Analytical solution: separating variables, da / a**2 = -k dt
        # integrates to 1/a = k*t + 1/a0.
        model = Model({a: 1 / (k * t + 1 / a0)})
        fit = Fit(model, t=tdata, a=adata)
        fit_result = fit.execute()

        fit = Fit(ode_model, t=tdata, a=adata, b=None, minimizer=MINPACK)
        ode_result = fit.execute()
        self.assertAlmostEqual(ode_result.value(k) / fit_result.value(k), 1.0, 4)
        self.assertAlmostEqual(ode_result.stdev(k) / fit_result.stdev(k), 1.0, 4)
        self.assertAlmostEqual(ode_result.r_squared / fit_result.r_squared, 1, 4)

        fit = Fit(ode_model, t=tdata, a=adata, b=None)
        ode_result = fit.execute()
        self.assertAlmostEqual(ode_result.value(k) / fit_result.value(k), 1.0, 4)
        self.assertAlmostEqual(ode_result.stdev(k) / fit_result.stdev(k), 1.0, 4)
        self.assertAlmostEqual(ode_result.r_squared / fit_result.r_squared, 1, 4)
Example No. 5
    def test_likelihood_fitting_exponential(self):
        """
        Fit using the likelihood method.
        """
        b = Parameter(value=4, min=3.0)
        x, y = variables('x, y')
        pdf = {y: Exp(x, 1/b)}

        # Draw points from an Exp(5) exponential distribution.
        np.random.seed(100)
        xdata = np.random.exponential(5, 1000000)

        # Expected parameter values
        mean = np.mean(xdata)
        stdev = np.std(xdata)
        mean_stdev = stdev / np.sqrt(len(xdata))

        with self.assertRaises(NotImplementedError):
            fit = Fit(pdf, x=xdata, sigma_y=2.0, objective=LogLikelihood)
        fit = Fit(pdf, xdata, objective=LogLikelihood)
        fit_result = fit.execute()

        self.assertAlmostEqual(fit_result.value(b) / mean, 1, 3)
        self.assertAlmostEqual(fit_result.value(b) / stdev, 1, 3)
        self.assertAlmostEqual(fit_result.stdev(b) / mean_stdev, 1, 3)
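The fitted b is compared to both the sample mean and the sample standard deviation because an exponential distribution has mean equal to stdev; symfit's Exp is the standard exponential pdf l*exp(-l*x), so with rate l = 1/b the maximum-likelihood estimate of b is the sample mean. A quick numerical check of those two comparison targets:

import numpy as np

np.random.seed(100)
samples = np.random.exponential(5, 1000000)
# mean == stdev == 1/rate for an exponential distribution:
print(np.mean(samples), np.std(samples))  # both approximately 5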
Example No. 6
    def test_diff_evo(self):
        """
        Tests fitting to a scalar gaussian with 2 independent variables with
        wide bounds.
        """

        fit = Fit(self.model, self.xx, self.yy, self.ydata, minimizer=BFGS)
        fit_result = fit.execute()

        self.assertIsInstance(fit.minimizer, BFGS)

        # Make sure a local optimizer doesn't find the answer.
        self.assertNotAlmostEqual(fit_result.value(self.x0_1), 0.4, 1)
        self.assertNotAlmostEqual(fit_result.value(self.y0_1), 0.4, 1)

        # On to the main event
        fit = Fit(self.model,
                  self.xx,
                  self.yy,
                  self.ydata,
                  minimizer=DifferentialEvolution)
        fit_result = fit.execute(polish=True, seed=0, tol=1e-4, maxiter=50)
        # Global minimizers are bad at pinpointing exact minima though, so
        # roughly equal is good enough.
        self.assertAlmostEqual(fit_result.value(self.x0_1), 0.4, 1)
        self.assertAlmostEqual(fit_result.value(self.y0_1), 0.4, 1)
Example No. 7
    def test_full_eval_range(self):
        """
        Test if ODEModels can be evaluated at t < t_initial.

        A bit of a no news is good news test.
        """
        tdata = np.array([0, 10, 26, 44, 70, 120])
        adata = 10e-4 * np.array([54, 44, 34, 27, 20, 14])
        a, b, t = variables('a, b, t')
        k, a0 = parameters('k, a0')
        k.value = 0.01
        t0 = tdata[2]
        a0 = adata[2]
        b0 = 0.02729855 # Obtained from evaluating from t=0.

        model_dict = {
            D(a, t): - k * a**2,
            D(b, t): k * a**2,
        }

        ode_model = ODEModel(model_dict, initial={t: t0, a: a0, b: b0})

        fit = Fit(ode_model, t=tdata, a=adata, b=None)
        ode_result = fit.execute()
        self.assertGreater(ode_result.r_squared, 0.95)

        # Now start from a timepoint that is not in the t-array such that it
        # triggers another pathway to be taken in integrating it.
        # Again, no news is good news.
        ode_model = ODEModel(model_dict, initial={t: t0 + 1e-5, a: a0, b: b0})

        fit = Fit(ode_model, t=tdata, a=adata, b=None)
        ode_result = fit.execute()
        self.assertGreater(ode_result.r_squared, 0.95)
Example No. 8
def test_vector_none_fitting():
    """
    Fit to a 3-component vector-valued function with one variable's data set
    to None, without bounds or guesses.
    """
    a, b, c = parameters('a, b, c')
    a_i, b_i, c_i = variables('a_i, b_i, c_i')

    model = {a_i: a, b_i: b, c_i: c}

    xdata = np.array([
        [10.1, 9., 10.5, 11.2, 9.5, 9.6, 10.],
        [102.1, 101., 100.4, 100.8, 99.2, 100., 100.8],
        [71.6, 73.2, 69.5, 70.2, 70.8, 70.6, 70.1],
    ])

    fit_none = Fit(model=model,
                   a_i=xdata[0],
                   b_i=xdata[1],
                   c_i=None,
                   minimizer=MINPACK)
    fit = Fit(model=model,
              a_i=xdata[0],
              b_i=xdata[1],
              c_i=xdata[2],
              minimizer=MINPACK)
    fit_none_result = fit_none.execute()
    fit_result = fit.execute()

    assert fit_none_result.value(b) == pytest.approx(fit_result.value(b), 1e-4)
    assert fit_none_result.value(a) == pytest.approx(fit_result.value(a), 1e-4)
    # The parameter without data should stay at its default initial value.
    assert fit_none_result.value(c) == pytest.approx(1.0)
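That default is what pytest.approx(1.0) checks against: with c_i=None there is no data term involving c, and symfit Parameters start at value=1.0 unless told otherwise:

from symfit import Parameter
print(Parameter('c').value)  # 1.0, the symfit default initial value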
Example No. 9
def test_gaussian_2d_fitting():
    """
    Tests fitting to a scalar gaussian function with 2 independent
    variables. Very sensitive to initial guesses: if they are chosen too
    restrictively, Fit fails outright. It therefore appears to be more
    sensitive than NumericalLeastSquares.
    """
    mean = (0.6, 0.4)  # x, y mean 0.6, 0.4
    cov = [[0.2**2, 0], [0, 0.1**2]]

    np.random.seed(0)
    data = np.random.multivariate_normal(mean, cov, 100000)

    # Insert them as y,x here since np.histogram2d does not follow Cartesian conventions.
    ydata, xedges, yedges = np.histogram2d(data[:, 0],
                                           data[:, 1],
                                           bins=100,
                                           range=[[0.0, 1.0], [0.0, 1.0]])
    xcentres = (xedges[:-1] + xedges[1:]) / 2
    ycentres = (yedges[:-1] + yedges[1:]) / 2

    # Make a valid grid to match ydata
    xx, yy = np.meshgrid(xcentres, ycentres, sparse=False, indexing='ij')

    x0 = Parameter(value=mean[0], min=0.0, max=1.0)
    sig_x = Parameter(value=0.2, min=0.0, max=0.3)
    y0 = Parameter(value=mean[1], min=0.0, max=1.0)
    sig_y = Parameter(value=0.1, min=0.0, max=0.3)
    A = Parameter(value=np.mean(ydata), min=0.0)
    x = Variable('x')
    y = Variable('y')
    g = Variable('g')
    model = GradientModel(
        {g: A * Gaussian(x, x0, sig_x) * Gaussian(y, y0, sig_y)})
    fit = Fit(model, x=xx, y=yy, g=ydata)
    fit_result = fit.execute()

    assert fit_result.value(x0) == pytest.approx(np.mean(data[:, 0]), 1e-3)
    assert fit_result.value(y0) == pytest.approx(np.mean(data[:, 1]), 1e-3)
    assert np.abs(fit_result.value(sig_x)) == pytest.approx(
        np.std(data[:, 0]), 1e-2)
    assert np.abs(fit_result.value(sig_y)) == pytest.approx(
        np.std(data[:, 1]), 1e-2)
    assert fit_result.r_squared >= 0.96

    # Compare with industry standard MINPACK
    fit_std = Fit(model, x=xx, y=yy, g=ydata, minimizer=MINPACK)
    fit_std_result = fit_std.execute()

    assert fit_std_result.value(x0) == pytest.approx(fit_result.value(x0),
                                                     1e-4)
    assert fit_std_result.value(y0) == pytest.approx(fit_result.value(y0),
                                                     1e-4)
    assert fit_std_result.value(sig_x) == pytest.approx(
        fit_result.value(sig_x), 1e-4)
    assert fit_std_result.value(sig_y) == pytest.approx(
        fit_result.value(sig_y), 1e-4)
    assert fit_std_result.r_squared == pytest.approx(fit_result.r_squared,
                                                     1e-4)
Example No. 10
def harmonic_approximation(polygon: Polygon, n=3):
    from symfit import Eq, Fit, cos, parameters, pi, sin, variables

    def fourier_series(x, f, n=0):
        """
        Returns a symbolic Fourier series of order `n`.

        :param x: Independent variable.
        :param f: Frequency of the Fourier series.
        :param n: Order of the Fourier series.
        """
        # Make the parameter objects for all the terms
        a0, *cos_a = parameters(','.join(['a{}'.format(i) for i in range(0, n + 1)]))
        sin_b = parameters(','.join(['b{}'.format(i) for i in range(1, n + 1)]))
        # Construct the series
        series = a0 + sum(ai * cos(i * f * x) + bi * sin(i * f * x)
                          for i, (ai, bi) in enumerate(zip(cos_a, sin_b), start=1))
        return series

    x, y = variables('x, y')
    w, = parameters('w')
    fourier = fourier_series(x, f=w, n=n)
    model_dict = {y: fourier}
    print(model_dict)

    # Extract data from argument
    # FIXME: how to make a clockwise strictly increasing curve?
    xdata, ydata = polygon.exterior.xy
    t = np.linspace(0, 2 * np.pi, num=len(xdata))

    constr = [
        # Ge(x, 0), Le(x, 2 * pi),
        Eq(fourier.subs({x: 0}), fourier.subs({x: 2 * pi})),
        Eq(fourier.diff(x).subs({x: 0}), fourier.diff(x).subs({x: 2 * pi})),
        # Eq(fourier.diff(x, 2).subs({x: 0}), fourier.diff(x, 2).subs({x: 2 * pi})),
        ]
    print(constr)

    fit_x = Fit(model_dict, x=t, y=xdata, constraints=constr)
    fit_y = Fit(model_dict, x=t, y=ydata, constraints=constr)
    fitx_result = fit_x.execute()
    fity_result = fit_y.execute()
    print(fitx_result)
    print(fity_result)

    # Define function that generates the curve
    def curve_lambda(_t):
        return np.array(
            [
                fit_x.model(x=_t, **fitx_result.params).y,
                fit_y.model(x=_t, **fity_result.params).y
                ]
            ).ravel()

    # code to test if fit is correct
    plot_fit(polygon, curve_lambda, t, title='Harmonic Approximation')

    return curve_lambda
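A hypothetical usage sketch, assuming shapely is installed and that the plot_fit helper referenced above (not shown in the source) is defined in the same module:

import numpy as np
from shapely.geometry import Point

circle = Point(0.0, 0.0).buffer(1.0)  # a polygon approximating the unit circle
curve = harmonic_approximation(circle, n=4)
# The periodicity constraints force the curve endpoints to coincide:
print(curve(np.array([0.0])), curve(np.array([2 * np.pi])))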
Example No. 11
def test_minimize():
    """
    Tests maximizing a function with and without constraints, taken from the
    scipy `minimize` tutorial. Compare the symfit result with the scipy
    result.
    https://docs.scipy.org/doc/scipy-0.18.1/reference/tutorial/optimize.html#constrained-minimization-of-multivariate-scalar-functions-minimize
    """
    x = Parameter(value=-1.0)
    y = Parameter(value=1.0)
    # Use an unnamed Variable on purpose to test the auto-generation of names.
    model = Model(2 * x * y + 2 * x - x ** 2 - 2 * y ** 2)

    constraints = [
        Ge(y - 1, 0),  # y - 1 >= 0,
        Eq(x**3 - y, 0),  # x**3 - y == 0,
    ]

    def func(x, sign=1.0):
        """ Objective function """
        return sign*(2*x[0]*x[1] + 2*x[0] - x[0]**2 - 2*x[1]**2)

    def func_deriv(x, sign=1.0):
        """ Derivative of objective function """
        dfdx0 = sign*(-2*x[0] + 2*x[1] + 2)
        dfdx1 = sign*(2*x[0] - 4*x[1])
        return np.array([dfdx0, dfdx1])

    cons = (
        {'type': 'eq',
         'fun': lambda x: np.array([x[0]**3 - x[1]]),
         'jac': lambda x: np.array([3.0*(x[0]**2.0), -1.0])},
        {'type': 'ineq',
         'fun': lambda x: np.array([x[1] - 1]),
         'jac': lambda x: np.array([0.0, 1.0])}
    )

    # Unconstrained fit
    res = minimize(func, [-1.0, 1.0], args=(-1.0,), jac=func_deriv,
                   method='BFGS', options={'disp': False})
    fit = Fit(model=-model)
    assert isinstance(fit.objective, MinimizeModel)
    assert isinstance(fit.minimizer, BFGS)

    fit_result = fit.execute()

    assert fit_result.value(x) == pytest.approx(res.x[0], 1e-6)
    assert fit_result.value(y) == pytest.approx(res.x[1], 1e-6)

    # Same test, but with constraints in place.
    res = minimize(func, [-1.0, 1.0], args=(-1.0,), jac=func_deriv,
                   constraints=cons, method='SLSQP', options={'disp': False})

    fit = Fit(-model, constraints=constraints)
    assert fit.constraints[0].constraint_type == Ge
    assert fit.constraints[1].constraint_type == Eq
    fit_result = fit.execute()
    assert fit_result.value(x) == pytest.approx(res.x[0], 1e-6)
    assert fit_result.value(y) == pytest.approx(res.x[1], 1e-6)
Example No. 12
def test_LeastSquares():
    """
    Tests if the LeastSquares objective gives the right shapes of output by
    comparing with its analytical equivalent.
    """
    i = Idx('i', 100)
    x, y = symbols('x, y', cls=Variable)
    X2 = symbols('X2', cls=Variable)
    a, b = parameters('a, b')

    model = Model({y: a * x**2 + b * x})
    xdata = np.linspace(0, 10, 100)
    ydata = model(x=xdata, a=5, b=2).y + np.random.normal(0, 5, xdata.shape)

    # Construct a LeastSquares objective and its analytical equivalent
    chi2_numerical = LeastSquares(model,
                                  data={
                                      x: xdata,
                                      y: ydata,
                                      model.sigmas[y]: np.ones_like(xdata)
                                  })
    chi2_exact = Model({X2: FlattenSum(0.5 * ((a * x**2 + b * x) - y)**2, i)})

    eval_exact = chi2_exact(x=xdata, y=ydata, a=2, b=3)
    jac_exact = chi2_exact.eval_jacobian(x=xdata, y=ydata, a=2, b=3)
    hess_exact = chi2_exact.eval_hessian(x=xdata, y=ydata, a=2, b=3)
    eval_numerical = chi2_numerical(x=xdata, a=2, b=3)
    jac_numerical = chi2_numerical.eval_jacobian(x=xdata, a=2, b=3)
    hess_numerical = chi2_numerical.eval_hessian(x=xdata, a=2, b=3)

    # Test model jacobian and hessian shape
    assert model(x=xdata, a=2, b=3)[0].shape == ydata.shape
    assert model.eval_jacobian(x=xdata, a=2, b=3)[0].shape == (2, 100)
    assert model.eval_hessian(x=xdata, a=2, b=3)[0].shape == (2, 2, 100)
    # Test exact chi2 shape
    assert eval_exact[0].shape == (1,)
    assert jac_exact[0].shape == (2, 1)
    assert hess_exact[0].shape == (2, 2, 1)

    # Test if these two models have the same call, jacobian, and hessian
    assert eval_exact[0] == pytest.approx(eval_numerical)
    assert isinstance(eval_numerical, float)
    assert isinstance(eval_exact[0][0], float)
    assert np.squeeze(jac_exact[0], axis=-1) == pytest.approx(jac_numerical)
    assert isinstance(jac_numerical, np.ndarray)
    assert np.squeeze(hess_exact[0], axis=-1) == pytest.approx(hess_numerical)
    assert isinstance(hess_numerical, np.ndarray)

    fit = Fit(chi2_exact, x=xdata, y=ydata, objective=MinimizeModel)
    fit_exact_result = fit.execute()
    fit = Fit(model, x=xdata, y=ydata, absolute_sigma=True)
    fit_num_result = fit.execute()
    assert fit_exact_result.value(a) == fit_num_result.value(a)
    assert fit_exact_result.value(b) == fit_num_result.value(b)
    assert fit_exact_result.stdev(a) == pytest.approx(fit_num_result.stdev(a))
    assert fit_exact_result.stdev(b) == pytest.approx(fit_num_result.stdev(b))
Example No. 13
    def test_error_analytical(self):
        """
        Test using a case where the analytical answer is known. Uses both
        symfit and scipy's curve_fit.
        Modeled after:
        http://nbviewer.ipython.org/urls/gist.github.com/taldcroft/5014170/raw/31e29e235407e4913dc0ec403af7ed524372b612/curve_fit.ipynb
        """
        N = 10000
        sigma = 10.0 * np.ones(N)
        xn = np.arange(N, dtype=float)
        np.random.seed(10)
        yn = np.random.normal(size=len(xn), scale=sigma)

        a = Parameter()
        y = Variable()
        model = {y: a}

        fit = Fit(model, y=yn, sigma_y=sigma)
        fit_result = fit.execute()

        popt, pcov = curve_fit(lambda x, a: a * np.ones_like(x),
                               xn,
                               yn,
                               sigma=sigma,
                               absolute_sigma=True)
        self.assertAlmostEqual(fit_result.value(a), popt[0], 5)
        self.assertAlmostEqual(fit_result.stdev(a),
                               np.sqrt(np.diag(pcov))[0], 2)

        fit_no_sigma = Fit(model, yn)
        fit_result_no_sigma = fit_no_sigma.execute()

        popt, pcov = curve_fit(
            lambda x, a: a * np.ones_like(x),
            xn,
            yn,
        )
        # With or without sigma, the best-fit params should agree in the case of equal weights
        self.assertAlmostEqual(fit_result.value(a),
                               fit_result_no_sigma.value(a), 5)
        # Since symfit is all about absolute errors, the sigma will not be in agreement
        self.assertNotEqual(fit_result.stdev(a), fit_result_no_sigma.stdev(a))
        self.assertAlmostEqual(fit_result_no_sigma.value(a), popt[0], 5)
        self.assertAlmostEqual(fit_result_no_sigma.stdev(a), pcov[0][0]**0.5,
                               5)

        # Analytical answer for the standard error of the mean of N(0, sigma):
        mu = 0.0
        sigma_mu = sigma[0] / N**0.5

        self.assertAlmostEqual(fit_result.stdev(a), sigma_mu, 5)
Example No. 14
    def test_simple_sigma(self):
        """
        Make sure we produce the same results as scipy's curve_fit, with and
        without sigmas, and compare the results of both to a known value.
        """
        t_data = np.array([1.4, 2.1, 2.6, 3.0, 3.3])
        y_data = np.array([10, 20, 30, 40, 50])

        sigma = 0.2
        n = np.array([5, 3, 8, 15, 30])
        sigma_t = sigma / np.sqrt(n)

        # We now define our model
        y = Variable('y')
        g = Parameter('g')
        t_model = (2 * y / g)**0.5

        fit = Fit(t_model, y_data, t_data)
        fit_result = fit.execute()


        # Let's compare with the results from curve_fit, without weights
        popt_noweights, pcov_noweights = curve_fit(lambda y, p: (2 * y / p)**0.5, y_data, t_data)

        self.assertAlmostEqual(fit_result.value(g), popt_noweights[0])
        self.assertAlmostEqual(fit_result.stdev(g), np.sqrt(pcov_noweights[0, 0]), 6)

        # Same sigma everywhere
        fit = Fit(t_model, y_data, t_data, 0.0031, absolute_sigma=False)
        fit_result = fit.execute()
        popt_sameweights, pcov_sameweights = curve_fit(lambda y, p: (2 * y / p)**0.5, y_data, t_data, sigma=0.0031*np.ones(len(y_data)), absolute_sigma=False)
        self.assertAlmostEqual(fit_result.value(g), popt_sameweights[0], 4)
        self.assertAlmostEqual(fit_result.stdev(g), np.sqrt(pcov_sameweights[0, 0]), 4)
        # The same weight everywhere should give the same result as no weights when absolute_sigma=False
        self.assertAlmostEqual(fit_result.value(g), popt_noweights[0], 4)
        self.assertAlmostEqual(fit_result.stdev(g), np.sqrt(pcov_noweights[0, 0]), 4)

        # Different sigma for every point
        fit = Fit(t_model, y_data, t_data, 0.1*sigma_t, absolute_sigma=False)
        fit_result = fit.execute()
        popt, pcov = curve_fit(lambda y, p: (2 * y / p)**0.5, y_data, t_data, sigma=.1*sigma_t)

        self.assertAlmostEqual(fit_result.value(g), popt[0])
        self.assertAlmostEqual(fit_result.stdev(g), np.sqrt(pcov[0, 0]), 6)

        # according to Mathematica
        self.assertAlmostEqual(fit_result.value(g), 9.095, 3)
        self.assertAlmostEqual(fit_result.stdev(g), 0.102, 3)
Example No. 15
    def test_covariances(self):
        """
        Compare the equal and unequal length handling of `HasCovarianceMatrix`.
        If it works properly, the unequal length method should reduce to the
        equal length one when called with equal length data. Computing unequal
        dataset length covariances remains something to be careful with, but
        this backwards compatibility provides some validation.
        """
        N = 10000
        a, b, c = parameters('a, b, c')
        a_i, b_i, c_i = variables('a_i, b_i, c_i')

        model = {a_i: a, b_i: b, c_i: c}

        np.random.seed(1)
        # Sample from a multivariate normal with correlation.
        pcov = 1e-1 * np.array([[0.4, 0.3, 0.5], [0.3, 0.8, 0.4], [0.5, 0.4, 1.2]])
        xdata = np.random.multivariate_normal([10, 100, 70], pcov, N).T

        fit = Fit(
            model=model,
            a_i=xdata[0],
            b_i=xdata[1],
            c_i=xdata[2],
            absolute_sigma=False
        )
        fit_result = fit.execute()

        cov_equal = fit._cov_mat_equal_lenghts(fit_result.params)
        cov_unequal = fit._cov_mat_unequal_lenghts(fit_result.params)
        np.testing.assert_array_almost_equal(cov_equal, cov_unequal)

        # Try with absolute_sigma=True
        fit = Fit(
            model=model,
            a_i=xdata[0],
            b_i=xdata[1],
            c_i=xdata[2],
            sigma_a_i=np.sqrt(pcov[0, 0]),
            sigma_b_i=np.sqrt(pcov[1, 1]),
            sigma_c_i=np.sqrt(pcov[2, 2]),
            absolute_sigma=True
        )
        fit_result = fit.execute()

        cov_equal = fit._cov_mat_equal_lenghts(fit_result.params)
        cov_unequal = fit._cov_mat_unequal_lenghts(fit_result.params)
        np.testing.assert_array_almost_equal(cov_equal, cov_unequal)
Example No. 16
    def test_likelihood_fitting_gaussian(self):
        """
        Fit using the likelihood method.
        """
        mu, sig = parameters('mu, sig')
        sig.min = 0.01
        sig.value = 3.0
        mu.value = 50.
        x = Variable()
        pdf = Gaussian(x, mu, sig)

        np.random.seed(10)
        xdata = np.random.normal(51., 3.5, 10000)

        # Expected parameter values
        mean = np.mean(xdata)
        stdev = np.std(xdata)
        mean_stdev = stdev/np.sqrt(len(xdata))

        fit = Fit(pdf, xdata, objective=LogLikelihood)
        fit_result = fit.execute()

        self.assertAlmostEqual(fit_result.value(mu) / mean, 1, 6)
        self.assertAlmostEqual(fit_result.stdev(mu) / mean_stdev, 1, 3)
        self.assertAlmostEqual(fit_result.value(sig) / np.std(xdata), 1, 6)
Example No. 17
    def test_gaussian_fitting(self):
        """
        Tests fitting to a gaussian function and fit_result.params unpacking.
        """
        xdata = 2*np.random.rand(10000) - 1  # random between [-1, 1]
        ydata = 5.0 * scipy.stats.norm.pdf(xdata, loc=0.0, scale=1.0)

        x0 = Parameter('x0')
        sig = Parameter('sig')
        A = Parameter('A')
        x = Variable('x')
        g = A * Gaussian(x, x0, sig)

        fit = Fit(g, xdata, ydata)
        fit_result = fit.execute()

        self.assertAlmostEqual(fit_result.value(A), 5.0)
        self.assertAlmostEqual(np.abs(fit_result.value(sig)), 1.0)
        self.assertAlmostEqual(fit_result.value(x0), 0.0)
        sexy = g(x=2.0, **fit_result.params)
        ugly = g(
            x=2.0,
            x0=fit_result.value(x0),
            A=fit_result.value(A),
            sig=fit_result.value(sig),
        )
        self.assertEqual(sexy, ugly)
Example No. 18
    def test_fitting(self):
        """
        Tests fitting with NumericalLeastSquares. Makes sure that the resulting
        objects and values are of the right type, and that the fit_result does
        not have unexpected members.
        """
        xdata = np.linspace(1, 10, 10)
        ydata = 3*xdata**2

        a = Parameter()  # 3.1, min=2.5, max=3.5
        b = Parameter()
        x = Variable()
        new = a*x**b

        fit = Fit(new, xdata, ydata, minimizer=MINPACK)

        fit_result = fit.execute()
        self.assertIsInstance(fit_result, FitResults)
        self.assertAlmostEqual(fit_result.value(a), 3.0)
        self.assertAlmostEqual(fit_result.value(b), 2.0)

        self.assertIsInstance(fit_result.stdev(a), float)
        self.assertIsInstance(fit_result.stdev(b), float)

        self.assertIsInstance(fit_result.r_squared, float)
        self.assertEqual(fit_result.r_squared, 1.0)  # by definition, since there is no fuzziness
Example No. 19
    def test_vector_fitting_guess(self):
        """
        Tests fitting to a 3 component vector valued function, with guesses.
        """
        a, b, c = parameters('a, b, c')
        a.value = 10
        b.value = 100
        a_i, b_i, c_i = variables('a_i, b_i, c_i')

        model = {a_i: a, b_i: b, c_i: c}

        xdata = np.array([
            [10.1, 9., 10.5, 11.2, 9.5, 9.6, 10.],
            [102.1, 101., 100.4, 100.8, 99.2, 100., 100.8],
            [71.6, 73.2, 69.5, 70.2, 70.8, 70.6, 70.1],
        ])

        fit = Fit(
            model=model,
            a_i=xdata[0],
            b_i=xdata[1],
            c_i=xdata[2],
            minimizer=MINPACK
        )
        fit_result = fit.execute()

        self.assertAlmostEqual(fit_result.value(a), np.mean(xdata[0]), 4)
        self.assertAlmostEqual(fit_result.value(b), np.mean(xdata[1]), 4)
        self.assertAlmostEqual(fit_result.value(c), np.mean(xdata[2]), 4)
Example No. 20
def test_vector_fitting():
    """
    Tests fitting to a 3 component vector valued function, without bounds
    or guesses.
    """
    a, b, c = parameters('a, b, c')
    a_i, b_i, c_i = variables('a_i, b_i, c_i')

    model = {a_i: a, b_i: b, c_i: c}

    xdata = np.array([
        [10.1, 9., 10.5, 11.2, 9.5, 9.6, 10.],
        [102.1, 101., 100.4, 100.8, 99.2, 100., 100.8],
        [71.6, 73.2, 69.5, 70.2, 70.8, 70.6, 70.1],
    ])

    fit = Fit(model=model,
              a_i=xdata[0],
              b_i=xdata[1],
              c_i=xdata[2],
              minimizer=MINPACK)
    fit_result = fit.execute()

    assert fit_result.value(a) == pytest.approx(np.mean(xdata[0]), 1e-5)
    assert fit_result.value(b) == pytest.approx(np.mean(xdata[1]), 1e-4)
    assert fit_result.value(c) == pytest.approx(np.mean(xdata[2]), 1e-5)
Example No. 21
def test_fixed_parameters_2():
    """
    Make sure parameter boundaries are respected
    """
    x = Parameter('x', min=1)
    y = Variable('y')
    model = Model({y: x**2})

    bounded_minimizers = list(subclasses(BoundedMinimizer))
    for minimizer in bounded_minimizers:
        if minimizer is MINPACK:
            # Skip MINPACK: with no data this is a minimization, not a
            # least-squares problem.
            continue
        fit = Fit(model, minimizer=minimizer)
        assert isinstance(fit.objective, MinimizeModel)
        if minimizer is DifferentialEvolution:
            # Also needs a max
            x.max = 10
            fit_result = fit.execute()
            x.max = None
        else:
            fit_result = fit.execute()
            assert fit_result.value(x) >= 1.0
            assert fit_result.value(x) <= 2.0
        assert fit.minimizer.bounds == [(1, None)]
Example No. 22
def test_fixed_parameters():
    """
    Make sure fixed parameters don't change on fitting
    """
    a, b, c, d = parameters('a, b, c, d')
    x, y = variables('x, y')

    c.value = 4.0
    a.min, a.max = 1.0, 5.0  # Bounds are needed for DifferentialEvolution
    b.min, b.max = 1.0, 5.0
    c.min, c.max = 1.0, 5.0
    d.min, d.max = 1.0, 5.0
    c.fixed = True

    model = Model({y: a * exp(-(x - b)**2 / (2 * c**2)) + d})
    # Generate data
    xdata = np.linspace(0, 100)
    ydata = model(xdata, a=2, b=3, c=2, d=2).y

    for minimizer in subclasses(BaseMinimizer):
        if minimizer is ChainedMinimizer:
            continue
        fit = Fit(model, x=xdata, y=ydata, minimizer=minimizer)
        fit_result = fit.execute()
        # c should still be 4.0, not 2.0!
        assert 4.0 == fit_result.params['c']
Example No. 23
def test_likelihood_fitting_gaussian():
    """
    Fit using the likelihood method.
    """
    mu, sig = parameters('mu, sig')
    sig.min = 0.01
    sig.value = 3.0
    mu.value = 50.
    x = Variable('x')
    pdf = GradientModel(Gaussian(x, mu, sig))

    np.random.seed(10)
    # TODO: Do we really need 10k points?
    xdata = np.random.normal(51., 3.5, 10000)

    # Expected parameter values
    mean = np.mean(xdata)
    stdev = np.std(xdata)
    mean_stdev = stdev / np.sqrt(len(xdata))

    fit = Fit(pdf, xdata, objective=LogLikelihood)
    fit_result = fit.execute()

    assert fit_result.value(mu) == pytest.approx(mean, 1e-6)
    assert fit_result.stdev(mu) == pytest.approx(mean_stdev, 1e-3)
    assert fit_result.value(sig) == pytest.approx(np.std(xdata), 1e-6)
Example No. 24
def test_gaussian_fitting():
    """
    Tests fitting to a gaussian function and fit_result.params unpacking.
    """
    xdata = 2 * np.random.rand(10000) - 1  # random between [-1, 1]
    ydata = 5.0 * scipy.stats.norm.pdf(xdata, loc=0.0, scale=1.0)

    x0 = Parameter('x0')
    sig = Parameter('sig')
    A = Parameter('A')
    x = Variable('x')
    g = GradientModel(A * Gaussian(x, x0, sig))

    fit = Fit(g, xdata, ydata)
    assert isinstance(fit.objective, LeastSquares)
    fit_result = fit.execute()

    assert fit_result.value(A) == pytest.approx(5.0)
    assert np.abs(fit_result.value(sig)) == pytest.approx(1.0)
    assert fit_result.value(x0) == pytest.approx(0.0)
    sexy = g(x=2.0, **fit_result.params)
    ugly = g(
        x=2.0,
        x0=fit_result.value(x0),
        A=fit_result.value(A),
        sig=fit_result.value(sig),
    )
    assert sexy == ugly
Example No. 25
def fit_gauss2d(arr):
    Y, X = np.indices(arr.shape)

    # Initial estimates from image moments: intensity-weighted centroid
    # and spread, used as starting guesses for the fit.
    total = arr.sum()
    x = (X * arr).sum() / total
    y = (Y * arr).sum() / total
    col = arr[:, int(y)]
    width_x = np.sqrt(
        np.abs((np.arange(col.size) - y)**2 * col).sum() / col.sum())
    row = arr[int(x), :]
    width_y = np.sqrt(
        np.abs((np.arange(row.size) - x)**2 * row).sum() / row.sum())
    base = 0

    idx = np.argmax(arr)
    y_mu, x_mu = np.unravel_index(idx, arr.shape)

    print(arr.max(), x_mu, y_mu, width_x, width_y, base)
    model = model_gauss2d(arr.max(),
                          x_mu,
                          y_mu,
                          width_x,
                          width_y,
                          base,
                          has_base=False)

    fit = Fit(model, z_var=arr, x_var=X, y_var=Y)
    return fit.execute(), fit.model
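model_gauss2d is not shown in the source. A hypothetical sketch of what such a builder might look like in symfit; the name, signature, and parameter names are assumptions, with only the variable names z_var, x_var, and y_var taken from the Fit call above:

from symfit import Model, Parameter, Variable, exp

def model_gauss2d(a, x_mu, y_mu, sig_x, sig_y, base, has_base=True):
    # 2D Gaussian with the initial guesses baked into the Parameters.
    x_var, y_var = Variable('x_var'), Variable('y_var')
    z_var = Variable('z_var')
    A = Parameter('A', value=a)
    mu_x = Parameter('mu_x', value=x_mu)
    mu_y = Parameter('mu_y', value=y_mu)
    s_x = Parameter('s_x', value=sig_x, min=0)
    s_y = Parameter('s_y', value=sig_y, min=0)
    gauss = A * exp(-((x_var - mu_x)**2 / (2 * s_x**2)
                      + (y_var - mu_y)**2 / (2 * s_y**2)))
    if has_base:
        b = Parameter('b', value=base)
        return Model({z_var: gauss + b})
    return Model({z_var: gauss})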
Example No. 26
    def test_vector_fitting(self):
        """
        Tests fitting to a 3 component vector valued function, without bounds
        or guesses.
        """
        a, b, c = parameters('a, b, c')
        a_i, b_i, c_i = variables('a_i, b_i, c_i')

        model = {a_i: a, b_i: b, c_i: c}

        xdata = np.array([
            [10.1, 9., 10.5, 11.2, 9.5, 9.6, 10.],
            [102.1, 101., 100.4, 100.8, 99.2, 100., 100.8],
            [71.6, 73.2, 69.5, 70.2, 70.8, 70.6, 70.1],
        ])

        fit = Fit(
            model=model,
            a_i=xdata[0],
            b_i=xdata[1],
            c_i=xdata[2],
            minimizer=MINPACK
        )
        fit_result = fit.execute()

        self.assertAlmostEqual(fit_result.value(a) / 9.985691, 1.0, 5)
        self.assertAlmostEqual(fit_result.value(b) / 1.006143e+02, 1.0, 4)
        self.assertAlmostEqual(fit_result.value(c) / 7.085713e+01, 1.0, 5)
Example No. 27
    def test_backwards_compatible_fitting(self):
        """
        In 0.4.2 we replaced the usage of inspect with automatically generated
        names. This can cause problems for users who rely on named variables
        when calling fit.
        """
        xdata = np.linspace(1, 10, 10)
        ydata = 3*xdata**2

        a = Parameter(value=1.0)
        b = Parameter(value=2.5)

        y = Variable('y')

        with warnings.catch_warnings(record=True) as w:
            # Cause all warnings to always be triggered.
            warnings.simplefilter("always")
            x = Variable()
            self.assertTrue(len(w) == 1)
            self.assertTrue(issubclass(w[-1].category, DeprecationWarning))

        model = {y: a*x**b}

        with self.assertRaises(TypeError):
            fit = Fit(model, x=xdata, y=ydata)
Example No. 28
def test_fitting():
    """
    Tests fitting with NumericalLeastSquares. Makes sure that the resulting
    objects and values are of the right type, and that the fit_result does
    not have unexpected members.
    """
    xdata = np.linspace(1, 10, 10)
    ydata = 3 * xdata**2

    a = Parameter('a')  # 3.1, min=2.5, max=3.5
    b = Parameter('b')
    x = Variable('x')
    new = a * x**b

    fit = Fit(new, xdata, ydata, minimizer=MINPACK)

    fit_result = fit.execute()
    assert isinstance(fit_result, FitResults)
    assert fit_result.value(a) == pytest.approx(3.0)
    assert fit_result.value(b) == pytest.approx(2.0)

    assert isinstance(fit_result.stdev(a), float)
    assert isinstance(fit_result.stdev(b), float)

    assert isinstance(fit_result.r_squared, float)
    assert fit_result.r_squared == 1.0  # by definition, since there is no fuzziness
Example No. 29
    def fit(self):
        x, y = variables('x, y')
        model_dict = {y: self.fourier()}
        self.ffit = Fit(model_dict, x=self.x, y=self.y)
        self.fit_result = self.ffit.execute()
        self.orderedDict = self.fit_result.params
        return self.fit_result.params
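self.fourier is not shown. A minimal standalone sketch, assuming it builds a symbolic Fourier series just like fourier_series in Example No. 10 (the function below, its order n, and the frequency are all assumptions):

from symfit import cos, parameters, sin, variables

def fourier(n=5, freq=1.0):
    # Hypothetical stand-in for self.fourier(): a Fourier series of order n in x.
    x, y = variables('x, y')
    a0, *cos_a = parameters(', '.join('a{}'.format(i) for i in range(n + 1)))
    sin_b = parameters(', '.join('b{}'.format(i) for i in range(1, n + 1)))
    return a0 + sum(ai * cos(i * freq * x) + bi * sin(i * freq * x)
                    for i, (ai, bi) in enumerate(zip(cos_a, sin_b), start=1))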
Example No. 30
def test_2D_fitting():
    """
    Makes sure that a scalar model with 2 independent variables has the
    proper signature, and that the fit result is of the correct type.
    """
    xdata = np.random.randint(-10, 11, size=(2, 400))
    zdata = 2.5 * xdata[0]**2 + 7.0 * xdata[1]**2

    a = Parameter('a')
    b = Parameter('b')
    x = Variable('x')
    y = Variable('y')
    new = a * x**2 + b * y**2

    fit = Fit(new, xdata[0], xdata[1], zdata)

    result = fit.model(xdata[0], xdata[1], 2, 3)
    assert isinstance(result, tuple)

    for arg_name, name in zip(('x', 'y', 'a', 'b'),
                              inspect_sig.signature(fit.model).parameters):
        assert arg_name == name

    fit_result = fit.execute()
    assert isinstance(fit_result, FitResults)