Example #1
def Fourier_series(x, f, n=0):
    """Builds the Fourier series used as the reference model for the fit."""
    # a0 plus the cosine parameters a1..an; the sine parameters b1..bn.
    a0, *cos_a = parameters(','.join(['a{}'.format(i) for i in range(0, n + 1)]))
    sin_b = parameters(','.join(['b{}'.format(i) for i in range(1, n + 1)]))
    series = a0 + sum(ai * cos(i * f * x) + bi * sin(i * f * x)
                      for i, (ai, bi) in enumerate(zip(cos_a, sin_b), start=1))

    return series
Example #2
def fourier_series(x, f, n=0):
    # Make the parameter objects for all the terms
    a0, *A = parameters(','.join([f'a{i}' for i in range(0, n + 1)]))
    B = parameters(','.join([f'b{i}' for i in range(1, n + 1)]))
    # Construct the series
    series = a0 + sum(ai*cos(i*f*x) + bi*sin(i*f*x)
                      for i, (ai, bi) in enumerate(zip(A, B), start=1))
    return series
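For context, here is a minimal usage sketch for these builders, in the spirit of the Fourier example from the symfit documentation. The step-function data is hypothetical, and `fourier_series` refers to Example #2's builder (Example #1's `Fourier_series` is interchangeable):

import numpy as np
from symfit import parameters, variables, Fit, sin, cos

x, y = variables('x, y')
w, = parameters('w')  # the base frequency is fitted as well
model_dict = {y: fourier_series(x, f=w, n=3)}

# Hypothetical data: a step function on [-pi, pi].
xdata = np.linspace(-np.pi, np.pi)
ydata = np.zeros_like(xdata)
ydata[xdata > 0] = 1

fit = Fit(model_dict, x=xdata, y=ydata)
fit_result = fit.execute()
print(fit_result)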
Example #3
    def fourier(self):
        n = self.n
        w = self.w
        lst = range(n + 1)
        self.a_n = parameters(','.join(['a{}'.format(i) for i in lst]))
        self.b_n = parameters(','.join(['b{}'.format(i) for i in lst]))
        self.coeff = self.a_n + self.b_n
        # k = 0 yields the constant term a0 (cos 0 = 1); b0 multiplies sin 0 and is unused.
        self.eqn = sum(a_k * sp.cos(k * w * Symbol('x')) + b_k * sp.sin(k * w * Symbol('x'))
                       for k, (a_k, b_k) in enumerate(zip(self.a_n, self.b_n)))
        return self.eqn
Example #4
    def fourier_series(self, x, f, n=2):
        a0, *cos_a = parameters(','.join(
            ['a{}'.format(i) for i in range(0, n + 1)]))
        sin_b = parameters(','.join(['b{}'.format(i)
                                     for i in range(1, n + 1)]))
        series = a0 + sum(
            ai * cos(i * f * x) + bi * sin(i * f * x)
            for i, (ai, bi) in enumerate(zip(cos_a, sin_b), start=1))
        return series
Example #5
def test_LogLikelihood_global():
    """
    This is a test for global likelihood fitting to multiple data sets.
    Based on SO question 56006357.
    """
    # creating the data
    mu1, mu2 = .05, -.05
    sigma1, sigma2 = 3.5, 2.5
    n1, n2 = 80, 90
    np.random.seed(42)
    x1 = np.random.vonmises(mu1, sigma1, n1)
    x2 = np.random.vonmises(mu2, sigma2, n2)

    n = 2  # number of components
    xs = variables('x,' + ','.join('x_{}'.format(i) for i in range(1, n + 1)))
    x, xs = xs[0], xs[1:]
    ys = variables(','.join('y_{}'.format(i) for i in range(1, n + 1)))
    mu, kappa = parameters('mu, kappa')
    kappas = parameters(','.join('k_{}'.format(i) for i in range(1, n + 1)),
                        min=0,
                        max=10)
    mu.min, mu.max = -np.pi, np.pi

    template = exp(kappa * cos(x - mu)) / (2 * pi * besseli(0, kappa))

    model = Model({
        y_i: template.subs({
            kappa: k_i,
            x: x_i
        })
        for y_i, x_i, k_i in zip(ys, xs, kappas)
    })

    all_data = {xs[0]: x1, xs[1]: x2, ys[0]: None, ys[1]: None}
    all_params = {'mu': 1}
    all_params.update({k_i.name: 1 for k_i in kappas})

    # Evaluate the loglikelihood and its jacobian and hessian
    logL = LogLikelihood(model, data=all_data)
    eval_numerical = logL(**all_params)
    jac_numerical = logL.eval_jacobian(**all_params)
    hess_numerical = logL.eval_hessian(**all_params)

    # Test the types and shapes of the components.
    assert isinstance(eval_numerical, float)
    assert isinstance(jac_numerical, np.ndarray)
    assert isinstance(hess_numerical, np.ndarray)

    assert eval_numerical.shape == tuple()  # Empty tuple -> scalar
    assert jac_numerical.shape == (3, )
    assert hess_numerical.shape == (
        3,
        3,
    )
Example #6
def fourier_series(x, f, n=0):
    """
    Returns a symbolic fourier series of order `n`.

    :param n: Order of the fourier series.
    :param x: Independent variable
    :param f: Frequency of the fourier series
    """
    # Make the parameter objects for all the terms
    a0, *cos_a = parameters(','.join(['a{}'.format(i) for i in range(0, n + 1)]))
    sin_b = parameters(','.join(['b{}'.format(i) for i in range(1, n + 1)]))
    # Construct the series
    series = a0 + sum(ai * cos(i * f * x) + bi * sin(i * f * x)
                      for i, (ai, bi) in enumerate(zip(cos_a, sin_b), start=1))
    return series
Example #7
    def test_simple_kinetics(self):
        """
        Simple kinetics data to test fitting
        """
        tdata = np.array([10, 26, 44, 70, 120])
        adata = 10e-4 * np.array([44, 34, 27, 20, 14])
        a, b, t = variables('a, b, t')
        k, a0 = parameters('k, a0')
        k.value = 0.01
        # a0.value, a0.min, a0.max = 54 * 10e-4, 40e-4, 60e-4
        a0 = 54 * 10e-4

        model_dict = {
            D(a, t): -k * a**2,
            D(b, t): k * a**2,
        }

        ode_model = ODEModel(model_dict, initial={t: 0.0, a: a0, b: 0.0})

        # Generate some data
        tvec = np.linspace(0, 500, 1000)

        fit = NumericalLeastSquares(ode_model, t=tdata, a=adata, b=None)
        fit_result = fit.execute()
        # print(fit_result)
        self.assertAlmostEqual(fit_result.value(k), 4.302875e-01, 4)
        self.assertAlmostEqual(fit_result.stdev(k), 6.447068e-03, 4)

        fit = Fit(ode_model, t=tdata, a=adata, b=None)
        fit_result = fit.execute()
        # print(fit_result)
        self.assertAlmostEqual(fit_result.value(k), 4.302875e-01, 4)
        self.assertTrue(np.isnan(fit_result.stdev(k)))
Example #8
    def test_polgar(self):
        """
        Analysis of data published here:
        This whole ODE support was built to do this analysis in the first place.
        """
        a, b, c, d, t = variables('a, b, c, d, t')
        k, p, l, m = parameters('k, p, l, m')

        a0 = 10
        b = a0 - d + a
        model_dict = {
            D(d, t): l * c * b - m * d,
            D(c, t): k * a * b - p * c - l * c * b + m * d,
            D(a, t): -k * a * b + p * c,
        }

        ode_model = ODEModel(model_dict,
                             initial={
                                 t: 0.0,
                                 a: a0,
                                 c: 0.0,
                                 d: 0.0
                             })

        # Generate some data
        tdata = np.linspace(0, 3, 1000)
        # Eval
        AA, AAB, BAAB = ode_model(t=tdata, k=0.1, l=0.2, m=.3, p=0.3)
Example #9
    def test_simple_kinetics(self):
        """
        Simple kinetics data to test fitting
        """
        tdata = np.array([10, 26, 44, 70, 120])
        adata = 10e-4 * np.array([44, 34, 27, 20, 14])
        a, b, t = variables('a, b, t')
        k, a0 = parameters('k, a0')
        k.value = 0.01
        # a0.value, a0.min, a0.max = 54 * 10e-4, 40e-4, 60e-4
        a0 = 54 * 10e-4

        model_dict = {
            D(a, t): - k * a**2,
            D(b, t): k * a**2,
        }

        ode_model = ODEModel(model_dict, initial={t: 0.0, a: a0, b: 0.0})

        # Analytical solution
        model = GradientModel({a: 1 / (k * t + 1 / a0)})
        fit = Fit(model, t=tdata, a=adata)
        fit_result = fit.execute()

        fit = Fit(ode_model, t=tdata, a=adata, b=None, minimizer=MINPACK)
        ode_result = fit.execute()
        self.assertAlmostEqual(ode_result.value(k) / fit_result.value(k), 1.0, 4)
        self.assertAlmostEqual(ode_result.stdev(k) / fit_result.stdev(k), 1.0, 4)
        self.assertAlmostEqual(ode_result.r_squared / fit_result.r_squared, 1, 4)

        fit = Fit(ode_model, t=tdata, a=adata, b=None)
        ode_result = fit.execute()
        self.assertAlmostEqual(ode_result.value(k) / fit_result.value(k), 1.0, 4)
        self.assertAlmostEqual(ode_result.stdev(k) / fit_result.stdev(k), 1.0, 4)
        self.assertAlmostEqual(ode_result.r_squared / fit_result.r_squared, 1, 4)
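For reference, the analytical model {a: 1 / (k * t + 1 / a0)} used above is just the integrated second-order rate law:

\[ \frac{da}{dt} = -k\,a^2 \;\Rightarrow\; \frac{d}{dt}\Big(\frac{1}{a}\Big) = k \;\Rightarrow\; a(t) = \frac{1}{k\,t + 1/a_0}. \]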
Example #10
    def test_known_solution(self):
        p, c1 = parameters('p, c1')
        y, t = variables('y, t')
        p.value = 3.0

        model_dict = {
            D(y, t): - p * y,
        }

        # Lets say we know the exact solution to this problem
        sol = Model({y: exp(- p * t)})

        # Generate some data
        tdata = np.linspace(0, 3, 10001)
        ydata = sol(t=tdata, p=3.22)[0]
        ydata += np.random.normal(0, 0.005, ydata.shape)

        ode_model = ODEModel(model_dict, initial={t: 0.0, y: ydata[0]})
        fit = Fit(ode_model, t=tdata, y=ydata)
        ode_result = fit.execute()

        c1.value = ydata[0]
        fit = Fit(sol, t=tdata, y=ydata)
        fit_result = fit.execute()

        self.assertAlmostEqual(ode_result.value(p) / fit_result.value(p), 1, 2)
        self.assertAlmostEqual(ode_result.r_squared / fit_result.r_squared, 1, 4)
        self.assertAlmostEqual(ode_result.stdev(p) / fit_result.stdev(p), 1, 3)
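For reference, `sol` above is the standard closed-form solution of the decay equation with unit initial value:

\[ \frac{dy}{dt} = -p\,y,\quad y(0) = 1 \;\Rightarrow\; y(t) = e^{-p\,t}. \]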
Example #11
    def test_full_eval_range(self):
        """
        Test if ODEModels can be evaluated at t < t_initial.

        A bit of a no news is good news test.
        """
        tdata = np.array([0, 10, 26, 44, 70, 120])
        adata = 10e-4 * np.array([54, 44, 34, 27, 20, 14])
        a, b, t = variables('a, b, t')
        k, a0 = parameters('k, a0')
        k.value = 0.01
        t0 = tdata[2]
        a0 = adata[2]
        b0 = 0.02729855 # Obtained from evaluating from t=0.

        model_dict = {
            D(a, t): - k * a**2,
            D(b, t): k * a**2,
        }

        ode_model = ODEModel(model_dict, initial={t: t0, a: a0, b: b0})

        fit = Fit(ode_model, t=tdata, a=adata, b=None)
        ode_result = fit.execute()
        self.assertGreater(ode_result.r_squared, 0.95, 4)

        # Now start from a timepoint that is not in the t-array such that it
        # triggers another pathway to be taken in integrating it.
        # Again, no news is good news.
        ode_model = ODEModel(model_dict, initial={t: t0 + 1e-5, a: a0, b: b0})

        fit = Fit(ode_model, t=tdata, a=adata, b=None)
        ode_result = fit.execute()
        self.assertGreater(ode_result.r_squared, 0.95, 4)
Example #12
    def test_simple_kinetics(self):
        """
        Simple kinetics data to test fitting
        """
        tdata = np.array([10, 26, 44, 70, 120])
        adata = 10e-4 * np.array([44, 34, 27, 20, 14])
        a, b, t = variables('a, b, t')
        k, a0 = parameters('k, a0')
        k.value = 0.01
        # a0.value, a0.min, a0.max = 54 * 10e-4, 40e-4, 60e-4
        a0 = 54 * 10e-4

        model_dict = {
            D(a, t): -k * a**2,
            D(b, t): k * a**2,
        }

        ode_model = ODEModel(model_dict, initial={t: 0.0, a: a0, b: 0.0})

        fit = ConstrainedNumericalLeastSquares(ode_model,
                                               t=tdata,
                                               a=adata,
                                               b=None)
        fit_result = fit.execute(tol=1e-9)

        self.assertAlmostEqual(fit_result.value(k), 4.302875e-01, 4)
        self.assertTrue(fit_result.stdev(k) is None)
Example #13
def fit_data(data, guess=10.0, use_err=False):

    x_data = data[:, 0]
    y_data = 1 - data[:, 1]
    if use_err:
        data_err = data[:, 2]

    x, y = sf.variables('x, y')
    pKa, n = sf.parameters('pKa, n')
    model = sf.Model({y: (10**(n * (pKa - x)) + 1)**-1.0})
    pKa.value = guess
    n.value = 1
    if use_err:
        fit = sf.Fit(model,
                     x=x_data,
                     y=y_data,
                     sigma_y=data_err,
                     minimizer=Powell,
                     absolute_sigma=True)
    else:
        fit = sf.Fit(model, x=x_data, y=y_data, minimizer=Powell)
    result = fit.execute()

    print("pKa.....................................", result.value(pKa), '+/-',
          result.stdev(pKa))
    print("n.......................................", result.value(n), '+/-',
          result.stdev(n))
    print("Regression coefficent:................", result.r_squared, '\n')

    x_out = np.arange(min(x_data), max(x_data), 10**-3.0)
    y_out = fit.model(x=x_out, **result.params)[0]

    return (x_out, y_out, result.value(pKa), result.stdev(pKa),
            result.r_squared, result.value(n), result.stdev(n))
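A hedged usage sketch of the function above; the `titration` array is an assumption for illustration, not part of the original:

# Hypothetical call: `titration` is an (N, 3) array whose columns are x
# (e.g. pH), the raw signal (fit_data uses 1 - signal internally), and the
# measurement error applied when use_err=True.
(x_fit, y_fit, pKa_val, pKa_err,
 r2, n_val, n_err) = fit_data(titration, guess=7.0, use_err=True)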
Example #14
    def test_full_eval_range(self):
        """
        Test if ODEModels can be evaluated at t < t_initial.

        A bit of a no news is good news test.
        """
        tdata = np.array([0, 10, 26, 44, 70, 120])
        adata = 10e-4 * np.array([54, 44, 34, 27, 20, 14])
        a, b, t = variables('a, b, t')
        k, a0 = parameters('k, a0')
        k.value = 0.01
        t0 = tdata[2]
        a0 = adata[2]
        b0 = 0.02729855 # Obtained from evaluating from t=0.

        model_dict = {
            D(a, t): - k * a**2,
            D(b, t): k * a**2,
        }

        ode_model = ODEModel(model_dict, initial={t: t0, a: a0, b: b0})

        fit = Fit(ode_model, t=tdata, a=adata, b=None)
        ode_result = fit.execute()
        self.assertGreater(ode_result.r_squared, 0.95, 4)

        # Now start from a timepoint that is not in the t-array such that it
        # triggers another pathway to be taken in integrating it.
        # Again, no news is good news.
        ode_model = ODEModel(model_dict, initial={t: t0 + 1e-5, a: a0, b: b0})

        fit = Fit(ode_model, t=tdata, a=adata, b=None)
        ode_result = fit.execute()
        self.assertGreater(ode_result.r_squared, 0.95, 4)
Example #15
    def test_simple_kinetics(self):
        """
        Simple kinetics data to test fitting
        """
        tdata = np.array([10, 26, 44, 70, 120])
        adata = 10e-4 * np.array([44, 34, 27, 20, 14])
        a, b, t = variables('a, b, t')
        k, a0 = parameters('k, a0')
        k.value = 0.01
        # a0.value, a0.min, a0.max = 54 * 10e-4, 40e-4, 60e-4
        a0 = 54 * 10e-4

        model_dict = {
            D(a, t): - k * a**2,
            D(b, t): k * a**2,
        }

        ode_model = ODEModel(model_dict, initial={t: 0.0, a: a0, b: 0.0})

        # Analytical solution
        model = Model({a: 1 / (k * t + 1 / a0)})
        fit = Fit(model, t=tdata, a=adata)
        fit_result = fit.execute()

        fit = Fit(ode_model, t=tdata, a=adata, b=None, minimizer=MINPACK)
        ode_result = fit.execute()
        self.assertAlmostEqual(ode_result.value(k) / fit_result.value(k), 1.0, 4)
        self.assertAlmostEqual(ode_result.stdev(k) / fit_result.stdev(k), 1.0, 4)
        self.assertAlmostEqual(ode_result.r_squared / fit_result.r_squared, 1, 4)

        fit = Fit(ode_model, t=tdata, a=adata, b=None)
        ode_result = fit.execute()
        self.assertAlmostEqual(ode_result.value(k) / fit_result.value(k), 1.0, 4)
        self.assertAlmostEqual(ode_result.stdev(k) / fit_result.stdev(k), 1.0, 4)
        self.assertAlmostEqual(ode_result.r_squared / fit_result.r_squared, 1, 4)
Example #16
    def test_pickle(self):
        """
        Make sure models are preserved when pickling.
        """
        a, b = parameters('a, b')
        x, y = variables('x, y')
        exact_model = Model({y: a * x ** b})
        constraint = Model.as_constraint(Eq(a, b), exact_model)
        num_model = CallableNumericalModel(
            {y: a * x ** b}, independent_vars=[x], params=[a, b]
        )
        connected_num_model = CallableNumericalModel(
            {y: a * x ** b}, connectivity_mapping={y: {x, a, b}}
        )
        # Test if lsoda args and kwargs are pickled too
        ode_model = ODEModel({D(y, x): a * x + b}, {x: 0.0}, 3, 4, some_kwarg=True)

        models = [exact_model, constraint, num_model, ode_model,
                  connected_num_model]
        for model in models:
            new_model = pickle.loads(pickle.dumps(model))
            # Compare signatures
            self.assertEqual(model.__signature__, new_model.__signature__)
            # Trigger the cached vars because we compare `__dict__`s
            model.vars
            new_model.vars
            # Explicitly make sure the connectivity mapping is identical.
            self.assertEqual(model.connectivity_mapping,
                             new_model.connectivity_mapping)
            if not isinstance(model, ODEModel):
                model.function_dict
                model.vars_as_functions
                new_model.function_dict
                new_model.vars_as_functions
            self.assertEqual(model.__dict__, new_model.__dict__)
Example #17
    def test_simple_kinetics(self):
        """
        Simple kinetics data to test fitting
        """
        tdata = np.array([10, 26, 44, 70, 120])
        adata = 10e-4 * np.array([44, 34, 27, 20, 14])
        a, b, t = variables('a, b, t')
        k, a0 = parameters('k, a0')
        k.value = 0.01
        # a0.value, a0.min, a0.max = 54 * 10e-4, 40e-4, 60e-4
        a0 = 54 * 10e-4

        model_dict = {
            D(a, t): - k * a**2,
            D(b, t): k * a**2,
        }

        ode_model = ODEModel(model_dict, initial={t: 0.0, a: a0, b: 0.0})

        # Generate some data
        tvec = np.linspace(0, 500, 1000)

        fit = NumericalLeastSquares(ode_model, t=tdata, a=adata, b=None)
        fit_result = fit.execute()
        # print(fit_result)
        self.assertAlmostEqual(fit_result.value(k), 4.302875e-01, 4)
        self.assertAlmostEqual(fit_result.stdev(k), 6.447068e-03, 4)

        fit = Fit(ode_model, t=tdata, a=adata, b=None)
        fit_result = fit.execute()
        # print(fit_result)
        self.assertAlmostEqual(fit_result.value(k), 4.302875e-01, 4)
        self.assertTrue(np.isnan(fit_result.stdev(k)))
Example #18
def test_multi_indep():
    '''
    Tests the case with multiple components, multiple parameters and
    multiple independent variables
    '''
    w, x, y, z = sf.variables('w, x, y, z')
    a, b, c = sf.parameters('a, b, c')
    model = sf.Model({
        y: 3 * a * x**2 + b * x * w - c,
        z: sf.exp(a * x - b) + c * w
    })
    x_data = np.arange(10) / 10
    w_data = np.arange(10)

    exact = model.eval_jacobian(x=x_data, w=w_data, a=3.5, b=2, c=5)
    approx = model.finite_difference(x=x_data, w=w_data, a=3.5, b=2, c=5)
    _assert_equal(exact, approx)

    exact = model.eval_jacobian(x=0.3, w=w_data, a=3.5, b=2, c=5)
    approx = model.finite_difference(x=0.3, w=w_data, a=3.5, b=2, c=5)
    _assert_equal(exact, approx)

    exact = model.eval_jacobian(x=0.3, w=5, a=3.5, b=2, c=5)
    approx = model.finite_difference(x=0.3, w=5, a=3.5, b=2, c=5)
    _assert_equal(exact, approx)
Example #19
def fit_plane(xyz):
    """
    Fit a plane to the point coordinates in xyz.

    Dev note: An alternative implementation is possible that omits the `f`
    variable, and thus has one fewer degree of freedom. This means the fit is
    easier and maybe more precise. This could be tested. The notebook
    req4.1_fit_plane.ipynb in the explore repository
    (https://github.com/sundial-pointcloud-geometry/explore) has some notes on
    this. The problem with those models where f is just zero and the named
    symfit model is created for one of x, y or z instead is that you have to
    divide by one of the a, b or c parameters respectively. If one of these
    turns out to be zero, symfit will not find a fit. A solution would be
    to actually create three models and try another if one of them fails to
    converge.
    """
    a, b, c, d = sf.parameters('a, b, c, d')
    x, y, z, f = sf.variables('x, y, z, f')
    plane_model = {f: x * a + y * b + z * c + d}

    plane_fit = sf.Fit(plane_model, x=xyz[0], y=xyz[1], z=xyz[2],
                       f=np.zeros_like(xyz[0]),
                       constraints=[sf.Equality(a**2 + b**2 + c**2, 1)])  # keep plane normal a unit vector

    plane_fit_result = plane_fit.execute()

    return plane_fit_result
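A quick usage sketch under assumed inputs (the plane and noise level here are made up; `xyz` is a 3xN array, as the function expects):

# Hypothetical usage: recover the plane x + y + z = 1 from noisy samples.
import numpy as np

rng = np.random.default_rng(0)
xy = rng.uniform(-1, 1, size=(2, 200))
z = 1 - xy[0] - xy[1] + rng.normal(0, 0.01, size=200)
result = fit_plane(np.vstack([xy, z]))
print(result)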
Example #20
    def test_vector_fitting_guess(self):
        """
        Tests fitting to a 3 component vector valued function, with guesses.
        """
        a, b, c = parameters('a, b, c')
        a.value = 10
        b.value = 100
        a_i, b_i, c_i = variables('a_i, b_i, c_i')

        model = {a_i: a, b_i: b, c_i: c}

        xdata = np.array([
            [10.1, 9., 10.5, 11.2, 9.5, 9.6, 10.],
            [102.1, 101., 100.4, 100.8, 99.2, 100., 100.8],
            [71.6, 73.2, 69.5, 70.2, 70.8, 70.6, 70.1],
        ])

        fit = Fit(
            model=model,
            a_i=xdata[0],
            b_i=xdata[1],
            c_i=xdata[2],
            minimizer=MINPACK
        )
        fit_result = fit.execute()

        self.assertAlmostEqual(fit_result.value(a), np.mean(xdata[0]), 4)
        self.assertAlmostEqual(fit_result.value(b), np.mean(xdata[1]), 4)
        self.assertAlmostEqual(fit_result.value(c), np.mean(xdata[2]), 4)
Example #21
def test_likelihood_fitting_gaussian():
    """
    Fit using the likelihood method.
    """
    mu, sig = parameters('mu, sig')
    sig.min = 0.01
    sig.value = 3.0
    mu.value = 50.
    x = Variable('x')
    pdf = GradientModel(Gaussian(x, mu, sig))

    np.random.seed(10)
    # TODO: Do we really need 10k points?
    xdata = np.random.normal(51., 3.5, 10000)

    # Expected parameter values
    mean = np.mean(xdata)
    stdev = np.std(xdata)
    mean_stdev = stdev / np.sqrt(len(xdata))

    fit = Fit(pdf, xdata, objective=LogLikelihood)
    fit_result = fit.execute()

    assert fit_result.value(mu) == pytest.approx(mean, 1e-6)
    assert fit_result.stdev(mu) == pytest.approx(mean_stdev, 1e-3)
    assert fit_result.value(sig) == pytest.approx(np.std(xdata), 1e-6)
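The reference values in this test are the Gaussian maximum-likelihood estimators, which is why the sample mean and numpy's default (biased, ddof=0) standard deviation appear as the expected results:

\[ \hat\mu = \frac{1}{N}\sum_i x_i, \qquad \hat\sigma = \sqrt{\frac{1}{N}\sum_i (x_i - \hat\mu)^2}, \qquad \operatorname{stdev}(\hat\mu) = \frac{\hat\sigma}{\sqrt{N}}. \]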
Example #22
    def test_data_for_constraint(self):
        """
        Test the signature handling when constraints are at play. Constraints
        should take separate data, but kwargs that are found in neither the
        model nor the constraints should still raise an error.
        """
        A, mu, sig = parameters('A, mu, sig')
        x, y, Y = variables('x, y, Y')

        model = Model({y: A * Gaussian(x, mu=mu, sig=sig)})
        constraint = Model.as_constraint(Y, model, constraint_type=Eq)

        np.random.seed(2)
        xdata = np.random.normal(1.2, 2, 10)
        ydata, xedges = np.histogram(xdata, bins=int(np.sqrt(len(xdata))),
                                     density=True)

        # Allowed
        fit = Fit(model, x=xdata, y=ydata, Y=2, constraints=[constraint])
        fit = Fit(model, x=xdata, y=ydata)
        fit = Fit(model, x=xdata, objective=LogLikelihood)

        # Not allowed
        with self.assertRaises(TypeError):
            fit = Fit(model, x=xdata, y=ydata, Y=2)
        with self.assertRaises(TypeError):
            fit = Fit(model, x=xdata, y=ydata, Y=2, Z=3, constraints=[constraint])
Example #23
def test_pickle():
    """
    Make sure models are preserved when pickling.
    """
    a, b = parameters('a, b')
    x, y = variables('x, y')
    exact_model = Model({y: a * x ** b})
    constraint = Model.as_constraint(Eq(a, b), exact_model)
    num_model = CallableNumericalModel(
        {y: a * x ** b}, independent_vars=[x], params=[a, b]
    )
    connected_num_model = CallableNumericalModel(
        {y: a * x ** b}, connectivity_mapping={y: {x, a, b}}
    )
    # Test if lsoda args and kwargs are pickled too
    ode_model = ODEModel({D(y, x): a * x + b}, {x: 0.0}, 3, 4, some_kwarg=True)

    models = [exact_model, constraint, num_model, ode_model, connected_num_model]
    for model in models:
        new_model = pickle.loads(pickle.dumps(model))
        # Compare signatures
        assert model.__signature__ == new_model.__signature__
        # Trigger the cached vars because we compare `__dict__`s
        model.vars
        new_model.vars
        # Explicitly make sure the connectivity mapping is identical.
        assert model.connectivity_mapping == new_model.connectivity_mapping
        if not isinstance(model, ODEModel):
            model.function_dict
            model.vars_as_functions
            new_model.function_dict
            new_model.vars_as_functions
        assert model.__dict__ == new_model.__dict__
Example #24
    def test_likelihood_fitting_gaussian(self):
        """
        Fit using the likelihood method.
        """
        mu, sig = parameters('mu, sig')
        sig.min = 0.01
        sig.value = 3.0
        mu.value = 50.
        x = Variable()
        pdf = Gaussian(x, mu, sig)

        np.random.seed(10)
        xdata = np.random.normal(51., 3.5, 10000)

        # Expected parameter values
        mean = np.mean(xdata)
        stdev = np.std(xdata)
        mean_stdev = stdev/np.sqrt(len(xdata))

        fit = Fit(pdf, xdata, objective=LogLikelihood)
        fit_result = fit.execute()

        self.assertAlmostEqual(fit_result.value(mu) / mean, 1, 6)
        self.assertAlmostEqual(fit_result.stdev(mu) / mean_stdev, 1, 3)
        self.assertAlmostEqual(fit_result.value(sig) / np.std(xdata), 1, 6)
Example #25
    def test_vector_fitting(self):
        """
        Tests fitting to a 3 component vector valued function, without bounds
        or guesses.
        """
        a, b, c = parameters('a, b, c')
        a_i, b_i, c_i = variables('a_i, b_i, c_i')

        model = {a_i: a, b_i: b, c_i: c}

        xdata = np.array([
            [10.1, 9., 10.5, 11.2, 9.5, 9.6, 10.],
            [102.1, 101., 100.4, 100.8, 99.2, 100., 100.8],
            [71.6, 73.2, 69.5, 70.2, 70.8, 70.6, 70.1],
        ])

        fit = Fit(
            model=model,
            a_i=xdata[0],
            b_i=xdata[1],
            c_i=xdata[2],
            minimizer=MINPACK
        )
        fit_result = fit.execute()

        self.assertAlmostEqual(fit_result.value(a) / 9.985691, 1.0, 5)
        self.assertAlmostEqual(fit_result.value(b) / 1.006143e+02, 1.0, 4)
        self.assertAlmostEqual(fit_result.value(c) / 7.085713e+01, 1.0, 5)
Example #26
    def test_taylor_model(self):
        a, b = parameters('a, b')
        x, y, z = variables('x, y, z')

        model = Model({y: a * x + b})
        appr = TaylorModel(model)
        self.assertEqual(set([a, b]), set(appr.params))
        appr.p0 = {a: 2.0, b: 5.0}
        self.assertEqual(set(appr.p0.keys()),
                         set(appr.params_0[p] for p in appr.params))
        self.assertTrue(LinearLeastSquares.is_linear(appr))

        model = Model({z: a * x**2 + b * y**2})
        appr = TaylorModel(model)
        appr.p0 = {a: 2, b: 5}
        model = Model({z: a * x**2 + b * y**2})
        appr_2 = TaylorModel(model)
        appr_2.p0 = {a: 1, b: 1}
        self.assertTrue(appr == appr_2)

        model = Model({y: a * sympy.exp(x * b)})
        appr = TaylorModel(model)
        appr.p0 = {a: 2.0, b: 5.0}
        self.assertTrue(LinearLeastSquares.is_linear(appr))

        model = Model({y: sympy.sin(a * x)})
        appr = TaylorModel(model)
        appr.p0 = {a: 0.0}
        self.assertTrue(LinearLeastSquares.is_linear(appr))
Example #27
def test_vector_fitting():
    """
    Tests fitting to a 3 component vector valued function, without bounds
    or guesses.
    """
    a, b, c = parameters('a, b, c')
    a_i, b_i, c_i = variables('a_i, b_i, c_i')

    model = {a_i: a, b_i: b, c_i: c}

    xdata = np.array([
        [10.1, 9., 10.5, 11.2, 9.5, 9.6, 10.],
        [102.1, 101., 100.4, 100.8, 99.2, 100., 100.8],
        [71.6, 73.2, 69.5, 70.2, 70.8, 70.6, 70.1],
    ])

    fit = Fit(model=model,
              a_i=xdata[0],
              b_i=xdata[1],
              c_i=xdata[2],
              minimizer=MINPACK)
    fit_result = fit.execute()

    assert fit_result.value(a) == pytest.approx(np.mean(xdata[0]), 1e-5)
    assert fit_result.value(b) == pytest.approx(np.mean(xdata[1]), 1e-4)
    assert fit_result.value(c) == pytest.approx(np.mean(xdata[2]), 1e-5)
Example #28
    def test_taylor_model(self):
        a, b = parameters('a, b')
        x, y, z = variables('x, y, z')

        model = Model({y: a * x + b})
        appr = TaylorModel(model)
        self.assertEqual(set([a, b]), set(appr.params))
        appr.p0 = {a: 2.0, b: 5.0}
        self.assertEqual(set(appr.p0.keys()), set(appr.params_0[p] for p in appr.params))
        self.assertTrue(LinearLeastSquares.is_linear(appr))

        model = Model({z: a * x**2 + b * y**2})
        appr = TaylorModel(model)
        appr.p0 = {a: 2, b: 5}
        model = Model({z: a * x**2 + b * y**2})
        appr_2 = TaylorModel(model)
        appr_2.p0 = {a: 1, b: 1}
        self.assertTrue(appr == appr_2)

        model = Model({y: a * sympy.exp(x * b)})
        appr = TaylorModel(model)
        appr.p0 = {a: 2.0, b: 5.0}
        self.assertTrue(LinearLeastSquares.is_linear(appr))

        model = Model({y: sympy.sin(a * x)})
        appr = TaylorModel(model)
        appr.p0 = {a: 0.0}
        self.assertTrue(LinearLeastSquares.is_linear(appr))
Example #29
def test_fixed_parameters():
    """
    Make sure fixed parameters don't change on fitting
    """
    a, b, c, d = parameters('a, b, c, d')
    x, y = variables('x, y')

    c.value = 4.0
    a.min, a.max = 1.0, 5.0  # Bounds are needed for DifferentialEvolution
    b.min, b.max = 1.0, 5.0
    c.min, c.max = 1.0, 5.0
    d.min, d.max = 1.0, 5.0
    c.fixed = True

    model = Model({y: a * exp(-(x - b)**2 / (2 * c**2)) + d})
    # Generate data
    xdata = np.linspace(0, 100)
    ydata = model(xdata, a=2, b=3, c=2, d=2).y

    for minimizer in subclasses(BaseMinimizer):
        if minimizer is ChainedMinimizer:
            continue
        fit = Fit(model, x=xdata, y=ydata, minimizer=minimizer)
        fit_result = fit.execute()
        # Should still be 4.0, not 2.0!
        assert fit_result.params['c'] == 4.0
Example #30
    def test_straight_line_analytical(self):
        """
        Test symfit against a straight line, for which the parameters and their
        uncertainties are known analytically. Assuming equal weights.
        """
        data = [[0, 1], [1, 0], [3, 2], [5, 4]]
        xdata, ydata = (np.array(i, dtype='float64') for i in zip(*data))
        # x = np.arange(0, 100, 0.1)
        # np.random.seed(10)
        # y = 3.0*x + 105.0 + np.random.normal(size=x.shape)

        dx = xdata - xdata.mean()
        dy = ydata - ydata.mean()
        mean_squared_x = np.mean(xdata**2) - np.mean(xdata)**2
        mean_xy = np.mean(xdata * ydata) - np.mean(xdata)*np.mean(ydata)
        a = mean_xy/mean_squared_x
        b = ydata.mean() - a * xdata.mean()
        self.assertAlmostEqual(a, 0.694915, 6) # values from Mathematica
        self.assertAlmostEqual(b, 0.186441, 6)

        S = np.sum((ydata - (a*xdata + b))**2)
        var_a_exact = S/(len(xdata) * (len(xdata) - 2) * mean_squared_x)
        var_b_exact = var_a_exact*np.mean(xdata**2)
        a_exact = a
        b_exact = b

        # We will now compare these exact results with values from symfit, numerically
        a, b = parameters('a, b')
        x, y = variables('x, y')
        model = {y: a*x + b}
        fit = NumericalLeastSquares(model, x=xdata, y=ydata)
        fit_result = fit.execute()

        popt, pcov = curve_fit(lambda z, c, d: c * z + d, xdata, ydata,
                               jac=lambda z, c, d: np.transpose([xdata, np.ones_like(xdata)]))

        # curve_fit
        self.assertAlmostEqual(a_exact, popt[0], 4)
        self.assertAlmostEqual(b_exact, popt[1], 4)
        self.assertAlmostEqual(var_a_exact, pcov[0][0], 6)
        self.assertAlmostEqual(var_b_exact, pcov[1][1], 6)

        self.assertAlmostEqual(a_exact, fit_result.value(a), 4)
        self.assertAlmostEqual(b_exact, fit_result.value(b), 4)
        self.assertAlmostEqual(var_a_exact, fit_result.variance(a), 6)
        self.assertAlmostEqual(var_b_exact, fit_result.variance(b), 6)

        # Do the fit with the LinearLeastSquares object
        fit = LinearLeastSquares(model, x=xdata, y=ydata)
        fit_result = fit.execute()
        self.assertAlmostEqual(a_exact, fit_result.value(a), 4)
        self.assertAlmostEqual(b_exact, fit_result.value(b), 4)
        self.assertAlmostEqual(var_a_exact, fit_result.variance(a), 6)
        self.assertAlmostEqual(var_b_exact, fit_result.variance(b), 6)

        # Let's also make sure the entire covariance matrix is the same
        for cov1, cov2 in zip(fit_result.params.covariance_matrix.flatten(), pcov.flatten()):
            self.assertAlmostEqual(cov1, cov2)
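As an independent cross-check (an addition here, not part of the original test), np.polyfit reproduces the same analytical straight line for equal weights:

import numpy as np

xdata = np.array([0., 1., 3., 5.])
ydata = np.array([1., 0., 2., 4.])
a, b = np.polyfit(xdata, ydata, 1)
# a ~ 0.694915, b ~ 0.186441, matching the Mathematica values asserted above.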
Example #31
    def test_vector_none_fitting(self):
        """
        Fit to a 3 component vector valued function with one variable's data set
        to None, without bounds or guesses.
        """
        a, b, c = parameters('a, b, c')
        a_i, b_i, c_i = variables('a_i, b_i, c_i')

        model = {a_i: a, b_i: b, c_i: c}

        xdata = np.array([
            [10.1, 9., 10.5, 11.2, 9.5, 9.6, 10.],
            [102.1, 101., 100.4, 100.8, 99.2, 100., 100.8],
            [71.6, 73.2, 69.5, 70.2, 70.8, 70.6, 70.1],
        ])

        fit_none = NumericalLeastSquares(
            model=model,
            a_i=xdata[0],
            b_i=xdata[1],
            c_i=None,
        )
        fit = NumericalLeastSquares(
            model=model,
            a_i=xdata[0],
            b_i=xdata[1],
            c_i=xdata[2],
        )
        fit_none_result = fit_none.execute()
        fit_result = fit.execute()

        self.assertAlmostEqual(fit_none_result.value(a), fit_result.value(a), 4)
        self.assertAlmostEqual(fit_none_result.value(b), fit_result.value(b), 4)
        # the parameter without data should be unchanged.
        self.assertAlmostEqual(fit_none_result.value(c), 1.0)
Example #32
    def test_vector_fitting_bounds(self):
        """
        Tests fitting to a 3 component vector valued function, with bounds.
        """
        a, b, c = parameters('a, b, c')
        a.min = 0
        a.max = 25
        b.min = 0
        b.max = 500
        a_i, b_i, c_i = variables('a_i, b_i, c_i')

        model = {a_i: a, b_i: b, c_i: c}

        xdata = np.array([
            [10.1, 9., 10.5, 11.2, 9.5, 9.6, 10.],
            [102.1, 101., 100.4, 100.8, 99.2, 100., 100.8],
            [71.6, 73.2, 69.5, 70.2, 70.8, 70.6, 70.1],
        ])

        fit = NumericalLeastSquares(
            model=model,
            a_i=xdata[0],
            b_i=xdata[1],
            c_i=xdata[2],
        )
        fit_result = fit.execute()

        self.assertAlmostEqual(fit_result.value(a), np.mean(xdata[0]), 4)
        self.assertAlmostEqual(fit_result.value(b), np.mean(xdata[1]), 4)
        self.assertAlmostEqual(fit_result.value(c), np.mean(xdata[2]), 4)
Example #33
def test_vector_none_fitting():
    """
    Fit to a 3 component vector valued function with one variable's data set
    to None, without bounds or guesses.
    """
    a, b, c = parameters('a, b, c')
    a_i, b_i, c_i = variables('a_i, b_i, c_i')

    model = {a_i: a, b_i: b, c_i: c}

    xdata = np.array([
        [10.1, 9., 10.5, 11.2, 9.5, 9.6, 10.],
        [102.1, 101., 100.4, 100.8, 99.2, 100., 100.8],
        [71.6, 73.2, 69.5, 70.2, 70.8, 70.6, 70.1],
    ])

    fit_none = Fit(model=model,
                   a_i=xdata[0],
                   b_i=xdata[1],
                   c_i=None,
                   minimizer=MINPACK)
    fit = Fit(model=model,
              a_i=xdata[0],
              b_i=xdata[1],
              c_i=xdata[2],
              minimizer=MINPACK)
    fit_none_result = fit_none.execute()
    fit_result = fit.execute()

    assert fit_none_result.value(b) == pytest.approx(fit_result.value(b), 1e-4)
    assert fit_none_result.value(a) == pytest.approx(fit_result.value(a), 1e-4)
    # the parameter without data should be unchanged.
    assert fit_none_result.value(c) == pytest.approx(1.0)
Example #34
    def test_likelihood_fitting_gaussian(self):
        """
        Fit using the likelihood method.
        """
        mu, sig = parameters('mu, sig')
        sig.min = 0.01
        sig.value = 3.0
        mu.value = 50.
        x = Variable()
        pdf = Gaussian(x, mu, sig)

        np.random.seed(10)
        xdata = np.random.normal(51., 3.5, 10000)

        # Expected parameter values
        mean = np.mean(xdata)
        stdev = np.std(xdata)
        mean_stdev = stdev/np.sqrt(len(xdata))

        fit = Fit(pdf, xdata, objective=LogLikelihood)
        fit_result = fit.execute()

        self.assertAlmostEqual(fit_result.value(mu) / mean, 1, 6)
        self.assertAlmostEqual(fit_result.stdev(mu) / mean_stdev, 1, 3)
        self.assertAlmostEqual(fit_result.value(sig) / np.std(xdata), 1, 6)
Example #35
    def __init__(self, data_dir, degree):
        # Store the data directory and the Fourier series order.
        self.data_dir = data_dir
        self.degree = degree
        x, y = variables('x, y')
        w, = parameters('w')
        self.model_dict = {y: self.fourier_series(x, f=w, n=self.degree)}
Example #36
def test_initial_parameters():
    """
    Identical to test_polgar, but with a0 as a free Parameter.
    """
    a, b, c, d, t = variables('a, b, c, d, t')
    k, p, l, m = parameters('k, p, l, m')

    a0 = Parameter('a0', min=0, value=10, fixed=True)
    c0 = Parameter('c0', min=0, value=0.1)
    b = a0 - d + a
    model_dict = {
        D(d, t): l * c * b - m * d,
        D(c, t): k * a * b - p * c - l * c * b + m * d,
        D(a, t): - k * a * b + p * c,
    }

    ode_model = ODEModel(model_dict, initial={t: 0.0, a: a0, c: c0, d: 0.0})

    # Generate some data
    tdata = np.linspace(0, 3, 1000)
    # Eval
    AA, AAB, BAAB = ode_model(t=tdata, k=0.1, l=0.2, m=.3, p=0.3, a0=10, c0=0)
    fit = Fit(ode_model, t=tdata, a=AA, c=AAB, d=BAAB)
    results = fit.execute()
    print(results)
    assert results.value(a0) == pytest.approx(10, abs=1e-8)
    assert results.value(c0) == pytest.approx(0, abs=1e-8)

    assert ode_model.params == [a0, c0, k, l, m, p]
    assert ode_model.initial_params == [a0, c0]
    assert ode_model.model_params == [a0, k, l, m, p]
Example #37
    def test_single_eval(self):
        """
        Eval an ODEModel at a single value rather than a vector.
        """
        x, y, t = variables('x, y, t')
        k, = parameters('k')

        # The harmonic oscillator written as a first-order system; >1st order is not supported yet.
        harmonic_dict = {
            D(x, t): - k * y,
            D(y, t): k * x,
        }

        # Make a second model to prevent caching of integration results.
        # This also means harmonic_dict should NOT be a Model object.
        harmonic_model_array = ODEModel(harmonic_dict, initial={t: 0.0, x: 1.0, y: 0.0})
        harmonic_model_points = ODEModel(harmonic_dict, initial={t: 0.0, x: 1.0, y: 0.0})
        tdata = np.linspace(0, 100, 101)
        X, Y = harmonic_model_array(t=tdata, k=0.1)
        # Shuffle the data to prevent using the result at time t to calculate
        # t+dt
        random_order = np.random.permutation(len(tdata))
        for idx in random_order:
            t = tdata[idx]
            X_val = X[idx]
            Y_val = Y[idx]
            X_point, Y_point = harmonic_model_points(t=t, k=0.1)
            self.assertAlmostEqual(X_point[0], X_val)
            self.assertAlmostEqual(Y_point[0], Y_val)
Example #38
    def test_global_fitting(self):
        """
        In case of shared parameters between the components of the model,
        `Fit` should automatically use `ConstrainedLeastSquares`.
        """
        x_1, x_2, y_1, y_2 = variables('x_1, x_2, y_1, y_2')
        y0, a_1, a_2, b_1, b_2 = parameters('y0, a_1, a_2, b_1, b_2')

        # The following vector valued function links all the equations together
        # as stated in the intro.
        model = Model({
            y_1: a_1 * x_1**2 + b_1 * x_1 + y0,
            y_2: a_2 * x_2**2 + b_2 * x_2 + y0,
        })
        self.assertTrue(model.shared_parameters)

        # Generate data from this model
        xdata1 = np.linspace(0, 10)
        xdata2 = xdata1[::2] # Only every other point.

        ydata1, ydata2 = model(x_1=xdata1, x_2=xdata2, a_1=101.3, b_1=0.5, a_2=56.3, b_2=1.1111, y0=10.8)
        # Add some noise to make it appear like real data
        np.random.seed(1)
        ydata1 += np.random.normal(0, 2, size=ydata1.shape)
        ydata2 += np.random.normal(0, 2, size=ydata2.shape)

        xdata = [xdata1, xdata2]
        ydata = [ydata1, ydata2]

        # Guesses
        a_1.value = 100
        a_2.value = 50
        b_1.value = 1
        b_2.value = 1
        y0.value = 10

        fit = Fit(
            model, x_1=xdata[0], x_2=xdata[1], y_1=ydata[0], y_2=ydata[1]
        )
        self.assertIsInstance(fit.fit, ConstrainedNumericalLeastSquares)

        # The next model does not share parameters, but is still a vector
        model = Model({
            y_1: a_1 * x_1**2 + b_1 * x_1,
            y_2: a_2 * x_2**2 + b_2 * x_2,
        })
        fit = Fit(
            model, x_1=xdata[0], x_2=xdata[1], y_1=ydata[0], y_2=ydata[1]
        )
        self.assertFalse(model.shared_parameters)
        self.assertIsInstance(fit.fit, NumericalLeastSquares)

        # Scalar model, so it should use NumericalLeastSquares.
        model = Model({
            y_1: a_1 * x_1**2 + b_1 * x_1,
        })
        fit = Fit(model, x_1=xdata[0], y_1=ydata[0])
        self.assertFalse(model.shared_parameters)
        self.assertIsInstance(fit.fit, NumericalLeastSquares)
Example #39
    def test_single_eval(self):
        """
        Eval an ODEModel at a single value rather than a vector.
        """
        x, y, t = variables('x, y, t')
        k, = parameters('k')

        # The harmonic oscillator written as a first-order system; >1st order is not supported yet.
        harmonic_dict = {
            D(x, t): - k * y,
            D(y, t): k * x,
        }

        # Make a second model to prevent caching of integration results.
        # This also means harmonic_dict should NOT be a Model object.
        harmonic_model_array = ODEModel(harmonic_dict, initial={t: 0.0, x: 1.0, y: 0.0})
        harmonic_model_points = ODEModel(harmonic_dict, initial={t: 0.0, x: 1.0, y: 0.0})
        tdata = np.linspace(-100, 100, 101)
        X, Y = harmonic_model_array(t=tdata, k=0.1)
        # Shuffle the data to prevent using the result at time t to calculate
        # t+dt
        random_order = np.random.permutation(len(tdata))
        for idx in random_order:
            t = tdata[idx]
            X_val = X[idx]
            Y_val = Y[idx]
            X_point, Y_point = harmonic_model_points(t=t, k=0.1)
            self.assertAlmostEqual(X_point[0], X_val)
            self.assertAlmostEqual(Y_point[0], Y_val)
Example #40
    def test_vector_fitting_guess(self):
        """
        Tests fitting to a 3 component vector valued function, with guesses.
        """
        a, b, c = parameters('a, b, c')
        a.value = 10
        b.value = 100
        a_i, b_i, c_i = variables('a_i, b_i, c_i')

        model = {a_i: a, b_i: b, c_i: c}

        xdata = np.array([
            [10.1, 9., 10.5, 11.2, 9.5, 9.6, 10.],
            [102.1, 101., 100.4, 100.8, 99.2, 100., 100.8],
            [71.6, 73.2, 69.5, 70.2, 70.8, 70.6, 70.1],
        ])

        fit = NumericalLeastSquares(
            model=model,
            a_i=xdata[0],
            b_i=xdata[1],
            c_i=xdata[2],
        )
        fit_result = fit.execute()

        self.assertAlmostEqual(fit_result.value(a), np.mean(xdata[0]), 4)
        self.assertAlmostEqual(fit_result.value(b), np.mean(xdata[1]), 4)
        self.assertAlmostEqual(fit_result.value(c), np.mean(xdata[2]), 4)
Example #41
    def test_known_solution(self):
        p, c1 = parameters('p, c1')
        y, t = variables('y, t')
        p.value = 3.0

        model_dict = {
            D(y, t): - p * y,
        }

        # Lets say we know the exact solution to this problem
        sol = Model({y: exp(- p * t)})

        # Generate some data
        tdata = np.linspace(0, 3, 10001)
        ydata = sol(t=tdata, p=3.22)[0]
        ydata += np.random.normal(0, 0.005, ydata.shape)

        ode_model = ODEModel(model_dict, initial={t: 0.0, y: ydata[0]})
        fit = Fit(ode_model, t=tdata, y=ydata)
        ode_result = fit.execute()

        c1.value = ydata[0]
        fit = Fit(sol, t=tdata, y=ydata)
        fit_result = fit.execute()

        self.assertAlmostEqual(ode_result.value(p) / fit_result.value(p), 1, 2)
        self.assertAlmostEqual(ode_result.r_squared / fit_result.r_squared, 1, 4)
        self.assertAlmostEqual(ode_result.stdev(p) / fit_result.stdev(p), 1, 3)
Example #42
    def test_vector_fitting(self):
        """
        Tests fitting to a 3 component vector valued function, without bounds
        or guesses.
        """
        a, b, c = parameters('a, b, c')
        a_i, b_i, c_i = variables('a_i, b_i, c_i')

        model = {a_i: a, b_i: b, c_i: c}

        xdata = np.array([
            [10.1, 9., 10.5, 11.2, 9.5, 9.6, 10.],
            [102.1, 101., 100.4, 100.8, 99.2, 100., 100.8],
            [71.6, 73.2, 69.5, 70.2, 70.8, 70.6, 70.1],
        ])

        fit = NumericalLeastSquares(
            model=model,
            a_i=xdata[0],
            b_i=xdata[1],
            c_i=xdata[2],
        )
        fit_result = fit.execute()

        self.assertAlmostEqual(fit_result.value(a), 9.985691, 6)
        self.assertAlmostEqual(fit_result.value(b), 1.006143e+02, 4)
        self.assertAlmostEqual(fit_result.value(c), 7.085713e+01, 5)
Example #43
    def test_global_fitting(self):
        """
        Test a global fitting scenario with datasets of unequal length. In this
        scenario, a quadratic equation is fitted where the constant term is
        shared between the datasets (e.g. identical background noise).
        """
        x_1, x_2, y_1, y_2 = variables('x_1, x_2, y_1, y_2')
        y0, a_1, a_2, b_1, b_2 = parameters('y0, a_1, a_2, b_1, b_2')

        # The following vector valued function links all the equations together
        # as stated in the intro.
        model = Model({
            y_1: a_1 * x_1**2 + b_1 * x_1 + y0,
            y_2: a_2 * x_2**2 + b_2 * x_2 + y0,
        })

        # Generate data from this model
        # xdata = np.linspace(0, 10)
        xdata1 = np.linspace(0, 10)
        xdata2 = xdata1[::2]  # Make the sets of unequal size

        ydata1, ydata2 = model(x_1=xdata1,
                               x_2=xdata2,
                               a_1=101.3,
                               b_1=0.5,
                               a_2=56.3,
                               b_2=1.1111,
                               y0=10.8)
        # Add some noise to make it appear like real data
        np.random.seed(1)
        ydata1 += np.random.normal(0, 2, size=ydata1.shape)
        ydata2 += np.random.normal(0, 2, size=ydata2.shape)

        xdata = [xdata1, xdata2]
        ydata = [ydata1, ydata2]

        # Guesses
        a_1.value = 100
        a_2.value = 50
        b_1.value = 1
        b_2.value = 1
        y0.value = 10

        sigma_y = np.concatenate((np.ones(20), [2., 4., 5, 7, 3]))

        fit = ConstrainedNumericalLeastSquares(model,
                                               x_1=xdata[0],
                                               x_2=xdata[1],
                                               y_1=ydata[0],
                                               y_2=ydata[1],
                                               sigma_y_2=sigma_y)
        fit_result = fit.execute()

        # fit_curves = model(x_1=xdata[0], x_2=xdata[1], **fit_result.params)
        self.assertAlmostEqual(fit_result.value(y0), 1.061892e+01, 3)
        self.assertAlmostEqual(fit_result.value(a_1), 1.013269e+02, 3)
        self.assertAlmostEqual(fit_result.value(a_2), 5.625694e+01, 3)
        self.assertAlmostEqual(fit_result.value(b_1), 3.362240e-01, 3)
        self.assertAlmostEqual(fit_result.value(b_2), 1.565253e+00, 3)
Example #44
    def app(self, n):
        n = n + 1
        x, y = variables('x, y')
        w, = parameters('w')
        model_dict = {y: self.fourier_series(x, f=w, n=n)}
        fit = Fit(model_dict, x=self.xdata, y=self.ydata)
        fit_result = fit.execute()
        return fit.model(x=self.xdata, **fit_result.params).y
Example #45
    def test_interdependency_constrained(self):
        """
        Test a model with interdependent components, and with constraints which
        depend on the Model's output.
        This is done in the MatrixSymbol formalism, using a Tikhonov
        regularization as an example. In this, a matrix inverse has to be
        calculated and is used multiple times. Therefore we split that term
        off into a separate component, so the inverse only has to be computed
        once per model call.

        See https://arxiv.org/abs/1901.05348 for a more detailed background.
        """
        N = Symbol('N', integer=True)
        M = MatrixSymbol('M', N, N)
        W = MatrixSymbol('W', N, N)
        I = MatrixSymbol('I', N, N)
        y = MatrixSymbol('y', N, 1)
        c = MatrixSymbol('c', N, 1)
        a, = parameters('a')
        z, = variables('z')
        i = Idx('i')

        model_dict = {
            W: Inverse(I + M / a ** 2),
            c: - W * y,
            z: sqrt(c.T * c)
        }
        # Sympy currently does not support derivatives of matrix expressions,
        # so we use CallableModel instead of Model.
        model = CallableModel(model_dict)

        # Generate data
        iden = np.eye(2)
        M_mat = np.array([[2, 1], [3, 4]])
        y_vec = np.array([[3], [5]])
        eval_model = model(I=iden, M=M_mat, y=y_vec, a=0.1)
        # Calculate the answers 'manually' so I know it was done properly
        W_manual = np.linalg.inv(iden + M_mat / 0.1 ** 2)
        c_manual = - np.atleast_2d(W_manual.dot(y_vec))
        z_manual = np.atleast_1d(np.sqrt(c_manual.T.dot(c_manual)))

        self.assertEqual(y_vec.shape, (2, 1))
        self.assertEqual(M_mat.shape, (2, 2))
        self.assertEqual(iden.shape, (2, 2))
        self.assertEqual(W_manual.shape, (2, 2))
        self.assertEqual(c_manual.shape, (2, 1))
        self.assertEqual(z_manual.shape, (1, 1))
        np.testing.assert_almost_equal(W_manual, eval_model.W)
        np.testing.assert_almost_equal(c_manual, eval_model.c)
        np.testing.assert_almost_equal(z_manual, eval_model.z)
        fit = Fit(model, z=z_manual, I=iden, M=M_mat, y=y_vec)
        fit_result = fit.execute()

        # See if a == 0.1 was reconstructed properly. Since only a**2 features
        # in the equations, we check for the absolute value. Setting a.min = 0.0
        # is not appreciated by the Minimizer, it seems.
        self.assertAlmostEqual(np.abs(fit_result.value(a)), 0.1)
Example #46
    def test_jacobian_matrix(self):
        """
        The Jacobian matrix of a model should be a 2D list (matrix) containing
        all the partial derivatives.
        """
        a, b, c = parameters('a, b, c')
        a_i, b_i, c_i = variables('a_i, b_i, c_i')

        model = Model({a_i: 2 * a + 3 * b, b_i: 5 * b, c_i: 7 * c})
        self.assertEqual([[2, 3, 0], [0, 5, 0], [0, 0, 7]], model.jacobian)
Example #47
    def test_global_fitting(self):
        """
        Test a global fitting scenario with datasets of unequal length. In this
        scenario, a quadratic equation is fitted where the constant term is
        shared between the datasets (e.g. identical background noise).
        """
        x_1, x_2, y_1, y_2 = variables('x_1, x_2, y_1, y_2')
        y0, a_1, a_2, b_1, b_2 = parameters('y0, a_1, a_2, b_1, b_2')

        # The following vector valued function links all the equations together
        # as stated in the intro.
        model = Model({
            y_1: a_1 * x_1**2 + b_1 * x_1 + y0,
            y_2: a_2 * x_2**2 + b_2 * x_2 + y0,
        })

        # Generate data from this model
        # xdata = np.linspace(0, 10)
        xdata1 = np.linspace(0, 10)
        xdata2 = xdata1[::2]  # Make the sets of unequal size

        ydata1, ydata2 = model(x_1=xdata1, x_2=xdata2, a_1=101.3, b_1=0.5, a_2=56.3, b_2=1.1111, y0=10.8)
        # Add some noise to make it appear like real data
        np.random.seed(1)
        ydata1 += np.random.normal(0, 2, size=ydata1.shape)
        ydata2 += np.random.normal(0, 2, size=ydata2.shape)

        xdata = [xdata1, xdata2]
        ydata = [ydata1, ydata2]

        # Guesses
        a_1.value = 100
        a_2.value = 50
        b_1.value = 1
        b_2.value = 1
        y0.value = 10

        eval_jac = model.eval_jacobian(x_1=xdata1, x_2=xdata2, a_1=101.3,
                                       b_1=0.5, a_2=56.3, b_2=1.1111, y0=10.8)
        self.assertEqual(len(eval_jac), 2)
        for comp in eval_jac:
            self.assertEqual(len(comp), len(model.params))

        sigma_y = np.concatenate((np.ones(20), [2., 4., 5, 7, 3]))

        fit = Fit(model, x_1=xdata[0], x_2=xdata[1],
                  y_1=ydata[0], y_2=ydata[1], sigma_y_2=sigma_y)
        fit_result = fit.execute()

        # fit_curves = model(x_1=xdata[0], x_2=xdata[1], **fit_result.params)
        self.assertAlmostEqual(fit_result.value(y0), 1.061892e+01, 3)
        self.assertAlmostEqual(fit_result.value(a_1), 1.013269e+02, 3)
        self.assertAlmostEqual(fit_result.value(a_2), 5.625694e+01, 3)
        self.assertAlmostEqual(fit_result.value(b_1), 3.362240e-01, 3)
        self.assertAlmostEqual(fit_result.value(b_2), 1.565253e+00, 3)
Example #48
    def test_order(self):
        """
        The model has to behave like an OrderedDict. This is of the utmost importance!
        """
        x, y_1, y_2 = variables('x, y_1, y_2')
        a, b = parameters('a, b')

        model_dict = {y_2: a * x**2, y_1: 2 * x * b}
        model = Model(model_dict)

        self.assertEqual(model.dependent_vars, list(model.keys()))
Example #49
    def test_hessian_matrix(self):
        """
        The Hessian of a model should be a 3D nested list containing
        all the 2nd partial derivatives.
        """
        a, b, c = parameters('a, b, c')
        a_i, b_i, c_i = variables('a_i, b_i, c_i')

        model = Model({a_i: 2 * a**2 + 3 * b, b_i: 5 * b**2, c_i: 7 * c*b})
        self.assertEqual([[[4, 0, 0], [0, 0, 0], [0, 0, 0]],
                          [[0, 0, 0], [0, 10, 0], [0, 0, 0]],
                          [[0, 0, 0], [0, 0, 7], [0, 7, 0]]], model.hessian)
Example #50
    def test_basinhopping(self):
        func = lambda x: np.cos(14.5 * x - 0.3) + (x + 0.2) * x
        x0 = [1.]
        np.random.seed(555)
        res = basinhopping(func, x0, minimizer_kwargs={"method": "BFGS"}, niter=200)
        np.random.seed(555)
        x, = parameters('x')
        fit = BasinHopping(func, [x], local_minimizer=BFGS)
        fit_result = fit.execute(niter=200)
        # fit_result = fit.execute(minimizer_kwargs={"method": "BFGS"}, niter=200)

        self.assertEqual(res.x, fit_result.value(x))
        self.assertEqual(res.fun, fit_result.objective_value)
Example #51
    def test_linear_analytical_fit(self):
        a, b = parameters('a, b')
        x, y = variables('x, y')
        model = {y: a * x + b}

        data = [[0, 1], [1, 0], [3, 2], [5, 4]]
        xdata, ydata = (np.array(i, dtype='float64') for i in zip(*data))

        fit = LinearLeastSquares(model, x=xdata, y=ydata)
        fit_result = fit.execute()

        self.assertAlmostEqual(fit_result.value(a), 0.694915, 6) # values from Mathematica
        self.assertAlmostEqual(fit_result.value(b), 0.186441, 6)
Example #52
    def test_unequal_data(self):
        """
        Test to make sure finite differences work with data of unequal length.
        """
        x_1, x_2, y_1, y_2 = sf.variables('x_1, x_2, y_1, y_2')
        y0, a_1, a_2, b_1, b_2 = sf.parameters('y0, a_1, a_2, b_1, b_2')

        model = sf.Model({
            y_1: a_1 * x_1**2 + b_1 * x_1 + y0,
            y_2: a_2 * x_2**2 + b_2 * x_2 + y0,
        })

        # Generate data from this model
        xdata1 = np.linspace(0, 10)
        xdata2 = xdata1[::2]  # Only every other point.

        exact = model.eval_jacobian(x_1=xdata1, x_2=xdata2,
                                    a_1=101.3, b_1=0.5, a_2=56.3, b_2=1.1111, y0=10.8)
        approx = model.finite_difference(x_1=xdata1, x_2=xdata2,
                                         a_1=101.3, b_1=0.5, a_2=56.3, b_2=1.1111, y0=10.8)
        # First axis is the number of components
        self.assertEqual(len(exact), 2)
        self.assertEqual(len(approx), 2)

        # Second axis is the number of parameters, same for all components
        for exact_comp, approx_comp, xdata in zip(exact, approx, [xdata1, xdata2]):
            self.assertEqual(len(exact_comp), len(model.params))
            self.assertEqual(len(approx_comp), len(model.params))
            for exact_elem, approx_elem in zip(exact_comp, approx_comp):
                self.assertEqual(exact_elem.shape, xdata.shape)
                self.assertEqual(approx_elem.shape, xdata.shape)

        self._assert_equal(exact, approx, rtol=1e-4)

        model = sf.Model({
            y_1: a_1 * x_1**2 + b_1 * x_1,
            y_2: a_2 * x_2**2 + b_2 * x_2,
        })

        exact = model.eval_jacobian(x_1=xdata1, x_2=xdata2,
                                    a_1=101.3, b_1=0.5, a_2=56.3, b_2=1.1111)
        approx = model.finite_difference(x_1=xdata1, x_2=xdata2,
                                         a_1=101.3, b_1=0.5, a_2=56.3, b_2=1.1111)
        self._assert_equal(exact, approx, rtol=1e-4)

        model = sf.Model({
            y_1: a_1 * x_1**2 + b_1 * x_1,
        })
        exact = model.eval_jacobian(x_1=xdata1, a_1=101.3, b_1=0.5)
        approx = model.finite_difference(x_1=xdata1, a_1=101.3, b_1=0.5)
        self._assert_equal(exact, approx, rtol=1e-4)
Example #53
    def test_MatrixSymbolModel(self):
        """
        Test a model which is defined by MatrixSymbols, see #194
        """
        N = Symbol('N', integer=True)
        M = MatrixSymbol('M', N, N)
        W = MatrixSymbol('W', N, N)
        I = MatrixSymbol('I', N, N)
        y = MatrixSymbol('y', N, 1)
        c = MatrixSymbol('c', N, 1)
        a, b = parameters('a, b')
        z, x = variables('z, x')

        model_dict = {
            W: Inverse(I + M / a ** 2),
            c: - W * y,
            z: sqrt(c.T * c)
        }
        # TODO: This should be a Model in the future, but sympy is not yet
        # capable of computing Matrix derivatives at the time of writing.
        model = CallableModel(model_dict)

        self.assertEqual(model.params, [a])
        self.assertEqual(model.independent_vars, [I, M, y])
        self.assertEqual(model.dependent_vars, [z])
        self.assertEqual(model.interdependent_vars, [W, c])
        self.assertEqual(model.connectivity_mapping,
                         {W: {I, M, a}, c: {W, y}, z: {c}})
        # Generate data
        iden = np.eye(2)
        M_mat = np.array([[2, 1], [3, 4]])
        y_vec = np.array([3, 5])

        eval_model = model(I=iden, M=M_mat, y=y_vec, a=0.1)
        W_manual = np.linalg.inv(iden + M_mat / 0.1 ** 2)
        c_manual = - W_manual.dot(y_vec)
        z_manual = np.atleast_1d(np.sqrt(c_manual.T.dot(c_manual)))
        np.testing.assert_allclose(eval_model.W, W_manual)
        np.testing.assert_allclose(eval_model.c, c_manual)
        np.testing.assert_allclose(eval_model.z, z_manual)

        # Now try to retrieve the value of `a` from a fit
        a.value = 0.2
        fit = Fit(model, z=z_manual, I=iden, M=M_mat, y=y_vec)
        fit_result = fit.execute()
        eval_model = model(I=iden, M=M_mat, y=y_vec, **fit_result.params)
        self.assertAlmostEqual(0.1, np.abs(fit_result.value(a)))
        np.testing.assert_allclose(eval_model.W, W_manual, rtol=1e-5)
        np.testing.assert_allclose(eval_model.c, c_manual, rtol=1e-5)
        np.testing.assert_allclose(eval_model.z, z_manual, rtol=1e-5)
Example #54
    def test_1_multi_model(self):
        '''Tests the case with 1 component and multiple parameters'''
        x, y = sf.variables('x, y')
        a, b = sf.parameters('a, b')
        model = sf.Model({y: 3 * a * x**2 - sf.exp(b) * x})
        x_data = np.arange(10)

        exact = model.eval_jacobian(x=x_data, a=3.5, b=2)
        approx = model.finite_difference(x=x_data, a=3.5, b=2)
        np.testing.assert_allclose(exact, approx)

        exact = model.eval_jacobian(x=3, a=3.5, b=2)
        approx = model.finite_difference(x=3, a=3.5, b=2)
        np.testing.assert_allclose(exact, approx)
Example #55
    def test_model_from_dict(self):
        """
        Tries to create a model from a dictionary.
        """
        x, y_1, y_2 = variables('x, y_1, y_2')
        a, b = parameters('a, b')
        # This way the test fails rather than errors.
        try:
            Model({
                y_1: 2 * a * x,
                y_2: b * x**2,
            })
        except Exception as error:
            self.fail('test_model_from_dict raised {}'.format(error))
Example #56
    def test_powell(self):
        """
        Powell with a single parameter used to give an error because scipy
        returned a 0-d array, so no error here means we win.
        """
        x, y = variables('x, y')
        a, b = parameters('a, b')
        b.fixed = True

        model = Model({y: a * x + b})
        xdata = np.linspace(0, 10)
        ydata = model(x=xdata, a=5.5, b=15.0).y + np.random.normal(0, 1)
        fit = Fit({y: a * x + b}, x=xdata, y=ydata, minimizer=Powell)
        fit_result = fit.execute()
        self.assertAlmostEqual(fit_result.value(b), 1.0)
Example #57
    def test_multi_multi_model(self):
        '''Tests the case with multiple components and multiple parameters'''
        x, y, z = sf.variables('x, y, z')
        a, b, c = sf.parameters('a, b, c')
        model = sf.Model({y: 3 * a * x**2 + b * x - c,
                          z: sf.exp(a*x - b) * c})
        x_data = np.arange(10)

        exact = model.eval_jacobian(x=x_data, a=3.5, b=2, c=5)
        approx = model.finite_difference(x=x_data, a=3.5, b=2, c=5)
        np.testing.assert_allclose(exact, approx, rtol=1e-5)

        exact = model.eval_jacobian(x=3, a=3.5, b=2, c=5)
        approx = model.finite_difference(x=3, a=3.5, b=2, c=5)
        np.testing.assert_allclose(exact, approx, rtol=1e-5)
Example #58
    def test_multi_1_model(self):
        '''Tests the case with multiple components and one parameter'''
        x, y, z = sf.variables('x, y, z')
        a, = sf.parameters('a')
        model = sf.Model({y: 3 * a * x**2,
                          z: sf.exp(a*x)})
        x_data = np.arange(10)

        exact = model.eval_jacobian(x=x_data, a=3.5)
        approx = model.finite_difference(x=x_data, a=3.5)
        np.testing.assert_allclose(exact, approx)

        exact = model.eval_jacobian(x=3, a=3.5)
        approx = model.finite_difference(x=3, a=3.5)
        np.testing.assert_allclose(exact, approx)
Example #59
    def test_model_as_dict(self):
        x, y_1, y_2 = variables('x, y_1, y_2')
        a, b = parameters('a, b')

        model_dict = OrderedDict([(y_1, a * x**2), (y_2, 2 * x * b)])
        model = Model(model_dict)

        self.assertEqual(id(model[y_1]), id(model_dict[y_1]))
        self.assertEqual(id(model[y_2]), id(model_dict[y_2]))
        self.assertEqual(len(model), len(model_dict))
        self.assertEqual(model.items(), model_dict.items())
        self.assertEqual(model.keys(), model_dict.keys())
        self.assertEqual(list(model.values()), list(model_dict.values()))
        self.assertTrue(y_1 in model)
        self.assertFalse(model[y_1] in model)