Example #1
    def test_constraint_types(self):
        x = Parameter(value=-1.0)
        y = Parameter(value=1.0)
        z = Variable()
        model = Model({z: 2 * x * y + 2 * x - x**2 - 2 * y**2})

        # These types are not allowed constraints.
        for relation in [Lt, Gt, Ne]:
            with self.assertRaises(ModelError):
                Fit(model, constraints=[relation(x, y)])

        # Should execute without problems.
        for relation in [Eq, Ge, Le]:
            Fit(model, constraints=[relation(x, y)])

        fit = Fit(model, constraints=[Le(x, y)])
        # Le should be transformed to Ge
        self.assertIs(fit.constraints[0].constraint_type, Ge)

        # Redo the standard test as a Le
        constraints = [
            Le(-y + 1, 0),  # y - 1 >= 0,
            Eq(x**3 - y, 0),  # x**3 - y == 0,
        ]
        std_constraints = [
            Ge(y - 1, 0),  # y - 1 >= 0,
            Eq(x**3 - y, 0),  # x**3 - y == 0,
        ]

        fit = Fit(-model, constraints=constraints)
        std_fit = Fit(-model, constraints=std_constraints)
        self.assertEqual(fit.constraints[0].constraint_type, Ge)
        self.assertEqual(fit.constraints[1].constraint_type, Eq)
        fit_result = fit.execute()
        std_result = std_fit.execute()
        self.assertAlmostEqual(fit_result.value(x), std_result.value(x))
        self.assertAlmostEqual(fit_result.value(y), std_result.value(y))
Example #2
def test_pickle():
    """
    Test the picklability of the built-in objectives.
    """
    # Create test data
    xdata = np.linspace(0, 100, 100)  # From 0 to 100 in 100 steps
    a_vec = np.random.normal(15.0, scale=2.0, size=xdata.shape)
    b_vec = np.random.normal(100, scale=2.0, size=xdata.shape)
    ydata = a_vec * xdata + b_vec  # Points scattered around the line 15 * x + 100

    # Normal symbolic fit
    a = Parameter('a', value=0, min=0.0, max=1000)
    b = Parameter('b', value=0, min=0.0, max=1000)
    x, y = variables('x, y')
    model = Model({y: a * x + b})

    for objective in [VectorLeastSquares, LeastSquares, LogLikelihood, MinimizeModel]:
        if issubclass(objective, BaseIndependentObjective):
            data = {x: xdata}
        else:
            data = {x: xdata, y: ydata, model.sigmas[y]: np.ones_like(ydata)}
        obj = objective(model, data=data)
        new_obj = pickle.loads(pickle.dumps(obj))
        assert FitResults._array_safe_dict_eq(obj.__dict__, new_obj.__dict__)
Example #3
    def test_bounds(self):
        """
        The bounds of an object should always be such that lower < upper.
        :return:
        """
        a = Parameter(value=-2.482092e-01, fixed=True)
        try:
            b = Parameter(value=5.0, min=6.0, max=4.0)
        except ValueError:
            b = Parameter(value=5.0, min=4.0, max=6.0)
        c = Parameter(value=2.219756e+02, fixed=True)
        x = Variable()

        # build the model
        model = Model(a + b * (1 - exp(-c / x)))
        for bounds in model.bounds:
            if None not in bounds:
                # Both bounds are set; they must be ordered correctly.
                lower, upper = bounds
                self.assertGreaterEqual(upper, lower)
Example #4
def test_data_for_constraint():
    """
    Test the signature handling when constraints are at play. Constraints
    should take separate data, but kwargs that are found in neither the
    model nor the constraints should still raise an error.
    """
    A, mu, sig = parameters('A, mu, sig')
    x, y, Y = variables('x, y, Y')

    model = Model({y: A * Gaussian(x, mu=mu, sig=sig)})
    constraint = Model.as_constraint(Y, model, constraint_type=Eq)

    np.random.seed(2)
    xdata = np.random.normal(1.2, 2, 10)
    ydata, xedges = np.histogram(xdata,
                                 bins=int(np.sqrt(len(xdata))),
                                 density=True)

    # Allowed
    fit = Fit(model, x=xdata, y=ydata, Y=2, constraints=[constraint])
    assert isinstance(fit.objective, LeastSquares)
    assert isinstance(fit.minimizer.constraints[0], MinimizeModel)
    fit = Fit(model, x=xdata, y=ydata)
    assert isinstance(fit.objective, LeastSquares)
    fit = Fit(model, x=xdata, objective=LogLikelihood)
    assert isinstance(fit.objective, LogLikelihood)

    # Not allowed
    with pytest.raises(TypeError):
        fit = Fit(model, x=xdata, y=ydata, Y=2)

    with pytest.raises(TypeError):
        fit = Fit(model, x=xdata, y=ydata, Y=2, Z=3, constraints=[constraint])

    with pytest.raises(TypeError):
        fit = Fit(model, x=xdata, y=ydata, objective=LogLikelihood)
Example #5
    def test_read_only_results(self):
        """
        Fit results should be read-only. Let's try to break this!
        """
        xdata = np.linspace(1, 10, 10)
        ydata = 3 * xdata**2

        a = Parameter(3.0, min=2.75)
        b = Parameter(2.0, max=2.75)
        x = Variable('x')
        new = a * x**b

        fit = Fit(new, xdata, ydata)
        fit_result = fit.execute()

        # Break it!
        try:
            fit_result.params = 'hello'
        except AttributeError:
            self.assertTrue(True)  # desired result
        finally:
            self.assertNotEqual(fit_result.params, 'hello')

        try:
            # Bypass the property. This will work, as it sets an instance value of __params.
            fit_result.__params = 'hello'
        except AttributeError:
            self.assertTrue(False)  # undesired result
        finally:
            self.assertNotEqual(fit_result.params, 'hello')
            # The assignment will have succeeded on the instance because we set it from the outside.
            # I must admit I don't fully understand why this is allowed and I don't like it.
            # However, the tests below show that it did not influence the class itself, so
            # fitting still works fine.
            # Assigning to __params creates a *new* instance attribute; the "real"
            # __params instance is called _FitResults__params. See dir(fit_result) and
            # https://www.python.org/dev/peps/pep-0008/#designing-for-inheritance
            self.assertEqual(fit_result.__params, 'hello')

        # Do a second fit and double-check that we do not overwrite something crucial.
        xdata = np.arange(-5, 5, 1)
        ydata = np.arange(-5, 5, 1)
        xx, yy = np.meshgrid(xdata, ydata, sparse=False)
        xdata_coor = np.dstack((xx, yy))

        zdata = 2.5 * xx**2 + 3.0 * yy**2

        a = Parameter(1., max=2.75)
        b = Parameter(5., min=2.75)
        x = Variable()
        y = Variable()
        new = Variable()
        new_model = Model({new: a * x**2 + b * y**2})

        fit_2 = Fit(new_model, x=xx, y=yy, new=zdata)
        fit_result_2 = fit_2.execute()
        self.assertNotAlmostEqual(fit_result.value(a), fit_result_2.value(a))
        self.assertAlmostEqual(fit_result.value(a), 3.0)
        self.assertAlmostEqual(fit_result_2.value(a), 2.5)
        self.assertNotAlmostEqual(fit_result.value(b), fit_result_2.value(b))
        self.assertAlmostEqual(fit_result.value(b), 2.0)
        self.assertAlmostEqual(fit_result_2.value(b), 3.0)
Example #6
from symfit import Poly, variables, parameters, Model, Fit
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

x, y, z = variables('x, y, z')
c1, c2 = parameters('c1, c2')
# Make a polynomial. Note the `as_expr` to make it symfit friendly.
model_dict = {z: Poly({(2, 0): c1, (0, 2): c1, (1, 1): c2}, x, y).as_expr()}
model = Model(model_dict)
print(model)

# Generate example data
x_vec = np.linspace(-5, 5)
y_vec = np.linspace(-10, 10)
xdata, ydata = np.meshgrid(x_vec, y_vec)
zdata = model(x=xdata, y=ydata, c1=1.0, c2=2.0).z
zdata = np.random.normal(zdata, 0.05 * zdata)  # add 5% noise

# Perform the fit
fit = Fit(model, x=xdata, y=ydata, z=zdata)
fit_result = fit.execute()
zfit = model(x=xdata, y=ydata, **fit_result.params).z
print(fit_result)

fig, (ax1, ax2) = plt.subplots(1, 2)
sns.heatmap(zdata, ax=ax1)
sns.heatmap(zfit, ax=ax2)
plt.show()
Example #7
# -*- coding: utf-8 -*-

from symfit import variables, Parameter, Fit, Model
from symfit.distributions import Gaussian
from symfit.contrib.interactive_guess import InteractiveGuess
import numpy as np

x, y, z = variables('x, y, z')
mu_x = Parameter('mu_x', 10)
mu_y = Parameter('mu_y', 10)
sig_x = Parameter('sig_x', 1)
sig_y = Parameter('sig_y', 1)

model = Model({z: Gaussian(x, mu_x, sig_x) * Gaussian(y, mu_y, sig_y)})
x_data = np.linspace(0, 25, 50)
y_data = np.linspace(0, 25, 50)
x_data, y_data = np.meshgrid(x_data, y_data)
x_data = x_data.flatten()
y_data = y_data.flatten()
z_data = model(x=x_data, y=y_data, mu_x=5, sig_x=0.3, mu_y=10, sig_y=1).z

guess = InteractiveGuess(model, x=x_data, y=y_data, z=z_data)
guess.execute()
print(guess)

fit = Fit(model, x=x_data, y=y_data, z=z_data)
fit_result = fit.execute()
print(fit_result)
Example #8
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: MIT
from symfit import Variable, Parameter, Fit, Model
from symfit.contrib.interactive_guess import InteractiveGuess
import numpy as np

x = Variable('x')
y1 = Variable('y1')
y2 = Variable('y2')
k = Parameter('k', 900)
x0 = Parameter('x0', 1.5)

model = {y1: k * (x - x0)**2, y2: x - x0}
model = Model(model)

# Generate example data
x_data = np.linspace(0, 2.5, 50)
data = model(x=x_data, k=1000, x0=1)
y1_data = data.y1
y2_data = data.y2

guess = InteractiveGuess(model, x=x_data, y1=y1_data, y2=y2_data, n_points=250)
guess.execute()
print(guess)

fit = Fit(model, x=x_data, y1=y1_data, y2=y2_data)
fit_result = fit.execute()
print(fit_result)
Example #9
from symfit import parameters, variables, Fit, Piecewise, exp, Eq, Model
import numpy as np
import matplotlib.pyplot as plt

t, y = variables('t, y')
a, b, d, k, t0 = parameters('a, b, d, k, t0')

# Make a piecewise model
y1 = a * t + b
y2 = d * exp(-k * t)
model = Model({y: Piecewise((y1, t <= t0), (y2, t > t0))})

# As a constraint, we demand equality between the derivatives of the two
# models at the switchpoint t0, so the transition is smooth. To do this, we
# substitute t -> t0 into the derivatives and demand equality using `Eq`
constraints = [Eq(y1.diff(t).subs({t: t0}), y2.diff(t).subs({t: t0}))]

# Generate example data
tdata = np.linspace(0, 4., 200)
ydata = model(t=tdata, a=63, b=300, d=2205, k=3, t0=0.65).y
ydata = np.random.normal(ydata, 0.05 * ydata)  # add 5% noise

# Help the fit by bounding the switchpoint between the models and giving initial
# guesses
t0.min = 0.5
t0.max = 0.8
b.value = 320

fit = Fit(model, t=tdata, y=ydata, constraints=constraints)
fit_result = fit.execute()
print(fit_result)
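
# A minimal visualization sketch; matplotlib is imported above, but the
# plotting code itself was not part of this snippet, so this is an assumption
# about how the result would typically be shown.
yfit = model(t=tdata, **fit_result.params).y
plt.scatter(tdata, ydata, s=5, label='data')
plt.plot(tdata, yfit, color='red', label='fit')
plt.legend()
plt.show()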
Example #10
def test_global_fitting():
    """
    In case of shared parameters between the components of the model, `Fit`
    should automatically use `ConstrainedLeastSquares`.
    :return:
    """
    x_1, x_2, y_1, y_2 = variables('x_1, x_2, y_1, y_2')
    y0, a_1, a_2, b_1, b_2 = parameters('y0, a_1, a_2, b_1, b_2')

    # The following vector valued function links all the equations together
    # as stated in the intro.
    model = Model({
        y_1: a_1 * x_1**2 + b_1 * x_1 + y0,
        y_2: a_2 * x_2**2 + b_2 * x_2 + y0,
    })
    assert model.shared_parameters

    # Generate data from this model
    xdata1 = np.linspace(0, 10)
    xdata2 = xdata1[::2]  # Only every other point.

    ydata1, ydata2 = model(x_1=xdata1,
                           x_2=xdata2,
                           a_1=101.3,
                           b_1=0.5,
                           a_2=56.3,
                           b_2=1.1111,
                           y0=10.8)
    # Add some noise to make it appear like real data
    np.random.seed(1)
    ydata1 += np.random.normal(0, 2, size=ydata1.shape)
    ydata2 += np.random.normal(0, 2, size=ydata2.shape)

    xdata = [xdata1, xdata2]
    ydata = [ydata1, ydata2]

    # Guesses
    a_1.value = 100
    a_2.value = 50
    b_1.value = 1
    b_2.value = 1
    y0.value = 10

    fit = Fit(model, x_1=xdata[0], x_2=xdata[1], y_1=ydata[0], y_2=ydata[1])
    assert isinstance(fit.minimizer, BFGS)

    # The next model does not share parameters, but is still a vector
    model = Model({
        y_1: a_1 * x_1**2 + b_1 * x_1,
        y_2: a_2 * x_2**2 + b_2 * x_2,
    })
    fit = Fit(model, x_1=xdata[0], x_2=xdata[1], y_1=ydata[0], y_2=ydata[1])
    assert not model.shared_parameters
    assert isinstance(fit.minimizer, BFGS)

    # Scalar model, still use bfgs.
    model = Model({
        y_1: a_1 * x_1**2 + b_1 * x_1,
    })
    fit = Fit(model, x_1=xdata[0], y_1=ydata[0])
    assert model.shared_parameters is False
    assert isinstance(fit.minimizer, BFGS)
Example #11
    def test_minimize(self):
        """
        Tests maximizing a function with and without constraints, taken from
        the scipy `minimize` tutorial. Compares the symfit result with the
        scipy result.
        https://docs.scipy.org/doc/scipy-0.18.1/reference/tutorial/optimize.html#constrained-minimization-of-multivariate-scalar-functions-minimize
        """
        x = Parameter(value=-1.0)
        y = Parameter(value=1.0)
        # Use a model without an explicit dependent Variable on purpose to test the auto-generation of names.
        model = Model(2 * x * y + 2 * x - x**2 - 2 * y**2)

        constraints = [
            Ge(y - 1, 0),  # y - 1 >= 0,
            Eq(x**3 - y, 0),  # x**3 - y == 0,
        ]

        def func(x, sign=1.0):
            """ Objective function """
            return sign * (2 * x[0] * x[1] + 2 * x[0] - x[0]**2 - 2 * x[1]**2)

        def func_deriv(x, sign=1.0):
            """ Derivative of objective function """
            dfdx0 = sign * (-2 * x[0] + 2 * x[1] + 2)
            dfdx1 = sign * (2 * x[0] - 4 * x[1])
            return np.array([dfdx0, dfdx1])

        cons = ({
            'type': 'eq',
            'fun': lambda x: np.array([x[0]**3 - x[1]]),
            'jac': lambda x: np.array([3.0 * (x[0]**2.0), -1.0])
        }, {
            'type': 'ineq',
            'fun': lambda x: np.array([x[1] - 1]),
            'jac': lambda x: np.array([0.0, 1.0])
        })

        # Unconstrained fit
        res = minimize(func, [-1.0, 1.0],
                       args=(-1.0, ),
                       jac=func_deriv,
                       method='BFGS',
                       options={'disp': False})
        fit = Fit(model=-model)
        self.assertIsInstance(fit.objective, MinimizeModel)
        self.assertIsInstance(fit.minimizer, BFGS)

        fit_result = fit.execute()

        self.assertAlmostEqual(fit_result.value(x) / res.x[0], 1.0, 6)
        self.assertAlmostEqual(fit_result.value(y) / res.x[1], 1.0, 6)

        # Same test, but with constraints in place.
        res = minimize(func, [-1.0, 1.0],
                       args=(-1.0, ),
                       jac=func_deriv,
                       constraints=cons,
                       method='SLSQP',
                       options={'disp': False})

        # With constraints present, Fit defaults to the SLSQP minimizer.
        fit = Fit(-model, constraints=constraints)
        self.assertEqual(fit.constraints[0].constraint_type, Ge)
        self.assertEqual(fit.constraints[1].constraint_type, Eq)
        fit_result = fit.execute()
        self.assertAlmostEqual(fit_result.value(x), res.x[0], 6)
        self.assertAlmostEqual(fit_result.value(y), res.x[1], 6)
Example #12
from symfit import parameters, variables, Fit, Piecewise, Eq, Model
import numpy as np
import matplotlib.pyplot as plt

x, y = variables('x, y')
a, b, x0 = parameters('a, b, x0')

# Make a piecewise model
y1 = x**2 - a * x
y2 = a * x + b
model = Model({y: Piecewise((y1, x <= x0), (y2, x > x0))})

# As a constraint, we demand equality between the two models at the point x0
# to do this, we substitute x -> x0 and demand equality using `Eq`
constraints = [
    Eq(y1.subs({x: x0}), y2.subs({x: x0}))
]
# Generate example data
xdata = np.linspace(-4, 4., 50)
ydata = model(x=xdata, a=0.0, b=1.0, x0=1.0).y
np.random.seed(2)
ydata = np.random.normal(ydata, 0.5)  # add noise

# Help the fit by bounding the switchpoint between the models
x0.min = 0.8
x0.max = 1.2

fit = Fit(model, x=xdata, y=ydata, constraints=constraints)
fit_result = fit.execute()
print(fit_result)
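
# A minimal visualization sketch; matplotlib is imported above, but the
# plotting code itself was not part of this snippet, so this is an assumption.
yfit = model(x=xdata, **fit_result.params).y
plt.scatter(xdata, ydata, s=5, label='data')
plt.plot(xdata, yfit, color='red', label='fit')
plt.legend()
plt.show()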
Example #13
def test_global_fitting():
    """
    Test a global fitting scenario with datasets of unequal length. In this
    scenario, a quadratic equation is fitted where the constant term is shared
    between the datasets (e.g. identical background noise).
    """
    x_1, x_2, y_1, y_2 = variables('x_1, x_2, y_1, y_2')
    y0, a_1, a_2, b_1, b_2 = parameters('y0, a_1, a_2, b_1, b_2')

    # The following vector valued function links all the equations together
    # as stated in the intro.
    model = Model({
        y_1: a_1 * x_1**2 + b_1 * x_1 + y0,
        y_2: a_2 * x_2**2 + b_2 * x_2 + y0,
    })

    # Generate data from this model
    xdata1 = np.linspace(0, 10)
    xdata2 = xdata1[::2]  # Make the sets of unequal size

    ydata1, ydata2 = model(x_1=xdata1,
                           x_2=xdata2,
                           a_1=101.3,
                           b_1=0.5,
                           a_2=56.3,
                           b_2=1.1111,
                           y0=10.8)
    # Add some noise to make it appear like real data
    np.random.seed(1)
    ydata1 += np.random.normal(0, 2, size=ydata1.shape)
    ydata2 += np.random.normal(0, 2, size=ydata2.shape)

    xdata = [xdata1, xdata2]
    ydata = [ydata1, ydata2]

    # Guesses
    a_1.value = 100
    a_2.value = 50
    b_1.value = 1
    b_2.value = 1
    y0.value = 10

    eval_jac = model.eval_jacobian(x_1=xdata1,
                                   x_2=xdata2,
                                   a_1=101.3,
                                   b_1=0.5,
                                   a_2=56.3,
                                   b_2=1.1111,
                                   y0=10.8)
    assert len(eval_jac) == 2
    for comp in eval_jac:
        assert len(comp) == len(model.params)

    sigma_y = np.concatenate((np.ones(20), [2., 4., 5, 7, 3]))

    fit = Fit(model,
              x_1=xdata[0],
              x_2=xdata[1],
              y_1=ydata[0],
              y_2=ydata[1],
              sigma_y_2=sigma_y)
    fit_result = fit.execute()

    assert fit_result.value(y0) == pytest.approx(1.061892e+01, 1e-03)
    assert fit_result.value(a_1) == pytest.approx(1.013269e+02, 1e-03)
    assert fit_result.value(a_2) == pytest.approx(5.625694e+01, 1e-03)
    assert fit_result.value(b_1) == pytest.approx(3.362240e-01, 1e-03)
    assert fit_result.value(b_2) == pytest.approx(1.565253e+00, 1e-03)
Example #14
def test_trustconstr():
    """
    Solve the standard constrained example from
    https://docs.scipy.org/doc/scipy-0.18.1/reference/tutorial/optimize.html#constrained-minimization-of-multivariate-scalar-functions-minimize
    using the trust-constr method.
    """
    def func(x, sign=1.0):
        """ Objective function """
        return sign * (2 * x[0] * x[1] + 2 * x[0] - x[0]**2 - 2 * x[1]**2)

    def func_jac(x, sign=1.0):
        """ Derivative of objective function """
        dfdx0 = sign * (-2 * x[0] + 2 * x[1] + 2)
        dfdx1 = sign * (2 * x[0] - 4 * x[1])
        return np.array([dfdx0, dfdx1])

    def func_hess(x, sign=1.0):
        """ Hessian of objective function """
        dfdx2 = sign * (-2)
        dfdxdy = sign * 2
        dfdy2 = sign * (-4)
        return np.array([[dfdx2, dfdxdy], [dfdxdy, dfdy2]])

    def cons_f(x):
        return [x[1] - 1, x[0]**3 - x[1]]

    def cons_J(x):
        return [[0, 1], [3 * x[0]**2, -1]]

    def cons_H(x, v):
        """ Hessians of the constraints, contracted with the multipliers v. """
        return v[0] * np.zeros((2, 2)) + v[1] * np.array([[6 * x[0], 0], [0, 0]])

    # Unconstrained fit
    res = minimize(func, [-1.0, 1.0],
                   args=(-1.0, ),
                   jac=func_jac,
                   hess=func_hess,
                   method='trust-constr')
    assert res.x == pytest.approx([2, 1])

    # Constrained fit
    nonlinear_constraint = NonlinearConstraint(cons_f,
                                               0, [np.inf, 0],
                                               jac=cons_J,
                                               hess=cons_H)
    res_constr = minimize(func, [-1.0, 1.0],
                          args=(-1.0, ),
                          tol=1e-15,
                          jac=func_jac,
                          hess=func_hess,
                          method='trust-constr',
                          constraints=[nonlinear_constraint])
    assert res_constr.x == pytest.approx([1, 1])

    # Symfit equivalent code
    x = Parameter('x', value=-1.0)
    y = Parameter('y', value=1.0)
    z = Variable('z')
    model = Model({z: 2 * x * y + 2 * x - x**2 - 2 * y**2})

    # Unconstrained fit first, see if we get the known result.
    fit = Fit(-model, minimizer=TrustConstr)
    fit_result = fit.execute()
    assert list(fit_result.params.values()) == pytest.approx([2, 1])

    # Now we are ready for the constrained fit.
    constraints = [
        Le(-y + 1, 0),  # y - 1 >= 0,
        Eq(x**3 - y, 0),  # x**3 - y == 0,
    ]
    fit = Fit(-model, constraints=constraints, minimizer=TrustConstr)
    fit_result = fit.execute(tol=1e-15)

    # Test if the constrained results are equal
    assert list(fit_result.params.values()) == pytest.approx(res_constr.x)
Example #15
def test_LogLikelihood():
    """
    Tests if the LogLikelihood objective gives the right shapes of output by
    comparing with its analytical equivalent.
    """
    # TODO: update these tests to use indexed variables in the future
    a, b = parameters('a, b')
    i = Idx('i', 100)
    x, y = variables('x, y')
    pdf = Exp(x, 1 / a) * Exp(x, b)

    np.random.seed(10)
    xdata = np.random.exponential(3.5, 100)

    # We use minus loglikelihood for the model, because the objective was
    # designed to find the maximum when used with a *minimizer*, so it has
    # opposite sign. Also test MinimizeModel at the same time.
    logL_model = Model({y: pdf})
    logL_exact = Model({y: -FlattenSum(log(pdf), i)})
    logL_numerical = LogLikelihood(logL_model, {x: xdata, y: None})
    logL_minmodel = MinimizeModel(logL_exact, data={x: xdata, y: None})

    # Test model jacobian and hessian shape
    eval_exact = logL_exact(x=xdata, a=2, b=3)
    jac_exact = logL_exact.eval_jacobian(x=xdata, a=2, b=3)
    hess_exact = logL_exact.eval_hessian(x=xdata, a=2, b=3)
    eval_minimizemodel = logL_minmodel(a=2, b=3)
    jac_minimizemodel = logL_minmodel.eval_jacobian(a=2, b=3)
    hess_minimizemodel = logL_minmodel.eval_hessian(a=2, b=3)
    eval_numerical = logL_numerical(a=2, b=3)
    jac_numerical = logL_numerical.eval_jacobian(a=2, b=3)
    hess_numerical = logL_numerical.eval_hessian(a=2, b=3)

    # TODO: These shapes should not have the ones! This is due to the current
    # convention that scalars should be returned as a 1d array by Model's.
    assert eval_exact[0].shape == (1, )
    assert jac_exact[0].shape == (2, 1)
    assert hess_exact[0].shape == (2, 2, 1)
    # Test if identical to MinimizeModel
    assert eval_exact[0] == pytest.approx(eval_minimizemodel)
    assert jac_exact[0] == pytest.approx(jac_minimizemodel)
    assert hess_exact[0] == pytest.approx(hess_minimizemodel)

    # Test if these two models have the same call, jacobian, and hessian.
    # Since models always have components as their first dimension, we have
    # to slice that away.
    assert eval_exact.y == pytest.approx(eval_numerical)
    assert isinstance(eval_numerical, float)
    assert isinstance(eval_exact.y[0], float)
    assert np.squeeze(jac_exact[0], axis=-1) == pytest.approx(jac_numerical)
    assert isinstance(jac_numerical, np.ndarray)
    assert np.squeeze(hess_exact[0], axis=-1) == pytest.approx(hess_numerical)
    assert isinstance(hess_numerical, np.ndarray)

    fit = Fit(logL_exact, x=xdata, objective=MinimizeModel)
    fit_exact_result = fit.execute()
    fit = Fit(logL_model, x=xdata, objective=LogLikelihood)
    fit_num_result = fit.execute()
    assert fit_exact_result.value(a) == pytest.approx(fit_num_result.value(a))
    assert fit_exact_result.value(b) == pytest.approx(fit_num_result.value(b))
    assert fit_exact_result.stdev(a) == pytest.approx(fit_num_result.stdev(a))
    assert fit_exact_result.stdev(b) == pytest.approx(fit_num_result.stdev(b))
Example #16
def test_interdependency():
    a, b = parameters('a, b')
    x, y, z = variables('x, y, z')
    model_dict = {
        y: a**3 * x + b**2,
        z: y**2 + a * b
    }
    callable_model = CallableModel(model_dict)
    assert callable_model.independent_vars == [x]
    assert callable_model.interdependent_vars == [y]
    assert callable_model.dependent_vars == [z]
    assert callable_model.params == [a, b]
    assert callable_model.connectivity_mapping == {y: {a, b, x}, z: {a, b, y}}
    assert callable_model(x=3, a=1, b=2) == pytest.approx(np.atleast_2d([7, 51]).T)
    for var, func in callable_model.vars_as_functions.items():
        # Check that the arguments of every generated function match that
        # variable's connectivity mapping, comparing by name.
        str_con_map = set(x.name for x in callable_model.connectivity_mapping[var])
        str_args = set(str(x.__class__) if isinstance(x, Function) else x.name
                       for x in func.args)
        assert str_con_map == str_args

    jac_model = jacobian_from_model(callable_model)
    assert jac_model.params == [a, b]
    assert jac_model.dependent_vars == [D(z, a), D(z, b), z]
    assert jac_model.interdependent_vars == [D(y, a), D(y, b), y]
    assert jac_model.independent_vars == [x]
    for p1, p2 in zip_longest(jac_model.__signature__.parameters, [x, a, b]):
        assert str(p1) == str(p2)
    # The connectivity of jac_model should be that from its own components
    # plus that of the model. The latter is needed to properly compute the
    # Hessian.
    jac_con_map = {D(y, a): {a, x},
                   D(y, b): {b},
                   D(z, a): {b, y, D(y, a)},
                   D(z, b): {a, y, D(y, b)},
                   y: {a, b, x}, z: {a, b, y}}
    assert jac_model.connectivity_mapping == jac_con_map
    jac_model_dict = {D(y, a): 3 * a**2 * x,
                      D(y, b): 2 * b,
                      D(z, a): b + 2 * y * D(y, a),
                      D(z, b): a + 2 * y * D(y, b),
                      y: callable_model[y], z: callable_model[z]}
    assert jac_model.model_dict == jac_model_dict
    for var, func in jac_model.vars_as_functions.items():
        str_con_map = set(x.name for x in jac_model.connectivity_mapping[var])
        str_args = set(str(x.__class__) if isinstance(x, Function) else x.name
                       for x in func.args)
        assert str_con_map == str_args

    hess_model = hessian_from_model(callable_model)
    # Result according to Mathematica
    hess_as_dict = {
        D(y, (a, 2)): 6 * a * x,
        D(y, a, b): 0,
        D(y, b, a): 0,
        D(y, (b, 2)): 2,
        D(z, (a, 2)): 2 * D(y, a)**2 + 2 * y * D(y, (a, 2)),
        D(z, a, b): 1 + 2 * D(y, b) * D(y, a) + 2 * y * D(y, a, b),
        D(z, b, a): 1 + 2 * D(y, b) * D(y, a) + 2 * y * D(y, a, b),
        D(z, (b, 2)): 2 * D(y, b)**2 + 2 * y * D(y, (b, 2)),
        D(y, a): 3 * a ** 2 * x,
        D(y, b): 2 * b,
        D(z, a): b + 2 * y * D(y, a),
        D(z, b): a + 2 * y * D(y, b),
        y: callable_model[y], z: callable_model[z]
    }
    assert dict(hess_model) == hess_as_dict

    assert hess_model.params == [a, b]
    assert hess_model.dependent_vars == [D(z, (a, 2)), D(z, a, b), D(z, (b, 2)), D(z, b, a), D(z, a), D(z, b), z]
    assert hess_model.interdependent_vars == [D(y, (a, 2)), D(y, a), D(y, b), y]
    assert hess_model.independent_vars == [x]

    model = Model(model_dict)
    assert model(x=3, a=1, b=2) == pytest.approx(np.atleast_2d([7, 51]).T)
    assert model.eval_jacobian(x=3, a=1, b=2) == pytest.approx(np.array([[[9], [4]], [[128], [57]]]))
    assert model.eval_hessian(x=3, a=1, b=2) == pytest.approx(np.array([[[[18], [0]], [[0], [2]]],[[[414], [73]], [[73], [60]]]]))

    assert model.__signature__ == model.jacobian_model.__signature__
    assert model.__signature__ == model.hessian_model.__signature__
Example #17
    def calibrate(self, corr_type="gamma_corr"):
        """
        Get the calibration matrix.
        :param corr_type: Type of fit/correction. Default is gamma correction.
        :return: Calibration matrix.
        """

        if corr_type != "gamma_corr":
            # here could be setting for other fit types
            raise ValueError(
                'Correction type {} is not recognized. '
                'Possible values are: "gamma_corr"'.format(corr_type))

        self.corr_type = corr_type

        # fit variables
        r, g, b, l, m, s = variables('r, g, b, l, m, s')

        # define models
        model = None

        if self.corr_type == "gamma_corr":
            a_0l, a_0m, a_0s = parameters('a_0l, a_0m, a_0s',
                                          min=0.0,
                                          value=0.0)
            a_lr, a_lg, a_lb, a_mr, a_mg, a_mb, a_sr, a_sg, a_sb = \
                parameters('a_lr, a_lg, a_lb, a_mr, a_mg, a_mb, '
                           'a_sr, a_sg, a_sb', min=0.0, value=1.0)
            gamma_r, gamma_g, gamma_b = parameters('gamma_r, gamma_g, gamma_b',
                                                   value=1.5)

            model = Model({
                l:
                a_0l + a_lr * r**gamma_r + a_lg * g**gamma_g +
                a_lb * b**gamma_b,
                m:
                a_0m + a_mr * r**gamma_r + a_mg * g**gamma_g +
                a_mb * b**gamma_b,
                s:
                a_0s + a_sr * r**gamma_r + a_sg * g**gamma_g +
                a_sb * b**gamma_b,
            })

        # get values for variables
        r, g, b = self._rgb_mat
        l, m, s = self._lms_mat

        # avoid division by zero errors
        min_val = 1e-8
        r[r == 0] = min_val
        g[g == 0] = min_val
        b[b == 0] = min_val

        fit = Fit(model, r=r, g=g, b=b, l=l, m=m, s=s)
        fit_res = fit.execute()
        p_r = fit_res.params

        cm = np.ones(1)
        if self.corr_type == "gamma_corr":
            cm = np.asarray([[p_r["a_0l"], p_r["a_0m"], p_r["a_0s"]],
                             [p_r["a_lr"], p_r["a_lg"], p_r["a_lb"]],
                             [p_r["a_mr"], p_r["a_mg"], p_r["a_mb"]],
                             [p_r["a_sr"], p_r["a_sg"], p_r["a_sb"]],
                             [p_r["gamma_r"], p_r["gamma_g"], p_r["gamma_b"]]])

        self.calibration_matrix = cm

        inv_mat = np.zeros((5, 3))
        inv_mat[0] = cm[0]
        inv_mat[1:4] = np.linalg.inv(cm[1:4])
        inv_mat[4] = np.asarray([1. / cm[4][0], 1. / cm[4][1], 1. / cm[4][2]])

        self.inv_calibration_matrix = inv_mat
Example #18
import numpy as np
from symfit import Variable, Parameter, Fit, Model, sqrt

t_data = np.array([1.4, 2.1, 2.6, 3.0, 3.3])
h_data = np.array([10, 20, 30, 40, 50])

# We now define our model
h = Variable('h')
t = Variable('t')
g = Parameter('g')

t_model = Model({t: sqrt(2 * h / g)})

fit = Fit(t_model, h=h_data, t=t_data)
fit_result = fit.execute()
print(fit_result)

# Make an array from 0 to 50 in 1000 steps
h_range = np.linspace(0, 50, 1000)
fit_data = t_model(h=h_range, g=fit_result.value(g))
t_fit = fit_data.t
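
# A minimal visualization sketch (an assumption: the original example computes
# t_fit but the plotting code was not included in this snippet).
import matplotlib.pyplot as plt
plt.scatter(h_data, t_data, label='data')
plt.plot(h_range, t_fit, color='red', label='fit')
plt.xlabel('h')
plt.ylabel('t')
plt.legend()
plt.show()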

#---------------------------------------------------

t_data = np.array([1.4, 2.1, 2.6, 3.0, 3.3])
h_data = np.array([10, 20, 30, 40, 50])
n = np.array([5, 3, 8, 15, 30])
sigma = 0.2
sigma_t = sigma / np.sqrt(n)

# We now define our model
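# (A hedged completion: the snippet breaks off here. Presumably the example
# continues by reusing the same model, but now weighting the fit with the
# measurement uncertainties via the sigma_t keyword, as in Example #13.)
h = Variable('h')
t = Variable('t')
g = Parameter('g')

t_model = Model({t: sqrt(2 * h / g)})

fit = Fit(t_model, h=h_data, t=t_data, sigma_t=sigma_t)
fit_result = fit.execute()
print(fit_result)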
Example #19
"""
Created on Mon Dec  7 11:28:58 2015

@author: peterkroon
"""
from symfit import Variable, Parameter, exp, Fit, Model
from symfit.contrib.interactive_guess import InteractiveGuess
import numpy as np


def distr(x, k, x0):
    kbT = 4.11
    return exp(-k * (x - x0)**2 / kbT)


x = Variable('x')
y = Variable('y')
k = Parameter('k', 900)
x0 = Parameter('x0', 1.5)

model = Model({y: distr(x, k, x0)})
x_data = np.linspace(0, 2.5, 50)
y_data = model(x=x_data, k=1000, x0=1).y

guess = InteractiveGuess(model, x=x_data, y=y_data, n_points=150)
guess.execute()
print(guess)

fit = Fit(model, x=x_data, y=y_data)
fit_result = fit.execute(maxiter=1000)
print(fit_result)
Example #20
import numpy as np
from symfit import variables, parameters, Fit, exp, Model
from symfit.core.objectives import LogLikelihood

# Draw samples from two exponential distributions
np.random.seed(42)
data1 = np.random.exponential(5.5, 1000)
data2 = np.random.exponential(6, 2000)

# Define the model for an exponential distribution (numpy style)
a, b = parameters('a, b')
x1, y1, x2, y2 = variables('x1, y1, x2, y2')
model = Model({y1: (1 / a) * exp(-x1 / a), y2: (1 / b) * exp(-x2 / b)})
print(model)

fit = Fit(model, x1=data1, x2=data2, objective=LogLikelihood)
fit_result = fit.execute()
print(fit_result)

# Instead, we could also fit with only one parameter to see which works best
model = Model({y1: (1 / a) * exp(-x1 / a), y2: (1 / a) * exp(-x2 / a)})

fit = Fit(model, x1=data1, x2=data2, objective=LogLikelihood)
fit_result = fit.execute()
print(fit_result)