Example #1
def test_pickle():
    """
    Make sure models can be pickled and are preserved by a pickle round-trip.
    """
    a, b = parameters('a, b')
    x, y = variables('x, y')
    exact_model = Model({y: a * x ** b})
    constraint = Model.as_constraint(Eq(a, b), exact_model)
    num_model = CallableNumericalModel(
        {y: a * x ** b}, independent_vars=[x], params=[a, b]
    )
    connected_num_model = CallableNumericalModel(
        {y: a * x ** b}, connectivity_mapping={y: {x, a, b}}
    )
    # Test if lsoda args and kwargs are pickled too
    ode_model = ODEModel({D(y, x): a * x + b}, {x: 0.0}, 3, 4, some_kwarg=True)

    models = [exact_model, constraint, num_model, ode_model, connected_num_model]
    for model in models:
        new_model = pickle.loads(pickle.dumps(model))
        # Compare signatures
        assert model.__signature__ == new_model.__signature__
        # Trigger the cached vars, since we compare the full __dict__s below.
        model.vars
        new_model.vars
        # Explicitly make sure the connectivity mapping is identical.
        assert model.connectivity_mapping == new_model.connectivity_mapping
        if not isinstance(model, ODEModel):
            model.function_dict
            model.vars_as_functions
            new_model.function_dict
            new_model.vars_as_functions
        assert model.__dict__ == new_model.__dict__
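Note: the property being tested is a byte-level round trip. A minimal sketch, assuming the usual top-level symfit imports and the standard-library pickle module, of checking that a restored model evaluates identically:

import pickle

import numpy as np
from symfit import Model, parameters, variables

a, b = parameters('a, b')
x, y = variables('x, y')
model = Model({y: a * x ** b})

# Serialize to bytes and back, then compare evaluations on the same input.
restored = pickle.loads(pickle.dumps(model))
xdata = np.linspace(1, 5, 10)
assert np.allclose(model(x=xdata, a=2.0, b=3.0).y,
                   restored(x=xdata, a=2.0, b=3.0).y)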
Example #2
    def test_global_fitting(self):
        """
        Test a global fitting scenario with datasets of unequal length. In this
        scenario, a quadratic equation is fitted where the constant term is
        shared between the datasets (e.g. identical background noise).
        """
        x_1, x_2, y_1, y_2 = variables('x_1, x_2, y_1, y_2')
        y0, a_1, a_2, b_1, b_2 = parameters('y0, a_1, a_2, b_1, b_2')

        # The following vector valued function links all the equations together
        # as stated in the intro.
        model = Model({
            y_1: a_1 * x_1**2 + b_1 * x_1 + y0,
            y_2: a_2 * x_2**2 + b_2 * x_2 + y0,
        })

        # Generate data from this model
        xdata1 = np.linspace(0, 10)
        xdata2 = xdata1[::2]  # Make the sets of unequal size

        ydata1, ydata2 = model(x_1=xdata1, x_2=xdata2, a_1=101.3, b_1=0.5,
                               a_2=56.3, b_2=1.1111, y0=10.8)
        # Add some noise to make it appear like real data
        np.random.seed(1)
        ydata1 += np.random.normal(0, 2, size=ydata1.shape)
        ydata2 += np.random.normal(0, 2, size=ydata2.shape)

        xdata = [xdata1, xdata2]
        ydata = [ydata1, ydata2]

        # Guesses
        a_1.value = 100
        a_2.value = 50
        b_1.value = 1
        b_2.value = 1
        y0.value = 10

        eval_jac = model.eval_jacobian(x_1=xdata1, x_2=xdata2, a_1=101.3,
                                       b_1=0.5, a_2=56.3, b_2=1.1111, y0=10.8)
        self.assertEqual(len(eval_jac), 2)
        for comp in eval_jac:
            self.assertEqual(len(comp), len(model.params))

        sigma_y = np.concatenate((np.ones(20), [2., 4., 5, 7, 3]))

        fit = Fit(model, x_1=xdata[0], x_2=xdata[1],
                  y_1=ydata[0], y_2=ydata[1], sigma_y_2=sigma_y)
        fit_result = fit.execute()

        self.assertAlmostEqual(fit_result.value(y0), 1.061892e+01, 3)
        self.assertAlmostEqual(fit_result.value(a_1), 1.013269e+02, 3)
        self.assertAlmostEqual(fit_result.value(a_2), 5.625694e+01, 3)
        self.assertAlmostEqual(fit_result.value(b_1), 3.362240e-01, 3)
        self.assertAlmostEqual(fit_result.value(b_2), 1.565253e+00, 3)
Example #3
    def test_order(self):
        """
        The model has to behave like an OrderedDict. This is of the utmost importance!
        """
        x, y_1, y_2 = variables('x, y_1, y_2')
        a, b = parameters('a, b')

        model_dict = {y_2: a * x**2, y_1: 2 * x * b}
        model = Model(model_dict)

        self.assertEqual(model.dependent_vars, list(model.keys()))
Example #4
    def test_model_as_dict(self):
        x, y_1, y_2 = variables('x, y_1, y_2')
        a, b = parameters('a, b')

        model_dict = OrderedDict([(y_1, a * x**2), (y_2, 2 * x * b)])
        model = Model(model_dict)

        self.assertEqual(id(model[y_1]), id(model_dict[y_1]))
        self.assertEqual(id(model[y_2]), id(model_dict[y_2]))
        self.assertEqual(len(model), len(model_dict))
        self.assertEqual(model.items(), model_dict.items())
        self.assertEqual(model.keys(), model_dict.keys())
        self.assertEqual(list(model.values()), list(model_dict.values()))
        self.assertTrue(y_1 in model)
        self.assertFalse(model[y_1] in model)
Example #5
def test_model_as_dict():
    x, y_1, y_2 = variables('x, y_1, y_2')
    a, b = parameters('a, b')

    model_dict = OrderedDict([(y_1, a * x**2), (y_2, 2 * x * b)])
    model = Model(model_dict)

    assert model[y_1] is model_dict[y_1]
    assert model[y_2] is model_dict[y_2]
    assert len(model) == len(model_dict)
    assert model.items() == model_dict.items()
    assert model.keys() == model_dict.keys()
    assert list(model.values()) == list(model_dict.values())
    assert y_1 in model
    assert model[y_1] not in model
Example #6
    def test_data_for_constraint(self):
        """
        Test the signature handling when constraints are at play. Constraints
        should take separate data, but kwargs that are found in neither the
        model nor the constraints should still raise an error.
        """
        A, mu, sig = parameters('A, mu, sig')
        x, y, Y = variables('x, y, Y')

        model = Model({y: A * Gaussian(x, mu=mu, sig=sig)})
        constraint = Model.as_constraint(Y, model, constraint_type=Eq)

        np.random.seed(2)
        xdata = np.random.normal(1.2, 2, 10)
        ydata, xedges = np.histogram(xdata, bins=int(np.sqrt(len(xdata))),
                                     density=True)

        # Allowed
        fit = Fit(model, x=xdata, y=ydata, Y=2, constraints=[constraint])
        fit = Fit(model, x=xdata, y=ydata)
        fit = Fit(model, x=xdata, objective=LogLikelihood)

        # Not allowed
        with self.assertRaises(TypeError):
            fit = Fit(model, x=xdata, y=ydata, Y=2)
        with self.assertRaises(TypeError):
            fit = Fit(model, x=xdata, y=ydata, Y=2, Z=3, constraints=[constraint])
Example #7
def test_fixed_parameters():
    """
    Make sure fixed parameters don't change on fitting
    """
    a, b, c, d = parameters('a, b, c, d')
    x, y = variables('x, y')

    c.value = 4.0
    a.min, a.max = 1.0, 5.0  # Bounds are needed for DifferentialEvolution
    b.min, b.max = 1.0, 5.0
    c.min, c.max = 1.0, 5.0
    d.min, d.max = 1.0, 5.0
    c.fixed = True

    model = Model({y: a * exp(-(x - b)**2 / (2 * c**2)) + d})
    # Generate data
    xdata = np.linspace(0, 100)
    ydata = model(xdata, a=2, b=3, c=2, d=2).y

    for minimizer in subclasses(BaseMinimizer):
        if minimizer is ChainedMinimizer:
            continue
        fit = Fit(model, x=xdata, y=ydata, minimizer=minimizer)
        fit_result = fit.execute()
        # Should still be 4.0, not 2.0!
        assert 4.0 == fit_result.params['c']
Example #8
def test_fixed_parameters_2():
    """
    Make sure parameter boundaries are respected
    """
    x = Parameter('x', min=1)
    y = Variable('y')
    model = Model({y: x**2})

    bounded_minimizers = list(subclasses(BoundedMinimizer))
    for minimizer in bounded_minimizers:
        if minimizer is MINPACK:
            # Not a MINPACK-able problem, because the model has only a parameter.
            continue
        fit = Fit(model, minimizer=minimizer)
        assert isinstance(fit.objective, MinimizeModel)
        if minimizer is DifferentialEvolution:
            # Also needs a max
            x.max = 10
            fit_result = fit.execute()
            x.max = None
        else:
            fit_result = fit.execute()
            assert fit_result.value(x) >= 1.0
            assert fit_result.value(x) <= 2.0
        assert fit.minimizer.bounds == [(1, None)]
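Note: a minimal sketch of the bound propagation this test relies on, assuming the top-level symfit names and symfit's LBFGSB minimizer (both assumptions, not taken from the test above): bounds declared on a Parameter are handed to a bounded minimizer as-is.

from symfit import Fit, Model, Parameter, Variable
from symfit.core.minimizers import LBFGSB

x = Parameter('x', value=3, min=1, max=10)
y = Variable('y')
model = Model({y: x**2})

# Without data, the objective minimizes the model itself, as asserted above.
fit = Fit(model, minimizer=LBFGSB)
assert fit.minimizer.bounds == [(1, 10)]  # Parameter bounds, passed through
fit_result = fit.execute()
assert 1.0 <= fit_result.value(x) <= 10.0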
Example #9
    def test_pickle(self):
        """
        Make sure models can be pickled and are preserved by a pickle round-trip.
        """
        a, b = parameters('a, b')
        x, y = variables('x, y')
        exact_model = Model({y: a * x ** b})
        constraint = Model.as_constraint(Eq(a, b), exact_model)
        num_model = CallableNumericalModel(
            {y: a * x ** b}, independent_vars=[x], params=[a, b]
        )
        connected_num_model = CallableNumericalModel(
            {y: a * x ** b}, connectivity_mapping={y: {x, a, b}}
        )
        # Test if lsoda args and kwargs are pickled too
        ode_model = ODEModel({D(y, x): a * x + b}, {x: 0.0}, 3, 4, some_kwarg=True)

        models = [exact_model, constraint, num_model, ode_model,
                  connected_num_model]
        for model in models:
            new_model = pickle.loads(pickle.dumps(model))
            # Compare signatures
            self.assertEqual(model.__signature__, new_model.__signature__)
            # Trigger the cached vars, since we compare the full __dict__s below.
            model.vars
            new_model.vars
            # Explicitly make sure the connectivity mapping is identical.
            self.assertEqual(model.connectivity_mapping,
                             new_model.connectivity_mapping)
            if not isinstance(model, ODEModel):
                model.function_dict
                model.vars_as_functions
                new_model.function_dict
                new_model.vars_as_functions
            self.assertEqual(model.__dict__, new_model.__dict__)
Example #10
    def __init__(self, bounds):
        super(OneComponentAssociationModel, self).__init__(bounds)
        k1 = self.make_parameter("k1")
        t = self.make_variable("t")
        y = self.make_variable("y")

        self.sf_model = Model({y: (1 - exp(-k1 * t))})
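Note: the expression is the textbook one-component association curve. It solves dy/dt = k1 * (1 - y) with y(0) = 0: separating variables gives -ln(1 - y) = k1 * t, and hence y(t) = 1 - exp(-k1 * t).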
Example #11
    def test_known_solution(self):
        p, c1 = parameters('p, c1')
        y, t = variables('y, t')
        p.value = 3.0

        model_dict = {
            D(y, t): - p * y,
        }

        # Let's say we know the exact solution to this problem
        sol = Model({y: exp(- p * t)})

        # Generate some data
        tdata = np.linspace(0, 3, 10001)
        ydata = sol(t=tdata, p=3.22)[0]
        ydata += np.random.normal(0, 0.005, ydata.shape)

        ode_model = ODEModel(model_dict, initial={t: 0.0, y: ydata[0]})
        fit = Fit(ode_model, t=tdata, y=ydata)
        ode_result = fit.execute()

        c1.value = ydata[0]
        fit = Fit(sol, t=tdata, y=ydata)
        fit_result = fit.execute()

        self.assertAlmostEqual(ode_result.value(p) / fit_result.value(p), 1, 2)
        self.assertAlmostEqual(ode_result.r_squared / fit_result.r_squared, 1, 4)
        self.assertAlmostEqual(ode_result.stdev(p) / fit_result.stdev(p), 1, 3)
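Note: the comparison is meaningful because the ODE has a known closed form. Separating variables in dy/dt = -p * y with y(0) = 1 gives y(t) = exp(-p * t), which is exactly the `sol` model, so the numerically integrated ODEModel and the exact solution fitted to the same data should agree up to the tolerances asserted above.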
Example #12
    def test_simple_kinetics(self):
        """
        Simple kinetics data to test fitting
        """
        tdata = np.array([10, 26, 44, 70, 120])
        adata = 10e-4 * np.array([44, 34, 27, 20, 14])
        a, b, t = variables('a, b, t')
        k, a0 = parameters('k, a0')
        k.value = 0.01
        # Use a fixed constant for a0 instead of fitting it as a Parameter.
        a0 = 54 * 10e-4

        model_dict = {
            D(a, t): - k * a**2,
            D(b, t): k * a**2,
        }

        ode_model = ODEModel(model_dict, initial={t: 0.0, a: a0, b: 0.0})

        # Analytical solution
        model = Model({a: 1 / (k * t + 1 / a0)})
        fit = Fit(model, t=tdata, a=adata)
        fit_result = fit.execute()

        fit = Fit(ode_model, t=tdata, a=adata, b=None, minimizer=MINPACK)
        ode_result = fit.execute()
        self.assertAlmostEqual(ode_result.value(k) / fit_result.value(k), 1.0, 4)
        self.assertAlmostEqual(ode_result.stdev(k) / fit_result.stdev(k), 1.0, 4)
        self.assertAlmostEqual(ode_result.r_squared / fit_result.r_squared, 1, 4)

        fit = Fit(ode_model, t=tdata, a=adata, b=None)
        ode_result = fit.execute()
        self.assertAlmostEqual(ode_result.value(k) / fit_result.value(k), 1.0, 4)
        self.assertAlmostEqual(ode_result.stdev(k) / fit_result.stdev(k), 1.0, 4)
        self.assertAlmostEqual(ode_result.r_squared / fit_result.r_squared, 1, 4)
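Note: the analytical solution follows by separating variables in da/dt = -k * a**2: integrating da / a**2 = -k dt gives -1/a = -k*t + C, and the initial condition a(0) = a0 fixes C = -1/a0, so a(t) = 1 / (k*t + 1/a0). Both fits above should therefore recover the same k, which is what the ratio asserts check.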
Example #13
    def minimize_fixture(self):
        """
        Set up the parameters, model, and constraints for the minimization fits.

        These tests used to purposefully use unnamed variables, but this has
        been changed since that feature will be deprecated in a future version
        of symfit.
        """
        x = Parameter('x', value=-1.0)
        y = Parameter('y', value=1.0)
        self.x = x
        self.y = y
        self.model = Model(2 * x * y + 2 * x - x ** 2 - 2 * y ** 2)

        self.constraints = [
            Ge(y - 1, 0),  # y - 1 >= 0,
            Eq(x**3 - y, 0),  # x**3 - y == 0,
        ]

        self.cons = (
            {'type': 'eq',
             'fun': lambda x: np.array([x[0]**3 - x[1]]),
             'jac': lambda x: np.array([3.0 * (x[0]**2.0), -1.0])},
            {'type': 'ineq',
             'fun': lambda x: np.array([x[1] - 1]),
             'jac': lambda x: np.array([0.0, 1.0])}
        )
Example #14
def test_pickle():
    """
    Test the picklability of the built-in objectives.
    """
    # Create test data
    xdata = np.linspace(0, 100, 100)  # From 0 to 100 in 100 steps
    a_vec = np.random.normal(15.0, scale=2.0, size=xdata.shape)
    b_vec = np.random.normal(100, scale=2.0, size=xdata.shape)
    ydata = a_vec * xdata + b_vec  # Points scattered around the line 15 * x + 100

    # Normal symbolic fit
    a = Parameter('a', value=0, min=0.0, max=1000)
    b = Parameter('b', value=0, min=0.0, max=1000)
    x, y = variables('x, y')
    model = Model({y: a * x + b})

    for objective in [
            VectorLeastSquares, LeastSquares, LogLikelihood, MinimizeModel
    ]:
        if issubclass(objective, BaseIndependentObjective):
            data = {x: xdata}
        else:
            data = {x: xdata, y: ydata, model.sigmas[y]: np.ones_like(ydata)}
        obj = objective(model, data=data)
        new_obj = pickle.loads(pickle.dumps(obj))
        assert FitResults._array_safe_dict_eq(obj.__dict__, new_obj.__dict__)
Example #15
    def __init__(self, bounds):
        super(OneComponentDissociationModel, self).__init__(bounds)
        k1 = self.make_parameter('k1')
        t = self.make_variable('t')
        y = self.make_variable('y')

        self.sf_model = Model({y: exp(-k1*t)})
Example #16
def test_data_sanity():
    """
    Tests the basic data sanity checks for the different objective types.
    """
    # Create test data
    xdata = np.linspace(0, 100, 25)  # From 0 to 100 in 25 steps
    a_vec = np.random.normal(15.0, scale=2.0, size=xdata.shape)
    b_vec = np.random.normal(100, scale=2.0, size=xdata.shape)
    ydata = a_vec * xdata + b_vec  # Points scattered around the line 15 * x + 100

    # Normal symbolic fit
    a = Parameter('a', value=0, min=0.0, max=1000)
    b = Parameter('b', value=0, min=0.0, max=1000)
    x, y, z = variables('x, y, z')
    model = Model({y: a * x + b})

    for objective in [VectorLeastSquares, LeastSquares, LogLikelihood, MinimizeModel]:
        if issubclass(objective, BaseIndependentObjective):
            incomplete_data = {}
            data = {x: xdata}
            overcomplete_data = {x: xdata, z: ydata}
        else:
            incomplete_data = {x: xdata, y: ydata}
            data = {x: xdata, y: ydata, model.sigmas[y]: np.ones_like(ydata)}
            overcomplete_data = {x: xdata, y: ydata, z: ydata, model.sigmas[y]: np.ones_like(ydata)}
        with pytest.raises(KeyError):
            obj = objective(model, data=incomplete_data)

        obj = objective(model, data=data)
        # Overcomplete data has to be allowed, since constraints share their
        # data with models.
        obj = objective(model, data=overcomplete_data)
Example #17
    def test_global_fitting(self):
        """
        Test a global fitting scenario with datasets of unequal length. In this
        scenario, a quadratic equation is fitted where the constant term is
        shared between the datasets (e.g. identical background noise).
        """
        x_1, x_2, y_1, y_2 = variables('x_1, x_2, y_1, y_2')
        y0, a_1, a_2, b_1, b_2 = parameters('y0, a_1, a_2, b_1, b_2')

        # The following vector valued function links all the equations together
        # as stated in the intro.
        model = Model({
            y_1: a_1 * x_1**2 + b_1 * x_1 + y0,
            y_2: a_2 * x_2**2 + b_2 * x_2 + y0,
        })

        # Generate data from this model
        xdata1 = np.linspace(0, 10)
        xdata2 = xdata1[::2]  # Make the sets of unequal size

        ydata1, ydata2 = model(x_1=xdata1,
                               x_2=xdata2,
                               a_1=101.3,
                               b_1=0.5,
                               a_2=56.3,
                               b_2=1.1111,
                               y0=10.8)
        # Add some noise to make it appear like real data
        np.random.seed(1)
        ydata1 += np.random.normal(0, 2, size=ydata1.shape)
        ydata2 += np.random.normal(0, 2, size=ydata2.shape)

        xdata = [xdata1, xdata2]
        ydata = [ydata1, ydata2]

        # Guesses
        a_1.value = 100
        a_2.value = 50
        b_1.value = 1
        b_2.value = 1
        y0.value = 10

        sigma_y = np.concatenate((np.ones(20), [2., 4., 5, 7, 3]))

        fit = ConstrainedNumericalLeastSquares(model,
                                               x_1=xdata[0],
                                               x_2=xdata[1],
                                               y_1=ydata[0],
                                               y_2=ydata[1],
                                               sigma_y_2=sigma_y)
        fit_result = fit.execute()

        self.assertAlmostEqual(fit_result.value(y0), 1.061892e+01, 3)
        self.assertAlmostEqual(fit_result.value(a_1), 1.013269e+02, 3)
        self.assertAlmostEqual(fit_result.value(a_2), 5.625694e+01, 3)
        self.assertAlmostEqual(fit_result.value(b_1), 3.362240e-01, 3)
        self.assertAlmostEqual(fit_result.value(b_2), 1.565253e+00, 3)
Example #18
def test_LeastSquares():
    """
    Tests if the LeastSquares objective gives the right shapes of output by
    comparing with its analytical equivalent.
    """
    i = Idx('i', 100)
    x, y = symbols('x, y', cls=Variable)
    X2 = symbols('X2', cls=Variable)
    a, b = parameters('a, b')

    model = Model({y: a * x**2 + b * x})
    xdata = np.linspace(0, 10, 100)
    ydata = model(x=xdata, a=5, b=2).y + np.random.normal(0, 5, xdata.shape)

    # Construct a LeastSquares objective and its analytical equivalent
    chi2_numerical = LeastSquares(model,
                                  data={
                                      x: xdata,
                                      y: ydata,
                                      model.sigmas[y]: np.ones_like(xdata)
                                  })
    chi2_exact = Model({X2: FlattenSum(0.5 * ((a * x**2 + b * x) - y)**2, i)})

    eval_exact = chi2_exact(x=xdata, y=ydata, a=2, b=3)
    jac_exact = chi2_exact.eval_jacobian(x=xdata, y=ydata, a=2, b=3)
    hess_exact = chi2_exact.eval_hessian(x=xdata, y=ydata, a=2, b=3)
    eval_numerical = chi2_numerical(x=xdata, a=2, b=3)
    jac_numerical = chi2_numerical.eval_jacobian(x=xdata, a=2, b=3)
    hess_numerical = chi2_numerical.eval_hessian(x=xdata, a=2, b=3)

    # Test model jacobian and hessian shape
    assert model(x=xdata, a=2, b=3)[0].shape == ydata.shape
    assert model.eval_jacobian(x=xdata, a=2, b=3)[0].shape == (2, 100)
    assert model.eval_hessian(x=xdata, a=2, b=3)[0].shape == (2, 2, 100)
    # Test exact chi2 shape
    assert eval_exact[0].shape == (1,)
    assert jac_exact[0].shape == (2, 1)
    assert hess_exact[0].shape == (2, 2, 1)

    # Test if these two models have the same call, jacobian, and hessian
    assert eval_exact[0] == pytest.approx(eval_numerical)
    assert isinstance(eval_numerical, float)
    assert isinstance(eval_exact[0][0], float)
    assert np.squeeze(jac_exact[0], axis=-1) == pytest.approx(jac_numerical)
    assert isinstance(jac_numerical, np.ndarray)
    assert np.squeeze(hess_exact[0], axis=-1) == pytest.approx(hess_numerical)
    assert isinstance(hess_numerical, np.ndarray)

    fit = Fit(chi2_exact, x=xdata, y=ydata, objective=MinimizeModel)
    fit_exact_result = fit.execute()
    fit = Fit(model, x=xdata, y=ydata, absolute_sigma=True)
    fit_num_result = fit.execute()
    assert fit_exact_result.value(a) == fit_num_result.value(a)
    assert fit_exact_result.value(b) == fit_num_result.value(b)
    assert fit_exact_result.stdev(a) == pytest.approx(fit_num_result.stdev(a))
    assert fit_exact_result.stdev(b) == pytest.approx(fit_num_result.stdev(b))
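Note: with all sigmas equal to one, the LeastSquares objective reduces to chi2 = 0.5 * sum((f(x_i) - y_i)**2), which is exactly the FlattenSum expression in chi2_exact. Evaluating it yields a scalar, a gradient of shape (2,) over (a, b), and a (2, 2) Hessian, which is what the shape and type asserts above verify.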
Example #19
    def test_gaussian_2d_fitting(self):
        """
        Tests fitting to a scalar gaussian function with 2 independent
        variables. Very sensitive to initial guesses, and if they are chosen too
        restrictive ConstrainedNumericalLeastSquares actually throws a tantrum.
        It therefore appears to be more sensitive than NumericalLeastSquares.
        """
        mean = (0.6, 0.4)  # x, y mean 0.6, 0.4
        cov = [[0.2**2, 0], [0, 0.1**2]]

        np.random.seed(0)
        data = np.random.multivariate_normal(mean, cov, 100000)

        # Insert them as y,x here, since numpy does not follow Cartesian conventions.
        ydata, xedges, yedges = np.histogram2d(data[:, 0],
                                               data[:, 1],
                                               bins=100,
                                               range=[[0.0, 1.0], [0.0, 1.0]])
        xcentres = (xedges[:-1] + xedges[1:]) / 2
        ycentres = (yedges[:-1] + yedges[1:]) / 2

        # Make a valid grid to match ydata
        xx, yy = np.meshgrid(xcentres, ycentres, sparse=False, indexing='ij')

        x0 = Parameter(value=mean[0], min=0.0, max=1.0)
        sig_x = Parameter(0.2, min=0.0, max=0.3)
        y0 = Parameter(value=mean[1], min=0.0, max=1.0)
        sig_y = Parameter(0.1, min=0.0, max=0.3)
        A = Parameter(value=np.mean(ydata), min=0.0)
        x = Variable()
        y = Variable()
        g = Variable()
        model = Model({g: A * Gaussian(x, x0, sig_x) * Gaussian(y, y0, sig_y)})
        fit = ConstrainedNumericalLeastSquares(model, x=xx, y=yy, g=ydata)
        fit_result = fit.execute()

        self.assertAlmostEqual(fit_result.value(x0), np.mean(data[:, 0]), 3)
        self.assertAlmostEqual(fit_result.value(y0), np.mean(data[:, 1]), 3)
        self.assertAlmostEqual(np.abs(fit_result.value(sig_x)),
                               np.std(data[:, 0]), 2)
        self.assertAlmostEqual(np.abs(fit_result.value(sig_y)),
                               np.std(data[:, 1]), 2)
        self.assertGreaterEqual(fit_result.r_squared, 0.96)

        # Compare with industry standard MINPACK
        fit_std = NumericalLeastSquares(model, x=xx, y=yy, g=ydata)
        fit_std_result = fit_std.execute()

        self.assertAlmostEqual(fit_std_result.value(x0), fit_result.value(x0),
                               4)
        self.assertAlmostEqual(fit_std_result.value(y0), fit_result.value(y0),
                               4)
        self.assertAlmostEqual(fit_std_result.value(sig_x),
                               fit_result.value(sig_x), 4)
        self.assertAlmostEqual(fit_std_result.value(sig_y),
                               fit_result.value(sig_y), 4)
        self.assertAlmostEqual(fit_std_result.r_squared, fit_result.r_squared,
                               4)
Example #20
def test_minimize():
    """
    Tests maximizing a function with and without constraints, taken from the
    scipy `minimize` tutorial. Compares the symfit result with the scipy
    result.
    https://docs.scipy.org/doc/scipy-0.18.1/reference/tutorial/optimize.html#constrained-minimization-of-multivariate-scalar-functions-minimize
    """
    x = Parameter(value=-1.0)
    y = Parameter(value=1.0)
    # Use an unnamed Variable on purpose to test the auto-generation of names.
    model = Model(2 * x * y + 2 * x - x ** 2 - 2 * y ** 2)

    constraints = [
        Ge(y - 1, 0),  # y - 1 >= 0,
        Eq(x**3 - y, 0),  # x**3 - y == 0,
    ]

    def func(x, sign=1.0):
        """ Objective function """
        return sign*(2*x[0]*x[1] + 2*x[0] - x[0]**2 - 2*x[1]**2)

    def func_deriv(x, sign=1.0):
        """ Derivative of objective function """
        dfdx0 = sign*(-2*x[0] + 2*x[1] + 2)
        dfdx1 = sign*(2*x[0] - 4*x[1])
        return np.array([dfdx0, dfdx1])

    cons = (
        {'type': 'eq',
         'fun': lambda x: np.array([x[0]**3 - x[1]]),
         'jac': lambda x: np.array([3.0*(x[0]**2.0), -1.0])},
        {'type': 'ineq',
         'fun': lambda x: np.array([x[1] - 1]),
         'jac': lambda x: np.array([0.0, 1.0])}
    )

    # Unconstrained fit
    res = minimize(func, [-1.0, 1.0], args=(-1.0,), jac=func_deriv,
                   method='BFGS', options={'disp': False})
    fit = Fit(model=-model)
    assert isinstance(fit.objective, MinimizeModel)
    assert isinstance(fit.minimizer, BFGS)

    fit_result = fit.execute()

    assert fit_result.value(x) == pytest.approx(res.x[0], 1e-6)
    assert fit_result.value(y) == pytest.approx(res.x[1], 1e-6)

    # Same test, but with constraints in place.
    res = minimize(func, [-1.0, 1.0], args=(-1.0,), jac=func_deriv,
                   constraints=cons, method='SLSQP', options={'disp': False})

    fit = Fit(-model, constraints=constraints)
    assert fit.constraints[0].constraint_type == Ge
    assert fit.constraints[1].constraint_type == Eq
    fit_result = fit.execute()
    assert fit_result.value(x) == pytest.approx(res.x[0], 1e-6)
    assert fit_result.value(y) == pytest.approx(res.x[1], 1e-6)
Example #21
    def __init__(self, bounds):
        super(TwoComponentDissociationModel, self).__init__(bounds)

        r = self.make_parameter('r', value=0.5, min=0, max=1)
        k1 = self.make_parameter('k1')
        k2 = self.make_parameter('k2')
        t = self.make_variable('t')
        y = self.make_variable('y')

        self.sf_model = Model({y: (r * exp(-k1*t) + (1 - r) * exp(-k2*t))})
Example #22
def test_interdependency_invalid():
    """
    Create an invalid model with interdependency.
    """
    a, b, c = parameters('a, b, c')
    x, y, z = variables('x, y, z')

    with pytest.raises(ModelError):
        # Invalid; parameters cannot be keys.
        model_dict = {
            c: a ** 3 * x + b ** 2,
            z: c ** 2 + a * b
        }
        model = Model(model_dict)

    with pytest.raises(ModelError):
        # Invalid; parameters cannot be keys.
        model_dict = {c: a ** 3 * x + b ** 2}
        model = Model(model_dict)
Example #23
    def __init__(self, bounds):
        super(TwoComponentDissociationModel, self).__init__(bounds)

        r = self.make_parameter("r", value=0.5, min=0, max=1)
        k1 = self.make_parameter("k1")
        k2 = self.make_parameter("k2")
        t = self.make_variable("t")
        y = self.make_variable("y")

        self.sf_model = Model({y: (r * exp(-k1 * t) + (1 - r) * exp(-k2 * t))})
Example #24
def test_model_from_dict():
    """
    Tries to create a model from a dictionary.
    """
    x, y_1, y_2 = variables('x, y_1, y_2')
    a, b = parameters('a, b')
    # This way the test fails rather than errors.
    try:
        Model({y_1: 2 * a * x, y_2: b * x**2})
    except Exception as error:
        pytest.fail('test_model_from_dict raised {}'.format(error))
Example #25
def test_LogLikelihood_global():
    """
    This is a test for global likelihood fitting to multiple data sets.
    Based on SO question 56006357.
    """
    # creating the data
    mu1, mu2 = .05, -.05
    sigma1, sigma2 = 3.5, 2.5
    n1, n2 = 80, 90
    np.random.seed(42)
    x1 = np.random.vonmises(mu1, sigma1, n1)
    x2 = np.random.vonmises(mu2, sigma2, n2)

    n = 2  # number of components
    xs = variables('x,' + ','.join('x_{}'.format(i) for i in range(1, n + 1)))
    x, xs = xs[0], xs[1:]
    ys = variables(','.join('y_{}'.format(i) for i in range(1, n + 1)))
    mu, kappa = parameters('mu, kappa')
    kappas = parameters(','.join('k_{}'.format(i) for i in range(1, n + 1)),
                        min=0,
                        max=10)
    mu.min, mu.max = -np.pi, np.pi

    template = exp(kappa * cos(x - mu)) / (2 * pi * besseli(0, kappa))

    model = Model({
        y_i: template.subs({
            kappa: k_i,
            x: x_i
        })
        for y_i, x_i, k_i in zip(ys, xs, kappas)
    })

    all_data = {xs[0]: x1, xs[1]: x2, ys[0]: None, ys[1]: None}
    all_params = {'mu': 1}
    all_params.update({k_i.name: 1 for k_i in kappas})

    # Evaluate the loglikelihood and its jacobian and hessian
    logL = LogLikelihood(model, data=all_data)
    eval_numerical = logL(**all_params)
    jac_numerical = logL.eval_jacobian(**all_params)
    hess_numerical = logL.eval_hessian(**all_params)

    # Test the types and shapes of the components.
    assert isinstance(eval_numerical, float)
    assert isinstance(jac_numerical, np.ndarray)
    assert isinstance(hess_numerical, np.ndarray)

    assert eval_numerical.shape == tuple()  # Empty tuple -> scalar
    assert jac_numerical.shape == (3, )
    assert hess_numerical.shape == (3, 3)
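Note: the shapes follow from the parameter count. After kappa is substituted by k_1 and k_2, the model is left with three parameters (k_1, k_2, mu), so the gradient of the summed log-likelihood log L = sum over datasets j and points i of log f(x_ji; mu, k_j) has shape (3,) and the Hessian shape (3, 3).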
Example #26
def test_neg():
    """
    Test negation of all model types
    """
    x, y_1, y_2 = variables('x, y_1, y_2')
    a, b = parameters('a, b')

    model_dict = {y_2: a * x ** 2, y_1: 2 * x * b}
    model = Model(model_dict)

    model_neg = - model
    for key in model:
        assert model[key] == - model_neg[key]

    # Constraints
    constraint = Model.as_constraint(Eq(a * x, 2), model)

    constraint_neg = - constraint
    assert (constraint[constraint.dependent_vars[0]] ==
            - constraint_neg[constraint_neg.dependent_vars[0]])

    # ODEModel
    odemodel = ODEModel({D(y_1, x): a * x}, initial={a: 1.0})

    odemodel_neg = - odemodel
    for key in odemodel:
        assert odemodel[key] == - odemodel_neg[key]

    # For models with interdependency, negation should only change the
    # dependent components.
    model_dict = {x: y_1**2, y_1: a * y_2 + b}
    model = Model(model_dict)

    model_neg = - model
    for key in model:
        if key in model.dependent_vars:
            assert model[key] == - model_neg[key]
        elif key in model.interdependent_vars:
            assert model[key] == model_neg[key]
        else:
            pytest.fail()
Example #27
def test_hessian_matrix():
    """
    The Hessian of a model should be a 3D list containing all the second-order
    partial derivatives: one Hessian matrix per component of the model.
    """
    a, b, c = parameters('a, b, c')
    a_i, b_i, c_i = variables('a_i, b_i, c_i')

    model = Model({a_i: 2 * a**2 + 3 * b, b_i: 5 * b**2, c_i: 7 * c * b})
    assert [[[4, 0, 0], [0, 0, 0], [0, 0, 0]],
            [[0, 0, 0], [0, 10, 0], [0, 0, 0]],
            [[0, 0, 0], [0, 0, 7], [0, 7, 0]]] == model.hessian
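Note: checked by hand, for a_i = 2*a**2 + 3*b the only nonzero second derivative is d2/da2 = 4; for b_i = 5*b**2 it is d2/db2 = 10; and for c_i = 7*c*b the mixed derivatives d2/(db dc) = d2/(dc db) = 7. Stacking one 3x3 Hessian per component, with parameters ordered (a, b, c), gives exactly the nested list asserted above.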
Example #28
    def setUp(self):
        x = Variable('x')
        y = Variable('y')
        xmin, xmax = -5, 5
        self.x0_1 = Parameter('x01', value=0, min=xmin, max=xmax)
        self.sig_x_1 = Parameter('sigx1', value=0, min=0.0, max=1)
        self.y0_1 = Parameter('y01', value=0, min=xmin, max=xmax)
        self.sig_y_1 = Parameter('sigy1', value=0, min=0.0, max=1)
        self.A_1 = Parameter('A1', min=0, max=1000)
        g_1 = self.A_1 * Gaussian(x, self.x0_1, self.sig_x_1) *\
                Gaussian(y, self.y0_1, self.sig_y_1)

        self.model = Model(g_1)
Example #29
    def test_minimizer_constraint_compatibility(self):
        """
        Test if #156 has been solved, and test all the other constraint styles.
        """
        x, y, z = variables('x, y, z')
        a, b, c = parameters('a, b, c')
        b.fixed = True

        model = Model({z: a * x**2 - b * y**2 + c})
        # Generate data, z has to be scalar for MinimizeModel to be happy
        xdata = 3  # np.linspace(0, 10)
        ydata = 5  # np.linspace(0, 10)
        zdata = model(a=2, b=3, c=5, x=xdata, y=ydata).z
        data_dict = {x: xdata, y: ydata, z: zdata}

        # Equivalent ways of defining the same constraint
        constraint_model = Model.as_constraint(a - c, model, constraint_type=Eq)
        constraint_model.params = model.params
        constraints = [
            Eq(a, c),
            MinimizeModel(constraint_model, data=data_dict),
            constraint_model
        ]

        objective = MinimizeModel(model, data=data_dict)
        for constraint in constraints:
            fit = SLSQP(objective, parameters=[a, b, c],
                        constraints=[constraint])
            wrapped_constr = fit.wrapped_constraints[0]['fun'].model
            self.assertIsInstance(wrapped_constr, Model)
            self.assertEqual(wrapped_constr.params, model.params)
            self.assertEqual(wrapped_constr.jacobian_model.params, model.params)
            self.assertEqual(wrapped_constr.hessian_model.params, model.params)
            # Set the data for the dependent var of the constraint to None.
            # Normally this is handled by Fit, but since we interact with the
            # Minimizer directly here, it is up to us.
            constraint_var = fit.wrapped_constraints[0]['fun'].model.dependent_vars[0]
            objective.data[constraint_var] = None
            fit.execute()

        # No scipy style dicts allowed.
        with self.assertRaises(TypeError):
            fit = SLSQP(MinimizeModel(model, data={}),
                        parameters=[a, b, c],
                        constraints=[
                            {'type': 'eq', 'fun': lambda a, b, c: a - c}
                        ]
            )
Example #30
    def test_neg(self):
        """
        Test negation of all model types
        """
        x, y_1, y_2 = variables('x, y_1, y_2')
        a, b = parameters('a, b')

        model_dict = {y_2: a * x ** 2, y_1: 2 * x * b}
        model = Model(model_dict)

        model_neg = - model
        for key in model:
            self.assertEqual(model[key], - model_neg[key])

        # Constraints
        constraint = Model.as_constraint(Eq(a * x, 2), model)

        constraint_neg = - constraint
        self.assertEqual(constraint[constraint.dependent_vars[0]],
                         - constraint_neg[constraint_neg.dependent_vars[0]])

        # ODEModel
        odemodel = ODEModel({D(y_1, x): a * x}, initial={a: 1.0})

        odemodel_neg = - odemodel
        for key in odemodel:
            self.assertEqual(odemodel[key], - odemodel_neg[key])

        # For models with interdependency, negation should only change the
        # dependent components.
        model_dict = {x: y_1**2, y_1: a * y_2 + b}
        model = Model(model_dict)

        model_neg = - model
        for key in model:
            if key in model.dependent_vars:
                self.assertEqual(model[key], - model_neg[key])
            elif key in model.interdependent_vars:
                self.assertEqual(model[key], model_neg[key])
            else:
                raise Exception('There should be no such variable')
Example #31
    def test_interdependency(self):
        a, b = parameters('a, b')
        x, y, z = variables('x, y, z')
        model_dict = {
            y: a**3 * x + b**2,
            z: y**2 + a * b
        }
        callable_model = CallableModel(model_dict)
        self.assertEqual(callable_model.independent_vars, [x])
        self.assertEqual(callable_model.interdependent_vars, [y])
        self.assertEqual(callable_model.dependent_vars, [z])
        self.assertEqual(callable_model.params, [a, b])
        self.assertEqual(callable_model.connectivity_mapping,
                         {y: {a, b, x}, z: {a, b, y}})
        np.testing.assert_almost_equal(callable_model(x=3, a=1, b=2),
                                       np.atleast_2d([7, 51]).T)
        for var, func in callable_model.vars_as_functions.items():
            self.assertEqual(
                set(str(x) for x in callable_model.connectivity_mapping[var]),
                set(str(x.__class__) if isinstance(x, Function) else str(x)
                    for x in func.args)
            )

        jac_model = jacobian_from_model(callable_model)
        self.assertEqual(jac_model.params, [a, b])
        self.assertEqual(jac_model.dependent_vars, [D(z, a), D(z, b), z])
        self.assertEqual(jac_model.interdependent_vars, [D(y, a), D(y, b), y])
        self.assertEqual(jac_model.independent_vars, [x])
        for p1, p2 in zip_longest(jac_model.__signature__.parameters, [x, a, b]):
            self.assertEqual(str(p1), str(p2))
        # The connectivity of jac_model should be that from its own components
        # plus that of the model. The latter is needed to properly compute the
        # Hessian.
        self.assertEqual(
            jac_model.connectivity_mapping,
             {D(y, a): {a, x},
              D(y, b): {b},
              D(z, a): {b, y, D(y, a)},
              D(z, b): {a, y, D(y, b)},
              y: {a, b, x}, z: {a, b, y}
              }
        )
        self.assertEqual(
            jac_model.model_dict,
            {D(y, a): 3 * a**2 * x,
             D(y, b): 2 * b,
             D(z, a): b + 2 * y * D(y, a),
             D(z, b): a + 2 * y * D(y, b),
             y: callable_model[y], z: callable_model[z]
             }
        )
        for var, func in jac_model.vars_as_functions.items():
            self.assertEqual(
                set(x.name for x in jac_model.connectivity_mapping[var]),
                set(str(x.__class__) if isinstance(x, Function) else str(x)
                    for x in func.args)
            )
        hess_model = hessian_from_model(callable_model)
        # Result according to Mathematica
        hess_as_dict = {
            D(y, (a, 2)): 6 * a * x,
            D(y, a, b): 0,
            D(y, b, a): 0,
            D(y, (b, 2)): 2,
            D(z, (a, 2)): 2 * D(y, a)**2 + 2 * y * D(y, (a, 2)),
            D(z, a, b): 1 + 2 * D(y, b) * D(y, a) + 2 * y * D(y, a, b),
            D(z, b, a): 1 + 2 * D(y, b) * D(y, a) + 2 * y * D(y, a, b),
            D(z, (b, 2)): 2 * D(y, b)**2 + 2 * y * D(y, (b, 2)),
            D(y, a): 3 * a ** 2 * x,
            D(y, b): 2 * b,
            D(z, a): b + 2 * y * D(y, a),
            D(z, b): a + 2 * y * D(y, b),
            y: callable_model[y], z: callable_model[z]
        }
        self.assertEqual(len(hess_model), len(hess_as_dict))
        for key, expr in hess_model.items():
            self.assertEqual(expr, hess_as_dict[key])

        self.assertEqual(hess_model.params, [a, b])
        self.assertEqual(
            hess_model.dependent_vars,
            [D(z, (a, 2)), D(z, a, b), D(z, (b, 2)), D(z, b, a),
             D(z, a), D(z, b), z]
        )
        self.assertEqual(hess_model.interdependent_vars,
                         [D(y, (a, 2)), D(y, a), D(y, b), y])
        self.assertEqual(hess_model.independent_vars, [x])

        model = Model(model_dict)
        np.testing.assert_almost_equal(model(x=3, a=1, b=2),
                                       np.atleast_2d([7, 51]).T)
        np.testing.assert_almost_equal(model.eval_jacobian(x=3, a=1, b=2),
                                       np.array([[[9], [4]], [[128], [57]]]))
        np.testing.assert_almost_equal(
            model.eval_hessian(x=3, a=1, b=2),
            np.array([[[[18], [0]], [[0], [2]]],
                      [[[414], [73]], [[73], [60]]]]))

        self.assertEqual(model.__signature__, model.jacobian_model.__signature__)
        self.assertEqual(model.__signature__, model.hessian_model.__signature__)
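Note: the numerical values can be verified with the chain rule. At x=3, a=1, b=2: y = a**3 * x + b**2 = 7 and z = y**2 + a*b = 51, matching the model call. For the jacobian, dy/da = 3*a**2*x = 9 and dy/db = 2*b = 4, while dz/da = b + 2*y*dy/da = 128 and dz/db = a + 2*y*dy/db = 57, exactly the array asserted above.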
Example #32
    def test_constrained_dependent_on_model(self):
        """
        For a simple Gaussian distribution, we test if Models of various types
        can be used as constraints. Of particular interest are NumericalModels,
        which can be used to fix the integral of the model during the fit to 1,
        as it should be for a probability distribution.
        """
        A, mu, sig = parameters('A, mu, sig')
        x, y, Y = variables('x, y, Y')
        i = Idx('i', (0, 1000))
        sig.min = 0.0

        model = Model({y: A * Gaussian(x, mu=mu, sig=sig)})

        # Generate data, 100 samples from a N(1.2, 2) distribution
        np.random.seed(2)
        xdata = np.random.normal(1.2, 2, 1000)
        ydata, xedges = np.histogram(xdata, bins=int(np.sqrt(len(xdata))), density=True)
        xcentres = (xedges[1:] + xedges[:-1]) / 2

        # Unconstrained fit
        fit = Fit(model, x=xcentres, y=ydata)
        unconstr_result = fit.execute()

        # Constraints must be scalar models.
        with self.assertRaises(ModelError):
            Model.as_constraint([A - 1, sig - 1], model, constraint_type=Eq)
        constraint_exact = Model.as_constraint(
            A * sqrt(2 * sympy.pi) * sig - 1, model, constraint_type=Eq
        )
        # Only when explicitly asked, do models behave as constraints.
        self.assertTrue(hasattr(constraint_exact, 'constraint_type'))
        self.assertEqual(constraint_exact.constraint_type, Eq)
        self.assertFalse(hasattr(model, 'constraint_type'))

        # Now lets make some valid constraints and see if they are respected!
        # TODO: The first two should be symbolic integrals over `y` instead, but
        # currently those are not converted into a numpy/scipy function, so the
        # first two are not valid constraints in that sense.
        constraint_model = Model.as_constraint(A - 1, model, constraint_type=Eq)
        constraint_exact = Eq(A, 1)
        constraint_num = CallableNumericalModel.as_constraint(
            {Y: lambda x, y: simps(y, x) - 1},  # Integrate using simps
            model=model,
            connectivity_mapping={Y: {x, y}},
            constraint_type=Eq
        )

        # Test for all these different types of constraint.
        for constraint in [constraint_model, constraint_exact, constraint_num]:
            if not isinstance(constraint, Eq):
                self.assertEqual(constraint.constraint_type, Eq)

            xcentres = (xedges[1:] + xedges[:-1]) / 2
            fit = Fit(model, x=xcentres, y=ydata, constraints=[constraint])
            # Test if conversion into a constraint was done properly
            fit_constraint = fit.constraints[0]
            self.assertEqual(fit.model.params, fit_constraint.params)
            self.assertEqual(fit_constraint.constraint_type, Eq)

            con_map = fit_constraint.connectivity_mapping
            if isinstance(constraint, CallableNumericalModel):
                self.assertEqual(con_map, {Y: {x, y}, y: {x, mu, sig, A}})
                self.assertEqual(fit_constraint.independent_vars, [x])
                self.assertEqual(fit_constraint.dependent_vars, [Y])
                self.assertEqual(fit_constraint.interdependent_vars, [y])
                self.assertEqual(fit_constraint.params, [A, mu, sig])
            else:
                # ToDo: if these constraints can somehow be written as integrals
                # depending on y and x this if/else should be removed.
                self.assertEqual(con_map,
                                 {fit_constraint.dependent_vars[0]: {A}})
                self.assertEqual(fit_constraint.independent_vars, [])
                self.assertEqual(len(fit_constraint.dependent_vars), 1)
                self.assertEqual(fit_constraint.interdependent_vars, [])
                self.assertEqual(fit_constraint.params, [A, mu, sig])

            # Finally, test if the constraint worked
            fit_result = fit.execute(options={'eps': 1e-15, 'ftol': 1e-10})
            unconstr_value = fit.minimizer.wrapped_constraints[0]['fun'](**unconstr_result.params)
            constr_value = fit.minimizer.wrapped_constraints[0]['fun'](**fit_result.params)
            self.assertAlmostEqual(constr_value[0], 0.0, 10)
        # And if it was very poorly met before
        self.assertNotAlmostEqual(unconstr_value[0], 0.0, 2)
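Note: the numerical constraint is what ties the test back to the docstring. simps(y, x) - 1 == 0 approximates "the integral of y over x equals 1" with Simpson's rule on the sampled curve, forcing the fitted curve to have unit area like a proper probability density. The final asserts then check that the constrained fit meets this to ten decimal places, while the unconstrained fit clearly does not.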