Example #1
def harmonic_approximation(polygon: Polygon, n=3):
    from symfit import Eq, Fit, cos, parameters, pi, sin, variables
    import numpy as np  # used below; assumed to be a module-level import in the original

    def fourier_series(x, f, n=0):
        """
        Returns a symbolic fourier series of order `n`.

        :param n: Order of the fourier series.
        :param x: Independent variable
        :param f: Frequency of the fourier series
        """
        # Make the parameter objects for all the terms
        a0, *cos_a = parameters(','.join(['a{}'.format(i) for i in range(0, n + 1)]))
        sin_b = parameters(','.join(['b{}'.format(i) for i in range(1, n + 1)]))
        # Construct the series
        series = a0 + sum(ai * cos(i * f * x) + bi * sin(i * f * x)
                          for i, (ai, bi) in enumerate(zip(cos_a, sin_b), start=1))
        return series

    x, y = variables('x, y')
    w, = parameters('w')
    fourier = fourier_series(x, f=w, n=n)
    model_dict = {y: fourier}
    print(model_dict)

    # Extract data from argument
    # FIXME: how to make a clockwise strictly increasing curve?
    xdata, ydata = polygon.exterior.xy
    t = np.linspace(0, 2 * np.pi, num=len(xdata))

    constr = [
        # Ge(x, 0), Le(x, 2 * pi),
        Eq(fourier.subs({x: 0}), fourier.subs({x: 2 * pi})),
        Eq(fourier.diff(x).subs({x: 0}), fourier.diff(x).subs({x: 2 * pi})),
        # Eq(fourier.diff(x, 2).subs({x: 0}), fourier.diff(x, 2).subs({x: 2 * pi})),
        ]
    print(constr)

    fit_x = Fit(model_dict, x=t, y=xdata, constraints=constr)
    fit_y = Fit(model_dict, x=t, y=ydata, constraints=constr)
    fitx_result = fit_x.execute()
    fity_result = fit_y.execute()
    print(fitx_result)
    print(fity_result)

    # Define function that generates the curve
    def curve_lambda(_t):
        return np.array(
            [
                fit_x.model(x=_t, **fitx_result.params).y,
                fit_y.model(x=_t, **fity_result.params).y
                ]
            ).ravel()

    # code to test if fit is correct
    plot_fit(polygon, curve_lambda, t, title='Harmonic Approximation')

    return curve_lambda
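
# Usage sketch (not part of the original function): approximate the outline
# of a unit square. Assumes shapely provides `Polygon`, and that `plot_fit`
# is the module-level helper called inside the function above.
if __name__ == '__main__':
    import numpy as np
    from shapely.geometry import Polygon

    square = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
    curve = harmonic_approximation(square, n=5)
    # curve(t) returns the fitted x-samples followed by the y-samples,
    # flattened into a single array.
    samples = curve(np.linspace(0, 2 * np.pi, num=100))
    print(samples.shape)  # (200,)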
Example #2
    def minimize_fixture(self):
        """
        Set up the parameters, model, constraints for the minimization fits.

        These tests used to purposefully use unamed variable, however this has
        been changed as this feature is now being depricated in a future
        version of Symfit.
        """
        x = Parameter('x', value=-1.0)
        y = Parameter('y', value=1.0)
        self.x = x
        self.y = y
        self.model = Model(2 * x * y + 2 * x - x ** 2 - 2 * y ** 2)

        self.constraints = [
            Ge(y - 1, 0),  # y - 1 >= 0,
            Eq(x**3 - y, 0),  # x**3 - y == 0,
        ]

        self.cons = (
            {'type': 'eq',
             'fun': lambda x: np.array([x[0]**3 - x[1]]),
             'jac': lambda x: np.array([3.0 * (x[0]**2.0), -1.0])},
            {'type': 'ineq',
             'fun': lambda x: np.array([x[1] - 1]),
             'jac': lambda x: np.array([0.0, 1.0])}
        )
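
        # Note: `self.constraints` (symfit relations) and `self.cons`
        # (scipy-style dicts) encode the same feasible set:
        #   Eq(x**3 - y, 0)  <->  {'type': 'eq',   'fun': x[0]**3 - x[1]}
        #   Ge(y - 1, 0)     <->  {'type': 'ineq', 'fun': x[1] - 1}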
Example #3
def test_pickle():
    """
    Make sure models can be pickled are preserved when pickling
    """
    a, b = parameters('a, b')
    x, y = variables('x, y')
    exact_model = Model({y: a * x ** b})
    constraint = Model.as_constraint(Eq(a, b), exact_model)
    num_model = CallableNumericalModel(
        {y: a * x ** b}, independent_vars=[x], params=[a, b]
    )
    connected_num_model = CallableNumericalModel(
        {y: a * x ** b}, connectivity_mapping={y: {x, a, b}}
    )
    # Test if lsoda args and kwargs are pickled too
    ode_model = ODEModel({D(y, x): a * x + b}, {x: 0.0}, 3, 4, some_kwarg=True)

    models = [exact_model, constraint, num_model, ode_model, connected_num_model]
    for model in models:
        new_model = pickle.loads(pickle.dumps(model))
        # Compare signatures
        assert model.__signature__ == new_model.__signature__
        # Trigger the cached vars because we compare `__dict__`s below
        model.vars
        new_model.vars
        # Explicitly make sure the connectivity mapping is identical.
        assert model.connectivity_mapping == new_model.connectivity_mapping
        if not isinstance(model, ODEModel):
            model.function_dict
            model.vars_as_functions
            new_model.function_dict
            new_model.vars_as_functions
        assert model.__dict__ == new_model.__dict__
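
# Sketch (an assumption, not part of the original test): the same round-trip
# works through a file, e.g. to cache a model between sessions. The path is
# hypothetical; `pickle`, `parameters`, `variables`, and `Model` are the same
# names the test above already uses.
def pickle_roundtrip_sketch():
    a, b = parameters('a, b')
    x, y = variables('x, y')
    model = Model({y: a * x ** b})
    with open('/tmp/model.pkl', 'wb') as f:
        pickle.dump(model, f)
    with open('/tmp/model.pkl', 'rb') as f:
        restored = pickle.load(f)
    assert model.__signature__ == restored.__signature__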
Example #4
def test_minimize():
    """
    Tests maximizing a function with and without constraints, taken from
    the scipy `minimize` tutorial, and compares the symfit result with
    the scipy result.
    https://docs.scipy.org/doc/scipy-0.18.1/reference/tutorial/optimize.html#constrained-minimization-of-multivariate-scalar-functions-minimize
    """
    x = Parameter(value=-1.0)
    y = Parameter(value=1.0)
    # Use an unnamed Variable on purpose to test the auto-generation of names.
    model = Model(2 * x * y + 2 * x - x ** 2 - 2 * y ** 2)

    constraints = [
        Ge(y - 1, 0),  # y - 1 >= 0,
        Eq(x**3 - y, 0),  # x**3 - y == 0,
    ]

    def func(x, sign=1.0):
        """ Objective function """
        return sign*(2*x[0]*x[1] + 2*x[0] - x[0]**2 - 2*x[1]**2)

    def func_deriv(x, sign=1.0):
        """ Derivative of objective function """
        dfdx0 = sign*(-2*x[0] + 2*x[1] + 2)
        dfdx1 = sign*(2*x[0] - 4*x[1])
        return np.array([dfdx0, dfdx1])

    cons = (
        {'type': 'eq',
         'fun': lambda x: np.array([x[0]**3 - x[1]]),
         'jac': lambda x: np.array([3.0*(x[0]**2.0), -1.0])},
        {'type': 'ineq',
         'fun': lambda x: np.array([x[1] - 1]),
         'jac': lambda x: np.array([0.0, 1.0])}
    )

    # Unconstrained fit
    res = minimize(func, [-1.0, 1.0], args=(-1.0,), jac=func_deriv,
                   method='BFGS', options={'disp': False})
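    # symfit has no separate "maximize" call here: maximizing the model is
    # done by minimizing its negation, hence the -model below.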
    fit = Fit(model=-model)
    assert isinstance(fit.objective, MinimizeModel)
    assert isinstance(fit.minimizer, BFGS)

    fit_result = fit.execute()

    assert fit_result.value(x) == pytest.approx(res.x[0], 1e-6)
    assert fit_result.value(y) == pytest.approx(res.x[1], 1e-6)

    # Same test, but with constraints in place.
    res = minimize(func, [-1.0, 1.0], args=(-1.0,), jac=func_deriv,
                   constraints=cons, method='SLSQP', options={'disp': False})

    fit = Fit(-model, constraints=constraints)
    assert fit.constraints[0].constraint_type == Ge
    assert fit.constraints[1].constraint_type == Eq
    fit_result = fit.execute()
    assert fit_result.value(x) == pytest.approx(res.x[0], 1e-6)
    assert fit_result.value(y) == pytest.approx(res.x[1], 1e-6)
Example #5
def test_constraint_types():
    x = Parameter('x', value=-1.0)
    y = Parameter('y', value=1.0)
    z = Variable('z')
    model = Model({z: 2*x*y + 2*x - x**2 - 2*y**2})

    # These types are not allowed constraints.
    for relation in [Lt, Gt, Ne]:
        with pytest.raises(ModelError):
            Fit(model, constraints=[relation(x, y)])

    # Should execute without problems.
    for relation in [Eq, Ge, Le]:
        Fit(model, constraints=[relation(x, y)])

    fit = Fit(model, constraints=[Le(x, y)])
    # Le should be transformed to Ge
    assert fit.constraints[0].constraint_type is Ge

    # Redo the standard test as a Le
    constraints = [
        Le(- y + 1, 0),  # y - 1 >= 0,
        Eq(x**3 - y, 0),  # x**3 - y == 0,
    ]
    std_constraints = [
        Ge(y - 1, 0),  # y - 1 >= 0,
        Eq(x**3 - y, 0),  # x**3 - y == 0,
    ]

    fit = Fit(-model, constraints=constraints)
    std_fit = Fit(-model, constraints=std_constraints)
    assert fit.constraints[0].constraint_type == Ge
    assert fit.constraints[1].constraint_type == Eq
    assert fit.constraints[0].params == [x, y]
    assert fit.constraints[1].params == [x, y]
    assert fit.constraints[0].jacobian_model.params == [x, y]
    assert fit.constraints[1].jacobian_model.params == [x, y]
    assert fit.constraints[0].hessian_model.params == [x, y]
    assert fit.constraints[1].hessian_model.params == [x, y]
    assert fit.constraints[0].__signature__ == fit.constraints[1].__signature__
    fit_result = fit.execute()
    std_result = std_fit.execute()
    assert fit_result.value(x) == pytest.approx(std_result.value(x))
    assert fit_result.value(y) == pytest.approx(std_result.value(y))
Example #6
    def test_constraint_types(self):
        x = Parameter(-1.0)
        y = Parameter(1.0)
        z = Variable()
        model = {z: 2 * x * y + 2 * x - x**2 - 2 * y**2}

        # These types are not allowed constraints.
        for relation in [Lt, Gt, Ne]:
            with self.assertRaises(ModelError):
                Maximize(model, constraints=[relation(x, y)])

        # Should execute without problems.
        for relation in [Eq, Ge, Le]:
            Maximize(model, constraints=[relation(x, y)])

        fit = Maximize(model, constraints=[Le(x, y)])
        # Le should be transformed to Ge
        self.assertIs(fit.constraints[0].constraint_type, Ge)

        # Redo the standard test as a Le
        constraints = [
            Le(-y + 1, 0),  # y - 1 >= 0,
            Eq(x**3 - y, 0),  # x**3 - y == 0,
        ]
        std_constraints = [
            Ge(y - 1, 0),  # y - 1 >= 0,
            Eq(x**3 - y, 0),  # x**3 - y == 0,
        ]

        fit = Maximize(model, constraints=constraints)
        std_fit = Maximize(model, constraints=std_constraints)
        self.assertEqual(fit.constraints[0].constraint_type, Ge)
        self.assertEqual(fit.constraints[1].constraint_type, Eq)
        fit_result = fit.execute()
        std_result = std_fit.execute()
        self.assertAlmostEqual(fit_result.value(x), std_result.value(x))
        self.assertAlmostEqual(fit_result.value(y), std_result.value(y))
Example #7
def test_minimizer_constraint_compatibility():
    """
    Test if #156 has been solved, and test all the other constraint styles.
    """
    x, y, z = variables('x, y, z')
    a, b, c = parameters('a, b, c')
    b.fixed = True

    model = Model({z: a * x**2 - b * y**2 + c})
    # Generate data, z has to be scalar for MinimizeModel to be happy
    xdata = 3  # np.linspace(0, 10)
    ydata = 5  # np.linspace(0, 10)
    zdata = model(a=2, b=3, c=5, x=xdata, y=ydata).z
    data_dict = {x: xdata, y: ydata, z: zdata}

    # Equivalent ways of defining the same constraint
    constraint_model = Model.as_constraint(a - c, model, constraint_type=Eq)
    constraint_model.params = model.params
    constraints = [
        Eq(a, c),
        MinimizeModel(constraint_model, data=data_dict), constraint_model
    ]

    objective = MinimizeModel(model, data=data_dict)
    for constraint in constraints:
        fit = SLSQP(objective, parameters=[a, b, c], constraints=[constraint])
        wrapped_constr = fit.wrapped_constraints[0]['fun'].model
        assert isinstance(wrapped_constr, Model)
        assert wrapped_constr.params == model.params
        assert wrapped_constr.jacobian_model.params == model.params
        assert wrapped_constr.hessian_model.params == model.params
        # Set the data for the dependent var of the constraint to None.
        # Normally this is handled by Fit; since here we interact with the
        # Minimizer directly, it is up to us.
        constraint_var = fit.wrapped_constraints[0][
            'fun'].model.dependent_vars[0]
        objective.data[constraint_var] = None
        fit.execute()

    # No scipy style dicts allowed.
    with pytest.raises(TypeError):
        fit = SLSQP(MinimizeModel(model, data=data_dict),
                    parameters=[a, b, c],
                    constraints=[{
                        'type': 'eq',
                        'fun': lambda a, b, c: a - c
                    }])
Example #8
def test_constrainedminimizers():
    """
    Compare the different constrained minimizers, to make sure all support
    constraints, and converge to the same answer.
    """
    minimizers = list(subclasses(ScipyConstrainedMinimize))
    x = Parameter('x', value=-1.0)
    y = Parameter('y', value=1.0)
    z = Variable('z')
    model = Model({z: 2 * x * y + 2 * x - x**2 - 2 * y**2})

    # First we try an unconstrained fit
    results = []
    for minimizer in minimizers:
        fit = Fit(-model, minimizer=minimizer)
        assert isinstance(fit.objective, MinimizeModel)
        fit_result = fit.execute(tol=1e-15)
        results.append(fit_result)

    # Compare the parameter values.
    for r1, r2 in zip(results[:-1], results[1:]):
        assert r1.value(x) == pytest.approx(r2.value(x), 1e-6)
        assert r1.value(y) == pytest.approx(r2.value(y), 1e-6)
        assert r1.covariance_matrix == pytest.approx(r2.covariance_matrix)

    constraints = [
        Ge(y - 1, 0),  # y - 1 >= 0,
        Eq(x**3 - y, 0),  # x**3 - y == 0,
    ]

    # Constrained fit.
    results = []
    for minimizer in minimizers:
        if minimizer is COBYLA:
            # COBYLA only supports inequality.
            continue
        fit = Fit(-model, constraints=constraints, minimizer=minimizer)
        fit_result = fit.execute(tol=1e-15)
        results.append(fit_result)

    for r1, r2 in zip(results[:-1], results[1:]):
        assert r1.value(x) == pytest.approx(r2.value(x), 1e-6)
        assert r1.value(y) == pytest.approx(r2.value(y), 1e-6)
        assert r1.covariance_matrix == pytest.approx(r2.covariance_matrix)
Example #9
def test_neg():
    """
    Test negation of all model types
    """
    x, y_1, y_2 = variables('x, y_1, y_2')
    a, b = parameters('a, b')

    model_dict = {y_2: a * x ** 2, y_1: 2 * x * b}
    model = Model(model_dict)

    model_neg = - model
    for key in model:
        assert model[key] == - model_neg[key]

    # Constraints
    constraint = Model.as_constraint(Eq(a * x, 2), model)

    constraint_neg = - constraint
    assert constraint[constraint.dependent_vars[0]] == - constraint_neg[constraint_neg.dependent_vars[0]]

    # ODEModel
    odemodel = ODEModel({D(y_1, x): a * x}, initial={a: 1.0})

    odemodel_neg = - odemodel
    for key in odemodel:
        assert odemodel[key] == - odemodel_neg[key]

    # For models with interdependency, negation should only change the
    # dependent components.
    model_dict = {x: y_1**2, y_1: a * y_2 + b}
    model = Model(model_dict)

    model_neg = - model
    for key in model:
        if key in model.dependent_vars:
            assert model[key] == - model_neg[key]
        elif key in model.interdependent_vars:
            assert model[key] == model_neg[key]
        else:
            pytest.fail()
Example #10
    def setup_class(cls):
        xdata = np.linspace(1, 10, 10)
        ydata = 3 * xdata**2

        cls.a = Parameter('a')
        cls.b = Parameter('b')

        x = Variable('x')
        y = Variable('y')
        model = Model({y: cls.a * x**cls.b})

        fit = Fit(model, x=xdata, y=ydata)
        cls.fit_result = fit.execute()
        fit = Fit(model, x=xdata, y=ydata, minimizer=MINPACK)
        cls.minpack_result = fit.execute()
        fit = Fit(model, x=xdata, objective=LogLikelihood)
        cls.likelihood_result = fit.execute()
        fit = Fit(model, x=xdata, y=ydata, minimizer=[BFGS, NelderMead])
        cls.chained_result = fit.execute()

        z = Variable('z')
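        # `ge_constraint` is assumed to be a plain helper function defined
        # elsewhere at module level in the original test file.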
        constraints = [
            Eq(cls.a, cls.b),
            CallableNumericalModel.as_constraint(
                {z: ge_constraint},
                connectivity_mapping={z: {cls.a}},
                constraint_type=Ge,
                model=model)
        ]
        fit = Fit(model, x=xdata, y=ydata, constraints=constraints)
        cls.constrained_result = fit.execute()
        fit = Fit(model,
                  x=xdata,
                  y=ydata,
                  constraints=constraints,
                  minimizer=BasinHopping)
        cls.constrained_basinhopping_result = fit.execute()
Example #11
def test_CallableNumericalModel():
    x, y, z = variables('x, y, z')
    a, b = parameters('a, b')

    model = CallableModel({y: a * x + b})
    numerical_model = CallableNumericalModel(
        {y: lambda x, a, b: a * x + b}, [x], [a, b]
    )
    assert model.__signature__ == numerical_model.__signature__

    xdata = np.linspace(0, 10)
    ydata = model(x=xdata, a=5.5, b=15.0).y + np.random.normal(0, 1)

    symbolic_answer = np.array(model(x=xdata, a=5.5, b=15.0))
    numerical_answer = np.array(numerical_model(x=xdata, a=5.5, b=15.0))

    assert numerical_answer == pytest.approx(symbolic_answer)

    faulty_model = CallableNumericalModel({y: lambda x, a, b: a * x + b},
                                          [], [a, b])
    assert not model.__signature__ == faulty_model.__signature__
    with pytest.raises(TypeError):
        # This is an incorrect signature, even though the lambda function is
        # correct. Should fail.
        faulty_model(xdata, 5.5, 15.0)

    # Faulty model whose components do not all accept all of the args
    faulty_model = CallableNumericalModel(
        {y: lambda x, a, b: a * x + b, z: lambda x, a: x**a}, [x], [a, b]
    )
    assert model.__signature__ == faulty_model.__signature__

    with pytest.raises(TypeError):
        # Lambda got an unexpected keyword 'b'
        faulty_model(xdata, 5.5, 15.0)

    # Faulty model with a wrongly named argument
    faulty_model = CallableNumericalModel(
        {y: lambda x, a, c=5: a * x + c}, [x], [a, b]
    )
    assert model.__signature__ == faulty_model.__signature__

    with pytest.raises(TypeError):
        # Lambda got an unexpected keyword 'b'
        faulty_model(xdata, 5.5, 15.0)

    # Correct version of the previous model
    numerical_model = CallableNumericalModel(
        {y: lambda x, a, b: a * x + b, z: lambda x, a: x ** a},
        connectivity_mapping={y: {a, b, x}, z: {x, a}}
    )
    # Another correct version, mixing a symbolic component with a lambda
    mixed_model = CallableNumericalModel(
        {y: lambda x, a, b: a * x + b, z: x ** a}, [x],
        [a, b]
    )

    numerical_answer = np.array(numerical_model(x=xdata, a=5.5, b=15.0))
    mixed_answer = np.array(mixed_model(x=xdata, a=5.5, b=15.0))
    assert numerical_answer == pytest.approx(mixed_answer)

    zdata = mixed_model(x=xdata, a=5.5, b=15.0).z + np.random.normal(0, 1)

    # Check if the fits are the same
    fit = Fit(mixed_model, x=xdata, y=ydata, z=zdata)
    mixed_result = fit.execute()
    fit = Fit(numerical_model, x=xdata, y=ydata, z=zdata)
    numerical_result = fit.execute()
    for param in [a, b]:
        assert mixed_result.value(param) == pytest.approx(numerical_result.value(param))
        if mixed_result.stdev(param) is not None and numerical_result.stdev(param) is not None:
            assert mixed_result.stdev(param) == pytest.approx(numerical_result.stdev(param))
        else:
            assert mixed_result.stdev(param) is None and numerical_result.stdev(param) is None
    assert mixed_result.r_squared == pytest.approx(numerical_result.r_squared)

    # Test if the constrained syntax is supported
    fit = Fit(numerical_model, x=xdata, y=ydata,
              z=zdata, constraints=[Eq(a, b)])
    constrained_result = fit.execute()
    assert constrained_result.value(a) == pytest.approx(constrained_result.value(b))
Example #12
def test_basinhopping_2d():
    def func2d(x):
        f = np.cos(14.5 * x[0] - 0.3) + (x[1] + 0.2) * x[1] + (x[0] + 0.2) * x[0]
        df = np.zeros(2)
        df[0] = -14.5 * np.sin(14.5 * x[0] - 0.3) + 2. * x[0] + 0.2
        df[1] = 2. * x[1] + 0.2
        return f, df

    def func2d_symfit(x1, x2):
        f = np.cos(14.5 * x1 - 0.3) + (x2 + 0.2) * x2 + (x1 + 0.2) * x1
        return f

    def jac2d_symfit(x1, x2):
        df = np.zeros(2)
        df[0] = -14.5 * np.sin(14.5 * x1 - 0.3) + 2. * x1 + 0.2
        df[1] = 2. * x2 + 0.2
        return df

    np.random.seed(555)
    minimizer_kwargs = {'method': 'BFGS', 'jac': True}
    x0 = [1.0, 1.0]
    res = basinhopping(func2d, x0, minimizer_kwargs=minimizer_kwargs, niter=200)

    np.random.seed(555)
    x1, x2 = parameters('x1, x2', value=x0)

    with pytest.raises(TypeError):
        fit = BasinHopping(
            func2d_symfit, [x1, x2],
            local_minimizer=NelderMead(func2d_symfit, [x1, x2],
                                       jacobian=jac2d_symfit)
        )

    fit = BasinHopping(
        func2d_symfit, [x1, x2],
        local_minimizer=BFGS(func2d_symfit, [x1, x2], jacobian=jac2d_symfit)
    )
    fit_result = fit.execute(niter=200)
    assert isinstance(fit.local_minimizer.jacobian, MinimizeModel)
    assert isinstance(fit.local_minimizer.jacobian.model, CallableNumericalModel)
    assert res.x[0] == fit_result.value(x1)
    assert res.x[1] == fit_result.value(x2)
    assert res.fun == fit_result.objective_value

    # Now compare with the symbolic equivalent
    np.random.seed(555)
    model = cos(14.5 * x1 - 0.3) + (x2 + 0.2) * x2 + (x1 + 0.2) * x1
    fit = Fit(model, minimizer=BasinHopping)
    fit_result = fit.execute()
    assert res.x[0] == fit_result.value(x1)
    assert res.x[1] == fit_result.value(x2)
    assert res.fun == fit_result.objective_value
    assert isinstance(fit.minimizer.local_minimizer, BFGS)

    # Impose constraints
    np.random.seed(555)
    model = cos(14.5 * x1 - 0.3) + (x2 + 0.2) * x2 + (x1 + 0.2) * x1
    fit = Fit(model, minimizer=BasinHopping, constraints=[Eq(x1, x2)])
    fit_result = fit.execute()
    assert fit_result.value(x1) == fit_result.value(x2)
    assert isinstance(fit.minimizer.local_minimizer, SLSQP)

    # Impose bounds
    np.random.seed(555)
    x1.min = 0.0
    model = cos(14.5 * x1 - 0.3) + (x2 + 0.2) * x2 + (x1 + 0.2) * x1
    fit = Fit(model, minimizer=BasinHopping)
    fit_result = fit.execute()
    assert fit_result.value(x1) >= x1.min
    assert isinstance(fit.minimizer.local_minimizer, LBFGSB)
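
# Behaviour verified by the asserts above: Fit selects BasinHopping's
# local minimizer from the problem structure (as exercised by this test):
#   unconstrained, unbounded  -> BFGS
#   with constraints          -> SLSQP
#   with parameter bounds     -> L-BFGS-B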
Example #13
def test_trustconstr():
    """
    Solve the standard constrained example from
    https://docs.scipy.org/doc/scipy-0.18.1/reference/tutorial/optimize.html#constrained-minimization-of-multivariate-scalar-functions-minimize
    using the trust-constr method.
    """
    def func(x, sign=1.0):
        """ Objective function """
        return sign * (2 * x[0] * x[1] + 2 * x[0] - x[0]**2 - 2 * x[1]**2)

    def func_jac(x, sign=1.0):
        """ Derivative of objective function """
        dfdx0 = sign * (-2 * x[0] + 2 * x[1] + 2)
        dfdx1 = sign * (2 * x[0] - 4 * x[1])
        return np.array([dfdx0, dfdx1])

    def func_hess(x, sign=1.0):
        """ Hessian of objective function """
        dfdx2 = sign * (-2)
        dfdxdy = sign * 2
        dfdy2 = sign * (-4)
        return np.array([[dfdx2, dfdxdy], [dfdxdy, dfdy2]])

    def cons_f(x):
        return [x[1] - 1, x[0]**3 - x[1]]

    def cons_J(x):
        return [[0, 1], [3 * x[0]**2, -1]]

    def cons_H(x, v):
        return v[0] * np.zeros(
            (2, 2)) + v[1] * np.array([[6 * x[0], 0], [0, 0]])

    # Unconstrained fit
    res = minimize(func, [-1.0, 1.0],
                   args=(-1.0, ),
                   jac=func_jac,
                   hess=func_hess,
                   method='trust-constr')
    assert res.x == pytest.approx([2, 1])

    # Constrained fit
    nonlinear_constraint = NonlinearConstraint(cons_f,
                                               0, [np.inf, 0],
                                               jac=cons_J,
                                               hess=cons_H)
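    # The bounds read per component of cons_f: 0 <= x[1] - 1 <= inf is the
    # inequality, and 0 <= x[0]**3 - x[1] <= 0 forces the equality.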
    res_constr = minimize(func, [-1.0, 1.0],
                          args=(-1.0, ),
                          tol=1e-15,
                          jac=func_jac,
                          hess=func_hess,
                          method='trust-constr',
                          constraints=[nonlinear_constraint])
    assert res_constr.x == pytest.approx([1, 1])

    # Symfit equivalent code
    x = Parameter('x', value=-1.0)
    y = Parameter('y', value=1.0)
    z = Variable('z')
    model = Model({z: 2 * x * y + 2 * x - x**2 - 2 * y**2})

    # Unconstrained fit first, see if we get the known result.
    fit = Fit(-model, minimizer=TrustConstr)
    fit_result = fit.execute()
    assert list(fit_result.params.values()) == pytest.approx([2, 1])

    # Now we are ready for the constrained fit.
    constraints = [
        Le(-y + 1, 0),  # y - 1 >= 0,
        Eq(x**3 - y, 0),  # x**3 - y == 0,
    ]
    fit = Fit(-model, constraints=constraints, minimizer=TrustConstr)
    fit_result = fit.execute(tol=1e-15)

    # Test if the constrained results are equal
    assert list(fit_result.params.values()) == pytest.approx(res_constr.x)
Example #14
def test_constrained_dependent_on_model():
    """
    For a simple Gaussian distribution, we test if Models of various types
    can be used as constraints. Of particular interest are NumericalModels,
    which can be used to fix the integral of the model during the fit to 1,
    as it should be for a probability distribution.
    :return:
    """
    A, mu, sig = parameters('A, mu, sig')
    x, y, Y = variables('x, y, Y')
    i = Idx('i', (0, 1000))
    sig.min = 0.0

    model = GradientModel({y: A * Gaussian(x, mu=mu, sig=sig)})

    # Generate data, 100 samples from a N(1.2, 2) distribution
    np.random.seed(2)
    xdata = np.random.normal(1.2, 2, 1000)
    ydata, xedges = np.histogram(xdata,
                                 bins=int(np.sqrt(len(xdata))),
                                 density=True)
    xcentres = (xedges[1:] + xedges[:-1]) / 2

    # Unconstrained fit
    fit = Fit(model, x=xcentres, y=ydata)
    unconstr_result = fit.execute()

    # Constraints must be scalar models.
    with pytest.raises(ModelError):
        Model.as_constraint([A - 1, sig - 1], model, constraint_type=Eq)

    constraint_exact = Model.as_constraint(A * sqrt(2 * sympy.pi) * sig - 1,
                                           model,
                                           constraint_type=Eq)
    # Only when explicitly asked, do models behave as constraints.
    assert hasattr(constraint_exact, 'constraint_type')
    assert constraint_exact.constraint_type == Eq
    assert not hasattr(model, 'constraint_type')

    # Now let's make some valid constraints and see if they are respected!
    # FIXME These first two should be symbolic integrals over `y` instead,
    # but currently this is not converted into a numpy/scipy function. So
    # instead the first two are not valid constraints.
    constraint_model = Model.as_constraint(A - 1, model, constraint_type=Eq)
    constraint_exact = Eq(A, 1)
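    # `simps` below is assumed to be scipy.integrate.simps, imported at
    # module level in the original test file.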
    constraint_num = CallableNumericalModel.as_constraint(
        {
            Y: lambda x, y: simps(y, x) - 1
        },  # Integrate using simps
        model=model,
        connectivity_mapping={Y: {x, y}},
        constraint_type=Eq)

    # Test for all these different types of constraint.
    for constraint in [constraint_model, constraint_exact, constraint_num]:
        if not isinstance(constraint, Eq):
            assert constraint.constraint_type == Eq

        xcentres = (xedges[1:] + xedges[:-1]) / 2
        fit = Fit(model, x=xcentres, y=ydata, constraints=[constraint])
        # Test if conversion into a constraint was done properly
        fit_constraint = fit.constraints[0]
        assert fit.model.params == fit_constraint.params
        assert fit_constraint.constraint_type == Eq

        con_map = fit_constraint.connectivity_mapping
        if isinstance(constraint, CallableNumericalModel):
            assert con_map == {Y: {x, y}, y: {x, mu, sig, A}}
            assert fit_constraint.independent_vars == [x]
            assert fit_constraint.dependent_vars == [Y]
            assert fit_constraint.interdependent_vars == [y]
            assert fit_constraint.params == [A, mu, sig]
        else:
            # TODO if these constraints can somehow be written as integrals
            # depending on y and x this if/else should be removed.
            assert con_map == {fit_constraint.dependent_vars[0]: {A}}
            assert fit_constraint.independent_vars == []
            assert len(fit_constraint.dependent_vars) == 1
            assert fit_constraint.interdependent_vars == []
            assert fit_constraint.params == [A, mu, sig]

        # Finally, test if the constraint worked
        fit_result = fit.execute(options={'eps': 1e-15, 'ftol': 1e-10})
        unconstr_value = fit.minimizer.wrapped_constraints[0]['fun'](
            **unconstr_result.params)
        constr_value = fit.minimizer.wrapped_constraints[0]['fun'](
            **fit_result.params)

        # TODO: because of a pytest bug we have to phrase the assert like this
        assert constr_value[0] == pytest.approx(0, abs=1e-10)
    # And check that the constraint was poorly met before it was imposed
    assert not unconstr_value[0] == pytest.approx(0.0, 1e-1)
Example #15
from symfit import parameters, variables, Fit, Piecewise, exp, Eq, Model
import numpy as np
import matplotlib.pyplot as plt

x, y = variables('x, y')
a, b, x0 = parameters('a, b, x0')

# Make a piecewise model
y1 = x**2 - a * x
y2 = a * x + b
model = Model({y: Piecewise((y1, x <= x0), (y2, x > x0))})

# As a constraint, we demand equality between the two models at the point x0.
# To do this, we substitute x -> x0 into both and demand equality using `Eq`.
constraints = [
    Eq(y1.subs({x: x0}), y2.subs({x: x0}))
]
# Generate example data
xdata = np.linspace(-4, 4., 50)
ydata = model(x=xdata, a=0.0, b=1.0, x0=1.0).y
np.random.seed(2)
ydata = np.random.normal(ydata, 0.5)  # add noise

# Help the fit by bounding the switchpoint between the models
x0.min = 0.8
x0.max = 1.2

fit = Fit(model, x=xdata, y=ydata, constraints=constraints)
fit_result = fit.execute()
print(fit_result)
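
# Plotting sketch: matplotlib is imported above but never used in the
# original listing; this overlays the fitted model on the data.
yfit = model(x=xdata, **fit_result.params).y
plt.scatter(xdata, ydata, s=10, label='data')
plt.plot(xdata, yfit, color='red', label='piecewise fit')
plt.axvline(fit_result.value(x0), linestyle='--', label='fitted x0')
plt.legend()
plt.show()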
Example #16
    def test_minimize(self):
        """
        Tests maximizing a function with and without constraints, taken from
        the scipy `minimize` tutorial, and compares the symfit result with
        the scipy result.
        https://docs.scipy.org/doc/scipy-0.18.1/reference/tutorial/optimize.html#constrained-minimization-of-multivariate-scalar-functions-minimize
        """
        x = Parameter(-1.0)
        y = Parameter(1.0)
        z = Variable()
        model = {z: 2 * x * y + 2 * x - x**2 - 2 * y**2}

        constraints = [
            Ge(y - 1, 0),  # y - 1 >= 0,
            Eq(x**3 - y, 0),  # x**3 - y == 0,
        ]

        def func(x, sign=1.0):
            """ Objective function """
            return sign * (2 * x[0] * x[1] + 2 * x[0] - x[0]**2 - 2 * x[1]**2)

        def func_deriv(x, sign=1.0):
            """ Derivative of objective function """
            dfdx0 = sign * (-2 * x[0] + 2 * x[1] + 2)
            dfdx1 = sign * (2 * x[0] - 4 * x[1])
            return np.array([dfdx0, dfdx1])

        cons = ({
            'type': 'eq',
            'fun': lambda x: np.array([x[0]**3 - x[1]]),
            'jac': lambda x: np.array([3.0 * (x[0]**2.0), -1.0])
        }, {
            'type': 'ineq',
            'fun': lambda x: np.array([x[1] - 1]),
            'jac': lambda x: np.array([0.0, 1.0])
        })

        # Unconstrained fit
        res = minimize(func, [-1.0, 1.0],
                       args=(-1.0, ),
                       jac=func_deriv,
                       method='SLSQP',
                       options={'disp': False})
        fit = Maximize(model)
        fit_result = fit.execute()

        self.assertAlmostEqual(fit_result.value(x), res.x[0])
        self.assertAlmostEqual(fit_result.value(y), res.x[1])

        # Same test, but with constraints in place.
        res = minimize(func, [-1.0, 1.0],
                       args=(-1.0, ),
                       jac=func_deriv,
                       constraints=cons,
                       method='SLSQP',
                       options={'disp': False})

        fit = Maximize(model, constraints=constraints)
        self.assertEqual(fit.constraints[0].constraint_type, Ge)
        self.assertEqual(fit.constraints[1].constraint_type, Eq)
        fit_result = fit.execute()
        self.assertAlmostEqual(fit_result.value(x), res.x[0])
        self.assertAlmostEqual(fit_result.value(y), res.x[1])
Example #17
from symfit import parameters, variables, Fit, Piecewise, exp, Eq, Model
import numpy as np
import matplotlib.pyplot as plt

t, y = variables('t, y')
a, b, d, k, t0 = parameters('a, b, d, k, t0')

# Make a piecewise model
y1 = a * t + b
y2 = d * exp(-k * t)
model = Model({y: Piecewise((y1, t <= t0), (y2, t > t0))})

# As a constraint, we demand that the derivatives of the two models are equal
# at the point t0. To do this, we substitute t -> t0 into the derivatives and
# demand equality using `Eq`.
constraints = [Eq(y1.diff(t).subs({t: t0}), y2.diff(t).subs({t: t0}))]

# Generate example data
tdata = np.linspace(0, 4., 200)
ydata = model(t=tdata, a=63, b=300, d=2205, k=3, t0=0.65).y
ydata = np.random.normal(ydata, 0.05 * ydata)  # add 5% noise

# Help the fit by bounding the switchpoint between the models and giving initial
# guesses
t0.min = 0.5
t0.max = 0.8
b.value = 320

fit = Fit(model, t=tdata, y=ydata, constraints=constraints)
fit_result = fit.execute()
print(fit_result)
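
# Plotting sketch: matplotlib is imported above but never used in the
# original listing; this overlays the fitted model on the data.
yfit = model(t=tdata, **fit_result.params).y
plt.scatter(tdata, ydata, s=10, label='data')
plt.plot(tdata, yfit, color='red', label='piecewise fit')
plt.axvline(fit_result.value(t0), linestyle='--', label='fitted t0')
plt.legend()
plt.show()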