Example #1
def test_pickle():
    """
    Make sure models are preserved when pickling.
    """
    a, b = parameters('a, b')
    x, y = variables('x, y')
    exact_model = Model({y: a * x ** b})
    constraint = Model.as_constraint(Eq(a, b), exact_model)
    num_model = CallableNumericalModel(
        {y: a * x ** b}, independent_vars=[x], params=[a, b]
    )
    connected_num_model = CallableNumericalModel(
        {y: a * x ** b}, connectivity_mapping={y: {x, a, b}}
    )
    # Test if lsoda args and kwargs are pickled too
    ode_model = ODEModel({D(y, x): a * x + b}, {x: 0.0}, 3, 4, some_kwarg=True)

    models = [exact_model, constraint, num_model, ode_model, connected_num_model]
    for model in models:
        new_model = pickle.loads(pickle.dumps(model))
        # Compare signatures
        assert model.__signature__ == new_model.__signature__
        # Trigger the cached vars because we compare the `__dict__`s
        model.vars
        new_model.vars
        # Explicitly make sure the connectivity mapping is identical.
        assert model.connectivity_mapping == new_model.connectivity_mapping
        if not isinstance(model, ODEModel):
            model.function_dict
            model.vars_as_functions
            new_model.function_dict
            new_model.vars_as_functions
        assert model.__dict__ == new_model.__dict__
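
Boiled down, the round-trip these assertions exercise looks like this; a minimal sketch using only the public symfit API, with an illustrative model:

import pickle
from symfit import parameters, variables, Model

a, b = parameters('a, b')
x, y = variables('x, y')
model = Model({y: a * x + b})

# A pickled-and-restored model keeps its call signature.
restored = pickle.loads(pickle.dumps(model))
assert model.__signature__ == restored.__signature__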
Example #2
def test_custom_objective(recwarn):
    """
    Compare the result of a custom objective with the symbolic result.
    :return:
    """
    # Create test data
    xdata = np.linspace(0, 100, 25)  # 25 points from 0 to 100
    a_vec = np.random.normal(15.0, scale=2.0, size=xdata.shape)
    b_vec = np.random.normal(100, scale=2.0, size=xdata.shape)
    ydata = a_vec * xdata + b_vec  # Points scattered around the line 15 * x + 100

    # Normal symbolic fit
    a = Parameter('a', value=0, min=0.0, max=1000)
    b = Parameter('b', value=0, min=0.0, max=1000)
    x = Variable('x')
    y = Variable('y')
    model = {y: a * x + b}

    fit = Fit(model, xdata, ydata, minimizer=BFGS)
    fit_result = fit.execute()

    def f(x, a, b):
        return a * x + b

    def chi_squared(a, b):
        return np.sum((ydata - f(xdata, a, b))**2)

    # Should no longer raise warnings, because internally we practice
    # what we preach.
    fit_custom = BFGS(chi_squared, [a, b])
    assert len(recwarn) == 0

    fit_custom_result = fit_custom.execute()

    assert isinstance(fit_custom_result, FitResults)
    assert fit_custom_result.value(a) == pytest.approx(fit_result.value(a),
                                                       1e-5)
    assert fit_custom_result.value(b) == pytest.approx(fit_result.value(b),
                                                       1e-4)

    # New preferred usage, multi-component friendly.
    with pytest.raises(TypeError):
        callable_model = CallableNumericalModel(
            chi_squared, connectivity_mapping={y: {a, b}})
    callable_model = CallableNumericalModel({y: chi_squared},
                                            connectivity_mapping={y: {a, b}})
    assert callable_model.params == [a, b]
    assert callable_model.independent_vars == []
    assert callable_model.dependent_vars == [y]
    assert callable_model.interdependent_vars == []
    assert callable_model.connectivity_mapping == {y: {a, b}}
    fit_custom = BFGS(callable_model, [a, b])
    fit_custom_result = fit_custom.execute()

    assert isinstance(fit_custom_result, FitResults)
    assert fit_custom_result.value(a) == pytest.approx(fit_result.value(a),
                                                       1e-5)
    assert fit_custom_result.value(b) == pytest.approx(fit_result.value(b),
                                                       1e-4)
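
For quick reference, the multi-component-friendly usage exercised above reduces to this pattern; a minimal sketch, with illustrative data and parameter values:

import numpy as np
from symfit import parameters, variables, CallableNumericalModel
from symfit.core.minimizers import BFGS

a, b = parameters('a, b')
y, = variables('y')
xdata = np.linspace(0, 10, 50)
ydata = 3 * xdata + 2

def chi_squared(a, b):
    # Scalar objective: sum of squared residuals of a linear model.
    return np.sum((ydata - (a * xdata + b))**2)

# Wrap the scalar objective so the minimizer knows its parameters.
model = CallableNumericalModel({y: chi_squared},
                               connectivity_mapping={y: {a, b}})
result = BFGS(model, [a, b]).execute()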
Example #3
def test_CallableNumericalModel2D():
    """
    Apply a CallableNumericalModel to 2D data, to see if it is
    agnostic to data shape.
    """
    shape = (30, 40)

    def function(a, b):
        out = np.ones(shape) * a
        out[15:, :] += b
        return out

    a, b = parameters('a, b')
    y, = variables('y')

    model = CallableNumericalModel({y: function}, [], [a, b])
    data = 15 * np.ones(shape)
    data[15:, :] += 20

    fit = Fit(model, y=data)
    fit_result = fit.execute()
    assert fit_result.value(a) == pytest.approx(15)
    assert fit_result.value(b) == pytest.approx(20)

    def flattened_function(a, b):
        out = np.ones(shape) * a
        out[15:, :] += b
        return out.flatten()

    model = CallableNumericalModel({y: flattened_function}, [], [a, b])
    data = 15 * np.ones(shape)
    data[15:, :] += 20
    data = data.flatten()

    fit = Fit(model, y=data)
    flat_result = fit.execute()

    assert fit_result.value(a) == pytest.approx(flat_result.value(a))
    assert fit_result.value(b) == pytest.approx(flat_result.value(b))

    assert fit_result.stdev(a) is None and flat_result.stdev(a) is None
    assert fit_result.stdev(b) is None and flat_result.stdev(b) is None

    assert fit_result.r_squared == pytest.approx(flat_result.r_squared)
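
The same shape-agnosticism presumably extends to models with an independent variable, since everything reduces to NumPy broadcasting; a sketch under that assumption, with illustrative data:

import numpy as np
from symfit import parameters, variables, Fit, CallableNumericalModel

a, b = parameters('a, b')
x, y = variables('x, y')

def plane(x, a, b):
    # Broadcasts over any shape of x.
    return a * x + b

model = CallableNumericalModel({y: plane},
                               connectivity_mapping={y: {x, a, b}})
xdata = np.random.random((30, 40))
fit_result = Fit(model, x=xdata, y=2.5 * xdata + 1.0).execute()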
Example #4
def test_CallableNumericalModel_infer_connectivity():
    """
    When a CallableNumericalModel is initiated with symbolical and
    non-symbolical components, only the connectivity mapping for
    non-symbolical part has to be provided.
    """
    x, y, z = variables('x, y, z')
    a, b = parameters('a, b')
    model_dict = {z: lambda y, a, b: a * y + b,
                  y: x ** a}
    mixed_model = CallableNumericalModel(
        model_dict, connectivity_mapping={z: {y, a, b}}
    )
    assert mixed_model.connectivity_mapping == {z: {y, a, b}, y: {x, a}}
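
Given that inferred mapping, the variable classification should come out as below; a sketch extrapolated from the behaviour the constraint examples later in this listing assert:

assert mixed_model.independent_vars == [x]
assert mixed_model.interdependent_vars == [y]
assert mixed_model.dependent_vars == [z]
assert mixed_model.params == [a, b]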
Example #5
    def gen_fit_objs(x, a, minimizer):
        """Generates linear fits with different a parameter values."""
        for a_i in a:
            a_par = Parameter('a', 4.0, min=0.0, max=20)
            b_par = Parameter('b', 1.2, min=0.0, max=2)
            x_var = Variable('x')
            y_var = Variable('y')

            con_map = {y_var: {x_var, a_par, b_par}}
            model = CallableNumericalModel({y_var: f}, connectivity_mapping=con_map)

            fit = Fit(
                model, x, a_i * x + 1, minimizer=minimizer,
                objective=SqrtLeastSquares if minimizer is not MINPACK else VectorLeastSquares
            )
            yield fit
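
A sketch of how such a generator might be consumed; `f` is defined elsewhere in the suite, so the linear stand-in below is hypothetical:

import numpy as np
from symfit.core.minimizers import BFGS

def f(x, a, b):
    # Hypothetical stand-in for the suite's model function.
    return a * x + b

xdata = np.linspace(0, 10, 25)
for fit in gen_fit_objs(xdata, [2.0, 3.0, 4.0], BFGS):
    fit.execute()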
Example #6
    def setup_class(cls):
        xdata = np.linspace(1, 10, 10)
        ydata = 3 * xdata**2

        cls.a = Parameter('a')
        cls.b = Parameter('b')

        x = Variable('x')
        y = Variable('y')
        model = Model({y: cls.a * x**cls.b})

        fit = Fit(model, x=xdata, y=ydata)
        cls.fit_result = fit.execute()
        fit = Fit(model, x=xdata, y=ydata, minimizer=MINPACK)
        cls.minpack_result = fit.execute()
        fit = Fit(model, x=xdata, objective=LogLikelihood)
        cls.likelihood_result = fit.execute()
        fit = Fit(model, x=xdata, y=ydata, minimizer=[BFGS, NelderMead])
        cls.chained_result = fit.execute()

        z = Variable('z')
        constraints = [
            Eq(cls.a, cls.b),
            CallableNumericalModel.as_constraint(
                {z: ge_constraint},
                connectivity_mapping={z: {cls.a}},
                constraint_type=Ge,
                model=model)
        ]
        fit = Fit(model, x=xdata, y=ydata, constraints=constraints)
        cls.constrained_result = fit.execute()
        fit = Fit(model,
                  x=xdata,
                  y=ydata,
                  constraints=constraints,
                  minimizer=BasinHopping)
        cls.constrained_basinhopping_result = fit.execute()
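
ge_constraint is defined elsewhere in the suite (it has to live at module level to be picklable); a plausible stand-in, hypothetical here:

def ge_constraint(a):
    # Read through constraint_type=Ge as: a - 1 >= 0, i.e. a >= 1.
    return a - 1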
Example #7
def test_pickle():
    """
    Test the picklability of the different minimizers.
    """
    # Create test data
    xdata = np.linspace(0, 100, 100)  # 100 points from 0 to 100
    a_vec = np.random.normal(15.0, scale=2.0, size=xdata.shape)
    b_vec = np.random.normal(100, scale=2.0, size=xdata.shape)
    ydata = a_vec * xdata + b_vec  # Points scattered around the line 15 * x + 100

    # Normal symbolic fit
    a = Parameter('a', value=0, min=0.0, max=1000)
    b = Parameter('b', value=0, min=0.0, max=1000)
    x, y = variables('x, y')

    # Make a set of all ScipyMinimizers, and add a chained minimizer.
    scipy_minimizers = list(subclasses(ScipyMinimize))
    chained_minimizer = (DifferentialEvolution, BFGS)
    scipy_minimizers.append(chained_minimizer)
    constrained_minimizers = subclasses(ScipyConstrainedMinimize)
    # Test for all of them if they can be pickled.
    for minimizer in scipy_minimizers:
        if minimizer in constrained_minimizers:
            constraints = [Ge(b, a)]
        else:
            constraints = []
        model = CallableNumericalModel({y: f},
                                       independent_vars=[x],
                                       params=[a, b])
        fit = Fit(model,
                  x=xdata,
                  y=ydata,
                  minimizer=minimizer,
                  constraints=constraints)
        if minimizer is not MINPACK:
            assert isinstance(fit.objective, LeastSquares)
            assert isinstance(fit.minimizer.objective, LeastSquares)
        else:
            assert isinstance(fit.objective, VectorLeastSquares)
            assert isinstance(fit.minimizer.objective, VectorLeastSquares)

        fit = fit.minimizer  # Just check if the minimizer pickles
        dump = pickle.dumps(fit)
        pickled_fit = pickle.loads(dump)
        problematic_attr = [
            'objective', '_pickle_kwargs', 'wrapped_objective', 'constraints',
            'wrapped_constraints', 'local_minimizer', 'minimizers'
        ]

        for key, value in fit.__dict__.items():
            new_value = pickled_fit.__dict__[key]
            try:
                assert value == new_value
            except AssertionError as err:
                if key not in problematic_attr:
                    raise err
                # These attr are new instances, and therefore do not
                # pass an equality test. All we can do is see if they
                # are at least the same type.
                if isinstance(value, (list, tuple)):
                    for val1, val2 in zip(value, new_value):
                        assert isinstance(val1, val2.__class__)
                        if key == 'constraints':
                            assert val1.model.constraint_type == val2.model.constraint_type
                            assert list(
                                val1.model.model_dict.values())[0] == list(
                                    val2.model.model_dict.values())[0]
                            assert val1.model.independent_vars == val2.model.independent_vars
                            assert val1.model.params == val2.model.params
                            assert val1.model.__signature__ == val2.model.__signature__
                        elif key == 'wrapped_constraints':
                            if isinstance(val1, dict):
                                assert val1['type'] == val2['type']
                                assert set(val1.keys()) == set(val2.keys())
                            elif isinstance(val1, NonlinearConstraint):
                                # For trust-ncg we manually check if
                                # their dicts are equal, because no
                                # __eq__ is implemented on
                                # NonlinearConstraint.
                                assert len(val1.__dict__) == len(val2.__dict__)
                                for attr in val1.__dict__:
                                    try:
                                        assert val1.__dict__[
                                            attr] == val2.__dict__[attr]
                                    except AssertionError:
                                        assert isinstance(
                                            val1.__dict__[attr],
                                            val2.__dict__[attr].__class__)
                            else:
                                raise NotImplementedError(
                                    'No such constraint type is known.')
                elif key == '_pickle_kwargs':
                    assert FitResults._array_safe_dict_eq(value, new_value)
                else:
                    assert isinstance(new_value, value.__class__)
        assert set(fit.__dict__.keys()) == set(pickled_fit.__dict__.keys())

        # Test if we converge to the same result.
        np.random.seed(2)
        res_before = fit.execute()
        np.random.seed(2)
        res_after = pickled_fit.execute()
        assert FitResults._array_safe_dict_eq(res_before.__dict__,
                                              res_after.__dict__)
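
Stripped of the attribute-by-attribute bookkeeping, the pickling contract under test is just this; the model and minimizer choice are illustrative:

import pickle
import numpy as np
from symfit import parameters, variables, Fit, Model
from symfit.core.minimizers import BFGS

a, b = parameters('a, b')
x, y = variables('x, y')
xdata = np.linspace(0, 10, 20)
fit = Fit(Model({y: a * x + b}), x=xdata, y=3 * xdata + 2, minimizer=BFGS)

# The minimizer survives a pickle round-trip and still runs.
minimizer = pickle.loads(pickle.dumps(fit.minimizer))
minimizer.execute()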
Example #8
def make_linear_model(model, data):
    new_dict = {
        k: wrapped_func(v, data[k], [par.name for par in model.linear_params])
        for k, v in model.model_dict.items()
    }
    new_params = [par for par in model.params if par not in model.linear_params]
    return CallableNumericalModel(new_dict, model.independent_vars, new_params)
Example #9
def test_CallableNumericalModel():
    x, y, z = variables('x, y, z')
    a, b = parameters('a, b')

    model = CallableModel({y: a * x + b})
    numerical_model = CallableNumericalModel(
        {y: lambda x, a, b: a * x + b}, [x], [a, b]
    )
    assert model.__signature__ == numerical_model.__signature__

    xdata = np.linspace(0, 10)
    ydata = model(x=xdata, a=5.5, b=15.0).y + np.random.normal(0, 1)

    symbolic_answer = np.array(model(x=xdata, a=5.5, b=15.0))
    numerical_answer = np.array(numerical_model(x=xdata, a=5.5, b=15.0))

    assert numerical_answer == pytest.approx(symbolic_answer)

    faulty_model = CallableNumericalModel({y: lambda x, a, b: a * x + b},
                                          [], [a, b])
    assert not model.__signature__ == faulty_model.__signature__
    with pytest.raises(TypeError):
        # This is an incorrect signature, even though the lambda function is
        # correct. Should fail.
        faulty_model(xdata, 5.5, 15.0)

    # Faulty model whose components do not all accept all of the args
    faulty_model = CallableNumericalModel(
        {y: lambda x, a, b: a * x + b, z: lambda x, a: x**a}, [x], [a, b]
    )
    assert model.__signature__ == faulty_model.__signature__

    with pytest.raises(TypeError):
        # Lambda got an unexpected keyword 'b'
        faulty_model(xdata, 5.5, 15.0)

    # Faulty model with a wrongly named argument
    faulty_model = CallableNumericalModel(
        {y: lambda x, a, c=5: a * x + c}, [x], [a, b]
    )
    assert model.__signature__ == faulty_model.__signature__

    with pytest.raises(TypeError):
        # Lambda got an unexpected keyword 'b'
        faulty_model(xdata, 5.5, 15.0)

    # Correct version of the previous model
    numerical_model = CallableNumericalModel(
        {y: lambda x, a, b: a * x + b, z: lambda x, a: x ** a},
        connectivity_mapping={y: {a, b, x}, z: {x, a}}
    )
    # Mixed symbolic/numerical version of the same model
    mixed_model = CallableNumericalModel(
        {y: lambda x, a, b: a * x + b, z: x ** a}, [x],
        [a, b]
    )

    numerical_answer = np.array(numerical_model(x=xdata, a=5.5, b=15.0))
    mixed_answer = np.array(mixed_model(x=xdata, a=5.5, b=15.0))
    assert numerical_answer == pytest.approx(mixed_answer)

    zdata = mixed_model(x=xdata, a=5.5, b=15.0).z + np.random.normal(0, 1)

    # Check if the fits are the same
    fit = Fit(mixed_model, x=xdata, y=ydata, z=zdata)
    mixed_result = fit.execute()
    fit = Fit(numerical_model, x=xdata, y=ydata, z=zdata)
    numerical_result = fit.execute()
    for param in [a, b]:
        assert mixed_result.value(param) == pytest.approx(numerical_result.value(param))
        if mixed_result.stdev(param) is not None and numerical_result.stdev(param) is not None:
            assert mixed_result.stdev(param) == pytest.approx(numerical_result.stdev(param))
        else:
        assert mixed_result.stdev(param) is None and numerical_result.stdev(param) is None
    assert mixed_result.r_squared == pytest.approx(numerical_result.r_squared)

    # Test if the constrained syntax is supported
    fit = Fit(numerical_model, x=xdata, y=ydata,
              z=zdata, constraints=[Eq(a, b)])
    constrained_result = fit.execute()
    assert constrained_result.value(a) == pytest.approx(constrained_result.value(b))
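
The interchangeability this test leans on, reduced to a self-contained sketch (parameter values illustrative):

import numpy as np
from symfit import parameters, variables, CallableModel, CallableNumericalModel

a, b = parameters('a, b')
x, y = variables('x, y')
symbolic = CallableModel({y: a * x + b})
numeric = CallableNumericalModel({y: lambda x, a, b: a * x + b},
                                 connectivity_mapping={y: {x, a, b}})

xdata = np.linspace(0, 10)
# Same call signature, same numbers.
assert symbolic.__signature__ == numeric.__signature__
assert np.allclose(symbolic(x=xdata, a=2.0, b=1.0).y,
                   numeric(x=xdata, a=2.0, b=1.0).y)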
Example #10
    def test_constrained_dependent_on_model(self):
        """
        For a simple Gaussian distribution, we test if Models of various types
        can be used as constraints. Of particular interest are NumericalModels,
        which can be used to fix the integral of the model during the fit to 1,
        as it should be for a probability distribution.
        :return:
        """
        A, mu, sig = parameters('A, mu, sig')
        x, y, Y = variables('x, y, Y')
        i = Idx('i', (0, 1000))
        sig.min = 0.0

        model = Model({y: A * Gaussian(x, mu=mu, sig=sig)})

        # Generate data, 1000 samples from a N(1.2, 2) distribution
        np.random.seed(2)
        xdata = np.random.normal(1.2, 2, 1000)
        ydata, xedges = np.histogram(xdata, bins=int(np.sqrt(len(xdata))), density=True)
        xcentres = (xedges[1:] + xedges[:-1]) / 2

        # Unconstrained fit
        fit = Fit(model, x=xcentres, y=ydata)
        unconstr_result = fit.execute()

        # Constraints must be scalar models.
        with self.assertRaises(ModelError):
            Model.as_constraint([A - 1, sig - 1], model, constraint_type=Eq)
        constraint_exact = Model.as_constraint(
            A * sqrt(2 * sympy.pi) * sig - 1, model, constraint_type=Eq
        )
        # Only when explicitly asked do models behave as constraints.
        self.assertTrue(hasattr(constraint_exact, 'constraint_type'))
        self.assertEqual(constraint_exact.constraint_type, Eq)
        self.assertFalse(hasattr(model, 'constraint_type'))

        # Now let's make some valid constraints and see if they are respected!
        # TODO: These first two should be symbolic integrals over `y` instead,
        # but currently this is not converted into a numpy/scipy function,
        # so the first two are not valid constraints.
        constraint_model = Model.as_constraint(A - 1, model, constraint_type=Eq)
        constraint_exact = Eq(A, 1)
        constraint_num = CallableNumericalModel.as_constraint(
            {Y: lambda x, y: simps(y, x) - 1},  # Integrate using simps
            model=model,
            connectivity_mapping={Y: {x, y}},
            constraint_type=Eq
        )

        # Test for all these different types of constraint.
        for constraint in [constraint_model, constraint_exact, constraint_num]:
            if not isinstance(constraint, Eq):
                self.assertEqual(constraint.constraint_type, Eq)

            xcentres = (xedges[1:] + xedges[:-1]) / 2
            fit = Fit(model, x=xcentres, y=ydata, constraints=[constraint])
            # Test if conversion into a constraint was done properly
            fit_constraint = fit.constraints[0]
            self.assertEqual(fit.model.params, fit_constraint.params)
            self.assertEqual(fit_constraint.constraint_type, Eq)

            con_map = fit_constraint.connectivity_mapping
            if isinstance(constraint, CallableNumericalModel):
                self.assertEqual(con_map, {Y: {x, y}, y: {x, mu, sig, A}})
                self.assertEqual(fit_constraint.independent_vars, [x])
                self.assertEqual(fit_constraint.dependent_vars, [Y])
                self.assertEqual(fit_constraint.interdependent_vars, [y])
                self.assertEqual(fit_constraint.params, [A, mu, sig])
            else:
                # ToDo: if these constraints can somehow be written as integrals
                # depending on y and x this if/else should be removed.
                self.assertEqual(con_map,
                                 {fit_constraint.dependent_vars[0]: {A}})
                self.assertEqual(fit_constraint.independent_vars, [])
                self.assertEqual(len(fit_constraint.dependent_vars), 1)
                self.assertEqual(fit_constraint.interdependent_vars, [])
                self.assertEqual(fit_constraint.params, [A, mu, sig])

            # Finally, test if the constraint worked
            fit_result = fit.execute(options={'eps': 1e-15, 'ftol': 1e-10})
            unconstr_value = fit.minimizer.wrapped_constraints[0]['fun'](**unconstr_result.params)
            constr_value = fit.minimizer.wrapped_constraints[0]['fun'](**fit_result.params)
            self.assertAlmostEqual(constr_value[0], 0.0, 10)
        # And if it was very poorly met before
        self.assertNotAlmostEqual(unconstr_value[0], 0.0, 2)
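
After such a constrained fit, the normalisation can also be checked directly; a sketch reusing the example's names (simps is scipy.integrate.simps, renamed simpson in recent SciPy):

from scipy.integrate import simps

# The Eq constraint should have driven this area to (numerically) 1.
area = simps(model(x=xcentres, **fit_result.params).y, xcentres)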
Example #11
def test_constrained_dependent_on_model():
    """
    For a simple Gaussian distribution, we test if Models of various types
    can be used as constraints. Of particular interest are NumericalModels,
    which can be used to fix the integral of the model during the fit to 1,
    as it should be for a probability distribution.
    :return:
    """
    A, mu, sig = parameters('A, mu, sig')
    x, y, Y = variables('x, y, Y')
    i = Idx('i', (0, 1000))
    sig.min = 0.0

    model = GradientModel({y: A * Gaussian(x, mu=mu, sig=sig)})

    # Generate data, 1000 samples from a N(1.2, 2) distribution
    np.random.seed(2)
    xdata = np.random.normal(1.2, 2, 1000)
    ydata, xedges = np.histogram(xdata,
                                 bins=int(np.sqrt(len(xdata))),
                                 density=True)
    xcentres = (xedges[1:] + xedges[:-1]) / 2

    # Unconstrained fit
    fit = Fit(model, x=xcentres, y=ydata)
    unconstr_result = fit.execute()

    # Constraints must be scalar models.
    with pytest.raises(ModelError):
        Model.as_constraint([A - 1, sig - 1], model, constraint_type=Eq)

    constraint_exact = Model.as_constraint(A * sqrt(2 * sympy.pi) * sig - 1,
                                           model,
                                           constraint_type=Eq)
    # Only when explicitly asked do models behave as constraints.
    assert hasattr(constraint_exact, 'constraint_type')
    assert constraint_exact.constraint_type == Eq
    assert not hasattr(model, 'constraint_type')

    # Now let's make some valid constraints and see if they are respected!
    # FIXME These first two should be symbolic integrals over `y` instead,
    # but currently this is not converted into a numpy/scipy function. So
    # instead the first two are not valid constraints.
    constraint_model = Model.as_constraint(A - 1, model, constraint_type=Eq)
    constraint_exact = Eq(A, 1)
    constraint_num = CallableNumericalModel.as_constraint(
        {
            Y: lambda x, y: simps(y, x) - 1
        },  # Integrate using simps
        model=model,
        connectivity_mapping={Y: {x, y}},
        constraint_type=Eq)

    # Test for all these different types of constraint.
    for constraint in [constraint_model, constraint_exact, constraint_num]:
        if not isinstance(constraint, Eq):
            assert constraint.constraint_type == Eq

        xcentres = (xedges[1:] + xedges[:-1]) / 2
        fit = Fit(model, x=xcentres, y=ydata, constraints=[constraint])
        # Test if conversion into a constraint was done properly
        fit_constraint = fit.constraints[0]
        assert fit.model.params == fit_constraint.params
        assert fit_constraint.constraint_type == Eq

        con_map = fit_constraint.connectivity_mapping
        if isinstance(constraint, CallableNumericalModel):
            assert con_map == {Y: {x, y}, y: {x, mu, sig, A}}
            assert fit_constraint.independent_vars == [x]
            assert fit_constraint.dependent_vars == [Y]
            assert fit_constraint.interdependent_vars == [y]
            assert fit_constraint.params == [A, mu, sig]
        else:
            # TODO if these constraints can somehow be written as integrals
            # depending on y and x this if/else should be removed.
            assert con_map == {fit_constraint.dependent_vars[0]: {A}}
            assert fit_constraint.independent_vars == []
            assert len(fit_constraint.dependent_vars) == 1
            assert fit_constraint.interdependent_vars == []
            assert fit_constraint.params == [A, mu, sig]

        # Finally, test if the constraint worked
        fit_result = fit.execute(options={'eps': 1e-15, 'ftol': 1e-10})
        unconstr_value = fit.minimizer.wrapped_constraints[0]['fun'](
            **unconstr_result.params)
        constr_value = fit.minimizer.wrapped_constraints[0]['fun'](
            **fit_result.params)

        # TODO: because of a bug in pytest we have to check it like this
        assert constr_value[0] == pytest.approx(0, abs=1e-10)
    # And if it was very poorly met before
    assert not unconstr_value[0] == pytest.approx(0.0, 1e-1)
Example #12
import numpy as np
from symfit import variables, parameters, Fit, CallableNumericalModel


def nonanalytical_func(x, a, b):
    """
    This can be any pythonic function and should be fitted, provided it
    returns something which is not easily written or supported as an
    analytical expression.
    """
    # Do your non-trivial magic here. In this case a Piecewise, although this
    # could also be done symbolically.
    y = np.zeros_like(x)
    y[x > b] = (a * (x - b) + b)[x > b]
    y[x <= b] = b
    return y


x, y1, y2 = variables('x, y1, y2')
a, b = parameters('a, b')

mixed_model = CallableNumericalModel(
    {y1: nonanalytical_func, y2: x**a},
    connectivity_mapping={y1: {x, a, b}}
)

# Generate data
xdata = np.linspace(0, 10)
y1data, y2data = mixed_model(x=xdata, a=1.3, b=4)
y1data = np.random.normal(y1data, 0.1 * y1data)
y2data = np.random.normal(y2data, 0.1 * y2data)

# Perform the fit
b.value = 3.5
fit = Fit(mixed_model, x=xdata, y1=y1data, y2=y2data)
fit_result = fit.execute()
print(fit_result)