Example No. 1
    def test_error_advanced(self):
        """
        Compare the error propagation of ConstrainedNumericalLeastSquares against
        NumericalLeastSquares.
        Models an example from the Mathematica docs and tries to replicate it:
        http://reference.wolfram.com/language/howto/FitModelsWithMeasurementErrors.html
        """
        data = [
            [0.9, 6.1, 9.5], [3.9, 6., 9.7], [0.3, 2.8, 6.6],
            [1., 2.2, 5.9], [1.8, 2.4, 7.2], [9., 1.7, 7.],
            [7.9, 8., 10.4], [4.9, 3.9, 9.], [2.3, 2.6, 7.4],
            [4.7, 8.4, 10.]
        ]
        xdata, ydata, zdata = [np.array(col) for col in zip(*data)]
        # errors = np.array([.4, .4, .2, .4, .1, .3, .1, .2, .2, .2])

        a = Parameter(3.0)
        b = Parameter(0.9)
        c = Parameter(5.0)
        x = Variable()
        y = Variable()
        z = Variable()
        model = {z: a * log(b * x + c * y)}

        const_fit = ConstrainedNumericalLeastSquares(model,
                                                     xdata,
                                                     ydata,
                                                     zdata,
                                                     absolute_sigma=False)
        const_result = const_fit.execute()
        fit = NumericalLeastSquares(model,
                                    xdata,
                                    ydata,
                                    zdata,
                                    absolute_sigma=False)
        std_result = fit.execute()

        self.assertEqual(const_fit.absolute_sigma, fit.absolute_sigma)

        self.assertAlmostEqual(const_result.value(a), std_result.value(a), 4)
        self.assertAlmostEqual(const_result.value(b), std_result.value(b), 4)
        self.assertAlmostEqual(const_result.value(c), std_result.value(c), 4)

        self.assertAlmostEqual(const_result.stdev(a), std_result.stdev(a), 4)
        self.assertAlmostEqual(const_result.stdev(b), std_result.stdev(b), 4)
        self.assertAlmostEqual(const_result.stdev(c), std_result.stdev(c), 4)
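The two fitters above should agree because they minimize the same least-squares objective. For reference, the same fit can be reproduced with plain scipy.optimize.curve_fit; this is a minimal standalone sketch, where the helper model_func and the p0 starting values (taken from the Parameter defaults above) are illustrative rather than part of the original test:

import numpy as np
from scipy.optimize import curve_fit

data = [[0.9, 6.1, 9.5], [3.9, 6., 9.7], [0.3, 2.8, 6.6], [1., 2.2, 5.9],
        [1.8, 2.4, 7.2], [9., 1.7, 7.], [7.9, 8., 10.4], [4.9, 3.9, 9.],
        [2.3, 2.6, 7.4], [4.7, 8.4, 10.]]
xdata, ydata, zdata = (np.array(col) for col in zip(*data))

def model_func(xy, a, b, c):
    # Same model as above: z = a * log(b * x + c * y)
    x, y = xy
    return a * np.log(b * x + c * y)

# curve_fit's default absolute_sigma=False matches the fits above.
popt, pcov = curve_fit(model_func, np.vstack((xdata, ydata)), zdata,
                       p0=[3.0, 0.9, 5.0])
perr = np.sqrt(np.diag(pcov))  # 1-sigma standard errors, as stdev() reports
print(popt, perr)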
Example No. 2
    def test_error_advanced(self):
        """
        Compare the error propagation of ConstrainedNumericalLeastSquares against
        NumericalLeastSquares.
        Models an example from the Mathematica docs and tries to replicate it:
        http://reference.wolfram.com/language/howto/FitModelsWithMeasurementErrors.html
        """
        data = [
            [0.9, 6.1, 9.5], [3.9, 6., 9.7], [0.3, 2.8, 6.6],
            [1., 2.2, 5.9], [1.8, 2.4, 7.2], [9., 1.7, 7.],
            [7.9, 8., 10.4], [4.9, 3.9, 9.], [2.3, 2.6, 7.4],
            [4.7, 8.4, 10.]
        ]
        xdata, ydata, zdata = [np.array(col) for col in zip(*data)]
        # errors = np.array([.4, .4, .2, .4, .1, .3, .1, .2, .2, .2])

        a = Parameter(3.0)
        b = Parameter(0.9)
        c = Parameter(5.0)
        x = Variable()
        y = Variable()
        z = Variable()
        model = {z: a * log(b * x + c * y)}

        const_fit = ConstrainedNumericalLeastSquares(model, xdata, ydata, zdata, absolute_sigma=False)
        const_result = const_fit.execute()
        fit = NumericalLeastSquares(model, xdata, ydata, zdata, absolute_sigma=False)
        std_result = fit.execute()

        self.assertEqual(const_fit.absolute_sigma, fit.absolute_sigma)

        self.assertAlmostEqual(const_result.value(a), std_result.value(a), 4)
        self.assertAlmostEqual(const_result.value(b), std_result.value(b), 4)
        self.assertAlmostEqual(const_result.value(c), std_result.value(c), 4)

        self.assertAlmostEqual(const_result.stdev(a), std_result.stdev(a), 4)
        self.assertAlmostEqual(const_result.stdev(b), std_result.stdev(b), 4)
        self.assertAlmostEqual(const_result.stdev(c), std_result.stdev(c), 4)
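Both fits pass absolute_sigma=False, meaning the covariance matrix (and hence every stdev) is rescaled by the reduced chi-square of the fit, following the same convention as scipy.optimize.curve_fit. A minimal sketch of that rescaling; the function name and the toy inputs are illustrative:

import numpy as np

def rescale_covariance(cov_abs, residuals, n_params):
    # With absolute_sigma=False, the absolute covariance matrix is multiplied
    # by the reduced chi-square (cf. the scipy.optimize.curve_fit docs).
    chi2_red = np.sum(residuals ** 2) / (residuals.size - n_params)
    return cov_abs * chi2_red

cov_rel = rescale_covariance(np.eye(3), np.array([0.1, -0.2, 0.05, 0.15]), 3)
print(np.sqrt(np.diag(cov_rel)))  # rescaled 1-sigma standard errors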
Example No. 3
    def test_error_advanced(self):
        """
        Models an example from the Mathematica docs and tries to replicate it
        using both symfit and scipy's curve_fit.
        http://reference.wolfram.com/language/howto/FitModelsWithMeasurementErrors.html
        """
        data = [
            [0.9, 6.1, 9.5], [3.9, 6., 9.7], [0.3, 2.8, 6.6],
            [1., 2.2, 5.9], [1.8, 2.4, 7.2], [9., 1.7, 7.],
            [7.9, 8., 10.4], [4.9, 3.9, 9.], [2.3, 2.6, 7.4],
            [4.7, 8.4, 10.]
        ]
        xdata, ydata, zdata = [np.array(col) for col in zip(*data)]
        xy = np.vstack((xdata, ydata))
        errors = np.array([.4, .4, .2, .4, .1, .3, .1, .2, .2, .2])

        a = Parameter(value=3.0)
        b = Parameter(value=0.9)
        c = Parameter(value=5)
        x = Variable('x')
        y = Variable('y')
        z = Variable('z')
        model = {z: a * log(b * x + c * y)}

        fit = Fit(model, xdata, ydata, zdata, absolute_sigma=False)
        fit_result = fit.execute()

        # Same as Mathematica default behavior.
        self.assertAlmostEqual(fit_result.value(a), 2.9956, 4)
        self.assertAlmostEqual(fit_result.value(b), 0.563212, 4)
        self.assertAlmostEqual(fit_result.value(c), 3.59732, 4)
        self.assertAlmostEqual(fit_result.stdev(a), 0.278304, 4)
        self.assertAlmostEqual(fit_result.stdev(b), 0.224107, 4)
        self.assertAlmostEqual(fit_result.stdev(c), 0.980352, 4)

        fit = Fit(model, xdata, ydata, zdata, absolute_sigma=True)
        fit_result = fit.execute()
        # Same as Mathematica in measurement-error mode, but without supplying
        # any errors.
        self.assertAlmostEqual(fit_result.value(a), 2.9956, 4)
        self.assertAlmostEqual(fit_result.value(b), 0.563212, 4)
        self.assertAlmostEqual(fit_result.value(c), 3.59732, 4)
        self.assertAlmostEqual(fit_result.stdev(a), 0.643259, 4)
        self.assertAlmostEqual(fit_result.stdev(b), 0.517992, 4)
        self.assertAlmostEqual(fit_result.stdev(c), 2.26594, 4)

        fit = Fit(model, xdata, ydata, zdata, sigma_z=errors)
        fit_result = fit.execute()

        popt, pcov, infodict, errmsg, ier = curve_fit(
            lambda x_vec, a, b, c: a * np.log(b * x_vec[0] + c * x_vec[1]),
            xy, zdata, sigma=errors, absolute_sigma=True, full_output=True)

        # Same as curve_fit?
        self.assertAlmostEqual(fit_result.value(a), popt[0], 4)
        self.assertAlmostEqual(fit_result.value(b), popt[1], 4)
        self.assertAlmostEqual(fit_result.value(c), popt[2], 4)
        self.assertAlmostEqual(fit_result.stdev(a), np.sqrt(pcov[0, 0]), 4)
        self.assertAlmostEqual(fit_result.stdev(b), np.sqrt(pcov[1, 1]), 4)
        self.assertAlmostEqual(fit_result.stdev(c), np.sqrt(pcov[2, 2]), 4)

        # Same as Mathematica with MEASUREMENT ERROR
        self.assertAlmostEqual(fit_result.value(a), 2.68807, 4)
        self.assertAlmostEqual(fit_result.value(b), 0.941344, 4)
        self.assertAlmostEqual(fit_result.value(c), 5.01541, 4)
        self.assertAlmostEqual(fit_result.stdev(a), 0.0974628, 4)
        self.assertAlmostEqual(fit_result.stdev(b), 0.247018, 4)
        self.assertAlmostEqual(fit_result.stdev(c), 0.597661, 4)
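With sigma_z=errors the quantity being minimized is the weighted chi-square, sum(((z_i - f(x_i, y_i)) / sigma_i)**2), for both symfit and curve_fit. A short standalone sketch evaluating it at the measurement-error estimates asserted above:

import numpy as np

data = [[0.9, 6.1, 9.5], [3.9, 6., 9.7], [0.3, 2.8, 6.6], [1., 2.2, 5.9],
        [1.8, 2.4, 7.2], [9., 1.7, 7.], [7.9, 8., 10.4], [4.9, 3.9, 9.],
        [2.3, 2.6, 7.4], [4.7, 8.4, 10.]]
xdata, ydata, zdata = (np.array(col) for col in zip(*data))
errors = np.array([.4, .4, .2, .4, .1, .3, .1, .2, .2, .2])

# Best-fit values from Mathematica's measurement-error mode, quoted above.
a, b, c = 2.68807, 0.941344, 5.01541
chi2 = np.sum(((zdata - a * np.log(b * xdata + c * ydata)) / errors) ** 2)
print(chi2)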
Example No. 4
import numpy as np
import pytest
from sympy import Idx
from symfit import Fit, Model, log, parameters, variables
from symfit import FlattenSum  # import path assumed, mirroring symfit's tests
from symfit.core.objectives import LogLikelihood, MinimizeModel
from symfit.distributions import Exp


def test_LogLikelihood():
    """
    Tests if the LogLikelihood objective gives the right shapes of output by
    comparing with its analytical equivalent.
    """
    # TODO: update these tests to use indexed variables in the future
    a, b = parameters('a, b')
    i = Idx('i', 100)
    x, y = variables('x, y')
    pdf = Exp(x, 1 / a) * Exp(x, b)

    np.random.seed(10)
    xdata = np.random.exponential(3.5, 100)

    # We use the negative log-likelihood for the model, because the objective
    # was designed to find the maximum when used with a *minimizer*, so it
    # has the opposite sign. Also test MinimizeModel at the same time.
    logL_model = Model({y: pdf})
    logL_exact = Model({y: -FlattenSum(log(pdf), i)})
    logL_numerical = LogLikelihood(logL_model, {x: xdata, y: None})
    logL_minmodel = MinimizeModel(logL_exact, data={x: xdata, y: None})

    # Test model jacobian and hessian shape
    eval_exact = logL_exact(x=xdata, a=2, b=3)
    jac_exact = logL_exact.eval_jacobian(x=xdata, a=2, b=3)
    hess_exact = logL_exact.eval_hessian(x=xdata, a=2, b=3)
    eval_minimizemodel = logL_minmodel(a=2, b=3)
    jac_minimizemodel = logL_minmodel.eval_jacobian(a=2, b=3)
    hess_minimizemodel = logL_minmodel.eval_hessian(a=2, b=3)
    eval_numerical = logL_numerical(a=2, b=3)
    jac_numerical = logL_numerical.eval_jacobian(a=2, b=3)
    hess_numerical = logL_numerical.eval_hessian(a=2, b=3)

    # TODO: These shapes should not have the ones! This is due to the current
    # convention that scalars should be returned as a 1d array by Models.
    assert eval_exact[0].shape == (1, )
    assert jac_exact[0].shape == (2, 1)
    assert hess_exact[0].shape == (2, 2, 1)
    # Test if identical to MinimizeModel
    assert eval_exact[0] == pytest.approx(eval_minimizemodel)
    assert jac_exact[0] == pytest.approx(jac_minimizemodel)
    assert hess_exact[0] == pytest.approx(hess_minimizemodel)

    # Test if these two models have the same call, jacobian, and hessian.
    # Since models always have components as their first dimension, we have
    # to slice that away.
    assert eval_exact.y == pytest.approx(eval_numerical)
    assert isinstance(eval_numerical, float)
    assert isinstance(eval_exact.y[0], float)
    assert np.squeeze(jac_exact[0], axis=-1) == pytest.approx(jac_numerical)
    assert isinstance(jac_numerical, np.ndarray)
    assert np.squeeze(hess_exact[0], axis=-1) == pytest.approx(hess_numerical)
    assert isinstance(hess_numerical, np.ndarray)

    fit = Fit(logL_exact, x=xdata, objective=MinimizeModel)
    fit_exact_result = fit.execute()
    fit = Fit(logL_model, x=xdata, objective=LogLikelihood)
    fit_num_result = fit.execute()
    assert fit_exact_result.value(a) == pytest.approx(fit_num_result.value(a))
    assert fit_exact_result.value(b) == pytest.approx(fit_num_result.value(b))
    assert fit_exact_result.stdev(a) == pytest.approx(fit_num_result.stdev(a))
    assert fit_exact_result.stdev(b) == pytest.approx(fit_num_result.stdev(b))
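The LogLikelihood objective evaluates the negative log-likelihood, -sum(log(pdf(x_i))), which is exactly what the FlattenSum model above spells out symbolically. A minimal NumPy sketch of that quantity, assuming symfit's Exp(x, lamb) distribution is lamb * exp(-lamb * x):

import numpy as np

def neg_loglikelihood(xdata, a, b):
    # pdf = Exp(x, 1/a) * Exp(x, b) = (1/a) * exp(-x/a) * b * exp(-b * x)
    pdf = (1 / a) * np.exp(-xdata / a) * b * np.exp(-b * xdata)
    return -np.sum(np.log(pdf))

np.random.seed(10)
xdata = np.random.exponential(3.5, 100)
print(neg_loglikelihood(xdata, a=2, b=3))  # should match eval_numerical above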
Example No. 5
    def test_error_advanced(self):
        """
        Models an example from the Mathematica docs and tries to replicate it
        using both symfit and scipy's curve_fit.
        http://reference.wolfram.com/language/howto/FitModelsWithMeasurementErrors.html
        """
        data = [
            [0.9, 6.1, 9.5], [3.9, 6., 9.7], [0.3, 2.8, 6.6],
            [1., 2.2, 5.9], [1.8, 2.4, 7.2], [9., 1.7, 7.],
            [7.9, 8., 10.4], [4.9, 3.9, 9.], [2.3, 2.6, 7.4],
            [4.7, 8.4, 10.]
        ]
        xdata, ydata, zdata = [np.array(col) for col in zip(*data)]
        xy = np.vstack((xdata, ydata))
        errors = np.array([.4, .4, .2, .4, .1, .3, .1, .2, .2, .2])

        a = Parameter(3.0)
        b = Parameter(0.9)
        c = Parameter(5)
        x = Variable()
        y = Variable()
        z = Variable()
        model = {z: a * log(b * x + c * y)}

        fit = Fit(model, xdata, ydata, zdata, absolute_sigma=False)
        fit_result = fit.execute()

        # Same as Mathematica default behavior.
        self.assertAlmostEqual(fit_result.value(a), 2.9956, 4)
        self.assertAlmostEqual(fit_result.value(b), 0.563212, 4)
        self.assertAlmostEqual(fit_result.value(c), 3.59732, 4)
        self.assertAlmostEqual(fit_result.stdev(a), 0.278304, 4)
        self.assertAlmostEqual(fit_result.stdev(b), 0.224107, 4)
        self.assertAlmostEqual(fit_result.stdev(c), 0.980352, 4)

        fit = Fit(model, xdata, ydata, zdata, absolute_sigma=True)
        fit_result = fit.execute()
        # Same as Mathematica in measurement-error mode, but without supplying
        # any errors.
        self.assertAlmostEqual(fit_result.value(a), 2.9956, 4)
        self.assertAlmostEqual(fit_result.value(b), 0.563212, 4)
        self.assertAlmostEqual(fit_result.value(c), 3.59732, 4)
        self.assertAlmostEqual(fit_result.stdev(a), 0.643259, 4)
        self.assertAlmostEqual(fit_result.stdev(b), 0.517992, 4)
        self.assertAlmostEqual(fit_result.stdev(c), 2.26594, 4)

        fit = Fit(model, xdata, ydata, zdata, sigma_z=errors)
        fit_result = fit.execute()

        popt, pcov, infodict, errmsg, ier = curve_fit(
            lambda x_vec, a, b, c: a * np.log(b * x_vec[0] + c * x_vec[1]),
            xy, zdata, sigma=errors, absolute_sigma=True, full_output=True)

        # Same as curve_fit?
        self.assertAlmostEqual(fit_result.value(a), popt[0], 4)
        self.assertAlmostEqual(fit_result.value(b), popt[1], 4)
        self.assertAlmostEqual(fit_result.value(c), popt[2], 4)
        self.assertAlmostEqual(fit_result.stdev(a), np.sqrt(pcov[0, 0]), 4)
        self.assertAlmostEqual(fit_result.stdev(b), np.sqrt(pcov[1, 1]), 4)
        self.assertAlmostEqual(fit_result.stdev(c), np.sqrt(pcov[2, 2]), 4)

        # Same as Mathematica with MEASUREMENT ERROR
        self.assertAlmostEqual(fit_result.value(a), 2.68807, 4)
        self.assertAlmostEqual(fit_result.value(b), 0.941344, 4)
        self.assertAlmostEqual(fit_result.value(c), 5.01541, 4)
        self.assertAlmostEqual(fit_result.stdev(a), 0.0974628, 4)
        self.assertAlmostEqual(fit_result.stdev(b), 0.247018, 4)
        self.assertAlmostEqual(fit_result.stdev(c), 0.597661, 4)
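Comparing the absolute_sigma=True and absolute_sigma=False blocks above shows the convention concretely: the relative-sigma standard errors are the absolute ones scaled by the square root of the reduced chi-square, so their ratio is the same for every parameter. A quick check with the values quoted in the assertions:

import numpy as np

stdev_abs = np.array([0.643259, 0.517992, 2.26594])   # absolute_sigma=True
stdev_rel = np.array([0.278304, 0.224107, 0.980352])  # absolute_sigma=False
print(stdev_rel / stdev_abs)  # constant ratio ~0.4326 = sqrt(chi2_reduced)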
Example No. 6
    def test_error_advanced(self):
        """
        Compare the error propagation of Fit against the MINPACK minimizer
        (the former NumericalLeastSquares).
        Models an example from the Mathematica docs and tries to replicate it:
        http://reference.wolfram.com/language/howto/FitModelsWithMeasurementErrors.html
        """
        data = [
            [0.9, 6.1, 9.5], [3.9, 6., 9.7], [0.3, 2.8, 6.6],
            [1., 2.2, 5.9], [1.8, 2.4, 7.2], [9., 1.7, 7.],
            [7.9, 8., 10.4], [4.9, 3.9, 9.], [2.3, 2.6, 7.4],
            [4.7, 8.4, 10.]
        ]
        xdata, ydata, zdata = [np.array(col) for col in zip(*data)]
        # errors = np.array([.4, .4, .2, .4, .1, .3, .1, .2, .2, .2])

        a = Parameter('a', 3.0)
        b = Parameter('b', 0.9)
        c = Parameter('c', 5.0)
        x = Variable('x')
        y = Variable('y')
        z = Variable('z')
        model = {z: a * log(b * x + c * y)}

        const_fit = Fit(model, xdata, ydata, zdata, absolute_sigma=False)
        self.assertEqual(len(const_fit.model(x=xdata, y=ydata, a=2, b=2, c=5)), 1)
        self.assertEqual(
            const_fit.model(x=xdata, y=ydata, a=2, b=2, c=5)[0].shape,
            (10,)
        )
        self.assertEqual(len(const_fit.model.eval_jacobian(x=xdata, y=ydata, a=2, b=2, c=5)), 1)
        self.assertEqual(
            const_fit.model.eval_jacobian(x=xdata, y=ydata, a=2, b=2, c=5)[0].shape,
            (3, 10)
        )
        self.assertEqual(len(const_fit.model.eval_hessian(x=xdata, y=ydata, a=2, b=2, c=5)), 1)
        self.assertEqual(
            const_fit.model.eval_hessian(x=xdata, y=ydata, a=2, b=2, c=5)[0].shape,
            (3, 3, 10)
        )

        self.assertEqual(const_fit.objective(a=2, b=2, c=5).shape,
                         tuple())
        self.assertEqual(
            const_fit.objective.eval_jacobian(a=2, b=2, c=5).shape,
            (3,)
        )
        self.assertEqual(
            const_fit.objective.eval_hessian(a=2, b=2, c=5).shape,
            (3, 3)
        )
        self.assertNotEqual(
            const_fit.objective.eval_hessian(a=2, b=2, c=5).dtype,
            object
        )

        const_result = const_fit.execute()
        fit = Fit(model, xdata, ydata, zdata, absolute_sigma=False, minimizer=MINPACK)
        std_result = fit.execute()

        self.assertEqual(const_fit.absolute_sigma, fit.absolute_sigma)

        self.assertAlmostEqual(const_result.value(a), std_result.value(a), 4)
        self.assertAlmostEqual(const_result.value(b), std_result.value(b), 4)
        self.assertAlmostEqual(const_result.value(c), std_result.value(c), 4)

        # This used to be a tighter equality test, but since we now use the
        # Hessian we actually get a more accurate value from the standard fit
        # than from MINPACK. Hence we check that it is roughly equal, and
        # that our stdev is greater than that of MINPACK.
        self.assertAlmostEqual(const_result.stdev(a) / std_result.stdev(a), 1, 2)
        self.assertAlmostEqual(const_result.stdev(b) / std_result.stdev(b), 1, 1)
        self.assertAlmostEqual(const_result.stdev(c) / std_result.stdev(c), 1, 2)

        self.assertGreaterEqual(const_result.stdev(a), std_result.stdev(a))
        self.assertGreaterEqual(const_result.stdev(b), std_result.stdev(b))
        self.assertGreaterEqual(const_result.stdev(c), std_result.stdev(c))
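The stdev comparison above rests on the standard relation between the objective Hessian and the parameter covariance: for a sum-of-squares objective S, H = grad^2 S ~ 2 J^T J, so cov = s^2 * inv(J^T J) = 2 * s^2 * inv(H), with s^2 the reduced chi-square. A toy sketch of that relation on a linear model; all names here are illustrative:

import numpy as np

# Toy linear model y = p0 + p1 * x, fitted by ordinary least squares.
rng = np.random.default_rng(0)
x = np.linspace(0, 1, 20)
y = 1.0 + 2.0 * x + rng.normal(0, 0.1, x.size)

J = np.column_stack([np.ones_like(x), x])  # model Jacobian w.r.t. (p0, p1)
p, *_ = np.linalg.lstsq(J, y, rcond=None)  # best-fit parameters
res = y - J @ p
s2 = res @ res / (x.size - 2)              # reduced chi-square
H = 2 * J.T @ J                            # Hessian of the sum of squares
cov = 2 * s2 * np.linalg.inv(H)            # equals s2 * inv(J.T @ J)
print(np.sqrt(np.diag(cov)))               # 1-sigma standard errors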
Example No. 7
import numpy as np
import pytest
from symfit import Fit, Parameter, Variable, log
from symfit.core.minimizers import MINPACK


def test_error_advanced():
    """
    Compare the error propagation of Fit against the MINPACK minimizer
    (the former NumericalLeastSquares).
    Models an example from the Mathematica docs and tries to replicate it:
    http://reference.wolfram.com/language/howto/FitModelsWithMeasurementErrors.html
    """
    data = [[0.9, 6.1, 9.5], [3.9, 6., 9.7], [0.3, 2.8, 6.6], [1., 2.2, 5.9],
            [1.8, 2.4, 7.2], [9., 1.7, 7.], [7.9, 8., 10.4], [4.9, 3.9, 9.],
            [2.3, 2.6, 7.4], [4.7, 8.4, 10.]]
    xdata, ydata, zdata = [np.array(col) for col in zip(*data)]
    # errors = np.array([.4, .4, .2, .4, .1, .3, .1, .2, .2, .2])

    a = Parameter('a', 3.0)
    b = Parameter('b', 0.9)
    c = Parameter('c', 5.0)
    x = Variable('x')
    y = Variable('y')
    z = Variable('z')
    model = {z: a * log(b * x + c * y)}

    const_fit = Fit(model, xdata, ydata, zdata, absolute_sigma=False)
    assert len(const_fit.model(x=xdata, y=ydata, a=2, b=2, c=5)) == 1
    assert const_fit.model(x=xdata, y=ydata, a=2, b=2, c=5)[0].shape == (10, )

    assert len(const_fit.model.eval_jacobian(x=xdata, y=ydata, a=2, b=2,
                                             c=5)) == 1
    assert const_fit.model.eval_jacobian(x=xdata, y=ydata, a=2, b=2,
                                         c=5)[0].shape == (3, 10)

    assert len(const_fit.model.eval_hessian(x=xdata, y=ydata, a=2, b=2,
                                            c=5)) == 1
    assert const_fit.model.eval_hessian(x=xdata, y=ydata, a=2, b=2,
                                        c=5)[0].shape == (3, 3, 10)

    assert const_fit.objective(a=2, b=2, c=5).shape == tuple()
    assert const_fit.objective.eval_jacobian(a=2, b=2, c=5).shape == (3, )

    assert const_fit.objective.eval_hessian(a=2, b=2, c=5).shape == (3, 3)
    assert const_fit.objective.eval_hessian(a=2, b=2, c=5).dtype != object

    const_result = const_fit.execute()
    fit = Fit(model, xdata, ydata, zdata, absolute_sigma=False,
              minimizer=MINPACK)
    std_result = fit.execute()

    assert const_fit.absolute_sigma == fit.absolute_sigma

    assert const_result.value(a) == pytest.approx(std_result.value(a), 1e-4)
    assert const_result.value(b) == pytest.approx(std_result.value(b), 1e-4)
    assert const_result.value(c) == pytest.approx(std_result.value(c), 1e-4)

    # This used to be a tighter equality test, but since we now use the
    # Hessian we actually get a more accurate value from the standard fit
    # than from MINPACK. Hence we check that it is roughly equal, and
    # that our stdev is greater than that of MINPACK.
    assert const_result.stdev(a) / std_result.stdev(a) == pytest.approx(
        1, 1e-2)
    assert const_result.stdev(b) / std_result.stdev(b) == pytest.approx(
        1, 1e-1)
    assert const_result.stdev(c) / std_result.stdev(c) == pytest.approx(
        1, 1e-2)

    assert const_result.stdev(a) >= std_result.stdev(a)
    assert const_result.stdev(b) >= std_result.stdev(b)
    assert const_result.stdev(c) >= std_result.stdev(c)
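The (3, 10) Jacobian and (3, 3, 10) Hessian shapes asserted above follow the convention of parameters first, data points last. The Jacobian shape can be checked by hand with sympy; the lambdify plumbing in this sketch is illustrative and not symfit internals:

import numpy as np
import sympy as sp

a, b, c, x, y = sp.symbols('a b c x y')
expr = a * sp.log(b * x + c * y)
jac = [sp.diff(expr, p) for p in (a, b, c)]  # one derivative per parameter
f_jac = sp.lambdify((x, y, a, b, c), jac, 'numpy')

xdata = np.linspace(1., 10., 10)
ydata = np.linspace(1., 10., 10)
out = np.array(f_jac(xdata, ydata, 2, 2, 5))
print(out.shape)  # (3, 10): one row per parameter, one column per point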