Example #1
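All examples assume import numpy as np and import pyerrors as pe. This test builds a pseudo Obs with a random number of samples and checks that derived_observable yields the same value and error whether the gradient comes from automatic differentiation or from numerical differentiation (num_grad=True); it also checks that an observable divided by itself is exactly one, with an error at machine precision.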
import numpy as np
import pyerrors as pe


def test_derived_observables():
    # Construct a pseudo Obs with a random error and number of samples
    test_obs = pe.pseudo_Obs(2, 0.1 * (1 + np.random.rand()), 't',
                             int(1000 * (1 + np.random.rand())))

    # Check if autograd and numgrad give the same result
    d_Obs_ad = pe.derived_observable(
        lambda x, **kwargs: x[0] * x[1] * np.sin(x[0] * x[1]),
        [test_obs, test_obs])
    d_Obs_ad.gamma_method()
    d_Obs_fd = pe.derived_observable(
        lambda x, **kwargs: x[0] * x[1] * np.sin(x[0] * x[1]),
        [test_obs, test_obs],
        num_grad=True)
    d_Obs_fd.gamma_method()

    assert d_Obs_ad.value == d_Obs_fd.value
    assert np.abs(4.0 * np.sin(4.0) - d_Obs_ad.value) < 1000 * np.finfo(
        np.float64).eps * np.abs(d_Obs_ad.value)
    assert np.abs(d_Obs_ad.dvalue - d_Obs_fd.dvalue) < 1000 * np.finfo(
        np.float64).eps * d_Obs_ad.dvalue

    i_am_one = pe.derived_observable(lambda x, **kwargs: x[0] / x[1],
                                     [d_Obs_ad, d_Obs_ad])
    i_am_one.gamma_method()

    assert i_am_one.value == 1.0
    assert i_am_one.dvalue < 2 * np.finfo(np.float64).eps
    assert i_am_one.e_dvalue['t'] <= 2 * np.finfo(np.float64).eps
    assert i_am_one.e_ddvalue['t'] <= 2 * np.finfo(np.float64).eps
Example #2
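This test applies the overloaded arithmetic operators and elementary functions directly to Obs objects and compares each result against the corresponding derived_observable call; the difference has to be compatible with zero.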
def test_function_overloading():
    a = pe.pseudo_Obs(17, 2.9, 'e1')
    b = pe.pseudo_Obs(4, 0.8, 'e1')

    fs = [
        lambda x: x[0] + x[1], lambda x: x[1] + x[0], lambda x: x[0] - x[1],
        lambda x: x[1] - x[0], lambda x: x[0] * x[1], lambda x: x[1] * x[0],
        lambda x: x[0] / x[1], lambda x: x[1] / x[0], lambda x: np.exp(x[0]),
        lambda x: np.sin(x[0]), lambda x: np.cos(x[0]), lambda x: np.tan(x[0]),
        lambda x: np.log(x[0]), lambda x: np.sqrt(np.abs(x[0])),
        lambda x: np.sinh(x[0]), lambda x: np.cosh(x[0]),
        lambda x: np.tanh(x[0])
    ]

    for f in fs:
        t1 = f([a, b])
        t2 = pe.derived_observable(f, [a, b])
        c = t2 - t1
        assert c.is_zero()

    assert np.log(np.exp(b)) == b
    assert np.exp(np.log(b)) == b
    assert np.sqrt(b**2) == b
    assert np.sqrt(b)**2 == b

    # The inverse trigonometric and hyperbolic overloads only need to
    # run without raising an exception
    np.arcsin(1 / b)
    np.arccos(1 / b)
    np.arctan(1 / b)
    np.arctanh(1 / b)
    np.sinc(1 / b)
Example #3
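Each overloaded function is checked in three ways: its per-sample fluctuations against a numerically differentiated derived_observable, its value against the function applied to the plain number, and its propagated error against the linear expectation dval * |f'(val)| from the analytic derivatives listed in deriv.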
def test_overloaded_functions():
    funcs = [
        np.exp, np.log, np.sin, np.cos, np.tan, np.sinh, np.cosh, np.arcsinh,
        np.arccosh
    ]
    deriv = [
        np.exp, lambda x: 1 / x, np.cos, lambda x: -np.sin(x),
        lambda x: 1 / np.cos(x)**2, np.cosh, np.sinh,
        lambda x: 1 / np.sqrt(x**2 + 1), lambda x: 1 / np.sqrt(x**2 - 1)
    ]
    val = 3 + 0.5 * np.random.rand()
    dval = 0.3 + 0.4 * np.random.rand()
    test_obs = pe.pseudo_Obs(val, dval, 't',
                             int(1000 * (1 + np.random.rand())))

    for i, item in enumerate(funcs):
        ad_obs = item(test_obs)
        fd_obs = pe.derived_observable(lambda x, **kwargs: item(x[0]),
                                       [test_obs],
                                       num_grad=True)
        ad_obs.gamma_method(S=0.01)
        assert np.max((ad_obs.deltas['t'] - fd_obs.deltas['t']) /
                      ad_obs.deltas['t']) < 1e-8, item.__name__
        assert np.abs(
            (ad_obs.value - item(val)) / ad_obs.value) < 1e-10, item.__name__
        assert np.abs(ad_obs.dvalue -
                      dval * np.abs(deriv[i](val))) < 1e-6, item.__name__
Example #4
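This variant of the overloading test additionally asserts that the individual fluctuations (deltas) of the difference vanish to machine precision, reporting the index of the failing function.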
def test_man_grad():
    a = pe.pseudo_Obs(17, 2.9, 'e1')
    b = pe.pseudo_Obs(4, 0.8, 'e1')

    fs = [
        lambda x: x[0] + x[1], lambda x: x[1] + x[0], lambda x: x[0] - x[1],
        lambda x: x[1] - x[0], lambda x: x[0] * x[1], lambda x: x[1] * x[0],
        lambda x: x[0] / x[1], lambda x: x[1] / x[0], lambda x: np.exp(x[0]),
        lambda x: np.sin(x[0]), lambda x: np.cos(x[0]), lambda x: np.tan(x[0]),
        lambda x: np.log(x[0]), lambda x: np.sqrt(x[0]),
        lambda x: np.sinh(x[0]), lambda x: np.cosh(x[0]),
        lambda x: np.tanh(x[0])
    ]

    for i, f in enumerate(fs):
        t1 = f([a, b])
        t2 = pe.derived_observable(f, [a, b])
        c = t2 - t1
        assert c.value == 0.0, str(i)
        assert np.all(np.abs(c.deltas['e1']) < 1e-14), str(i)
Example #5
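A composite function of five pseudo Obs is evaluated once via operator overloading and once via derived_observable; both the value and the per-sample fluctuations have to agree to high relative precision.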
def test_overloading():
    # Construct pseudo Obs with random values and errors
    obs_list = []
    for _ in range(5):
        value = np.abs(np.random.normal(5, 2)) + 2.0
        dvalue = np.abs(np.random.normal(0, 0.1)) + 1e-5
        obs_list.append(pe.pseudo_Obs(value, dvalue, 't', 2000))

    # Test if the error is processed correctly
    def f(x):
        return x[0] * x[1] + np.sin(x[2]) * np.exp(
            x[3] / x[1] / x[0]) - np.sqrt(2) / np.cosh(x[4] / x[0])

    o_obs = f(obs_list)
    d_obs = pe.derived_observable(f, obs_list)

    assert np.max(
        np.abs((o_obs.deltas['t'] - d_obs.deltas['t']) /
               o_obs.deltas['t'])) < 1e-7, str(obs_list)
    assert np.abs((o_obs.value - d_obs.value) / o_obs.value) < 1e-10
Example #6
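fit_general wraps scipy.odr to fit y = func(x) with errors on y alone (ordinary least squares) or on both x and y (orthogonal distance regression) and propagates the result through derived_observable with numerical gradients, so the fit parameters are returned as Obs.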
import numpy as np
import pyerrors as pe
from scipy.odr import ODR, Model, RealData


def fit_general(x, y, func, silent=False, **kwargs):
    """Performs a non-linear fit to y = func(x) and returns a list of Obs corresponding to the fit parameters.

    Plausibility of the results should be checked. To control the numerical differentiation
    the kwargs of numdifftools.step_generators.MaxStepGenerator can be used.

    func has to be of the form

    def func(a, x):
        y = a[0] + a[1] * x + a[2] * np.sinh(x)
        return y

    y has to be a list of Obs, the dvalues of the Obs are used as yerror for the fit.
    x can either be a list of floats in which case no xerror is assumed, or
    a list of Obs, where the dvalues of the Obs are used as xerror for the fit.

    Keyword arguments
    -----------------
    silent -- If True, all output to the console is omitted (default False).
    initial_guess -- Initial guess for the fit parameters. Relevant for
                     non-linear fits with many parameters.
    """

    if not callable(func):
        raise TypeError('func has to be a function.')

    # Infer the number of fit parameters by calling func with dummy
    # parameter vectors of growing size until the call succeeds
    for i in range(10):
        try:
            func(np.arange(i), 0)
        except Exception:
            pass
        else:
            break
    n_parms = i
    if not silent:
        print('Fit with', n_parms, 'parameters')

    global print_output, beta0
    print_output = 1
    if 'initial_guess' in kwargs:
        beta0 = kwargs.get('initial_guess')
        if len(beta0) != n_parms:
            raise Exception('Initial guess does not have the correct length.')
    else:
        beta0 = np.arange(n_parms)

    if len(x) != len(y):
        raise Exception('x and y have to have the same length')

    if all(isinstance(n, pe.Obs) for n in x):
        obs = x + y
        x_constants = None
        xerr = [o.dvalue for o in x]
        yerr = [o.dvalue for o in y]
    elif all(isinstance(n, (float, int))
             for n in x) or isinstance(x, np.ndarray):
        obs = y
        x_constants = x
        xerr = None
        yerr = [o.dvalue for o in y]
    else:
        raise Exception('Unsupported types for x')

    def do_the_fit(obs, **kwargs):

        global print_output, beta0

        func = kwargs.get('function')
        yerr = kwargs.get('yerr')
        length = len(yerr)

        xerr = kwargs.get('xerr')

        if length == len(obs):
            # Only y carries errors: ordinary least-squares fit (fit_type=2)
            assert 'x_constants' in kwargs
            data = RealData(kwargs.get('x_constants'), obs, sy=yerr)
            fit_type = 2
        elif length == len(obs) // 2:
            # x and y both carry errors: full ODR fit (fit_type=0)
            data = RealData(obs[:length], obs[length:], sx=xerr, sy=yerr)
            fit_type = 0
        else:
            raise Exception('x and y do not fit together.')

        model = Model(func)

        odr = ODR(data, model, beta0, partol=np.finfo(np.float64).eps)
        odr.set_job(fit_type=fit_type, deriv=1)
        output = odr.run()
        if print_output and not silent:
            print(*output.stopreason)
            print('chisquare/d.o.f.:', output.res_var)
            print_output = 0
        beta0 = output.beta
        return output.beta[kwargs.get('n')]

    # Propagate the fit result through derived_observable once per
    # parameter, using numerical gradients of the fit routine
    res = []
    for n in range(n_parms):
        res.append(
            pe.derived_observable(do_the_fit,
                                  obs,
                                  function=func,
                                  xerr=xerr,
                                  yerr=yerr,
                                  x_constants=x_constants,
                                  num_grad=True,
                                  n=n,
                                  **kwargs))
    return res
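
A minimal usage sketch for fit_general, assuming the imports above; the linear model, the ensemble name 'ens' and the data values are made up for illustration:

def linear(a, x):
    return a[0] + a[1] * x

# x without error, y as pseudo Obs scattered around the model
x_vals = [1.0, 2.0, 3.0, 4.0, 5.0]
y_vals = [pe.pseudo_Obs(2.0 + 0.5 * xi, 0.1, 'ens') for xi in x_vals]

fit_parms = fit_general(x_vals, y_vals, linear, initial_guess=[1.0, 1.0])
for p in fit_parms:
    p.gamma_method()  # estimate the error of each fit parameter
    print(p)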