Code example #1
File: physics.py Project: chm-ipmu/zfit
def double_crystalball_integral(limits, params, model):
    mu = params['mu']
    sigma = params['sigma']

    (lower,), (upper,) = limits.limits
    lower = lower[0]  # obs number 0
    upper = upper[0]

    limits_left = Space(limits.obs, (lower, mu))
    limits_right = Space(limits.obs, (mu, upper))
    params_left = dict(mu=mu, sigma=sigma, alpha=params["alphal"],
                       n=params["nl"])
    params_right = dict(mu=mu, sigma=sigma, alpha=-params["alphar"],
                        n=params["nr"])
    #
    left = tf.cond(pred=tf.less(mu, lower), true_fn=lambda: z.constant(0.),
                   false_fn=lambda: crystalball_integral(limits_left, params_left, model))
    right = tf.cond(pred=tf.greater(mu, upper), true_fn=lambda: z.constant(0.),
                    false_fn=lambda: crystalball_integral(limits_right, params_right, model))
    integral = left + right
    # integral = z.where(condition=tf.less(mu, lower),
    #                     x=crystalball_integral(limits_left, params_left, model),
    #                     y=crystalball_integral(limits_right, params_right, model))

    return integral
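
This snippet splits the double Crystal Ball integral at mu and guards each half with tf.cond so that an empty sub-range contributes exactly zero. A minimal, self-contained sketch of the same guard pattern (plain TensorFlow, not zfit code; it assumes lower <= mu <= upper and uses the toy integrand f(x) = x**2):

import tensorflow as tf

def antiderivative(x):
    return x ** 3 / 3.0  # F(x) for the toy integrand f(x) = x**2

def split_integral(mu, lower, upper):
    # each half is guarded: an empty sub-range contributes 0 instead of a reversed integral
    left = tf.cond(tf.less(mu, lower),
                   lambda: tf.constant(0.0, dtype=tf.float64),
                   lambda: antiderivative(mu) - antiderivative(lower))
    right = tf.cond(tf.greater(mu, upper),
                    lambda: tf.constant(0.0, dtype=tf.float64),
                    lambda: antiderivative(upper) - antiderivative(mu))
    return left + right

lower, upper = tf.constant(0.0, tf.float64), tf.constant(2.0, tf.float64)
print(split_integral(tf.constant(1.0, tf.float64), lower, upper).numpy())  # 8/3, same as one piece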
Code example #2
File: test_loss.py Project: mozgit/zfit
def test_add():
    param1 = zfit.Parameter("param1", 1.)
    param2 = zfit.Parameter("param2", 2.)

    pdfs = [0] * 4
    pdfs[0] = Gauss(param1, 4, obs=obs1)
    pdfs[1] = Gauss(param2, 5, obs=obs1)
    pdfs[2] = Gauss(3, 6, obs=obs1)
    pdfs[3] = Gauss(4, 7, obs=obs1)

    datas = [0] * 4
    datas[0] = z.constant(1.)
    datas[1] = z.constant(2.)
    datas[2] = z.constant(3.)
    datas[3] = z.constant(4.)

    ranges = [0] * 4
    ranges[0] = (1, 4)
    ranges[1] = Space(limits=(2, 5), obs=obs1)
    ranges[2] = Space(limits=(3, 6), obs=obs1)
    ranges[3] = Space(limits=(4, 7), obs=obs1)

    constraint1 = zfit.constraint.nll_gaussian(params=param1,
                                               observation=1.,
                                               uncertainty=0.5)
    constraint2 = zfit.constraint.nll_gaussian(params=param2,
                                               observation=2.,
                                               uncertainty=0.25)
    merged_constraints = [constraint1, constraint2]

    nll1 = UnbinnedNLL(model=pdfs[0],
                       data=datas[0],
                       fit_range=ranges[0],
                       constraints=constraint1)
    nll2 = UnbinnedNLL(model=pdfs[1],
                       data=datas[1],
                       fit_range=ranges[1],
                       constraints=constraint2)
    nll3 = UnbinnedNLL(model=[pdfs[2], pdfs[3]],
                       data=[datas[2], datas[3]],
                       fit_range=[ranges[2], ranges[3]])

    simult_nll = nll1 + nll2 + nll3

    assert simult_nll.model == pdfs
    assert simult_nll.data == datas

    ranges[0] = Space(
        limits=ranges[0], obs='obs1',
        axes=(0, ))  # for comparison, Space can only compare with Space
    ranges[1].coords._axes = (0, )
    ranges[2].coords._axes = (0, )
    ranges[3].coords._axes = (0, )
    assert simult_nll.fit_range == ranges

    def eval_constraint(constraints):
        return z.reduce_sum([c.value() for c in constraints]).numpy()

    assert eval_constraint(
        simult_nll.constraints) == eval_constraint(merged_constraints)
Code example #3
File: basic.py Project: tgag17/zfit
def _exp_integral_func_shifting(lambd, lower, upper, model):
    def raw_integral(x):
        return z.exp(lambd * (model._shift_x(x))) / lambd  # needed due to overflow in exp otherwise

    lower_int = raw_integral(x=z.constant(lower))
    upper_int = raw_integral(x=z.constant(upper))
    integral = (upper_int - lower_int)
    return integral
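
Why the shift is needed: replacing x by x - x0 rescales both the unnormalized shape exp(lambda * x) and its antiderivative by the same factor exp(-lambda * x0), so a normalized pdf is unchanged while the intermediate exponentials stay within floating-point range. A hedged NumPy sketch (not zfit internals; the numbers are illustrative):

import numpy as np

lam = 1.0
lower, upper = 700.0, 800.0
x0 = 0.5 * (lower + upper)  # shift towards the middle of the range

def pdf(x, shift):
    shape = np.exp(lam * (x - shift))
    norm = (np.exp(lam * (upper - shift)) - np.exp(lam * (lower - shift))) / lam
    return shape / norm

print(pdf(750.0, shift=x0))   # ~1.93e-22, evaluates cleanly
print(pdf(750.0, shift=0.0))  # nan: exp(750) and exp(800) overflow to inf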
Code example #4
def double_crystalball_mu_integral_func(mu, sigma, alphal, nl, alphar, nr, lower, upper):
    left = tf.cond(pred=tf.less(mu, lower), true_fn=lambda: z.constant(0.),
                   false_fn=lambda: crystalball_integral_func(mu=mu, sigma=sigma, alpha=alphal, n=nl,
                                                              lower=lower, upper=mu))
    right = tf.cond(pred=tf.greater(mu, upper), true_fn=lambda: z.constant(0.),
                    false_fn=lambda: crystalball_integral_func(mu=mu, sigma=sigma, alpha=alphar, n=nr,
                                                               lower=mu, upper=upper))
    integral = left + right
    return integral
Code example #5
File: basic.py Project: mozgit/zfit
def _exp_integral_func_shifting(lambd, lower, upper, model):
    def raw_integral(x):
        return model._numerics_shifted_exp(
            x=x,
            lambda_=lambd) / lambd  # needed due to overflow in exp otherwise

    lower_int = raw_integral(x=z.constant(lower))
    upper_int = raw_integral(x=z.constant(upper))
    integral = (upper_int - lower_int)
    return integral
Code example #6
File: test_sampling.py Project: tgag17/zfit
        def __call__(self, n_to_produce, limits, dtype):
            importance_sampling_called[0] = True

            import tensorflow_probability.python.distributions as tfd
            n_to_produce = tf.cast(n_to_produce, dtype=tf.int32)
            gaussian = tfd.TruncatedNormal(loc=z.constant(-1.), scale=z.constant(2.),
                                           low=low, high=high)
            sample = gaussian.sample(sample_shape=(n_to_produce, 1))
            weights = gaussian.prob(sample)[:, 0]
            thresholds = tf.random.uniform(shape=(n_to_produce,), dtype=dtype)
            return sample, thresholds, weights, None, n_to_produce
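
The hook above only produces proposals, importance weights and uniform thresholds; the surrounding zfit machinery performs the acceptance. A hedged sketch of the generic accept/reject step that such values feed (TensorFlow Probability with a made-up toy target, not zfit's internal sampler):

import tensorflow as tf
import tensorflow_probability as tfp

low, high = -5.0, 5.0
proposal = tfp.distributions.TruncatedNormal(loc=-1.0, scale=2.0, low=low, high=high)

def target_unnormalized(x):  # illustrative target: a narrow bump around -1
    return tf.exp(-0.5 * ((x + 1.0) / 0.5) ** 2)

n = 10000
sample = proposal.sample(n)
weights = proposal.prob(sample)        # proposal density at each sample
ratio = target_unnormalized(sample) / weights
ceiling = tf.reduce_max(ratio)         # envelope constant for accept/reject
thresholds = tf.random.uniform(shape=(n,))
accepted = tf.boolean_mask(sample, ratio / ceiling > thresholds)
print(accepted.shape)                  # the kept sub-sample, distributed like the target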
Code example #7
File: basic.py Project: chm-ipmu/zfit
def _exp_integral_from_any_to_any(limits, params, model):
    lambda_ = params['lambda']

    def raw_integral(x):
        return model._numerics_shifted_exp(x=x, lambda_=lambda_) / lambda_  # needed due to overflow in exp otherwise

    (lower,), (upper,) = limits.limits
    if lower[0] == -upper[0] == -np.inf:  # a fully infinite range (-inf, inf) is not supported
        raise NotImplementedError
    lower_int = raw_integral(x=z.constant(lower))
    upper_int = raw_integral(x=z.constant(upper))
    return (upper_int - lower_int)[0]
Code example #8
File: test_loss.py Project: zfit/zfit
def test_gradients(chunksize):
    from numdifftools import Gradient

    zfit.run.chunking.active = True
    zfit.run.chunking.max_n_points = chunksize

    initial1 = 1.0
    initial2 = 2
    param1 = zfit.Parameter("param1", initial1)
    param2 = zfit.Parameter("param2", initial2)

    gauss1 = Gauss(param1, 4, obs=obs1)
    gauss1.set_norm_range((-5, 5))
    gauss2 = Gauss(param2, 5, obs=obs1)
    gauss2.set_norm_range((-5, 5))

    data1 = zfit.Data.from_tensor(obs=obs1,
                                  tensor=z.constant(1.0, shape=(100, )))
    data1.set_data_range((-5, 5))
    data2 = zfit.Data.from_tensor(obs=obs1,
                                  tensor=z.constant(1.0, shape=(100, )))
    data2.set_data_range((-5, 5))

    nll = UnbinnedNLL(model=[gauss1, gauss2], data=[data1, data2])

    def loss_func(values):
        for val, param in zip(values, nll.get_cache_deps(only_floating=True)):
            param.set_value(val)
        return nll.value().numpy()

    # theoretical, numerical = tf.test.compute_gradient(loss_func, list(params))
    gradient1 = nll.gradient(params=param1)
    gradient_func = Gradient(loss_func)
    # gradient_func = lambda *args, **kwargs: list(gradient_func_numpy(*args, **kwargs))
    assert gradient1[0].numpy() == pytest.approx(
        gradient_func([param1.numpy()]))
    param1.set_value(initial1)
    param2.set_value(initial2)
    params = [param2, param1]
    gradient2 = nll.gradient(params=params)
    both_gradients_true = list(
        reversed(list(gradient_func([initial1, initial2
                                     ]))))  # because param2, then param1
    assert [g.numpy() for g in gradient2] == pytest.approx(both_gradients_true)

    param1.set_value(initial1)
    param2.set_value(initial2)
    gradient3 = nll.gradient()
    assert frozenset(g.numpy() for g in gradient3) == pytest.approx(
        frozenset(both_gradients_true))
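
A hedged, zfit-free sketch of the same cross-check idea on a plain function (numdifftools is assumed installed, as in the test):

import numpy as np
from numdifftools import Gradient

def f(v):
    x, y = v
    return x ** 2 + 3.0 * x * y

def analytic_grad(v):
    x, y = v
    return np.array([2.0 * x + 3.0 * y, 3.0 * x])

point = np.array([1.0, 2.0])
# numerical gradient from numdifftools vs. the hand-derived one
np.testing.assert_allclose(Gradient(f)(point), analytic_grad(point), rtol=1e-6)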
Code example #9
File: diverse.py Project: uiuc-arc/zfit
def gauss_4d(x, params):
    norm = params[0]
    mean = tf.stack(params[1:5])
    sigma = tf.stack(params[5:9])
    corr = tf.stack([[z.constant(1.), params[9], params[10], params[11]],
                     [params[9],
                      z.constant(1.), params[12], params[13]],
                     [params[10], params[12],
                      z.constant(1.), params[14]],
                     [params[11], params[13], params[14],
                      z.constant(1.)]])

    cov = tf.einsum("i,ij,j->ij", sigma, corr, sigma)
    invcov = tf.linalg.inv(cov)
    return multivariate_gauss(x, norm, mean, invcov)
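
The einsum string "i,ij,j->ij" computes cov_ij = sigma_i * corr_ij * sigma_j, i.e. diag(sigma) @ corr @ diag(sigma). A quick NumPy check with illustrative values:

import numpy as np

sigma = np.array([1.0, 2.0, 0.5, 3.0])
corr = np.array([[1.0, 0.1, 0.2, 0.0],
                 [0.1, 1.0, 0.3, 0.4],
                 [0.2, 0.3, 1.0, 0.5],
                 [0.0, 0.4, 0.5, 1.0]])

cov_einsum = np.einsum("i,ij,j->ij", sigma, corr, sigma)
cov_matmul = np.diag(sigma) @ corr @ np.diag(sigma)
np.testing.assert_allclose(cov_einsum, cov_matmul)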
Code example #10
def create_loss():
    with tf.compat.v1.variable_scope("func1"):
        a_param = zfit.Parameter("variable_a15151",
                                 1.5,
                                 -1.,
                                 20.,
                                 step_size=z.constant(0.1))
        b_param = zfit.Parameter("variable_b15151", 3.5)
        c_param = zfit.Parameter("variable_c15151", -0.04)
        obs1 = zfit.Space(obs='obs1', limits=(-2.4, 9.1))

        # load params for sampling
        a_param.load(true_a)
        b_param.load(true_b)
        c_param.load(true_c)

    gauss1 = zfit.pdf.Gauss(mu=a_param, sigma=b_param, obs=obs1)
    exp1 = zfit.pdf.Exponential(lambda_=c_param, obs=obs1)

    sum_pdf1 = 0.9 * gauss1 + exp1

    sampled_data = sum_pdf1.create_sampler(n=15000)
    sampled_data.resample()

    loss = zfit.loss.UnbinnedNLL(model=sum_pdf1,
                                 data=sampled_data,
                                 fit_range=obs1)

    return loss, (a_param, b_param, c_param)
Code example #11
File: polynomials.py Project: zfit/zfit
def func_integral_chebyshev2(limits, norm, params, model):
    lower, upper = limits.limit1d
    lower_rescaled = model._polynomials_rescale(lower)
    upper_rescaled = model._polynomials_rescale(upper)

    lower = z.convert_to_tensor(lower_rescaled)
    upper = z.convert_to_tensor(upper_rescaled)

    # The antiderivative of the second-kind Chebyshev polynomial U_n is T_{n+1} / (n + 1),
    # a first-kind polynomial, so we fold the (n + 1) into the coefficients; evaluating the
    # first-kind (cheby1) shape then performs the sum for us.
    coeffs_cheby1 = {"c_0": z.constant(0.0, dtype=model.dtype)}

    for name, coeff in params.items():
        n_plus1 = int(name.split("_", 1)[-1]) + 1
        coeffs_cheby1[f"c_{n_plus1}"] = coeff / z.convert_to_tensor(
            n_plus1, dtype=model.dtype)
    coeffs_cheby1 = convert_coeffs_dict_to_list(coeffs_cheby1)

    def indefinite_integral(limits):
        return chebyshev_shape(x=limits, coeffs=coeffs_cheby1)

    integral = indefinite_integral(upper) - indefinite_integral(lower)
    integral = znp.reshape(integral, newshape=())
    integral *= 0.5 * model.space.area()  # rescale back to whole width

    return integral
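
A hedged numerical check of the identity the comment relies on (SciPy is used only for the check; it is not part of the zfit code): the antiderivative of the second-kind Chebyshev polynomial U_n is T_{n+1} / (n + 1):

import numpy as np
from scipy.integrate import quad
from scipy.special import eval_chebyt, eval_chebyu

a, b = -0.8, 0.7
for n in range(5):
    numeric, _ = quad(lambda x: eval_chebyu(n, x), a, b)
    analytic = (eval_chebyt(n + 1, b) - eval_chebyt(n + 1, a)) / (n + 1)
    np.testing.assert_allclose(numeric, analytic, rtol=1e-8, atol=1e-12)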
Code example #12
File: basic.py Project: tgag17/zfit
    def __init__(self, lam=None, obs: ztyping.ObsTypeInput = None, name: str = "Exponential", lambda_=None):
        """Exponential function exp(lambda * x).

        The function is normalized over a finite range and is therefore a pdf, precisely
        defined as :math:`\\frac{ e^{\\lambda \\cdot x}}{ \\int_{lower}^{upper} e^{\\lambda \\cdot x} dx}`

        Args:
            lam: Accessed as parameter "lambda".
            obs: The :py:class:`~zfit.Space` the pdf is defined in.
            name: Name of the pdf.
            lambda_: Deprecated alias for ``lam``.
        """
        if lambda_ is not None:
            if lam is None:
                lam = lambda_
            else:
                raise BreakingAPIChangeError("The parameter 'lambda_' has been renamed to 'lam'; use only 'lam'.")
        params = {'lambda': lam}
        super().__init__(obs, name=name, params=params)

        self._calc_numerics_data_shift = lambda: z.constant(0.)

        if not self.space.has_limits:
            warn_advanced_feature("Exponential pdf relies on a shift of the input towards 0 to keep the numerical "
                                  f"stability high. The space {self.space} does not have limits set, so no shift"
                                  f" will occur. To set it manually, set _numerics_data_shift to the expected"
                                  f" average of the values given to this function."
                                  f" If this sounds unfamiliar, regard this as an error and use a normalization range.",
                                  identifier='exp_shift')
        self._set_numerics_data_shift(self.space)
Code example #13
File: test_fitresult.py Project: mozgit/zfit
def create_loss(n=15000):
    a_param = zfit.Parameter("variable_a15151",
                             1.5,
                             -1.,
                             20.,
                             step_size=z.constant(0.1))
    b_param = zfit.Parameter("variable_b15151", 3.5, 0, 20)
    c_param = zfit.Parameter("variable_c15151", -0.04, -1, 0.)
    obs1 = zfit.Space(obs='obs1', limits=(-2.4, 9.1))

    # load params for sampling
    a_param.set_value(true_a)
    b_param.set_value(true_b)
    c_param.set_value(true_c)

    gauss1 = zfit.pdf.Gauss(mu=a_param, sigma=b_param, obs=obs1)
    exp1 = zfit.pdf.Exponential(lambda_=c_param, obs=obs1)

    sum_pdf1 = zfit.pdf.SumPDF((gauss1, exp1), 0.7)

    sampled_data = sum_pdf1.create_sampler(n=n)
    sampled_data.resample()

    loss = zfit.loss.UnbinnedNLL(model=sum_pdf1, data=sampled_data)

    return loss, (a_param, b_param, c_param)
Code example #14
File: polynomials.py Project: zfit/zfit
def func_integral_hermite(limits, norm, params, model):
    lower, upper = limits.limit1d
    lower_rescaled = model._polynomials_rescale(lower)
    upper_rescaled = model._polynomials_rescale(upper)

    lower = z.convert_to_tensor(lower_rescaled)
    upper = z.convert_to_tensor(upper_rescaled)

    # The antiderivative of the physicists' Hermite polynomial H_n is H_{n+1} / (2(n + 1)),
    # so we fold the 2(n + 1) into the coefficients.
    coeffs = {"c_0": z.constant(0.0, dtype=model.dtype)}

    for name, coeff in params.items():
        ip1_coeff = int(name.split("_", 1)[-1]) + 1
        coeffs[f"c_{ip1_coeff}"] = coeff / z.convert_to_tensor(
            ip1_coeff * 2.0, dtype=model.dtype)
    coeffs = convert_coeffs_dict_to_list(coeffs)

    def indefinite_integral(limits):
        return hermite_shape(x=limits, coeffs=coeffs)

    integral = indefinite_integral(upper) - indefinite_integral(lower)
    integral = znp.reshape(integral, newshape=())
    integral *= 0.5 * model.space.area()  # rescale back to whole width

    return integral
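
The analogous hedged check for the Hermite case (SciPy assumed; eval_hermite is the physicists' H_n): the antiderivative of H_n is H_{n+1} / (2(n + 1)), exactly the factor folded into the coefficients above:

import numpy as np
from scipy.integrate import quad
from scipy.special import eval_hermite

a, b = -0.9, 0.7
for n in range(5):
    numeric, _ = quad(lambda x: eval_hermite(n, x), a, b)
    analytic = (eval_hermite(n + 1, b) - eval_hermite(n + 1, a)) / (2.0 * (n + 1))
    np.testing.assert_allclose(numeric, analytic, rtol=1e-8, atol=1e-12)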
Code example #15
def test_normalization(obs1, pdf_factory):
    import numpy as np
    import tensorflow as tf

    import zfit
    from zfit import z

    test_yield = 1524.3
    dist = pdf_factory()
    samples = tf.cast(np.random.uniform(low=low, high=high, size=100000),
                      dtype=tf.float64)
    small_samples = tf.cast(np.random.uniform(low=low, high=high, size=10),
                            dtype=tf.float64)
    with dist.set_norm_range(zfit.Space(obs1, limits=(low, high))):
        probs = dist.pdf(samples)
        probs_small = dist.pdf(small_samples)
        log_probs = dist.log_pdf(small_samples)
        probs, log_probs = probs.numpy(), log_probs.numpy()
        probs = np.average(probs) * (high - low)
        assert probs == pytest.approx(1., rel=0.05)
        assert log_probs == pytest.approx(tf.math.log(probs_small).numpy(),
                                          rel=0.05)
        dist = dist.create_extended(z.constant(test_yield))
        probs = dist.pdf(samples)
        probs_extended = dist.ext_pdf(samples)
        result = probs.numpy()
        result = np.average(result) * (high - low)
        result_ext = np.average(probs_extended) * (high - low)
        assert result == pytest.approx(1, rel=0.05)
        assert result_ext == pytest.approx(test_yield, rel=0.05)
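
The assertions above are plain Monte Carlo: averaging the pdf over uniform draws and multiplying by the interval width estimates its integral over the range. A hedged standalone sketch (a standard normal on (-3, 3), so the expected value is ~0.997 rather than exactly 1):

import numpy as np

low, high = -3.0, 3.0

def normal_pdf(x, mu=0.0, sigma=1.0):
    return np.exp(-0.5 * ((x - mu) / sigma) ** 2) / (sigma * np.sqrt(2 * np.pi))

samples = np.random.uniform(low, high, size=100000)
estimate = np.average(normal_pdf(samples)) * (high - low)
print(estimate)  # ~0.9973, the Gaussian mass inside +-3 sigma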
Code example #16
def create_loss(n=15000, weights=None):
    avalue = 1.5
    a_param = zfit.Parameter("variable_a15151", avalue, -1., 20.,
                             step_size=z.constant(0.1))
    a_param.init_val = avalue
    bvalue = 3.5
    b_param = zfit.Parameter("variable_b15151", bvalue, 0, 20)
    b_param.init_val = bvalue
    cvalue = -0.04
    c_param = zfit.Parameter("variable_c15151", cvalue, -1, 0.)
    c_param.init_val = cvalue
    obs1 = zfit.Space(obs='obs1', limits=(-2.4, 9.1))

    # load params for sampling
    a_param.set_value(true_a)
    b_param.set_value(true_b)
    c_param.set_value(true_c)

    gauss1 = zfit.pdf.Gauss(mu=a_param, sigma=b_param, obs=obs1)
    exp1 = zfit.pdf.Exponential(lam=c_param, obs=obs1)

    sum_pdf1 = zfit.pdf.SumPDF((gauss1, exp1), 0.7)

    sampled_data = sum_pdf1.create_sampler(n=n)
    sampled_data.resample()

    if weights is not None:
        sampled_data.set_weights(weights)

    loss = zfit.loss.UnbinnedNLL(model=sum_pdf1, data=sampled_data)

    return loss, (a_param, b_param, c_param)
Code example #17
File: polynomials.py Project: zfit/zfit
    def __init__(
        self,
        obs,
        coeffs: list,
        apply_scaling: bool = True,
        coeff0: tf.Tensor | None = None,
        name: str = "Polynomial",
    ):  # noqa
        """Base class to create 1 dimensional recursive polynomials that can be rescaled. Overwrite _poly_func.

        Args:
            coeffs: Coefficients for each polynomial. Used to calculate the degree.
            apply_scaling: Rescale the data so that the actual limits represent (-1, 1).

                .. math::
                   x_{n+1} = recurrence(x_{n}, x_{n-1}, n)
        """
        # 0th coefficient set to 1 by default
        coeff0 = (z.constant(1.0)
                  if coeff0 is None else tf.cast(coeff0, dtype=ztypes.float))
        coeffs = convert_to_container(coeffs).copy()
        coeffs.insert(0, coeff0)
        params = {f"c_{i}": coeff for i, coeff in enumerate(coeffs)}
        self._degree = len(coeffs) - 1  # 1 coeff -> 0th degree
        self._do_scale = apply_scaling
        if apply_scaling and not (isinstance(obs, Space)
                                  and obs.n_limits == 1):
            raise ValueError(
                "obs need to be a Space with exactly one limit if rescaling is requested."
            )
        super().__init__(obs=obs, name=name, params=params)
Code example #18
File: test_loss.py Project: zfit/zfit
def test_simple_loss():
    true_a = 1.0
    true_b = 4.0
    true_c = -0.3
    truevals = true_a, true_b, true_c
    a_param = zfit.Parameter("variable_a15151loss",
                             1.5,
                             -1.0,
                             20.0,
                             step_size=z.constant(0.1))
    b_param = zfit.Parameter("variable_b15151loss", 3.5)
    c_param = zfit.Parameter("variable_c15151loss", -0.23)
    param_list = [a_param, b_param, c_param]

    def loss_func(params):
        a_param, b_param, c_param = params
        probs = (z.convert_to_tensor((a_param - true_a)**2 +
                                     (b_param - true_b)**2 +
                                     (c_param - true_c)**4) + 0.42)
        return tf.reduce_sum(input_tensor=tf.math.log(probs))

    with pytest.raises(ValueError):
        _ = zfit.loss.SimpleLoss(func=loss_func, params=param_list)

    loss_func.errordef = 1
    loss_deps = zfit.loss.SimpleLoss(func=loss_func, params=param_list)
    # loss = zfit.loss.SimpleLoss(func=loss_func)
    loss = zfit.loss.SimpleLoss(func=loss_func, params=param_list)
    loss2 = zfit.loss.SimpleLoss(func=loss_func, params=truevals)

    assert loss_deps.get_cache_deps() == set(param_list)
    assert set(loss_deps.get_params()) == set(param_list)

    loss_tensor = loss_func(param_list)
    loss_value_np = loss_tensor.numpy()

    assert loss.value().numpy() == pytest.approx(loss_value_np)
    assert loss_deps.value().numpy() == pytest.approx(loss_value_np)

    with pytest.raises(IntentionAmbiguousError):
        _ = loss + loss_deps

    minimizer = zfit.minimize.Minuit()
    result = minimizer.minimize(loss=loss)
    assert result.valid
    assert true_a == pytest.approx(result.params[a_param]["value"], rel=0.03)
    assert true_b == pytest.approx(result.params[b_param]["value"], rel=0.06)
    assert true_c == pytest.approx(result.params[c_param]["value"], rel=0.5)

    zfit.param.set_values(param_list, np.array(zfit.run(param_list)) + 0.6)
    result2 = minimizer.minimize(loss=loss2)
    assert result2.valid
    params = list(result2.params)
    assert true_a == pytest.approx(result2.params[params[0]]["value"],
                                   rel=0.03)
    assert true_b == pytest.approx(result2.params[params[1]]["value"],
                                   rel=0.06)
    assert true_c == pytest.approx(result2.params[params[2]]["value"], rel=0.5)
Code example #19
File: test_constraint.py Project: zfit/zfit
def true_nll_gaussian(x, mu, sigma):
    x = convert_to_container(x, container=tuple)
    mu = convert_to_container(mu, container=tuple)
    sigma = convert_to_container(sigma, container=tuple)
    constraint = z.constant(0.0)
    if not len(x) == len(mu) == len(sigma):
        raise ValueError("x, mu and sigma have to have the same length.")
    for x_, mean, sig in zip(x, mu, sigma):
        constraint += z.reduce_sum(z.square(x_ - mean) / (2.0 * z.square(sig)))

    return constraint
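
A hedged cross-check of the formula (SciPy used only for the check): the sum above is the Gaussian negative log-likelihood up to the x-independent constant log(sigma * sqrt(2 * pi)), which drops out of any minimization:

import numpy as np
from scipy.stats import norm

x, mu, sigma = 1.3, 1.0, 0.5
constraint = (x - mu) ** 2 / (2.0 * sigma ** 2)
nll = -norm.logpdf(x, loc=mu, scale=sigma)
np.testing.assert_allclose(constraint, nll - np.log(sigma * np.sqrt(2.0 * np.pi)))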
Code example #20
def test_extended_unbinned_nll():
    test_values = z.constant(test_values_np)
    test_values = zfit.Data.from_tensor(obs=obs1, tensor=test_values)
    gaussian3, mu3, sigma3, yield3 = create_gauss3ext()
    nll = zfit.loss.ExtendedUnbinnedNLL(model=gaussian3,
                                        data=test_values,
                                        fit_range=(-20, 20))
    assert {mu3, sigma3, yield3} == nll.get_params()
    minimizer = Minuit()
    status = minimizer.minimize(loss=nll)
    params = status.params
    assert params[mu3]['value'] == pytest.approx(np.mean(test_values_np), rel=0.007)
    assert params[sigma3]['value'] == pytest.approx(np.std(test_values_np), rel=0.007)
    assert params[yield3]['value'] == pytest.approx(yield_true, rel=0.007)
Code example #21
File: test_graph.py Project: chm-ipmu/zfit
    def dependents_func():
        a = zfit.pdf.Gauss(var1, var2, obs='obs1').sample(n=500,
                                                          limits=(-5, 5)) * 5.
        b = z.constant(3.) + 4 * var1
        c = 5. * b * var3
        d = b * var2 + a
        e = d * 3.
        return a, b, c, d, e

        # NOTE: the assertions below are unreachable (dead code after the return above)
        assert get_dependents_auto(
            e, [a, b, c, d, var1, var2, var3]) == [a, b, d, var1, var2]
        assert get_dependents_auto(e, [var1, var2, var3]) == [var1, var2]
        assert get_dependents_auto(
            c, [a, b, d, var1, var2, var3]) == [b, var1, var3]
        return True
Code example #22
File: test_loss.py Project: simonthor/zfit
def test_extended_unbinned_nll(size):
    if size is None:
        test_values = z.constant(test_values_np)
        size = test_values.shape[0]
    else:
        test_values = create_test_values(size)
    test_values = zfit.Data.from_tensor(obs=obs1, tensor=test_values)
    gaussian3, mu3, sigma3, yield3 = create_gauss3ext()
    nll = zfit.loss.ExtendedUnbinnedNLL(model=gaussian3,
                                        data=test_values,
                                        fit_range=(-20, 20))
    assert {mu3, sigma3, yield3} == nll.get_params()
    minimizer = Minuit(tol=1e-4)
    status = minimizer.minimize(loss=nll)
    params = status.params
    assert params[mu3]['value'] == pytest.approx(zfit.run(tf.math.reduce_mean(test_values)), rel=0.05)
    assert params[sigma3]['value'] == pytest.approx(zfit.run(tf.math.reduce_std(test_values)), rel=0.05)
    assert params[yield3]['value'] == pytest.approx(size, rel=0.005)
Code example #23
def test_simple_loss():
    true_a = 1.
    true_b = 4.
    true_c = -0.3
    a_param = zfit.Parameter("variable_a15151loss",
                             1.5,
                             -1.,
                             20.,
                             step_size=z.constant(0.1))
    b_param = zfit.Parameter("variable_b15151loss", 3.5)
    c_param = zfit.Parameter("variable_c15151loss", -0.23)
    param_list = [a_param, b_param, c_param]

    def loss_func():
        probs = z.convert_to_tensor((a_param - true_a)**2 +
                                    (b_param - true_b)**2 +
                                    (c_param - true_c)**4) + 0.42
        return tf.reduce_sum(input_tensor=tf.math.log(probs))

    loss_deps = zfit.loss.SimpleLoss(func=loss_func, deps=param_list)
    # loss = zfit.loss.SimpleLoss(func=loss_func)
    loss = zfit.loss.SimpleLoss(func=loss_func, deps=param_list)

    assert loss_deps.get_cache_deps() == set(param_list)
    assert set(loss_deps.get_params()) == set(param_list)

    loss_tensor = loss_func()
    loss_value_np = loss_tensor.numpy()

    assert loss.value().numpy() == loss_value_np
    assert loss_deps.value().numpy() == loss_value_np

    with pytest.raises(IntentionAmbiguousError):
        _ = loss + loss_deps

    minimizer = zfit.minimize.Minuit()
    result = minimizer.minimize(loss=loss)

    assert true_a == pytest.approx(result.params[a_param]['value'], rel=0.03)
    assert true_b == pytest.approx(result.params[b_param]['value'], rel=0.06)
    assert true_c == pytest.approx(result.params[c_param]['value'], rel=0.5)
Code example #24
def do_integrate():
    return tf.while_loop(
        cond=all_calculated,
        body=body,
        loop_vars=[
            z.constant(0.),  # integral
            initial_points[:-1],  # lower
            initial_points[1:],  # upper
            0  # n_iter
        ],
        # Here we specify the shapes of the loop_vars: the second and third change length
        # between iterations, so their shape is marked as unknown with None. The integral
        # and the iteration counter are scalars with shape ().
        shape_invariants=[
            tf.TensorShape(()),
            tf.TensorShape((None, )),
            tf.TensorShape((None, )),
            tf.TensorShape(()),
        ],
        maximum_iterations=n_iter_max,
    )
Code example #25
File: test_z.py Project: zfit/zfit
def test_wrapped_func():
    rnd = z.random.uniform(shape=(10, ))
    result = wrapped_numpy_func(rnd, z.constant(3.0))
    np.testing.assert_allclose(rnd * np.sqrt(3), result)
Code example #26
File: diverse.py Project: uiuc-arc/zfit
    def model(self, x):
        # sum of 2D Gaussian components, one per parameter set
        d = z.constant(0.)
        for i in self.params:
            d += gauss_2d(x, i[0], i[1], i[2], i[3], i[4], i[5])
        return d
Code example #27
import math

import tensorflow as tf

from zfit import z


def integrate(func, lower, upper):
    func_upper = func(upper)
    func_lower = func(lower)
    uncertainty = tf.abs(func_upper - func_lower)  # can be improved of course
    integrals = (func_lower + func_upper) / 2 * (upper - lower)
    return integrals, uncertainty


# func = lambda x: tf.where(tf.less(x, 0.1),
#                           tf.sin(x * 100),
#                           tf.sin(x))
func = lambda x: tf.sin(x) + tf.cos(x * 100)  # example func to integrate
lower, upper = z.constant(0.), z.constant(math.pi)

# Maximum number of iterations: with a discontinuous function we will never reach the
# requested precision, so we have to break out of the refinement loop at some point.
n_iter_max = 32


def body(integral, lower, upper, n_iter):
    integrals, uncertainties = integrate(func, lower, upper)
    uncertainties_too_large = tf.greater(uncertainties, 1e-5)
    # Mark an interval for refinement only if its uncertainty is too large and we have not yet
    # reached the maximum number of iterations; once at the limit, the values are taken as-is.
    uncertainties_too_large = tf.logical_and(uncertainties_too_large,
                                             n_iter < n_iter_max)

    too_large_indices = tf.where(uncertainties_too_large)[:, 0]
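
The excerpt cuts off inside body; below is a hedged, self-contained NumPy sketch of the same bisect-until-converged idea. It swaps the |func_upper - func_lower| heuristic for a coarse-versus-fine trapezoid comparison, since the simple difference is blind to symmetric integrands such as sin on [0, pi]; all names and tolerances are illustrative:

import math

import numpy as np

def adaptive_trapezoid(func, lower, upper, tol=1e-8, n_iter_max=32):
    lo = np.array([lower], dtype=float)
    hi = np.array([upper], dtype=float)
    total = 0.0
    for _ in range(n_iter_max):
        mid = (lo + hi) / 2.0
        f_lo, f_mid, f_hi = func(lo), func(mid), func(hi)
        coarse = (f_lo + f_hi) / 2.0 * (hi - lo)
        fine = (f_lo + f_mid) / 2.0 * (mid - lo) + (f_mid + f_hi) / 2.0 * (hi - mid)
        done = np.abs(fine - coarse) <= tol * (hi - lo)  # per-interval convergence test
        total += fine[done].sum()
        lo, hi = (np.concatenate([lo[~done], mid[~done]]),
                  np.concatenate([mid[~done], hi[~done]]))  # bisect the unconverged intervals
        if lo.size == 0:
            break
    if lo.size:  # intervals still open at n_iter_max are accepted at their current estimate
        mid = (lo + hi) / 2.0
        total += ((func(lo) + 2.0 * func(mid) + func(hi)) / 4.0 * (hi - lo)).sum()
    return total

print(adaptive_trapezoid(np.sin, 0.0, math.pi))  # ~2.0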
Code example #28
File: integration.py Project: mozgit/zfit
def chunked_average(func, x, num_batches, batch_size, space, mc_sampler):
    lower, upper = space.limits

    # dummy resource variable and input so that tf.custom_gradient has a variable to attach to
    fake_resource_var = tf.Variable("fake_hack_ResVar_for_custom_gradient",
                                    initializer=z.constant(4242.))
    fake_x = z.constant(42.) * fake_resource_var

    @tf.custom_gradient
    def dummy_func(fake_x):  # to make working with custom_gradient
        if x is not None:
            raise WorkInProgressError("partial not yet implemented")

        def body(batch_num, mean):
            if mc_sampler == tfp.mcmc.sample_halton_sequence:
                start_idx = batch_num * batch_size
                end_idx = start_idx + batch_size
                indices = tf.range(start_idx, end_idx, dtype=tf.int32)
                sample = mc_sampler(space.n_obs,
                                    sequence_indices=indices,
                                    dtype=ztypes.float,
                                    randomized=False)
            else:
                sample = mc_sampler(shape=(batch_size, space.n_obs),
                                    dtype=ztypes.float)
            sample = tf.guarantee_const(sample)
            sample = (np.array(upper[0]) -
                      np.array(lower[0])) * sample + lower[0]
            sample = tf.transpose(a=sample)
            sample = func(sample)
            sample = tf.guarantee_const(sample)

            batch_mean = tf.reduce_mean(input_tensor=sample)
            batch_mean = tf.guarantee_const(batch_mean)
            err_weight = 1 / tf.cast(batch_num + 1, dtype=tf.float64)
            # err_weight /= err_weight + 1
            # print_op = tf.print(batch_mean)
            do_print = False
            if do_print:
                tf.print(batch_num + 1, mean, err_weight * (batch_mean - mean))

            return batch_num + 1, mean + err_weight * (batch_mean - mean)

        cond = lambda batch_num, _: batch_num < num_batches

        initial_mean = tf.convert_to_tensor(value=0, dtype=ztypes.float)
        _, final_mean = tf.while_loop(cond=cond,
                                      body=body,
                                      loop_vars=(0, initial_mean),
                                      parallel_iterations=1,
                                      swap_memory=False,
                                      back_prop=False,
                                      maximum_iterations=num_batches)

        def dummy_grad_with_var(dy, variables=None):
            raise WorkInProgressError("Who called me? Mayou36")
            if variables is None:
                raise WorkInProgressError(
                    "Is this needed? Why? It's not a NN. Please make an issue."
                )

            def dummy_grad_func(x):
                values = func(x)
                if variables:
                    gradients = tf.gradients(ys=values,
                                             xs=variables,
                                             grad_ys=dy)
                else:
                    gradients = None
                return gradients

            return chunked_average(func=dummy_grad_func,
                                   x=x,
                                   num_batches=num_batches,
                                   batch_size=batch_size,
                                   space=space,
                                   mc_sampler=mc_sampler)

        def dummy_grad_without_var(dy):
            return dummy_grad_with_var(dy=dy, variables=None)

        do_print = False
        if do_print:
            tf.print("Total mean calculated = ", final_mean)

        return final_mean, dummy_grad_with_var

    try:
        return dummy_func(fake_x)
    except TypeError:
        # retry once: the first call can fail with a spurious TypeError
        return dummy_func(fake_x)
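
The update in body above, mean + err_weight * (batch_mean - mean) with err_weight = 1 / (n + 1), is the standard incremental average: after n + 1 batches it equals the plain mean of all batch means without storing them. A quick NumPy check with illustrative data:

import numpy as np

rng = np.random.default_rng(0)
batch_means = rng.normal(size=1000)

mean = 0.0
for n, batch_mean in enumerate(batch_means):
    mean += (batch_mean - mean) / (n + 1)

np.testing.assert_allclose(mean, batch_means.mean())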