Example #1
def test_gradients(chunksize):
    zfit.run.chunking.active = True
    zfit.run.chunking.max_n_points = chunksize

    param1 = zfit.Parameter("param1", 1.)
    param2 = zfit.Parameter("param2", 2.)

    gauss1 = Gauss(param1, 4, obs=obs1)
    gauss1.set_norm_range((-5, 5))
    gauss2 = Gauss(param2, 5, obs=obs1)
    gauss2.set_norm_range((-5, 5))

    data1 = zfit.Data.from_tensor(obs=obs1, tensor=ztf.constant(1., shape=(100,)))
    data1.set_data_range((-5, 5))
    data2 = zfit.Data.from_tensor(obs=obs1, tensor=ztf.constant(1., shape=(100,)))
    data2.set_data_range((-5, 5))

    nll = UnbinnedNLL(model=[gauss1, gauss2], data=[data1, data2])

    gradient1 = nll.gradients(params=param1)
    assert zfit.run(gradient1) == zfit.run(tf.gradients(ys=nll.value(), xs=param1))
    gradient2 = nll.gradients(params=[param2, param1])
    both_gradients_true = zfit.run(tf.gradients(ys=nll.value(), xs=[param2, param1]))
    assert zfit.run(gradient2) == both_gradients_true
    gradient3 = nll.gradients()
    assert frozenset(zfit.run(gradient3)) == frozenset(both_gradients_true)
Example #2
def test_add():
    param1 = Parameter("param1", 1.)
    param2 = Parameter("param2", 2.)

    pdfs = [0] * 4
    pdfs[0] = Gauss(param1, 4, obs=obs1)
    pdfs[1] = Gauss(param2, 5, obs=obs1)
    pdfs[2] = Gauss(3, 6, obs=obs1)
    pdfs[3] = Gauss(4, 7, obs=obs1)

    datas = [0] * 4
    datas[0] = ztf.constant(1.)
    datas[1] = ztf.constant(2.)
    datas[2] = ztf.constant(3.)
    datas[3] = ztf.constant(4.)

    ranges = [0] * 4
    ranges[0] = (1, 4)
    ranges[1] = Space(limits=(2, 5), obs=obs1)
    ranges[2] = Space(limits=(3, 6), obs=obs1)
    ranges[3] = Space(limits=(4, 7), obs=obs1)

    constraint1 = zfit.constraint.nll_gaussian(params=param1, mu=1, sigma=0.5)
    constraint2 = zfit.constraint.nll_gaussian(params=param1, mu=2, sigma=0.25)
    merged_constraints = [constraint1, constraint2]

    nll1 = UnbinnedNLL(model=pdfs[0],
                       data=datas[0],
                       fit_range=ranges[0],
                       constraints=constraint1)
    nll2 = UnbinnedNLL(model=pdfs[1],
                       data=datas[1],
                       fit_range=ranges[1],
                       constraints=constraint2)
    nll3 = UnbinnedNLL(model=[pdfs[2], pdfs[3]],
                       data=[datas[2], datas[3]],
                       fit_range=[ranges[2], ranges[3]])

    simult_nll = nll1 + nll2 + nll3

    assert simult_nll.model == pdfs
    assert simult_nll.data == datas

    ranges[0] = Space._from_any(
        limits=ranges[0], obs=obs1,
        axes=(0, ))  # for comparison, Space can only compare with Space
    ranges[1]._axes = (0, )
    ranges[2]._axes = (0, )
    ranges[3]._axes = (0, )
    assert simult_nll.fit_range == ranges

    def eval_constraint(constraints):
        return zfit.run(ztf.reduce_sum([c.value() for c in constraints]))

    assert eval_constraint(
        simult_nll.constraints) == eval_constraint(merged_constraints)
Example #3
def _exp_integral_from_any_to_any(limits, params, model):
    lambda_ = params['lambda']

    def raw_integral(x):
        return model._numerics_shifted_exp(x=x, lambda_=lambda_) / lambda_  # needed due to overflow in exp otherwise

    (lower,), (upper,) = limits.limits
    if lower[0] == -np.inf and upper[0] == np.inf:  # fully infinite integration range is not supported
        raise NotImplementedError
    lower_int = raw_integral(x=ztf.constant(lower))
    upper_int = raw_integral(x=ztf.constant(upper))
    return (upper_int - lower_int)[0]
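
A minimal sketch of how such an integral function is typically hooked up, following the registration pattern of Example #20; the Space construction and the use of the Exponential PDF here are assumptions, not part of the snippet above:

# Sketch (assumption): register the analytic integral of the Exponential PDF
# for arbitrary 1D limits, mirroring Example #20.
limits_any = Space.from_axes(axes=(0,),
                             limits=(((ANY_LOWER,),), ((ANY_UPPER,),)))
Exponential.register_analytic_integral(func=_exp_integral_from_any_to_any,
                                       limits=limits_any)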
Example #4
def gauss_4d(x, params):
    norm = params[0]
    mean = tf.stack(params[1:5])
    sigma = tf.stack(params[5:9])
    corr = tf.stack([[ztf.constant(1.), params[9], params[10], params[11]],
                     [params[9], ztf.constant(1.), params[12], params[13]],
                     [params[10], params[12], ztf.constant(1.), params[14]],
                     [params[11], params[13], params[14], ztf.constant(1.)]])

    cov = tf.einsum("i,ij,j->ij", sigma, corr, sigma)
    invcov = tf.matrix_inverse(cov)
    return multivariate_gauss(x, norm, mean, invcov)
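
The einsum above builds the covariance as cov[i, j] = sigma[i] * corr[i, j] * sigma[j]. A standalone NumPy check of that identity (a sketch, not part of the snippet):

import numpy as np

sigma = np.array([1., 2., 3., 4.])
corr = np.eye(4)  # any symmetric correlation matrix would do
cov = np.einsum("i,ij,j->ij", sigma, corr, sigma)
# the same as rescaling corr by the outer product of the sigmas
assert np.allclose(cov, np.outer(sigma, sigma) * corr)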
Example #5
        def __call__(self, n_to_produce, limits, dtype):
            importance_sampling_called[0] = True

            import tensorflow_probability.python.distributions as tfd
            n_to_produce = tf.cast(n_to_produce, dtype=tf.int32)
            gaussian = tfd.TruncatedNormal(loc=ztf.constant(-1.),
                                           scale=ztf.constant(2.),
                                           low=low,
                                           high=high)
            sample = gaussian.sample(sample_shape=(n_to_produce, 1))
            weights = gaussian.prob(sample)[:, 0]
            thresholds = tf.random.uniform(shape=(n_to_produce, ), dtype=dtype)
            return sample, thresholds, weights, None, n_to_produce
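
The __call__ above implements the sampler API documented in Example #24. A hypothetical wiring sketch (the pdf and the wrapping class name are assumptions; the factory argument itself comes from Example #24):

# Sketch (assumption): plug a custom importance sampler into accept_reject_sample.
sample = accept_reject_sample(prob=gauss.pdf,
                              n=10000,
                              limits=obs1,
                              sample_and_weights_factory=GaussianSampleAndWeights)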
Example #6
    def __call__(self, n_to_produce: Union[int, tf.Tensor], limits: Space,
                 dtype):
        rnd_samples = []
        thresholds_unscaled_list = []
        weights = ztf.constant(1., shape=(1, ))

        for (lower, upper), area in zip(limits.iter_limits(as_tuple=True),
                                        limits.iter_areas(rel=True)):
            n_partial_to_produce = tf.to_int64(
                ztf.to_real(n_to_produce) *
                ztf.to_real(area))  # TODO(Mayou36): split right!
            lower = ztf.convert_to_tensor(lower, dtype=dtype)
            upper = ztf.convert_to_tensor(upper, dtype=dtype)
            sample_drawn = tf.random_uniform(
                shape=(n_partial_to_produce, limits.n_obs + 1),
                # + 1 dim for the function value
                dtype=ztypes.float)
            rnd_sample = sample_drawn[:, :-1] * (
                upper - lower) + lower  # -1: all except func value
            thresholds_unscaled = sample_drawn[:, -1]
            # if not multiple_limits:
            #     return rnd_sample, thresholds_unscaled
            rnd_samples.append(rnd_sample)
            thresholds_unscaled_list.append(thresholds_unscaled)

        rnd_sample = tf.concat(rnd_samples, axis=0)
        thresholds_unscaled = tf.concat(thresholds_unscaled_list, axis=0)

        n_drawn = n_to_produce
        return rnd_sample, thresholds_unscaled, weights, weights, n_drawn
Example #7
def create_loss():
    with tf.variable_scope("func1"):
        a_param = zfit.Parameter("variable_a15151",
                                 1.5,
                                 -1.,
                                 20.,
                                 step_size=ztf.constant(0.1))
        b_param = zfit.Parameter("variable_b15151", 3.5)
        c_param = zfit.Parameter("variable_c15151", -0.04)
        obs1 = zfit.Space(obs='obs1', limits=(-2.4, 9.1))

        # load params for sampling
        a_param.load(true_a)
        b_param.load(true_b)
        c_param.load(true_c)

    gauss1 = zfit.pdf.Gauss(mu=a_param, sigma=b_param, obs=obs1)
    exp1 = zfit.pdf.Exponential(lambda_=c_param, obs=obs1)

    sum_pdf1 = 0.9 * gauss1 + exp1

    sampled_data = sum_pdf1.create_sampler(n=15000)
    sampled_data.resample()

    loss = zfit.loss.UnbinnedNLL(model=sum_pdf1,
                                 data=sampled_data,
                                 fit_range=obs1)

    return loss, (a_param, b_param, c_param)
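
A hedged follow-up sketch, based on the minimizer usage in Example #19 rather than on this snippet: the loss returned by create_loss can be minimized directly and the fitted values read back from the result.

loss, (a_param, b_param, c_param) = create_loss()
minimizer = zfit.minimize.Minuit()
result = minimizer.minimize(loss=loss)
print(result.params[a_param]['value'])  # should end up close to true_a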
Example #8
def func_integral_chebyshev2(limits, norm_range, params, model):
    lower, upper = limits.limit1d
    lower_rescaled = model._polynomials_rescale(lower)
    upper_rescaled = model._polynomials_rescale(upper)

    lower = ztf.convert_to_tensor(lower_rescaled)
    upper = ztf.convert_to_tensor(upper_rescaled)

    # the integral of cheby2_n is cheby1_{n+1} / (n + 1). We fold the 1 / (n + 1) into the coefficients;
    # the cheby1 shape function then performs the sum for us.
    coeffs_cheby1 = {'c_0': ztf.constant(0., dtype=model.dtype)}

    for name, coeff in params.items():
        n_plus1 = int(name.split("_", 1)[-1]) + 1
        coeffs_cheby1[f'c_{n_plus1}'] = coeff / ztf.convert_to_tensor(
            n_plus1, dtype=model.dtype)
    coeffs_cheby1 = convert_coeffs_dict_to_list(coeffs_cheby1)

    def indefinite_integral(limits):
        return chebyshev_shape(x=limits, coeffs=coeffs_cheby1)

    integral = indefinite_integral(upper) - indefinite_integral(lower)
    integral = tf.reshape(integral, shape=())
    integral *= 0.5 * model.space.area()  # rescale back to whole width

    return integral
Example #9
def func_integral_hermite(limits, norm_range, params, model):
    lower, upper = limits.limit1d
    lower_rescaled = model._polynomials_rescale(lower)
    upper_rescaled = model._polynomials_rescale(upper)

    lower = ztf.convert_to_tensor(lower_rescaled)
    upper = ztf.convert_to_tensor(upper_rescaled)

    # the integral of hermite_n is hermite_{n+1} / (2 (n + 1)). We fold that factor into the coefficients.
    coeffs = {'c_0': ztf.constant(0., dtype=model.dtype)}

    for name, coeff in params.items():
        ip1_coeff = int(name.split("_", 1)[-1]) + 1
        coeffs[f'c_{ip1_coeff}'] = coeff / ztf.convert_to_tensor(
            ip1_coeff * 2., dtype=model.dtype)
    coeffs = convert_coeffs_dict_to_list(coeffs)

    def indefinite_integral(limits):
        return hermite_shape(x=limits, coeffs=coeffs)

    integral = indefinite_integral(upper) - indefinite_integral(lower)
    integral = tf.reshape(integral, shape=())
    integral *= 0.5 * model.space.area()  # rescale back to whole width

    return integral
Example #10
    def __init__(self,
                 obs,
                 coeffs: list,
                 apply_scaling: bool = True,
                 coeff0: Optional[tf.Tensor] = None,
                 name: str = "Polynomial"):  # noqa
        """Base class to create 1 dimensional recursive polynomials that can be rescaled. Overwrite _poly_func.

        Args:
            coeffs (list): Coefficients for each polynomial. Used to calculate the degree.
            apply_scaling (bool): Rescale the data so that the actual limits represent (-1, 1).

                .. math::
                   x_{n+1} = recurrence(x_{n}, x_{n-1}, n)

        """
        # 0th coefficient set to 1 by default
        coeff0 = ztf.constant(1.) if coeff0 is None else tf.cast(
            coeff0, dtype=ztypes.float)
        coeffs = convert_to_container(coeffs).copy()
        coeffs.insert(0, coeff0)
        params = {f"c_{i}": coeff for i, coeff in enumerate(coeffs)}
        self._degree = len(coeffs) - 1  # 1 coeff -> 0th degree
        self._do_scale = apply_scaling
        if apply_scaling and not (isinstance(obs, Space)
                                  and obs.n_limits == 1):
            raise ValueError(
                "obs need to be a Space with exactly one limit if rescaling is requested."
            )
        super().__init__(obs=obs, name=name, params=params)
Example #11
def _nll_constraints_tf(constraints):
    if not constraints:
        return ztf.constant(0.)  # adding 0 to nll
    probs = []
    for param, dist in constraints.items():
        probs.append(dist.pdf(param))
    # probs = [dist.pdf(param) for param, dist in constraints.items()]
    constraints_neg_log_prob = -tf.reduce_sum(input_tensor=tf.math.log(probs))
    return constraints_neg_log_prob
Example #12
def true_nll_gaussian(params, mu, sigma):
    params = convert_to_container(params, container=tuple)
    mu = convert_to_container(mu, container=tuple)
    sigma = convert_to_container(sigma, container=tuple)
    constraint = ztf.constant(0.)
    if not len(params) == len(mu) == len(sigma):
        raise ValueError("params, mu and sigma have to have the same length.")
    for param, mean, sig in zip(params, mu, sigma):
        constraint += ztf.reduce_sum(
            ztf.square(param - mean) / (2. * ztf.square(sig)))

    return constraint
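
A small usage sketch (an assumption, not part of the snippet): the helper returns a plain graph tensor, so it can be evaluated with zfit.run; for a single parameter the penalty is (value - mu)**2 / (2 * sigma**2).

param = zfit.Parameter("param_constraint_demo", 1.4)  # hypothetical parameter
penalty = true_nll_gaussian(params=param, mu=1., sigma=0.5)
print(zfit.run(penalty))  # (1.4 - 1.0)**2 / (2 * 0.5**2) = 0.32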
Example #13
def test_extended_unbinned_nll():
    test_values = ztf.constant(test_values_np)
    test_values = zfit.data.Data.from_tensor(obs=obs1, tensor=test_values)
    nll_object = zfit.loss.ExtendedUnbinnedNLL(model=gaussian3,
                                               data=test_values,
                                               fit_range=(-20, 20))
    minimizer = MinuitMinimizer()
    status = minimizer.minimize(loss=nll_object, params=[mu3, sigma3, yield3])
    params = status.params
    assert params[mu3]['value'] == pytest.approx(np.mean(test_values_np), rel=0.005)
    assert params[sigma3]['value'] == pytest.approx(np.std(test_values_np), rel=0.005)
    assert params[yield3]['value'] == pytest.approx(yield_true, rel=0.005)
Example #14
def test_gradients():
    param1 = Parameter("param111", 1.)
    param2 = Parameter("param222", 2.)

    gauss1 = Gauss(param1, 4, obs=obs1)
    gauss1.set_norm_range((-5, 5))
    gauss2 = Gauss(param2, 5, obs=obs1)
    gauss2.set_norm_range((-5, 5))

    data1 = zfit.data.Data.from_tensor(obs=obs1, tensor=ztf.constant(1., shape=(100,)))
    data1.set_data_range((-5, 5))
    data2 = zfit.data.Data.from_tensor(obs=obs1, tensor=ztf.constant(1., shape=(100,)))
    data2.set_data_range((-5, 5))

    nll = UnbinnedNLL(model=[gauss1, gauss2], data=[data1, data2])

    gradient1 = nll.gradients(params=param1)
    assert zfit.run(gradient1) == zfit.run(tf.gradients(nll.value(), param1))
    gradient2 = nll.gradients(params=[param2, param1])
    both_gradients_true = zfit.run(tf.gradients(nll.value(), [param2, param1]))
    assert zfit.run(gradient2) == both_gradients_true
    gradient3 = nll.gradients()
    assert frozenset(zfit.run(gradient3)) == frozenset(both_gradients_true)
Example #15
def test_get_dependents():
    var1 = zfit.Parameter('var1', 1.)
    var2 = zfit.Parameter('var2', 2.)
    var3 = zfit.Parameter('var3', 3.)
    a = zfit.pdf.Gauss(var1, var2, obs='obs1').sample(n=500,
                                                      limits=(-5, 5)) * 5.
    b = ztf.constant(3.) + 4 * var1
    c = 5. * b * var3
    d = b * var2 + a
    e = d * 3.
    zfit.run(e)
    assert get_dependents(
        e, [a, b, c, d, var1, var2, var3]) == [a, b, d, var1, var2]
    assert get_dependents(e, [var1, var2, var3]) == [var1, var2]
    assert get_dependents(c, [a, b, d, var1, var2, var3]) == [b, var1, var3]
Example #16
def test_normalization(pdf_factory):
    test_yield = 1524.3
    dist = pdf_factory()
    samples = tf.cast(np.random.uniform(low=low, high=high, size=100000), dtype=tf.float64)
    small_samples = tf.cast(np.random.uniform(low=low, high=high, size=10), dtype=tf.float64)
    with dist.set_norm_range(Space(obs1, limits=(low, high))):
        samples.limits = low, high
        probs = dist.pdf(samples)
        probs_small = dist.pdf(small_samples)
        log_probs = dist.log_pdf(small_samples)
        probs, log_probs = zfit.run([probs, log_probs])
        probs = np.average(probs) * (high - low)
        assert probs == pytest.approx(1., rel=0.05)
        assert log_probs == pytest.approx(zfit.run(tf.log(probs_small)), rel=0.05)
        dist = dist.create_extended(ztf.constant(test_yield))
        probs_extended = dist.pdf(samples)
        result_extended = zfit.run(probs_extended)
        result_extended = np.average(result_extended) * (high - low)
        assert result_extended == pytest.approx(1, rel=0.05)
Example #17
def test_normalization():
    test_yield = 1524.3

    samples = tf.cast(np.random.uniform(low=low, high=high, size=100000), dtype=tf.float64)
    small_samples = tf.cast(np.random.uniform(low=low, high=high, size=10), dtype=tf.float64)
    for dist in gaussian_dists + [wrapped_gauss, wrapped_normal1]:
        with dist.set_norm_range(Space(obs1, limits=(low, high))):
            samples.limits = low, high
            print("Testing currently: ", dist.name)
            probs = dist.pdf(samples)
            probs_small = dist.pdf(small_samples)
            log_probs = dist.log_pdf(small_samples)
            probs, log_probs = zfit.run([probs, log_probs])
            probs = np.average(probs) * (high - low)
            assert probs == pytest.approx(1., rel=0.05)
            assert log_probs == pytest.approx(zfit.run(tf.log(probs_small)), rel=0.05)
            dist = dist.create_extended(ztf.constant(test_yield))
            probs_extended = dist.pdf(samples)
            result_extended = zfit.run(probs_extended)
            result_extended = np.average(result_extended) * (high - low)
            assert result_extended == pytest.approx(1, rel=0.05)
Example #18
def test_implicit_sumpdf():
    # return  # TODO(Mayou36): deps: impl_copy, (mostly for Simple{PDF,Func})
    # tf.reset_default_graph()
    norm_range = (-5.7, 13.6)
    param1 = Parameter('param13s', 1.1)
    frac1 = 0.11
    frac1_param = Parameter('frac13s', frac1)
    frac2 = 0.56
    frac2_param = Parameter('frac23s', frac2)
    frac3 = 1 - frac1 - frac2  # -frac1_param -frac2_param

    param2 = Parameter('param23s', 1.5, floating=False)
    param3 = Parameter('param33s', 0.4, floating=False)
    pdf1 = SimplePDF(func=lambda self, x: x * param1**2, obs=obs1)
    pdf2 = SimplePDF(func=lambda self, x: x * param2, obs=obs1)
    pdf3 = SimplePDF(func=lambda self, x: x * 2 + param3, obs=obs1)

    # sugar 1
    # sum_pdf = frac1_param * pdf1 + frac2_param * pdf2 + pdf3  # TODO(Mayou36): deps, correct copy
    sum_pdf = zfit.pdf.SumPDF(pdfs=[pdf1, pdf2, pdf3],
                              fracs=[frac1_param, frac2_param])

    true_values = pdf1.pdf(rnd_test_values, norm_range=norm_range)
    true_values *= frac1_param
    true_values += pdf2.pdf(rnd_test_values,
                            norm_range=norm_range) * frac2_param
    true_values += pdf3.pdf(rnd_test_values,
                            norm_range=norm_range) * ztf.constant(frac3)

    assert isinstance(sum_pdf, SumPDF)
    assert not sum_pdf.is_extended

    assert zfit.run(sum(sum_pdf.fracs)) == 1.
    true_values = zfit.run(true_values)
    test_values = zfit.run(sum_pdf.pdf(rnd_test_values, norm_range=norm_range))
    np.testing.assert_allclose(true_values, test_values,
                               rtol=5e-2)  # it's MC normalized

    # sugar 2
    sum_pdf2_part1 = frac1 * pdf1 + frac2 * pdf3
Example #19
def test_simple_loss():
    true_a = 1.
    true_b = 4.
    true_c = -0.3
    a_param = zfit.Parameter("variable_a15151loss",
                             1.5,
                             -1.,
                             20.,
                             step_size=ztf.constant(0.1))
    b_param = zfit.Parameter("variable_b15151loss", 3.5)
    c_param = zfit.Parameter("variable_c15151loss", -0.23)
    param_list = [a_param, b_param, c_param]

    def loss_func():
        probs = ztf.convert_to_tensor((a_param - true_a)**2 +
                                      (b_param - true_b)**2 +
                                      (c_param - true_c)**4) + 0.42
        return tf.reduce_sum(tf.log(probs))

    loss_deps = zfit.loss.SimpleLoss(func=loss_func, dependents=param_list)
    loss = zfit.loss.SimpleLoss(func=loss_func)

    assert loss_deps.get_dependents() == set(param_list)
    assert loss.get_dependents() == set(param_list)

    loss_tensor = loss_func()
    loss_value_np = zfit.run(loss_tensor)

    assert zfit.run(loss.value()) == loss_value_np
    assert zfit.run(loss_deps.value()) == loss_value_np

    with pytest.raises(IntentionNotUnambiguousError):
        _ = loss + loss_deps

    minimizer = zfit.minimize.Minuit()
    result = minimizer.minimize(loss=loss)

    assert true_a == pytest.approx(result.params[a_param]['value'], rel=0.03)
    assert true_b == pytest.approx(result.params[b_param]['value'], rel=0.06)
    assert true_c == pytest.approx(result.params[c_param]['value'], rel=0.5)
Example #20
        mu = self.params['mu']
        sigma = self.params['sigma']
        alpha = self.params['alpha']
        n = self.params['n']
        x = x.unstack_x()
        return crystalball_func(x=x, mu=mu, sigma=sigma, alpha=alpha, n=n)


crystalball_integral_limits = Space.from_axes(axes=(0, ),
                                              limits=(((ANY_LOWER, ), ),
                                                      ((ANY_UPPER, ), )))
# TODO uncomment, dependency: bug in TF (31.1.19) # 25339 that breaks gradient of resource var in cond
# CrystalBall.register_analytic_integral(func=crystalball_integral, limits=crystalball_integral_limits)

if __name__ == '__main__':
    mu = ztf.constant(0)
    sigma = ztf.constant(0.5)
    alpha = ztf.constant(3)
    n = ztf.constant(1)
    # res = crystalball_func(np.random.random(size=100), mu, sigma, alpha, n)
    # int1 = crystalball_integral(limits=zfit.Space(obs='obs1', limits=(-3, 5)),
    #                             params={'mu': mu, "sigma": sigma, "alpha": alpha, "n": n})
    from tensorflow.contrib import autograph
    import matplotlib.pyplot as plt

    new_code = autograph.to_code(crystalball_integral)
    obs = zfit.Space(obs='obs1', limits=(-3, 1))
    cb1 = CrystalBall(mu, sigma, alpha, n, obs=obs)
    res = cb1.pdf(np.random.random(size=100))
    int1 = cb1.integrate(limits=(-0.01, 2), norm_range=obs)
    # tf.add_check_numerics_ops()
Example #21
def chunked_average(func, x, num_batches, batch_size, space, mc_sampler):
    lower, upper = space.limits

    fake_resource_var = tf.get_variable("fake_hack_ResVar_for_custom_gradient",
                                        initializer=ztf.constant(4242.))
    fake_x = ztf.constant(42.) * fake_resource_var

    @tf.custom_gradient
    def dummy_func(fake_x):  # to make working with custom_gradient
        if x is not None:
            raise DueToLazynessNotImplementedError(
                "partial not yet implemented")

        def body(batch_num, mean):
            if mc_sampler == tfp.mcmc.sample_halton_sequence:
                start_idx = batch_num * batch_size
                end_idx = start_idx + batch_size
                indices = tf.range(start_idx, end_idx, dtype=tf.int32)
                sample = mc_sampler(space.n_obs,
                                    sequence_indices=indices,
                                    dtype=ztypes.float,
                                    randomized=False)
            else:
                sample = mc_sampler(shape=(batch_size, space.n_obs),
                                    dtype=ztypes.float)
            sample = tf.guarantee_const(sample)
            sample = (np.array(upper[0]) -
                      np.array(lower[0])) * sample + lower[0]
            sample = tf.transpose(sample)
            sample = func(sample)
            sample = tf.guarantee_const(sample)

            batch_mean = tf.reduce_mean(sample)
            batch_mean = tf.guarantee_const(batch_mean)
            # with tf.control_dependencies([batch_mean]):
            err_weight = 1 / tf.to_double(batch_num + 1)
            # err_weight /= err_weight + 1
            # print_op = tf.print(batch_mean)
            print_op = tf.print(batch_num + 1, mean,
                                err_weight * (batch_mean - mean))
            with tf.control_dependencies([print_op]):
                return batch_num + 1, mean + err_weight * (batch_mean - mean)
            # return batch_num + 1, tf.guarantee_const(mean + err_weight * (batch_mean - mean))

        cond = lambda batch_num, _: batch_num < num_batches

        initial_mean = tf.convert_to_tensor(0, dtype=ztypes.float)
        _, final_mean = tf.while_loop(cond,
                                      body, (0, initial_mean),
                                      parallel_iterations=1,
                                      swap_memory=False,
                                      back_prop=False,
                                      maximum_iterations=num_batches)

        def dummy_grad_with_var(dy, variables=None):
            raise DueToLazynessNotImplementedError("Who called me? Mayou36")
            if variables is None:
                raise DueToLazynessNotImplementedError(
                    "Is this needed? Why? It's not a NN. Please make an issue."
                )

            def dummy_grad_func(x):
                values = func(x)
                if variables:
                    gradients = tf.gradients(values, variables, grad_ys=dy)
                else:
                    gradients = None
                return gradients

            return chunked_average(func=dummy_grad_func,
                                   x=x,
                                   num_batches=num_batches,
                                   batch_size=batch_size,
                                   space=space,
                                   mc_sampler=mc_sampler)

        def dummy_grad_without_var(dy):
            return dummy_grad_with_var(dy=dy, variables=None)

        print_op = tf.print(final_mean)
        with tf.control_dependencies([print_op]):
            return tf.guarantee_const(final_mean), dummy_grad_with_var

    try:
        return dummy_func(fake_x)
    except TypeError:
        return dummy_func(fake_x)
Example #22
    def model(self, x):
        # sum the 2D Gaussian components, one per 6-tuple of parameters
        d = ztf.constant(0.)
        for i in self.params:
            d += gauss_2d(x, i[0], i[1], i[2], i[3], i[4], i[5])
        return d
Example #23
import numpy as np
import tensorflow as tf

import zfit
from zfit.core.loss import SimpleLoss
import zfit.minimizers.baseminimizer as zmin
from zfit import ztf
import zfit.minimizers.optimizers_tf

with tf.variable_scope("func1"):
    true_a = 1.
    true_b = 4.
    true_c = -0.3
    a_param = zfit.Parameter("variable_a15151",
                             1.5,
                             -1.,
                             20.,
                             step_size=ztf.constant(0.1))
    b_param = zfit.Parameter("variable_b15151", 3.5)
    c_param = zfit.Parameter("variable_c15151", -0.04)
    obs1 = zfit.Space(obs='obs1', limits=(-2.4, 9.1))

    # load params for sampling
    a_param.load(true_a)
    b_param.load(true_b)
    c_param.load(true_c)

gauss1 = zfit.pdf.Gauss(mu=a_param, sigma=b_param, obs=obs1)
exp1 = zfit.pdf.Exponential(lambda_=c_param, obs=obs1)

sum_pdf1 = 0.9 * gauss1 + exp1

sampled_data = sum_pdf1.create_sampler(n=15000)
Example #24
def accept_reject_sample(
        prob: Callable,
        n: int,
        limits: Space,
        sample_and_weights_factory: Callable = UniformSampleAndWeights,
        dtype=ztypes.float,
        prob_max: Union[None, int] = None,
        efficiency_estimation: float = 1.0) -> tf.Tensor:
    """Accept reject sample from a probability distribution.

    Args:
        prob (function): A function taking x (a Tensor) as an argument and returning the probability
            (or anything proportional to the probability).
        n (int): Number of samples to produce
        limits (:py:class:`~zfit.Space`): The limits to sample from
        sample_and_weights_factory (Callable): A factory function that returns the following function:
            A function that returns the sample to insert into `prob` and the weights
            (probability density) of each sample together with the random thresholds. The API looks as follows:

            - Parameters:

                - n_to_produce (int, tf.Tensor): The number of events to produce (approximately; the sampler may return more or fewer).
                - limits (Space): the limits in which the samples will be.
                - dtype (dtype): DType of the output.

            - Return:
                A tuple of length 5:
                - proposed sample (tf.Tensor with shape=(n_to_produce, n_obs)): The new (proposed) sample
                    whose values are inside `limits`.
                - thresholds_unscaled (tf.Tensor with shape=(n_to_produce,)): Uniformly distributed
                    random values **between 0 and 1**.
                - weights (tf.Tensor with shape=(n_to_produce)): (Proportional to the) probability
                    for each sample of the distribution it was drawn from.
                - weights_max (int, tf.Tensor, None): The maximum of the weights (if known). This is
                    what the probability maximum will be scaled with, so it should be rather lower than the maximum
                    if the peaks do not exactly coincide. Otherwise return None (which will **assume**
                    that the peaks coincide).
                - n_produced: the number of events produced. Can deviate from the requested number.

        dtype ():
        prob_max (Union[None, int]): The maximum of the model function within the given limits. If None,
            it is estimated automatically and safely, at the cost of roughly 10% more computation time
            (constant weak scaling).
        efficiency_estimation (float): estimation of the initial sampling efficiency.

    Returns:
        tf.Tensor: The accepted sample with shape (n, n_obs).
    """
    multiple_limits = limits.n_limits > 1

    sample_and_weights = sample_and_weights_factory()
    n = tf.to_int32(n)
    if run.numeric_checks:
        assert_valid_n_op = tf.assert_non_negative(n)
        deps = [assert_valid_n_op]
    else:
        deps = []
    # We may produce more than n; we normally do (except for EventSpace, which is not a generator).
    # We cannot cut inside the while loop as soon as enough events have been produced, because we may sample
    # from multiple limits and therefore need to randomly remove events; otherwise we would be biased, since
    # the drawn samples are ordered by the different limits.
    dynamic_array_shape = True

    # for fixed limits in EventSpace we need to know which indices have been successfully sampled. Therefore this
    # can be None (if not needed) or a boolean tensor with the size `n`
    initial_is_sampled = tf.constant("EMPTY")
    if isinstance(limits, EventSpace) and not limits.is_generator:
        dynamic_array_shape = False
        if run.numeric_checks:
            assert_n_matches_limits_op = tf.assert_equal(
                tf.shape(limits.lower[0][0])[0], n)
            tfdeps = [assert_n_matches_limits_op]
        else:
            tfdeps = []
        with tf.control_dependencies(
                tfdeps):  # TODO(Mayou36): good check? could be 1d
            initial_is_sampled = tf.fill(value=False, dims=(n, ))
        efficiency_estimation = 1.0  # generate exactly n
    with tf.control_dependencies(deps):
        initial_n_produced = tf.constant(0, dtype=tf.int32)
        initial_n_drawn = tf.constant(0, dtype=tf.int32)
        with tf.control_dependencies([n]):
            sample = tf.TensorArray(
                dtype=dtype,
                size=n,
                dynamic_size=dynamic_array_shape,
                clear_after_read=True,  # we read only once at end to tensor
                element_shape=(limits.n_obs, ))

    def not_enough_produced(n, sample, n_produced, n_total_drawn, eff,
                            is_sampled, weights_scaling):
        return tf.greater(n, n_produced)

    def sample_body(n,
                    sample,
                    n_produced=0,
                    n_total_drawn=0,
                    eff=1.0,
                    is_sampled=None,
                    weights_scaling=0.):
        eff = tf.reduce_max([eff, ztf.to_real(1e-6)])

        n_to_produce = n - n_produced

        if isinstance(
                limits,
                EventSpace):  # EXPERIMENTAL(Mayou36): added to test EventSpace
            limits.create_limits(n=n)

        do_print = settings.get_verbosity() > 5
        if do_print:
            print_op = tf.print("Number of samples to produce:", n_to_produce,
                                " with efficiency ", eff,
                                " with total produced ", n_produced,
                                " and total drawn ", n_total_drawn,
                                " with weights scaling", weights_scaling)
        with tf.control_dependencies([print_op] if do_print else []):
            n_to_produce = tf.identity(n_to_produce)
        if dynamic_array_shape:
            n_to_produce = tf.to_int32(
                ztf.to_real(n_to_produce) / eff *
                (1.1)) + 10  # just to make sure
            # TODO: adjustable efficiency cap for memory efficiency (prevent too many samples at once produced)
            max_produce_cap = tf.to_int32(8e5)
            safe_to_produce = tf.maximum(
                max_produce_cap,
                n_to_produce)  # protect against overflow, n_to_prod -> neg.
            n_to_produce = tf.minimum(
                safe_to_produce,
                max_produce_cap)  # introduce a cap to force serial
            new_limits = limits
        else:
            # TODO(Mayou36): add cap for n_to_produce here as well
            if multiple_limits:
                raise DueToLazynessNotImplementedError(
                    "Multiple limits for fixed event space not yet implemented"
                )
            is_not_sampled = tf.logical_not(is_sampled)
            (lower, ), (upper, ) = limits.limits
            lower = tuple(
                tf.boolean_mask(low, is_not_sampled) for low in lower)
            upper = tuple(tf.boolean_mask(up, is_not_sampled) for up in upper)
            new_limits = limits.with_limits(limits=((lower, ), (upper, )))
            draw_indices = tf.where(is_not_sampled)

        with tf.control_dependencies([n_to_produce]):
            rnd_sample, thresholds_unscaled, weights, weights_max, n_drawn = sample_and_weights(
                n_to_produce=n_to_produce, limits=new_limits, dtype=dtype)

        n_drawn = tf.cast(n_drawn, dtype=tf.int32)
        if run.numeric_checks:
            assert_op_n_drawn = tf.assert_non_negative(n_drawn)
            tfdeps = [assert_op_n_drawn]
        else:
            tfdeps = []
        with tf.control_dependencies(tfdeps):
            n_total_drawn += n_drawn

            probabilities = prob(rnd_sample)
        shape_rnd_sample = tf.shape(rnd_sample)[0]
        if run.numeric_checks:
            assert_prob_rnd_sample_op = tf.assert_equal(
                tf.shape(probabilities), shape_rnd_sample)
            tfdeps = [assert_prob_rnd_sample_op]
        else:
            tfdeps = []
        # assert_weights_rnd_sample_op = tf.assert_equal(tf.shape(weights), shape_rnd_sample)
        # print_op = tf.print("shapes: ", tf.shape(weights), shape_rnd_sample, "shapes end")
        with tf.control_dependencies(tfdeps):
            probabilities = tf.identity(probabilities)
        if prob_max is None or weights_max is None:  # TODO(performance): estimate prob_max, after enough estimations -> fix it?
            # TODO(Mayou36): This control dependency is needed because otherwise the max won't be determined
            # correctly. A bug report will be filed (WIP).
            # The behavior is very odd: if we do not force a kind of copy, the `reduce_max` returns
            # a value smaller by a factor of 1e-14
            # with tf.control_dependencies([probabilities]):
            # UPDATE: this works now? Was it just a one-time bug?

            # safety margin, predicting future, improve for small samples?
            weights_maximum = tf.reduce_max(weights)
            weights_clipped = tf.maximum(weights, weights_maximum * 1e-5)
            # prob_weights_ratio = probabilities / weights
            prob_weights_ratio = probabilities / weights_clipped
            # min_prob_weights_ratio = tf.reduce_min(prob_weights_ratio)
            max_prob_weights_ratio = tf.reduce_max(prob_weights_ratio)
            ratio_threshold = 50000000.
            # clipping means that we don't scale more for a certain threshold
            # to properly account for very small numbers, the thresholds should be scaled to match the ratio
            # but if a weight of a sample is very low (compared to the other weights), this would force the acceptance
            # of other samples to decrease strongly. We introduce a cut here, meaning that any event with an acceptance
            # chance of less than 1 in ratio_threshold will be underestimated.
            # TODO(Mayou36): make ratio_threshold a global setting
            # max_prob_weights_ratio_clipped = tf.minimum(max_prob_weights_ratio,
            #                                             min_prob_weights_ratio * ratio_threshold)
            max_prob_weights_ratio_clipped = max_prob_weights_ratio
            weights_scaling = tf.maximum(
                weights_scaling, max_prob_weights_ratio_clipped * (1 + 1e-2))
        else:
            weights_scaling = prob_max / weights_max
            min_prob_weights_ratio = weights_scaling

        weights_scaled = weights_scaling * weights * (1 + 1e-8
                                                      )  # numerical epsilon
        random_thresholds = thresholds_unscaled * weights_scaled
        if run.numeric_checks:
            invalid_probs_weights = tf.greater(probabilities, weights_scaled)
            failed_weights = tf.boolean_mask(weights_scaled,
                                             mask=invalid_probs_weights)
            failed_probs = tf.boolean_mask(probabilities,
                                           mask=invalid_probs_weights)

            print_op = tf.print(
                "HACK WARNING: if the following is NOT empty, your sampling _may_ be biased."
                " Failed weights:", failed_weights, " failed probs",
                failed_probs)
            assert_no_failed_probs = tf.assert_equal(tf.shape(failed_weights),
                                                     [0])
            # assert_op = [print_op]
            assert_op = [assert_no_failed_probs]
            # for weights scaled more than ratio_threshold
            # assert_op = [tf.assert_greater_equal(x=weights_scaled, y=probabilities,
            #                                      data=[tf.shape(failed_weights), failed_weights, failed_probs],
            #                                      message="Not all weights are >= probs so the sampling "
            #                                              "will be biased. If a custom `sample_and_weights` "
            #                                              "was used, make sure that either the shape of the "
            #                                              "custom sampler (resp. it's weights) overlap better "
            #                                              "or decrease the `max_weight`")]
            #
            # # check disabled (below not added to deps)
            # assert_scaling_op = tf.assert_less(weights_scaling / min_prob_weights_ratio, ztf.constant(ratio_threshold),
            #                                    data=[weights_scaling, min_prob_weights_ratio],
            #                                    message="The ratio between the probabilities from the pdf and the"
            #                                    f"probability from the sampler is higher "
            #                                    f" then {ratio_threshold}. This will most probably bias the sampling. "
            #                                    f"Use importance sampling or, to disable this check, do"
            #                                    f"zfit.run.numeric_checks = False")
            # assert_op.append(assert_scaling_op)
        else:
            assert_op = []
        with tf.control_dependencies(assert_op):
            take_or_not = probabilities > random_thresholds
        take_or_not = take_or_not[0] if len(
            take_or_not.shape) == 2 else take_or_not
        filtered_sample = tf.boolean_mask(rnd_sample, mask=take_or_not, axis=0)

        n_accepted = tf.shape(filtered_sample)[0]
        n_produced_new = n_produced + n_accepted
        if not dynamic_array_shape:
            indices = tf.boolean_mask(draw_indices, mask=take_or_not)
            current_sampled = tf.sparse_tensor_to_dense(tf.SparseTensor(
                indices=indices,
                values=tf.broadcast_to(input=(True, ), shape=(n_accepted, )),
                dense_shape=(tf.cast(n, dtype=tf.int64), )),
                                                        default_value=False)
            is_sampled = tf.logical_or(is_sampled, current_sampled)
            indices = indices[:, 0]
        else:
            indices = tf.range(n_produced, n_produced_new)

        sample_new = sample.scatter(indices=tf.cast(indices, dtype=tf.int32),
                                    value=filtered_sample)

        # efficiency (estimate) of how many samples we get
        eff = tf.reduce_max([ztf.to_real(n_produced_new),
                             ztf.to_real(1.)]) / tf.reduce_max(
                                 [ztf.to_real(n_total_drawn),
                                  ztf.to_real(1.)])
        return n, sample_new, n_produced_new, n_total_drawn, eff, is_sampled, weights_scaling

    efficiency_estimation = ztf.to_real(efficiency_estimation)
    weights_scaling = ztf.constant(0.)
    loop_vars = (n, sample, initial_n_produced, initial_n_drawn,
                 efficiency_estimation, initial_is_sampled, weights_scaling)

    sample_array = tf.while_loop(
        cond=not_enough_produced,
        body=sample_body,  # paraopt
        loop_vars=loop_vars,
        swap_memory=True,
        parallel_iterations=1,
        back_prop=False)[1]  # backprop not needed here
    new_sample = sample_array.stack()
    if multiple_limits:
        new_sample = tf.random.shuffle(
            new_sample)  # shuffle so that the cut below removes events at random, not biased toward one limit
    if dynamic_array_shape:  # if not dynamic we produced exact n -> no need to cut
        new_sample = new_sample[:n, :]  # cut away the surplus produced events

    # if no failure, uncomment both for improvement of shape inference, but what if n is tensor?
    # with suppress(AttributeError):  # if n_samples_int is not a numpy object
    #     new_sample.set_shape((n_samples_int, n_dims))
    return new_sample
Example #25
def test_run():
    a = ztf.constant(4.)
    b = 5 * a
    assert zfit.run(b) == pytest.approx(20)