Example #1
def func_integral_hermite(limits, norm, params, model):
    lower, upper = limits.limit1d
    lower_rescaled = model._polynomials_rescale(lower)
    upper_rescaled = model._polynomials_rescale(upper)

    lower = z.convert_to_tensor(lower_rescaled)
    upper = z.convert_to_tensor(upper_rescaled)

    # the integral of hermite H_n is H_{n+1} / (2 * (n + 1)). We shift the coefficients accordingly.
    coeffs = {"c_0": z.constant(0.0, dtype=model.dtype)}

    for name, coeff in params.items():
        ip1_coeff = int(name.split("_", 1)[-1]) + 1
        coeffs[f"c_{ip1_coeff}"] = coeff / z.convert_to_tensor(
            ip1_coeff * 2.0, dtype=model.dtype)
    coeffs = convert_coeffs_dict_to_list(coeffs)

    def indefinite_integral(limits):
        return hermite_shape(x=limits, coeffs=coeffs)

    integral = indefinite_integral(upper) - indefinite_integral(lower)
    integral = znp.reshape(integral, newshape=())
    integral *= 0.5 * model.space.area()  # rescale back to whole width

    return integral
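The coefficient shift above relies on the identity d/dx H_{n+1} = 2(n+1) H_n for the physicists' Hermite polynomials, so the antiderivative of H_n is H_{n+1} / (2(n+1)). A quick NumPy check of that identity (illustrative only, not part of zfit):

import numpy as np
from numpy.polynomial import hermite

n = 3
c = np.zeros(n + 2)
c[n + 1] = 1.0  # coefficient series of H_{n+1}
deriv = hermite.hermder(c)  # d/dx H_{n+1}, expressed in the Hermite basis
expected = np.zeros(n + 1)
expected[n] = 2.0 * (n + 1)  # 2*(n+1) * H_n
np.testing.assert_allclose(deriv, expected)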
Example #2
def func_integral_chebyshev1(limits, norm_range, params, model):
    lower, upper = limits.limit1d
    lower_rescaled = model._polynomials_rescale(lower)
    upper_rescaled = model._polynomials_rescale(upper)

    lower = z.convert_to_tensor(lower_rescaled)
    upper = z.convert_to_tensor(upper_rescaled)

    integral = model.params["c_0"] * (
        upper - lower)  # polynomial 0 is defined as T_0 = 1
    if model.degree >= 1:
        integral += model.params["c_1"] * 0.5 * (
            upper**2 - lower**2)  # polynomial 1 is defined as T_1 = x
    if model.degree >= 2:

        def indefinite_integral(limits):
            max_degree = model.degree + 1
            polys = do_recurrence(x=limits,
                                  polys=chebyshev_polys,
                                  degree=max_degree,
                                  recurrence=chebyshev_recurrence)
            one_limit_integrals = []
            for degree in range(2, max_degree):
                coeff = model.params[f"c_{degree}"]
                n_float = z.convert_to_tensor(degree)
                integral = (n_float * polys[degree + 1] /
                            (z.square(n_float) - 1) - limits * polys[degree] /
                            (n_float - 1))
                one_limit_integrals.append(coeff * integral)
            return z.reduce_sum(one_limit_integrals, axis=0)

        integral += indefinite_integral(upper) - indefinite_integral(lower)
        integral = tf.reshape(integral, shape=())
    integral *= 0.5 * model.space.area()  # rescale back to whole width
    return integral
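The inner loop uses the antiderivative \int T_n dx = n T_{n+1} / (n^2 - 1) - x T_n / (n - 1) for n >= 2, which is the usual (T_{n+1}/(n+1) - T_{n-1}/(n-1)) / 2 form rewritten with the recurrence T_{n-1} = 2x T_n - T_{n+1}. A small finite-difference sanity check with NumPy, independent of zfit:

import numpy as np
from numpy.polynomial import Chebyshev

n = 4
Tn, Tnp1 = Chebyshev.basis(n), Chebyshev.basis(n + 1)
antideriv = lambda t: n * Tnp1(t) / (n**2 - 1) - t * Tn(t) / (n - 1)
x = np.linspace(-0.9, 0.9, 8)
h = 1e-6  # the central difference of the antiderivative should give back T_n
np.testing.assert_allclose((antideriv(x + h) - antideriv(x - h)) / (2 * h),
                           Tn(x), rtol=1e-5, atol=1e-8)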
Example #3
def test_poisson_constrain():
    x, lam = np.random.randint(1, 100, size=(2, 50))
    constr = zfit.constraint.PoissonConstraint(
        params=z.convert_to_tensor(x), observation=z.convert_to_tensor(lam))
    poiss_constr_val = constr.value()
    true_val = true_poisson_constr_value(x, lam)
    np.testing.assert_allclose(poiss_constr_val, true_val)
Example #4
def func_integral_chebyshev2(limits, norm, params, model):
    lower, upper = limits.limit1d
    lower_rescaled = model._polynomials_rescale(lower)
    upper_rescaled = model._polynomials_rescale(upper)

    lower = z.convert_to_tensor(lower_rescaled)
    upper = z.convert_to_tensor(upper_rescaled)

    # the integral of cheby2 U_n is T_{n+1} / (n + 1) (a cheby1). We absorb the 1/(n+1) into the coeffs;
    # the cheby1 shape then makes the sum for us.
    coeffs_cheby1 = {"c_0": z.constant(0.0, dtype=model.dtype)}

    for name, coeff in params.items():
        n_plus1 = int(name.split("_", 1)[-1]) + 1
        coeffs_cheby1[f"c_{n_plus1}"] = coeff / z.convert_to_tensor(
            n_plus1, dtype=model.dtype)
    coeffs_cheby1 = convert_coeffs_dict_to_list(coeffs_cheby1)

    def indefinite_integral(limits):
        return chebyshev_shape(x=limits, coeffs=coeffs_cheby1)

    integral = indefinite_integral(upper) - indefinite_integral(lower)
    integral = znp.reshape(integral, newshape=())
    integral *= 0.5 * model.space.area()  # rescale back to whole width

    return integral
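The comment relies on \int U_n dx = T_{n+1} / (n + 1), equivalently d/dx T_{n+1} = (n + 1) U_n. A quick check with SciPy (assuming scipy is available; not part of zfit):

import numpy as np
from scipy.special import eval_chebyt, eval_chebyu

n = 3
x = np.linspace(-0.9, 0.9, 7)
h = 1e-6  # d/dx T_{n+1}(x) should equal (n + 1) * U_n(x)
dT = (eval_chebyt(n + 1, x + h) - eval_chebyt(n + 1, x - h)) / (2 * h)
np.testing.assert_allclose(dT, (n + 1) * eval_chebyu(n, x), rtol=1e-5, atol=1e-8)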
Example #5
def test_mc_partial_integration():
    values = z.convert_to_tensor(func4_values)
    data1 = zfit.Data.from_tensor(obs="obs2",
                                  tensor=tf.expand_dims(values, axis=-1))
    limits1 = Space(limits=limits4_2dim, obs=["obs1", "obs3"], axes=(0, 2))
    num_integral = zintegrate.mc_integrate(func=func4_3deps,
                                           limits=limits1,
                                           x=data1)

    vals_tensor = z.convert_to_tensor(func4_2values)

    vals_reshaped = tf.transpose(a=vals_tensor)
    data2 = zfit.Data.from_tensor(obs=["obs1", "obs3"], tensor=vals_reshaped)

    limits2 = Space(limits=limits4_1dim, obs=["obs2"], axes=1)
    num_integral2 = zintegrate.mc_integrate(func=func4_3deps,
                                            limits=limits2,
                                            x=data2,
                                            draws_per_dim=1000)

    integral = num_integral.numpy()
    integral2 = num_integral2.numpy()
    # print("DEBUG, value:", vals_reshaped)
    assert len(integral) == len(func4_values)
    assert len(integral2) == len(func4_2values[0])
    assert func4_3deps_0and2_integrated(
        x=func4_values, limits=limits4_2dim) == pytest.approx(integral,
                                                              rel=0.05)

    assert func4_3deps_1_integrated(
        x=func4_2values, limits=limits4_1dim) == pytest.approx(integral2,
                                                               rel=0.05)
Example #6
def test_mc_partial_integration():
    values = z.convert_to_tensor(func4_values)
    data1 = zfit.Data.from_tensor(obs='obs2',
                                  tensor=tf.expand_dims(values, axis=-1))
    limits1 = Space(limits=limits4_2dim, obs=['obs1', 'obs3'])
    limits1._set_obs_axes({'obs1': 0, 'obs3': 2})
    num_integral = zintegrate.mc_integrate(x=data1,
                                           func=func4_3deps,
                                           limits=limits1)

    vals_tensor = z.convert_to_tensor(func4_2values)

    vals_reshaped = tf.transpose(a=vals_tensor)
    data2 = zfit.Data.from_tensor(obs=['obs1', 'obs3'], tensor=vals_reshaped)

    limits2 = Space(limits=limits4_1dim, obs=['obs2'])
    limits2._set_obs_axes({'obs2': 1})
    num_integral2 = zintegrate.mc_integrate(x=data2,
                                            func=func4_3deps,
                                            limits=limits2,
                                            draws_per_dim=100)

    integral = num_integral.numpy()
    integral2 = num_integral2.numpy()
    # print("DEBUG, value:", vals_reshaped)
    assert len(integral) == len(func4_values)
    assert len(integral2) == len(func4_2values[0])
    assert func4_3deps_0and2_integrated(
        x=func4_values, limits=limits4_2dim) == pytest.approx(integral,
                                                              rel=0.03)

    assert func4_3deps_1_integrated(
        x=func4_2values, limits=limits4_1dim) == pytest.approx(integral2,
                                                               rel=0.03)
Example #7
def normalization_nograd(func,
                         n_axes,
                         batch_size,
                         num_batches,
                         dtype,
                         space,
                         x=None,
                         shape_after=()):
    lower, upper = space.rect_limits
    lower = z.convert_to_tensor(lower, dtype=dtype)
    upper = z.convert_to_tensor(upper, dtype=dtype)

    def body(batch_num, mean):
        start_idx = batch_num * batch_size
        end_idx = start_idx + batch_size
        indices = tf.range(start_idx, end_idx, dtype=tf.int32)
        samples_normed = tfp.mcmc.sample_halton_sequence(
            n_axes,
            # num_results=batch_size,
            sequence_indices=indices,
            dtype=dtype,
            randomized=False)
        # halton_sample = tf.random_uniform(shape=(n_axes, batch_size), dtype=dtype)
        samples_normed.set_shape((batch_size, n_axes))
        samples_normed = tf.expand_dims(samples_normed, axis=0)
        samples = samples_normed * (upper - lower) + lower
        func_vals = func(samples)
        if shape_after == ():
            reduce_axis = None
        else:
            reduce_axis = 1
            if len(func_vals.shape) == 1:
                func_vals = tf.expand_dims(func_vals, -1)
        batch_mean = tf.reduce_mean(input_tensor=func_vals,
                                    axis=reduce_axis)  # if there are gradients
        err_weight = 1 / tf.cast(batch_num + 1, dtype=tf.float64)

        do_print = False
        if do_print:
            tf.print(batch_num + 1)
        return batch_num + 1, mean + err_weight * (batch_mean - mean)

    cond = lambda batch_num, _: batch_num < num_batches

    initial_mean = tf.constant(0, shape=shape_after, dtype=dtype)
    initial_body_args = (0, initial_mean)
    _, final_mean = tf.while_loop(cond=cond,
                                  body=body,
                                  loop_vars=initial_body_args,
                                  parallel_iterations=1,
                                  swap_memory=False,
                                  back_prop=True)
    # def normalization_grad(x):
    return final_mean
Example #8
def _exp_integral_func_shifting(lambd, lower, upper, model):
    def raw_integral(x):
        # x is shifted before exponentiating; needed to avoid overflow in exp otherwise
        return z.exp(lambd * model._shift_x(x)) / lambd

    lower = z.convert_to_tensor(lower)
    lower_int = raw_integral(x=lower)
    upper = z.convert_to_tensor(upper)
    upper_int = raw_integral(x=upper)
    integral = upper_int - lower_int
    return integral
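The shift exists because exp(lambd * x) overflows float64 once the argument exceeds roughly 709; since exp(lambd * (x - s)) = exp(-lambd * s) * exp(lambd * x), a constant shift only rescales the antiderivative by a global factor that drops out when the integral is used to normalize a PDF. A small NumPy illustration (the shift value here is made up):

import numpy as np

lambd = 1.0
lower, upper = 700.0, 715.0  # exp(715) overflows float64

def raw_integral(x, shift=0.0):
    return np.exp(lambd * (x - shift)) / lambd

with np.errstate(over="ignore"):
    naive = raw_integral(upper) - raw_integral(lower)  # inf: exp overflowed
shifted = raw_integral(upper, shift=710.0) - raw_integral(lower, shift=710.0)
# finite; equals the true integral times exp(-lambd * 710), a constant factor
assert np.isinf(naive) and np.isfinite(shifted)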
Example #9
def integral_full(x, limits, norm_range, params, model):
    lower, upper = limits.limit1d
    param1 = params['super_param']
    param2 = params['param2']
    param3 = params['param3']

    lower = z.convert_to_tensor(lower)
    upper = z.convert_to_tensor(upper)

    # calculate the integral here, dummy integral
    integral = param1 * param2 * param3 + z.reduce_sum([lower, upper])
    return integral
Example #10
            def calc_numerics_data_shift():
                lower, upper = [], []
                for limit in limits:
                    low, up = limit.rect_limits
                    lower.append(z.convert_to_tensor(low[:, 0]))
                    upper.append(z.convert_to_tensor(up[:, 0]))
                lower = z.convert_to_tensor(lower)
                upper = z.convert_to_tensor(upper)
                lower_val = znp.min(lower, axis=0)
                upper_val = znp.max(upper, axis=0)

                return (upper_val + lower_val) / 2
Example #11
def integral_full(limits, norm_range, params, model):
    lower, upper = limits.rect_limits  # for a more detailed guide, see the space.py example
    param1 = params['super_param']
    param2 = params['param2']
    param3 = params['param3']

    lower = z.convert_to_tensor(lower)
    upper = z.convert_to_tensor(upper)

    # calculate the integral here, dummy integral, wrong!
    integral = param1 * param2 * param3 + z.reduce_sum([lower, upper])
    return integral
Example #12
def test_log_normal_constraint():
    # x, lam = np.random.randint(1, 100, size=(2, 50))
    lam = 44.3
    x = 45.3
    lam_tensor = z.convert_to_tensor(lam)
    constr = zfit.constraint.LogNormalConstraint(
        params=z.convert_to_tensor(x),
        observation=lam_tensor,
        uncertainty=lam_tensor**0.5,
    )
    lognormal_constr_val = constr.value()
    # true_val = true_poisson_constr_value(x, lam)
    true_lognormal = 25.128554  # maybe add dynamically?
    np.testing.assert_allclose(lognormal_constr_val, true_lognormal)
Example #13
def integral_axis1(x, limits, norm_range, params, model):
    data_0 = x.unstack_x()  # data from axis 0

    param1 = params['super_param']
    param2 = params['param2']
    param3 = params['param3']

    lower, upper = limits.limit1d
    lower = z.convert_to_tensor(lower)  # the limits are now 1-D, for axis 1
    upper = z.convert_to_tensor(upper)

    # calculate the integral here, dummy integral
    integral = data_0 * param1 * param2 * param3 + z.reduce_sum([lower, upper])
    return integral
Example #14
def integral_axis1(x, limits, norm_range, params, model):
    data_0 = x.unstack_x()  # data from axis 0

    param1 = params["super_param"]
    param2 = params["param2"]
    param3 = params["param3"]

    lower, upper = limits.limit1d  # for a more detailed guide, see the space.py example
    lower = z.convert_to_tensor(lower)  # the limits are now 1-D, for axis 1
    upper = z.convert_to_tensor(upper)

    # calculate the integral here, dummy integral
    integral = data_0**2 * param1 * param2 * param3 + z.reduce_sum([lower, upper])
    # notice that the returned shape will be in the same as data_0, e.g. the number of events given in x
    return integral
Example #15
    def step_size(self) -> tf.Tensor:  # TODO: improve default step_size?
        """Step size of the parameter, the estimated order of magnitude of the uncertainty.

        This can be crucial to tune for the minimization. A `step_size` that is too large
        can produce NaNs; one that is too small may not converge.

        If the step size is not set, the `DEFAULT_STEP_SIZE` is used.

        Returns:
            :py:class:`tf.Tensor`: the step size
        """
        step_size = self._step_size
        if step_size is None:
            #     # auto-infer from limits
            #     step_splits = 1e5
            #     if self.has_limits:
            #         step_size = (self.upper_limit - self.lower_limit) / step_splits  # TODO improve? can be tensor?
            #     else:
            #         step_size = self.DEFAULT_STEP_SIZE
            #     if np.isnan(step_size):
            #         if self.lower_limit == -np.infty or self.upper_limit == np.infty:
            #             step_size = self.DEFAULT_STEP_SIZE
            #         else:
            #             raise ValueError("Could not set step size. Is NaN.")
            #     # step_size = z.to_real(step_size)
            #     self.step_size = step_size
            step_size = self.DEFAULT_STEP_SIZE
        step_size = z.convert_to_tensor(step_size)
        return step_size
Example #16
def legendre_integral(
    limits: ztyping.SpaceType,
    norm: ztyping.SpaceType,
    params: list[zfit.Parameter],
    model: RecursivePolynomial,
):
    """Recursive integral of Legendre polynomials."""
    lower, upper = limits.limit1d
    lower_rescaled = model._polynomials_rescale(lower)
    upper_rescaled = model._polynomials_rescale(upper)
    # if np.allclose((lower_rescaled, upper_rescaled), (-1, 1)):
    #     return z.constant(2.)  #

    lower = z.convert_to_tensor(lower_rescaled)
    upper = z.convert_to_tensor(upper_rescaled)

    integral_0 = model.params["c_0"] * (upper - lower)  # if polynomial 0 is 1
    if model.degree == 0:
        integral = integral_0
    else:

        def indefinite_integral(limits):
            max_degree = (
                model.degree + 1
            )  # needed +1 for integral, max poly in term for n is n+1
            polys = do_recurrence(
                x=limits,
                polys=legendre_polys,
                degree=max_degree,
                recurrence=legendre_recurrence,
            )
            one_limit_integrals = []
            for degree in range(1, max_degree):
                coeff = model.params[f"c_{degree}"]
                one_limit_integrals.append(
                    coeff * (polys[degree + 1] - polys[degree - 1]) /
                    (2.0 * (z.convert_to_tensor(degree)) + 1))
            return z.reduce_sum(one_limit_integrals, axis=0)

        integral = indefinite_integral(upper) - indefinite_integral(
            lower) + integral_0
        integral = znp.reshape(integral, newshape=())
    integral *= 0.5 * model.space.area()  # rescale back to whole width

    return integral
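The inner loop applies \int P_n dx = (P_{n+1} - P_{n-1}) / (2n + 1). A finite-difference check with NumPy (independent of zfit):

import numpy as np
from numpy.polynomial import Legendre

n = 3
P = Legendre.basis
antideriv = lambda t: (P(n + 1)(t) - P(n - 1)(t)) / (2 * n + 1)
x = np.linspace(-0.9, 0.9, 8)
h = 1e-6  # the central difference of the antiderivative should give back P_n
np.testing.assert_allclose((antideriv(x + h) - antideriv(x - h)) / (2 * h),
                           P(n)(x), rtol=1e-5, atol=1e-8)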
Example #17
def func3_2deps_fully_integrated(limits, params=None, model=None):
    lower, upper = limits.limits
    with suppress(TypeError):
        lower, upper = lower[0], upper[0]

    lower_a, lower_b = lower
    upper_a, upper_b = upper
    integral = (lower_a**3 - upper_a**3) * (lower_b - upper_b)
    integral += (lower_a - upper_a) * (lower_b**3 - upper_b**3)
    integral /= 3
    return z.convert_to_tensor(integral)
Example #18
def test_midpoints():
    edges = np.array([[-1.0, 0, 3, 10], [-5.0, 0, 1, 4]])
    bincounts = np.array([[0, 0, 1], [0, 5, 7], [0, 3, 0], [0, 0, 0]])

    edges = z.convert_to_tensor(edges)
    midpoints_true = np.array([[-0.5, 2.5], [1.5, 0.5], [1.5, 2.5], [6.5, 0.5]])
    bincounts_nonzero, midpoints_nonzero, bincounts_nonzero_index = midpoints_from_hist(
        bincounts=bincounts, edges=edges
    )
    np.testing.assert_allclose(np.array([1, 5, 7, 3]), zfit.run(bincounts_nonzero))
    np.testing.assert_allclose(midpoints_true, zfit.run(midpoints_nonzero))
Example #19
        def create_covariance(mu, sigma):
            mu = z.convert_to_tensor([z.convert_to_tensor(m) for m in mu])
            sigma = z.convert_to_tensor(sigma)  # TODO (Mayou36): fix as above?
            params_tensor = z.convert_to_tensor(params)

            if sigma.shape.ndims > 1:
                covariance = sigma
            elif sigma.shape.ndims == 1:
                covariance = tf.linalg.tensor_diag(z.pow(sigma, 2.))
            else:
                sigma = tf.reshape(sigma, [1])
                covariance = tf.linalg.tensor_diag(z.pow(sigma, 2.))

            if not params_tensor.shape[0] == mu.shape[0] == covariance.shape[
                    0] == covariance.shape[1]:
                raise ShapeIncompatibleError(
                    f"params_tensor, mu and sigma have to have the same length. Currently"
                    f"param: {params_tensor.shape[0]}, mu: {mu.shape[0]}, "
                    f"covariance (from sigma): {covariance.shape[0:2]}")
            return covariance
Example #20
        def create_covariance(mu, sigma):
            mu = z.convert_to_tensor(mu)
            sigma = z.convert_to_tensor(sigma)  # TODO (Mayou36): fix as above?
            params_tensor = z.convert_to_tensor(params)

            if sigma.shape.ndims > 1:
                covariance = sigma  # TODO: square as well?
            elif sigma.shape.ndims == 1:
                covariance = tf.linalg.tensor_diag(z.pow(sigma, 2.0))
            else:
                sigma = znp.reshape(sigma, [1])
                covariance = tf.linalg.tensor_diag(z.pow(sigma, 2.0))

            if (not params_tensor.shape[0] == mu.shape[0] ==
                    covariance.shape[0] == covariance.shape[1]):
                raise ShapeIncompatibleError(
                    f"params_tensor, observation and uncertainty have to have the"
                    " same length. Currently"
                    f"param: {params_tensor.shape[0]}, mu: {mu.shape[0]}, "
                    f"covariance (from uncertainty): {covariance.shape[0:2]}")
            return covariance
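The branching encodes a simple rule: a 2-D sigma is taken as the full covariance matrix, while a scalar or 1-D sigma of standard deviations becomes a diagonal covariance. A plain NumPy analogue of the same logic (the function name is illustrative only):

import numpy as np

def covariance_from_sigma(sigma):
    sigma = np.asarray(sigma, dtype=float)
    if sigma.ndim > 1:
        return sigma  # already a full covariance matrix
    return np.diag(np.atleast_1d(sigma) ** 2)  # diagonal of variances

covariance_from_sigma(0.5)         # [[0.25]]
covariance_from_sigma([1.0, 2.0])  # diag([1., 4.])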
Example #21
 def indefinite_integral(limits):
     max_degree = model.degree + 1  # needed +1 for integral, max poly in term for n is n+1
     polys = do_recurrence(x=limits,
                           polys=legendre_polys,
                           degree=max_degree,
                           recurrence=legendre_recurrence)
     one_limit_integrals = []
     for degree in range(1, max_degree):
         coeff = model.params[f"c_{degree}"]
         one_limit_integrals.append(
             coeff * (polys[degree + 1] - polys[degree - 1]) /
             (2. * (z.convert_to_tensor(degree)) + 1))
     return z.reduce_sum(one_limit_integrals, axis=0)
Example #22
def func_integral_laguerre(limits, norm_range, params: Dict, model):
    r"""The integral of the simple Laguerre polynomials.

    Defined as :math:`\int L_{n} = (-1) L_{n+1}^{(-1)}` with :math:`L^{(\alpha)}` the generalized Laguerre polynomial.

    Args:
        limits: The integration limits.
        norm_range: The normalization range.
        params: The coefficient parameters.
        model: The polynomial model.

    Returns:
        The calculated integral.
    """
    lower, upper = limits.limit1d
    lower_rescaled = model._polynomials_rescale(lower)
    upper_rescaled = model._polynomials_rescale(upper)

    lower = z.convert_to_tensor(lower_rescaled)
    upper = z.convert_to_tensor(upper_rescaled)

    # The laguerre shape makes the sum for us. Setting the 0th coeff to 0, since no -1 term exists.
    coeffs_laguerre_nup = {
        f'c_{int(n.split("_", 1)[-1]) + 1}': c
        for n, c in params.items()
    }  # increase n -> n+1 in the naming
    coeffs_laguerre_nup['c_0'] = tf.constant(0., dtype=model.dtype)
    coeffs_laguerre_nup = convert_coeffs_dict_to_list(coeffs_laguerre_nup)

    def indefinite_integral(limits):
        return -1 * laguerre_shape_alpha_minusone(x=limits,
                                                  coeffs=coeffs_laguerre_nup)

    integral = indefinite_integral(upper) - indefinite_integral(lower)
    integral = tf.reshape(integral, shape=())
    integral *= 0.5 * model.space.area()  # rescale back to whole width
    return integral
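The sign flip comes from \int L_n dx = -L_{n+1}^{(-1)}, and since L_{n+1}^{(-1)} = L_{n+1} - L_n, the antiderivative can be checked using ordinary Laguerre polynomials only. A SciPy-based sanity check (assuming scipy is available; not part of zfit):

import numpy as np
from scipy.special import eval_laguerre

n = 3
x = np.linspace(0.1, 5.0, 7)
h = 1e-6  # d/dx (L_n - L_{n+1}) should give back L_n
antideriv = lambda t: eval_laguerre(n, t) - eval_laguerre(n + 1, t)
np.testing.assert_allclose((antideriv(x + h) - antideriv(x - h)) / (2 * h),
                           eval_laguerre(n, x), rtol=1e-5, atol=1e-8)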
Example #23
def midpoints_from_hist(bincounts, edges):  # TODO: implement correctly, old
    """Calculate the midpoints of a hist and return the non-zero entries, non-zero bincounts and indices.

    Args:
        bincounts: Tensor with shape (nbins_0, ..., nbins_n) with n being the dimension.
        edges: Tensor with shape (n_obs, nbins + 1) holding the position of the edges, assuming a rectangular grid.
    Returns:
        bincounts: the non-zero bincounts as a 1-D array, aligned with the midpoints and indices
        midpoints: the coordinates of the midpoint of each non-empty bin, with shape (nbincounts, n_obs)
        indices: the original positions of the non-zero bincounts in the input
    """
    bincounts = z.convert_to_tensor(bincounts)
    edges = z.convert_to_tensor(edges)

    midpoints = (edges[:, :-1] + edges[:, 1:]) / 2.0
    midpoints_grid = tf.stack(tf.meshgrid(*tf.unstack(midpoints),
                                          indexing="ij"),
                              axis=-1)
    bincounts_nonzero_index = tf.where(bincounts)
    bincounts_nonzero = tf.gather_nd(bincounts,
                                     indices=bincounts_nonzero_index)
    midpoints_nonzero = tf.gather_nd(midpoints_grid,
                                     indices=bincounts_nonzero_index)
    return bincounts_nonzero, midpoints_nonzero, bincounts_nonzero_index
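The same computation in plain NumPy, with the edge layout from the docstring (a sketch for illustration, not zfit code):

import numpy as np

edges = np.array([[-1.0, 0.0, 3.0, 10.0],   # axis 0: 3 bins
                  [-5.0, 0.0, 1.0, 4.0]])   # axis 1: 3 bins
bincounts = np.array([[0, 0, 1],
                      [0, 5, 7],
                      [0, 3, 0]])

mids = (edges[:, :-1] + edges[:, 1:]) / 2.0                  # per-axis bin centers
grid = np.stack(np.meshgrid(*mids, indexing="ij"), axis=-1)  # shape (3, 3, 2)
idx = np.argwhere(bincounts)                                 # indices of non-empty bins
counts_nonzero = bincounts[tuple(idx.T)]                     # [1, 5, 7, 3]
mids_nonzero = grid[tuple(idx.T)]                            # matching (x, y) midpoints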
Example #24
 def indefinite_integral(limits):
     max_degree = model.degree + 1
     polys = do_recurrence(x=limits,
                           polys=chebyshev_polys,
                           degree=max_degree,
                           recurrence=chebyshev_recurrence)
     one_limit_integrals = []
     for degree in range(2, max_degree):
         coeff = model.params[f"c_{degree}"]
         n_float = z.convert_to_tensor(degree)
         integral = (n_float * polys[degree + 1] /
                     (z.square(n_float) - 1) - limits * polys[degree] /
                     (n_float - 1))
         one_limit_integrals.append(coeff * integral)
     return z.reduce_sum(one_limit_integrals, axis=0)
Example #25
    def set_weights(self, weights: ztyping.WeightsInputType):
        """Set (temporarily) the weights of the dataset.

        Args:
            weights: The new weights of the dataset: a 1-D object with one weight
                per event, or None to unset the weights.
        """
        if weights is not None:
            weights = z.convert_to_tensor(weights)
            weights = z.to_real(weights)
            if weights.shape.ndims != 1:
                raise ShapeIncompatibleError(
                    "Weights have to be 1-Dim objects.")

        def setter(value):
            self._weights = value

        def getter():
            return self.weights

        return TemporarilySet(value=weights, getter=getter, setter=setter)
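Because the setter returns a TemporarilySet, the weights can be set either permanently or only inside a context block, following the usual zfit pattern (a hedged usage sketch; data and new_weights are placeholders):

data.set_weights(new_weights)        # sets the weights
with data.set_weights(new_weights):  # sets them only within the block,
    ...                              # then restores the previous weights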
Example #26
def histogramdd(sample, bins=10, range=None, weights=None, density=None):
    out_dtype = [tf.float64, tf.float64]
    if isinstance(sample, ZfitData):
        n_obs = sample.n_obs  # read n_obs before replacing sample with the tensor
        sample = sample.value()
    else:
        sample = z.convert_to_tensor(sample)
        n_obs = sample.shape[-1]

    none_tensor = tf.constant("NONE_TENSOR", shape=(), name="none_tensor")
    inputs = [sample, bins, range, weights]
    inputs_cleaned = [
        inp if inp is not None else none_tensor for inp in inputs
    ]

    def histdd(sample, bins, range, weights):
        kwargs = {
            "sample": sample,
            "bins": bins,
            "range": range,
            "weights": weights
        }
        new_kwargs = {}
        for key, value in kwargs.items():
            if value == b"NONE_TENSOR":  # undo the None -> sentinel substitution
                value = None
            new_kwargs[key] = value
        return np.histogramdd(**new_kwargs, density=density)

    bincounts, *edges = tf.numpy_function(func=histdd,
                                          inp=inputs_cleaned,
                                          Tout=out_dtype)
    bincounts.set_shape(shape=(None, ) * n_obs)
    # edges = [edge.set_shape(shape=(None)) for edge in edges]
    return bincounts, edges
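For reference, the NumPy call this wraps, with the shapes the wrapper then restores on the TensorFlow side:

import numpy as np

sample = np.random.default_rng(0).normal(size=(1000, 2))
counts, edges = np.histogramdd(sample, bins=10)
# counts: shape (10, 10); edges: a list of two arrays with 11 edges each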
Example #27
 def loss_func():
     probs = z.convert_to_tensor((a_param - true_a)**2 +
                                 (b_param - true_b)**2 +
                                 (c_param - true_c)**4) + 0.42
     return tf.reduce_sum(input_tensor=tf.math.log(probs))
Example #28
    sigma3 = zfit.Parameter("sigma35" + nameadd,
                            z.to_real(sigma_true) - 0.3, sigma_true - 2.,
                            sigma_true + 2.)
    yield3 = zfit.Parameter("yield35" + nameadd, yield_true + 300, 0,
                            yield_true + 20000)
    return mu3, sigma3, yield3


obs1 = zfit.Space('obs1',
                  (np.min([test_values_np[:, 0], test_values_np2]) - 1.4,
                   np.max([test_values_np[:, 0], test_values_np2]) + 2.4))

mu_constr = [1.6, 0.02]  # mu, sigma
sigma_constr = [3.5, 0.01]
constr = lambda: [mu_constr[1], sigma_constr[1]]
constr_tf = lambda: z.convert_to_tensor(constr())
covariance = lambda: np.array([[mu_constr[1]**2, 0], [0, sigma_constr[1]**2]])
covariance_tf = lambda: z.convert_to_tensor(covariance())


def create_gauss1():
    mu, sigma = create_params1()
    return Gauss(mu, sigma, obs=obs1, name="gaussian1"), mu, sigma


def create_gauss2():
    mu, sigma = create_params2()
    return Gauss(mu, sigma, obs=obs1, name="gaussian2"), mu, sigma


def create_gauss3ext():
Example #29
def mc_integrate(func: Callable,
                 limits: ztyping.LimitsType,
                 axes: Optional[ztyping.AxesTypeInput] = None,
                 x: Optional[ztyping.XType] = None,
                 n_axes: Optional[int] = None,
                 draws_per_dim: int = 20000,
                 method: str = None,
                 dtype: Type = ztypes.float,
                 mc_sampler: Callable = tfp.mcmc.sample_halton_sequence,
                 importance_sampling: Optional[Callable] = None) -> tf.Tensor:
    """Monte Carlo integration of `func` over `limits`.

    Args:
        func (callable): The function to be integrated over
        limits (:py:class:`~ZfitSpace`): The limits of the integral
        axes (tuple(int)): The axes to integrate over. None means integration over all axes.
        x (numeric): If a partial integration is performed, these are the values at which the
            non-integrated dimensions will be evaluated.
        n_axes (int): the total number of dimensions (old?)
        draws_per_dim (int): How many random points to draw per dimension
        method (str): Which integration method to use
        dtype (dtype): |dtype_arg_descr|
        mc_sampler (callable): A function that takes one argument (`n_draws` or similar) and returns
            random values between 0 and 1.
        importance_sampling ():

    Returns:
        numerical: the integral
    """
    if axes is not None and n_axes is not None:
        raise ValueError("Either specify axes or n_axes")
    limits = convert_to_space(limits)

    axes = limits.axes
    partial = (axes is not None) and (x is not None)  # axes, values can be tensors

    if axes is not None and n_axes is None:
        n_axes = len(axes)
    if n_axes is not None and axes is None:
        axes = tuple(range(n_axes))

    integrals = []
    for space in limits:
        lower, upper = space._rect_limits_tf
        tf.debugging.assert_all_finite((
            lower, upper
        ), "MC integration does (currently) not support unbound limits (np.infty) as given here:"
                                       "\nlower: {}, upper: {}".format(
                                           lower, upper))

        n_samples = draws_per_dim

        chunked_normalization = zfit.run.chunksize < n_samples
        # chunked_normalization = True
        if chunked_normalization and partial:
            print(
                "NOT SUPPORTED! partial and chunked not working, auto switch back to not-chunked."
            )
        if chunked_normalization and not partial:
            n_chunks = int(np.ceil(n_samples / zfit.run.chunksize))
            chunksize = int(np.ceil(n_samples / n_chunks))
            # print("starting normalization with {} chunks and a chunksize of {}".format(n_chunks, chunksize))
            avg = normalization_chunked(func=func,
                                        n_axes=n_axes,
                                        dtype=dtype,
                                        x=x,
                                        num_batches=n_chunks,
                                        batch_size=chunksize,
                                        space=space)

        else:
            # TODO: deal with n_obs properly?

            samples_normed = mc_sampler(
                dim=n_axes,
                num_results=n_samples // 2,  # integer; halved to decrease integration size
                dtype=dtype)
            samples = samples_normed * (
                upper - lower) + lower  # samples is [0, 1], stretch it

            if partial:  # TODO(Mayou36): shape of partial integral?
                data_obs = x.obs
                new_obs = []
                x = x.value()
                value_list = []
                index_samples = 0
                index_values = 0
                if len(x.shape) == 1:
                    x = tf.expand_dims(x, axis=1)
                for i in range(n_axes + x.shape[-1]):
                    if i in axes:
                        new_obs.append(space.obs[index_samples])
                        value_list.append(samples[:, index_samples])
                        index_samples += 1
                    else:
                        new_obs.append(data_obs[index_values])
                        value_list.append(
                            tf.expand_dims(x[:, index_values], axis=1))
                        index_values += 1
                value_list = [tf.cast(val, dtype=dtype) for val in value_list]
                x = PartialIntegralSampleData(sample=value_list,
                                              space=Space(obs=new_obs))
            else:
                x = samples

            # convert rnd samples with value to feedable vector
            reduce_axis = 1 if partial else None
            avg = tf.reduce_mean(input_tensor=func(x), axis=reduce_axis)
            # avg = tfp.monte_carlo.expectation(f=func, samples=x, axis=reduce_axis)
            # TODO: importance sampling?
            # avg = tfb.monte_carlo.expectation_importance_sampler(f=func, samples=value,axis=reduce_axis)
        integral = avg * tf.cast(z.convert_to_tensor(space.rect_area()),
                                 dtype=avg.dtype)
        integrals.append(integral)
    return z.reduce_sum(integrals, axis=0)
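Stripped of chunking, spaces and partial integration, the core estimator is just the mean of the integrand times the box volume. A minimal NumPy sketch of the non-partial case (the function name is made up, not part of zfit):

import numpy as np

def mc_integrate_sketch(func, lower, upper, n_draws=100_000, seed=0):
    lower, upper = np.asarray(lower, float), np.asarray(upper, float)
    rng = np.random.default_rng(seed)
    samples = rng.uniform(size=(n_draws, lower.size)) * (upper - lower) + lower
    return np.prod(upper - lower) * func(samples).mean()  # volume * mean(f)

# the integral of x^2 + y^2 over [0, 1]^2 is 2/3
approx = mc_integrate_sketch(lambda s: (s**2).sum(axis=1), [0.0, 0.0], [1.0, 1.0])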
Example #30
 def _params_array(self):
     return z.convert_to_tensor(self._ordered_params)