def _loss_func(self, model, data, fit_range, constraints):
    """Compute the extended unbinned NLL.

    On top of the plain NLL from the parent class, one Poisson yield term
    per (pdf, dataset) pair is subtracted: -(-yield + n_events * log(yield)).
    Every pdf must be extended (i.e. carry a yield).
    """
    nll = super()._loss_func(model=model, data=data, fit_range=fit_range,
                             constraints=constraints)
    poisson_terms = []
    for pdf, sample in zip(model, data):
        if not pdf.is_extended:
            raise NotExtendedPDFError(
                "The pdf {} is not extended but has to be (for an extended fit)".format(pdf))
        # Weighted data: the effective event count is the sum of weights.
        if sample.weights is None:
            n_events = sample.nevents
        else:
            n_events = ztf.reduce_sum(sample.weights)
        yield_ = pdf.get_yield()
        poisson_terms.append(-yield_ + ztf.to_real(n_events) * tf.log(yield_))
    nll -= tf.reduce_sum(poisson_terms)
    return nll
def true_nll_gaussian(params, mu, sigma):
    """Gaussian-constraint NLL: sum over i of (params_i - mu_i)^2 / (2 sigma_i^2).

    `params`, `mu` and `sigma` are each converted to tuples and must all
    have the same length; a `ValueError` is raised otherwise.
    """
    params = convert_to_container(params, container=tuple)
    mu = convert_to_container(mu, container=tuple)
    sigma = convert_to_container(sigma, container=tuple)
    if not (len(params) == len(mu) == len(sigma)):
        raise ValueError("params, mu and sigma have to have the same length.")
    total = ztf.constant(0.)
    for value, center, width in zip(params, mu, sigma):
        total += ztf.reduce_sum(ztf.square(value - center) / (2. * ztf.square(width)))
    return total
def integral_full(x, limits, norm_range, params, model):
    """Dummy analytic integral over the full 1-D `limits`.

    Not a real integral: it just combines the three parameters with the
    (tensor-converted) integration bounds to produce a value.
    """
    lower, upper = limits.limit1d
    lower = ztf.convert_to_tensor(lower)
    upper = ztf.convert_to_tensor(upper)
    # dummy integral built from the parameters and the limits
    bounds_sum = ztf.reduce_sum([lower, upper])
    return params['super_param'] * params['param2'] * params['param3'] + bounds_sum
def indefinite_integral(limits):
    """Indefinite integral of the Legendre series, evaluated at `limits`.

    Uses the identity  integral(P_n) = (P_{n+1} - P_{n-1}) / (2n + 1),
    weighting each term by its coefficient `c_n`.
    """
    # +1: the integral of a degree-n polynomial has degree n+1
    max_degree = model.degree + 1
    polys = do_recurrence(x=limits, polys=legendre_polys, degree=max_degree,
                          recurrence=legendre_recurrence)
    one_limit_integrals = [
        model.params[f"c_{deg}"] * (polys[deg + 1] - polys[deg - 1])
        / (2. * (ztf.convert_to_tensor(deg)) + 1)
        for deg in range(1, max_degree)
    ]
    return ztf.reduce_sum(one_limit_integrals, axis=0)
def integral_axis1(x, limits, norm_range, params, model):
    """Dummy partial integral over axis 1; axis 0 stays as data.

    Not a real integral: it combines the axis-0 data with the parameters
    and the (tensor-converted) 1-D bounds of axis 1.
    """
    axis0_data = x.unstack_x()  # data from axis 0
    # the limits are now 1-D, for axis 1
    lower, upper = limits.limit1d
    lower = ztf.convert_to_tensor(lower)
    upper = ztf.convert_to_tensor(upper)
    # dummy integral built from the data, the parameters and the limits
    bounds_sum = ztf.reduce_sum([lower, upper])
    return (axis0_data * params['super_param'] * params['param2'] * params['param3']
            + bounds_sum)
def indefinite_integral(limits):
    """Indefinite integral of the Chebyshev series, evaluated at `limits`.

    Uses the identity
      integral(T_n) = n * T_{n+1} / (n^2 - 1)  -  x * T_n / (n - 1),
    weighting each term by its coefficient `c_n`.
    """
    max_degree = model.degree + 1  # integral of degree n has degree n+1
    polys = do_recurrence(x=limits, polys=chebyshev_polys, degree=max_degree,
                          recurrence=chebyshev_recurrence)
    one_limit_integrals = []
    # NOTE(review): degrees 0 and 1 are skipped here — the formula divides
    # by (n - 1); presumably those terms are handled elsewhere. Confirm.
    for deg in range(2, max_degree):
        n_float = ztf.convert_to_tensor(deg)
        per_degree = (n_float * polys[deg + 1] / (ztf.square(n_float) - 1)
                      - limits * polys[deg] / (n_float - 1))
        one_limit_integrals.append(model.params[f"c_{deg}"] * per_degree)
    return ztf.reduce_sum(one_limit_integrals, axis=0)
def _cache_add_constraints(self, constraints):
    """Add the summed constraint values to the cached loss, if one exists.

    Does nothing when no loss is cached yet.
    """
    if self._cache.get('loss') is None:
        return
    values = [constraint.value() for constraint in constraints]
    self._cache['loss'] += ztf.reduce_sum(values)
def _loss_func(self, model, data, fit_range, constraints):
    """Plain unbinned NLL plus the summed constraint terms, if any."""
    nll = _unbinned_nll_tf(model=model, data=data, fit_range=fit_range)
    if constraints:
        nll += ztf.reduce_sum([constraint.value() for constraint in constraints])
    return nll
def _cache_add_constraints(self, constraints):
    # Add the summed constraints to the cached loss; no-op when no loss
    # is cached yet.
    # NOTE(review): unlike the sibling implementation that sums
    # `c.value()` for each constraint, this sums `constraints` directly —
    # presumably they are already tensors/values here; confirm against
    # the callers.
    if self._cache.get('loss') is not None:
        self._cache['loss'] += ztf.reduce_sum(constraints)
def eval_constraint(constraints):
    """Evaluate the constraints and return the sum as a concrete value."""
    total = ztf.reduce_sum([constraint.value() for constraint in constraints])
    return zfit.run(total)