def gen_ylds(num_of_evt, fit_params, names=('bkg', 'sig')):
    """Create one composed yield parameter per component and register it in *fit_params*.

    Each yield is the total event count scaled by the already-existing ratio
    parameter ``yld_<name>_ratio`` found in *fit_params*.

    Args:
        num_of_evt: Total number of events the ratios are scaled by.
        fit_params: Mapping of parameter name -> zfit parameter; must contain
            ``yld_<name>_ratio`` for every entry of *names*. Mutated in place:
            the new ``yld_<name>`` parameters are added to it.
        names: Component names to build yields for. Default covers background
            and signal. (Was a mutable list default — now a tuple to follow the
            no-mutable-default rule; call sites are unaffected.)

    Returns:
        List of the newly created composed yield parameters, in *names* order.
    """
    for n in names:
        # The lambda only closes over num_of_evt (loop-invariant); the per-name
        # ratio is passed explicitly through the params mapping, so there is no
        # late-binding issue here.
        yld_param = zfit.ComposedParameter(
            f'yld_{n}',
            lambda raw: num_of_evt * raw,
            {'raw': fit_params[f'yld_{n}_ratio']})
        fit_params[f'yld_{n}'] = yld_param
    return [fit_params[f'yld_{i}'] for i in names]
def create_loss_counting():
    """Build an unbinned NLL for a single-bin counting experiment.

    A Poisson model with expectation Nsig + Nbkg (background yield fixed)
    is compared against one observed count.

    Returns:
        Tuple of (loss, Nsig) — the NLL to minimize and the floating
        signal-yield parameter.
    """
    observed_count = 370
    expected_bkg = 340

    Nsig = zfit.Parameter("Nsig", 0, -100.0, 100)
    Nbkg = zfit.Parameter("Nbkg", expected_bkg, floating=False)

    def _total_yield(a, b):
        # Poisson expectation: signal plus (fixed) background.
        return a + b

    Nobs = zfit.ComposedParameter("Nobs", _total_yield, params=[Nsig, Nbkg])

    space = zfit.Space("N", limits=(0, 800))
    counting_model = Poisson(obs=space, lamb=Nobs)
    counted = zfit.data.Data.from_numpy(obs=space, array=np.array([observed_count]))

    return UnbinnedNLL(model=counting_model, data=counted), Nsig
def test_repr():
    """Parameter reprs contain the value eagerly but a graph marker inside tf.function."""
    value = 1543
    value_sq = 1543 ** 2
    param1 = Parameter("param1", value)
    param2 = zfit.ComposedParameter("comp1", lambda x: x ** 2, params=param1)

    eager_repr1 = repr(param1)
    eager_repr2 = repr(param2)  # computed for parity; only param1's repr is asserted eagerly
    assert str(value) in eager_repr1

    @z.function
    def inside_graph():
        # Inside a traced function no concrete value is available, so the repr
        # must fall back to a "graph-node" placeholder instead of the number.
        graph_repr1 = repr(param1)
        graph_repr2 = repr(param2)
        assert str(value) not in graph_repr1
        assert str(value_sq) not in graph_repr2
        assert "graph-node" in graph_repr1
        assert "graph-node" in graph_repr2

    if zfit.run.get_graph_mode():  # only test if running in graph mode
        inside_graph()
def create_data_fit_model(data, parameters, obs, tags):
    """Fit data with a DoubleCB signal (shape taken from *parameters*) plus an
    exponential background, floating only shift/scale corrections.

    Args:
        data: pandas-like DataFrame of the events to fit (converted via format_data).
        parameters: Mapping of previously fitted shape-parameter values, keyed by
            name + name_tags(tags).
        obs: zfit observable space of the fit variable.
        tags: Dict of category tags; ``tags["brem_cat"]`` selects the
            bremsstrahlung category ('brem_zero', 'brem_one', 'brem_two', ...).

    Returns:
        Tuple (parameters, models): fitted values/errors keyed by parameter name
        (plus the derived 'nr' and 'alphar'), and a dict with the 'combined',
        'signal' and 'background' pdfs.
    """
    num_events = len(data.index)
    data = format_data(data, obs)
    b_tag = tags["brem_cat"]

    # Floating parameters, required for smearing of the MC-derived shape.
    shift_mu = zfit.Parameter('delta_mu' + name_tags(tags), 0., -200., 200.)
    scale_sigma = zfit.Parameter('scale_sigma' + name_tags(tags), 1., 0.001, 100.)

    # Main fit parameters, fixed to the previous fit's values (we explicitly list
    # which parameters float when minimizing below).
    mu = zfit.Parameter('data_mu' + name_tags(tags),
                        parameters['mu' + name_tags(tags)], floating=False)
    sigma = zfit.Parameter('data_sigma' + name_tags(tags),
                           parameters['sigma' + name_tags(tags)], floating=False)
    alphal = zfit.Parameter('data_alphal' + name_tags(tags),
                            parameters['alphal' + name_tags(tags)], floating=False)
    nl = zfit.Parameter('data_nl' + name_tags(tags),
                        parameters['nl' + name_tags(tags)], floating=False)
    alphar = zfit.Parameter('data_alphar' + name_tags(tags),
                            parameters['alphar' + name_tags(tags)], floating=False)
    nr = zfit.Parameter('data_nr' + name_tags(tags),
                        parameters['nr' + name_tags(tags)], floating=False)

    # Additional floating parameter for a better upper power-law tail, only for
    # brem 1, 2.
    # BUGFIX: this previously compared against "b_zero", which never matches the
    # 'brem_zero'/'brem_one'/'brem_two' category names used below, so scale_r was
    # allowed to float for every category — contradicting the intent stated here.
    if b_tag == "brem_zero":
        scale_r = zfit.Parameter('sc_r' + name_tags(tags), 1., floating=False)
    else:
        scale_r = zfit.Parameter('sc_r' + name_tags(tags), 1., 0.01, 2.)

    # Create composed parameters: shifted mean and scaled widths/tails.
    mu_shifted = zfit.ComposedParameter("mu_shifted" + name_tags(tags),
                                        mu_shifted_fn,
                                        params=[mu, shift_mu])
    sigma_scaled = zfit.ComposedParameter("sigma_scaled" + name_tags(tags),
                                          sigma_scaled_fn,
                                          params=[sigma, scale_sigma])
    nr_scaled = zfit.ComposedParameter("nr_scaled" + name_tags(tags),
                                       n_scaled_fn,
                                       params=[nr, scale_r])
    alphar_scaled = zfit.ComposedParameter("alphar_scaled" + name_tags(tags),
                                           sigma_scaled_fn,  # used as a general multiplication function
                                           params=[alphar, scale_r])

    # Signal model with the new scale/shift parameters.
    model = zfit.pdf.DoubleCB(obs=obs, mu=mu_shifted, sigma=sigma_scaled,
                              alphal=alphal, nl=nl,
                              alphar=alphar_scaled, nr=nr_scaled)

    # Background model: exponential.
    lambd = zfit.Parameter("lambda" + name_tags(tags), -0.00005, -1., 0.)
    model_bgr = zfit.pdf.Exponential(lambd, obs=obs)

    # Make models extended and combine them.
    n_sig = zfit.Parameter("n_signal" + name_tags(tags),
                           int(num_events * 0.99),
                           int(num_events * 0.6),
                           int(num_events * 1.2),
                           step_size=1)
    n_bgr = zfit.Parameter("n_bgr" + name_tags(tags),
                           int(num_events * 0.01),
                           0.,
                           int(num_events * 0.4),
                           step_size=1)
    model_extended = model.create_extended(n_sig)
    model_bgr_extended = model_bgr.create_extended(n_bgr)
    model = zfit.pdf.SumPDF([model_extended, model_bgr_extended])

    # NLL and minimizer.
    nll = zfit.loss.ExtendedUnbinnedNLL(model=model, data=data)
    minimizer = zfit.minimize.Minuit(verbosity=0, use_minuit_grad=True)

    # Minimization of shift and scale factors.
    # NOTE(review): scale_r is only floated in the *else* branch, i.e. never for
    # the three 'brem_*' categories listed here — confirm this matches the
    # intended fit strategy (the comment above says it floats for brem 1, 2).
    if b_tag == "brem_zero" or b_tag == 'brem_one' or b_tag == 'brem_two':
        result = minimizer.minimize(nll, params=[lambd, n_sig, n_bgr,
                                                 mu_shifted, sigma_scaled])
    else:
        result = minimizer.minimize(nll, params=[lambd, n_sig, n_bgr,
                                                 mu_shifted, sigma_scaled, scale_r])

    param_errors = result.hesse()
    print("Result Valid:", result.valid)
    print("Fit converged:", result.converged)
    print(result.params)

    models = {"combined": model,
              "signal": model_extended,
              "background": model_bgr_extended}  # need for tests

    parameters = {param[0].name: {"value": param[1]['value'],
                                  "error": err[1]['error']}
                  for param, err in zip(result.params.items(), param_errors.items())}
    parameters["nr"] = {'value': zfit.run(nr_scaled)}
    parameters['alphar'] = {'value': zfit.run(alphar_scaled)}
    return parameters, models
# create the model to fit # we can share parameters directly or create composed parameters. Here we have a # parameter that scales the sigma from the rare fit sigma_scaling = zfit.Parameter('sigma_scaling', 0.9, 0.1, 10) def sigma_scaled_fn(): return sigma * sigma_scaling # this can be an arbitrary function sigma_scaled = zfit.ComposedParameter( 'sigma scaled', sigma_scaled_fn, dependents=sigma # the objects used inside the func ) # we could also make the free parameters, not shared # alphal_reso = zfit.Parameter('alpha left reso', -0.7, -5, 0) # nl_reso = zfit.Parameter('n left reso', 0.4, 0, 10) # alphar_reso = zfit.Parameter('alpha right reso', 1, 0, 5) # nr_reso = zfit.Parameter('n right reso', 1.8, 0, 10) alphal_reso = alphal_rare nl_reso = nl_rare alphar_reso = alphal_rare nr_reso = nr_rare # frac_dcb_reso = zfit.Parameter('frac dcb_reso', 0.5, 0.01, 0.99)
    def __init__(
        self,
        pdf: ZfitBinnedPDF,
        modifiers: bool | Mapping[str, ztyping.ParamTypeInput] = None,
        extended: ztyping.ExtendedInputType = None,
        norm: ztyping.NormInputType = None,
        name: str | None = "BinnedTemplatePDF",
    ) -> None:
        """Modifier that scales each bin separately of the *pdf*.

        Binwise modification can be used to account for uncorrelated or correlated
        uncertainties.

        Args:
            pdf: Binned pdf to be modified.
            modifiers: Modifiers for each bin. ``True`` (or ``None``) auto-creates
                one free parameter per bin, starting at 1.0; a mapping supplies
                them explicitly.
            extended: |@doc:pdf.init.extended| The overall yield of the PDF.
               If this is parameter-like, it will be used as the yield,
               the expected number of events, and the PDF will be extended.
               An extended PDF has additional functionality, such as the
               `ext_*` methods and the `counts` (for binned PDFs). |@docend:pdf.init.extended|
            norm: |@doc:pdf.init.norm| Normalization of the PDF.
               By default, this is the same as the default space of the PDF. |@docend:pdf.init.norm|
            name: |@doc:model.init.name| Human-readable name
               or label of
               the PDF for better identification.
               Has no programmatical functional purpose as identification. |@docend:model.init.name|
        """
        obs = pdf.space
        if not isinstance(pdf, ZfitBinnedPDF):
            raise TypeError("pdf must be a BinnedPDF")
        # Inherit extendedness from the wrapped pdf when not specified.
        if extended is None:
            extended = pdf.is_extended
        if modifiers is None:
            modifiers = True
        if modifiers is True:
            import zfit

            # Auto-create one free scale parameter per bin, all starting at 1.0.
            modifiers = {
                f"sysshape_{i}": zfit.Parameter(f"auto_sysshape_{self}_{i}", 1.0)
                for i in range(pdf.counts(obs).shape.num_elements())
            }
        if not isinstance(modifiers, dict):
            raise TypeError("modifiers must be a dict-like object or True or None")
        params = modifiers.copy()
        self._binwise_modifiers = modifiers
        if extended is True:
            # Build the yield automatically from the (modified) bin counts.
            self._automatically_extended = True
            if modifiers:
                import zfit

                def sumfunc(params):
                    # Total yield = sum over bins of counts * per-bin modifier.
                    values = self.pdfs[0].counts(obs)
                    sysshape = list(params.values())
                    if sysshape:
                        sysshape_flat = tf.stack(sysshape)
                        sysshape = znp.reshape(sysshape_flat, values.shape)
                        values = values * sysshape
                    return znp.sum(values)

                from zfit.core.parameter import get_auto_number

                extended = zfit.ComposedParameter(
                    f"AUTO_binwise_modifier_{get_auto_number()}",
                    sumfunc,
                    params=modifiers,
                )
            else:
                # No modifiers: fall back to the wrapped pdf's own yield.
                extended = self.pdfs[0].get_yield()
        elif extended is not False:
            # A user-supplied yield parameter was passed in.
            # NOTE(review): when extended is False, _automatically_extended is
            # never set on self — confirm a default exists on the base class.
            self._automatically_extended = False
        super().__init__(
            obs=obs, name=name, params=params, models=pdf, extended=extended, norm=norm
        )
def test_get_params():
    """Exercise get_params() over the floating/is_yield/extract_independent grid.

    Covers a plain Gauss, two extended Gausses (one with a composed sigma, one
    with a composed yield) and an extended SumPDF; combinations that mix a
    floating filter with non-extracted composed parameters must raise ValueError.
    """
    obs = zfit.Space("obs", (-4, 5))
    # Independent parameters; mu_nofloat is fixed to probe the floating filter.
    mu_nofloat = zfit.Parameter("mu_nofloat", 1, floating=False)
    mu2 = zfit.Parameter("mu2", 1)
    sigma2 = zfit.Parameter("sigma2", 2)
    sigma_comp = zfit.ComposedParameter("sigma_comp", lambda s: s * 0.7, params=sigma2)
    yield1 = zfit.Parameter("yield1", 10)
    yield2 = zfit.Parameter("yield2", 200)
    yield2_comp = zfit.ComposedParameter("yield2_comp", lambda y: y * 0.9, params=yield2)
    gauss = zfit.pdf.Gauss(mu_nofloat, sigma_comp, obs)
    gauss2 = zfit.pdf.Gauss(mu2, sigma2, obs)
    gauss_ext = gauss.create_extended(yield1)
    gauss2_ext = gauss2.create_extended(yield2_comp)
    frac = zfit.Parameter("frac", 0.4)
    sum1 = zfit.pdf.SumPDF([gauss, gauss2], fracs=frac)
    sum1_ext = zfit.pdf.SumPDF([gauss_ext, gauss2_ext])

    # Default call extracts independents: sigma_comp resolves to sigma2.
    assert set(gauss.get_params()) == {sigma2}

    # Gauss ext (composed sigma): floating filters on composed params must raise.
    with pytest.raises(ValueError):
        set(
            gauss.get_params(floating=True, is_yield=False,
                             extract_independent=False))
    with pytest.raises(ValueError):
        set(
            gauss_ext.get_params(floating=True, is_yield=False,
                                 extract_independent=False))
    assert set(
        gauss_ext.get_params(floating=None, is_yield=False,
                             extract_independent=False)) == {
        mu_nofloat, sigma_comp
    }
    with pytest.raises(ValueError):
        set(
            gauss_ext.get_params(floating=False, is_yield=False,
                                 extract_independent=False))
    with pytest.raises(ValueError):
        set(
            gauss_ext.get_params(floating=True, is_yield=None,
                                 extract_independent=False))
    assert set(
        gauss_ext.get_params(floating=None, is_yield=None,
                             extract_independent=False)) == {
        mu_nofloat, sigma_comp, yield1
    }
    with pytest.raises(ValueError):
        set(
            gauss_ext.get_params(floating=False, is_yield=None,
                                 extract_independent=False))
    assert (set(
        gauss_ext.get_params(floating=False, is_yield=True,
                             extract_independent=False)) == set())
    assert set(
        gauss_ext.get_params(floating=None, is_yield=True,
                             extract_independent=False)) == {yield1}
    assert set(
        gauss_ext.get_params(floating=True, is_yield=True,
                             extract_independent=False)) == {yield1}

    # with extract deps
    assert set(
        gauss_ext.get_params(floating=True, is_yield=False,
                             extract_independent=True)) == {sigma2}
    assert set(
        gauss_ext.get_params(floating=None, is_yield=False,
                             extract_independent=True)) == {
        mu_nofloat, sigma2
    }
    assert set(
        gauss_ext.get_params(floating=False, is_yield=False,
                             extract_independent=True)) == {mu_nofloat}
    assert set(
        gauss_ext.get_params(floating=True, is_yield=None,
                             extract_independent=True)) == {sigma2, yield1}
    assert set(
        gauss_ext.get_params(floating=None, is_yield=None,
                             extract_independent=True)) == {
        mu_nofloat, sigma2, yield1
    }
    assert set(
        gauss_ext.get_params(floating=False, is_yield=None,
                             extract_independent=True)) == {mu_nofloat}
    assert set(
        gauss_ext.get_params(floating=True, is_yield=True,
                             extract_independent=True)) == {yield1}
    assert set(
        gauss_ext.get_params(floating=None, is_yield=True,
                             extract_independent=True)) == {yield1}
    assert (set(
        gauss_ext.get_params(floating=False, is_yield=True,
                             extract_independent=True)) == set())

    # Gauss ext2
    assert set(
        gauss2_ext.get_params(floating=True, is_yield=False,
                              extract_independent=False)) == {mu2, sigma2}
    assert set(
        gauss2_ext.get_params(floating=None, is_yield=False,
                              extract_independent=False)) == {mu2, sigma2}
    assert (set(
        gauss2_ext.get_params(floating=False, is_yield=False,
                              extract_independent=False)) == set())
    with pytest.raises(ValueError):
        set(
            gauss2_ext.get_params(floating=True, is_yield=None,
                                  extract_independent=False))
    assert set(
        gauss2_ext.get_params(floating=None, is_yield=None,
                              extract_independent=False)) == {
        mu2, sigma2, yield2_comp
    }
    with pytest.raises(ValueError):
        set(
            gauss2_ext.get_params(floating=False, is_yield=None,
                                  extract_independent=False))
    with pytest.raises(ValueError):
        set(
            gauss2_ext.get_params(floating=False, is_yield=True,
                                  extract_independent=False))
    assert set(
        gauss2_ext.get_params(floating=None, is_yield=True,
                              extract_independent=False)) == {yield2_comp}
    with pytest.raises(ValueError):
        set(
            gauss2_ext.get_params(floating=True, is_yield=True,
                                  extract_independent=False))

    # with extract deps
    assert set(
        gauss2_ext.get_params(floating=True, is_yield=False,
                              extract_independent=True)) == {mu2, sigma2}
    assert set(
        gauss2_ext.get_params(floating=None, is_yield=False,
                              extract_independent=True)) == {mu2, sigma2}
    # Toggling yield2's floating flag must not affect the non-yield selection.
    yield2.floating = False
    assert (set(
        gauss2_ext.get_params(floating=False, is_yield=False,
                              extract_independent=True)) == set())
    yield2.floating = True
    assert (set(
        gauss2_ext.get_params(floating=False, is_yield=False,
                              extract_independent=True)) == set())
    assert set(
        gauss2_ext.get_params(floating=True, is_yield=None,
                              extract_independent=True)) == {
        mu2, sigma2, yield2
    }
    assert set(
        gauss2_ext.get_params(floating=None, is_yield=None,
                              extract_independent=True)) == {
        mu2, sigma2, yield2
    }
    assert (set(
        gauss2_ext.get_params(floating=False, is_yield=None,
                              extract_independent=True)) == set())
    assert set(
        gauss2_ext.get_params(floating=True, is_yield=True,
                              extract_independent=True)) == {yield2}
    assert set(
        gauss2_ext.get_params(floating=None, is_yield=True,
                              extract_independent=True)) == {yield2}
    assert (set(
        gauss2_ext.get_params(floating=False, is_yield=True,
                              extract_independent=True)) == set())

    # sum extended
    with pytest.raises(ValueError):
        assert set(
            sum1_ext.get_params(floating=True, is_yield=False,
                                extract_independent=False))
    frac0 = sum1_ext.fracs[0]
    frac1 = sum1_ext.fracs[1]
    assert set(
        sum1_ext.get_params(floating=None, is_yield=False,
                            extract_independent=False)) == {
        mu_nofloat, sigma_comp, mu2, sigma2, frac0, frac1
    }
    with pytest.raises(ValueError):
        assert set(
            sum1_ext.get_params(floating=False, is_yield=False,
                                extract_independent=False))
    with pytest.raises(ValueError):
        set(
            sum1_ext.get_params(floating=True, is_yield=None,
                                extract_independent=False))
    assert set(
        sum1_ext.get_params(floating=None, is_yield=None,
                            extract_independent=False)) == {
        mu_nofloat, sigma_comp, mu2, sigma2, frac0, frac1, sum1_ext.get_yield()
    }
    with pytest.raises(ValueError):
        set(
            sum1_ext.get_params(floating=False, is_yield=None,
                                extract_independent=False))
    with pytest.raises(ValueError):
        set(
            sum1_ext.get_params(floating=False, is_yield=True,
                                extract_independent=False))
    assert set(
        sum1_ext.get_params(floating=None, is_yield=True,
                            extract_independent=False)) == {
        sum1_ext.get_yield()
    }
    with pytest.raises(ValueError):
        set(
            sum1_ext.get_params(floating=True, is_yield=True,
                                extract_independent=False))

    # with extract deps
    assert set(
        sum1_ext.get_params(floating=True, is_yield=False,
                            extract_independent=True)) == {
        mu2, sigma2, yield1, yield2,  # fracs depend on them
    }
    assert set(
        sum1_ext.get_params(floating=None, is_yield=False,
                            extract_independent=True)) == {
        mu_nofloat, mu2, sigma2, yield1, yield2
    }
    assert set(
        sum1_ext.get_params(floating=False, is_yield=False,
                            extract_independent=True)) == {mu_nofloat}
    assert set(
        sum1_ext.get_params(floating=True, is_yield=None,
                            extract_independent=True)) == {
        mu2, sigma2, yield1, yield2, yield1, yield2
    }
    assert set(
        sum1_ext.get_params(floating=None, is_yield=None,
                            extract_independent=True)) == {
        mu_nofloat, mu2, sigma2, yield1, yield2
    }
    assert set(
        sum1_ext.get_params(floating=False, is_yield=None,
                            extract_independent=True)) == {mu_nofloat}
    # A fixed yield joins the floating=False selection.
    yield1.floating = False
    assert set(
        sum1_ext.get_params(floating=False, is_yield=None,
                            extract_independent=True)) == {mu_nofloat, yield1}
    yield1.floating = True
    assert set(
        sum1_ext.get_params(floating=True, is_yield=True,
                            extract_independent=True)) == {yield1, yield2}
    assert set(
        sum1_ext.get_params(floating=None, is_yield=True,
                            extract_independent=True)) == {yield1, yield2}
    assert (set(
        sum1_ext.get_params(floating=False, is_yield=True,
                            extract_independent=True)) == set())
def initial_fitter(data, obs):
    """Fit each background sample and the W+jets signal with CrystalBall shapes,
    then fit the 'data' sample with a combined extended SumPDF.

    Args:
        data: Mapping of sample name -> DataFrame; must contain 'data' and
            'W+jets' keys (other keys are treated as backgrounds).
        obs: zfit observable space of the fit variable.

    Returns:
        (data_fit, models, params_dict) on success, or None if the final fit
        or its error calculation fails.
    """
    num_events = len(data['data'].index)
    bgr_yields = []
    bgr_models = []
    for sample, df in data.items():
        if sample not in ('data', 'W+jets'):
            # --- per-sample background fit ---
            print('==========')
            print(f'Fitting {sample} background')
            mu = zfit.Parameter(f"mu_{sample}", 80., 60., 120.)
            sigma = zfit.Parameter(f'sigma_{sample}', 8., 1., 100.)
            alpha = zfit.Parameter(f'alpha_{sample}', -.5, -10., 0.)
            n = zfit.Parameter(f'n_{sample}', 120., 0.01, 500.)
            n_bgr = zfit.Parameter(f'yield_{sample}',
                                   int(0.01 * num_events), 0.,
                                   int(0.5 * num_events),
                                   step_size=1)
            bgr_frag_model = zfit.pdf.CrystalBall(obs=obs, mu=mu, sigma=sigma,
                                                  alpha=alpha, n=n)
            bgr_frag_model = bgr_frag_model.create_extended(n_bgr)
            bgr_models.append(bgr_frag_model)
            bgr_yields.append(n_bgr)
            bgr_data = format_data(df, obs)
            # Create NLL
            nll = zfit.loss.ExtendedUnbinnedNLL(model=bgr_frag_model, data=bgr_data)
            # Create minimizer
            minimizer = zfit.minimize.Minuit(verbosity=0, use_minuit_grad=True)
            result = minimizer.minimize(nll)
            if result.valid:
                print("Result is valid")
                print("Converged:", result.converged)
                # param_errors = result.hesse()
                params = result.params
                print(params)
                if not bgr_frag_model.is_extended:
                    # NOTE(review): `raise Warning(...)` aborts the fit loop —
                    # confirm warnings.warn() wasn't intended here.
                    raise Warning('MODEL NOT EXTENDED')
            else:
                raise Warning(f'Background {sample} fit failed')
        if sample == 'W+jets':
            # --- signal (W+jets) fit ---
            print('==========')
            print(f'Fitting {sample} signal')
            mu = zfit.Parameter(f"mu_{sample}", 80., 60., 120.)
            sigma = zfit.Parameter(f'sigma_{sample}', 8., 1., 100.)
            alpha = zfit.Parameter(f'alpha_{sample}', -.5, -10., 0.)
            n = zfit.Parameter(f'n_{sample}', 120., 0.01, 500.)
            n_sig = zfit.Parameter(f'yield_{sample}',
                                   int(0.9 * num_events), 0.,
                                   int(1.1 * num_events),
                                   step_size=1)
            signal_model = zfit.pdf.CrystalBall(obs=obs, mu=mu, sigma=sigma,
                                                alpha=alpha, n=n)
            signal_model = signal_model.create_extended(n_sig)
            signal_data = format_data(df, obs)
            # Create NLL
            nll = zfit.loss.ExtendedUnbinnedNLL(model=signal_model, data=signal_data)
            # Create minimizer
            minimizer = zfit.minimize.Minuit(verbosity=0, use_minuit_grad=True)
            result = minimizer.minimize(nll)
            if result.valid:
                print("Result is valid")
                print("Converged:", result.converged)
                # param_errors = result.hesse()
                params = result.params
                print(params)
                if not signal_model.is_extended:
                    raise Warning('MODEL NOT EXTENDED')
                sig_parameters = {
                    param[0].name: param[1]['value']
                    for param in result.params.items()
                }
            else:
                print('Minimization failed')
                raise ValueError('Signal fit failed')

    # --- combined fit to the 'data' sample, seeded with the signal-fit shape ---
    # NOTE(review): sig_parameters is only bound if a 'W+jets' sample was present
    # and its fit succeeded — confirm callers guarantee that.
    mu = zfit.Parameter('data_mu', sig_parameters['mu_W+jets'], 60., 120.)
    sigma = zfit.Parameter('data_sigma', sig_parameters['sigma_W+jets'], 1., 100.)
    alpha = zfit.Parameter('data_alpha', sig_parameters['alpha_W+jets'],
                           floating=False)
    n = zfit.Parameter('data_n', sig_parameters['n_W+jets'], floating=False)
    n_sig = zfit.Parameter('sig_yield',
                           int(0.9 * num_events), 0.,
                           int(1.1 * num_events),
                           step_size=1)
    # Total background yield composed from the individual background yields.
    n_bgr = zfit.ComposedParameter('bgr_yield', sum_func, params=bgr_yields)
    data_model = zfit.pdf.CrystalBall(obs=obs, mu=mu, sigma=sigma,
                                      alpha=alpha, n=n)
    data_model = data_model.create_extended(n_sig)
    bgr_models.append(data_model)
    models = bgr_models
    for model in models:
        if not model.is_extended:
            raise Warning(f'A MODEL {model} IS NOT EXTENDED')
    data_fit = zfit.pdf.SumPDF(models)
    data_to_fit = format_data(data['data'], obs, 'data')
    # Create NLL
    nll = zfit.loss.ExtendedUnbinnedNLL(model=data_fit, data=data_to_fit)
    # Create minimizer
    minimizer = zfit.minimize.Minuit(verbosity=0, use_minuit_grad=True)
    result = minimizer.minimize(nll, params=[n_sig, mu, sigma, n_bgr])
    if result.valid:
        print("Result is valid")
        print("Converged:", result.converged)
        param_errors = result.hesse()
        params = result.params
        print(params)
        if not result.valid:
            print("Error calculation failed \nResult is not valid")
            return None
        else:
            return data_fit, models, {
                param[0].name: {
                    "value": param[1]['value'],
                    "error": err[1]['error']
                }
                for param, err in zip(result.params.items(), param_errors.items())
            }
    else:
        print('Minimization failed \nResult: \n{0}'.format(result))
        return None
def test_hypotest(benchmark, n_bins, hypotest, eager):
    """Benchmark the performance of pyhf.utils.hypotest() for various numbers of
    bins and different backends.

    Args:
        benchmark: pytest benchmark
        backend: `pyhf` tensorlib given by pytest parameterization
        n_bins: `list` of number of bins given by pytest parameterization

    Returns:
        None
    """
    source = generate_source_static(n_bins)
    signp = source["bindata"]["sig"]
    bkgnp = source["bindata"]["bkg"]
    uncnp = source["bindata"]["bkgerr"]
    datanp = source["bindata"]["data"]
    if "pyhf" in hypotest:
        # pyhf path: choose eager (numpy) vs compiled (jax) backend.
        hypotest = hypotest_pyhf
        if eager:
            pyhf.set_backend("numpy")
        else:
            pyhf.set_backend("jax")
        pdf = uncorrelated_background(signp, bkgnp, uncnp)
        data = datanp + pdf.config.auxdata
        benchmark(hypotest, pdf, data)
    elif hypotest == "zfit":
        # zfit path: graph mode mirrors the non-eager pyhf configuration.
        with zfit.run.set_graph_mode(not eager):
            hypotest = hypotest_zfit
            obs = zfit.Space(
                "signal",
                binning=zfit.binned.RegularBinning(n_bins, -0.5, n_bins + 0.5,
                                                   name="signal"),
            )
            zdata = zfit.data.BinnedData.from_tensor(obs, datanp)
            zmcsig = zfit.data.BinnedData.from_tensor(obs, signp)
            zmcbkg = zfit.data.BinnedData.from_tensor(obs, bkgnp)
            # One shape-systematic parameter per bin (pyhf's shapesys analogue).
            shapesys = {
                f"shapesys_{i}": zfit.Parameter(f"shapesys_{i}", 1, 0.1, 10)
                for i in range(n_bins)
            }
            bkgmodel = BinnedTemplatePDFV1(zmcbkg, sysshape=shapesys)
            # sigyield = zfit.Parameter('sigyield', znp.sum(zmcsig.values()))
            mu = zfit.Parameter("mu", 1, 0.1, 10)
            # sigmodeltmp = BinnedTemplatePDFV1(zmcsig)
            # Signal yield = mu * total MC signal count (mu is the POI).
            sigyield = zfit.ComposedParameter(
                "sigyield",
                lambda params: params["mu"] * znp.sum(zmcsig.values()),
                params={"mu": mu},
            )
            sigmodel = BinnedTemplatePDFV1(zmcsig, extended=sigyield)
            zmodel = BinnedSumPDF([sigmodel, bkgmodel])
            # Relative background uncertainty per bin.
            unc = np.array(uncnp) / np.array(bkgnp)
            constraint = zfit.constraint.GaussianConstraint(
                list(shapesys.values()), np.ones_like(unc).tolist(), unc)
            nll = zfit.loss.ExtendedBinnedNLL(zmodel, zdata, constraints=constraint)
            minimizer = zfit.minimize.Minuit(tol=1e-3, gradient=False)
            # Warm up value/gradient so tracing cost is excluded from the benchmark.
            nll.value()
            nll.value()
            nll.gradient()
            nll.gradient()
            benchmark(hypotest, minimizer, nll)
    assert True
def test_simple_examples_1D():
    """Cross-check a 2-bin template fit between zfit and pyhf.

    Builds the same counting model in both frameworks (signal strength `mu`
    plus per-bin shapesys on the background) and checks the best-fit
    parameters agree within errors.
    """
    import zfit.data
    import zfit.z.numpy as znp

    bkgnp = [50.0, 60.0]
    signp = [5.0, 10.0]
    datanp = [60.0, 80.0]
    uncnp = [5.0, 12.0]

    # pyhf workspace JSON; the f-string pieces interpolate the lists above
    # (Python list reprs of floats are valid JSON arrays).
    serialized = (
        """{
        "channels": [
            { "name": "singlechannel",
              "samples": [
                { "name": "signal",
        """
        + f""" "data": {signp},
        """
        """ "modifiers": [ { "name": "mu", "type": "normfactor", "data": null} ]
                },
                { "name": "background",
        """
        f'"data": {bkgnp},'
        """ "modifiers": [ {"name": "uncorr_bkguncrt", "type": "shapesys",
        """
        f'"data": {uncnp}'
        """ } ]
                }
              ]
            }
        ],
        "observations": [
            { """
        f'"name": "singlechannel", "data": {datanp}'
        """ }
        ],
        "measurements": [
            { "name": "Measurement", "config": {"poi": "mu", "parameters": []} }
        ],
        "version": "1.0.0"
    }"""
    )

    obs = zfit.Space("signal",
                     binning=zfit.binned.RegularBinning(2, 0, 2, name="signal"))
    zdata = zfit.data.BinnedData.from_tensor(obs, datanp)
    zmcsig = zfit.data.BinnedData.from_tensor(obs, signp)
    zmcbkg = zfit.data.BinnedData.from_tensor(obs, bkgnp)
    # One shape-systematic parameter per bin.
    shapesys = {
        f"shapesys_{i}": zfit.Parameter(f"shapesys_{i}", 1, 0.1, 10)
        for i in range(2)
    }
    bkgmodel = BinnedTemplatePDFV1(zmcbkg, sysshape=shapesys)
    # sigyield = zfit.Parameter('sigyield', znp.sum(zmcsig.values()))
    mu = zfit.Parameter("mu", 1, 0.1, 10)
    # sigmodeltmp = BinnedTemplatePDFV1(zmcsig)
    # Signal yield = mu * total MC signal count (mu is the POI).
    sigyield = zfit.ComposedParameter(
        "sigyield",
        lambda params: params["mu"] * znp.sum(zmcsig.values()),
        params={"mu": mu},
    )
    sigmodel = BinnedTemplatePDFV1(zmcsig, extended=sigyield)
    zmodel = BinnedSumPDF([sigmodel, bkgmodel])
    # Relative background uncertainty per bin, used in the Gaussian constraint.
    unc = np.array(uncnp) / np.array(bkgnp)
    nll = zfit.loss.ExtendedBinnedNLL(
        zmodel,
        zdata,
        constraints=zfit.constraint.GaussianConstraint(list(shapesys.values()),
                                                       [1, 1], unc),
    )
    # print(nll.value())
    # print(nll.gradient())
    # minimizer = zfit.minimize.ScipyLBFGSBV1()
    # minimizer = zfit.minimize.IpyoptV1()
    minimizer = zfit.minimize.Minuit(tol=1e-5, gradient=False)
    result = minimizer.minimize(nll)
    result.hesse(method="hesse_np")
    # result.errors()
    print(result)
    # mu_z = sigmodel.get_yield() / znp.sum(zmcsig.values())
    zbestfit = zfit.run(result.params)
    errors = [p["hesse"]["error"] for p in result.params.values()]
    # print('minval actual:', nll.value(), nll.gradient())
    # errors = np.ones(3) * 0.1
    # print('mu:', mu_z)

    # Same model fitted with pyhf for comparison.
    spec = json.loads(serialized)
    workspace = pyhf.Workspace(spec)
    model = workspace.model(poi_name="mu")
    pars = model.config.suggested_init()
    data = workspace.data(model)
    model.logpdf(pars, data)
    bestfit_pars, twice_nll = pyhf.infer.mle.fit(data, model,
                                                 return_fitted_val=True)
    # Pull of the difference between the two fits; must vanish within errors.
    diff = (bestfit_pars - zbestfit) / errors
    # print(bestfit_pars)
    np.testing.assert_allclose(diff, 0, atol=1e-3)
# NOTE(review): this savefig call is the tail of a plotting function defined
# above this chunk — confirm against the full file before moving it.
fig.savefig(output)


if __name__ == '__main__':
    # sPlot validation script: load sweights and the fitted model parameters,
    # rebuild the model and overlay it on the sweighted data.
    mplhep.style.use('LHCb2')
    args = parse_input()
    branches = read_branches_dict(args.input, args.tree,
                                  [args.branch, f'sw_{args.model}'])
    fit_params = load_params(args.params)
    obs = zfit.Space('x', limits=MODEL_BDY)
    # Create a scaling factor based on total yields
    fit_var = branches[args.branch]
    sweight = branches[f'sw_{args.model}']
    # Yield = (number of events in the branch) * fitted yield ratio.
    fit_param_yld = zfit.ComposedParameter(
        'yld',
        lambda raw: fit_var.size * raw,
        {'raw': fit_params[f'yld_{args.model}_ratio']})
    fit_model_validate = MODELS[args.model](obs, fit_param_yld, fit_params)
    plot_splot(fit_var, fit_model_validate, sweight,
               output=args.output,
               data_range=MODEL_BDY,
               xlabel=args.xLabel,
               ylabel=args.yLabel,
               data_lbl=args.dataLabel,
               model_lbl=args.modelLabel,
               bins=args.bins)