def test_with_gauss_qtilde(n, min_x):
    sigma_x = 0.032
    minimizer = Minuit()
    bounds = (-10, 10)
    obs = zfit.Space("x", limits=bounds)
    mean = zfit.Parameter("mean", n * sigma_x)
    sigma = zfit.Parameter("sigma", 1.0)
    model = zfit.pdf.Gauss(obs=obs, mu=mean, sigma=sigma)
    data = model.sample(n=1000)

    nll = UnbinnedNLL(model=model, data=data)
    minimum = minimizer.minimize(loss=nll)
    minimum.hesse()

    x = minimum.params[mean]["value"]
    x_err = minimum.params[mean]["minuit_hesse"]["error"]
    x_min = x - x_err * 3
    x_max = x + x_err * 3
    x_min = max([x_min, min_x])

    poinull = POIarray(mean, np.linspace(x_min, x_max, 50))
    calculator = AsymptoticCalculator(nll, minimizer)
    ci = ConfidenceInterval(calculator, poinull, qtilde=True)
    ci.interval(alpha=0.05, printlevel=1)

def test_with_gauss_fluctuations():
    x_true = -2.0
    minimizer = Minuit()
    bounds = (-10, 10)
    obs = zfit.Space("x", limits=bounds)
    mean = zfit.Parameter("mean", 0)
    sigma = zfit.Parameter("sigma", 1.0)
    model = zfit.pdf.Gauss(obs=obs, mu=mean, sigma=sigma)

    npzfile = f"{notebooks_dir}/toys/FC_toys_{x_true}.npz"
    data = zfit.data.Data.from_numpy(obs=obs, array=np.load(npzfile)["x"])

    nll = UnbinnedNLL(model=model, data=data)
    minimum = minimizer.minimize(loss=nll)
    minimum.hesse()

    toys_fname = f"{notebooks_dir}/toys/FC_toys_{x_true}.yml"
    calculator = FrequentistCalculator.from_yaml(toys_fname, minimum, minimizer)
    keys = np.unique([k[0].value for k in calculator.keys()])
    keys.sort()
    poinull = POIarray(mean, keys)

    ci = ConfidenceInterval(calculator, poinull, qtilde=False)
    with pytest.warns(UserWarning):
        ci.interval(alpha=0.05, printlevel=0)

    ci = ConfidenceInterval(calculator, poinull, qtilde=True)
    ci.interval(alpha=0.05, printlevel=0)

def test_unbinned_simultaneous_nll():
    test_values = tf.constant(test_values_np)
    test_values = zfit.Data.from_tensor(obs=obs1, tensor=test_values)
    test_values2 = tf.constant(test_values_np2)
    test_values2 = zfit.Data.from_tensor(obs=obs1, tensor=test_values2)

    gaussian1, mu1, sigma1 = create_gauss1()
    gaussian2, mu2, sigma2 = create_gauss2()
    gaussian2 = gaussian2.create_extended(zfit.Parameter('yield_gauss2', 5))

    nll = zfit.loss.UnbinnedNLL(
        model=[gaussian1, gaussian2],
        data=[test_values, test_values2],
    )

    minimizer = Minuit(tolerance=1e-5)
    status = minimizer.minimize(loss=nll, params=[mu1, sigma1, mu2, sigma2])
    params = status.params

    assert set(nll.get_params()) == {mu1, mu2, sigma1, sigma2}
    assert params[mu1]['value'] == pytest.approx(np.mean(test_values_np), rel=0.007)
    assert params[mu2]['value'] == pytest.approx(np.mean(test_values_np2), rel=0.007)
    assert params[sigma1]['value'] == pytest.approx(np.std(test_values_np), rel=0.007)
    assert params[sigma2]['value'] == pytest.approx(np.std(test_values_np2), rel=0.007)

def test_extended_unbinned_nll():
    test_values = z.constant(test_values_np)
    test_values = zfit.Data.from_tensor(obs=obs1, tensor=test_values)
    gaussian3, mu3, sigma3, yield3 = create_gauss3ext()

    nll = zfit.loss.ExtendedUnbinnedNLL(model=gaussian3, data=test_values, fit_range=(-20, 20))
    assert {mu3, sigma3, yield3} == nll.get_params()

    minimizer = Minuit()
    status = minimizer.minimize(loss=nll)
    params = status.params

    assert params[mu3]['value'] == pytest.approx(np.mean(test_values_np), rel=0.007)
    assert params[sigma3]['value'] == pytest.approx(np.std(test_values_np), rel=0.007)
    assert params[yield3]['value'] == pytest.approx(yield_true, rel=0.007)

def test_unbinned_simultaneous_nll():
    mu1, mu2, nll, sigma1, sigma2 = create_simultaneous_loss()
    minimizer = Minuit(tol=1e-5)
    status = minimizer.minimize(loss=nll, params=[mu1, sigma1, mu2, sigma2])
    params = status.params

    assert set(nll.get_params()) == {mu1, mu2, sigma1, sigma2}
    assert params[mu1]["value"] == pytest.approx(np.mean(test_values_np), rel=0.007)
    assert params[mu2]["value"] == pytest.approx(np.mean(test_values_np2), rel=0.007)
    assert params[sigma1]["value"] == pytest.approx(np.std(test_values_np), rel=0.007)
    assert params[sigma2]["value"] == pytest.approx(np.std(test_values_np2), rel=0.007)

def test_sweights():
    minimizer = Minuit()
    mass, p, loss, Nsig, Nbkg, sig_p, bkg_p = get_data_and_loss()

    with pytest.raises(ModelNotFittedToData):
        compute_sweights(loss.model[0], mass)

    minimizer.minimize(loss)

    model = loss.model[0]
    assert is_sum_of_extended_pdfs(model)

    yields = [Nsig, Nbkg]
    sweights = compute_sweights(loss.model[0], mass)
    assert np.allclose([np.sum(sweights[y]) / get_value(y.value()) for y in yields], 1.0)

    nbins = 30
    hist_conf = dict(bins=nbins, range=[0, 10])

    hist_sig_true_p, _ = np.histogram(sig_p, **hist_conf)
    sel = hist_sig_true_p != 0
    hist_sig_true_p = hist_sig_true_p[sel]
    hist_sig_sweights_p = np.histogram(p, weights=sweights[Nsig], **hist_conf)[0][sel]
    assert chisquare(hist_sig_sweights_p, hist_sig_true_p)[-1] < 0.01

    hist_bkg_true_p, _ = np.histogram(bkg_p, **hist_conf)
    sel = hist_bkg_true_p != 0
    hist_bkg_true_p = hist_bkg_true_p[sel]
    hist_bkg_sweights_p = np.histogram(p, weights=sweights[Nbkg], **hist_conf)[0][sel]
    assert chisquare(hist_bkg_sweights_p, hist_bkg_true_p)[-1] < 0.01

    with pytest.warns(AboveToleranceWarning):
        compute_sweights(
            loss.model[0], np.concatenate([mass, np.random.normal(0.8, 0.1, 100)])
        )

    with pytest.raises(ModelNotFittedToData):
        compute_sweights(
            loss.model[0], np.concatenate([mass, np.random.normal(0.8, 0.1, 1000)])
        )

def test_with_asymptotic_calculator():
    loss, (Nsig, Nbkg) = create_loss()
    calculator = AsymptoticCalculator(loss, Minuit())

    poinull = POI(Nsig, np.linspace(0.0, 25, 20))
    poialt = POI(Nsig, 0)
    ul = UpperLimit(calculator, [poinull], [poialt])
    ul_qtilde = UpperLimit(calculator, [poinull], [poialt], qtilde=True)
    limits = ul.upperlimit(alpha=0.05, CLs=True)

    # np.savez("cls_pvalues.npz", poivalues=poinull.value, **ul.pvalues(True))
    # np.savez("clsb_pvalues.npz", poivalues=poinull.value, **ul.pvalues(False))

    assert limits["observed"] == pytest.approx(15.725784747406346, rel=0.1)
    assert limits["expected"] == pytest.approx(11.927442041887158, rel=0.1)
    assert limits["expected_p1"] == pytest.approx(16.596396280677116, rel=0.1)
    assert limits["expected_p2"] == pytest.approx(22.24864429383046, rel=0.1)
    assert limits["expected_m1"] == pytest.approx(8.592750403611896, rel=0.1)
    assert limits["expected_m2"] == pytest.approx(6.400549971360598, rel=0.1)

    ul.upperlimit(alpha=0.05, CLs=False)
    ul_qtilde.upperlimit(alpha=0.05, CLs=True)

    # test error when scan range is too small
    with pytest.raises(POIRangeError):
        poinull = POI(Nsig, np.linspace(0.0, 12, 20))
        ul = UpperLimit(calculator, [poinull], [poialt])
        ul.upperlimit(alpha=0.05, CLs=True)

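# Several tests in this section call a create_loss() helper that is not shown here.
# The sketch below is an illustrative, assumed stand-in (model shapes, ranges and
# yields are made up for illustration), not the actual fixture: an extended Gaussian
# signal plus an exponential background, with the yields Nsig and Nbkg floating.
def create_loss_sketch(nsig=20, nbkg=250):
    obs = zfit.Space("x", limits=(0.1, 3.0))

    # toy data: mostly background with a small signal peak (assumed composition)
    bkg = np.random.exponential(scale=1.0, size=nbkg)
    peak = np.random.normal(loc=1.2, scale=0.1, size=nsig)
    data_np = np.concatenate([bkg, peak])
    data_np = data_np[(data_np > 0.1) & (data_np < 3.0)]
    data = zfit.Data.from_numpy(obs=obs, array=data_np)

    # signal + background model, both extended with floating yields
    mean = zfit.Parameter("mean_sig", 1.2, 0.5, 2.0)
    sigma = zfit.Parameter("sigma_sig", 0.1, 0.02, 0.2)
    lambda_ = zfit.Parameter("lambda_bkg", -2.0, -4.0, -1.0)
    Nsig = zfit.Parameter("Nsig", nsig, -20.0, len(data_np))
    Nbkg = zfit.Parameter("Nbkg", nbkg, 0.0, 1.5 * len(data_np))
    signal = zfit.pdf.Gauss(obs=obs, mu=mean, sigma=sigma).create_extended(Nsig)
    background = zfit.pdf.Exponential(lambda_, obs=obs).create_extended(Nbkg)
    model = zfit.pdf.SumPDF([signal, background])

    loss = zfit.loss.ExtendedUnbinnedNLL(model=model, data=data)
    return loss, (Nsig, Nbkg)
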
def test_constructor():
    with pytest.raises(TypeError):
        BaseTest()

    loss, (mean, sigma) = create_loss()
    calculator = BaseCalculator(loss, Minuit())

    poimean_1 = POI(mean, [1.0, 1.1, 1.2, 1.3])
    poimean_2 = POI(mean, [1.2])
    poisigma_1 = POI(sigma, [0.06, 0.08, 0.01, 0.012, 0.014])
    poisigma_2 = POI(sigma, [0.1])

    with pytest.raises(TypeError):
        BaseTest(calculator)
    with pytest.raises(ValueError):
        BaseTest(calculator, poimean_1)
    with pytest.raises(ValueError):
        BaseTest(calculator, [poimean_1], poimean_2)
    with pytest.raises(ValueError):
        BaseTest(calculator, [poimean_1], [poisigma_2])
    with pytest.raises(ValueError):
        BaseTest(calculator, [poisigma_1], [poimean_2])

def test_with_asymptotic_calculator():
    loss, mean = create_loss()
    calculator = AsymptoticCalculator(loss, Minuit())

    poinull = POI(mean, np.linspace(1.15, 1.26, 100))
    ci = ConfidenceInterval(calculator, [poinull])
    interval = ci.interval()

    assert interval["lower"] == pytest.approx(1.1810371356602791, rel=0.1)
    assert interval["upper"] == pytest.approx(1.2156701172321935, rel=0.1)

    with pytest.raises(POIRangeError):
        poinull = POI(mean, np.linspace(1.2, 1.205, 50))
        ci = ConfidenceInterval(calculator, [poinull])
        ci.interval()

    with pytest.raises(POIRangeError):
        poinull = POI(mean, np.linspace(1.2, 1.26, 50))
        ci = ConfidenceInterval(calculator, [poinull])
        ci.interval()

    with pytest.raises(POIRangeError):
        poinull = POI(mean, np.linspace(1.17, 1.205, 50))
        ci = ConfidenceInterval(calculator, [poinull])
        ci.interval()

def test_extended_unbinned_nll(size):
    if size is None:
        test_values = z.constant(test_values_np)
        size = test_values.shape[0]
    else:
        test_values = create_test_values(size)
    test_values = zfit.Data.from_tensor(obs=obs1, tensor=test_values)

    gaussian3, mu3, sigma3, yield3 = create_gauss3ext()
    nll = zfit.loss.ExtendedUnbinnedNLL(model=gaussian3, data=test_values, fit_range=(-20, 20))
    assert {mu3, sigma3, yield3} == nll.get_params()

    minimizer = Minuit(tol=1e-4)
    status = minimizer.minimize(loss=nll)
    params = status.params

    assert params[mu3]['value'] == pytest.approx(zfit.run(tf.math.reduce_mean(test_values)), rel=0.05)
    assert params[sigma3]['value'] == pytest.approx(zfit.run(tf.math.reduce_std(test_values)), rel=0.05)
    assert params[yield3]['value'] == pytest.approx(size, rel=0.005)

def test_constructors():
    loss, (Nsig, poigen, poieval) = create_loss()
    ToyResult(poigen, poieval)

    with pytest.raises(TypeError):
        ToyResult(poigen, "poieval")
    with pytest.raises(TypeError):
        ToyResult(poieval, poieval)

    ToysManager(loss, Minuit())

def test_toymanager_attributes():
    loss, (Nsig, poigen, poieval) = create_loss()
    tm = ToysManager.from_yaml(
        f"{notebooks_dir}/toys/discovery_freq_zfit_toys.yml", loss, Minuit()
    )

    with pytest.raises(ParameterNotFound):
        ToysManager.from_yaml(
            f"{notebooks_dir}/toys/discovery_freq_zfit_toys.yml",
            create_loss_1(),
            Minuit(),
        )

    tr = list(tm.values())[0]
    assert isinstance(tr, ToyResult)
    assert list(tm.keys())[0] == (poigen, poigen)
    assert (poigen, poieval) in tm.keys()
    assert tm.get_toyresult(poigen, poieval) == tr

    tr1 = ToyResult(poigen, poieval.append(1))
    tm.add_toyresult(tr1)
    with pytest.raises(TypeError):
        tm.add_toyresult("tr1")
    assert (tr1.poigen, tr1.poieval) in tm.keys()

    tm.to_yaml(f"{pwd}/test_toyutils.yml")
    tm.to_yaml(f"{pwd}/test_toyutils.yml")

    tmc = ToysManager.from_yaml(f"{pwd}/test_toyutils.yml", loss, Minuit())
    assert (
        tm.get_toyresult(poigen, poieval).ntoys
        == tmc.get_toyresult(poigen, poieval).ntoys
    )

    samplers = tm.sampler(floating_params=[poigen.parameter])
    assert all(is_valid_data(s) for s in samplers)
    loss = tm.toys_loss(poigen.name)
    assert is_valid_loss(loss)

    os.remove(f"{pwd}/test_toyutils.yml")

def test_base_calculator(calculator):
    with pytest.raises(TypeError):
        calculator()

    loss, (mean, sigma) = create_loss()

    calc_loss = calculator(loss, Minuit())
    bestfit = calc_loss.bestfit
    calc_fitresult = calculator(bestfit, calc_loss.minimizer)

    assert calc_loss.bestfit == calc_fitresult.bestfit
    assert calc_loss.loss == calc_fitresult.loss

    mean_poi = POI(mean, [1.15, 1.2, 1.25])

    mean_nll = calc_loss.obs_nll(pois=[mean_poi])
    calc_loss.obs_nll(pois=[mean_poi])  # get from cache
    assert mean_nll[0] >= mean_nll[1]
    assert mean_nll[2] >= mean_nll[1]
    assert calc_loss.obs_nll([mean_poi[0]]) == mean_nll[0]
    assert calc_loss.obs_nll([mean_poi[1]]) == mean_nll[1]
    assert calc_loss.obs_nll([mean_poi[2]]) == mean_nll[2]

    mean_poialt = POI(mean, 1.2)

    if calculator == BaseCalculator:
        with pytest.raises(NotImplementedError):
            calc_loss.pvalue(poinull=[mean_poi], poialt=[mean_poialt])
        with pytest.raises(NotImplementedError):
            calc_loss.expected_pvalue(poinull=[mean_poi], poialt=[mean_poialt],
                                      nsigma=np.arange(-2, 3, 1))
        with pytest.raises(NotImplementedError):
            calc_loss.expected_poi(poinull=[mean_poi], poialt=[mean_poialt],
                                   nsigma=np.arange(-2, 3, 1))
    else:
        calc_loss.pvalue(poinull=[mean_poi], poialt=[mean_poialt])
        calc_loss.expected_pvalue(poinull=[mean_poi], poialt=[mean_poialt],
                                  nsigma=np.arange(-2, 3, 1))
        calc_loss.expected_poi(poinull=[mean_poi], poialt=[mean_poialt],
                               nsigma=np.arange(-2, 3, 1))

    model = calc_loss.model[0]
    sampler = model.create_sampler(n=10000)
    assert is_valid_data(sampler)

    loss = calc_loss.lossbuilder(model=[model], data=[sampler], weights=None)
    assert is_valid_loss(loss)

    with pytest.raises(ValueError):
        calc_loss.lossbuilder(model=[model, model], data=[sampler])
    with pytest.raises(ValueError):
        calc_loss.lossbuilder(model=[model], data=[sampler, calc_loss.data[0]])
    with pytest.raises(ValueError):
        calc_loss.lossbuilder(model=[model], data=[sampler], weights=[])
    with pytest.raises(ValueError):
        calc_loss.lossbuilder(model=[model], data=[sampler],
                              weights=[np.ones(10000), np.ones(10000)])

def test_unbinned_nll(weights, sigma, options):
    gaussian1, mu1, sigma1 = create_gauss1()
    gaussian2, mu2, sigma2 = create_gauss2()

    test_values = tf.constant(test_values_np)
    test_values = zfit.Data.from_tensor(obs=obs1, tensor=test_values, weights=weights)
    nll_object = zfit.loss.UnbinnedNLL(model=gaussian1, data=test_values, options=options)
    minimizer = Minuit(tol=1e-5)
    status = minimizer.minimize(loss=nll_object, params=[mu1, sigma1])
    params = status.params

    rel_error = 0.12 if weights is None else 0.1  # more fluctuating with weights
    assert params[mu1]['value'] == pytest.approx(np.mean(test_values_np), rel=rel_error)
    assert params[sigma1]['value'] == pytest.approx(np.std(test_values_np), rel=rel_error)

    constraints = zfit.constraint.nll_gaussian(params=[mu2, sigma2],
                                               observation=[mu_constr[0], sigma_constr[0]],
                                               uncertainty=sigma())
    nll_object = UnbinnedNLL(model=gaussian2, data=test_values, constraints=constraints,
                             options=options)

    minimizer = Minuit(tol=1e-4)
    status = minimizer.minimize(loss=nll_object, params=[mu2, sigma2])
    params = status.params
    if weights is None:
        assert params[mu2]['value'] > np.average(test_values_np, weights=weights)
        assert params[sigma2]['value'] < np.std(test_values_np)

def test_with_asymptotic_calculator():
    loss, (Nsig, Nbkg) = create_loss()
    calculator = AsymptoticCalculator(loss, Minuit())

    poinull = POI(Nsig, 0)
    discovery_test = Discovery(calculator, [poinull])
    pnull, significance = discovery_test.result()

    assert pnull == pytest.approx(0.0007571045089567185, abs=0.05)
    assert significance == pytest.approx(3.1719464953752565, abs=0.05)
    assert significance >= 3

def test_attributes():
    loss, (mean, sigma) = create_loss()
    calculator = BaseCalculator(loss, Minuit())

    poimean_1 = POI(mean, [1.0, 1.1, 1.2, 1.3])
    poimean_2 = POI(mean, [1.2])

    test = BaseTest(calculator, [poimean_1], [poimean_2])
    assert test.poinull == [poimean_1]
    assert test.poialt == [poimean_2]
    assert test.calculator == calculator

def test_attributes():
    loss, (mean, sigma) = create_loss()
    calculator = BaseCalculator(loss, Minuit())

    poimean_1 = POIarray(mean, [1.0, 1.1, 1.2, 1.3])
    poimean_2 = POI(mean, 1.2)

    test = BaseTest(calculator, poimean_1, poimean_2)
    assert test.poinull == poimean_1
    assert test.poialt == poimean_2
    assert test.calculator == calculator

def test_frequentist_calculator_one_poi(constraint):
    with pytest.raises(TypeError):
        FrequentistCalculator()

    loss, (mean, sigma) = create_loss(constraint=constraint)
    calc = FrequentistCalculator(loss, Minuit(), ntoysnull=100, ntoysalt=100)

    assert calc.ntoysnull == 100
    assert calc.ntoysalt == 100

    samplers = calc.sampler(floating_params=[mean])
    assert all(is_valid_data(s) for s in samplers)
    loss = calc.toys_loss(mean.name)
    assert is_valid_loss(loss)

def test_counting_with_asymptotic_calculator():
    loss, Nsig = create_loss_counting()
    calculator = AsymptoticCalculator(loss, Minuit())

    poinull = POI(Nsig, 0)
    discovery_test = Discovery(calculator, poinull)
    pnull, significance = discovery_test.result()
    assert significance < 2

def test_counting_with_frequentist_calculator():
    loss, Nsig = create_loss_counting()
    calculator = FrequentistCalculator(loss, Minuit(), ntoysnull=1000)

    poinull = POI(Nsig, 0)
    discovery_test = Discovery(calculator, poinull)
    pnull, significance = discovery_test.result()
    assert significance < 2

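# The two counting tests above rely on a create_loss_counting() helper that is not
# shown here. A minimal assumed sketch of such a setup is given below: a single
# counting channel modelled as a uniform PDF whose total yield is Nsig plus a fixed
# expected background. All names and numbers are illustrative, not the actual fixture.
def create_loss_counting_sketch(n_observed=5, n_bkg_expected=4.5):
    obs = zfit.Space("x", limits=(0.0, 1.0))
    Nsig = zfit.Parameter("Nsig", 1.0, -10.0, 50.0)
    # total expected yield = signal + fixed background
    Ntot = zfit.ComposedParameter("Ntot", lambda nsig: nsig + n_bkg_expected, params=[Nsig])
    model = zfit.pdf.Uniform(low=0.0, high=1.0, obs=obs).create_extended(Ntot)
    data = zfit.Data.from_numpy(obs=obs, array=np.random.uniform(0.0, 1.0, size=n_observed))
    loss = zfit.loss.ExtendedUnbinnedNLL(model=model, data=data)
    return loss, Nsig
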
def test_with_frequentist_calculator():
    loss, (Nsig, Nbkg) = create_loss()
    calculator = FrequentistCalculator.from_yaml(
        f"{notebooks_dir}/toys/discovery_freq_zfit_toys.yml", loss, Minuit()
    )

    poinull = POI(Nsig, 0)
    discovery_test = Discovery(calculator, poinull)
    pnull, significance = discovery_test.result()

    assert pnull == pytest.approx(0.0004, rel=0.05, abs=0.0005)
    assert significance == pytest.approx(3.3527947805048592, rel=0.05, abs=0.1)
    assert significance >= 3

def test_asymptotic_calculator_two_pois():
    loss, (mean, sigma) = create_loss()
    calc = AsymptoticCalculator(loss, Minuit())

    poi_null = [POI(mean, [1.15, 1.2, 1.25]), POI(sigma, [0.05, 0.1])]
    poi_alt = [POI(mean, 1.2), POI(sigma, 0.1)]

    with pytest.raises(NotImplementedError):
        calc.check_pois(poi_null)
    with pytest.raises(NotImplementedError):
        calc.pvalue(poi_null, poi_alt)
    with pytest.raises(NotImplementedError):
        calc.expected_pvalue(poinull=poi_null, poialt=poi_alt, nsigma=np.arange(-2, 3, 1))
    with pytest.raises(NotImplementedError):
        calc.expected_poi(poinull=poi_null, poialt=poi_alt, nsigma=np.arange(-2, 3, 1))

def test_constructor():
    with pytest.raises(TypeError):
        UpperLimit()

    loss, (Nsig, Nbkg) = create_loss()
    calculator = BaseCalculator(loss, Minuit())

    poi_1 = POI(Nsig, 0.0)
    poi_2 = POI(Nsig, 2.0)

    with pytest.raises(TypeError):
        UpperLimit(calculator)
    with pytest.raises(TypeError):
        UpperLimit(calculator, poi_1)
    with pytest.raises(TypeError):
        UpperLimit(calculator, [poi_1], poi_2)

def test_constructor():
    with pytest.raises(TypeError):
        BaseTest()

    loss, (mean, sigma) = create_loss()
    calculator = BaseCalculator(loss, Minuit())

    poimean = POIarray(mean, [1.0, 1.1, 1.2, 1.3])
    poisigma = POI(sigma, 0.1)

    with pytest.raises(TypeError):
        BaseTest(calculator)
    with pytest.raises(TypeError):
        BaseTest(calculator, poimean, [poisigma])
    with pytest.raises(TypeError):
        BaseTest("calculator", poimean, poisigma)

def test_constructor():
    with pytest.raises(TypeError):
        ConfidenceInterval()

    loss, mean = create_loss()
    calculator = BaseCalculator(loss, Minuit())

    poi_1 = POI(mean, 1.5)
    poi_2 = POI(mean, 1.2)

    with pytest.raises(TypeError):
        ConfidenceInterval(calculator)
    with pytest.raises(TypeError):
        ConfidenceInterval(calculator, [poi_1], poi_2, qtilde=True)
    with pytest.raises(TypeError):
        ConfidenceInterval(calculator, [poi_1], [poi_2], qtilde=False)

def test_asymptotic_calculator_one_poi():
    with pytest.raises(TypeError):
        AsymptoticCalculator()

    loss, (mean, sigma) = create_loss()
    calc = AsymptoticCalculator(loss, Minuit())

    poi_null = POIarray(mean, [1.15, 1.2, 1.25])
    poi_alt = POI(mean, 1.2)

    dataset = calc.asimov_dataset(poi_alt)
    assert all(is_valid_data(d) for d in dataset)
    loss = calc.asimov_loss(poi_alt)
    assert is_valid_loss(loss)

    null_nll = calc.asimov_nll(pois=poi_null, poialt=poi_alt)
    assert null_nll[0] >= null_nll[1]
    assert null_nll[2] >= null_nll[1]

def test_constructor():
    with pytest.raises(TypeError):
        Discovery()

    loss, (Nsig, Nbkg) = create_loss()
    calculator = BaseCalculator(loss, Minuit())

    poi_1 = POI(Nsig, [0.0])
    poi_2 = POI(Nsig, [2.0])

    with pytest.raises(TypeError):
        Discovery(calculator)
    with pytest.raises(ValueError):
        Discovery(calculator, poi_1)
    with pytest.raises(TypeError):
        Discovery(calculator, [poi_1], poi_2)
    with pytest.raises(TypeError):
        Discovery(calculator, [poi_1], [poi_2])

def test_binned_nll(weights):
    obs = zfit.Space("obs1", limits=(-15, 25))
    gaussian1, mu1, sigma1 = create_gauss1(obs=obs)
    gaussian2, mu2, sigma2 = create_gauss2(obs=obs)
    test_values_np = np.random.normal(loc=mu_true, scale=4, size=(10000, 1))

    test_values = tf.constant(test_values_np)
    test_values = zfit.Data.from_tensor(obs=obs, tensor=test_values, weights=weights)
    test_values_binned = test_values.create_hist(
        converter=zfit.hist.histogramdd, bin_kwargs={"bins": 100}
    )
    nll_object = zfit.loss.BinnedNLL(model=gaussian1, data=test_values_binned)
    minimizer = Minuit()
    status = minimizer.minimize(loss=nll_object, params=[mu1, sigma1])
    params = status.params

    rel_error = 0.035 if weights is None else 0.15  # more fluctuating with weights
    assert params[mu1]["value"] == pytest.approx(np.mean(test_values_np), rel=rel_error)
    assert params[sigma1]["value"] == pytest.approx(np.std(test_values_np), rel=rel_error)

    constraints = zfit.constraint.nll_gaussian(
        params=[mu2, sigma2],
        mu=[mu_constr[0], sigma_constr[0]],
        sigma=[mu_constr[1], sigma_constr[1]],
    )
    nll_object = zfit.loss.BinnedNLL(
        model=gaussian2,
        data=test_values_binned,
        fit_range=(-np.infty, np.infty),
        constraints=constraints,
    )

    minimizer = Minuit()
    status = minimizer.minimize(loss=nll_object, params=[mu2, sigma2])
    params = status.params
    if weights is None:
        assert params[mu2]["value"] > np.mean(test_values_np)
        assert params[sigma2]["value"] < np.std(test_values_np)

def test_base_calculator(calculator):
    with pytest.raises(TypeError):
        calculator()

    loss, (mean, sigma) = create_loss()

    with pytest.raises(ValueError):
        calculator("loss", Minuit())
    with pytest.raises(ValueError):
        calculator(loss, "Minuit()")

    calc_loss = calculator(loss, Minuit())
    with pytest.raises(ValueError):
        calc_loss.bestfit = "bestfit"

    bestfit = calc_loss.bestfit
    calc_fitresult = calculator(bestfit, calc_loss.minimizer)

    assert calc_loss.bestfit == calc_fitresult.bestfit
    assert calc_loss.loss == calc_fitresult.loss

    mean_poi = POIarray(mean, [1.15, 1.2, 1.25])

    mean_nll = calc_loss.obs_nll(pois=mean_poi)
    calc_loss.obs_nll(pois=mean_poi)  # get from cache
    assert mean_nll[0] >= mean_nll[1]
    assert mean_nll[2] >= mean_nll[1]
    assert calc_loss.obs_nll(mean_poi[0]) == mean_nll[0]
    assert calc_loss.obs_nll(mean_poi[1]) == mean_nll[1]
    assert calc_loss.obs_nll(mean_poi[2]) == mean_nll[2]

    mean_poialt = POI(mean, 1.2)

    pvalue = lambda: calc_loss.pvalue(poinull=mean_poi, poialt=mean_poialt)
    exp_pvalue = lambda: calc_loss.expected_pvalue(
        poinull=mean_poi, poialt=mean_poialt, nsigma=np.arange(-2, 3, 1)
    )
    exp_poi = lambda: calc_loss.expected_poi(
        poinull=mean_poi, poialt=mean_poialt, nsigma=np.arange(-2, 3, 1)
    )

    if calculator == BaseCalculator:
        with pytest.raises(NotImplementedError):
            pvalue()
        with pytest.raises(NotImplementedError):
            exp_pvalue()
    else:
        pvalue()
        exp_pvalue()

    model = calc_loss.model[0]
    sampler = model.create_sampler(n=10000)
    assert is_valid_data(sampler)

    loss = calc_loss.lossbuilder(model=[model], data=[sampler], weights=None)
    assert is_valid_loss(loss)

    with pytest.raises(ValueError):
        calc_loss.lossbuilder(model=[model, model], data=[sampler])
    with pytest.raises(ValueError):
        calc_loss.lossbuilder(model=[model], data=[sampler, calc_loss.data[0]])
    with pytest.raises(ValueError):
        calc_loss.lossbuilder(model=[model], data=[sampler], weights=[])
    with pytest.raises(ValueError):
        calc_loss.lossbuilder(
            model=[model], data=[sampler], weights=[np.ones(10000), np.ones(10000)]
        )

    assert calc_loss.get_parameter(mean_poi.name) == mean
    with pytest.raises(KeyError):
        calc_loss.get_parameter("dummy_parameter")

def freq_calc():
    loss, mean = create_loss()
    calculator = FrequentistCalculator.from_yaml(
        f"{notebooks_dir}/toys/ci_freq_zfit_toys.yml", loss, Minuit()
    )
    return mean, calculator
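
# The snippets in this section omit their module-level imports and shared fixtures
# (create_gauss*, create_loss*, test_values_np, obs1, notebooks_dir, pwd, ...), which
# live in the surrounding test modules. A preamble along the following lines is
# assumed (names inferred from usage; exact module paths may differ between
# hepstats/zfit versions):
import os

import numpy as np
import pytest
import tensorflow as tf
import zfit
from scipy.stats import chisquare
from zfit import z
from zfit.loss import UnbinnedNLL
from zfit.minimize import Minuit

from hepstats.hypotests import ConfidenceInterval, Discovery, UpperLimit
from hepstats.hypotests.calculators import AsymptoticCalculator, FrequentistCalculator
from hepstats.hypotests.parameters import POI, POIarray
from hepstats.splot import compute_sweights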