Example #1
def test_pulls(mock_draw):
    bestfit = np.asarray([0.8, 1.0, 1.05, 1.1])
    uncertainty = np.asarray([0.9, 1.0, 0.03, 0.7])
    labels = ["a", "b", "staterror_region[bin_0]", "c"]
    exclude_list = ["a"]
    folder_path = "tmp"
    fit_results = fit.FitResults(bestfit, uncertainty, labels, np.empty(0),
                                 1.0)

    filtered_bestfit = np.asarray([1.0, 1.1])
    filtered_uncertainty = np.asarray([1.0, 0.7])
    filtered_labels = np.asarray(["b", "c"])
    figure_path = pathlib.Path(folder_path) / "pulls.pdf"

    # with filtering
    visualize.pulls(
        fit_results,
        figure_folder=folder_path,
        exclude_list=exclude_list,
        method="matplotlib",
    )

    mock_draw.assert_called_once()
    assert np.allclose(mock_draw.call_args[0][0], filtered_bestfit)
    assert np.allclose(mock_draw.call_args[0][1], filtered_uncertainty)
    assert np.all([
        mock_draw.call_args[0][2][i] == filtered_labels[i]
        for i in range(len(filtered_labels))
    ])
    assert mock_draw.call_args[0][3] == figure_path
    assert mock_draw.call_args[1] == {}

    # without filtering via list, but with staterror removal
    # and fixed parameter removal
    fit_results.uncertainty[0] = 0.0

    bestfit_expected = np.asarray([1.0, 1.1])
    uncertainty_expected = np.asarray([1.0, 0.7])
    labels_expected = ["b", "c"]
    visualize.pulls(fit_results,
                    figure_folder=folder_path,
                    method="matplotlib")

    assert np.allclose(mock_draw.call_args[0][0], bestfit_expected)
    assert np.allclose(mock_draw.call_args[0][1], uncertainty_expected)
    assert np.all([
        mock_draw.call_args[0][2][i] == labels_expected[i]
        for i in range(len(labels_expected))
    ])
    assert mock_draw.call_args[0][3] == figure_path
    assert mock_draw.call_args[1] == {}

    # unknown plotting method
    with pytest.raises(NotImplementedError, match="unknown backend: unknown"):
        visualize.pulls(
            fit_results,
            figure_folder=folder_path,
            exclude_list=exclude_list,
            method="unknown",
        )
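
The two calls above pin down the filtering behavior: parameters in exclude_list, parameters whose label contains "staterror", and fixed parameters (uncertainty of exactly zero) are all dropped before drawing. A minimal sketch of that selection, mirroring the observed behavior rather than cabinetry's actual source (the helper name is hypothetical):

import numpy as np

def filter_pulls(fit_results, exclude_list=None):
    # hypothetical helper: keep a parameter only if it is not excluded,
    # not a staterror modifier, and not fixed (zero uncertainty)
    exclude = set(exclude_list or [])
    mask = [
        label not in exclude
        and "staterror" not in label
        and unc != 0.0
        for label, unc in zip(fit_results.labels, fit_results.uncertainty)
    ]
    return (
        fit_results.bestfit[mask],
        fit_results.uncertainty[mask],
        np.asarray(fit_results.labels)[mask],
    )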
Example #2
def test_correlation_matrix(mock_draw):
    corr_mat = np.asarray([[1.0, 0.2, 0.1], [0.2, 1.0, 0.1], [0.1, 0.1, 1.0]])
    corr_mat_pruned = np.asarray([[1.0, 0.2], [0.2, 1.0]])
    labels = ["a", "b", "c"]
    labels_pruned = ["a", "b"]
    folder_path = "tmp"
    figure_path = pathlib.Path(folder_path) / "correlation_matrix.pdf"
    fit_results = fit.FitResults(np.empty(0), np.empty(0), labels, corr_mat,
                                 1.0)

    # pruning with threshold
    visualize.correlation_matrix(fit_results,
                                 figure_folder=folder_path,
                                 pruning_threshold=0.15)

    mock_draw.assert_called_once()
    assert np.allclose(mock_draw.call_args[0][0], corr_mat_pruned)
    assert np.all([
        mock_draw.call_args[0][1][i] == labels_pruned[i]
        for i in range(len(labels_pruned))
    ])
    assert mock_draw.call_args[0][2] == figure_path
    assert mock_draw.call_args[1] == {"close_figure": False}

    # pruning of fixed parameter (all zeros in correlation matrix row/column), close
    # figure
    corr_mat_fixed = np.asarray([[1.0, 0.2, 0.0], [0.2, 1.0, 0.0],
                                 [0.0, 0.0, 0.0]])
    fit_results_fixed = fit.FitResults(np.empty(0), np.empty(0), labels,
                                       corr_mat_fixed, 1.0)
    visualize.correlation_matrix(fit_results_fixed,
                                 figure_folder=folder_path,
                                 close_figure=True)
    assert np.allclose(mock_draw.call_args_list[1][0][0], corr_mat_pruned)
    assert np.all([
        mock_draw.call_args_list[1][0][1][i] == labels_pruned[i]
        for i in range(len(labels_pruned))
    ])
    assert mock_draw.call_args[1] == {"close_figure": True}

    # unknown plotting method
    with pytest.raises(NotImplementedError, match="unknown backend: unknown"):
        visualize.correlation_matrix(fit_results,
                                     figure_folder=folder_path,
                                     method="unknown")
Example #3
def test_print_results(caplog):
    caplog.set_level(logging.DEBUG)

    bestfit = np.asarray([1.0, 2.0])
    uncertainty = np.asarray([0.1, 0.3])
    labels = ["param_A", "param_B"]
    fit_results = fit.FitResults(bestfit, uncertainty, labels, np.empty(0), 0.0)

    fit.print_results(fit_results)
    assert "param_A =  1.0000 +/- 0.1000" in [rec.message for rec in caplog.records]
    assert "param_B =  2.0000 +/- 0.3000" in [rec.message for rec in caplog.records]
    caplog.clear()
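
The asserted messages fix the formatting: labels padded to a common width, best-fit values at four decimals with a leading sign space. A sketch consistent with that output, not necessarily cabinetry's exact implementation:

import logging

log = logging.getLogger(__name__)

def print_results(fit_results):
    # pad labels so the "=" signs line up across parameters
    max_len = max(len(label) for label in fit_results.labels)
    for i, label in enumerate(fit_results.labels):
        log.info(
            f"{label.ljust(max_len)} = {fit_results.bestfit[i]: .4f} "
            f"+/- {fit_results.uncertainty[i]:.4f}"
        )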
Example #4
def test_data_mc(mock_util, mock_validate, mock_fit, mock_vis, cli_helpers,
                 tmp_path):
    workspace = {"workspace": "mock"}
    workspace_path = str(tmp_path / "workspace.json")

    # need to save workspace to file since click looks for it
    with open(workspace_path, "w") as f:
        f.write('{"workspace": "mock"}')

    runner = CliRunner()

    # default
    result = runner.invoke(cli.data_mc, [workspace_path])
    assert result.exit_code == 0
    assert mock_util.call_args_list == [((workspace, ), {})]
    assert mock_validate.call_count == 0
    assert mock_fit.call_count == 0
    assert mock_vis.call_args_list == [(
        ("model", "data"),
        {
            "config": None,
            "figure_folder": "figures",
            "fit_results": None
        },
    )]

    # with config, post-fit, custom figure folder
    config = {"General": {"Measurement": "test_config"}}
    config_path = str(tmp_path / "config.yml")
    cli_helpers.write_config(config_path, config)
    fit_results = fit.FitResults(np.asarray([1.0]), np.asarray([0.1]),
                                 ["label"], np.asarray([[1.0]]), 1.0)

    result = runner.invoke(
        cli.data_mc,
        [
            workspace_path, "--config", config_path, "--postfit",
            "--figfolder", "folder"
        ],
    )
    assert result.exit_code == 0
    assert mock_util.call_args_list[-1] == ((workspace, ), {})
    assert mock_validate.call_args_list == [((config, ), {})]
    assert mock_fit.call_args_list == [(("model", "data"), {})]
    assert mock_vis.call_args_list[-1] == (
        ("model", "data"),
        {
            "config": config,
            "figure_folder": "folder",
            "fit_results": fit_results
        },
    )
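
The two invocations pin down the control flow: the config is only validated when --config is passed, the fit only runs with --postfit, and --figfolder defaults to "figures". A hedged sketch of a click command with that behavior; option wiring and the validate/fit call targets are assumptions, not cabinetry's CLI source:

import json

import click
import yaml

from cabinetry import configuration, fit, model_utils, visualize

@click.command()
@click.argument("ws_spec", type=click.Path(exists=True))
@click.option("--config", "config_path", type=click.Path(exists=True))
@click.option("--postfit", is_flag=True)
@click.option("--figfolder", default="figures")
def data_mc(ws_spec, config_path, postfit, figfolder):
    with open(ws_spec) as f:
        ws = json.load(f)
    model, data = model_utils.model_and_data(ws)
    config = fit_results = None
    if config_path is not None:
        with open(config_path) as f:
            config = yaml.safe_load(f)
        configuration.validate(config)  # assumed target of mock_validate
    if postfit:
        fit_results = fit.fit(model, data)  # assumed target of mock_fit
    visualize.data_MC(model, data, config=config,
                      figure_folder=figfolder, fit_results=fit_results)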
Example #5
def test_ranking(mock_util, mock_fit, mock_rank, mock_vis, tmp_path):
    workspace = {"workspace": "mock"}
    bestfit = np.asarray([1.0])
    uncertainty = np.asarray([0.1])
    labels = ["label"]
    corr_mat = np.asarray([[1.0]])
    fit_results = fit.FitResults(bestfit, uncertainty, labels, corr_mat, 1.0)

    workspace_path = str(tmp_path / "workspace.json")

    # need to save workspace to file since click looks for it
    with open(workspace_path, "w") as f:
        f.write('{"workspace": "mock"}')

    runner = CliRunner()

    # default
    result = runner.invoke(cli.ranking, [workspace_path])
    assert result.exit_code == 0
    assert mock_util.call_args_list == [((workspace, ), {"asimov": False})]
    assert mock_fit.call_args_list == [(("model", "data"), {})]
    assert mock_rank.call_args_list == [(("model", "data"), {
        "fit_results": fit_results
    })]
    assert mock_vis.call_count == 1
    assert np.allclose(mock_vis.call_args[0][0].prefit_up, [[1.2]])
    assert np.allclose(mock_vis.call_args[0][0].prefit_down, [[0.8]])
    assert np.allclose(mock_vis.call_args[0][0].postfit_up, [[1.1]])
    assert np.allclose(mock_vis.call_args[0][0].postfit_down, [[0.9]])
    assert mock_vis.call_args[1] == {
        "figure_folder": "figures",
        "max_pars": 10
    }

    # Asimov, maximum amount of parameters, custom folder
    result = runner.invoke(
        cli.ranking,
        ["--asimov", "--max_pars", 3, "--figfolder", "folder", workspace_path],
    )
    assert result.exit_code == 0
    assert mock_util.call_args_list[-1] == ((workspace, ), {"asimov": True})
    assert mock_fit.call_args_list[-1] == (("model", "data"), {})
    assert mock_rank.call_args_list[-1] == (
        ("model", "data"),
        {
            "fit_results": fit_results
        },
    )
    assert mock_vis.call_args_list[-1][1] == {
        "figure_folder": "folder",
        "max_pars": 3
    }
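
A plausible decorator stack for this test, modeled on the pattern shown in Example #9; the patch targets and the RankingResults field order are assumptions, and the return values simply echo the numbers asserted above:

from unittest import mock

import numpy as np

from cabinetry import fit

@mock.patch("cabinetry.visualize.ranking", autospec=True)
@mock.patch(
    "cabinetry.fit.ranking",
    return_value=fit.RankingResults(
        np.asarray([1.0]), np.asarray([0.1]), ["label"],
        np.asarray([[1.2]]), np.asarray([[0.8]]),  # prefit up/down
        np.asarray([[1.1]]), np.asarray([[0.9]]),  # postfit up/down
    ),
    autospec=True,
)
@mock.patch(
    "cabinetry.fit.fit",
    return_value=fit.FitResults(np.asarray([1.0]), np.asarray([0.1]),
                                ["label"], np.asarray([[1.0]]), 1.0),
    autospec=True,
)
@mock.patch(
    "cabinetry.model_utils.model_and_data",
    return_value=("model", "data"),
    autospec=True,
)
def test_ranking(mock_util, mock_fit, mock_rank, mock_vis, tmp_path):
    ...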
Example #6
def test_FitResults():
    bestfit = np.asarray([1.0])
    uncertainty = np.asarray([0.1])
    labels = ["par_a"]
    corr_mat = np.asarray([[1.0]])
    best_twice_nll = 2.0
    fit_results = fit.FitResults(bestfit, uncertainty, labels, corr_mat, best_twice_nll)
    assert np.allclose(fit_results.bestfit, bestfit)
    assert np.allclose(fit_results.uncertainty, uncertainty)
    assert fit_results.labels == labels
    assert np.allclose(fit_results.corr_mat, corr_mat)
    assert fit_results.best_twice_nll == best_twice_nll
    assert fit_results.goodness_of_fit == -1
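
A minimal container that satisfies every assertion in this test is a NamedTuple whose goodness_of_fit field defaults to -1; a sketch, not necessarily cabinetry's exact definition:

from typing import List, NamedTuple

import numpy as np

class FitResults(NamedTuple):
    bestfit: np.ndarray
    uncertainty: np.ndarray
    labels: List[str]
    corr_mat: np.ndarray
    best_twice_nll: float
    goodness_of_fit: float = -1  # matches the default asserted above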
Example #7
def test_ranking(mock_fit, example_spec):
    example_spec["measurements"][0]["config"]["parameters"][0]["fixed"] = False
    bestfit = np.asarray([0.9, 1.0])
    uncertainty = np.asarray([0.02, 0.1])
    labels = ["staterror", "mu"]
    fit_results = fit.FitResults(bestfit, uncertainty, labels, np.empty(0), 0.0)
    ranking_results = fit.ranking(example_spec, fit_results)

    # correct call to fit
    expected_fix = [True, False]
    expected_inits = [[0.94956657, 1.0], [0.85043343, 1.0], [0.92, 1.0], [0.88, 1.0]]
    assert mock_fit.call_count == 4
    for i in range(4):
        assert np.allclose(
            mock_fit.call_args_list[i][1]["init_pars"], expected_inits[i]
        )
        assert np.allclose(mock_fit.call_args_list[i][1]["fix_pars"], expected_fix)
        assert mock_fit.call_args_list[i][1]["custom_fit"] is False

    # POI removed from fit results
    assert np.allclose(ranking_results.bestfit, [0.9])
    assert np.allclose(ranking_results.uncertainty, [0.02])
    assert ranking_results.labels == ["staterror"]

    # received correct mock results
    assert np.allclose(ranking_results.prefit_up, [0.3])
    assert np.allclose(ranking_results.prefit_down, [-0.3])
    assert np.allclose(ranking_results.postfit_up, [0.2])
    assert np.allclose(ranking_results.postfit_down, [-0.2])

    # fixed parameter in ranking, custom fit
    example_spec["measurements"][0]["config"]["parameters"][0]["fixed"] = True
    ranking_results = fit.ranking(example_spec, fit_results, custom_fit=True)
    # expect two calls in this ranking (and had 4 before, so 6 total): pre-fit
    # uncertainty is 0 since parameter is fixed, mock post-fit uncertainty is not 0
    assert mock_fit.call_count == 6
    assert mock_fit.call_args[1]["custom_fit"] is True
    assert np.allclose(ranking_results.prefit_up, [0.0])
    assert np.allclose(ranking_results.prefit_down, [0.0])
    assert np.allclose(ranking_results.postfit_up, [0.2])
    assert np.allclose(ranking_results.postfit_down, [-0.2])
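
The attributes read off ranking_results imply a companion container along these lines; the field order is inferred from the FitResults pattern and is an assumption:

from typing import List, NamedTuple

import numpy as np

class RankingResults(NamedTuple):
    bestfit: np.ndarray
    uncertainty: np.ndarray
    labels: List[str]
    prefit_up: np.ndarray
    prefit_down: np.ndarray
    postfit_up: np.ndarray
    postfit_down: np.ndarray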
Example #8
def test_fit(mock_util, mock_fit, mock_pulls, mock_corrmat, tmp_path):
    workspace = {"workspace": "mock"}
    bestfit = np.asarray([1.0])
    uncertainty = np.asarray([0.1])
    labels = ["label"]
    corr_mat = np.asarray([[1.0]])
    fit_results = fit.FitResults(bestfit, uncertainty, labels, corr_mat, 1.0)

    workspace_path = str(tmp_path / "workspace.json")

    # need to save workspace to file since click looks for it
    with open(workspace_path, "w") as f:
        f.write('{"workspace": "mock"}')

    runner = CliRunner()

    # default
    result = runner.invoke(cli.fit, [workspace_path])
    assert result.exit_code == 0
    assert mock_util.call_args_list == [((workspace, ), {"asimov": False})]
    assert mock_fit.call_args_list == [(("model", "data"), {
        "minos": None,
        "goodness_of_fit": False
    })]

    # Asimov
    result = runner.invoke(cli.fit, ["--asimov", workspace_path])
    assert result.exit_code == 0
    assert mock_util.call_args_list[-1] == ((workspace, ), {"asimov": True})
    assert mock_fit.call_args_list[-1] == (
        ("model", "data"),
        {
            "minos": None,
            "goodness_of_fit": False
        },
    )

    # MINOS for one parameter
    result = runner.invoke(cli.fit, ["--minos", "par", workspace_path])
    assert result.exit_code == 0
    assert mock_util.call_args_list[-1] == ((workspace, ), {"asimov": False})
    assert mock_fit.call_args_list[-1] == (
        ("model", "data"),
        {
            "minos": ["par"],
            "goodness_of_fit": False
        },
    )

    # MINOS for multiple parameters
    result = runner.invoke(
        cli.fit, ["--minos", "par_a", "--minos", "par_b", workspace_path])
    assert result.exit_code == 0
    assert mock_util.call_args_list[-1] == ((workspace, ), {"asimov": False})
    assert mock_fit.call_args_list[-1] == (
        ("model", "data"),
        {
            "minos": ["par_a", "par_b"],
            "goodness_of_fit": False
        },
    )

    # goodness-of-fit
    result = runner.invoke(cli.fit, ["--goodness_of_fit", workspace_path])
    assert result.exit_code == 0
    assert mock_util.call_args_list[-1] == ((workspace, ), {"asimov": False})
    assert mock_fit.call_args_list[-1] == (
        ("model", "data"),
        {
            "minos": None,
            "goodness_of_fit": True
        },
    )

    # pull plot
    result = runner.invoke(cli.fit, ["--pulls", workspace_path])
    assert result.exit_code == 0
    assert mock_pulls.call_args_list == [((fit_results, ), {
        "figure_folder": "figures"
    })]

    # correlation matrix plot
    result = runner.invoke(cli.fit, ["--corrmat", workspace_path])
    assert result.exit_code == 0
    assert mock_corrmat.call_args_list == [((fit_results, ), {
        "figure_folder": "figures"
    })]

    # both plots, different folder
    result = runner.invoke(
        cli.fit,
        ["--figfolder", "folder", "--pulls", "--corrmat", workspace_path])
    assert result.exit_code == 0
    assert mock_corrmat.call_args_list[-1] == (
        (fit_results, ),
        {
            "figure_folder": "folder"
        },
    )
    assert mock_pulls.call_args_list[-1] == (
        (fit_results, ),
        {
            "figure_folder": "folder"
        },
    )
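
The MINOS cases show --minos accumulating across repeated flags and defaulting to None when absent. A hedged sketch of option declarations with that behavior; the command and variable names are assumptions:

import click

@click.command()
@click.argument("ws_spec", type=click.Path(exists=True))
@click.option("--minos", multiple=True, help="run MINOS for this parameter")
@click.option("--goodness_of_fit", is_flag=True)
def fit_command(ws_spec, minos, goodness_of_fit):
    # click collects repeated --minos flags into a tuple; convert to the
    # list/None convention the assertions above expect
    minos_list = list(minos) if minos else None
    ...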
Example #9
    runner = CliRunner()

    result = runner.invoke(cli.workspace, [config_path, workspace_path])
    assert result.exit_code == 0
    assert mock_validate.call_args_list == [((config, ), {})]
    assert mock_build.call_args_list == [((config, ), {})]
    assert json.loads(pathlib.Path(workspace_path).read_text()) == {
        "workspace": "mock"
    }


@mock.patch("cabinetry.visualize.correlation_matrix", autospec=True)
@mock.patch("cabinetry.visualize.pulls", autospec=True)
@mock.patch(
    "cabinetry.fit.fit",
    return_value=fit.FitResults(np.asarray([1.0]), np.asarray([0.1]),
                                ["label"], np.asarray([[1.0]]), 1.0),
    autospec=True,
)
@mock.patch(
    "cabinetry.model_utils.model_and_data",
    return_value=("model", "data"),
    autospec=True,
)
def test_fit(mock_util, mock_fit, mock_pulls, mock_corrmat, tmp_path):
    workspace = {"workspace": "mock"}
    bestfit = np.asarray([1.0])
    uncertainty = np.asarray([0.1])
    labels = ["label"]
    corr_mat = np.asarray([[1.0]])
    fit_results = fit.FitResults(bestfit, uncertainty, labels, corr_mat, 1.0)
Example #10
def test_ranking(mock_fit, example_spec):
    example_spec["measurements"][0]["config"]["parameters"][0]["fixed"] = False
    bestfit = np.asarray([0.9, 1.0])
    uncertainty = np.asarray([0.02, 0.1])
    labels = ["staterror", "mu"]
    fit_results = fit.FitResults(bestfit, uncertainty, labels, np.empty(0),
                                 0.0)
    model, data = model_utils.model_and_data(example_spec)
    ranking_results = fit.ranking(model, data, fit_results=fit_results)

    # correct call to fit
    expected_fix = [True, False]
    expected_inits = [[0.95019305, 1.0], [0.84980695, 1.0], [0.92, 1.0],
                      [0.88, 1.0]]
    assert mock_fit.call_count == 4
    for i in range(4):
        assert mock_fit.call_args_list[i][0] == (model, data)
        assert np.allclose(mock_fit.call_args_list[i][1]["init_pars"],
                           expected_inits[i])
        assert np.allclose(mock_fit.call_args_list[i][1]["fix_pars"],
                           expected_fix)
        assert mock_fit.call_args_list[i][1]["custom_fit"] is False

    # POI removed from fit results
    assert np.allclose(ranking_results.bestfit, [0.9])
    assert np.allclose(ranking_results.uncertainty, [0.02])
    assert ranking_results.labels == ["staterror"]

    # received correct mock results
    assert np.allclose(ranking_results.prefit_up, [0.3])
    assert np.allclose(ranking_results.prefit_down, [-0.3])
    assert np.allclose(ranking_results.postfit_up, [0.2])
    assert np.allclose(ranking_results.postfit_down, [-0.2])

    # fixed parameter in ranking, custom fit
    example_spec["measurements"][0]["config"]["parameters"][0]["fixed"] = True
    model, data = model_utils.model_and_data(example_spec)
    ranking_results = fit.ranking(model,
                                  data,
                                  fit_results=fit_results,
                                  custom_fit=True)
    # expect two calls in this ranking (and had 4 before, so 6 total): pre-fit
    # uncertainty is 0 since parameter is fixed, mock post-fit uncertainty is not 0
    assert mock_fit.call_count == 6
    assert mock_fit.call_args[1]["custom_fit"] is True
    assert np.allclose(ranking_results.prefit_up, [0.0])
    assert np.allclose(ranking_results.prefit_down, [0.0])
    assert np.allclose(ranking_results.postfit_up, [0.2])
    assert np.allclose(ranking_results.postfit_down, [-0.2])

    # no reference results
    ranking_results = fit.ranking(model, data, custom_fit=True)
    assert mock_fit.call_count == 9
    # reference fit
    assert mock_fit.call_args_list[-3] == ((model, data), {"custom_fit": True})
    # fits for impact
    assert mock_fit.call_args_list[-2][0] == (model, data)
    assert np.allclose(mock_fit.call_args_list[-2][1]["init_pars"], [1.2, 1.0])
    assert mock_fit.call_args_list[-2][1]["fix_pars"] == [True, False]
    assert mock_fit.call_args_list[-2][1]["custom_fit"] is True
    assert mock_fit.call_args_list[-1][0] == (model, data)
    assert np.allclose(mock_fit.call_args_list[-1][1]["init_pars"], [0.6, 1.0])
    assert mock_fit.call_args_list[-1][1]["fix_pars"] == [True, False]
    assert mock_fit.call_args_list[-1][1]["custom_fit"] is True
    # ranking results
    assert np.allclose(ranking_results.prefit_up, [0.0])
    assert np.allclose(ranking_results.prefit_down, [0.0])
    assert np.allclose(ranking_results.postfit_up, [0.3])
    assert np.allclose(ranking_results.postfit_down, [-0.3])
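
The init_pars seen across the three ranking runs are consistent with one scheme: each non-POI parameter is held fixed, in turn, at its central value shifted up and down by the pre-fit and then the post-fit uncertainty, with one fit per variation; a zero pre-fit uncertainty (fixed parameter) skips the pre-fit variations, matching the six- and nine-call counts above. A sketch of that loop, with hypothetical helper and variable names:

from cabinetry import fit

def impact_fits(model, data, central, prefit_unc, postfit_unc,
                nominal_pars, poi_index, custom_fit=False):
    # hypothetical helper; cabinetry's actual ranking code may differ
    for i_par in range(len(nominal_pars)):
        if i_par == poi_index:
            continue  # the POI itself is not ranked
        shifts = [prefit_unc[i_par], -prefit_unc[i_par],   # pre-fit up/down
                  postfit_unc[i_par], -postfit_unc[i_par]]  # post-fit up/down
        for shift in shifts:
            if shift == 0.0:
                continue  # fixed parameter: pre-fit impact reported as 0.0
            init_pars = list(nominal_pars)
            init_pars[i_par] = central[i_par] + shift
            fix_pars = [False] * len(nominal_pars)
            fix_pars[i_par] = True  # hold the varied parameter fixed
            fit._fit_model(model, data, init_pars=init_pars,
                           fix_pars=fix_pars, custom_fit=custom_fit)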
Example #11
    # including minos
    model, data = model_utils.model_and_data(example_spec)
    fit_results = fit._fit_model_custom(model, data, minos=["Signal strength"])
    assert mock_minos.call_count == 1
    # first argument to minos call is the Minuit instance
    assert mock_minos.call_args[0][1] == ["Signal strength"]
    assert mock_minos.call_args[0][2] == [
        "staterror_Signal-Region", "Signal strength"
    ]
    assert mock_minos.call_args[1] == {}


@mock.patch(
    "cabinetry.fit._fit_model_custom",
    return_value=fit.FitResults(np.asarray([1.2]), np.asarray([0.2]), ["par"],
                                np.empty(0), 2.0),
)
@mock.patch(
    "cabinetry.fit._fit_model_pyhf",
    return_value=fit.FitResults(np.asarray([1.1]), np.asarray([0.2]), ["par"],
                                np.empty(0), 2.0),
)
def test__fit_model(mock_pyhf, mock_custom, example_spec):
    model, data = model_utils.model_and_data(example_spec)

    # pyhf API
    fit_results = fit._fit_model(model, data)
    assert mock_pyhf.call_count == 1
    assert mock_pyhf.call_args[0][0].spec == model.spec
    assert mock_pyhf.call_args[0][1] == data
    assert mock_pyhf.call_args[1] == {
Example #12
def test_data_MC(
    mock_asimov,
    mock_unc,
    mock_stdev,
    mock_table_bin,
    mock_table_channel,
    mock_dict,
    mock_bins,
    mock_draw,
    example_spec,
):
    config = {"config": "abc"}
    figure_folder = "tmp"
    model, data = model_utils.model_and_data(example_spec)

    # pre-fit plot
    visualize.data_MC(model, data, config=config, figure_folder=figure_folder)

    # Asimov parameter calculation and pre-fit uncertainties
    assert mock_asimov.call_count == 1
    assert mock_asimov.call_args_list[0][0][0] == model
    assert mock_unc.call_count == 1
    assert mock_unc.call_args_list[0][0][0] == model

    # call to stdev calculation
    assert mock_stdev.call_count == 1
    assert mock_stdev.call_args_list[0][0][0] == model
    assert np.allclose(mock_stdev.call_args_list[0][0][1], [1.0, 1.0])
    assert np.allclose(mock_stdev.call_args_list[0][0][2], [0.04956657, 0.0])
    assert np.allclose(mock_stdev.call_args_list[0][0][3],
                       np.asarray([[1.0, 0.0], [0.0, 1.0]]))
    assert mock_stdev.call_args_list[0][1] == {}

    # yield table per bin
    assert mock_table_bin.call_count == 1
    assert mock_table_bin.call_args_list[0][0][0] == model
    assert mock_table_bin.call_args_list[0][0][1] == [[[51.839756]]]
    assert mock_table_bin.call_args_list[0][0][2] == [[0.3]]
    assert mock_table_bin.call_args_list[0][0][3] == [[data[0]]]
    assert mock_table_bin.call_args_list[0][1] == {}

    # yield table per channel
    assert mock_table_channel.call_count == 1
    assert mock_table_channel.call_args_list[0][0][0] == model
    assert mock_table_channel.call_args_list[0][0][1] == [[51.839756]]
    assert mock_table_channel.call_args_list[0][0][2] == [0.3]
    assert mock_table_channel.call_args_list[0][0][3] == [data[0]]
    assert mock_table_channel.call_args_list[0][1] == {}

    assert mock_dict.call_args_list == [[(config, "Signal Region"), {}]]
    assert mock_bins.call_args_list == [[({
        "Name": "region",
        "Variable": "x"
    }, ), {}]]

    expected_histograms = [
        {
            "label": "Signal",
            "isData": False,
            "yields": np.asarray([51.839756]),
            "variable": "x",
        },
        {
            "label": "Data",
            "isData": True,
            "yields": np.asarray(data[:1]),
            "variable": "x",
        },
    ]
    assert mock_draw.call_count == 1
    assert mock_draw.call_args_list[0][0][0] == expected_histograms
    assert np.allclose(mock_draw.call_args_list[0][0][1], np.asarray([0.3]))
    np.testing.assert_equal(mock_draw.call_args_list[0][0][2],
                            np.asarray([1, 2]))
    assert mock_draw.call_args_list[0][0][3] == pathlib.Path(
        "tmp/Signal-Region_prefit.pdf")
    assert mock_draw.call_args_list[0][1] == {
        "log_scale": None,
        "log_scale_x": False
    }

    # post-fit plot and custom scale
    fit_results = fit.FitResults(
        np.asarray([1.01, 1.1]),
        np.asarray([0.03, 0.1]),
        [],
        np.asarray([[1.0, 0.2], [0.2, 1.0]]),
        0.0,
    )
    visualize.data_MC(
        model,
        data,
        config=config,
        figure_folder=figure_folder,
        fit_results=fit_results,
        log_scale=False,
    )

    assert mock_asimov.call_count == 1  # no new call

    # call to stdev calculation
    assert mock_stdev.call_count == 2
    assert mock_stdev.call_args_list[1][0][0] == model
    assert np.allclose(mock_stdev.call_args_list[1][0][1], [1.01, 1.1])
    assert np.allclose(mock_stdev.call_args_list[1][0][2], [0.03, 0.1])
    assert np.allclose(mock_stdev.call_args_list[1][0][3],
                       np.asarray([[1.0, 0.2], [0.2, 1.0]]))
    assert mock_stdev.call_args_list[1][1] == {}

    assert mock_draw.call_count == 2
    # yield at best-fit point is different from pre-fit
    assert np.allclose(mock_draw.call_args_list[1][0][0][0]["yields"],
                       57.59396892)
    assert np.allclose(mock_draw.call_args_list[1][0][1], np.asarray([0.3]))
    np.testing.assert_equal(mock_draw.call_args_list[1][0][2],
                            np.asarray([1, 2]))
    assert mock_draw.call_args_list[1][0][3] == pathlib.Path(
        "tmp/Signal-Region_postfit.pdf")
    assert mock_draw.call_args_list[1][1] == {
        "log_scale": False,
        "log_scale_x": False
    }

    # no yield table
    visualize.data_MC(model, data, config=config, include_table=False)
    assert mock_table_bin.call_count == 2  # 2 calls from before
    assert mock_table_channel.call_count == 2

    # no config specified, default variable name and bin edges, data without auxdata
    visualize.data_MC(model, data[:1])
    assert mock_draw.call_args[0][0][0]["variable"] == "bin"
    assert mock_draw.call_args[0][0][1]["variable"] == "bin"
    assert mock_draw.call_args[0][0][1]["yields"] == np.asarray(data[:1])
    np.testing.assert_equal(mock_draw.call_args[0][2], np.asarray([0, 1]))

    # unknown plotting method
    with pytest.raises(NotImplementedError, match="unknown backend: unknown"):
        visualize.data_MC(model,
                          data,
                          config=config,
                          figure_folder=figure_folder,
                          method="unknown")
Example #13
def test_data_MC(
    mock_asimov,
    mock_unc,
    mock_stdev,
    mock_table,
    mock_dict,
    mock_bins,
    mock_draw,
    example_spec,
):
    config = {}
    figure_folder = "tmp"
    model_spec = pyhf.Workspace(example_spec).model().spec

    # pre-fit plot
    visualize.data_MC(config, example_spec, figure_folder=figure_folder)

    # Asimov parameter calculation and pre-fit uncertainties
    assert mock_asimov.call_count == 1
    assert mock_asimov.call_args_list[0][0][0].spec == model_spec
    assert mock_unc.call_count == 1
    assert mock_unc.call_args_list[0][0][0].spec == model_spec

    # call to stdev calculation
    assert mock_stdev.call_count == 1
    assert mock_stdev.call_args_list[0][0][0].spec == model_spec
    assert np.allclose(mock_stdev.call_args_list[0][0][1], [1.0, 1.0])
    assert np.allclose(mock_stdev.call_args_list[0][0][2], [0.04956657, 0.0])
    assert np.allclose(
        mock_stdev.call_args_list[0][0][3], np.asarray([[1.0, 0.0], [0.0, 1.0]])
    )
    assert mock_stdev.call_args_list[0][1] == {}

    # yield table
    assert mock_table.call_count == 1
    assert mock_table.call_args_list[0][0][0].spec == model_spec
    assert mock_table.call_args_list[0][0][1] == [np.asarray([[51.839756]])]
    assert mock_table.call_args_list[0][0][2] == [[0.3]]
    assert mock_table.call_args_list[0][0][3] == [np.asarray([475])]
    assert mock_table.call_args_list[0][1] == {}

    assert mock_dict.call_args_list == [[(config, "Signal Region"), {}]]
    assert mock_bins.call_args_list == [[({"Name": "region", "Variable": "x"},), {}]]

    expected_histograms = [
        {
            "label": "Signal",
            "isData": False,
            "yields": np.asarray([51.839756]),
            "variable": "x",
        },
        {
            "label": "Data",
            "isData": True,
            "yields": np.asarray([475]),
            "variable": "x",
        },
    ]
    assert mock_draw.call_count == 1
    assert mock_draw.call_args_list[0][0][0] == expected_histograms
    assert np.allclose(mock_draw.call_args_list[0][0][1], np.asarray([0.3]))
    assert np.allclose(mock_draw.call_args_list[0][0][2], np.asarray([1, 2]))
    assert mock_draw.call_args_list[0][0][3] == pathlib.Path(
        "tmp/Signal-Region_prefit.pdf"
    )
    assert mock_draw.call_args_list[0][1] == {"log_scale": None}

    # post-fit plot and custom scale
    fit_results = fit.FitResults(
        np.asarray([1.01, 1.1]),
        np.asarray([0.03, 0.1]),
        [],
        np.asarray([[1.0, 0.2], [0.2, 1.0]]),
        0.0,
    )
    visualize.data_MC(
        config,
        example_spec,
        figure_folder=figure_folder,
        fit_results=fit_results,
        log_scale=False,
    )

    assert mock_asimov.call_count == 1  # no new call

    # call to stdev calculation
    assert mock_stdev.call_count == 2
    assert mock_stdev.call_args_list[1][0][0].spec == model_spec
    assert np.allclose(mock_stdev.call_args_list[1][0][1], [1.01, 1.1])
    assert np.allclose(mock_stdev.call_args_list[1][0][2], [0.03, 0.1])
    assert np.allclose(
        mock_stdev.call_args_list[1][0][3], np.asarray([[1.0, 0.2], [0.2, 1.0]])
    )
    assert mock_stdev.call_args_list[1][1] == {}

    assert mock_draw.call_count == 2
    # yield at best-fit point is different from pre-fit
    assert np.allclose(mock_draw.call_args_list[1][0][0][0]["yields"], 57.59396892)
    assert np.allclose(mock_draw.call_args_list[1][0][1], np.asarray([0.3]))
    assert np.allclose(mock_draw.call_args_list[1][0][2], np.asarray([1, 2]))
    assert mock_draw.call_args_list[1][0][3] == pathlib.Path(
        "tmp/Signal-Region_postfit.pdf"
    )
    assert mock_draw.call_args_list[1][1] == {"log_scale": False}

    # no yield table
    visualize.data_MC(config, example_spec, include_table=False)
    assert mock_table.call_count == 2  # 2 calls from before

    # unknown plotting method
    with pytest.raises(NotImplementedError, match="unknown backend: unknown"):
        visualize.data_MC(
            config, example_spec, figure_folder=figure_folder, method="unknown"
        )
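
The identical match string across the pulls, correlation_matrix, and both data_MC tests is consistent with a single shared backend check; a sketch, not cabinetry's actual source:

def _check_backend(method: str) -> None:
    # only the matplotlib backend is supported in these tests
    if method != "matplotlib":
        raise NotImplementedError(f"unknown backend: {method}")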