Example #1
def test_calculate_std(example_spec, example_spec_multibin):
    model = pyhf.Workspace(example_spec).model()
    parameters = np.asarray([1.05, 0.95])
    uncertainty = np.asarray([0.1, 0.1])
    corr_mat = np.asarray([[1.0, 0.2], [0.2, 1.0]])

    total_std = model_tools.calculate_std(model, parameters, uncertainty, corr_mat)
    expected_std = [[17.4320561320614]]
    assert np.allclose(ak.to_list(total_std), expected_std)

    # pre-fit
    parameters = np.asarray([1.0, 1.0])
    uncertainty = np.asarray([0.0495665682, 0.0])
    diag_corr_mat = np.diag([1.0, 1.0])
    total_std = model_tools.calculate_std(model, parameters, uncertainty, diag_corr_mat)
    expected_std = [[5.572758655480406]]  # the staterror
    assert np.allclose(ak.to_list(total_std), expected_std)

    # multiple channels, bins, staterrors
    model = pyhf.Workspace(example_spec_multibin).model()
    parameters = np.asarray([0.9, 1.05, 1.3, 0.95])
    uncertainty = np.asarray([0.1, 0.05, 0.3, 0.1])
    corr_mat = np.asarray(
        [
            [1.0, 0.1, 0.2, 0.1],
            [0.1, 1.0, 0.2, 0.3],
            [0.2, 0.2, 1.0, 0.3],
            [0.1, 0.3, 0.3, 1.0],
        ]
    )
    total_std = model_tools.calculate_std(model, parameters, uncertainty, corr_mat)
    expected_std = [[12.889685799118613, 2.6730057987217317], [3.469221814759039]]
    for i_reg in range(2):
        assert np.allclose(ak.to_list(total_std[i_reg]), expected_std[i_reg])
Example #2
def test_rename_outfile(tmpdir, script_runner):
    temp = tmpdir.join("parsed_output.json")
    command = 'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {0:s} --hide-progress'.format(
        temp.strpath
    )
    ret = script_runner.run(*shlex.split(command))
    assert ret.success

    tempout = tmpdir.join("rename_output.json")
    command = 'pyhf rename -m staterror_channel1 staterror_channelone --measurement GammaExample GamEx {0:s} --output-file {1:s}'.format(
        temp.strpath, tempout.strpath
    )
    ret = script_runner.run(*shlex.split(command))
    assert ret.success

    spec = json.loads(temp.read())
    ws = pyhf.Workspace(spec)
    assert 'GammaExample' in ws.measurement_names
    assert 'GamEx' not in ws.measurement_names
    assert 'staterror_channel1' in ws.parameters
    assert 'staterror_channelone' not in ws.parameters
    renamed_spec = json.loads(tempout.read())
    renamed_ws = pyhf.Workspace(renamed_spec)
    assert 'GammaExample' not in renamed_ws.measurement_names
    assert 'GamEx' in renamed_ws.measurement_names
    assert 'staterror_channel1' not in renamed_ws.parameters
    assert 'staterror_channelone' in renamed_ws.parameters
Example #3
    def wsMaker(self):
        """
        Apply each region patch (self.patches) to its associated JSON file (self.inputJsons) to obtain the complete workspaces

        :returns: the list of patched workspaces
        """
        if self.patches is None:
            return None
        if self.nWS == 1:
            try:
                return [pyhf.Workspace(jsonpatch.apply_patch(self.inputJsons[0], self.patches[0]))]
            except (pyhf.exceptions.InvalidSpecification, KeyError) as e:
                logger.error("The json file is corrupted:\n{}".format(e))
                return None
        else:
            workspaces = []
            for js, patch in zip(self.inputJsons, self.patches):
                wsDict = jsonpatch.apply_patch(js, patch)
                try:
                    ws = pyhf.Workspace(wsDict)
                except (pyhf.exceptions.InvalidSpecification, KeyError) as e:
                    logger.error("Json file number {} is corrupted:\n{}".format(self.inputJsons.index(json), e))
                    return None
                workspaces.append(ws)
            return workspaces
Example #4
def test_model_and_data(example_spec):
    model, data = model_tools.model_and_data(example_spec)
    assert model.spec["channels"] == example_spec["channels"]
    assert model.config.modifier_settings == {
        "normsys": {"interpcode": "code4"},
        "histosys": {"interpcode": "code4p"},
    }
    assert data == [691, 1.0]

    # requesting Asimov dataset
    model, data = model_tools.model_and_data(pyhf.Workspace(example_spec), asimov=True)
    assert model.spec["channels"] == example_spec["channels"]
    assert model.config.modifier_settings == {
        "normsys": {"interpcode": "code4"},
        "histosys": {"interpcode": "code4p"},
    }
    assert data == [112.429786, 1.0]

    # test handing a workspace instead of JSON
    model, data = model_tools.model_and_data(pyhf.Workspace(example_spec))
    assert model.spec["channels"] == example_spec["channels"]
    assert model.config.modifier_settings == {
        "normsys": {"interpcode": "code4"},
        "histosys": {"interpcode": "code4p"},
    }
    assert data == [691, 1.0]

    # without auxdata
    model, data = model_tools.model_and_data(example_spec, include_auxdata=False)
    assert data == [691]
Example #5
def test_calculate_stdev(example_spec, example_spec_multibin):
    model = pyhf.Workspace(example_spec).model()
    parameters = np.asarray([1.05, 0.95])
    uncertainty = np.asarray([0.1, 0.1])
    corr_mat = np.asarray([[1.0, 0.2], [0.2, 1.0]])

    total_stdev = model_utils.calculate_stdev(model, parameters, uncertainty,
                                              corr_mat)
    expected_stdev = [[8.03767016]]
    assert np.allclose(ak.to_list(total_stdev), expected_stdev)

    # pre-fit
    parameters = np.asarray([1.0, 1.0])
    uncertainty = np.asarray([0.0495665682, 0.0])
    diag_corr_mat = np.diag([1.0, 1.0])
    total_stdev = model_utils.calculate_stdev(model, parameters, uncertainty,
                                              diag_corr_mat)
    expected_stdev = [[2.56951880]]  # the staterror
    assert np.allclose(ak.to_list(total_stdev), expected_stdev)

    # multiple channels, bins, staterrors
    model = pyhf.Workspace(example_spec_multibin).model()
    parameters = np.asarray([0.9, 1.05, 1.3, 0.95])
    uncertainty = np.asarray([0.1, 0.05, 0.3, 0.1])
    corr_mat = np.asarray([
        [1.0, 0.1, 0.2, 0.1],
        [0.1, 1.0, 0.2, 0.3],
        [0.2, 0.2, 1.0, 0.3],
        [0.1, 0.3, 0.3, 1.0],
    ])
    total_stdev = model_utils.calculate_stdev(model, parameters, uncertainty,
                                              corr_mat)
    expected_stdev = [[8.056054, 1.670629], [2.775377]]
    for i_reg in range(2):
        assert np.allclose(total_stdev[i_reg], expected_stdev[i_reg])
Example #6
def test_get_prefit_uncertainties(example_spec, example_spec_multibin,
                                  example_spec_shapefactor):
    model = pyhf.Workspace(example_spec).model()
    uncertainties = model_tools.get_prefit_uncertainties(model)
    assert np.allclose(uncertainties, [0.0, 0.0])

    model = pyhf.Workspace(example_spec_multibin).model()
    uncertainties = model_tools.get_prefit_uncertainties(model)
    assert np.allclose(uncertainties, [0.175, 0.375, 0.0, 0.2])

    model = pyhf.Workspace(example_spec_shapefactor).model()
    uncertainties = model_tools.get_prefit_uncertainties(model)
    assert np.allclose(uncertainties, [0.0, 0.0, 0.0])
Example #7
def test_get_prefit_uncertainties(example_spec, example_spec_multibin,
                                  example_spec_shapefactor):
    model = pyhf.Workspace(example_spec).model()
    unc = model_utils.get_prefit_uncertainties(model)
    assert np.allclose(unc, [0.0, 0.0])  # fixed parameter and normfactor

    model = pyhf.Workspace(example_spec_multibin).model()
    unc = model_utils.get_prefit_uncertainties(model)
    assert np.allclose(unc, [0.2, 0.4, 0.0, 0.125])

    model = pyhf.Workspace(example_spec_shapefactor).model()
    unc = model_utils.get_prefit_uncertainties(model)
    assert np.allclose(unc, [0.0, 0.0, 0.0])
Example #8
def test_unconstrained_parameter_count(example_spec, example_spec_shapefactor):
    model = pyhf.Workspace(example_spec).model()
    assert model_utils.unconstrained_parameter_count(model) == 1

    model = pyhf.Workspace(example_spec_shapefactor).model()
    assert model_utils.unconstrained_parameter_count(model) == 3

    # fixed parameters are skipped in counting
    example_spec_shapefactor["measurements"][0]["config"]["parameters"].append(
        {"name": "Signal strength", "fixed": True}
    )
    model = pyhf.Workspace(example_spec_shapefactor).model()
    assert model_utils.unconstrained_parameter_count(model) == 2
Example #9
    def run_bkg(self):
        import json
        import copy
        import pyhf
        
        with open('{region}/BkgOnly.json'.format(region=self.region)) as fname:
            spec = json.load(fname)
            spec = copy.copy(spec)

        # switch the parameter of interest from any signal strength to the lumi parameter
        spec["measurements"][0]["config"]["poi"] = "lumi"
        
        # load the workspace
        ws = pyhf.Workspace(spec)

        # load the model
        model = ws.model(
            measurement_name="NormalMeasurement",
            modifier_settings={
                "normsys": {"interpcode": "code4"},
                "histosys": {"interpcode": "code4p"},
            },
        )

        data = ws.data(model)
        
        self.model = model
        self.data = data
        self.next(self.fit_bkg)
Example #10
def test_wspace_unexpected_keyword_argument(simplemodels_model_data):
    model, data = simplemodels_model_data
    workspace = pyhf.Workspace.build(model, data)
    spec = dict(workspace)

    with pytest.raises(pyhf.exceptions.Unsupported):
        pyhf.Workspace(spec, abc=True)
Example #11
def model_and_data(
        spec: Dict[str, Any],
        asimov: bool = False,
        with_aux: bool = True) -> Tuple[pyhf.pdf.Model, List[float]]:
    """Returns model and data for a ``pyhf`` workspace specification.

    Args:
        spec (Dict[str, Any]): a ``pyhf`` workspace specification
        asimov (bool, optional): whether to return the Asimov dataset, defaults to False
        with_aux (bool, optional): whether to also return auxdata, defaults to True

    Returns:
        Tuple[pyhf.pdf.Model, List[float]]:
            - a HistFactory-style model in ``pyhf`` format
            - the data (plus auxdata if requested) for the model
    """
    workspace = pyhf.Workspace(spec)
    model = workspace.model(
        modifier_settings={
            "normsys": {"interpcode": "code4"},
            "histosys": {"interpcode": "code4p"},
        }
    )  # use HistFactory InterpCode=4
    if not asimov:
        data = workspace.data(model, with_aux=with_aux)
    else:
        data = build_Asimov_data(model, with_aux=with_aux)
    return model, data
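A minimal usage sketch, not taken from the original sources: it assumes the model_and_data helper defined above is in scope and feeds it a hypothetical single-channel workspace spec.

# hedged sketch: hypothetical single-channel spec, model_and_data defined above
spec = {
    "channels": [
        {
            "name": "SR",
            "samples": [
                {
                    "name": "Signal",
                    "data": [10.0],
                    "modifiers": [{"name": "mu", "type": "normfactor", "data": None}],
                }
            ],
        }
    ],
    "measurements": [{"name": "meas", "config": {"poi": "mu", "parameters": []}}],
    "observations": [{"name": "SR", "data": [12.0]}],
    "version": "1.0.0",
}

model, data = model_and_data(spec)  # observed main data plus auxdata
model, data_no_aux = model_and_data(spec, with_aux=False)  # main data only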
Example #12
def validate(ws: Dict[str, Any]) -> None:
    """Validates a workspace with ``pyhf``.

    Args:
        ws (Dict[str, Any]): the workspace to validate
    """
    pyhf.Workspace(ws)
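A hedged sketch, not from the original sources: pyhf.Workspace raises pyhf.exceptions.InvalidSpecification when a spec fails schema validation, so the validate helper above propagates that exception for a malformed workspace.

# hedged sketch: assumes the validate helper above is in scope
import pytest
import pyhf

with pytest.raises(pyhf.exceptions.InvalidSpecification):
    validate({"channels": []})  # missing measurements/observations/version keys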
Example #13
def validate(ws):
    """validate a workspace

    Args:
        ws (dict): pyhf-compatible HistFactory workspace
    """
    pyhf.Workspace(ws)
Example #14
def test_combine_outfile(tmpdir, script_runner):
    temp_1 = tmpdir.join("parsed_output.json")
    temp_2 = tmpdir.join("renamed_output.json")
    command = f'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {temp_1.strpath:s} --hide-progress'
    ret = script_runner.run(*shlex.split(command))
    assert ret.success

    rename_channels = {'channel1': 'channel2'}
    rename_measurements = {
        'ConstExample': 'OtherConstExample',
        'LogNormExample': 'OtherLogNormExample',
        'GaussExample': 'OtherGaussExample',
        'GammaExample': 'OtherGammaExample',
    }

    _opts_channels = ''.join(
        ' -c ' + ' '.join(item) for item in rename_channels.items()
    )
    _opts_measurements = ''.join(
        ' --measurement ' + ' '.join(item) for item in rename_measurements.items()
    )
    command = f"pyhf rename {temp_1.strpath:s} {_opts_channels:s} {_opts_measurements:s} --output-file {temp_2.strpath:s}"
    ret = script_runner.run(*shlex.split(command))
    assert ret.success

    tempout = tmpdir.join("combined_output.json")
    command = f'pyhf combine {temp_1.strpath:s} {temp_2.strpath:s} --output-file {tempout.strpath:s}'
    ret = script_runner.run(*shlex.split(command))
    assert ret.success

    combined_spec = json.loads(tempout.read())
    combined_ws = pyhf.Workspace(combined_spec)
    assert combined_ws.channels == ['channel1', 'channel2']
    assert len(combined_ws.measurement_names) == 8
Example #15
def test_model_and_data(example_spec):
    model, data = model_tools.model_and_data(example_spec)
    assert model.spec["channels"] == example_spec["channels"]
    assert model.config.modifier_settings == {
        "normsys": {"interpcode": "code4"},
        "histosys": {"interpcode": "code4p"},
    }
    assert data == [691, 1.0]

    # requesting Asimov dataset
    # TODO: request Asimov dataset by setting asimov=True
    # TODO: should return [112.429786, 1.0]

    # TODO: Need to test overloaded method as well

    # test handing a workspace instead of JSON
    model, data = model_tools.model_and_data(pyhf.Workspace(example_spec))
    assert model.spec["channels"] == example_spec["channels"]
    assert model.config.modifier_settings == {
        "normsys": {"interpcode": "code4"},
        "histosys": {"interpcode": "code4p"},
    }
    assert data == [691, 1.0]

    # without auxdata
    model, data = model_tools.model_and_data(example_spec, with_aux=False)
    assert data == [691]
Example #16
def test_patchset_apply(datadir):
    patchset = pyhf.PatchSet(
        json.load(open(datadir.join('example_patchset.json'))))
    ws = pyhf.Workspace(json.load(open(datadir.join('example_bkgonly.json'))))
    with mock.patch('pyhf.patchset.PatchSet.verify') as m:
        assert m.call_count == 0
        assert patchset.apply(ws, 'patch_channel1_signal_syst1')
        assert m.call_count == 1
Example #17
def test_build_Asimov_data(example_spec):
    ws = pyhf.Workspace(example_spec)
    model = ws.model()
    assert model_utils.build_Asimov_data(model) == [51.839756, 1]

    # without auxdata
    assert model_utils.build_Asimov_data(model, with_aux=False) == [51.839756]

    # respect nominal settings for normfactors
    example_spec["measurements"][0]["config"]["parameters"].append(
        {"name": "Signal strength", "inits": [2.0]}
    )
    ws = pyhf.Workspace(example_spec)
    model = ws.model()
    assert model_utils.build_Asimov_data(model, with_aux=False) == [103.679512]
Example #18
def test_workspace_without_validation(mocker, simplemodels_model_data):
    model, data = simplemodels_model_data

    mocker.patch('pyhf.utils.validate')
    ws = pyhf.Workspace.build(model, data, validate=False)
    assert pyhf.utils.validate.called is False

    pyhf.Workspace(dict(ws), validate=False)
    assert pyhf.utils.validate.called is False
Example #19
def test_get_asimov_parameters(example_spec, example_spec_shapefactor):
    model = pyhf.Workspace(example_spec).model()
    pars = model_utils.get_asimov_parameters(model)
    assert np.allclose(pars, [1.0, 1.0])

    model = pyhf.Workspace(example_spec_shapefactor).model()
    pars = model_utils.get_asimov_parameters(model)
    assert np.allclose(pars, [1.0, 1.0, 1.0])

    # respect nominal settings for normfactors
    example_spec["measurements"][0]["config"]["parameters"].append(
        {"name": "Signal strength", "inits": [2.0]}
    )
    model = pyhf.Workspace(example_spec).model()
    pars = model_utils.get_asimov_parameters(model)
    assert np.allclose(pars, [1.0, 2.0])
Example #20
def compare_nuisance(root_workspace, pyhf_json):
    # Get the root nuisance params
    infile = ROOT.TFile.Open(root_workspace)
    workspace = infile.Get("combined")
    mc = workspace.obj("ModelConfig")

    def exhaust_argset(s):
        it = s.fwdIterator()
        while True:
            n = it.next()
            if not n:
                break
            yield n

    pars = [
        x.GetName() for x in exhaust_argset(mc.GetNuisanceParameters())
    ] + [x.GetName() for x in exhaust_argset(mc.GetParametersOfInterest())]

    # Replace some strings to match root nuisance param names to pyhf naming scheme
    pars_root = [
        sub.replace("alpha_", "")
        .replace("gamma_stat_", "staterror_")
        .replace("lumi", "Lumi")
        .replace("_bin", "")
        for sub in pars
    ]

    # Get pyhf nuisance params
    ws = pyhf.Workspace(json.load(open(pyhf_json)))
    model = ws.model()

    pars_pyhf = []
    for k, v in model.config.par_map.items():
        sl = v["slice"]
        npars = sl.stop - sl.start
        if npars > 1 or "staterror" in k:
            for i in range(npars):
                pars_pyhf.append(f"{k}_{i}")
        else:
            pars_pyhf.append(k)

    # Compare the nuisance params
    nuisance_dict = {"root": pars_root, "pyhf": pars_pyhf}

    unique_dict = {"root": [], "pyhf": []}

    unique_dict["pyhf"] = set(nuisance_dict["pyhf"]) - set(
        nuisance_dict["root"])
    unique_dict["root"] = set(nuisance_dict["root"]) - set(
        nuisance_dict["pyhf"])

    print("Nuisance params unique to pyhf:")
    for param in unique_dict["pyhf"]:
        print(param)

    print("\nNuisance params unique to root:")
    for param in unique_dict["root"]:
        print(param)
Example #21
def test_prune_outfile(tmpdir, script_runner):
    temp = tmpdir.join("parsed_output.json")
    command = f'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {temp.strpath:s} --hide-progress'
    ret = script_runner.run(*shlex.split(command))
    assert ret.success

    tempout = tmpdir.join("prune_output.json")
    command = f'pyhf prune -m staterror_channel1 --measurement GammaExample {temp.strpath:s} --output-file {tempout.strpath:s}'
    ret = script_runner.run(*shlex.split(command))
    assert ret.success

    spec = json.loads(temp.read())
    ws = pyhf.Workspace(spec)
    assert 'GammaExample' in ws.measurement_names
    assert 'staterror_channel1' in ws.parameters
    pruned_spec = json.loads(tempout.read())
    pruned_ws = pyhf.Workspace(pruned_spec)
    assert 'GammaExample' not in pruned_ws.measurement_names
    assert 'staterror_channel1' not in pruned_ws.parameters
Example #22
def test_workspace_poiless(datadir):
    """
    Test that a workspace with a measurement with empty POI string is treated as POI-less
    """
    spec = json.load(open(datadir.join("poiless.json")))
    ws = pyhf.Workspace(spec)
    model = ws.model()

    assert model.config.poi_name is None
    assert model.config.poi_index is None
Example #23
def test__get_channel_bounds_indices(example_spec, example_spec_multibin):
    model = pyhf.Workspace(example_spec).model()
    indices = model_tools._get_channel_bounds_indices(model)
    assert indices == []

    model = pyhf.Workspace(example_spec_multibin).model()
    indices = model_tools._get_channel_bounds_indices(model)
    assert indices == [2]

    # add extra channel to model to test three channels (two indices needed)
    three_channel_model = copy.deepcopy(example_spec_multibin)
    extra_channel = copy.deepcopy(three_channel_model["channels"][0])
    extra_channel["name"] = "region_3"
    extra_channel["samples"][0]["modifiers"][0]["name"] = "staterror_region_3"
    three_channel_model["channels"].append(extra_channel)
    three_channel_model["observations"].append({"data": [35, 8], "name": "region_3"})
    model = pyhf.Workspace(three_channel_model).model()
    indices = model_tools._get_channel_bounds_indices(model)
    assert indices == [2, 3]
Example #24
def test__yields_per_bin(example_spec_multibin, example_spec_with_background):
    # multiple channels
    model = pyhf.Workspace(example_spec_multibin).model()
    yields = [[[25.0, 5.0]], [[8.0]]]
    total_stdev = [[5.0, 2.0], [1.0]]
    data = [[35, 8], [10]]

    yield_table = tabulate._yields_per_bin(model, yields, total_stdev, data)
    assert yield_table == [
        {
            "sample": "Signal",
            "region_1\nbin 1": "25.00",
            "region_1\nbin 2": "5.00",
            "region_2\nbin 1": "8.00",
        },
        {
            "sample": "total",
            "region_1\nbin 1": "25.00 \u00B1 5.00",
            "region_1\nbin 2": "5.00 \u00B1 2.00",
            "region_2\nbin 1": "8.00 \u00B1 1.00",
        },
        {
            "sample": "data",
            "region_1\nbin 1": "35.00",
            "region_1\nbin 2": "8.00",
            "region_2\nbin 1": "10.00",
        },
    ]

    # multiple samples
    model = pyhf.Workspace(example_spec_with_background).model()
    yields = [[[150.0], [50.0]]]
    total_stdev = [[8.60]]
    data = [[160]]

    yield_table = tabulate._yields_per_bin(model, yields, total_stdev, data)
    assert yield_table == [
        {"sample": "Background", "Signal Region\nbin 1": "150.00"},
        {"sample": "Signal", "Signal Region\nbin 1": "50.00"},
        {"sample": "total", "Signal Region\nbin 1": "200.00 \u00B1 8.60"},
        {"sample": "data", "Signal Region\nbin 1": "160.00"},
    ]
Example #25
def test__yields_per_channel(example_spec_multibin, example_spec_with_background):
    # multiple channels
    model = pyhf.Workspace(example_spec_multibin).model()
    yields = [[30], [8.0]]
    total_stdev = [5.39, 1.0]
    data = [43, 10]

    yield_table = tabulate._yields_per_channel(model, yields, total_stdev, data)
    assert yield_table == [
        {
            "sample": "Signal",
            "region_1": "30.00",
            "region_2": "8.00",
        },
        {
            "sample": "total",
            "region_1": "30.00 \u00B1 5.39",
            "region_2": "8.00 \u00B1 1.00",
        },
        {
            "sample": "data",
            "region_1": "43.00",
            "region_2": "10.00",
        },
    ]

    # multiple samples
    model = pyhf.Workspace(example_spec_with_background).model()
    yields = [[150.0, 50]]
    total_stdev = [8.60]
    data = [160]

    yield_table = tabulate._yields_per_channel(model, yields, total_stdev, data)
    assert yield_table == [
        {"sample": "Background", "Signal Region": "150.00"},
        {"sample": "Signal", "Signal Region": "50.00"},
        {"sample": "total", "Signal Region": "200.00 \u00B1 8.60"},
        {"sample": "data", "Signal Region": "160.00"},
    ]
Example #26
def test_wspace_immutable(simplemodels_model_data):
    model, data = simplemodels_model_data
    workspace = pyhf.Workspace.build(model, data)
    spec = dict(workspace)

    ws = pyhf.Workspace(spec)
    model = ws.model()
    before = model.config.suggested_init()
    spec["measurements"][0]["config"]["parameters"][0]["inits"] = [1.5]

    model = ws.model()
    after = model.config.suggested_init()

    assert before == after
Example #27
def fit(spec):
    workspace = pyhf.Workspace(spec)
    model = workspace.model()
    data = workspace.data(model)

    pyhf.set_backend("numpy", pyhf.optimize.minuit_optimizer(verbose=True))
    result = pyhf.infer.mle.fit(data, model, return_uncertainties=True)

    bestfit = result[:, 0]
    uncertainty = result[:, 1]
    labels = get_parameter_names(model)

    print_results(bestfit, uncertainty, labels)
    return bestfit, uncertainty, labels
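A self-contained sketch, not from the original source: the fit function above depends on project-specific helpers (get_parameter_names, print_results), so this hypothetical variant runs the same maximum likelihood fit on one of pyhf's built-in simple models (pyhf >= 0.6.3 API assumed).

# hedged sketch: same fit call as above, on a built-in simple model
import pyhf

model = pyhf.simplemodels.uncorrelated_background(
    signal=[5.0], bkg=[50.0], bkg_uncertainty=[7.0]
)
data = [55.0] + model.config.auxdata  # observed main data plus auxdata

pyhf.set_backend("numpy", pyhf.optimize.minuit_optimizer(verbose=True))
result = pyhf.infer.mle.fit(data, model, return_uncertainties=True)
bestfit, uncertainty = result[:, 0], result[:, 1]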
Example #28
def test_calculate_stdev(example_spec, example_spec_multibin):
    model = pyhf.Workspace(example_spec).model()
    parameters = np.asarray([1.05, 0.95])
    uncertainty = np.asarray([0.1, 0.1])
    corr_mat = np.asarray([[1.0, 0.2], [0.2, 1.0]])

    total_stdev_bin, total_stdev_chan = model_utils.calculate_stdev(
        model, parameters, uncertainty, corr_mat)
    assert np.allclose(total_stdev_bin, [[8.03150606]])
    assert np.allclose(total_stdev_chan, [8.03150606])

    # pre-fit
    parameters = np.asarray([1.0, 1.0])
    uncertainty = np.asarray([0.0495665682, 0.0])
    diag_corr_mat = np.diag([1.0, 1.0])
    total_stdev_bin, total_stdev_chan = model_utils.calculate_stdev(
        model, parameters, uncertainty, diag_corr_mat)
    assert np.allclose(total_stdev_bin, [[2.56754823]])  # the staterror
    assert np.allclose(total_stdev_chan, [2.56754823])

    # multiple channels, bins, staterrors
    model = pyhf.Workspace(example_spec_multibin).model()
    parameters = np.asarray([0.9, 1.05, 1.3, 0.95])
    uncertainty = np.asarray([0.1, 0.05, 0.3, 0.1])
    corr_mat = np.asarray([
        [1.0, 0.1, 0.2, 0.1],
        [0.1, 1.0, 0.2, 0.3],
        [0.2, 0.2, 1.0, 0.3],
        [0.1, 0.3, 0.3, 1.0],
    ])
    total_stdev_bin, total_stdev_chan = model_utils.calculate_stdev(
        model, parameters, uncertainty, corr_mat)
    expected_stdev_bin = [[8.056054, 1.670629], [2.775377]]
    expected_stdev_chan = [9.585327, 2.775377]
    for i_reg in range(2):
        assert np.allclose(total_stdev_bin[i_reg], expected_stdev_bin[i_reg])
        assert np.allclose(total_stdev_chan[i_reg], expected_stdev_chan[i_reg])
Example #29
def test_import_normfactor_bounds():
    parsed_xml = pyhf.readxml.parse(
        'validation/xmlimport_input2/config/example.xml',
        'validation/xmlimport_input2')

    ws = pyhf.Workspace(parsed_xml)
    assert ('SigXsecOverSM', 'normfactor') in ws.modifiers
    parameters = [
        p for p in ws.get_measurement(
            measurement_name='GaussExample')['config']['parameters']
        if p['name'] == 'SigXsecOverSM'
    ]
    assert len(parameters) == 1
    parameter = parameters[0]
    assert parameter['bounds'] == [[0, 10]]
Example #30
def test_reproducible_model_spec():
    ws = {
        "channels": [
            {
                "name": "SR",
                "samples": [
                    {
                        "data": [10.0],
                        "modifiers": [
                            {"data": None, "name": "mu", "type": "normfactor"},
                        ],
                        "name": "Signal",
                    }
                ],
            }
        ],
        "measurements": [
            {
                "config": {
                    "parameters": [
                        {"bounds": [[0, 5]], "inits": [1], "name": "mu"}
                    ],
                    "poi": "mu",
                },
                "name": "minimal_example",
            }
        ],
        "observations": [{"data": [12], "name": "SR"}],
        "version": "1.0.0",
    }
    workspace = pyhf.Workspace(ws)
    model_from_ws = workspace.model()

    assert model_from_ws.spec['parameters'] == [{
        'bounds': [[0, 5]],
        'inits': [1],
        'name': 'mu'
    }]
    assert pyhf.Model(model_from_ws.spec)