Example #1

This test evaluates the same simple two-bin counting model under each
available tensor backend and checks that the log-pdf values agree.
# Imports assumed from the historical pyhf API used here: hepdata_like lived
# in pyhf.simplemodels and the backend classes under pyhf.tensor.
import numpy as np
import tensorflow as tf

import pyhf
from pyhf.simplemodels import hepdata_like
from pyhf.tensor import (mxnet_backend, numpy_backend, pytorch_backend,
                         tensorflow_backend)


def test_pdf_eval_2():
    tf_sess = tf.Session()
    backends = [
        numpy_backend(poisson_from_normal=True),
        pytorch_backend(),
        tensorflow_backend(session=tf_sess),
        mxnet_backend()
    ]

    # evaluate the log-pdf of the same model under each backend
    values = []
    for b in backends:
        pyhf.set_backend(b)

        source = {
            "binning": [2, -0.5, 1.5],
            "bindata": {
                "data": [120.0, 180.0],
                "bkg": [100.0, 150.0],
                "bkgerr": [10.0, 10.0],
                "sig": [30.0, 95.0]
            }
        }

        pdf = hepdata_like(source['bindata']['sig'], source['bindata']['bkg'],
                           source['bindata']['bkgerr'])
        data = source['bindata']['data'] + pdf.config.auxdata

        # logpdf returns a length-one tensor; keep its scalar value
        v1 = pdf.logpdf(pdf.config.suggested_init(), data)
        values.append(pyhf.tensorlib.tolist(v1)[0])

    # all backends should agree to within floating-point noise
    assert np.std(values) < 1e-6
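For comparison with current pyhf releases (>= 0.6), where hepdata_like was
renamed and backends are selected by name, the same check might be sketched
as follows; treat this as an illustration, not part of the original test:

import pyhf

# same two-bin counting experiment, built with the modern simplemodels API
model = pyhf.simplemodels.uncorrelated_background(
    signal=[30.0, 95.0], bkg=[100.0, 150.0], bkg_uncertainty=[10.0, 10.0])
data = [120.0, 180.0] + model.config.auxdata

for name in ['numpy', 'pytorch']:
    pyhf.set_backend(name)
    pars = model.config.suggested_init()
    print(pyhf.tensorlib.tolist(model.logpdf(pars, data)))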
Example #2

The same model evaluation, written against a pytest `backend` fixture and
compared to a reference value with pytest.approx.
# Imports assumed as in Example #1; the `backend` fixture comes from the test
# suite's conftest.py (a sketch is given after this example).
import pytest

import pyhf
from pyhf.simplemodels import hepdata_like


def test_pdf_eval_2(backend):
    source = {
        "binning": [2, -0.5, 1.5],
        "bindata": {
            "data": [120.0, 180.0],
            "bkg": [100.0, 150.0],
            "bkgerr": [10.0, 10.0],
            "sig": [30.0, 95.0],
        },
    }

    pdf = hepdata_like(source['bindata']['sig'], source['bindata']['bkg'],
                       source['bindata']['bkgerr'])
    data = source['bindata']['data'] + pdf.config.auxdata

    assert pytest.approx([-23.579605171119738],
                         rel=5e-5) == pyhf.tensorlib.tolist(
                             pdf.logpdf(pdf.config.suggested_init(), data))
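The `backend` fixture itself is not part of this listing; it comes from the
test suite's conftest.py. A minimal, hypothetical sketch of such a fixture,
reusing the historical backend constructors from Example #1:

import pytest

import pyhf
from pyhf.tensor import numpy_backend, pytorch_backend


# hypothetical conftest.py fixture: run the test once per tensor backend
@pytest.fixture(params=[numpy_backend(poisson_from_normal=True),
                        pytorch_backend()],
                ids=['numpy', 'pytorch'])
def backend(request):
    pyhf.set_backend(request.param)
    return request.param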
Example #3

Two related tests: a pytest-benchmark of hypotest() and a cross-backend
consistency check on the q_mu test statistic.
# Imports assumed as in Example #1; hypotest is pyhf.utils.hypotest (per the
# docstring below) and generate_source_static is a module-local helper (a
# sketch is given after this example).
import numpy as np
import tensorflow as tf

import pyhf
from pyhf.simplemodels import hepdata_like
from pyhf.utils import hypotest


def test_hypotest(benchmark, backend, n_bins):
    """
    Benchmark the performance of pyhf.utils.hypotest()
    for various numbers of bins and different backends.

    Args:
        benchmark: pytest benchmark
        backend: `pyhf` tensorlib given by pytest parameterization
        n_bins: number of bins given by pytest parameterization

    Returns:
        None
    """
    source = generate_source_static(n_bins)
    pdf = hepdata_like(source['bindata']['sig'], source['bindata']['bkg'],
                       source['bindata']['bkgerr'])
    data = source['bindata']['data'] + pdf.config.auxdata
    assert benchmark(hypotest, pdf, data)


def test_runOnePoint_q_mu(n_bins,
                          tolerance={
                              'numpy': 1e-02,
                              'tensors': 5e-03
                          }):
    """
    Check that the different backends all compute a test statistic
    that is within a specific tolerance of each other.

    Args:
        n_bins: number of bins given by pytest parameterization
        tolerance: `dict` of the maximum relative differences allowed
                   between the backends' test statistics

    Returns:
        None
    """

    source = generate_source_static(n_bins)
    pdf = hepdata_like(source['bindata']['sig'], source['bindata']['bkg'],
                       source['bindata']['bkgerr'])
    data = source['bindata']['data'] + pdf.config.auxdata

    backends = [
        pyhf.tensor.numpy_backend(poisson_from_normal=True),
        pyhf.tensor.tensorflow_backend(session=tf.Session()),
        pyhf.tensor.pytorch_backend(),
        # mxnet_backend()
    ]

    test_statistic = []
    for backend in backends:
        pyhf.set_backend(backend)

        if isinstance(pyhf.tensorlib, pyhf.tensor.tensorflow_backend):
            tf.reset_default_graph()
            pyhf.tensorlib.session = tf.Session()

        q_mu = pyhf.runOnePoint(1.0, data, pdf, pdf.config.suggested_init(),
                                pdf.config.suggested_bounds())[0]
        test_statistic.append(q_mu)

    # compare each backend's test statistic to the NumPy/SciPy result
    test_statistic = np.array(test_statistic)
    numpy_ratio = np.divide(test_statistic, test_statistic[0])
    numpy_ratio_delta_unity = np.absolute(np.subtract(numpy_ratio, 1))

    # compare tensor libraries to each other
    tensors_ratio = np.divide(test_statistic[1], test_statistic[2])
    tensors_ratio_delta_unity = np.absolute(np.subtract(tensors_ratio, 1))

    assert (numpy_ratio_delta_unity < tolerance['numpy']).all(), \
        'Ratio to NumPy+SciPy exceeded tolerance of {}: {}'.format(
            tolerance['numpy'], numpy_ratio_delta_unity.tolist())
    assert (tensors_ratio_delta_unity < tolerance['tensors']).all(), \
        'Ratio between tensor backends exceeded tolerance of {}: {}'.format(
            tolerance['tensors'], tensors_ratio_delta_unity.tolist())
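generate_source_static is a helper defined elsewhere in the benchmark module
and not shown in this listing. A hypothetical sketch of such a helper, tiling
the two-bin source from the earlier examples out to n_bins bins (the values
are illustrative assumptions, not the original implementation):

# hypothetical helper: build an n_bins-wide version of the counting-
# experiment source dict used throughout these tests
def generate_source_static(n_bins):
    return {
        'binning': [n_bins, -0.5, n_bins + 0.5],
        'bindata': {
            'data': [120.0] * n_bins,
            'bkg': [100.0] * n_bins,
            'bkgerr': [10.0] * n_bins,
            'sig': [30.0] * n_bins,
        },
    }

Note that runOnePoint and the session-based TensorFlow backend date from
early pyhf releases; in later releases this functionality is exposed as
pyhf.infer.hypotest.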