Example 1
import pyhf
import tensorflow as tf


# this helper targets the older pyhf API: a top-level pyhf.runOnePoint and
# the session-based TensorFlow 1.x backend
def runOnePoint(pdf, data):
    if isinstance(pyhf.tensorlib, pyhf.tensor.tensorflow_backend):
        # TensorFlow 1.x keeps its state in a global graph and session,
        # so reset both before each run
        tf.reset_default_graph()
        pyhf.tensorlib.session = tf.Session()

    return pyhf.runOnePoint(1.0, data, pdf, pdf.config.suggested_init(),
                            pdf.config.suggested_bounds())
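This wrapper fixes the signal strength at 1.0 and runs the hypothesis test with the model's suggested initial parameters and bounds. A minimal usage sketch, assuming the hepdata_like simple-model builder used in Example 3; the single-bin counts below are placeholder values chosen only for illustration:

from pyhf.simplemodels import hepdata_like

# placeholder one-bin model: signal, background, background uncertainty
pdf = hepdata_like([12.0], [50.0], [7.0])
data = [55.0] + pdf.config.auxdata

# the last two entries of the returned tuple are the observed CLs value
# and the list of expected CLs values (see Example 2)
CLs_obs, CLs_exp = runOnePoint(pdf, data)[-2:]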
Example 2
import pyhf


def validate_runOnePoint(pdf, data, mu_test, expected_result, tolerance=1e-5):
    init_pars = pdf.config.suggested_init()
    par_bounds = pdf.config.suggested_bounds()

    # the last two entries of the runOnePoint result are the observed CLs
    # value and the list of expected CLs values
    CLs_obs, CLs_exp = pyhf.runOnePoint(mu_test, data, pdf, init_pars,
                                        par_bounds)[-2:]
    # check the relative difference against the tolerance
    assert abs(CLs_obs - expected_result['obs']) / \
        expected_result['obs'] < tolerance
    for result, expected in zip(CLs_exp, expected_result['exp']):
        assert abs(result - expected) / expected < tolerance
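A sketch of how this validator might be called, with pdf and data built as in the snippet after Example 1: expected_result carries the reference observed CLs value under 'obs' and the list of expected CLs values under 'exp' (assumed here to be the usual five-value expected band). The numbers below are placeholders, not real reference results:

expected = {
    'obs': 0.05,                            # placeholder observed CLs
    'exp': [0.01, 0.02, 0.05, 0.10, 0.20],  # placeholder expected CLs band
}
validate_runOnePoint(pdf, data, mu_test=1.0, expected_result=expected)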
Example 3
import numpy as np
import tensorflow as tf

import pyhf
# hepdata_like builds a simple signal-plus-background model; in this version
# of pyhf it lives in pyhf.simplemodels
from pyhf.simplemodels import hepdata_like
# generate_source_static is assumed to be a helper defined elsewhere in the
# test module (see the sketch after this example)


def test_runOnePoint_q_mu(n_bins,
                          tolerance={
                              'numpy': 1e-02,
                              'tensors': 5e-03
                          }):
    """
    Check that the different backends all compute a test statistic
    that is within a specific tolerance of each other.

    Args:
        n_bins: `list` of number of bins given by pytest parameterization
        tolerance: `dict` of the maximum relative differences allowed
                    between the test statistics of the different backends

    Returns:
        None
    """

    source = generate_source_static(n_bins)
    pdf = hepdata_like(source['bindata']['sig'], source['bindata']['bkg'],
                       source['bindata']['bkgerr'])
    data = source['bindata']['data'] + pdf.config.auxdata

    backends = [
        pyhf.tensor.numpy_backend(poisson_from_normal=True),
        pyhf.tensor.tensorflow_backend(session=tf.Session()),
        pyhf.tensor.pytorch_backend(),
        # mxnet_backend()
    ]

    test_statistic = []
    for backend in backends:
        pyhf.set_backend(backend)

        if isinstance(pyhf.tensorlib, pyhf.tensor.tensorflow_backend):
            # TensorFlow 1.x keeps its state in a global graph and session,
            # so reset both before each run
            tf.reset_default_graph()
            pyhf.tensorlib.session = tf.Session()

        # the first entry of the runOnePoint result is the test statistic q_mu
        q_mu = pyhf.runOnePoint(1.0, data, pdf, pdf.config.suggested_init(),
                                pdf.config.suggested_bounds())[0]
        test_statistic.append(q_mu)

    # ratio of each backend's test statistic to the NumPy/SciPy result
    test_statistic = np.array(test_statistic)
    numpy_ratio = np.divide(test_statistic, test_statistic[0])
    numpy_ratio_delta_unity = np.absolute(np.subtract(numpy_ratio, 1))

    # ratio of the tensor-library backends (TensorFlow and PyTorch) to each other
    tensors_ratio = np.divide(test_statistic[1], test_statistic[2])
    tensors_ratio_delta_unity = np.absolute(np.subtract(tensors_ratio, 1))

    assert (numpy_ratio_delta_unity < tolerance['numpy']).all(), \
        'Ratio to NumPy+SciPy exceeded tolerance of {}: {}'.format(
            tolerance['numpy'], numpy_ratio_delta_unity.tolist())
    assert (tensors_ratio_delta_unity < tolerance['tensors']).all(), \
        'Ratio between tensor backends exceeded tolerance of {}: {}'.format(
            tolerance['tensors'], tensors_ratio_delta_unity.tolist())
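The n_bins argument comes from pytest parameterization and generate_source_static is a helper local to the test module, neither of which is shown above. A sketch of what they could look like; the decorator's bin counts and the helper's bin contents are assumptions for illustration, not the project's actual fixtures:

import pytest


# assumed parameterization; the real test suite may use different bin counts
@pytest.mark.parametrize('n_bins', [1, 10, 50])
def test_runOnePoint_q_mu(n_bins,
                          tolerance={
                              'numpy': 1e-02,
                              'tensors': 5e-03
                          }):
    ...  # body as in Example 3 above


# assumed shape of generate_source_static: flat, constant-valued bins for the
# observed data, background, background uncertainty, and signal
def generate_source_static(n_bins):
    return {
        'bindata': {
            'data': [120.0] * n_bins,
            'bkg': [100.0] * n_bins,
            'bkgerr': [10.0] * n_bins,
            'sig': [30.0] * n_bins,
        }
    }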