Example #1
def test_gradients(chunksize):
    zfit.run.chunking.active = True
    zfit.run.chunking.max_n_points = chunksize

    param1 = zfit.Parameter("param1", 1.)
    param2 = zfit.Parameter("param2", 2.)

    gauss1 = Gauss(param1, 4, obs=obs1)
    gauss1.set_norm_range((-5, 5))
    gauss2 = Gauss(param2, 5, obs=obs1)
    gauss2.set_norm_range((-5, 5))

    data1 = zfit.Data.from_tensor(obs=obs1, tensor=ztf.constant(1., shape=(100,)))
    data1.set_data_range((-5, 5))
    data2 = zfit.Data.from_tensor(obs=obs1, tensor=ztf.constant(1., shape=(100,)))
    data2.set_data_range((-5, 5))

    nll = UnbinnedNLL(model=[gauss1, gauss2], data=[data1, data2])

    gradient1 = nll.gradients(params=param1)
    assert zfit.run(gradient1) == zfit.run(tf.gradients(ys=nll.value(), xs=param1))
    gradient2 = nll.gradients(params=[param2, param1])
    both_gradients_true = zfit.run(tf.gradients(ys=nll.value(), xs=[param2, param1]))
    assert zfit.run(gradient2) == both_gradients_true
    gradient3 = nll.gradients()
    assert frozenset(zfit.run(gradient3)) == frozenset(both_gradients_true)
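Example #1 above validates nll.gradients by comparing it against tf.gradients evaluated on the same loss. For comparison only, here is a minimal, self-contained sketch of the same consistency idea in eager TensorFlow with tf.GradientTape, using a toy quadratic loss with a known analytic gradient instead of a zfit UnbinnedNLL; the variables a and b below are illustrative and not part of the test above.

import pytest
import tensorflow as tf

# Toy stand-in for the loss: f(a, b) = a**2 + 3*b, whose exact gradient is (2*a, 3).
a = tf.Variable(1.0)
b = tf.Variable(2.0)

with tf.GradientTape() as tape:
    loss = a ** 2 + 3.0 * b

grad_a, grad_b = tape.gradient(loss, [a, b])
assert grad_a.numpy() == pytest.approx(2.0)  # d(loss)/da = 2*a at a = 1
assert grad_b.numpy() == pytest.approx(3.0)  # d(loss)/db = 3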
Example #2
def test_gradients(chunksize):
    from numdifftools import Gradient
    zfit.run.chunking.active = True
    zfit.run.chunking.max_n_points = chunksize

    initial1 = 1.
    initial2 = 2
    param1 = zfit.Parameter("param1", initial1)
    param2 = zfit.Parameter("param2", initial2)

    gauss1 = Gauss(param1, 4, obs=obs1)
    gauss1.set_norm_range((-5, 5))
    gauss2 = Gauss(param2, 5, obs=obs1)
    gauss2.set_norm_range((-5, 5))

    data1 = zfit.Data.from_tensor(obs=obs1,
                                  tensor=z.constant(1., shape=(100, )))
    data1.set_data_range((-5, 5))
    data2 = zfit.Data.from_tensor(obs=obs1,
                                  tensor=z.constant(1., shape=(100, )))
    data2.set_data_range((-5, 5))

    nll = UnbinnedNLL(model=[gauss1, gauss2], data=[data1, data2])

    def loss_func(values):
        for val, param in zip(values, nll.get_cache_deps(only_floating=True)):
            param.set_value(val)
        return nll.value().numpy()

    # theoretical, numerical = tf.test.compute_gradient(loss_func, list(params))
    gradient1 = nll.gradients(params=param1)
    gradient_func = Gradient(loss_func)
    # gradient_func = lambda *args, **kwargs: list(gradient_func_numpy(*args, **kwargs))
    assert gradient1[0].numpy() == pytest.approx(
        gradient_func([param1.numpy()]))
    param1.set_value(initial1)
    param2.set_value(initial2)
    params = [param2, param1]
    gradient2 = nll.gradients(params=params)
    both_gradients_true = list(reversed(list(
        gradient_func([initial1, initial2]))))  # because param2, then param1
    assert [g.numpy() for g in gradient2] == pytest.approx(both_gradients_true)

    param1.set_value(initial1)
    param2.set_value(initial2)
    gradient3 = nll.gradients()
    assert frozenset([g.numpy() for g in gradient3]) == pytest.approx(
        frozenset(both_gradients_true))
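Example #2 replaces the TensorFlow reference with numdifftools.Gradient, a numerical differentiator, as the independent check. Below is a short, self-contained sketch of what that numerical reference computes, shown on an illustrative function with a known gradient (f is not taken from the test):

import numpy as np
import numdifftools as nd


def f(x):
    # f(x0, x1) = x0**2 + 3*x1, so the exact gradient is (2*x0, 3).
    return x[0] ** 2 + 3.0 * x[1]


# nd.Gradient(f) returns a callable that evaluates the numerical gradient of f.
grad = nd.Gradient(f)([1.0, 2.0])
assert np.allclose(grad, [2.0, 3.0])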
Example #3
File: test_loss.py  Project: olantwin/zfit
def test_gradients():
    param1 = Parameter("param111", 1.)
    param2 = Parameter("param222", 2.)

    gauss1 = Gauss(param1, 4, obs=obs1)
    gauss1.set_norm_range((-5, 5))
    gauss2 = Gauss(param2, 5, obs=obs1)
    gauss2.set_norm_range((-5, 5))

    data1 = zfit.data.Data.from_tensor(obs=obs1, tensor=ztf.constant(1., shape=(100,)))
    data1.set_data_range((-5, 5))
    data2 = zfit.data.Data.from_tensor(obs=obs1, tensor=ztf.constant(1., shape=(100,)))
    data2.set_data_range((-5, 5))

    nll = UnbinnedNLL(model=[gauss1, gauss2], data=[data1, data2])

    gradient1 = nll.gradients(params=param1)
    assert zfit.run(gradient1) == zfit.run(tf.gradients(nll.value(), param1))
    gradient2 = nll.gradients(params=[param2, param1])
    both_gradients_true = zfit.run(tf.gradients(nll.value(), [param2, param1]))
    assert zfit.run(gradient2) == both_gradients_true
    gradient3 = nll.gradients()
    assert frozenset(zfit.run(gradient3)) == frozenset(both_gradients_true)