Example #1
import numpy as np
import pytest

from xlogit import MixedLogit


def test_log_likelihood():
    """
    Computes the log-likelihood "by hand" for a simple example and ensures
    that the value returned by xlogit is the same.
    """
    # X, y, N, J, K and R are module-level fixtures of the test suite:
    # the attribute matrix, the choice indicators, and the number of
    # individuals, alternatives, attributes, and random draws, respectively.
    P = 1  # Without panel data: a single choice situation per individual
    betas = np.array([.1, .1, .1, .1])  # [mean_1, mean_2, sd_1, sd_2]
    X_, y_ = X.reshape(N, P, J, K), y.reshape(N, P, J, 1)

    # Compute log likelihood using xlogit
    model = MixedLogit()
    model._rvidx, model._rvdist = np.array([True, True]), np.array(['n', 'n'])
    draws = model._get_halton_draws(N, R, K)  # (N,Kr,R)
    panel_info = np.ones((N, P))
    obtained_loglik, _ = model._loglik_gradient(betas, X_, y_, panel_info,
                                                draws, None, None)

    # Compute expected log likelihood "by hand"
    Br = betas[None, [0, 1], None] + draws * betas[None, [2, 3], None]
    eXB = np.exp(np.einsum('npjk,nkr -> npjr', X_, Br))
    p = eXB / np.sum(eXB, axis=2, keepdims=True)
    expected_loglik = -np.sum(
        np.log((y_ * p).sum(axis=2).prod(axis=1).mean(axis=1)))

    assert obtained_loglik == pytest.approx(expected_loglik)
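Both the hand computation and xlogit's `_loglik_gradient` evaluate the standard simulated log-likelihood of a mixed logit. Writing x_{npj} for the attributes of alternative j faced by individual n in choice situation p, and beta_{nr} = mean + sd * draw_{nr} for the r-th draw of the normally distributed coefficients (the `Br` array above), the value being compared is the negative of

SLL = \sum_{n=1}^{N} \ln\!\left[ \frac{1}{R} \sum_{r=1}^{R}
      \prod_{p=1}^{P} \prod_{j=1}^{J}
      \left( \frac{\exp(x_{npj}^\top \beta_{nr})}
                  {\sum_{l=1}^{J} \exp(x_{npl}^\top \beta_{nr})}
      \right)^{y_{npj}} \right]

The inner fraction is the softmax step (`eXB / eXB.sum(axis=2)`), the exponent y_{npj} picks out the chosen alternative (`(y_ * p).sum(axis=2)`, since y is one-hot), the product over p is `prod(axis=1)`, and the average over draws is `mean(axis=1)`. The notation here is mine, chosen to match the array axes in the code; with P = 1 the panel product collapses to a single term.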
Example #2
import numpy as np
import pytest

from xlogit import MixedLogit


def test_log_likelihood():
    """
    Computes the log-likelihood "by hand" for a simple example and ensures
    that the value returned by xlogit is the same.
    """
    # X, y, N, J, K and R are module-level fixtures of the test suite.
    betas = np.array([.1, .1, .1, .1])  # [mean_1, mean_2, sd_1, sd_2]
    X_, y_ = X.reshape(N, J, K), y.reshape(N, J, 1)

    # Compute log likelihood using xlogit
    model = MixedLogit()
    model._rvidx, model._rvdist = np.array([True, True]), np.array(['n', 'n'])
    draws = model._generate_halton_draws(N, R, K)  # (N,Kr,R)
    obtained_loglik = model._loglik_gradient(
        betas, X_, y_, None, draws, None, None,
        {'samples': N, 'draws': R}, return_gradient=False)

    # Compute expected log likelihood "by hand"
    Br = betas[None, [0, 1], None] + draws * betas[None, [2, 3], None]
    eXB = np.exp(np.einsum('njk,nkr -> njr', X_, Br))
    p = eXB / np.sum(eXB, axis=1, keepdims=True)
    expected_loglik = -np.sum(np.log((y_ * p).sum(axis=1).mean(axis=1)))

    assert expected_loglik == pytest.approx(obtained_loglik)
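The two snippets target different versions of xlogit's internal API: Example #1 calls `_get_halton_draws` and passes an explicit `panel_info` mask, while Example #2 calls `_generate_halton_draws`, passes `None` for the panel information, and supplies batch sizes through a dict together with `return_gradient=False`. Both rely on module-level fixtures (X, y, N, J, K, R) defined elsewhere in the test file. Below is a minimal sketch of such fixtures, with hypothetical toy values chosen only so the shapes line up; the actual test suite defines its own data.

import numpy as np

# Hypothetical fixtures: 3 individuals, 2 alternatives, 2 attributes, 5 draws.
N, J, K, R = 3, 2, 2, 5
X = np.array([[2., 1.],   # N * J rows, one per (individual, alternative),
              [1., 3.],   # each holding K attribute values
              [3., 1.],
              [2., 4.],
              [2., 1.],
              [2., 4.]])
y = np.array([0, 1,       # one-hot choice indicator per (individual, alternative)
              0, 1,
              0, 1])

With these in scope, either test body runs as written: X.reshape(N, J, K) yields the (3, 2, 2) attribute array used in Example #2, and X.reshape(N, P, J, K) with P = 1 yields the (3, 1, 2, 2) panel variant used in Example #1.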