Пример #1
0
def test_update_alpha():
    """After a Newton update of alpha, the gradient should vanish."""
    ips = init_ips(M, K, alpha, docs)

    new_alpha = update_alpha(M, ips, alpha)

    # At convergence the gradient w.r.t. alpha is numerically zero.
    grad = gradient_g(M, new_alpha, ips)
    assert_array_almost_equal(grad, np.zeros(new_alpha.shape))
Пример #2
0
def test_init_ips():
    """init_ips should seed each entry as alpha component + doc-size / K."""
    result = init_ips(M, K, alpha, docs)

    # Row d, column j is expected to be (j + 1) + (d + 2) / K.
    expected = np.asarray(
        [[col + (row + 2.) / K for col in (1, 2)] for row in xrange(3)],
        dtype=np.float64)
    assert_array_almost_equal(result, expected)
Пример #3
0
def test_m_step():
    """The M-step must not decrease the variational lower bound."""
    # Fix: removed the dead store `alpha_ = alpha`; it was overwritten by
    # the m_step result below before ever being read.
    ips = init_ips(M, K, alpha, docs)
    old_lb_val = lower_bound(ips, phi, alpha, beta, docs, V)

    # One M-step re-estimates alpha and beta from the variational params.
    alpha_, beta_ = m_step(ips, phi, alpha, docs, V)
    new_lb_val = lower_bound(ips, phi, alpha_, beta_, docs, V)

    assert_true(new_lb_val >= old_lb_val)
Пример #4
0
def test_gradiant_g():
    """Check gradient_g component-wise against the hand-written formula.

    NOTE(review): function name keeps the original "gradiant" spelling so
    test discovery and any external references stay intact.
    """
    ips = init_ips(M, K, alpha, docs)
    g = gradient_g(M, alpha, ips)

    psi_alpha_total = psi(alpha.sum())  # invariant across components
    for k in xrange(K):
        per_doc = [psi(ips[d, k]) - psi(ips[d, :].sum()) for d in xrange(M)]
        expected_k = M * (psi_alpha_total - psi(alpha[k])) + np.sum(per_doc)
        assert_array_almost_equal(expected_k, g[k])
Пример #5
0
def test_e_step_one_iter():
    """One E-step iteration should already drive the ips gradient near zero."""
    ips = init_ips(M, K, alpha, docs)
    # One (n_words x K) zero matrix per document, held in an object array.
    per_doc_phi = [np.zeros((docs[m].size, K), dtype=np.float64)
                   for m in xrange(M)]
    phi = np.array(per_doc_phi, dtype=np.object)

    # Not clear why a single iteration suffices for convergence, but it does.
    for _ in xrange(1):
        phi, ips, grad_ips = e_step_one_iter(alpha, beta,
                                             docs, phi, ips)
        print(grad_ips)

    assert_true(np.abs(grad_ips).max() <= 1e-5)
Пример #6
0
def test_e_step():
    """E-step sanity checks: ips moves, phi rows normalize, bound grows."""
    initial_ips = init_ips(M, K, alpha, docs)
    updated_ips, phi = e_step(alpha, beta, docs)

    # Very permissive check (hard to test exactly): every entry of ips
    # must actually have moved away from its initial value.
    assert_true(np.abs(updated_ips - initial_ips).min() >= 1e-5)

    # Each row of phi is a distribution over topics, so rows sum to one.
    for m in xrange(phi.size):
        assert_array_almost_equal(phi[m].sum(axis=1), 1)

    # Re-running the E-step must not decrease the lower bound.
    lb_before = lower_bound(initial_ips, phi, alpha, beta, docs, V)
    updated_ips, updated_phi = e_step(alpha, beta, docs)
    lb_after = lower_bound(updated_ips, updated_phi, alpha, beta, docs, V)

    assert_true(lb_after >= lb_before)
Пример #7
0
def test_lower_bound():
    """lower_bound should be non-positive and non-decreasing over EM."""
    # Fix: removed the dead store `ips_, phi_ = ips, phi`; both names were
    # overwritten by e_step on the first loop iteration before any read.
    ips = init_ips(M, K, alpha, docs)
    current_lb = lower_bound(ips, phi, alpha, beta, docs, V)

    # Hard to verify the exact value by hand, so check structural
    # properties instead.
    # 1st: it lower-bounds a log-likelihood, hence <= 0
    assert_true(current_lb <= 0)

    # 2nd: after training several iterations the value should be
    # increasing, as our goal is to maximize the lower bound
    alpha_, beta_ = alpha, beta
    for _ in xrange(10):
        ips_, phi_ = e_step(alpha_, beta_, docs)
        alpha_, beta_ = m_step(ips_, phi_, alpha_, docs, V)

        new_lb = lower_bound(ips_, phi_, alpha_, beta_, docs, V)
        assert_true(new_lb >= current_lb)

        current_lb = new_lb