def test_m_step():
    """Check that a single M-step does not decrease the variational lower bound.

    Only (alpha, beta) are updated; the variational parameters ips/phi are
    deliberately held fixed so the test isolates the effect of the M-step.
    Relies on module-level fixtures: M, K, alpha, beta, phi, docs, V.
    """
    ips = init_ips(M, K, alpha, docs)
    old_lb_val = lower_bound(ips, phi, alpha, beta, docs, V)
    # M-step re-estimates the model parameters from the fixed ips/phi.
    # (Removed a dead `alpha_ = alpha` assignment that was immediately
    # overwritten by this call.)
    alpha_, beta_ = m_step(ips, phi, alpha, docs, V)
    new_lb_val = lower_bound(ips, phi, alpha_, beta_, docs, V)
    # M-step maximizes the bound w.r.t. (alpha, beta), so it must not drop.
    assert_true(new_lb_val >= old_lb_val)
def test_lower_bound():
    """Sanity-check the variational lower bound without a hand-computed value.

    Two properties are asserted instead of an exact number:
      1. the bound is non-positive at initialization;
      2. it is monotonically non-decreasing across EM iterations, since the
         E-step and M-step each maximize it.
    Relies on module-level fixtures: M, K, alpha, beta, phi, docs, V.
    """
    ips = init_ips(M, K, alpha, docs)
    actual = lower_bound(ips, phi, alpha, beta, docs, V)
    # 1st property: the bound is a (log-)likelihood lower bound, so <= 0.
    assert_true(actual <= 0)

    # 2nd property: run several EM iterations and verify monotone increase.
    old_lb_val = actual
    alpha_, beta_ = alpha, beta
    # (Removed a dead `ips_, phi_ = ips, phi` assignment: the loop rebinds
    # both via e_step before any read.)
    for i in xrange(10):
        ips_, phi_ = e_step(alpha_, beta_, docs)
        alpha_, beta_ = m_step(ips_, phi_, alpha_, docs, V)
        new_lb_val = lower_bound(ips_, phi_, alpha_, beta_, docs, V)
        assert_true(new_lb_val >= old_lb_val)
        old_lb_val = new_lb_val