Example #1
def test_smoother_ref_traj1_1():
  """ test_smoother_ref_traj1_1 """
  traj = simple_traj1()
  theta = np.array([0.0, 0.0])
  smoother_ref = TrajectorySmootherRef(traj, theta)
  smoother_ref.computeProbs()
  smoother_1 = TrajectorySmoother1(traj, theta)
  smoother_1.computeProbs()
  check_probs(smoother_1, smoother_ref)
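# check_probs is a project helper not shown in this section; a minimal sketch,
# assuming both smoothers expose a per-step list of probability vectors under
# a .probabilities attribute (the attribute name and tolerance are assumptions):
def check_probs(smoother, smoother_ref, tol=1e-8):
  """ Hypothetical helper: asserts that two smoothers assign (nearly) the
  same probability to every choice at every step of the trajectory. """
  assert len(smoother.probabilities) == len(smoother_ref.probabilities)
  for l in range(len(smoother_ref.probabilities)):
    p = np.asarray(smoother.probabilities[l])
    p_ref = np.asarray(smoother_ref.probabilities[l])
    assert np.abs(p - p_ref).max() < tol, (l, p, p_ref)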
def test_gradient_ref2():
  """ Test at a different point. """
  traj = simple_traj1()
  choices = [1, 0]
  fun = get_fun_ref(traj, choices)
  grad_fun = get_grad_ref(traj, choices)
  theta = np.array([1.0, -1.0])
  g_emp = compute_grad(fun, theta)
  g = grad_fun(theta)
  assert(np.abs(g - g_emp).max() < 1e-3), (g, g_emp)
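# compute_grad is assumed to build an empirical gradient by central finite
# differences of the scalar objective returned by get_fun_ref; a minimal sketch
# under that assumption (the signature and step size are guesses):
def compute_grad(fun, theta, eps=1e-6):
  """ Hypothetical finite-difference gradient of a scalar function fun
  evaluated at the 1-D parameter vector theta. """
  g = np.zeros_like(theta)
  for i in range(len(theta)):
    d = np.zeros_like(theta)
    d[i] = eps
    g[i] = (fun(theta + d) - fun(theta - d)) / (2 * eps)
  return g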
def test_filter_ref_traj1_1():
  """ test_filter_ref_traj1_1 """
  traj = simple_traj1()
  theta = np.array([-1.0, 1.0])
  for k in range(traj.L):
    filter_ref = TrajectoryFilterRef(traj, theta, k)
    filter_ref.computeProbs()
    filter_1 = TrajectoryFilter1(traj, theta, k)
    filter_1.computeProbs()
    check_probs(filter_1, filter_ref)
Example #5
def test_viterbi_ref_1():
  """ test_viterbi_ref_1
  """
  traj = simple_traj1()
  theta = np.array([1.0, 0.0])
  viterbi = TrajectoryViterbiRef(traj, theta)
  viterbi.computeMostLikely()
  assert len(viterbi.most_likely) == traj.L
  assert viterbi.most_likely[0] == 0
  assert viterbi.most_likely[1] == 0
def test_Z1():
  """ Test of implementation 1. """
  traj = simple_traj1()
  theta = np.array([0.0, -1.0])
  choices = [1, 0]
  elts = LearningElements(traj, theta, choices)
  elts.computeLogZ()
  elts_ref = LearningElementsRef(traj, theta, choices)
  elts_ref.computeLogZ()
  assert(within(elts.logZ, elts_ref.logZ, 1e-5))
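# within is a small comparison helper used throughout these tests; a minimal
# sketch, assuming it simply checks that two scalars agree up to a tolerance:
def within(x, y, tol):
  """ Hypothetical helper: true when x and y differ by less than tol. """
  return np.abs(x - y) < tol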
def test_hess_traj1_2():
  """ test_hess_traj1_2 """
  traj = simple_traj1()
  theta = np.array([0.0, -1.0])
  choices = [1, 0]
  elts = LearningElementsSecure(traj, theta, choices)
  elts.computeHessianLogZ()
  elts_ref = LearningElementsRef(traj, theta, choices)
  elts_ref.computeHessianLogZ()
  h = elts.hess_logZ
  h_ref = elts_ref.hess_logZ
  assert(np.abs(h - h_ref).max() < 1e-3), (h, h_ref)
def test_traj_1_2():
  """ test_traj_1_2 """
  traj = simple_traj1()
  theta = np.array([1.0, -1.0])
  choices = [1, 0]
  elts = LearningElementsSecure(traj, theta, choices)
  elts.computeLogZ()
  elts_ref = LearningElementsRef(traj, theta, choices)
  elts_ref.computeLogZ()
  assert(within(elts.Z, elts_ref.Z, 1e-5)), (elts.Z, elts_ref.Z, 1e-5)
  assert(within(elts.logZ, elts_ref.logZ, 1e-5)), \
    (elts.logZ, elts_ref.logZ, 1e-5)
def test_grad_traj1_2():
  """ test_grad_traj1_2
  Test of implementation 1 of gradient. """
  traj = simple_traj1()
  theta = np.array([0.0, -1.0])
  choices = [1, 0]
  elts = LearningElementsSecure(traj, theta, choices)
  elts.computeGradientLogZ()
  elts_ref = LearningElementsRef(traj, theta, choices)
  elts_ref.computeGradientLogZ()
  g = elts.grad_logZ
  g_ref = elts_ref.grad_logZ
  assert(np.abs(g - g_ref).max() < 1e-3), (g, g_ref)
def test_hessian_ref1():
  """ Simple test to check that the reference implementation of the hessian
  is equal to a empirical definition based on logZ.
  """
  traj = simple_traj1()
  choices = [1, 0]
  fun = get_fun_ref(traj, choices)
  hess_fun = get_hess_ref(traj, choices)
  # Note: the parameter vector must be defined with floating-point entries.
  theta = np.array([0.0, 0.0])
  h_emp = compute_hess(fun, theta)
  h = hess_fun(theta)
  assert(np.abs(h - h_emp).max() < 1e-3), (h, h_emp)
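# compute_hess is assumed to build an empirical Hessian in the same way that
# compute_grad builds an empirical gradient; a minimal sketch, reusing the
# hypothetical compute_grad above (the signature and step size are guesses):
def compute_hess(fun, theta, eps=1e-4):
  """ Hypothetical finite-difference Hessian of a scalar function fun,
  obtained by differencing the empirical gradient one coordinate at a time. """
  n = len(theta)
  h = np.zeros((n, n))
  for i in range(n):
    d = np.zeros_like(theta)
    d[i] = eps
    h[:, i] = (compute_grad(fun, theta + d) - compute_grad(fun, theta - d)) / (2 * eps)
  return h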
def test_gradient_ref1():
  """ Simple test to check that the reference implementation of the gradient
  is equal to a empirical definition based on logZ.
  """
  traj = simple_traj1()
  choices = [1, 0]
  fun = get_fun_ref(traj, choices)
  grad_fun = get_grad_ref(traj, choices)
  # Note: the parameter vector must be defined with floating-point entries.
  theta = np.array([0.0, 0.0])
  g_emp = compute_grad(fun, theta)
  g = grad_fun(theta)
  assert(np.abs(g - g_emp).max() < 1e-3), (g, g_emp)
def test_em_1():
  """ Very simple test: we pick some trajectories and verify that
  the LL increases with EM.
  """
  trajs = [simple_traj1(), simple_traj4(), simple_traj3()]
  theta_start = 0.1 * np.ones(2)
  history = learn_em(trajs_estim_obj_fun_1, trajs, theta_start)
#  (ll_end, theta_end) = history[-1]
  # Very simple check here: we verify that the likelihood
  # increases at every step:
  for t in range(len(history)-1):
    (ll_1, _) = history[t]
    (ll_2, _) = history[t+1]
    assert ll_1 <= ll_2
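# The EM check above relies only on the history returned by learn_em being a
# list of (log-likelihood, theta) pairs; the same monotonicity check can be
# factored into a small hypothetical helper:
def assert_monotone_ll(history):
  """ Hypothetical helper: asserts that the log-likelihood entries of an
  EM history (a list of (ll, theta) pairs) never decrease. """
  lls = [ll for (ll, _) in history]
  for (ll_1, ll_2) in zip(lls, lls[1:]):
    assert ll_1 <= ll_2, (ll_1, ll_2)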
Example #16
def test_viterbi_1_1():
  """ test_viterbi_1_1
  """
  traj = simple_traj1()
  theta = np.array([1.0, 0.0])
  viterbi_ref = TrajectoryViterbiRef(traj, theta)
  viterbi_ref.computeMostLikely()
  viterbi_1 = TrajectoryViterbi1(traj, theta)
  viterbi_1.computeMostLikely()
  assert len(viterbi_1.most_likely) == traj.L
  for l in range(traj.L):
    assert viterbi_1.most_likely[l] == viterbi_ref.most_likely[l]
    assert traj.num_choices[l] == len(viterbi_ref.most_likely_tree[l])
    for i in range(traj.num_choices[l]):
      assert viterbi_ref.most_likely_tree[l][i] == \
             viterbi_1.most_likely_tree[l][i]
def test_filter_ref_1():
  """ test_filter_ref_1
  """
  traj = simple_traj1()
  theta = np.array([1.0, -1.0])
  filter_0 = TrajectoryFilterRef(traj, theta, 0)
  filter_0.computeProbs()
  # With k = 0, the forward probabilities should equal the filtered probabilities.
  check_prob_fields(filter_0.forward, filter_0.probabilities)
  # With k = traj.L, the filter runs in (inefficient) smoothing mode.
  filter_L = TrajectoryFilterRef(traj, theta, traj.L)
  filter_L.computeProbs()
  smoother = TrajectorySmootherRef(traj, theta)
  smoother.computeProbs()
  check_prob_fields(filter_L.forward, smoother.forward)
  check_prob_fields(filter_L.backward, smoother.backward)
  check_prob_fields(filter_L.probabilities, smoother.probabilities)
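# check_prob_fields is a project helper not shown in this section; a minimal
# sketch, assuming each field is a per-step sequence of probability vectors:
def check_prob_fields(field_1, field_2, tol=1e-8):
  """ Hypothetical helper: asserts that two probability fields agree
  element-wise at every step, within a small tolerance. """
  assert len(field_1) == len(field_2)
  for (p_1, p_2) in zip(field_1, field_2):
    assert np.abs(np.asarray(p_1) - np.asarray(p_2)).max() < tol, (p_1, p_2)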