def test_opt_traj5_1ref():
  """ test_opt_traj5_1 """
  traj = simple_traj5()
  start_theta = np.array([-1.0])
  choices = [1, 0, 2]
  traj_choices = [(traj, choices)]
  obj_fun = trajs_obj_fun_ref(traj_choices)
  (theta, ys) = optimize_function(obj_fun, start_theta, max_iters=20)
  assert ys[0] <= ys[-1], (theta, ys, len(ys))
def test_smoother_ref_traj5_1():
  """ test_smoother_ref_traj5_1 """
  traj = simple_traj5()
  theta = np.array([-1.0])
  smoother_ref = TrajectorySmootherRef(traj, theta)
  smoother_ref.computeProbs()
  smoother_1 = TrajectorySmoother1(traj, theta)
  smoother_1.computeProbs()
  check_probs(smoother_1, smoother_ref)
def test_gradient_ref_traj5_1():
  """ test_gradient_ref_traj5_1. """
  traj = simple_traj5()
  choices = [1, 0, 2]
  fun = get_fun_ref(traj, choices)
  grad_fun = get_grad_ref(traj, choices)
  # The parameter vector must be defined with floating-point values.
  theta = np.array([1.0])
  g_emp = compute_grad(fun, theta)
  g = grad_fun(theta)
  assert np.abs(g - g_emp).max() < 1e-3, (g, g_emp)
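# The empirical gradient used above comes from a compute_grad helper defined
# elsewhere in this package. As a point of reference only, here is a minimal
# sketch of such a helper, assuming the signature compute_grad(fun, theta)
# inferred from its usage in this test; the actual implementation may differ
# (step size, one-sided vs. central differences).
def _compute_grad_sketch(fun, theta, eps=1e-6):
  """ Central finite-difference approximation of the gradient of fun at theta. """
  grad = np.zeros_like(theta, dtype=float)
  for i in range(len(theta)):
    step = np.zeros_like(theta, dtype=float)
    step[i] = eps
    grad[i] = (fun(theta + step) - fun(theta - step)) / (2 * eps)
  return grad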
def test_traj_5_2():
  """ test_traj_5_2 """
  traj = simple_traj5()
  theta = np.array([-1.0])
  choices = [1, 0, 2]
  elts = LearningElementsSecure(traj, theta, choices)
  elts.computeLogZ()
  elts_ref = LearningElementsRef(traj, theta, choices)
  elts_ref.computeLogZ()
  assert within(elts.Z, elts_ref.Z, 1e-5), (elts.Z, elts_ref.Z, 1e-5)
  assert within(elts.logZ, elts_ref.logZ, 1e-5), \
    (elts.logZ, elts_ref.logZ, 1e-5)
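# The within helper used above is also defined elsewhere. A minimal sketch,
# assuming within(a, b, tol) performs a plain absolute-difference comparison;
# the actual helper may use a relative tolerance instead.
def _within_sketch(a, b, tol):
  """ True when a and b differ by less than the tolerance tol. """
  return np.abs(a - b) < tol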
def test_grad_traj5_2():
  """ test_grad_traj5_2
  Test of implementation 1 of gradient. """
  traj = simple_traj5()
  theta = np.array([-1.0])
  choices = [1, 0, 2]
  elts = LearningElementsSecure(traj, theta, choices)
  elts.computeGradientLogZ()
  elts_ref = LearningElementsRef(traj, theta, choices)
  elts_ref.computeGradientLogZ()
  g = elts.grad_logZ
  g_ref = elts_ref.grad_logZ
  assert np.abs(g - g_ref).max() < 1e-3, (g, g_ref)
def test_hess_traj5_2():
  """ test_hess_traj5_2 """
  traj = simple_traj5()
  theta = np.array([-1.0])
  choices = [1, 0, 2]
  elts = LearningElementsSecure(traj, theta, choices)
  elts.computeHessianLogZ()
  elts_ref = LearningElementsRef(traj, theta, choices)
  elts_ref.computeHessianLogZ()
  h = elts.hess_logZ
  h_ref = elts_ref.hess_logZ
  assert np.abs(h - h_ref).max() < 1e-3, \
    (h, h_ref, elts.hess_Z, elts_ref.hess_Z,
     elts.grad_Z, elts_ref.grad_Z, elts.Z, elts_ref.Z)
def test_opt0_traj5_1():
  """ test_opt0_traj5_1 """
  traj = simple_traj5()
  start_theta = np.array([-1.0])
  choices = [1, 0, 2]
  traj_choices = [(traj, choices)]
  obj_fun_1 = trajs_obj_fun_1(traj_choices)
  obj_fun_0 = trajs_obj_fun_0(traj_choices)
  obj_fun_ref = trajs_obj_fun_ref(traj_choices)
  max_iters = 5
  (theta_0, ys_0) = optimize_function(obj_fun_0, start_theta, max_iters)
  (theta_1, ys_1) = optimize_function(obj_fun_1, start_theta, max_iters)
  (theta_ref, ys_ref) = optimize_function(obj_fun_ref, start_theta, max_iters)
  assert np.abs(theta_0 - theta_ref).max() < 1e-3, (theta_0, theta_ref)
  assert np.abs(theta_1 - theta_ref).max() < 1e-3, (theta_1, theta_ref)
  assert within(ys_0[0], ys_ref[0], 1e-3), (theta_ref, ys_ref, theta_0, ys_0)
  assert within(ys_1[0], ys_ref[0], 1e-3), (theta_ref, ys_ref, theta_1, ys_1)
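# optimize_function itself is defined elsewhere in the package. The tests above
# only rely on its contract: it returns a pair (theta, ys), where theta is the
# final parameter vector and ys collects the objective values visited, starting
# at the initial point (hence ys_0[0] is compared against ys_ref[0] when both
# runs start from the same start_theta). A minimal gradient-ascent sketch of
# that contract, assuming obj_fun(theta) returns a (value, gradient) pair; the
# real optimizer and its step-size strategy may differ.
def _optimize_function_sketch(obj_fun, start_theta, max_iters=20, step=0.1):
  """ Plain gradient ascent that records the objective value at each iterate. """
  theta = np.array(start_theta, dtype=float)
  ys = []
  for _ in range(max_iters):
    (y, g) = obj_fun(theta)
    ys.append(y)
    theta = theta + step * g
  return (theta, ys)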
def test_hessian_ref_traj5_1():
  """ test_hessian_ref_traj5_1
  
  Simple test to check that the reference implementation of the hessian
  is equal to a empirical definition based on logZ.
  """
  traj = simple_traj5()
  choices = [1, 0, 2]
  # The parameter vector must be defined with floating-point values.
  theta = np.array([1.0])
  # Z
  fun = get_fun_ref0(traj, choices)
  hess_fun = get_hess_ref0(traj, choices)
  h_emp = compute_hess(fun, theta)
  h = hess_fun(theta)
  assert np.abs(h - h_emp).max() < 1e-2, (h, h_emp)
  # LogZ
  fun = get_fun_ref(traj, choices)
  hess_fun = get_hess_ref(traj, choices)
  h_emp = compute_hess(fun, theta)
  h = hess_fun(theta)
  assert np.abs(h - h_emp).max() < 1e-3, (h, h_emp)
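# Similarly, compute_hess is the empirical counterpart of the reference
# Hessian. A minimal sketch, assuming the signature compute_hess(fun, theta)
# inferred from its usage above and a central-difference scheme; the actual
# helper may use a different stencil or step size.
def _compute_hess_sketch(fun, theta, eps=1e-4):
  """ Central finite-difference approximation of the Hessian of fun at theta. """
  n = len(theta)
  hess = np.zeros((n, n))
  for i in range(n):
    for j in range(n):
      e_i = np.zeros(n)
      e_j = np.zeros(n)
      e_i[i] = eps
      e_j[j] = eps
      hess[i, j] = (fun(theta + e_i + e_j) - fun(theta + e_i - e_j)
                    - fun(theta - e_i + e_j) + fun(theta - e_i - e_j)) / (4 * eps ** 2)
  return hess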