Example #1
def test_gradient(func, low_range, high_range):
  # Sweep a coarse 3x3x3 grid of 3-component inputs over [low_range, high_range)
  # and check forward- and reverse-mode gradients against finite differences.
  data = [[0],[0],[0]]
  for i in range(3):
    data[0][0] = low_range + i * (high_range - low_range) / 3
    for j in range(3):
      data[1][0] = low_range + j * (high_range - low_range) / 3
      for k in range(3):
        data[2][0] = low_range + k * (high_range - low_range) / 3
        
        x = Matrix(3, 1, data)
        
        eval_native = func(x)
  
        _, finite_diff_derivative = autodiff.finite_difference(func, [x])
  
        # Forward mode autodiff
        eval_fwd, forward_grad, forward_ops = autodiff.compute_gradients(func, [x], 0, reverse_mode = False)
        test_assert(eval_native.compare(eval_fwd))
        test_assert(finite_diff_derivative.compare(forward_grad, 1e-3))
  
        # Reverse mode autodiff  
        eval_reverse, reverse_grad, reverse_ops = autodiff.compute_gradients(func, [x], 0, reverse_mode = True)
        test_assert(eval_native.compare(eval_reverse))
        test_assert(finite_diff_derivative.compare(reverse_grad, 1e-3))
  
        test_assert(reverse_ops <= forward_ops)
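
The assertions above measure each autodiff gradient against the finite-difference estimate returned by autodiff.finite_difference. As a point of reference, a minimal central-difference gradient over plain Python lists looks like the sketch below; the function name and the use of lists rather than the project's Matrix class are illustrative only.

def central_difference_gradient(f, x, eps=1e-5):
  # f maps a list of floats to a single float; entry i of the result
  # approximates df/dx_i at the point x.
  grad = []
  for i in range(len(x)):
    up, down = list(x), list(x)
    up[i] += eps
    down[i] -= eps
    grad.append((f(up) - f(down)) / (2 * eps))
  return grad

# f(x) = x0^2 + x1*x2 has gradient (2*x0, x2, x1).
print(central_difference_gradient(lambda v: v[0] ** 2 + v[1] * v[2], [1.0, 2.0, 3.0]))
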
Example #2
def train_linear_model(xs, ys, test_xs, test_ys):
  # Minibatch gradient descent on a linear model; params holds the flattened
  # (xs.cols x ys.cols) weight matrix as a single column vector.
  params = Matrix(xs.cols * ys.cols, 1)

  batch_size = 100

  print('Initial accuracy', eval_model(params, test_xs, test_ys))

  batch_indices = [i for i in range(xs.rows)]
  
  # Momentum buffer (unused while the momentum update below stays commented out).
  momentum = Matrix(xs.cols, 1)
  
  step_size = .5
  for epoch in range(5):
    random.shuffle(batch_indices)
    num_batches = len(batch_indices) // batch_size
    start = time.time()
    for i in range(num_batches):
      batch = batch_indices[i*batch_size:(i*batch_size + batch_size)]
      
      batch_xs = xs.gather_rows(batch)
      batch_ys = ys.gather_rows(batch)
      
      f_val, f_grad, opcount = autodiff.compute_gradients(error_batch_linear_model, [params, batch_xs, batch_ys], 0, reverse_mode = True)
      #f_val, f_grad = autodiff.f_d(error_batch_linear_model, [params, batch_xs, batch_ys], 0)
      
      #momentum = 0.9*momentum + f_grad * -step_size
      params += f_grad * -step_size
      if (i + 1) % 10 == 0:
        duration = time.time() - start
        start = time.time()
        print("Epoch %d, Batch %d (of %d) Error %f  e_grad_norm=%f (opcount=%d) (%fs)" % (epoch + 1, i + 1, num_batches, f_val, f_grad.euclidean_norm(), opcount, duration))

    accuracy = eval_model(params, test_xs, test_ys)
    print("Epoch %d accuracy %f" % (epoch + 1, accuracy))
Example #3
def test_derivative(func, low_range, high_range):
  # Sample 10 scalar points over [low_range, high_range) and check forward- and
  # reverse-mode derivatives against a finite-difference estimate.
  for i in range(10):
    x = low_range + i * (high_range - low_range) / 10
    eval_native = func(x)
    
    # Finite difference
    _, finite_diff_derivative = autodiff.finite_difference(func, [x])
    
    # Forward mode autodiff
    eval_fwd, forward_deriv, forward_ops = autodiff.compute_gradients(func, [x], 0, reverse_mode = False)
    test_assert(eval_native == eval_fwd)
    test_assert(abs(finite_diff_derivative - forward_deriv) < 1e-3)
    
    # Reverse mode autodiff
    eval_reverse, reverse_deriv, reverse_ops = autodiff.compute_gradients(func, [x], 0, reverse_mode = True)
    test_assert(eval_native == eval_reverse)
    test_assert(abs(finite_diff_derivative - reverse_deriv) < 1e-3)

    test_assert(reverse_ops <= forward_ops)
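
Forward mode, as exercised by this test, propagates a derivative alongside every intermediate value. The dual-number sketch below shows that mechanism for scalar functions built from +, - and *; it is not the autodiff module's implementation, only an illustration of the idea.

class Dual:
  # Carries a value together with its derivative with respect to the input.
  def __init__(self, val, dot=0.0):
    self.val, self.dot = val, dot

  def _lift(self, other):
    return other if isinstance(other, Dual) else Dual(float(other))

  def __add__(self, other):
    other = self._lift(other)
    return Dual(self.val + other.val, self.dot + other.dot)

  def __sub__(self, other):
    other = self._lift(other)
    return Dual(self.val - other.val, self.dot - other.dot)

  def __mul__(self, other):
    other = self._lift(other)
    return Dual(self.val * other.val,
                self.val * other.dot + self.dot * other.val)

  __radd__ = __add__
  __rmul__ = __mul__

def forward_derivative(func, x):
  out = func(Dual(x, 1.0))  # seed dx/dx = 1
  return out.val, out.dot

# f(x) = x^3 - 2x has derivative 3x^2 - 2, which is 4.75 at x = 1.5.
print(forward_derivative(lambda x: x * x * x - 2 * x, 1.5))
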
Example #4
def test_jacobians():
  # Check Jacobians from finite differences, reverse mode and forward mode
  # against known values, for a function built around a matrix multiply and
  # for a constant function.
  x = Matrix(3, 1, [[4.0], [7.0], [2.0]])
  correct = Matrix(3, 3, [[3.0, 4.5, 11.5], [4.0, 1.5, 9], [5.0, 1, 2]]).transpose()

  _, jacobian = autodiff.finite_difference(func_jacobian_matmul, [x])
  test_assert(correct.compare(jacobian, 1e-3))
  _, jacobian, _ = autodiff.compute_gradients(func_jacobian_matmul, [x], 0, reverse_mode = True)
  test_assert(correct.compare(jacobian, 1e-3))
  _, jacobian, _ = autodiff.compute_gradients(func_jacobian_matmul, [x], 0, reverse_mode = False)
  test_assert(correct.compare(jacobian, 1e-3))

  correct = Matrix(1, 3, [[0, 0, 0]])
  x = 2.5
  _, jacobian = autodiff.finite_difference(func_jacobian_const, [x])
  test_assert(correct.compare(jacobian, 1e-3))
  _, jacobian, _ = autodiff.compute_gradients(func_jacobian_const, [x], 0, reverse_mode = True)
  test_assert(correct.compare(jacobian, 1e-3))
  _, jacobian, _ = autodiff.compute_gradients(func_jacobian_const, [x], 0, reverse_mode = False)
  test_assert(correct.compare(jacobian, 1e-3))
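
For completeness, a self-contained central-difference Jacobian estimate over plain Python lists is sketched below, with entry [i][j] approximating df_i/dx_j. How autodiff.finite_difference orients its result is not visible from this test alone, so the layout and the example map here are assumptions for illustration.

def central_difference_jacobian(f, x, eps=1e-5):
  # f maps a list of n floats to a list of m floats; the result is an
  # m x n nested list with entry [i][j] approximating df_i/dx_j.
  m = len(f(x))
  jac = [[0.0] * len(x) for _ in range(m)]
  for j in range(len(x)):
    up, down = list(x), list(x)
    up[j] += eps
    down[j] -= eps
    f_up, f_down = f(up), f(down)
    for i in range(m):
      jac[i][j] = (f_up[i] - f_down[i]) / (2 * eps)
  return jac

# A linear map f(x) = A x has Jacobian A itself.
A = [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]
f = lambda v: [sum(A[i][j] * v[j] for j in range(2)) for i in range(3)]
print(central_difference_jacobian(f, [0.5, -1.0]))
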
Example #5
def main():
    # Fit a line y = m*x + b to generated data by gradient descent on the
    # batch error, using reverse-mode autodiff for the gradients.
    target_m = .7
    target_b = 4
    xs, ys = gen_data(target_m, target_b)
    params = Matrix(2, 1, [[-2], [0]])

    for i in range(1000):
        f_val, f_grad, opcount = autodiff.compute_gradients(error_batch,
                                                            [params, xs, ys],
                                                            0,
                                                            reverse_mode=True)
        params += f_grad * -4e-3
        if (i + 1) % 10 == 0:
            print("%d: Error %f  e_grad=%f  m=%f  b=%f (opcount=%d)" %
                  (i + 1, f_val, f_grad.reduce_sum(), params[0], params[1],
                   opcount))

    print("Final m=%f, b=%f" % (params[0], params[1]))
    print("Compared to target m=%f, b=%f" % (target_m, target_b))