All examples below assume numpy imported as np and the optimization module under test imported as opt.

Example #1
 def test_fd_problem(self):
     P = np.array([[1, 0, 0], [0, 2, 0], [0, 0, 4]])
     v = lambda x: 0.5 * x.T @ P @ x
     del_v = lambda x: x.T @ P
     x = np.array([[1], [2], [3]])
     # Default step
     p = opt.Problem(v)
     # Exact gradient
     g_ex = del_v(x)
     # Gradient evaluated at x
     g_fd1 = p.grad(x)
     # Get gradient then evaluate at x
     g = p.grad
     g_fd2 = g(x)
     self.assertTrue(np.linalg.norm(g_fd1 - g_ex) < 1e-3)
     self.assertTrue(np.linalg.norm(g_fd2 - g_ex) < 1e-3)
     # Specific step
     p = opt.Problem(v, grad_step=1e-9)
     # Exact gradient
     g_ex = del_v(x)
     # Gradient evaluated at x
     g_fd1 = p.grad(x)
     # Get gradient then evaluate at x
     g = p.grad
     g_fd2 = g(x)
     self.assertTrue(np.linalg.norm(g_fd1 - g_ex) < 1e-3)
     self.assertTrue(np.linalg.norm(g_fd2 - g_ex) < 1e-3)
Example #2
 def test_eq_const_init(self):
     v = lambda x: -x[0] - x[1]
     del_v = lambda x: np.array([[-1, -1]])
     c1 = lambda x: x + 1
     c2 = lambda x: x + 2
     c = [c1, c2]
     p = opt.Problem(v, grad=del_v, eq_const=c)
     self.assertTrue(p.eq_const(4)[0] == 5)
     self.assertTrue(p.eq_const(4)[1] == 6)
     self.assertEqual(p.num_eq_const(), 2)
     self.assertEqual(p.num_ineq_const(), 0)
     p = opt.Problem(v, grad=del_v, ineq_const=c)
     self.assertEqual(p.num_eq_const(), 0)
     self.assertEqual(p.num_ineq_const(), 2)
Example #3
 def test_ft_hessian(self):
     P = np.array([[1, 0, 0], [0, 2, 0], [0, 0, 4]])
     v = lambda x: 0.5 * x.T @ P @ x
     del_v = lambda x: x.T @ P
     p = opt.Problem(v, del_v)
     x = np.array([[0], [0], [0]])
     P_fd = opt._fd_hessian(p.cost, x)
     self.assertTrue(np.all(np.abs(P_fd - P) < 1e-4))
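For reference, a minimal sketch of what a finite-difference Hessian helper such as opt._fd_hessian could look like. The helper name comes from the test above, but the forward-difference scheme and step size here are assumptions:

 def fd_hessian(f, x, h=1e-4):
     # Forward-difference Hessian approximation (sketch):
     # H[i, j] ~ (f(x+ei+ej) - f(x+ei) - f(x+ej) + f(x)) / h**2
     n = x.shape[0]
     H = np.zeros((n, n))
     fx = f(x)
     for i in range(n):
         ei = np.zeros_like(x, dtype=float)
         ei[i, 0] = h
         for j in range(n):
             ej = np.zeros_like(x, dtype=float)
             ej[j, 0] = h
             H[i, j] = np.squeeze(f(x + ei + ej) - f(x + ei) - f(x + ej) + fx) / h**2
     return H

For the quadratic above this recovers P up to rounding error, which is what the test checks.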
Example #4
 def setUp(self):
     a = 1
     b = np.array([[1], [2]])
     C = np.array([[12, 3], [3, 10]])
     v = lambda x: a + b.T @ x + x.T @ C @ x \
                   + 10 * np.log(1 + x[0, 0]**4) * np.sin(100 * x[0, 0]) \
                   + 10 * np.log(1 + x[1, 0]**4) * np.cos(100 * x[1, 0])
     self.p = opt.Problem(v)
Example #5
 def test_multivar_problem(self):
     P = np.array([[1, 0, 0], [0, 2, 0], [0, 0, 4]])
     v = lambda x: 0.5 * x.T @ P @ x
     del_v = lambda x: x.T @ P
     p = opt.Problem(v, del_v)
     self.assertEqual(p.cost(np.array([[1], [1], [1]])), 3.5)
     self.assertSequenceEqual(
         p.grad(np.array([[1], [1], [1]])).tolist(),
         np.array([[1, 2, 4]]).tolist())
Example #6
 def test_fd_grad(self):
     P = np.array([[1, 0, 0], [0, 2, 0], [0, 0, 4]])
     v = lambda x: 0.5 * x.T @ P @ x
     del_v = lambda x: x.T @ P
     p = opt.Problem(v, del_v)
     x = np.array([[1], [2], [3]])
     g_ex = p.grad(x)
     g_fd = opt._fd_grad(p.cost, x, h=1e-9)
     self.assertTrue(np.linalg.norm(g_fd - g_ex) < 1e-3)
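A finite-difference gradient with the signature used above might look like the following sketch. It is hypothetical (it uses central differences, whereas the h=1e-9 in the test suggests a forward difference) and returns a row vector to match the analytical gradient x.T @ P:

 def fd_grad(f, x, h=1e-6):
     # Central-difference gradient, returned as a row vector.
     n = x.shape[0]
     g = np.zeros((1, n))
     for i in range(n):
         e = np.zeros_like(x, dtype=float)
         e[i, 0] = h
         g[0, i] = np.squeeze(f(x + e) - f(x - e)) / (2 * h)
     return g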
Example #7
 def test_sec(self):
     P = np.array([[1, 0, 0], [0, 2, 0], [0, 0, 4]])
     v = lambda x: 0.5 * x.T @ P @ x
     del_v = lambda x: x.T @ P
     p = opt.Problem(v, del_v)
     x = np.array([[1], [1], [1]])
     x_opt = np.array([[0], [0], [0]])
     x_sec = opt.secant(p, x)
     self.assertTrue(np.linalg.norm(x_sec - x_opt) < 1e-6)
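opt.secant is exercised as a black box above. One classical secant-type scheme it could correspond to is Broyden's method applied to the root-finding problem grad(x) = 0; the sketch below is an assumption, not the library's actual implementation:

 def secant_broyden(grad, x, tol=1e-10, max_iter=100):
     # Broyden (secant) iteration driving the gradient to zero.
     # B is a secant approximation of the Hessian, updated from
     # successive gradient differences.
     n = x.shape[0]
     B = np.eye(n)
     g = grad(x).T                  # column-vector gradient
     for _ in range(max_iter):
         dx = -np.linalg.solve(B, g)
         x = x + dx
         g_new = grad(x).T
         if np.linalg.norm(g_new) < tol:
             break
         y = g_new - g
         B = B + ((y - B @ dx) @ dx.T) / (dx.T @ dx).item()
         g = g_new
     return x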
Example #8
 def test_cg(self):
     P = np.array([[1, 0, 0], [0, 2, 0], [0, 0, 4]])
     v = lambda x: 0.5 * x.T @ P @ x
     del_v = lambda x: x.T @ P
     p = opt.Problem(v, del_v)
     x = np.array([[1], [1], [1]])
     x_opt = np.array([[0], [0], [0]])
     x_cg = opt.conjugate_gradient(p, x)
     self.assertTrue(np.linalg.norm(x_cg - x_opt) < 1e-6)
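For the quadratic cost used throughout these tests, the textbook linear conjugate-gradient recursion can be written directly against P. The sketch below illustrates the method under test; opt.conjugate_gradient itself presumably works from p.cost and p.grad rather than P:

 def cg_quadratic(P, x, tol=1e-10):
     # Minimizes 0.5 * x.T @ P @ x. The gradient is P @ x and the exact
     # step along direction s is w = -(g.T @ s) / (s.T @ P @ s).
     g = P @ x
     s = -g
     while np.linalg.norm(g) > tol:
         w = (-(g.T @ s) / (s.T @ P @ s)).item()
         x = x + w * s
         g_new = P @ x
         beta = ((g_new.T @ g_new) / (g.T @ g)).item()  # Fletcher-Reeves ratio
         s = -g_new + beta * s
         g = g_new
     return x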
Example #9
 def test_lag_new(self):
     P = np.array([[1, 0], [0, 2]])
     v = lambda x: 0.5 * x.T @ P @ x
     c = [lambda x: x[0, 0]**2 + x[1, 0]**2 - 25]
     p = opt.Problem(v, eq_const=c)
     x0 = np.array([[1], [1]])
     x = opt.lagrange_newton(p, x0, tol=1e-2)
     # KKT points of this problem lie at (+/-5, 0) and (0, +/-5);
     # the result is only printed here, not asserted.
     print(x)
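On this problem the Lagrange-Newton idea reduces to Newton's method on the KKT system P @ x + 2*lam*x = 0, x.T @ x - 25 = 0. The sketch below is specialized to this test; the library routine is presumably more general:

 def lagrange_newton_sketch(P, x, lam=0.0, tol=1e-10, max_iter=50):
     # Newton iteration on the KKT conditions of
     #   min 0.5 * x.T @ P @ x   s.t.   x.T @ x - 25 == 0
     # The unknowns are x and the Lagrange multiplier lam.
     n = x.shape[0]
     for _ in range(max_iter):
         F = np.vstack([P @ x + 2 * lam * x, x.T @ x - 25])
         if np.linalg.norm(F) < tol:
             break
         K = np.block([[P + 2 * lam * np.eye(n), 2 * x],
                       [2 * x.T, np.zeros((1, 1))]])
         d = np.linalg.solve(K, -F)
         x = x + d[:n]
         lam += d[n, 0]
     return x, lam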
Example #10
 def setUp(self):
     a = 5
     b = np.array([[1], [4], [5], [4], [2], [1]])
     C = 2 * np.array([[9, 1, 7, 5, 4, 7], [1, 11, 4, 2, 7, 5],
                       [7, 4, 13, 5, 0, 7], [5, 2, 5, 17, 1, 9],
                       [4, 7, 0, 1, 21, 15], [7, 5, 7, 9, 15, 27]])
     v = lambda x: a + b.T @ x + 0.5 * x.T @ C @ x
     self.p = opt.Problem(v)
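     # Minimizer of the quadratic: grad v = b.T + x.T @ C = 0, i.e. C @ x = -b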
     self.x_opt = -np.linalg.solve(C, b)
Example #11
 def setUp(self):
     # Objective function
     v = lambda x: np.abs(x[0, 0] - 2) + np.abs(x[1, 0] - 2)
     # Constraints
     h1 = lambda x: x[0, 0] - x[1, 0]**2
     h2 = lambda x: x[0, 0]**2 + x[1, 0]**2 - 1
     # Create Problem with equality and inequality constraints in lists
     self.p = opt.Problem(v, eq_const=[h2], ineq_const=[h1])
     # Known solution. Not used here
     self.x_opt = np.array([[np.sqrt(2) / 2], [np.sqrt(2) / 2]])
Example #12
 def setUp(self):
     a = 1
     b = np.array([[1], [2]])
     C = np.array([[12, 3], [3, 10]])
     v = lambda x: a + b.T @ x + x.T @ C @ x \
                   + 10 * np.log(1 + x[0, 0]**4) * np.sin(100 * x[0, 0]) \
                   + 10 * np.log(1 + x[1, 0]**4) * np.cos(100 * x[1, 0])
     # TODO Verify that this is the real optimum
     self.x_opt = np.array([[-0.01773056364041071], [-0.09577801844122487]])
     self.p = opt.Problem(v)
Example #13
 def test_step_size(self):
     P = np.array([[1, 0, 0], [0, 2, 0], [0, 0, 4]])
     v = lambda x: 0.5 * x.T @ P @ x
     del_v = lambda x: x.T @ P
     p = opt.Problem(v, del_v)
     # Compare step sizes at (1, 1, 1)
     x = np.array([[1], [1], [1]])
     s = -p.grad(x).T
     w_ideal = -(p.grad(x) @ s) / (s.T @ P @ s)
     w_armijo = opt._step_size(p, x, s)
     self.assertTrue(abs(w_armijo - w_ideal) / w_ideal < 0.1)
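A typical backtracking (Armijo) line search consistent with this test could look like the sketch below; the parameter names w0, beta and sigma are hypothetical, and s is assumed to be a descent direction:

 def step_size(p, x, s, w0=1.0, beta=0.5, sigma=1e-4):
     # Shrink w until the Armijo sufficient-decrease condition
     #   v(x + w*s) <= v(x) + sigma * w * grad(x) @ s
     # is satisfied.
     w = w0
     vx = np.squeeze(p.cost(x))
     slope = (p.grad(x) @ s).item()   # negative for a descent direction
     while np.squeeze(p.cost(x + w * s)) > vx + sigma * w * slope:
         w *= beta
     return w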
Example #14
 def setUp(self):
     # Set up problem
     a = 5
     b = np.array([[1], [4], [5], [4], [2], [1]])
     C = 2 * np.array([[9, 1, 7, 5, 4, 7], [1, 11, 4, 2, 7, 5],
                       [7, 4, 13, 5, 0, 7], [5, 2, 5, 17, 1, 9],
                       [4, 7, 0, 1, 21, 15], [7, 5, 7, 9, 15, 27]])
     v = lambda x: a + b.T @ x + 0.5 * x.T @ C @ x
     del_v = lambda x: b.T + x.T @ C
     # Create problem object
     self.p = opt.Problem(v, del_v)
     # Store known solution since this is a quadratic. Not used here
     self.x_opt = -np.linalg.solve(C, b)
Example #15
 def test_scalar_problem(self):
     v = lambda x: x * x
     del_v = lambda x: 2 * x
     p = opt.Problem(v, del_v)
     self.assertEqual(p.cost(-2), 4)
     self.assertEqual(p.grad(-2), -4)
Example #16
 def setUp(self):
     v = lambda x: -x[0, 0] * x[1, 0]
     h1 = lambda x: -x[0, 0] - x[1, 0]**2 + 1
     self.p = opt.Problem(v, eq_const=[h1])
Example #17
 def setUp(self):
     v = lambda x: np.abs(x[0, 0] - 2) + np.abs(x[1, 0] - 2)
     h1 = lambda x: x[0, 0] - x[1, 0]**2
     h2 = lambda x: x[0, 0]**2 + x[1, 0]**2 - 1
     self.p = opt.Problem(v, eq_const=[h2, h1])
     self.x_opt = np.array([[np.sqrt(2) / 2], [np.sqrt(2) / 2]])
Example #18
 def setUp(self):
     v = lambda x: np.log(x[0, 0]) - x[1, 0]
     h1 = lambda x: x[0, 0] - 1
     h2 = lambda x: x[0, 0]**2 + x[1, 0]**2 - 4
     self.p = opt.Problem(v, eq_const=[h2], ineq_const=[h1])
Example #19
 def setUp(self):
     v = lambda x: -x[0, 0] * x[1, 0]
     h1 = lambda x: -x[0, 0] - x[1, 0]**2 + 1
     h2 = lambda x: x[0, 0] + x[1, 0]
     self.p = opt.Problem(v, ineq_const=[h1, h2])
Example #20
 def setUp(self):
     v = lambda x: -np.sqrt((x[0, 0]**2 + 1) * (2 * x[1, 0]**2 + 1)) \
                   / (x[0, 0]**2 + x[1, 0]**2 + 0.5)
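     # Global minimum: v(0, 0) = -2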
     self.x_opt = np.array([[0], [0]])
     self.p = opt.Problem(v)
Example #21
 def setUp(self):
     # Example 12.1.5 from Fletcher
     v = lambda x: -x[0, 0] - x[1, 0]
     del_v = lambda x: np.array([[-1, -1]])
     c = [lambda x: 1 - x[0, 0]**2 - x[1, 0]**2]
     self.p = opt.Problem(v, eq_const=c)