def test_ApproxJacobian2(self):

        A = np.matrix("1.0 2.0; 3.0 4.0")

        def f(x):
            return A * x

        x0 = np.matrix("5.0; 6.0")
        dx = 1.e-6
        Df_x = F.approximateJacobian(f, x0, dx)

        # Make sure approximateJacobian returns a Jacobian of the right shape
        self.assertEqual(Df_x.shape, (2, 2))
        npt.assert_array_almost_equal(Df_x, A)

        # Test with a 3x3 matrix; the result shouldn't really be any different
        A = np.matrix("3.0 1.0 2.0; 9.0 0.0 12.0; 16.5 23.0 1.2")

        def f(x):
            return A * x

        x0 = np.matrix("5.0; 6.0; 17.2")
        dx = 1.e-6
        Df_x = F.approximateJacobian(f, x0, dx)
        npt.assert_array_almost_equal(Df_x, A)
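The tests in this listing call F.approximateJacobian(f, x0, dx) but never show its implementation. As a point of reference, a minimal forward-difference version consistent with how the tests use it (scalar in, scalar out; column vector in, matrix out) might look like the sketch below. This is an assumption about the implementation, not the project's actual code.

import numpy as np

def approximateJacobian(f, x, dx=1.e-6):
    # Hypothetical sketch: forward-difference approximation of the Jacobian
    # of f at x. A scalar x yields a scalar derivative; a column vector x
    # yields a matrix built one perturbed column at a time.
    if np.isscalar(x):
        return (f(x + dx) - f(x)) / dx
    x = np.matrix(x)
    n = x.shape[0]
    fx = np.matrix(f(x))
    Df_x = np.matrix(np.zeros((fx.shape[0], n)))
    for j in range(n):
        v = np.matrix(np.zeros((n, 1)))
        v[j, 0] = dx
        Df_x[:, j] = (np.matrix(f(x + v)) - fx) / dx
    return Df_x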
Example #2
    def step(self, x, fx=None):
        """Take a single step of a Newton method, starting from x. If the
        argument fx is provided, assumes fx = f(x).

        """
        if fx is None:
            fx = self._f(x)

        # If an analytical Jacobian was provided, use it directly:
        if self._given_analytical_jacobian:
            Df_x = self._df(x)

        else:
            # Otherwise, fall back to the finite-difference approximation:
            Df_x = F.approximateJacobian(self._f, x, self._dx)
        # linalg.solve(A,B) returns the matrix solution to AX = B, so
        # it gives (A^{-1}) B. np.matrix() promotes scalars to 1x1
        # matrices.
        # Guard against an identically zero (hence singular) Jacobian: nudge
        # it by dx so the linear solve below does not fail on a flat spot of f.
        if np.linalg.norm(Df_x) == 0:
            print("Adjusting Jacobian for singular derivative")
            Df_x += self._dx

        h = np.linalg.solve(np.matrix(Df_x), np.matrix(fx))

        # Suppose x was a scalar. At this point, h is a 1x1 matrix. If
        # we want to return a scalar value for our next guess, we need
        # to re-scalarize h before combining it with our previous
        # x. The function np.asscalar() will act on a numpy array or
        # matrix that has only a single data element inside and return
        # that element as a scalar.
        if np.isscalar(x):
            h = np.asscalar(h)

        return x - h  # Newton's method requires subtracting the step h, not adding it
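For context, the step method above is one iteration of Newton's method; a driver loop would call it repeatedly until the residual f(x) is small. Below is a minimal standalone sketch of that loop, reusing the same F.approximateJacobian helper (it assumes the functions module is already imported as F, as in the snippets; the tolerance, iteration cap, and function name are illustrative assumptions, not the original project's API).

import numpy as np

def newton_solve(f, x0, dx=1.e-6, tol=1.e-10, maxiter=20):
    # Hypothetical sketch of the full iteration that step() performs one
    # pass of: repeat x <- x - Df(x)^{-1} f(x) until f(x) is nearly zero.
    x = x0
    for _ in range(maxiter):
        fx = f(x)
        if np.linalg.norm(np.matrix(fx)) < tol:
            break
        Df_x = F.approximateJacobian(f, x, dx)
        h = np.linalg.solve(np.matrix(Df_x), np.matrix(fx))
        if np.isscalar(x):
            h = np.asscalar(h)
        x = x - h
    return x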
 def test_ApproxJacobianPolynomial(self):
     # p(x) = x^2 + 5x + 4
     p = F.Polynomial([4, 5, 1])
     x0 = 0
     dx = 1e-8
     Df_x = F.approximateJacobian(p, x0, dx)
     self.assertAlmostEqual(Df_x, 5.0)
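F.Polynomial appears here and again in test_higherOrder and test_JacobianMethod below, but its definition is not part of this listing. Judging from the comment p(x) = x^2 + 5x + 4 for coefficients [4, 5, 1], it stores coefficients in ascending powers of x and is callable. A minimal sketch consistent with that usage (an assumption, not the project's code):

class Polynomial:
    # Hypothetical sketch of F.Polynomial: coeffs[k] multiplies x**k, so
    # Polynomial([4, 5, 1]) represents 4 + 5*x + x**2.
    def __init__(self, coeffs):
        self._coeffs = list(coeffs)

    def __call__(self, x):
        # Evaluate with Horner's rule, from the highest power down.
        result = 0
        for c in reversed(self._coeffs):
            result = result * x + c
        return result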
Example #4
    def test_ApproxJacobian3(self):
        # Try integer (non-float) entries for A and x, and an np.array
        # (rather than np.matrix) input for x
        A = np.matrix([[1, 2], [3, 4]])

        def f(x):
            # The * operator for numpy matrices is overloaded to mean
            # matrix-multiplication, rather than elementwise
            # multiplication as it does for numpy arrays
            return A * x

        # The vector-valued function f defined above is the following:
        # if we let u = f(x), then
        #
        # u1 = x1 + 2 x2
        # u2 = 3 x1 + 4 x2
        #
        # The Jacobian of this function is constant and exactly equal
        # to the matrix A. approximateJacobian should thus return
        # something pretty close to A.

        x0 = np.array([[1], [3]])
        dx = 1.e-6
        Df_x = F.approximateJacobian(f, x0, dx)

        # Make sure approximateJacobian returns a Jacobian of the right shape
        self.assertEqual(Df_x.shape, (2, 2))
        # numpy arrays and matrices vectorize comparisons. So if a & b
        # are arrays, the expression a==b will itself be an array of
        # booleans. But an array of booleans does not itself evaluate
        # to a clean boolean (this is an exception to the general
        # Python rule that "every object can be interpreted as a
        # boolean"), so normal assert statements will break. We need
        # array-specific assert statements found in numpy.testing
        npt.assert_array_almost_equal(Df_x, A)
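To make the comment about vectorized comparisons concrete: comparing two numpy arrays element-wise produces an array of booleans, and handing that to a context that needs a single bool raises a ValueError, which is why the assertions from numpy.testing are used instead. A small illustration (not part of the test suite):

import numpy as np

a = np.array([1.0, 2.0])
b = np.array([1.0, 2.0])
print(a == b)          # [ True  True] -- an array of booleans, not one bool
# bool(a == b)         # raises ValueError: truth value of an array is ambiguous
print((a == b).all())  # True -- reduce explicitly if a single bool is needed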
Example #5
    def step(self, x, fx=None):
        """Take a single step of a Newton method, starting from x. If the
        argument fx is provided, assumes fx = f(x).

        """
        if fx is None:
            fx = self._f(x)

        if self._Df:
            Df_x = self._Df(x)
        else:
            Df_x = F.approximateJacobian(self._f, x, self._dx)

        h = np.linalg.solve(np.matrix(Df_x), np.matrix(fx))

        # Suppose x was a scalar. At this point, h is a 1x1 matrix. If
        # we want to return a scalar value for our next guess, we need
        # to re-scalarize h before combining it with our previous
        # x. The function np.asscalar() will act on a numpy array or
        # matrix that has only a single data element inside and return
        # that element as a scalar.
        if np.isscalar(x):
            h = np.asscalar(h)

        return x - h
Example #6
    def step(self, x, fx=None):
        """Take a single step of a Newton method, starting from x. If the
        argument fx is provided, assumes fx = f(x).

        """
        if fx is None:
            fx = self._f(x)

        if self._Df is None:
            Df_x = F.approximateJacobian(self._f, x, self._dx)
        else:
            Df_x = self._Df(x)

        # linalg.solve(A,B) returns the matrix solution to AX = B, so
        # it gives (A^{-1}) B. np.matrix() promotes scalars to 1x1
        # matrices.
        h = np.linalg.solve(np.matrix(Df_x), np.matrix(fx))
        # Suppose x was a scalar. At this point, h is a 1x1 matrix. If
        # we want to return a scalar value for our next guess, we need
        # to re-scalarize h before combining it with our previous
        # x. The function np.asscalar() will act on a numpy array or
        # matrix that has only a single data element inside and return
        # that element as a scalar.

        # Warn when the step h is suspiciously large; this may indicate a poor
        # initial guess x0 or a problem with f(x).
        thres = 1.e6
        if np.linalg.norm(h) > thres:
            warnings.warn("Newton step is unusually large; re-check x0 or f(x) before trusting the result.")
        
        if np.isscalar(x):
            h = np.asscalar(h)

        return x - h
 def test_higherOrder(self):
     #test higher order expressions
     f = F.Polynomial([1, 2, 3, 4])
     x0 = 2
     dx = 1.e-6
     Df_x = F.approximateJacobian(f, x0, dx)
     self.assertAlmostEqual(Df_x, 2 + (3 * 2 * x0) + (4 * 3 * (x0**2)))
Example #8
 def test_ApproxJacobian3(self):
     f = lambda x: x**2 - 7 * x + 10
     #g = F.Polynomial([10, -7, 1])
     dx = 1.e-10
     for x0 in np.linspace(-2, 2, 11):
         Df_x = F.approximateJacobian(f, x0, dx)
         self.assertTrue(np.isscalar(Df_x))
         self.assertAlmostEqual(Df_x, (2 * x0 - 7), places=4)
Example #9
 def test_ApproxJacobian1D(self):
     for slope in range(-10,10):
         for x0 in range(-10,10):
             f = lambda x: slope*x + 5.0
             dx = 1.e-3
             Df_x = F.approximateJacobian(f, x0, dx)
             # If x and f are scalar-valued, Df_x should be, too
             self.assertTrue(np.isscalar(Df_x))
             self.assertAlmostEqual(Df_x, slope)
Example #10
 def testApproxJacobianND(self):
     for n in range(10):
         A = np.random.random((n,n))
         x0 = np.random.random((n,1))
         f = lambda x: np.dot(A,x)
         dx = 1.e-6
         Df_x = F.approximateJacobian(f,x0,dx)
         self.assertEqual(Df_x.shape,(n,n))
         np.testing.assert_array_almost_equal(Df_x,A)
Example #11
    def test_JacobianMethod(self):
        f = F.Polynomial([-15, 23, -9, 1])
        dx = 1.e-6
        method = "next"
        x0 = 2.5
        #print(x0)
        Df_x = F.approximateJacobian(f, x0, dx, method)
        self.assertTrue(np.isscalar(Df_x))
        self.assertAlmostEqual(Df_x, (3 * x0**2 - 18 * x0 + 23), places=5)

        method = "former"
        Df_x = F.approximateJacobian(f, x0, dx, method)
        self.assertTrue(np.isscalar(Df_x))
        self.assertAlmostEqual(Df_x, (3 * x0**2 - 18 * x0 + 23), places=5)

        method = "middle"
        Df_x = F.approximateJacobian(f, x0, dx, method)
        self.assertTrue(np.isscalar(Df_x))
        self.assertAlmostEqual(Df_x, (3 * x0**2 - 18 * x0 + 23), places=7)
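test_JacobianMethod above exercises a four-argument form of approximateJacobian that takes a method string. Given the names and the tighter tolerance demanded of "middle" (places=7 versus places=5), "next" is presumably a forward difference, "former" a backward difference, and "middle" a central difference, which is second-order accurate. A scalar-only sketch of that dispatch, offered as an assumption about the implementation rather than the project's code:

def approximateJacobian(f, x, dx=1.e-6, method="next"):
    # Hypothetical scalar sketch of the 'method' dispatch in the test above.
    if method == "next":       # forward difference
        return (f(x + dx) - f(x)) / dx
    elif method == "former":   # backward difference
        return (f(x) - f(x - dx)) / dx
    elif method == "middle":   # central difference, error O(dx**2)
        return (f(x + dx) - f(x - dx)) / (2 * dx)
    raise ValueError("unknown method: %s" % method)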
Example #12
    def step(self, x, fx=None):
        """Take a single step of a Newton method, starting from x. If the
        argument fx is provided, assumes fx = f(x).

        """
        if fx is None:
            fx = self._f(x)

        # Find the derivative of f at x:
        # Check whether an analytical Jacobian has been provided:
        if hasattr(self, '_Df'):  # analytical Jacobian provided
            Df_x = self._Df(x)
        else:  # use the approximate method
            Df_x = F.approximateJacobian(self._f, x, self._dx)

        if np.all(Df_x == 0):
            counter = self._maxiter
            while np.all(Df_x == 0):
                if counter == 0:
                    raise RuntimeError(
                        "Was not able to find a non-zero slope/jacobian near the provided x0"
                    )
                x = x + self._dx
                Df_x = F.approximateJacobian(self._f, x, self._dx)
                counter = counter - 1

        # linalg.solve(A,B) returns the matrix solution to AX = B, so
        # it gives (A^{-1}) B. np.matrix() promotes scalars to 1x1
        # matrices.
        h = np.linalg.solve(np.matrix(Df_x), np.matrix(fx))
        # Suppose x was a scalar. At this point, h is a 1x1 matrix. If
        # we want to return a scalar value for our next guess, we need
        # to re-scalarize h before combining it with our previous
        # x. The function np.asscalar() will act on a numpy array or
        # matrix that has only a single data element inside and return
        # that element as a scalar.
        if np.isscalar(x):
            h = np.asscalar(h)
        else:
            h = np.asarray(h)

        return x - h
    def test_ApproxJacobian1D(self):
        slope = 3.0

        def f(x):
            return slope * x + 5.0

        x0 = 2.0
        dx = 1.e-3
        Df_x = F.approximateJacobian(f, x0, dx)  # calculate derivative
        self.assertTrue(np.isscalar(Df_x))  # check Jacobian is a scalar
        self.assertAlmostEqual(Df_x, slope)
Example #14
    def test_ApproxJacobian2DHigherOrder(self):
        # Test higher order 2D function f where fx = x^2 and
        # fy = (y-3)^2. The Jacobian is [[2x 0],[0 2(y-3)]], which,
        # evaluated at x0 of [0.5, 2.5] should evaluate to
        # [[1.0,0.0],[0.0,-1.0]].
        def f(x):
            return np.matrix([[x[0, 0]**2], [(x[1, 0] - 3.0)**2]])

        x0 = np.matrix([[0.5], [2.5]])
        Df_x = F.approximateJacobian(f, x0)
        self.assertEqual(Df_x.shape, (2, 2))
        A = np.matrix([[1.0, 0.0], [0.0, -1.0]])
        npt.assert_array_almost_equal(Df_x, A)
Example #15
    def step(self, x, fx=None):
        """Take a single step of a Newton method, starting from x. If the
        argument fx is provided, assumes fx = f(x).

        """
        if fx is None:
            fx = self._f(x)

        if self._Df:
            Df_x = self._Df(x)
        else:
            Df_x = F.approximateJacobian(self._f, x, self._dx)

        # linalg.solve(A,B) returns the matrix solution to AX = B, so
        # it gives (A^{-1}) B. np.matrix() promotes scalars to 1x1
        # matrices.
        try:
            h = np.linalg.solve(np.matrix(Df_x), np.matrix(fx))
        except np.linalg.LinAlgError:
            if np.isscalar(x):
                epsilon = 0.1
            else:
                epsilon = np.ones_like(x) * 0.1
            x += epsilon
            fx = self._f(x)
            Df_x = F.approximateJacobian(self._f, x, self._dx)
            h = np.linalg.solve(np.matrix(Df_x), np.matrix(fx))

        # Suppose x was a scalar. At this point, h is a 1x1 matrix. If
        # we want to return a scalar value for our next guess, we need
        # to re-scalarize h before combining it with our previous
        # x. The function np.asscalar() will act on a numpy array or
        # matrix that has only a single data element inside and return
        # that element as a scalar.
        if np.isscalar(x):
            h = np.asscalar(h)

        return x - h
    def test_ApproxJacobian2D(self):
        # u1 = x1 + 2 x2
        # u2 = 3 x1 + 4 x2
        A = np.matrix("1.0 2.0; 3.0 4.0")

        def f(x):
            return A * x

        x0 = np.matrix("5.0; 6.0")
        dx = 1.e-6
        Df_x = F.approximateJacobian(f, x0, dx)  # calculate Jacobian array
        self.assertEqual(Df_x.shape,
                         (2, 2))  # check size of Jacobian array shape
        npt.assert_array_almost_equal(Df_x, A)
Example #17
    def test_ApproxJacobian1(self):
        slope = 3.0

        # Yes, you can define a function inside a function/method. And
        # it has scope only within the method within which it's
        # defined (unless you return it to the outside world, which
        # you can do in Python with no need for anything like C's
        # malloc() or C++'s new() )
        def f(x):
            return slope * x + 5.0

        x0 = 2.0
        dx = 1.e-3
        Df_x = F.approximateJacobian(f, x0, dx)
        # self.assertEqual(Df_x.shape, (1, 1))
        # If x and f are scalar-valued, Df_x should be, too
        self.assertTrue(np.isscalar(Df_x))
        self.assertAlmostEqual(Df_x, slope)
Example #18
    def step(self, x, fx=None):
        """Take a single step of a Newton method, starting from x. If the
        argument fx is provided, assumes fx = f(x).

        """

        # Decide whether to use the analytic or the approximate form of the Jacobian
        if fx is None:
            fx = self._f(x)
        if self._Df == 0:  # Df not supplied: use the finite-difference approximation
            Df_x = F.approximateJacobian(self._f, x, self._dx)
        else:  # Df supplied: evaluate the analytic Jacobian
            Df_x = F.AnalyticJacobian(self._Df, x)

        # Solve Df_x * h = f(x), i.e. h = Df_x^{-1} f(x)
        h = np.linalg.solve(np.matrix(Df_x), np.matrix(fx))

        # If x is a scalar, re-scalarize h before combining it with x
        if np.isscalar(x):
            h = np.asscalar(h)

        return x - h
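F.AnalyticJacobian is called above but not shown in this listing. From the call site it appears to simply evaluate the user-supplied Jacobian callable Df at x, possibly promoting the result so the subsequent np.linalg.solve treats it as a matrix. A guess at its shape (not the project's code):

import numpy as np

def AnalyticJacobian(Df, x):
    # Hypothetical sketch: evaluate the supplied analytic Jacobian at x and
    # promote the result to a matrix (scalars become 1x1 matrices).
    return np.matrix(Df(x))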
Example #19
    def test_ApproxJacobian2D(self):
        # numpy matrices can also be initialized with strings. The
        # semicolon separates rows; spaces (or commas) delimit entries
        # within a row.
        A = np.matrix("1.0 2.0; 3.0 4.0")

        def f(x):
            # The * operator for numpy matrices is overloaded to mean
            # matrix-multiplication, rather than elementwise
            # multiplication as it does for numpy arrays
            return A * x

        # The vector-valued function f defined above is the following:
        # if we let u = f(x), then
        #
        # u1 = x1 + 2 x2
        # u2 = 3 x1 + 4 x2
        #
        # The Jacobian of this function is constant and exactly equal
        # to the matrix A. approximateJacobian should thus return
        # something pretty close to A.

        x0 = np.matrix("5.0; 6.0")
        dx = 1.e-6
        Df_x = F.approximateJacobian(f, x0, dx)

        # Make sure approximateJacobian returns a Jacobian of the right shape
        self.assertEqual(Df_x.shape, (2,2))
        # numpy arrays and matrices vectorize comparisons. So if a & b
        # are arrays, the expression a==b will itself be an array of
        # booleans. But an array of booleans does not itself evaluate
        # to a clean boolean (this is an exception to the general
        # Python rule that "every object can be interpreted as a
        # boolean"), so normal assert statements will break. We need
        # array-specific assert statements found in numpy.testing
        npt.assert_array_almost_equal(Df_x, A)
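All of these snippets are methods of unittest.TestCase subclasses (they use self.assertEqual, self.assertTrue, and the numpy.testing assertions). In the original projects, the test module would typically end with a main guard so the suite can be run directly; a conventional closing block for such a module (assuming unittest is already imported at the top, as it must be for the TestCase classes) is:

if __name__ == '__main__':
    unittest.main()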