def test_sigma_condition(self):
     f = lambda x: x**2 - 1
     df = lambda x: 2 * x
     x0 = 1
     with self.assertRaises(ValueError):
         gradient_descent(f, df, x0, sigma=-100)
         gradient_descent(f, df, x0, sigma=100)
# Example #2
 def test_gradient_maxima(self, sigma=0.5):
     # Starting exactly on the local maximum of -x**2 (where df(x0) == 0)
     # is expected to raise ValueError — presumably because the gradient
     # step cannot make progress; TODO confirm against gradient_descent's
     # documented contract.
     f = lambda x: -x**2
     df = lambda x: -2*x
     x0 = 0
     # Separate contexts: inside a single assertRaises block the second
     # call is unreachable once the first one raises.
     with self.assertRaises(ValueError):
         gradient_descent(f, df, x0, sigma=0.5)
     with self.assertRaises(ValueError):
         gradient_descent(f, df, x0, sigma=0.2)
 def test_sigma_condition(self):
     # Out-of-range sigma values must raise ValueError.
     f = lambda x: x**2 - 1
     df = lambda x: 2*x
     x0 = 1
     # One assertRaises per call: with both calls in one context the
     # second statement never ran (the first raise exits the block).
     with self.assertRaises(ValueError):
         gradient_descent(f, df, x0, sigma=-100)
     with self.assertRaises(ValueError):
         gradient_descent(f, df, x0, sigma=100)
# Example #4
    def test_gradiet_descent_error(self):
        # sigma outside the open interval (0, 1) must raise ValueError.
        # Indentation normalized: the original mixed a tab-indented line
        # with space-indented ones, which is a TabError in Python 3.
        f = lambda x: x**2 - 1
        df = lambda x: 2*x
        x0 = 1
        # Separate assertRaises contexts so both boundary values are
        # actually exercised (the second call was unreachable before).
        with self.assertRaises(ValueError):
            gradient_descent(f, df, x0, sigma=1.1)
        with self.assertRaises(ValueError):
            gradient_descent(f, df, x0, sigma=-0.1)
 def test_gradient_error(self):
     # Demonstrates how to test that a function raises an error:
     # the boundary sigma values 1 and 0 must be rejected.
     f = lambda x: x**2 - 1
     df = lambda x: 2 * x
     x0 = -2
     # One context per call — previously the second call never ran
     # because the first raise exited the shared assertRaises block.
     with self.assertRaises(ValueError):
         gradient_descent(f, df, x0, sigma=1, epsilon=1e-8)
     with self.assertRaises(ValueError):
         gradient_descent(f, df, x0, sigma=0, epsilon=1e-8)
# Example #6
    def test_gradient_doubleWell(self, sigma=0.5):
        """Double-well potential: descent must land in a minimum at x = +/-1.

        Indentation normalized — the original mixed tab-indented lines
        with space-indented ones (a TabError in Python 3).
        """
        f = lambda x: 0.25*x**4 - 0.5*x**2
        df = lambda x: x**3 - x
        # From the central maximum x0 = 0 the implementation apparently
        # falls into the left well (x = -1) — TODO confirm this is the
        # intended tie-breaking behavior of gradient_descent.
        x0 = 0
        xmin_actual = -1
        xmin = gradient_descent(f, df, x0, sigma=0.5)
        self.assertAlmostEqual(xmin, xmin_actual)

        # Slightly right of the maximum the descent reaches the right well.
        x0 = 0.1
        xmin_actual = 1
        xmin = gradient_descent(f, df, x0, sigma=0.5)
        self.assertAlmostEqual(xmin, xmin_actual)
# Example #7
    def test_double_well(self):
        # A double-well function: local maximum at x = 0 between minima at
        # x = +/-1.  Tabs replaced with spaces — the original had a
        # tab-indented body under a space-indented def (TabError).
        f = lambda x: 0.25*x**4 - 0.5*x**2
        df = lambda x: x**3 - x
        x0 = 0  # local maximum in between the 2 minima
        x1 = gradient_descent(f, df, x0, sigma=0.5)
        x1_actual = -1
        self.assertAlmostEqual(x1, x1_actual, places=2)
        x0 = 0.1
        x1_actual = 1
        x1 = gradient_descent(f, df, x0, sigma=0.5)
        self.assertAlmostEqual(x1, x1_actual, places=2)
 def test_gradient_descent_robust(self):
     # Quartic landscape with two minima; starting between them at
     # x0 = 2.5 the descent must converge to one of the two.
     poly = lambda x: 0.1*x**4 - x**3 + 3.5*x**2 - 5*x + 2.4
     dpoly = lambda x: 0.4*x**3 - 3*x**2 + 7*x - 5
     result = gradient_descent(poly, dpoly, 2.5, 0.3, 1e-10)
     minima = (3.618034, 1.381966)
     self.assertTrue(any(round(result - m, 7) == 0 for m in minima))
# Example #9
    def test_gradient_parabola(self):
        # Convex parabola: the unique minimum of x**2 - 1 is at x = 0.
        # Indentation normalized — the original mixed tab-indented lines
        # with space-indented ones (a TabError in Python 3).
        f = lambda x: x**2 - 1
        df = lambda x: 2*x
        x0 = 1
        xmin_actual = 0.0
        xmin = gradient_descent(f, df, x0, sigma=0.5)
        self.assertAlmostEqual(xmin, xmin_actual)
# Example #10
 def test_gradient_quartic(self):
     # Quartic bowl: x**4 has its only minimum at the origin.  The very
     # flat gradient near 0 makes convergence slow, hence places=2.
     quartic = lambda x: x**4
     slope = lambda x: 4*x**3
     found = gradient_descent(quartic, slope, 0.5, sigma=0.6)
     self.assertAlmostEqual(found, 0.0, places=2)
# Example #11
    def test_convexfunctions(self):
        # Verifies that gradient descent works correctly on a variety of
        # convex example functions.  Indentation normalized — the original
        # mixed tab-indented lines with space-indented ones (TabError).
        f = lambda x: x**2
        df = lambda x: 2*x
        x0 = 1
        x1 = gradient_descent(f, df, x0)
        x1_actual = 0
        self.assertAlmostEqual(x1, x1_actual)

        # x**4 converges more slowly near its flat minimum, so only two
        # decimal places are required.
        f = lambda x: x**4
        df = lambda x: 4*x**3
        x0 = 0.5
        x1 = gradient_descent(f, df, x0, sigma=0.6)
        x1_actual = 0
        self.assertAlmostEqual(x1, x1_actual, places=2)
 def test_gradient_descent_robust(self):
     # Same quartic double-minimum landscape; accept convergence to
     # either stationary minimum.
     shape = lambda x: 0.1 * x**4 - x**3 + 3.5 * x**2 - 5 * x + 2.4
     dshape = lambda x: 0.4 * x**3 - 3 * x**2 + 7 * x - 5
     xf = gradient_descent(shape, dshape, 2.5, 0.3, 1e-10)
     hit_first = round(xf - 3.618034, 7) == 0
     hit_second = round(xf - 1.381966, 7) == 0
     self.assertTrue(hit_first or hit_second)
# Example #13
    def test_gradient_descent_trig(self):
        # From x0 = 0.1 on sin(x), descent should reach the nearest local
        # minimum at -pi/2.  Tabs replaced with spaces — the original had
        # a tab-indented body under a space-indented def (TabError).
        f = lambda x: sin(x)
        df = lambda x: cos(x)
        x0 = 0.1
        x1 = gradient_descent(f, df, x0, sigma=0.8)
        x1_actual = -(1.0*numpy.pi)/2
        self.assertAlmostEqual(x1, x1_actual, places=2)
 def test_gradient_descent3(self):
     # Small scaling factor sigma; the cubic x**3 - x has its local
     # minimum at 1/sqrt(3) ~= 0.57735026.
     cubic = lambda x: x**3 - x
     dcubic = lambda x: 3 * x**2 - 1
     result = gradient_descent(cubic, dcubic, 1, sigma=0.1, epsilon=1e-8)
     self.assertAlmostEqual(result, 0.57735026)
    def test_gradient_descent2(self):
        # Functions with a single global minimum: x**2 - 1 is minimized
        # at x = 0 regardless of the starting point.
        parabola = lambda x: x**2 - 1
        dparabola = lambda x: 2 * x
        start = -2
        found = gradient_descent(parabola, dparabola, start, sigma=0.5, epsilon=1e-8)
        self.assertAlmostEqual(found, 0.0)
 def test_gradient_descent1(self):
     # Example: f(x) = x**3 - 6x**2 + 9x + 15, which has a local maximum
     # at x = 1 and a local minimum at x = 3; the start point 1.6 lies
     # between them.
     # NOTE(review): the expected value 99 looks inconsistent with the
     # function's stationary points — confirm against gradient_descent's
     # actual behavior for this input (possibly divergence is expected).
     cubic = lambda x: x**3 - 6 * x**2 + 9 * x + 15
     dcubic = lambda x: 3 * x**2 - 12 * x + 9
     start = 1.6
     found = gradient_descent(cubic, dcubic, start, sigma=0.5, epsilon=1e-8)
     self.assertAlmostEqual(found, 99)
# Example #17
    def test_sigmaAndEpsilon(self):
        # gradient_step and gradient_descent must raise ValueError for
        # sigma / epsilon values outside their valid ranges.
        # Indentation normalized (the original mixed tabs and spaces —
        # TabError in Python 3) and each call placed in its own
        # assertRaises context: inside one shared context only the first
        # raising call ever executed; the other four were untested.
        f = lambda x: x**2 - 1
        df = lambda x: 2*x
        x = 1
        with self.assertRaises(ValueError):
            gradient_step(x, df, sigma=1.5)
        with self.assertRaises(ValueError):
            gradient_step(x, df, sigma=-1)
        with self.assertRaises(ValueError):
            gradient_descent(f, df, x, sigma=0.5, epsilon=-1)
        with self.assertRaises(ValueError):
            gradient_descent(f, df, x, sigma=0.5, epsilon=2)
        with self.assertRaises(ValueError):
            gradient_descent(f, df, x, sigma=2, epsilon=0.1)
 def test_gradient_descent_nearmin_smallsig(self):
     # Starting close to the minimum with a small step size should still
     # converge to the stationary point near 1.381966.
     poly = lambda x: 0.1*x**4 - x**3 + 3.5*x**2 - 5*x + 2.4
     dpoly = lambda x: 0.4*x**3 - 3*x**2 + 7*x - 5
     located = gradient_descent(poly, dpoly, 1.3, 0.05, 1e-10)
     self.assertAlmostEqual(located, 1.381966)  # 1.3819660093917199
 def test_gradient_descent(self):
     # Basic sanity check on a parabola: the minimum of x**2 - 1 is x = 0.
     parab = lambda x: x**2 - 1
     dparab = lambda x: 2*x
     self.assertAlmostEqual(gradient_descent(parab, dparab, 1, 0.5, 1e-10), 0.0)
 def test_gradient_descent(self):
     # The parabola x**2 - 1 has its single minimum at the origin.
     shape = lambda x: x * x - 1
     dshape = lambda x: x + x
     located = gradient_descent(shape, dshape, 1, 0.5, 1e-10)
     self.assertAlmostEqual(located, 0.0)
 def test_gradient_descent_nearmin_smallsig(self):
     # Near-minimum start with a tiny sigma: the convergence target is
     # the stationary point at ~1.3819660093917199.
     quartic = lambda x: 0.1 * x**4 - x**3 + 3.5 * x**2 - 5 * x + 2.4
     dquartic = lambda x: 0.4 * x**3 - 3 * x**2 + 7 * x - 5
     end = gradient_descent(quartic, dquartic, 1.3, 0.05, 1e-10)
     self.assertAlmostEqual(end, 1.381966)