def test_bfgs_madsen(self):
    """Minimize the Madsen test function with BFGS two ways and check both
    reach the known optimum region within 8 iterations.

    First scipy's ``fmin_bfgs`` is driven directly with a chumpy-backed
    objective and analytic gradient; then chumpy's own ``minimize`` wrapper
    (which delegates to the same scipy BFGS) is run from the same start point.
    In both cases the half-residual must fall below 0.386599528247, the
    value this problem is known to reach by iteration 8.
    """
    from ch import SumOfSquares
    import scipy.optimize

    # Scalar objective: sum of squared Madsen residuals, as a chumpy node
    # so the gradient is available via autodiff.
    obj = Ch(lambda x: SumOfSquares(Madsen(x=x)))

    def errfunc(x):
        # Objective value at x (updates the shared chumpy graph).
        obj.x = Ch(x)
        return obj.r

    def gradfunc(x):
        # Analytic gradient of the objective at x, flattened for scipy.
        obj.x = Ch(x)
        return obj.dr_wrt(obj.x).ravel()

    x0 = np.array((3., 1.))

    # Optimize with scipy's built-in bfgs.
    # Note: with 8 iters, this actually requires 14 gradient evaluations.
    # This can be verified by setting "disp" to 1.
    scipy.optimize.fmin_bfgs(errfunc, x0, fprime=gradfunc, maxiter=8, disp=0)
    # assertLess (rather than assertTrue on a comparison) so a failure
    # reports the actual achieved value.
    self.assertLess(obj.r / 2., 0.386599528247)

    # Optimize with chumpy's minimize (which uses scipy's bfgs).
    obj.x = x0
    minimize(fun=obj, x0=[obj.x], method='bfgs', options={'maxiter': 8})
    self.assertLess(obj.r / 2., 0.386599528247)
def test_bfgs_madsen(self):
    """Run BFGS on the Madsen problem through two entry points and verify
    both bring the half-objective under 0.4 within 8 iterations: scipy's
    ``fmin_bfgs`` called directly with an analytic gradient, then chumpy's
    ``minimize`` wrapper (which delegates to the same scipy BFGS).
    """
    from ch import SumOfSquares
    import scipy.optimize

    # Build the scalar objective as a chumpy node so dr_wrt gives us
    # the gradient for free.
    objective = Ch(lambda x: SumOfSquares(Madsen(x=x)))

    def residuals(x):
        objective.x = Ch(x)
        return objective.r

    def jacobian(x):
        objective.x = Ch(x)
        return objective.dr_wrt(objective.x).ravel()

    start = np.array((3., 1.))

    # Direct scipy path.
    # Note: with 8 iters, this actually requires 14 gradient evaluations.
    # This can be verified by setting "disp" to 1.
    #tm = time.time()
    result = scipy.optimize.fmin_bfgs(residuals, start, fprime=jacobian, maxiter=8, disp=0)
    #print 'forward: took %.es' % (time.time() - tm,)
    self.assertLess(objective.r / 2., 0.4)

    # chumpy's minimize path, restarted from the same initial point.
    objective.x = start
    minimize(fun=objective, x0=[objective.x], method='bfgs', options={'maxiter': 8, 'disp': False})
    self.assertLess(objective.r / 2., 0.4)