Example #1
    def test_bfgs_madsen(self):
        from ch import SumOfSquares
        import scipy.optimize
        obj = Ch(lambda x: SumOfSquares(Madsen(x=x)))

        def errfunc(x):
            obj.x = Ch(x)
            return obj.r

        def gradfunc(x):
            obj.x = Ch(x)
            return obj.dr_wrt(obj.x).ravel()

        x0 = np.array((3., 1.))

        # Optimize with built-in bfgs.
        # Note: with 8 iters, this actually requires 14 gradient evaluations.
        # This can be verified by setting "disp" to 1.
        #tm = time.time()
        x1 = scipy.optimize.fmin_bfgs(errfunc,
                                      x0,
                                      fprime=gradfunc,
                                      maxiter=8,
                                      disp=0)
        #print 'forward: took %.es' % (time.time() - tm,)
        self.assertTrue(obj.r / 2. < 0.386599528247)

        # Optimize with chumpy's minimize (which uses scipy's bfgs).
        obj.x = x0
        minimize(fun=obj, x0=[obj.x], method='bfgs', options={'maxiter': 8})
        self.assertTrue(obj.r / 2. < 0.386599528247)
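The errfunc/gradfunc pair above is just an adapter that lets SciPy's BFGS drive a chumpy objective: errfunc returns the current residual value, gradfunc the flattened Jacobian. Below is a minimal, self-contained sketch of the same adapter pattern using a plain quadratic in place of the Madsen residuals (the quadratic and its gradient are illustrative, not part of chumpy):

import numpy as np
import scipy.optimize

def errfunc(x):
    # objective value: f(x) = sum(x**2)
    return np.sum(x**2)

def gradfunc(x):
    # analytic gradient of f
    return 2.0 * x

x0 = np.array((3., 1.))
# fmin_bfgs accepts the objective and, optionally, its gradient via fprime
x1 = scipy.optimize.fmin_bfgs(errfunc, x0, fprime=gradfunc, disp=0)
print(x1)  # converges to (approximately) the origin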
Example #2
    def test_bfgs_madsen(self):
        from ch import SumOfSquares
        import scipy.optimize
        obj = Ch(lambda x: SumOfSquares(Madsen(x=x)))
        
        def errfunc(x):
            obj.x = Ch(x)
            return obj.r
        
        def gradfunc(x):
            obj.x = Ch(x)
            return obj.dr_wrt(obj.x).ravel()
        
        x0 = np.array((3., 1.))

        # Optimize with built-in bfgs.
        # Note: with 8 iters, this actually requires 14 gradient evaluations.
        # This can be verified by setting "disp" to 1.
        #tm = time.time()
        x1 = scipy.optimize.fmin_bfgs(errfunc, x0, fprime=gradfunc, maxiter=8, disp=0)
        #print 'forward: took %.es' % (time.time() - tm,)
        self.assertLess(obj.r/2., 0.4)

        # Optimize with chumpy's minimize (which uses scipy's bfgs).
        obj.x = x0
        minimize(fun=obj, x0=[obj.x], method='bfgs', options={'maxiter': 8, 'disp': False})
        self.assertLess(obj.r/2., 0.4)
Example #3
def Rosen():

    args = {'x1': Ch(-120.), 'x2': Ch(-100.)}
    r1 = Ch(lambda x1, x2: (x2 - x1**2.) * 10., args)
    r2 = Ch(lambda x1: x1 * -1. + 1, args)

    func = [r1, r2]

    return func, [args['x1'], args['x2']]
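r1 and r2 above are the classic Rosenbrock residual pair, r1 = 10*(x2 - x1**2) and r2 = 1 - x1, whose sum of squares is minimized at (1, 1). A quick chumpy-free check of that claim with SciPy (a sketch; the starting point here is chosen for brevity, not taken from the example):

import numpy as np
import scipy.optimize

def residuals(x):
    # Rosenbrock residuals: r1 = 10*(x2 - x1**2), r2 = 1 - x1
    x1, x2 = x
    return np.array([10.0 * (x2 - x1**2), 1.0 - x1])

sol = scipy.optimize.least_squares(residuals, x0=np.array([-3.0, -2.0]))
print(sol.x)  # approximately [1., 1.]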
Example #4
File: linalg.py  Project: pujades/chumpy
def lstsq(a, b, rcond=-1):
    if rcond != -1:
        raise NotImplementedError('non-default rcond not yet implemented')

    # Solve via the (differentiable) pseudo-inverse so x remains a Ch node.
    x = Ch(lambda a, b: pinv(a).dot(b))
    x.a = a
    x.b = b
    residuals = ch.sum((x.a.dot(x) - x.b) ** 2, axis=0)
    rank = NotImplementedError  # rank and singular values not computed yet
    s = NotImplementedError

    return x, residuals, rank, s
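A hedged usage sketch for the lstsq wrapper above: on a full-rank system the pseudo-inverse solution should agree with numpy's least-squares solution, while x additionally remains differentiable through chumpy. This assumes lstsq and pinv as defined in this module plus Ch from chumpy; the tolerance is illustrative:

import numpy as np
# assumes: from chumpy import Ch, and lstsq/pinv as defined in this module

a = np.random.randn(5, 3)
b = np.random.randn(5)

x, residuals, rank, s = lstsq(Ch(a), Ch(b))
x_np = np.linalg.lstsq(a, b, rcond=None)[0]
# x is a Ch node; x.r holds its current numeric value
np.testing.assert_allclose(x.r.ravel(), x_np.ravel(), atol=1e-8)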
Example #5
    def test_inv3(self):
        """Test linalg.inv with broadcasting support."""

        from linalg import Inv

        mtx1 = Ch(np.sin(2**np.arange(12)).reshape((3, 2, 2)))
        mtx1_inv = Inv(mtx1)
        dr = mtx1_inv.dr_wrt(mtx1)

        eps = 1e-5
        mtx2 = mtx1.r.copy()
        input_diff = np.sin(np.arange(mtx2.size)).reshape(mtx2.shape) * eps
        mtx2 += input_diff
        mtx2_inv = Inv(mtx2)

        output_diff_emp = (np.linalg.inv(mtx2) - np.linalg.inv(mtx1.r)).ravel()
        output_diff_pred = Inv(mtx1).dr_wrt(mtx1).dot(input_diff.ravel())

        # print output_diff_emp
        # print output_diff_pred

        self.assertTrue(
            np.max(np.abs(output_diff_emp.ravel() -
                          output_diff_pred.ravel())) < eps * 1e-3)
        self.assertTrue(
            np.max(np.abs(mtx1_inv.r - np.linalg.inv(mtx1.r)).ravel()) == 0)
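test_inv1, test_inv2, and test_inv3 all follow the same finite-difference pattern: perturb the input by a small dX and compare the empirical change f(X + dX) - f(X) against the Jacobian-vector product predicted by dr_wrt. For the matrix inverse, the underlying identity is d(inv(X)) = -inv(X) dX inv(X). A chumpy-free sketch of the check:

import numpy as np

def inv_jvp(X, dX):
    # analytic directional derivative of the matrix inverse:
    # d(inv(X)) = -inv(X) @ dX @ inv(X)
    Xi = np.linalg.inv(X)
    return (-Xi @ dX @ Xi).ravel()

X = np.sin(2**np.arange(9.0)).reshape(3, 3)  # same matrix as test_inv1
eps = 1e-5
dX = np.sin(np.arange(9.0)).reshape(3, 3) * eps
emp = (np.linalg.inv(X + dX) - np.linalg.inv(X)).ravel()
assert np.max(np.abs(emp - inv_jvp(X, dX))) < eps * 1e-3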
Example #6
    def test_dogleg_madsen(self):
        obj = Madsen(x=Ch(np.array((3., 1.))))
        minimize(fun=obj,
                 x0=[obj.x],
                 method='dogleg',
                 options={
                     'maxiter': 34,
                     'disp': False
                 })
        self.assertTrue(np.sum(obj.r**2) / 2 < 0.386599528247)
Example #7
    def test_ic(self):
        child = Child(a=Ch(10))
        parent = Parent(child=child, aliased=Ch(50))

        junk = [parent.aliased_dependency for k in range(3)]
        self.assertTrue(parent.dcount == 1)
        self.assertTrue(parent.ocount == 0)
        self.assertTrue(parent.rcount == 0)

        junk = [parent.r for k in range(3)]
        self.assertTrue(parent.dcount == 1)
        self.assertTrue(parent.ocount == 1)
        self.assertTrue(parent.rcount == 1)

        parent.aliased = Ch(20)
        junk = [parent.aliased_dependency for k in range(3)]
        self.assertTrue(parent.dcount == 2)
        self.assertTrue(parent.ocount == 1)
        self.assertTrue(parent.rcount == 1)

        junk = [parent.r for k in range(3)]
        self.assertTrue(parent.dcount == 2)
        self.assertTrue(parent.ocount == 2)
        self.assertTrue(parent.rcount == 2)
Example #8
    def test_inv2(self):
        from linalg import Inv

        eps = 1e-8
        idx = 13

        mtx1 = np.random.rand(100).reshape((10, 10))
        mtx2 = mtx1.copy()
        mtx2.ravel()[idx] += eps

        diff_emp = (np.linalg.inv(mtx2) - np.linalg.inv(mtx1)) / eps

        mtx1 = Ch(mtx1)
        diff_pred = Inv(mtx1).dr_wrt(mtx1)[:, idx].reshape(diff_emp.shape)
        #print diff_emp
        #print diff_pred
        #print diff_emp - diff_pred
        self.assertTrue(
            np.max(np.abs(diff_pred.ravel() - diff_emp.ravel())) < 1e-4)
Example #9
    def test_svd(self):
        from linalg import Svd
        eps = 1e-3
        idx = 10

        data = np.sin(np.arange(300) * 100 + 10).reshape((-1, 3))
        data[3, :] = data[3, :] * 0 + 10
        data[:, 1] *= 2
        data[:, 2] *= 4
        data = data.copy()
        u, s, v = np.linalg.svd(data, full_matrices=False)
        data = Ch(data)
        data2 = data.r.copy()
        data2.ravel()[idx] += eps
        u2, s2, v2 = np.linalg.svd(data2, full_matrices=False)

        svdu, svdd, svdv = Svd(x=data)

        # test singular values
        diff_emp = (s2 - s) / eps
        diff_pred = svdd.dr_wrt(data)[:, idx]
        #print diff_emp
        #print diff_pred
        ratio = diff_emp / diff_pred
        #print ratio
        self.assertTrue(np.max(np.abs(ratio - 1.)) < 1e-4)

        # test V
        diff_emp = (v2 - v) / eps
        diff_pred = svdv.dr_wrt(data)[:, idx].reshape(diff_emp.shape)
        ratio = diff_emp / diff_pred
        #print ratio
        self.assertTrue(np.max(np.abs(ratio - 1.)) < 1e-2)

        # test U
        diff_emp = (u2 - u) / eps
        diff_pred = svdu.dr_wrt(data)[:, idx].reshape(diff_emp.shape)
        ratio = diff_emp / diff_pred
        #print ratio
        self.assertTrue(np.max(np.abs(ratio - 1.)) < 1e-2)
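The singular-value part of this test exercises a standard identity: for A = U diag(s) V^T with distinct singular values, the first-order change of s_k under a perturbation dA is u_k^T (dA) v_k. A quick NumPy verification of that identity (a sketch, independent of chumpy):

import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((6, 3))
dA = rng.standard_normal((6, 3)) * 1e-7

u, s, vt = np.linalg.svd(A, full_matrices=False)
s2 = np.linalg.svd(A + dA, full_matrices=False)[1]

# predicted first-order change of each singular value: u_k^T dA v_k
pred = np.array([u[:, k] @ dA @ vt[k, :] for k in range(len(s))])
assert np.max(np.abs((s2 - s) - pred)) < 1e-10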
Example #10
    def test_inv1(self):
        from linalg import Inv

        mtx1 = Ch(np.sin(2**np.arange(9)).reshape((3, 3)))
        mtx1_inv = Inv(mtx1)
        dr = mtx1_inv.dr_wrt(mtx1)

        eps = 1e-5
        mtx2 = mtx1.r.copy()
        input_diff = np.sin(np.arange(mtx2.size)).reshape(mtx2.shape) * eps
        mtx2 += input_diff
        mtx2_inv = Inv(mtx2)

        output_diff_emp = (np.linalg.inv(mtx2) - np.linalg.inv(mtx1.r)).ravel()
        output_diff_pred = Inv(mtx1).dr_wrt(mtx1).dot(input_diff.ravel())

        #print output_diff_emp
        #print output_diff_pred

        self.assertTrue(
            np.max(np.abs(output_diff_emp - output_diff_pred)) < eps * 1e-4)
        self.assertTrue(
            np.max(np.abs(mtx1_inv.r - np.linalg.inv(mtx1.r)).ravel()) == 0)
Example #11
    def set_and_get_dr(self, x_in):
        self.x = Ch(x_in)
        return self.dr_wrt(self.x).flatten()
Example #12
    def set_and_get_r(self, x_in):
        self.x = Ch(x_in)
        return col(self.r)
Example #13
def gradfunc(x):
    obj.x = Ch(x)
    return obj.dr_wrt(obj.x).ravel()
Example #14
def errfunc(x):
    obj.x = Ch(x)
    return obj.r
Example #15
# Note: Ch here appears to be a travel-search helper class from an
# unrelated project, not chumpy's Ch.
def ch(a_city, d_city, date):
    return Ch.get_ch(a_city, d_city, date)