Code example #1
    def optimizeMatching(self):
        # Set the stopping threshold from the norm of the initial gradient.
        grd = self.getGradient(self.gradCoeff)
        [grd2] = self.dotProduct(grd, [grd])

        self.gradEps = np.sqrt(grd2) / 1000
        self.muEps = 1.0
        it = 0

        # Augmented Lagrangian outer loop: inner CG solve, then multiplier update.
        while (self.muEps > 0.005) and (it < self.maxIter_al):
            print('Starting Minimization: gradEps =', self.gradEps, 'muEps =', self.muEps, 'mu =', self.mu)
            cg.cg(self, verb=self.verb, maxIter=self.maxIter_cg, TestGradient=self.testGradient, epsInit=0.1)
            if self.nconstr == 0:
                break
            # First-order multiplier update: lambda <- lambda - c(x)/mu.
            for t in range(self.lmb.shape[0]):
                self.lmb[t, :] -= self.cval[t, :] / self.mu
            print('mean lambdas', np.fabs(self.lmb).sum() / self.lmb.size)
            if self.converged:
                self.gradEps *= 0.75
                if (self.cval ** 2).sum() / self.cval.size > self.muEps ** 2:
                    # Violation still large: halve mu to strengthen the penalty.
                    self.mu *= 0.5
                else:
                    # Constraints satisfied at this tolerance: tighten it.
                    self.muEps /= 2
            else:
                self.mu *= 0.9
            self.obj = None
            it += 1

        return self.fvDef
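
Examples #1, #2, and #4 share the same augmented Lagrangian structure: an inner conjugate-gradient solve at fixed multipliers, a first-order multiplier update, and a schedule that either strengthens the penalty or tightens the tolerance depending on the mean squared constraint violation. Below is a minimal self-contained sketch of that outer loop; the callables solve_unconstrained and constraint are hypothetical stand-ins for the cg.cg call and the constraint evaluation above.

    import numpy as np

    def augmented_lagrangian(x, constraint, solve_unconstrained,
                             mu=1.0, mu_eps=1.0, max_outer=50):
        # One multiplier per constraint value (stand-in for self.lmb above).
        lmb = np.zeros_like(constraint(x))
        for _ in range(max_outer):
            if mu_eps <= 0.005:
                break
            # Inner solve: minimize objective + penalty at fixed lmb and mu.
            x = solve_unconstrained(x, lmb, mu)
            cval = constraint(x)
            # First-order multiplier update, as in example #1.
            lmb -= cval / mu
            if (cval ** 2).mean() > mu_eps ** 2:
                mu *= 0.5    # violation too large: strengthen the penalty
            else:
                mu_eps /= 2  # violation small enough: tighten the tolerance
        return x

Because the penalty divides by mu (as in self.cval[t, :] / self.mu), halving mu doubles the penalty weight, which is why the schedule shrinks mu rather than growing it.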
Code example #2
    def optimizeMatching(self):
        self.coeffZ = 10.
        self.coeffAff = self.coeffAff2
        grd = self.getGradient(self.gradCoeff)
        [grd2] = self.dotProduct(grd, [grd])

        self.gradEps = np.sqrt(grd2) / 100
        self.coeffAff = self.coeffAff1
        self.muEps = 1.0
        it = 0
        while (self.muEps > 0.001) and (it < self.maxIter_al):
            logging.info('Starting Minimization: gradEps = %f muEps = %f mu = %f' % (self.gradEps, self.muEps, self.mu))
            #self.coeffZ = max(1.0, self.mu)
            cg.cg(self, verb=self.verb, maxIter=self.maxIter_cg, TestGradient=self.testGradient, epsInit=0.1)
            self.coeffAff = self.coeffAff2
            # Reset the multipliers from the current constraint values.
            for t in range(self.Tsize + 1):
                self.lmb[t, :] = -self.cval[t, :] / self.mu
            logging.info('mean lambdas %f' % (np.fabs(self.lmb).sum() / self.lmb.size))
            if self.converged:
                self.gradEps *= .75
                if (self.cstr ** 2).sum() / self.cval.size > self.muEps ** 2:
                    self.mu *= 0.5
                else:
                    self.muEps /= 2
            else:
                self.mu *= 0.9
            self.obj = None
            it += 1
Code example #3
    def computeTemplate(self):
        grd = self.getGradient(self.gradCoeff)
        [grd2] = self.dotProduct(grd, [grd])

        # Threshold: a fraction of the initial gradient norm, floored at 0.1.
        self.gradEps = max(0.1, np.sqrt(grd2) / 10000)
        cg.cg(self, verb=self.verb, maxIter=self.maxIter, TestGradient=True)
        return self
Code example #4
    def optimizeMatching(self):
        self.coeffZ = 1.0
        grd = self.getGradient(self.gradCoeff)
        [grd2] = self.dotProduct(grd, [grd])

        self.gradEps = np.sqrt(grd2) / 1000
        self.muEps = 0.01
        it = 0
        self.muEpsCount = 1
        while (self.muEpsCount < 20) and (it < self.maxIter_al):
            print('Starting Minimization: gradEps =', self.gradEps, 'muEps =', self.muEps, 'mu =', self.mu)
            self.iter = 0
            #self.coeffZ = max(1.0, self.mu)
            cg.cg(self, verb=self.verb, maxIter=self.maxIter_cg, TestGradient=self.testGradient, epsInit=0.1)
            if self.converged:
                if (self.cval ** 2).sum() / self.cval.size > self.muEps ** 2:
                    # Constraints still violated: strengthen the penalty and relax gradEps.
                    self.mu *= 0.1
                    self.gradEps *= 10
                else:
                    self.gradEps *= .5
                    # Multiplier update through the derivative of the constraint penalty.
                    for t in range(self.Tsize + 1):
                        self.lmb[t, ...] -= 0.5 * self.derCstrFun(self.cval[t, ...] / self.mu) / self.mu
                    print('mean lambdas', np.fabs(self.lmb).sum() / self.lmb.size)
                    self.muEps = np.sqrt((self.cval ** 2).sum() / (1.5 * self.cval.size))
                    self.muEpsCount += 1
                    #self.muEps /2
            # else:
            #     self.mu *= 0.9
            self.obj = None
            it += 1
Code example #5
    def optimize(self):
        # Compute the threshold only if the caller has not set one.
        if self.gradEps < 0:
            grd = self.getGradient(self.gradCoeff)
            [grd2] = self.dotProduct(grd, [grd])
            self.gradEps = max(0.001, np.sqrt(grd2) / 10000)

        print('Gradient lower bound:', self.gradEps)
        cg.cg(self, verb=True, maxIter=self.maxIter, TestGradient=self.testGradient)
Code example #6
    def computeMatching(self):
        (rg, N, T) = self.getSimData()
        for it in range(500):
            self.optimizeN()
            conjugateGradient.cg(self, True,
                                 maxIter=self.nonlinear_cg_max_iter,
                                 TestGradient=True, epsInit=self.cg_init_eps)
            # Periodically write intermediate results to disk.
            if it % self.write_iter == 0:
                self.writeData("iter%d" % it)
        return self
Code example #7
    def optimizeMatching(self):
        grd = self.getGradient(self.gradCoeff)
        [grd2] = self.dotProduct(grd, [grd])

        self.gradEps = max(self.gradLB, np.sqrt(grd2) / 10000)
        print('Gradient bound:', self.gradEps)
        # Clear any .vtk output files left over from a previous run.
        kk = 0
        while os.path.isfile(self.outputDir + '/' + self.saveFile + str(kk) + '.vtk'):
            os.remove(self.outputDir + '/' + self.saveFile + str(kk) + '.vtk')
            kk += 1
        cg.cg(self, verb=self.verb, maxIter=self.maxIter, TestGradient=self.testGradient)
Code example #8
    def optimizeMatching(self):
        #print('dataterm', self.dataTerm(self.fvDef))
        #print('obj fun', self.objectiveFun(), self.obj0)
        self.coeffAff = self.coeffAff2
        grd = self.getGradient(self.gradCoeff)
        [grd2] = self.dotProduct(grd, [grd])

        self.gradEps = max(0.001, np.sqrt(grd2) / 10000)
        logging.info('Gradient lower bound: %f' % (self.gradEps))
        self.coeffAff = self.coeffAff1
        cg.cg(self, verb=self.verb, maxIter=self.maxIter, TestGradient=self.testGradient, epsInit=0.1)
Code example #9
    def optimizeMatching(self):
        #print('dataterm', self.dataTerm(self.fvDef))
        #print('obj fun', self.objectiveFun(), self.obj0)
        grd = self.getGradient(self.gradCoeff)
        [grd2] = self.dotProduct(grd, [grd])

        self.gradEps = max(0.001, np.sqrt(grd2) / 10000)
        print('Gradient lower bound:', self.gradEps)
        #print('x0:', self.x0)
        #print('y0:', self.y0)

        cg.cg(self, verb=self.verb, maxIter=self.maxIter, TestGradient=self.testGradient, epsInit=0.1)
Code example #10
File: svm.py  Project: sgmath12/ML_textmining
    def train(self, X, y, lr, batch_size, epochs, method="SGD"):
        total_training_cases = X.shape[0]
        # Ceil division so the last, possibly smaller, batch is still used.
        num_iterations = (total_training_cases + batch_size - 1) // batch_size
        beta = 0.000  # learning-rate decay coefficient (disabled here)

        for epoch in range(epochs):
            lr = lr / (1 + beta * epoch)
            totalI = []
            totaldw = []
            for i in range(num_iterations):
                start_idx = (i * batch_size) % total_training_cases
                X_batch = X[start_idx:start_idx + batch_size].toarray()
                y_batch = y[start_idx:start_idx + batch_size].reshape([-1, 1])

                dw, I = self.gradient(X_batch, y_batch, num_iterations)

                if method == "SGD":
                    # Plain stochastic gradient step.
                    self.w = self.w - lr * dw
                else:
                    # Collect active-set indices and gradients for a Newton step.
                    dw = dw.reshape([-1])
                    I = I.squeeze()
                    I = np.argwhere(I > 0).squeeze()
                    totalI.append(I)
                    totaldw.append(dw)

            if method == "Newton":
                # Solve the Newton system by conjugate gradient over the active set.
                totaldw = sum(totaldw)
                totalI = np.concatenate(totalI)
                d, _ = cg(X, totalI, totaldw, 3631.3203125)
                self.w = self.w + d.reshape([-1, 1])

            total_loss = self.criterion(X, y)
            print(total_loss.squeeze())
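
The cg called in the Newton branch is a project-specific helper from sgmath12/ML_textmining (its last argument, 3631.3203125, is an opaque constant from that repository), not the cg/conjugateGradient modules of the other examples. For reference, the textbook linear conjugate-gradient iteration that such a Newton step builds on, solving A x = b for a symmetric positive definite A, is sketched below; this is a generic illustration, not the project's implementation.

    import numpy as np

    def conjugate_gradient(A, b, tol=1e-6, max_iter=100):
        # Solve A x = b for symmetric positive definite A.
        x = np.zeros_like(b, dtype=float)
        r = b - A @ x        # residual
        p = r.copy()         # search direction
        rs = r @ r
        for _ in range(max_iter):
            Ap = A @ p
            alpha = rs / (p @ Ap)   # exact step size along p
            x += alpha * p
            r -= alpha * Ap
            rs_new = r @ r
            if np.sqrt(rs_new) < tol:
                break
            # New direction, conjugate to the previous ones.
            p = r + (rs_new / rs) * p
            rs = rs_new
        return x

Called as conjugate_gradient(H, -g), it returns an approximate Newton direction for a Hessian H and gradient g without ever factoring H.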
Code example #11
    def optimizeMatching(self):
        # obj0 = self.param.fun_obj0(self.fv1, self.param.KparDist) # / (self.param.sigmaError**2)
        # if self.dcurr:
        #     (obj, self.ct, self.St, self.bt, self.xt, self.xSt) = self.objectiveFunDef(self.at, self.Afft, withTrajectory=True)
        #     data = (self.xt[-1,:,:], self.xSt[-1,:,:,:], self.bt[-1,:,:])
        #     print('objDef = ', obj, 'dataterm = ', obj0 + self.dataTerm(data) * (self.param.sigmaError**2))
        #     print(obj0 + surfaces.currentNormDef(self.fv0, self.fv1, self.param.KparDist))
        # else:
        #     (obj, self.ct, self.St, self.xt) = self.objectiveFunDef(self.at, self.Afft, withTrajectory=True)
        #     self.fvDef.updateVertices(np.squeeze(self.xt[-1, :, :]))
        #     print('objDef = ', obj, 'dataterm = ', obj0 + self.dataTerm(self.fvDef))

        # Compute the threshold only if the caller has not set one.
        if self.gradEps < 0:
            grd = self.getGradient(self.gradCoeff)
            [grd2] = self.dotProduct(grd, [grd])

            self.gradEps = max(0.001, np.sqrt(grd2) / 10000)

        print('Gradient lower bound:', self.gradEps)
        cg.cg(self, verb=self.verb, maxIter=self.maxIter, TestGradient=self.testGradient)
Code example #12
    def computeMatching(self):

        # Disabled coarse-to-fine schedule: two short CG passes at coarser
        # scales before the full optimization.
        if False:
            self.sigma *= 4
            self.sfactor /= 16
            self.khSmooth *= 1.44
            conjugateGradient.cg(self, True, maxIter=10, TestGradient=False,
                                 epsInit=self.cg_init_eps)
            self.sigma /= 2
            self.sfactor *= 4
            self.khSmooth /= 1.2
            self.z0 /= 4
            #self.z0_state = self.z0.copy()
            #print('z0', (self.z0**2).sum())
            conjugateGradient.cg(self, True, maxIter=10, TestGradient=False,
                                 epsInit=self.cg_init_eps)
            self.sigma /= 2
            self.sfactor *= 4
            self.khSmooth /= 1.2
            self.z0 /= 4
            #self.z0_state = self.z0.copy()
        conjugateGradient.cg(self, True, maxIter=10000, TestGradient=False,
                             epsInit=self.cg_init_eps)
        return self
Code example #13
    def computeMatching(self):
        conjugateGradient.cg(self, True, maxIter=1000, TestGradient=True,
                             epsInit=self.cg_init_eps)
        return self
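
In every example except #10, cg.cg(self, ...) (or conjugateGradient.cg(self, ...)) treats the calling object itself as the optimization problem. Only a few of the required hooks are visible in these snippets: getGradient, dotProduct, and the obj and converged attributes. The keyword arguments (verb, maxIter, TestGradient, epsInit) appear to control verbosity, the iteration limit, a gradient check, and the initial step size, though that reading is inferred from the names alone. A hypothetical skeleton of the implied protocol:

    class MatchingProblem:
        """Hypothetical skeleton of the object protocol implied by the
        cg.cg(self, ...) calls above; everything beyond the names visible
        in the snippets is an assumption."""

        def __init__(self):
            self.obj = None         # cached objective; set to None to force re-evaluation
            self.converged = False  # set by the CG driver on success
            self.gradEps = -1.0     # gradient-norm stopping threshold (< 0: unset)
            self.gradCoeff = 1.0    # scaling passed to getGradient

        def getGradient(self, coeff):
            # Return the current (scaled) gradient of the objective.
            raise NotImplementedError

        def dotProduct(self, g, glist):
            # Inner products of g with each element of glist, returned as a
            # list -- hence the unpacking "[grd2] = self.dotProduct(grd, [grd])".
            raise NotImplementedError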