def mmr_train(self):
    """Solve the MMR training problem and return the dual solution.

    The training algorithm is selected by ``self.iperceptron``:
      0 -- quadratic MMR solver on precomputed input/output kernel matrices,
      1 -- primal perceptron on explicit (normalized) feature matrices,
      2 -- dual perceptron on explicit (normalized) feature matrices.

    Returns
    -------
    cOptDual : base.cls_dual
        Container of the learned dual variables.  In the solver case it is
        also stored on ``self.dual`` together with the estimated bias.
    """
    if self.iperceptron == 0:
        mtra = self.mtrain
        # Kernel matrices between training items: input (KX) and output (KY).
        KX = mmr_kernel(self, self.itrain, self.itrain, ioutput=0,
                        itraintest=0, itraindata=0, itensor=self.kmode)[0]
        KY = mmr_kernel(self, self.itrain, self.itrain, ioutput=1,
                        itraintest=0, itraindata=0)[0]
        cOptDual = base.cls_dual(None, None)
        self.dual = cOptDual
        self.csolver = mmr_solver_cls.cls_mmr_solver()
        self.dual.alpha = self.csolver.mmr_solver(KX, KY, self.penalty.c,
                                                  self.penalty.d)
        # Estimate the bias for a linear (explicit feature) output kernel;
        # for an implicit output kernel no bias is estimated.
        if self.YKernel.ifeature == 0:
            YTrain = self.YKernel.get_train_norm(self.itrain)
            ZW = np.dot(YTrain.T,
                        np.outer(self.dual.alpha, np.ones(mtra)) * KX)
            xbias = np.median(YTrain - ZW.T, axis=0)
        else:
            # Fixed zero bias.  (An unused ZW = np.dot(Kcross.T, ...)
            # computation that previously sat here was dead code and has
            # been removed.)
            xbias = np.zeros(mtra)
        self.dual.bias = xbias
    elif self.iperceptron == 1:
        cOptDual = base.cls_dual(None, None)
        XTrainNorm0 = self.XKernel[0].get_train_norm()
        YTrainNorm = self.YKernel.get_train_norm(self.itrain)
        cOptDual.W = mmr_perceptron_primal(XTrainNorm0, YTrainNorm,
                                           self.perceptron.margin,
                                           self.perceptron.stepsize,
                                           self.perceptron.nrepeat)
    elif self.iperceptron == 2:
        cOptDual = base.cls_dual(None, None)
        XTrainNorm0 = self.XKernel[0].get_train_norm()
        YTrainNorm = self.YKernel.get_train_norm(self.itrain)
        cOptDual.alpha = mmr_perceptron_dual(XTrainNorm0, YTrainNorm,
                                             self.XKernel[0].kernel_params.ipar1,
                                             self.XKernel[0].kernel_params.ipar2,
                                             self.XKernel[0].kernel_type,
                                             self.perceptron.margin,
                                             self.perceptron.stepsize,
                                             self.perceptron.nrepeat)
    return cOptDual
def mmr_train(self):
    """Run MMR training and return the container of dual variables.

    ``self.iperceptron`` picks the algorithm: 0 solves the kernelized
    MMR quadratic problem, 1 runs the primal perceptron and 2 the dual
    perceptron on the explicit feature representations.
    """
    if self.iperceptron == 0:
        n_tr = self.mtrain
        kx = mmr_kernel(self, self.itrain, self.itrain, ioutput=0,
                        itraintest=0, itraindata=0, itensor=self.kmode)[0]
        ky = mmr_kernel(self, self.itrain, self.itrain, ioutput=1,
                        itraintest=0, itraindata=0)[0]
        dual_out = base.cls_dual(None, None)
        self.dual = dual_out
        self.csolver = mmr_solver_cls.cls_mmr_solver()
        self.dual.alpha = self.csolver.mmr_solver(
            kx, ky, self.penalty.c, self.penalty.d)
        # Bias estimation: only an explicit (linear) output kernel admits one.
        if self.YKernel.ifeature == 0:
            y_train = self.YKernel.get_train_norm(self.itrain)
            weighted_kx = np.outer(self.dual.alpha, np.ones(n_tr)) * kx
            zw = np.dot(y_train.T, weighted_kx)
            bias = np.median(y_train - zw.T, axis=0)
        else:
            ky = self.YKernel.Kcross
            zw = np.dot(ky.T, np.outer(self.dual.alpha, np.ones(n_tr)) * kx)
            bias = np.zeros(n_tr)
        self.dual.bias = bias
    elif self.iperceptron == 1:
        dual_out = base.cls_dual(None, None)
        x_norm = self.XKernel[0].get_train_norm()
        y_norm = self.YKernel.get_train_norm(self.itrain)
        dual_out.W = mmr_perceptron_primal(
            x_norm, y_norm,
            self.perceptron.margin,
            self.perceptron.stepsize,
            self.perceptron.nrepeat)
    elif self.iperceptron == 2:
        dual_out = base.cls_dual(None, None)
        x_norm = self.XKernel[0].get_train_norm()
        y_norm = self.YKernel.get_train_norm(self.itrain)
        dual_out.alpha = mmr_perceptron_dual(
            x_norm, y_norm,
            self.XKernel[0].kernel_params.ipar1,
            self.XKernel[0].kernel_params.ipar2,
            self.XKernel[0].kernel_type,
            self.perceptron.margin,
            self.perceptron.stepsize,
            self.perceptron.nrepeat)
    return dual_out
def mvm_train(self):
    """Execute the MVM training procedure.

    Builds the merged input/output kernel matrices over the training
    items, then runs the MVM solver to obtain the dual variables.

    Side effects
    ------------
    - ``self.KX`` / ``self.KY`` : cached input/output kernel matrices
    - ``self.dual``             : the returned dual container
    - ``self.solvertime``       : wall-clock seconds spent in the solver

    Returns
    -------
    cOptDual : base.cls_dual
        Container holding the learned dual variables (``alpha``).
    """
    time0 = time.time()
    self.compute_kernels()
    if self.verbose == 1:
        print('Kernel computation:', time.time() - time0)
    time0 = time.time()
    self.KX = mmr_kernel(self, self.itrain, self.itrain, ioutput=0,
                         itraintest=0, itensor=self.kmode)[0]
    self.KY = mmr_kernel(self, self.itrain, self.itrain, ioutput=1)[0]
    if self.verbose == 1:
        print('Kernel merge computation:', time.time() - time0)
    cOptDual = base.cls_dual(None, None)
    self.dual = cOptDual
    time0 = time.time()
    cmvm_solver = mvm_solver_cls.cls_mvm_solver()
    self.dual.alpha = cmvm_solver.mvm_solver(self)
    self.solvertime = time.time() - time0
    return cOptDual
def mvm_train(self):
    """Train the MVM model: compute the kernels, then run the solver.

    Stores the kernel matrices on ``self.KX``/``self.KY``, the dual
    container on ``self.dual`` and the solver wall-clock time on
    ``self.solvertime``; returns the dual container.
    """
    t_start = time.time()
    self.compute_kernels()
    if self.verbose == 1:
        print('Kernel computation:', time.time() - t_start)
    t_start = time.time()
    self.KX = mmr_kernel(self, self.itrain, self.itrain,
                         ioutput=0, itraintest=0, itensor=self.kmode)[0]
    self.KY = mmr_kernel(self, self.itrain, self.itrain, ioutput=1)[0]
    if self.verbose == 1:
        print('Kernel merge computation:', time.time() - t_start)
    dual_container = base.cls_dual(None, None)
    self.dual = dual_container
    # Time only the optimization itself.
    t_start = time.time()
    solver = mvm_solver_cls.cls_mvm_solver()
    self.dual.alpha = solver.mvm_solver(self)
    self.solvertime = time.time() - t_start
    return dual_container
def load_mmr(self, cData):
    """Populate ``cData`` with offline-trained MMR parameters and data.

    Loads the training inputs/outputs, the dual variables and the kernel
    parameters from the files configured on ``self``, appends one all-zero
    placeholder row (to be replaced by the real test inputs later), and
    configures the output and input kernel objects of ``cData``.
    """
    x_input = np.loadtxt(self.training_input)
    x_output = np.loadtxt(self.training_output)
    dual_alpha = np.loadtxt(self.dualParams)
    kern_pars = np.loadtxt(self.kernelParams)
    (n_rows, n_in) = x_input.shape
    n_out = x_output.shape[1]
    cData.training_center = np.mean(x_input, axis=0)
    # Offline training result: dual variables plus a constant 0.1 bias.
    cData.dual = base.cls_dual(dual_alpha, np.zeros(n_out) + 0.1)
    # Placeholder test row appended to both inputs and outputs; the input
    # side is wrapped in a list of views.
    x_input = [np.vstack((x_input, np.zeros((1, n_in))))]
    x_output = np.vstack((x_output, np.zeros((1, n_out))))
    # Subspace (explicit feature) output kernel.
    cData.YKernel = mmr_kernel_explicit.cls_feature(ifeature=0)
    cData.YKernel.load_data(x_output, ifeature=0)
    cData.YKernel.ifeature = 0
    cData.YKernel.title = 'output'
    # Default parameter sets for the output kernel.
    init_pars = cls_initial_params()
    cData.YKernel.kernel_params.set(init_pars.get_yparams('kernel', 0))
    cData.YKernel.crossval.set(init_pars.get_yparams('cross', 0))
    cData.YKernel.norm.set(init_pars.get_yparams('norm', 0))
    idata = 0
    nview = 1
    for iview in range(nview):
        cData.XKernel[idata] = mmr_kernel_explicit.cls_feature(ifeature=0)
        cData.XKernel[idata].load_data(x_input[iview], ifeature=0)
        cData.XKernel[idata].title = 'input_' + str(iview)
        cData.XKernel[idata].kernel_params.set(
            init_pars.get_xparams('kernel', idata))
        # Override the default kernel parameters with the stored ones.
        cData.XKernel[idata].kernel_params.ipar1 = kern_pars[0]
        cData.XKernel[idata].kernel_params.ipar2 = kern_pars[1]
        cData.XKernel[idata].crossval.set(init_pars.get_xparams('cross', idata))
        cData.XKernel[idata].norm.set(init_pars.get_xparams('norm', idata))
        idata += 1
    cData.ninputview = idata  # number of active input views
    cData.mdata = cData.YKernel.dataraw.shape[0]
    cData.nfold = 1
    cData.nrepeat = 1
    # kmode: 0 = additive (feature concatenation),
    #        1 = multiplicative (feature tensor product)
    cData.kmode = 1
    return