def optAnnoParal(self, i, logProd=True):
    """Run the single-start gradient ascent method to optimize a_{i}.

    Builds the Cholesky factor of the noisy kernel matrix over the
    observed history, optionally precomputes the log-product of
    expectations used by optimizeAn, runs one optimization start via
    misc.AnOptWrapper, and writes the solution to disk.

    Args:
        i: Iteration of the algorithm.
        logProd: True if we compute the logProduct for optimizeAn;
            False otherwise (logProduct is passed as None).
    """
    n1 = self._n1
    tempN = i + self.numberTraining
    # Kernel matrix over all points observed so far, with observation noise.
    A = self.stat._k.A(self.dataObj.Xhist[0:tempN, :],
                       noise=self.dataObj.varHist[0:tempN])
    L = np.linalg.cholesky(A)
    if logProd:
        # Columns [n1, n1+dimW) of the history hold the w-part of each point.
        tempX = self.dataObj.Xhist[0:tempN, n1:self._dimW + n1]
        logProduct = self.stat.computeLogProductExpectationsForAn(
            tempX, tempN, self.stat._k)
    else:
        logProduct = None
    # Single starting point for the gradient ascent.
    Xst = self.Obj.sampleFromX(1)
    args2 = {}
    args2['start'] = Xst[0:1, :]
    args2['i'] = i
    args2['L'] = L
    args2['logProduct'] = logProduct
    self.optRuns.append(misc.AnOptWrapper(self, **args2))
    # Write the run just appended. The original wrote optRuns[0], which is
    # the wrong element whenever optRuns already held results from a
    # previous call (this method, unlike the parallel variant, does not
    # reset optRuns).
    fl.writeSolution(self, self.optRuns[-1])
def optAnParal(self, i, nStart, logProd=True, numProcesses=None):
    """Run in parallel the multi-start gradient ascent method to optimize a_{i}.

    Restarts the gradient ascent method nStart times, each start running
    in a worker process, then keeps the best run (largest fOpt) and
    writes it to disk. optRuns/optPointsArray are cleared afterwards.

    Args:
        i: Iteration of the algorithm.
        nStart: Number of restarts of the gradient ascent method.
        logProd: True if we compute the logProduct for optimizeAn;
            False otherwise (logProduct is passed as None).
        numProcesses: Number of worker processes (None lets
            multiprocessing choose the CPU count).
    """
    # Initialize before the try block so the KeyboardInterrupt handler and
    # the result-collection loop are safe even if the interrupt fires
    # before the pool/jobs are created (the original raised NameError here).
    jobs = []
    pool = None
    try:
        n1 = self._n1
        tempN = i + self.numberTraining
        # Kernel matrix over all points observed so far, with observation noise.
        A = self.stat._k.A(self.dataObj.Xhist[0:tempN, :],
                           noise=self.dataObj.varHist[0:tempN])
        L = np.linalg.cholesky(A)
        if logProd:
            tempX = self.dataObj.Xhist[0:tempN, n1:self._dimW + n1]
            logProduct = self.stat.computeLogProductExpectationsForAn(
                tempX, tempN, self.stat._k)
        else:
            logProduct = None
        # Arguments shared by every start; each job adds its own 'start'.
        args3 = {}
        args3['i'] = i
        args3['L'] = L
        args3['logProduct'] = logProduct
        Xst = self.Obj.sampleFromX(nStart)
        pool = mp.Pool(processes=numProcesses)
        for j in range(nStart):
            args2 = args3.copy()
            args2['start'] = Xst[j:j + 1, :]
            job = pool.apply_async(misc.AnOptWrapper, args=(self,), kwds=args2)
            jobs.append(job)
        pool.close()  # signal that no more data coming in
        pool.join()   # wait for all the tasks to complete
    except KeyboardInterrupt:
        print("Ctrl+c received, terminating and joining pool.")
        if pool is not None:
            pool.terminate()
            pool.join()
    # Collect whichever jobs were submitted (possibly fewer than nStart
    # after an interrupt).
    for job in jobs:
        try:
            self.optRuns.append(job.get())
        except Exception as e:
            # Surface the worker's failure instead of swallowing it silently.
            print("Error optimizing An: %s" % e)
    if len(self.optRuns):
        # Keep the best start by objective value.
        j = np.argmax([o.fOpt for o in self.optRuns])
        fl.writeSolution(self, self.optRuns[j])
    self.optRuns = []
    self.optPointsArray = []
def optAnParal(self, i, nStart, numProcesses=None):
    """Run in parallel the multi-start gradient ascent method to optimize a_{i}.

    Variant that precomputes temp1 = L^{-1}(y - mu) by triangular solve
    instead of the log-product of expectations. Restarts the gradient
    ascent nStart times in worker processes, keeps the best run
    (largest fOpt), writes it, then clears optRuns/optPointsArray.

    Args:
        i: Iteration of the algorithm.
        nStart: Number of restarts of the gradient ascent method.
        numProcesses: Number of worker processes (None lets
            multiprocessing choose the CPU count).
    """
    # Initialize before the try block so the KeyboardInterrupt handler and
    # the result-collection loop are safe even if the interrupt fires
    # before the pool/jobs are created (the original raised NameError here).
    jobs = []
    pool = None
    try:
        tempN = self.numberTraining + i
        args3 = {}
        args3['i'] = i
        # Cholesky factor of the noisy kernel matrix over the history.
        A = self.stat._k.A(self.dataObj.Xhist[0:tempN, :],
                           noise=self.dataObj.varHist[0:tempN])
        L = np.linalg.cholesky(A)
        args3['L'] = L
        muStart = self.stat._k.mu
        y = self.dataObj.yHist[0:tempN, :]
        # temp1 = L^{-1}(y - mu): forward substitution against the lower
        # Cholesky factor, shared by every start.
        temp1 = linalg.solve_triangular(L, np.array(y) - muStart, lower=True)
        args3['temp1'] = temp1
        Xst = self.Obj.sampleFromX(nStart)
        pool = mp.Pool(processes=numProcesses)
        for j in range(nStart):
            args2 = args3.copy()
            args2['start'] = Xst[j:j + 1, :]
            job = pool.apply_async(misc.AnOptWrapper, args=(self,), kwds=args2)
            jobs.append(job)
        pool.close()  # signal that no more data coming in
        pool.join()   # wait for all the tasks to complete
    except KeyboardInterrupt:
        print("Ctrl+c received, terminating and joining pool.")
        if pool is not None:
            pool.terminate()
            pool.join()
    # Collect whichever jobs were submitted (possibly fewer than nStart
    # after an interrupt).
    for job in jobs:
        try:
            self.optRuns.append(job.get())
        except Exception as e:
            # Surface the worker's failure instead of swallowing it silently.
            print("Error optimizing An: %s" % e)
    if len(self.optRuns):
        # Keep the best start by objective value.
        j = np.argmax([o.fOpt for o in self.optRuns])
        fl.writeSolution(self, self.optRuns[j])
    self.optRuns = []
    self.optPointsArray = []
def optAnnoParal(self, i):
    """Run the single-start gradient ascent method to optimize a_{i}.

    Variant that precomputes temp1 = L^{-1}(y - mu) by triangular solve
    instead of the log-product of expectations, runs one optimization
    start via misc.AnOptWrapper, and writes the solution to disk.

    Args:
        i: Iteration of the algorithm.
    """
    tempN = self.numberTraining + i
    args3 = {}
    args3['i'] = i
    # Cholesky factor of the noisy kernel matrix over the history.
    A = self.stat._k.A(self.dataObj.Xhist[0:tempN, :],
                       noise=self.dataObj.varHist[0:tempN])
    L = np.linalg.cholesky(A)
    args3['L'] = L
    muStart = self.stat._k.mu
    y = self.dataObj.yHist[0:tempN, :]
    # temp1 = L^{-1}(y - mu): forward substitution against the lower
    # Cholesky factor.
    temp1 = linalg.solve_triangular(L, np.array(y) - muStart, lower=True)
    args3['temp1'] = temp1
    # Single starting point for the gradient ascent.
    Xst = self.Obj.sampleFromX(1)
    args3['start'] = Xst[0:1, :]
    self.optRuns.append(misc.AnOptWrapper(self, **args3))
    # Write the run just appended. The original wrote optRuns[0], which is
    # the wrong element whenever optRuns already held results from a
    # previous call (this method, unlike the parallel variant, does not
    # reset optRuns).
    fl.writeSolution(self, self.optRuns[-1])