def split_test():
    # Small driver: solve the non-linear problem directly, plot the
    # resulting control, then solve the penalized formulation with the
    # split L-BFGS solver.
    y0 = 3.2
    yT = 1.5
    T = 1
    a = 0.9
    p = 2
    c = 0.5
    f = lambda x: 100*np.cos(5*np.pi*x)

    problem = non_lin_problem(y0, yT, T, a, p, c=c, func=f)
    N = 1000
    m = 3
    mu = 1

    res = problem.solve(N)

    import matplotlib.pyplot as plt
    plt.plot(res.x)
    plt.show()

    J = lambda x: problem.Penalty_Functional(x, N, m, mu)
    grad_J = lambda x: problem.Penalty_Gradient(x, N, m, mu)

    solver = SplitLbfgs(J, grad_J, np.zeros(N+m), m=m,
                        options=problem.Lbfgs_options)
    res = solver.solve2()
def parallel_penalty_solve(self, N, m, mu_list, tol_list=None, x0=None,
                           Lbfgs_options=None):
    # Solve the penalized problem in parallel for each mu in mu_list,
    # warm-starting every solve with the previous solution.
    comm = self.comm
    rank = self.rank
    if x0 is None:
        x0 = self.initial_control2(N, m=m)
    initial_counter = self.counter.copy()
    self.update_Lbfgs_options(Lbfgs_options)
    Result = []
    for i in range(len(mu_list)):

        def J(u):
            self.counter[0] += 1
            return self.parallel_penalty_functional(u, N, mu_list[i])

        def grad_J(u):
            self.counter[1] += 1
            return self.penalty_grad(u, N, m, mu_list[i])

        solver = SplitLbfgs(J, grad_J, x0, options=self.Lbfgs_options,
                            mpi=True)
        res = solver.mpi_solve()
        x0 = res.x
        res.add_FuncGradCounter(self.counter - initial_counter)
        Result.append(res)
        val = self.find_jump_diff(res.x)
        if self.rank == 0:
            print 'jump diff:', val
    return Result
def lagrange_penalty_solve(self, N, m, my_list, x0=None, Lbfgs_options=None):
    """
    Solve the optimization problem with the augmented Lagrange method.

    Arguments:

    * N: number of discretization points
    * m: number of processes
    * my_list: list of penalty parameters to solve the problem for
    * x0: initial guess for the control
    * Lbfgs_options: same as for class initialisation
    """
    dt = float(self.T)/N
    if x0 is None:
        x0 = self.Vec(self.initial_control(N, m=m))
    Result = []
    G = np.zeros(m-1)      # Lagrange multipliers, one per interface
    for i in range(len(my_list)):
        print
        print my_list[i], G
        print

        def init_pen(self, y, u, my, N, k):
            return self.initial_lagrange(y, u, my, N, k, G)

        def J(u):
            return self.Lagrange_Penalty_Functional(u, N, m, my_list[i], G)

        def grad_J(u):
            l, L = self.adjoint_penalty_solver(u, N, m, my_list[i],
                                               init=init_pen)
            g = np.zeros(len(u))
            g[:N+1] = self.grad_J(u[:N+1], L, dt)
            for j in range(m-1):
                g[N+1+j] = l[j+1][0] - l[j][-1] + G[j]
            return g

        self.update_Lbfgs_options(Lbfgs_options)
        solver = SplitLbfgs(J, grad_J, x0.array(),
                            options=self.Lbfgs_options)
        res = solver.normal_solve()
        Result.append(res)
        x0 = res['control']
        print
        # Update the multipliers with the remaining interface jumps.
        y, Y = self.ODE_penalty_solver(res['control'].array(), N, m)
        for j in range(m-1):
            G[j] = G[j] - my_list[i]*(y[j][-1] - y[j+1][0])
    if len(Result) == 1:
        return res
    else:
        return Result
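# Hedged sketch of the jump part of the augmented Lagrangian that
# Lagrange_Penalty_Functional is expected to include (illustrative only;
# the real functional also contains the tracking and control terms).
# For each interface j it adds my/2*jump_j**2 + G[j]*jump_j, with
# jump_j = y[j][-1] - y[j+1][0], which matches the +G[j] term in the
# gradient and the multiplier update above.
def _example_aug_lagrange_jump_term(y, G, my):
    # y: list of state arrays, one per sub-interval
    # G: multipliers, one per interface; my: penalty parameter
    term = 0.0
    for j in range(len(G)):
        jump = y[j][-1] - y[j+1][0]
        term += 0.5*my*jump**2 + G[j]*jump
    return term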
def PPCLBFGSsolve(self, N, m, my_list, tol_list=None, x0=None,
                  options=None, scale=False):
    # Penalty continuation with a preconditioned (PPC) split L-BFGS solver.
    dt = float(self.T)/N
    if x0 is None:
        x0 = self.initial_control(N, m=m)
    result = []
    if scale:
        scaler = {'m': m, 'factor': 1}
    else:
        scaler = None
    initial_counter = self.counter.copy()
    for i in range(len(my_list)):
        J, grad_J = self.generate_reduced_penalty(dt, N, m, my_list[i])
        self.update_Lbfgs_options(options)
        Lbfgsopt = self.Lbfgs_options
        if tol_list is not None:
            try:
                opt = {'jtol': tol_list[i]}
                self.update_Lbfgs_options(opt)
                Lbfgsopt = self.Lbfgs_options
            except IndexError:
                print 'no good tol_list'
        # Rebuild the preconditioner for the current penalty parameter.
        PPC = self.PC_creator(N, m, step=1, mu=my_list[i])
        Solver = SplitLbfgs(J, grad_J, x0, m=m, Hinit=None,
                            options=Lbfgsopt, ppc=PPC, scale=scaler)
        res = Solver.normal_solve()
        if scale:
            res.rescale()
        x0 = res.x
        res.add_FuncGradCounter(self.counter - initial_counter)
        result.append(res)
    if len(result) == 1:
        return res
    else:
        return result
def solve(self, N, x0=None, Lbfgs_options=None, algorithm='my_lbfgs'):
    """
    Solve the optimization problem without penalty.

    Arguments:

    * N: number of discretization points
    * x0: initial guess for the control
    * Lbfgs_options: same as for class initialisation
    """
    self.t = np.linspace(0, self.T, N+1)
    dt = float(self.T)/N
    if x0 is None:
        x0 = self.initial_control(N)
    if algorithm == 'my_lbfgs':
        x0 = self.Vec(x0)
    initial_counter = self.counter.copy()

    def J(u):
        self.counter[0] += 1
        return self.Functional(u, N)

    def grad_J(u):
        self.counter[1] += 1
        return self.Gradient(u, N)

    if algorithm == 'my_lbfgs':
        self.update_Lbfgs_options(Lbfgs_options)
        solver = SplitLbfgs(J, grad_J, x0.array(),
                            options=self.Lbfgs_options)
        res = solver.normal_solve()
    elif algorithm == 'my_steepest_decent':
        self.update_SD_options(Lbfgs_options)
        SDopt = self.SD_options
        Solver = SteepestDecent(J, grad_J, x0.copy(), options=SDopt)
        res = Solver.solve()
    res.add_FuncGradCounter(self.counter - initial_counter)
    return res
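# Minimal usage sketch for solve(), assuming a problem object like the one
# built in split_test above; all parameter values are illustrative.
def _example_direct_solve():
    f = lambda x: 100*np.cos(5*np.pi*x)
    problem = non_lin_problem(3.2, 1.5, 1, 0.9, 2, c=0.5, func=f)
    res = problem.solve(1000, algorithm='my_lbfgs')
    return res.x    # the computed control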
def parallel_PPCLBFGSsolve(self, N, m, mu_list, tol_list=None, x0=None,
                           options=None, scale=False):
    # MPI variant of the preconditioned penalty continuation.
    comm = self.comm
    rank = self.rank
    if x0 is None:
        x0 = self.initial_control2(N, m=m)
    Result = []
    PPC = self.PC_maker4(N, m, comm, step=1)
    initial_counter = self.counter.copy()
    for i in range(len(mu_list)):

        def J(u):
            self.counter[0] += 1
            return self.parallel_penalty_functional(u, N, mu_list[i])

        def grad_J(u):
            self.counter[1] += 1
            return self.penalty_grad(u, N, m, mu_list[i])

        self.update_Lbfgs_options(options)
        Lbfgsopt = self.Lbfgs_options
        if tol_list is not None:
            try:
                opt = {'jtol': tol_list[i]}
                self.update_Lbfgs_options(opt)
                Lbfgsopt = self.Lbfgs_options
            except IndexError:
                print 'no good tol_list'
        Solver = SplitLbfgs(J, grad_J, x0, m=m, Hinit=None,
                            options=Lbfgsopt, ppc=PPC, mpi=True)
        res = Solver.mpi_solve()
        x0 = res.x
        res.add_FuncGradCounter(self.counter - initial_counter)
        Result.append(res)
        val = self.find_jump_diff(res.x)
        if self.rank == 0:
            print 'jump diff:', val
    return Result
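# Hedged sketch of driving the MPI solver; assumes the problem object was
# set up with an mpi4py communicator (the self.comm/self.rank used above)
# and that the script is launched with e.g. `mpirun -n 3 python run.py`.
# The mu values are an illustrative penalty ladder, not tuned.
def _example_mpi_penalty_solve(problem, N=1000, m=3):
    mu_list = [1, 10, 100]
    results = problem.parallel_PPCLBFGSsolve(N, m, mu_list)
    return results[-1]    # result for the largest mu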
def PPCLBFGSadaptive_solve(self, N, m, mu0=1, x0=None, options=None,
                           scale=False, mu_stop_codition=None,
                           mu_updater=None, tol_update=None):
    # Penalty continuation where mu is grown adaptively until the stop
    # condition fails.
    dt = float(self.T)/N
    if x0 is None:
        x0 = self.initial_control(N, m=m)
    result = []
    PPC = self.PC_creator(N, m, step=1)
    if scale:
        scaler = {'m': m, 'factor': 1}
    else:
        scaler = None
    mu = mu0
    if mu_stop_codition is None:
        mu_stop_codition = self.adaptive_stop_condition
    if mu_updater is None:
        mu_updater = self.adaptive_mu_update
    if tol_update is None:
        tol_update = lambda x, dt, mu: x
    # The condition is checked on the previous mu (mu0), so one final
    # solve is done with the updated mu before the loop terminates.
    while mu_stop_codition(mu0, dt, m):
        J, grad_J = self.generate_reduced_penalty(dt, N, m, mu)
        self.update_Lbfgs_options(options)
        Lbfgsopt = self.Lbfgs_options
        Lbfgsopt['jtol'] = tol_update(Lbfgsopt['jtol'], dt, mu)
        Solver = SplitLbfgs(J, grad_J, x0, m=m, Hinit=None,
                            options=Lbfgsopt, ppc=PPC, scale=scaler)
        try:
            res = Solver.normal_solve()
        except Warning:
            # Retry once with a larger penalty parameter.
            try:
                print 'inner solve failed, retrying with mu =', 2*mu0
                mu = 2*mu0
                J, grad_J = self.generate_reduced_penalty(dt, N, m, mu)
                Solver = SplitLbfgs(J, grad_J, x0, m=m, Hinit=None,
                                    options=Lbfgsopt, ppc=PPC,
                                    scale=scaler)
                res = Solver.normal_solve()
            except Warning:
                return result
        if scale:
            res.rescale()
        x0 = res.x
        res.add_mu(mu)
        result.append(res)
        mu0 = mu
        mu = mu_updater(mu, dt, m, res.niter)
    return result
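# Hedged sketch of the adaptive hooks PPCLBFGSadaptive_solve accepts. The
# defaults live on the class (adaptive_mu_update / adaptive_stop_condition);
# these illustrative versions only show the expected call signatures, and
# the growth factors and the 1/dt**2 cap are assumptions, not tuned values.
def _example_mu_updater(mu, dt, m, niter):
    # grow mu faster when the inner solver converged quickly
    if niter < 20:
        return 10*mu
    return 2*mu

def _example_mu_stop(mu, dt, m):
    # continue the continuation while mu is below an illustrative cap
    return mu < 1.0/dt**2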
def penalty_solve(self, N, m, my_list, tol_list=None, x0=None,
                  Lbfgs_options=None, algorithm='my_lbfgs', scale=False):
    """
    Solve the optimization problem with penalty.

    Arguments:

    * N: number of discretization points
    * m: number of processes
    * my_list: list of penalty parameters to solve the problem for
    * x0: initial guess for the control
    * Lbfgs_options: same as for class initialisation
    """
    self.t, self.T_z = self.decompose_time(N, m)
    dt = float(self.T)/N
    if x0 is None:
        x0 = self.initial_control(N, m=m)
    if algorithm == 'my_lbfgs':
        x0 = self.Vec(x0)
    Result = []
    initial_counter = self.counter.copy()
    for i in range(len(my_list)):

        def J(u):
            self.counter[0] += 1
            return self.Penalty_Functional(u, N, m, my_list[i])

        def grad_J(u):
            self.counter[1] += 1
            return self.Penalty_Gradient(u, N, m, my_list[i])

        if algorithm == 'my_lbfgs':
            self.update_Lbfgs_options(Lbfgs_options)
            if tol_list is not None:
                try:
                    opt = {'jtol': tol_list[i]}
                    self.update_Lbfgs_options(opt)
                except IndexError:
                    print 'no good tol_list'
            if scale:
                scaler = {'m': m,
                          'factor': self.Lbfgs_options['scale_factor']}
                solver = SplitLbfgs(J, grad_J, x0.array(),
                                    options=self.Lbfgs_options,
                                    scale=scaler)
            else:
                solver = SplitLbfgs(J, grad_J, x0.array(), m=m,
                                    options=self.Lbfgs_options)
            res = solver.normal_solve()
            x0 = res['control']
        elif algorithm == 'my_steepest_decent':
            self.update_SD_options(Lbfgs_options)
            SDopt = self.SD_options
            if scale:
                scale = {'m': m, 'factor': SDopt['scale_factor']}
                Solver = SteepestDecent(J, grad_J, x0.copy(),
                                        options=SDopt, scale=scale)
                res = Solver.solve()
                res.rescale()
            else:
                Solver = PPCSteepestDecent(J, grad_J, x0.copy(),
                                           lambda x: x, options=SDopt)
                res = Solver.split_solve(m)
            x0 = res.x.copy()
        elif algorithm == 'slow_steepest_decent':
            self.update_SD_options(Lbfgs_options)
            SDopt = self.SD_options
            Solver = SteepestDecent(J, grad_J, x0.copy(), options=SDopt)
            res = Solver.solve()
            x0 = res.x.copy()
        elif algorithm == 'split_lbfgs':
            self.update_Lbfgs_options(Lbfgs_options)
            Solver = SplitLbfgs(J, grad_J, x0, m,
                                options=self.Lbfgs_options)
            res = Solver.solve()
            x0 = res.x.copy()
        res.jump_diff = self.jump_diff
        Result.append(res)
        print 'jump diff:', self.jump_diff
        res.add_FuncGradCounter(self.counter - initial_counter)
    if len(Result) == 1:
        return res
    else:
        return Result
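# Minimal sketch of a penalty continuation run, assuming the same kind of
# problem object as in split_test; the mu and tolerance ladders are
# illustrative values, not tuned. With more than one mu, penalty_solve
# returns one result per penalty parameter.
def _example_penalty_continuation(problem, N=1000, m=3):
    mu_list = [1, 10, 100]
    tol_list = [1e-4, 1e-5, 1e-6]    # tighten jtol as mu grows
    results = problem.penalty_solve(N, m, mu_list, tol_list=tol_list,
                                    algorithm='my_lbfgs')
    return results[-1]    # result for the largest mu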