def solver(self,
               opt,
               ic,
               start,
               end,
               Tn,
               algorithm='scipy_lbfgs',
               options=None):
        h = self.mesh.hmax()

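        # Reduced objective: unpack the control vector, solve the forward PDE,
        # and evaluate the cost functional on the resulting state U.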
        def J(x):
            loc_opt, loc_ic = self.get_opt(x, opt, ic, 1)

            U = self.PDE_solver(loc_ic, loc_opt, start, end, Tn)
            return self.J(loc_opt, loc_ic, U, start, end)

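        # Reduced gradient: solve the adjoint equation and assemble the
        # gradient from the adjoint state P and the mesh size h.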
        def grad_J(x):

            loc_opt, loc_ic = self.get_opt(x, opt, ic, 1)

            P = self.adjoint_solver(loc_ic, loc_opt, start, end, Tn)

            return self.grad_J(P, loc_opt, loc_ic, h)

        control0 = self.get_control(opt, ic, 1)
        if algorithm == 'my_lbfgs':
            control0 = SimpleVector(control0)

            self.update_lbfgs_options(options)
            solver = Lbfgs(J, grad_J, control0, options=self.Lbfgs_options)

            res = solver.solve()
        elif algorithm == 'scipy_lbfgs':
            res = Mini(J,
                       control0.copy(),
                       method='L-BFGS-B',
                       jac=grad_J,
                       options={
                           'gtol': 1e-5,
                           'disp': True
                       })

        elif algorithm == 'my_steepest_decent':

            self.update_SD_options(options)
            SDopt = self.SD_options

            Solver = SteepestDecent(J, grad_J, control0.copy(), options=SDopt)
            res = Solver.solve()
        return res
Example #2
    def solve(self,N,x0=None,Lbfgs_options=None,algorithm='my_lbfgs'):
        """
        Solve the optimization problem without penalty

        Arguments:
        * N: number of discretization points
        * x0: initial guess for control
        * Lbfgs_options: same as for class initialisation
        """
        self.t = np.linspace(0,self.T,N+1)
        dt=float(self.T)/N
        if x0 is None:
            x0 = self.initial_control(N)# np.zeros(N+1)
        if algorithm=='my_lbfgs':
            x0 = self.Vec(x0)
        

        initial_counter = self.counter.copy()
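        # self.counter[0] and self.counter[1] count functional and gradient
        # evaluations; their change from initial_counter is attached to the
        # result at the end of the solve.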
        def J(u):
            self.counter[0]+=1
            return self.Functional(u,N)

        def grad_J(u):
            self.counter[1]+=1
            #l = self.adjoint_solver(u,N)
            return self.Gradient(u,N)#grad_J(u,l,dt)
       
        if algorithm=='my_lbfgs':
            self.update_Lbfgs_options(Lbfgs_options)
            
            #solver = Lbfgs(J,grad_J,x0,options=self.Lbfgs_options)
            solver=SplitLbfgs(J,grad_J,x0.array(),
                              options=self.Lbfgs_options)
            #res = solver.solve()
            res = solver.normal_solve()
        elif algorithm=='my_steepest_decent':
            self.update_SD_options(Lbfgs_options)
            SDopt = self.SD_options

            Solver = SteepestDecent(J,grad_J,x0.copy(),
                                    options=SDopt)
            res = Solver.solve()

            import matplotlib.pyplot as plt
            #Y = self.ODE_solver(res.x,N)
            #plt.plot(Y)
            #plt.show()
        res.add_FuncGradCounter(self.counter-initial_counter)
        return res
    def penalty_solver(self,opt,ic,start,end,Tn,m,mu_list,
                       algorithm='scipy_lbfgs',options=None):

        h = self.h
        X = Function(self.V)
        xN = self.xN
        control0 = self.get_control(opt,ic,m)
        if algorithm=='my_lbfgs':
            control0 = SimpleVector(control0)
        
        res =[]
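        # Penalty continuation: solve once for each value in mu_list and
        # warm-start every solve with the control found for the previous mu.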
        for k in range(len(mu_list)):
            
            J,grad_J = self.create_reduced_penalty_j(opt,ic,start,end,Tn,m,mu_list[k])

            
            if algorithm == 'my_steepest_decent':

                self.update_SD_options(options)
                SDopt = self.SD_options

                Solver = SteepestDecent(J,grad_J,control0.copy(),
                                        options=SDopt)
                res1 = Solver.solve()
                control0 = res1.x.copy()
            else:
                self.update_lbfgs_options(options)                

                if algorithm=='my_lbfgs':
                    solver = Lbfgs(J,grad_J,control0,options=self.Lbfgs_options)

                    res1 = solver.solve()
                    control0 = res1['control'].copy()
                elif algorithm=='scipy_lbfgs':
                    res1 = Mini(J,control0.copy(),method='L-BFGS-B',jac=grad_J,
                                options={'gtol':1e-6, 'disp':True,'maxcor':10})
                    control0 = res1.x.copy()


            res.append(res1)
        if len(res)==1:
            
            return res[0]
        
        return res
    def scaled_PPCSDsolve(self,
                          N,
                          m,
                          my_list,
                          tol_list=None,
                          x0=None,
                          options=None):

        dt = float(self.T) / N
        if x0 is None:
            x0 = np.zeros(N + m)

        result = []
        PPC = self.PC_maker2(N, m, step=1)
        initial_counter = self.counter.copy()
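        # For each penalty parameter, run steepest descent with the PPC
        # preconditioner and reuse the rescaled solution as the next initial guess.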
        for i in range(len(my_list)):

            J, grad_J = self.generate_reduced_penalty(dt, N, m, my_list[i])

            self.update_SD_options(options)
            SDopt = self.SD_options

            Solver = SteepestDecent(J,
                                    grad_J,
                                    x0,
                                    scale={'m': m},
                                    options=SDopt)

            res = Solver.PPC_solve(PPC)
            print(res.x[N + 1:])
            res.rescale()
            print(res.x[N + 1:])

            x0 = res.x
            result.append(res)
        res.add_FuncGradCounter(self.counter - initial_counter)
        if len(result) == 1:
            #y,Y = self.ODE_penalty_solver(x0,N,m)
            #import matplotlib.pyplot as plt
            #plt.plot(Y)
            #plt.show()
            return res
        else:
            return result
Example #5
    def solve(self,N,x0=None,Lbfgs_options=None,algorithm='my_lbfgs'):
        """
        Solve the optimization problem without penalty

        Arguments:
        * N: number of discretization points
        * x0: initial guess for control
        * Lbfgs_options: same as for class initialisation
        """
        
        dt=float(self.T)/N
        if x0 is None:
            x0 = np.zeros(N+1)
        if algorithm=='my_lbfgs':
            x0 = self.Vec(x0)
            
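        # Reduced objective and its gradient; the gradient is assembled from
        # the adjoint solution and the time step dt.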
        def J(u):
            return self.Functional(u,N)

        def grad_J(u):
            l = self.adjoint_solver(u,N)
            return self.grad_J(u,l,dt)
       
        if algorithm=='my_lbfgs':
            self.update_Lbfgs_options(Lbfgs_options)
            solver = Lbfgs(J,grad_J,x0,options=self.Lbfgs_options)

            res = solver.solve()
        elif algorithm=='my_steepest_decent':
            self.update_SD_options(Lbfgs_options)
            SDopt = self.SD_options

            Solver = SteepestDecent(J,grad_J,x0.copy(),
                                    options=SDopt)
            res = Solver.solve()

            import matplotlib.pyplot as plt
            #Y = self.ODE_solver(res.x,N)
            #plt.plot(Y)
            #plt.show()

        return res
    def solver(self,opt,ic,start,end,Tn,algorithm='scipy_lbfgs',
               options=None):
        h = self.mesh.hmax()
        
        def J(x):
            loc_opt,loc_ic = self.get_opt(x,opt,ic,1)
            
            U = self.PDE_solver(loc_ic,loc_opt,start,end,Tn)
            return self.J(loc_opt,loc_ic,U,start,end)
        
        def grad_J(x):

            loc_opt,loc_ic = self.get_opt(x,opt,ic,1)
            
            P = self.adjoint_solver(loc_ic,loc_opt,start,end,Tn)

            return self.grad_J(P,loc_opt,loc_ic,h)


        control0 = self.get_control(opt,ic,1)
        if algorithm=='my_lbfgs':
            control0 = SimpleVector(control0)

            self.update_lbfgs_options(options)
            solver = Lbfgs(J,grad_J,control0,options=self.Lbfgs_options)
        
            res = solver.solve()
        elif algorithm=='scipy_lbfgs':
            res = Mini(J,control0.copy(),method='L-BFGS-B', 
                       jac=grad_J,options={'gtol': 1e-5, 'disp': True})

        elif algorithm=='my_steepest_decent':

            self.update_SD_options(options)
            SDopt = self.SD_options

            Solver = SteepestDecent(J,grad_J,control0.copy(),options=SDopt)
            res = Solver.solve()
        return res
Example #7
    def alternate_direction_penalty_solve(self,N,m,my_list,tol_list=None,x0=None,Lbfgs_options=None,algorithm='my_lbfgs',ppc=None):
        self.t,self.T_z = self.decompose_time(N,m)
        dt=float(self.T)/N
        if x0 is None:
            x0 = self.initial_control(N,m=m)#np.zeros(N+m)
        x = None
        #if algorithm=='my_lbfgs':
            #x0 = self.Vec(x0)
        Result = []

        initial_counter = self.counter.copy()
        import matplotlib.pyplot as plt
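        # Alternating directions: for each penalty parameter, first minimise
        # over the lambda block x0[N+1:] with the control block fixed, then
        # minimise over the control block x0[:N+1] with the lambdas fixed.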
        for i in range(len(my_list)):
            def J(u):   
                self.counter[0]+=1
                return self.Penalty_Functional(u,N,m,my_list[i])

            def grad_J(u):
                self.counter[1]+=1
                return self.Penalty_Gradient(u,N,m,my_list[i])
                
            
            J_lam = lambda u2: J(np.hstack((x0[:N+1],u2)))
            if ppc is None:
                grad_lam = lambda u2: grad_J(np.hstack((x0[:N+1],u2)))[N+1:]
            else:
                grad_lam = lambda u2: ppc(grad_J(np.hstack((x0[:N+1],u2)))[N+1:])

            self.update_SD_options(Lbfgs_options)
            SDopt = self.SD_options
            
            Solver =SteepestDecent(J_lam,grad_lam,x0.copy()[N+1:],options=SDopt)
            lam_res = Solver.solve()
            
            #x0[N+1:]=lam_res.x[:]

            J_v = lambda u3 : J(np.hstack((u3,x0[N+1:])))
            grad_v= lambda u3 : grad_J(np.hstack((u3,x0[N+1:])))[:N+1]
            
            Solver = SteepestDecent(J_v,grad_v,x0.copy()[:N+1],options=SDopt)

            v_res = Solver.solve()
            x0[N+1:]=lam_res.x[:]
            x0[:N+1]= v_res.x[:]
            
            plt.plot(x0[N+1:])
        plt.show()

        v_res.add_FuncGradCounter(self.counter-initial_counter)
        lam_res.add_FuncGradCounter(self.counter-initial_counter)
        res = [lam_res,v_res,x0]
        return res
Example #8
    def alternate_direction_penalty_solve(self,
                                          N,
                                          m,
                                          my_list,
                                          tol_list=None,
                                          x0=None,
                                          Lbfgs_options=None,
                                          algorithm='my_lbfgs',
                                          ppc=None):
        self.t, self.T_z = self.decompose_time(N, m)
        dt = float(self.T) / N
        if x0 is None:
            x0 = self.initial_control(N, m=m)  #np.zeros(N+m)
        x = None
        #if algorithm=='my_lbfgs':
        #x0 = self.Vec(x0)
        Result = []

        initial_counter = self.counter.copy()
        import matplotlib.pyplot as plt  # assumed not imported at module level; needed for the plots below

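        # Variant of the alternating scheme: update the lambda block x0[N+1:]
        # first, then solve a separate sub-problem for the control on each of
        # the m time sub-intervals before gathering them back into x0[:N+1].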
        for i in range(len(my_list)):

            def J(u):
                self.counter[0] += 1
                return self.Penalty_Functional(u, N, m, my_list[i])

            def grad_J(u):
                self.counter[1] += 1
                return self.Penalty_Gradient(u, N, m, my_list[i])

            J_lam = lambda u2: J(np.hstack((x0[:N + 1], u2)))
            if ppc is None:
                grad_lam = lambda u2: grad_J(np.hstack(
                    (x0[:N + 1], u2)))[N + 1:]
            else:
                grad_lam = lambda u2: ppc(
                    grad_J(np.hstack((x0[:N + 1], u2)))[N + 1:])

            self.update_SD_options(Lbfgs_options)
            SDopt = self.SD_options

            Solver = SteepestDecent(J_lam,
                                    grad_lam,
                                    x0.copy()[N + 1:],
                                    options=SDopt)
            lam_res = Solver.solve()
            x0[N + 1:] = lam_res.x[:]
            x2 = self.partition_control(x0[:N + 1], N, m)
            v_res = []
            v_x = []
            ##########i=0################

            Ji = lambda v: self.alternate_Penalty_Functional(
                v, self.y0, x0[N + 1], N, m, my_list[i], 0)
            grad_Ji = lambda v: self.alternate_Penalty_Gradient(
                v, self.y0, x0[N + 1], N, m, my_list[i], 0)

            Solver = SteepestDecent(Ji, grad_Ji, x2[0].copy(), options=SDopt)
            #print Ji(x2[0])
            print(0)
            sol = Solver.solve()
            v_res.append(sol)
            v_x.append(sol.x)
            ############i=2,..,m-1######################
            for j in range(1, m - 1):
                Ji = lambda v: self.alternate_Penalty_Functional(
                    v, x0[N + 1], x0[N + 1 + j], N, m, my_list[i], j)
                grad_Ji = lambda v: self.alternate_Penalty_Gradient(
                    v, x0[N + 1], x0[N + 1 + j], N, m, my_list[i], j)
                print(j)
                Solver = SteepestDecent(Ji,
                                        grad_Ji,
                                        x2[j].copy(),
                                        options=SDopt)

                sol = Solver.solve()
                v_res.append(sol)
                v_x.append(sol.x)
            ################i=m################
            Ji = lambda v: self.alternate_Penalty_Functional(
                v, x0[-1], self.yT, N, m, 1, m - 1)
            grad_Ji = lambda v: self.alternate_Penalty_Gradient(
                v, x0[-1], self.yT, N, m, 1, m - 1)
            print(m - 1)
            Solver = SteepestDecent(Ji, grad_Ji, x2[-1].copy(), options=SDopt)

            sol = Solver.solve()
            v_res.append(sol)
            v_x.append(sol.x)
            #################end################
            v_gather = self.explicit_gather(v_x, N, m)

            x0[:N + 1] = v_gather[:]
            #x0[N+1:]=lam_res.x[:]
            plt.plot(x0[N + 1:])
        plt.show()

        #v_res.add_FuncGradCounter(self.counter-initial_counter)
        lam_res.add_FuncGradCounter(self.counter - initial_counter)
        res = [lam_res, v_res, x0]
        return res
a = 0.9
yT = 5
alpha = 0.5

N = 500
m = 10
mu = 1
mu_list = [1, 10]
opt = {'maxiter': 500}
problem = Problem3(y0, yT, T, a, alpha, J, grad_J)
res1 = problem.penalty_solve(N, m, mu_list, algorithm='my_steepest_decent')

JJ, grad_JJ = problem.generate_reduced_penalty(1. / 500, N, m, mu)
x0 = np.zeros(N + m)
x0[N + 1:] = 0
solver2 = SteepestDecent(JJ, grad_JJ, x0, options=opt, scale={'m': m})
res2 = solver2.solve()

solver3 = SteepestDecent(JJ, grad_JJ, x0, options=opt)
#res3 = solver3.solve()
res3 = problem.penalty_solve(N,
                             m,
                             mu_list,
                             algorithm='my_steepest_decent',
                             scale=True)
#print res1.niter,res2.niter,res3.niter
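# Compare iteration counts of the plain (res1) and scaled (res3) penalty solves.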
for i in range(len(mu_list)):
    print(res1[i].niter, res3[i].niter)
plt.figure()
plt.plot(res1[-1].x[N + 1:], '>r')
#plt.plot(res2.x[N+1:],'b--')
Example #10
T =  1
y0 = 1
a =  1
yT = 3
alpha = 0.5

N = 500
m = 10
mu = 1

opt = {'maxiter':100}
problem = Problem3(y0,yT,T,a,alpha,J,grad_J)
res1=problem.penalty_solve(N,m,[mu],algorithm='my_steepest_decent')

JJ,grad_JJ = problem.generate_reduced_penalty(1./500,N,m,mu)

solver2 = SteepestDecent(JJ,grad_JJ,np.zeros(N+m)+1,
                         options=opt,scale={'m':m})
res2 = solver2.solve()

solver3 = SteepestDecent(JJ,grad_JJ,np.zeros(N+m)+1,options=opt)
res3 = solver3.solve()
print(res1.niter, res2.niter, res3.niter)

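# Plot the lambda parts of the three solutions; the scaled run (res2) is also
# shown multiplied back by its scaling factor gamma.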
plt.plot(res1.x[N+1:],'>r')
plt.plot(res2.x[N+1:],'b--')
plt.plot(res3.x[N+1:])
plt.plot(res2.x[N+1:]*res2.scaler.gamma)
plt.show()
    def penalty_solver(self,
                       opt,
                       ic,
                       start,
                       end,
                       Tn,
                       m,
                       mu_list,
                       algorithm='scipy_lbfgs',
                       options=None):

        h = self.h
        X = Function(self.V)
        xN = self.xN
        control0 = self.get_control(opt, ic, m)
        if algorithm == 'my_lbfgs':
            control0 = SimpleVector(control0)

        res = []
        for k in range(len(mu_list)):

            J, grad_J = self.create_reduced_penalty_j(opt, ic, start, end, Tn,
                                                      m, mu_list[k])

            if algorithm == 'my_steepest_decent':

                self.update_SD_options(options)
                SDopt = self.SD_options

                Solver = SteepestDecent(J,
                                        grad_J,
                                        control0.copy(),
                                        options=SDopt)
                res1 = Solver.solve()
                control0 = res1.x.copy()
            else:
                self.update_lbfgs_options(options)

                if algorithm == 'my_lbfgs':
                    solver = Lbfgs(J,
                                   grad_J,
                                   control0,
                                   options=self.Lbfgs_options)

                    res1 = solver.solve()
                    control0 = res1['control'].copy()
                elif algorithm == 'scipy_lbfgs':
                    res1 = Mini(J,
                                control0.copy(),
                                method='L-BFGS-B',
                                jac=grad_J,
                                options={
                                    'gtol': 1e-6,
                                    'disp': True,
                                    'maxcor': 10
                                })
                    control0 = res1.x.copy()

            res.append(res1)
        if len(res) == 1:

            return res[0]

        return res
Example #12
    def penalty_solve(self,N,m,my_list,tol_list=None,x0=None,Lbfgs_options=None,algorithm='my_lbfgs',scale=False):
        """
        Solve the optimization problem with penalty

        Arguments:
        * N: number of discretization points
        * m: number of processes
        * my_list: list of penalty parameters that we want to solve the problem
                   for
        * x0: initial guess for control
        * Lbfgs_options: same as for class initialisation
        """
        self.t,self.T_z = self.decompose_time(N,m)
        dt=float(self.T)/N
        if x0 is None:
            x0 = self.initial_control(N,m=m)#np.zeros(N+m)
        x = None
        if algorithm=='my_lbfgs':
            x0 = self.Vec(x0)
        Result = []

        initial_counter = self.counter.copy()
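        # Outer continuation loop over the penalty parameters in my_list; each
        # result warm-starts the next solve and carries the current jump_diff.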

        for i in range(len(my_list)):
            #"""
            def J(u):   
                self.counter[0]+=1
                return self.Penalty_Functional(u,N,m,my_list[i])

            def grad_J(u):
                self.counter[1]+=1
                return self.Penalty_Gradient(u,N,m,my_list[i])
                #"""
                
            #J,grad_J = self.generate_reduced_penalty(dt,N,m,my_list[i])
            if algorithm=='my_lbfgs':
                self.update_Lbfgs_options(Lbfgs_options)
                if tol_list is not None:
                    try:
                        opt = {'jtol': tol_list[i]}
                        self.update_Lbfgs_options(opt)
                    except (IndexError, TypeError):
                        print('no good tol_list')
                if scale:
                    scaler={'m':m,'factor':self.Lbfgs_options['scale_factor']}
                    
                    #solver = Lbfgs(J,grad_J,x0,options=self.Lbfgs_options,scale=scaler)
                    solver = SplitLbfgs(J,grad_J,x0.array(),options=self.Lbfgs_options,scale=scaler)
                else:
                    #solver = Lbfgs(J,grad_J,x0,options=self.Lbfgs_options)
                    solver = SplitLbfgs(J,grad_J,x0.array(),m=m,options=self.Lbfgs_options)
                #res = solver.solve()
                res = solver.normal_solve()
                
                x0 = res['control']
                #print J(x0.array())
            elif algorithm=='my_steepest_decent':

                self.update_SD_options(Lbfgs_options)
                SDopt = self.SD_options
                if scale:
                    
                    scale = {'m':m,'factor':SDopt['scale_factor']}
                    Solver = SteepestDecent(J,grad_J,x0.copy(),
                                             options=SDopt,scale=scale)
                    res = Solver.solve()
                    res.rescale()
                else:
                    Solver = PPCSteepestDecent(J,grad_J,x0.copy(),
                                               lambda x: x,options=SDopt)
                    res = Solver.split_solve(m)
                x0 = res.x.copy()
                
            elif algorithm=='slow_steepest_decent':
                self.update_SD_options(Lbfgs_options)
                SDopt = self.SD_options
                Solver = SteepestDecent(J,grad_J,x0.copy(),
                                        options=SDopt)
                res = Solver.solve()
                x0 = res.x.copy()
                
                
            elif algorithm == 'split_lbfgs':
                self.update_Lbfgs_options(Lbfgs_options)
                Solver = SplitLbfgs(J,grad_J,x0,m,options=self.Lbfgs_options)

                res = Solver.solve()
                x0 = res.x.copy()
            res.jump_diff=self.jump_diff
            Result.append(res)
            print('jump diff:', self.jump_diff)
        res.add_FuncGradCounter(self.counter-initial_counter)
        if len(Result)==1:
            return res
        else:
            return Result