Example #1
def opti(y0,a,T,yT,n,F,sol,adj,gr,printplot=False):

    t=np.linspace(0,T,n+1)
    dt=float(T)/n
    
    x0     = SimpleVector(np.zeros(n+1))
    szi_x0 = np.zeros(n+1)
    
    

    def J(u):
        return F(u,a,y0,yT,T)

    def grad_J(u):
        l = adj(y0,a,len(u)-1,u,T,yT)
        return gr(u,l,dt)
    
    def Mud_J(u):
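        # pack the gradient and the current control, each padded with a zero
        # mu-component, into MuVector pairs as expected by MuLbfgs below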
        
        y  = SimpleVector(grad_J(u))
        y2 = SimpleVector(np.zeros(len(u)))
        s  = SimpleVector(u)
        s2 = SimpleVector(np.zeros(len(u)))
        
        
        return MuVector([y,y2]),MuVector([s,s2])

    res1 = minimize(J,szi_x0,method='L-BFGS-B', jac=grad_J,
                   options={'gtol': 1e-6, 'disp': False})

    options={"beta":1,"mem_lim" : 10,"return_data":True,"jtol": 1e-6,}

    S1 = Lbfgs(J,grad_J,x0,options=options)
    
    S2 = MuLbfgs(J,grad_J,x0,Mud_J,options=options)

    res2 = S1.solve()
    res3 = S2.solve()
    x1 = res1.x
    x2 = res2['control'].array()
    x3 = res3['control'].array()
    
    if printplot:
        print(res1.nit, res2['iteration'], res3['iteration'])

        print(l2_error(x1, x2, dt))
        print(l2_error(x1, x3, dt))
        print(l2_error(x2, x3, dt))

        plot(t,x1)
        plot(t,x2)
        plot(t,x3)
        legend(['scipy','lbfgs','mu'])
        show()

    return res1,res2,res3
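
# For reference, the res1 call above is the standard scipy.optimize.minimize
# pattern: method='L-BFGS-B' with an analytic gradient passed through jac.
# Below is a minimal, self-contained sketch of that pattern on a toy quadratic;
# toy_J, toy_grad_J and c are illustrative stand-ins, not the functional F and
# adjoint-based gradient used in opti above.
import numpy as np
from scipy.optimize import minimize

c = np.linspace(0.0, 1.0, 11)          # target vector for the toy problem

def toy_J(u):
    # 0.5*||u - c||^2, a stand-in for the reduced functional J(u)
    return 0.5 * np.sum((u - c)**2)

def toy_grad_J(u):
    # analytic gradient of the toy objective
    return u - c

toy_res = minimize(toy_J, np.zeros(11), method='L-BFGS-B', jac=toy_grad_J,
                   options={'gtol': 1e-6, 'disp': False})
# toy_res.x holds the minimizer, toy_res.nit the number of iterations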
Example #2
def mini_solver(y0,a,T,yT,n,m,my_list,mem_limit,show_output=False):


    t=np.linspace(0,T,n+1)
    #initial guess for the control and the penalty variables is set to 0
    x0 = SimpleVector(np.zeros(n+m))
    
    dt = t[1]-t[0]

    def ser_J(u):
        return ser.Func(u,a,y0,yT,T)

    def ser_grad_J(u):
        l = ser.adjoint_solver(y0,a,len(u)-1,u,T,yT)
        return ser.L2_grad(u,l,dt)

    res1 = minimize(ser_J,np.zeros(n+1),method='L-BFGS-B', jac=ser_grad_J,
                   options={'gtol': 1e-6, 'disp': False})
    res2 = []
    
    for k in range(len(my_list)):
        #define the reduced functional, which depends only on u
        def J(u):
            y,Y=pen.solver(y0,a,n,m,u[:n+1],u[n+1:],T)
            return pen.Functional2(y,u[:n+1],u[n+1:],yT,T,my_list[k])
        
        #define our gradient by solving the adjoint equation
        def grad_J(u):
            #adjoint_solver(y0,a,n,m,u,lam,T,yT,my)
            l,L = pen.adjoint_solver(y0,a,n,m,u[:n+1],u[n+1:],T,yT,my_list[k])
            g = np.zeros(len(u))
            
            g[:n+1]=dt*(u[:n+1]+L)

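            # gradient w.r.t. the decomposition variables u[n+1:]: the jump in
            # the adjoint across each subinterval boundary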
            for i in range(m-1):
                g[n+1+i]=l[i+1][0]-l[i][-1]
                
            return g



        options={"mem_lim" : mem_limit,"return_data": True,"jtol" : 1e-4,}
        try:
            S = Lbfgs(J,grad_J,x0,options=options)

            data = S.solve()
        except Warning:
            data = {'control':x0,'iteration':-1,}
            
        res2.append(data)
        
    return res1,res2
Example #3
def opti(y0, a, T, yT, n, F, sol, adj, gr, printplot=False):

    t = np.linspace(0, T, n + 1)
    dt = float(T) / n

    x0 = SimpleVector(np.zeros(n + 1))
    szi_x0 = np.zeros(n + 1)

    def J(u):
        return F(u, a, y0, yT, T)

    def grad_J(u):
        l = adj(y0, a, len(u) - 1, u, T, yT)
        return gr(u, l, dt)

    def Mud_J(u):
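        # pack the gradient and the current control, each padded with a zero
        # mu-component, into MuVector pairs as expected by MuLbfgs below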

        y = SimpleVector(grad_J(u))
        y2 = SimpleVector(np.zeros(len(u)))
        s = SimpleVector(u)
        s2 = SimpleVector(np.zeros(len(u)))

        return MuVector([y, y2]), MuVector([s, s2])

    res1 = minimize(J,
                    szi_x0,
                    method='L-BFGS-B',
                    jac=grad_J,
                    options={
                        'gtol': 1e-6,
                        'disp': False
                    })

    options = {
        "beta": 1,
        "mem_lim": 10,
        "return_data": True,
        "jtol": 1e-6,
    }

    S1 = Lbfgs(J, grad_J, x0, options=options)

    S2 = MuLbfgs(J, grad_J, x0, Mud_J, options=options)

    res2 = S1.solve()
    res3 = S2.solve()
    x1 = res1.x
    x2 = res2['control'].array()
    x3 = res3['control'].array()

    if printplot:
        print(res1.nit, res2['iteration'], res3['iteration'])

        print(l2_error(x1, x2, dt))
        print(l2_error(x1, x3, dt))
        print(l2_error(x2, x3, dt))

        plot(t, x1)
        plot(t, x2)
        plot(t, x3)
        legend(['scipy', 'lbfgs', 'mu'])
        show()

    return res1, res2, res3
Example #4
def mini_solver(y0, a, T, yT, n, m, my_list, mem_limit, show_output=False):

    t = np.linspace(0, T, n + 1)
    #initial guess for the control and the penalty variables is set to 0
    x0 = SimpleVector(np.zeros(n + m))

    dt = t[1] - t[0]

    def ser_J(u):
        return ser.Func(u, a, y0, yT, T)

    def ser_grad_J(u):
        l = ser.adjoint_solver(y0, a, len(u) - 1, u, T, yT)
        return ser.L2_grad(u, l, dt)

    res1 = minimize(ser_J,
                    np.zeros(n + 1),
                    method='L-BFGS-B',
                    jac=ser_grad_J,
                    options={
                        'gtol': 1e-6,
                        'disp': False
                    })
    res2 = []

    for k in range(len(my_list)):
        #define the reduced functional, which depends only on u
        def J(u):
            y, Y = pen.solver(y0, a, n, m, u[:n + 1], u[n + 1:], T)
            return pen.Functional2(y, u[:n + 1], u[n + 1:], yT, T, my_list[k])

        #define our gradient by solving the adjoint equation
        def grad_J(u):
            #adjoint_solver(y0,a,n,m,u,lam,T,yT,my)
            l, L = pen.adjoint_solver(y0, a, n, m, u[:n + 1], u[n + 1:], T, yT,
                                      my_list[k])
            g = np.zeros(len(u))

            g[:n + 1] = dt * (u[:n + 1] + L)

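            # gradient w.r.t. the decomposition variables u[n+1:]: the jump in
            # the adjoint across each subinterval boundary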
            for i in range(m - 1):
                g[n + 1 + i] = l[i + 1][0] - l[i][-1]

            return g

        options = {
            "mem_lim": mem_limit,
            "return_data": True,
            "jtol": 1e-4,
        }
        try:
            S = Lbfgs(J, grad_J, x0, options=options)

            data = S.solve()
        except Warning:
            data = {
                'control': x0,
                'iteration': -1,
            }

        res2.append(data)

    return res1, res2
Example #5
def model(train_data,feats_dict,template,K,freq_ar,sigma):
    """
    
    Minimize the objective function value using L-BFGS.
    
    Returns
    ----------
    x or"0": Ndarray or int
             If the minimization succeeds, i.e.parameters found passed the 
             convergence test, then return an array "x" that stores the 
             parameters found. If maximum number of iterations has been 
             reached and appropriate parameters have not been found yet, 
             then return 0.
                           
    """
    params=[train_data,feats_dict,template,freq_ar,K]
    #Get the number of parameters to be optimized.
    n=len(K)
    #call the Lbfgs class
    lbfgs=Lbfgs(n)
    #initialize the array of parameters and the approximate Hessian inverse
    x,h=lbfgs.lbfgs_init()   
    g,fx=g_f(x,params)
    g,fx=L2_regularize(x,g,fx,sigma)
    gnorm=np.linalg.norm(g)
    xnorm=np.linalg.norm(x)
    #Early exit if "x" is already a minimizer
    if gnorm <= lbfgs.epsilon*max(xnorm,1):
        return x
    h=np.mat(h)
    u=[]
    k=0
    while k < lbfgs.max_iterations:
        
        g=np.transpose(np.mat(g))
        #Calculate the direction
        drt=-h*g
        drt=np.array(np.transpose(drt))[0]
        g=np.array(np.transpose(g))[0]
        g_old=g
        #Perform a line search to find the appropriate step
        step,g,fx=lbfgs.linesearch(fx,g,drt,x,g_f,params)
        x_old=x
        x=x+step*drt
        g,fx=L2_regularize(x,g,fx,sigma)
        gnorm=np.linalg.norm(g)
        xnorm=np.linalg.norm(x)
        # If passed gradient convergence test, then return "x"
        if gnorm <= lbfgs.epsilon*max(xnorm,1):
            return x
        si=x-x_old
        yi=g-g_old
        crr_tpl=(si,yi)
        #Update the corrections
        if k <=lbfgs.m-1:
            u.append(crr_tpl)
        else:
            for i in range(lbfgs.m-1):
                u[i]=u[i+1]
            u[lbfgs.m-1]=crr_tpl
        #Update the approximate Hessian inverse
        h=lbfgs.update_hk(k,u,n)
        k+=1
    print('Maximum number of iterations has been reached.')
    return 0
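
# The (si, yi) correction pairs collected in the while-loop above are exactly
# what limited-memory BFGS uses to apply its inverse-Hessian approximation.
# The sketch below is the standard two-loop recursion that computes H*g
# directly from such pairs (uses numpy as np, as elsewhere in this example);
# lbfgs_two_loop is an illustrative helper, not part of the Lbfgs class above,
# whose update_hk builds a dense matrix h instead of this matrix-free form.
def lbfgs_two_loop(g, pairs):
    """Approximate H*g from correction pairs (s_i, y_i), stored oldest first."""
    q = g.copy()
    alphas = []
    for s, y in reversed(pairs):                 # newest pair first
        rho = 1.0 / np.dot(y, s)
        alpha = rho * np.dot(s, q)
        q = q - alpha * y
        alphas.append((rho, alpha, s, y))
    if pairs:                                    # initial scaling H0 = gamma*I
        s, y = pairs[-1]
        gamma = np.dot(s, y) / np.dot(y, y)
    else:
        gamma = 1.0
    r = gamma * q
    for rho, alpha, s, y in reversed(alphas):    # oldest pair first
        beta = rho * np.dot(y, r)
        r = r + s * (alpha - beta)
    return r                                     # the search direction is then -r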
    
def mini_solver(y0,a,T,yT,n,m,my_list,show_output=False,mem_limit=15):
    
    t=np.linspace(0,T,n+1)
    #initial guess for the control and the penalty variables is set to 0
    x0 = SimpleVector(np.zeros(n+m))
    
    dt = t[1]-t[0]

    def ser_J(u):
        return ser.Func(u,a,y0,yT,T)

    def ser_grad_J(u):
        l = ser.adjoint_solver(y0,a,len(u)-1,u,T,yT)
        return ser.L2_grad(u,l,dt)

    res1 = minimize(ser_J,np.zeros(n+1),method='L-BFGS-B', jac=ser_grad_J,
                   options={'gtol': 1e-6, 'disp': False})
    res2 = []
    res3 = []
    
    H = None
    
    #solve problem for increasing mu
    for k in range(len(my_list)):
        #define the reduced functional, which depends only on u
        def J(u):
            y,Y=solver(y0,a,n,m,u[:n+1],u[n+1:],T)
            return Functional2(y,u[:n+1],u[n+1:],yT,T,my_list[k])
        
        #define our gradient by solving the adjoint equation
        def grad_J(u):
            #adjoint_solver(y0,a,n,m,u,lam,T,yT,my)
            l,L = adjoint_solver(y0,a,n,m,u[:n+1],u[n+1:],T,yT,my_list[k])
            g = np.zeros(len(u))
            
            g[:n+1]=dt*(u[:n+1]+L)

            for i in range(m-1):
                g[n+1+i]=l[i+1][0]-l[i][-1]
                
            return g
    
        def Mud_J(u):
            
            l,L,y,Y = adjoint_solver(y0,a,n,m,u[:n+1],u[n+1:],T,yT,my_list[k],get_y=True)
            
            u1   = u[:n+1]
            l1   = u[n+1:]
            du1  = float(T)*(u[:n+1]+L)/n
            ADJ1 = np.zeros(m-1)
            STA1 = np.zeros(m-1)
            for i in range(m-1):
                ADJ1[i] = l[i+1][0]
                STA1[i] = y[i][-1]

            y1 = np.zeros(len(u))
            y2 = np.zeros(len(u))
            
            y1[:n+1] = du1
            y1[n+1:] = ADJ1
            y2[n+1:] = l1 - STA1
            
            Y1 = SimpleVector(y1)
            Y2 = SimpleVector(y2)
            
            S1 = SimpleVector(u)
            S2 = SimpleVector(np.zeros(len(u)))
            return MuVector([Y1,Y2]),MuVector([S1,S2])

        #minimize J using the initial guess x0 and the gradient/functional above
        """
        default = {"jtol"                   : 1e-4,
                   "rjtol"                  : 1e-6,
                   "gtol"                   : 1e-4,
                   "rgtol"                  : 1e-5,
                   "maxiter"                :  200,
                   "display"                :    2,
                   "line_search"            : "strong_wolfe",
                   "line_search_options"    : ls,
                   "mem_lim"                : 5,
                   "Hinit"                  : "default",
                   "beta"                   : 1, 
                   "mu_val"                 : 1,
                   "old_hessian"            : None,
                   "penaly_number"          : 1,
                   "return_data"            : False, }
        """
        #mem_limit = 10
        options = {"mu_val": my_list[k], "old_hessian": H, 
                   "return_data": True,"mem_lim":mem_limit, "beta":1,
                   "save_number":-1,"jtol" : 1e-4,}
        
        options2={"mem_lim" : mem_limit,"return_data": True,"jtol" : 1e-4,}
        
        S1 = MuLbfgs(J,grad_J,x0,Mud_J,Hinit=None,options=options)
        S2 = Lbfgs(J,grad_J,x0,options=options2)
        try:
            data1 = S1.solve()
        except Warning:
            data1  = {'control'   : x0, 'iteration' : -1, 'lbfgs': H }
            
        except RuntimeError:
            data1  = {'control'   : x0, 'iteration' : -1, 'lbfgs': H }
        try:
            data2 = S2.solve()
        except:
            data2 = {'control'   : x0, 'iteration' : -1, 'lbfgs': H }

        res2.append(data1)
        res3.append(data2)
        
        
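        # carry the L-BFGS data from this penalty value over to warm-start the
        # next solve (fed back in through the 'old_hessian' option above)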
        H = data1['lbfgs']
        
        if show_output:
            
            x1 = data1['control']
            plot(t,x1.array()[:n+1])
            plot(t,res1.x)
            legend(["mu","normal"])
            print(data1['iteration'])
            show()
    
    return res1,res2,res3