Example #1
def testNEdges(N):
    for M in range(3):
        (D, Pmax, Pmin, a, b, c, alpha, beta, gamma, delta, eta, UR,
         DR) = load(N)
        D = (M + 1) * D / 3
        B = loss(N)

        n = 2**(N - 1)
        elements = np.arange(0, N)
        Limits = np.vstack((Pmin, Pmax))
        N_edges = 0
        for k in range(N):
            index = np.append(elements[:k], elements[k + 1:])
            for i in range(n):
                binary = bin(i)
                coord = list(binary)[2:]  #get rid of '0b'
                coord = np.array([int(i) for i in coord])
                seq = np.zeros(N - 1, dtype=int)
                seq[N - 1 - len(coord):] = coord
                V_sup = np.zeros(N)
                V_sup[index] = Limits[seq, index]
                V_sup[k] = Pmax[k]
                D_sup = sum(V_sup) - V_sup @ B @ V_sup

                V_inf = np.zeros(N)
                V_inf[index] = Limits[seq, index]
                V_inf[k] = Pmin[k]
                D_inf = sum(V_inf) - V_inf @ B @ V_inf
                if (D_inf <= D and D_sup >= D):
                    N_edges = N_edges + 1

        print(N_edges, 'edges for problem of size ', N, ' and ', D, 'demand')
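These snippets are shown without their module preamble. A minimal sketch of the imports and shared helpers they appear to rely on is given below; the helper names and return shapes are inferred from how they are called in the examples, so treat the exact signatures as assumptions rather than documented API.

# Assumed common preamble for the examples on this page (inferred, not shown in the source).
import time

import numpy as np
import matplotlib.pyplot as plt
import gurobipy as gp
from gurobipy import GRB, min_
from numpy import pi as PI
from scipy.optimize import minimize, NonlinearConstraint

# Shared project helpers (defined elsewhere in the repository, signatures inferred from usage):
#   load(N) -> (D, Pmax, Pmin, a, b, c, alpha, beta, gamma, delta, eta, UR, DR)
#   loss(N) -> B, the N x N transmission-loss (B-coefficient) matrix
#   SimplePriceFun, LinUpperB, get_approx_planes, zones, InZone,
#   Hermite, HermiteError, SmartHermiteError
# Solve, SolveGurobi, LinearRelaxation and RelaxTime are defined in the examples below.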
Example #2
def RelaxTime(N, Demand):
    (Unused, Pmax, Pmin, a, b, c, alpha, beta, gamma, delta, eta, UR,
     DR) = load(N)
    B = loss(N)
    price = SimplePriceFun(Pmin, Pmax, a, b, c, alpha, beta, gamma, Demand)

    Time = np.zeros(2)
    problems = ["Convex relaxation", "Linear Relax"]
    n_meth = 0

    for method in problems:
        m = gp.Model(method)
        m.setParam('OutputFlag', False)
        P = m.addVars(range(N), lb=Pmin, ub=Pmax, name='P')
        PL = m.addVar()
        x = m.addVars(N)
        y = m.addVars(N)
        for i in range(N):
            m.addConstr(x[i] == delta[i] * P[i])
            m.addGenConstrExp(x[i], y[i])

        if (method == "Convex relaxation"):
            m.addQConstr(PL >= sum(
                sum(P[i] * P[j] * B[i, j] for j in range(N))
                for i in range(N)))
            m.addConstr(P.sum() - PL == Demand, name='Demand')

        elif (method == "Linear Relax"):

            [opt, P0] = Solve(N, price, 1, Demand)
            n = np.ones(N) - 2 * B @ P0
            k = -np.dot(n, P0)
            m.addConstr(sum(P[i] * n[i] for i in range(N)) + k >= 0)

        t0 = time.time()

        Cost = sum(a[k] + b[k] * P[k] + c[k] * P[k] * P[k] for k in range(N))
        Emission = sum(alpha[k] + beta[k] * P[k] + gamma[k] * P[k] * P[k] +
                       eta[k] * y[k] for k in range(N))

        obj = price * Emission + Cost
        m.setObjective(obj)
        m.optimize()
        t1 = time.time()
        Time[n_meth] = t1 - t0
        n_meth = n_meth + 1
    return (Time)
Example #3
def LinearRelaxation(N, D):
    (Unused, Pmax, Pmin, a, b, c, alpha, beta, gamma, delta, eta, UR,
     DR) = load(N)
    B = loss(N)
    #Upper bound
    (n, k_upper, p) = LinUpperB(Pmin, Pmax, B, D)

    #Lower bound
    model = gp.Model('Lower bound of demand')
    P = model.addVars(range(N), lb=Pmin, ub=Pmax, name='P')
    PL = model.addVar()
    model.addQConstr(PL >= sum(
        sum(P[i] * P[j] * B[i, j] for j in range(N)) for i in range(N)))
    model.addConstr(P.sum() - PL == D, name='Demand')
    obj = sum(P[i] * n[i] for i in range(N))
    model.setObjective(obj)
    model.setParam('OutputFlag', False)
    model.optimize()
    k_lower = -obj.getValue()
    return (n, k_upper, k_lower)
Example #4
def Solve(N,w_E,w_C,D, method='trust-constr'):
    (Unused,Pmax,Pmin,a,b,c,alpha,beta,gamma,delta,eta,UR,DR) = load(N)
    B=loss(N)
    bnds=np.transpose(np.vstack((Pmin,Pmax)))
    P0=Pmin.copy()    
    def objective(P):
        Cost = sum(a[i]+b[i]*P[i]+c[i]*P[i]*P[i] for i in range(N))
        Emission = sum(alpha[i]+beta[i]*P[i]+gamma[i]*P[i]*P[i] +eta[i]*np.exp(P[i]*delta[i]) for i in range(N))
        return (w_E*Emission+w_C*Cost)
    
    def Gradient(P):
        GradC=b+2*c*P
        GradE= beta+2*gamma*P+delta*eta*np.exp(delta*P)
        Grad=w_C*GradC+w_E*GradE
        return(Grad)
        
    def Hessian(P):
        Hess= 2*w_C*c+w_E*(2*gamma+delta*delta*eta*np.exp(delta*P))
        H=Hess*np.eye(N)
        return(H)        
        
    def cons_f(P):
        PL=sum(sum(P[i]*P[j]*B[i,j] for j in range(N)) for i in range(N))
        sum_eq=sum(P)-PL-D
        return (sum_eq)
    
    def cons_J(P):
        Jac=np.ones(N)-2*P@B
        return(Jac)
    def cons_H(P,v):
        return(-2*v*B)
    
    if (method=='SLSQP'):
        const=[{'type': 'eq', 'fun': cons_f}]    
        solution = minimize(objective,P0, method='SLSQP',jac=Gradient, bounds=bnds,constraints=const)
    else:
        NL_const = NonlinearConstraint(cons_f, 0, 0, jac=cons_J, hess=cons_H)
        solution = minimize(objective,P0, method='trust-constr',jac=Gradient,
                            hess=Hessian,constraints=NL_const, bounds=bnds)
    P = solution.x
    return(objective(P),P)
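Because Solve passes analytic derivatives to scipy.optimize.minimize, a finite-difference check of the gradient and of the demand-constraint Jacobian is a cheap sanity test. The sketch below uses synthetic coefficient vectors and a stand-in diagonal loss matrix instead of the project's load()/loss() data, so every number in it is illustrative only.

# Finite-difference check of the analytic derivatives used in Solve(), on synthetic data.
import numpy as np

rng = np.random.default_rng(0)
N = 4
a, b, c = rng.random(N), rng.random(N), rng.random(N)
alpha, beta, gamma = rng.random(N), rng.random(N), rng.random(N)
delta, eta = 0.1 * rng.random(N), rng.random(N)
B = 1e-3 * np.eye(N)            # stand-in for loss(N), symmetric by construction
w_E, w_C, D = 1.0, 1.0, 10.0

def objective(P):
    Cost = a + b * P + c * P**2
    Emission = alpha + beta * P + gamma * P**2 + eta * np.exp(delta * P)
    return float(np.sum(w_C * Cost + w_E * Emission))

def Gradient(P):
    return w_C * (b + 2 * c * P) + w_E * (beta + 2 * gamma * P
                                          + delta * eta * np.exp(delta * P))

def cons_f(P):
    return float(np.sum(P) - P @ B @ P - D)

def cons_J(P):
    return np.ones(N) - 2 * P @ B

P = 5 * rng.random(N)
eps = 1e-6
E = np.eye(N)
num_grad = np.array([(objective(P + eps * E[i]) - objective(P - eps * E[i])) / (2 * eps)
                     for i in range(N)])
num_jac = np.array([(cons_f(P + eps * E[i]) - cons_f(P - eps * E[i])) / (2 * eps)
                    for i in range(N)])
print(np.max(np.abs(num_grad - Gradient(P))))   # should be ~1e-6 or smaller
print(np.max(np.abs(num_jac - cons_J(P))))      # should be ~1e-6 or smaller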
Example #5
def SolveGurobi(N,w_E,w_C, Demand, method="ConvexRelax"):
    (Unused,Pmax,Pmin,a,b,c,alpha,beta,gamma,delta,eta,UR,DR) = load(N)
    B=loss(N)
    m = gp.Model('Static Nonlinear EED with transmission losses')
    Pow = m.addVars(range(N),lb=Pmin,ub=Pmax, name='P')
    PLoss = m.addVar()
    x = m.addVars(N)
    y = m.addVars(N) 
    for n in range(N):
        m.addConstr(x[n]==delta[n]*Pow[n])
        m.addGenConstrExp(x[n], y[n])  
        
    if (method=="NonConvex"): 
        m.setParam('NonConvex', 2)
        m.addQConstr(PLoss== sum( sum(Pow[i]*Pow[j]*B[i][j] for j in range(N))for i in range(N)))
        m.addConstr(Pow.sum() == Demand+PLoss)
        
    elif (method=="ConvexRelax"):
        m.addQConstr(PLoss>= sum( sum(Pow[i]*Pow[j]*B[i][j] for j in range(N))for i in range(N)))
        m.addConstr(Pow.sum() == Demand+PLoss)
        
    elif (method=="LinRelax"):
        t=time.time()
        (n,k_upper, k_lower) = LinearRelaxation(N,Demand)
        print(time.time()-t,' sec to compute the extreme points')
        m.addConstr( sum(Pow[i]*n[i] for i in range(N))+k_upper<=0)
        m.addConstr( sum(Pow[i]*n[i] for i in range(N))+k_lower>=0)     
    
    Cost = sum(a[k]+b[k]*Pow[k]+c[k]*Pow[k]*Pow[k] for k in range(N))
    Emission = sum(alpha[k]+beta[k]*Pow[k]+gamma[k]*Pow[k]*Pow[k]+eta[k]*y[k] for k in range(N))
    
    obj= w_E*Emission + w_C*Cost 
    m.setObjective(obj)
    m.setParam( 'OutputFlag', False )
    m.optimize()
    
    opt=obj.getValue()
    P=np.zeros(N)
    for i in range(N):
        P[i] = Pow[i].x     
    return(opt,P)
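A possible usage sketch, assuming the preamble above and the project's load() helper; the weights are arbitrary illustrative values. It mirrors the comparison made at the top of SPG() further down, where the Gurobi relaxation and the scipy local solver are run on the same instance.

# Hypothetical usage: compare the convex relaxation in Gurobi with the scipy local solver.
N = 6
D = load(N)[0]                         # default demand of the 6-unit case
opt_g, P_g = SolveGurobi(N, 1.0, 1.0, D, method="ConvexRelax")
opt_s, P_s = Solve(N, 1.0, 1.0, D)     # non-convex model, local solution
print(opt_g, opt_s, np.linalg.norm(P_g - P_s))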
Example #6
def figures(N):
    plt.close("all")
    
    (d,Pmax,Pmin,a,b,c,alpha,beta,gamma,delta,eta,UR,DR) = load(N)
    B=loss(N)
    M=np.zeros([N+1,N+1])
    M[:-1,:-1]=B
    M[-1,:-1]=-np.ones(N)*0.5
    M[:-1,-1] = -np.ones(N)*0.5
    M[-1,-1] = d
    [l,v]=np.linalg.eig(B)
    samples=10**(5-N)
    xc=-np.linalg.inv(B).dot(M[:-1,-1])
    center=np.transpose(np.repeat([xc],samples, axis=0))
    detB=np.linalg.det(B)
    detM=np.linalg.det(M)
    
    rad=np.zeros(N)
    for i in range(N):   
        v[i]=v[i]/np.linalg.norm(v[i])
        rad[i]=np.sqrt(-detM/(l[i]*detB))    
        
    if N==2:
        """Plot the entire ellipse """
        theta=np.linspace(0,2*PI,num=samples)
        ellipse=np.array([rad[0]*np.sin(theta), rad[1]*np.cos(theta)])
        P=v.dot(ellipse)+center 
        plt.figure()
        plt.plot(P[0],P[1])
        plt.xlabel('$P_1$')
        plt.ylabel('$P_2$')
        plt.title('Domain')
        plt.grid()
        """ end """
        
        theta=np.linspace(101*PI/80,51*PI/40,num=samples)
        ellipse=np.array([rad[0]*np.sin(theta), rad[1]*np.cos(theta)])
        P=v.dot(ellipse)+center   
        
        plt.figure()
        plt.plot(P[0],P[1], label='Ellipsoid')
        plt.vlines(x=Pmin[0], ymin=Pmin[1], ymax=Pmax[1], linewidth=2.0)
        plt.vlines(x=Pmax[0], ymin=Pmin[1], ymax=Pmax[1], linewidth=2.0)       
        plt.hlines(y=Pmin[1], xmin=Pmin[0], xmax=Pmax[0], linewidth=2.0)
        plt.hlines(y=Pmax[1], xmin=Pmin[0], xmax=Pmax[0], linewidth=2.0)  
        plt.xlabel('$P_1$')
        plt.ylabel('$P_2$')
        plt.title('Domain')
        plt.grid()
        
        plt.figure(3)
        plt.xlabel('Emission [lb]')
        plt.ylabel('Cost [$]')
        plt.title('Achievable domain of the objective')
        plt.grid()

        box=np.zeros(N)
        index=0
        for i in range(samples):
            const=np.zeros(2*N)
            const[:N]=P[:,i]-Pmin
            const[N:]=Pmax-P[:,i]         
        
            if all(const>=0):                    
                Cost = sum(a[k]+b[k]*P[k,i]+c[k]*P[k,i]*P[k,i] for k in range(N))
                Emission = sum(alpha[k]+beta[k]*P[k,i]+gamma[k]*P[k,i]*P[k,i] for k in range(N))
                plt.figure(3)
                plt.plot(Emission,Cost,'b.',label=str(i))
                
                if index==0:
                    box= P[:,i-1]
                    index=i                 
                box=np.vstack((box,P[:,i]))
        box=np.vstack((box,P[:,index+len(box)]))
        box=box.T
       
        (n,k,p)=LinUpperB(Pmin,Pmax,B,d)
        
        x=np.linspace(min(p[:,0]),Pmax[0])
        y=-(n[0]*x+k)/n[1]
    
        it=10
        P_relax=np.zeros([N,it])
        price= SimplePriceFun(Pmin,Pmax,a,b,c,alpha,beta,gamma,d)
        w= np.linspace(0, 1 , num=it)
        for i in range(it) : 
            w_E=price*w[i]
            w_C=1-w[i]
            [opt,P_opt]=SolveGurobi(N,w_E,w_C, d, method="ConvexRelax")
            
            P_relax[:,i]=P_opt.copy()
        
        plt.figure()
        plt.plot(box[0],box[1],label="Demand constraint")
        plt.plot(x,y, label = "Linear adjustment")    
        plt.plot(P_relax[0],P_relax[1],'ro', label='Relaxed Solutions')

        
        plt.xlabel('$P_1$')
        plt.ylabel('$P_2$')
        plt.title('Relaxed convex set')
        plt.grid()
        plt.legend()
        
    elif N==3:
        
        theta=np.linspace(2.32,2.34,num=samples)
        phi=np.linspace(4.92,4.935,num=samples)
        
        ellipse=np.zeros([N,samples,samples])
        ellipse[0]= rad[0]*np.outer(np.sin(theta),np.sin(phi))
        ellipse[1]= rad[1]*np.outer(np.cos(theta),np.sin(phi))
        ellipse[2]= rad[2]*np.outer(np.ones(samples),np.cos(phi))
        
        P=np.zeros([N,samples,samples])
        for i in range(samples):  
            P[:,:,i]=v.dot(ellipse[:,:,i])+center

        plt.figure(1)
        plt.xlabel('Emission [lb]')
        plt.ylabel('Cost [$]')
        plt.title('Achievable domain of the objective')
        plt.grid()
        box=np.zeros([N,samples,samples])
        for i in range(samples):
            for j in range(samples):
                const=np.zeros(2*N)
                const[:N]=P[:,i,j]-Pmin
                const[N:]=Pmax-P[:,i,j]
                     
                if all(const>=0):
                    Cost = sum(a[k]+b[k]*P[k,i,j]+c[k]*P[k,i,j]*P[k,i,j] for k in range(N))
                    Emission = sum(alpha[k]+beta[k]*P[k,i,j]+gamma[k]*P[k,i,j]*P[k,i,j] for k in range(N))
                    plt.figure(1)
                    plt.plot(Emission,Cost,'b.')
                    box[:,i,j]= P[:,i,j]
                    P[:,i,j]=np.zeros(N) # zeroed here so these points are drawn in red (via box) rather than in blue with P
                    
        
    
        fig=plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        x=np.linspace(Pmin[0],Pmax[0])
        y=np.linspace(Pmin[1],Pmax[1])
        z=np.linspace(Pmin[2],Pmax[2])
        
        [Yz,Zy]=np.meshgrid(y,z)
        [Xz,Zx]=np.meshgrid(x,z)
        [Xy,Yx]=np.meshgrid(x,y)
        m=50
        Xmin=Pmin[0]*np.ones([m,m])
        Xmax=Pmax[0]*np.ones([m,m])     
        Ymin=Pmin[1]*np.ones([m,m])
        Ymax=Pmax[1]*np.ones([m,m])   
        Zmin=Pmin[2]*np.ones([m,m])
        Zmax=Pmax[2]*np.ones([m,m])    
        ax.plot_wireframe(Xmax,Yz,Zy,rstride=5, cstride=5)
        ax.plot_wireframe(Xmin,Yz,Zy,rstride=5, cstride=5)
        ax.plot_wireframe(Xz,Ymax,Zx,rstride=5, cstride=5)
        ax.plot_wireframe(Xy,Yx,Zmax,rstride=5, cstride=5)
    
        box=box[np.nonzero(box)]
        box=box.reshape(N,int(len(box)/N))
        
        ax.scatter(P[0,:,:], P[1,:,:], P[2,:,:],c='b', marker='o', s=0.5)
        ax.scatter(box[0],box[1], box[2], c='r',s=2)
        ax.set_xlabel('$P_1$')
        ax.set_ylabel('$P_2$')
        ax.set_zlabel('$P_3$')
        plt.grid()
        plt.title('Domain')
        
        
        
        """ Linear approximation """
        (n,k,p)=LinUpperB(Pmin,Pmax,B,d)

        xx=np.linspace(min(p[:,0]),Pmax[0])
        yy=np.linspace(min(p[:,1]),Pmax[1])
        
        [Xx,Yy]=np.meshgrid(xx,yy)    
        Z=-(n[0]*Xx+n[1]*Yy+k)/n[2]
         
        fig=plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        ax.plot_surface(Xx,Yy, Z, label="Linear approximation")
        ax.scatter(box[0],box[1], box[2], c='r',s=2, label= 'Domain of definition')

        plt.title('Linear Approximation of the domain')
        ax.set_xlabel('$P_1$')
        ax.set_ylabel('$P_2$')
        ax.set_zlabel('$P_3$')
        plt.grid()
    
    
        Dist=np.zeros(len(box[0]))
        for i in range(len(box[0])):
            Dist[i]=abs(box[:,i].dot(n)+k)/np.linalg.norm(n)
                    
        fig=plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        ax.scatter(box[0],box[1], Dist, c='b') 
        plt.title('Distance of the Linear Approximation')
        ax.set_xlabel('$P_1$')
        ax.set_ylabel('$P_2$')
        ax.set_zlabel('$Error$')
    
    
    
    """ Get the maximum error """
    model = gp.Model('Max distance')
    
    P = model.addVars(range(N),lb=Pmin,ub=Pmax, name='P')
    model.setParam('NonConvex', 2)
    PL = model.addVar()
    model.addQConstr(PL== sum( sum(P[i]*P[j]*B[i,j] for j in range(N))for i in range(N)))
    
    model.addConstr(P.sum()-PL == d, name='Demand')
    dist=-(sum(P[i]*n[i] for i in range(N))+k)/np.linalg.norm(n)
    model.setObjective(dist, GRB.MAXIMIZE)
    model.setParam( 'OutputFlag', False )
    model.optimize()
    
    print(dist.getValue()) #0.15661626890427272
    
            
Example #7
def DEED_PL(N,method):
    (StaticD,Pmax,Pmin,a,b,c,alpha,beta,gamma,delta,eta,UR,DR) = load(N)
    B=loss(N)
    T=24    
    t1=int(T/3)
    theta= np.linspace(-PI/2, PI/2, num=t1)
    theta2= np.linspace(-PI, 3*PI/2, num=T-t1)
    Demand=np.zeros(T)
    phi= 0.15
    Demand[:t1]= 0.75*StaticD*(1+phi*np.sin(theta))
    for i in np.arange(0,T-t1):
        # Demand is modified here, otherwise the problem becomes infeasible
        Demand[i+t1]= max(min(Demand[i+t1-1]+0.9*sum(UR)*np.sin(theta2[i]), 0.8*sum(Pmax)), sum(Pmin))
    Demand[-1]=Demand[0]
      
    price = SimplePriceFun(Pmin,Pmax,a,b,c,alpha,beta,gamma, np.mean(Demand))
    SR=0.05*Demand
    SR2=SR/3
         
    it=10
    C= np.zeros(it)
    E= np.zeros(it)
    w= np.linspace(0.001, 0.999 , num=it)
    
    LB= np.repeat([Pmin],T, axis=0)
    UB= np.repeat([Pmax],T, axis=0)
    
    model = gp.Model('Non-Convex Problem')
    P = model.addVars(T,N,lb=LB, ub=UB, name='P')
    PL = model.addVars(T)

    x = model.addVars(T,N)
    y = model.addVars(T,N)
    z = model.addVars(T,N)
    m = model.addVars(T,N)
    m2 = model.addVars(T,N)
    model.Params.NonConvex = 2
    
    for t in range(T):
        """Losses on the transfers """

        model.addQConstr(PL[t]<= sum( sum(P[t,i]*P[t,j]*B[i,j] for j in range(N))for i in range(N)))
        model.addQConstr(PL[t]>= sum( sum(P[t,i]*P[t,j]*B[i,j] for j in range(N))for i in range(N)))

        model.addConstr(sum(P[t,k] for k in range(N))-PL[t] == Demand[t], name='Demand at '+ str(t))        
        
        for n in range(0,N):
            if t>0:
                """ Ramp rate limits """
                model.addConstr(P[t,n] <= P[t-1,n] + UR[n], name='Maxramp'+ str(t) + str(n))
                model.addConstr(P[t,n] >= P[t-1,n] - DR[n], name='Minramp'+ str(t) + str(n))
            
            """ Reserve constraints"""
            model.addConstr(z[t,n]==Pmax[n]-P[t,n])
            model.addConstr(m[t,n]==min_([z[t,n],UR[n]]))
            model.addConstr(m2[t,n]==min_([z[t,n],UR[n]/6]))
            
            """ Exponential term of Emission objective """
            model.addConstr(x[t,n]==delta[n]*P[t,n])
            model.addGenConstrExp(x[t,n], y[t,n])
            
        model.addConstr(sum(m[t,i]for i in range(N))>=SR[t])
        model.addConstr(sum(m2[t,i]for i in range(N))>=SR2[t])
            
        
    Cost = sum(sum(a[k]+b[k]*P[t,k]+c[k]*P[t,k]*P[t,k] for k in range(N)) for t in range(T))  
    Emission = sum(sum(alpha[k]+beta[k]*P[t,k]+gamma[k]*P[t,k]*P[t,k]+eta[k]*y[t,k] for k in range(N))for t in range(T))
    
    if N==6:
        LimE=np.linspace(14700,17000,num=it)
        LimF=np.linspace(1010000, 1050000,num=it) 
        
    elif N==10:
        LimE=np.linspace(57000,63000,num=it)
        LimF=np.linspace(1795000,1875000,num=it)
    
    
    for i in range(it) :     
        if (method== "scal"):
            obj= price*w[i]*Emission+ (1-w[i])*Cost 
            
        elif (method=="LimE"):
            obj= Cost 
            if i>0:  
                model.remove(const)
            const=model.addQConstr(Emission<=LimE[i])

        elif (method=="LimF"):
            obj=Emission
            if i>0:  
                model.remove(const)
            const=model.addQConstr(Cost<=LimF[it-i-1])
        
        model.setObjective(obj)
        model.setParam( 'OutputFlag', False )
        model.Params.timeLimit = 500
        model.update()
        tm=1
        model.optimize()
        
        # In case the problem takes too much time
        while (model.status!=2):
            model.update()
            model.optimize()
            tm=tm+1
            print(model.SolCount)    
        print(tm, 'optimization rounds (500 s time limit each)')
        print(model.status)

        C[i]=Cost.getValue()
        E[i]=Emission.getValue()
    return(E,C,price,w,T)
Example #8
def RelaxErrors(N, Demand):
    (Unused, Pmax, Pmin, a, b, c, alpha, beta, gamma, delta, eta, UR,
     DR) = load(N)
    B = loss(N)
    price = SimplePriceFun(Pmin, Pmax, a, b, c, alpha, beta, gamma, Demand)
    it = 10
    C = np.zeros(it)
    E = np.zeros(it)

    problems = [
        "Convex relaxation", "Linear lower bound", "Linear upper bound",
        "Linear lower bound LVH", "Linear upper bound LVH"
    ]
    n_meth = 0
    wError = np.zeros([len(problems), 2])
    plt.figure()
    prop_cycle = plt.rcParams['axes.prop_cycle']
    colors = prop_cycle.by_key()['color']
    for method in problems:
        m = gp.Model(method)
        m.setParam('OutputFlag', False)
        P = m.addVars(range(N), lb=Pmin, ub=Pmax, name='P')
        PL = m.addVar()
        x = m.addVars(N)
        y = m.addVars(N)
        for i in range(N):
            m.addConstr(x[i] == delta[i] * P[i])
            m.addGenConstrExp(x[i], y[i])

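        # Note: "Non-convex problem" is not in the problems list above, so the branch below is skipped unless it is added there.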
        if (method == "Non-convex problem"):
            m.setParam('NonConvex', 2)
            m.addQConstr(PL == sum(
                sum(P[i] * P[j] * B[i, j] for j in range(N))
                for i in range(N)))
            m.addConstr(P.sum() == Demand + PL)
            methname = method
            line = '-'

        elif (method == "Convex relaxation"):
            m.addQConstr(PL >= sum(
                sum(P[i] * P[j] * B[i, j] for j in range(N))
                for i in range(N)))
            m.addConstr(P.sum() - PL == Demand, name='Demand')
            methname = method
            line = '-'

        elif (method == "Linear lower bound"):
            E_truth = E.copy()
            C_truth = C.copy()
            (n, k_upper, k_lower) = LinearRelaxation(N, Demand)
            m.addConstr(sum(P[i] * n[i]
                            for i in range(N)) + k_lower >= 0)  #Lowerbound
            n_meth = n_meth + 1
            methname = "Linear lower and upper bound"
            line = '--'

        elif (method == "Linear upper bound"):
            E_linL = E.copy()
            C_linL = C.copy()
            m.addConstr(sum(P[i] * n[i]
                            for i in range(N)) + k_upper >= 0)  #Upperbound
            methname = ''

        elif (method == "Linear lower bound LVH"):
            E_linU = E.copy()
            C_linU = C.copy()

            model = gp.Model('Find P0')
            model.setParam('OutputFlag', False)

            model.setParam('NonConvex', 2)
            Pow = model.addVars(range(N), lb=Pmin, ub=Pmax, name='P')
            PLoss = model.addVar()
            model.addQConstr(PLoss == sum(
                sum(Pow[i] * Pow[j] * B[i, j] for j in range(N))
                for i in range(N)))
            model.addConstr(Pow.sum() - PLoss == Demand, name='Demand')
            model.setObjective(0)
            model.optimize()
            P0 = np.zeros(N)
            for i in range(N):
                P0[i] = Pow[i].x

            (n, k_lower, k_upper) = get_approx_planes(P0, B, Demand, Pmin,
                                                      Pmax)
            m.addConstr(sum(P[i] * n[i]
                            for i in range(N)) + k_lower >= 0)  #Lowerbound
            n_meth = n_meth + 1
            methname = "Linear lower and upper bound LVH"
            line = ':'

        elif (method == "Linear upper bound LVH"):
            E_LVHL = E.copy()
            C_LVHL = C.copy()
            m.addConstr(sum(P[i] * n[i]
                            for i in range(N)) + k_upper >= 0)  #Upperbound
            methname = ''

        index = 0
        while (method != problems[index]):
            index = index + 1

        Cost = sum(a[k] + b[k] * P[k] + c[k] * P[k] * P[k] for k in range(N))
        Emission = sum(alpha[k] + beta[k] * P[k] + gamma[k] * P[k] * P[k] +
                       eta[k] * y[k] for k in range(N))

        w = np.linspace(0.001, 0.999, num=it)
        for i in range(it):
            obj = price * w[i] * Emission + (1 - w[i]) * Cost
            m.setObjective(obj)
            m.optimize()

            C[i] = Cost.getValue()
            E[i] = Emission.getValue()

            if (n_meth > 0):
                E_error = abs(E[i] - E_truth[i])
                C_error = abs(C[i] - C_truth[i])
                wError[index, 0] = max(wError[index, 0],
                                       round(E_error / E_truth[i], 5))
                wError[index, 1] = max(wError[index, 1],
                                       round(C_error / C_truth[i], 5))

        plt.plot(E, C, '.', color=colors[n_meth], label=methname)
        [contE, s] = Hermite(E, C, price * w, 1 - w)
        plt.plot(contE, s, color=colors[n_meth], linestyle=line)

    plt.xlabel('Emission [lb]')
    plt.ylabel('Cost [$]')
    plt.title('Comparison of the relaxations of ' + str(N) +
              ' generators problem and demand = ' + str(int(Demand)) + ' MW')
    plt.grid()
    plt.legend()

    Error1 = np.zeros([len(problems) - 1, 2])
    [E_error, C_error] = HermiteError(E_truth, C_truth, E_linL, C_linL,
                                      price * w, 1 - w)
    Error1[0, 0] = E_error
    Error1[0, 1] = C_error

    [E_error, C_error] = HermiteError(E_truth, C_truth, E_LVHL, C_LVHL,
                                      price * w, 1 - w)
    Error1[2, 0] = E_error
    Error1[2, 1] = C_error

    Error2 = np.zeros([len(problems) - 1, 2])
    [E_error, C_error] = SmartHermiteError(E_truth, C_truth, E_linL, C_linL,
                                           price * w, 1 - w)
    Error2[0, 0] = E_error
    Error2[0, 1] = C_error

    [E_error, C_error] = SmartHermiteError(E_truth, C_truth, E_LVHL, C_LVHL,
                                           price * w, 1 - w)
    Error2[2, 0] = E_error
    Error2[2, 1] = C_error
    return (Error1, Error2, wError)
Example #9
def SQP(N,w_E,w_C,D,Pl=0,Pu=0, figures=False):
    (Unused,Pmax,Pmin,a,b,c,alpha,beta,gamma,delta,eta,UR,DR) = load(N)
    B=loss(N)
    if (type(Pl)!=int):
        Pmin=Pl.copy()
        Pmax=Pu.copy()
    t0=time.time()
    
    """Computing P0"""    
    bnds=np.transpose(np.vstack((Pmin,Pmax)))
    P0=Pmin.copy()    
    def objective(P):
        return (0)
    def Gradient(P):
        return(np.zeros(N))
    def Hessian(P):
        return(np.zeros([N,N]))        
    def cons_f(P):
        PL=sum(sum(P[i]*P[j]*B[i,j] for j in range(N)) for i in range(N))
        sum_eq=sum(P)-PL-D
        return (sum_eq)  

    if (N<=10):
        const=[{'type': 'eq', 'fun': cons_f}]
        solution = minimize(objective ,P0, method='SLSQP',jac=Gradient, bounds=bnds,constraints=const)
    else: 
        def cons_J(P):
            Jac=np.ones(N)-2*P@B
            return(Jac)
        def cons_H(P,v):
            return(-2*v*B)
        NL_const = NonlinearConstraint(cons_f, 0, 0, jac=cons_J, hess=cons_H)
        solution = minimize(objective ,P0, method='trust-constr',jac=Gradient,
                            hess=Hessian,constraints=NL_const, bounds=bnds)
    P0 = solution.x
    tol=1e-2
    Maxiter=25
    Obj=np.zeros(Maxiter)
    C = sum(a[k]+b[k]*P0[k]+c[k]*P0[k]*P0[k] for k in range(N))
    E = sum(alpha[k]+beta[k]*P0[k]+gamma[k]*P0[k]*P0[k]+eta[k]*np.exp(delta[k]*P0[k]) for k in range(N))
    
    Obj[0]=w_C*C+w_E*E
    Pk=P0.copy()
    it=1    
    stepsize=1
    while (it<Maxiter  and stepsize>tol): #and tol<Obj[it-1]-opt
        
        model=gp.Model('SQP Step')
        model.setParam( 'OutputFlag', False )
        DeltaP = model.addVars(range(N),lb=Pmin-Pk,ub=Pmax-Pk)

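        # Linearize the demand balance sum(P) - P @ B @ P = D around Pk: Surplus is the current
        # residual and, for symmetric B, (1 - 2*Pk@B[k]) is its partial derivative w.r.t. P[k].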
        Surplus=sum(Pk)-Pk@B@Pk-D
        model.addConstr(Surplus+sum(DeltaP[k]*(1-2*Pk@B[k]) for k in range(N))==0)          
              
        GradC=b+c*Pk*2
        GradE= beta+gamma*Pk*2+delta*eta*np.exp(delta*Pk)
        Grad=w_C*GradC+w_E*GradE
        Hessian= w_C*2*c+w_E*(2*gamma+delta*delta*eta*np.exp(delta*Pk))
        Lagr=sum(DeltaP[k]*DeltaP[k]*Hessian[k] for k in range(N))
        objective = sum(Grad[k]*DeltaP[k] for k in range(N)) + 0.5*Lagr
        model.setObjective(objective)
        model.optimize()

        Prev=Pk.copy()    
        for i in range(N):
            Pk[i] = Pk[i] + DeltaP[i].x        
        
        stepsize=np.linalg.norm(Prev-Pk)
        C = sum(a[k]+b[k]*Pk[k]+c[k]*Pk[k]*Pk[k] for k in range(N))
        E = sum(alpha[k]+beta[k]*Pk[k]+gamma[k]*Pk[k]*Pk[k]+eta[k]*np.exp(delta[k]*Pk[k]) for k in range(N))
        Obj[it]=w_C*C+w_E*E
        
        if( (it % 10)==0):
            print(it, " of ", Maxiter)
        it=it+1
        
    if (figures==True):
        t1=time.time()
        [opt,P_opt]=Solve(N,w_E,w_C,D) 
        t2=time.time()
        plt.figure()
    
        Pos=Obj[:it]-np.ones(it)*opt
        Neg=-Pos.copy()
        
        Pos=(Obj[:it]-np.ones(it)*opt>0)*Pos
        Neg=(Obj[:it]-np.ones(it)*opt<0)*Neg
        plt.plot(range(it),Pos, label='Positive Part ')
        plt.plot(range(it),Neg, label='Negative Part ')
    
        plt.xlabel('Iterations')
        plt.ylabel('$f_k-f*$')
        plt.title("Rate of convergence of SQP method ")
        plt.legend()
        plt.grid()  
        print(t1-t0, "sec for SQP ")
        print(t2-t1, "sec for Scipy ")
        print('\007')        
    return(E,C,Pk)
Example #10
def GradMethod(N=10, method='ConvexRelax', solver='Gurobi'): 
    plt.close("all")

    (Demand,Pmax,Pmin,a,b,c,alpha,beta,gamma,delta,eta,UR,DR) = load(N)
    B=loss(N)
    price = SimplePriceFun(Pmin,Pmax,a,b,c,alpha,beta,gamma,Demand)
    
    model=gp.Model('Projection Model')
    model.setParam( 'OutputFlag', False )
    P = model.addVars(range(N),lb=Pmin,ub=Pmax)
    PL = model.addVar()
    if (method=="NonConvex"): 
            model.setParam('NonConvex', 2)
            model.addQConstr(PL== sum( sum(P[i]*P[j]*B[i][j] for j in range(N))for i in range(N)))   
            model.addConstr(P.sum() == Demand+PL)  
            
    else:
        model.addQConstr(PL>= sum( sum(P[i]*P[j]*B[i,j] for j in range(N))for i in range(N)))
        model.addConstr(P.sum()-PL == Demand, name='Demand')
   
    if (solver=='Gurobi'):
        t0=time.time() 
        [opt,P_opt]=SolveGurobi(N,price,1,Demand, method)
        t1=time.time()
        print(t1-t0 ,'sec for Gurobi')
        
        """Computing P0"""                    
        model.setObjective(0)
        model.optimize()
        t2=time.time()
        print(t2-t1 ,'P0')

        P0=np.zeros(N)
        for i in range(N):
            P0[i] = P[i].x 
    
    else:
        t0=time.time() 
        [opt,P_opt]=Solve(N,price,1,Demand) 
        t1=time.time()
        print(t1-t0 ,'sec for Scipy')
        
        """Computing P0"""
        bnds=np.transpose(np.vstack((Pmin,Pmax)))
        P0=Pmin.copy()    
        def objective(P):
            return (0)
        def Gradient(P):
            return(np.zeros(N))
        def Hessian(P):
            return(np.zeros([N,N]))        
        def cons_f(P):
            PL=sum(sum(P[i]*P[j]*B[i,j] for j in range(N)) for i in range(N))
            sum_eq=sum(P)-PL-Demand
            return (sum_eq)  
        if (N<=10):
            const=[{'type': 'eq', 'fun': cons_f}]
            solution = minimize(objective ,P0, method='SLSQP',jac=Gradient, bounds=bnds,constraints=const)
        else: 
            def cons_J(P):
                Jac=np.ones(N)-2*P@B
                return(Jac)
            def cons_H(P,v):
                return(-2*v*B)
            NL_const = NonlinearConstraint(cons_f, 0, 0, jac=cons_J, hess=cons_H)
            solution = minimize(objective ,P0, method='trust-constr',jac=Gradient,
                                hess=Hessian,constraints=NL_const, bounds=bnds)
        P0 = solution.x 
        t2=time.time()
        print(t2-t1 ,'P0')
        
    
    print()
    print("Gradient Method")
    tol=1e-2
    L=max(2*c+price*(2*gamma+delta*delta*eta*np.exp(delta*Pmax)))
    mu=min(2*c+price*(2*gamma+delta*delta*eta*np.exp(delta*Pmin)))

    Maxiter=int(0.25*(1+L/mu)*np.log(L*np.linalg.norm(P0-P_opt)**2/(2*tol)))+1
    Maxiter=min(Maxiter,50) #Otherwise too large vector of iterates

    Obj=np.zeros(Maxiter)
    C = sum(a[k]+b[k]*P0[k]+c[k]*P0[k]*P0[k] for k in range(N))
    E = sum(alpha[k]+beta[k]*P0[k]+gamma[k]*P0[k]*P0[k]+eta[k]*np.exp(delta[k]*P0[k]) for k in range(N))
    Obj[0]=C+price*E
    
    #Used if method=ConvexRelax
    GradRate=np.zeros(Maxiter)
    normP0=np.linalg.norm(P0-P_opt)**2
    GradRate[0]=L/2*normP0 

    Pk=P0.copy()
    it=1
    if (method=='NonConvex'):
        print(L,mu)
        h=1/L
    else:
        h=2/(mu+L)
    
    while (it<Maxiter and tol<Obj[it-1]-opt):
        
        GradC=b+c*Pk*2
        GradE= beta+gamma*Pk*2+delta*eta*np.exp(delta*Pk)
        Grad=GradC+price*GradE    
        Pk=Pk-h*Grad
 
        projection= sum((P[i]-Pk[i])*(P[i]-Pk[i]) for i in range(N))
        model.setObjective(projection)
        model.optimize()
        
        if model.Status!= GRB.OPTIMAL:
            print('Optimization was stopped with status ' + str(model.Status))
            
        for i in range(N):
            Pk[i] = P[i].x 
   
        C = sum(a[k]+b[k]*Pk[k]+c[k]*Pk[k]*Pk[k] for k in range(N))
        E = sum(alpha[k]+beta[k]*Pk[k]+gamma[k]*Pk[k]*Pk[k]+eta[k]*np.exp(delta[k]*Pk[k]) for k in range(N))
        
        Obj[it]=C+price*E    
        GradRate[it]=L/2*((L-mu)/(L+mu))**(2*it)*normP0
        if( (it % 10)==0):
            print(it, " of ", Maxiter)
        it=it+1    
    
    plt.figure()
    if (method=='ConvexRelax'):
        plt.plot(range(it),GradRate[:it],'b--', label='Gradient theoretical rate')
        
    plt.plot(range(it),Obj[:it]-np.ones(it)*opt,'b', label='Gradient Method ')
    plt.xlabel('Iterations')
    plt.ylabel('$f_k-f*$')
    plt.title('Rate of convergence of the Gradient method ')
    plt.legend()
    plt.grid(True)   
    t3=time.time()    
    print(t3-t1, "for gradient")
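The constant step h = 2/(mu + L) and the plotted GradRate curve come from the standard analysis of projected gradient on an L-smooth, mu-strongly convex objective. A self-contained illustration on a synthetic box-constrained quadratic, with the projection done by np.clip instead of the Gurobi projection model used above, might look like this:

# Projected gradient with h = 2/(mu + L) on a synthetic box-constrained quadratic.
import numpy as np

rng = np.random.default_rng(1)
N = 10
diag = rng.uniform(1.0, 10.0, N)        # eigenvalues of the (diagonal) Hessian
mu, L = diag.min(), diag.max()
lo, hi = np.zeros(N), np.ones(N)        # box constraints
t = rng.uniform(-0.5, 1.5, N)           # unconstrained minimizer

def f(x):
    return 0.5 * float(np.sum(diag * (x - t) ** 2))

def grad(x):
    return diag * (x - t)

x_star = np.clip(t, lo, hi)             # exact minimizer of this separable problem
x = lo.copy()
h = 2.0 / (mu + L)
gap0 = np.linalg.norm(x - x_star) ** 2
for k in range(1, 21):
    x = np.clip(x - h * grad(x), lo, hi)                      # gradient step + projection
    rate = L / 2 * ((L - mu) / (L + mu)) ** (2 * k) * gap0    # rate curve plotted by GradMethod
    print(k, f(x) - f(x_star), rate)

In GradMethod the feasible set is the relaxed demand region rather than a simple box, which is why each iteration solves a small Gurobi QP to project instead of clipping.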
Example #11
def SPG(N=10, solver="Gurobi"):
    (Demand,Pmax,Pmin,a,b,c,alpha,beta,gamma,delta,eta,UR,DR) = load(N)
    if (N==40): # Default demand not suited for problem with Transmission Losses
        Demand=7500
    B=loss(N)
    price = SimplePriceFun(Pmin,Pmax,a,b,c,alpha,beta,gamma,Demand)
    
    t0=time.time() 
    if (solver=='Gurobi'):
        [opt,P_opt]=SolveGurobi(N,price,1,Demand, 'ConvexRelax')
        t1=time.time()
        print(t1-t0 ,' sec for Gurobi')
        print(opt)
        #Gurobi does not provide an accurate solution
        [opt,P_opt]=Solve(N,price,1,Demand) 
        print(opt)
        
    else:   
        [opt,P_opt]=Solve(N,price,1,Demand) 
        t1=time.time()
        print(t1-t0 ,' sec for scipy')
    
    bnds=np.transpose(np.vstack((Pmin,Pmax)))
    P0=Pmin.copy()    
    def objective(P): return (0)
    def Gradient(P): return(np.zeros(N))
    def Hessian(P): return(np.zeros([N,N]))        
    def cons_f(P):
        PL=sum(sum(P[i]*P[j]*B[i,j] for j in range(N)) for i in range(N))
        sum_eq=sum(P)-PL-Demand
        return (sum_eq)  
    if (N<=10):
        const=[{'type': 'eq', 'fun': cons_f}]
        solution = minimize(objective ,P0, method='SLSQP',jac=Gradient, bounds=bnds,constraints=const)
    else: 
        def cons_J(P):
            Jac=np.ones(N)-2*P@B
            return(Jac)
        def cons_H(P,v):
            return(-2*v*B)
        NL_const = NonlinearConstraint(cons_f, 0, 0, jac=cons_J, hess=cons_H)
        solution = minimize(objective ,P0, method='trust-constr',jac=Gradient,
                            hess=Hessian,constraints=NL_const, bounds=bnds)
    P0 = solution.x 
    t2=time.time()
    print(t2-t1 ,'P0')

    tol=1e-2
    L=max(2*c+price*(2*gamma+delta*delta*eta*np.exp(delta*Pmax)))
    C = sum(a[k]+b[k]*P0[k]+c[k]*P0[k]*P0[k] for k in range(N))
    E = sum(alpha[k]+beta[k]*P0[k]+gamma[k]*P0[k]*P0[k]+eta[k]*np.exp(delta[k]*P0[k]) for k in range(N))

    Maxiter=50
    Obj=np.zeros(Maxiter)
    Obj[0]=C+price*E
    
    print()
    print("Spectral Projected Gradient")
    model=gp.Model('Projection Model')
    P = model.addVars(range(N),lb=Pmin,ub=Pmax)
    PL = model.addVar()    
    model.addQConstr(PL>= sum( sum(P[i]*P[j]*B[i,j] for j in range(N))for i in range(N)))
    model.addConstr(P.sum()-PL == Demand, name='Demand')   
    model.setParam( 'OutputFlag', False )

    it=1
    Pk=P0.copy()
    dk=np.zeros([N])
    stepmin=1e-10
    stepmax=1e10
    stepsize=1/L
    sigma1=0.1
    sigma2=0.9
    g=1e-4
    M=10
    while (it<Maxiter and tol<Obj[it-1]-opt):     
        GradC=b+c*Pk*2
        GradE= beta+gamma*Pk*2+delta*eta*np.exp(delta*Pk)
        Grad=GradC+price*GradE        
        Prev=Pk.copy()
        Pk=Pk-stepsize*Grad
        projection= sum((P[i]-Pk[i])*(P[i]-Pk[i]) for i in range(N)) 
        model.setObjective(projection)
        model.optimize()
        for i in range(N):
            dk[i] = P[i].x - Prev[i]     
        
        coeff=1
        index=max(0,it-M)
        fmax=max(Obj[index:it])
        Pk=Prev+coeff*dk
        C = sum(a[k]+b[k]*Pk[k]+c[k]*Pk[k]*Pk[k] for k in range(N))
        E = sum(alpha[k]+beta[k]*Pk[k]+gamma[k]*Pk[k]*Pk[k]+eta[k]*np.exp(delta[k]*Pk[k]) for k in range(N))
        fk=C+price*E
        while(fk>fmax+g*coeff*Grad@dk):
            
            Num=-0.5*coeff**2*Grad@dk
            Denom= fk-Obj[it-1]-coeff*Grad@dk
            temp=Num/Denom
            if (temp>=sigma1 and temp<=coeff*sigma2):
                coeff=temp         
            else:
                coeff=coeff/2
            
            Pk=Prev+coeff*dk
            C = sum(a[k]+b[k]*Pk[k]+c[k]*Pk[k]*Pk[k] for k in range(N))
            E = sum(alpha[k]+beta[k]*Pk[k]+gamma[k]*Pk[k]*Pk[k]+eta[k]*np.exp(delta[k]*Pk[k]) for k in range(N))
            fk=C+price*E
        
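        # Barzilai-Borwein (spectral) step for the next iteration, safeguarded to [stepmin, stepmax];
        # yk is the analytic gradient difference Grad(Pk) - Grad(Prev).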
        sk=Pk-Prev
        yk=2*c*(Pk-Prev) + price*(2*gamma*(Pk-Prev)+ delta*eta*(np.exp(delta*Pk)-np.exp(delta*Prev)))
        if sk@yk<=0: stepsize=stepmax
        else: stepsize=max(stepmin,min(sk@sk/(sk@yk),stepmax))
        Obj[it]=fk
        
        if( (it % 10)==0):
            print(it, " of ", Maxiter)
        it=it+1
        
    t3=time.time()
    print(t3-t1, "sec for SPG")
    
    plt.figure()
    plt.plot(range(it),Obj[:it]-np.ones(it)*opt, label='SPG')
    plt.title('Rate of convergence of the Spectral Projected Gradient')
    plt.xlabel('Iterations')
    plt.ylabel('$f_k-f*$')
    plt.legend()
    plt.grid(True)

    
Example #12
def AccMethod(N=10, method='ConvexRelax', solver='Gurobi'): 
    (Demand,Pmax,Pmin,a,b,c,alpha,beta,gamma,delta,eta,UR,DR) = load(N)
    B=loss(N)
    price = SimplePriceFun(Pmin,Pmax,a,b,c,alpha,beta,gamma,Demand)
    
    model=gp.Model('Projection Model')
    model.setParam( 'OutputFlag', False )
    P = model.addVars(range(N),lb=Pmin,ub=Pmax)
    PL = model.addVar()
    if (method=="NonConvex"): 
            model.setParam('NonConvex', 2)
            model.addQConstr(PL== sum( sum(P[i]*P[j]*B[i][j] for j in range(N))for i in range(N)))   
            model.addConstr(P.sum() == Demand+PL)  
            
    elif (method=="ConvexRelax"):
        model.addQConstr(PL>= sum( sum(P[i]*P[j]*B[i,j] for j in range(N))for i in range(N)))
        model.addConstr(P.sum()-PL == Demand, name='Demand')
    
    if (solver=='Gurobi'):
        t0=time.time() 
        [opt,P_opt]=SolveGurobi(N,price,1,Demand, method)
        t1=time.time()
        print(t1-t0 ,'sec for Gurobi')
        
        """Computing P0"""            
        model.setObjective(0)
        model.optimize()
        t2=time.time()     
        print(t2-t1 ,'P0')

        P0=np.zeros(N)
        for i in range(N):
            P0[i] = P[i].x   
    
    else:
        t0=time.time() 
        [opt,P_opt]=Solve(N,price,1,Demand) 
        t1=time.time()
        print(t1-t0 ,'sec for Scipy')
        
        """Computing P0"""
        bnds=np.transpose(np.vstack((Pmin,Pmax)))
        P0=Pmin.copy()    
        def objective(P):
            return (0)
        def Gradient(P):
            return(np.zeros(N))
        def Hessian(P):
            return(np.zeros([N,N]))        
        def cons_f(P):
            PL=sum(sum(P[i]*P[j]*B[i,j] for j in range(N)) for i in range(N))
            sum_eq=sum(P)-PL-Demand
            return (sum_eq)  
        if (N<=10):
            const=[{'type': 'eq', 'fun': cons_f}]
            solution = minimize(objective ,P0, method='SLSQP',jac=Gradient, bounds=bnds,constraints=const)
        else: 
            def cons_J(P):
                Jac=np.ones(N)-2*P@B
                return(Jac)
            def cons_H(P,v):
                return(-2*v*B)
            NL_const = NonlinearConstraint(cons_f, 0, 0, jac=cons_J, hess=cons_H)
            solution = minimize(objective ,P0, method='trust-constr',jac=Gradient,
                                hess=Hessian,constraints=NL_const, bounds=bnds)
        P0 = solution.x 
        t2=time.time()
        print(t2-t1 ,'P0')
    
    tol=1e-2
    L=max(2*c+price*(2*gamma+delta*delta*eta*np.exp(delta*Pmax)))
    mu=min(2*c+price*(2*gamma+delta*delta*eta*np.exp(delta*Pmin)))
    C = sum(a[k]+b[k]*P0[k]+c[k]*P0[k]*P0[k] for k in range(N))
    E = sum(alpha[k]+beta[k]*P0[k]+gamma[k]*P0[k]*P0[k]+eta[k]*np.exp(delta[k]*P0[k]) for k in range(N))
    f0= C+price*E
    Maxiter= int(np.sqrt(L/mu)*np.log(2*(f0-opt)/tol))+1
    Maxiter=min(Maxiter,50)
    Obj=np.zeros(Maxiter)
    Obj[0]=f0
    AccRate=np.zeros(Maxiter)
    AccRate[0]=2*(Obj[0]-opt)    
    
    print()
    print("Accelerated Gradient")

    it=1
    Pk=P0.copy()
    yk=Pk.copy()
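    # Nesterov momentum coefficient for the strongly convex case (used in the yk update below, not as a gradient step length).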
    stepsize=(np.sqrt(L)-np.sqrt(mu))/(np.sqrt(L)+np.sqrt(mu))
    while (it<Maxiter and tol<Obj[it-1]-opt):      
        GradC=b+c*yk*2
        GradE= beta+gamma*yk*2+delta*eta*np.exp(delta*yk)
        Grad=GradC+price*GradE        
        Prev=Pk.copy()
        Pk=yk-Grad/L
        projection= sum((P[i]-Pk[i])*(P[i]-Pk[i]) for i in range(N))
        
        model.setObjective(projection)
        model.optimize()

        for i in range(N):
            Pk[i] = P[i].x              
        yk=Pk+stepsize*(Pk-Prev)       
        
        C = sum(a[k]+b[k]*Pk[k]+c[k]*Pk[k]*Pk[k] for k in range(N))
        E = sum(alpha[k]+beta[k]*Pk[k]+gamma[k]*Pk[k]*Pk[k]+eta[k]*np.exp(delta[k]*Pk[k]) for k in range(N))
        Obj[it]=C+price*E
        AccRate[it]=2*(1-np.sqrt(mu/L))**it*(Obj[0]-opt)
    
        if( (it % 10)==0):
            print(it, " of ", Maxiter)
        it=it+1
    t3=time.time()
    print(t3-t1, "sec for Accelerated")
    
    plt.figure(1)
    if (method=='ConvexRelax'):        
        plt.plot(range(it),AccRate[:it], '--', color='orange', label= 'Accelerated gradient theoretical rate')       
    plt.plot(range(it),Obj[:it]-np.ones(it)*opt, color='orange', label='Accelerated Gradient Method')
    
    
    plt.title('Rate of convergence of the two methods')
    plt.xlabel('Iterations')
    plt.ylabel('$f_k-f*$')
    plt.legend()
    plt.grid(True)
Example #13
def TimeRelaxation():
    Number = [100]  #[2,3,6,10,40,100]
    size = len(Number)
    nRelax = 3
    Computime = np.zeros([size, nRelax])
    # Comparison of the 2 Linear Relaxations
    for nPb in range(size):
        N = Number[nPb]
        (D, Pmax, Pmin, a, b, c, alpha, beta, gamma, delta, eta, UR,
         DR) = load(N)
        if (N == 40):
            D = 7500
        B = loss(N)

        t1 = time.time()
        """ Linear relaxation using N points """
        LinearRelaxation(N, D)

        t2 = time.time()
        """ Linear relaxation using 1 feasible point and Gurobi"""
        model = gp.Model('Find P0')
        model.setParam('OutputFlag', False)
        model.setParam('NonConvex', 2)
        Pow = model.addVars(range(N), lb=Pmin, ub=Pmax, name='P')
        PLoss = model.addVar()
        model.addQConstr(PLoss == sum(
            sum(Pow[i] * Pow[j] * B[i, j] for j in range(N))
            for i in range(N)))
        model.addConstr(Pow.sum() - PLoss == D, name='Demand')
        model.setObjective(0)
        model.optimize()
        P0 = np.zeros(N)
        for i in range(N):
            P0[i] = Pow[i].x
        get_approx_planes(P0, B, D, Pmin, Pmax, True)

        t3 = time.time()
        """ Linear relaxation using 1 feasible point and Scipy"""
        bnds = np.transpose(np.vstack((Pmin, Pmax)))
        P0 = Pmin.copy()

        def objective(P):
            return (0)

        def Gradient(P):
            return (np.zeros(N))

        def Hessian(P):
            return (np.zeros([N, N]))

        def cons_f(P):
            PL = sum(
                sum(P[i] * P[j] * B[i, j] for j in range(N)) for i in range(N))
            sum_eq = sum(P) - PL - D
            return (sum_eq)

        def cons_J(P):
            Jac = np.ones(N) - 2 * P @ B
            return (Jac)

        def cons_H(P, v):
            return (-2 * v * B)

        if N <= 10:
            const = [{'type': 'eq', 'fun': cons_f}]
            solution = minimize(objective,
                                P0,
                                method='SLSQP',
                                jac=Gradient,
                                bounds=bnds,
                                constraints=const)
        else:
            NL_const = NonlinearConstraint(cons_f,
                                           0,
                                           0,
                                           jac=cons_J,
                                           hess=cons_H)
            solution = minimize(objective,
                                P0,
                                method='trust-constr',
                                jac=Gradient,
                                hess=Hessian,
                                constraints=NL_const,
                                bounds=bnds)
        P0 = solution.x
        (n, k_lower, k_upper) = get_approx_planes(P0, B, D, Pmin, Pmax, True)
        t4 = time.time()

        Computime[nPb] = np.array([t2 - t1, t3 - t2, t4 - t3])

    bw = 0.25
    opacity = 0.8
    prop_cycle = plt.rcParams['axes.prop_cycle']
    colors = prop_cycle.by_key()['color']
    plt.figure()
    plt.bar(np.arange(size),
            Computime[:, 0],
            bw,
            alpha=opacity,
            color=colors[0],
            label='Relaxation: N points')
    plt.bar(np.arange(size) + bw,
            Computime[:, 1],
            bw,
            alpha=opacity,
            color=colors[1],
            label='Relaxation: 1 point and Gurobi')
    plt.bar(np.arange(size) + 2 * bw,
            Computime[:, 2],
            bw,
            alpha=opacity,
            color=colors[2],
            label='Relaxation: 1 point and Scipy')

    plt.xlabel('Problem size')
    plt.ylabel('Time [s]')
    plt.title(
        'Computational time for the relaxations for different problem sizes')
    plt.xticks(np.arange(size) + bw, ['N=' + str(num) for num in Number])
    plt.legend()
    plt.tight_layout()
    plt.show()

    #Comparison of Time for one Linear, quadratic, nonconvex problem
    Computime2 = np.zeros([size, nRelax])
    for nPb in range(size):
        N = Number[nPb]
        (D, Pmax, Pmin, a, b, c, alpha, beta, gamma, delta, eta, UR,
         DR) = load(N)
        if (N == 40):
            D = 7500
        B = loss(N)

        t = RelaxTime(N, D)

        Computime2[nPb, :-1] = t
        t0 = time.time()
        Solve(N, 1, 1, D)
        t1 = time.time()
        Computime2[nPb, -1] = t1 - t0

    bw = 0.25
    opacity = 0.8
    prop_cycle = plt.rcParams['axes.prop_cycle']
    colors = prop_cycle.by_key()['color']
    plt.figure()
    plt.bar(np.arange(size),
            Computime2[:, 0],
            bw,
            alpha=opacity,
            color=colors[0],
            label='Convex Relaxation')
    plt.bar(np.arange(size) + bw,
            Computime2[:, 1],
            bw,
            alpha=opacity,
            color=colors[1],
            label='Linear Relaxation')
    plt.bar(np.arange(size) + 2 * bw,
            Computime2[:, 2],
            bw,
            alpha=opacity,
            color=colors[2],
            label='Scipy on non-convex')

    plt.xlabel('Problem size')
    plt.ylabel('Time [s]')
    plt.title(
        'Computational time for solving one optimization problem for different problem sizes'
    )
    plt.xticks(np.arange(size) + bw, ['N=' + str(num) for num in Number])
    plt.legend()
    plt.tight_layout()
    plt.show()
Example #14
def eConstE_SQP(N, LimE, D):
    (Unused, Pmax, Pmin, a, b, c, alpha, beta, gamma, delta, eta, UR,
     DR) = load(N)
    B = loss(N)
    (Zones, Units) = zones(N)
    """Computing P0 without POZ"""
    bnds = np.transpose(np.vstack((Pmin, Pmax)))

    def P0_Obj(P):
        return (0)

    def P0_Grad(P):
        return (np.zeros(N))

    def Objective(P):
        Obj = sum(a[i] + b[i] * P[i] + c[i] * P[i] * P[i] for i in range(N))
        return (Obj)

    def Gradient(P):
        Grad = b + 2 * c * P
        return (Grad)

    def cons_f(P):
        PL = sum(
            sum(P[i] * P[j] * B[i, j] for j in range(N)) for i in range(N))
        sum_eq = sum(P) - PL - D
        return (sum_eq)

    def cons_J(P):
        Jac = np.ones(N) - 2 * P @ B
        return (Jac)

    def cons_C(P):
        constraint = LimE - sum(alpha[k] + beta[k] * P[k] + gamma[k] * P[k] *
                                P[k] + eta[k] * np.exp(delta[k] * P[k])
                                for k in range(N))
        return (constraint)

    def cons_GradC(P):
        Grad = -beta - 2 * gamma * P - delta * eta * np.exp(delta * P)
        return (Grad)

    const = [{'type': 'eq', 'fun': cons_f}, {'type': 'ineq', 'fun': cons_C}]
    solution = minimize(P0_Obj,
                        Pmin,
                        method='SLSQP',
                        jac=P0_Grad,
                        bounds=bnds,
                        constraints=const)
    P0 = solution.x
    tol = 1e-2
    Maxiter = 100
    Obj = np.zeros(Maxiter)
    Obj[0] = sum(a[k] + b[k] * P0[k] + c[k] * P0[k] * P0[k] for k in range(N))

    Pk = P0.copy()
    Prev = P0.copy()
    it = 1
    stepsize = 1
    while (it < Maxiter and stepsize > tol):
        model = gp.Model('SQP Step, MIQP')
        model.setParam('OutputFlag', False)
        DeltaP = model.addVars(range(N), lb=Pmin - Pk, ub=Pmax - Pk)
        x = model.addVars(N)
        y = model.addVars(N)
        for i in range(N):
            model.addConstr(x[i] == delta[i] * DeltaP[i])
            model.addGenConstrExp(x[i], y[i])

        Surplus = sum(Pk) - Pk @ B @ Pk - D
        model.addConstr(Surplus + sum(DeltaP[k] * (1 - 2 * Pk @ B[k])
                                      for k in range(N)) == 0)

        model.addQConstr(
            LimE -
            sum(alpha[k] + beta[k] * (Pk[k] + DeltaP[k]) + gamma[k] *
                (Pk[k] + DeltaP[k]) *
                (Pk[k] + DeltaP[k]) + eta[k] * np.exp(delta[k] * Pk[k]) * y[k]
                for k in range(N)) >= 0)

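        # Prohibited operating zones: the binaries bb pick exactly one allowed band for unit i (bb.sum() == 1).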
        for i in range(N):  # POZ
            Zon_i = Zones[Units == i]
            n_i = len(Zon_i)
            if n_i >= 1:
                bb = model.addVars(range(n_i + 1), vtype=GRB.BINARY)
                model.addConstr(DeltaP[i] <= Zon_i[0, 0] * bb[0] +
                                (1 - bb[0]) * Pmax[i] - Pk[i])
                for j in np.arange(1, n_i):
                    model.addConstr(
                        DeltaP[i] >= Zon_i[j - 1, 1] * bb[j] - Pk[i])
                    model.addConstr(DeltaP[i] <= Zon_i[j, 0] * bb[j] +
                                    (1 - bb[j]) * Pmax[i] - Pk[i])
                model.addConstr(DeltaP[i] >= Zon_i[-1, 1] * bb[n_i] - Pk[i])
                model.addConstr(bb.sum() == 1)

        Grad = b + c * Pk * 2
        Hessian = 2 * c
        Lagr = sum(DeltaP[k] * DeltaP[k] * Hessian[k] for k in range(N))
        objec = sum(Grad[k] * DeltaP[k] for k in range(N)) + 0.5 * Lagr
        model.setObjective(objec)
        model.optimize()
        PPrev = Prev.copy()
        Prev = Pk.copy()
        for i in range(N):
            Pk[i] = Pk[i] + DeltaP[i].x
        stepsize = np.linalg.norm(Prev - Pk)

        if np.linalg.norm(PPrev - Pk) < tol:
            res = sum(Pk) - Pk @ B @ Pk - D
            resPrev = sum(Prev) - Prev @ B @ Prev - D
            if (abs(res) <= 1e-4 or abs(resPrev) <= 1e-4):
                if (resPrev == max(res, resPrev) and res <= 0):
                    Pk = Prev.copy()
                break

            (LowerB, UpperB) = InZone(Pk, Pmin, Pmax, Zones, Units)
            Limits = np.transpose(np.vstack((LowerB, UpperB)))
            sol = minimize(Objective,
                           Pk,
                           method='SLSQP',
                           jac=Gradient,
                           bounds=Limits,
                           constraints=const)
            Pk = sol.x
            obj = sum(a[k] + b[k] * Pk[k] + c[k] * Pk[k] * Pk[k]
                      for k in range(N))

            (LowerB1, UpperB1) = InZone(Prev, Pmin, Pmax, Zones, Units)
            obj1 = Obj[it - 1]
            if np.prod(LowerB == LowerB1) == 0:
                Limits = np.transpose(np.vstack((LowerB1, UpperB1)))
                sol1 = minimize(Objective,
                                Prev,
                                method='SLSQP',
                                jac=Gradient,
                                bounds=Limits,
                                constraints=const)
                Prev = sol1.x
                obj1 = sum(a[k] + b[k] * Prev[k] + c[k] * Prev[k] * Prev[k]
                           for k in range(N))
                if obj1 < obj:
                    Pk = Prev.copy()
            stepsize = -1

        Obj[it] = sum(a[k] + b[k] * Pk[k] + c[k] * Pk[k] * Pk[k]
                      for k in range(N))
        if ((it % 10) == 0):
            print(it, " of ", Maxiter)
        it = it + 1
    """Figures"""
    opt = Obj[it - 1] + 1e-6
    plt.figure()

    Pos = Obj[:it] - np.ones(it) * opt
    Neg = -Pos.copy()

    Pos = (Obj[:it] - np.ones(it) * opt > 0) * Pos
    Neg = (Obj[:it] - np.ones(it) * opt < 0) * Neg
    plt.plot(range(it), Pos, label='Positive Part ')
    plt.plot(range(it), Neg, label='Negative Part ')

    plt.xlabel('Iterations')
    plt.ylabel('$f_k-f*$')
    plt.title("Rate of convergence of eConstE ")
    plt.legend()
    plt.grid()

    return (LimE - cons_C(Pk), Obj[it - 1], Pk)
Example #15
def SQP_MINLP(N, w_E, w_C, D):
    (Unused, Pmax, Pmin, a, b, c, alpha, beta, gamma, delta, eta, UR,
     DR) = load(N)
    B = loss(N)
    (Zones, Units) = zones(N)
    """Computing P0 without POZ"""
    bnds = np.transpose(np.vstack((Pmin, Pmax)))
    P0 = Pmin.copy()

    def objective(P):
        return (0)

    def Gradient(P):
        return (np.zeros(N))

    def Hessian(P):
        return (np.zeros([N, N]))

    def cons_f(P):
        PL = sum(
            sum(P[i] * P[j] * B[i, j] for j in range(N)) for i in range(N))
        sum_eq = sum(P) - PL - D
        return (sum_eq)

    def cons_J(P):
        Jac = np.ones(N) - 2 * P @ B
        return (Jac)

    if (N <= 10):
        const = [{'type': 'eq', 'fun': cons_f, 'jac': cons_J}]
        solution = minimize(objective,
                            P0,
                            method='SLSQP',
                            jac=Gradient,
                            bounds=bnds,
                            constraints=const)
    else:

        def cons_H(P, v):
            return (-2 * v * B)

        NL_const = NonlinearConstraint(cons_f, 0, 0, jac=cons_J, hess=cons_H)
        solution = minimize(objective,
                            P0,
                            method='trust-constr',
                            jac=Gradient,
                            hess=Hessian,
                            constraints=NL_const,
                            bounds=bnds)
    P0 = solution.x
    tol = 1e-2
    Maxiter = 100
    Obj = np.zeros(Maxiter)
    C = sum(a[k] + b[k] * P0[k] + c[k] * P0[k] * P0[k] for k in range(N))
    E = sum(alpha[k] + beta[k] * P0[k] + gamma[k] * P0[k] * P0[k] +
            eta[k] * np.exp(delta[k] * P0[k]) for k in range(N))
    Obj[0] = w_C * C + w_E * E
    Pk = P0.copy()
    Prev = P0.copy()
    it = 1
    stepsize = 1
    while (it < Maxiter and stepsize > tol):
        model = gp.Model('SQP Step, MIQP')
        model.setParam('OutputFlag', False)
        DeltaP = model.addVars(range(N), lb=Pmin - Pk, ub=Pmax - Pk)

        Surplus = sum(Pk) - Pk @ B @ Pk - D
        model.addConstr(Surplus + sum(DeltaP[k] * (1 - 2 * Pk @ B[k])
                                      for k in range(N)) >= 0)
        for i in range(N):  # POZ
            Zon_i = Zones[Units == i]
            n_i = len(Zon_i)
            if n_i >= 1:
                bb = model.addVars(range(n_i + 1), vtype=GRB.BINARY)
                model.addConstr(DeltaP[i] <= Zon_i[0, 0] * bb[0] +
                                (1 - bb[0]) * Pmax[i] - Pk[i])
                for j in np.arange(1, n_i):
                    model.addConstr(
                        DeltaP[i] >= Zon_i[j - 1, 1] * bb[j] - Pk[i])
                    model.addConstr(DeltaP[i] <= Zon_i[j, 0] * bb[j] +
                                    (1 - bb[j]) * Pmax[i] - Pk[i])
                model.addConstr(DeltaP[i] >= Zon_i[-1, 1] * bb[n_i] - Pk[i])
                model.addConstr(bb.sum() == 1)

        GradC = b + c * Pk * 2
        GradE = beta + gamma * Pk * 2 + delta * eta * np.exp(delta * Pk)
        Grad = w_C * GradC + w_E * GradE
        Hessian = w_C * 2 * c + w_E * (
            2 * gamma + delta * delta * eta * np.exp(delta * Pk))
        Lagr = sum(DeltaP[k] * DeltaP[k] * Hessian[k] for k in range(N))
        objec = sum(Grad[k] * DeltaP[k] for k in range(N)) + 0.5 * Lagr
        model.setObjective(objec)
        model.optimize()
        PPrev = Prev.copy()
        Prev = Pk.copy()
        for i in range(N):
            Pk[i] = Pk[i] + DeltaP[i].x

        stepsize = np.linalg.norm(Prev - Pk)

        if np.linalg.norm(PPrev -
                          Pk) < tol:  # The algorithm cycles between 2 zones
            res = sum(Pk) - Pk @ B @ Pk - D
            resPrev = sum(Prev) - Prev @ B @ Prev - D
            if (resPrev == max(res, resPrev) and res <= 0):
                Pk = Prev.copy()

            (opt, p) = Solve(N, w_E, w_C, D)
            stepsize = -1

        C = sum(a[k] + b[k] * Pk[k] + c[k] * Pk[k] * Pk[k] for k in range(N))
        E = sum(alpha[k] + beta[k] * Pk[k] + gamma[k] * Pk[k] * Pk[k] +
                eta[k] * np.exp(delta[k] * Pk[k]) for k in range(N))
        Obj[it] = w_C * C + w_E * E
        if ((it % 10) == 0):
            print(it, " of ", Maxiter)
        it = it + 1
    return (E, C, Pk)
Example #16
def eConstF_SQP(N, LimF, D):
    (Unused, Pmax, Pmin, a, b, c, alpha, beta, gamma, delta, eta, UR,
     DR) = load(N)
    B = loss(N)
    (Zones, Units) = zones(N)
    """Computing P0 without POZ"""
    bnds = np.transpose(np.vstack((Pmin, Pmax)))

    def P0_Obj(P):
        return (0)

    def P0_Grad(P):
        return (np.zeros(N))

    def Objective(P):
        Obj = sum(alpha[i] + beta[i] * P[i] + gamma[i] * P[i] * P[i] +
                  eta[i] * np.exp(P[i] * delta[i]) for i in range(N))
        return (Obj)

    def Gradient(P):
        Grad = beta + 2 * gamma * P + delta * eta * np.exp(delta * P)
        return (Grad)

    def cons_f(P):
        PL = sum(
            sum(P[i] * P[j] * B[i, j] for j in range(N)) for i in range(N))
        sum_eq = sum(P) - PL - D
        return (sum_eq)

    def cons_J(P):
        Jac = np.ones(N) - 2 * P @ B
        return (Jac)

    def cons_C(P):
        constraint = LimF - sum(a[k] + b[k] * P[k] + c[k] * P[k] * P[k]
                                for k in range(N))
        return (constraint)

    def cons_GradC(P):
        Grad = -b - 2 * c * P
        return (Grad)

    const = [{'type': 'eq', 'fun': cons_f}, {'type': 'ineq', 'fun': cons_C}]
    solution = minimize(P0_Obj,
                        Pmin,
                        method='SLSQP',
                        jac=P0_Grad,
                        bounds=bnds,
                        constraints=const)
    P0 = solution.x
    tol = 1e-2
    Maxiter = 100
    Obj = np.zeros(Maxiter)
    Obj[0] = sum(alpha[k] + beta[k] * P0[k] + gamma[k] * P0[k] * P0[k] +
                 eta[k] * np.exp(delta[k] * P0[k]) for k in range(N))

    Pk = P0.copy()
    Prev = P0.copy()
    it = 1
    stepsize = 1
    while (it < Maxiter and stepsize > tol):
        model = gp.Model('SQP Step, MILP')
        model.setParam('OutputFlag', False)
        DeltaP = model.addVars(range(N), lb=Pmin - Pk, ub=Pmax - Pk)
        Surplus = sum(Pk) - Pk @ B @ Pk - D
        model.addConstr(Surplus + sum(DeltaP[k] * (1 - 2 * Pk @ B[k])
                                      for k in range(N)) >= 0)

        model.addQConstr(LimF - sum(a[k] + b[k] * (Pk[k] + DeltaP[k]) + c[k] *
                                    (Pk[k] + DeltaP[k]) * (Pk[k] + DeltaP[k])
                                    for k in range(N)) >= 0)

        for i in range(N):  # POZ
            Zon_i = Zones[Units == i]
            n_i = len(Zon_i)
            if n_i >= 1:
                bb = model.addVars(range(n_i + 1), vtype=GRB.BINARY)
                model.addConstr(DeltaP[i] <= Zon_i[0, 0] * bb[0] +
                                (1 - bb[0]) * Pmax[i] - Pk[i])
                for j in np.arange(1, n_i):
                    model.addConstr(
                        DeltaP[i] >= Zon_i[j - 1, 1] * bb[j] - Pk[i])
                    model.addConstr(DeltaP[i] <= Zon_i[j, 0] * bb[j] +
                                    (1 - bb[j]) * Pmax[i] - Pk[i])
                model.addConstr(DeltaP[i] >= Zon_i[-1, 1] * bb[n_i] - Pk[i])
                model.addConstr(bb.sum() == 1)

        Grad = beta + gamma * Pk * 2 + delta * eta * np.exp(delta * Pk)
        Hessian = 2 * gamma + delta * delta * eta * np.exp(delta * Pk)
        Lagr = sum(DeltaP[k] * DeltaP[k] * Hessian[k] for k in range(N))
        objec = sum(Grad[k] * DeltaP[k] for k in range(N)) + 0.5 * Lagr
        model.setObjective(objec)
        model.optimize()
        PPrev = Prev.copy()
        Prev = Pk.copy()

        for i in range(N):
            Pk[i] = Pk[i] + DeltaP[i].x
        stepsize = np.linalg.norm(Prev - Pk)

        if np.linalg.norm(PPrev -
                          Pk) < tol:  # The algorithm cycles between 2 zones
            res = sum(Pk) - Pk @ B @ Pk - D
            resPrev = sum(Prev) - Prev @ B @ Prev - D
            if (abs(res) <= 1e-4 or abs(resPrev) <= 1e-4):
                if (resPrev == max(res, resPrev) and res <= 0):
                    Pk = Prev.copy()
                break
            # Compares the best solution of the 2 zones
            (LowerB, UpperB) = InZone(Pk, Pmin, Pmax, Zones,
                                      Units)  #Current zone
            Limits = np.transpose(np.vstack((LowerB, UpperB)))
            sol = minimize(Objective,
                           Pk,
                           method='SLSQP',
                           jac=Gradient,
                           bounds=Limits,
                           constraints=const)
            Pk = sol.x
            obj = sum(alpha[k] + beta[k] * Pk[k] + gamma[k] * Pk[k] * Pk[k] +
                      eta[k] * np.exp(delta[k] * Pk[k]) for k in range(N))

            (LowerB1, UpperB1) = InZone(Prev, Pmin, Pmax, Zones,
                                        Units)  #Previous zone
            obj1 = Obj[it - 1]
            if np.prod(LowerB == LowerB1) == 0:  #Different zone
                Limits = np.transpose(np.vstack((LowerB1, UpperB1)))
                sol1 = minimize(Objective,
                                Prev,
                                method='SLSQP',
                                jac=Gradient,
                                bounds=Limits,
                                constraints=const)
                Prev = sol1.x
                obj1 = sum(alpha[k] + beta[k] * Prev[k] +
                           gamma[k] * Prev[k] * Prev[k] +
                           eta[k] * np.exp(delta[k] * Prev[k])
                           for k in range(N))
                if obj1 < obj:
                    Pk = Prev.copy()
            stepsize = -1

        Obj[it] = sum(alpha[k] + beta[k] * Pk[k] + gamma[k] * Pk[k] * Pk[k] +
                      eta[k] * np.exp(delta[k] * Pk[k]) for k in range(N))
        if ((it % 10) == 0):
            print(it, " of ", Maxiter)
        it = it + 1
    return (Obj[it - 1], LimF - cons_C(Pk), Pk)