Example #1
def mse(x, xhat):
    # TODO check hypotheses in more-1993 (10.1080/10556789308805542)
    # and work out analytically
    a = x.real()
    b = x.imag()

    c = xhat.real()
    d = xhat.imag()

    k = (a.T * a + b.T * b)[0]
    assert (k > 0.0)
    f = zeros(2, 1)
    f[0] = -2 * (a.T * c + b.T * d)
    f[1] = -2 * (-b.T * c + a.T * d)

    def fw(l):
        w = [0.0, 0.0]
        w[0] = -0.5 * f[0] / (k + l)
        w[1] = -0.5 * f[1] / (k + l)
        return w

    def fphi(l):
        w = fw(l)
        return w[0]**2 + w[1]**2 - 1

    lopt = fsolve(fphi, -k + 1e-7)
    wopt = fw(lopt)
    copt = matrix(wopt[0][0] + 1j * wopt[1][0])
    msev = (blas.nrm2(copt * x - xhat)**2) / (blas.nrm2(x)**2)
    return copt[0], msev
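# A sketch of the derivation the TODO above asks for (read off the code, not taken
# from more-1993): write the unit-modulus scale as copt = w1 + 1j*w2.  Then
#
#     ||copt*x - xhat||^2 = k*(w1^2 + w2^2) + f[0]*w1 + f[1]*w2 + ||xhat||^2,
#
# with k = a'*a + b'*b, f[0] = -2*(a'*c + b'*d) and f[1] = -2*(-b'*c + a'*d),
# exactly the quantities built above.  Minimizing subject to w1^2 + w2^2 = 1 gives
# the stationarity condition 2*(k + l)*w = -f, i.e. w(l) = -0.5*f/(k + l) as in
# fw(), and the multiplier l is then fixed by ||w(l)||^2 = 1, which is the secular
# equation fphi(l) = 0 handed to fsolve.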
Example #2
def mse(x, xhat):
    # TODO check hypotheses in more-1993 (10.1080/10556789308805542)
    # and work out analytically
    a = x.real()
    b = x.imag()

    c = xhat.real()
    d = xhat.imag()

    k = (a.T*a + b.T*b)[0]
    assert(k > 0.0)
    f = zeros(2, 1)
    f[0] = -2*(a.T*c + b.T*d)
    f[1] = -2*(-b.T*c + a.T*d)

    def fw(l):
        w = [0.0, 0.0]
        w[0] = -0.5*f[0]/(k + l)
        w[1] = -0.5*f[1]/(k + l)
        return w

    def fphi(l):
        w = fw(l)
        return w[0]**2 + w[1]**2 - 1

    lopt = fsolve(fphi, -k + 1e-7)
    wopt = fw(lopt)
    copt = matrix(wopt[0][0] + 1j*wopt[1][0])
    msev = (blas.nrm2(copt*x - xhat)**2)/(blas.nrm2(x)**2)
    return copt[0], msev
Example #3
def checksol(sol, A, B, C = None, d = None, G = None, h = None): 
    """
    Check optimality conditions

        C * x  + G' * z + A'(Z) + d = 0  
        G * x <= h 
        z >= 0,  || Z || <= 1
        z' * (h - G*x) = 0
        tr (Z' * (A(x) + B)) = || A(x) + B ||_*.

    """

    p, q = B.size
    n = A.size[1]
    if G is None: G = spmatrix([], [], [], (0, n))
    if h is None: h = matrix(0.0, (0, 1))
    m = h.size[0]
    if C is None: C = spmatrix(0.0, [], [], (n,n))
    if d is None: d = matrix(0.0, (n, 1))

    if sol['status'] == 'optimal':

        res = +d
        base.symv(C, sol['x'], res, beta = 1.0)
        base.gemv(G, sol['z'], res, beta = 1.0, trans = 'T')
        base.gemv(A, sol['Z'], res, beta = 1.0, trans = 'T')
        print "Dual residual: %e" %blas.nrm2(res)

        if m:
           print "Minimum primal slack (scalar inequalities): %e" \
               %min(h - G*sol['x'])
           print "Minimum dual slack (scalar inequalities): %e" \
               %min(sol['z'])

        if p:
            s = matrix(0.0, (p,1))
            X = matrix(A*sol['x'], (p, q)) + B
            lapack.gesvd(+X, s)
            nrmX = sum(s)
            lapack.gesvd(+sol['Z'], s)
            nrmZ = max(s)
            print "Norm of Z: %e" %nrmZ
            print "Nuclear norm of A(x) + B: %e" %nrmX
            print "Inner product of Z and A(x) + B: %e" \
                %blas.dot(sol['Z'], X)
        
    elif sol['status'] == 'primal infeasible':

        res = matrix(0.0, (n,1))
        base.gemv(G, sol['z'], res, beta = 1.0, trans = 'T')
        print "Dual residual: %e" %blas.nrm2(res)
        print "h' * z = %e" %blas.dot(h, sol['z'])
        print "Minimum dual slack (scalar inequalities): %e" \
            %min(sol['z'])


    else:
        pass
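# A hypothetical usage sketch (not part of the original snippet): checksol mirrors
# the calling convention of the cvxopt nucnrm.nrmapp nuclear-norm solver used
# elsewhere on this page, so, assuming that module is importable, a minimal check
# could look like this.
from cvxopt import normal, setseed
import nucnrm

setseed(0)
p, q, n = 20, 20, 10
A = normal(p * q, n)
B = normal(p, q)
sol = nucnrm.nrmapp(A, B)
checksol(sol, A, B)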
Example #4
def checksol(sol, A, B, C=None, d=None, G=None, h=None):
    """
    Check optimality conditions

        C * x  + G' * z + A'(Z) + d = 0  
        G * x <= h 
        z >= 0,  || Z || <= 1
        z' * (h - G*x) = 0
        tr (Z' * (A(x) + B)) = || A(x) + B ||_*.

    """

    p, q = B.size
    n = A.size[1]
    if G is None: G = spmatrix([], [], [], (0, n))
    if h is None: h = matrix(0.0, (0, 1))
    m = h.size[0]
    if C is None: C = spmatrix(0.0, [], [], (n, n))
    if d is None: d = matrix(0.0, (n, 1))

    if sol['status'] == 'optimal':

        res = +d
        base.symv(C, sol['x'], res, beta=1.0)
        base.gemv(G, sol['z'], res, beta=1.0, trans='T')
        base.gemv(A, sol['Z'], res, beta=1.0, trans='T')
        print "Dual residual: %e" % blas.nrm2(res)

        if m:
            print "Minimum primal slack (scalar inequalities): %e" \
                %min(h - G*sol['x'])
            print "Minimum dual slack (scalar inequalities): %e" \
                %min(sol['z'])

        if p:
            s = matrix(0.0, (p, 1))
            X = matrix(A * sol['x'], (p, q)) + B
            lapack.gesvd(+X, s)
            nrmX = sum(s)
            lapack.gesvd(+sol['Z'], s)
            nrmZ = max(s)
            print "Norm of Z: %e" % nrmZ
            print "Nuclear norm of A(x) + B: %e" % nrmX
            print "Inner product of Z and A(x) + B: %e" \
                %blas.dot(sol['Z'], X)

    elif sol['status'] == 'primal infeasible':

        res = matrix(0.0, (n, 1))
        base.gemv(G, sol['z'], res, beta=1.0, trans='T')
        print "Dual residual: %e" % blas.nrm2(res)
        print "h' * z = %e" % blas.dot(h, sol['z'])
        print "Minimum dual slack (scalar inequalities): %e" \
            %min(sol['z'])

    else:
        pass
Example #5
    def callback(self, xk=None, every=10):
        'Callback function after each iteration of minres'
        self.iter += 1

        # Access current iteration from lin_solver
        xy = numpy_to_cvxopt_matrix(xk)

        # Set x[I], y, and czl, czu (if nonempty)
        nI = len(self.PDAS.I)
        ny = self.PDAS.QP.numeq
        ncL = len(self.PDAS.cAL)
        ncU = len(self.PDAS.cAU)
        self.PDAS.x[self.PDAS.realI + self.PDAS.F] = xy[:nI]
        self.PDAS.y = xy[nI:]
        if self.PDAS.czl.size[0] > 0:
            self.PDAS.czl[self.PDAS.cAL] = xy[nI + ny:nI + ny + ncL]
            self.PDAS.czu[self.PDAS.cAU] = xy[nI + ny + ncL:]

        # Set residuals, and inv_norm estimate
        self.PDAS.CG_r = self.Lhs * xy - self.rhs

        # If iter not modulo of 'every', do nothing
        if self.iter % every != 0:
            return

        if self.correct_inv:
            self.inv_norm = min(
                1.0e+8,
                max(nrm2(xy) / nrm2(self.rhs + self.PDAS.CG_r), self.inv_norm))

        # Compute tilde v
        Qinvv = self.PDAS.CG_r[len(self.PDAS.realI):]
        rI = self.PDAS.CG_r[:len(self.PDAS.realI)]

        lapack.sytrs(self.PDAS.Q, self.PDAS.ipiv, Qinvv)

        viration = nrm2(rI - self.SI.T * Qinvv) * self.PDAS.inv_norm * matrix(
            1.0, (len(self.PDAS.realI), 1))

        self.err_lb = -viration
        self.err_ub = viration

        # Update other variables
        self.PDAS._back_substitute()

        # Caveat z_A shifted
        self.PDAS.zu[
            self.PDAS.AU] = self.PDAS.zu[self.PDAS.AU] + self.SA.T * Qinvv

        self.PDAS.identify_violation_inexact_c(self.err_lb, self.err_ub,
                                               self.SaQinvSi)
        frame = inspect.currentframe().f_back
        # self.iter = frame.f_locals['itn']

        # If condition satisfied, terminate the linear equation solve
        if self.PDAS.ask('conditioner') is True:
            set_in_frame(inspect.currentframe().f_back, 'istop', 9)
Example #6
    def F(x=None, z=None):
        if x is None:
            return 0, matrix(0.0, (n, 1))
        if max(abs(x)) >= 1.0:
            return None
        r = -b
        blas.gemv(A, x, r, beta=-1.0)
        w = x**2
        f = 0.5 * blas.nrm2(r)**2 - sum(log(1 - w))
        gradf = div(x, 1.0 - w)
        blas.gemv(A, r, gradf, trans='T', beta=2.0)
        if z is None:
            return f, gradf.T
        else:

            def Hf(u, v, alpha=1.0, beta=0.0):
                """
                   v := alpha * (A'*A*u + 2*((1+w)./(1-w)).*u + beta *v
               """
                v *= beta
                v += 2.0 * alpha * mul(div(1.0 + w, (1.0 - w)**2), u)
                blas.gemv(A, u, r)
                blas.gemv(A, r, v, alpha=alpha, beta=1.0, trans='T')

            return f, gradf.T, Hf
Example #7
def pcg0(H,c,A,b,x0,fA=None,callback=None):
    '''
    Projected CG method to solve the problem: {min 1/2x'Hx + c'x | Ax = b}.
    Initial point x0 must satisfy Ax0 = b. Unstable version, not recommended.
    '''
    # Initialize some variables
    r = H*x0 + c
    r = project(A,r)
    g = project(A,r)
    p = -copy(g)
    x = copy(x0)

    while True:
        alpha = dotu(r,g)/dotu(p,H*p)
        x = x+ alpha*p
        r2 = r + alpha*H*p
        g2 = project(A,r2)
        # Do iterative refinement
        # for i in range(5000):
        #     g2 = project(A,g2)
        beta = dotu(r2,g2)/dotu(r,g)
        p = -g2 + beta*p
        g = copy(g2)
        r = copy(r2)
        if nrm2(r) < 1e-16:
            break
    return x
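# The snippet does not show the 'project' helper it relies on.  A minimal sketch,
# assuming a dense, full-row-rank A and cvxopt dense matrices: orthogonal
# projection of r onto the null space of A, i.e. r - A'*(A*A')^{-1}*A*r.
from cvxopt import lapack

def project(A, r):
    AAt = A * A.T            # Gram matrix of the constraint rows
    y = A * r
    lapack.posv(AAt, y)      # y := (A*A')^{-1} * A * r via a Cholesky solve
    return r - A.T * y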
Example #8
def F2(x=None, z=None):
    if x is None: return 0, matrix(0.0, (n, 1))
    f = blas.nrm2(b - A * x)**2
    df = 2.0 * (A.T * -b).T
    if z is None: return f, df
    H = z[0] * 2.0 * A.T * A
    return f, df, H
Example #9
def pcg0(H, c, A, b, x0, fA=None, callback=None):
    '''
    Projected CG method to solve the problem: {min 1/2x'Hx + c'x | Ax = b}.
    Initial point x0 must satisfy Ax0 = b. Unstable version, not recommended.
    '''
    # Initialize some variables
    r = H * x0 + c
    r = project(A, r)
    g = project(A, r)
    p = -copy(g)
    x = copy(x0)

    while True:
        alpha = dotu(r, g) / dotu(p, H * p)
        x = x + alpha * p
        r2 = r + alpha * H * p
        g2 = project(A, r2)
        # Do iterative refinement
        # for i in range(5000):
        #     g2 = project(A,g2)
        beta = dotu(r2, g2) / dotu(r, g)
        p = -g2 + beta * p
        g = copy(g2)
        r = copy(r2)
        if nrm2(r) < 1e-16:
            break
    return x
Example #10
    def _gen_bandsdp(self,n,m,bw,seed):
        """Random data generator for SDP with band structure"""
        setseed(seed)

        I = matrix([ i for j in range(n) for i in range(j,min(j+bw+1,n))])
        J = matrix([ j for j in range(n) for i in range(j,min(j+bw+1,n))])
        V = spmatrix(0.,I,J,(n,n))
        Il = misc.sub2ind((n,n),I,J)
        Id = matrix([i for i in range(len(Il)) if I[i]==J[i]])

        # generate random y with norm 1
        y0 = normal(m,1)
        y0 /= blas.nrm2(y0)

        # generate random S0
        S0 = mk_rand(V,cone='posdef',seed=seed)
        X0 = mk_rand(V,cone='completable',seed=seed)

        # generate random A1,...,Am and set A0 = sum Ai*yi + S0
        A_ = normal(len(I),m+1,std=1./len(I))
        u = +S0.V
        blas.gemv(A_[:,1:],y0,u,beta=1.0)
        A_[:,0] = u
        # compute b
        x = +X0.V
        x[Id] *= 0.5
        self._b = matrix(0.,(m,1))
        blas.gemv(A_[:,1:],x,self._b,trans='T',alpha=2.0)
        # form A
        self._A = spmatrix(A_[:],
                     [i for j in range(m+1) for i in Il],
                     [j for j in range(m+1) for i in Il],(n**2,m+1))

        self._X0 = X0; self._y0 = y0; self._S0 = S0
Example #11
def F2(x=None, z=None):
    if x is None: return 0, matrix(0.0,(n,1))
    f = blas.nrm2(b - A*x)**2
    df = 2.0 * (A.T * -b).T
    if z is None: return f, df
    H = z[0] * 2.0 * A.T * A
    return f, df, H
Example #12
def F1(x=None, z=None):
    if x is None: return 0, matrix(0.0, (n, 1))
    f = blas.nrm2(b - A * x)**2
    # df = (A.T * -b).T
    df = (2.0 * A.T * (A * x - b)).T
    # df = (2 * A.T * A * x - 2 * A.T * b).T
    if z is None: return f, df
    H = z[0] * 2.0 * A.T * A
    return f, df, H
Example #13
def F1(x=None, z=None):
    if x is None: return 0, matrix(0.0,(n,1))
    f = blas.nrm2(b - A*x)**2
    # df = (A.T * -b).T
    df = (2.0 * A.T * (A*x - b)).T
    # df = (2 * A.T * A * x - 2 * A.T * b).T
    if z is None: return f, df
    H = z[0] * 2.0 * A.T * A
    return f, df, H
Example #14
    def _generate_bounds(self, B=None):
        'Obtain bounds from an estimate of norm(invHii): B'
        if B is None or self.PDAS.inv_norm is None:
            B = 1.1 * self.linsol.inv_norm

        viration = nrm2(self.linsol.r) * B * matrix(1.0, self.linsol.r0.size)

        self.solution_dist_lb = -viration
        self.solution_dist_ub = viration
Example #15
    def callback(self, xk=None):
        'Callback function after each iteration of minres'

        # Access current iteration from lin_solver

        xy = numpy_to_cvxopt_matrix(xk)

        # Set x[I], y, and czl, czu (if nonempty)
        nI = len(self.PDAS.I)
        ny = self.PDAS.QP.numeq
        ncL = len(self.PDAS.cAL)
        ncU = len(self.PDAS.cAU)
        self.PDAS.x[self.PDAS.I] = xy[0:nI]
        self.PDAS.y = xy[nI:nI + ny]
        if self.PDAS.czl.size[0] > 0:
            self.PDAS.czl[self.PDAS.cAL] = xy[nI + ny:nI + ny + ncL]
            self.PDAS.czu[self.PDAS.cAU] = xy[nI + ny + ncL:]

        # Set residuals, and inv_norm estimate
        self.PDAS.CG_r = self.Lhs * xy - self.rhs
        self.inv_norm = max(
            nrm2(xy) / nrm2(self.rhs + self.PDAS.CG_r), self.inv_norm)

        # Obtain bounds from an estimate of norm(invHii): B
        # if self.PDAS.inv_norm is None or len(self.PDAS._ObserverList['monitor']) < 1:
        #     print 'dynamic'
        #     self.PDAS.inv_norm = max(self.PDAS.inv_norm, 1.1*self.inv_norm)

        B = self.PDAS.inv_norm

        viration = nrm2(self.PDAS.CG_r) * B * matrix(1.0, self.r0.size)

        self.err_lb = -viration
        self.err_ub = viration

        # Update other variables
        self.PDAS._back_substitute()
        self.PDAS.identify_violation_inexact(self.err_lb, self.err_ub)
        frame = inspect.currentframe().f_back
        self.iter = frame.f_locals['itn']

        # If condition satisfied, terminate the linear equation solve
        if self.PDAS.ask('conditioner') is True:
            set_in_frame(inspect.currentframe().f_back, 'istop', 9)
Example #16
def poweriter(H, x=None, maxit=1000, tol=1.0e-2):
    'Function to estimate the spectral norm of a matrix.'
    n = H.size[1]
    if x is None:
        x = matrix(1.0, (n, 1)) / sqrt(n)

    vec = H * x
    norm = dot(x, vec)
    x = vec / nrm2(vec)

    it = 0
    for it in range(maxit):
        norm_pre = copy(norm)
        vec = H * x
        x = vec / nrm2(vec)
        norm = dot(x, vec)
        if abs(norm - norm_pre) < tol:
            break

    return (norm, it, x)
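# A quick sanity check (a sketch, not from the original source; it assumes the
# snippet's own imports such as blas.nrm2/dot, math.sqrt and copy): for H'*H the
# spectral norm equals sigma_max(H)**2, which lapack.gesvd gives directly.
from cvxopt import normal, matrix, lapack

m = 50
Hs = normal(m, m)
est, iters, v = poweriter(Hs.T * Hs, tol=1e-6)
s = matrix(0.0, (m, 1))
lapack.gesvd(+Hs, s)          # singular values of Hs, largest first
print("power iteration: %e   gesvd: %e" % (est, s[0]**2))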
Example #17
def F2(x=None, z=None):
    if x is None: return 0, matrix(0.0,(p,1))
    # H = matrix(rosen_hess(numpy.array(x).ravel()))
    # H = matrix(rosen_hess(theta))
    H = matrix(numpy.eye(p))
    # g = matrix(rosen_der(numpy.array(x).ravel()))
    g = matrix(rosen_der(theta))
    f = blas.nrm2(H*x+g)**2
    df = 2.0*(H.T * (H*x+g)).T
    if z is None: return f, df
    H = z[0] * 2.0 * H.T * H
    return f, df, H
Example #18
    def _gen_randsdp(self,V,m,d,seed):
        """
        Random data generator
        """
        setseed(seed)
        n = self._n
        V = chompack.tril(V)
        N = len(V)
        I = V.I; J = V.J
        Il = misc.sub2ind((n,n),I,J)
        Id = matrix([i for i in range(len(Il)) if I[i]==J[i]])

        # generate random y with norm 1
        y0 = normal(m,1)
        y0 /= blas.nrm2(y0)

        # generate random S0, X0
        S0 = mk_rand(V,cone='posdef',seed=seed)
        X0 = mk_rand(V,cone='completable',seed=seed)

        # generate random A1,...,Am
        if type(d) is float:
            nz = min(max(1, int(round(d*N))), N)
            A = sparse([[spmatrix(normal(N,1),Il,[0 for i in range(N)],(n**2,1))],
                        [spmatrix(normal(nz*m,1),
                                  [i for j in range(m) for i in random.sample(Il,nz)],
                                  [j for j in range(m) for i in range(nz)],(n**2,m))]])
        elif type(d) is list:
            if len(d) == m:
                nz = [min(max(1, int(round(v*N))), N) for v in d]
                nnz = sum(nz)
                A = sparse([[spmatrix(normal(N,1),Il,[0 for i in range(N)],(n**2,1))],
                            [spmatrix(normal(nnz,1),
                                      [i for j in range(m) for i in random.sample(Il,nz[j])],
                                      [j for j in range(m) for i in range(nz[j])],(n**2,m))]])
        else: raise TypeError

        # compute A0
        u = +S0.V
        for k in range(m):
            base.gemv(A[:,k+1][Il],matrix(y0[k]),u,beta=1.0,trans='N')
        A[Il,0] = u
        self._A = A

        # compute b
        X0[Il[Id]] *= 0.5
        self._b = matrix(0.,(m,1))
        u = matrix(0.)
        for k in range(m):
            base.gemv(A[:,k+1][Il],X0.V,u,trans='T',alpha=2.0)
            self._b[k] = u
Example #19
def cvx():
    # Figures 6.8-10, pages 313-314
    # Quadratic smoothing.

    from math import pi
    from cvxopt import blas, lapack, matrix, sin, mul, normal

    n = 4000
    t = matrix(list(range(n)), tc='d')
    ex = 0.5 * mul(sin(2 * pi / n * t), sin(0.01 * t))
    corr = ex + 0.05 * normal(n, 1)

    # A = D'*D is an n by n tridiagonal matrix with -1.0 on the
    # upper/lower diagonal and 1, 2, 2, ..., 2, 2, 1 on the diagonal.
    Ad = matrix([1.0] + (n - 2) * [2.0] + [1.0])
    As = matrix(-1.0, (n - 1, 1))

    nopts = 50
    deltas = -10.0 + 20.0 / (nopts - 1) * matrix(list(range(nopts)))
    cost1, cost2 = [], []
    for delta in deltas:
        xr = +corr
        lapack.ptsv(1.0 + 10 ** delta * Ad, 10 ** delta * As, xr)
        cost1 += [blas.nrm2(xr - corr)]
        cost2 += [blas.nrm2(xr[1:] - xr[:-1])]

    # Find solutions with ||xhat - xcorr || roughly equal to 8.0, 3.1, 1.0.
    time.sleep(1)
    mv1, k1 = min(zip([abs(c - 8.0) for c in cost1], range(nopts)))
    xr1 = +corr
    lapack.ptsv(1.0 + 10 ** deltas[k1] * Ad, 10 ** deltas[k1] * As, xr1)
    mv2, k2 = min(zip([abs(c - 3.1) for c in cost1], range(nopts)))
    xr2 = +corr
    lapack.ptsv(1.0 + 10 ** deltas[k2] * Ad, 10 ** deltas[k2] * As, xr2)
    mv3, k3 = min(zip([abs(c - 1.0) for c in cost1], range(nopts)))
    xr3 = +corr
    lapack.ptsv(1.0 + 10 ** deltas[k3] * Ad, 10 ** deltas[k3] * As, xr3)
Example #20
def _findChebyshevCenter(G, h, full_output=False):
    # return the binding constraints
    bindingIndex, dualHull, G, h, x0 = bindingConstraint(G, h, None, True)
    # Chebyshev center
    R = variable()
    xc = variable(2)
    m = len(h)
    op(-R, [G[k, :] * xc + R * blas.nrm2(G[k, :]) <= h[k]
            for k in range(m)] + [R >= 0]).solve()
    R = R.value
    xc = xc.value

    if full_output:
        return numpy.array(xc).flatten(), G, h
    else:
        return numpy.array(xc).flatten()
Example #21
def _conelp_scale(P, inplace = True, **kwargs):
    """
    Scale cone LP
    """
    inplace = kwargs.get('inplace', True)
    c,G,h,dims = P.problem_data
    cp = G.CCS[0]
    V = abs(G.V)
    u = matrix([max(abs(V[cp[i]:cp[i+1]])) if cp[i+1]-cp[i]>0 else 1.0 for i in range(len(cp)-1)])
    u = max(u,abs(c))
    G = G*spmatrix(div(1.0,u),range(len(u)),range(len(u)))
    c = div(c,u)
    nrm2h = blas.nrm2(h)
    if inplace: P.cost_scale *= max(1.0,nrm2h)
    if nrm2h > 1.0:  h /= nrm2h
    if inplace: P.problem_data = (c,G,h,dims)
    return c,G,h,dims
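# In equation form (read off the code above, not taken from the source): with
# u[j] = max( max_i |G[i,j]|, |c[j]| ), the routine rescales
#
#     G := G * diag(1./u),    c := c ./ u,    h := h / max(1, ||h||_2),
#
# and multiplies P.cost_scale by max(1, ||h||_2), presumably so that optimal
# values of the scaled cone LP can be mapped back to the original problem.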
Example #22
def cg(A, b, x0=None, eps=1e-8):
    """ Conjugate gradient solve Ax = b """
    if not x0: x0 = matrix(0.0, b.size)
    r = b - A * x0
    w = -r
    z = A * w
    a = (r.T * w) / (w.T * z)
    x = x0 + a * w
    for i in range(A.size[0]):
        r = r - a * z
        if blas.nrm2(r) < eps:
            break
        B = (r.T * z) / (w.T * z)
        w = -r + B*w
        z = A * w
        a = (r.T * w) / (w.T * z)
        x = x + a * w
    ITERS.append(i)
    return x
Example #23
    def _gen_bandsdp(self, n, m, bw, seed):
        """Random data generator for SDP with band structure"""
        setseed(seed)

        I = matrix(
            [i for j in xrange(n) for i in xrange(j, min(j + bw + 1, n))])
        J = matrix(
            [j for j in xrange(n) for i in xrange(j, min(j + bw + 1, n))])
        V = spmatrix(0., I, J, (n, n))
        Il = misc.sub2ind((n, n), I, J)
        Id = matrix([i for i in xrange(len(Il)) if I[i] == J[i]])

        # generate random y with norm 1
        y0 = normal(m, 1)
        y0 /= blas.nrm2(y0)

        # generate random S0
        S0 = mk_rand(V, cone='posdef', seed=seed)
        X0 = mk_rand(V, cone='completable', seed=seed)

        # generate random A1,...,Am and set A0 = sum Ai*yi + S0
        A_ = normal(len(I), m + 1, std=1. / len(I))
        u = +S0.V
        blas.gemv(A_[:, 1:], y0, u, beta=1.0)
        A_[:, 0] = u
        # compute b
        x = +X0.V
        x[Id] *= 0.5
        self._b = matrix(0., (m, 1))
        blas.gemv(A_[:, 1:], x, self._b, trans='T', alpha=2.0)
        # form A
        self._A = spmatrix(A_[:], [i for j in xrange(m + 1) for i in Il],
                           [j for j in xrange(m + 1) for i in Il],
                           (n**2, m + 1))

        self._X0 = X0
        self._y0 = y0
        self._S0 = S0
Example #24
 def F(x = None, z = None):
     if x is None: 
         return 0, matrix(0.0, (n,1))
     if max(abs(x)) >= 1.0: 
         return None 
     r = - b
     blas.gemv(A, x, r, beta = -1.0)
     w = x**2
     f = 0.5 * blas.nrm2(r)**2  - sum(log(1-w))
     gradf = div(x, 1.0 - w)
     blas.gemv(A, r, gradf, trans = 'T', beta = 2.0)
     if z is None:
         return f, gradf.T
     else:
         def Hf(u, v, alpha = 1.0, beta = 0.0):
            """
                v := alpha * (A'*A*u + 2*((1+w)./(1-w)).*u + beta *v
            """
            v *= beta
            v += 2.0 * alpha * mul(div(1.0+w, (1.0-w)**2), u)
            blas.gemv(A, u, r)
            blas.gemv(A, r, v, alpha = alpha, beta = 1.0, trans = 'T')
         return f, gradf.T, Hf
Example #25
                             axis=0))

hTemp1 = matrix(numpy.append(hTemp,hTemp1))
dims1 = {'l': G.shape[0], 'q': [n+1,n+1], 's': []}

solSOCP = solvers.conelp(c, GTemp1, hTemp1, dims1)

print solSOCP['x'][1::]

## testing the change in objective function

print F(matrix(theta))[0]
print F(matrix(theta) + solSOCP['x'][1::])[0]
print F(matrix(theta) + sol1['x'])[0]

blas.nrm2(solSOCP['x'][1::])
blas.nrm2(sol1['x'])
blas.nrm2(qpOut['x'])

blas.nrm2(hTemp1[-n::] - matrix(GTemp)[-n::,:] * solSOCP['x'][1::] )

blas.nrm2(hTemp[1::] - GTemp[1::,1::] * sol1['x'] )

# now an socp

print sol1['x']
print sol2['x']
print qpOut['x']
print solSOCP['x'][1::]
## sqp
Example #26
#     minimize    t + u*alpha
#     subject to  [u*I, 0; 0, t] - [A, b]'*[A, b] >= 0.
#
# Two variables (t, u).

G = matrix(0.0, ((n+1)**2, 2)) 
G[-1, 0] = -1.0    # coefficient of t
G[: (n+1)**2-1 : n+2, 1] = -1.0    # coefficient of u
h = matrix( [ [ A.T * A,  b.T * A ], [ A.T * b, b.T * b ] ] ) 
c = matrix(1.0, (2,1))

nopts = 40
alpha1 = [2.0/(nopts//2-1) * alpha for alpha in range(nopts//2) ] + \
    [ 2.0 + (15.0 - 2.0)/(nopts//2) * alpha for alpha in 
        range(1,nopts//2+1) ]
lbnds = [ blas.nrm2(b)**2 ]
for alpha in alpha1[1:]:  
    c[1:] = alpha
    lbnds += [ -blas.dot(c, solvers.sdp(c, Gs=[G], hs=[h])['x']) ]

nopts = 10
alpha2 = [ 1.0/(nopts-1) * alpha for alpha in range(nopts) ] 
ubnds = [ blas.nrm2(b)**2 ]
for alpha in alpha2[1:]:  
    c[1:] = alpha
    ubnds += [ blas.dot(c, solvers.sdp(c, Gs=[G], hs=[-h])['x']) ]

try: import pylab
except ImportError: pass
else:
    pylab.figure(1, facecolor='w')
Example #27
def sysid(y, u, vsig, svth=None):
    """
    System identification using the subspace method and nuclear norm 
    optimization.  Estimate a linear time-invariant state-space model 
    given inputs and outputs.  The algorithm is described in [1].
    

    INPUT
    y       'd' matrix of size (p, N).  y are the measured outputs, p is 
            the number of outputs, and N is the number of data points 
            measured. 
    
    u       'd' matrix of size (m, N).  u are the inputs, m is the number 
            of inputs, and N is the number of data points.
    
    vsig    a weighting parameter in the nuclear norm optimization, its 
            value is approximately the 1-sigma output noise level
    
    svth    an optional parameter, if specified, the model order is 
            determined as the number of singular values greater than svth 
            times the maximum singular value.  The default value is 1E-3 
    
    OUTPUT
    sol     a dictionary with the following keys
            -- 'A', 'B', 'C', 'D' are the state-space matrices
            -- 'svN', the original singular values of the Hankel matrix
            -- 'sv', the optimized singular values of the Hankel matrix
            -- 'x0', the initial state x(0)
            -- 'n', the model order

    [1] Zhang Liu and Lieven Vandenberghe. "Interior-point method for 
        nuclear norm approximation with application to system 
        identification."  

    """

    m, N, p = u.size[0], u.size[1], y.size[0]
    if y.size[1] != N:
        raise ValueError, "y and u must have the same length"

    # Y = G*X + H*U + V, Y has size a x b, U has size c x b, Un has b x d
    r = min(int(30 / p), int((N + 1.0) / (p + m + 1) + 1.0))
    a = r * p
    c = r * m
    b = N - r + 1
    d = b - c

    # construct Hankel matrix Y
    Y = Hankel(y, r, b, p=p, q=1)

    # construct Hankel matrix U
    U = Hankel(u, r, b, p=m, q=1)

    # compute Un = null(U) and YUn = Y*Un
    Vt = matrix(0.0, (b, b))
    Stemp = matrix(0.0, (c, 1))
    Un = matrix(0.0, (b, d))
    YUn = matrix(0.0, (a, d))
    lapack.gesvd(U, Stemp, jobvt='A', Vt=Vt)
    Un[:, :] = Vt.T[:, c:]
    blas.gemm(Y, Un, YUn)

    # compute original singular values
    svN = matrix(0.0, (min(a, d), 1))
    lapack.gesvd(YUn, svN)

    # variable, [y(1);...;y(N)]
    # form the coefficient matrices for the nuclear norm optimization
    # minimize | Yh * Un |_* + alpha * | y - yh |_F
    AA = Hankel_basis(r, b, p=p, q=1)
    A = matrix(0.0, (a * d, p * N))
    temp = spmatrix([], [], [], (a, b), 'd')
    temp2 = matrix(0.0, (a, d))
    for ii in xrange(p * N):
        temp[:] = AA[:, ii]
        base.gemm(temp, Un, temp2)
        A[:, ii] = temp2[:]
    B = matrix(0.0, (a, d))

    # flip the matrix if columns is more than rows
    if a < d:
        Itrans = [i + j * a for i in xrange(a) for j in xrange(d)]
        B[:] = B[Itrans]
        B.size = (d, a)
        for ii in xrange(p * N):
            A[:, ii] = A[Itrans, ii]

    # regularized term
    x0 = y[:]
    Qd = matrix(2.0 * svN[0] / p / N / (vsig**2), (p * N, 1))

    # solve the nuclear norm optimization
    sol = nrmapp(A, B, C=base.spdiag(Qd), d=-base.mul(x0, Qd))
    status = sol['status']
    x = sol['x']

    # construct YhUn and take the svd
    YhUn = matrix(B)
    blas.gemv(A, x, YhUn, beta=1.0)
    if a < d:
        YhUn = YhUn.T
    Uh = matrix(0.0, (a, d))
    sv = matrix(0.0, (d, 1))
    lapack.gesvd(YhUn, sv, jobu='S', U=Uh)

    # determine model order
    if svth is None:
        svth = 1E-3
    svthn = sv[0] * svth
    n = 1
    while sv[n] >= svthn and n < 10:
        n = n + 1

    # estimate A, C
    Uhn = Uh[:, :n]
    for ii in xrange(n):
        blas.scal(sv[ii], Uhn, n=a, offset=ii * a)
    syseC = Uhn[:p, :]
    Als = Uhn[:-p, :]
    Bls = Uhn[p:, :]
    lapack.gels(Als, Bls)
    syseA = Bls[:n, :]
    Als[:, :] = Uhn[:-p, :]
    Bls[:, :] = Uhn[p:, :]
    blas.gemm(Als, syseA, Bls, beta=-1.0)
    Aerr = blas.nrm2(Bls)

    # stabilize A
    Sc = matrix(0.0, (n, n), 'z')
    w = matrix(0.0, (n, 1), 'z')
    Vs = matrix(0.0, (n, n), 'z')

    def F(w):
        return (abs(w) < 1.0)

    Sc[:, :] = syseA
    ns = lapack.gees(Sc, w, Vs, select=F)
    while ns < n:
        #print "stabilize matrix A"
        w[ns:] = w[ns:]**-1
        Sc[::n + 1] = w
        Sc = Vs * Sc * Vs.H
        syseA[:, :] = Sc.real()
        Sc[:, :] = syseA
        ns = lapack.gees(Sc, w, Vs, select=F)

    # estimate B,D,x0 stored in vector [x0; vec(D); vec(B)]
    F1 = matrix(0.0, (p * N, n))
    F1[:p, :] = syseC
    for ii in xrange(1, N):
        F1[ii * p:(ii + 1) * p, :] = F1[(ii - 1) * p:ii * p, :] * syseA
    F2 = matrix(0.0, (p * N, p * m))
    ut = u.T
    for ii in xrange(p):
        F2[ii::p, ii::p] = ut
    F3 = matrix(0.0, (p * N, n * m))
    F3t = matrix(0.0, (p * (N - 1), n * m))
    for ii in xrange(1, N):
        for jj in xrange(p):
            for kk in xrange(n):
                F3t[jj:jj + (N - ii) * p:p,
                    kk::n] = ut[:N - ii, :] * F1[(ii - 1) * p + jj, kk]
        F3[ii * p:, :] = F3[ii * p:, :] + F3t[:(N - ii) * p, :]

    F = matrix([[F1], [F2], [F3]])
    yls = y[:]
    Sls = matrix(0.0, (F.size[1], 1))
    Uls = matrix(0.0, (F.size[0], F.size[1]))
    Vtls = matrix(0.0, (F.size[1], F.size[1]))
    lapack.gesvd(F, Sls, jobu='S', jobvt='S', U=Uls, Vt=Vtls)
    Frank = len([ii for ii in xrange(Sls.size[0]) if Sls[ii] >= 1E-6])
    #print 'Rank deficiency = ', F.size[1] - Frank
    xx = matrix(0.0, (F.size[1], 1))
    xx[:Frank] = Uls.T[:Frank, :] * yls
    xx[:Frank] = base.mul(xx[:Frank], Sls[:Frank]**-1)
    xx[:] = Vtls.T[:, :Frank] * xx[:Frank]
    blas.gemv(F, xx, yls, beta=-1.0)
    xxerr = blas.nrm2(yls)

    x0 = xx[:n]
    syseD = xx[n:n + p * m]
    syseD.size = (p, m)
    syseB = xx[n + p * m:]
    syseB.size = (n, m)

    return {'A': syseA, 'B': syseB, 'C': syseC, 'D': syseD, 'svN': svN, 'sv': \
        sv, 'x0': x0, 'n': n, 'Aerr': Aerr, 'xxerr': xxerr}
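# A hypothetical end-to-end call (illustrative only; it assumes the module that
# defines sysid, Hankel, Hankel_basis and nrmapp is importable, and it feeds
# synthetic data in place of real measurements).
from cvxopt import normal, setseed

setseed(0)
p, m, N = 2, 1, 100
u = normal(m, N)                 # inputs
y = normal(p, N)                 # stand-in for measured outputs
sol = sysid(y, u, vsig=0.1)      # vsig ~ 1-sigma output noise level
print("model order %d, Aerr %e, xxerr %e" % (sol['n'], sol['Aerr'], sol['xxerr']))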
Example #28
#     minimize    t + u*alpha
#     subject to  [u*I, 0; 0, t] - [A, b]'*[A, b] >= 0.
#
# Two variables (t, u).

G = matrix(0.0, ((n + 1)**2, 2))
G[-1, 0] = -1.0  # coefficient of t
G[:(n + 1)**2 - 1:n + 2, 1] = -1.0  # coefficient of u
h = matrix([[A.T * A, b.T * A], [A.T * b, b.T * b]])
c = matrix(1.0, (2, 1))

nopts = 40
alpha1 = [2.0/(nopts//2-1) * alpha for alpha in range(nopts//2) ] + \
    [ 2.0 + (15.0 - 2.0)/(nopts//2) * alpha for alpha in
        range(1,nopts//2+1) ]
lbnds = [blas.nrm2(b)**2]
for alpha in alpha1[1:]:
    c[1:] = alpha
    lbnds += [-blas.dot(c, solvers.sdp(c, Gs=[G], hs=[h])['x'])]

nopts = 10
alpha2 = [1.0 / (nopts - 1) * alpha for alpha in range(nopts)]
ubnds = [blas.nrm2(b)**2]
for alpha in alpha2[1:]:
    c[1:] = alpha
    ubnds += [blas.dot(c, solvers.sdp(c, Gs=[G], hs=[-h])['x'])]

try:
    import pylab
except ImportError:
    pass
Example #29
# with n variables, and matrices A(x), B of size p x q.

setseed(0)

p, q, n = 100, 100, 100
A = normal(p*q, n)
B = normal(p, q)


# options['feastol'] = 1e-6
# options['refinement'] = 3

sol = nucnrm.nrmapp(A, B)

x = sol['x']
Z = sol['Z']

s = matrix(0.0, (p,1))
X = matrix(A *x, (p, q)) + B
lapack.gesvd(+X, s)
nrmX = sum(s)
lapack.gesvd(+Z, s)
nrmZ = max(s)
res = matrix(0.0, (n, 1))
blas.gemv(A, Z, res, beta = 1.0, trans = 'T')

print "\nNuclear norm of A(x) + B: %e" %nrmX
print "Inner product of B and Z: %e" %blas.dot(B, Z)
print "Maximum singular value of Z: %e" %nrmZ
print "Euclidean norm of A'(Z): %e" %blas.nrm2(res)
Example #30
# Inequality description G*x <= h with h = 1
G, h = matrix(0.0, (m, 2)), matrix(0.0, (m, 1))
G = (X[:m, :] - X[1:, :]) * matrix([0., -1., 1., 0.], (2, 2))
h = (G * X.T)[::m + 1]
G = mul(h[:, [0, 0]]**-1, G)
h = matrix(1.0, (m, 1))

# Chebyshev center
#
# maximize   R
# subject to  gk'*xc + R*||gk||_2 <= hk,  k=1,...,m
#             R >= 0

R = variable()
xc = variable(2)
op(-R, [G[k, :] * xc + R * blas.nrm2(G[k, :]) <= h[k]
        for k in range(m)] + [R >= 0]).solve()
R = R.value
xc = xc.value

if pylab_installed:
    pylab.figure(1, facecolor='w')

    # polyhedron
    for k in range(m):
        edge = X[[k,k+1],:] + 0.1 * matrix([1., 0., 0., -1.], (2,2)) * \
            (X[2*[k],:] - X[2*[k+1],:])
        pylab.plot(edge[:, 0], edge[:, 1], 'k')

    # 1000 points on the unit circle
    nopts = 1000
Example #31
G, h = matrix(0.0, (m,2)), matrix(0.0, (m,1))
G = (X[:m,:] - X[1:,:]) * matrix([0., -1., 1., 0.], (2,2))
h = (G * X.T)[::m+1]
G = mul(h[:,[0,0]]**-1, G)
h = matrix(1.0, (m,1))


# Chebyshev center
#
# maximize   R
# subject to  gk'*xc + R*||gk||_2 <= hk,  k=1,...,m
#             R >= 0

R = variable()
xc = variable(2)
op(-R, [ G[k,:]*xc + R*blas.nrm2(G[k,:]) <= h[k] for k in range(m) ] + 
    [ R >= 0] ).solve()
R = R.value    
xc = xc.value    

if pylab_installed:
    pylab.figure(1, facecolor='w')

    # polyhedron
    for k in range(m):
        edge = X[[k,k+1],:] + 0.1 * matrix([1., 0., 0., -1.], (2,2)) * \
            (X[2*[k],:] - X[2*[k+1],:])
        pylab.plot(edge[:,0], edge[:,1], 'k')


    # 1000 points on the unit circle
Example #32
        # z[n:] = D2^1/2 * ( -x[:n] - x[n:] - bz[n:] ).
        z[:n] = mul(W['di'][:n], x[:n] - x[n:] - z[:n])
        z[n:] = mul(W['di'][n:], -x[:n] - x[n:] - z[n:])

    return g


x = solvers.coneqp(P, q, G, h, kktsolver=Fkkt)['x'][:n]

I = [k for k in range(n) if abs(x[k]) > 1e-2]
xls = +y
lapack.gels(A[:, I], xls)
ybp = A[:, I] * xls[:len(I)]

print("Sparse basis contains %d basis functions." % len(I))
print("Relative RMS error = %.1e." % (blas.nrm2(ybp - y) / blas.nrm2(y)))

if pylab_installed:
    pylab.figure(2, facecolor='w')
    pylab.subplot(211)
    pylab.plot(ts, y, '-', ts, ybp, 'r--')
    pylab.xlabel('t')
    pylab.ylabel('y(t), yhat(t)')
    pylab.axis([0, 1, -1.5, 1.5])
    pylab.title('Signal and basis pursuit approximation (fig. 6.22)')
    pylab.subplot(212)
    pylab.plot(ts, y - ybp, '-')
    pylab.xlabel('t')
    pylab.ylabel('y(t)-yhat(t)')
    pylab.axis([0, 1, -0.05, 0.05])
Example #33
        def f(x, y, z):
            """
            On entry bx, bz are stored in x, z.  On exit x, z contain the solution,
            with z scaled: z./di is returned instead of z.
            """

            # Maps to our variables x,y,z and t
            if DEBUG > 0:
                print "... Computing ..."
                print "bx = %sbz = %s" % (x.T, z.T),
            a = []
            b = x[n * r:n * r + n]
            c = []
            d = []
            for i in range(r):
                a.append(x[i * n:(i + 1) * n])
                c.append(z[i * n:(i + 1) * n])
                d.append(z[(i + r) * n:(i + r + 1) * n])

            if DEBUG:
                # Now solves using cvxopt
                xp = +x
                zp = +z
                solve(xp, y, zp)

            # First phase
            for i in range(r):
                blas.trsm(cholK, a[i])

                Bai = B * a[i]

                c[i] = -Bai - c[i]
                blas.trsm(L22[i], c[i])

                d[i] = Bai - L32[i] * c[i] - d[i]
                blas.trsm(L33[i], d[i])

                b = b + L42[i] * c[i] + L43[i] * d[i]

            blas.trsm(L44, b)

            # Second phase
            blas.trsm(L44, b, transA='T')

            for i in range(r):
                d[i] = d[i] - L43[i].T * b
                blas.trsm(L33[i], d[i], transA='T')

                c[i] = c[i] - L32[i].T * d[i] - L42[i].T * b
                blas.trsm(L22[i], c[i], transA='T')

                a[i] = a[i] + B.T * (c[i] - d[i])
                blas.trsm(cholK, a[i], transA='T')

            # Store in vectors and scale

            x[n * r:n * r + n] = b
            for i in range(r):
                x[i * n:(i + 1) * n] = a[i]
                z[i * n:(i + 1) * n] = c[i]
                z[(i + r) * n:(i + r + 1) * n] = d[i]

            z[:] = mul(Wd, z)

            if DEBUG:
                print "x  = %s" % x.T,
                print "z  = %s" % z.T,
                print "Delta(x) = %s" % (x - xp).T,
                print "Delta(z) = %s" % (z - zp).T,
                delta = blas.nrm2(x - xp) + blas.nrm2(z - zp)
                if (delta > 1e-8):
                    print "--- DELTA TOO HIGH = %.3e ---" % delta
Example #34
#solvers.options['show_progress'] = False

# Extreme points and inequality description of Voronoi region around
# first symbol (at the origin).
m = 6
V = matrix([
    1.0, 1.0, -1.0, 2.0, -2.0, 1.0, -2.0, -1.0, 0.0, -2.0, 1.5, -1.0, 1.0, 1.0
], (2, m + 1))

# A and b are lists with the inequality descriptions of the regions.
A = [matrix([-(V[1, :m] - V[1, 1:]), V[0, :m] - V[0, 1:]]).T]
b = [mul(A[0], V[:, :m].T) * matrix(1.0, (2, 1))]

# List of symbols.
C = [ matrix(0.0, (2,1)) ] + \
    [ 2.0 * b[0][k] / blas.nrm2(A[0][k,:])**2 * A[0][k,:].T for k in
    range(m) ]

# Voronoi set around C[1]
A += [matrix(0.0, (3, 2))]
b += [matrix(0.0, (3, 1))]
A[1][0, :] = -A[0][0, :]
b[1][0] = -b[0][0]
A[1][1, :] = (C[m] - C[1]).T
b[1][1] = 0.5 * A[1][1, :] * (C[m] + C[1])
A[1][2, :] = (C[2] - C[1]).T
b[1][2] = 0.5 * A[1][2, :] * (C[2] + C[1])

# Voronoi set around C[2], ..., C[5]
for k in range(2, 6):
    A += [matrix(0.0, (3, 2))]
Example #35
print("Computing completion with factored updates..")
Lc2 = Y.copy()       # make a copy of Y
completion(Lc2, factored_updates = True) # compute completion (with factored updates); overwrites Lc2

print("Applying Hessian factors..")
U = At.copy()
fupd = False
hessian(L, Y, U, adj = False, inv = False, factored_updates = fupd)
hessian(L, Y, U, adj = True, inv = False, factored_updates = fupd)
hessian(L, Y, U, adj = True, inv = True, factored_updates = fupd)
hessian(L, Y, U, adj = False, inv = True, factored_updates = fupd)

print("\nEvaluating errors:\n")
# Compute norm of error: A - L*L.T
tmp = (A-At).spmatrix().V
print("Cholesky factorization/product     :  err = %.3e" % (blas.nrm2(tmp)))

# Compute norm of error: L - Lc
tmp = (L.spmatrix()-Lc.spmatrix()).V
print("Projected inverse/completion       :  err = %.3e" % (blas.nrm2(tmp)))

# Compute norm of error: L - Lc2
tmp = (L.spmatrix()-Lc2.spmatrix()).V
print("Projected inverse/completion (upd) :  err = %.3e" % (blas.nrm2(tmp)))

# Compute norm of error: At - U
tmp = (At-U).spmatrix().V
print("Hessian factors NN/TN/TI/NI        :  err = %.3e" % (blas.nrm2(tmp)))


# Test triangular matrix products and solve
Example #36
# The robust LP example of section 10.5 (Examples).

from cvxopt import normal, uniform
from cvxopt.modeling import variable, dot, op, sum
from cvxopt.blas import nrm2

m, n = 500, 100
A = normal(m, n)
b = uniform(m)
c = normal(n)

x = variable(n)
op(dot(c, x), A * x + sum(abs(x)) <= b).solve()

x2 = variable(n)
y = variable(n)
op(dot(c, x2), [A * x2 + sum(y) <= b, -y <= x2, x2 <= y]).solve()

print("\nDifference between two solutions %e" % nrm2(x.value - x2.value))
Example #37
P = matrix(0.0, (2*n,2*n))
P[:n,:n] = A.T*A
q = matrix(0.0, (2*n,1))
q[:n] = -A.T*b
I = matrix(0.0, (n,n)) 
I[::n+1] = 1.0
G = matrix([[I, -I, matrix(0.0, (1,n))], [-I, -I, matrix(1.0, (1,n))]])
h = matrix(0.0, (2*n+1,1))

# Least-norm solution
xln = matrix(0.0, (n,1))
xln[:m] = b
lapack.gels(+A, xln)

nopts = 100
res = [ blas.nrm2(b) ]
card = [ 0 ]
alphas = blas.asum(xln)/(nopts-1) * matrix(range(1,nopts), tc='d')
for alpha in alphas:

    #    minimize    ||A*x-b||_2
    #    subject to  ||x||_1 <= alpha

    h[-1] = alpha
    x = solvers.qp(P, q, G, h)['x'][:n]
    xmax = max(abs(x))
    I = [ k for k in range(n) if abs(x[k]) > tol*xmax ]
    if len(I) <= m:
        xs = +b 
        lapack.gels(A[:,I], xs)
        x[:] = 0.0
Example #38
# The 1-norm support vector classifier of section 10.5 (Examples).

from cvxopt import normal, setseed
from cvxopt.modeling import variable, op, max, sum
from cvxopt.blas import nrm2

m, n = 500, 100
A = normal(m,n) 

x = variable(A.size[1],'x')  
u = variable(A.size[0],'u')  
op(sum(abs(x)) + sum(u), [A*x >= 1-u, u >= 0]).solve()

x2 = variable(A.size[1],'x')  
op(sum(abs(x2)) + sum(max(0, 1 - A*x2))).solve() 

print("\nDifference between two solutions: %e" %nrm2(x.value - x2.value))
Example #39
        # z[:n] = D1^1/2 * (  x[:n] - x[n:] - bz[:n] )
        # z[n:] = D2^1/2 * ( -x[:n] - x[n:] - bz[n:] ).
        z[:n] = mul( W['di'][:n],  x[:n] - x[n:] - z[:n] ) 
        z[n:] = mul( W['di'][n:], -x[:n] - x[n:] - z[n:] ) 

    return g

x = solvers.coneqp(P, q, G, h, kktsolver = Fkkt)['x'][:n]

I = [ k for k in range(n) if abs(x[k]) > 1e-2 ]
xls = +y
lapack.gels(A[:,I], xls)
ybp = A[:,I]*xls[:len(I)]

print("Sparse basis contains %d basis functions." %len(I))
print("Relative RMS error = %.1e." %(blas.nrm2(ybp-y) / blas.nrm2(y)))

if pylab_installed:
    pylab.figure(2, facecolor='w')
    pylab.subplot(211)
    pylab.plot(ts, y, '-', ts, ybp, 'r--')
    pylab.xlabel('t')
    pylab.ylabel('y(t), yhat(t)')
    pylab.axis([0, 1, -1.5, 1.5])
    pylab.title('Signal and basis pursuit approximation (fig. 6.22)')
    pylab.subplot(212)
    pylab.plot(ts, y-ybp, '-')
    pylab.xlabel('t')
    pylab.ylabel('y(t)-yhat(t)')
    pylab.axis([0, 1, -0.05, 0.05])
           
Example #40
def chebyshev_center(G, h, progress=False):
    """
    Calculates the center point of the largest sphere that can fit within
    constraints specified by Gx <= h.

    Parameters
    ----------
    G: pandas.DataFrame
        Array that specifies Gx <= h.
    h: pandas.Series
        The limits specifying Gx <= h.
    progress: bool
        True if detailed progress text from optimization should be shown.

    Returns
    -------
    x: pd.Series
        Centroid with index names equal to the column names of G.
    """
    
    # Aligning input
    h = h.ix[G.index]

    if h.isnull().values.any() or G.isnull().values.any():
        msg = 'Row indices of G and h must match and contain no NaN entries.'
        omfa.logger.error(msg)
        raise ValueError(msg)

    if progress:
        solvers.options['show_progress'] = True
    else:
        solvers.options['show_progress'] = False

    # Setting up LP problem
    m, n = G.shape

    R = variable()
    x = variable(n)

    G_opt = matrix(np.array(G, dtype=np.float64))
    h_opt = matrix(np.array(h, dtype=np.float64))

    inequality_constraints = [G_opt[k,:]*x + R*blas.nrm2(G_opt[k,:]) <= h_opt[k] 
                              for k in range(m)]

    model = op(-R, inequality_constraints + [ R >= 0] )
    model.solve()

    x = pd.Series(x.value, index=G.columns)

    # Checking output
    if model.status != 'optimal':
        if all(G.dot(x) <= h):
            msg = ('Centroid was not found, '
                   'but the last calculated point is feasible.')
            omfa.logger.warn(msg)
        else:
            msg = 'Optimization calculation failed on a non-feasible point.'
            sol = '\nSet progress=True for more details.'
            omfa.logger.error(msg + sol)
            raise RuntimeError(msg + sol)

    return(x)
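# Hypothetical usage (not from the source; it assumes the snippet's own imports:
# cvxopt.modeling, numpy as np, pandas as pd and the omfa logger).  The box
# -1 <= x_i <= 1, written as G*x <= h with G = [I; -I] and h = 1, has its
# Chebyshev center at the origin.
import numpy as np
import pandas as pd

G_box = pd.DataFrame(np.vstack([np.eye(2), -np.eye(2)]), columns=['x1', 'x2'])
h_box = pd.Series(np.ones(4), index=G_box.index)
print(chebyshev_center(G_box, h_box))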
Example #41
else:
    pylab_installed = True

# Extreme points and inequality description of Voronoi region around
# first symbol (0,0).
m = 6
V = matrix([
    1.0, 1.0, -1.0, 2.0, -2.0, 1.0, -2.0, -1.0, 0.0, -2.0, 1.5, -1.0, 1.0, 1.0
], (2, m + 1))

A0 = matrix([-(V[1, :m] - V[1, 1:]), V[0, :m] - V[0, 1:]]).T
b0 = mul(A0, V[:, :m].T) * matrix(1.0, (2, 1))

# List of symbols.
C = [ matrix(0.0, (2,1)) ] + \
    [ 2.0 * b0[k] / blas.nrm2(A0[k,:])**2 * A0[k,:].T for k in range(m) ]

# Voronoi set around C[1]
A1, b1 = matrix(0.0, (3, 2)), matrix(0.0, (3, 1))
A1[0, :] = -A0[0, :]
b1[0] = -b0[0]
A1[1, :] = (C[m] - C[1]).T
b1[1] = 0.5 * A1[1, :] * (C[m] + C[1])
A1[2, :] = (C[2] - C[1]).T
b1[2] = 0.5 * A1[2, :] * (C[2] + C[1])

# Voronoi set around C[2]
A2, b2 = matrix(0.0, (3, 2)), matrix(0.0, (3, 1))
A2[0, :] = -A0[1, :]
b2[0] = -b0[1]
A2[1, :] = (C[1] - C[2]).T
Example #42
#
# with P = E(u^2) * B'*B = (1/3) * B'*B

S = A.T * A + (1.0 / 3.0) * B.T * B
xstoch = A.T * b
lapack.posv(S, xstoch)

# Worst case approximation.
#
# minimize max_{-1 <= u <= 1} ||A*u - b||_2^2.

xwc = wcls(A, [B], b)

nopts = 500
us = -2.0 + (2.0 - (-2.0)) / (nopts - 1) * matrix(list(range(nopts)), tc='d')
rnom = [blas.nrm2((A + u * B) * xnom - b) for u in us]
rstoch = [blas.nrm2((A + u * B) * xstoch - b) for u in us]
rwc = [blas.nrm2((A + u * B) * xwc - b) for u in us]

if pylab_installed:
    pylab.figure(1, facecolor='w')
    pylab.plot(us, rnom, us, rstoch, us, rwc)
    pylab.plot([-1, -1], [0, 12], '--k', [1, 1], [0, 12], '--k')
    pylab.axis([-2.0, 2.0, 0.0, 12.0])
    pylab.xlabel('u')
    pylab.ylabel('r(u)')
    pylab.text(us[9], rnom[9], 'nominal')
    pylab.text(us[9], rstoch[9], 'stochastic')
    pylab.text(us[9], rwc[9], 'worst case')
    pylab.title('Robust least-squares (fig.6.15)')
Example #43
from cvxopt import matrix, mul, exp, normal, solvers, blas

# solvers.options['show_progress'] = False

# Extreme points and inequality description of Voronoi region around
# first symbol (at the origin).
m = 6
V = matrix([1.0, 1.0, -1.0, 2.0, -2.0, 1.0, -2.0, -1.0, 0.0, -2.0, 1.5, -1.0, 1.0, 1.0], (2, m + 1))

# A and b are lists with the inequality descriptions of the regions.
A = [matrix([-(V[1, :m] - V[1, 1:]), V[0, :m] - V[0, 1:]]).T]
b = [mul(A[0], V[:, :m].T) * matrix(1.0, (2, 1))]

# List of symbols.
C = [matrix(0.0, (2, 1))] + [2.0 * b[0][k] / blas.nrm2(A[0][k, :]) ** 2 * A[0][k, :].T for k in range(m)]

# Voronoi set around C[1]
A += [matrix(0.0, (3, 2))]
b += [matrix(0.0, (3, 1))]
A[1][0, :] = -A[0][0, :]
b[1][0] = -b[0][0]
A[1][1, :] = (C[m] - C[1]).T
b[1][1] = 0.5 * A[1][1, :] * (C[m] + C[1])
A[1][2, :] = (C[2] - C[1]).T
b[1][2] = 0.5 * A[1][2, :] * (C[2] + C[1])

# Voronoi set around C[2], ..., C[5]
for k in range(2, 6):
    A += [matrix(0.0, (3, 2))]
    b += [matrix(0.0, (3, 1))]
Example #44
def sysid(y, u, vsig, svth = None):

    """
    System identification using the subspace method and nuclear norm 
    optimization.  Estimate a linear time-invariant state-space model 
    given inputs and outputs.  The algorithm is described in [1].
    

    INPUT
    y       'd' matrix of size (p, N).  y are the measured outputs, p is 
            the number of outputs, and N is the number of data points 
            measured. 
    
    u       'd' matrix of size (m, N).  u are the inputs, m is the number 
            of inputs, and N is the number of data points.
    
    vsig    a weighting parameter in the nuclear norm optimization, its 
            value is approximately the 1-sigma output noise level
    
    svth    an optional parameter, if specified, the model order is 
            determined as the number of singular values greater than svth 
            times the maximum singular value.  The default value is 1E-3 
    
    OUTPUT
    sol     a dictionary with the following keys
            -- 'A', 'B', 'C', 'D' are the state-space matrices
            -- 'svN', the original singular values of the Hankel matrix
            -- 'sv', the optimized singular values of the Hankel matrix
            -- 'x0', the initial state x(0)
            -- 'n', the model order

    [1] Zhang Liu and Lieven Vandenberghe. "Interior-point method for 
        nuclear norm approximation with application to system 
        identification."  

    """

    m, N, p = u.size[0], u.size[1], y.size[0]
    if y.size[1] != N:
        raise ValueError, "y and u must have the same length"
           
    # Y = G*X + H*U + V, Y has size a x b, U has size c x b, Un has b x d
    r = min(int(30/p),int((N+1.0)/(p+m+1)+1.0))
    a = r*p
    c = r*m
    b = N-r+1
    d = b-c
    
    # construct Hankel matrix Y
    Y = Hankel(y,r,b,p=p,q=1)
    
    # construct Hankel matrix U
    U = Hankel(u,r,b,p=m,q=1)
    
    # compute Un = null(U) and YUn = Y*Un
    Vt = matrix(0.0,(b,b))
    Stemp = matrix(0.0,(c,1))
    Un = matrix(0.0,(b,d))
    YUn = matrix(0.0,(a,d))
    lapack.gesvd(U,Stemp,jobvt='A',Vt=Vt)
    Un[:,:] = Vt.T[:,c:]
    blas.gemm(Y,Un,YUn)
    
    # compute original singular values
    svN = matrix(0.0,(min(a,d),1))
    lapack.gesvd(YUn,svN)
    
    # variable, [y(1);...;y(N)]
    # form the coefficient matrices for the nuclear norm optimization
    # minimize | Yh * Un |_* + alpha * | y - yh |_F
    AA = Hankel_basis(r,b,p=p,q=1)
    A = matrix(0.0,(a*d,p*N))
    temp = spmatrix([],[],[],(a,b),'d')
    temp2 = matrix(0.0,(a,d))
    for ii in xrange(p*N):
        temp[:] = AA[:,ii]
        base.gemm(temp,Un,temp2)
        A[:,ii] = temp2[:]
    B = matrix(0.0,(a,d))

    # flip the matrix if columns is more than rows
    if a < d:
        Itrans = [i+j*a for i in xrange(a) for j in xrange(d)]
        B[:] = B[Itrans]
        B.size = (d,a)
        for ii in xrange(p*N):
            A[:,ii] = A[Itrans,ii]
      
    # regularized term
    x0 = y[:]
    Qd = matrix(2.0*svN[0]/p/N/(vsig**2),(p*N,1))

    # solve the nuclear norm optimization
    sol = nrmapp(A, B, C = base.spdiag(Qd), d = -base.mul(x0, Qd))
    status = sol['status']
    x = sol['x']
    
    # construct YhUn and take the svd
    YhUn = matrix(B)
    blas.gemv(A,x,YhUn,beta=1.0)
    if a < d:
        YhUn = YhUn.T
    Uh = matrix(0.0,(a,d))
    sv = matrix(0.0,(d,1))
    lapack.gesvd(YhUn,sv,jobu='S',U=Uh)

    # determine model order
    if svth is None:
        svth = 1E-3
    svthn = sv[0]*svth
    n=1
    while sv[n] >= svthn and n < 10:
        n=n+1
    
    # estimate A, C
    Uhn = Uh[:,:n]
    for ii in xrange(n):
        blas.scal(sv[ii],Uhn,n=a,offset=ii*a)
    syseC = Uhn[:p,:]
    Als = Uhn[:-p,:]
    Bls = Uhn[p:,:]
    lapack.gels(Als,Bls)
    syseA = Bls[:n,:]
    Als[:,:] = Uhn[:-p,:]
    Bls[:,:] = Uhn[p:,:]
    blas.gemm(Als,syseA,Bls,beta=-1.0)
    Aerr = blas.nrm2(Bls)
    
    # stabilize A
    Sc = matrix(0.0,(n,n),'z')
    w = matrix(0.0, (n,1), 'z')
    Vs = matrix(0.0, (n,n), 'z')
    def F(w):
        return (abs(w) < 1.0)
    
    Sc[:,:] = syseA
    ns = lapack.gees(Sc, w, Vs, select = F)
    while ns < n:
        #print "stabilize matrix A"
        w[ns:] = w[ns:]**-1
        Sc[::n+1] = w
        Sc = Vs*Sc*Vs.H
        syseA[:,:] = Sc.real()
        Sc[:,:] = syseA
        ns = lapack.gees(Sc, w, Vs, select = F)

    # estimate B,D,x0 stored in vector [x0; vec(D); vec(B)]
    F1 = matrix(0.0,(p*N,n))
    F1[:p,:] = syseC
    for ii in xrange(1,N):
        F1[ii*p:(ii+1)*p,:] = F1[(ii-1)*p:ii*p,:]*syseA
    F2 = matrix(0.0,(p*N,p*m))
    ut = u.T
    for ii in xrange(p):
        F2[ii::p,ii::p] = ut
    F3 = matrix(0.0,(p*N,n*m))
    F3t = matrix(0.0,(p*(N-1),n*m))
    for ii in xrange(1,N):
        for jj in xrange(p):
            for kk in xrange(n):
                F3t[jj:jj+(N-ii)*p:p,kk::n] = ut[:N-ii,:]*F1[(ii-1)*p+jj,kk]
        F3[ii*p:,:] = F3[ii*p:,:] + F3t[:(N-ii)*p,:]
    
    F = matrix([[F1],[F2],[F3]])
    yls = y[:]
    Sls = matrix(0.0,(F.size[1],1))
    Uls = matrix(0.0,(F.size[0],F.size[1]))
    Vtls = matrix(0.0,(F.size[1],F.size[1]))
    lapack.gesvd(F, Sls, jobu='S', jobvt='S', U=Uls, Vt=Vtls)
    Frank=len([ii for ii in xrange(Sls.size[0]) if Sls[ii] >= 1E-6])
    #print 'Rank deficiency = ', F.size[1] - Frank
    xx = matrix(0.0,(F.size[1],1))
    xx[:Frank] = Uls.T[:Frank,:] * yls
    xx[:Frank] = base.mul(xx[:Frank],Sls[:Frank]**-1)
    xx[:] = Vtls.T[:,:Frank]*xx[:Frank] 
    blas.gemv(F,xx,yls,beta=-1.0)
    xxerr = blas.nrm2(yls)
    
    x0 = xx[:n]
    syseD = xx[n:n+p*m]
    syseD.size = (p,m)
    syseB = xx[n+p*m:]
    syseB.size = (n,m)
    
    return {'A': syseA, 'B': syseB, 'C': syseC, 'D': syseD, 'svN': svN, 'sv': \
        sv, 'x0': x0, 'n': n, 'Aerr': Aerr, 'xxerr': xxerr}
Example #45
def solve(A, b, C, L, dims, proxqp=None, sigma=1.0, rho=1.0, **kwargs):
    """
    
    Solves the SDP

        min.  < c, x > 
        s.t.  A(x) = b
              x >= 0

    and its dual

        max.  -< b, y >
        s.t.  c + A'(y) = s
              s >= 0.
    
    
    Input arguments.
    
        A   is an N x M sparse matrix where N = sum_i ns[i]**2 and M = sum_j ms[j]
            and ns and ms are the SDP variable sizes and constraint block lengths respectively.
            
            The expression A(x) = b can be written as A.T*xtilde = b, where
            xtilde is a stacked vector of vectorized versions of xi.
        
        b   is a stacked vector containing constraint vectors of 
                        size m_i x 1.
    
        C   is a stacked vector containing vectorized 'd' matrices 
            c_k of size n_k**2 x 1, representing symmetric matrices.



        L  is an N X P sparse matrix, where L.T*X = 0 represents the consistency
            constraints. If an index k appears in different cliques i,j, and
            in converted form are indexed by it, jt, then L[it,l] = 1, 
            L[jt,l] = -1 for some l.
            
        dims    is a dictionary containing conic dimensions.
            dims['l'] contains number of linear variables under nonnegativity constraint
            dims['q'] contains a list of quadratic cone orders (not implemented!)
            dims['s'] contains a list of semidefinite cone matrix orders
        
        proxqp   is either a function pointer to a prox implementation, or, if 
                the problem has block-diagonal correlative sparsity, a pointer 
                to the prox implementation of a single clique. The choices are:
                
                proxqp_general : solves prox for general sparsity pattern
                
                proxqp_clique : solves prox for a single dense clique with 
                                only semidefinite variables.
                
                proxqp_clique_SNL : solves prox for sensor network localization 
                                    problem
        
        sigma is a nonnegative constant (step size)
        
        rho is a nonnegative constant between 0 and 2 (overrelaxation parameter)
        
        In addition, the following parameters are optional:
        
            maxiter : maximum number of iterations (default 100)
            
            reltol : relative tolerance (default 0.01). 
                        If rp < reltol and rd < reltol and iteration < maxiter, 
                        solver breaks and returns current value.
                        
            adaptive : boolean toggle on whether adaptive step size should be 
                        used. (default False)
            
            mu, tau, tauscale : parameters for adaptive step size (see paper)

            multiprocess : number of parallel processes (default 1). 
                            if multiprocess = 1, no parallelization is used.
                            
            blockdiagonal : boolean toggle on whether problem has block diagonal
                            correlative sparsity. Note that even if the problem
                            does have block-diagonal correlative sparsity, if
                            this parameter is set to False, then general mode 
                            is used. (default False)
                            

            verbose : toggle printout (default True)
            
            log_cputime : toggle whether cputime should be logged.
	    
	    
    The output is returned in a dictionary with the following fields:
        
        x : primal variable in stacked form (X = [x0, ..., x_{N-1}]) where
            xk is the vectorized form of the nk x nk submatrix variable.
        
        y, z : iterates in Spingarn's method
        
        cputime, walltime : total cputime and walltime, respectively, spent in 
                            main loop. If log_cputime is False, then cputime is 
                            returned as 0.
        
        primal, rprimal, rdual : evolution of primal optimal value, primal 
                                residual, and dual residual (resp.)
        
        sigma : evolution of step size sigma (changes if adaptive step size is used.)
    

    """

    solvers.options['show_progress'] = False
    maxiter = kwargs.get('maxiter', 100)
    reltol = kwargs.get('reltol', 0.01)
    adaptive = kwargs.get('adaptive', False)
    mu = kwargs.get('mu', 2.0)
    tau = kwargs.get('tau', 1.5)
    multiprocess = kwargs.get('multiprocess', 1)
    tauscale = kwargs.get('tauscale', 0.9)
    blockdiagonal = kwargs.get('blockdiagonal', False)
    verbose = kwargs.get('verbose', True)
    log_cputime = kwargs.get('log_cputime', True)

    if log_cputime:
        try:
            import psutil
        except (ImportError):
            assert False, "Python package psutil required to log cputime. Package can be downloaded at http://code.google.com/p/psutil/"

    #format variables
    nl, ns = dims['l'], dims['s']
    C = C[nl:]
    L = L[nl:, :]
    As, bs = [], []
    cons = []
    offset = 0
    for k in xrange(len(ns)):
        Atmp = sparse(A[nl + offset:nl + offset + ns[k]**2, :])
        J = list(set(list(Atmp.J)))
        Atmp = Atmp[:, J]
        if len(sparse(Atmp).V) == Atmp[:].size[0]: Atmp = matrix(Atmp)
        else: Atmp = sparse(Atmp)
        As.append(Atmp)
        bs.append(b[J])
        cons.append(J)

        offset += ns[k]**2

    if blockdiagonal:
        if sum([len(c) for c in cons]) > len(b):
            print "Problem does not have block-diagonal correlative sparsity. Switching to general mode."
            blockdiagonal = False

    #If not block-diagonal correlative sparsity, represent A as a list of lists:
    #   A[i][j] is a matrix (or spmatrix) if ith clique involves jth constraint block
    #Otherwise, A is a list of matrices, where A[i] involves the ith clique and
    #ith constraint block only.
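    #For example (a schematic sketch of the general-mode layout built below, with
    #hypothetical block names): with 2 constraint blocks and 3 cliques one might get
    #   As = [[A00, A01, 0], [0, A11, A12]],
    #where As[i][k] holds the rows of A belonging to clique k, restricted to the
    #columns (constraints) of block i, and 0 marks an empty slice.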

    if not blockdiagonal:
        while sum([len(c) for c in cons]) > len(b):
            tobreak = False
            for i in xrange(len(cons)):
                for j in xrange(i):
                    ci, cj = set(cons[i]), set(cons[j])
                    s1 = ci.intersection(cj)
                    if len(s1) > 0:
                        s2 = ci.difference(cj)
                        s3 = cj.difference(ci)
                        cons.append(list(s1))
                        if len(s2) > 0:
                            s2 = list(s2)
                            if not (s2 in cons): cons.append(s2)
                        if len(s3) > 0:
                            s3 = list(s3)
                            if not (s3 in cons): cons.append(s3)

                        cons.pop(i)
                        cons.pop(j)
                        tobreak = True

                        break
                if tobreak: break

        As, bs = [], []
        for i in xrange(len(cons)):
            J = cons[i]
            bs.append(b[J])
            Acol = []
            offset = 0
            for k in xrange(len(ns)):
                Atmp = sparse(A[nl + offset:nl + offset + ns[k]**2, J])
                if len(Atmp.V) == 0:
                    Acol.append(0)
                elif len(Atmp.V) == Atmp[:].size[0]:
                    Acol.append(matrix(Atmp))
                else:
                    Acol.append(Atmp)
                offset += ns[k]**2
            As.append(Acol)

    ms = [len(i) for i in bs]
    bs = matrix(bs)
    meq = L.size[1]

    if (not blockdiagonal) and multiprocess > 1:
        print "Multiprocessing mode can only be used if correlative sparsity is block diagonal. Switching to sequential mode."
        multiprocess = 1

    assert rho > 0 and rho < 2, 'Overrelaxation parameter (rho) must be (strictly) between 0 and 2'

    # create routine for projecting onto { x | L*x = 0 }:
    #   P = I - L*(L.T*L)^{-1}*L.T
    LTL = spmatrix([], [], [], (meq, meq))
    offset = 0
    for k in ns:
        Lk = L[offset:offset + k**2, :]
        base.syrk(Lk, LTL, trans='T', beta=1.0)
        offset += k**2
    LTLi = cholmod.symbolic(LTL, amd.order(LTL))
    cholmod.numeric(LTL, LTLi)
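    # The factorization of LTL = L.T*L is computed once here (symbolic + numeric
    # CHOLMOD factorization) and reused, via cholmod.solve, in every call to proj.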

    # proj below computes y := y - L*(L.T*L)^{-1}*L.T*y
    nssq = sum(matrix([nsk**2 for nsk in ns]))

    def proj(y, ip=True):
        if not ip: y = +y
        tmp = matrix(0.0, (meq, 1))
        base.gemv(L, y, tmp, trans='T', m=nssq, n=meq, beta=1.0)
        cholmod.solve(LTLi, tmp)
        base.gemv(L, tmp, y, beta=1.0, alpha=-1.0, trans='N', m=nssq, n=meq)
        if not ip: return y
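    # Dense equivalent of proj, for reference only (a sketch; assumes numpy is
    # available, which the solver itself does not require):
    #
    #   import numpy as np
    #   Ld = np.array(matrix(L))
    #   P  = np.eye(nssq) - Ld.dot(np.linalg.solve(Ld.T.dot(Ld), Ld.T))
    #   # proj(y) then agrees with P.dot(np.array(y)) up to rounding error.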

    time_to_solve = 0

    #initialize variables
    X = C * 0.0
    Y = +X
    Z = +X
    dualS = +X
    dualy = +b
    PXZ = +X

    proxargs = {
        'C': C,
        'A': As,
        'b': bs,
        'Z': Z,
        'X': X,
        'sigma': sigma,
        'dualS': dualS,
        'dualy': dualy,
        'ns': ns,
        'ms': ms,
        'multiprocess': multiprocess
    }

    if blockdiagonal: proxqp = proxqp_blockdiagonal(proxargs, proxqp)
    else: proxqp = proxqp_general

    if log_cputime: utime = psutil.cpu_times()[0]
    wtime = time.time()
    primal = []
    rpvec, rdvec = [], []
    sigmavec = []
    for it in xrange(maxiter):
        pv, gap = proxqp(proxargs)

        blas.copy(Z, Y)
        blas.axpy(X, Y, alpha=-2.0)
        proj(Y, ip=True)

        #PXZ = sigma*(X-Z)
        blas.copy(X, PXZ)
        blas.scal(sigma, PXZ)
        blas.axpy(Z, PXZ, alpha=-sigma)

        #z = z + rho*(y-x)
        blas.axpy(X, Y, alpha=1.0)
        blas.axpy(Y, Z, alpha=-rho)

        xzn = blas.nrm2(PXZ)
        xn = blas.nrm2(X)
        xyn = blas.nrm2(Y)
        proj(PXZ, ip=True)
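        # rdual below is the norm of the projected scaled step P(sigma*(X - Z));
        # rpri is then backed out from ||Y|| (= xyn) and rdual.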

        rdual = blas.nrm2(PXZ)
        rpri = sqrt(abs(xyn**2 - rdual**2)) / sigma

        if log_cputime: cputime = psutil.cpu_times()[0] - utime
        else: cputime = 0

        walltime = time.time() - wtime

        if rpri / max(xn, 1.0) < reltol and rdual / max(1.0, xzn) < reltol:
            break

        rpvec.append(rpri / max(xn, 1.0))
        rdvec.append(rdual / max(1.0, xzn))
        primal.append(pv)
        if adaptive:
            if (rdual / xzn * mu < rpri / xn):
                sigmanew = sigma * tau
            elif (rpri / xn * mu < rdual / xzn):
                sigmanew = sigma / tau
            else:
                sigmanew = sigma
            if it % 10 == 0 and it > 0 and tau > 1.0:
                tauscale *= 0.9
                tau = 1 + (tau - 1) * tauscale
            sigma = max(min(sigmanew, 10.0), 0.1)
        sigmavec.append(sigma)
        if verbose:
            if log_cputime:
                print "%d: primal = %e, gap = %e, (rp,rd) = (%e,%e), sigma = %f, (cputime,walltime) = (%f, %f)" % (
                    it, pv, gap, rpri / max(xn, 1.0), rdual / max(1.0, xzn),
                    sigma, cputime, walltime)
            else:
                print "%d: primal = %e, gap = %e, (rp,rd) = (%e,%e), sigma = %f, walltime = %f" % (
                    it, pv, gap, rpri / max(xn, 1.0), rdual / max(1.0, xzn),
                    sigma, walltime)

    sol = {}
    sol['x'] = X
    sol['y'] = Y
    sol['z'] = Z
    sol['cputime'] = cputime
    sol['walltime'] = walltime
    sol['primal'] = primal
    sol['rprimal'] = rpvec
    sol['rdual'] = rdvec
    sol['sigma'] = sigmavec
    return sol
Exemple #46
0
        def f(x, y, z):
            """
            On entry bx, bz are stored in x, z.  On exit x, z contain the solution,
            with z scaled: z./di is returned instead of z.
            """

            # Maps to our variables x,y,z and t
            if DEBUG > 0:
                print "... Computing ..."
                print "bx = %sbz = %s" % (x.T, z.T),
            a = []
            b = x[n*r:n*r + n]
            c = []
            d = []
            for i in range(r):
                a.append(x[i*n:(i+1)*n])
                c.append(z[i*n:(i+1)*n])
                d.append(z[(i+r)*n:(i+r+1)*n])

            if DEBUG:
                # Now solves using cvxopt
                xp = +x
                zp = +z
                solve(xp,y,zp)

            # First phase
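            # (The two phases appear to implement a block triangular solve: a
            # forward substitution with the block lower-triangular factors cholK,
            # L22[i], L32[i], L33[i], L42[i], L43[i], L44 -- assumed to be
            # computed by a factorization routine outside this snippet -- followed
            # by a backward substitution with their transposes.)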
            for i in range(r):
                blas.trsm(cholK, a[i])

                Bai = B * a[i]

                c[i] = - Bai - c[i]
                blas.trsm(L22[i], c[i])

                d[i] =  Bai - L32[i] * c[i] - d[i]
                blas.trsm(L33[i], d[i])

                b = b + L42[i] * c[i] + L43[i] * d[i]

            blas.trsm(L44, b)

            # Second phase
            blas.trsm(L44, b, transA='T')

            for i in range(r):
                d[i] = d[i] - L43[i].T * b
                blas.trsm(L33[i], d[i], transA='T')

                c[i] = c[i] - L32[i].T * d[i] - L42[i].T * b
                blas.trsm(L22[i], c[i], transA='T')

                a[i] = a[i] + B.T * (c[i] - d[i])
                blas.trsm(cholK, a[i], transA='T')

            # Store in vectors and scale

            x[n*r:n*r + n] = b
            for i in range(r):
                x[i*n:(i+1)*n] = a[i]
                z[i*n:(i+1)*n] = c[i]
                z[(i+r)*n:(i+r+1)*n] = d[i]

            z[:] = mul( Wd, z)

            if DEBUG:
                print "x  = %s" % x.T,
                print "z  = %s" % z.T,
                print "Delta(x) = %s" % (x - xp).T,
                print "Delta(z) = %s" % (z - zp).T,
                delta= blas.nrm2(x-xp) + blas.nrm2(z-zp)
                if (delta > 1e-8):
                    print "--- DELTA TOO HIGH = %.3e ---" % delta
Exemple #47
0
# with P = E(u^2) * B'*B = (1/3) * B'*B
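# (Derivation, assuming u is uniformly distributed on [-1, 1], which is
# consistent with E(u^2) = 1/3 above: since E(u) = 0,
#
#     E ||(A + u*B)*x - b||_2^2  =  ||A*x - b||_2^2 + (1/3) * ||B*x||_2^2,
#
# and the minimizer solves the normal equations (A'*A + (1/3)*B'*B) * x = A'*b,
# which lapack.posv solves below.)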

S = A.T * A + (1.0/3.0) * B.T * B
xstoch = A.T * b 
lapack.posv(S, xstoch)


# Worst case approximation.
#
# minimize max_{-1 <= u <= 1} ||A*u - b||_2^2.

xwc = wcls(A, [B], b)

nopts = 500
us = -2.0 + (2.0 - (-2.0))/(nopts-1) * matrix(list(range(nopts)),tc='d')
rnom = [ blas.nrm2( (A+u*B)*xnom - b) for u in us ]
rstoch = [ blas.nrm2( (A+u*B)*xstoch - b) for u in us ]
rwc = [ blas.nrm2( (A+u*B)*xwc - b) for u in us ]

if pylab_installed:
    pylab.figure(1, facecolor='w')
    pylab.plot(us, rnom, us, rstoch, us, rwc)
    pylab.plot([-1, -1], [0, 12], '--k', [1, 1], [0, 12], '--k')
    pylab.axis([-2.0, 2.0, 0.0, 12.0])
    pylab.xlabel('u')
    pylab.ylabel('r(u)')
    pylab.text(us[9], rnom[9], 'nominal')
    pylab.text(us[9], rstoch[9], 'stochastic')
    pylab.text(us[9], rwc[9], 'worst case')
    pylab.title('Robust least-squares (fig.6.15)')
Exemple #48
0
    pylab.ylabel('xcor[i]')
    pylab.xlabel('i')


# A = D'*D is an n by n tridiagonal matrix with -1.0 on the 
# upper/lower diagonal and 1, 2, 2, ..., 2, 2, 1 on the diagonal.
Ad = matrix([1.0] + (n-2)*[2.0] + [1.0])
As = matrix(-1.0, (n-1,1))
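
# (For reference -- a sketch that the code below does not need, since lapack.ptsv
# works directly on the tridiagonal data Ad, As: D is the (n-1) x n first-order
# difference matrix with rows [..., -1, 1, ...]; assuming spmatrix is imported
# from cvxopt, it could be built as
#
#   D = spmatrix([-1.0]*(n-1) + [1.0]*(n-1),
#                2*list(range(n-1)),
#                list(range(n-1)) + list(range(1, n)), (n-1, n))
#
# and matrix(D.T*D) then has the banded structure described above.)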

nopts = 50
deltas = -10.0 + 20.0/(nopts-1) * matrix(list(range(nopts)))
cost1, cost2 = [], []
for delta in deltas:
    xr = +corr 
    lapack.ptsv(1.0 + 10**delta * Ad, 10**delta *As, xr)
    cost1 += [blas.nrm2(xr - corr)] 
    cost2 += [blas.nrm2(xr[1:] - xr[:-1])] 

# Find solutions with ||xhat - xcorr || roughly equal to 8.0, 3.1, 1.0.
mv1, k1 = min(zip([abs(c - 8.0) for c in cost1], range(nopts)))
xr1 = +corr 
lapack.ptsv(1.0 + 10**deltas[k1] * Ad, 10**deltas[k1] *As, xr1)
mv2, k2 = min(zip([abs(c - 3.1) for c in cost1], range(nopts)))
xr2 = +corr 
lapack.ptsv(1.0 + 10**deltas[k2] * Ad, 10**deltas[k2] *As, xr2)
mv3, k3 = min(zip([abs(c - 1.0) for c in cost1], range(nopts)))
xr3 = +corr 
lapack.ptsv(1.0 + 10**deltas[k3] * Ad, 10**deltas[k3] *As, xr3)

if pylab_installed:
    pylab.figure(2, facecolor='w')
Exemple #49
0
m = 6
V = matrix([ 1.0,  1.0, 
            -1.0,  2.0,
            -2.0,  1.0,
            -2.0, -1.0,
             0.0, -2.0,
             1.5, -1.0,
             1.0,  1.0 ], (2,m+1))

# A and b are lists with the inequality descriptions of the regions.
A = [ matrix( [-(V[1,:m] - V[1,1:]), V[0,:m] - V[0,1:]] ).T ]
b = [ mul(A[0], V[:,:m].T) * matrix(1.0, (2,1)) ]

# List of symbols.
C = [ matrix(0.0, (2,1)) ] + \
    [ 2.0 * b[0][k] / blas.nrm2(A[0][k,:])**2 * A[0][k,:].T for k in 
    range(m) ]
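
# (Each symbol C[k+1] is the mirror image of the first symbol C[0] = (0,0) with
# respect to the k-th edge A[0][k,:] * x = b[0][k] of the polygon: the reflection
# of the origin across that line is 2 * b[0][k] / ||A[0][k,:]||^2 * A[0][k,:].T,
# which is the expression used above.  Each edge therefore lies on the
# perpendicular bisector between C[0] and the corresponding neighboring symbol.)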

# Voronoi set around C[1]
A += [ matrix(0.0, (3,2)) ] 
b += [ matrix(0.0, (3,1)) ]
A[1][0,:] = -A[0][0,:]
b[1][0] = -b[0][0]
A[1][1,:] = (C[m] - C[1]).T
b[1][1] = 0.5 * A[1][1,:] * ( C[m] + C[1] )
A[1][2,:] = (C[2] - C[1]).T
b[1][2] = 0.5 * A[1][2,:] * ( C[2] + C[1] )

# Voronoi set around C[2], ..., C[5]
for k in range(2, 6):
    A += [ matrix(0.0, (3,2)) ] 
Exemple #50
0
# first symbol (0,0).
m = 6
V = matrix([ 1.0,  1.0, 
            -1.0,  2.0,
            -2.0,  1.0,
            -2.0, -1.0,
             0.0, -2.0,
             1.5, -1.0,
             1.0,  1.0 ], (2,m+1))

A0 = matrix([-(V[1,:m] - V[1,1:]), V[0,:m] - V[0,1:]]).T
b0 = mul(A0, V[:,:m].T) * matrix(1.0, (2,1))

# List of symbols.
C = [ matrix(0.0, (2,1)) ] + \
    [ 2.0 * b0[k] / blas.nrm2(A0[k,:])**2 * A0[k,:].T for k in range(m) ]

# Voronoi set around C[1]
A1, b1 = matrix(0.0, (3,2)), matrix(0.0, (3,1))
A1[0,:] = -A0[0,:]
b1[0] = -b0[0]
A1[1,:] = (C[m] - C[1]).T
b1[1] = 0.5 * A1[1,:] * ( C[m] + C[1] )
A1[2,:] = (C[2] - C[1]).T
b1[2] = 0.5 * A1[2,:] * ( C[2] + C[1] )

# Voronoi set around C[2]
A2, b2 = matrix(0.0, (3,2)), matrix(0.0, (3,1))
A2[0,:] = -A0[1,:]
b2[0] = -b0[1]
A2[1,:] = (C[1] - C[2]).T
Exemple #51
0

# Quadratic smoothing.
# A = D'*D is an n by n tridiagonal matrix with -1.0 on the
# upper/lower diagonal and 1, 2, 2, ..., 2, 2, 1 on the diagonal.
Ad = matrix([1.0] + (n - 2) * [2.0] + [1.0])
As = matrix(-1.0, (n - 1, 1))

nopts = 100
deltas = -10.0 + 20.0 / (nopts - 1) * matrix(list(range(nopts)))

cost1, cost2 = [], []
for delta in deltas:
    xr = +corr
    lapack.ptsv(1.0 + 10 ** delta * Ad, 10 ** delta * As, xr)
    cost1 += [blas.nrm2(xr - corr)]
    cost2 += [blas.nrm2(xr[1:] - xr[:-1])]

# Find solutions with ||xhat - xcorr || roughly equal to 4, 7, 10.
mv1, k1 = min(zip([abs(c - 10.0) for c in cost1], range(nopts)))
xr1 = +corr
lapack.ptsv(1.0 + 10 ** deltas[k1] * Ad, 10 ** deltas[k1] * As, xr1)
mv2, k2 = min(zip([abs(c - 7.0) for c in cost1], range(nopts)))
xr2 = +corr
lapack.ptsv(1.0 + 10 ** deltas[k2] * Ad, 10 ** deltas[k2] * As, xr2)
mv3, k3 = min(zip([abs(c - 4.0) for c in cost1], range(nopts)))
xr3 = +corr
lapack.ptsv(1.0 + 10 ** deltas[k3] * Ad, 10 ** deltas[k3] * As, xr3)

if pylab_installed:
    pylab.figure(2, facecolor="w")