Example #1
    def add_projection(self, A, alpha = 1.0, beta = 1.0, reordered=False):
        """
        Add projection of a dense matrix :math:`A` to :py:class:`cspmatrix`.

            X := alpha*proj(A) + beta*X
        """
        assert self.is_factor is False, "cannot project matrix onto a cspmatrix factor"
        assert isinstance(A, matrix), "argument A must be a dense matrix"
        
        symb = self.symb
        blkval = self.blkval

        n = symb.n
        snptr = symb.snptr
        snode = symb.snode
        relptr = symb.relptr
        snrowidx = symb.snrowidx
        sncolptr = symb.sncolptr
        blkptr = symb.blkptr

        if self.symb.p is not None and reordered is False:
            A = tril(A)
            A = A+A.T
            A[::A.size[0]+1] *= 0.5
            A = A[self.symb.p,self.symb.p]

        # for each block ...
        for k in range(self.symb.Nsn):
            nn = snptr[k+1]-snptr[k]
            na = relptr[k+1]-relptr[k]
            nj = nn + na
             
            blkval[blkptr[k]:blkptr[k+1]] = beta*blkval[blkptr[k]:blkptr[k+1]] + \
                alpha*(A[snrowidx[sncolptr[k]:sncolptr[k+1]], snode[snptr[k]:snptr[k+1]]][:])
        
        return 
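
# Usage sketch for add_projection (hypothetical, not taken from the library docs):
# it assumes chompack exposes symbolic() and cspmatrix() at the package level and
# that cspmatrix(symb) creates a zero-initialized matrix. The pattern Vp and the
# dense matrix A are made up for illustration.
from cvxopt import matrix, spmatrix
from chompack import symbolic, cspmatrix

Vp = spmatrix(1.0, [0, 1, 2, 2], [0, 1, 1, 2], (3, 3))   # lower-triangular pattern
symb = symbolic(Vp)                                      # symbolic factorization
X = cspmatrix(symb)                                      # zero chordal sparse matrix

A = matrix([[4.0, 1.0, 0.0],
            [1.0, 3.0, 0.2],
            [0.0, 0.2, 2.0]])                            # dense symmetric matrix

# X := 1.0*proj(A) + 0.0*X, i.e. project A onto the chordal sparsity pattern
X.add_projection(A, alpha=1.0, beta=0.0)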
Example #2
def embed(A, colcount, snode, snptr, snpar, snpost):
    """
    Compute the filled pattern (chordal embedding).

       colptr, rowidx = embed(A, colcount, snode, snptr, snpar, snpost)

    PURPOSE
    Computes row indices and column pointers for the representative vertices of the supernodes.

    ARGUMENTS
    A         sparse matrix

    colcount  vector with column counts

    snode     vector with supernodes
 
    snptr     vector with offsets

    snpar     vector with supernodal parent indices

    snpost    vector with supernodal post ordering

    RETURNS
    colptr    vector with offsets 

    rowidx    vector with row indices
    """

    Alo = tril(A)
    cp, ri, _ = Alo.CCS
    N = len(snpar)

    # colptr for compressed cholesky factor
    colptr = matrix(0, (N + 1, 1))
    for k in range(N):
        colptr[k + 1] = colptr[k] + colcount[snode[snptr[k]]]
    rowidx = matrix(-1, (colptr[-1], 1))
    cnnz = matrix(0, (N, 1))

    # compute compressed sparse representation
    for k in range(N):
        p = snptr[k]
        Nk = snptr[k + 1] - p
        nk = cp[snode[p] + 1] - cp[snode[p]]
        rowidx[colptr[k]:colptr[k] + nk] = ri[cp[snode[p]]:cp[snode[p] + 1]]
        cnnz[k] = nk
        for i in range(1, Nk):
            nk = cp[snode[p + i] + 1] - cp[snode[p + i]]
            cnnz[k] = lmerge(rowidx, ri, colptr[k], cp[snode[p + i]], cnnz[k],
                             nk)

    for k in snpost:
        p = snptr[k]
        Nk = snptr[k + 1] - p
        if snpar[k] != k:
            cnnz[snpar[k]] = lmerge(rowidx, rowidx, colptr[snpar[k]],
                                    colptr[k] + Nk, cnnz[snpar[k]],
                                    cnnz[k] - Nk)

    return colptr, rowidx
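
# embed() relies on a helper lmerge() that is not shown in this snippet. The function
# below is an illustrative, pure-Python stand-in (not chompack's implementation): it
# merges the sorted index run right[offsetr:offsetr+nr] into the sorted run
# left[offsetl:offsetl+nl] in place, drops duplicates, and returns the merged length.
def lmerge(left, right, offsetl, offsetr, nl, nr):
    merged = sorted(set(list(left[offsetl:offsetl + nl]) +
                        list(right[offsetr:offsetr + nr])))
    for i, v in enumerate(merged):
        left[offsetl + i] = v          # overwrite the old run with the merged run
    return len(merged)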
Example #3
    def _iadd_spmatrix(self, X, alpha = 1.0):
        """
        Add a sparse matrix :math:`X` to :py:class:`cspmatrix`.
        """
        assert self.is_factor is False, "cannot add spmatrix to a cspmatrix factor"

        n = self.symb.n
        snptr = self.symb.snptr
        snode = self.symb.snode
        relptr = self.symb.relptr
        snrowidx = self.symb.snrowidx
        sncolptr = self.symb.sncolptr
        blkptr = self.symb.blkptr
        blkval = self.blkval

        if self.symb.p is not None:
            Xp = tril(perm(symmetrize(X),self.symb.p))
        else:
            Xp = tril(X)
        cp, ri, val = Xp.CCS

        # for each block ...
        for k in range(self.symb.Nsn):
            nn = snptr[k+1]-snptr[k]
            na = relptr[k+1]-relptr[k]
            nj = nn + na

            r = list(snrowidx[sncolptr[k]:sncolptr[k+1]])
            # copy the columns of X into the supernodal block
            for i in range(nn):
                j = snode[snptr[k]+i]
                offset = blkptr[k] + nj*i
                # extract correct indices and add values
                I = [offset + r.index(idx) for idx in ri[cp[j]:cp[j+1]]]

                blkval[I] += alpha*val[cp[j]:cp[j+1]]
        return
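
# Usage sketch for _iadd_spmatrix (hypothetical; the method presumably backs the
# += operator for spmatrix operands, but it can also be called directly as below).
# The pattern Vp and the sparse update S are made up for illustration.
from cvxopt import spmatrix
from chompack import symbolic, cspmatrix

Vp = spmatrix(1.0, [0, 1, 2, 2], [0, 1, 1, 2], (3, 3))
symb = symbolic(Vp)
X = cspmatrix(symb)

# sparse lower-triangular update whose pattern is contained in the chordal pattern
S = spmatrix([2.0, -1.0, 0.5, 3.0], [0, 1, 2, 2], [0, 1, 1, 2], (3, 3))
X._iadd_spmatrix(S, alpha=1.0)        # adds 1.0*S into the supernodal blocks of X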
Example #4
def convert_block(G, h, dim, **kwargs):
    r"""
    Applies the clique conversion method to a single positive
    semidefinite block of a cone linear program

    .. math::
        \begin{array}{ll}
           \mbox{maximize}   & -h^T z \\
           \mbox{subject to} &  G^T z + c = 0 \\
                             &  \mathbf{smat}(z)\ \ \text{psd completable}
        \end{array}

    The conversion transforms the above problem into a block-diagonal one

    .. math::
        \begin{array}{ll}
           \mbox{maximize}   & -h_b^T z_b  \\
           \mbox{subject to} &  G_b^T z_b + c = 0 \\
                             &  G_c^T z_b = 0 \\
                             &  \mathbf{smat}(z_b)\ \ \text{psd block-diagonal}
        \end{array}
                             
    where :math:`z_b` is a vector representation of a block-diagonal
    matrix. The constraint :math:`G_b^T z_b + c = 0` corresponds to
    the original constraint :math:`G^T z + c = 0`, and the constraint
    :math:`G_c^T z_b = 0` is a coupling constraint.
       
    :param G:                 :py:class:`spmatrix`
    :param h:                 :py:class:`matrix`
    :param dim:               integer
    :param merge_function:    routine that implements a merge heuristic (optional)
    :param coupling:          mode of conversion (optional)
    :param max_density:       float (default: 0.4)

    The following example illustrates how to apply the conversion method to a one-block SDP:
    
    .. code-block:: python

        block = (G, h, dim) 
        blockc, blk2sparse, symb = convert_block(*block)
    
    The return value `blk2sparse` is a 4-tuple
    (`blki,I,J,n`) that defines a mapping between the sparse
    matrix representation and the converted block-diagonal
    representation. If `blkvec` represents a block-diagonal matrix,
    then

    .. code-block:: python

        S = spmatrix(blkvec[blki], I, J) 

    maps `blkvec` to a sparse matrix representation of the
    matrix. Similarly, a sparse matrix `S` can be converted to the
    block-diagonal matrix representation using the code
    
    .. code-block:: python

        blkvec = matrix(0.0, (len(S),1), tc=S.typecode)
        blkvec[blki] = S.V

    The optional argument `max_density` controls whether or not to perform
    conversion based on the aggregate sparsity of the block. Specifically,
    conversion is performed whenever the density of the (symmetrized) aggregate
    sparsity pattern, i.e., its number of nonzeros divided by `dim**2`, is at
    most `max_density`.
        
    The optional argument `coupling` controls the introduction
    of equality constraints in the conversion. Possible values
    are *full* (default), *sparse*, *sparse+tri*, and any nonnegative
    integer. Full coupling results in a conversion in which all
    coupling constraints are kept, and hence the converted problem is
    equivalent to the original problem. Sparse coupling yields a
    conversion in which only the coupling constraints corresponding to
    nonzero entries in the aggregate sparsity pattern are kept, and
    sparse-plus-tridiagonal (*sparse+tri*) yields a conversion with
    tridiagonal coupling in addition to coupling constraints corresponding
    to nonzero entries in the aggregate sparsity pattern. Setting `coupling`
    to a nonnegative integer *k* yields a conversion with coupling
    constraints corresponding to entries in a band with half-bandwidth *k*.

    .. seealso::

        M. S. Andersen, A. Hansson, and L. Vandenberghe, `Reduced-Complexity
        Semidefinite Relaxations of Optimal Power Flow Problems
        <http://dx.doi.org/10.1109/TPWRS.2013.2294479>`_,
        IEEE Transactions on Power Systems, 2014.
        
    """

    merge_function = kwargs.get('merge_function', None)
    coupling = kwargs.get('coupling', 'full')
    tskip = kwargs.get('max_density', 0.4)

    tc = G.typecode

    ###
    ### Find filled pattern, compute symbolic factorization using AMD
    ### ordering, and do "symbolic conversion"
    ###

    # find aggregate sparsity pattern
    h = sparse(h)
    LIa = matrix(list(set(G.I).union(set(h.I))))
    Ia = [i % dim for i in LIa]
    Ja = [j // dim for j in LIa]
    Va = spmatrix(1., Ia, Ja, (dim, dim))

    # find permutation, symmetrize, and permute
    Va = symmetrize(tril(Va))

    # if not very sparse, skip decomposition
    if float(len(Va)) / Va.size[0]**2 > tskip:
        return (G, h, None, [dim]), None, None

    # compute symbolic factorization
    F = symbolic(Va, merge_function=merge_function, p=amd.order)
    p = F.p
    ip = F.ip
    Va = F.sparsity_pattern(reordered=True, symmetric=True)

    # symbolic conversion
    if coupling == 'sparse':
        coupling = tril(Va)
    elif coupling == 'sparse+tri':
        # aggregate sparsity pattern plus the lower half of a tridiagonal band
        coupling = tril(Va)
        coupling += spmatrix(1.0,
                             [i for j in range(Va.size[0]) for i in range(j, min(Va.size[0], j + 2))],
                             [j for j in range(Va.size[0]) for i in range(j, min(Va.size[0], j + 2))],
                             Va.size)
    elif type(coupling) is int:
        # band coupling with half-bandwidth equal to the given integer
        assert coupling >= 0
        bw = +coupling
        coupling = spmatrix(1.0,
                            [i for j in range(Va.size[0]) for i in range(j, min(Va.size[0], j + bw + 1))],
                            [j for j in range(Va.size[0]) for i in range(j, min(Va.size[0], j + bw + 1))],
                            Va.size)

    dims, sparse_to_block, constraints = symb_to_block(F, coupling=coupling)

    # dimension of block-diagonal representation
    N = sum([d**2 for d in dims])

    ###
    ### Convert problem data
    ###

    m = G.size[1]  # cols in G
    cp, ri, val = G.CCS

    IV = []  # list of m (row, value) tuples
    J = []
    for j in range(m):
        iv = []
        for i in range(cp[j + 1] - cp[j]):
            row = ri[cp[j] + i] % dim
            col = ri[cp[j] + i] // dim
            if row < col: continue  # ignore upper triangular entries
            k1 = ip[row]
            k2 = ip[col]
            blk_idx = sparse_to_block[min(k1, k2) * dim + max(k1, k2)]
            if k1 == k2:
                iv.append((blk_idx[0], val[cp[j] + i]))
            elif k1 > k2:
                iv.append((blk_idx[0], val[cp[j] + i]))
                iv.append((blk_idx[1], val[cp[j] + i].conjugate()))
            else:
                iv.append((blk_idx[0], val[cp[j] + i].conjugate()))
                iv.append((blk_idx[1], val[cp[j] + i]))
        iv.sort(key=lambda x: x[0])
        IV.extend(iv)
        J.extend(len(iv) * [j])

    # build G_converted
    I, V = zip(*IV)
    G_converted = spmatrix(V, I, J, (N, m), tc=tc)

    # convert and build new h
    _, ri, val = h.CCS
    iv = []
    for i in range(len(ri)):
        row = ri[i] % dim
        col = ri[i] // dim
        if row < col: continue  # ignore upper triangular entries
        k1 = ip[row]
        k2 = ip[col]
        blk_idx = sparse_to_block[min(k1, k2) * dim + max(k1, k2)]
        if k1 == k2:
            iv.append((blk_idx[0], val[i]))
        elif k1 > k2:
            iv.append((blk_idx[0], val[i]))
            iv.append((blk_idx[1], val[i].conjugate()))
        else:
            iv.append((blk_idx[0], val[i].conjugate()))
            iv.append((blk_idx[1], val[i]))

    iv.sort(key=lambda x: x[0])
    if iv:
        I, V = zip(*iv)
    else:
        I, V = [], []
    h_converted = spmatrix(V, I, len(I) * [0], (N, 1), tc=tc)

    ###
    ### Build matrix representation of coupling constraints
    ###

    IV = []  # list of (row, value) tuples
    J = []
    ncon = 0
    for j in range(len(constraints)):
        iv = []
        if len(constraints[j]) == 2:
            ii, jj = constraints[j]
            iv = sorted([(ii, 1.0), (jj, -1.0)], key=lambda x: x[0])
            jl = 2 * [ncon]
            ncon += 1
        elif len(constraints[j]) == 4:
            i1, j1, i2, j2 = constraints[j]
            iv = sorted([(i1, 1.0), (i2, 1.0), (j1, -1.0), (j2, -1.0)],
                        key=lambda x: x[0])
            jl = 4 * [ncon]
            ncon += 1
            if tc == 'z':
                iv.extend(
                    sorted([(i1, complex(0.0, 1.0)), (i2, complex(0.0, -1.0)),
                            (j1, complex(0.0, -1.0)), (j2, complex(0.0, 1.0))],
                           key=lambda x: x[0]))
                jl.extend(4 * [ncon])
                ncon += 1
        IV.extend(iv)
        J.extend(jl)

    # build G_converted
    if IV: I, V = zip(*IV)
    else: I, V = [], []
    G_coupling = spmatrix(V, I, J, (N, ncon), tc=tc)

    # generate indices for reverse mapping (block_to_sparse)
    idx = []
    for k in sparse_to_block.keys():
        k1 = p[k % dim]
        k2 = p[k // dim]
        idx.append((min(k1, k2) * dim + max(k1, k2), sparse_to_block[k][0]))

    idx.sort()
    idx, blki = zip(*idx)
    blki = matrix(blki)
    I = [v % dim for v in idx]
    J = [v // dim for v in idx]
    n = sum([di**2 for di in dims])

    return (G_converted, h_converted, G_coupling, dims), (blki, I, J, n), F
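
# Usage sketch for convert_block on a made-up 3x3 one-block SDP (hypothetical data;
# assumes chompack exposes convert_block at the package level). G stores two
# constraint matrices column-wise in "vec" format (entry (i,j) at index i + dim*j),
# and h is the vectorized identity. max_density=1.0 forces the conversion even
# though the aggregate pattern of this tiny example is fairly dense.
from cvxopt import matrix, spmatrix
from chompack import convert_block

dim = 3
G = spmatrix([1.0, 0.5, 1.0, 0.3, 2.0],
             [0, 1, 4, 5, 8],               # entries (0,0),(1,0),(1,1),(2,1),(2,2)
             [0, 0, 1, 1, 1], (dim*dim, 2))
h = matrix(0.0, (dim*dim, 1))
h[::dim + 1] = 1.0

(Gc, hc, Gcoup, dims), blk2sparse, symb = convert_block(G, h, dim, max_density=1.0)
print(dims)                                 # block sizes of the converted problem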
Example #5
    def spmatrix(self, reordered = True, symmetric = False):
        """
        Converts the :py:class:`cspmatrix` :math:`A` to a sparse matrix. A reordered
        matrix is returned if the optional argument `reordered` is
        `True` (default), and otherwise the inverse permutation is applied. Only the
        default options are allowed if the :py:class:`cspmatrix` :math:`A` represents
        a Cholesky factor. 

        :param reordered:  boolean (default: True)
        :param symmetric:  boolean (default: False)			   
        """
        n = self.symb.n
        snptr = self.symb.snptr
        snode = self.symb.snode
        relptr = self.symb.relptr
        snrowidx = self.symb.snrowidx
        sncolptr = self.symb.sncolptr
        blkptr = self.symb.blkptr
        blkval = self.blkval
        
        if self.is_factor:
            if symmetric: raise ValueError("'symmetric = True' not implemented for Cholesky factors")
            if not reordered: raise ValueError("'reordered = False' not implemented for Cholesky factors")
            snpost = self.symb.snpost
            blkval = +blkval               # work on a copy; do not modify the factor in place
            for k in snpost:
                j = snode[snptr[k]]            # representative vertex
                nn = snptr[k+1]-snptr[k]       # |Nk|
                na = relptr[k+1]-relptr[k]     # |Ak|
                if na == 0: continue
                nj = na + nn
                if nn == 1:
                    blas.scal(blkval[blkptr[k]],blkval,offset = blkptr[k]+1,n=na)
                else:
                    blas.trmm(blkval,blkval, transA = "N", diag = "N", side = "R",uplo = "L", \
                              m = na, n = nn, ldA = nj, ldB = nj, \
                              offsetA = blkptr[k],offsetB = blkptr[k] + nn)

        cc = matrix(0,(n,1))  # count number of nonzeros in each col
        for k in range(self.symb.Nsn):
            nn = snptr[k+1]-snptr[k]
            na = relptr[k+1]-relptr[k]
            nj = nn + na
            for i in range(nn):
                j = snode[snptr[k]+i]
                cc[j] = nj - i

        # build col. ptr
        cp = [0]
        for i in range(n): cp.append(cp[-1] + cc[i])
        cp = matrix(cp)

        # copy data and row indices
        val = matrix(0.0, (cp[-1],1))
        ri = matrix(0, (cp[-1],1))
        for k in range(self.symb.Nsn):
            nn = snptr[k+1]-snptr[k]
            na = relptr[k+1]-relptr[k]
            nj = nn + na
            for i in range(nn):
                j = snode[snptr[k]+i]
                blas.copy(blkval, val, offsetx = blkptr[k]+nj*i+i, offsety = cp[j], n = nj-i)
                ri[cp[j]:cp[j+1]] = snrowidx[sncolptr[k]+i:sncolptr[k+1]]

        I = []; J = []
        for i in range(n):
            I += list(ri[cp[i]:cp[i+1]])
            J += (cp[i+1]-cp[i])*[i]

        tmp = spmatrix(val, I, J, (n,n))  # tmp is reordered and lower tril.
        
        if reordered or self.symb.p is None:
            # reordered matrix (do not apply inverse permutation)
            if not symmetric: return tmp
            else: return symmetrize(tmp)
        else:
            # apply inverse permutation            
            tmp = perm(symmetrize(tmp), self.symb.ip)
            if symmetric: return tmp
            else: return tril(tmp) 
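
# Usage sketch for converting a cspmatrix back to an ordinary cvxopt spmatrix
# (hypothetical; reuses the same kind of setup as the earlier sketches).
from cvxopt import matrix, spmatrix
from chompack import symbolic, cspmatrix

Vp = spmatrix(1.0, [0, 1, 2, 2], [0, 1, 1, 2], (3, 3))
symb = symbolic(Vp)
X = cspmatrix(symb)
X.add_projection(matrix(1.0, (3, 3)))                  # project the all-ones matrix

S_lower = X.spmatrix()                                 # lower-triangular, reordered
S_full = X.spmatrix(reordered=False, symmetric=True)   # symmetric, original ordering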