Example #1
def _updateTrustRegion(x, fx, oldFx, oldDeltaX, p, radius, g, oldGrad, H, func, grad, z, G, h, y, A, b):

    # readjust the bounds and initial value if possible
    # as we try our best to use warm start
    GTemp = numpy.append(numpy.zeros((1,p)), numpy.eye(p), axis=0)
    hTemp = numpy.zeros(p+1)
    hTemp[0] += radius

    if G is not None:
        GTemp = numpy.append(G, GTemp, axis=0)
        hTemp = numpy.append(h - G.dot(x), hTemp)
        dims = {'l': G.shape[0], 'q': [p+1], 's':  []}
    else:
        dims = {'l': 0, 'q': [p+1], 's':  []}

    if A is not None:
        bTemp = b - A.dot(x)
    else:
        bTemp = None

    # solving the QP to get the descent direction
    try:
        if A is not None:
            qpOut = solvers.coneqp(matrix(H), matrix(g), matrix(GTemp), matrix(hTemp), dims, matrix(A), matrix(bTemp))
            # print qpOut
        else:
            qpOut = solvers.coneqp(matrix(H), matrix(g), matrix(GTemp), matrix(hTemp), dims)
    except Exception as e:
        raise e

    # extract the descent direction and do a line search
    deltaX = numpy.array(qpOut['x'])
    # diffM is the difference between the real objective
    # function and M, the quadratic approximation 
    # M = diffM(deltaX.flatten(), g.flatten(), H)
    M = _diffM(g.flatten(), H)
    
    newFx = func(x + deltaX)
    predRatio = (fx - newFx) / M(deltaX)
        
    if predRatio>=0.75:
        radius = min(2.0*radius, maxRadius)
    elif predRatio<=0.25:
        radius *= 0.25

    if predRatio>=0.25:
        oldGrad = g.copy()
        x += deltaX
        oldFx = fx
        fx = newFx
        update = True
    else:
        update = False

    if G is not None:
        z[:] = numpy.array(qpOut['z'])[G.shape[0]]
    if A is not None:
        y[:] = numpy.array(qpOut['y'])

    return x, update, radius, deltaX, z, y, fx, oldFx, oldGrad, qpOut['iterations']
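Note: `_diffM` is not shown in this excerpt; a minimal sketch, assuming it returns the predicted reduction of the local quadratic model, so that `predRatio` compares the actual decrease to the predicted one:

import numpy

def _diffM(g, H):
    # Sketch only: M(d) = m(0) - m(d) for the model m(d) = fx + g'd + 0.5*d'Hd,
    # i.e. the decrease the quadratic model predicts for a step d.
    def M(deltaX):
        d = numpy.asarray(deltaX).flatten()
        return -(g.dot(d) + 0.5 * d.dot(H).dot(d))
    return M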
Example #2
def QuadMatch(K, B=1):
    """
    Return the top B solutions in increasing objective value to the following
    quadratic optimization problem (with symmetry eliminated)
    minimize    u^T K u
    subject to  u in {-1, +1}^n
                sum_i u_i = 0
                u_1 = -1

    Args:
        K: d by d array representing a PSD matrix
        B: number of solutions
    Returns:
        list of lists of +/-1 denoting assignment
    """
    allocations = []
    obj_value = []
    n = len(K)
    k = n / 2
    K1 = np.dot(np.ones((1, n)), K)
    K11 = np.dot(K1, np.ones((n, 1)))
    result = solvers.coneqp(
        P=matrix(4. * K[1:, 1:]),
        q=matrix(-4 * K1[0, 1:]),
        A=matrix(np.ones((1, n - 1))),
        b=matrix(np.array(k).reshape(-1, 1)),
    )
    allocation = [
        1 if zz > 0.5 else 0 for zz in np.array(result['x']).flatten()
    ]
    allocations.append([0] + allocation)
    obj_value.append(result['primal objective'])
    for b in range(B - 1):
        try:
            result = solvers.coneqp(
                P=matrix(4. * K[1:, 1:]),
                q=matrix(-4 * K1[0, 1:]),
                A=matrix(np.ones((1, n - 1))),
                b=matrix(np.array(k).reshape(-1, 1)),
                G=matrix(np.array(allocations).astype(float)[:, 1:]),
                h=matrix((k - 1) * np.ones((b + 1, 1))))
            allocation = [
                1 if zz > 0.5 else 0 for zz in np.array(result['x']).flatten()
            ]
            allocations.append([0] + allocation)
            obj_value.append(result['primal objective'])
        except:
            break
    return [[2 * zz - 1 for zz in z] for z in allocations]
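The relaxation above substitutes u = 2*z - 1 with u_1 = -1 fixed (so z_1 = 0). A quick numerical check of that identity (my own sketch, not part of the example; recall that cvxopt's coneqp minimizes (1/2)*x'*P*x + q'*x):

import numpy as np

# For symmetric K and u = 2*z - 1:  u'Ku = 4*z'Kz - 4*(1'K)z + 1'K1,
# which is where the 4*K and -4*K1 terms fed to the solver come from.
K = np.random.rand(4, 4)
K = K.dot(K.T)                      # random PSD matrix
ones = np.ones(4)
z = np.array([0., 1., 0., 1.])      # z_1 fixed at 0, i.e. u_1 = -1
u = 2 * z - 1
lhs = u.dot(K).dot(u)
rhs = 4 * z.dot(K).dot(z) - 4 * ones.dot(K).dot(z) + ones.dot(K).dot(ones)
assert np.isclose(lhs, rhs)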
Example #3
def Robust_opt(mean, cov, sigma, t, eta):
    N = len(mean)
    a = 2 / t
    kappa = np.sqrt(chi2.ppf(eta, df=N))
    Y = np.linalg.cholesky(sigma)

    P_0 = np.hstack([a * cov, np.array([[0.0] for i in range(N)])])
    P_0 = np.vstack([P_0, np.array([0.0 for i in range(N + 1)])])
    P = matrix(P_0)

    q = matrix(np.append((-1) * mean, [kappa]))

    A = matrix(np.array([[1.0 for i in range(N)] + [0.0]]), tc='d')
    b = matrix(np.array([1.0]), tc='d')

    I = matrix(0.0, (N + 1, N + 1))
    I[::N + 2] = 1.0
    G_1 = np.hstack([(-1) * Y.T, np.array([[0.0] for i in range(N)])])
    G_1 = np.vstack([np.array([0.0 for i in range(N)] + [-1.0]), G_1])
    G = matrix([-I, matrix(G_1)])
    h = matrix((N + 1) * [0.0] + (N + 1) * [0.0])

    dims = {'l': N + 1, 'q': [N + 1], 's': []}

    sol = solvers.coneqp(P, q, G, h, dims, A, b)
    w = sol['x'][:-1]
    #CE_0 = np.dot(mean,w)[0] - 0.5*a*np.dot(np.dot(w.T, cov), w)[0][0]

    return w
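A reading aid for the constraint blocks above (my annotation, not in the original): with the variable ordered as [w; v], the slack s = h - G*[w; v] splits according to dims.

# rows 0 .. N      : s = [w; v]       -> 'l': N+1    enforces w >= 0 and v >= 0
# rows N+1 .. 2N+1 : s = (v, Y.T*w)   -> 'q': [N+1]  enforces ||Y.T*w||_2 <= v
# The kappa entry in q then charges kappa*v, i.e. kappa*||Y.T*w||_2 at the optimum,
# the robust (chi-square) penalty on the mean estimate.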
Example #4
def find_nearest_valid_distribution(u_alpha, kernel, initial=None, reg=0):
    """ (solution,distance_sqd)=find_nearest_valid_distribution(u_alpha,kernel):
    Given an n-vector u_alpha summing to 1, with negative terms, 
    finds the distance (squared) to the nearest n-vector summing to 1, 
    with non-neg terms. Distance calculated using nxn matrix kernel. 
    Regularization parameter reg -- 

    min_v (u_alpha - v)^\top K (u_alpha - v) + reg* v^\top v"""

    P = matrix(2 * kernel)
    n = kernel.shape[0]
    q = matrix(np.dot(-2 * kernel, u_alpha))
    A = matrix(np.ones((1, n)))
    b = matrix(1.)
    G = spmatrix(-1., range(n), range(n))
    h = matrix(np.zeros(n))
    dims = {'l': n, 'q': [], 's': []}
    solvers.options['show_progress'] = False
    solution = solvers.coneqp(
        P,
        q,
        G,
        h,
        dims,
        A,
        b,
        initvals=initial
    )
    distance_sqd = solution['primal objective'] + np.dot(u_alpha.T,
                                                         np.dot(kernel, u_alpha))[0, 0]
    return (solution, distance_sqd)
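For reference (my annotation): coneqp minimizes (1/2)*v'Pv + q'v = v'Kv - 2*u_alpha'Kv, which is the squared distance minus the constant u_alpha'K u_alpha; that constant is exactly what gets added back to form distance_sqd. (The reg term from the docstring is not used in this excerpt.)

    (u_alpha - v)' K (u_alpha - v) = v'Kv - 2*u_alpha'Kv + u_alpha'K u_alpha
                                   = [0.5*v'Pv + q'v]    + u_alpha'K u_alpha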
Example #5
def testqp(opts):
    A = matrix([ [ .3, -.4,  -.2,  -.4,  1.3 ], 
                 [ .6, 1.2, -1.7,   .3,  -.3 ],
                 [-.3,  .0,   .6, -1.2, -2.0 ] ])
    b = matrix([ 1.5, .0, -1.2, -.7, .0])
    m, n = A.size

    I = matrix(0.0, (n,n))
    I[::n+1] = 1.0
    G = matrix([-I, matrix(0.0, (1,n)), I])
    h = matrix(n*[0.0] + [1.0] + n*[0.0])
    dims = {'l': n, 'q': [n+1], 's': []}
    P = A.T*A
    q = -A.T*b

    #localcones.options.update(opts)
    #sol = localcones.coneqp(P, q, G, h, dims, kktsolver='chol')
    solvers.options.update(opts)
    sol = solvers.coneqp(P, q, G, h, dims)
    if sol['status'] == 'optimal':
        print "x=\n", helpers.strSpe(sol['x'], "%.5f")
        print "s=\n", helpers.strSpe(sol['s'], "%.5f")
        print "z=\n", helpers.strSpe(sol['z'], "%.5f")
        print "\n *** running GO test ***"
        helpers.run_go_test("../testconeqp", {'x': sol['x'], 's': sol['s'], 'z': sol['z']})
Example #6
def l1regls(A, b, gamma):
    """ 
        minimize  0.5 * ||A*x-b||_2^2  + gamma*sum_k |x_k| 

    with complex data. 
    """ 

    A=matrix(A)
    b=matrix(b)
    m, n = A.size 

    # Solve as 
    # 
    #     minimize  0.5 * || AA*u - bb ||_2^2 + gamma* sum(t) 
    #     subject   || (u[k], u[k+n]) ||_2 <= t[k], k = 0, ..., n-1. 
    # 
    # with real data and u = ( Re(x), Im(x) ). 

    AA = matrix([ [A.real(), A.imag()], [-A.imag(), A.real()] ]) 
    bb = matrix([ b.real(), b.imag() ]) 
    
    # P = [AA'*AA, 0; 0, 0] 
    P = matrix(0.0, (3*n, 3*n)) 
    P[0:2*n,0:2*n]=AA.T*AA

    # q = [-AA'*bb; gamma*ones] 
    q = matrix([-AA.T * bb, matrix(gamma, (n,1))]) 
    
    # n second order cone constraints || (u[k], u[k+n]) ||_2 <= t[k] 
    I = matrix(0.0, (n,n)) 
    I[::n+1] = -1.0 
    G = matrix(0.0, (3*n, 3*n)) 
    G[1::3, :n] = I 
    G[2::3, n:2*n] = I 
    G[::3, -n:] = I 
    
    
    
    def Gfun(x, y, alpha = 1.0, beta = 0.0, trans = 'N'):
        gx=matrix(0.0,x.size)        
        if trans=='N':
            gx[1::3,:]=-x[  :  n,:]
            gx[2::3,:]=-x[ n:2*n,:]
            gx[ ::3,:]=-x[-n:   ,:]
        elif trans=='T':
            gx[  :  n,:]=-x[1::3,:]
            gx[ n:2*n,:]=-x[2::3,:]
            gx[-n:   ,:]=-x[ ::3,:]
        y[:,:]=alpha*gx+beta*y
        
    h = matrix(0.0, (3*n, 1)) 

    dims = {'l': 0, 'q': n*[3], 's': []} 
    factor=mykktchol(P)
    sol = solvers.coneqp(P, q, Gfun, h, dims,kktsolver=factor) 
    
    return sol['x'][:n] + 1j*sol['x'][n:2*n] 
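A note on the cone structure above (my annotation): dims = {'l': 0, 'q': n*[3]} declares n three-dimensional second-order cones, one per complex coefficient, and Gfun plus the external helper mykktchol (defined elsewhere in the original file) make the call matrix-free.

# With h = 0, the k-th slack triple produced by Gfun is
#     ( t_k, Re(x_k), Im(x_k) )  taken from variable entries (2n+k, k, n+k),
# so each cone enforces sqrt(Re(x_k)^2 + Im(x_k)^2) <= t_k, i.e. |x_k| <= t_k,
# and the gamma block of q charges gamma*sum(t), the complex l1 penalty.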
Example #7
def run():
    vi = solvers.coneqp(fP, f, G, h=c, dims=dims, kktsolver=fKKT,
            xnewcopy=newcopy, xdot=dot, xaxpy=axpy, xscal=scal)['x']

    v  = np.zeros((n+2,))
    v[1:-1] = vi[:,0].T

    print 'CG iters:', ITERS
    pretty_print(x,v)
Example #8
def regress_erp(y, test_idx, predictor, events,  ns):
    event_types = events['uniqueLabel']
    labels = events['label']
    latencies = events['latencyInFrame']

    train_idx = ~test_idx
    ytrn = matrix(y[train_idx].tolist()).T

    #There is a specific test_set to use
    if (len(np.where(test_idx)[0])!=0):
        tst_start_idx = min(np.where(test_idx)[0])
        tst_end_idx = max(np.where(test_idx)[0])

    #Test on all the data
    else:
        tst_start_idx = min(np.where(~test_idx)[0])
        tst_end_idx = max(np.where(~test_idx)[0])

    train_idx_list= np.where(train_idx==1)[0]
    train_idx_list = array(train_idx_list, dtype=np.int).tolist()

    #Solve the system of equations y = Ax
    P = predictor[train_idx_list,:].T*predictor[train_idx_list,:]
    q = -predictor[train_idx_list, :].T*ytrn
    rerp_vec = solvers.coneqp(P, q)['x']

    yestimate = array(predictor*rerp_vec)
    y_temp = matrix(y.tolist()).T
    noise = y_temp-yestimate


    events_to_test = np.where((array(latencies)<tst_end_idx) & (array(latencies)>tst_start_idx))[0]
    gc.disable()
    #Compute performance stats
    stats = np.empty((len(event_types),2))
    for i, this_type in enumerate(event_types):
        this_stat = np.empty((0,2))
        for j, event_idx in enumerate(events_to_test):
            this_event=labels[event_idx]
            if this_event==this_type:
                start_idx = latencies[event_idx];
                end_idx = np.minimum(tst_end_idx, start_idx+ns)

                yblock = y[start_idx:end_idx]
                noiseblock = noise[start_idx:end_idx]
                this_stat = np.append(this_stat, array([[sp.var(yblock)], [sp.var(noiseblock)]]).T, axis=0)

        rov_raw = this_stat[:,0]-this_stat[:,1]
        rov_nor = rov_raw/this_stat[:,0]
        rov = array([sp.mean(rov_raw), sp.mean(rov_nor)])
        stats[i,:] =  rov

    gc.enable()
    return stats, np.reshape(array(rerp_vec),(-1, ns)).T
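With no G, h, A or b passed, the coneqp call above is an unconstrained QP, so it is just solving the least-squares normal equations for the rERP coefficients (my annotation):

# minimize 0.5*x'(X'X)x - (X'y)'x   with X = predictor[train_idx_list, :],
# whose optimum satisfies X'X x = X'y -- the ordinary least-squares solution,
# e.g. equivalent (up to solver tolerance) to
#     np.linalg.lstsq(np.array(predictor)[train_idx_list, :], y[train_idx], rcond=None)[0]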
Example #9
def solve_generalized_mom_conelp(MM,
                                 constraints,
                                 W=None,
                                 absslack=1e-4,
                                 totalslack=1e-2,
                                 maxiter=1):
    """
    solve using iterative GMM using the cone linear program
    W is a specific weight matrix

    we give generous bound for each constraint, and then harsh bound for
    g'Wg
    @params
    constraints - E[g(x,X)] = f(x) - phi(X) that are supposed to be 0
    Eggt - the function handle takes current f(x) and estimates
    E[g(x,X)g(x,X)'] \in \Re^{n \times n}, the information matrix
    maxiter - times to run the iterative GMM
    """

    N = len(constraints)
    D = len(MM.matrix_monos)
    sr = len(MM.row_monos)

    A, b = MM.get_Ab(constraints, cvxoptmode=False)
    # augmented constraint matrix introduces slack variables g
    A_aug = sparse(matrix(sc.hstack((A, 1 * sc.eye(N + 1)[:, :-1]))))
    P = spdiag([matrix(0 * np.eye(D)), matrix(np.eye(N))])
    b = matrix(b)

    indicatorlist = MM.get_LMI_coefficients()
    G = sparse(indicatorlist).trans()
    V, I, J = G.V, G.I, G.J,
    Gaug = sparse(spmatrix(V, I, J, size=(sr * sr, N + D)))
    h = matrix(np.zeros((sr * sr, 1)))

    dims = {}
    dims['l'] = 0
    dims['q'] = []
    dims['s'] = [sr]

    Bf = MM.get_Bflat()
    R = np.random.rand(len(MM), len(MM))
    #W = R.dot(R.T)
    W = np.eye(len(MM))
    w = Bf.dot(W.flatten())[:, np.newaxis]
    q = matrix(np.vstack((w, np.zeros((N, 1)))))

    #ipdb.set_trace()
    for i in xrange(maxiter):
        w = Bf.dot(W.flatten())[:, np.newaxis]
        sol = cvxsolvers.coneqp(P, q, G=Gaug, h=h, dims=dims, A=A_aug, b=b)
    sol['x'] = sol['x'][0:D]
    return sol
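Structure of the program above, as I read it (annotation, not from the source): the moment vector is augmented with N slack variables g, the quadratic term penalizes only the slacks, and the single 's' block imposes the moment-matrix LMI.

# variable  = [ moment vector y (D entries) ; slacks g (N entries) ]
# A_aug     = [ A | eye(N+1)[:, :-1] ]  each of the first N rows of A*y = b may be
#                                       violated by its own slack g_i (last row exact)
# P, q      = blkdiag(0, I), [w; 0]  ->  objective  w'y + 0.5*||g||^2
# Gaug, h=0, dims['s'] = [sr]        ->  the sr x sr moment matrix built from y is PSD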
Example #10
File: cone.py Project: smhjn/cvxFin
    def solve(self):
        if len(self.aux.G) == 0:
            raise ArithmeticError('No constraints')

        A = solvers.coneqp(P=matrix(self.__P),
                           q=matrix(self.__q),
                           G=matrix(numpy.array(self.aux.G)),
                           h=matrix(numpy.array(self.aux.h)),
                           dims=self.aux.dims)

        if A["status"] == "optimal":
            return numpy.array(A['x']).transpose()[0], A
        else:
            raise ArithmeticError("Solution not optimal: " + A["status"])
Example #11
File: solvers.py Project: sidaw/mompy
def solve_generalized_mom_conelp(MM, constraints, W=None, absslack=1e-4, totalslack=1e-2, maxiter = 1):
    """
    solve using iterative GMM using the cone linear program
    W is a specific weight matrix

    we give generous bound for each constraint, and then harsh bound for
    g'Wg
    @params
    constraints - E[g(x,X)] = f(x) - phi(X) that are supposed to be 0
    Eggt - the function handle takes current f(x) and estimates
    E[g(x,X)g(x,X)'] \in \Re^{n \times n}, the information matrix
    maxiter - times to run the iterative GMM
    """
    
    N = len(constraints)
    D = len(MM.matrix_monos)
    sr = len(MM.row_monos)
    
    A,b = MM.get_Ab(constraints, cvxoptmode = False)
    # augmented constraint matrix introduces slack variables g
    A_aug = sparse(matrix(sc.hstack((A, 1*sc.eye(N+1)[:,:-1]))))
    P = spdiag([matrix(0*np.eye(D)), matrix(np.eye(N))])
    b = matrix(b)
       
    indicatorlist = MM.get_LMI_coefficients()
    G = sparse(indicatorlist).trans()
    V,I,J = G.V, G.I, G.J, 
    Gaug = sparse(spmatrix(V,I,J,size=(sr*sr, N + D)))
    h = matrix(np.zeros((sr*sr,1)))

    dims = {}
    dims['l'] = 0
    dims['q'] = []
    dims['s'] = [sr]

    Bf = MM.get_Bflat()
    R = np.random.rand(len(MM), len(MM))
    #W = R.dot(R.T)
    W = np.eye(len(MM))
    w = Bf.dot(W.flatten())[:,np.newaxis]
    q = matrix(np.vstack( (w,np.zeros((N,1))) ))
    
    #ipdb.set_trace()
    for i in xrange(maxiter):
        w = Bf.dot(W.flatten())[:,np.newaxis]
        sol = cvxsolvers.coneqp(P, q, G=Gaug, h=h, dims=dims, A=A_aug, b=b)
    sol['x'] = sol['x'][0:D]
    return sol
Example #12
def filter_copycounts(graph):
    # Same as filter_copycounts_inc_nodes except doesn't use edge weights
    # Currently not used
    A = buildMatrix(graph)
    [n,m]=A.size
    I = spmatrix(1.0, range(m), range(m))
    c = matrix(map(float,graph.edge_weights),(m,1))
    q = -c  #check if this is a row vector
    G = -I
    h = matrix(0.,(m,1)) # zero matrix
    dims = {'l': G.size[0], 'q': [], 's': []}
    b = matrix(0.,(n,1))
    x=solvers.coneqp(I, q, G, h, dims, A, b)['x']
    y = numpy.array(x)
    graph.filter_update(y)
    return x
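Because P is the identity and q = -c, the QP above is, up to the constant 0.5*||c||^2, a Euclidean projection of the observed edge copy-counts c onto the feasible set (my annotation):

# minimize    0.5*x'Ix - c'x  =  0.5*||x - c||^2 - 0.5*||c||^2
# subject to  x >= 0           (G = -I, h = 0)
#             A*x = 0          (b = 0; the node constraints from buildMatrix,
#                               presumably flow conservation)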
Example #13
def filter_copycounts(graph):
    # Same as filter_copycounts_inc_nodes except doesn't use edge weights
    # Currently not used
    A = buildMatrix(graph)
    [n, m] = A.size
    I = spmatrix(1.0, range(m), range(m))
    c = matrix(map(float, graph.edge_weights), (m, 1))
    q = -c  #check if this is a row vector
    G = -I
    h = matrix(0., (m, 1))  # zero matrix
    dims = {'l': G.size[0], 'q': [], 's': []}
    b = matrix(0., (n, 1))
    x = solvers.coneqp(I, q, G, h, dims, A, b)['x']
    y = numpy.array(x)
    graph.filter_update(y)
    return x
Example #14
    def solve_lbd(self, I_p):
        """ lbd=diag(Lambda) update with linear programming
        """
        c, G, h, A, b, primalstart = self.compute_constraints_Diag(
            self.alpha, self.X, self.H, self.trace)

        sol = solvers.coneqp(matrix(self.gamma * I_p),
                             c,
                             G,
                             h,
                             A=A,
                             b=b,
                             primalstart=primalstart)
        lbd_new = abs(np.array(sol['x']).ravel())

        return lbd_new
Example #15
def solve_generalized_mom_coneqp(MM, constraints, pconstraints=None, maxiter = 1):
    """
    solve using iterative GMM using the quadratic cone program
    func_W takes a solved instance and returns the weighting matrix,
    this function has access to individual data points
    @params
    constraints - E[g(x,X)] = f(x) - h(X) that are supposed to be 0
    Eggt - the function handle takes current f(x) and estimates
    E[g(x,X)g(x,X)'] \in \Re^{n \times n}, the information matrix
    maxiter - times to run the iterative GMM
    """
    N = len(constraints)
    D = len(MM.matrix_monos)
    sr = len(MM.row_monos)
    
    A,b = MM.get_Ab(constraints, cvxoptmode = False)
    #ipdb.set_trace()
    # augmented constraint matrix introduces slack variables g
    A_aug = sparse(matrix(sc.hstack((A, 1*sc.eye(N+1)[:,:-1]))))
    P = spdiag([matrix(0*np.eye(D)), matrix(np.eye(N))])
    b = matrix(b)
       
    indicatorlist = MM.get_LMI_coefficients()
    G = sparse(indicatorlist).trans()
    V,I,J = G.V, G.I, G.J, 
    Gaug = sparse(spmatrix(V,I,J,size=(sr*sr, N + D)))
    h = matrix(np.zeros((sr*sr,1)))

    dims = {}
    dims['l'] = 0
    dims['q'] = []
    dims['s'] = [sr]

    Bf = MM.get_Bflat()
    R = np.random.rand(len(MM), len(MM))
    W = R.dot(R.T)
    W = np.eye(len(MM))
    w = Bf.dot(W.flatten())[:,np.newaxis]
    q = 1e-5*matrix(np.vstack( (w,np.zeros((N,1))) ))
    
    #ipdb.set_trace()
    for i in xrange(maxiter):
        w = Bf.dot(W.flatten())[:,np.newaxis]
        sol = solvers.coneqp(P, q, G=Gaug, h=h, dims=dims, A=A_aug, b=b)
    sol['x'] = sol['x'][0:D]
    return sol
Example #16
def testqp(opts):
    A = matrix([ [ .3, -.4,  -.2,  -.4,  1.3 ], 
                 [ .6, 1.2, -1.7,   .3,  -.3 ],
                 [-.3,  .0,   .6, -1.2, -2.0 ] ])
    b = matrix([ 1.5, .0, -1.2, -.7, .0])
    m, n = A.size

    I = matrix(0.0, (n,n))
    I[::n+1] = 1.0
    G = matrix([-I, matrix(0.0, (1,n)), I])
    h = matrix(n*[0.0] + [1.0] + n*[0.0])
    dims = {'l': n, 'q': [n+1], 's': []}
    P = A.T*A
    q = -A.T*b

    solvers.options.update(opts)
    sol = solvers.coneqp(P, q, G, h, dims, kktsolver='ldl')
    if sol['status'] == 'optimal':
        print "x=\n", helpers.str2(sol['x'], "%.9f")
        print "s=\n", helpers.str2(sol['s'], "%.9f")
        print "z=\n", helpers.str2(sol['z'], "%.9f")
        helpers.run_go_test("../testconeqp", {'x': sol['x'], 's': sol['s'], 'z': sol['z']})
Example #17
def testqp(opts):
    A = matrix([[0.3, -0.4, -0.2, -0.4, 1.3], [0.6, 1.2, -1.7, 0.3, -0.3], [-0.3, 0.0, 0.6, -1.2, -2.0]])
    b = matrix([1.5, 0.0, -1.2, -0.7, 0.0])
    m, n = A.size

    I = matrix(0.0, (n, n))
    I[:: n + 1] = 1.0
    G = matrix([-I, matrix(0.0, (1, n)), I])
    h = matrix(n * [0.0] + [1.0] + n * [0.0])
    dims = {"l": n, "q": [n + 1], "s": []}
    P = A.T * A
    q = -A.T * b

    # localcones.options.update(opts)
    # sol = localcones.coneqp(P, q, G, h, dims, kktsolver='chol')
    solvers.options.update(opts)
    sol = solvers.coneqp(P, q, G, h, dims)
    if sol["status"] == "optimal":
        print "x=\n", helpers.strSpe(sol["x"], "%.5f")
        print "s=\n", helpers.strSpe(sol["s"], "%.5f")
        print "z=\n", helpers.strSpe(sol["z"], "%.5f")
        print "\n *** running GO test ***"
        helpers.run_go_test("../testconeqp", {"x": sol["x"], "s": sol["s"], "z": sol["z"]})
Example #18
# The quadratic cone program of section 8.2 (Quadratic cone programs).
  
# minimize   (1/2)*x'*A'*A*x - b'*A*x
# subject to x >= 0
#            ||x||_2 <= 1

from cvxopt import matrix, base, solvers
A = matrix([ [ .3, -.4,  -.2,  -.4,  1.3 ], 
             [ .6, 1.2, -1.7,   .3,  -.3 ],
             [-.3,  .0,   .6, -1.2, -2.0 ] ])
b = matrix([ 1.5, .0, -1.2, -.7, .0])
m, n = A.size

I = matrix(0.0, (n,n))
I[::n+1] = 1.0
G = matrix([-I, matrix(0.0, (1,n)), I])
h = matrix(n*[0.0] + [1.0] + n*[0.0])
dims = {'l': n, 'q': [n+1], 's': []}
x = solvers.coneqp(A.T*A, -A.T*b, G, h, dims)['x']
print("\nx = \n")
print(x)
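Reading the stacked G and h (my annotation): the slack s = h - G*x splits into the two cone blocks declared in dims.

# s[:n] = x         -> 'l': n      enforces x >= 0
# s[n:] = (1, -x)   -> 'q': [n+1]  enforces ||-x||_2 <= 1, i.e. ||x||_2 <= 1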
Example #19
def nrmapp(A, B, C=None, d=None, G=None, h=None):
    """

    Solves the regularized nuclear norm approximation problem 
    
        minimize    || A(x) + B ||_* + 1/2 x'*C*x + d'*x
        subject to  G*x <= h

    and its dual

        maximize    -h'*z + tr(B'*Z) - 1/2 v'*C*v 
        subject to  d + G'*z + A'(Z) = C*v 
                    z >= 0
                    || Z || <= 1.

    A(x) is a linear mapping that maps n-vectors x to (p x q)-matrices A(x).

    ||.||_* is the nuclear norm (sum of singular values).  

    A'(Z) is the adjoint mapping of A(x).

    ||.|| is the maximum singular value norm.


    INPUT 

    A       real dense or sparse matrix of size (p*q, n).  Its columns are
            the coefficients A_i of the mapping 

                A: reals^n --> reals^pxq,   A(x) = sum_i=1^n x_i * A_i, 
                     
            stored in column-major order, as p*q-vectors.
        
    B       real dense or sparse matrix of size (p, q), with p >= q.
    
    C       real symmetric positive semidefinite dense or sparse matrix of 
            order n.  Only the lower triangular part of C is accessed.
            The default value is a zero matrix.

    d       real dense matrix of size (n, 1).  The default value is a zero
            vector.
    
    G       real dense or sparse matrix of size (m, n), with m >= 0.  
            The default value is a matrix of size (0, n).
    
    h       real dense matrix of size (m, 1).  The default value is a 
            matrix of size (0, 1).


    OUTPUT

    status  'optimal', 'primal infeasible', or 'unknown'. 

    x       'd' matrix of size (n, 1) if status is 'optimal'; 
            None otherwise.

    z       'd' matrix of size (m, 1) if status is 'optimal' or 'primal 
            infeasible'; None otherwise.

    Z       'd' matrix of size (p, q) if status is 'optimal' or 'primal
            infeasible'; None otherwise.


    If status is 'optimal', then x, z, Z are approximate solutions of the
    optimality conditions

        C * x  + G' * z + A'(Z) + d = 0  
        G * x <= h 
        z >= 0,  || Z || < = 1
        z' * (h - G*x) = 0
        tr (Z' * (A(x) + B)) = || A(x) + B ||_*.

    The last (complementary slackness) condition can be replaced by the
    following.  If the singular value decomposition of A(x) + B is

        A(x) + B = [ U1  U2 ] * diag(s, 0) * [ V1  V2 ]',

    with s > 0, then

        Z = U1 * V1' + U2 * W * V2',  || W || <= 1. 


    If status is 'primal infeasible', then Z = 0 and z is a certificate of
    infeasibility for the inequalities G * x <= h, i.e., a vector that
    satisfies

        h' * z = 1,  G' * z = 0,  z >= 0.

    """

    if type(B) not in (matrix, spmatrix) or B.typecode is not 'd':
        raise TypeError, "B must be a real dense or sparse matrix"
    p, q = B.size
    if p < q:
        raise ValueError, "row dimension of B must be greater than or "\
            "equal to column dimension"

    if type(A) not in (matrix, spmatrix) or A.typecode is not 'd' or \
        A.size[0] != p*q:
        raise TypeError, "A must be a real dense or sparse matrix with "\
            "p*q rows if B has size (p, q)"
    n = A.size[1]

    if G is None: G = spmatrix([], [], [], (0, n))
    if h is None: h = matrix(0.0, (0, 1))
    if type(h) is not matrix or h.typecode is not 'd' or h.size[1] != 1:
        raise TypeError, "h must be a real dense matrix with one column"
    m = h.size[0]
    if type(G) not in (matrix, spmatrix) or G.typecode is not 'd' or \
        G.size != (m, n):
        raise TypeError, "G must be a real dense matrix or sparse matrix "\
            "of size (m, n) if h has length m and A has n columns"

    if C is None: C = spmatrix(0.0, [], [], (n, n))
    if d is None: d = matrix(0.0, (n, 1))
    if type(C) not in (matrix, spmatrix) or C.typecode is not 'd' or \
        C.size != (n,n):
        raise TypeError, "C must be real dense or sparse matrix of size "\
            "(n, n) if A has n columns"
    if type(d) is not matrix or d.typecode is not 'd' or d.size != (n, 1):
        raise TypeError, "d must be a real matrix of size (n, 1) if A has "\
            "n columns"

    # The problem is solved as a cone program
    #
    #     minimize    (1/2) * x'*C*x + d'*x  + (1/2) * (tr X1 + tr X2)
    #     subject to  G*x <= h
    #                 [ X1         (A(x) + B)' ]
    #                 [ A(x) + B   X2          ]  >= 0.
    #
    # The primal variable is stored as a list [ x, X1, X2 ].

    def xnewcopy(u):
        return [matrix(u[0]), matrix(u[1]), matrix(u[2])]

    def xdot(u, v):
        return blas.dot(u[0], v[0]) + misc.sdot2(u[1], v[1]) + \
            misc.sdot2(u[2], v[2])

    def xscal(alpha, u):
        blas.scal(alpha, u[0])
        blas.scal(alpha, u[1])
        blas.scal(alpha, u[2])

    def xaxpy(u, v, alpha=1.0):
        blas.axpy(u[0], v[0], alpha)
        blas.axpy(u[1], v[1], alpha)
        blas.axpy(u[2], v[2], alpha)

    def Pf(u, v, alpha=1.0, beta=0.0):
        base.symv(C, u[0], v[0], alpha=alpha, beta=beta)
        blas.scal(beta, v[1])
        blas.scal(beta, v[2])

    c = [d, matrix(0.0, (q, q)), matrix(0.0, (p, p))]
    c[1][::q + 1] = 0.5
    c[2][::p + 1] = 0.5

    # If V is a p+q x p+q matrix
    #
    #         [ V11  V12 ]
    #     V = [          ]
    #         [ V21  V22 ]
    #
    # with V11 q x q,  V21 p x q, V12 q x p, and V22 p x p, then I11, I21,
    # I22 are the index sets defined by
    #
    #     V[I11] = V11[:],  V[I21] = V21[:],  V[I22] = V22[:].
    #

    I11 = matrix([i + j * (p + q) for j in xrange(q) for i in xrange(q)])
    I21 = matrix([q + i + j * (p + q) for j in xrange(q) for i in xrange(p)])
    I22 = matrix([(p + q) * q + q + i + j * (p + q) for j in xrange(p)
                  for i in xrange(p)])

    dims = {'l': m, 'q': [], 's': [p + q]}
    hh = matrix(0.0, (m + (p + q)**2, 1))
    hh[:m] = h
    hh[m + I21] = B[:]

    def Gf(u, v, alpha=1.0, beta=0.0, trans='N'):

        if trans == 'N':

            # v[:m] := alpha * G * u[0] + beta * v[:m]
            base.gemv(G, u[0], v, alpha=alpha, beta=beta)

            # v[m:] := alpha * [-u[1],  -A(u[0])';  -A(u[0]), -u[2]]
            #          + beta * v[m:]
            blas.scal(beta, v, offset=m)
            v[m + I11] -= alpha * u[1][:]
            v[m + I21] -= alpha * A * u[0]
            v[m + I22] -= alpha * u[2][:]

        else:

            # v[0] := alpha * ( G.T * u[:m] - 2.0 * A.T * u[m + I21] )
            #         + beta v[1]
            base.gemv(G, u, v[0], trans='T', alpha=alpha, beta=beta)
            base.gemv(A,
                      u[m + I21],
                      v[0],
                      trans='T',
                      alpha=-2.0 * alpha,
                      beta=1.0)

            # v[1] := -alpha * u[m + I11] + beta * v[1]
            blas.scal(beta, v[1])
            blas.axpy(u[m + I11], v[1], alpha=-alpha)

            # v[2] := -alpha * u[m + I22] + beta * v[2]
            blas.scal(beta, v[2])
            blas.axpy(u[m + I22], v[2], alpha=-alpha)

    def Af(u, v, alpha=1.0, beta=0.0, trans='N'):
        if trans == 'N':
            pass
        else:
            blas.scal(beta, v[0])
            blas.scal(beta, v[1])
            blas.scal(beta, v[2])

    L1 = matrix(0.0, (q, q))
    L2 = matrix(0.0, (p, p))
    T21 = matrix(0.0, (p, q))
    s = matrix(0.0, (q, 1))
    SS = matrix(0.0, (q, q))
    V1 = matrix(0.0, (q, q))
    V2 = matrix(0.0, (p, p))
    As = matrix(0.0, (p * q, n))
    As2 = matrix(0.0, (p * q, n))
    tmp = matrix(0.0, (p, q))
    a = matrix(0.0, (p + q, p + q))
    H = matrix(0.0, (n, n))
    Gs = matrix(0.0, (m, n))
    Q1 = matrix(0.0, (q, p + q))
    Q2 = matrix(0.0, (p, p + q))
    tau1 = matrix(0.0, (q, 1))
    tau2 = matrix(0.0, (p, 1))
    bz11 = matrix(0.0, (q, q))
    bz22 = matrix(0.0, (p, p))
    bz21 = matrix(0.0, (p, q))

    # Suppose V = [V1; V2] is p x q with V1 q x q.  If v = V[:] then
    # v[Itriu] are the strict upper triangular entries of V1 stored
    # columnwise.
    Itriu = [i + j * p for j in xrange(1, q) for i in xrange(j)]

    # v[Itril] are the strict lower triangular entries of V1 stored rowwise.
    Itril = [j + i * p for j in xrange(1, q) for i in xrange(j)]

    # v[Idiag] are the diagonal entries of V1.
    Idiag = [i * (p + 1) for i in xrange(q)]

    # v[Itriu2] are the upper triangular entries of V1, with the diagonal
    # entries stored first, followed by the strict upper triangular entries
    # stored columnwise.
    Itriu2 = Idiag + Itriu

    # If V is a q x q matrix and v = V[:], then v[Itril2] are the strict
    # lower triangular entries of V stored columnwise and v[Itril3] are
    # the strict lower triangular entries stored rowwise.
    Itril2 = [i + j * q for j in xrange(q) for i in xrange(j + 1, q)]
    Itril3 = [i + j * q for i in xrange(q) for j in xrange(i)]

    P = spmatrix(0.0, Itriu, Itril, (p * q, p * q))
    D = spmatrix(1.0, range(p * q), range(p * q))
    DV = matrix(1.0, (p * q, 1))

    def F(W):
        """
        Create a solver for the linear equations

                                C * ux + G' * uzl - 2*A'(uzs21) = bx
                                                         -uzs11 = bX1
                                                         -uzs22 = bX2
                                            G * ux - Dl^2 * uzl = bzl
            [ -uX1   -A(ux)' ]          [ uzs11 uzs21' ]     
            [                ] - r*r' * [              ] * r*r' = bzs
            [ -A(ux) -uX2    ]          [ uzs21 uzs22  ]

        where Dl = diag(W['l']), r = W['r'][0].  

        On entry, x = (bx, bX1, bX2) and z = [ bzl; bzs[:] ].
        On exit, x = (ux, uX1, uX2) and z = [ Dl*uzl; (r'*uzs*r)[:] ].


        1. Compute matrices V1, V2 such that (with T = r*r')
        
               [ V1   0   ] [ T11  T21' ] [ V1'  0  ]   [ I  S' ]
               [          ] [           ] [         ] = [       ]
               [ 0    V2' ] [ T21  T22  ] [ 0    V2 ]   [ S  I  ]
        
           and S = [ diag(s); 0 ], s a positive q-vector.

        2. Factor the mapping X -> X + S * X' * S:

               X + S * X' * S = L( L'( X )). 

        3. Compute scaled mappings: a matrix As with as its columns the 
           coefficients of the scaled mapping 

               L^-1( V2' * A() * V1' ) 

           and the matrix Gs = Dl^-1 * G.

        4. Cholesky factorization of H = C + Gs'*Gs + 2*As'*As.

        """

        # 1. Compute V1, V2, s.

        r = W['r'][0]

        # LQ factorization R[:q, :] = L1 * Q1.
        lapack.lacpy(r, Q1, m=q)
        lapack.gelqf(Q1, tau1)
        lapack.lacpy(Q1, L1, n=q, uplo='L')
        lapack.orglq(Q1, tau1)

        # LQ factorization R[q:, :] = L2 * Q2.
        lapack.lacpy(r, Q2, m=p, offsetA=q)
        lapack.gelqf(Q2, tau2)
        lapack.lacpy(Q2, L2, n=p, uplo='L')
        lapack.orglq(Q2, tau2)

        # V2, V1, s are computed from an SVD: if
        #
        #     Q2 * Q1' = U * diag(s) * V',
        #
        # then V1 = V' * L1^-1 and V2 = L2^-T * U.

        # T21 = Q2 * Q1.T
        blas.gemm(Q2, Q1, T21, transB='T')

        # SVD T21 = U * diag(s) * V'.  Store U in V2 and V' in V1.
        lapack.gesvd(T21, s, jobu='A', jobvt='A', U=V2, Vt=V1)

        #        # Q2 := Q2 * Q1' without extracting Q1; store T21 in Q2
        #        this will requires lapack.ormlq or lapack.unmlq

        # V2 = L2^-T * U
        blas.trsm(L2, V2, transA='T')

        # V1 = V' * L1^-1
        blas.trsm(L1, V1, side='R')

        # 2. Factorization X + S * X' * S = L( L'( X )).
        #
        # The factor L is stored as a diagonal matrix D and a sparse lower
        # triangular matrix P, such that
        #
        #     L(X)[:] = D**-1 * (I + P) * X[:]
        #     L^-1(X)[:] = D * (I - P) * X[:].

        # SS is q x q with SS[i,j] = si*sj.
        blas.scal(0.0, SS)
        blas.syr(s, SS)

        # For a p x q matrix X, P*X[:] is Y[:] where
        #
        #     Yij = si * sj * Xji  if i < j
        #         = 0              otherwise.
        #
        P.V = SS[Itril2]

        # For a p x q matrix X, D*X[:] is Y[:] where
        #
        #     Yij = Xij / sqrt( 1 - si^2 * sj^2 )  if i < j
        #         = Xii / sqrt( 1 + si^2 )         if i = j
        #         = Xij                            otherwise.
        #
        DV[Idiag] = sqrt(1.0 + SS[::q + 1])
        DV[Itriu] = sqrt(1.0 - SS[Itril3]**2)
        D.V = DV**-1

        # 3. Scaled linear mappings

        # Ask :=  V2' * Ask * V1'
        blas.scal(0.0, As)
        base.axpy(A, As)
        for i in xrange(n):
            # tmp := V2' * As[i, :]
            blas.gemm(V2,
                      As,
                      tmp,
                      transA='T',
                      m=p,
                      n=q,
                      k=p,
                      ldB=p,
                      offsetB=i * p * q)
            # As[:,i] := tmp * V1'
            blas.gemm(tmp,
                      V1,
                      As,
                      transB='T',
                      m=p,
                      n=q,
                      k=q,
                      ldC=p,
                      offsetC=i * p * q)

        # As := D * (I - P) * As
        #     = L^-1 * As.
        blas.copy(As, As2)
        base.gemm(P, As, As2, alpha=-1.0, beta=1.0)
        base.gemm(D, As2, As)

        # Gs := Dl^-1 * G
        blas.scal(0.0, Gs)
        base.axpy(G, Gs)
        for k in xrange(n):
            blas.tbmv(W['di'], Gs, n=m, k=0, ldA=1, offsetx=k * m)

        # 4. Cholesky factorization of H = C + Gs' * Gs + 2 * As' * As.

        blas.syrk(As, H, trans='T', alpha=2.0)
        blas.syrk(Gs, H, trans='T', beta=1.0)
        base.axpy(C, H)
        lapack.potrf(H)

        def f(x, y, z):
            """

            Solve 

                              C * ux + G' * uzl - 2*A'(uzs21) = bx
                                                       -uzs11 = bX1
                                                       -uzs22 = bX2
                                           G * ux - D^2 * uzl = bzl
                [ -uX1   -A(ux)' ]       [ uzs11 uzs21' ]     
                [                ] - T * [              ] * T = bzs.
                [ -A(ux) -uX2    ]       [ uzs21 uzs22  ]

            On entry, x = (bx, bX1, bX2) and z = [ bzl; bzs[:] ].
            On exit, x = (ux, uX1, uX2) and z = [ D*uzl; (r'*uzs*r)[:] ].

            Define X = uzs21, Z = T * uzs * T:   
 
                      C * ux + G' * uzl - 2*A'(X) = bx
                                [ 0  X' ]               [ bX1 0   ]
                            T * [       ] * T - Z = T * [         ] * T
                                [ X  0  ]               [ 0   bX2 ]
                               G * ux - D^2 * uzl = bzl
                [ -uX1   -A(ux)' ]   [ Z11 Z21' ]     
                [                ] - [          ] = bzs
                [ -A(ux) -uX2    ]   [ Z21 Z22  ]

            Return x = (ux, uX1, uX2), z = [ D*uzl; (rti'*Z*rti)[:] ].

            We use the congruence transformation 

                [ V1   0   ] [ T11  T21' ] [ V1'  0  ]   [ I  S' ]
                [          ] [           ] [         ] = [       ]
                [ 0    V2' ] [ T21  T22  ] [ 0    V2 ]   [ S  I  ]

            and the factorization 

                X + S * X' * S = L( L'(X) ) 

            to write this as

                                  C * ux + G' * uzl - 2*A'(X) = bx
                L'(V2^-1 * X * V1^-1) - L^-1(V2' * Z21 * V1') = bX
                                           G * ux - D^2 * uzl = bzl
                            [ -uX1   -A(ux)' ]   [ Z11 Z21' ]     
                            [                ] - [          ] = bzs,
                            [ -A(ux) -uX2    ]   [ Z21 Z22  ]

            or

                C * ux + Gs' * uuzl - 2*As'(XX) = bx
                                      XX - ZZ21 = bX
                                 Gs * ux - uuzl = D^-1 * bzl
                                 -As(ux) - ZZ21 = bbzs_21
                                     -uX1 - Z11 = bzs_11
                                     -uX2 - Z22 = bzs_22

            if we introduce scaled variables

                uuzl = D * uzl
                  XX = L'(V2^-1 * X * V1^-1) 
                     = L'(V2^-1 * uzs21 * V1^-1)
                ZZ21 = L^-1(V2' * Z21 * V1') 

            and define

                bbzs_21 = L^-1(V2' * bzs_21 * V1')
                                           [ bX1  0   ]
                     bX = L^-1( V2' * (T * [          ] * T)_21 * V1').
                                           [ 0    bX2 ]           
 
            Eliminating Z21 gives 

                C * ux + Gs' * uuzl - 2*As'(XX) = bx
                                 Gs * ux - uuzl = D^-1 * bzl
                                   -As(ux) - XX = bbzs_21 - bX
                                     -uX1 - Z11 = bzs_11
                                     -uX2 - Z22 = bzs_22 

            and eliminating uuzl and XX gives

                        H * ux = bx + Gs' * D^-1 * bzl + 2*As'(bX - bbzs_21)
                Gs * ux - uuzl = D^-1 * bzl
                  -As(ux) - XX = bbzs_21 - bX
                    -uX1 - Z11 = bzs_11
                    -uX2 - Z22 = bzs_22.


            In summary, we can use the following algorithm: 

            1. bXX := bX - bbzs21
                                        [ bX1 0   ]
                    = L^-1( V2' * ((T * [         ] * T)_21 - bzs_21) * V1')
                                        [ 0   bX2 ]

            2. Solve H * ux = bx + Gs' * D^-1 * bzl + 2*As'(bXX).

            3. From ux, compute 

                   uuzl = Gs*ux - D^-1 * bzl and 
                      X = V2 * L^-T(-As(ux) + bXX) * V1.

            4. Return ux, uuzl, 

                   rti' * Z * rti = r' * [ -bX1, X'; X, -bX2 ] * r
 
               and uX1 = -Z11 - bzs_11,  uX2 = -Z22 - bzs_22.

            """

            # Save bzs_11, bzs_22, bzs_21.
            lapack.lacpy(z, bz11, uplo='L', m=q, n=q, ldA=p + q, offsetA=m)
            lapack.lacpy(z, bz21, m=p, n=q, ldA=p + q, offsetA=m + q)
            lapack.lacpy(z,
                         bz22,
                         uplo='L',
                         m=p,
                         n=p,
                         ldA=p + q,
                         offsetA=m + (p + q + 1) * q)

            # zl := D^-1 * zl
            #     = D^-1 * bzl
            blas.tbmv(W['di'], z, n=m, k=0, ldA=1)

            # zs := r' * [ bX1, 0; 0, bX2 ] * r.

            # zs := [ bX1, 0; 0, bX2 ]
            blas.scal(0.0, z, offset=m)
            lapack.lacpy(x[1], z, uplo='L', m=q, n=q, ldB=p + q, offsetB=m)
            lapack.lacpy(x[2],
                         z,
                         uplo='L',
                         m=p,
                         n=p,
                         ldB=p + q,
                         offsetB=m + (p + q + 1) * q)

            # scale diagonal of zs by 1/2
            blas.scal(0.5, z, inc=p + q + 1, offset=m)

            # a := tril(zs)*r
            blas.copy(r, a)
            blas.trmm(z,
                      a,
                      side='L',
                      m=p + q,
                      n=p + q,
                      ldA=p + q,
                      ldB=p + q,
                      offsetA=m)

            # zs := a'*r + r'*a
            blas.syr2k(r,
                       a,
                       z,
                       trans='T',
                       n=p + q,
                       k=p + q,
                       ldB=p + q,
                       ldC=p + q,
                       offsetC=m)

            # bz21 := L^-1( V2' * ((r * zs * r')_21 - bz21) * V1')
            #
            #                           [ bX1 0   ]
            #       = L^-1( V2' * ((T * [         ] * T)_21 - bz21) * V1').
            #                           [ 0   bX2 ]

            # a = [ r21 r22 ] * z
            #   = [ r21 r22 ] * r' * [ bX1, 0; 0, bX2 ] * r
            #   = [ T21  T22 ] * [ bX1, 0; 0, bX2 ] * r
            blas.symm(z,
                      r,
                      a,
                      side='R',
                      m=p,
                      n=p + q,
                      ldA=p + q,
                      ldC=p + q,
                      offsetB=q)

            # bz21 := -bz21 + a * [ r11, r12 ]'
            #       = -bz21 + (T * [ bX1, 0; 0, bX2 ] * T)_21
            blas.gemm(a,
                      r,
                      bz21,
                      transB='T',
                      m=p,
                      n=q,
                      k=p + q,
                      beta=-1.0,
                      ldA=p + q,
                      ldC=p)

            # bz21 := V2' * bz21 * V1'
            #       = V2' * (-bz21 + (T*[bX1, 0; 0, bX2]*T)_21) * V1'
            blas.gemm(V2, bz21, tmp, transA='T', m=p, n=q, k=p, ldB=p)
            blas.gemm(tmp, V1, bz21, transB='T', m=p, n=q, k=q, ldC=p)

            # bz21[:] := D * (I-P) * bz21[:]
            #       = L^-1 * bz21[:]
            #       = bXX[:]
            blas.copy(bz21, tmp)
            base.gemv(P, bz21, tmp, alpha=-1.0, beta=1.0)
            base.gemv(D, tmp, bz21)

            # Solve H * ux = bx + Gs' * D^-1 * bzl + 2*As'(bXX).

            # x[0] := x[0] + Gs'*zl + 2*As'(bz21)
            #       = bx + G' * D^-1 * bzl + 2 * As'(bXX)
            blas.gemv(Gs, z, x[0], trans='T', alpha=1.0, beta=1.0)
            blas.gemv(As, bz21, x[0], trans='T', alpha=2.0, beta=1.0)

            # x[0] := H \ x[0]
            #      = ux
            lapack.potrs(H, x[0])

            # uuzl = Gs*ux - D^-1 * bzl
            blas.gemv(Gs, x[0], z, alpha=1.0, beta=-1.0)

            # bz21 := V2 * L^-T(-As(ux) + bz21) * V1
            #       = X
            blas.gemv(As, x[0], bz21, alpha=-1.0, beta=1.0)
            blas.tbsv(DV, bz21, n=p * q, k=0, ldA=1)
            blas.copy(bz21, tmp)
            base.gemv(P, tmp, bz21, alpha=-1.0, beta=1.0, trans='T')
            blas.gemm(V2, bz21, tmp)
            blas.gemm(tmp, V1, bz21)

            # zs := -zs + r' * [ 0, X'; X, 0 ] * r
            #     = r' * [ -bX1, X'; X, -bX2 ] * r.

            # a := bz21 * [ r11, r12 ]
            #   =  X * [ r11, r12 ]
            blas.gemm(bz21, r, a, m=p, n=p + q, k=q, ldA=p, ldC=p + q)

            # z := -z + [ r21, r22 ]' * a + a' * [ r21, r22 ]
            #    = rti' * uzs * rti
            blas.syr2k(r,
                       a,
                       z,
                       trans='T',
                       beta=-1.0,
                       n=p + q,
                       k=p,
                       offsetA=q,
                       offsetC=m,
                       ldB=p + q,
                       ldC=p + q)

            # uX1 = -Z11 - bzs_11
            #     = -(r*zs*r')_11 - bzs_11
            # uX2 = -Z22 - bzs_22
            #     = -(r*zs*r')_22 - bzs_22

            blas.copy(bz11, x[1])
            blas.copy(bz22, x[2])

            # scale diagonal of zs by 1/2
            blas.scal(0.5, z, inc=p + q + 1, offset=m)

            # a := r*tril(zs)
            blas.copy(r, a)
            blas.trmm(z,
                      a,
                      side='R',
                      m=p + q,
                      n=p + q,
                      ldA=p + q,
                      ldB=p + q,
                      offsetA=m)

            # x[1] := -x[1] - a[:q,:] * r[:q, :]' - r[:q,:] * a[:q,:]'
            #       = -bzs_11 - (r*zs*r')_11
            blas.syr2k(a, r, x[1], n=q, alpha=-1.0, beta=-1.0)

            # x[2] := -x[2] - a[q:,:] * r[q:, :]' - r[q:,:] * a[q:,:]'
            #       = -bzs_22 - (r*zs*r')_22
            blas.syr2k(a,
                       r,
                       x[2],
                       n=p,
                       alpha=-1.0,
                       beta=-1.0,
                       offsetA=q,
                       offsetB=q)

            # scale diagonal of zs by 1/2
            blas.scal(2.0, z, inc=p + q + 1, offset=m)

        return f

    if C:
        sol = solvers.coneqp(Pf,
                             c,
                             Gf,
                             hh,
                             dims,
                             Af,
                             kktsolver=F,
                             xnewcopy=xnewcopy,
                             xdot=xdot,
                             xaxpy=xaxpy,
                             xscal=xscal)
    else:
        sol = solvers.conelp(c,
                             Gf,
                             hh,
                             dims,
                             Af,
                             kktsolver=F,
                             xnewcopy=xnewcopy,
                             xdot=xdot,
                             xaxpy=xaxpy,
                             xscal=xscal)

    if sol['status'] is 'optimal':
        x = sol['x'][0]
        z = sol['z'][:m]
        Z = sol['z'][m:]
        Z.size = (p + q, p + q)
        Z = -2.0 * Z[-p:, :q]

    elif sol['status'] is 'primal infeasible':
        x = None
        z = sol['z'][:m]
        Z = sol['z'][m:]
        Z.size = (p + q, p + q)
        Z = -2.0 * Z[-p:, :q]

    else:
        x, z, Z = None, None, None

    return {'status': sol['status'], 'x': x, 'z': z, 'Z': Z}
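The reformulation in the comment block above rests on the standard semidefinite characterization of the nuclear norm (stated here for reference; not part of the original file):

    || M ||_*  =  min over X1, X2 of   (1/2) * ( tr X1 + tr X2 )
                  subject to           [ X1   M' ]
                                       [ M    X2 ]  >= 0   (positive semidefinite)

With M = A(x) + B, this is why c places the value 1/2 on the diagonals of the X1 and X2 blocks.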
Example #20
  def train(self, data, slices=[[0,[0]]]):
    #self.gammas = self.determine_gammas_from(data)
    self.gammas = [.1,]
    print "Gammas determined: %s" % str(self.gammas)
    
    # [gamma][sequence offset][dimension]
    #self.active_slices = np.mgrid[0:1,0:data.shape[1]].T.reshape(data.shape[1],2).tolist()
    # Make a single slice consisting of the 1st sequence element and all 3 dimensions
    #self.active_slices = [ [0,[0,1]], [0,[0]] ]
    self.active_slices = slices
    
    # Working with 1 sequence element for now
    sequences = data[:-1].astype('float32').reshape(data.shape[0]-1,1)
    labels = data[1:].astype('float32').reshape(data.shape[0]-1,1)
    
    l = sequences.shape[0]
    jitter = ( ( np.random.randn(l,1) / 10 ) ).astype('float32')
    jittery = ( ( np.random.randn(l,1) / 10 ) ).astype('float32')
    self.sequences = sequences + jitter
    self.labels = labels + jittery
    
    
    print "Calculating kernel matrix"
    kx = kernel_matrix(self.sequences.reshape(l,1,1), self.sequences.reshape(l,1,1), self.gammas[-1])
    ky = kernel_matrix(self.labels.reshape(l,1,1), self.labels.reshape(l,1,1), self.gammas[-1])
    sigma = 1000
    
    print "Constructing constraints"
    
    P = self.labels * self.labels.T * kx
    
    q = np.zeros((l,1))

    G_1 = self.labels.T * kx
    
    G_2 = -self.labels.T * kx
    
    h_1 = sigma + self.labels
    
    h_2 = sigma - self.labels
    
    G = np.vstack([G_1,G_2])
    h = np.vstack([h_1,h_2])
    
    A = kx
    
    b = np.ones((l,1))
        
    print "p(A[0])=%s" % A.shape[0]
    print "n(G[1],A[1])=%s or %s" % (G.shape[1], A.shape[1])
    print "rank P: %s" % rank(P)
    print "rank G: %s" % rank(G)
    print "rank A: %s" % rank(A)
    print "rand kernel: %s" % rank(kx)
    print "unique source: %s" % np.unique(self.sequences).shape[0]
    
    print "Solving"
    solution = solvers.coneqp( 
      matrix(P.astype('float')), 
      matrix(q.astype('float')), 
      matrix(G.astype('float')), 
      matrix(h.astype('float')),
      None,
      matrix(A.astype('float')),
      matrix(b.astype('float'))
    )
    
    print "Handling Solution"
    if solution['status'] == 'optimal':
      X = np.array( solution['x'] )
      #R_emp = np.array( solution['x'][-1] )
      #print solution['x']
      self.SV_mask = ( np.abs(X) < 1e-8 )
      self.beta = np.ma.compress_rows( np.ma.array( X, mask = self.SV_mask ) ).astype('float32')
      self.SVx = np.ma.compress_rows( np.ma.array( sequences, mask = np.repeat( self.SV_mask, sequences.shape[1], 1) ) ).astype('float32')
      self.SVy = np.ma.compress_rows( np.ma.array( labels.reshape(labels.shape[0],1), mask = self.SV_mask ) ).astype('float32')
      self.nSV = self.beta.shape[0]
    
      #print self.beta
      #print self.SVx.shape
      #print self.SVy.shape
      #print self.nSV
      #print self.SV_mask
      #print solution['x']
    print "--> SVM Trained: %s SV's of %s" % ( self.nSV, self.SV_mask.shape[0] ) 
Example #21
def l1regls(A, b):
    """
    
    Returns the solution of l1-norm regularized least-squares problem
  
        minimize || A*x - b ||_2^2  + || x ||_1.

    """

    m, n = A.size
    q = matrix(1.0, (2 * n, 1))
    q[:n] = -2.0 * A.T * b

    def P(u, v, alpha=1.0, beta=0.0):
        """
            v := alpha * 2.0 * [ A'*A, 0; 0, 0 ] * u + beta * v 
        """
        v *= beta
        v[:n] += alpha * 2.0 * A.T * (A * u[:n])

    def G(u, v, alpha=1.0, beta=0.0, trans='N'):
        """
            v := alpha*[I, -I; -I, -I] * u + beta * v  (trans = 'N' or 'T')
        """

        v *= beta
        v[:n] += alpha * (u[:n] - u[n:])
        v[n:] += alpha * (-u[:n] - u[n:])

    h = matrix(0.0, (2 * n, 1))

    # Customized solver for the KKT system
    #
    #     [  2.0*A'*A  0    I      -I     ] [x[:n] ]     [bx[:n] ]
    #     [  0         0   -I      -I     ] [x[n:] ]  =  [bx[n:] ].
    #     [  I        -I   -D1^-1   0     ] [zl[:n]]     [bzl[:n]]
    #     [ -I        -I    0      -D2^-1 ] [zl[n:]]     [bzl[n:]]
    #
    # where D1 = W['di'][:n]**2, D2 = W['di'][n:]**2.
    #
    # We first eliminate zl and x[n:]:
    #
    #     ( 2*A'*A + 4*D1*D2*(D1+D2)^-1 ) * x[:n] =
    #         bx[:n] - (D2-D1)*(D1+D2)^-1 * bx[n:] +
    #         D1 * ( I + (D2-D1)*(D1+D2)^-1 ) * bzl[:n] -
    #         D2 * ( I - (D2-D1)*(D1+D2)^-1 ) * bzl[n:]
    #
    #     x[n:] = (D1+D2)^-1 * ( bx[n:] - D1*bzl[:n]  - D2*bzl[n:] )
    #         - (D2-D1)*(D1+D2)^-1 * x[:n]
    #
    #     zl[:n] = D1 * ( x[:n] - x[n:] - bzl[:n] )
    #     zl[n:] = D2 * (-x[:n] - x[n:] - bzl[n:] ).
    #
    # The first equation has the form
    #
    #     (A'*A + D)*x[:n]  =  rhs
    #
    # and is equivalent to
    #
    #     [ D    A' ] [ x:n] ]  = [ rhs ]
    #     [ A   -I  ] [ v    ]    [ 0   ].
    #
    # It can be solved as
    #
    #     ( A*D^-1*A' + I ) * v = A * D^-1 * rhs
    #     x[:n] = D^-1 * ( rhs - A'*v ).

    S = matrix(0.0, (m, m))
    Asc = matrix(0.0, (m, n))
    v = matrix(0.0, (m, 1))

    def Fkkt(W):

        # Factor
        #
        #     S = A*D^-1*A' + I
        #
        # where D = 2*D1*D2*(D1+D2)^-1, D1 = d[:n]**-2, D2 = d[n:]**-2.

        d1, d2 = W['di'][:n]**2, W['di'][n:]**2

        # ds is square root of diagonal of D
        ds = math.sqrt(2.0) * div(mul(W['di'][:n], W['di'][n:]), sqrt(d1 + d2))
        d3 = div(d2 - d1, d1 + d2)

        # Asc = A*diag(d)^-1/2
        Asc = A * spdiag(ds**-1)

        # S = I + A * D^-1 * A'
        blas.syrk(Asc, S)
        S[::m + 1] += 1.0
        lapack.potrf(S)

        def g(x, y, z):

            x[:n] = 0.5 * (x[:n] - mul(d3, x[n:]) + mul(
                d1, z[:n] + mul(d3, z[:n])) - mul(d2, z[n:] - mul(d3, z[n:])))
            x[:n] = div(x[:n], ds)

            # Solve
            #
            #     S * v = 0.5 * A * D^-1 * ( bx[:n] -
            #         (D2-D1)*(D1+D2)^-1 * bx[n:] +
            #         D1 * ( I + (D2-D1)*(D1+D2)^-1 ) * bzl[:n] -
            #         D2 * ( I - (D2-D1)*(D1+D2)^-1 ) * bzl[n:] )

            blas.gemv(Asc, x, v)
            lapack.potrs(S, v)

            # x[:n] = D^-1 * ( rhs - A'*v ).
            blas.gemv(Asc, v, x, alpha=-1.0, beta=1.0, trans='T')
            x[:n] = div(x[:n], ds)

            # x[n:] = (D1+D2)^-1 * ( bx[n:] - D1*bzl[:n]  - D2*bzl[n:] )
            #         - (D2-D1)*(D1+D2)^-1 * x[:n]
            x[n:] = div( x[n:] - mul(d1, z[:n]) - mul(d2, z[n:]), d1+d2 )\
                - mul( d3, x[:n] )

            # zl[:n] = D1^1/2 * (  x[:n] - x[n:] - bzl[:n] )
            # zl[n:] = D2^1/2 * ( -x[:n] - x[n:] - bzl[n:] ).
            z[:n] = mul(W['di'][:n], x[:n] - x[n:] - z[:n])
            z[n:] = mul(W['di'][n:], -x[:n] - x[n:] - z[n:])

        return g

    return solvers.coneqp(P, q, G, h, kktsolver=Fkkt)['x'][:n]
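A minimal usage sketch (mine, not from the original file); it assumes the imports the function itself relies on are in scope, e.g. matrix, spdiag, mul, div, sqrt, blas, lapack and solvers from cvxopt, plus math:

from cvxopt import normal, solvers

solvers.options['show_progress'] = False
A = normal(50, 200)          # random 50 x 200 problem, so the l1 term matters
b = normal(50, 1)
x = l1regls(A, b)
nnz = sum(abs(x[k]) > 1e-6 for k in range(len(x)))
print("nonzero coefficients:", nnz)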
Example #22
File: tv.py Project: sanurielf/cvxopt
def tv(delta):
    """
        minimize    (1/2) * ||x-corr||_2^2 + delta * sum(y)
        subject to  -y <= D*x <= y
    
    Variables x (n), y (n-1).
    """

    q = matrix(0.0, (2*n-1,1))
    q[:n] = -corr  
    q[n:] = delta

    def P(u, v, alpha = 1.0, beta = 0.0):
        """
            v := alpha*u + beta*v
        """

        v *= beta
        v[:n] += alpha*u[:n]


    def G(u, v, alpha = 1.0, beta = 0.0, trans = 'N'):
        """
           v := alpha*[D, -I;  -D, -I] * u + beta * v  (trans = 'N')
           v := alpha*[D, -I;  -D, -I]' * u + beta * v  (trans = 'T')

        For an n-vector z, D*z = z[1:] - z[:-1].
        For an (n-1)-vector z, D'*z = [-z;0] + [0; z].
        """

        v *= beta
        if trans == 'N':
            y = u[1:n] - u[:n-1]
            v[:n-1] += alpha*(y - u[n:])
            v[n-1:] += alpha*(-y - u[n:])
        else:
            y = u[:n-1] - u[n-1:]
            v[:n-1] -= alpha * y
            v[1:n] += alpha * y
            v[n:] -= alpha * (u[:n-1] + u[n-1:])

    h = matrix(0.0, (2*(n-1),1))


    # Customized solver for KKT system with coefficient
    #
    #     [  I    0    D'   -D' ] 
    #     [  0    0   -I    -I  ] 
    #     [  D   -I   -D1    0  ] 
    #     [ -D   -I    0    -D2 ].
     
    # Diagonal and subdiagonal.
    Sd = matrix(0.0, (n,1))
    Se = matrix(0.0, (n-1,1))

    def Fkkt(W):
        """
        Factor the tridiagonal matrix

             S = I + 4.0 * D' * diag( d1.*d2./(d1+d2) ) * D 

        with d1 = W['di'][:n-1]**2 = diag(D1^-1) 
        d2 = W['di'][n-1:]**2 = diag(D2^-1).
        """

        d1 = W['di'][:n-1]**2
        d2 = W['di'][n-1:]**2
        d = 4.0*div( mul(d1,d2), d1+d2) 
        Sd[:] = 1.0
        Sd[:n-1] += d
        Sd[1:] += d
        Se[:] = -d
        lapack.pttrf(Sd, Se)

        def g(x, y, z):

            """
            Solve 

                [  I   0   D'  -D' ] [x[:n]   ]    [bx[:n]   ]
                [  0   0  -I   -I  ] [x[n:]   ] =  [bx[n:]   ]
                [  D  -I  -D1   0  ] [z[:n-1] ]    [bz[:n-1] ]
                [ -D  -I   0   -D2 ] [z[n-1:] ]    [bz[n-1:] ].

            First solve
                 
                S*x[:n] = bx[:n] + D' * ( (d1-d2) ./ (d1+d2) .* bx[n:] 
                    + 2*d1.*d2./(d1+d2) .* (bz[:n-1] - bz[n-1:]) ).

            Then take

                x[n:] = (d1+d2)^-1 .* ( bx[n:] - d1.*bz[:n-1] 
                         - d2.*bz[n-1:]  + (d1-d2) .* D*x[:n] ) 
                z[:n-1] = d1 .* (D*x[:n] - x[n:] - bz[:n-1])
                z[n-1:] = d2 .* (-D*x[:n] - x[n:] - bz[n-1:]).
            """

            # y = (d1-d2) ./ (d1+d2) .* bx[n:] + 
            #     2*d1.*d2./(d1+d2) .* (bz[:n-1] - bz[n-1:])
            y = mul( div(d1-d2, d1+d2), x[n:]) + \
                mul( 0.5*d, z[:n-1]-z[n-1:] ) 

            # x[:n] += D*y
            x[:n-1] -= y
            x[1:n] += y

            # x[:n] := S^-1 * x[:n]
            lapack.pttrs(Sd, Se, x) 

            # u = D*x[:n]
            u = x[1:n] - x[0:n-1]

            # x[n:] = (d1+d2)^-1 .* ( bx[n:] - d1.*bz[:n-1] 
            #     - d2.*bz[n-1:]  + (d1-d2) .* u) 
            x[n:] = div( x[n:] - mul(d1, z[:n-1]) - 
                mul(d2, z[n-1:]) + mul(d1-d2, u), d1+d2 )

            # z[:n-1] = d1 .* (D*x[:n] - x[n:] - bz[:n-1])
            # z[n-1:] = d2 .* (-D*x[:n] - x[n:] - bz[n-1:])
            z[:n-1] = mul(W['di'][:n-1], u - x[n:] - z[:n-1])
            z[n-1:] = mul(W['di'][n-1:], -u - x[n:] - z[n-1:])

        return g

    return solvers.coneqp(P, q, G, h, kktsolver = Fkkt)['x'][:n]
Example #23
# (truncated excerpt: the l1regls/Fkkt definitions this script relies on are
#  essentially the same as in Example #21)
x = solvers.coneqp(P, q, G, h, kktsolver=Fkkt)['x'][:n]

I = [k for k in range(n) if abs(x[k]) > 1e-2]
xls = +y
lapack.gels(A[:, I], xls)
ybp = A[:, I] * xls[:len(I)]

print("Sparse basis contains %d basis functions." % len(I))
print("Relative RMS error = %.1e." % (blas.nrm2(ybp - y) / blas.nrm2(y)))

if pylab_installed:
    pylab.figure(2, facecolor='w')
    pylab.subplot(211)
    pylab.plot(ts, y, '-', ts, ybp, 'r--')
    pylab.xlabel('t')
    pylab.ylabel('y(t), yhat(t)')
Example #24
### Simple QP
n = 4
S = matrix([[4e-2, 6e-3, -4e-3, 0.0], [6e-3, 1e-2, 0.0, 0.0],
            [-4e-3, 0.0, 2.5e-3, 0.0], [0.0, 0.0, 0.0, 0.0]])
pbar = matrix([.12, .10, .07, .03])
G = matrix(0.0, (n, n))
G[::n + 1] = -1.0
h = matrix(0.0, (n, 1))
A = matrix(1.0, (1, n))
b = matrix(1.0)

# Compute trade-off.
N = 100
mus = [10**(5.0 * t / N - 1.0) for t in range(N)]
sol = solvers.qp(mus[0] * S, -pbar, G, h, A, b)
sol = solvers.coneqp(mus[0] * S, -pbar, G, h, None, A, b)  # same problem via coneqp (default dims)

portfolios = [solvers.qp(mu * S, -pbar, G, h, A, b)['x'] for mu in mus]
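
# Follow-up sketch (uses the variables defined above): recover the expected
# return / risk trade-off curve from the computed portfolios.
from math import sqrt
from cvxopt import blas
returns = [blas.dot(pbar, x) for x in portfolios]
risks = [sqrt(blas.dot(x, S * x)) for x in portfolios]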

## From SOCP to Cone LP

c = matrix([-2., 1., 5.])
G = [matrix([[12., 13., 12.], [6., -3., -12.], [-5., -5., 6.]])]
G += [matrix([[3., 3., -1., 1.], [-6., -6., -9., 19.], [10., -2., -2., -3.]])]
h = [matrix([-12., -3., -2.]), matrix([27., 0., 3., -42.])]
sol = solvers.socp(c, Gq=G, hq=h)
sol['status']

c = matrix([-2., 1., 5.])
G = matrix([[12., 13., 12., 3., 3., -1., 1.],
            [6., -3., -12., -6., -6., -9., 19.],
Example #25
0
File: qp_test.py Project: bpiwowar/kqp
def doit(name, n,r, g, a, nu, Lambda):
    zero_n = spmatrix([],[],[],(n,n))
    id_n = spmatrix(1., range(n), range(n))

    print
    print
    print "// ------- Generated from qp_test.py ---"
    print "template<typename Scalar> int qp_test_%s() {" % name
    print
    print "typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;"
    print "typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;"
    print
    print "// Problem"
    print "int n = %d;" % n
    print "int r = %d;" % r
    print "Matrix g(n,n);"
    print "Matrix a(r, n);"
    print "Vector nu(n, 1);"
    print "Scalar lambda = %50g;" % Lambda

    print_cxx("a", a)
    print "a.adjointInPlace();"
    print_cxx("g", g)
    
    print_cxx("nu", nu)

    print
    print "// Solve"

    print "kqp::cvxopt::ConeQPReturn<Scalar> result;"
    print "solve_qp(r, lambda, g, a, nu, result, options);"
    print

    # Construct P

    print "/*"
    print "Constructing P..."
    l = []
    for i in range(r+1):
        sl = []
        for j in range(r+1):
            if i < r and i == j: sl.append(g)
            else: sl.append(zero_n)
        l.append(sl)
    P = sparse(l)


    print "Constructing q..."
    q = matrix(0., (n * r + n, 1))
    for i in range(r):
        if DEBUG > 0: print "a[%d] = %s" % (i, a[i*n:(i+1)*n,0].T),
        q[i*n:(i+1)*n,0] = - g * a[i*n:(i+1)*n,0]
    q[n*r:n*r+n] = Lambda / 2.
    if DEBUG > 1: print "q = %s" % q.T,

    print "Constructing G (%d x %d) and q" % (2 * n*r, n*r + n)
	
    s = []
    for i in range(r): s += [nu[i]] * n
    s_nr = spmatrix(s, range(n*r), range(n*r))
    id_col = []
    for i in range(r):
        id_col.append(-id_n)
    id_col = sparse([id_col])
    G = sparse([ [ -s_nr, s_nr ], [id_col, id_col ] ])
    h = matrix(0., (2*n*r,1))

    dims = {"l": h.size[0], "q": 0, "s": 0}

    sol = solvers.coneqp(P, q, G, h) #, kktsolver=solver(n,r,g))

    print "*/"
    print
    print "// Solution"
    if sol["status"] != "optimal": raise "Solution is not optimal..."
    
    print "Eigen::VectorXd s_x(n*(r+1));"
    print_cxx("s_x", sol["x"])

    print """
Example #26
0
def filter_copycounts_inc_nodes(graph):
    '''This function runs the minimum cost flow algorithm to filter the copycounts of the graph.
    It can either minimize the normalized l1 norm of the error between the original and the new
    copycounts, or the normalized l2 norm of the same error.
    '''
    pen_constant = 10 #set this to 1 so that something like 1/10th of the flow is likely to flow through non-existent edges
    (A) = buildMatrixIncNodes(graph)
    [ta,tb]=A.size
    n = int(ta/2)
    m = int(tb)-n
    I = spmatrix(1.0, range(m+n), range(m+n))
    x = []
    for each in graph.edge_weights:
        x.append(float(each))
    for each in graph.node_weights:
        x.append(float(each))
    x_mat = matrix(x,(m+n,1))
    c = matrix(x,(m+n,1))
    L = matrix(graph.normalization,(m+n,1))
    penality = matrix(graph.penalization,(m+n,1))
    L_th = sum(L)/len(L)*0.001;

    for ctr in range(m+n):
        c[ctr] = x_mat[ctr]*L[ctr]
        if L[ctr]<L_th:
            x_mat[ctr]=0

    pen_cost = 1e10  #set ridiculously large number to force penalization to zero
    if run_penalized:
        q = -c+pen_cost*penality 
    else:
        q = -c
    G = -I

    h = - 0*x_mat  # h = 0, so the inequality G*f <= h reduces to f >= 0
    dims = {'l': G.size[0], 'q': [], 's': []}
    b = matrix(0.,(2*n,1))
    P = spdiag(graph.normalization)

    #Run it unpenalized in order to calculate the scale for the pen_cost
    if use_norm == 'l2':
        sol=solvers.coneqp(P, q, G, h, dims, A, b)
        x=sol['x']
    elif use_norm == 'l1':
        ## L1 norm cvx_opt 
        L_root = L**(.5)
        c_l1 = matrix([[x_mat*0, L_root]])
        A_l1 = sparse([[A], [A*0]])
        b_l1 = b
        h_l1 = matrix([[h, x_mat, -x_mat]])
        G_l1 = sparse([[G, I, G], [0*I, G, G]])
        print('Generated the matrices, running the solver:')
        if use_GLPK:
            sol = solvers.lp(c_l1, G_l1, h_l1, A_l1, b_l1, solver='glpk')
        else:
            sol = solvers.lp(c_l1, G_l1, h_l1, A_l1, b_l1)

        print('Solver finished')
        x_l1 = sol['x']
        x = x_l1[:m+n, :]

    
    opt_val = sol['primal objective']
    
    
    #Run it penalized to obtain the final answer
    if run_penalized:
        pen_cost = pen_constant*abs(opt_val)/sum(x)  #this is the real value of penality
        q = -c+pen_cost*penality  #check if this is a row vector
        sol=solvers.coneqp(P, q, G, h, dims, A, b)
        x=sol['x']

    ''' Check for negative elements '''    
    i = 0
    for element in x:
        if element < 0:
            x[i] = 0.0
        i += 1

    y = numpy.array(x)
    graph.filter_update_incnodes(y,m,n)
    #print(y)
    return x
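
# Generic sketch of the l1-to-LP lifting used in the 'l1' branch above:
# minimize w'*t over (f, t) subject to t >= |f - x| and f >= 0 (toy sizes,
# illustrative only; the equality constraints A*f = b are omitted here).
from cvxopt import matrix, sparse, spmatrix, solvers
mn = 4
I_mn = spmatrix(1.0, range(mn), range(mn))
w = matrix(1.0, (mn, 1))
x_tgt = matrix([1.0, 2.0, 3.0, 4.0])
c_l1 = matrix([matrix(0.0, (mn, 1)), w])                # objective [0; w]
G_l1 = sparse([[-I_mn, I_mn, -I_mn], [0.0*I_mn, -I_mn, -I_mn]])
h_l1 = matrix([matrix(0.0, (mn, 1)), x_tgt, -x_tgt])    # rows: f>=0, f-t<=x, -f-t<=-x
sol_l1 = solvers.lp(c_l1, G_l1, h_l1)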
Example #27
0
def _reconEngine(mmTheta_, vvSig_, iK):
    """
    This is the engine of the reconstruction.

    Basis pursuit problem:

       minimize    ||A*x - y||_2^2 + k * ||x||_1

    where:
         1. A - Theta matrix
            A = B * C:
                   B - observation matrix
                   C - dictionary matrix

         2. y - observed signal
         3. x - vector with coefficients we are looking for
         4. k - noise weight coefficient

    ----------------------------------------------------------------------

    The engine of this reconstruction is based on the fact that a problem:

        minimize    ||A*x - y||_2^2 + ||x||_1

    can be translated to:

        minimize    x'*A'*A*x - 2.0*y'*A*x + 1'*u
        subject to  -u <= x <= u

    variables x (n),  u (n).

    ----------------------------------------------------------------------

    Args:
        mmTheta_ (cvxopt matrix):  Theta matrix
        vvSig_ (cvxopt vector):   vector with an observed signal
        iK (float):               k parameter

    Returns:
        vvCoef (cvxopt vector):   vector with found coefficients

    NOTE: 'cvxopt vector' is nothing but a 'cvxopt matrix' of a size (N x 1)
    """

    # --------------------------------------------------------------------
    # Get the input Theta matrix and make it global
    global mmTh
    mmTh = mmTheta_

    # --------------------------------------------------------------------
    # Get the size of the Theta matrix
    global iR  # The number of rows
    global iC  # The number of columns
    iR, iC = mmTh.size

    # --------------------------------------------------------------------
    # Silence the output from cvxopt
    solvers.options["show_progress"] = False

    # --------------------------------------------------------------------

    # Q = -2.0 * A' * y
    vvQ = matrix(float(iK), (2 * iC, 1))
    blas.gemv(mmTh, vvSig_, vvQ, alpha=-2.0, trans="T")

    # Run the solver
    mmH = matrix(0.0, (2 * iC, 1))
    vvCoef = solvers.coneqp(P, vvQ, G, mmH, kktsolver=Fkkt)["x"][:iC]

    # --------------------------------------------------------------------
    # Return the vector with coefficients
    return vvCoef
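
# A hedged sketch of the dense P, G, h that the reformulation in the docstring
# implies; the real code above builds these elsewhere (as globals) and plugs in
# a custom kktsolver.  The name _bpDenseQP and its arguments are illustrative only.
from cvxopt import matrix, solvers

def _bpDenseQP(mmA, vvY, iK):
    iR, iC = mmA.size
    # P = [ 2*A'*A  0 ; 0  0 ],   q = [ -2*A'*y ; k*1 ]
    P = matrix(0.0, (2 * iC, 2 * iC))
    P[:iC, :iC] = 2.0 * mmA.T * mmA
    q = matrix(float(iK), (2 * iC, 1))
    q[:iC] = -2.0 * mmA.T * vvY
    # -u <= x <= u   <=>   [ I  -I ; -I  -I ] * [x; u] <= 0
    I = matrix(0.0, (iC, iC))
    I[::iC + 1] = 1.0
    G = matrix([[I, -I], [-I, -I]])
    h = matrix(0.0, (2 * iC, 1))
    return solvers.coneqp(P, q, G, h)['x'][:iC]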
Example #28
0
# The quadratic cone program of section 8.2 (Quadratic cone programs).

# minimize   (1/2)*x'*A'*A*x - b'*A*x
# subject to x >= 0
#            ||x||_2 <= 1

from cvxopt import matrix, solvers
A = matrix([[.3, -.4, -.2, -.4, 1.3], [.6, 1.2, -1.7, .3, -.3],
            [-.3, .0, .6, -1.2, -2.0]])
b = matrix([1.5, .0, -1.2, -.7, .0])
m, n = A.size

I = matrix(0.0, (n, n))
I[::n + 1] = 1.0
G = matrix([-I, matrix(0.0, (1, n)), I])
h = matrix(n * [0.0] + [1.0] + n * [0.0])
dims = {'l': n, 'q': [n + 1], 's': []}
x = solvers.coneqp(A.T * A, -A.T * b, G, h, dims)['x']
print("\nx = \n")
print(x)
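
# Optional sanity check (a sketch): the computed x should satisfy the cone
# constraints x >= 0 and ||x||_2 <= 1 up to solver tolerance.
from cvxopt import blas
print("min(x) = %.2e,  ||x||_2 = %.4f" % (min(x), blas.nrm2(x)))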
Example #29
0
File: kkt.py Project: daleroberts/cvxopt
    K = matrix(P + R)

    def solve(bx, by, bz):
        # bx <- bx - W^{-1} * W^{-T} * bz
        #     = bx - R * bz
        gemv(R, bz, bx, trans='N', alpha=-1.0, beta=1.0)

        # solve K * x = bx - R * bz -> bx
        gesv(K, bx)

        # bz <- W^{-T} * (-x - bz)
        gemv(Wd, (-bx - bz), bz)

    return solve

ui = solvers.coneqp(P, f, G, c)['x']
u  = np.zeros((n+2,))
u[1:-1] = ui[:,0].T

vi = solvers.coneqp(P, f, G, c, kktsolver=fKKT1)['x']
v  = np.zeros((n+2,))
v[1:-1] = vi[:,0].T

wi = solvers.coneqp(P, f, G, c, kktsolver=fKKT2)['x']
w  = np.zeros((n+2,))
w[1:-1] = wi[:,0].T

def pretty_print(*arrays):
	print 'Solution:'
	for i in range(0,n+2):
		for a in arrays:
Example #30
0
x = numpy.array(theta).reshape(p,1)
radius = 1.0

GTemp = numpy.append(numpy.zeros((1,p)), numpy.eye(p), axis=0)
hTemp = numpy.zeros(p+1)
hTemp[0] += radius

GTemp = numpy.append(G, GTemp, axis=0)
hTemp = numpy.append(h - G.dot(x), hTemp)
dims = {'l': G.shape[0], 'q': [p+1], 's':  []}

H = rosen_hess(theta)
H = numpy.eye(p)
g = rosen_der(theta)

qpOut = solvers.coneqp(matrix(H), matrix(g), matrix(GTemp), matrix(hTemp), dims)

# converting to a cp

def F(x=None, z=None):
    if x is None: return 0, matrix(0.0,(p,1))
    # H = matrix(rosen_hess(numpy.array(x).ravel()))
    # H = matrix(rosen_hess(theta))
    H = matrix(numpy.eye(p))
    g = matrix(rosen_der(numpy.array(x).ravel()))
    g = matrix(rosen_der(theta))
    f = 0.5 * x.T * H * x + g.T * x
    df = (H * x + g).T
    #df = -(g).T
    if z is None: return f, df
    H = z[0] * H
Example #31
0
    def _coneqp(self, gram, targets, probs):
        n_quantiles = probs.size  # Number of quantiles to predict
        n_coefs = gram.shape[0]  # Number of variables
        n_samples = n_coefs // n_quantiles
        # Quantiles levels
        kronprobs = kron(ones(int(n_coefs / n_quantiles)), probs.squeeze())

        solvers.options['show_progress'] = self.verbose
        if self.tol:
            solvers.options['reltol'] = self.tol

        if self.eps == 0:
            # Quadratic part of the objective
            gram = matrix(gram)
            # Linear part of the objective
            q_lin = matrix(-kron(targets, ones(n_quantiles)))

            # LHS of the inequality constraint
            g_lhs = matrix(r_[eye(n_coefs), -eye(n_coefs)])
            # RHS of the inequality
            h_rhs = matrix(r_[self.reg_c_ * kronprobs,
                              self.reg_c_ * (1 - kronprobs)])
            # LHS of the equality constraint
            lhs_eqc = matrix(kron(ones(n_samples), eye(n_quantiles)))

            # Solve the dual optimization problem
            self.sol_ = solvers.qp(gram, q_lin, g_lhs, h_rhs, lhs_eqc,
                                   matrix(zeros(n_quantiles)))

            # Set coefs
            coefs = asarray(self.sol_['x'])
        else:
            def build_lhs(m, p):
                # m: n_samples
                # p: n_quantiles
                n = m*p  # n_variables

                # Get the norm bounds (m last variables)
                A = zeros(p+1)
                A[0] = -1
                A = kron(eye(m), A).T
                # Get the m p-long vectors
                B = kron(eye(m), c_[zeros(p), eye(p)].T)
                # Box constraint
                C = c_[r_[eye(n), -eye(n)], zeros((2*n, m))]
                # Set everything together
                C = r_[C, c_[B, A]]
                return C

            # Quadratic part of the objective
            gram = matrix(r_[c_[gram, zeros((n_coefs, n_samples))],
                             zeros((n_samples, n_coefs+n_samples))])
            # Linear part of the objective
            q_lin = matrix(r_[-kron(targets, ones(n_quantiles)),
                              ones(n_samples)*self.eps])

            # LHS of the inequality constraint
            g_lhs = matrix(build_lhs(n_samples, n_quantiles))
            # RHS of the inequality
            h_rhs = matrix(r_[self.reg_c_ * kronprobs,
                              self.reg_c_ * (1-kronprobs),
                              zeros(n_samples * (n_quantiles+1))])
            # LHS of the equality constraint
            lhs_eqc = matrix(c_[kron(ones(n_samples),
                                eye(n_quantiles)),
                                zeros((n_quantiles, n_samples))])
            # Parameters of the optimization problem
            dims = {'l': 2*n_coefs, 'q': [n_quantiles+1]*n_samples, 's': []}

            # Solve the dual optimization problem
            self.sol_ = solvers.coneqp(gram, q_lin, g_lhs, h_rhs, dims,
                                       lhs_eqc, matrix(zeros(n_quantiles)))

            # Set coefs
            coefs = asarray(self.sol_['x'][:n_coefs])

        # Set the intercept

        # Erase the previous intercept before prediction
        self.model_ = {'coefs': coefs, 'intercept': 0}
        predictions = self.predict(self.linop_.X)
        if predictions.ndim < 2:
            predictions = predictions.reshape(1, -1)  # 2D array
        intercept = [percentile(targets - pred, 100. * prob) for
                     (pred, prob) in zip(predictions, probs)]
        intercept = asarray(intercept).squeeze()
        self.model_ = {'coefs': coefs, 'intercept': intercept}
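
# Sketch (toy sizes, illustrative only) of how `dims` partitions the rows of
# g_lhs / h_rhs in the eps > 0 branch above: first 2*n_coefs componentwise
# rows for the box constraints on the dual coefficients, then one
# (n_quantiles + 1)-row second-order-cone block per sample.
n_samples, n_quantiles = 5, 3
n_coefs = n_samples * n_quantiles
dims = {'l': 2 * n_coefs, 'q': [n_quantiles + 1] * n_samples, 's': []}
row_blocks = [('box', 2 * n_coefs)] + \
             [('soc_%d' % i, n_quantiles + 1) for i in range(n_samples)]
assert sum(size for _, size in row_blocks) == dims['l'] + sum(dims['q'])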
Example #32
0
    def OHMCPricer(self,
                   option_type='c',
                   func_list=[lambda x: x**0, lambda x: x]):
        def _calculate_Q_matrix(S_k, S_kp1, df, df2, func_list):
            dS = df2 * S_kp1 - S_k
            A = np.array([func(S_k) for func in func_list]).T
            B = (np.array([func(S_k) for func in func_list]) * dS).T
            return np.concatenate((-A, B), axis=1)

        price_matrix = self.price_matrix
        # k = n_steps
        dt = self.T / self.n_steps
        df = np.exp(-self.r * dt)
        df2 = np.exp(-(self.r - self.q) * dt)
        n_basis = len(func_list)
        n_trails = self.n_trails
        n_steps = self.n_steps

        if (option_type == "c"):
            payoff = (price_matrix[:, n_steps] - strike)
        elif (option_type == "p"):
            payoff = (strike - price_matrix[:, n_steps])
        else:
            print("please enter the option type: (c/p)")
            return

        payoff = matrix(np.where(payoff < 0, 0, payoff))
        vk = payoff * df
        #         print("regular MC price",regular_mc_price)

        # k = 1,...,n_steps-1
        for k in range(n_steps - 1, 0, -1):
            Sk = price_matrix[:, k]
            Skp1 = price_matrix[:, k + 1]
            Qk = matrix(_calculate_Q_matrix(Sk, Skp1, df, df2, func_list))
            P = Qk.T * Qk
            q = Qk.T * vk
            A = matrix(np.ones(n_trails, dtype=np.float64)).T * Qk
            b = -matrix(np.ones(n_trails, dtype=np.float64)).T * vk
            sol = solvers.coneqp(P=P, q=q, A=A, b=b)
            ak = sol["x"][:n_basis]
            bk = sol["x"][n_basis:]
            vk = matrix(
                np.array([func(price_matrix[:, k])
                          for func in func_list])).T * ak * df

        # k = 0
        v0 = vk
        S0 = price_matrix[:, 0]
        S1 = price_matrix[:, 1]
        dS0 = df2 * S1 - S0
        Q0 = np.concatenate(
            (-np.ones(n_trails)[:, np.newaxis], dS0[:, np.newaxis]), axis=1)
        Q0 = matrix(Q0)
        P = Q0.T * Q0
        q = Q0.T * v0
        A = matrix(np.ones(n_trails, dtype=np.float64)).T * Q0
        b = -matrix(np.ones(n_trails, dtype=np.float64)).T * v0
        C1 = matrix(ak).T * np.array([func(S1) for func in func_list]).T
        sol = solvers.coneqp(P=P, q=q, A=A, b=b)
        self.sol = sol
        residual_risk = (v0.T * v0 + 2 * sol["primal objective"]) / n_trails
        self.residual_risk = residual_risk[0]  # the value of unit matrix

        return sol["x"][0]
Example #33
0
def robsvm(X, d, gamma, P, e):
    """
    Solves the following robust SVM training problem:

       minimize    (1/2) w'*w + gamma*sum(v)
       subject to  diag(d)*(X*w + b*1) >= 1 - v + E*u
                   || S_j*w ||_2 <= u_j,  j = 1...t
                   v >= 0

    The variables are w, b, v, and u. The matrix E is a selector
    matrix with zeros and one '1' per row.  E_ij = 1 means that the
    i'th training vector is associated with the j'th uncertainty
    ellipsoid.

    A custom KKT solver that exploits low-rank structure is used, and
    a positive definite system of equations of order n is
    formed and solved at each iteration.

    ARGUMENTS

    X             m-by-n matrix with training vectors as rows

    d             m-vector with training labels (-1,+1)

    P             list of t symmetric matrices of order n

    e             m-vector where e[i] is the index of the uncertainty
                  ellipsoid associated with the i'th training vector

    RETURNS

    w        n-vector

    b        scalar

    u        t-vector

    v        m-vector

    iters    number of interior-point iterations

    """

    m,n = X.size
    assert type(P) is list, "P must be a list of t symmetric positive definite matrices of order n."
    k = len(P)
    if k > 0:
        assert e.size == (m,1), "e must be an m-vector."
        assert max(e) < k and min(e) >= 0, "e[i] must be in {0,1,...,k-1}."

    E = spmatrix(1.,e,range(m),(k,m)).T
    d = matrix(d,tc='d')
    q = matrix(0.0, (n+k+1+m,1))
    q[n+k+1:] = gamma
    h = matrix(0.0,(2*m+k*(n+1),1))
    h[:m] = -1.0

    # linear operators Q and G
    def Q(x, y, alpha = 1.0, beta = 0.0, trans = 'N'):
        y[:n] = alpha * x[:n] + beta * y[:n]

    def G(x, y, alpha = 1.0, beta = 0.0, trans = 'N'):
        """
        Implements the linear operator

               [ -DX    E   -d   -I ]  
               [  0     0    0   -I ]  
               [  0   -e_1'  0    0 ]
          G =  [ -P_1'  0    0    0 ]     
               [  .     .    .    . ]    
               [  0   -e_k'  0    0 ]        
               [ -P_k'  0    0    0 ]       

        and its adjoint G'.

        """
        if trans == 'N':
            tmp = +y[:m]
            # y[:m] = alpha*(-DXw + Et - d*b - v) + beta*y[:m]
            base.gemv(E, x[n:n+k], tmp, alpha = alpha, beta = beta)
            blas.axpy(x[n+k+1:], tmp, alpha = -alpha)
            blas.axpy(d, tmp, alpha = -alpha*x[n+k])
            y[:m] = tmp

            base.gemv(X, x[:n], tmp, alpha = alpha, beta = 0.0)
            tmp = mul(d,tmp)
            y[:m] -= tmp
            
            # y[m:2*m] = -v
            y[m:2*m] = -alpha * x[n+k+1:] + beta * y[m:2*m]

            # SOC 1,...,k
            for i in range(k):
                l = 2*m+i*(n+1)
                y[l] = -alpha * x[n+i] + beta * y[l]
                y[l+1:l+1+n] = -alpha * P[i] * x[:n] + beta * y[l+1:l+1+n];

        else:
            tmp1 = mul(d,x[:m])
            tmp2 = y[:n]
            blas.gemv(X, tmp1, tmp2, trans = 'T', alpha = -alpha, beta = beta)
            for i in range(k):
                l = 2*m+1+i*(n+1)
                blas.gemv(P[i], x[l:l+n], tmp2, trans = 'T', alpha = -alpha, beta = 1.0)
            y[:n] = tmp2

            tmp2 = y[n:n+k]
            base.gemv(E, x[:m], tmp2, trans = 'T', alpha = alpha, beta = beta)
            blas.axpy(x[2*m:2*m+k*(1+n):n+1], tmp2, alpha = -alpha)
            y[n:n+k] = tmp2

            y[n+k] = -alpha * blas.dot(d,x[:m]) + beta * y[n+k]
            y[n+k+1:] = -alpha * (x[:m] + x[m:2*m]) + beta * y[n+k+1:]

    # precompute products Pi'*Pi
    Pt = []
    for p in P:
        y = matrix(0.0, (n,n))
        blas.syrk(p, y, trans = 'T')
        Pt.append(y)

    # scaled hyperbolic Householder transformations
    def qscal(u, beta, v, inv = False):
        """
        Transforms the vector u as
           u := beta * (2*v*v' - J) * u
        if 'inv' is False and as
           u := (1/beta) * (2*J*v*v'*J - J) * u
        if 'inv' is True.
        """
        if not inv:
            tmp = blas.dot(u,v)
            u[0] *= -1
            u += 2 * v * tmp
            u *= beta
        else:
            u[0] *= -1.0
            tmp = blas.dot(v,u)
            u[0] -= 2*v[0] * tmp 
            u[1:] += 2*v[1:] * tmp
            u /= beta

    # custom KKT solver
    def F(W): 
        """
        Custom solver for the system

        [  It  0   0    Xt'     0     At1' ...  Atk' ][ dwt  ]   [ rwt ]
        [  0   0   0    -d'     0      0   ...   0   ][ db   ]   [ rb  ]
        [  0   0   0    -I     -I      0   ...   0   ][ dv   ]   [ rv  ]
        [  Xt -d  -I  -Wl1^-2                        ][ dzl1 ]   [ rl1 ]
        [  0   0  -I         -Wl2^-2                 ][ dzl2 ] = [ rl2 ]
        [ At1  0   0                -W1^-2           ][ dz1  ]   [ r1  ] 
        [  |   |   |                       .         ][  |   ]   [  |  ]
        [ Atk  0   0                          -Wk^-2 ][ dzk  ]   [ rk  ]

        where

        It = [ I 0 ]  Xt = [ -D*X E ]  Ati = [ 0   -e_i' ]  
             [ 0 0 ]                         [ -Pi   0   ] 

        dwt = [ dw ]  rwt = [ rw ]
              [ dt ]        [ rt ].

        """

        # scalings and 'intermediate' vectors
        # db = inv(Wl1)^2 + inv(Wl2)^2
        db = W['di'][:m]**2 + W['di'][m:2*m]**2
        dbi = div(1.0,db)
        
        # dt = I - inv(Wl1)*Dbi*inv(Wl1)
        dt = 1.0 - mul(W['di'][:m]**2,dbi)
        dtsqrt = sqrt(dt)

        # lam = Dt*inv(Wl1)*d
        lam = mul(dt,mul(W['di'][:m],d))

        # lt = E'*inv(Wl1)*lam
        lt = matrix(0.0,(k,1))
        base.gemv(E, mul(W['di'][:m],lam), lt, trans = 'T')

        # Xs = sqrt(Dt)*inv(Wl1)*X
        tmp = mul(dtsqrt,W['di'][:m])
        Xs = spmatrix(tmp,range(m),range(m))*X

        # Es = D*sqrt(Dt)*inv(Wl1)*E
        Es = spmatrix(mul(d,tmp),range(m),range(m))*E

        # form Ab = I + sum((1/bi)^2*(Pi'*Pi + 4*(v'*v + 1)*Pi'*y*y'*Pi)) + Xs'*Xs
        #  and Bb = -sum((1/bi)^2*(4*ui*v'*v*Pi'*y*ei')) - Xs'*Es
        #  and D2 = Es'*Es + sum((1/bi)^2*(1+4*ui^2*(v'*v - 1))
        Ab = matrix(0.0,(n,n))
        Ab[::n+1] = 1.0
        base.syrk(Xs,Ab,trans = 'T', beta = 1.0)
        Bb = matrix(0.0,(n,k))
        Bb = -Xs.T*Es # inefficient!?
        D2 = spmatrix(0.0,range(k),range(k))
        base.syrk(Es,D2,trans = 'T', partial = True)
        d2 = +D2.V
        del D2
        py = matrix(0.0,(n,1))
        for i in range(k):
            binvsq = (1.0/W['beta'][i])**2
            Ab += binvsq*Pt[i]
            dvv = blas.dot(W['v'][i],W['v'][i])
            blas.gemv(P[i], W['v'][i][1:], py, trans = 'T', alpha = 1.0, beta = 0.0)
            blas.syrk(py, Ab, alpha = 4*binvsq*(dvv+1), beta = 1.0)
            Bb[:,i] -= 4*binvsq*W['v'][i][0]*dvv*py
            d2[i] += binvsq*(1+4*(W['v'][i][0]**2)*(dvv-1))
        
        d2i = div(1.0,d2)
        d2isqrt = sqrt(d2i)

        # compute a = alpha - lam'*inv(Wl1)*E*inv(D2)*E'*inv(Wl1)*lam
        alpha = blas.dot(lam,mul(W['di'][:m],d))
        tmp = matrix(0.0,(k,1))
        base.gemv(E,mul(W['di'][:m],lam), tmp, trans = 'T')
        tmp = mul(tmp, d2isqrt) #tmp = inv(D2)^(1/2)*E'*inv(Wl1)*lam
        a = alpha - blas.dot(tmp,tmp)

        # compute M12 = X'*D*inv(Wl1)*lam + Bb*inv(D2)*E'*inv(Wl1)*lam
        tmp = mul(tmp, d2isqrt)
        M12 = matrix(0.0,(n,1))
        blas.gemv(Bb,tmp,M12, alpha = 1.0)
        tmp = mul(d,mul(W['di'][:m],lam))
        blas.gemv(X,tmp,M12, trans = 'T', alpha = 1.0, beta = 1.0)

        # form and factor M
        sBb = Bb * spmatrix(d2isqrt,range(k), range(k)) 
        base.syrk(sBb, Ab, alpha = -1.0, beta = 1.0)
        M = matrix([[Ab, M12.T],[M12, a]])
        lapack.potrf(M)
        
        def f(x,y,z):
            
            # residuals
            rwt = x[:n+k]
            rb = x[n+k]
            rv = x[n+k+1:n+k+1+m]
            iw_rl1 = mul(W['di'][:m],z[:m])
            iw_rl2 = mul(W['di'][m:2*m],z[m:2*m])
            ri = [z[2*m+i*(n+1):2*m+(i+1)*(n+1)] for i in range(k)]
            
            # compute 'derived' residuals 
            # rbwt = rwt + sum(Ai'*inv(Wi)^2*ri) + [-X'*D; E']*inv(Wl1)^2*rl1
            rbwt = +rwt
            for i in range(k):
                tmp = +ri[i]
                qscal(tmp,W['beta'][i],W['v'][i],inv=True)
                qscal(tmp,W['beta'][i],W['v'][i],inv=True)
                rbwt[n+i] -= tmp[0]
                blas.gemv(P[i], tmp[1:], rbwt, trans = 'T', alpha = -1.0, beta = 1.0)
            tmp = mul(W['di'][:m],iw_rl1)
            tmp2 = matrix(0.0,(k,1))
            base.gemv(E,tmp,tmp2,trans='T')
            rbwt[n:] += tmp2
            tmp = mul(d,tmp) # tmp = D*inv(Wl1)^2*rl1
            blas.gemv(X,tmp,rbwt,trans='T', alpha = -1.0, beta = 1.0)
            
            # rbb = rb - d'*inv(Wl1)^2*rl1
            rbb = rb - sum(tmp)

            # rbv = rv - inv(Wl2)*rl2 - inv(Wl1)^2*rl1
            rbv = rv - mul(W['di'][m:2*m],iw_rl2) - mul(W['di'][:m],iw_rl1) 
            
            # [rtw;rtt] = rbwt + [-X'*D; E']*inv(Wl1)^2*inv(Db)*rbv 
            tmp = mul(W['di'][:m]**2, mul(dbi,rbv))
            rtt = +rbwt[n:] 
            base.gemv(E, tmp, rtt, trans = 'T', alpha = 1.0, beta = 1.0)
            rtw = +rbwt[:n]
            tmp = mul(d,tmp)
            blas.gemv(X, tmp, rtw, trans = 'T', alpha = -1.0, beta = 1.0)

            # rtb = rbb - d'*inv(Wl1)^2*inv(Db)*rbv
            rtb = rbb - sum(tmp)
            
            # solve M*[dw;db] = [rtw - Bb*inv(D2)*rtt; rtb + lt'*inv(D2)*rtt]
            tmp = mul(d2i,rtt)
            tmp2 = matrix(0.0,(n,1))
            blas.gemv(Bb,tmp,tmp2)
            dwdb = matrix([rtw - tmp2,rtb + blas.dot(mul(d2i,lt),rtt)]) 
            lapack.potrs(M,dwdb)

            # compute dt = inv(D2)*(rtt - Bb'*dw + lt*db)
            tmp2 = matrix(0.0,(k,1))
            blas.gemv(Bb, dwdb[:n], tmp2, trans='T')
            dt = mul(d2i, rtt - tmp2 + lt*dwdb[-1])

            # compute dv = inv(Db)*(rbv + inv(Wl1)^2*(E*dt - D*X*dw - d*db))
            dv = matrix(0.0,(m,1))
            blas.gemv(X,dwdb[:n],dv,alpha = -1.0)
            dv = mul(d,dv) - d*dwdb[-1]
            base.gemv(E, dt, dv, beta = 1.0)
            tmp = +dv  # tmp = E*dt - D*X*dw - d*db
            dv = mul(dbi, rbv + mul(W['di'][:m]**2,dv))

            # compute wdz1 = inv(Wl1)*(E*dt - D*X*dw - d*db - dv - rl1)
            wdz1 = mul(W['di'][:m], tmp - dv) - iw_rl1

            # compute wdz2 = - inv(Wl2)*(dv + rl2)
            wdz2 = - mul(W['di'][m:2*m],dv) - iw_rl2

            # compute wdzi = inv(Wi)*([-ei'*dt; -Pi*dw] - ri)
            wdzi = []
            tmp = matrix(0.0,(n,1))
            for i in range(k):
                blas.gemv(P[i],dwdb[:n],tmp, alpha = -1.0, beta = 0.0) 
                tmp1 = matrix([-dt[i],tmp])
                blas.axpy(ri[i],tmp1,alpha = -1.0)
                qscal(tmp1,W['beta'][i],W['v'][i],inv=True)
                wdzi.append(tmp1)

            # solution
            x[:n] = dwdb[:n]
            x[n:n+k] = dt
            x[n+k] = dwdb[-1]
            x[n+k+1:] = dv
            z[:m] = wdz1 
            z[m:2*m] = wdz2
            for i in range(k):
                z[2*m+i*(n+1):2*m+(i+1)*(n+1)] = wdzi[i]

        return f

    # solve cone QP and return solution
    sol = solvers.coneqp(Q, q, G, h, dims = {'l':2*m,'q':[n+1 for i in range(k)],'s':[]}, kktsolver = F)
    return sol['x'][:n], sol['x'][n+k], sol['x'][n:n+k], sol['x'][n+k+1:], sol['iterations']
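
# A hedged usage sketch (random data, sizes illustrative only): m training
# vectors in R^n with labels +/-1 and t uncertainty ellipsoids.  Assumes the
# same cvxopt imports that robsvm itself uses (matrix, spmatrix, blas, ...).
from cvxopt import matrix, normal
m_demo, n_demo, t_demo = 60, 4, 2
X_demo = normal(m_demo, n_demo)
d_demo = matrix([1 if i < m_demo // 2 else -1 for i in range(m_demo)])
P_demo = []
for j in range(t_demo):
    S_j = normal(n_demo, n_demo)
    P_demo.append(0.1 * S_j.T * S_j)      # symmetric PSD shape matrices
e_demo = matrix([i % t_demo for i in range(m_demo)])
w, b, u, v, iters = robsvm(X_demo, d_demo, 2.0, P_demo, e_demo)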
Example #34
0
def softmargin_completion(Q, d, gamma):
    """
    Solves the QP

        minimize    (1/2)*y'*Qc^{-1}*y + gamma*sum(v)
        subject to  diag(d)*(y + b*ones) + v >= 1
                    v >= 0

    (with variables y, b, v) and its dual, the 'soft-margin' SVM problem,

        maximize    -(1/2)*z'*Qc*z + d'*z
        subject to  0 <= diag(d)*z <= gamma*ones
                    sum(z) = 0

    (with variables z).

    Qc is the max determinant completion of Q.


    Input arguments.

        Q is a sparse N x N sparse matrix with chordal sparsity pattern
            and a positive definite completion

        d is an N-vector of labels -1 or 1.

        gamma is a positive parameter.

        The chordal sparsity pattern of Q is determined internally
            (via a maximum cardinality search).


    Output.

        z, y, b, v, optval, L, iters

    """

    if verbose: solvers.options['show_progress'] = True
    else: solvers.options['show_progress'] = False

    N = Q.size[0]
    p = chompack.maxcardsearch(Q)
    symb = chompack.symbolic(Q, p)
    Qc = chompack.cspmatrix(symb) + Q

    # Qinv is the inverse of the max. determinant p.d. completion of Q
    Lc = Qc.copy()
    chompack.completion(Lc)
    Qinv = Lc.copy()
    chompack.llt(Qinv)
    Qinv = Qinv.spmatrix(reordered=False)
    Qinv = chompack.symmetrize(Qinv)

    def P(u, v, alpha=1.0, beta=0.0):
        """
            v := alpha * [ Qc^-1, 0, 0;  0, 0, 0;  0, 0, 0 ] * u + beta * v
        """

        v *= beta
        base.symv(Qinv, u, v, alpha=alpha, beta=1.0)

    def G(u, v, alpha=1.0, beta=0.0, trans='N'):
        """
        If trans is 'N':

            v := alpha * [ -diag(d),  -d,  -I;  0,  0,  -I ] * u + beta * v.

        If trans is 'T':

            v := alpha * [ -diag(d), 0;  -d', 0;  -I, -I ] * u + beta * v.
        """

        v *= beta

        if trans == 'N':
            v[:N] -= alpha * (base.mul(d, u[:N] + u[N]) + u[-N:])
            v[-N:] -= alpha * u[-N:]

        else:
            v[:N] -= alpha * base.mul(d, u[:N])
            v[N] -= alpha * blas.dot(d, u, n=N)
            v[-N:] -= alpha * (u[:N] + u[N:])

    K = spmatrix(0.0, Qinv.I, Qinv.J)
    dy1, dy2 = matrix(0.0, (N, 1)), matrix(0.0, (N, 1))

    def Fkkt(W):
        """
        Custom KKT solver for

            [  Qinv  0   0  -D    0  ] [ ux_y ]   [ bx_y ]
            [  0     0   0  -d'   0  ] [ ux_b ]   [ bx_b ]
            [  0     0   0  -I   -I  ] [ ux_v ] = [ bx_v ]
            [ -D    -d  -I  -D1   0  ] [ uz_z ]   [ bz_z ]
            [  0     0  -I   0   -D2 ] [ uz_w ]   [ bz_w ]

        with D1 = diag(d1), D2 = diag(d2), d1 = W['d'][:N]**2,
        d2 = W['d'][N:])**2.
        """

        d1, d2 = W['d'][:N]**2, W['d'][N:]**2
        d3, d4 = (d1 + d2)**-1, (d1**-1 + d2**-1)**-1

        # Factor the chordal matrix K = Qinv + (D_1+D_2)^-1.
        K.V = Qinv.V
        K[::N + 1] = K[::N + 1] + d3
        L = chompack.cspmatrix(symb) + K
        chompack.cholesky(L)

        # Solve (Qinv + (D1+D2)^-1) * dy2 = (D1 + D2)^{-1} * 1
        blas.copy(d3, dy2)
        chompack.trsm(L, dy2, trans='N')
        chompack.trsm(L, dy2, trans='T')

        def g(x, y, z):

            # Solve
            #
            #     [ K    d3    ] [ ux_y ]
            #     [            ] [      ] =
            #     [ d3'  1'*d3 ] [ ux_b ]
            #
            #         [ bx_y ]   [ D  ]
            #         [      ] - [    ] * D3 * (D2 * bx_v + bx_z - bx_w).
            #         [ bx_b ]   [ d' ]

            x[:N] -= mul(d, mul(d3, mul(d2, x[-N:]) + z[:N] - z[-N:]))
            x[N] -= blas.dot(d, mul(d3, mul(d2, x[-N:]) + z[:N] - z[-N:]))

            # Solve dy1 := K^-1 * x[:N]
            blas.copy(x, dy1, n=N)
            chompack.trsm(L, dy1, trans='N')
            chompack.trsm(L, dy1, trans='T')

            # Find ux_y = dy1 - ux_b * dy2 s.t
            #
            #     d3' * ( dy1 - ux_b * dy2 + ux_b ) = x[N]
            #
            # i.e.  x[N] := ( x[N] - d3'* dy1 ) / ( d3'* ( 1 - dy2 ) ).

            x[N] = ( x[N] - blas.dot(d3, dy1) ) / \
                ( blas.asum(d3) - blas.dot(d3, dy2) )
            x[:N] = dy1 - x[N] * dy2

            # ux_v = D4 * ( bx_v -  D1^-1 (bz_z + D * (ux_y + ux_b))
            #     - D2^-1 * bz_w )

            x[-N:] = mul(
                d4, x[-N:] - div(z[:N] + mul(d, x[:N] + x[N]), d1) -
                div(z[N:], d2))

            # uz_z = - D1^-1 * ( bx_z - D * ( ux_y + ux_b ) - ux_v )
            # uz_w = - D2^-1 * ( bx_w - uz_w )
            z[:N] += base.mul(d, x[:N] + x[N]) + x[-N:]
            z[-N:] += x[-N:]
            blas.scal(-1.0, z)

            # Return W['di'] * uz
            blas.tbmv(W['di'], z, n=2 * N, k=0, ldA=1)

        return g

    q = matrix(0.0, (2 * N + 1, 1))

    if weights == 'proportional':
        dlist = list(d)
        C1 = 0.5 * N * gamma / dlist.count(1)
        C2 = 0.5 * N * gamma / dlist.count(-1)
        gvec = matrix([C1 if w == 1 else C2 for w in dlist], (N, 1))
        del dlist
        q[-N:] = gvec
    elif weights == 'equal':
        q[-N:] = gamma

    h = matrix(0.0, (2 * N, 1))
    h[:N] = -1.0
    sol = solvers.coneqp(P, q, G, h, kktsolver=Fkkt)
    u = matrix(0.0, (N, 1))
    y, b, v = sol['x'][:N], sol['x'][N], sol['x'][N + 1:]
    z = mul(d, sol['z'][:N])
    base.symv(Qinv, y, u)
    optval = 0.5 * blas.dot(y, u) + gamma * sum(v)
    return y, b, v, z, optval, Lc, sol['iterations']
Example #36
0
    def LSM3(self, option_type="c", func_list=[lambda x: x ** 0, lambda x: x],onlyITM=False,buy_cost=0,sell_cost=0):
        dt = self.T / self.n_steps
        df = np.exp(-self.r * dt)
        df2 = np.exp(-(self.r - self.q) * dt)
        K = self.K
        price_matrix = self.price_matrix
        n_trials = self.n_trials
        n_steps = self.n_steps
        exercise_matrix = np.zeros(price_matrix.shape,dtype=bool)
        american_values_matrix = np.zeros(price_matrix.shape)
        
        
        def __calc_american_values(payoff_fun,func_list, sub_price_matrix,sub_exercise_matrix,df,onlyITM=False):
            exercise_values_t = payoff_fun(sub_price_matrix[:,0])
            ITM_filter = exercise_values_t > 0
            OTM_filter = exercise_values_t <= 0
            n_sub_trials, n_sub_steps = sub_price_matrix.shape
            holding_values_t = np.zeros(n_sub_trials) # simulated samples: y
            exp_holding_values_t = np.zeros(n_sub_trials) # regressed results: E[y]
            
            itemindex = np.where(sub_exercise_matrix==1)
            # print(sub_exercise_matrix)
            for trial_i in range(n_sub_trials):                
                first = next(itemindex[1][i] for i,x in enumerate(itemindex[0]) if x==trial_i)
                payoff_i = payoff_fun(sub_price_matrix[trial_i, first])
                df_i = df**(n_sub_steps-first)
                holding_values_t[trial_i] = payoff_i*df_i
            
            A_matrix = np.array([func(sub_price_matrix[:,0]) for func in func_list]).T
            b_matrix = holding_values_t[:, np.newaxis] # g_tau|Fi
            ITM_A_matrix = A_matrix[ITM_filter, :]
            ITM_b_matrix = b_matrix[ITM_filter, :]           
            lr = LinearRegression(fit_intercept=False)
            lr.fit(ITM_A_matrix, ITM_b_matrix)
            exp_holding_values_t[ITM_filter] = np.dot(ITM_A_matrix, lr.coef_.T)[:, 0] # E[g_tau|Fi] only ITM
            
            if onlyITM:
                # Original LSM
                exp_holding_values_t[OTM_filter] = np.nan
            else:
                # non-conforming approximation: does not guarantee the continuity of the approximation.
                OTM_A_matrix = A_matrix[OTM_filter, :]
                OTM_b_matrix = b_matrix[OTM_filter, :]
                lr.fit(OTM_A_matrix, OTM_b_matrix)
                exp_holding_values_t[OTM_filter] = np.dot(OTM_A_matrix, lr.coef_.T)[:, 0] # E[g_tau|Fi] only OTM
            
            
            sub_exercise_matrix[:,0] = ITM_filter & (exercise_values_t>exp_holding_values_t)
            american_values_t = np.maximum(exp_holding_values_t,exercise_values_t)
            return american_values_t
        
        if (option_type == "c"):
            payoff_fun = lambda x: np.maximum(x - K, 0)
        elif (option_type == "p"):
            payoff_fun = lambda x: np.maximum(K - x, 0)
        
        # when contract is at the maturity
        stock_prices_t = price_matrix[:, -1]
        exercise_values_t = payoff_fun(stock_prices_t)
        holding_values_t = exercise_values_t
        american_values_matrix[:,-1] = exercise_values_t
        exercise_matrix[:,-1] = 1
        
        # before maturity
        for i in np.arange(n_steps)[:0:-1]:
            sub_price_matrix = price_matrix[:,i:]
            sub_exercise_matrix = exercise_matrix[:,i:]
            american_values_t = __calc_american_values(payoff_fun,func_list,sub_price_matrix,sub_exercise_matrix,df,onlyITM)
            american_values_matrix[:,i] = american_values_t
        
        
        
        # obtain the optimal policies at the inception
        holding_matrix = np.zeros(exercise_matrix.shape, dtype=bool)
        for i in np.arange(n_trials):
            exercise_row = exercise_matrix[i, :]
            if (exercise_row.any()):
                exercise_idx = np.where(exercise_row == 1)[0][0]
                exercise_row[exercise_idx + 1:] = 0
                holding_matrix[i,:exercise_idx+1] = 1
            else:
                exercise_row[-1] = 1
                holding_matrix[i,:] = 1

        if onlyITM==False:
            # i=0
            # regular martingale pricing: LSM
            american_value1 = american_values_matrix[:,1].mean() * df
            # with delta hedging: OHMC
            
            # min dP0.T*dP0 + delta dS0.T dS0 delta + 2*dP0.T*delta*dS0
            # subject to: e.T * (dP0 + delta dS0) = 0
            # P = Q.T * Q
            # Q = dS0
            # q = 2*dP0.T*dS0
            # A = e.T * dS0
            # b = - e.T * dP0
            
            
            
            v0 = matrix((american_values_matrix[:,1] * df)[:,np.newaxis])
            S0 = price_matrix[:, 0]
            S1 = price_matrix[:, 1]
            dS0 = df2 * S1 * (1-sell_cost) - S0*(1+buy_cost)
            dP0 = american_values_matrix[:,1] * df - american_value1
            
            Q0 = dS0[:, np.newaxis]
            Q0 = matrix(Q0)
            P = Q0.T * Q0
            q = 2*matrix(dP0[:,np.newaxis]).T*Q0
            
            A = matrix(np.ones(n_trials, dtype=np.float64)).T * Q0
            b = - matrix(np.ones(n_trials, dtype=np.float64)).T * matrix(dP0[:,np.newaxis])
            
            sol = solvers.coneqp(P=P, q=q, A=A, b=b)
            self.sol = sol
            residual_risk = (v0.T * v0 + 2 * sol["primal objective"]) / n_trials
            self.residual_risk = residual_risk[0]  # the value of unit matrix
            
            delta_hedge = sol["x"][0]
            american_values_matrix[:,0] = american_value1
            
            self.american_values_matrix = american_values_matrix
            self.HLSM_price = american_value1
            self.HLSM_delta = - delta_hedge
            print("price: {}, delta-hedge: {}".format(american_value1,delta_hedge))
        
        self.holding_matrix = holding_matrix
        self.exercise_matrix = exercise_matrix
        
        pass
Example #37
0
    def solve(bx, by, bz):
        # solve K x = bx - R * bz -> bx
        x, info = cg(K, bx - R * bz)
        bx[:] = matrix(x)

        # bz <- W^{-T}(-x - bz)
        bz[:] = matrix(Wd * (-bx - bz))

    return solve


solvers.options['abstol'] = 1e-5
solvers.options['reltol'] = 1e-5
solvers.options['feastol'] = 1e-5

wi = solvers.coneqp(P, f, G, c, kktsolver=fKKT)['x']
w = np.zeros((n + 2, ))
w[1:-1] = wi[:, 0].T


def pretty_print(*arrays):
    print 'Solution:'
    for i in range(0, n + 2):
        for a in arrays:
            print "%+.4f" % (a[i]),
        print ""


def tikz_save(basename, *arrays):
    x = arrays[0]
    for i, a in enumerate(arrays[1:]):
Example #38
0
def _updateLineSearch(x, fx, oldFx, oldOldFx, oldDeltaX, g, H, func, grad, z, G, h, y, A, b):

    initVals = dict()
    initVals['x'] = matrix(oldDeltaX)
    # readjust the bounds and initial value if possible
    # as we try our best to use warm start
    if G is not None:
        hTemp = h - G.dot(x)
        dims = {'l': G.shape[0], 'q': [], 's':  []}
        initVals['z'] = matrix(z)
        s = hTemp - G.dot(oldDeltaX)

        while numpy.any(s<=0.0):
            oldDeltaX *= 0.5
            s = h - G.dot(oldDeltaX)
        initVals['s'] = matrix(s)
        initVals['x'] = matrix(oldDeltaX)

        #print initVals['s']
    else:
        hTemp = None
        dims = []

    if A is not None:
        initVals['y'] = matrix(y)
        bTemp = b - A.dot(x)
    else:
        bTemp = None

    # solving the QP to get the descent direction

    if A is not None:
        if G is not None:
            qpOut = solvers.coneqp(matrix(H), matrix(g), matrix(G), matrix(hTemp), dims, matrix(A), matrix(bTemp))
        else:
            qpOut = solvers.coneqp(matrix(H), matrix(g), None, None, None, matrix(A), matrix(bTemp))
    else:
        if G is not None:
            qpOut = solvers.coneqp(matrix(H), matrix(g), matrix(G), matrix(hTemp), dims, initvals=initVals)
        else:
            qpOut = solvers.coneqp(matrix(H), matrix(g))

    # extract the descent direction and do a line search
    deltaX = numpy.array(qpOut['x'])
    oldOldFx = oldFx
    oldFx = fx
    oldGrad = g.copy()

    lineFunc = lineSearch(x, deltaX, func)
    #step, fx = exactLineSearch(1, x, deltaX, func)
    # step, fc, gc, fx, oldFx, new_slope = line_search(func,
    #                                                  grad,
    #                                                  x.ravel(),
    #                                                  deltaX.ravel(),
    #                                                  g.ravel(),
    #                                                  oldFx,
    #                                                  oldOldFx)

    # print step
    # if step is None:
    # step, fx = exactLineSearch2(1, lineFunc, deltaX.ravel().dot(g.ravel()), oldFx)
    # step, fx = exactLineSearch(1, lineFunc)
    # if fx >= oldFx:
    step, fx = backTrackingLineSearch(1, lineFunc, deltaX.ravel().dot(g.ravel()), alpha=0.0001, beta=0.8)

    x += step * deltaX
    if G is not None:
        z[:] = numpy.array(qpOut['z'])
    if A is not None:
        y[:] = numpy.array(qpOut['y'])

    return x, deltaX, z, y, fx, oldFx, oldOldFx, oldGrad, step, qpOut['iterations']
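
# Sketch of the warm-start mechanism used above: coneqp accepts an `initvals`
# dict of starting points ('x', 's', 'z', and optionally 'y'); 's' and 'z'
# must be strictly positive for the linear cone.  Toy problem, illustrative only.
from cvxopt import matrix, solvers
P_ws = matrix([[2.0, 0.0], [0.0, 2.0]])
q_ws = matrix([-2.0, -5.0])
G_ws = matrix([[-1.0, 0.0], [0.0, -1.0]])   # encodes x >= 0
h_ws = matrix([0.0, 0.0])
x0 = matrix([0.9, 2.4])                     # primal guess
s0 = h_ws - G_ws * x0                       # strictly positive slack guess
z0 = matrix([0.1, 0.1])                     # strictly positive dual guess
sol_ws = solvers.coneqp(P_ws, q_ws, G_ws, h_ws,
                        initvals={'x': x0, 's': s0, 'z': z0,
                                  'y': matrix(0.0, (0, 1))})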
Example #39
0
def robsvm(X, d, gamma, P, e):
    """
    Solves the following robust SVM training problem:

       minimize    (1/2) w'*w + gamma*sum(v)
       subject to  diag(d)*(X*w + b*1) >= 1 - v + E*u
                   || S_j*w ||_2 <= u_j,  j = 1...t
                   v >= 0

    The variables are w, b, v, and u. The matrix E is a selector
    matrix with zeros and one '1' per row.  E_ij = 1 means that the
    i'th training vector is associated with the j'th uncertainty
    ellipsoid.

    A custom KKT solver that exploits low-rank structure is used, and
    a positive definite system of equations of order n is
    formed and solved at each iteration.

    ARGUMENTS

    X             m-by-n matrix with training vectors as rows

    d             m-vector with training labels (-1,+1)

    P             list of t symmetric matrices of order n

    e             m-vector where e[i] is the index of the uncertainty
                  ellipsoid associated with the i'th training vector

    RETURNS

    w        n-vector

    b        scalar

    u        t-vector

    v        m-vector

    iters    number of interior-point iterations

    """

    m, n = X.size
    assert type(
        P
    ) is list, "P must be a list of t symmtric positive definite matrices of order n."
    k = len(P)
    if k > 0:
        assert e.size == (m, 1), "e must be an m-vector."
        assert max(e) < k and min(e) >= 0, "e[i] must be in {0,1,...,k-1}."

    E = spmatrix(1., e, range(m), (k, m)).T
    d = matrix(d, tc='d')
    q = matrix(0.0, (n + k + 1 + m, 1))
    q[n + k + 1:] = gamma
    h = matrix(0.0, (2 * m + k * (n + 1), 1))
    h[:m] = -1.0

    # linear operators Q and G
    def Q(x, y, alpha=1.0, beta=0.0, trans='N'):
        y[:n] = alpha * x[:n] + beta * y[:n]

    def G(x, y, alpha=1.0, beta=0.0, trans='N'):
        """
        Implements the linear operator

               [ -DX    E   -d   -I ]  
               [  0     0    0   -I ]  
               [  0   -e_1'  0    0 ]
          G =  [ -P_1'  0    0    0 ]     
               [  .     .    .    . ]    
               [  0   -e_k'  0    0 ]        
               [ -P_k'  0    0    0 ]       

        and its adjoint G'.

        """
        if trans == 'N':
            tmp = +y[:m]
            # y[:m] = alpha*(-DXw + Et - d*b - v) + beta*y[:m]
            base.gemv(E, x[n:n + k], tmp, alpha=alpha, beta=beta)
            blas.axpy(x[n + k + 1:], tmp, alpha=-alpha)
            blas.axpy(d, tmp, alpha=-alpha * x[n + k])
            y[:m] = tmp

            base.gemv(X, x[:n], tmp, alpha=alpha, beta=0.0)
            tmp = mul(d, tmp)
            y[:m] -= tmp

            # y[m:2*m] = -v
            y[m:2 * m] = -alpha * x[n + k + 1:] + beta * y[m:2 * m]

            # SOC 1,...,k
            for i in range(k):
                l = 2 * m + i * (n + 1)
                y[l] = -alpha * x[n + i] + beta * y[l]
                y[l + 1:l + 1 +
                  n] = -alpha * P[i] * x[:n] + beta * y[l + 1:l + 1 + n]

        else:
            tmp1 = mul(d, x[:m])
            tmp2 = y[:n]
            blas.gemv(X, tmp1, tmp2, trans='T', alpha=-alpha, beta=beta)
            for i in range(k):
                l = 2 * m + 1 + i * (n + 1)
                blas.gemv(P[i],
                          x[l:l + n],
                          tmp2,
                          trans='T',
                          alpha=-alpha,
                          beta=1.0)
            y[:n] = tmp2

            tmp2 = y[n:n + k]
            base.gemv(E, x[:m], tmp2, trans='T', alpha=alpha, beta=beta)
            blas.axpy(x[2 * m:2 * m + k * (1 + n):n + 1], tmp2, alpha=-alpha)
            y[n:n + k] = tmp2

            y[n + k] = -alpha * blas.dot(d, x[:m]) + beta * y[n + k]
            y[n + k +
              1:] = -alpha * (x[:m] + x[m:2 * m]) + beta * y[n + k + 1:]

    # precompute products Pi'*Pi
    Pt = []
    for p in P:
        y = matrix(0.0, (n, n))
        blas.syrk(p, y, trans='T')
        Pt.append(y)

    # scaled hyperbolic Householder transformations
    def qscal(u, beta, v, inv=False):
        """
        Transforms the vector u as
           u := beta * (2*v*v' - J) * u
        if 'inv' is False and as
           u := (1/beta) * (2*J*v*v'*J - J) * u
        if 'inv' is True.
        """
        if not inv:
            tmp = blas.dot(u, v)
            u[0] *= -1
            u += 2 * v * tmp
            u *= beta
        else:
            u[0] *= -1.0
            tmp = blas.dot(v, u)
            u[0] -= 2 * v[0] * tmp
            u[1:] += 2 * v[1:] * tmp
            u /= beta

    # custom KKT solver
    def F(W):
        """
        Custom solver for the system

        [  It  0   0    Xt'     0     At1' ...  Atk' ][ dwt  ]   [ rwt ]
        [  0   0   0    -d'     0      0   ...   0   ][ db   ]   [ rb  ]
        [  0   0   0    -I     -I      0   ...   0   ][ dv   ]   [ rv  ]
        [  Xt -d  -I  -Wl1^-2                        ][ dzl1 ]   [ rl1 ]
        [  0   0  -I         -Wl2^-2                 ][ dzl2 ] = [ rl2 ]
        [ At1  0   0                -W1^-2           ][ dz1  ]   [ r1  ] 
        [  |   |   |                       .         ][  |   ]   [  |  ]
        [ Atk  0   0                          -Wk^-2 ][ dzk  ]   [ rk  ]

        where

        It = [ I 0 ]  Xt = [ -D*X E ]  Ati = [ 0   -e_i' ]  
             [ 0 0 ]                         [ -Pi   0   ] 

        dwt = [ dw ]  rwt = [ rw ]
              [ dt ]        [ rt ].

        """

        # scalings and 'intermediate' vectors
        # db = inv(Wl1)^2 + inv(Wl2)^2
        db = W['di'][:m]**2 + W['di'][m:2 * m]**2
        dbi = div(1.0, db)

        # dt = I - inv(Wl1)*Dbi*inv(Wl1)
        dt = 1.0 - mul(W['di'][:m]**2, dbi)
        dtsqrt = sqrt(dt)

        # lam = Dt*inv(Wl1)*d
        lam = mul(dt, mul(W['di'][:m], d))

        # lt = E'*inv(Wl1)*lam
        lt = matrix(0.0, (k, 1))
        base.gemv(E, mul(W['di'][:m], lam), lt, trans='T')

        # Xs = sqrt(Dt)*inv(Wl1)*X
        tmp = mul(dtsqrt, W['di'][:m])
        Xs = spmatrix(tmp, range(m), range(m)) * X

        # Es = D*sqrt(Dt)*inv(Wl1)*E
        Es = spmatrix(mul(d, tmp), range(m), range(m)) * E

        # form Ab = I + sum((1/bi)^2*(Pi'*Pi + 4*(v'*v + 1)*Pi'*y*y'*Pi)) + Xs'*Xs
        #  and Bb = -sum((1/bi)^2*(4*ui*v'*v*Pi'*y*ei')) - Xs'*Es
        #  and D2 = Es'*Es + sum((1/bi)^2*(1+4*ui^2*(v'*v - 1))
        Ab = matrix(0.0, (n, n))
        Ab[::n + 1] = 1.0
        base.syrk(Xs, Ab, trans='T', beta=1.0)
        Bb = matrix(0.0, (n, k))
        Bb = -Xs.T * Es  # inefficient!?
        D2 = spmatrix(0.0, range(k), range(k))
        base.syrk(Es, D2, trans='T', partial=True)
        d2 = +D2.V
        del D2
        py = matrix(0.0, (n, 1))
        for i in range(k):
            binvsq = (1.0 / W['beta'][i])**2
            Ab += binvsq * Pt[i]
            dvv = blas.dot(W['v'][i], W['v'][i])
            blas.gemv(P[i], W['v'][i][1:], py, trans='T', alpha=1.0, beta=0.0)
            blas.syrk(py, Ab, alpha=4 * binvsq * (dvv + 1), beta=1.0)
            Bb[:, i] -= 4 * binvsq * W['v'][i][0] * dvv * py
            d2[i] += binvsq * (1 + 4 * (W['v'][i][0]**2) * (dvv - 1))

        d2i = div(1.0, d2)
        d2isqrt = sqrt(d2i)

        # compute a = alpha - lam'*inv(Wl1)*E*inv(D2)*E'*inv(Wl1)*lam
        alpha = blas.dot(lam, mul(W['di'][:m], d))
        tmp = matrix(0.0, (k, 1))
        base.gemv(E, mul(W['di'][:m], lam), tmp, trans='T')
        tmp = mul(tmp, d2isqrt)  #tmp = inv(D2)^(1/2)*E'*inv(Wl1)*lam
        a = alpha - blas.dot(tmp, tmp)

        # compute M12 = X'*D*inv(Wl1)*lam + Bb*inv(D2)*E'*inv(Wl1)*lam
        tmp = mul(tmp, d2isqrt)
        M12 = matrix(0.0, (n, 1))
        blas.gemv(Bb, tmp, M12, alpha=1.0)
        tmp = mul(d, mul(W['di'][:m], lam))
        blas.gemv(X, tmp, M12, trans='T', alpha=1.0, beta=1.0)

        # form and factor M
        sBb = Bb * spmatrix(d2isqrt, range(k), range(k))
        base.syrk(sBb, Ab, alpha=-1.0, beta=1.0)
        M = matrix([[Ab, M12.T], [M12, a]])
        lapack.potrf(M)

        def f(x, y, z):

            # residuals
            rwt = x[:n + k]
            rb = x[n + k]
            rv = x[n + k + 1:n + k + 1 + m]
            iw_rl1 = mul(W['di'][:m], z[:m])
            iw_rl2 = mul(W['di'][m:2 * m], z[m:2 * m])
            ri = [
                z[2 * m + i * (n + 1):2 * m + (i + 1) * (n + 1)]
                for i in range(k)
            ]

            # compute 'derived' residuals
            # rbwt = rwt + sum(Ai'*inv(Wi)^2*ri) + [-X'*D; E']*inv(Wl1)^2*rl1
            rbwt = +rwt
            for i in range(k):
                tmp = +ri[i]
                qscal(tmp, W['beta'][i], W['v'][i], inv=True)
                qscal(tmp, W['beta'][i], W['v'][i], inv=True)
                rbwt[n + i] -= tmp[0]
                blas.gemv(P[i], tmp[1:], rbwt, trans='T', alpha=-1.0, beta=1.0)
            tmp = mul(W['di'][:m], iw_rl1)
            tmp2 = matrix(0.0, (k, 1))
            base.gemv(E, tmp, tmp2, trans='T')
            rbwt[n:] += tmp2
            tmp = mul(d, tmp)  # tmp = D*inv(Wl1)^2*rl1
            blas.gemv(X, tmp, rbwt, trans='T', alpha=-1.0, beta=1.0)

            # rbb = rb - d'*inv(Wl1)^2*rl1
            rbb = rb - sum(tmp)

            # rbv = rv - inv(Wl2)*rl2 - inv(Wl1)^2*rl1
            rbv = rv - mul(W['di'][m:2 * m], iw_rl2) - mul(W['di'][:m], iw_rl1)

            # [rtw;rtt] = rbwt + [-X'*D; E']*inv(Wl1)^2*inv(Db)*rbv
            tmp = mul(W['di'][:m]**2, mul(dbi, rbv))
            rtt = +rbwt[n:]
            base.gemv(E, tmp, rtt, trans='T', alpha=1.0, beta=1.0)
            rtw = +rbwt[:n]
            tmp = mul(d, tmp)
            blas.gemv(X, tmp, rtw, trans='T', alpha=-1.0, beta=1.0)

            # rtb = rbb - d'*inv(Wl1)^2*inv(Db)*rbv
            rtb = rbb - sum(tmp)

            # solve M*[dw;db] = [rtw - Bb*inv(D2)*rtt; rtb + lt'*inv(D2)*rtt]
            tmp = mul(d2i, rtt)
            tmp2 = matrix(0.0, (n, 1))
            blas.gemv(Bb, tmp, tmp2)
            dwdb = matrix([rtw - tmp2, rtb + blas.dot(mul(d2i, lt), rtt)])
            lapack.potrs(M, dwdb)

            # compute dt = inv(D2)*(rtt - Bb'*dw + lt*db)
            tmp2 = matrix(0.0, (k, 1))
            blas.gemv(Bb, dwdb[:n], tmp2, trans='T')
            dt = mul(d2i, rtt - tmp2 + lt * dwdb[-1])

            # compute dv = inv(Db)*(rbv + inv(Wl1)^2*(E*dt - D*X*dw - d*db))
            dv = matrix(0.0, (m, 1))
            blas.gemv(X, dwdb[:n], dv, alpha=-1.0)
            dv = mul(d, dv) - d * dwdb[-1]
            base.gemv(E, dt, dv, beta=1.0)
            tmp = +dv  # tmp = E*dt - D*X*dw - d*db
            dv = mul(dbi, rbv + mul(W['di'][:m]**2, dv))

            # compute wdz1 = inv(Wl1)*(E*dt - D*X*dw - d*db - dv - rl1)
            wdz1 = mul(W['di'][:m], tmp - dv) - iw_rl1

            # compute wdz2 = - inv(Wl2)*(dv + rl2)
            wdz2 = -mul(W['di'][m:2 * m], dv) - iw_rl2

            # compute wdzi = inv(Wi)*([-ei'*dt; -Pi*dw] - ri)
            wdzi = []
            tmp = matrix(0.0, (n, 1))
            for i in range(k):
                blas.gemv(P[i], dwdb[:n], tmp, alpha=-1.0, beta=0.0)
                tmp1 = matrix([-dt[i], tmp])
                blas.axpy(ri[i], tmp1, alpha=-1.0)
                qscal(tmp1, W['beta'][i], W['v'][i], inv=True)
                wdzi.append(tmp1)

            # solution
            x[:n] = dwdb[:n]
            x[n:n + k] = dt
            x[n + k] = dwdb[-1]
            x[n + k + 1:] = dv
            z[:m] = wdz1
            z[m:2 * m] = wdz2
            for i in range(k):
                z[2 * m + i * (n + 1):2 * m + (i + 1) * (n + 1)] = wdzi[i]

        return f

    # solve cone QP and return solution
    sol = solvers.coneqp(Q,
                         q,
                         G,
                         h,
                         dims={
                             'l': 2 * m,
                             'q': [n + 1 for i in range(k)],
                             's': []
                         },
                         kktsolver=F)
    return sol['x'][:n], sol['x'][
        n + k], sol['x'][n:n + k], sol['x'][n + k + 1:], sol['iterations']
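
The call above hands solvers.coneqp a problem-specific kktsolver.  For reference, here is a minimal, self-contained sketch of that interface (not taken from any of the listed examples): for a QP with only a linear cone, kktsolver(W) must return a function f(x, y, z) that overwrites x, y, z in place with ux, uy and the scaled W*uz.  All problem data and names below are illustrative assumptions.

# Minimal custom-kktsolver sketch: eliminate uz = D^-2*(G*ux - bz) with
# D = diag(W['d']) and factor K = P + G'*D^-2*G once per scaling update.
from cvxopt import matrix, solvers, mul, blas, lapack

n = 3
P = matrix([[4.0, 1.0, 0.0], [1.0, 3.0, 0.0], [0.0, 0.0, 2.0]])
q = matrix([-1.0, -2.0, 0.5])
G = matrix([[-1.0, 0.0, 0.0, 1.0, 0.0, 0.0],
            [0.0, -1.0, 0.0, 0.0, 1.0, 0.0],
            [0.0, 0.0, -1.0, 0.0, 0.0, 1.0]])   # 0 <= x <= 1
h = matrix(3 * [0.0] + 3 * [1.0])

def kktsolver(W):
    di = W['di']                           # di = 1/d for the 'l' cone
    Gs = mul(di * matrix(1.0, (1, n)), G)  # Gs = diag(di) * G
    K = matrix(P)
    blas.syrk(Gs, K, trans='T', beta=1.0)  # K = P + G'*D^-2*G (lower part)
    lapack.potrf(K)
    def f(x, y, z):
        x += G.T * mul(di**2, z)           # rhs: bx + G'*D^-2*bz
        lapack.potrs(K, x)                 # x := ux
        z[:] = mul(di, G * x - z)          # z := W*uz = di .* (G*ux - bz)
    return f

sol = solvers.coneqp(P, q, G, h, kktsolver=kktsolver)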
Example #40
def proxqp_clique_SNL(c, A, b, z, rho):
    """
    Solves the 1-norm regularized conic LP

        min.  < c, x > + || A(x) - b ||_1 + (rho/2) || x - z ||^2
        s.t.  x >= 0

    for a single dense clique.

    The method is used in this package to solve the
    sensor node localization problem.
    
    Input arguments.

        c is a 'd' matrix of size n_k**2 x 1

        A is a 'd' matrix of size n_k**2 x m_k.  Each of its columns 
            represents a symmetric matrix of order n_k in unpacked column-major 
            order. The term  A ( x ) in the primal constraint is given by

                A(x) = A' * vec(x).

            The adjoint A'( y ) in the dual constraint is given by

                A'(y) = mat( A * y ).

            Only the entries of A corresponding to lower-triangular
            positions are accessed.
             

        b is a 'd' matrix of size m_k x 1.

        z is a 'd' matrix of size n_k**2 x 1
        
        rho is a positive scalar.  


    Output arguments.

        sol : Solution dictionary for quadratic optimization problem.
        
        primal : objective for optimization problem without prox term (trace C*X)

    """

    ns2, ms = A.size
    nl, msl = len(b) * 2, len(b)

    ns = int(sqrt(ns2))
    dims = {'l': nl, 'q': [], 's': [ns]}

    c = matrix([matrix(1.0, (nl, 1)), c])
    z = matrix([matrix(0.0, (nl, 1)), z])
    q = +c
    blas.axpy(z, q, alpha=-rho, offsetx=nl, offsety=nl)

    symmetrize(q, ns, offset=nl)
    q = q[:]
    h = matrix(0.0, (nl + ns2, 1))

    bz = +q
    xp = +q

    def P(u, v, alpha=1.0, beta=0.0):
        # v := alpha * rho * u + beta * v
        blas.scal(beta, v)
        blas.axpy(u, v, alpha=alpha * rho, offsetx=nl, offsety=nl)

    def xdot(x, y):
        misc.trisc(x, dims)
        adot = blas.dot(x, y)
        misc.triusc(x, dims)
        return adot

    def Gf(u, v, alpha=1.0, beta=0.0, trans='N'):
        # v = -alpha*u + beta * v
        blas.scal(beta, v)
        blas.axpy(u, v, alpha=-alpha)

    def Af(u, v, alpha=1.0, beta=0.0, trans="N"):

        # v := alpha * A(u) + beta * v if trans is 'N'
        # v := alpha * A'(u) + beta * v if trans is 'T'
        blas.scal(beta, v)
        if trans == "N":
            blas.axpy(u, v, alpha=alpha, n=nl / 2)
            blas.axpy(u, v, alpha=-alpha, offsetx=nl / 2, n=nl / 2)
            sgemv(A,
                  u,
                  v,
                  n=ns,
                  m=ms,
                  alpha=alpha,
                  beta=1.0,
                  trans="T",
                  offsetx=nl)

        elif trans == "T":
            blas.axpy(u, v, alpha=alpha, n=nl / 2)
            blas.axpy(u, v, alpha=-alpha, offsety=nl / 2, n=nl / 2)
            sgemv(A,
                  u,
                  v,
                  n=ns,
                  m=ms,
                  alpha=alpha,
                  beta=1.0,
                  trans="N",
                  offsety=nl)

    U = matrix(0.0, (ns, ns))
    Vt = matrix(0.0, (ns, ns))
    sv = matrix(0.0, (ns, 1))
    Gamma = matrix(0.0, (ns, ns))

    if type(A) is spmatrix:
        VecAIndex = +A[:].I
    As = matrix(A)
    Aspkd = matrix(0.0, ((ns + 1) * ns / 2, ms))
    tmp = matrix(0.0, (ms, 1))

    def F(W):
        # SVD R[j] = U[j] * diag(sig[j]) * Vt[j]
        lapack.gesvd(+W['r'][0], sv, jobu='A', jobvt='A', U=U, Vt=Vt)

        W2 = mul(+W['d'], +W['d'])

        # Vt[j] := diag(sig[j])^-1 * Vt[j]
        for k in xrange(ns):
            blas.tbsv(sv, Vt, n=ns, k=0, ldA=1, offsetx=k * ns)

        # Gamma[j] is an ns[j] x ns[j] symmetric matrix
        #  (sig[j] * sig[j]') ./  sqrt(1 + rho * (sig[j] * sig[j]').^2)
        # S = sig[j] * sig[j]'
        S = matrix(0.0, (ns, ns))
        blas.syrk(sv, S)
        Gamma = div(S, sqrt(1.0 + rho * S**2))
        symmetrize(Gamma, ns)

        # As represents the scaled mapping
        #
        #     As(x) = A(u * (Gamma .* x) * u')
        #    As'(y) = Gamma .* (u' * A'(y) * u)
        #
        # stored in a similar format as A, except that we use packed
        # storage for the columns of As[i][j].

        if type(A) is spmatrix:
            blas.scal(0.0, As)
            As[VecAIndex] = +A[VecAIndex]
        else:
            blas.copy(A, As)

        # As[i][j][:,k] = diag( diag(Gamma[j]))*As[i][j][:,k]
        # As[i][j][l,:] = Gamma[j][l,l]*As[i][j][l,:]
        for k in xrange(ms):
            cngrnc(U, As, trans='T', offsetx=k * (ns2))
            blas.tbmv(Gamma, As, n=ns2, k=0, ldA=1, offsetx=k * (ns2))

        misc.pack(As, Aspkd, {'l': 0, 'q': [], 's': [ns] * ms})

        # H is an m times m block matrix with i, k block
        #
        #      Hik = sum_j As[i,j]' * As[k,j]
        #
        # of size ms[i] x ms[k].  Hik = 0 if As[i,j] or As[k,j]
        # are zero for all j
        H = matrix(0.0, (ms, ms))
        blas.syrk(Aspkd, H, trans='T', beta=1.0, k=ns * (ns + 1) / 2)

        #H = H + spmatrix(W2[:nl/2] + W2[nl/2:] ,range(nl/2),range(nl/2))
        blas.axpy(W2, H, n=ms, incy=ms + 1, alpha=1.0)
        blas.axpy(W2, H, offsetx=ms, n=ms, incy=ms + 1, alpha=1.0)

        lapack.potrf(H)

        def solve(x, y, z):
            """
            Returns solution of 

                rho * ux + A'(uy) - r^-T * uz * r^-1 = bx
                A(ux)                                = by
                -ux               - r * uz * r'      = bz.

            On entry, x = bx, y = by, z = bz.
            On exit, x = ux, y = uy, z = uz.
            """

            # bz is a copy of z in the format of x
            blas.copy(z, bz)
            blas.axpy(bz, x, alpha=rho, offsetx=nl, offsety=nl)
            # x := Gamma .* (u' * x * u)
            #    = Gamma .* (u' * (bx + rho * bz) * u)

            cngrnc(U, x, trans='T', offsetx=nl)
            blas.tbmv(Gamma, x, n=ns2, k=0, ldA=1, offsetx=nl)
            blas.tbmv(+W['d'], x, n=nl, k=0, ldA=1)

            # y := y - As(x)
            #   := by - As( Gamma .* u' * (bx + rho * bz) * u)

            misc.pack(x, xp, dims)
            blas.gemv(Aspkd, xp, y, trans = 'T',alpha = -1.0, beta = 1.0, \
                m = ns*(ns+1)/2, n = ms,offsetx = nl)

            #y = y - mul(+W['d'][:nl/2],xp[:nl/2])+ mul(+W['d'][nl/2:nl],xp[nl/2:nl])
            blas.tbmv(+W['d'], xp, n=nl, k=0, ldA=1)
            blas.axpy(xp, y, alpha=-1, n=ms)
            blas.axpy(xp, y, alpha=1, n=ms, offsetx=nl / 2)

            # y := -y - A(bz)
            #    = -by - A(bz) + As(Gamma .*  (u' * (bx + rho * bz) * u)

            Af(bz, y, alpha=-1.0, beta=-1.0)

            # y := H^-1 * y
            #    = H^-1 ( -by - A(bz) + As(Gamma.* u'*(bx + rho*bz)*u) )
            #    = uy

            blas.trsv(H, y)
            blas.trsv(H, y, trans='T')

            # bz = Vt' * vz * Vt
            #    = uz where
            # vz := Gamma .* ( As'(uy)  - x )
            #     = Gamma .* ( As'(uy)  - Gamma .* (u'*(bx + rho *bz)*u) )
            #     = Gamma.^2 .* ( u' * (A'(uy) - bx - rho * bz) * u ).

            misc.pack(x, xp, dims)
            blas.scal(-1.0, xp)

            blas.gemv(Aspkd,
                      y,
                      xp,
                      alpha=1.0,
                      beta=1.0,
                      m=ns * (ns + 1) / 2,
                      n=ms,
                      offsety=nl)

            #xp[:nl/2] = xp[:nl/2] + mul(+W['d'][:nl/2],y)
            #xp[nl/2:nl] = xp[nl/2:nl] - mul(+W['d'][nl/2:nl],y)

            blas.copy(y, tmp)
            blas.tbmv(+W['d'], tmp, n=nl / 2, k=0, ldA=1)
            blas.axpy(tmp, xp, n=nl / 2)

            blas.copy(y, tmp)
            blas.tbmv(+W['d'], tmp, n=nl / 2, k=0, ldA=1, offsetA=nl / 2)
            blas.axpy(tmp, xp, alpha=-1, n=nl / 2, offsety=nl / 2)

            # bz[j] is xp unpacked and multiplied with Gamma
            blas.copy(xp, bz)  #,n = nl)
            misc.unpack(xp, bz, dims)
            blas.tbmv(Gamma, bz, n=ns2, k=0, ldA=1, offsetx=nl)

            # bz = Vt' * bz * Vt
            #    = uz
            cngrnc(Vt, bz, trans='T', offsetx=nl)

            symmetrize(bz, ns, offset=nl)

            # x = -bz - r * uz * r'
            # z contains r.h.s. bz;  copy to x
            #so far, z = bzc (untouched)
            blas.copy(z, x)
            blas.copy(bz, z)

            cngrnc(W['r'][0], bz, offsetx=nl)
            blas.tbmv(W['d'], bz, n=nl, k=0, ldA=1)

            blas.axpy(bz, x)
            blas.scal(-1.0, x)

        return solve

    sol = solvers.coneqp(P, q, Gf, h, dims, Af, b, None, F, xdot=xdot)
    primal = blas.dot(sol['s'], c)

    sol['s'] = sol['s'][nl:]
    sol['z'] = sol['z'][nl:]

    return sol, primal
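
The A(x) = A' * vec(x) and A'(y) = mat(A * y) convention from the docstring above can be checked on a tiny example; the sizes and data below are illustrative only.

# Tiny self-contained check of the A(x)/adjoint convention: the columns of
# A are vectorized symmetric matrices A_i, A(X) = A'*vec(X), A'(y) = mat(A*y).
from cvxopt import matrix

nk = 3
A1 = matrix([2.0, 1.0, 0.0, 1.0, 3.0, 0.0, 0.0, 0.0, 1.0], (nk, nk))
A2 = matrix([0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 4.0], (nk, nk))
A  = matrix([[A1[:]], [A2[:]]])                     # nk**2 x 2
X  = matrix([1.0, 0.5, 0.0, 0.5, 2.0, 0.0, 0.0, 0.0, 1.0], (nk, nk))
print(A.T * X[:])                                   # A(X) = (<A1,X>, <A2,X>)
print(matrix(A * matrix([1.0, -1.0]), (nk, nk)))    # A'(y) for y = (1, -1)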
Example #41
    
    def solve(bx, by, bz):
        # solve K x = bx - R * bz -> bx
        x, info = cg(K, bx - R * bz)
        bx[:] = matrix(x)
                
        # bz <- W^{-T}(-x - bz)
        bz[:] = matrix(Wd*(-bx-bz))

    return solve

solvers.options['abstol'] = 1e-5
solvers.options['reltol'] = 1e-5
solvers.options['feastol'] = 1e-5

wi = solvers.coneqp(P, f, G, c, kktsolver=fKKT)['x']
w  = np.zeros((n+2,))
w[1:-1] = wi[:,0].T

def pretty_print(*arrays):
    print 'Solution:'
    for i in range(0,n+2):
        for a in arrays:
            print "%+.4f" % (a[i]),
        print ""

def tikz_save(basename, *arrays):
    x = arrays[0]
    for i, a in enumerate(arrays[1:]):
        filename = "../Plots/%s-%d.table" % (basename, i)
        np.savetxt(filename, np.column_stack((x,a)), fmt="%.4f")
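
The solve() fragment at the top of this example relies on names defined earlier in its script: K (the reduced SPD system), R (a coupling matrix), Wd (the W^{-T} scaling) and cg from scipy.  A self-contained sketch of the same pattern, with illustrative stand-ins for those names:

# Stand-ins (assumptions, not from the original script) for the names used
# by the solve() fragment above; the CG step and scaled update are the same.
import numpy as np
from scipy.sparse.linalg import cg

K  = np.diag([4.0, 3.0, 2.0])        # reduced SPD KKT matrix
R  = np.eye(3)                       # coupling matrix
Wd = np.diag([1.0, 0.5, 2.0])        # stand-in for the W^{-T} scaling

bx, bz = np.array([1.0, 2.0, 3.0]), np.array([0.5, -0.5, 1.0])
x, info = cg(K, bx - R.dot(bz))      # solve K x = bx - R*bz
bznew = Wd.dot(-x - bz)              # bz <- W^{-T}(-x - bz)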
Example #42
def proxqp_clique(c, A, b, z, rho):
    """
    Solves the conic QP

        min.  < c, x > + (rho/2) || x - z ||^2
        s.t.  A(x) = b
              x >= 0

    and its dual

        max.  -< b, y > - 1/(2*rho) * || c + A'(y) - rho * z - s ||^2 
        s.t.  s >= 0.

    for a single dense clique. 
    
    If the problem has block-arrow correlative sparsity, then the previous
    function 
    
    X = proxqp(c,A,b,z,rho,**kwargs)
    
    is equivalent to
    
    for k in xrange(ncliques):
        X[k] = proxqp_clique(c[k],A[k][k],b[k],z[k],rho,**kwargs)
    
    and each call can be implemented in parallel.
    
    Input arguments.

        c is a 'd' matrix of size n_k**2 x 1

        A is a 'd' matrix of size n_k**2 x m_k.  Each of its columns 
            represents a symmetric matrix of order n_k in unpacked column-major 
            order. The term  A ( x ) in the primal constraint is given by

                A(x) = A' * vec(x).

            The adjoint A'( y ) in the dual constraint is given by

                A'(y) = mat( A * y ).
             

        b is a 'd' matrix of size m_k x 1.
        
        z is a 'd' matrix of size n_k**2 x 1
        
        rho is a positive scalar.  

    Output arguments.
    
        sol : Solution dictionary for quadratic optimization problem.
        
        primal : objective for optimization problem without prox term (trace C*X)

    """

    ns2, ms = A.size
    ns = int(sqrt(ns2))
    dims = {'l': 0, 'q': [], 's': [ns]}

    q = +c
    blas.axpy(z, q, alpha=-rho)
    symmetrize(q, ns, offset=0)
    q = q[:]
    h = matrix(0.0, (ns2, 1))

    bz = +q
    xp = +q

    def P(u, v, alpha=1.0, beta=0.0):
        # v := alpha * rho * u + beta * v
        #if not (beta==0.0):
        blas.scal(beta, v)
        blas.axpy(u, v, alpha=alpha * rho)

    def xdot(x, y):
        misc.trisc(x, {'l': 0, 'q': [], 's': [ns]})
        adot = blas.dot(x, y)
        misc.triusc(x, {'l': 0, 'q': [], 's': [ns]})
        return adot

    def Gf(u, v, alpha=1.0, beta=0.0, trans='N'):

        # v = -alpha*u + beta * v
        # u and v are vectors representing N symmetric matrices in the
        # cvxopt format.
        blas.scal(beta, v)
        blas.axpy(u, v, alpha=-alpha)

    def Af(u, v, alpha=1.0, beta=0.0, trans="N"):

        # v := alpha * A(u) + beta * v if trans is 'N'
        # v := alpha * A'(u) + beta * v if trans is 'T'
        blas.scal(beta, v)
        if trans == "N":
            sgemv(A,
                  u,
                  v,
                  n=ns,
                  m=ms,
                  alpha=alpha,
                  beta=1.0,
                  trans="T",
                  offsetx=0)
        elif trans == "T":
            sgemv(A,
                  u,
                  v,
                  n=ns,
                  m=ms,
                  alpha=alpha,
                  beta=1.0,
                  trans="N",
                  offsetx=0)

    U = matrix(0.0, (ns, ns))
    Vt = matrix(0.0, (ns, ns))
    sv = matrix(0.0, (ns, 1))
    Gamma = matrix(0.0, (ns, ns))

    if type(A) is spmatrix:
        VecAIndex = +A[:].I

    Aspkd = matrix(0.0, ((ns + 1) * ns / 2, ms))
    As = matrix(A)

    def F(W):
        # SVD R[j] = U[j] * diag(sig[j]) * Vt[j]
        lapack.gesvd(+W['r'][0], sv, jobu='A', jobvt='A', U=U, Vt=Vt)

        # Vt[j] := diag(sig[j])^-1 * Vt[j]
        for k in xrange(ns):
            blas.tbsv(sv, Vt, n=ns, k=0, ldA=1, offsetx=k * ns)

        # Gamma[j] is an ns[j] x ns[j] symmetric matrix
        #
        #  (sig[j] * sig[j]') ./  sqrt(1 + rho * (sig[j] * sig[j]').^2)

        # S = sig[j] * sig[j]'
        S = matrix(0.0, (ns, ns))
        blas.syrk(sv, S)
        Gamma = div(S, sqrt(1.0 + rho * S**2))
        symmetrize(Gamma, ns)

        # As represents the scaled mapping
        #
        #     As(x) = A(u * (Gamma .* x) * u')
        #    As'(y) = Gamma .* (u' * A'(y) * u)
        #
        # stored in a similar format as A, except that we use packed
        # storage for the columns of As[i][j].

        if type(A) is spmatrix:
            blas.scal(0.0, As)
            try:
                As[VecAIndex] = +A['s'][VecAIndex]
            except:
                As[VecAIndex] = +A[VecAIndex]
        else:
            blas.copy(A, As)

        # As[i][j][:,k] = diag( diag(Gamma[j]))*As[i][j][:,k]
        # As[i][j][l,:] = Gamma[j][l,l]*As[i][j][l,:]
        for k in xrange(ms):
            cngrnc(U, As, trans='T', offsetx=k * (ns2))
            blas.tbmv(Gamma, As, n=ns2, k=0, ldA=1, offsetx=k * (ns2))

        misc.pack(As, Aspkd, {'l': 0, 'q': [], 's': [ns] * ms})

        # H is an m times m block matrix with i, k block
        #
        #      Hik = sum_j As[i,j]' * As[k,j]
        #
        # of size ms[i] x ms[k].  Hik = 0 if As[i,j] or As[k,j]
        # are zero for all j
        H = matrix(0.0, (ms, ms))
        blas.syrk(Aspkd, H, trans='T', beta=1.0, k=ns * (ns + 1) / 2)

        lapack.potrf(H)

        def solve(x, y, z):
            """
            Returns solution of 

                rho * ux + A'(uy) - r^-T * uz * r^-1 = bx
                A(ux)                                = by
                -ux               - r * uz * r'      = bz.

            On entry, x = bx, y = by, z = bz.
            On exit, x = ux, y = uy, z = uz.
            """

            # bz is a copy of z in the format of x
            blas.copy(z, bz)
            blas.axpy(bz, x, alpha=rho)

            # x := Gamma .* (u' * x * u)
            #    = Gamma .* (u' * (bx + rho * bz) * u)

            cngrnc(U, x, trans='T', offsetx=0)
            blas.tbmv(Gamma, x, n=ns2, k=0, ldA=1, offsetx=0)

            # y := y - As(x)
            #   := by - As( Gamma .* u' * (bx + rho * bz) * u)
            #blas.copy(x,xp)
            #pack_ip(xp,n = ns,m=1,nl=nl)
            misc.pack(x, xp, {'l': 0, 'q': [], 's': [ns]})

            blas.gemv(Aspkd, xp, y, trans = 'T',alpha = -1.0, beta = 1.0, \
                m = ns*(ns+1)/2, n = ms,offsetx = 0)

            # y := -y - A(bz)
            #    = -by - A(bz) + As(Gamma .*  (u' * (bx + rho * bz) * u)
            Af(bz, y, alpha=-1.0, beta=-1.0)

            # y := H^-1 * y
            #    = H^-1 ( -by - A(bz) + As(Gamma.* u'*(bx + rho*bz)*u) )
            #    = uy

            blas.trsv(H, y)
            blas.trsv(H, y, trans='T')

            # bz = Vt' * vz * Vt
            #    = uz where
            # vz := Gamma .* ( As'(uy)  - x )
            #     = Gamma .* ( As'(uy)  - Gamma .* (u'*(bx + rho *bz)*u) )
            #     = Gamma.^2 .* ( u' * (A'(uy) - bx - rho * bz) * u ).
            #blas.copy(x,xp)
            #pack_ip(xp,n=ns,m=1,nl=nl)

            misc.pack(x, xp, {'l': 0, 'q': [], 's': [ns]})
            blas.scal(-1.0, xp)

            blas.gemv(Aspkd,
                      y,
                      xp,
                      alpha=1.0,
                      beta=1.0,
                      m=ns * (ns + 1) / 2,
                      n=ms,
                      offsety=0)

            # bz[j] is xp unpacked and multiplied with Gamma
            misc.unpack(xp, bz, {'l': 0, 'q': [], 's': [ns]})
            blas.tbmv(Gamma, bz, n=ns2, k=0, ldA=1, offsetx=0)

            # bz = Vt' * bz * Vt
            #    = uz
            cngrnc(Vt, bz, trans='T', offsetx=0)

            symmetrize(bz, ns, offset=0)

            # x = -bz - r * uz * r'
            # z contains r.h.s. bz;  copy to x
            blas.copy(z, x)
            blas.copy(bz, z)

            cngrnc(W['r'][0], bz, offsetx=0)
            blas.axpy(bz, x)
            blas.scal(-1.0, x)

        return solve

    #solvers.options['show_progress'] = True
    sol = solvers.coneqp(P, q, Gf, h, dims, Af, b, None, F, xdot=xdot)
    primal = blas.dot(c, sol['s'])
    return sol, primal
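
The docstring above notes that under block-arrow correlative sparsity the prox step decomposes into independent per-clique calls.  Below is a hedged sketch of that decomposition with multiprocessing; the cliques list and its (c_k, A_kk, b_k, z_k) layout are assumptions, not part of the original code.

# Hedged sketch of the per-clique decomposition described in the docstring.
from multiprocessing import Pool

def _solve_clique(args):
    ck, Akk, bk, zk, rho = args
    return proxqp_clique(ck, Akk, bk, zk, rho)

def proxqp_parallel(cliques, rho, nworkers=4):
    # cliques: assumed list of (c_k, A_kk, b_k, z_k) tuples built by the caller
    pool = Pool(nworkers)
    try:
        return pool.map(_solve_clique,
                        [(ck, Akk, bk, zk, rho) for (ck, Akk, bk, zk) in cliques])
    finally:
        pool.close()
        pool.join()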
Example #43
File: test.py  Project: bpiwowar/kqp
    print
    print "=== G"
    print G
    print
    print "=== h"
    print h.T
    print

    
print "=== Problem size: n=%d and r=%d ===\n\n" % (n, r)
print "* np = %d" % np
print "* lambda = %g" % Lambda

print "\n\n   [[[Solving with optimised]]]"
T1 = time()
sol = solvers.coneqp(P, q, G, h, kktsolver=solver(n,r,g))
print "Time taken = %s" % (time() - T1)
print sol['status']
if (n * r < 10): print "Solution = %s" % sol['x'].T

printing.options['width'] = n

nzeros=0
xi = sol['x'][n*r:n*(r+1)]
maxxi=max(xi)
print sum(xi[0:n])
for i in xrange(n):
    if xi[i]/maxxi < 1e-4: nzeros += 1

print "xi = %s" % sorted(xi.T/maxxi)
print "Sparsity = %d on %d" % (nzeros, n)
Example #44
    def F(x=None, z=None):
        if x is None: return 0, matrix(np.random.rand(n, 1))

        f = -1 * (sum((x**2))) + 1
        #print f,"DD",x,z
        Df = 2 * (x).T

        if z is None: return f, Df
        H = spdiag(z[0] * x**-2)
        return f, Df, H

    return solvers.cp(F, A=A, b=b)['x']
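
Note that the F above returns a gradient and Hessian that do not correspond to its own f.  For comparison, here is a sketch in the form of the analytic-centering example documented for solvers.cp (objective -sum(log x), subject to A*x = b); the name acent_ref is used to avoid clashing with the acent called below.

from cvxopt import solvers, matrix, spdiag, log

def acent_ref(A, b):
    # minimize -sum(log(x)) subject to A*x = b, via the solvers.cp interface:
    # F() -> (0, x0), F(x) -> (f, Df), F(x, z) -> (f, Df, H).
    m, n = A.size
    def F(x=None, z=None):
        if x is None: return 0, matrix(1.0, (n, 1))
        if min(x) <= 0.0: return None
        f = -sum(log(x))
        Df = -(x**-1).T
        if z is None: return f, Df
        H = spdiag(z[0] * x**-2)
        return f, Df, H
    return solvers.cp(F, A=A, b=b)['x']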


c2 = acent(matrix(A), matrix(c))
c2 = np.array(c2)
print c1
c2 = c2.reshape(dim)
print c2
print c1 / c2
print np.sum(c1**2), np.sum(c2**2), np.sum(c1**1), np.sum(c2**1)

dddd
xx = solvers.coneqp(matrix(P), matrix(q), matrix(A), matrix(c))
print xx['x']

s2 = np.sum(c**2)
s1 = np.sum(c)
s1 = np.sum(c)

print s1, s2
Example #45
        blas.gemv(Asc, v, x, alpha=-1.0, beta=1.0, trans='T')
        x[:n] = div(x[:n], ds)

        # x[n:] = (D1+D2)^-1 * ( bx[n:] - D1*bz[:n]  - D2*bz[n:] )
        #         - (D2-D1)*(D1+D2)^-1 * x[:n]         
        x[n:] = div( x[n:] - mul(d1, z[:n]) - mul(d2, z[n:]), d1+d2 )\
                - mul( d3, x[:n] )
	    
        # z[:n] = D1^1/2 * (  x[:n] - x[n:] - bz[:n] )
        # z[n:] = D2^1/2 * ( -x[:n] - x[n:] - bz[n:] ).
        z[:n] = mul( W['di'][:n],  x[:n] - x[n:] - z[:n] ) 
        z[n:] = mul( W['di'][n:], -x[:n] - x[n:] - z[n:] ) 

    return g

x = solvers.coneqp(P, q, G, h, kktsolver = Fkkt)['x'][:n]

I = [ k for k in range(n) if abs(x[k]) > 1e-2 ]
xls = +y
lapack.gels(A[:,I], xls)
ybp = A[:,I]*xls[:len(I)]

print("Sparse basis contains %d basis functions." %len(I))
print("Relative RMS error = %.1e." %(blas.nrm2(ybp-y) / blas.nrm2(y)))

if pylab_installed:
    pylab.figure(2, facecolor='w')
    pylab.subplot(211)
    pylab.plot(ts, y, '-', ts, ybp, 'r--')
    pylab.xlabel('t')
    pylab.ylabel('y(t), yhat(t)')
Example #46
def mcsvm(X, labels, gamma, kernel='linear', sigma=1.0, degree=1):
    """
    Solves the Crammer and Singer multiclass SVM training problem

        maximize    -(1/2) * tr(U' * Q * U) + tr(E' * U)  
        subject to  U <= gamma * E
                    U * 1_m = 0.

    The variable is an (N x m)-matrix U if N is the number of training
    examples and m the number of classes. 

    Q is a positive definite matrix of order N with Q[i,j] = K(xi, xj) 
    where K is a kernel function and xi is the ith row of X.

    The matrix E is an N x m matrix with E[i,j] = 1 if labels[i] = j
    and E[i,j] = 0 otherwise.

    Input arguments.

        X is a N x n matrix.  The rows are the training vectors.

        labels is a list of integers of length N with values 0, ..., m-1.
        labels[i] is the class of training example i.

        gamma is a positive parameter.

        kernel is a string with values 'linear' or 'poly'. 
        'linear':  K(u,v) = u'*v.
        'poly':    K(u,v) = (u'*v / sigma)**degree.

        sigma is a positive number.

        degree is a positive integer.


    Output.

        Returns a function classifier().  If Y is M x n then classifier(Y)
        returns a list with as its kth element

            argmax { j = 0, ..., m-1 | sum_{i=1}^N U[i,j] * K(xi, yk) }

        where yk' = Y[k, :], xi' = X[i, :], and U is the optimal solution
        of the QP.
    """

    N, n = X.size

    m = max(labels) + 1
    E = matrix(0.0, (N, m))
    E[matrix(range(N)) + N * matrix(labels)] = 1.0

    def G(x, y, alpha=1.0, beta=0.0, trans='N'):
        """
        If trans is 'N', x is an N x m matrix, and y is an N*m-vector.

            y := alpha * x[:] + beta * y.

        If trans is 'T', x is an N*m vector, and y is an N x m matrix.

            y[:] := alpha * x + beta * y[:].

        """

        blas.scal(beta, y)
        blas.axpy(x, y, alpha)

    h = matrix(gamma * E, (N * m, 1))

    ones = matrix(1.0, (m, 1))

    def A(x, y, alpha=1.0, beta=0.0, trans='N'):
        """
        If trans is 'N', x is an N x m matrix and y an N-vector.

            y := alpha * x * 1_m + beta y.

        If trans is 'T', x is an N vector and y an N x m matrix.

            y := alpha * x * 1_m' + beta y.
        """

        if trans == 'N':
            blas.gemv(x, ones, y, alpha=alpha, beta=beta)

        else:
            blas.scal(beta, y)
            blas.ger(x, ones, y, alpha=alpha)

    b = matrix(0.0, (N, 1))

    if kernel == 'linear' and N > n:

        def P(x, y, alpha=1.0, beta=0.0):
            """
            x and y are N x m matrices.   

                y =  alpha * X * X' * x + beta * y.

            """

            z = matrix(0.0, (n, m))
            blas.gemm(X, x, z, transA='T')
            blas.gemm(X, z, y, alpha=alpha, beta=beta)

    else:

        if kernel == 'linear':
            # Q = X * X'
            Q = matrix(0.0, (N, N))
            blas.syrk(X, Q)

        elif kernel == 'poly':
            # Q = (X * X' / sigma) ** degree
            Q = matrix(0.0, (N, N))
            blas.syrk(X, Q, alpha=1.0 / sigma)
            Q = Q**degree

        else:
            raise ValueError("invalid kernel type")

        def P(x, y, alpha=1.0, beta=0.0):
            """
            x and y are N x m matrices.   

                y =  alpha * Q * x + beta * y.

            """

            blas.symm(Q, x, y, alpha=alpha, beta=beta)

    if kernel == 'linear' and N > n:  # add separate code for n <= N <= m*n

        H = [matrix(0.0, (n, n)) for k in range(m)]
        S = matrix(0.0, (m * n, m * n))
        Xs = matrix(0.0, (N, n))
        wnm = matrix(0.0, (m * n, 1))
        wN = matrix(0.0, (N, 1))
        D = matrix(0.0, (N, 1))

        def kkt(W):
            """
            KKT solver for

                X*X' * ux  + uy * 1_m' + mat(uz) = bx
                                       ux * 1_m  = by
                            ux - d.^2 .* mat(uz) = mat(bz).

            ux and bx are N x m matrices.
            uy and by are N-vectors.
            uz and bz are N*m-vectors.  mat(uz) is the N x m matrix that 
                satisfies mat(uz)[:] = uz.
            d = mat(W['d']) a positive N x m matrix.

            If we eliminate uz from the last equation using 

                mat(uz) = (ux - mat(bz)) ./ d.^2
        
            we get two equations in ux, uy:

                X*X' * ux + ux ./ d.^2 + uy * 1_m' = bx + mat(bz) ./ d.^2
                                          ux * 1_m = by.

            From the 1st equation,

                uxk = (X*X' + Dk^-2)^-1 * (-uy + bxk + Dk^-2 * bzk)
                    = Dk * (I + Xk*Xk')^-1 * Dk * (-uy + bxk + Dk^-2 * bzk)

            for k = 1, ..., m, where Dk = diag(d[:,k]), Xk = Dk * X, 
            uxk is column k of ux, and bzk is column k of mat(bz).  

            We use the matrix inversion lemma

                ( I + Xk * Xk' )^-1 = I - Xk * (I + Xk' * Xk)^-1 * Xk'
                                    = I - Xk * Hk^-1 * Xk'
                                    = I - Xk * Lk^-T * Lk^-1 *  Xk'

            where Hk = I + Xk' * Xk = Lk * Lk' to write this as

                uxk = Dk * (I - Xk * Hk^-1 * Xk') * Dk *
                      (-uy + bxk + Dk^-2 * bzk)
                    = (Dk^2 - Dk^2 * X * Hk^-1 * X' * Dk^2) *
                      (-uy + bxk + Dk^-2 * bzk).

            Substituting this in the second equation gives an equation 
            for uy:

                sum_k (Dk^2 - Dk^2 * X * Hk^-1 * X' * Dk^2 ) * uy 
                    = -by + sum_k (Dk^2 - Dk^2 * X * Hk^-1 * X' * Dk^2) *
                      ( bxk + Dk^-2 * bzk ),

            i.e., with D = (sum_k Dk^2)^1/2,  Yk = D^-1 * Dk^2 * X * Lk^-T,

                D * ( I - sum_k Yk * Yk' ) * D * uy  
                    = -by + sum_k (Dk^2 - Dk^2 * X * Hk^-1 * X' * Dk^2) * 
                      ( bxk + Dk^-2 *bzk ).

            Another application of the matrix inversion lemma gives

                uy = D^-1 * (I + Y * S^-1 * Y') * D^-1 * 
                     ( -by + sum_k ( Dk^2 - Dk^2 * X * Hk^-1 * X' * Dk^2 ) *
                     ( bxk + Dk^-2 *bzk ) )

            with S = I - Y' * Y,  Y = [ Y1 ... Ym ].  


            Summary:

            1. Compute 

                   uy = D^-1 * (I + Y * S^-1 * Y') * D^-1 * 
                        ( -by + sum_k (Dk^2 - Dk^2 * X * Hk^-1 * X' * Dk^2)
                        * ( bxk + Dk^-2 *bzk ) )
 
            2. For k = 1, ..., m:

                   uxk = (Dk^2 - Dk^2 * X * Hk^-1 * X' * Dk^2) * 
                         (-uy + bxk + Dk^-2 * bzk)

            3. Solve for uz

                   d .* uz = ( ux - mat(bz) ) ./ d.
        
            Return ux, uy, d .* uz.

            """
            ###
            utime0, stime0 = cputime()
            ###

            d = matrix(W['d'], (N, m))
            dsq = matrix(W['d']**2, (N, m))

            # Factor the matrices
            #
            #     H[k] = I + Xk' * Xk
            #          = I + X' * Dk^2 * X.
            #
            # Dk = diag(d[:,k]).

            for k in range(m):

                # H[k] = I
                blas.scal(0.0, H[k])
                H[k][::n + 1] = 1.0

                # Xs = Dk * X
                #    = diag(d[:,k]]) * X
                blas.copy(X, Xs)
                for j in range(n):
                    blas.tbmv(d,
                              Xs,
                              n=N,
                              k=0,
                              ldA=1,
                              offsetA=k * N,
                              offsetx=j * N)

                # H[k] := H[k] + Xs' * Xs
                #       = I + Xk' * Xk
                blas.syrk(Xs, H[k], trans='T', beta=1.0)

                # Factorization H[k] = Lk * Lk'
                lapack.potrf(H[k])

###
            utime, stime = cputime()
            print("Factor Hk's: utime = %.2f, stime = %.2f" \
                %(utime-utime0, stime-stime0))
            utime0, stime0 = cputime()
            ###

            # diag(D) = ( sum_k d[:,k]**2 ) ** 1/2
            #         = ( sum_k Dk^2) ** 1/2.

            blas.gemv(dsq, ones, D)
            D[:] = sqrt(D)

            ###
            #            utime, stime = cputime()
            #            print("Compute D:  utime = %.2f, stime = %.2f" \
            #                %(utime-utime0, stime-stime0))
            utime0, stime0 = cputime()
            ###

            # S = I - Y'* Y is an m x m block matrix.
            # The i,j block of Y' * Y is
            #
            #     Yi' * Yj = Li^-1 * X' * Di^2 * D^-1 * Dj^2 * X * Lj^-T.
            #
            # We compute only the lower triangular blocks in Y'*Y.

            blas.scal(0.0, S)
            for i in range(m):
                for j in range(i + 1):

                    # Xs = Di * Dj * D^-1 * X
                    blas.copy(X, Xs)
                    blas.copy(d, wN, n=N, offsetx=i * N)
                    blas.tbmv(d, wN, n=N, k=0, ldA=1, offsetA=j * N)
                    blas.tbsv(D, wN, n=N, k=0, ldA=1)
                    for k in range(n):
                        blas.tbmv(wN, Xs, n=N, k=0, ldA=1, offsetx=k * N)

                    # block i, j of S is Xs' * Xs (as nonsymmetric matrix so we
                    # get the correct multiple after scaling with Li, Lj)
                    blas.gemm(Xs,
                              Xs,
                              S,
                              transA='T',
                              ldC=m * n,
                              offsetC=(j * n) * m * n + i * n)

###
            utime, stime = cputime()
            print("Form S:      utime = %.2f, stime = %.2f" \
                %(utime-utime0, stime-stime0))
            utime0, stime0 = cputime()
            ###

            for i in range(m):

                # multiply block row i of S on the left with Li^-1
                blas.trsm(H[i],
                          S,
                          m=n,
                          n=(i + 1) * n,
                          ldB=m * n,
                          offsetB=i * n)

                # multiply block column i of S on the right with Li^-T
                blas.trsm(H[i],
                          S,
                          side='R',
                          transA='T',
                          m=(m - i) * n,
                          n=n,
                          ldB=m * n,
                          offsetB=i * n * (m * n + 1))

            blas.scal(-1.0, S)
            S[::(m * n + 1)] += 1.0

            ###
            utime, stime = cputime()
            print("Form S (2):  utime = %.2f, stime = %.2f" \
                %(utime-utime0, stime-stime0))
            utime0, stime0 = cputime()
            ###

            # S = L*L'
            lapack.potrf(S)

            ###
            utime, stime = cputime()
            print("Factor S:    utime = %.2f, stime = %.2f" \
                %(utime-utime0, stime-stime0))
            utime0, stime0 = cputime()

            ###

            def f(x, y, z):
                """
                1. Compute 

                   uy = D^-1 * (I + Y * S^-1 * Y') * D^-1 * 
                        ( -by + sum_k (Dk^2 - Dk^2 * X * Hk^-1 * X' * Dk^2)
                        * ( bxk + Dk^-2 *bzk ) )
 
                2. For k = 1, ..., m:

                   uxk = (Dk^2 - Dk^2 * X * Hk^-1 * X' * Dk^2) * 
                         (-uy + bxk + Dk^-2 * bzk)

                3. Solve for uz

                   d .* uz = ( ux - mat(bz) ) ./ d.
        
                Return ux, uy, d .* uz.
                """

                ###
                utime0, stime0 = cputime()
                ###

                # xk := Dk^2 * xk + zk
                #     = Dk^2 * bxk + bzk
                blas.tbmv(dsq, x, n=N * m, k=0, ldA=1)
                blas.axpy(z, x)

                # y := -y + sum_k ( I - Dk^2 * X * Hk^-1 * X' ) * xk
                #    = -y + x*ones - sum_k Dk^2 * X * Hk^-1 * X' * xk

                # y := -y + x*ones
                blas.gemv(x, ones, y, alpha=1.0, beta=-1.0)

                # wnm = X' * x  (wnm interpreted as an n x m matrix)
                blas.gemm(X, x, wnm, m=n, k=N, n=m, transA='T', ldB=N, ldC=n)

                # wnm[:,k] = Hk \ wnm[:,k] (for wnm as an n x m matrix)
                for k in range(m):
                    lapack.potrs(H[k], wnm, offsetB=k * n)

                for k in range(m):

                    # wN = X * wnm[:,k]
                    blas.gemv(X, wnm, wN, offsetx=n * k)

                    # wN = Dk^2 * wN
                    blas.tbmv(dsq[:, k], wN, n=N, k=0, ldA=1)

                    # y := y - wN
                    blas.axpy(wN, y, -1.0)

                # y = D^-1 * (I + Y * S^-1 * Y') * D^-1 * y
                #
                # Y = [Y1 ... Ym ], Yk = D^-1 * Dk^2 * X * Lk^-T.

                # y := D^-1 * y
                blas.tbsv(D, y, n=N, k=0, ldA=1)

                # wnm =  Y' * y  (interpreted as an Nm vector)
                #     = [ L1^-1 * X' * D1^2 * D^-1 * y;
                #         L2^-1 * X' * D2^2 * D^-1 * y;
                #         ...
                #         Lm^-1 * X' * Dm^2 * D^-1 * y ]

                for k in range(m):

                    # wN = D^-1 * Dk^2 * y
                    blas.copy(y, wN)
                    blas.tbmv(dsq, wN, n=N, k=0, ldA=1, offsetA=k * N)
                    blas.tbsv(D, wN, n=N, k=0, ldA=1)

                    # wnm[:,k] = X' * wN
                    blas.gemv(X, wN, wnm, trans='T', offsety=k * n)

                    # wnm[:,k] = Lk^-1 * wnm[:,k]
                    blas.trsv(H[k], wnm, offsetx=k * n)

                # wnm := S^-1 * wnm  (an mn-vector)
                lapack.potrs(S, wnm)

                # y := y + Y * wnm
                #    = y + D^-1 * [ D1^2 * X * L1^-T ... Dm^2 * X * Lm^-T ]
                #      * wnm

                for k in range(m):

                    # wnm[:,k] = Lk^-T * wnm[:,k]
                    blas.trsv(H[k], wnm, trans='T', offsetx=k * n)

                    # wN = X * wnm[:,k]
                    blas.gemv(X, wnm, wN, offsetx=k * n)

                    # wN = D^-1 * Dk^2 * wN
                    blas.tbmv(dsq, wN, n=N, k=0, ldA=1, offsetA=k * N)
                    blas.tbsv(D, wN, n=N, k=0, ldA=1)

                    # y += wN
                    blas.axpy(wN, y)

                # y := D^-1 *  y
                blas.tbsv(D, y, n=N, k=0, ldA=1)

                # For k = 1, ..., m:
                #
                # xk = (I - Dk^2 * X * Hk^-1 * X') * (-Dk^2 * y + xk)

                # x = x - [ D1^2 * y ... Dm^2 * y] (as an N x m matrix)
                for k in range(m):
                    blas.copy(y, wN)
                    blas.tbmv(dsq, wN, n=N, k=0, ldA=1, offsetA=k * N)
                    blas.axpy(wN, x, -1.0, offsety=k * N)

                # wnm  = X' * x (as an n x m matrix)
                blas.gemm(X, x, wnm, transA='T', m=n, n=m, k=N, ldB=N, ldC=n)

                # wnm[:,k] = Hk^-1 * wnm[:,k]
                for k in range(m):
                    lapack.potrs(H[k], wnm, offsetB=n * k)

                for k in range(m):

                    # wN = X * wnm[:,k]
                    blas.gemv(X, wnm, wN, offsetx=k * n)

                    # wN = Dk^2 * wN
                    blas.tbmv(dsq, wN, n=N, k=0, ldA=1, offsetA=k * N)

                    # x[:,k] := x[:,k] - wN
                    blas.axpy(wN, x, -1.0, n=N, offsety=k * N)

                # z := ( x - z ) ./ d
                blas.axpy(x, z, -1.0)
                blas.scal(-1.0, z)
                blas.tbsv(d, z, n=N * m, k=0, ldA=1)

                ###
                utime, stime = cputime()
                print("Solve:       utime = %.2f, stime = %.2f" \
                    %(utime-utime0, stime-stime0))


###

            return f

    else:

        H = [matrix(0.0, (N, N)) for k in range(m)]
        S = matrix(0.0, (N, N))

        def kkt(W):
            """
            KKT solver for

                Q * ux  + uy * 1_m' + mat(uz) = bx
                                    ux * 1_m  = by
                         ux - d.^2 .* mat(uz) = mat(bz).

            ux and bx are N x m matrices.
            uy and by are N-vectors.
            uz and bz are N*m-vectors.  mat(uz) is the N x m matrix that 
                satisfies mat(uz)[:] = uz.
            d = mat(W['d']) a positive N x m matrix.

            If we eliminate uz from the last equation using 

                mat(uz) = (ux - mat(bz)) ./ d.^2
        
            we get two equations in ux, uy:

                Q * ux + ux ./ d.^2 + uy * 1_m' = bx + mat(bz) ./ d.^2
                                       ux * 1_m = by.

            From the 1st equation 

                uxk = -(Q + Dk)^-1 * uy + (Q + Dk)^-1 * (bxk + Dk * bzk)

            where uxk is column k of ux, Dk = diag(d[:,k].^-2), and bzk is 
            column k of mat(bz).  Substituting this in the second equation
            gives an equation for uy.

            1. Solve for uy

                   sum_k (Q + Dk)^-1 * uy = 
                       sum_k (Q + Dk)^-1 * (bxk + Dk * bzk) - by.
 
            2. Solve for ux (column by column)

                   Q * ux + ux ./ d.^2 = bx + mat(bz) ./ d.^2 - uy * 1_m'.

            3. Solve for uz

                   mat(uz) = ( ux - mat(bz) ) ./ d.^2.
        
            Return ux, uy, d .* uz.
            """

            # D = d.^-2
            D = matrix(W['di']**2, (N, m))

            blas.scal(0.0, S)
            for k in range(m):

                # Hk := Q + Dk
                blas.copy(Q, H[k])
                H[k][::N + 1] += D[:, k]

                # Hk := Hk^-1
                #     = (Q + Dk)^-1
                lapack.potrf(H[k])
                lapack.potri(H[k])

                # S := S + Hk
                #    = S + (Q + Dk)^-1
                blas.axpy(H[k], S)

            # Factor S = sum_k (Q + Dk)^-1
            lapack.potrf(S)

            def f(x, y, z):

                # z := mat(z)
                #    = mat(bz)
                z.size = N, m

                # x := x + D .* z
                #    = bx + mat(bz) ./ d.^2
                x += mul(D, z)

                # y := y - sum_k (Q + Dk)^-1 * X[:,k]
                #    = by - sum_k (Q + Dk)^-1 * (bxk + Dk * bzk)
                for k in range(m):
                    blas.symv(H[k], x[:, k], y, alpha=-1.0, beta=1.0)

                # y := H^-1 * y
                #    = -uy
                lapack.potrs(S, y)

                # x[:,k] := H[k] * (x[:,k] + y)
                #         = (Q + Dk)^-1 * (bxk + bzk ./ d.^2 + y)
                #         = ux[:,k]
                w = matrix(0.0, (N, 1))
                for k in range(m):

                    # x[:,k] := x[:,k] + y
                    blas.axpy(y, x, offsety=N * k, n=N)

                    # w := H[k] * x[:,k]
                    #    = (Q + Dk)^-1 * (bxk + bzk ./ d.^2 + y)
                    blas.symv(H[k], x, w, offsetx=N * k)

                    # x[:,k] := w
                    #         = ux[:,k]
                    blas.copy(w, x, offsety=N * k)

                # y := -y
                #    = uy
                blas.scal(-1.0, y)

                # z := (x - z) ./ d
                blas.axpy(x, z, -1.0)
                blas.tbsv(W['d'], z, n=m * N, k=0, ldA=1)
                blas.scal(-1.0, z)
                z.size = N * m, 1

            return f

    utime0, stime0 = cputime()
    #    solvers.options['debug'] = True
    #    solvers.options['maxiters'] = 1
    solvers.options['refinement'] = 1
    sol = solvers.coneqp(P,
                         -E,
                         G,
                         h,
                         A=A,
                         b=b,
                         kktsolver=kkt,
                         xnewcopy=matrix,
                         xdot=blas.dot,
                         xaxpy=blas.axpy,
                         xscal=blas.scal)
    utime, stime = cputime()
    utime -= utime0
    stime -= stime0
    print("utime = %.2f, stime = %.2f" % (utime, stime))
    U = sol['x']

    if kernel == 'linear':

        # W = X' * U
        W = matrix(0.0, (n, m))
        blas.gemm(X, U, W, transA='T')

        def classifier(Y):
            # return [ argmax of Y[k,:] * W  for k in range(M) ]
            M = Y.size[0]
            S = Y * W
            c = []
            for i in range(M):
                a = zip(list(S[i, :]), range(m))
                a.sort(reverse=True)
                c += [a[0][1]]
            return c

    elif kernel == 'poly':

        def classifier(Y):
            M = Y.size[0]

            # K = Y * X' / sigma
            K = matrix(0.0, (M, N))
            blas.gemm(Y, X, K, transB='T', alpha=1.0 / sigma)

            S = K**degree * U

            c = []
            for i in range(M):
                a = zip(list(S[i, :]), range(m))
                a.sort(reverse=True)
                c += [a[0][1]]
            return c

    else:
        pass

    return classifier  #, utime, sol['iterations']
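
A hedged usage sketch for mcsvm.  The function depends on a cputime() timing helper and on the cvxopt imports of its original module; a minimal stand-in for cputime is defined here so the sketch can run, and the data below are illustrative only.

# Toy training run for mcsvm (assumption: same-module imports available).
import os
from cvxopt import normal

def cputime():
    # stand-in for the timing helper the snippet above expects
    t = os.times()
    return t[0], t[1]

N, nfeat, m = 60, 2, 3
X = normal(N, nfeat)                      # random training vectors
labels = [i % m for i in range(N)]        # every class 0..m-1 represented
classify = mcsvm(X, labels, gamma=1.0, kernel='linear')
print(classify(X[:5, :]))                 # predicted classes for 5 points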
Example #47
        f = -1 * (sum((x ** 2))) + 1
        # print f,"DD",x,z
        Df = 2 * (x).T

        if z is None:
            return f, Df
        H = spdiag(z[0] * x ** -2)
        return f, Df, H

    return solvers.cp(F, A=A, b=b)["x"]


c2 = acent(matrix(A), matrix(c))
c2 = np.array(c2)
print c1
c2 = c2.reshape(dim)
print c2
print c1 / c2
print np.sum(c1 ** 2), np.sum(c2 ** 2), np.sum(c1 ** 1), np.sum(c2 ** 1)

dddd
xx = solvers.coneqp(matrix(P), matrix(q), matrix(A), matrix(c))
print xx["x"]


s2 = np.sum(c ** 2)
s1 = np.sum(c)
s1 = np.sum(c)

print s1, s2
Example #48
def l1regls(A, y):
    """
    
    Returns the solution of the l1-norm regularized least-squares problem
  
        minimize || A*x - y ||_2^2  + || x ||_1.

    """

    m, n = A.size
    q = matrix(1.0, (2*n,1))
    q[:n] = -2.0 * A.T * y

    def P(u, v, alpha = 1.0, beta = 0.0 ):
        """
            v := alpha * 2.0 * [ A'*A, 0; 0, 0 ] * u + beta * v 
        """
        v *= beta
        v[:n] += alpha * 2.0 * A.T * (A * u[:n])


    def G(u, v, alpha=1.0, beta=0.0, trans='N'):
        """
            v := alpha*[I, -I; -I, -I] * u + beta * v  (trans = 'N' or 'T')
        """

        v *= beta
        v[:n] += alpha*(u[:n] - u[n:])
        v[n:] += alpha*(-u[:n] - u[n:])

    h = matrix(0.0, (2*n,1))


    # Customized solver for the KKT system 
    #
    #     [  2.0*A'*A  0    I      -I     ] [x[:n] ]     [bx[:n] ]
    #     [  0         0   -I      -I     ] [x[n:] ]  =  [bx[n:] ].
    #     [  I        -I   -D1^-1   0     ] [zl[:n]]     [bzl[:n]]
    #     [ -I        -I    0      -D2^-1 ] [zl[n:]]     [bzl[n:]]
    #
    # where D1 = W['di'][:n]**2, D2 = W['di'][n:]**2.
    #    
    # We first eliminate zl and x[n:]:
    #
    #     ( 2*A'*A + 4*D1*D2*(D1+D2)^-1 ) * x[:n] = 
    #         bx[:n] - (D2-D1)*(D1+D2)^-1 * bx[n:] + 
    #         D1 * ( I + (D2-D1)*(D1+D2)^-1 ) * bzl[:n] - 
    #         D2 * ( I - (D2-D1)*(D1+D2)^-1 ) * bzl[n:]           
    #
    #     x[n:] = (D1+D2)^-1 * ( bx[n:] - D1*bzl[:n]  - D2*bzl[n:] ) 
    #         - (D2-D1)*(D1+D2)^-1 * x[:n]         
    #
    #     zl[:n] = D1 * ( x[:n] - x[n:] - bzl[:n] )
    #     zl[n:] = D2 * (-x[:n] - x[n:] - bzl[n:] ).
    #
    # The first equation has the form
    #
    #     (A'*A + D)*x[:n]  =  rhs
    #
    # and is equivalent to
    #
    #     [ D    A' ] [ x:n] ]  = [ rhs ]
    #     [ A   -I  ] [ v    ]    [ 0   ].
    #
    # It can be solved as 
    #
    #     ( A*D^-1*A' + I ) * v = A * D^-1 * rhs
    #     x[:n] = D^-1 * ( rhs - A'*v ).

    S = matrix(0.0, (m,m))
    Asc = matrix(0.0, (m,n))
    v = matrix(0.0, (m,1))

    def Fkkt(W):

        # Factor 
        #
        #     S = A*D^-1*A' + I 
        #
        # where D = 2*D1*D2*(D1+D2)^-1, D1 = d[:n]**-2, D2 = d[n:]**-2.

        d1, d2 = W['di'][:n]**2, W['di'][n:]**2

        # ds is square root of diagonal of D
        ds = math.sqrt(2.0) * div( mul( W['di'][:n], W['di'][n:]), 
            sqrt(d1+d2) )
        d3 =  div(d2 - d1, d1 + d2)
     
        # Asc = A*diag(d)^-1/2
        Asc = A * spdiag(ds**-1)

        # S = I + A * D^-1 * A'
        blas.syrk(Asc, S)
        S[::m+1] += 1.0 
        lapack.potrf(S)

        def g(x, y, z):

            x[:n] = 0.5 * ( x[:n] - mul(d3, x[n:]) + 
                mul(d1, z[:n] + mul(d3, z[:n])) - mul(d2, z[n:] - 
                mul(d3, z[n:])) )
            x[:n] = div( x[:n], ds) 

            # Solve
            #
            #     S * v = 0.5 * A * D^-1 * ( bx[:n] - 
            #         (D2-D1)*(D1+D2)^-1 * bx[n:] + 
            #         D1 * ( I + (D2-D1)*(D1+D2)^-1 ) * bzl[:n] - 
            #         D2 * ( I - (D2-D1)*(D1+D2)^-1 ) * bzl[n:] )
                
            blas.gemv(Asc, x, v)
            lapack.potrs(S, v)
            
            # x[:n] = D^-1 * ( rhs - A'*v ).
            blas.gemv(Asc, v, x, alpha=-1.0, beta=1.0, trans='T')
            x[:n] = div(x[:n], ds)

            # x[n:] = (D1+D2)^-1 * ( bx[n:] - D1*bzl[:n]  - D2*bzl[n:] ) 
            #         - (D2-D1)*(D1+D2)^-1 * x[:n]         
            x[n:] = div( x[n:] - mul(d1, z[:n]) - mul(d2, z[n:]), d1+d2 )\
                - mul( d3, x[:n] )
                
            # zl[:n] = D1^1/2 * (  x[:n] - x[n:] - bzl[:n] )
            # zl[n:] = D2^1/2 * ( -x[:n] - x[n:] - bzl[n:] ).
            z[:n] = mul( W['di'][:n],  x[:n] - x[n:] - z[:n] ) 
            z[n:] = mul( W['di'][n:], -x[:n] - x[n:] - z[n:] ) 

        return g

    return solvers.coneqp(P, q, G, h, kktsolver = Fkkt)['x'][:n]
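
A hedged usage sketch for l1regls; it assumes the imports its original module relies on (matrix, spdiag, mul, div, sqrt, math, blas, lapack, solvers) are already in scope, and the random data below are illustrative only.

# Toy call of l1regls on random data (sizes are arbitrary).
from cvxopt import normal

m, n = 100, 400
A = normal(m, n)                          # random design matrix
y = normal(m, 1)                          # random observations
x = l1regls(A, y)
print("nonzero coefficients: %d of %d" %
      (sum(1 for v in x if abs(v) > 1e-6), n))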
Example #49
def nrmapp(A, B, C = None, d = None, G = None, h = None): 
    """

    Solves the regularized nuclear norm approximation problem 
    
        minimize    || A(x) + B ||_* + 1/2 x'*C*x + d'*x
        subject to  G*x <= h

    and its dual

        maximize    -h'*z + tr(B'*Z) - 1/2 v'*C*v 
        subject to  d + G'*z + A'(Z) = C*v 
                    z >= 0
                    || Z || <= 1.

    A(x) is a linear mapping that maps n-vectors x to (p x q)-matrices A(x).

    ||.||_* is the nuclear norm (sum of singular values).  

    A'(Z) is the adjoint mapping of A(x).

    ||.|| is the maximum singular value norm.


    INPUT 

    A       real dense or sparse matrix of size (p*q, n).  Its columns are
            the coefficients A_i of the mapping 

                A: reals^n --> reals^pxq,   A(x) = sum_i=1^n x_i * A_i, 
                     
            stored in column-major order, as p*q-vectors.
        
    B       real dense or sparse matrix of size (p, q), with p >= q.
    
    C       real symmetric positive semidefinite dense or sparse matrix of 
            order n.  Only the lower triangular part of C is accessed.
            The default value is a zero matrix.

    d       real dense matrix of size (n, 1).  The default value is a zero
            vector.
    
    G       real dense or sparse matrix of size (m, n), with m >= 0.  
            The default value is a matrix of size (0, n).
    
    h       real dense matrix of size (m, 1).  The default value is a 
            matrix of size (0, 1).


    OUTPUT

    status  'optimal', 'primal infeasible', or 'unknown'. 

    x       'd' matrix of size (n, 1) if status is 'optimal'; 
            None otherwise.

    z       'd' matrix of size (m, 1) if status is 'optimal' or 'primal 
            infeasible'; None otherwise.

    Z       'd' matrix of size (p, q) if status is 'optimal' or 'primal
            infeasible'; None otherwise.


    If status is 'optimal', then x, z, Z are approximate solutions of the
    optimality conditions

        C * x  + G' * z + A'(Z) + d = 0  
        G * x <= h 
        z >= 0,  || Z || <= 1
        z' * (h - G*x) = 0
        tr (Z' * (A(x) + B)) = || A(x) + B ||_*.

    The last (complementary slackness) condition can be replaced by the
    following.  If the singular value decomposition of A(x) + B is

        A(x) + B = [ U1  U2 ] * diag(s, 0) * [ V1  V2 ]',

    with s > 0, then

        Z = U1 * V1' + U2 * W * V2',  || W || <= 1. 


    If status is 'primal infeasible', then Z = 0 and z is a certificate of
    infeasibility for the inequalities G * x <= h, i.e., a vector that
    satisfies

        h' * z = 1,  G' * z = 0,  z >= 0.

    """

    if type(B) not in (matrix, spmatrix) or B.typecode is not 'd':
        raise TypeError, "B must be a real dense or sparse matrix"
    p, q = B.size
    if p < q:
        raise ValueError, "row dimension of B must be greater than or "\
            "equal to column dimension"
    
    if type(A) not in (matrix, spmatrix) or A.typecode is not 'd' or \
        A.size[0] != p*q:
        raise TypeError, "A must be a real dense or sparse matrix with "\
            "p*q rows if B has size (p, q)"
    n = A.size[1]
    
    if G is None:  G = spmatrix([], [], [], (0, n))
    if h is None:  h = matrix(0.0, (0, 1))
    if type(h) is not matrix or h.typecode is not 'd' or h.size[1] != 1:
        raise TypeError, "h must be a real dense matrix with one column"
    m = h.size[0]
    if type(G) not in (matrix, spmatrix) or G.typecode is not 'd' or \
        G.size != (m, n):
        raise TypeError, "G must be a real dense matrix or sparse matrix "\
            "of size (m, n) if h has length m and A has n columns"
       
    if C is None: C = spmatrix(0.0, [], [], (n,n))
    if d is None: d = matrix(0.0, (n, 1))
    if type(C) not in (matrix, spmatrix) or C.typecode is not 'd' or \
        C.size != (n,n):
        raise TypeError, "C must be real dense or sparse matrix of size "\
            "(n, n) if A has n columns"
    if type(d) is not matrix or d.typecode is not 'd' or d.size != (n,1):
        raise TypeError, "d must be a real matrix of size (n, 1) if A has "\
            "n columns"


    # The problem is solved as a cone program
    #
    #     minimize    (1/2) * x'*C*x + d'*x  + (1/2) * (tr X1 + tr X2)
    #     subject to  G*x <= h
    #                 [ X1         (A(x) + B)' ]
    #                 [ A(x) + B   X2          ]  >= 0.
    #
    # The primal variable is stored as a list [ x, X1, X2 ].

    def xnewcopy(u): 
        return [ matrix(u[0]), matrix(u[1]), matrix(u[2]) ]
    def xdot(u,v):
        return blas.dot(u[0], v[0]) + misc.sdot2(u[1], v[1]) + \
            misc.sdot2(u[2], v[2])
    def xscal(alpha, u):
        blas.scal(alpha, u[0])
        blas.scal(alpha, u[1])
        blas.scal(alpha, u[2])
    def xaxpy(u, v, alpha = 1.0):
        blas.axpy(u[0], v[0], alpha)
        blas.axpy(u[1], v[1], alpha)
        blas.axpy(u[2], v[2], alpha)

    def Pf(u, v, alpha = 1.0, beta = 0.0):  
        base.symv(C, u[0], v[0], alpha = alpha, beta = beta)
        blas.scal(beta, v[1])
        blas.scal(beta, v[2])

    c = [ d, matrix(0.0, (q,q)), matrix(0.0, (p,p)) ]
    c[1][::q+1] = 0.5
    c[2][::p+1] = 0.5


    # If V is a p+q x p+q matrix 
    #
    #         [ V11  V12 ]
    #     V = [          ]
    #         [ V21  V22 ] 
    #
    # with V11 q x q,  V21 p x q, V12 q x p, and V22 p x p, then I11, I21,
    # I22 are the index sets defined by
    #
    #     V[I11] = V11[:],  V[I21] = V21[:],  V[I22] = V22[:].
    #

    I11 = matrix([ i + j*(p+q) for j in xrange(q) for i in xrange(q) ])
    I21 = matrix([ q + i + j*(p+q) for j in xrange(q) for i in xrange(p) ])
    I22 = matrix([ (p+q)*q + q + i + j*(p+q) for j in xrange(p) for 
       i in xrange(p) ])

    dims = {'l': m, 'q': [], 's': [p+q]}
    hh = matrix(0.0, (m + (p+q)**2, 1))
    hh[:m] = h
    hh[m + I21] = B[:]

    def Gf(u, v, alpha = 1.0, beta = 0.0, trans = 'N'):

        if trans == 'N':
 
            # v[:m] := alpha * G * u[0] + beta * v[:m]
            base.gemv(G, u[0], v, alpha = alpha, beta = beta)

            # v[m:] := alpha * [-u[1],  -A(u[0])';  -A(u[0]), -u[2]]
            #          + beta * v[m:]
            blas.scal(beta, v, offset = m)
            v[m + I11] -= alpha * u[1][:]
            v[m + I21] -= alpha * A * u[0]
            v[m + I22] -= alpha * u[2][:]

        else:   
           
            # v[0] := alpha * ( G.T * u[:m] - 2.0 * A.T * u[m + I21] )
            #         + beta v[1]
            base.gemv(G, u, v[0], trans = 'T', alpha = alpha, beta = beta)  
            base.gemv(A, u[m + I21], v[0], trans = 'T', alpha = -2.0*alpha,
                beta = 1.0)

            # v[1] := -alpha * u[m + I11] + beta * v[1]
            blas.scal(beta, v[1])
            blas.axpy(u[m + I11], v[1], alpha = -alpha)

            # v[2] := -alpha * u[m + I22] + beta * v[2]
            blas.scal(beta, v[2])
            blas.axpy(u[m + I22], v[2], alpha = -alpha)


    def Af(u, v, alpha = 1.0, beta = 0.0, trans = 'N'):
        if trans == 'N':
            pass
        else:
            blas.scal(beta, v[0])
            blas.scal(beta, v[1])
            blas.scal(beta, v[2])


    L1 = matrix(0.0, (q, q))
    L2 = matrix(0.0, (p, p))
    T21 = matrix(0.0, (p, q))
    s = matrix(0.0, (q, 1))
    SS = matrix(0.0, (q, q))
    V1 = matrix(0.0, (q, q))
    V2 = matrix(0.0, (p, p))
    As = matrix(0.0, (p*q, n))
    As2 = matrix(0.0, (p*q, n))
    tmp = matrix(0.0, (p, q))
    a = matrix(0.0, (p+q, p+q))
    H = matrix(0.0, (n,n))
    Gs = matrix(0.0, (m, n))
    Q1 = matrix(0.0, (q, p+q))
    Q2 = matrix(0.0, (p, p+q))
    tau1 = matrix(0.0, (q,1))
    tau2 = matrix(0.0, (p,1))
    bz11 = matrix(0.0, (q,q))
    bz22 = matrix(0.0, (p,p))
    bz21 = matrix(0.0, (p,q))

    # Suppose V = [V1; V2] is p x q with V1 q x q.  If v = V[:] then
    # v[Itriu] are the strict upper triangular entries of V1 stored
    # columnwise.
    Itriu = [ i + j*p for j in xrange(1,q) for i in xrange(j) ]

    # v[Itril] are the strict lower triangular entries of V1 stored rowwise.
    Itril = [ j + i*p for j in xrange(1,q) for i in xrange(j) ]

    # v[Idiag] are the diagonal entries of V1.
    Idiag = [ i*(p+1) for i in xrange(q) ]

    # v[Itriu2] are the upper triangular entries of V1, with the diagonal
    # entries stored first, followed by the strict upper triangular entries
    # stored columnwise.
    Itriu2 = Idiag + Itriu

    # If V is a q x q matrix and v = V[:], then v[Itril2] are the strict
    # lower triangular entries of V stored columnwise and v[Itril3] are
    # the strict lower triangular entries stored rowwise.
    Itril2 = [ i + j*q for j in xrange(q) for i in xrange(j+1,q) ]
    Itril3 = [ i + j*q for i in xrange(q) for j in xrange(i) ]

    P = spmatrix(0.0, Itriu, Itril, (p*q, p*q))
    D = spmatrix(1.0, range(p*q), range(p*q))
    DV = matrix(1.0, (p*q, 1))


    def F(W):
        """
        Create a solver for the linear equations

                                C * ux + G' * uzl - 2*A'(uzs21) = bx
                                                         -uzs11 = bX1
                                                         -uzs22 = bX2
                                            G * ux - Dl^2 * uzl = bzl
            [ -uX1   -A(ux)' ]          [ uzs11 uzs21' ]     
            [                ] - r*r' * [              ] * r*r' = bzs
            [ -A(ux) -uX2    ]          [ uzs21 uzs22  ]

        where Dl = diag(W['l']), r = W['r'][0].  

        On entry, x = (bx, bX1, bX2) and z = [ bzl; bzs[:] ].
        On exit, x = (ux, uX1, uX2) and z = [ Dl*uzl; (r'*uzs*r)[:] ].


        1. Compute matrices V1, V2 such that (with T = r*r')
        
               [ V1   0   ] [ T11  T21' ] [ V1'  0  ]   [ I  S' ]
               [          ] [           ] [         ] = [       ]
               [ 0    V2' ] [ T21  T22  ] [ 0    V2 ]   [ S  I  ]
        
           and S = [ diag(s); 0 ], s a positive q-vector.

        2. Factor the mapping X -> X + S * X' * S:

               X + S * X' * S = L( L'( X )). 

        3. Compute scaled mappings: a matrix As with as its columns the 
           coefficients of the scaled mapping 

               L^-1( V2' * A() * V1' ) 

           and the matrix Gs = Dl^-1 * G.

        4. Cholesky factorization of H = C + Gs'*Gs + 2*As'*As.

        """


        # 1. Compute V1, V2, s.  

        r = W['r'][0]

        # LQ factorization R[:q, :] = L1 * Q1.
        lapack.lacpy(r, Q1, m = q)
        lapack.gelqf(Q1, tau1)
        lapack.lacpy(Q1, L1, n = q, uplo = 'L')
        lapack.orglq(Q1, tau1)

        # LQ factorization R[q:, :] = L2 * Q2.
        lapack.lacpy(r, Q2, m = p, offsetA = q)
        lapack.gelqf(Q2, tau2)
        lapack.lacpy(Q2, L2, n = p, uplo = 'L')
        lapack.orglq(Q2, tau2)


        # V2, V1, s are computed from an SVD: if
        # 
        #     Q2 * Q1' = U * diag(s) * V',
        #
        # then V1 = V' * L1^-1 and V2 = L2^-T * U.
    
        # T21 = Q2 * Q1.T  
        blas.gemm(Q2, Q1, T21, transB = 'T')

        # SVD T21 = U * diag(s) * V'.  Store U in V2 and V' in V1.
        lapack.gesvd(T21, s, jobu = 'A', jobvt = 'A', U = V2, Vt = V1) 

#        # Q2 := Q2 * Q1' without extracting Q1; store T21 in Q2.
#        This would require lapack.ormlq or lapack.unmlq.

        # V2 = L2^-T * U   
        blas.trsm(L2, V2, transA = 'T') 

        # V1 = V' * L1^-1 
        blas.trsm(L1, V1, side = 'R') 


        # 2. Factorization X + S * X' * S = L( L'( X )).  
        #
        # The factor L is stored as a diagonal matrix D and a sparse lower 
        # triangular matrix P, such that  
        #
        #     L(X)[:] = D**-1 * (I + P) * X[:] 
        #     L^-1(X)[:] = D * (I - P) * X[:].

        # SS is q x q with SS[i,j] = si*sj.
        blas.scal(0.0, SS)
        blas.syr(s, SS)    
        
        # For a p x q matrix X, P*X[:] is Y[:] where 
        #
        #     Yij = si * sj * Xji  if i < j
        #         = 0              otherwise.
        # 
        P.V = SS[Itril2]

        # For a p x q matrix X, D*X[:] is Y[:] where 
        #
        #     Yij = Xij / sqrt( 1 - si^2 * sj^2 )  if i < j
        #         = Xii / sqrt( 1 + si^2 )         if i = j
        #         = Xij                            otherwise.
        # 
        DV[Idiag] = sqrt(1.0 + SS[::q+1])
        DV[Itriu] = sqrt(1.0 - SS[Itril3]**2)
        D.V = DV**-1


        # 3. Scaled linear mappings 
         
        # Ask :=  V2' * Ask * V1' 
        blas.scal(0.0, As)
        base.axpy(A, As)
        for i in xrange(n):
            # tmp := V2' * As[i, :]
            blas.gemm(V2, As, tmp, transA = 'T', m = p, n = q, k = p,
                ldB = p, offsetB = i*p*q)
            # As[:,i] := tmp * V1'
            blas.gemm(tmp, V1, As, transB = 'T', m = p, n = q, k = q,
                ldC = p, offsetC = i*p*q)

        # As := D * (I - P) * As 
        #     = L^-1 * As.
        blas.copy(As, As2)
        base.gemm(P, As, As2, alpha = -1.0, beta = 1.0)
        base.gemm(D, As2, As)

        # Gs := Dl^-1 * G 
        blas.scal(0.0, Gs)
        base.axpy(G, Gs)
        for k in xrange(n):
            blas.tbmv(W['di'], Gs, n = m, k = 0, ldA = 1, offsetx = k*m)


        # 4. Cholesky factorization of H = C + Gs' * Gs + 2 * As' * As.

        blas.syrk(As, H, trans = 'T', alpha = 2.0)
        blas.syrk(Gs, H, trans = 'T', beta = 1.0)
        base.axpy(C, H)   
        lapack.potrf(H)


        def f(x, y, z):
            """

            Solve 

                              C * ux + G' * uzl - 2*A'(uzs21) = bx
                                                       -uzs11 = bX1
                                                       -uzs22 = bX2
                                           G * ux - D^2 * uzl = bzl
                [ -uX1   -A(ux)' ]       [ uzs11 uzs21' ]     
                [                ] - T * [              ] * T = bzs.
                [ -A(ux) -uX2    ]       [ uzs21 uzs22  ]

            On entry, x = (bx, bX1, bX2) and z = [ bzl; bzs[:] ].
            On exit, x = (ux, uX1, uX2) and z = [ D*uzl; (r'*uzs*r)[:] ].

            Define X = uzs21, Z = T * uzs * T:   
 
                      C * ux + G' * uzl - 2*A'(X) = bx
                                [ 0  X' ]               [ bX1 0   ]
                            T * [       ] * T - Z = T * [         ] * T
                                [ X  0  ]               [ 0   bX2 ]
                               G * ux - D^2 * uzl = bzl
                [ -uX1   -A(ux)' ]   [ Z11 Z21' ]     
                [                ] - [          ] = bzs
                [ -A(ux) -uX2    ]   [ Z21 Z22  ]

            Return x = (ux, uX1, uX2), z = [ D*uzl; (rti'*Z*rti)[:] ].

            We use the congruence transformation 

                [ V1   0   ] [ T11  T21' ] [ V1'  0  ]   [ I  S' ]
                [          ] [           ] [         ] = [       ]
                [ 0    V2' ] [ T21  T22  ] [ 0    V2 ]   [ S  I  ]

            and the factorization 

                X + S * X' * S = L( L'(X) ) 

            to write this as

                                  C * ux + G' * uzl - 2*A'(X) = bx
                L'(V2^-1 * X * V1^-1) - L^-1(V2' * Z21 * V1') = bX
                                           G * ux - D^2 * uzl = bzl
                            [ -uX1   -A(ux)' ]   [ Z11 Z21' ]     
                            [                ] - [          ] = bzs,
                            [ -A(ux) -uX2    ]   [ Z21 Z22  ]

            or

                C * ux + Gs' * uuzl - 2*As'(XX) = bx
                                      XX - ZZ21 = bX
                                 Gs * ux - uuzl = D^-1 * bzl
                                 -As(ux) - ZZ21 = bbzs_21
                                     -uX1 - Z11 = bzs_11
                                     -uX2 - Z22 = bzs_22

            if we introduce scaled variables

                uuzl = D * uzl
                  XX = L'(V2^-1 * X * V1^-1) 
                     = L'(V2^-1 * uzs21 * V1^-1)
                ZZ21 = L^-1(V2' * Z21 * V1') 

            and define

                bbzs_21 = L^-1(V2' * bzs_21 * V1')
                                           [ bX1  0   ]
                     bX = L^-1( V2' * (T * [          ] * T)_21 * V1').
                                           [ 0    bX2 ]           
 
            Eliminating Z21 gives 

                C * ux + Gs' * uuzl - 2*As'(XX) = bx
                                 Gs * ux - uuzl = D^-1 * bzl
                                   -As(ux) - XX = bbzs_21 - bX
                                     -uX1 - Z11 = bzs_11
                                     -uX2 - Z22 = bzs_22 

            and eliminating uuzl and XX gives

                        H * ux = bx + Gs' * D^-1 * bzl + 2*As'(bX - bbzs_21)
                Gs * ux - uuzl = D^-1 * bzl
                  -As(ux) - XX = bbzs_21 - bX
                    -uX1 - Z11 = bzs_11
                    -uX2 - Z22 = bzs_22.


            In summary, we can use the following algorithm: 

            1. bXX := bX - bbzs21
                                        [ bX1 0   ]
                    = L^-1( V2' * ((T * [         ] * T)_21 - bzs_21) * V1')
                                        [ 0   bX2 ]

            2. Solve H * ux = bx + Gs' * D^-1 * bzl + 2*As'(bXX).

            3. From ux, compute 

                   uuzl = Gs*ux - D^-1 * bzl and 
                      X = V2 * L^-T(-As(ux) + bXX) * V1.

            4. Return ux, uuzl, 

                   rti' * Z * rti = r' * [ -bX1, X'; X, -bX2 ] * r
 
               and uX1 = -Z11 - bzs_11,  uX2 = -Z22 - bzs_22.

            """

            # Save bzs_11, bzs_22, bzs_21.
            lapack.lacpy(z, bz11, uplo = 'L', m = q, n = q, ldA = p+q,
                offsetA = m)
            lapack.lacpy(z, bz21, m = p, n = q, ldA = p+q, offsetA = m+q)
            lapack.lacpy(z, bz22, uplo = 'L', m = p, n = p, ldA = p+q,
                offsetA = m + (p+q+1)*q)


            # zl := D^-1 * zl
            #     = D^-1 * bzl
            blas.tbmv(W['di'], z, n = m, k = 0, ldA = 1)


            # zs := r' * [ bX1, 0; 0, bX2 ] * r.

            # zs := [ bX1, 0; 0, bX2 ]
            blas.scal(0.0, z, offset = m)
            lapack.lacpy(x[1], z, uplo = 'L', m = q, n = q, ldB = p+q,
                offsetB = m)
            lapack.lacpy(x[2], z, uplo = 'L', m = p, n = p, ldB = p+q,
                offsetB = m + (p+q+1)*q)

            # scale diagonal of zs by 1/2
            blas.scal(0.5, z, inc = p+q+1, offset = m)

            # a := tril(zs)*r  
            blas.copy(r, a)
            blas.trmm(z, a, side = 'L', m = p+q, n = p+q, ldA = p+q, ldB = 
                p+q, offsetA = m)

            # zs := a'*r + r'*a 
            blas.syr2k(r, a, z, trans = 'T', n = p+q, k = p+q, ldB = p+q,
                ldC = p+q, offsetC = m)



            # bz21 := L^-1( V2' * ((r * zs * r')_21 - bz21) * V1')
            #
            #                           [ bX1 0   ]
            #       = L^-1( V2' * ((T * [         ] * T)_21 - bz21) * V1').
            #                           [ 0   bX2 ]

            # a = [ r21 r22 ] * z
            #   = [ r21 r22 ] * r' * [ bX1, 0; 0, bX2 ] * r
            #   = [ T21  T22 ] * [ bX1, 0; 0, bX2 ] * r
            blas.symm(z, r, a, side = 'R', m = p, n = p+q, ldA = p+q, 
                ldC = p+q, offsetB = q)
    
            # bz21 := -bz21 + a * [ r11, r12 ]'
            #       = -bz21 + (T * [ bX1, 0; 0, bX2 ] * T)_21
            blas.gemm(a, r, bz21, transB = 'T', m = p, n = q, k = p+q, 
                beta = -1.0, ldA = p+q, ldC = p)

            # bz21 := V2' * bz21 * V1'
            #       = V2' * (-bz21 + (T*[bX1, 0; 0, bX2]*T)_21) * V1'
            blas.gemm(V2, bz21, tmp, transA = 'T', m = p, n = q, k = p, 
                ldB = p)
            blas.gemm(tmp, V1, bz21, transB = 'T', m = p, n = q, k = q, 
                ldC = p)

            # bz21[:] := D * (I-P) * bz21[:] 
            #       = L^-1 * bz21[:]
            #       = bXX[:]
            blas.copy(bz21, tmp)
            base.gemv(P, bz21, tmp, alpha = -1.0, beta = 1.0)
            base.gemv(D, tmp, bz21)


            # Solve H * ux = bx + Gs' * D^-1 * bzl + 2*As'(bXX).

            # x[0] := x[0] + Gs'*zl + 2*As'(bz21) 
            #       = bx + G' * D^-1 * bzl + 2 * As'(bXX)
            blas.gemv(Gs, z, x[0], trans = 'T', alpha = 1.0, beta = 1.0)
            blas.gemv(As, bz21, x[0], trans = 'T', alpha = 2.0, beta = 1.0) 

            # x[0] := H \ x[0] 
            #      = ux
            lapack.potrs(H, x[0])


            # uuzl = Gs*ux - D^-1 * bzl
            blas.gemv(Gs, x[0], z, alpha = 1.0, beta = -1.0)

            
            # bz21 := V2 * L^-T(-As(ux) + bz21) * V1
            #       = X
            blas.gemv(As, x[0], bz21, alpha = -1.0, beta = 1.0)
            blas.tbsv(DV, bz21, n = p*q, k = 0, ldA = 1)
            blas.copy(bz21, tmp)
            base.gemv(P, tmp, bz21, alpha = -1.0, beta = 1.0, trans = 'T')
            blas.gemm(V2, bz21, tmp)
            blas.gemm(tmp, V1, bz21)


            # zs := -zs + r' * [ 0, X'; X, 0 ] * r
            #     = r' * [ -bX1, X'; X, -bX2 ] * r.

            # a := bz21 * [ r11, r12 ]
            #   =  X * [ r11, r12 ]
            blas.gemm(bz21, r, a, m = p, n = p+q, k = q, ldA = p, ldC = p+q)
            
            # z := -z + [ r21, r22 ]' * a + a' * [ r21, r22 ]
            #    = rti' * uzs * rti
            blas.syr2k(r, a, z, trans = 'T', beta = -1.0, n = p+q, k = p,
                offsetA = q, offsetC = m, ldB = p+q, ldC = p+q)  



            # uX1 = -Z11 - bzs_11 
            #     = -(r*zs*r')_11 - bzs_11
            # uX2 = -Z22 - bzs_22 
            #     = -(r*zs*r')_22 - bzs_22


            blas.copy(bz11, x[1])
            blas.copy(bz22, x[2])

            # scale diagonal of zs by 1/2
            blas.scal(0.5, z, inc = p+q+1, offset = m)

            # a := r*tril(zs)  
            blas.copy(r, a)
            blas.trmm(z, a, side = 'R', m = p+q, n = p+q, ldA = p+q, ldB = 
                p+q, offsetA = m)

            # x[1] := -x[1] - a[:q,:] * r[:q, :]' - r[:q,:] * a[:q,:]'
            #       = -bzs_11 - (r*zs*r')_11
            blas.syr2k(a, r, x[1], n = q, alpha = -1.0, beta = -1.0) 

            # x[2] := -x[2] - a[q:,:] * r[q:, :]' - r[q:,:] * a[q:,:]'
            #       = -bzs_22 - (r*zs*r')_22
            blas.syr2k(a, r, x[2], n = p, alpha = -1.0, beta = -1.0, 
                offsetA = q, offsetB = q)

            # scale diagonal of zs by 1/2
            blas.scal(2.0, z, inc = p+q+1, offset = m)


        return f


    if C:
        sol = solvers.coneqp(Pf, c, Gf, hh, dims, Af, kktsolver = F, 
            xnewcopy = xnewcopy, xdot = xdot, xaxpy = xaxpy, xscal = xscal) 
    else: 
        sol = solvers.conelp(c, Gf, hh, dims, Af, kktsolver = F, 
            xnewcopy = xnewcopy, xdot = xdot, xaxpy = xaxpy, xscal = xscal) 

    if sol['status'] == 'optimal':
        x = sol['x'][0]
        z = sol['z'][:m]
        Z = sol['z'][m:]
        Z.size = (p + q, p + q)
        Z = -2.0 * Z[-p:, :q]

    elif sol['status'] == 'primal infeasible':
        x = None
        z = sol['z'][:m]
        Z = sol['z'][m:]
        Z.size = (p + q, p + q)
        Z = -2.0 * Z[-p:, :q]

    else:
        x, z, Z = None, None, None

    return {'status': sol['status'], 'x': x, 'z': z, 'Z': Z }
Example #50
0
S = matrix([[ 4e-2,  6e-3, -4e-3,    0.0 ],
            [ 6e-3,  1e-2,  0.0,     0.0 ],
            [-4e-3,  0.0,   2.5e-3,  0.0 ],
            [ 0.0,   0.0,   0.0,     0.0 ]])
pbar = matrix([.12, .10, .07, .03])
n = 4
G = matrix(0.0, (n,n))
G[::n+1] = -1.0
h = matrix(0.0, (n,1))
A = matrix(1.0, (1,n))
b = matrix(1.0)

# Compute trade-off.
N = 100
mus = [ 10**(5.0*t/N-1.0) for t in range(N) ]
sol = solvers.qp(mus[0]*S, -pbar, G, h, A, b)
# equivalent call through the cone QP interface (dims defaults to a single 'l' block)
sol = solvers.coneqp(mus[0]*S, -pbar, G, h, A=A, b=b)

portfolios = [ solvers.qp(mu*S, -pbar, G, h, A, b)['x'] for mu in mus ]
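# A short follow-up sketch (not part of the original snippet): reading the
# risk/return trade-off off the computed portfolios.  It assumes that blas has
# been imported from cvxopt and sqrt from math, which the fragment above omits.
returns = [ blas.dot(pbar, x) for x in portfolios ]
risks = [ sqrt(blas.dot(x, S*x)) for x in portfolios ]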



## From SOCP to Cone LP

c = matrix([-2., 1., 5.])
G = [ matrix( [[12., 13., 12.], [6., -3., -12.], [-5., -5., 6.]] ) ]
G += [ matrix( [[3., 3., -1., 1.], [-6., -6., -9., 19.], [10., -2., -2., -3.]] ) ]
h = [ matrix( [-12., -3., -2.] ),  matrix( [27., 0., 3., -42.] ) ]
sol = solvers.socp(c, Gq = G, hq = h)
sol['status']
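# A hedged sketch of the cone LP form the heading refers to: the two
# second-order cone blocks above can be stacked into a single (G, h) pair,
# described through a dims dictionary, and passed to solvers.conelp.  The
# stacked data below is simply G[0], G[1] and h[0], h[1] written out explicitly.
G0 = matrix([ [12., 13.,  12.,  3.,  3., -1.,  1.],
              [ 6., -3., -12., -6., -6., -9., 19.],
              [-5., -5.,   6., 10., -2., -2., -3.] ])
h0 = matrix([ -12., -3., -2., 27., 0., 3., -42. ])
dims = {'l': 0, 'q': [3, 4], 's': []}
sol = solvers.conelp(c, G0, h0, dims)
sol['status']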

Example #51
0
import math

from cvxopt import blas, div, lapack, matrix, mul, solvers, spdiag, sqrt


def l1regls(A, y, alpha=1.0, show_progress=1):
    """
    
    Returns the solution of l1-norm regularized least-squares problem
  
        minimize || A*x - y ||_2^2  + alpha * || x ||_1.

    Parameters: A : 2D cvxopt.matrix for the design matrix in (m, n)
                y : 2D cvxopt.matrix for the observation in (m, 1)
                alpha : float for the degree of shrinkage
                show_progress : bool, show solving progress
    Returns:    x : 2D cvxopt.matrix in (n, 1)
    Example:    A = matrix(np.array(-C, dtype=float))
                b = matrix(np.array(closure_int, dtype=float).reshape(C.shape[0], -1))
                x = np.round(l1regls(A, b, alpha=1e-2))
    """
    solvers.options['show_progress'] = show_progress

    m, n = A.size
    q = matrix(alpha, (2 * n, 1))
    q[:n] = -2.0 * A.T * y

    def P(u, v, alpha=1.0, beta=0.0):
        """
            v := alpha * 2.0 * [ A'*A, 0; 0, 0 ] * u + beta * v 
        """
        v *= beta
        v[:n] += alpha * 2.0 * A.T * (A * u[:n])

    def G(u, v, alpha=1.0, beta=0.0, trans='N'):
        """
            v := alpha*[I, -I; -I, -I] * u + beta * v  (trans = 'N' or 'T')
        """

        v *= beta
        v[:n] += alpha * (u[:n] - u[n:])
        v[n:] += alpha * (-u[:n] - u[n:])

    h = matrix(0.0, (2 * n, 1))

    # Customized solver for the KKT system
    #
    #     [  2.0*A'*A  0    I      -I     ] [x[:n] ]     [bx[:n] ]
    #     [  0         0   -I      -I     ] [x[n:] ]  =  [bx[n:] ].
    #     [  I        -I   -D1^-1   0     ] [zl[:n]]     [bzl[:n]]
    #     [ -I        -I    0      -D2^-1 ] [zl[n:]]     [bzl[n:]]
    #
    # where D1 = W['di'][:n]**2, D2 = W['di'][n:]**2.
    #
    # We first eliminate zl and x[n:]:
    #
    #     ( 2*A'*A + 4*D1*D2*(D1+D2)^-1 ) * x[:n] =
    #         bx[:n] - (D2-D1)*(D1+D2)^-1 * bx[n:] +
    #         D1 * ( I + (D2-D1)*(D1+D2)^-1 ) * bzl[:n] -
    #         D2 * ( I - (D2-D1)*(D1+D2)^-1 ) * bzl[n:]
    #
    #     x[n:] = (D1+D2)^-1 * ( bx[n:] - D1*bzl[:n]  - D2*bzl[n:] )
    #         - (D2-D1)*(D1+D2)^-1 * x[:n]
    #
    #     zl[:n] = D1 * ( x[:n] - x[n:] - bzl[:n] )
    #     zl[n:] = D2 * (-x[:n] - x[n:] - bzl[n:] ).
    #
    # The first equation has the form
    #
    #     (A'*A + D)*x[:n]  =  rhs
    #
    # and is equivalent to
    #
    #     [ D    A' ] [ x:n] ]  = [ rhs ]
    #     [ A   -I  ] [ v    ]    [ 0   ].
    #
    # It can be solved as
    #
    #     ( A*D^-1*A' + I ) * v = A * D^-1 * rhs
    #     x[:n] = D^-1 * ( rhs - A'*v ).

    S = matrix(0.0, (m, m))
    Asc = matrix(0.0, (m, n))
    v = matrix(0.0, (m, 1))

    def Fkkt(W):

        # Factor
        #
        #     S = A*D^-1*A' + I
        #
        # where D = 2*D1*D2*(D1+D2)^-1, D1 = d[:n]**-2, D2 = d[n:]**-2.

        d1, d2 = W['di'][:n]**2, W['di'][n:]**2

        # ds is square root of diagonal of D
        ds = math.sqrt(2.0) * div(mul(W['di'][:n], W['di'][n:]), sqrt(d1 + d2))
        d3 = div(d2 - d1, d1 + d2)

        # Asc = A*diag(d)^-1/2
        Asc = A * spdiag(ds**-1)

        # S = I + A * D^-1 * A'
        blas.syrk(Asc, S)
        S[::m + 1] += 1.0
        lapack.potrf(S)

        def g(x, y, z):

            x[:n] = 0.5 * (x[:n] - mul(d3, x[n:]) + mul(
                d1, z[:n] + mul(d3, z[:n])) - mul(d2, z[n:] - mul(d3, z[n:])))
            x[:n] = div(x[:n], ds)

            # Solve
            #
            #     S * v = 0.5 * A * D^-1 * ( bx[:n] -
            #         (D2-D1)*(D1+D2)^-1 * bx[n:] +
            #         D1 * ( I + (D2-D1)*(D1+D2)^-1 ) * bzl[:n] -
            #         D2 * ( I - (D2-D1)*(D1+D2)^-1 ) * bzl[n:] )

            blas.gemv(Asc, x, v)
            lapack.potrs(S, v)

            # x[:n] = D^-1 * ( rhs - A'*v ).
            blas.gemv(Asc, v, x, alpha=-1.0, beta=1.0, trans='T')
            x[:n] = div(x[:n], ds)

            # x[n:] = (D1+D2)^-1 * ( bx[n:] - D1*bzl[:n]  - D2*bzl[n:] )
            #         - (D2-D1)*(D1+D2)^-1 * x[:n]
            x[n:] = div( x[n:] - mul(d1, z[:n]) - mul(d2, z[n:]), d1+d2 )\
                - mul( d3, x[:n] )

            # zl[:n] = D1^1/2 * (  x[:n] - x[n:] - bzl[:n] )
            # zl[n:] = D2^1/2 * ( -x[:n] - x[n:] - bzl[n:] ).
            z[:n] = mul(W['di'][:n], x[:n] - x[n:] - z[:n])
            z[n:] = mul(W['di'][n:], -x[:n] - x[n:] - z[n:])

        return g

    return solvers.coneqp(P, q, G, h, kktsolver=Fkkt)['x'][:n]
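# A minimal usage sketch with synthetic data (illustrative only; it assumes
# numpy is installed in addition to the cvxopt modules l1regls relies on).
import numpy as np

np.random.seed(0)
A_test = matrix(np.random.randn(20, 10))
y_test = matrix(np.random.randn(20, 1))
x_hat = l1regls(A_test, y_test, alpha=0.5, show_progress=0)
print(x_hat)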
Example #52
0
    def LSM2(self, option_type="c", func_list=[lambda x: x ** 0, lambda x: x]):
        dt = self.T / self.n_steps
        df = np.exp(-self.r * dt)
        df2 = np.exp(-(self.r - self.q) * dt)
        K = self.K
        price_matrix = self.price_matrix
        n_trials = self.n_trials
        n_steps = self.n_steps
        exercise_matrix = np.zeros(price_matrix.shape,dtype=bool)
        american_values_matrix = np.zeros(price_matrix.shape)
        
        def __calc_american_values(payoff_fun,sub_price_matrix,sub_exercise_matrix,df):
            exercise_values_t = payoff_fun(sub_price_matrix[:,0])
            ITM_filter = exercise_values_t > 0
            n_sub_trials, n_sub_steps = sub_price_matrix.shape
            holding_values_t = np.zeros(n_sub_trials)
            itemindex = np.where(sub_exercise_matrix==1)
            for trial_i in range(n_sub_trials):                
                first = next(itemindex[1][i] for i,x in enumerate(itemindex[0]) if x==trial_i)
                payoff_i = payoff_fun(sub_price_matrix[trial_i, first])
                df_i = df**(n_sub_steps-first)
                holding_values_t[trial_i] = payoff_i*df_i
            
            A_matrix = np.array([func(sub_price_matrix[:,0]) for func in func_list]).T
            b_matrix = holding_values_t[:, np.newaxis] # g_tau|Fi
            A_prime_matrix = A_matrix[ITM_filter, :]
            b_prime_matrix = b_matrix[ITM_filter, :]
            lr = LinearRegression(fit_intercept=False)
            lr.fit(A_prime_matrix, b_prime_matrix)
            exp_holding_values_t = np.dot(A_matrix, lr.coef_.T)[:, 0] # E[g_tau|Fi] only ITM
            exp_holding_values_t[np.invert(ITM_filter)] = np.nan
            sub_exercise_matrix[:,0] = ITM_filter & (exercise_values_t>exp_holding_values_t)
            american_values_t = np.maximum(exp_holding_values_t,exercise_values_t)
            return american_values_t
        
        if (option_type == "c"):
            payoff_fun = lambda x: np.maximum(x - K, 0)
        elif (option_type == "p"):
            payoff_fun = lambda x: np.maximum(K - x, 0)
        
        # when the contract is at maturity
        stock_prices_t = price_matrix[:, -1]
        exercise_values_t = payoff_fun(stock_prices_t)
        holding_values_t = exercise_values_t
        american_values_matrix[:,-1] = exercise_values_t
        exercise_matrix[:,-1] = 1
        
        # before maturity
        for i in np.arange(n_steps)[:0:-1]:
            # A1 only ITM
            sub_price_matrix = price_matrix[:,i:]
            sub_exercise_matrix = exercise_matrix[:,i:]
            american_values_t = __calc_american_values(payoff_fun,sub_price_matrix,sub_exercise_matrix,df)
            american_values_matrix[:,i] = american_values_t
        
        # i=0
        # regular martingale pricing: LSM
        american_value1 = american_values_matrix[:,1].mean() * df
        # with delta hedging: OHMC
        v0 = matrix((american_values_matrix[:,1] * df)[:,np.newaxis])
        S0 = price_matrix[:, 0]
        S1 = price_matrix[:, 1]
        dS0 = df * S1 - S0
        Q0 = np.concatenate((-np.ones(n_trials)[:, np.newaxis], dS0[:, np.newaxis]), axis=1)
        Q0 = matrix(Q0)
        P = Q0.T * Q0
        q = Q0.T * v0
        A = matrix(np.ones(n_trials, dtype=np.float64)).T * Q0
        b = - matrix(np.ones(n_trials, dtype=np.float64)).T * v0
        sol = solvers.coneqp(P=P, q=q, A=A, b=b)
        self.sol = sol
        residual_risk = (v0.T * v0 + 2 * sol["primal objective"]) / n_trials
        self.residual_risk = residual_risk[0]  # extract the scalar from the 1x1 matrix
        american_value2 = sol["x"][0]
        delta_hedge = sol["x"][1]
        american_values_matrix[:,0] = american_value2
        
        # obtain the optimal policies at the inception
        holding_matrix = np.zeros(exercise_matrix.shape, dtype=bool)
        for i in np.arange(n_trials):
            exercise_row = exercise_matrix[i, :]
            if (exercise_row.any()):
                exercise_idx = np.where(exercise_row == 1)[0][0]
                exercise_row[exercise_idx + 1:] = 0
                holding_matrix[i,:exercise_idx+1] = 1
            else:
                exercise_row[-1] = 1
                holding_matrix[i,:] = 1

        self.holding_matrix = holding_matrix
        self.exercise_matrix = exercise_matrix
        self.american_values_matrix = american_values_matrix
        
        self.american_price = american_value2
        self.american_delta = delta_hedge
        return american_value2, delta_hedge
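# Standalone sketch (synthetic data, illustrative only) of the OHMC inception
# step used above: minimize the hedging residual |Q0*a + v0|^2 subject to the
# zero-mean constraint 1'*(Q0*a + v0) = 0, written as the equality-constrained
# QP solved by solvers.coneqp with P = Q0'Q0, q = Q0'v0, A = 1'Q0, b = -1'v0.
import numpy as np
from cvxopt import matrix, solvers

np.random.seed(1)
n_paths = 500
v0_s = matrix(np.abs(np.random.randn(n_paths, 1)))         # discounted t=1 option values
dS_s = np.random.randn(n_paths)                            # discounted price increments
Q0_s = matrix(np.column_stack((-np.ones(n_paths), dS_s)))
P_s = Q0_s.T * Q0_s
q_s = Q0_s.T * v0_s
A_s = matrix(np.ones((1, n_paths))) * Q0_s
b_s = -matrix(np.ones((1, n_paths))) * v0_s
sol_s = solvers.coneqp(P=P_s, q=q_s, A=A_s, b=b_s)
price_s, delta_s = sol_s['x'][0], sol_s['x'][1]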
Example #53
0
    def LSM2(self, option_type="c", func_list=[lambda x: x ** 0, lambda x: x],onlyITM=False,buy_cost=0,sell_cost=0):
        dt = self.T / self.n_steps
        df = np.exp(-self.r * dt)
        df2 = np.exp(-(self.r - self.q) * dt)
        K = self.K
        price_matrix = self.price_matrix
        n_trials = self.n_trials
        n_steps = self.n_steps
        exercise_matrix = np.zeros(price_matrix.shape,dtype=bool)
        american_values_matrix = np.zeros(price_matrix.shape)
        
        
        def __calc_american_values(payoff_fun,func_list, prices_t, american_values_tp1,df):
            exercise_values_t = payoff_fun(prices_t[:])
            ITM_filter = exercise_values_t > 0
            OTM_filter = exercise_values_t <= 0
            n_sub_trials = len(prices_t)
            holding_values_t = df*american_values_tp1 # simulated samples: y
            exp_holding_values_t = np.zeros(n_sub_trials) # regressed results: E[y]
            
            
            A_matrix = np.array([func(prices_t[:]) for func in func_list]).T
            b_matrix = holding_values_t[:, np.newaxis] # g_tau|Fi
            ITM_A_matrix = A_matrix[ITM_filter, :]
            ITM_b_matrix = b_matrix[ITM_filter, :]           
            lr = LinearRegression(fit_intercept=False)
            lr.fit(ITM_A_matrix, ITM_b_matrix)
            exp_holding_values_t[ITM_filter] = np.dot(ITM_A_matrix, lr.coef_.T)[:, 0] # E[g_tau|Fi] only ITM
            
            OTM_A_matrix = A_matrix[OTM_filter, :]
            OTM_b_matrix = b_matrix[OTM_filter, :]
            lr.fit(OTM_A_matrix, OTM_b_matrix)
            exp_holding_values_t[OTM_filter] = np.dot(OTM_A_matrix, lr.coef_.T)[:, 0] # E[g_tau|Fi] only OTM
     
            american_values_t = np.maximum(exp_holding_values_t,exercise_values_t)
            return american_values_t
        
        if (option_type == "c"):
            payoff_fun = lambda x: np.maximum(x - K, 0)
        elif (option_type == "p"):
            payoff_fun = lambda x: np.maximum(K - x, 0)
        
        # when the contract is at maturity
        exercise_values_t = payoff_fun(price_matrix[:,-1])
        american_values_matrix[:,-1] = exercise_values_t
        american_values_t = exercise_values_t
        
        # before maturity
        for i in np.arange(n_steps)[:0:-1]:
            prices_t = price_matrix[:,i]
            american_values_tp1 = american_values_t
            american_values_t = __calc_american_values(payoff_fun,func_list,prices_t, american_values_tp1,df)
            american_values_matrix[:,i] = american_values_t
        
        
        
        # obtain the optimal policies at the inception
        

        
        # i=0
        # regular martingale pricing: LSM
        american_value1 = american_values_matrix[:,1].mean() * df
        # with delta hedging: OHMC
        v0 = matrix((american_values_matrix[:,1] * df)[:,np.newaxis])
        S0 = price_matrix[:, 0]
        S1 = price_matrix[:, 1]
        dS0 = df2 * S1 * (1-sell_cost) - S0*(1+buy_cost)
        Q0 = np.concatenate((-np.ones(n_trials)[:, np.newaxis], dS0[:, np.newaxis]), axis=1)
        Q0 = matrix(Q0)
        P = Q0.T * Q0
        q = Q0.T * v0
        A = matrix(np.ones(n_trials, dtype=np.float64)).T * Q0
        b = - matrix(np.ones(n_trials, dtype=np.float64)).T * v0
        sol = solvers.coneqp(P=P, q=q, A=A, b=b)
        self.sol = sol
        residual_risk = (v0.T * v0 + 2 * sol["primal objective"]) / n_trials
        self.residual_risk = residual_risk[0]  # extract the scalar from the 1x1 matrix
        american_value2 = sol["x"][0]
        delta_hedge = sol["x"][1]
        american_values_matrix[:,0] = american_value2
        self.american_values_matrix = american_values_matrix
        self.HLSM_price = american_value2
        self.HLSM_delta = - delta_hedge
        print("price: {}, delta-hedge: {}".format(american_value2,delta_hedge))
        
        pass
Example #54
0
import numpy as np
from cvxopt import matrix, solvers


def obj_tracking_err(s2_x_xb, s=None):
    """For details, see here.

    Parameters
    ----------
        s2_x_xb : array, shape (n_ + 1, n_ + 1)
        s : array, shape (k_, ) or int

    Returns
    -------
    w_star : array, shape (n_, )
    minus_te : scalar

    """
    # read the number of components in x
    n_ = np.shape(s2_x_xb)[0] - 1

    # shift indices of instruments by -1
    if s is None:
        s = np.arange(n_)
    elif np.isscalar(s):
        s = np.array([s - 1])
    else:
        s = s - 1

    ## Step 0: LCQP optimization setup

    # quadratic objective parameters
    s2_0 = s2_x_xb[:-1, :-1]
    u_0 = -(s2_x_xb[:-1, -1].reshape(-1, 1))
    v_0 = s2_x_xb[-1, -1]

    # linear constraint parameters
    c_sort = np.array([k for k in np.arange(0, n_) if k not in list(s)])

    if c_sort.size == 0:
        a_1 = np.ones((1, n_))
    else:
        # first row of a_1
        first_r = np.ones((1, n_))
        first_r[0, c_sort] = 0
        # rest rows of a_1
        rest_r = (np.eye(n_))[c_sort, :]
        a_1 = np.concatenate((first_r, rest_r))

    a_2 = np.zeros((c_sort.size + 1, 1))
    a_2[0, 0] = 1

    b_1 = np.eye(n_)
    b_2 = np.zeros((n_, 1))

    ## Step 1: perform optimization using the CVXOPT function solvers.coneqp for LCQP

    # prepare data types for CVXOPT
    P = matrix(s2_0, tc='d')
    q = matrix(u_0, tc='d')
    A = matrix(a_1, tc='d')
    b = matrix(a_2, tc='d')
    G = matrix(-b_1, tc='d')
    h = matrix(b_2, tc='d')

    # run optimization function
    solvers.options['show_progress'] = False
    sol = solvers.coneqp(P, q, G, h, A=A, b=b)

    # prepare output
    w_star = np.array(sol['x'])
    minus_te = -(np.sqrt(w_star.T @ s2_0 @ w_star + 2 * w_star.T @ u_0 + v_0))

    return w_star, minus_te.item()
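# A minimal usage sketch (synthetic covariance, illustrative only): s2_test is
# a made-up joint covariance of two instruments and the benchmark, stacked as
# s2_x_xb; it assumes the numpy/cvxopt imports used by obj_tracking_err above.
s2_test = np.array([[0.040, 0.010, 0.020],
                    [0.010, 0.090, 0.030],
                    [0.020, 0.030, 0.050]])
w_star_test, minus_te_test = obj_tracking_err(s2_test)
print(w_star_test.flatten(), minus_te_test)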
Example #55
0
    def OHMCPricer(self, option_type='c', isAmerican=False, func_list=[lambda x: x ** 0, lambda x: x]):
        def _calculate_Q_matrix(S_k, S_kp1, df, df2, func_list):
            dS = df2 * S_kp1 - S_k
            A = np.array([func(S_k) for func in func_list]).T
            B = (np.array([func(S_k) for func in func_list]) * dS).T
            return np.concatenate((-A, B), axis=1)

        price_matrix = self.price_matrix
        # k = n_steps
        dt = self.T / self.n_steps
        df = np.exp(- self.r * dt)
        df2 = np.exp(-(self.r - self.q) * dt)
        n_basis = len(func_list)
        n_trials = self.n_trials
        n_steps = self.n_steps
        strike = self.K

        if (option_type == "c"):
            payoff_fun = lambda x: np.maximum(x-strike,0)
            # payoff = (price_matrix[:, n_steps] - strike)
        elif (option_type == "p"):
            payoff_fun = lambda x: np.maximum(strike-x,0)
            # payoff = (strike - price_matrix[:, n_steps])
        else:
            print("please enter the option type: (c/p)")
            return

        if isAmerican is True:
            holding_matrix = self.holding_matrix
        else:
            holding_matrix = np.ones(price_matrix.shape,dtype=bool)

        # At maturity
        holding_filter_k = holding_matrix[:, n_steps]
        payoff = matrix(payoff_fun(price_matrix[holding_filter_k,n_steps]))
        vk = payoff * df
        Sk = price_matrix[holding_filter_k,n_steps]
        #         print("regular MC price",regular_mc_price)

        # k = n_steps-1,...,1
        for k in range(n_steps - 1, 0, -1):

            holding_filter_kp1 = holding_filter_k
            holding_filter_k = holding_matrix[:, k]
            Skp1 = price_matrix[holding_filter_kp1, k+1]
            Sk = price_matrix[holding_filter_kp1, k]
            Qk = matrix(_calculate_Q_matrix(Sk, Skp1, df, df2, func_list))
            P = Qk.T * Qk
            q = Qk.T * vk
            A = matrix(np.ones(holding_filter_kp1.sum(), dtype=np.float64)).T * Qk
            b = - matrix(np.ones(holding_filter_kp1.sum(), dtype=np.float64)).T * vk
            # print(Sk)
            # print(Skp1)

            sol = solvers.coneqp(P=P, q=q, A=A, b=b)
            ak = sol["x"][:n_basis]
            bk = sol["x"][n_basis:]
            vk = matrix(np.array([func(price_matrix[holding_filter_k, k]) for func in func_list])).T * ak * df
            # break

        # k = 0
        v0 = vk
        holding_filter_1 = holding_filter_k
        holding_filter_0 = holding_matrix[:, 0]
        S0 = price_matrix[holding_filter_1, 0]
        S1 = price_matrix[holding_filter_1, 1]
        dS0 = df2 * S1 - S0
        Q0 = np.concatenate((-np.ones(holding_filter_1.sum())[:, np.newaxis], dS0[:, np.newaxis]), axis=1)
        Q0 = matrix(Q0)
        P = Q0.T * Q0
        q = Q0.T * v0
        A = matrix(np.ones(holding_filter_1.sum(), dtype=np.float64)).T * Q0
        b = - matrix(np.ones(holding_filter_1.sum(), dtype=np.float64)).T * v0
        C1 = matrix(ak).T * np.array([func(S1) for func in func_list]).T
        sol = solvers.coneqp(P=P, q=q, A=A, b=b)
        self.sol = sol
        residual_risk = (v0.T * v0 + 2 * sol["primal objective"]) / holding_filter_1.sum()
        self.residual_risk = residual_risk[0]  # extract the scalar from the 1x1 matrix

        return sol["x"][0]
Example #56
0
def proxqp_general(proxargs):
    """
    Solves the conic QP

        min.  < c, x > + (rho/2) || x - z ||^2
        s.t.  A(x) = b
              x >= 0

    and its dual

        max.  -< b, y > - 1/(2*rho) * || c + A'(y) - rho * z - s ||^2 
        s.t.  s >= 0.

    The primal variable is x = matrix(x_0[:], ...,  x_{N-1}[:]) with x_k a symmetric
    matrix of order nk.
    The dual variables are y = matrix(y_0, ..., y_{M-1}), with y_k a column
    vector of length mk, and s = matrix(s_0[:], .., s_{N-1}[:]), with s_i a symmetric
    matrix of order nk.

    In the primal cost function, c = (c_0[:], ..., c_{N-1}[:]), with c_k a 
    symmetric matrix, and < c, x > = sum_j tr (c_j * x_j).
    In the dual cost function, b = matrix(b_0, ..., b_{M-1}), with b_k a 
    column vector, and < b, y > = sum_j b_i' * y_i.

    The mapping A(x) is defined as follows. 
    The value of u = A(x) is u = matrix(u_0, ..., u_{M-1}) with u_i a vector
    defined by

        u_i = sum_j Aij(x_j),  i = 0, ..., M-1.

    The adjoint v = A'(y)  is v = (v_0, ..., v_{N-1}) with v_j a matrix
    defined as

        v_j = sum_i Aij'(y_i),  j = 0, ..., N-1.

    If we expand the primal and dual problems we therefore have

        min.  sum_j < c_j, x_j >  + (rho/2) * sum_j || x_j - z_j ||_F^2 
        s.t.  sum_j Aij(x_j) = b_i, i = 0, ..., M-1 
              x_j >= 0, j = 0, ..., N-1

    with variables x_j and its dual

        max.  sum_i b_i'*y_i - 1/(2*rho) *
              sum_j || c_j - sum_i Aij'(y_i) + rho *z_j - s_j ||_F^2
        s.t.  s_j >= 0, j = 0, ..., N-1

    with variables y_i, s_j.

    Input arguments.
    
        proxargs['C']   is a stacked vector containing vectorized 'd' matrices 
                        c_k of size n_k**2 x 1, representing symmetric matrices.

        proxargs['A']   is a list of a list of either 'd' matrices, 
            'd' sparse matrices, or 0.  
            
            If A[i][j] = 0, then variable block i 
            is not involved in constraint block j. 
            
            Otherwise, A[i][j] has size n_j**2 times m_i.  Each of its columns 
            represents a symmetric matrix of order n_j in unpacked column-major 
            order. The term  Aij(x[j]) in the primal constraint is given by

                Aij(x_j) = A[i][j]' * vec(x_j).

            The adjoint Aij'(y[i]) in the dual constraint is given by

                Aij'(y_i) = mat( A[i][j] * y_i ).


        proxargs['b']   is a stacked vector containing constraint vectors of 
                        size m_i x 1.

        proxargs['Z']   is a stacked vector containing variable Z vectors of 
                        size n_k**2 x 1.
            
        proxargs['sigma'] is a positive scalar (step size).
        
        proxargs['X']   contains the primal variable X in stacked vector form. 
        
        proxargs['dualy']   contains the dual variable y 
        
        proxargs['dualS']   contains the dual variable S

        On output, proxargs['X'], proxargs['dualy'], and proxargs['dualS']
        will be filled with the prox-optimal primal and dual variables.

    Output arguments.

        primal : trace(C*X) where X is the optimal variable
        
        gap : trace(X*S) as computed by CVXOPT

    """

    c, A, b = proxargs['C'], proxargs['A'], proxargs['b']

    z, X = proxargs['Z'], proxargs['X']
    rho = proxargs['sigma']
    ns, ms = proxargs['ns'], proxargs['ms']
    multiprocess = proxargs['multiprocess']

    N = len(A[0])
    M = len(A)

    def Pf(u, v, alpha=1.0, beta=0.0):

        # v[k] := alpha * rho * u[k] + beta * v[k]

        blas.scal(beta, v)
        blas.axpy(u, v, alpha=alpha * rho)

    q = (c - rho * z)[:]

    xp = +q[:]
    bz = +q[:]
    uy = +b[:]

    def Gf(u, v, alpha=1.0, beta=0.0, trans='N'):
        # v = -alpha*u + beta * v
        blas.scal(beta, v)
        blas.axpy(u, v, alpha=-alpha)

    h = matrix(0.0, (sum(nk**2 for nk in ns), 1))
    dims = {'l': 0, 'q': [], 's': ns}

    def Af(u, v, alpha=1.0, beta=0.0, trans='N'):

        # v := alpha * A(u) + beta * v if trans is 'N'
        # v := alpha * A'(u) + beta * v if trans is 'T'
        blas.scal(beta, v)
        if trans == 'N':
            offseti = 0
            for i in xrange(M):
                offsetj = 0
                for j in xrange(N):
                    if type(A[i][j]) is matrix or type(A[i][j]) is spmatrix:
                        sgemv(A[i][j],
                              u,
                              v,
                              n=ns[j],
                              m=ms[i],
                              trans='T',
                              alpha=alpha,
                              beta=1.0,
                              offsetx=offsetj,
                              offsety=offseti)
                    offsetj += ns[j]**2
                offseti += ms[i]
        else:
            offsetj = 0
            for j in xrange(N):
                offseti = 0
                for i in xrange(M):
                    if type(A[i][j]) is matrix or type(A[i][j]) is spmatrix:
                        sgemv(A[i][j],
                              u,
                              v,
                              n=ns[j],
                              m=ms[i],
                              trans='N',
                              alpha=alpha,
                              beta=1.0,
                              offsetx=offseti,
                              offsety=offsetj)
                    offseti += ms[i]
                offsetj += ns[j]**2

    def xdot(x, y):

        offset = 0
        for k in xrange(N):
            misc.trisc(x, {'l': offset, 'q': [], 's': [ns[k]]})
            offset += ns[k]**2

        a = blas.dot(x, y)
        offset = 0
        for k in xrange(N):
            misc.triusc(x, {'l': offset, 'q': [], 's': [ns[k]]})
            symmetrize(x, n=ns[k], offset=offset)
            offset += ns[k]**2

        return a

    U = [matrix(0.0, (nk, nk)) for nk in ns]
    Vt = [matrix(0.0, (nk, nk)) for nk in ns]
    sv = [matrix(0.0, (nk, 1)) for nk in ns]
    Gamma = [matrix(0.0, (nk, nk)) for nk in ns]
    As = [[+A[i][j] for j in xrange(N)] for i in xrange(M)]

    def F(W):

        for j in xrange(N):

            # SVD R[j] = U[j] * diag(sig[j]) * Vt[j]
            lapack.gesvd(+W['r'][j],
                         sv[j],
                         jobu='A',
                         jobvt='A',
                         U=U[j],
                         Vt=Vt[j])

            # Vt[j] := diag(sig[j])^-1 * Vt[j]
            for k in xrange(ns[j]):
                blas.tbsv(sv[j], Vt[j], n=ns[j], k=0, ldA=1, offsetx=k * ns[j])

            # Gamma[j] is an ns[j] x ns[j] symmetric matrix
            #
            #  (sig[j] * sig[j]') ./  sqrt(1 + rho * (sig[j] * sig[j]').^2)

            # S = sig[j] * sig[j]'
            S = matrix(0.0, (ns[j], ns[j]))
            blas.syrk(sv[j], S)
            Gamma[j][:] = div(S, sqrt(1.0 + rho * S**2))[:]
            symmetrize(Gamma[j], ns[j])

            # As represents the scaled mapping
            #
            #     As(x) = A(u * (Gamma .* x) * u')
            #    As'(y) = Gamma .* (u' * A'(y) * u)
            #
            # stored in a similar format as A, except that we use packed
            # storage for the columns of As[i][j].

            for i in xrange(M):

                if (type(A[i][j]) is matrix) or (type(A[i][j]) is spmatrix):

                    # As[i][j][:,k] = vec(
                    #     (U[j]' * mat( A[i][j][:,k] ) * U[j]) .* Gamma[j])

                    copy(A[i][j], As[i][j])
                    As[i][j] = matrix(As[i][j])
                    for k in xrange(ms[i]):
                        cngrnc(U[j],
                               As[i][j],
                               trans='T',
                               offsetx=k * (ns[j]**2),
                               n=ns[j])
                        blas.tbmv(Gamma[j],
                                  As[i][j],
                                  n=ns[j]**2,
                                  k=0,
                                  ldA=1,
                                  offsetx=k * (ns[j]**2))

                    # pack As[i][j] in place
                    #pack_ip(As[i][j], ns[j])
                    for k in xrange(As[i][j].size[1]):
                        tmp = +As[i][j][:, k]
                        misc.pack2(tmp, {'l': 0, 'q': [], 's': [ns[j]]})
                        As[i][j][:, k] = tmp

                else:
                    As[i][j] = 0.0

        # H is an m times m block matrix with i, k block
        #
        #      Hik = sum_j As[i,j]' * As[k,j]
        #
        # of size ms[i] x ms[k].  Hik = 0 if As[i,j] or As[k,j]
        # are zero for all j

        H = spmatrix([], [], [], (sum(ms), sum(ms)))
        rowid = 0
        for i in xrange(M):
            colid = 0
            for k in xrange(i + 1):
                sparse_block = True
                Hik = matrix(0.0, (ms[i], ms[k]))
                for j in xrange(N):
                    if (type(As[i][j]) is matrix) and \
                        (type(As[k][j]) is matrix):
                        sparse_block = False
                        # Hik += As[i,j]' * As[k,j]
                        if i == k:
                            blas.syrk(As[i][j],
                                      Hik,
                                      trans='T',
                                      beta=1.0,
                                      k=ns[j] * (ns[j] + 1) / 2,
                                      ldA=ns[j]**2)
                        else:
                            blas.gemm(As[i][j],
                                      As[k][j],
                                      Hik,
                                      transA='T',
                                      beta=1.0,
                                      k=ns[j] * (ns[j] + 1) / 2,
                                      ldA=ns[j]**2,
                                      ldB=ns[j]**2)
                if not (sparse_block):
                    H[rowid:rowid+ms[i], colid:colid+ms[k]] \
                        = sparse(Hik)
                colid += ms[k]
            rowid += ms[i]

        HF = cholmod.symbolic(H)
        cholmod.numeric(H, HF)

        def solve(x, y, z):
            """
            Returns solution of 

                rho * ux + A'(uy) - r^-T * uz * r^-1 = bx
                A(ux)                                = by
                -ux               - r * uz * r'      = bz.

            On entry, x = bx, y = by, z = bz.
            On exit, x = ux, y = uy, z = uz.
            """

            # bz is a copy of z in the format of x
            blas.copy(z, bz)
            # x := x + rho * bz
            #    = bx + rho * bz
            blas.axpy(bz, x, alpha=rho)

            # x := Gamma .* (u' * x * u)
            #    = Gamma .* (u' * (bx + rho * bz) * u)
            offsetj = 0
            for j in xrange(N):
                cngrnc(U[j], x, trans='T', offsetx=offsetj, n=ns[j])
                blas.tbmv(Gamma[j], x, n=ns[j]**2, k=0, ldA=1, offsetx=offsetj)
                offsetj += ns[j]**2

            # y := y - As(x)
            #   := by - As( Gamma .* u' * (bx + rho * bz) * u)

            blas.copy(x, xp)

            offsetj = 0
            for j in xrange(N):
                misc.pack2(xp, {'l': offsetj, 'q': [], 's': [ns[j]]})
                offsetj += ns[j]**2

            offseti = 0
            for i in xrange(M):
                offsetj = 0
                for j in xrange(N):
                    if type(As[i][j]) is matrix:
                        blas.gemv(As[i][j],
                                  xp,
                                  y,
                                  trans='T',
                                  alpha=-1.0,
                                  beta=1.0,
                                  m=ns[j] * (ns[j] + 1) / 2,
                                  n=ms[i],
                                  ldA=ns[j]**2,
                                  offsetx=offsetj,
                                  offsety=offseti)
                    offsetj += ns[j]**2
                offseti += ms[i]
            # y := -y - A(bz)
            #    = -by - A(bz) + As(Gamma .*  (u' * (bx + rho * bz) * u)

            Af(bz, y, alpha=-1.0, beta=-1.0)

            # y := H^-1 * y
            #    = H^-1 ( -by - A(bz) + As(Gamma.* u'*(bx + rho*bz)*u) )
            #    = uy

            cholmod.solve(HF, y)

            # bz = Vt' * vz * Vt
            #    = uz where
            # vz := Gamma .* ( As'(uy)  - x )
            #     = Gamma .* ( As'(uy)  - Gamma .* (u'*(bx + rho *bz)*u) )
            #     = Gamma.^2 .* ( u' * (A'(uy) - bx - rho * bz) * u ).
            blas.copy(x, xp)

            offsetj = 0
            for j in xrange(N):

                # xp is -x[j] = -Gamma .* (u' * (bx + rho*bz) * u)
                # in packed storage
                misc.pack2(xp, {'l': offsetj, 'q': [], 's': [ns[j]]})
                offsetj += ns[j]**2
            blas.scal(-1.0, xp)

            offsetj = 0
            for j in xrange(N):
                # xp +=  As'(uy)

                offseti = 0
                for i in xrange(M):
                    if type(As[i][j]) is matrix:
                        blas.gemv(As[i][j], y, xp, alpha = 1.0,
                             beta = 1.0, m = ns[j]*(ns[j]+1)/2, \
                                n = ms[i],ldA = ns[j]**2, \
                                offsetx = offseti, offsety = offsetj)
                    offseti += ms[i]

                # bz[j] is xp unpacked and multiplied with Gamma
                #unpack(xp, bz[j], ns[j])

                misc.unpack(xp,
                            bz, {
                                'l': 0,
                                'q': [],
                                's': [ns[j]]
                            },
                            offsetx=offsetj,
                            offsety=offsetj)

                blas.tbmv(Gamma[j],
                          bz,
                          n=ns[j]**2,
                          k=0,
                          ldA=1,
                          offsetx=offsetj)

                # bz = Vt' * bz * Vt
                #    = uz

                cngrnc(Vt[j], bz, trans='T', offsetx=offsetj, n=ns[j])
                symmetrize(bz, ns[j], offset=offsetj)
                offsetj += ns[j]**2

            # x = -bz - r * uz * r'
            blas.copy(z, x)
            blas.copy(bz, z)
            offsetj = 0
            for j in xrange(N):
                cngrnc(+W['r'][j], bz, offsetx=offsetj, n=ns[j])
                offsetj += ns[j]**2
            blas.axpy(bz, x)
            blas.scal(-1.0, x)

        return solve

    sol = solvers.coneqp(Pf, q, Gf, h, dims, Af, b, kktsolver=F, xdot=xdot)

    proxargs['X'][:] = +sol['s'][:]
    proxargs['dualy'][:] = +sol['y'][:]
    proxargs['dualS'][:] = +sol['z'][:]

    primal = blas.dot(proxargs['C'], proxargs['X'])
    gap = sol['gap']
    return primal, gap
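# A quick numeric sanity check (synthetic vectors, illustrative only) of the
# reformulation used above:  < c, x > + (rho/2) * || x - z ||^2  equals the
# cone-QP objective  (rho/2) * ||x||^2 + (c - rho*z)'*x  up to a constant in x,
# which is why Pf scales by rho and q is set to c - rho*z.
import numpy as np

np.random.seed(0)
c_v, z_v, x_v = np.random.randn(5), np.random.randn(5), np.random.randn(5)
rho_v = 2.0
lhs = np.dot(c_v, x_v) + 0.5 * rho_v * np.sum((x_v - z_v) ** 2)
rhs = (0.5 * rho_v * np.dot(x_v, x_v) + np.dot(c_v - rho_v * z_v, x_v)
       + 0.5 * rho_v * np.dot(z_v, z_v))
assert abs(lhs - rhs) < 1e-10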