Example #1
        def f(x, y, z):

            # z := - W**-T * z 
            z[:n] = -div( z[:n], d1 )
            z[n:2*n] = -div( z[n:2*n], d2 )
            z[2*n:] -= 2.0*v*( v[0]*z[2*n] - blas.dot(v[1:], z[2*n+1:]) ) 
            z[2*n+1:] *= -1.0
            z[2*n:] /= beta

            # x := x - G' * W**-1 * z
            x[:n] -= div(z[:n], d1) - div(z[n:2*n], d2) + As.T * z[-(m+1):]
            x[n:] += div(z[:n], d1) + div(z[n:2*n], d2) 

            # Solve for x[:n]:
            #
            #    S*x[:n] = x[:n] - (W1**2 - W2**2)(W1**2 + W2**2)^-1 * x[n:]
            
            x[:n] -= mul( div(d1**2 - d2**2, d1**2 + d2**2), x[n:]) 
            lapack.potrs(S, x)
            
            # Solve for x[n:]:
            #
            #    (d1**-2 + d2**-2) * x[n:] = x[n:] + (d1**-2 - d2**-2)*x[:n]
             
            x[n:] += mul( d1**-2 - d2**-2, x[:n])
            x[n:] = div( x[n:], d1**-2 + d2**-2)

            # z := z + W^-T * G*x 
            z[:n] += div( x[:n] - x[n:2*n], d1) 
            z[n:2*n] += div( -x[:n] - x[n:2*n], d2) 
            z[2*n:] += As*x[:n]
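
The routine above is only the inner solve step of a custom KKT solver: n, m, d1, d2, v, beta, As and the Cholesky factor S come from an enclosing function that factors the system for the current scaling W. A minimal sketch of that wiring, assuming (not taken from the original) that the factor function is named F and is handed to a CVXOPT cone solver through the kktsolver argument:

from cvxopt import solvers

def F(W):
    # Derive the scaling-dependent quantities used by f from W here
    # (d1, d2, v, beta, As and the Cholesky factor S in the fragment above).
    def f(x, y, z):
        # ... body as listed above ...
        pass
    return f

# The factor function is then passed to the solver, for example:
# sol = solvers.conelp(c, G, h, dims, kktsolver=F)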
Example #2
from math import sqrt

from cvxopt import blas, lapack, log, matrix, mul


def acent(A, b):
    """  
    Computes analytic center of A*x <= b with A m by n of rank n. 
    We assume that b > 0 and the feasible set is bounded.
    """

    MAXITERS = 100
    ALPHA = 0.01
    BETA = 0.5
    TOL = 1e-8

    ntdecrs = []
    m, n = A.size
    x = matrix(0.0, (n,1))
    H = matrix(0.0, (n,n))

    for iter in range(MAXITERS):
        
        # Gradient is g = A^T * (1./(b-A*x)).
        d = (b-A*x)**-1
        g = A.T * d

        # Hessian is H = A^T * diag(1./(b-A*x))^2 * A.
        Asc = mul( d[:,n*[0]], A)
        blas.syrk(Asc, H, trans='T')

        # Newton step is v = H^-1 * g.
        v = -g
        lapack.posv(H, v)

        # Directional derivative and Newton decrement.
        lam = blas.dot(g, v)
        ntdecrs += [ sqrt(-lam) ]
        print("%2d.  Newton decr. = %3.3e" %(iter,ntdecrs[-1]))
        if ntdecrs[-1] < TOL: return x, ntdecrs

        # Backtracking line search.
        y = mul(A*v, d)
        step = 1.0
        while 1-step*max(y) < 0: step *= BETA 
        while True:
            if -sum(log(1-step*y)) < ALPHA*step*lam: break
            step *= BETA
        x += step*v
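
A small usage sketch (not part of the original function): the box -1 <= x_i <= 2 written as A*x <= b satisfies the assumptions above (b > 0, bounded feasible set, A of full column rank) and has a known analytic center at x_i = 0.5.

from cvxopt import matrix

n = 3
A = matrix([[1.0, 0.0, 0.0, -1.0, 0.0, 0.0],
            [0.0, 1.0, 0.0, 0.0, -1.0, 0.0],
            [0.0, 0.0, 1.0, 0.0, 0.0, -1.0]])   # rows form [I; -I]
b = matrix([2.0, 2.0, 2.0, 1.0, 1.0, 1.0])      # -1 <= x_i <= 2
xstar, decrements = acent(A, b)
print(xstar)                                    # each entry close to 0.5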
Example #3
from math import sqrt

from cvxopt import matrix
from cvxopt.blas import dot
from cvxopt.solvers import options, qp

n = 4
S = matrix([[4e-2, 6e-3, -4e-3, 0.0], [6e-3, 1e-2, 0.0, 0.0],
            [-4e-3, 0.0, 2.5e-3, 0.0], [0.0, 0.0, 0.0, 0.0]])
pbar = matrix([.12, .10, .07, .03])

G = matrix(0.0, (n, n))
G[::n + 1] = -1.0
h = matrix(0.0, (n, 1))
A = matrix(1.0, (1, n))
b = matrix(1.0)

N = 100
mus = [10**(5.0 * t / N - 1.0) for t in range(N)]
options['show_progress'] = False
xs = [qp(mu * S, -pbar, G, h, A, b)['x'] for mu in mus]
returns = [dot(pbar, x) for x in xs]
risks = [sqrt(dot(x, S * x)) for x in xs]
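
# Not part of the original example: print a few points of the trade-off
# curve for a quick look that does not require matplotlib.
for k in range(0, N, 25):
    print("mu = %9.4f   risk = %7.4f   return = %7.4f"
          % (mus[k], risks[k], returns[k]))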

try:
    import pylab
except ImportError:
    pass
else:
    pylab.figure(1, facecolor='w')
    pylab.plot(risks, returns)
    pylab.xlabel('standard deviation')
    pylab.ylabel('expected return')
    pylab.axis([0, 0.2, 0, 0.15])
    pylab.title('Risk-return trade-off curve (fig 4.12)')
    pylab.yticks([0.00, 0.05, 0.10, 0.15])
    pylab.show()
Example #4
from cvxopt import blas, cholmod, lapack, log, matrix, mul, spmatrix


def covsel(Y):
    """
    Returns the solution of
 
        minimize    -log det K + tr(KY)
        subject to  K_ij = 0  if (i,j) not in zip(I, J).

    Y is a symmetric sparse matrix with nonzero diagonal elements.
    I = Y.I,  J = Y.J.
    """

    cholmod.options['supernodal'] = 2

    I, J = Y.I, Y.J
    n, m = Y.size[0], len(I) 
    # non-zero positions for one-argument indexing 
    N = I + J*n         
    # position of diagonal elements
    D = [ k for k in range(m) if I[k]==J[k] ]  

    # starting point: symmetric identity with nonzero pattern I,J
    K = spmatrix(0.0, I, J) 
    K[::n+1] = 1.0

    # Kn is used in the line search
    Kn = spmatrix(0.0, I, J)

    # symbolic factorization of K 
    F = cholmod.symbolic(K)

    # Kinv will be the inverse of K
    Kinv = matrix(0.0, (n,n))

    for iters in range(100):

        # numeric factorization of K
        cholmod.numeric(K, F)
        d = cholmod.diag(F)

        # compute Kinv by solving K*X = I 
        Kinv[:] = 0.0
        Kinv[::n+1] = 1.0
        cholmod.solve(F, Kinv)
        
        # solve Newton system
        grad = 2 * (Y.V - Kinv[N])
        hess = 2 * ( mul(Kinv[I,J], Kinv[J,I]) + 
               mul(Kinv[I,I], Kinv[J,J]) )
        v = -grad
        lapack.posv(hess,v) 
                                                  
        # stopping criterion
        sqntdecr = -blas.dot(grad,v) 
        print("Newton decrement squared:%- 7.5e" %sqntdecr)
        if (sqntdecr < 1e-12):
            print("number of iterations: %d" %(iters+1))
            break

        # line search
        dx = +v
        dx[D] *= 2      
        f = -2.0*sum(log(d))      # f = -log det K
        s = 1
        for lsiter in range(50):
            Kn.V = K.V + s*dx
            try: 
                cholmod.numeric(Kn, F)
            except ArithmeticError: 
                s *= 0.5
            else:
                d = cholmod.diag(F)
                fn = -2.0 * sum(log(d)) + 2*s*blas.dot(v,Y.V)
                if (fn < f - 0.01*s*sqntdecr): break
                else: s *= 0.5

        K.V = Kn.V

    return K
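
A hypothetical usage sketch (not from the original): a 3-by-3 pattern stored by its lower triangle, as is customary for cvxopt.cholmod, with a positive definite Y.

from cvxopt import spmatrix

Y = spmatrix([3.0, 0.5, 2.0, 0.2, 1.5],     # values
             [0, 1, 1, 2, 2],               # row indices
             [0, 0, 1, 1, 2])               # column indices (lower triangle)
K = covsel(Y)
print(K)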
Example #5
from cvxopt import blas, log, matrix, normal, solvers, spdiag, uniform


def acent(A, b):
    # Enclosing definition restored from context (the module-level call to
    # acent(A, b) below): analytic centering of A*x = b with x > 0 via
    # solvers.cp.
    n = A.size[1]

    def F(x=None, z=None):
        if x is None: return 0, matrix(1.0, (n, 1))
        if min(x) <= 0.0: return None
        f = -sum(log(x))
        Df = -(x**-1).T
        if z is None: return matrix(f), Df
        H = spdiag(z[0] * x**-2)
        return f, Df, H

    return solvers.cp(F, A=A, b=b)['x']


# Randomly generate a feasible problem

m, n = 50, 500
y = normal(m, 1)

# Random A with A'*y > 0.
s = uniform(n, 1)
A = normal(m, n)
r = s - A.T * y
# A := A + (1/y'*y) * y*r', so that A'*y = s > 0.
blas.ger(y, r, A, alpha=1.0 / blas.dot(y, y))
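
# Quick sanity check (not in the original): the rank-one correction makes
# A'*y equal to s, which is positive elementwise.
assert max(abs(A.T * y - s)) < 1e-8 and min(s) > 0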

# Random feasible x > 0.
x = uniform(n, 1)
b = A * x

x = acent(A, b)
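
# Optional check (not in the original): the computed center is strictly
# positive and satisfies the equality constraints A*x = b.
print(min(x) > 0, max(abs(A * x - b)))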