Example #1
def test():
    # Assumed imports for this fragment (not shown in the original; cf. Example #12):
    # from cvxopt import normal, setseed
    # from l1regls import l1regls
    m, n = 100, 1000
    setseed()
    A = normal(m, n)
    b = normal(m)
    x = l1regls(A, b)
    return x
Example #2
    def get_GW_cut(self, graph):
        ########################################################################
        # RETURNS AVERAGE GOEMANS-WILLIAMSON CUT FOR A GIVEN GRAPH
        ########################################################################

        G = graph
        N = len(G.nodes())

        # Allocate weights to the edges.
        for (i, j) in G.edges():
            G[i][j]['weight'] = 1.0

        maxcut = pic.Problem()

        # Add the symmetric matrix variable.
        X = maxcut.add_variable('X', (N, N), 'symmetric')

        # Retrieve the Laplacian of the graph.
        LL = 1 / 4. * nx.laplacian_matrix(G).todense()
        L = pic.new_param('L', LL)

        # Constrain X to have ones on the diagonal.
        maxcut.add_constraint(pic.tools.diag_vect(X) == 1)

        # Constrain X to be positive semidefinite.
        maxcut.add_constraint(X >> 0)

        # Set the objective.
        maxcut.set_objective('max', L | X)

        # Solve the problem.
        maxcut.solve(verbose=0, solver='cvxopt')

        # Use a fixed RNG seed so the result is reproducible.
        cvx.setseed(1)

        # Perform a Cholesky factorization.
        V = X.value
        cvxopt.lapack.potrf(V)
        for i in range(N):
            for j in range(i + 1, N):
                V[i, j] = 0

        # Do up to 100 projections. Stop if we are within a factor 0.878 of the SDP
        # optimal value.
        count = 0
        obj_sdp = maxcut.obj_value()
        obj = 0
        while count < 100 and obj < 0.878 * obj_sdp:
            r = cvx.normal(N, 1)
            x = cvx.matrix(np.sign(V * r))
            o = (x.T * L * x).value
            if o > obj:
                x_cut = x
                obj = o
            count += 1
        x = x_cut

        return obj, count
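For reference, the hyperplane-rounding step above can be sketched in isolation with plain numpy; `X_val` below is a hypothetical stand-in for the solved SDP variable `X.value`:

import numpy as np

# Minimal sketch of Goemans-Williamson rounding (toy stand-in data).
rng = np.random.default_rng(1)
X_val = np.eye(4)                  # pretend this is the solved PSD matrix X
V = np.linalg.cholesky(X_val)      # X_val = V @ V.T
r = rng.standard_normal(4)         # random hyperplane normal
x = np.sign(V @ r)                 # cut assignment in {-1, +1}^N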
Example #3
 def test_l1(self):
     setseed(100)
     m, n = 500, 250
     P = normal(m, n)
     q = normal(m, 1)
     u1 = l1(P, q)
     u2 = l1blas(P, q)
     self.assertAlmostEqualLists(list(u1), list(u2), places=3)
Example #4
    def test_numpy_scalars(self):
        n = 6
        eps = 1e-6
        cvxopt.setseed(10)
        P0 = cvxopt.normal(n, n)
        eye = cvxopt.spmatrix(1.0, range(n), range(n))
        P0 = P0.T * P0 + eps * eye

        print(P0)

        P1 = cvxopt.normal(n, n)
        P1 = P1.T*P1
        P2 = cvxopt.normal(n, n)
        P2 = P2.T*P2
        P3 = cvxopt.normal(n, n)
        P3 = P3.T*P3

        q0 = cvxopt.normal(n, 1)
        q1 = cvxopt.normal(n, 1)
        q2 = cvxopt.normal(n, 1)
        q3 = cvxopt.normal(n, 1)

        r0 = cvxopt.normal(1, 1)
        r1 = cvxopt.normal(1, 1)
        r2 = cvxopt.normal(1, 1)
        r3 = cvxopt.normal(1, 1)

        slack = Variable()
        # Form the problem
        x = Variable(n)
        objective = Minimize( 0.5*quad_form(x,P0) + q0.T*x + r0 + slack)
        constraints = [0.5*quad_form(x,P1) + q1.T*x + r1 <= slack,
                       0.5*quad_form(x,P2) + q2.T*x + r2 <= slack,
                       0.5*quad_form(x,P3) + q3.T*x + r3 <= slack,
        ]

        # We now find the primal result and compare it to the dual result
        # to check whether strong duality holds, i.e. the duality gap is
        # effectively zero.
        p = Problem(objective, constraints)
        primal_result = p.solve(solver=SCS_MAT_FREE, verbose=True,
                   equil_steps=1, max_iters=5000, equil_p=2,
                   stoch=True, samples=10,
                   precond=True)

        # Note that since our data is random, we may need to run this
        # program multiple times to get a feasible primal. When feasible,
        # we can print out the following values.
        print(x.value)  # solution
        lam1 = constraints[0].dual_value
        lam2 = constraints[1].dual_value
        lam3 = constraints[2].dual_value
        print(type(lam1))

        P_lam = P0 + lam1*P1 + lam2*P2 + lam3*P3
        q_lam = q0 + lam1*q1 + lam2*q2 + lam3*q3
        r_lam = r0 + lam1*r1 + lam2*r2 + lam3*r3
        dual_result = -0.5*q_lam.T.dot(P_lam).dot(q_lam) + r_lam
        print(dual_result.shape)
        self.assertEqual(intf.size(dual_result), (1, 1))
Example #5
    def test_numpy_scalars(self):
        n = 6
        eps = 1e-6
        cvxopt.setseed(10)
        P0 = cvxopt.normal(n, n)
        eye = cvxopt.spmatrix(1.0, range(n), range(n))
        P0 = P0.T * P0 + eps * eye

        print(P0)

        P1 = cvxopt.normal(n, n)
        P1 = P1.T * P1
        P2 = cvxopt.normal(n, n)
        P2 = P2.T * P2
        P3 = cvxopt.normal(n, n)
        P3 = P3.T * P3

        q0 = cvxopt.normal(n, 1)
        q1 = cvxopt.normal(n, 1)
        q2 = cvxopt.normal(n, 1)
        q3 = cvxopt.normal(n, 1)

        r0 = cvxopt.normal(1, 1)
        r1 = cvxopt.normal(1, 1)
        r2 = cvxopt.normal(1, 1)
        r3 = cvxopt.normal(1, 1)

        slack = cp.Variable()
        # Form the problem
        x = cp.Variable(n)
        objective = cp.Minimize(0.5 * cp.quad_form(x, P0) + q0.T * x + r0 +
                                slack)
        constraints = [
            0.5 * cp.quad_form(x, P1) + q1.T * x + r1 <= slack,
            0.5 * cp.quad_form(x, P2) + q2.T * x + r2 <= slack,
            0.5 * cp.quad_form(x, P3) + q3.T * x + r3 <= slack,
        ]

        # We now find the primal result and compare it to the dual result
        # to check whether strong duality holds, i.e. the duality gap is
        # effectively zero.
        p = cp.Problem(objective, constraints)
        primal_result = p.solve()

        # Note that since our data is random, we may need to run this
        # program multiple times to get a feasible primal. When feasible,
        # we can print out the following values.
        print(x.value)  # solution
        lam1 = constraints[0].dual_value
        lam2 = constraints[1].dual_value
        lam3 = constraints[2].dual_value
        print(type(lam1))

        P_lam = P0 + lam1 * P1 + lam2 * P2 + lam3 * P3
        q_lam = q0 + lam1 * q1 + lam2 * q2 + lam3 * q3
        r_lam = r0 + lam1 * r1 + lam2 * r2 + lam3 * r3
        dual_result = -0.5 * q_lam.T.dot(P_lam).dot(q_lam) + r_lam
        print(dual_result.shape)
        self.assertEqual(intf.size(dual_result), (1, 1))
Example #6
def num(n=64,m=64,s=5,seed=3,opt='random',func='admit',tol=.01):
  r'''
  Constructs a network utility maximization problem
  as a sigmoidal program
  
  maximize sum_i f_i(x_i)
  st       Ax <= c, 
           0<=x
           
  matrix A \in \reals^{m \times n} has s entries per row, on average
  capacities c are s/2 for every edge
  
  opt controls how the matrix A is chosen
      'ring': ring topology (m=n)
      'local': (m=n) prob 1/2 that flow i uses edge j for j \in [i,i+2s], 0 else
      'random': prob s/m that flow i uses edge j
  
  func controls how the function f is chosen
      'admit': admittance function (approximation to step function 
               with step of size 1 at x=1)
      'quadratic': f(x) = x^2
  '''
  cvxopt.setseed(seed)
  
  ## Set graph topology
  if opt == 'ring':
    m=n
    A = cvxopt.matrix(([1]*s+[0]*(n-s+1))*(n-1)+[1],(n,n),tc='d')
    c = cvxopt.matrix([s/2]*n,tc='d')
  elif opt == 'local':
    m=n
    probs = ([1]*s+[0]*(n-s+1))*(n-1)+[1]
    A = cvxopt.matrix([round(x*numpy.random.rand()) for x in probs],(n,n),tc='d')
    c = cvxopt.matrix([s/2]*n,tc='d')
    print('There are', sum(A), 'edges used in this NUM problem')
  elif opt == 'random':
    A = cvxopt.matrix([round(.5/(1-float(s)/m)*x) for x in numpy.random.rand(m*n)],
                      (m,n), tc='d')
    print('There are', sum(A), 'edges used in this NUM problem')
    c = cvxopt.matrix([s/2]*m,tc='d')
  else:
    raise ValueError('unknown option %s'%opt)
  
  ## Set utility function
  # admission
  if func=='admit':
    fs = [(functions.admit, functions.admit_prime, 1) for i in range(n)]
  # quadratic
  elif func=='quadratic':
    fs = [(lambda x: pow(x,2), lambda x: 2*x, 0) for i in range(n)]
  else:
    raise ValueError('unknown function %s'%func)
  l = [0]*n
  u = [s/2]*n
  name = 'num_%s_%s_n=%d_m=%d_s=%d'%(opt,func,n,m,s)
  return Problem(l,u,fs,A=A,b=c,tol=tol,name=name)    
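A hypothetical call, assuming the surrounding package supplies the `functions` module and `Problem` class used above:

prob = num(n=16, m=16, s=3, opt='ring', func='quadratic', tol=0.01)
print(prob.name)  # assuming Problem stores its name: 'num_ring_quadratic_n=16_m=16_s=3'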
Example #8
 def test_l1(self):
     from cvxopt import normal, setseed
     import l1
     setseed(100)
     m,n = 500,250
     P = normal(m,n)
     q = normal(m,1)
     u1 = l1.l1(P,q)
     u2 = l1.l1blas(P,q)
     self.assertAlmostEqualLists(list(u1),list(u2),places=3)
Example #9
 def test_l1(self):
     from cvxopt import normal, setseed
     import l1
     setseed(100)
     m, n = 500, 250
     P = normal(m, n)
     q = normal(m, 1)
     u1 = l1.l1(P, q)
     u2 = l1.l1blas(P, q)
     self.assertAlmostEqualLists(list(u1), list(u2), places=3)
Example #10
 def test_l1(self):
     setseed(100)
     m,n = 500,250
     P = normal(m,n)
     q = normal(m,1)
     u1,st1 = l1(P,q)
     u2,st2 = l1blas(P,q)
     self.assertTrue(st1 == 'optimal')
     self.assertTrue(st2 == 'optimal')
     self.assertAlmostEqualLists(list(u1),list(u2),places=3)
Example #11
    def test_l1regls(self):
        setseed(100)
        m,n = 250,500
        A = normal(m,n)
        b = normal(m,1)

        x,st = l1regls(A,b)
        self.assertTrue(st == 'optimal')
        # Check optimality conditions (the list should be empty, i.e. falsy)
        self.assertFalse([t for t in zip(A.T*(A*x-b),x) if abs(t[1])>1e-6 and abs(t[0]) > 1.0])
Example #12
    def test_l1regls(self):
        from cvxopt import normal, setseed
        import l1regls
        setseed(100)
        m,n = 250,500
        A = normal(m,n)
        b = normal(m,1)

        x = l1regls.l1regls(A,b)
        # Check optimality conditions (the list should be empty, i.e. falsy)
        self.assertFalse([t for t in zip(A.T*(A*x-b),x) if abs(t[1])>1e-6 and abs(t[0]) > 1.0])
Example #13
    def test_l1regls(self):
        setseed(100)
        m, n = 250, 500
        A = normal(m, n)
        b = normal(m, 1)

        x = l1regls(A, b)
        # Check optimality conditions (the list should be empty, i.e. falsy)
        self.assertFalse([
            t for t in zip(A.T * (A * x - b), x)
            if abs(t[1]) > 1e-6 and abs(t[0]) > 1.0
        ])
Example #14
    def setUp(self):
        """
        Use cvxopt to get ground truth values
        """

        from cvxopt import lapack, solvers, matrix, spdiag, log, div, normal, setseed
        from cvxopt.modeling import variable, op, max, sum
        solvers.options['show_progress'] = 0

        setseed()
        m, n = 100, 30
        A = normal(m, n)
        b = normal(m, 1)
        b /= (1.1 * max(abs(b)))
        self.m, self.n, self.A, self.b = m, n, A, b

        # l1 approximation
        # minimize || A*x + b ||_1
        x = variable(n)
        op(sum(abs(A * x + b))).solve()
        self.x1 = x.value

        # l2 approximation
        # minimize || A*x + b ||_2
        bprime = -matrix(b)
        Aprime = matrix(A)
        lapack.gels(Aprime, bprime)
        self.x2 = bprime[:n]

        # Deadzone approximation
        # minimize sum(max(abs(A*x+b)-0.5, 0.0))
        x = variable(n)
        dzop = op(sum(max(abs(A * x + b) - 0.5, 0.0)))
        dzop.solve()
        self.obj_dz = sum(
            np.max([np.abs(A * x.value + b) - 0.5,
                    np.zeros((m, 1))], axis=0))

        # Log barrier
        # minimize -sum (log ( 1.0 - (A*x+b)**2))
        def F(x=None, z=None):
            if x is None: return 0, matrix(0.0, (n, 1))
            y = A * x + b
            if max(abs(y)) >= 1.0: return None
            f = -sum(log(1.0 - y**2))
            gradf = 2.0 * A.T * div(y, 1 - y**2)
            if z is None: return f, gradf.T
            H = A.T * spdiag(2.0 * z[0] * div(1.0 + y**2, (1.0 - y**2)**2)) * A
            return f, gradf.T, H

        self.cxlb = solvers.cp(F)['x']
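The gradient and Hessian inside F follow from differentiating each barrier term; spelling the calculus out (not in the original) may help:

# For each component, with y = A*x + b and f(y) = -log(1 - y**2):
#   f'(y)  = 2*y / (1 - y**2)              -> gradf = 2.0 * A.T * div(y, 1 - y**2)
#   f''(y) = 2*(1 + y**2) / (1 - y**2)**2  -> H = A.T * spdiag(2.0*z[0]*f''(y)) * A
# (quotient rule: d/dy [2y/(1-y^2)] = (2(1-y^2) + 4y^2)/(1-y^2)^2.)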
Example #15
    def setUp(self):
        """
        Use cvxopt to get ground truth values
        """

        from cvxopt import lapack,solvers,matrix,spdiag,log,div,normal,setseed
        from cvxopt.modeling import variable,op,max,sum
        solvers.options['show_progress'] = 0

        setseed()
        m,n = 100,30
        A = normal(m,n)
        b = normal(m,1)
        b /= (1.1*max(abs(b)))
        self.m,self.n,self.A,self.b = m,n,A,b

        # l1 approximation
        # minimize || A*x + b ||_1
        x = variable(n)
        op(sum(abs(A*x+b))).solve()
        self.x1 = x.value

        # l2 approximation
        # minimize || A*x + b ||_2
        bprime = -matrix(b)
        Aprime = matrix(A)
        lapack.gels(Aprime,bprime)
        self.x2 = bprime[:n]

        # Deadzone approximation
        # minimize sum(max(abs(A*x+b)-0.5, 0.0))
        x = variable(n)
        dzop = op(sum(max(abs(A*x+b)-0.5, 0.0)))
        dzop.solve()
        self.obj_dz = sum(np.max([np.abs(A*x.value+b)-0.5,np.zeros((m,1))],axis=0))

        # Log barrier
        # minimize -sum (log ( 1.0 - (A*x+b)**2))
        def F(x=None, z=None):
            if x is None: return 0, matrix(0.0,(n,1))
            y = A*x+b
            if max(abs(y)) >= 1.0: return None
            f = -sum(log(1.0 - y**2))
            gradf = 2.0 * A.T * div(y, 1-y**2)
            if z is None: return f, gradf.T
            H = A.T * spdiag(2.0*z[0]*div(1.0+y**2,(1.0-y**2)**2))*A
            return f,gradf.T,H
        self.cxlb = solvers.cp(F)['x']
Example #16
def test_minres():

    setseed(2)
    n=35
    G=matrix(np.eye(n), tc='d')
    for jj in range(5):
        gg=normal(n,1)
        hh=gg*gg.T
        G+=(hh+hh.T)*0.5
        G+=normal(n,1)*normal(1,n)

    G=(G+G.T)/2

    b=normal(n,1)


    svx=+b
    gesv(G,svx)
    
    tol=1e-10
    show=False
    maxit=None
    t1=t.time()

    # Create a MINRES class
    m = MINRES(G,b)

    m.option['show'] = show
    m.option['rtol'] = tol

    m.solve()

    mg=max(G-G.T)
    if mg>1e-14:sym='No'
    else: sym='Yes'
    alg='MINRES'

    print(alg)
    print("Is linear operator symmetric? (Symmetry is required) " + sym)
    print("n: %3g  iterations:   %3g" % (n, m.iter))
    print(" norms computed in", alg)
    print(" ||x||  %9.4e  ||r|| %9.4e " % (nrm2(m.x), nrm2(G*m.x - m.b)))
Example #17
    def test_case3(self):
        m, n = 500, 100
        setseed(100)
        A = normal(m,n)
        b = normal(m)

        x1 = variable(n)
        lp1 = op(max(abs(A*x1-b)))
        lp1.solve()
        self.assertTrue(lp1.status == 'optimal')

        x2 = variable(n)
        lp2 = op(sum(abs(A*x2-b)))
        lp2.solve()
        self.assertTrue(lp2.status == 'optimal')

        x3 = variable(n)
        lp3 = op(sum(max(0, abs(A*x3-b)-0.75, 2*abs(A*x3-b)-2.25)))
        lp3.solve()
        self.assertTrue(lp3.status == 'optimal')
Example #18
def test_minres():

    setseed(2)
    n = 35
    G = matrix(np.eye(n), tc='d')
    for jj in range(5):
        gg = normal(n, 1)
        hh = gg * gg.T
        G += (hh + hh.T) * 0.5
        G += normal(n, 1) * normal(1, n)

    G = (G + G.T) / 2

    b = normal(n, 1)

    svx = +b
    gesv(G, svx)

    tol = 1e-10
    show = False
    maxit = None
    t1 = t.time()

    # Create a MINRES class
    m = MINRES(G, b)

    m.option['show'] = show
    m.option['rtol'] = tol

    m.solve()

    mg = max(G - G.T)
    if mg > 1e-14: sym = 'No'
    else: sym = 'Yes'
    alg = 'MINRES'

    print(alg)
    print("Is linear operator symmetric? (Symmetry is required) " + sym)
    print("n: %3g  iterations:   %3g" % (n, m.iter))
    print(" norms computed in", alg)
    print(" ||x||  %9.4e  ||r|| %9.4e " % (nrm2(m.x), nrm2(G * m.x - m.b)))
Example #19
    def test_case3(self):
        m, n = 500, 100
        setseed(100)
        A = normal(m, n)
        b = normal(m)

        x1 = variable(n)
        lp1 = op(max(abs(A * x1 - b)))
        lp1.solve()
        self.assertTrue(lp1.status == 'optimal')

        x2 = variable(n)
        lp2 = op(sum(abs(A * x2 - b)))
        lp2.solve()
        self.assertTrue(lp2.status == 'optimal')

        x3 = variable(n)
        lp3 = op(
            sum(max(0,
                    abs(A * x3 - b) - 0.75, 2 * abs(A * x3 - b) - 2.25)))
        lp3.solve()
        self.assertTrue(lp3.status == 'optimal')
Example #20
File: robls.py Project: cvxopt/cvxopt
# Assumed context for this fragment of robls.py (the function definition and
# imports were cut off by the snippet window):
from cvxopt import matrix, spdiag, sqrt, div, normal, setseed, lapack, solvers

def robls(A, b, rho):
    m, n = A.size
    def F(x=None, z=None):
        if x is None: return 0, matrix(0.0, (n,1))
        y = A*x-b
        w = sqrt(rho + y**2)
        f = sum(w)
        Df = div(y, w).T * A 
        if z is None: return f, Df 
        H = A.T * spdiag(z[0]*rho*(w**-3)) * A
        return f, Df, H

    return solvers.cp(F)['x']


setseed()
m, n  = 500, 100
A = normal(m,n)
b = normal(m,1)
xh = robls(A,b,0.1)

try: import pylab
except ImportError: pass
else:

    # Least-squares solution.
    pylab.subplot(211)
    xls = +b
    lapack.gels(+A,xls)
    rls =  A*xls[:n] - b
    pylab.hist(list(rls), m//5)
Example #21
from cvxpy import *
from mixed_integer import *
import cvxopt

# Feature selection on a linear kernel SVM classifier.
# Uses the Alternating Direction Method of Multipliers
# with a (non-convex) cardinality constraint.

# Generate data.
cvxopt.setseed(1)
N = 50
M = 40
n = 10
data = []
for i in range(N):
    data += [(1, cvxopt.normal(n, mean=1.0, std=2.0))]
for i in range(M):
    data += [(-1, cvxopt.normal(n, mean=-1.0, std=2.0))]

# Construct problem.
gamma = Parameter(sign="positive")
gamma.value = 0.1
# 'a' is a variable constrained to have at most 6 non-zero entries.
a = SparseVar(n, nonzeros=6)
b = Variable()

slack = [pos(1 - label * (sample.T * a - b)) for (label, sample) in data]
objective = Minimize(norm2(a) + gamma * sum(slack))
p = Problem(objective)
# Extensions can attach new solve methods to the CVXPY Problem class.
p.solve(method="admm")
Example #22
maxcut.add_constraint(pic.tools.diag_vect(X) == 1)

# Constrain X to be positive semidefinite.
maxcut.add_constraint(X >> 0)

# Set the objective.
maxcut.set_objective('max', L | X)

#print(maxcut)

# Solve the problem.
maxcut.solve(verbose=0, solver='cvxopt')

#print('bound from the SDP relaxation: {0}'.format(maxcut.obj_value()))
# Use a fixed RNG seed so the result is reproducible.
cvx.setseed(1)

# Perform a Cholesky factorization.
V = X.value
cvxopt.lapack.potrf(V)
for i in range(N):
    for j in range(i + 1, N):
        V[i, j] = 0

# Do up to 100 projections. Stop if we are within a factor 0.878 of the SDP
# optimal value.
count = 0
obj_sdp = maxcut.obj_value()
obj = 0
while count < 100 and obj < 0.878 * obj_sdp:
    r = cvx.normal(20, 1)
Example #23
    def run(self, lam=10.0, mu=0.0, eps=0.0, s0_val=0.001, verbose=False):
        G_tmp, h_tmp = get_G_h(self.var_No)
        h_eps = matrix(0.0, (self.G_No,1))
        G_x=[]; G_i=[]; G_j=[]
        for G_name in self.SG:
            G = self.SG.node[G_name]
            if G['type'] == 'G':
                g_cnt = G['cnt']
                h_eps[g_cnt] = G['intensity'] + eps
                for I_name in self.SG[G_name]:
                    i_cnt = self.SG.node[I_name]['cnt']
                    G_x.append(1.0)
                    G_i.append(g_cnt)
                    G_j.append(i_cnt)
                G_x.append(-1.0)
                G_i.append(g_cnt)
                G_j.append(self.GI_No + self.M_No + g_cnt)
        G_tmp2 = spmatrix( G_x, G_i, G_j, size=( self.G_No, self.var_No ) )
        G = sparse([G_tmp, G_tmp2])
        h = matrix([h_tmp, h_eps])

        A_tmp, b = get_A_b(self.SG, self.M_No, self.I_No, self.GI_No)
        A_eps = spmatrix([],[],[], (self.I_No, self.G_No))
        A = sparse([[A_tmp],[A_eps]])

        x0 = get_initvals(self.var_No)
        x0['s'] = matrix(s0_val, size=h.size)

        c = matrix([matrix( -1.0,       size=(self.GI_No,1) ),
                    matrix( mu,         size=(self.M_No,1)  ),
                    matrix( (1.0+lam),  size=(self.G_No,1)  )   ])

        # The BLAS library performs calculations in parallel and their order
        # depends on the seed, so running the solver twice on the same input
        # can give different outputs, only some of which carry the optimal
        # flag. Reseeding is a poor man's workaround for this numeric
        # instability.
        setseed(randint(0, 1000000))
        self.sol = solvers.conelp(c=c,G=G,h=h,A=A,b=b,primalstart=x0)
        Xopt = self.sol['x']

        alphas = [] # reporting results
        for N_name in self.SG:
            N = self.SG.node[N_name]
            if N['type'] == 'M':
                N['estimate'] = Xopt[self.GI_No + N['cnt']]
                alphas.append(N.copy())
            if N['type'] == 'G':
                N['estimate'] = 0.0
                for I_name in self.SG[N_name]:
                    NI = self.SG.edge[N_name][I_name]
                    assert Xopt[NI['cnt']] > -0.01, "Optimization need to be checked: seriously negative result obtained."
                    estimate = max(Xopt[NI['cnt']], 0.0)
                    NI['estimate'] = Xopt[NI['cnt']]
                    g_cnt = self.GI_No + self.M_No + N['cnt']
                    N['relaxation'] = Xopt[g_cnt]
                    N['estimate'] += Xopt[NI['cnt']]
        res = { 'alphas':   alphas,
                'L1_error': self.get_L1_error(),
                'L2_error': self.get_L2_error(),
                'underestimates': self.get_L1_signed_error(sign=1.0),
                'overestimates':  self.get_L1_signed_error(sign=-1.0),
                'status':   self.sol['status'],
                'SG': self.SG     }
        if verbose:
            res['param']= {'c':c,'G':G,'h':h,'A':A,'b':b,'x0':x0}
            res['sol']  = self.sol
        return res
Example #24
def blockarrow_problem(p, r, s, arrowwidth, seed=None):

    """
    Generates problem variables A, b, C, L for converted and unconverted
    problems. In the unconverted form, the aggregate sprasity is
    block-diagonal-arrow, and in the converted form, the correlative 
    sparsity is block-diagonal.
    
    ARGUMENTS:
        p : order of each diagonal block
        
        r : number of diagonal blocks
        
        s : number of constraints per clique
        
        arrowwidth : width of arrow in aggregate sparsity pattern
        
        seed : random generator seed
    
    RETURNS: dictionary with following fields
    
        Au, bu, Cu, X0u : unconverted problem parameters and feasible starting
            point X0u. Each is given as a matrix.
                    
        Sp : Aggregate sparsity pattern, represented by binary matrix
    
    """
    def get_blockarrow_index(k,p,r,arrowwidth):
        """
        Macro for getting indices of kth blockarrow clique.
        """
        index = list(range(k*p, (k+1)*p))
        index.extend(range(r*p,r*p+arrowwidth))
        return index
    
    def get_psd_matrix(p):
        tmp = matrix(normal((p)**2),(p,p))/2.0
        tmp = tmp + tmp.T
        while True:
            try:
                lapack.potrf(+tmp)
                break
            except ArithmeticError:
                # not positive definite yet; shift the spectrum and retry
                tmp = tmp + .1*eye(p)
        return tmp
        
    setseed(seed)
    
    NVar = p*r+arrowwidth
    NCon = s*r
    
    # Generate sparsity pattern
    
    I = []
    J = []
    val = []

    for k in range(r):
        I.extend(list(range(p*k, p*k+p)) * p)
        ji = [[i]*p for i in range(p*k, p*k+p)]
        J.extend([i for sublist in ji for i in sublist])
    for k in range(r):
        ji = [[i]*arrowwidth for i in range(p*k, p*k+p)]
        I.extend(list(range(r*p, r*p+arrowwidth)) * p)
        J.extend([i for sublist in ji for i in sublist])
        J.extend(list(range(r*p, r*p+arrowwidth)) * p)
        I.extend([i for sublist in ji for i in sublist])
    k = r
    ji = [[i]*arrowwidth for i in range(p*k, p*k+arrowwidth)]
    I.extend(list(range(r*p, r*p+arrowwidth)) * arrowwidth)
    J.extend([i for sublist in ji for i in sublist])
    
    # Generate X and S
    
    X = spmatrix(0.0,I,J,size=(NVar,NVar))
    S = spmatrix(0.0,I,J,size=(NVar,NVar))
    
    for k in range(r):
        index = get_blockarrow_index(k,p,r,arrowwidth)
        tmp = get_psd_matrix(p+arrowwidth)
        X[index,index] = tmp
        tmp = get_psd_matrix(p+arrowwidth)
        S[index,index] = S[index,index] + tmp
    Sp = spmatrix(1,I,J,size=(NVar,NVar))    
    # Generate A and b
    b = matrix(0.0,(NCon,1));
    AI,AJ,AV = [],[],[]
    C = +S
    for k in range(r):
        index = get_blockarrow_index(k,p,r,arrowwidth)
        block = []
        for i in range(s):
            square = matrix(normal((p+arrowwidth)**2),(p+arrowwidth,p+arrowwidth))/2.0
            square = square + square.T
            C[index,index] = C[index,index] + normal(1)[0]*square
            block.append(square[:].T)
        block = matrix(block).T   
        b[s*k:(k+1)*s] =  block.T*X[index,index][:]
        for i in range(s):
            AI.extend([(l+NVar*j)  for j in index for l in index])
            AJ.extend([(s*k+i)] * ((p+arrowwidth)**2))
        AV.extend(list(block[:]))


    A = spmatrix(AV,AI,AJ,size=(NVar**2,NCon))
    return A,b,C,Sp
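A hypothetical call, assuming cvxopt's matrix/spmatrix/normal/setseed/lapack and an eye() helper are in scope as the function body requires:

A, b, C, Sp = blockarrow_problem(p=4, r=3, s=2, arrowwidth=2, seed=0)
print(A.size)  # ((p*r + arrowwidth)**2, s*r) = (196, 6)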
Example #25
from cvxopt import matrix, spmatrix, normal, setseed, blas, lapack, solvers
import nucnrm
import numpy as np

# Solves a randomly generated nuclear norm minimization problem 
#
#    minimize || A(x) + B ||_*
#
# with n variables, and matrices A(x), B of size p x q.

setseed(0)

m, K = 2, 4
p, q, n = 3, 1, 8

U = np.random.randn(m,K)
A = np.zeros((p*q,n))
A[0,1] = U[0,0]
A[0,2] = U[1,0]
A[1,3] = U[0,0]
A[1,4] = U[1,0]
A[2,5] = U[0,0]
A[2,6] = U[1,0]

A = matrix(A)
B = matrix(np.zeros((p,q)))

G = np.zeros((1,n))
G[0,0] = U[0,0]
Example #26
def update(rule, x, A, b, loss, args, block, iteration):
  f_func = loss.f_func
  g_func = loss.g_func
  h_func = loss.h_func
  lipschitz = loss.lipschitz
  #mipschitz = loss.mipschitz

  # L2 = args["L2"]
  
  block_size = 0 if block is None else block.size
  param_size = x.size

  print("block size: %d, param size: %d" % (block_size, param_size))

  if rule in ["quadraticEg", "Lb"]:    
    """This computes the eigen values of the lipschitz values corresponding to the block"""
    # WE NEED TO DOUBLE CHECK THIS!
    G = g_func(x, A, b, block)
    L_block = loss.Lb_func(x, A, b, block)
    x[block] = x[block] - G / L_block

    return x, args

  elif rule in ["newtonUpperBound", "Hb"]:    

    G = g_func(x, A, b, block)
    H = loss.Hb_func(x, A, b, block)
    d = - np.linalg.pinv(H).dot(G)

    x[block] = x[block] + d

    return x, args

  elif rule == "LA":
    G = g_func(x, A, b, block)
    L_block = loss.Lb_func(x, A, b, block)
    
    Lb = np.max(args["LA_lipschitz"][block])
    
    while True:
      x_new = x.copy()
      x_new[block] = x_new[block] - G / Lb

      RHS = f_func(x,A,b) - (1./(2. * Lb)) * (G**2).sum()
      LHS = f_func(x_new,A,b)
      
      if LHS <= RHS:
        break

      Lb *= 2.

    args["LA_lipschitz"][block] = Lb

    return x_new, args

  # Line Search
  elif rule in ["LS", "LS-full"]:    

    H = h_func(x, A, b, block)

    g = g_func(x, A, b, block)

    f_simple = lambda x: f_func(x, A, b)
    d_func = lambda alpha: (- alpha * Main.solve(H, g))

    alpha = line_search.perform_line_search(x.copy(), g, 
                                block, f_simple, d_func, alpha0=1.0,
                                proj=None)
    print("alpha: %f" % alpha)

    x[block] = x[block] + d_func(alpha)
    return x, args

  elif rule in ["SDDM", "SDDM-full"]:
    if iteration == 0:
      Main.reset_solver()

    reuse_solver = False
    if rule == "SDDM-full":
      if args["loss"] == "ls" and args["L2"] == 0 and args["L1"] == 0:
        # Hessian will be constant for all iterates
        reuse_solver = True

    H = h_func(x, A, b, block)

    if not issymmetric(H):
      print("not symmetric possibly due to numerical issues")
      H = np.tril(H) + np.triu(H.T, 1)

    dd = diagonal_dominance(H)
    if not np.all(dd > 0):
      if np.all(dd == 0):
        # If we are only diagonally dominant, increase the diagonal slightly
        # so we are positive definite
        H[np.diag_indices_from(H)] += 1e-4
      else:
        print("not SDD")
        # Increase the diagonal by the sum of the absolute values
        # of the corresponding row to make it diagonally dominant
        res = np.sum(np.abs(H), axis=1) - 2*np.abs(np.diag(H))
        res[res < 0] = 0
        H[np.diag_indices_from(H)] += (res * np.where(np.diag(H)>=0, 1, -1))


    if not ismmatrix(H):
      print("not M matrix")
      # Set positive off-diagonal values to 0 since we need an M-matrix
      diag = np.diag(H).copy()
      H[H > 0] = 0; diag[diag < 0] = 0;
      H[np.diag_indices_from(H)] += diag

    g = g_func(x, A, b, block) 

    f_simple = lambda x: f_func(x, A, b)
    d_func = lambda alpha: (alpha * Main.solve_SDDM(H, -g, reuse_solver=reuse_solver))
    alpha = line_search.perform_line_search(x.copy(), g, 
                                block, f_simple, d_func, alpha0=1.0,
                                proj=None)
    print("alpha: %f" % alpha)

    if rule == "SDDM-full":
      x = x + d_func(alpha)
    else:
      x[block] = x[block] + d_func(alpha)

    return x, args

  ### Constrained update rules
  elif rule in ["Lb-NN"]:
    G = g_func(x, A, b, block)
    L_block = loss.Lb_func(x, A, b, block)
    x[block] = x[block] - G / L_block

    x[block] = np.maximum(x[block], 0.)

    return x, args

  elif rule == "TMP-NN":
    L = lipschitz[block]

    grad_list = g_func(x, A, b, block)
    hess_list = h_func(x, A, b, block)

    H = np.zeros((block_size, block_size))
    G = np.zeros(block_size) 

    # The active set is on the bound close to x=0
    active = np.logical_and(x[block] < 1e-4, grad_list > 0)
    work = np.logical_not(active)

    # active
    ai = np.where(active == 1)[0]
    gA = grad_list[active]

    G[ai] = gA / (np.sum(L[active]))
    H[np.ix_(ai, ai)] = np.eye(ai.size)
    # work set
    wi = np.where(work == 1)[0]

    gW = grad_list[work]
    hW = hess_list[work][:, work]

    G[wi] = gW
    H[np.ix_(wi, wi)] = hW

    # Perform Line search
    alpha = 1.0
    
    u_func = lambda alpha: (- alpha * Main.solve(H, G))
    f_simple = lambda x: f_func(x, A, b, assert_nn=0)

    alpha = line_search.perform_line_search(x.copy(), G, 
                              block, f_simple, u_func, alpha0=1.0,
                                proj=lambda x: np.maximum(0, x))


    x[block] = np.maximum(x[block] + u_func(alpha), 0)

    return x, args

  elif rule == "qp-nn":
    cvxopt.setseed(1)
    non_block = np.delete(np.arange(param_size), block)
    k = block.size

    # 0.5*xb ^T (Ab^T Ab) xb + xb^T[Ab^T (Ac xc - b) + lambda*ones(nb)]
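    # Derivation sketch (not in the original): restricting
    # 0.5*||A x - b||^2 + L1*||x_b||_1 to the block x_b gives
    #   0.5*||Ab x_b + (Ac x_c - b)||^2
    #     = 0.5*x_b'(Ab'Ab)x_b + x_b'Ab'(Ac x_c - b) + const,
    # matching the P, q built below; since x_b >= 0 is enforced,
    # ||x_b||_1 = ones'x_b, so the L1 weight enters q linearly.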
    Ab = matrix(A[:, block])
    bb = matrix(A[:, non_block].dot(x[non_block]) - b)

    P = Ab.T*Ab
    q = (Ab.T*bb + args["L1"]*matrix(np.ones(k)))

    G = matrix(-np.eye(k))
    h = matrix(np.zeros(k))
    # Solver options must be set before the call to take effect.
    # cvxopt.solvers.options['maxiters'] = 1000
    cvxopt.solvers.options['abstol'] = 1e-16
    cvxopt.solvers.options['reltol'] = 1e-16
    cvxopt.solvers.options['feastol'] = 1e-16
    x_block = np.array(solvers.qp(P=P, q=q,
                                  G=G, h=h, solver="glpk")['x']).ravel()
    x[block] = np.maximum(x_block, 0)


    return x, args

  ### BELIEF PROPAGATION ALGORITHMS

  elif rule in ["bpExact", "bpExact-lap"]:
      n_params = x.size
      all_indices = np.arange(n_params)
      
      non_block_indices = np.delete(all_indices, block)
      
      A_bc = A[block][:, non_block_indices]
      A_bb = A[block][:, block]
      
      b_prime = A_bc.dot(x[non_block_indices]) - b[block]

      if rule == "bpExact":
        x[block] = Main.solve(A_bb, -b_prime)
        # are you missing the x[block] + ?
        # Ans:
        # No, this is the exact update of the objective function formulation under
        # Appendix B. Derivation of Block Belief Propagation Update.
      else:
        x[block] = Main.solve_SDDM(A_bb, -b_prime)

      return x, args

  elif rule == "bpExact-lap-full":
      if iteration == 0:
        Main.reset_solver()

      x = Main.solve_SDDM(A, b, reuse_solver=True)
      return x, args


  elif rule == "bpExact-full":
      x = Main.solve(A, b)
      return x, args


  elif rule == "bpGabp":
      A_sub = A[block][:, block]

      ######## ADDED
      _, n_features = A.shape
      all_indices = np.arange(n_features)
      non_block_indices = np.delete(all_indices, block)
      
      A_bc = A[block][:, non_block_indices]
      b_sub = A_bc.dot(x[non_block_indices]) - b[block]
      b_sub = - b_sub
      
      #########
      max_iter = 100
      epsilon = 1e-8
      
      #import pdb; pdb.set_trace()
      P = np.diag(np.diag(A_sub))
      U = np.diag(b_sub / np.diag(A_sub))
      
      n_features = A_sub.shape[0]
      
      # Stage 2 - iterate
      for iteration in range(max_iter):
         # record last round messages for convergence detection
         old_U = U.copy(); 
      
         for i in range(n_features):
             for j in range(n_features):
               
               if (i != j and A_sub[i,j] != 0):
                   # Compute P i\j - line 2
                   p_i_minus_j = np.sum(P[:,i]) - P[j,i]  
                   assert(p_i_minus_j != 0);
              
                   # Compute P ij - line 2
                   P[i,j] = -A_sub[i,j] * A_sub[j,i] / p_i_minus_j;
                   
                   # Compute U i\j - line 2
                   h_i_minus_j = (np.sum(P[:,i] * U[:,i]) - P[j,i]*U[j,i]) / p_i_minus_j;

                   # Compute U ij - line 3
                   U[i,j] = - A_sub[i,j] * h_i_minus_j / P[i,j];
                   #import pdb;pdb.set_trace()
                   
         
         # Stage 3 - convergence detection
         if (np.sum(np.sum((U - old_U)**2)) < epsilon):
               #print 'GABP converged in round %d ' % iteration
               break
       
      # Stage 4 - infer
      Pf = np.zeros(n_features);
      x_tmp = np.zeros(n_features);
      
      for i in range(n_features):
         Pf[i] = np.sum(P[:,i]); 
         x_tmp[i] = np.sum(U[:,i] * P[:,i]) / Pf[i];

      ##### Exact
      #x_exact = block_update(A, b, theta, block)
      x[block] = x_tmp

      
      return x, args

  else:
    print(("update rule %s doesn't exist" % rule))
    raise
Example #27
def update_Caratheodory(rule, x, A, b, loss, args, block, iteration):
  f_func = loss.f_func
  g_func = loss.g_func
  h_func = loss.h_func
  lipschitz = loss.lipschitz

  block_size = block.size
  param_size = x.size

  if rule in ["quadraticEg", "Lb"]:    
    """This computes the eigen values of the lipschitz values corresponding to the block"""

    G, G_persample = g_func(x, A, b, block)
    L_block = loss.Lb_func(x, A, b, block)
    d = - G / L_block
    factor = 1 / L_block
    w_star, idx_star, _ = recomb_step(G / A.shape[0], G_persample)
    w_star *= A.shape[0]
    x_tm1 = np.copy(x[block])
    iter_ca, x[block] = Caratheodory_Acceleration(A[idx_star[:,None],block], 
                                  b[idx_star], w_star,
                                  h_func(x_tm1, A, b, block),factor,
                                  x_tm1, G, 
                                  args["L2"], args["L1"])

  elif rule in ["newtonUpperBound", "Hb"]:    

    G, G_persample = g_func(x, A, b, block)
    H = loss.Hb_func(x, A, b, block)
    d = - np.linalg.pinv(H).dot(G)
    
    factor = np.linalg.pinv(H)
    w_star, idx_star, _ = recomb_step(G / A.shape[0], G_persample)
    w_star = w_star * A.shape[0]

    x_tm1 = np.copy(x[block])

    iter_ca, x[block] = Caratheodory_Acceleration(A[idx_star[:,None],block],b[idx_star],w_star,
                                  H,factor,
                                  x_tm1, G, 
                                  args["L2"], args["L1"])

  elif rule == "LA":
    G, G_persample = g_func(x, A, b, block)
    L_block = loss.Lb_func(x, A, b, block)
    Lb = np.max(args["LA_lipschitz"][block])
    
    while True:
      x_new = x.copy()
      x_new[block] = x_new[block] - G / Lb

      RHS = f_func(x,A,b) - (1./(2. * Lb)) * (G**2).sum()
      LHS = f_func(x_new,A,b)
      
      if LHS <= RHS:
        break

      Lb *= 2.

    args["LA_lipschitz"][block] = Lb
    
    d = - G / Lb
    factor = 1 / Lb
    w_star, idx_star, _ = recomb_step(G / A.shape[0], G_persample)
    w_star = w_star * A.shape[0]
    x_tm1 = np.copy(x[block])

    iter_ca, x[block] = Caratheodory_Acceleration(A[idx_star[:,None],block],b[idx_star],w_star,
                                  h_func(x_tm1, A, b, block),factor,
                                  x_tm1, G, 
                                  args["L2"], args["L1"])

  ### Constrained update rules
  elif rule in ["Lb-NN"]:
    G, G_persample = g_func(x, A, b, block)
    L_block = loss.Lb_func(x, A, b, block)
    d = - G / L_block
    factor = 1 / L_block
    w_star, idx_star, _ = recomb_step(G / A.shape[0], G_persample)
    w_star = w_star * A.shape[0]

    x_tm1 = np.copy(x[block])

    iter_ca, x[block] = Caratheodory_Acceleration(A[idx_star[:,None],block],b[idx_star],w_star,
                                  h_func(x, A, b, block),factor,
                                  x_tm1, G, 
                                  args["L2"], args["L1"], True)

  elif rule == "TMP-NN":
    L = lipschitz[block]

    grad_list, G_persample = g_func(x, A, b, block)
    hess_list = h_func(x, A, b, block)

    H = np.zeros((block_size, block_size))
    G = np.zeros(block_size) 

    # The active set is on the bound close to x=0
    active = np.logical_and(x[block] < 1e-4, grad_list > 0)
    work = np.logical_not(active)

    # active
    ai = np.where(active == 1)[0]
    gA = grad_list[active]

    G[ai] = gA / (np.sum(L[active]))
    H[np.ix_(ai, ai)] = np.eye(ai.size)
    # work set
    wi = np.where(work == 1)[0]

    gW = grad_list[work]
    hW = hess_list[work][:, work]

    G[wi] = gW
    H[np.ix_(wi, wi)] = hW

    # Perform Line search
    alpha = 1.0
    
    u_func = lambda alpha: (- alpha * np.dot(np.linalg.inv(H), G))
    f_simple = lambda x: f_func(x, A, b, assert_nn=0)

    alpha = line_search.perform_line_search(x.copy(), G, 
                              block, f_simple, u_func, alpha0=1.0,
                                proj=lambda x: np.maximum(0, x))

    factor = alpha * np.linalg.inv(H)
    w_star, idx_star, _ = recomb_step(G / A.shape[0], G_persample)
    w_star = w_star * A.shape[0]
    x_tm1 = np.copy(x[block])
    iter_ca, x[block] = Caratheodory_Acceleration(A[idx_star[:,None],block],b[idx_star],w_star,
                                  hess_list,factor, #x_tm1, 
                                  x_tm1, grad_list, 
                                  args["L2"], args["L1"], True)
                                  
  elif rule == "qp-nn":
    cvxopt.setseed(1)
    non_block = np.delete(np.arange(param_size), block)
    k = block.size

    # 0.5*xb ^T (Ab^T Ab) xb + xb^T[Ab^T (Ac xc - b) + lambda*ones(nb)]
    Ab = matrix(A[:, block])
    bb = matrix(A[:, non_block].dot(x[non_block]) - b)

    P = Ab.T*Ab
    q = (Ab.T*bb + args["L1"]*matrix(np.ones(k)))

    G = matrix(-np.eye(k))
    h = matrix(np.zeros(k))
    # Solver options must be set before the call to take effect.
    # cvxopt.solvers.options['maxiters'] = 1000
    cvxopt.solvers.options['abstol'] = 1e-16
    cvxopt.solvers.options['reltol'] = 1e-16
    cvxopt.solvers.options['feastol'] = 1e-16
    x_block = np.array(solvers.qp(P=P, q=q,
                                  G=G, h=h, solver="glpk")['x']).ravel()

    x_old = x.copy()
    d = x_block - x_old[block]

    factor = 1.
    G, G_persample = g_func(x, A, b, block)
    w_star, idx_star, ERR = recomb_step(d/ A.shape[0], G_persample)
    if ERR != 0:
      iter_ca = 0
      return x, args, iter_ca
    
    w_star = w_star * A.shape[0]
    x_tm1 = np.copy(x[block])

    iter_ca, x[block] = Caratheodory_Acceleration(A[idx_star[:,None],block],b[idx_star],w_star,
                                  h_func(x, A, b, block),factor, #x_tm1, 
                                  x_tm1, G, 
                                  args["L2"], args["L1"], True)

  else:
    print(("update rule %s doesn't exist" % rule))
    raise

  return x, args, iter_ca
Example #28
from qcqprel import *
"""
Problem

Minimize   (Ax+b)'(Ax+b) + c' x + d
s.t.        x' x <= 1
	    x[0]*x[1]= 0.05
We solve this program with SDP Relaxation of QCQP
"""
n = 9
m = 5

Id = matrix(0., (n, n))
Id[::n + 1] = 1.

setseed(2)
A = normal(m, n)
A2 = A.T * A

b = normal(m, 1)

c = normal(n, 1)
d = 2.

Z = matrix(0., (n, n))
Z[1, 0] = 1.
beq0 = -0.05 * 2

# 0. SDP Relaxation of QCQP
relP0 = {'P0': A2, 'b0': 2 * A.T * b + c, 'c0': d + b.T * b}
relG0 = {
Example #29
def update(rule, x, A, b, loss, args, block, iteration):
    f_func = loss.f_func
    g_func = loss.g_func
    h_func = loss.h_func
    lipschitz = loss.lipschitz
    #mipschitz = loss.mipschitz

    # L2 = args["L2"]

    block_size = block.size
    param_size = x.size

    if rule in ["quadraticEg", "Lb"]:
        """This computes the eigen values of the lipschitz values corresponding to the block"""
        # WE NEED TO DOUBLE CHECK THIS!
        G = g_func(x, A, b, block)
        L_block = loss.Lb_func(x, A, b, block)
        x[block] = x[block] - G / L_block

        return x, args

    elif rule in ["newtonUpperBound", "Hb"]:

        G = g_func(x, A, b, block)
        H = loss.Hb_func(x, A, b, block)
        d = -np.linalg.pinv(H).dot(G)

        x[block] = x[block] + d

        return x, args

    elif rule == "LA":
        G = g_func(x, A, b, block)
        L_block = loss.Lb_func(x, A, b, block)

        Lb = np.max(args["LA_lipschitz"][block])

        while True:
            x_new = x.copy()
            x_new[block] = x_new[block] - G / Lb

            RHS = f_func(x, A, b) - (1. / (2. * Lb)) * (G**2).sum()
            LHS = f_func(x_new, A, b)

            if LHS <= RHS:
                break

            Lb *= 2.

        args["LA_lipschitz"][block] = Lb

        return x_new, args

    # Line Search
    elif rule in ["LS"]:

        H = h_func(x, A, b, block)

        g = g_func(x, A, b, block)

        f_simple = lambda x: f_func(x, A, b)
        d_func = lambda alpha: (-alpha * np.dot(np.linalg.pinv(H), g))

        alpha = line_search.perform_line_search(x.copy(),
                                                g,
                                                block,
                                                f_simple,
                                                d_func,
                                                alpha0=1.0,
                                                proj=None)

        x[block] = x[block] + d_func(alpha)
        return x, args

    ### Constrained update rules
    elif rule in ["Lb-NN"]:
        G = g_func(x, A, b, block)
        L_block = loss.Lb_func(x, A, b, block)
        x[block] = x[block] - G / L_block

        x[block] = np.maximum(x[block], 0.)

        return x, args

    elif rule == "TMP-NN":
        L = lipschitz[block]

        grad_list = g_func(x, A, b, block)
        hess_list = h_func(x, A, b, block)

        H = np.zeros((block_size, block_size))
        G = np.zeros(block_size)

        # The active set is on the bound close to x=0
        active = np.logical_and(x[block] < 1e-4, grad_list > 0)
        work = np.logical_not(active)

        # active
        ai = np.where(active == 1)[0]
        gA = grad_list[active]

        G[ai] = gA / (np.sum(L[active]))
        H[np.ix_(ai, ai)] = np.eye(ai.size)
        # work set
        wi = np.where(work == 1)[0]

        gW = grad_list[work]
        hW = hess_list[work][:, work]

        G[wi] = gW
        H[np.ix_(wi, wi)] = hW

        # Perform Line search
        alpha = 1.0

        u_func = lambda alpha: (-alpha * np.dot(np.linalg.inv(H), G))
        f_simple = lambda x: f_func(x, A, b, assert_nn=0)

        alpha = line_search.perform_line_search(
            x.copy(),
            G,
            block,
            f_simple,
            u_func,
            alpha0=1.0,
            proj=lambda x: np.maximum(0, x))

        x[block] = np.maximum(x[block] + u_func(alpha), 0)

        return x, args

    elif rule == "qp-nn":
        cvxopt.setseed(1)
        non_block = np.delete(np.arange(param_size), block)
        k = block.size

        # 0.5*xb ^T (Ab^T Ab) xb + xb^T[Ab^T (Ac xc - b) + lambda*ones(nb)]
        Ab = matrix(A[:, block])
        bb = matrix(A[:, non_block].dot(x[non_block]) - b)

        P = Ab.T * Ab
        q = (Ab.T * bb + args["L1"] * matrix(np.ones(k)))

        G = matrix(-np.eye(k))
        h = matrix(np.zeros(k))
        # Solver options must be set before the call to take effect.
        # cvxopt.solvers.options['maxiters'] = 1000
        cvxopt.solvers.options['abstol'] = 1e-16
        cvxopt.solvers.options['reltol'] = 1e-16
        cvxopt.solvers.options['feastol'] = 1e-16
        x_block = np.array(solvers.qp(P=P, q=q, G=G, h=h,
                                      solver="glpk")['x']).ravel()
        x[block] = np.maximum(x_block, 0)

        return x, args

    ### BELIEF PROPAGATION ALGORITHMS

    elif rule == "bpExact":
        n_params = x.size
        all_indices = np.arange(n_params)

        non_block_indices = np.delete(all_indices, block)

        A_bc = A[block][:, non_block_indices]
        A_bb = A[block][:, block]

        b_prime = A_bc.dot(x[non_block_indices]) - b[block]

        x[block] = np.linalg.inv(A_bb).dot(
            -b_prime)  # are you missing the x[block] + ?
        # Ans:
        # No, this is the exact update of the objective function formulation under
        # Appendix B. Derivation of Block Belief Propagation Update.

        return x, args

    elif rule == "bpGabp":
        A_sub = A[block][:, block]

        ######## ADDED
        _, n_features = A.shape
        all_indices = np.arange(n_features)
        non_block_indices = np.delete(all_indices, block)

        A_bc = A[block][:, non_block_indices]
        b_sub = A_bc.dot(x[non_block_indices]) - b[block]
        b_sub = -b_sub

        #########
        max_iter = 100
        epsilon = 1e-8

        #import pdb; pdb.set_trace()
        P = np.diag(np.diag(A_sub))
        U = np.diag(b_sub / np.diag(A_sub))

        n_features = A_sub.shape[0]

        # Stage 2 - iterate
        for iteration in range(max_iter):
            # record last round messages for convergence detection
            old_U = U.copy()

            for i in range(n_features):
                for j in range(n_features):

                    if (i != j and A_sub[i, j] != 0):
                        # Compute P i\j - line 2
                        p_i_minus_j = np.sum(P[:, i]) - P[j, i]
                        assert (p_i_minus_j != 0)

                        # Compute P ij - line 2
                        P[i, j] = -A_sub[i, j] * A_sub[j, i] / p_i_minus_j

                        # Compute U i\j - line 2
                        h_i_minus_j = (np.sum(P[:, i] * U[:, i]) -
                                       P[j, i] * U[j, i]) / p_i_minus_j

                        # Compute U ij - line 3
                        U[i, j] = -A_sub[i, j] * h_i_minus_j / P[i, j]
                        #import pdb;pdb.set_trace()

            # Stage 3 - convergence detection
            if (np.sum(np.sum((U - old_U)**2)) < epsilon):
                #print 'GABP converged in round %d ' % iteration
                break

        # Stage 4 - infer
        Pf = np.zeros(n_features)
        x_tmp = np.zeros(n_features)

        for i in range(n_features):
            Pf[i] = np.sum(P[:, i])
            x_tmp[i] = np.sum(U[:, i] * P[:, i]) / Pf[i]

        ##### Exact
        #x_exact = block_update(A, b, theta, block)
        x[block] = x_tmp

        return x, args

    else:
        print(("update rule %s doesn't exist" % rule))
        raise
Example #30
from cvxpy import *
from mixed_integer import *
import cvxopt

# Feature selection on a linear kernel SVM classifier.
# Uses the Alternating Direction Method of Multipliers
# with a (non-convex) cardinality constraint.

# Generate data.
cvxopt.setseed(1)
N = 50
M = 40
n = 10
data = []
for i in range(N):
    data += [(1,cvxopt.normal(n, mean=1.0, std=2.0))]
for i in range(M):
    data += [(-1,cvxopt.normal(n, mean=-1.0, std=2.0))]

# Construct problem.
gamma = Parameter(sign="positive")
gamma.value = 0.1
# 'a' is a variable constrained to have at most 6 non-zero entries.
a = SparseVar(n,nonzeros=6)
b = Variable()

slack = [pos(1 - label*(sample.T*a - b)) for (label,sample) in data]
objective = Minimize(norm2(a) + gamma*sum(slack))
p = Problem(objective)
# Extensions can attach new solve methods to the CVXPY Problem class. 
p.solve(method="admm")
Example #31
File: qp_test.py Project: bpiwowar/kqp
"""


# --- Simple test

n = 2
r = 2
g = matrix([1,0, 0,1], (n,n), 'd')
a = matrix([1, 0, 0, 0.4], (n*r,1), 'd')
nu = matrix(1.,(n,1),'d')
doit("simple", n, r, g, a, nu, 1.)


# --- Random test

setseed(1)
n = 8
r = 5
g = uniform(n,n)
g = g * g.T
a = uniform(n*r,1)
nu = matrix(1.,(n,1),'d')
doit("random", n, r, g, a, nu, 1.)

# --- Simple test

n = 2
r = 2
g = matrix([1,0, 0,1], (n,n), 'd')
a = matrix([1, 0, 0, 0.4], (n*r,1), 'd')
Example #32
        }
"""

# --- Simple test

n = 2
r = 2
g = matrix([1,0, 0,1], (n,n), 'd')
x = matrix(1., (n*(r+1),1))
z = matrix(0., (2*n*r,1))
W = {'d': matrix(1., (2*n*r,1))}
doit("simple", n, r, g, W, x, z)

# --- Random test (diagonal g)

setseed(0)
n = 5
r = 10

g = matrix(0., (n,n), 'd')
g[::n+1] = 1

W = {'d': uniform(2*n*r, 1) }

x = uniform(n*(r+1),1)
z = uniform(2*n*r,1)

doit("diagonal_g", n, r, g, W, x, z)

# --- Constant diagonal
Example #33
def find_max_cut(graph, positions):
    # Move G to LCF notation.
    G = nx.convert_matrix.from_scipy_sparse_matrix(graph)
    pos = {i: positions[i] for i in range(len(positions))}

    # Generate edge capacities.
    c = {}
    for e in sorted(G.edges(data=True)):
        capacity = 1
        e[2]['capacity'] = capacity
        c[(e[0], e[1])] = capacity
        c[(e[1], e[0])] = capacity

    # Convert the capacities to a PICOS expression.
    cc = pic.new_param('c', c)

    # Set source and sink nodes for flow computation.
    s = 16
    t = 10

    # Set node colors.
    N = len(positions)
    node_colors = ['lightgrey'] * N
    node_colors[s] = 'lightgreen'  # Source is green.
    node_colors[t] = 'lightblue'  # Sink is blue.

    # Define a plotting helper that closes the old and opens a new figure.
    def new_figure():
        try:
            global fig
            pylab.close(fig)
        except NameError:
            pass
        fig = pylab.figure(figsize=(11, 8))
        fig.gca().axes.get_xaxis().set_ticks([])
        fig.gca().axes.get_yaxis().set_ticks([])

    # Plot the graph with the edge capacities.
    new_figure()
    nx.draw_networkx(G, pos, node_color=node_colors)
    labels = {
        e: '{} | {}'.format(c[(e[0], e[1])], c[(e[1], e[0])])
        for e in G.edges if e[0] < e[1]
    }
    nx.draw_networkx_edge_labels(G, pos, edge_labels=labels)
    pylab.show()

    # Make G undirected.
    G = nx.Graph(G)

    # Allocate weights to the edges.
    for (i, j) in G.edges():
        G[i][j]['weight'] = 1

    maxcut = pic.Problem()

    # Add the symmetric matrix variable.
    X = maxcut.add_variable('X', (N, N), 'symmetric')

    # Retrieve the Laplacian of the graph.
    LL = 1 / 4. * nx.laplacian_matrix(G).todense()
    L = pic.new_param('L', LL)

    # Constrain X to have ones on the diagonal.
    maxcut.add_constraint(pic.diag_vect(X) == 1)

    # Constrain X to be positive semidefinite.
    maxcut.add_constraint(X >> 0)

    # Set the objective.
    maxcut.set_objective('max', L | X)

    # print(maxcut)

    # Solve the problem.
    maxcut.solve(solver='cvxopt')

    # print('bound from the SDP relaxation: {0}'.format(maxcut.obj_value()))

    # Use a fixed RNG seed so the result is reproducible.
    cvx.setseed(1)

    # Perform a Cholesky factorization.
    V = X.value
    cvxopt.lapack.potrf(V)
    for i in range(N):
        for j in range(i + 1, N):
            V[i, j] = 0

    # Do up to 100 projections. Stop if we are within a factor 0.878 of the SDP
    # optimal value.
    count = 0
    obj_sdp = maxcut.obj_value()
    obj = 0
    while count < 100 and obj < 0.878 * obj_sdp:
        r = cvx.normal(N, 1)  # sample a length-N direction to match V
        x = cvx.matrix(np.sign(V * r))
        o = (x.T * L * x).value
        if o > obj:
            x_cut = x
            obj = o
        count += 1
    x = x_cut

    # Extract the cut and the separated node sets.
    S1 = [n for n in range(N) if x[n] < 0]
    S2 = [n for n in range(N) if x[n] > 0]
    cut = [(i, j) for (i, j) in G.edges() if x[i] * x[j] < 0]
    leave = [e for e in G.edges if e not in cut]

    # Close the old figure and open a new one.
    new_figure()

    # Assign colors based on set membership.
    node_colors = [('lightgreen' if n in S1 else 'lightblue')
                   for n in range(N)]

    # Draw the nodes and the edges that are not in the cut.
    nx.draw_networkx(G, pos, node_color=node_colors, edgelist=leave)
    labels = {e: '{}'.format(G[e[0]][e[1]]['weight']) for e in leave}
    nx.draw_networkx_edge_labels(G, pos, edge_labels=labels)

    # Draw the edges that are in the cut.
    nx.draw_networkx_edges(G, pos, edgelist=cut, edge_color='r')
    labels = {e: '{}'.format(G[e[0]][e[1]]['weight']) for e in cut}
    nx.draw_networkx_edge_labels(G, pos, edge_labels=labels, font_color='r')

    # Show the relaxation optimum value and the cut capacity.
    rval = maxcut.obj_value()
    sval = sum(G[e[0]][e[1]]['weight'] for e in cut)
    fig.suptitle(
        'SDP relaxation value: {0:.1f}\nCut value: {1:.1f} = {2:.3f}×{0:.1f}'.
        format(rval, sval, sval / rval),
        fontsize=16,
        y=0.97)

    # Show the figure.
    pylab.show()

    return S1, S2
Example #34
    k_type = 'rbf'
    # attention: this is the shape parameter of a Gaussian
    # which is 1/sigma^2
    k_param = 2.4

    N_pos = 10
    N_neg = 10
    N_unl = 10

    # generate training labels
    Dy = np.zeros(N_pos+N_neg+N_unl, dtype=int)
    Dy[:N_pos] = 1
    Dy[N_pos+N_unl:] = -1

    # generate training data
    co.setseed(11)
    Dtrainp = co.normal(2,N_pos)*0.6
    Dtrainu = co.normal(2,N_unl)*0.6
    Dtrainn = co.normal(2,N_neg)*0.6
    Dtrain21 = Dtrainn-1
    Dtrain21[0,:] = Dtrainn[0,:]+1
    Dtrain22 = -Dtrain21

    # training data
    Dtrain = co.matrix([[Dtrainp], [Dtrainu], [Dtrainn+0.8]])

    Dtrain = np.array(Dtrain)

    # build the training kernel
    kernel = get_kernel(Dtrain, Dtrain, type=k_type, param=k_param)
Example #35
    def run(self, L1_x=.001, L2_x=.001, L1_alpha=.001, L2_alpha=.001, verbose=False):
        '''Perform deconvolution that minimizes the mean square error.'''


        if verbose:
            print('Preparing matrices')
            print(L1_x, L2_x, L1_alpha, L2_alpha)

        P, q = get_P_q(self.SG, self.M_No, self.var_No, L1_x, L2_x, L1_alpha, L2_alpha)
        x0   = get_initvals(self.var_No)
        G, h = get_G_h(self.var_No)
        A, b = get_A_b(self.SG, self.M_No, self.I_No, self.GI_No)

        setseed(randint(0, 1000000))
        # this is to test from different points
        # apparently this is used by the asynchronous BLAS library
        # I hate the asynchronous BLAS library
        try:
            if verbose:
                print('optimizing')
            self.sol = solvers.qp(P, q, G, h, A, b, initvals=x0)
            if verbose:
                print('finished')
            Xopt = self.sol['x']
            #################### reporting results
            alphas = []
            for N_name in self.SG:
                N = self.SG.nodes[N_name]  # .node was removed in networkx 2.x
                if N['type'] == 'M':
                    N['estimate'] = Xopt[self.GI_No + N['cnt']]
                    alphas.append(N.copy())
                if N['type'] == 'G':
                    N['estimate'] = 0.0
                    for I_name in self.SG[N_name]:
                        NI = self.SG.edges[N_name, I_name]  # .edge was removed in networkx 2.x
                        NI['estimate'] = Xopt[NI['cnt']]
                        N['estimate'] += Xopt[NI['cnt']]

            res = { 'alphas':   alphas,
                    'L1_error': self.get_L1_error(),
                    'L2_error': self.get_L2_error(),
                    'underestimates': self.get_L1_signed_error(sign=1.0),
                    'overestimates':  self.get_L1_signed_error(sign=-1.0),
                    'status':   self.sol['status'],
                    'SG':       self.SG
            }
            if verbose:
                res['param']= {'P':P,'q':q,'G':G,'h':h,'A':A,'b':b,'x0':x0}
                res['sol']  = self.sol
        except ValueError as ve:
            print(ve)
            res = { 'SG':               self.SG,
                    'status':           'ValueError',
                    'L1_error':         self.get_sum_of_node_intensities(),
                    'overestimates':    0.0 }
            res['underestimates'] = res['L1_error']
            if verbose:
                res['param']= {'P':P,'q':q,'G':G,'h':h,'A':A,'b':b,'x0':x0}
                res['exception'] = ve

            print(res['L1_error'])
            # traceback.print_exc()

        return res
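For readers unfamiliar with the solver call above, here is a minimal standalone cvxopt.solvers.qp example with the same argument pattern (the toy problem data are illustrative, not the deconvolution matrices built by get_P_q and friends):

from cvxopt import matrix, solvers

# minimize (1/2) x'Px + q'x  subject to  Gx <= h  and  Ax = b
P = matrix([[2.0, 0.0], [0.0, 2.0]])
q = matrix([-2.0, -5.0])
G = matrix([[-1.0, 0.0], [0.0, -1.0]])  # encodes x >= 0
h = matrix([0.0, 0.0])
A = matrix([[1.0], [1.0]])              # 1x2 equality constraint: x0 + x1 = 1
b = matrix([1.0])
sol = solvers.qp(P, q, G, h, A, b)
print(sol['status'], list(sol['x']))    # 'optimal', approximately [0.0, 1.0]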
Example #36
from cvxopt import matrix, spmatrix, normal, setseed, blas, lapack, solvers
import nucnrm
import numpy as np

# Solves a randomly generated nuclear norm minimization problem
#
#    minimize || A(x) + B ||_*
#
# with n variables, and matrices A(x), B of size p x q.

setseed(0)
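# Note: setseed seeds cvxopt's RNG only; seeding numpy as well (an addition,
# not in the original) makes the np.random.randn draws below reproducible.
np.random.seed(0)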

m, K = 2, 4
p, q, n = 3, 1, 8

U = np.random.randn(m, K)
A = np.zeros((p * q, n))
A[0, 1] = U[0, 0]
A[0, 2] = U[1, 0]
A[1, 3] = U[0, 0]
A[1, 4] = U[1, 0]
A[2, 5] = U[0, 0]
A[2, 6] = U[1, 0]

A = matrix(A)
B = matrix(np.zeros((p, q)))

G = np.zeros((1, n))
G[0, 0] = U[0, 0]
G[0, 1] = U[1, 0]
G = matrix(G)
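The fragment stops before the actual solve; as a sanity check on the objective, the nuclear norm of A(x) + B can be evaluated directly from singular values (x_test and the column-major reshape convention are assumptions for illustration):

def nuclear_norm(M):
    # ||M||_* is the sum of the singular values of M
    return np.linalg.svd(np.array(M), compute_uv=False).sum()

x_test = np.zeros(n)
Ax = (np.array(A) @ x_test).reshape(p, q, order='F')  # A(x): R^n -> R^{p x q}
print(nuclear_norm(Ax + np.array(B)))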
Example #37
File: qp_test.py  Project: bpiwowar/kqp
        }
"""


# --- Simple test

n = 2
r = 2
g = matrix([1, 0, 0, 1], (n, n), 'd')
a = matrix([1, 0, 0, 0.4], (n * r, 1), 'd')
nu = matrix(1., (n, 1), 'd')
doit("simple", n, r, g, a, nu, 1.)

# --- Random test

setseed(1)
n = 8
r = 5
g = uniform(n, n)
g = g * g.T
a = uniform(n * r, 1)
nu = matrix(1., (n, 1), 'd')
doit("random", n, r, g, a, nu, 1.)

# --- Simple test

n = 2
r = 2
g = matrix([1, 0, 0, 1], (n, n), 'd')
a = matrix([1, 0, 0, 0.4], (n * r, 1), 'd')
Example #38
    P_NORM = 1.1  # lp-norm regularizer on the mixing coefficients
    N_pos = 100
    N_neg = 100
    N_unl = 10

    # 1. STEP: GENERATE DATA
    # 1.1. generate training labels
    yp = co.matrix(1, (1, N_pos), 'i')
    yu = co.matrix(0, (1, N_unl), 'i')
    yn = co.matrix(-1, (1, N_neg), 'i')
    Dy = co.matrix([[yp], [yu], [yn], [yn], [yn], [yn]])
    Dy = np.array(Dy)
    Dy = Dy.ravel()  # flatten to shape (Dy.size,)

    # 1.2. generate training data
    co.setseed(11)
    Dtrainp = co.normal(2, N_pos) * 0.4
    Dtrainu = co.normal(2, N_unl) * 0.4
    Dtrainn = co.normal(2, N_neg) * 0.1
    Dtrain21 = Dtrainn - 1
    Dtrain21[0, :] = Dtrainn[0, :] + 1
    Dtrain22 = -Dtrain21

    # 1.3. concatenate training data
    Dtrain = co.matrix([[Dtrainp], [Dtrainu], [Dtrainn + 1.0], [Dtrainn - 1.0],
                        [Dtrain21], [Dtrain22]])
    Dtrain = np.array(Dtrain)

    # 1.4. generate test data on a grid
    delta = 0.25
    x = np.arange(-3.0, 3.0, delta)
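The example is cut off in the middle of the grid construction; a common continuation for building the 2 x N_test test matrix, reusing x and delta from above, would look like this (hypothetical, following Dtrain's column-sample layout):

    y = np.arange(-3.0, 3.0, delta)
    X, Y = np.meshgrid(x, y)
    Dtest = np.vstack([X.ravel(), Y.ravel()])  # 2 x (number of grid points)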
Example #39
def main(dataset_name, xp_path, data_path, load_config, load_model, seed,
         kernel, kappa, hybrid, load_ae, n_jobs_dataloader, normal_class):
    """
    (Hybrid) SSAD for anomaly detection as in Goernitz et al., Towards Supervised Anomaly Detection, JAIR, 2013.

    :arg DATASET_NAME: Name of the dataset to load.
    :arg XP_PATH: Export path for logging the experiment.
    :arg DATA_PATH: Root path of data.
    """

    # Get configuration
    cfg = Config(locals().copy())

    # Set up logging
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    log_file = xp_path + '/log.txt'
    file_handler = logging.FileHandler(log_file)
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)

    # Print paths
    logger.info('Log file is %s.' % log_file)
    logger.info('Data path is %s.' % data_path)
    logger.info('Export path is %s.' % xp_path)

    # Print experimental setup
    logger.info('Dataset: %s' % dataset_name)
    logger.info('Normal class: %d' % normal_class)
    # If specified, load experiment config from JSON-file
    if load_config:
        cfg.load_config(import_json=load_config)
        logger.info('Loaded configuration from %s.' % load_config)

    # Print SSAD configuration
    logger.info('SSAD kernel: %s' % cfg.settings['kernel'])
    logger.info('Kappa parameter: %.2f' % cfg.settings['kappa'])
    logger.info('Hybrid model: %s' % cfg.settings['hybrid'])

    # Set seed
    if cfg.settings['seed'] != -1:
        random.seed(cfg.settings['seed'])
        np.random.seed(cfg.settings['seed'])
        co.setseed(cfg.settings['seed'])
        torch.manual_seed(cfg.settings['seed'])
        torch.cuda.manual_seed(cfg.settings['seed'])
        torch.backends.cudnn.deterministic = True
        logger.info('Set seed to %d.' % cfg.settings['seed'])

    # Use 'cpu' as device for SSAD
    device = 'cpu'
    torch.multiprocessing.set_sharing_strategy(
        'file_system')  # fix multiprocessing issue on Ubuntu
    logger.info('Computation device: %s' % device)
    logger.info('Number of dataloader workers: %d' % n_jobs_dataloader)

    # Load data
    dataset = load_dataset(dataset_name,
                           data_path,
                           normal_class,
                           random_state=np.random.RandomState(
                               cfg.settings['seed']))

    # Initialize SSAD model
    ssad = SSAD(kernel=cfg.settings['kernel'],
                kappa=cfg.settings['kappa'],
                hybrid=cfg.settings['hybrid'])

    # If specified, load model parameters from already trained model
    if load_model:
        ssad.load_model(import_path=load_model, device=device)
        logger.info('Loading model from %s.' % load_model)

    # If specified, load model autoencoder weights for a hybrid approach
    if hybrid and load_ae is not None:
        ssad.load_ae(dataset_name, model_path=load_ae)
        logger.info('Loaded pretrained autoencoder for features from %s.' %
                    load_ae)

    # Train model on dataset
    ssad.train(dataset, device=device, n_jobs_dataloader=n_jobs_dataloader)

    # Test model
    ssad.test(dataset, device=device, n_jobs_dataloader=n_jobs_dataloader)

    # Save results and configuration
    ssad.save_results(export_json=xp_path + '/results.json')
    cfg.save_config(export_json=xp_path + '/config.json')

    # Plot most anomalous and most normal test samples
    indices, labels, scores = zip(*ssad.results['test_scores'])
    indices, labels, scores = np.array(indices), np.array(labels), np.array(
        scores)
    idx_all_sorted = indices[np.argsort(
        scores)]  # from lowest to highest score
    idx_normal_sorted = indices[labels == 0][np.argsort(
        scores[labels == 0])]  # from lowest to highest score

    if dataset_name in ('mnist', 'fmnist', 'cifar10'):

        if dataset_name in ('mnist', 'fmnist'):
            X_all_low = dataset.test_set.data[idx_all_sorted[:32],
                                              ...].unsqueeze(1)
            X_all_high = dataset.test_set.data[idx_all_sorted[-32:],
                                               ...].unsqueeze(1)
            X_normal_low = dataset.test_set.data[idx_normal_sorted[:32],
                                                 ...].unsqueeze(1)
            X_normal_high = dataset.test_set.data[idx_normal_sorted[-32:],
                                                  ...].unsqueeze(1)

        if dataset_name == 'cifar10':
            X_all_low = torch.tensor(
                np.transpose(dataset.test_set.data[idx_all_sorted[:32], ...],
                             (0, 3, 1, 2)))
            X_all_high = torch.tensor(
                np.transpose(dataset.test_set.data[idx_all_sorted[-32:], ...],
                             (0, 3, 1, 2)))
            X_normal_low = torch.tensor(
                np.transpose(
                    dataset.test_set.data[idx_normal_sorted[:32], ...],
                    (0, 3, 1, 2)))
            X_normal_high = torch.tensor(
                np.transpose(
                    dataset.test_set.data[idx_normal_sorted[-32:], ...],
                    (0, 3, 1, 2)))

        plot_images_grid(X_all_low, export_img=xp_path + '/all_low', padding=2)
        plot_images_grid(X_all_high,
                         export_img=xp_path + '/all_high',
                         padding=2)
        plot_images_grid(X_normal_low,
                         export_img=xp_path + '/normals_low',
                         padding=2)
        plot_images_grid(X_normal_high,
                         export_img=xp_path + '/normals_high',
                         padding=2)
Example #40
    x0 = matrix([uls[:n], 1.1 * abs(rls)])
    s0 = +h
    Fi(x0, s0, alpha=-1, beta=1)

    # z0 = [ (1+w)/2; (1-w)/2 ] where w = (.9/||rls||_inf) * rls
    # if rls is nonzero and w = 0 otherwise.
    if max(abs(rls)) > 1e-10:
        w = .9 / max(abs(rls)) * rls
    else:
        w = matrix(0.0, (m, 1))
    z0 = matrix([.5 * (1 + w), .5 * (1 - w)])

    dims = {'l': 2 * m, 'q': [], 's': []}
    sol = solvers.conelp(c,
                         Fi,
                         h,
                         dims,
                         kktsolver=Fkkt,
                         primalstart={
                             'x': x0,
                             's': s0
                         },
                         dualstart={'z': z0})
    return sol['x'][:n], sol['z'][m:] - sol['z'][:m]


setseed()
m, n = 1000, 100
P, q = normal(m, n), normal(m, 1)
x, y = l1(P, q)
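A quick check of the returned primal point (the numpy import and the conversion from cvxopt matrices are additions for illustration; the value should match the solver's reported objective):

import numpy as np

r = np.array(P) @ np.array(x) - np.array(q)
print('||P x - q||_1 =', np.abs(r).sum())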
Example #41
def main(dataset_name, xp_path, data_path, load_config, load_model,
         ratio_known_normal, ratio_known_outlier, ratio_pollution, seed,
         kernel, kappa, hybrid, load_ae, n_jobs_dataloader, normal_class,
         known_outlier_class, n_known_outlier_classes):
    """
    (Hybrid) SSAD for anomaly detection as in Goernitz et al., Towards Supervised Anomaly Detection, JAIR, 2013.

    :arg DATASET_NAME: Name of the dataset to load.
    :arg XP_PATH: Export path for logging the experiment.
    :arg DATA_PATH: Root path of data.
    """

    # Get configuration
    cfg = Config(locals().copy())

    # Set up logging
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    log_file = xp_path + '/log.txt'
    file_handler = logging.FileHandler(log_file)
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)

    # Print paths
    logger.info('Log file is %s.' % log_file)
    logger.info('Data path is %s.' % data_path)
    logger.info('Export path is %s.' % xp_path)

    # Print experimental setup
    logger.info('Dataset: %s' % dataset_name)
    logger.info('Normal class: %d' % normal_class)
    logger.info('Ratio of labeled normal train samples: %.2f' %
                ratio_known_normal)
    logger.info('Ratio of labeled anomalous samples: %.2f' %
                ratio_known_outlier)
    logger.info('Pollution ratio of unlabeled train data: %.2f' %
                ratio_pollution)
    if n_known_outlier_classes == 1:
        logger.info('Known anomaly class: %d' % known_outlier_class)
    else:
        logger.info('Number of known anomaly classes: %d' %
                    n_known_outlier_classes)

    # If specified, load experiment config from JSON-file
    if load_config:
        cfg.load_config(import_json=load_config)
        logger.info('Loaded configuration from %s.' % load_config)

    # Print SSAD configuration
    logger.info('SSAD kernel: %s' % cfg.settings['kernel'])
    logger.info('Kappa parameter: %.2f' % cfg.settings['kappa'])
    logger.info('Hybrid model: %s' % cfg.settings['hybrid'])

    # Set seed
    if cfg.settings['seed'] != -1:
        random.seed(cfg.settings['seed'])
        np.random.seed(cfg.settings['seed'])
        co.setseed(cfg.settings['seed'])
        torch.manual_seed(cfg.settings['seed'])
        torch.cuda.manual_seed(cfg.settings['seed'])
        torch.backends.cudnn.deterministic = True
        logger.info('Set seed to %d.' % cfg.settings['seed'])

    # Use 'cpu' as device for SSAD
    device = 'cpu'
    torch.multiprocessing.set_sharing_strategy(
        'file_system')  # fix multiprocessing issue on Ubuntu
    logger.info('Computation device: %s' % device)
    logger.info('Number of dataloader workers: %d' % n_jobs_dataloader)

    # Load data
    #dataset = load_dataset(dataset_name, data_path, normal_class, known_outlier_class, n_known_outlier_classes,
    #                       ratio_known_normal, ratio_known_outlier, ratio_pollution,
    #                       random_state=np.random.RandomState(cfg.settings['seed']))

    dataset = None
    # Log random sample of known anomaly classes if more than 1 class
    #if n_known_outlier_classes > 1:
    #    logger.info('Known anomaly classes: %s' % (dataset.known_outlier_classes,))

    # Initialize SSAD model
    ssad = SSAD(kernel=cfg.settings['kernel'],
                kappa=cfg.settings['kappa'],
                hybrid=cfg.settings['hybrid'])

    # If specified, load model parameters from already trained model
    if load_model:
        ssad.load_model(import_path=load_model, device=device)
        logger.info('Loading model from %s.' % load_model)

    # If specified, load model autoencoder weights for a hybrid approach
    if hybrid and load_ae is not None:
        ssad.load_ae(dataset_name, model_path=load_ae)
        logger.info('Loaded pretrained autoencoder for features from %s.' %
                    load_ae)

    # Train model on dataset
    ssad.train(dataset, device=device, n_jobs_dataloader=n_jobs_dataloader)

    # Test model
    ssad.test(dataset, device=device, n_jobs_dataloader=n_jobs_dataloader)

    # Save results and configuration
    #ssad.save_results(export_json=xp_path + '/results.json')
    #cfg.save_config(export_json=xp_path + '/config.json')

    # Plot most anomalous and most normal test samples
    indices, labels, scores = zip(*ssad.results['test_scores'])
    indices, labels, scores = np.array(indices), np.array(labels), np.array(
        scores)
    idx_all_sorted = indices[np.argsort(
        scores)]  # from lowest to highest score
    idx_normal_sorted = indices[labels == 0][np.argsort(
        scores[labels == 0])]  # from lowest to highest score
    '''