Code Example #1
	def train_dc(self, max_iter=50):
		""" Solve the optimization problem with a  
		    sequential convex programming/DC-programming
		    approach: 
		    Iteratively, find the most likely configuration of
		    the latent variables and then optimize the
		    model parameters with the latent states held fixed.
		"""
		N = self.sobj.get_num_samples()
		DIMS = self.sobj.get_num_dims()
		
		# intermediate solutions
		# latent variables
		latent = [0.0]*N

		sol = normal(DIMS,1)
		psi = matrix(0.0, (DIMS,N)) # (dim x exm)
		old_psi = matrix(0.0, (DIMS,N)) # (dim x exm)
		threshold = matrix(0.0)

		obj = -1
		iter = 0 

		# terminate if objective function value doesn't change much
		while iter<max_iter and (iter<2 or sum(sum(abs(np.array(psi-old_psi))))>=0.001):
			print('Starting iteration {0}.'.format(iter))
			print(sum(sum(abs(np.array(psi-old_psi)))))
			iter += 1
			old_psi = matrix(psi)

			# 1. linearize
			# for the current solution compute the 
			# most likely latent variable configuration
			mean = matrix(0.0, (DIMS, 1))
			for i in range(N):
				(foo, latent[i], psi[:,i]) = self.sobj.argmax(sol, i, add_prior=True)
				mean += psi[:,i]

			mpsi = matrix(psi)
			mean /= float(N)
			#for i in range(N):
			#	mpsi[:,i] -= mean

			# 2. solve the intermediate convex optimization problem 
			A = mpsi*mpsi.trans()
			print(A.size)
			W = matrix(0.0, (DIMS, 1))
			# eigenvalues are stored in W (ascending); with jobz='V' the
			# eigenvectors overwrite A in place
			syev(A, W, jobz='V')
			print(W)
			print(A)
			print(A*A.trans())
			#sol = (W[3:,0].trans() * A[:,3:].trans()).trans()
			sol = A[:,DIMS-1]  # eigenvector of the largest eigenvalue
			print(sol)

		print(sum(sum(abs(np.array(psi-old_psi)))))
		self.sol = sol
		self.latent = latent
		return (sol, latent, threshold)
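The call that does the work in this loop is cvxopt's lapack.syev: it operates in place, filling W with the eigenvalues in ascending order and, with jobz='V', overwriting its first argument with the corresponding eigenvectors. That is why the loop takes the last column of A as the new solution: it is the eigenvector of the largest eigenvalue of mpsi*mpsi', i.e. the leading principal component of the current feature vectors. A small standalone sketch of that behaviour (the 3x3 matrix is only an illustration, not taken from the example above):

import numpy as np
from cvxopt import matrix
from cvxopt.lapack import syev

# a small symmetric positive semidefinite matrix
C = np.array([[4.0, 1.0, 0.0],
              [1.0, 3.0, 0.0],
              [0.0, 0.0, 1.0]])

A = matrix(C)             # syev overwrites this copy with the eigenvectors
W = matrix(0.0, (3, 1))   # receives the eigenvalues in ascending order
syev(A, W, jobz='V')

print(W)                            # eigenvalues, smallest to largest
print(np.array(A[:, 2]).ravel())    # eigenvector of the largest eigenvalue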
Code Example #2
File: cvx_utils.py  Project: deccs/PLearn
 def using_numerical_derivatives(params=None, z=None):
     out = _using_numerical_derivatives(params, z)
     print "OUT",
     for o in out:
         print o
     if len(out)==3:
         from cvxopt.lapack import syev
         W = cvx.matrix(0.0, (6,1), 'd')
         syev(out[-1], W)
         print "EIGEN", W
     return out
Code Example #3
File: cvx_utils.py  Project: zbxzc35/PLearn
 def using_numerical_derivatives(params=None, z=None):
     out = _using_numerical_derivatives(params, z)
     print "OUT",
     for o in out:
         print o
     if len(out) == 3:
         from cvxopt.lapack import syev
         W = cvx.matrix(0.0, (6, 1), 'd')
         syev(out[-1], W)
         print "EIGEN", W
     return out
Code Example #4
    def fit(self, max_iter=50):
        """ Solve the optimization problem with a
            sequential convex programming/DC-programming
            approach:
            Iteratively, find the most likely configuration of
            the latent variables and then optimize the
            model parameters with the latent states held fixed.
        """
        N = self.sobj.get_num_samples()
        DIMS = self.sobj.get_num_dims()

        # intermediate solutions
        # latent variables
        latent = [0.0] * N

        sol = np.random.randn(DIMS)
        psi = np.zeros((DIMS, N))  # (dim x exm)
        old_psi = np.zeros((DIMS, N))  # (dim x exm)
        threshold = 0.
        obj = -1.
        iter = 0
        # terminate if objective function value doesn't change much
        while iter < max_iter and (
                iter < 2 or np.sum(abs(np.array(psi - old_psi))) >= 0.001):
            print('Starting iteration {0}.'.format(iter))
            print(np.sum(abs(np.array(psi - old_psi))))
            iter += 1
            old_psi = psi.copy()

            # 1. linearize
            # for the current solution compute the
            # most likely latent variable configuration
            mean = np.zeros(DIMS)
            for i in range(N):
                _, latent[i], psi[:, i] = self.sobj.argmax(sol, i)
                mean += psi[:, i]
            mean /= float(N)
            mpsi = psi - np.repeat(mean.reshape((DIMS, 1)), N, axis=1)

            # 2. solve the intermediate convex optimization problem
            A = mpsi.dot(mpsi.T)
            print(A.shape)
            # syev works in place on cvxopt matrices: wrapping A in matrix() makes a
            # copy, so keep that copy and read the eigenvectors back from it
            cA = matrix(A)
            W = matrix(0.0, (DIMS, 1))
            syev(cA, W, jobz='V')  # eigenvalues in W (ascending), eigenvectors overwrite cA
            sol = np.array(cA[:, DIMS - 1]).reshape(DIMS)  # leading eigenvector

        print(np.sum(abs(np.array(psi - old_psi))))
        self.sol = sol
        self.latent = latent
        return sol, latent, threshold
Code Example #5
    def setUp(self):
        from cvxopt import matrix, normal, spdiag, misc, lapack
        from ubsdp import ubsdp

        m, n = 10, 10
        A = normal(m**2, n)

        # Z0 random positive definite with maximum e.v. less than 1.0.
        Z0 = normal(m, m)
        Z0 = Z0 * Z0.T
        w = matrix(0.0, (m, 1))
        a = +Z0
        lapack.syev(a, w, jobz='V')
        wmax = max(w)
        if wmax > 0.9: w = (0.9 / wmax) * w
        Z0 = a * spdiag(w) * a.T

        # c = -A'(Z0)
        c = matrix(0.0, (n, 1))
        misc.sgemv(A, Z0, c, dims={'l': 0, 'q': [], 's': [m]}, trans='T', alpha=-1.0)

        # Z1 = I - Z0
        Z1 = -Z0
        Z1[::m + 1] += 1.0

        x0 = normal(n, 1)
        X0 = normal(m, m)
        X0 = X0 * X0.T
        S0 = normal(m, m)
        S0 = S0 * S0.T
        # B = A(x0) - X0 + S0
        B = matrix(A * x0 - X0[:] + S0[:], (m, m))

        X = ubsdp(c, A, B)

        (self.m, self.n, self.c, self.A, self.B, self.Xubsdp) = (m, n, c, A, B, X)
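The setUp above builds a random positive semidefinite Z0 and then caps its spectrum: lapack.syev leaves the eigenvalues in w (ascending) and the eigenvectors in a, the eigenvalues are rescaled so the largest one is at most 0.9, and a * spdiag(w) * a.T reassembles the matrix, so the comment "maximum e.v. less than 1.0" holds by construction. A NumPy-only sketch of the same rescaling idea, for comparison (not part of the original test):

import numpy as np

m = 10
Z0 = np.random.randn(m, m)
Z0 = Z0 @ Z0.T                 # random positive semidefinite matrix
w, V = np.linalg.eigh(Z0)      # eigenvalues ascending, eigenvectors as columns of V
if w.max() > 0.9:
    w = (0.9 / w.max()) * w    # cap the largest eigenvalue at 0.9
Z0 = V @ np.diag(w) @ V.T      # rebuild Z0 from the rescaled spectrum
assert np.linalg.eigvalsh(Z0).max() < 1.0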
Code Example #6
def sqrtm(A):
    """Returns the matrix square root of a positive semidefinite matrix."""
    if not isinstance(A, (matrix, spmatrix)) or rows(A) != cols(A) or eig(A)[0][0] < 0:
        raise TypeError('a symmetric positive semidefinite matrix is required')

    V = matrix(A)
    z = zeros(rows(A), 1, full=True)
    lapack.syev(V, z, jobz='V')

    # Round eigenvalues to deal with numerical instability.
    # Note: don't use cvxmod atoms pos or sqrt here: overkill.
    for i in range(len(z)):
        if z[i] <= 0:
            z[i] = 0
        else:
            z[i] = sqrt(z[i])

    return V*diag(z)*tp(V)
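The same eigendecomposition recipe underlies sqrtm: syev leaves the eigenvectors in V and the eigenvalues in z, small negative eigenvalues caused by round-off are clipped to zero, their square roots are taken, and V*diag(z)*V' is returned. Squaring that result gives back V*diag(z)^2*V' = A, which is the defining property of the matrix square root. A NumPy check of that property (illustrative values, not cvxmod code):

import numpy as np

A = np.array([[2.0, 1.0],
              [1.0, 2.0]])            # symmetric positive definite example
w, V = np.linalg.eigh(A)              # eigenvalues ascending
z = np.sqrt(np.clip(w, 0.0, None))    # clip round-off negatives, then take square roots
R = V @ np.diag(z) @ V.T              # matrix square root (symmetric)
print(np.allclose(R @ R, A))          # True: R*R recovers A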
Code Example #7
File: latent_pca.py  Project: ZIYU-DEEP/tilitools
    def fit(self, max_iter=50):
        """ Solve the optimization problem with a
            sequential convex programming/DC-programming
            approach:
            Iteratively, find the most likely configuration of
            the latent variables and then optimize the
            model parameters with the latent states held fixed.
        """
        samples = self.sobj.get_num_samples()
        dims = self.sobj.get_num_dims()

        self.latent = np.random.randint(0, self.sobj.get_num_states(), samples)
        self.sol = np.random.randn(dims)
        psi = np.zeros((dims, samples))
        old_psi = np.zeros((dims, samples))
        threshold = 0.
        iter = 0
        # terminate if objective function value doesn't change much
        while iter < max_iter and (iter < 2 or np.sum(np.abs(psi-old_psi)) >= 0.001):
            print('Starting iteration {0}.'.format(iter))
            print(np.sum(np.abs(psi-old_psi)))
            iter += 1
            old_psi = psi.copy()

            # 1. linearize
            # for the current solution compute the
            # most likely latent variable configuration
            mean = np.zeros(dims)
            for i in range(samples):
                _, self.latent[i], psi[:, i] = self.sobj.argmax(self.sol, i)
                mean += psi[:, i]
            mean /= float(samples)
            mpsi = psi - mean.reshape((dims, 1))

            # 2. solve the intermediate convex optimization problem
            A = mpsi.dot(mpsi.T)
            # syev works in place on cvxopt matrices: wrapping A in matrix() makes a
            # copy, so keep that copy and read the eigenvectors back from it
            cA = matrix(A)
            W = matrix(0.0, (dims, 1))
            syev(cA, W, jobz='V')  # eigenvalues in W (ascending), eigenvectors overwrite cA
            self.sol = np.array(cA[:, dims-1]).ravel()  # leading eigenvector
        return self.sol, self.latent, threshold
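The structured objects used in the fit() examples are only assumed to expose get_num_samples(), get_num_dims(), get_num_states() and argmax(sol, i) returning a (score, latent state, feature vector) triple. A minimal mock with a single, trivial latent state makes the connection to ordinary PCA explicit: argmax just returns the i-th data column, so mpsi becomes the centred data matrix and fit() converges to its leading principal component. The mock is purely illustrative and not part of tilitools:

import numpy as np

class TrivialLatentObject:
    """ Structured object with one latent state; fit() then reduces to plain PCA. """
    def __init__(self, X):
        self.X = X  # data matrix, shape (dims, samples)

    def get_num_samples(self):
        return self.X.shape[1]

    def get_num_dims(self):
        return self.X.shape[0]

    def get_num_states(self):
        return 1

    def argmax(self, sol, i, **kwargs):
        # score of the current solution, the (only) latent state, and the feature vector
        return float(np.dot(sol, self.X[:, i])), 0, self.X[:, i]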
Code Example #8
    def setUp(self):
        from cvxopt import matrix, normal, spdiag, misc, lapack
        from ubsdp import ubsdp

        m, n = 10, 10
        A = normal(m**2, n)

        # Z0 random positive definite with maximum e.v. less than 1.0.
        Z0 = normal(m,m)
        Z0 = Z0 * Z0.T
        w = matrix(0.0, (m,1))
        a = +Z0
        lapack.syev(a, w, jobz = 'V')
        wmax = max(w)
        if wmax > 0.9:  w = (0.9/wmax) * w
        Z0 = a * spdiag(w) * a.T

        # c = -A'(Z0)
        c = matrix(0.0, (n,1))
        misc.sgemv(A, Z0, c, dims = {'l': 0, 'q': [], 's': [m]}, trans = 'T', alpha = -1.0)

        # Z1 = I - Z0
        Z1 = -Z0
        Z1[::m+1] += 1.0

        x0 = normal(n,1)
        X0 = normal(m,m)
        X0 = X0*X0.T
        S0 = normal(m,m)
        S0 = S0*S0.T
        # B = A(x0) - X0 + S0
        B = matrix(A*x0 - X0[:] + S0[:], (m,m))

        X = ubsdp(c, A, B)

        (self.m, self.n, self.c, self.A, self.B, self.Xubsdp) = (m, n, c, A, B, X)
Code Example #9
File: sdp_upper_bound.py  Project: wsgan001/itce2011
'''
@author: Nguyen Huu Hiep
'''

from cvxopt import matrix, normal, spdiag, misc, lapack
from ubsdp import ubsdp

m, n = 50, 50
A = normal(m**2, n)

# Z0 random positive definite with maximum e.v. less than 1.0.
Z0 = normal(m, m)
Z0 = Z0 * Z0.T
w = matrix(0.0, (m, 1))
a = +Z0
lapack.syev(a, w, jobz='V')
wmax = max(w)
if wmax > 0.9: w = (0.9 / wmax) * w
Z0 = a * spdiag(w) * a.T

# c = -A'(Z0)
c = matrix(0.0, (n, 1))
misc.sgemv(A, Z0, c, dims={'l': 0, 'q': [], 's': [m]}, trans='T', alpha=-1.0)

# Z1 = I - Z0
Z1 = -Z0
Z1[::m + 1] += 1.0

x0 = normal(n, 1)
X0 = normal(m, m)
X0 = X0 * X0.T
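The comment c = -A'(Z0) refers to the adjoint of the linear operator A(x) = sum_i x_i * A_i, where column i of A stores the vectorized m-by-m coefficient matrix A_i. With the trace inner product the adjoint is A'(Z) = A^T * vec(Z), so the misc.sgemv call with trans='T' and alpha=-1.0 fills c with -A^T * vec(Z0) (modulo cvxopt's internal storage conventions for the 's' block). The same step in plain NumPy, for orientation only:

import numpy as np

m, n = 5, 5
A = np.random.randn(m*m, n)      # column i is vec(A_i)
Z0 = np.random.randn(m, m)
Z0 = Z0 @ Z0.T                   # symmetric, so row- and column-major vec() agree

c = -A.T @ Z0.reshape(m*m)       # c_i = -<A_i, Z0> = -vec(A_i) . vec(Z0)
print(c)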
Code Example #10
File: sdp_upper_bound.py  Project: hiepbkhn/itce2011
'''
@author: Nguyen Huu Hiep
'''

from cvxopt import matrix, normal, spdiag, misc, lapack
from ubsdp import ubsdp

m, n = 50, 50
A = normal(m**2, n)

# Z0 random positive definite with maximum e.v. less than 1.0.
Z0 = normal(m,m)
Z0 = Z0 * Z0.T
w = matrix(0.0, (m,1))
a = +Z0
lapack.syev(a, w, jobz = 'V')
wmax = max(w)
if wmax > 0.9:  w = (0.9/wmax) * w
Z0 = a * spdiag(w) * a.T

# c = -A'(Z0)
c = matrix(0.0, (n,1))
misc.sgemv(A, Z0, c, dims = {'l': 0, 'q': [], 's': [m]}, trans = 'T', alpha = -1.0)

# Z1 = I - Z0
Z1 = -Z0
Z1[::m+1] += 1.0

x0 = normal(n,1)
X0 = normal(m,m)
X0 = X0*X0.T