def NNLS4activity(data, shapes, b_s):  # pass data
        t0 = time()
        S = zeros((L + 1, prod(dims[1:])))
        for ll in range(L):
            S[ll] = RegionAdd(
                zeros((1,) + dims[1:]), shapes[ll].reshape(1, -1), boxes[ll]).ravel()
        S[-1] = b_s
        # http://abel.ee.ucla.edu/cvxopt/userguide/coneprog.html#quadratic-programming
        P = matrix(S.dot(S.T))
        G = matrix(0.0, (L + 1, L + 1))
        G[::L + 2] = -1.0
        h = matrix(0.0, (L + 1, 1))

        def nnls(y):
            q = matrix(-S.dot(y))
            result = solvers.qp(P, q, G, h)  # , solver='mosek')
            return ravel(result['x'])

        activity = asarray([nnls(d.ravel()) for d in data]).T
        # def nnls(y, init):  # initial conditions didn't speed things up
        #     q = matrix(-S.dot(y))
        #     result = solvers.qp(P, q, G, h, initvals=init)  # , solver='mosek')
        #     return ravel(result['x'])
        # activity = asarray(
        #     [nnls(d.ravel(), activity[:, i // mb].tolist() + [b_t[i // mb]]) for i, d in enumerate(residual)]).T
        # Subtract background and neurons
        print('Time for cvx: ', time() - t0)
        tsub = time()
        residual = data - activity.T.dot(S).reshape(data.shape)
        tsub = time() - tsub  # elapsed time spent on the subtraction
        b_t = activity[-1]
        activity = activity[:-1]
        return residual, activity, b_t, tsub
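# A minimal self-contained sketch (toy data, not the code above's) of the QP
# mapping used by NNLS4activity: nonnegative least squares
# min ||S.T a - y||^2 s.t. a >= 0 expands to 0.5 a'(S S')a - (S y)'a, which is
# exactly solvers.qp(P, q, G, h) with P = S S', q = -S y, G = -I, h = 0.
import numpy as np
from cvxopt import matrix, solvers

S_toy = np.random.rand(3, 20)            # 3 components x 20 pixels
y_toy = np.random.rand(20)
P_toy = matrix(S_toy.dot(S_toy.T))
q_toy = matrix(-S_toy.dot(y_toy))
G_toy = matrix(-np.eye(3))               # -a <= 0  <=>  a >= 0
h_toy = matrix(0.0, (3, 1))
a = np.ravel(solvers.qp(P_toy, q_toy, G_toy, h_toy)['x'])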
Example #2
File: pcg.py  Project: zhh210/pypdas
def test_pcg():
    'Test function for projected CG.'
    n = 10
    m = 4
    H = sprandsym(n,n)
    A = sp_rand(m,n,0.9)
    x0 = matrix(1,(n,1))
    b = A*x0
    c = matrix(1.0,(n,1))

    x_pcg = pcg(H,c,A,b,x0)


    Lhs1 = sparse([H,A])
    Lhs2 = sparse([A.T,spmatrix([],[],[],(m,m))])
    Lhs = sparse([[Lhs1],[Lhs2]])
    rhs = -matrix([c,spmatrix([],[],[],(m,1))])
    rhs2 = copy(rhs)
    linsolve(Lhs,rhs)
    #print rhs[:10]


    sol = solvers.qp(H,c,A=A,b=b)
    print(' cvxopt qp|   pCG')
    print(matrix([[sol['x']],[x_pcg]]))
    print('Dual variables:')
    print(sol['y'])
    print('KKT equation residuals:')
    print(H*sol['x'] + c + A.T*sol['y'])
Example #3
File: bdd.py  Project: nicococo/tilitools
 def fit(self):
     """
     train a BDD
     """
     # number of training samples
     n = self.n
     # the kernel matrix
     K = self.kernel
     # the covariance matrix
     C = self.cov_mat
     # the inverse of the covariance matrix
     C_inv = np.linalg.inv(C)
     # the diagonal matrix containing the sum of the rows of the kernel matrix
     D = self.diagonal
     # parameter 0 < nu < 1, controlling the sparsity of the solution
     nu = self.nu
     # the mean vector
     m = -np.dot(D, np.ones(n))**nu
     # solve the quadratic program
     P = matrix(n*K + C_inv)
     q = matrix(-1. * (np.dot(D, np.ones(n)) + np.dot(C_inv, m)).T)
     sol = qp(P, q)
     # extract the solution
     self.alphas = sol['x']
     # BDD is trained
     self.is_trained = True
Example #4
    def NNLS4shape(data, activity, b_t):  # pass data
        # P = matrix(activity.dot(activity.T))
        act = np.r_[activity, b_t.reshape(1, -1)]
        P = matrix(act.dot(act.T))
        G = matrix(0.0, (L + 1, L + 1))
        G[::L + 2] = -1.0
        h = matrix(0.0, (L + 1, 1))

        def nnls(i, y):  # add constraints for pixels outside the boxes
            q = matrix(-act.dot(y))
            Als = []
            for ll in range(L):
                if not ((boxes[ll][0, 0] <= i % dims[-2] < boxes[ll][0, 1]) and
                        (boxes[ll][1, 0] <= i / dims[-2] < boxes[ll][1, 1])):
                    Als += [ll]
            A = matrix(0.0, (len(Als), L + 1))
            for j, ll in enumerate(Als):  # j avoids shadowing the pixel index i
                A[j, ll] = 1
            b = matrix(0.0, (len(Als), 1))
            result = solvers.qp(P, q, G, h, A, b)
            return ravel(result['x'])
        S = asarray([nnls(i, d.ravel())
                     for i, d in enumerate(data.reshape(len(data), -1).T)]).T
        residual = data - act.T.dot(S).reshape(data.shape)
        b_s = S[-1]
        S = S[:-1]
        for ll in range(L):
            shapes[ll] = RegionCut(S[ll].reshape((1,) + dims[1:]), boxes[ll])[0]
        return residual, shapes, b_s
Example #5
File: 05_svm.py  Project: Jieeee/csmath
def svm(pts, labels):
    """
    Support Vector Machine using CVXOPT in Python. This example is
    mean to illustrate how SVMs work.
    """
    n = len(pts[0])

    # x is a column vector [w b]^T

    # set up P
    P = matrix(0.0, (n+1,n+1))
    for i in range(n):
        P[i,i] = 1.0

    # q^t x
    # set up q
    q = matrix(0.0,(n+1,1))
    q[-1] = 1.0

    m = len(pts)
    # set up h
    h = matrix(-1.0,(m,1))

    # set up G
    G = matrix(0.0, (m,n+1))
    for i in range(m):
        G[i,:n] = -labels[i] * pts[i]
        G[i,n] = -labels[i]

    x = solvers.qp(P,q,G,h)['x']

    return P, q, h, G, x
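# A hypothetical toy call for svm above (made-up data; pts must be numpy
# arrays so the scalar multiplications inside svm work, and a recent cvxopt
# is assumed for assigning numpy rows into G):
import numpy as np

pts_toy = [np.array([2.0, 2.0]), np.array([-2.0, -2.0])]
labels_toy = [1.0, -1.0]
P, q, h, G, x = svm(pts_toy, labels_toy)
w, b = x[:2], x[2]        # x stacks [w b]^T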
Example #6
def _numpy_to_cvxopt_matrix(A):
    from cvxopt import matrix
    A = np.array(A, dtype=np.float64)
    if A.ndim == 1:
        return matrix(A, (A.shape[0], 1), 'd')
    else:
        return matrix(A, A.shape, 'd')
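# Quick check of the helper above: 1-D arrays become column vectors,
# 2-D arrays keep their shape.
import numpy as np

v = _numpy_to_cvxopt_matrix(np.arange(3.0))      # 3x1 column
M = _numpy_to_cvxopt_matrix(np.ones((2, 3)))     # 2x3 matrix
print(v.size, M.size)                            # (3, 1) (2, 3)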
Example #7
def l1tf(corr, delta):
    """
    :param corr: Corrupted signal, should be a numpy array / pandas Series
    :param delta: Strength of regularization

    :return: The filtered series
    """

    m = float(corr.min())
    M = float(corr.max())
    denom = M - m
    # if denom == 0, corr is constant
    t = (corr-m) / (1 if denom == 0 else denom)

    if isinstance(corr, np.ndarray):
        values = matrix(t)
    elif isinstance(corr, pd.Series):
        values = matrix(t.values[:])
    else:
        raise ValueError("Wrong type for corr")

    values = _l1tf(values, delta)
    values = values * (M - m) + m

    if isinstance(corr, np.ndarray):
        values = np.asarray(values).squeeze()
    elif isinstance(corr, pd.Series):
        values = pd.Series(np.asarray(values).ravel(), index=corr.index, name=corr.name)

    return values
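# A hypothetical call to l1tf above (it relies on an _l1tf solver defined
# elsewhere in this module): denoise a random walk stored in a pandas Series.
import numpy as np
import pandas as pd

noisy = pd.Series(np.cumsum(np.random.randn(200)), name='walk')
smooth = l1tf(noisy, delta=5.0)   # returns a Series with the same index/name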
Example #8
    def F(x=None,z=None):
        
        # Case 1
        if x is None and z is None:
            x0 = opt.matrix(1., (n,1))
            return len(fs),x0

        # Case 2
        elif x is not None and z is None:
            if all(list(map(lambda y: y(x),inds))):
                f = opt.matrix(0.0,(len(fs),1))
                for i in range(0,len(fs),1):
                    f[i] = fs[i](x)
                Df = opt.spmatrix(0.0,[],[],(0,n))
                for i in range(0,len(grads),1):
                    Df = opt.sparse([Df,grads[i](x).T])
                return f,Df
            else:
                return None,None

        # Case 3
        else:
            f = opt.matrix(0.0,(len(fs),1))
            for i in range(0,len(fs),1):
                f[i] = fs[i](x)
            Df = opt.spmatrix(0.0,[],[],(0,n))
            for i in range(0,len(grads),1):
                Df = opt.sparse([Df,grads[i](x).T])
            H = opt.spmatrix(0.0,[],[],(n,n))
            for i in range(0,len(hess),1):
                H = H + z[i]*hess[i](x)
            return f,Df,H
Example #9
    def test_pfcholesky(self):
        U = matrix(range(1,2*self.symb.n+1),(self.symb.n,2),tc='d')/self.symb.n
        alpha = matrix([1.2,-0.01])
        D = matrix(0.0,(2,2))
        D[::3] = alpha
        random.seed(1)
        V = matrix([random.random() for i in range(self.symb.n*3)],(self.symb.n,3))

        # PF Cholesky from spmatrix
        Lpf = cp.pfcholesky(self.A,U,alpha,p=amd.order)
        Vt = +V
        Lpf.trmm(Vt,trans='T')
        Lpf.trmm(Vt,trans='N')
        diff = list( (Vt - (cp.symmetrize(self.A) + U*D*U.T)*V)[:] )
        self.assertAlmostEqualLists(diff, len(diff)*[0.0])

        Lpf.trsm(Vt,trans='N')
        Lpf.trsm(Vt,trans='T')
        diff = list( (Vt-V)[:] )
        self.assertAlmostEqualLists(diff, len(diff)*[0.0])

        # PF Cholesky from cspmatrix factor
        L = cp.cspmatrix(self.symb) + self.A
        Lpf = cp.pfcholesky(L,U,alpha)
        Vt = +V
        Lpf.trmm(Vt,trans='T')
        Lpf.trmm(Vt,trans='N')
        diff = list( (Vt - (cp.symmetrize(self.A) + U*D*U.T)*V)[:] )
        self.assertAlmostEqualLists(diff, len(diff)*[0.0])

        Lpf.trsm(Vt,trans='N')
        Lpf.trsm(Vt,trans='T')
        diff = list( (Vt-V)[:] )
        self.assertAlmostEqualLists(diff, len(diff)*[0.0])
Example #10
    def F(x=None,z=None):
        
        # Case 1
        if(x is None and z is None):
            x0 = opt.matrix(np.ones((n,1)))*1.0
            return (len(fs),x0)

        # Case 2
        elif(x is not None and z is None):

            if all(y(x) for y in inds):
                f = opt.matrix(0.0,(len(fs),1))
                for i in range(0,len(fs),1):
                    f[i] = fs[i](x)
                Df = opt.spmatrix(0.0,[],[],(0,n))
                for i in range(0,len(grads),1):
                    Df = opt.sparse([Df,grads[i](x).T])
                return (f,Df)
            else:
                return (None,None)

        # Case 3
        else:
            f = opt.matrix(0.0,(len(fs),1))
            for i in range(0,len(fs),1):
                f[i] = fs[i](x)
            Df = opt.spmatrix(0.0,[],[],(0,n))
            for i in range(0,len(grads),1):
                Df = opt.sparse([Df,grads[i](x).T])
            H = opt.spmatrix(0.0,[],[],(n,n))
            for i in range(0,len(hess),1):
                H = H + z[i]*hess[i](x)
            return (f,Df,H)
Example #11
    def solve_LP_problem(self):
        (f_coef_matrix, f_column_vector) = self.build_function_coef_matrix_and_column_vector()
        (d_coef_matrix, d_column_vector) = self.build_derivative_coef_matrix_and_column_vector()

        # Solve the LP problem by combining constraints for both function and derivative info.
        objective_function_vector = matrix(list(itertools.repeat(1.0, self.no_vars)))
        coef_matrix = sparse([f_coef_matrix, d_coef_matrix])
        column_vector = matrix([f_column_vector, d_column_vector])

        min_sol = solvers.lp(objective_function_vector, coef_matrix, column_vector)
        is_consistent = min_sol['x'] is not None

        # Print the LP problem for debugging purposes.
        if self.verbose:
            self.display_LP_problem(coef_matrix, column_vector)

        if is_consistent:
            self.min_heights = np.array(min_sol['x']).reshape(self.no_points_per_axis)
            print(np.around(self.min_heights, decimals=2))

            # Since consistency has been established, solve the converse LP problem to get the
            # maximal bounding surface.
            max_sol = solvers.lp(-objective_function_vector, coef_matrix, column_vector)
            self.max_heights = np.array(max_sol['x']).reshape(self.no_points_per_axis)
            print(np.around(self.max_heights, decimals=2))

            if self.plot_surfaces:
                self.plot_3D_objects_for_2D_case()

        else:
            print('No witness for consistency found.')

        return is_consistent
Example #12
    def test_dual_variables(self):
        p = Problem(Minimize( norm1(self.x + self.z) ),
            [self.x >= [2,3],
             [[1,2],[3,4]]*self.z == [-1,-4],
             norm2(self.x + self.z) <= 100])
        result = p.solve()
        self.assertAlmostEqual(result, 4)
        self.assertItemsAlmostEqual(self.x.value, [4,3])
        self.assertItemsAlmostEqual(self.z.value, [-4,1])
        # Dual values
        self.assertItemsAlmostEqual(p.constraints[0].dual_value, [0, 1])
        self.assertItemsAlmostEqual(p.constraints[1].dual_value, [-1, 0.5])
        self.assertAlmostEqual(p.constraints[2].dual_value, 0)

        T = matrix(2,(2,3))
        c = matrix([3,4])
        p = Problem(Minimize(1),
            [self.A >= T*self.C,
             self.A == self.B,
             self.C == T.T])
        result = p.solve()
        # Dual values
        self.assertItemsAlmostEqual(p.constraints[0].dual_value, 4*[0])
        self.assertItemsAlmostEqual(p.constraints[1].dual_value, 4*[0])
        self.assertItemsAlmostEqual(p.constraints[2].dual_value, 6*[0])
Example #13
    def test_vstack(self):
        c = matrix(1, (1,5))
        p = Problem(Minimize(c * vstack(self.x, self.y)),
            [self.x == [1,2],
            self.y == [3,4,5]])
        result = p.solve()
        self.assertAlmostEqual(result, 15)

        c = matrix(1, (1,4))
        p = Problem(Minimize(c * vstack(self.x, self.x)),
            [self.x == [1,2]])
        result = p.solve()
        self.assertAlmostEqual(result, 6)


        c = matrix(1, (2,2))
        p = Problem( Minimize( sum(vstack(self.A, self.C)) ),
            [self.A >= 2*c,
            self.C == -2])
        result = p.solve()
        self.assertAlmostEqual(result, -4)

        c = matrix(1, (1,2))
        p = Problem( Minimize( sum(vstack(c*self.A, c*self.B)) ),
            [self.A >= 2,
            self.B == -2])
        result = p.solve()
        self.assertAlmostEqual(result, 0)

        c = matrix([1,-1])
        p = Problem( Minimize( c.T * vstack(square(self.a), sqrt(self.b))),
            [self.a == 2,
             self.b == 16])
        result = p.solve()
        self.assertAlmostEqual(result, 0)
Example #14
def svm_qp_primal_solver(X, y, C, tol=1e-6, max_iter=100, verbose=False):
    t1 = time.perf_counter()  # time.clock() was removed in Python 3.8
    N = X.shape[0]
    D = X.shape[1]
    P = np.zeros((1 + D + N, 1 + D + N))
    P[1:D + 1, 1:D + 1] = np.eye(D, D)
    P = cvxopt.matrix(P)
    q = np.zeros((1 + D + N, 1))
    q[1 + D:, 0] = C
    q = cvxopt.matrix(q)

    G = np.hstack((-y[:, np.newaxis],
                   -y[:, np.newaxis] * X,
                   -np.eye(N, N)))
    G = np.vstack((G, np.hstack((np.zeros((N, 1 + D)), -np.eye(N, N)))))
    G = cvxopt.matrix(G)
    h = np.array([-1.0] * N + [0.0] * N)
    h = cvxopt.matrix(h[:, np.newaxis])

    cvxopt.solvers.options['abstol'] = tol
    cvxopt.solvers.options['show_progress'] = verbose
    cvxopt.solvers.options['maxiters'] = max_iter
    res = cvxopt.solvers.qp(P=P, q=q, G=G, h=h)
    t2 = time.perf_counter()
    w = np.array(res['x']).ravel()[1:1+D]
    w0 = res['x'][0]
    return {'status': 0 if res['status'] == 'optimal' else 1,
            'w': w,
            'w0': w0,
            'time': t2 - t1}
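# A hypothetical smoke test for svm_qp_primal_solver above (made-up data):
import numpy as np

np.random.seed(0)
X_toy = np.vstack([np.random.randn(20, 2) + 2, np.random.randn(20, 2) - 2])
y_toy = np.hstack([np.ones(20), -np.ones(20)])
res = svm_qp_primal_solver(X_toy, y_toy, C=1.0)
print(res['status'], res['w'], res['w0'])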
Example #15
def main():

    # Cvxopt doesn't use the same definition notation as numpy.
    # Each inner list represents a *column* of the matrix, not a row!
    # The following matrix
    #
    #  A = ⎛   1  -1⎞
    #      ⎜-0.5  -1⎟
    #      ⎝  -2  -1⎠
    #
    # is defined
    #
    #  cvxopt.matrix([ [ 1.0, -0.5, -2.0],
    #                  [-1.0, -1.0, -1.0] ])

    c = cvxopt.matrix([ 0., 1. ])
    A = cvxopt.matrix([ [ 1.0, -0.5, -2.0],
                        [-1.0, -1.0, -1.0] ])
    b = cvxopt.matrix([ 5., -4., -7. ])

    # By default, cvxopt *minimizes* the objective function.
    sol = cvxopt.solvers.lp(c, A, b)

    print('x* = ')
    print(sol['x'])
    
    print(sol)
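# A small check of the column-major convention described in main() above:
# the same 3x2 matrix built row-wise in numpy and column-wise in cvxopt.
import numpy as np
import cvxopt

A_np = np.array([[ 1.0, -1.0],
                 [-0.5, -1.0],
                 [-2.0, -1.0]])
A_cvx = cvxopt.matrix([[ 1.0, -0.5, -2.0],
                       [-1.0, -1.0, -1.0]])
assert np.allclose(np.array(A_cvx), A_np)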
Example #16
def objective_hyper(x, z, ks, p):
    """Objective function of UE program with hyperbolic delay functions
    f(x) = sum_i f_i(v_i) with v = sum_w x_w
    f_i(u) = ks[i,0]*u - ks[i,1]*log(ks[i,2]-u)
    
    Parameters
    ----------
    x,z: variables for the F(x,z) function for cvxopt.solvers.cp
    ks: matrix of size (n,3) 
    p: number of destinations
    (we use multiple-sources single-sink node-arc formulation)
    """
    n = ks.size[0]
    if x is None: return 0, matrix(1.0/p, (p*n,1))
    l = matrix(0.0, (n,1))
    for k in range(p): l += x[k*n:(k+1)*n]
    f, Df, H = 0.0, matrix(0.0, (1,n)), matrix(0.0, (n,1))
    for i in range(n):
        tmp = 1.0/(ks[i,2]-l[i])
        f += ks[i,0]*l[i] - ks[i,1]*np.log(max(ks[i,2]-l[i], 1e-13))
        Df[i] = ks[i,0] + ks[i,1]*tmp
        H[i] = ks[i,1]*tmp**2
    Df = matrix([[Df]]*p)
    if z is None: return f, Df
    return f, Df, sparse([[spdiag(z[0] * H)]*p]*p)
Example #17
	def get_2state_gaussian_seq(lens,dims=2,means1=[2,2,2,2],means2=[5,5,5,5],vars1=[1,1,1,1],vars2=[1,1,1,1],anom_prob=1.0):
		
		seqs = co.matrix(0.0, (dims, lens))
		lbls = co.matrix(0, (1,lens))
		marker = 0

		# generate first state sequence
		for d in range(dims):
			seqs[d,:] = co.normal(1,lens)*vars1[d] + means1[d]

		prob = np.random.uniform()
		if prob<anom_prob:		
			# add second state blocks
			while (True):
				max_block_len = 0.6*lens
				min_block_len = 0.1*lens
				block_len = int(max_block_len*np.single(co.uniform(1))+3)
				block_start = int(lens*np.single(co.uniform(1)))

				if (block_len - (block_start+block_len-lens)-3>min_block_len):
					break

			block_len = min(block_len,block_len - (block_start+block_len-lens)-3)
			lbls[block_start:block_start+block_len-1] = 1
			marker = 1
			for d in range(dims):
				#print block_len
				seqs[d,block_start:block_start+block_len-1] = co.normal(1,block_len-1)*vars2[d] + means2[d]

		return (seqs, lbls, marker)
Example #18
def f_df_H(x=None, z=None):
    """
    fonction demandée par la fonction
    `solvers.cp <http://cvxopt.org/userguide/solvers.html#problems-with-nonlinear-objectives>`_.

    Elle répond aux trois cas :

        * ``F()`` : la fonction doit retourne le nombre de contraintes non linéaires (:math:`f_k`) et un premier point :math:`X_0`
        * ``F(x)`` : la fonction retourne l'évaluation de :math:`f_0` et de son gradient au point ``x``
        * ``F(x,z)`` : la fonction retourne l'évaluation de :math:`f_0`, son gradient et de la dérivée seconde au point ``x``

    Voir @see fn exercice_particulier1

    Le problème d'optimisation est le suivant :

    .. math::

        \\left\\{ \\begin{array}{l} \\min_{x,y} \\left\\{ x^2 + y^2 - xy + y \\right\\}  \\\\ sous \\; contrainte \\; x + 2y = 1 \\end{array}\\right.

    """
    if x is None:
        # cas 1
        x0 = matrix([[random.random(), random.random()]])
        return 0, x0
    f = x[0] ** 2 + x[1] ** 2 - x[0] * x[1] + x[1]
    d = matrix([x[0] * 2 - x[1], x[1] * 2 - x[0] + 1]).T
    h = matrix([[2.0, -1.0], [-1.0, 2.0]])
    if z is None:
        # case 2
        return f, d
    else:
        # case 3
        return f, d, h
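# A hypothetical way to run the problem from the docstring above with
# solvers.cp, adding the linear constraint x + 2y = 1 (matrix, solvers and
# random are assumed imported as in the rest of this snippet):
from cvxopt import matrix, solvers

A_eq = matrix([[1.0], [2.0]])   # one row, column-major lists: x + 2y = 1
b_eq = matrix([1.0])
sol = solvers.cp(f_df_H, A=A_eq, b=b_eq)
print(sol['x'])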
Example #19
def optim_central(pb):
    """
        Returns the power distribution u_sol of the central static QP.

        keyword arguments:
        pb -- dictionary of the problem (nbr of users, time step, max resources, max admissible power, thermal resistance,
         Thermal capacity, vector of the init temperature, value of the exterior temperature, reference temperature,
          comfort factor, size of the prediction horizon)
    """
    Rth = pb['Rth']
    Text = pb['Text']
    T_id = pb['T_id']
    Umax = pb['Umax']
    u_m = pb['u_m']
    alpha = pb['alpha']

    u_id = (T_id - Text) /Rth

    print(np.shape(alpha*u_id))
    print(np.shape(2*Rth**2))

    # Matrix definition
    P = matrix(2 * np.diag(alpha*Rth ** 2), tc='d')
    q = matrix(1 - 2*alpha*u_id * (Rth ** 2), tc='d')
    G = matrix(np.vstack((np.ones(len(Rth)), -np.identity(len(Rth)), np.identity(len(Rth)))), tc='d')
    h = matrix(np.hstack((Umax, np.zeros(len(Rth)), u_m)), tc='d')

    # Resolution
    sol = solvers.qp(P, q, G, h)

    # Solution
    u_sol = np.asarray(sol['x']).T[0]

    return u_sol
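# A hypothetical toy problem dictionary for optim_central above
# (two users, made-up physical values; only the keys the function reads):
import numpy as np

pb_toy = {
    'Rth':   np.array([50.0, 40.0]),   # thermal resistances
    'Text':  np.array([5.0, 5.0]),     # exterior temperature
    'T_id':  np.array([20.0, 21.0]),   # reference temperatures
    'Umax':  0.5,                      # shared power budget
    'u_m':   np.array([1.0, 1.0]),     # per-user power caps
    'alpha': np.array([10.0, 10.0]),   # comfort factors
}
u_sol = optim_central(pb_toy)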
Example #20
def trainConvexLossModel(feature_matrix, observation_matrix, opttype='ls', weights=None):
	
	N = feature_matrix.shape[0]
	p = feature_matrix.shape[1]

	if weights is None:
		weights = np.eye(N)

	sol = None
	gradient = np.zeros((N,p+1))
	loss = np.zeros((N,1))

	#solves least squares problems
	if opttype == 'ls':
		weighted_feature_matrix = np.dot(np.transpose(feature_matrix), weights)
		kernel = matrix(
							np.dot(weighted_feature_matrix,
									feature_matrix)
					   )
		obs = matrix(
							-np.dot(weighted_feature_matrix,
									observation_matrix)
					)
		
		#cvx do magic!
		sol = solvers.qp(kernel, obs)

		for i in range(0,N):
			residual = (np.dot(np.transpose(sol['x']),feature_matrix[i,:]) - observation_matrix[i])
			gradient[i,0:p] = residual*np.transpose(sol['x'])
			gradient[i,p] = residual*-1
			loss[i] = residual ** 2

	return (sol,loss,gradient)
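# A hypothetical call to trainConvexLossModel above on random data
# (with opttype='ls' and no weights this is plain least squares):
import numpy as np

np.random.seed(0)
X_toy = np.random.randn(30, 3)
y_toy = np.random.randn(30, 1)
sol, loss, gradient = trainConvexLossModel(X_toy, y_toy)
print(np.array(sol['x']).ravel())   # least-squares coefficients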
Example #21
def cvxopt_lp(y, A):

    N = A.shape[1]
    AA = np.hstack((A, -A))
    c = np.ones((2*N, 1))

    G = np.vstack((np.zeros((2*N,2*N)), -np.eye(2*N)))
    h = np.zeros((4*N,1))

    # Convert numpy arrays to cvxopt matrices
    cvx_c = cvxopt.matrix(c)
    cvx_G = cvxopt.matrix(G)
    cvx_h = cvxopt.matrix(h)
    cvx_AA = cvxopt.matrix(AA)
    cvx_y = cvxopt.matrix(y.reshape(y.size,1))

    # Options
    cvxopt.solvers.options['show_progress'] = False
    #cvxopt.solvers.options['MOSEK'] = {mosek.iparam.log: 0}

    # Solve
    #res = cvxopt.solvers.lp(cvx_c, cvx_G, cvx_h, A=cvx_AA, b=cvx_y, solver='mosek')
    res = cvxopt.solvers.lp(cvx_c, cvx_G, cvx_h, A=cvx_AA, b=cvx_y)

    primal = np.squeeze(np.array(res['x']))
    gamma = primal[:N] - primal[N:]
    return gamma

    #lb = zeros(2*N,1);
    #ub = Inf*ones(2*N,1);
    ##[primal,obj,exitflag,output2,dual] = linprog(c,[],[],A,y,lb,ub,[],opt);
    ##[primal,~,~,~,~] = linprog(c,[],[],A,aggy,lb,ub);
    #[primal,obj,exitflag,output2,dual] = linprog(c,[],[],A,aggy,lb,ub);
    #gamma = primal(1:N) - primal((N+1):(2*N));
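# A hypothetical use of cvxopt_lp above: it solves the basis-pursuit LP
# min ||gamma||_1  s.t.  A*gamma = y, so it should recover a sparse vector
# from an underdetermined system (made-up data):
import numpy as np

np.random.seed(0)
A_toy = np.random.randn(15, 40)
gamma_true = np.zeros(40)
gamma_true[[3, 17, 29]] = [1.5, -2.0, 0.7]
y_toy = A_toy.dot(gamma_true)
gamma_hat = cvxopt_lp(y_toy, A_toy)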
Example #22
def relative_idx(colptr, rowidx, snptr, snpar):
    """
    Compute relative indices of update matrices in frontal matrix of parent.
    """
    
    relptr = matrix(0, (len(snptr),1))
    relidx = matrix(-1, (colptr[-1],1))

    def lfind(a,b):
        i = 0
        ret = +a
        for k in range(len(a)):
            while a[k] != b[i]: i += 1
            ret[k] = i
            i += 1
        return ret
    
    for k in range(len(snpar)):
        p = snpar[k]
        relptr[k+1] = relptr[k]
        if p != -1:
            nk = snptr[k+1] - snptr[k]
            relptr[k+1] += colptr[k+1] - colptr[k] - nk
            relidx[relptr[k]:relptr[k+1]] = lfind(rowidx[colptr[k]+nk:colptr[k+1]], rowidx[colptr[p]:colptr[p+1]])

    return relptr, relidx[:relptr[k+1]]
Example #23
 def F(x = None, z = None):
     if x is None: return 0, matrix(1/float(n), (n,1))
     val = ((x-y).T*(x-y))[0]
     Df = 2*(x-y).T
     if z is None: return val, Df
     H = 2*z[0]*matrix(np.eye(n))
     return val, Df, H
Example #24
    def test_robust_regression(self):
        #
        # Compare cvxpy solutions to cvxopt ground truth
        #
        from cvxpy import (matrix, variable, program, minimize,
                           huber, sum, leq, geq, square, norm2,
                           ones, zeros, quad_form)
        tol_exp = 5 # Check solution to within 5 decimal places
        m, n = self.m, self.n
        A = matrix(self.A)
        b = matrix(self.b)

        # Method 1: using huber directly
        x = variable(n)
        p = program(minimize(sum(huber(A*x - b, 1.0))))
        p.solve(True)

        np.testing.assert_array_almost_equal(
            x.value, self.xh, tol_exp)

        # Method 2: solving the dual QP
        x = variable(n)
        u = variable(m)
        v = variable(m)
        p = program(minimize(0.5*quad_form(u, 1.0) + sum(v)),
                    [geq(u, 0.0), leq(u, 1.0), geq(v, 0.0),
                     leq(A*x - b, u + v), geq(A*x - b, -u - v)])
        p.solve(True)
        np.testing.assert_array_almost_equal(
            x.value, self.xh, tol_exp)
Example #25
 def __init__(self, rows=1, cols=1, *args, **kwargs):
     self._LB = Parameter(rows, cols)
     self._LB.value = cvxopt.matrix(0,(rows, cols), tc='d')
     self._UB = Parameter(rows, cols)
     self._UB.value = cvxopt.matrix(1,(rows, cols), tc='d')
     self._fix_values = cvxopt.matrix(False,(rows, cols))
     super(Boolean, self).__init__(rows, cols, *args, **kwargs)
Example #26
def simplex(graph, wp_trajs, withODs=False):
    """Build simplex constraints from waypoint trajectories wp_trajs
    wp_trajs is given by WP.get_wp_trajs()[1]
    
    Parameters:
    -----------
    graph: Graph object
    wp_trajs: list of waypoint trajectories with paths along this trajectory [(wp_traj, path_list, flow)]
    """
    if wp_trajs is None:
        return None, None
    n = len(wp_trajs)
    I, J, r, i = [], [], matrix(0.0, (n,1)), 0
    for wp_traj, path_ids, flow in wp_trajs:
        r[i] = flow
        for id in path_ids:
            I.append(i)
            J.append(graph.indpaths[id])
        i += 1
    U = spmatrix(1.0, I, J, (n, graph.numpaths))
    if not withODs: return U, r
    else:
        U1, r1 = path.simplex(graph)
        U, r = matrix([U, U1]), matrix([r, r1])
        if la.matrix_rank(U) < U.size[0]:
            logging.info('Remove redundant constraint(s)'); ind = find_basis(U.trans())
            return U[ind,:], r[ind]
        return U, r
Example #27
def solve_Q(x_dim, A, B, D):
  # x = [s vec(Z) vec(Q)]
  MAX_ITERS=30
  c_dim = 1 + 2 * x_dim * (x_dim + 1) // 2  # integer division for Python 3
  c = zeros(c_dim)
  c[0] = x_dim
  prev = 1
  for i in range(x_dim):
    vec_pos = prev + i * (i + 1) // 2 + i
    c[vec_pos] = 1
  cm = matrix(c)

  G = construct_coeff_matrix(x_dim, B)
  G = -G # set negative since s = h - Gx in cvxopt's sdp solver
  #print "G-shape"
  #print shape(G)
  Gs = [matrix(G)]

  h,_ = construct_const_matrix(x_dim, A, B, D)
  #print "h-shape"
  #print shape(h)
  hs = [matrix(h)]

  solvers.options['maxiters'] = MAX_ITERS
  sol = solvers.sdp(cm, Gs = Gs, hs=hs)
  #print sol['x']
  return sol, c, G, h
Example #28
def MKL(eta_init, Kernels, Y, lambda_, risk = 'SVM'):
    # We use firstSolverMKL to get the value of the functional J as a function of eta
    #  We then apply a projected gradient descent to minimize J
    #   Input : - Data Kernels : list of arrays 
    #           - eta_init : array
    #           - lambda_ : penalization parameter (float)
    #           - Y : list

    solvers.options['show_progress'] = True
    nbKernels = len(Kernels)
    eta = projector(eta_init)
    Kernel = np.zeros((Kernels[0].shape[0], Kernels[0].shape[1]))    
    for i in range(nbKernels): Kernel = Kernel + eta[i]*Kernels[i]    
    gamma = solveDualProblem(Kernel, Y, lambda_,risk)
    grad = np.array([-lambda_*(gamma.T*matrix(Kernels[i])*gamma)[0] for i in range(nbKernels)])
    k = 1
    while 1:
        alpha = 1/float(k)
        eta_ = projector(np.asarray(eta - alpha*matrix(grad)))
        k = k + 1
        print(sum(abs(eta-eta_))/sum(abs(eta)))
        if k > 100 or sum(abs(eta-eta_))/sum(abs(eta))  < 1e-2: break
        Kernel = np.zeros((Kernels[0].shape[0], Kernels[0].shape[1]))    
        for i in range(nbKernels): Kernel = Kernel + eta_[i]*Kernels[i]    
        gamma = solveDualProblem(Kernel, Y, lambda_, risk)
        grad = np.array([-lambda_*(gamma.T*matrix(Kernels[i])*gamma)[0] for i in range(nbKernels)])
        eta = eta_        
    return eta, gamma
Example #29
def quadprog(H, c, A, b):
    x = np.matrix(solvers.lp(matrix(np.zeros(c.shape)), matrix(-A), matrix(-b))['x'])
    # x = scipy.optimize.linprog(np.ones(c.shape).T[0], -np.array(A), -np.array(b).T[0])['x']
    # x = np.matrix(x).T
    workset = np.array(A * x <= b).T[0]
    max_iter = 200
    for k in range(max_iter):
        Aeq = A[np.where(workset == True)]
        g_k = H * x + c
        p_k, lambda_k = qpsub(H, g_k, Aeq, np.matlib.zeros((Aeq.shape[0], 1)))
        if np.linalg.norm(p_k) <= 1e-9:
            if np.min(lambda_k) > 0:
                break
            else:
                pos = np.argmin(lambda_k)
                for i in range(b.size):
                    if workset[i] and np.sum(workset[:i]) == pos:
                        workset[i] = False
                        break
        else:
            alpha = 1.0
            pos = -1
            for i in range(b.size):
                if not workset[i] and (A[i] * p_k)[0, 0] < 0:
                    now = np.abs((b[i] - A[i] * x) / (A[i] * p_k))[0, 0]
                    if now < alpha:
                        alpha = now
                        pos = i
            x += alpha * p_k
            if pos != -1:
                workset[pos] = True

    return x
Example #30
File: hw8.py  Project: Kekeli/caltech-lfd
def getDualQuardraticParameters(trainingData, trainingResults, C, Q):
    'formulate the dual parameters for the given data'

    n_points = len(trainingResults)
    print('No of points: ' + str(n_points))

    # Gram matrix
    K = zeros((n_points, n_points))
    for i in range(n_points):
       for j in range(n_points):
           # K[i,j] = dot(trainingData[i], trainingData[j])
           K[i,j] = polynomialKernel(trainingData[i], trainingData[j], Q)
    P = cvxopt.matrix(outer(trainingResults,trainingResults) * K, tc='d')

    q = -1.0 * cvxopt.matrix(ones((n_points, 1)), tc='d')
    G = -1.0 * cvxopt.matrix(identity(n_points), tc='d')
    h = cvxopt.matrix(zeros((n_points, 1)), tc='d')
    A = cvxopt.matrix(trainingResults, (1,n_points), tc='d')
    b = cvxopt.matrix([0.0])

    # For soft margin, double the constraints:
    # append rows from G_C to G and rows from h_C to h.

    G_C = 1.0 * cvxopt.matrix(identity(n_points), tc='d')
    h_C = cvxopt.matrix(C * ones((n_points,1)), tc='d')

    foo = cvxopt.matrix( [G, G_C] )
    bar = cvxopt.matrix( [h, h_C])

    return P, q, foo, bar, A, b, K
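# A hypothetical way to consume the parameters above (assumes
# polynomialKernel, trainingData and trainingResults exist as in hw8.py):
import numpy as np

P, q, G, h, A, b, K = getDualQuardraticParameters(trainingData, trainingResults, C=1.0, Q=2)
sol = cvxopt.solvers.qp(P, q, G, h, A, b)
alphas = np.ravel(sol['x'])   # dual variables with 0 <= alpha_i <= C
support = alphas > 1e-5       # support-vector indices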
Example #31
def convert(Aun, bun, Cun, S, dims, Atype=spmatrix, tfill=0, tsize=0):
    """
    Implements conversion where block-diagonal correlative sparsity is possible.
    
    Given an SDP problem with symmetric matrix variable X
    
    minimize trace(Cun*X)
    subject to
        Aun.T*X[:] = bun
        X == positive semidefinite
        
    converts to the problem with matrix variables Xi
    
    minimize sum_i trace(Ccon[i]*Xi)
    subject to
        Acon[i].T*Xi = bcon[i] for all i
        Xi == positive semidefinite for all i
    
    ARGUMENTS:
        Aun, bun, Cun : coefficients of unconverted problem
        S : sparsity pattern of unconverted problem 
            (Sij = 1 if (i,j) in aggregate sparsity pattern, Sij = 0 otherwise)
        tfill, tsize : clique merging parameters
        
    RETURNS:
        Acon, bcon, Ccon : list of coefficient matrices for each clique
        Lcon : sparse matrix containing consistency constraints (L.T*Xt = 0 
                where Xt is a stacked vector containing Xi's)
        dims: dictionary of dimensions:
            dims['l'] : number of linear variables in nonnegative cone
            dims['q'] : list of sizes of quadratic cones
            dims['s'] : list of orders of semidefinite cones
    """
    n = dims['s'][0]
    nl = dims['l']
    m = Aun.size[1]

    #Find clique tree decomposition of sparsity pattern
    if tsize > 0 or tfill > 0:
        symb = symbolic(S,
                        p=amd.order,
                        merge_function=merge_size_fill(tsize=tsize,
                                                       tfill=tfill))
    else:
        symb = symbolic(S, p=amd.order)
    ns = [len(clique) for clique in symb.cliques()]

    #permute coefficient and sparsity matrices
    Cunl = Cun[:nl]
    Cuns = reshape(Cun[nl:], n, n)
    Cuns = +Cuns[symb.p, symb.p]
    S = S[symb.p, symb.p]
    cliques = symb.cliques()

    Aunl = Aun[:nl, :]
    Auns = Aun[nl:, :]
    I, J, v = Auns.I, Auns.J, Auns.V
    j = [int(k // n) for k in list(I)]
    i = [int(I[k] - (j[k] * n)) for k in range(len(I))]
    Auns = spmatrix(v, symb.ip[i] + symb.ip[j] * n, J, Aun.size)

    cliques_all = []
    for cl in cliques:
        cliques_all.extend(cl)
    assert n == len(set(
        cliques_all)), "Each index must be included in at least one clique."

    #Convert C
    Ccon = []
    for clique in cliques:
        Ccon.append(+Cuns[clique, clique])
        Cuns[clique, clique] = 0.0

    #Convert A. Greedily assign constraints to cliques in which
    #nonzeros of Ak are fully contained in clique k

    Aconl = [[matrix(0.0, (0, nl))] for i in ns]
    Acons = [[matrix(0.0, (0, i**2))] for i in ns]
    bcon = [[] for k in range(len(ns))]

    constraint_order = []
    for k in range(len(ns)):
        clique = cliques[k]
        unwrapped = [(i + j * n) for i in list(clique) for j in list(clique)]
        for con in range(m):
            if con in constraint_order: continue
            AI = sparse(Auns[:, con]).I
            if set(AI).issubset(set(unwrapped)):
                Aconl[k].append(Aunl[:, con].T)
                Acons[k].append(Auns[unwrapped, con].T)
                bcon[k].append(bun[con])
                constraint_order.append(con)
        if Atype is matrix:
            Aconl[k] = matrix(Aconl[k]).T
            Acons[k] = matrix(Acons[k]).T
        else:
            Aconl[k] = sparse(Aconl[k]).T
            Acons[k] = sparse(Acons[k]).T

        bcon[k] = matrix(bcon[k])
    assert len(constraint_order) == Aun.size[1], \
        "Conversion for problems that are not totally separable is not yet implemented"

    Acon = sparse([sparse([a.T for a in Aconl]).T, spdiag_rect(Acons)])
    bcon = matrix(bcon)
    Ccon = matrix([Cunl, matrix([c[:] for c in Ccon])])

    #get consistency constraints L*x = 0
    total_variables = [0]
    for k in ns:
        total_variables.append(total_variables[-1] + k**2)

    L = ([], [], [])
    ncol = 0
    for child in range(len(symb.snpar)):
        parent = symb.snpar[child]
        if parent == -1: continue

        for ip in range(len(cliques[parent])):
            for jp in range(ip + 1):

                for ic in range(len(cliques[child])):
                    if cliques[child][ic] != cliques[parent][ip]: continue
                    for jc in range(ic + 1):
                        if cliques[child][jc] != cliques[parent][jp]: continue

                        zp = jp * len(
                            cliques[parent]) + ip + total_variables[parent]
                        zc = jc * len(
                            cliques[child]) + ic + total_variables[child]

                        L[0].extend([1.0, -1.0])
                        L[1].extend([zp, zc])
                        L[2].extend([ncol, ncol])

                        if jp != ip:
                            zp = ip * len(
                                cliques[parent]) + jp + total_variables[parent]
                            zc = ic * len(
                                cliques[child]) + jc + total_variables[child]

                            L[0].extend([1.0, -1.0])
                            L[1].extend([zp, zc])
                            L[2].extend([ncol, ncol])
                        ncol += 1

    Lcon = spmatrix(L[0],
                    matrix(L[1]) + nl,
                    L[2],
                    size=(total_variables[-1] + nl, ncol))

    dims = {'l': nl, 'q': [], 's': ns}
    return Acon, bcon, Ccon, Lcon, dims
Example #32
def solve_lp_for_year(data, factors, targets, year, tol, weights=None):
    """
    Parameters
    ----------
    data: CPS data
    factors: growth factors
    targets: aggregate targets
    year: year LP is being solved for
    tol: tolerance
    """
    def target(target_val, pop, factor, value):
        return (target_val * pop / factor * 1000 - value)

    print(f"Preparing Coefficient Matrix for {year}")
    if not isinstance(weights, pd.Series):
        weights = data["s006"]
    # only use the blow up factors if we're not solving for 2014 weights
    s006 = np.where(
        data["e02400"] > 0,
        weights * factors["APOPSNR"][year],
        weights * factors["ARETS"][year]
    )
    single_returns = np.where(
        (data["mars"] == 1) & (data["filer"] == 1), s006, 0
    )
    joint_returns = np.where(
        (data["mars"] == 2) & (data["filer"] == 1), s006, 0
    )
    hh_returns = np.where(
        (data["mars"] == 4) & (data["filer"] == 1), s006, 0
    )
    returns_w_ss = np.where(
        (data["e02400"] > 0) & (data["filer"] == 1), s006, 0
    )
    dep_exemptions = np.where(
        data["mars"] == 2, data["XTOT"] - 2, data["XTOT"] - 1
    ) * s006
    interest = data["interest"] * s006
    dividend = data["divs"] * s006
    biz_income = np.where(
        data["e00900"] > 0, data["e00900"], 0
    ) * s006
    biz_loss = np.where(
        data["e00900"] < 0, -data["e00900"], 0
    ) * s006
    cap_gain = np.where(
        data["CGAGIX"] > 0, data["CGAGIX"], 0
    ) * s006
    pension = data["e01500"] * s006
    sch_e_income = np.where(
        data["rents"] > 0, data["rents"], 0
    ) * s006
    sch_e_loss = np.where(
        data["rents"] < 0, -data["rents"], 0
    ) * s006
    ss_income = np.where(
        data["filer"] == 1, data["e02400"], 0
    ) * s006
    ucomp = data["e02300"] * s006

    # wage distribution
    wage1 = np.where(
        data["agi"] <= 10000, data["e00200"], 0
    ) * s006
    wage2 = np.where(
        (data["agi"] > 10000) & (data["agi"] <= 20000),
        data["e00200"], 0
    ) * s006
    wage3 = np.where(
        (data["agi"] > 20000) & (data["agi"] <= 30000),
        data["e00200"], 0
    ) * s006
    wage4 = np.where(
        (data["agi"] > 30000) & (data["agi"] <= 40000),
        data["e00200"], 0
    ) * s006
    wage5 = np.where(
        (data["agi"] > 40000) & (data["agi"] <= 50000),
        data["e00200"], 0
    ) * s006
    wage6 = np.where(
        (data["agi"] > 50000) & (data["agi"] <= 75000),
        data["e00200"], 0
    ) * s006
    wage7 = np.where(
        (data["agi"] > 75000) & (data["agi"] <= 100000),
        data["e00200"], 0
    ) * s006
    wage8 = np.where(
        data["agi"] > 100000,
        data["e00200"], 0
    ) * s006
    lhs_vars = {}
    lhs_vars['single_returns'] = single_returns
    lhs_vars['joint_returns'] = joint_returns
    lhs_vars['hh_returns'] = hh_returns
    lhs_vars['returns_w_ss'] = returns_w_ss
    lhs_vars['dep_exemptions'] = dep_exemptions
    lhs_vars['interest'] = interest
    lhs_vars['dividend'] = dividend
    lhs_vars['biz_income'] = biz_income
    lhs_vars['biz_loss'] = biz_loss
    lhs_vars['cap_gain'] = cap_gain
    lhs_vars['pension'] = pension
    lhs_vars['sch_e_income'] = sch_e_income
    lhs_vars['sch_e_loss'] = sch_e_loss
    lhs_vars['ss_income'] = ss_income
    lhs_vars['ucomp'] = ucomp
    lhs_vars['wage1'] = wage1
    lhs_vars['wage2'] = wage2
    lhs_vars['wage3'] = wage3
    lhs_vars['wage4'] = wage4
    lhs_vars['wage5'] = wage5
    lhs_vars['wage6'] = wage6
    lhs_vars['wage7'] = wage7
    lhs_vars['wage8'] = wage8

    print(f"Preparing Targets for {year}")
    apopn = factors['APOPN'][year]
    aints = factors['AINTS'][year]
    adivs = factors['ADIVS'][year]
    aschci = factors['ASCHCI'][year]
    aschcl = factors['ASCHCL'][year]
    acgns = factors['ACGNS'][year]
    atxpy = factors['ATXPY'][year]
    aschei = factors['ASCHEI'][year]
    aschel = factors['ASCHEL'][year]
    asocsec = factors['ASOCSEC'][year]
    apopsnr = factors['APOPSNR'][year]
    aucomp = factors['AUCOMP'][year]
    awage = factors['AWAGE'][year]

    year = str(year)
    rhs_vars = {}

    rhs_vars['single_returns'] = targets[year]['Single'] - single_returns.sum()
    rhs_vars['joint_returns'] = targets[year]['Joint'] - joint_returns.sum()
    rhs_vars['hh_returns'] = targets[year]['HH'] - hh_returns.sum()
    target_name = 'SS_return'
    rhs_vars['returns_w_ss'] = targets[year][target_name] - returns_w_ss.sum()
    target_name = 'Dep_return'
    rhs_vars['dep_exemptions'] = (targets[year][target_name] -
                                  dep_exemptions.sum())
    rhs_vars['interest'] = target(targets[year]['INTS'], apopn, aints,
                                  interest.sum())
    rhs_vars['dividend'] = target(targets[year]['DIVS'], apopn, adivs,
                                  dividend.sum())
    rhs_vars['biz_income'] = target(targets[year]['SCHCI'], apopn, aschci,
                                    biz_income.sum())
    rhs_vars['biz_loss'] = target(targets[year]['SCHCL'], apopn, aschcl,
                                  biz_loss.sum())
    rhs_vars['cap_gain'] = target(targets[year]['CGNS'], apopn, acgns,
                                  cap_gain.sum())
    rhs_vars['pension'] = target(targets[year]['Pension'], apopn, atxpy,
                                 pension.sum())
    rhs_vars['sch_e_income'] = target(targets[year]['SCHEI'], apopn, aschei,
                                      sch_e_income.sum())
    rhs_vars['sch_e_loss'] = target(targets[year]['SCHEL'], apopn, aschel,
                                    sch_e_loss.sum())
    rhs_vars['ss_income'] = target(targets[year]['SS'], apopsnr, asocsec,
                                   ss_income.sum())
    rhs_vars['ucomp'] = target(targets[year]['UCOMP'], apopn, aucomp,
                               ucomp.sum())
    rhs_vars['wage1'] = target(targets[year]['wage1'], apopn, awage,
                               wage1.sum())
    rhs_vars['wage2'] = target(targets[year]['wage2'], apopn, awage,
                               wage2.sum())
    rhs_vars['wage3'] = target(targets[year]['wage3'], apopn, awage,
                               wage3.sum())
    rhs_vars['wage4'] = target(targets[year]['wage4'], apopn, awage,
                               wage4.sum())
    rhs_vars['wage5'] = target(targets[year]['wage5'], apopn, awage,
                               wage5.sum())
    rhs_vars['wage6'] = target(targets[year]['wage6'], apopn, awage,
                               wage6.sum())
    rhs_vars['wage7'] = target(targets[year]['wage7'], apopn, awage,
                               wage7.sum())
    rhs_vars['wage8'] = target(targets[year]['wage8'], apopn, awage,
                               wage8.sum())

    model_vars = ['single_returns', 'joint_returns', 'returns_w_ss',
                  'dep_exemptions', 'interest', 'biz_income',
                  'pension', 'ss_income', 'wage1', 'wage2', 'wage3',
                  'wage4', 'wage5', 'wage6', 'wage7', 'wage8']

    vstack_vars = []
    b = []  # list to hold the targets
    for var in model_vars:
        vstack_vars.append(lhs_vars[var])
        t = rhs_vars[var]
        b.append(t)
        print(f'{var:14} {t:0.2f}')

    vstack_vars = tuple(vstack_vars)
    one_half_lhs = np.vstack(vstack_vars)

    # coefficients for r and s
    a1 = np.array(one_half_lhs)
    a2 = np.array(-one_half_lhs)

    # set up LP model
    print('Constructing LP Model')
    N = len(data.index)
    c = cvxopt.matrix(np.ones(2 * N).tolist())

    # tolerance and non-negativity constraints
    G_values = np.append(np.ones(2 * N), -np.ones(2 * N)).tolist()
    G_row = np.concatenate((list(range(N)), list(range(N)),
                            [i + N for i in list(range(2 * N))])).tolist()
    G_row = [int(i) for i in G_row]
    G_col = np.concatenate((list(range(2 * N)), list(range(2 * N)))).tolist()
    G_col = [int(i) for i in G_col]

    G = cvxopt.spmatrix(G_values, G_row, G_col)
    h = cvxopt.matrix(np.append(tol * np.ones(N), np.zeros(2 * N)).tolist())

    # targets
    A = cvxopt.matrix(np.hstack([a1, a2]))
    b = cvxopt.matrix(b)

    print("Solving model")
    sol_cvxopt = cvxopt.solvers.lp(c=c, G=G, h=h, A=A, b=b, solver=None)

    # extract results and construct weights
    rs_val_cvxopt = np.array(sol_cvxopt["x"]).reshape((2 * N,))
    r_val_cvxopt = rs_val_cvxopt[:N]
    s_val_cvxopt = rs_val_cvxopt[N:]
    z = r_val_cvxopt - s_val_cvxopt

    return (1 + z) * s006 * 100
Example #33
from math import pi
from cvxopt import matrix, mul, normal, sin

try:
    import pylab
except ImportError:
    pylab_installed = False
else:
    pylab_installed = True

# Figures 6.8-10, pages 313-314
# Quadratic smoothing.
n = 4000
t = matrix(list(range(n)), tc='d')
ex = 0.5 * mul(sin(2 * pi / n * t), sin(0.01 * t))
corr = ex + 0.05 * normal(n, 1)

if pylab_installed:
    pylab.figure(1, facecolor='w', figsize=(8, 5))
    pylab.subplot(211)
    pylab.plot(t, ex)
    pylab.ylabel('x[i]')
    pylab.xlabel('i')
    pylab.title('Original and corrupted signal (fig. 6.8)')
    pylab.subplot(212)
    pylab.plot(t, corr)
    pylab.ylabel('xcor[i]')
    pylab.xlabel('i')
Example #34
File: lsqr.py  Project: Magnulas/pmex
def lsqr(A,
         b,
         damp=0.0,
         atol=1e-8,
         btol=1e-8,
         conlim=1e8,
         itnlim=None,
         show=False,
         wantvar=False):
    """

    [ x, istop, itn, r1norm, r2norm, anorm, acond, arnorm, xnorm, var ]...
     = lsqr( m, n,  'aprod',  iw, rw, b, damp, atol, btol, conlim, itnlim, show );

     LSQR solves  Ax = b  or  min ||b - Ax||_2  if damp = 0,
     or   min || (b)  -  (  A   )x ||   otherwise.
              || (0)     (damp I)  ||2
     A  is an m by n matrix defined by  y = aprod( mode,m,n,x,iw,rw ),
     where the parameter 'aprodname' refers to a function 'aprod' that
     performs the matrix-vector operations.
     If mode = 1,   aprod  must return  y = Ax   without altering x.
     If mode = 2,   aprod  must return  y = A'x  without altering x.
     WARNING:   The file containing the function 'aprod'
                must not be called aprodname.m !!!!

    -----------------------------------------------------------------------
     LSQR uses an iterative (conjugate-gradient-like) method.
     For further information, see
     1. C. C. Paige and M. A. Saunders (1982a).
        LSQR: An algorithm for sparse linear equations and sparse least squares,
        ACM TOMS 8(1), 43-71.
     2. C. C. Paige and M. A. Saunders (1982b).
        Algorithm 583.  LSQR: Sparse linear equations and least squares problems,
        ACM TOMS 8(2), 195-209.
     3. M. A. Saunders (1995).  Solution of sparse rectangular systems using
        LSQR and CRAIG, BIT 35, 588-604.

     Input parameters:
     iw, rw      are not used by lsqr, but are passed to aprod.
     atol, btol  are stopping tolerances.  If both are 1.0e-9 (say),
                 the final residual norm should be accurate to about 9 digits.
                 (The final x will usually have fewer correct digits,
                 depending on cond(A) and the size of damp.)
     conlim      is also a stopping tolerance.  lsqr terminates if an estimate
                 of cond(A) exceeds conlim.  For compatible systems Ax = b,
                 conlim could be as large as 1.0e+12 (say).  For least-squares
                 problems, conlim should be less than 1.0e+8.
                 Maximum precision can be obtained by setting
                 atol = btol = conlim = zero, but the number of iterations
                 may then be excessive.
     itnlim      is an explicit limit on iterations (for safety).
     show = 1    gives an iteration log,
     show = 0    suppresses output.

     Output parameters:
     x           is the final solution.
     istop       gives the reason for termination.
     istop       = 1 means x is an approximate solution to Ax = b.
                 = 2 means x approximately solves the least-squares problem.
     r1norm      = norm(r), where r = b - Ax.
     r2norm      = sqrt( norm(r)^2  +  damp^2 * norm(x)^2 )
                 = r1norm if damp = 0.
     anorm       = estimate of Frobenius norm of Abar = [  A   ].
                                                        [damp*I]
     acond       = estimate of cond(Abar).
     arnorm      = estimate of norm(A'*r - damp^2*x).
     xnorm       = norm(x).
     var         (if present) estimates all diagonals of (A'A)^{-1} (if damp=0)
                 or more generally (A'A + damp^2*I)^{-1}.
                 This is well defined if A has full column rank or damp > 0.
                 (Not sure what var means if rank(A) < n and damp = 0.)


            1990: Derived from Fortran 77 version of LSQR.
     22 May 1992: bbnorm was used incorrectly.  Replaced by anorm.
     26 Oct 1992: More input and output parameters added.
     01 Sep 1994: Matrix-vector routine is now a parameter 'aprodname'.
                  Print log reformatted.
     14 Jun 1997: show  added to allow printing or not.
     30 Jun 1997: var   added as an optional output parameter.
     07 Aug 2002: Output parameter rnorm replaced by r1norm and r2norm.
                  Michael Saunders, Systems Optimization Laboratory,
                  Dept of MS&E, Stanford University.
    -----------------------------------------------------------------------
    """
    """
         Initialize.
    """
    n = len(b)
    m = n
    if itnlim is None: itnlim = 2 * n

    msg = ('The exact solution is  x = 0                              ',
           'Ax - b is small enough, given atol, btol                  ',
           'The least-squares solution is good enough, given atol     ',
           'The estimate of cond(Abar) has exceeded conlim            ',
           'Ax - b is small enough for this machine                   ',
           'The least-squares solution is good enough for this machine',
           'Cond(Abar) seems to be too large for this machine         ',
           'The iteration limit has been reached                      ')

    var = matrix(0., (n, 1))

    if show:
        print(' ')
        print('LSQR            Least-squares solution of  Ax = b')
        str1 = 'The matrix A has %8g rows  and %8g cols' % (m, n)
        str2 = 'damp = %20.14e    wantvar = %8g' % (damp, wantvar)
        str3 = 'atol = %8.2e                 conlim = %8.2e' % (atol, conlim)
        str4 = 'btol = %8.2e                 itnlim = %8g' % (btol, itnlim)
        print(str1)
        print(str2)
        print(str3)
        print(str4)

    itn = 0
    istop = 0
    nstop = 0
    ctol = 0
    if conlim > 0: ctol = 1 / conlim
    anorm = 0
    acond = 0
    dampsq = damp**2
    ddnorm = 0
    res2 = 0
    xnorm = 0
    xxnorm = 0
    z = 0
    cs2 = -1
    sn2 = 0
    """
    Set up the first vectors u and v for the bidiagonalization.
     These satisfy  beta*u = b,  alfa*v = A'u.
    """
    __x = matrix(0., (n, 1))  # a matrix for temporary holding
    v = matrix(0., (n, 1))
    u = +b
    x = matrix(0., (n, 1))
    alfa = 0
    beta = nrm2(u)
    w = matrix(0., (n, 1))

    if beta > 0:
        ### u = (1/beta) * u;
        ### v = feval( aprodname, 2, m, n, u, iw, rw );
        scal(1 / beta, u)
        A(u, v, trans='T')
        #v = feval( aprodname, 2, m, n, u, iw, rw );
        alfa = nrm2(v)

    if alfa > 0:
        ### v = (1/alfa) * v;
        scal(1 / alfa, v)
        copy(v, w)

    rhobar = alfa
    phibar = beta
    bnorm = beta
    rnorm = beta
    r1norm = rnorm
    r2norm = rnorm

    # reverse the order here from the original matlab code because
    # there was an error on return when arnorm==0
    arnorm = alfa * beta
    if arnorm == 0:
        print(msg[0])
        return x, istop, itn, r1norm, r2norm, anorm, acond, arnorm, xnorm, var

    head1 = '   Itn      x[0]       r1norm     r2norm '
    head2 = ' Compatible   LS      Norm A   Cond A'

    if show:
        print(' ')
        print(head1, head2)
        test1 = 1
        test2 = alfa / beta
        str1 = '%6g %12.5e' % (itn, x[0])
        str2 = ' %10.3e %10.3e' % (r1norm, r2norm)
        str3 = '  %8.1e %8.1e' % (test1, test2)
        print(str1, str2, str3)
    """
    %------------------------------------------------------------------
    %     Main iteration loop.
    %------------------------------------------------------------------
    """
    while itn < itnlim:
        itn = itn + 1
        """
        %     Perform the next step of the bidiagonalization to obtain the
        %     next  beta, u, alfa, v.  These satisfy the relations
        %                beta*u  =  a*v   -  alfa*u,
        %                alfa*v  =  A'*u  -  beta*v.
        """
        ### u    = feval( aprodname, 1, m, n, v, iw, rw )  -  alfa*u;
        copy(u, __x)
        A(v, u)
        axpy(__x, u, -alfa)

        beta = nrm2(u)
        if beta > 0:
            ### u     = (1/beta) * u;
            scal(1 / beta, u)
            anorm = sqrt(anorm**2 + alfa**2 + beta**2 + damp**2)
            ### v     = feval( aprodname, 2, m, n, u, iw, rw )  -  beta*v;
            copy(v, __x)
            A(u, v, trans='T')
            axpy(__x, v, -beta)

            alfa = nrm2(v)
            if alfa > 0:
                ### v = (1/alfa) * v;
                scal(1 / alfa, v)
        """
        %     Use a plane rotation to eliminate the damping parameter.
        %     This alters the diagonal (rhobar) of the lower-bidiagonal matrix.
        """

        rhobar1 = sqrt(rhobar**2 + damp**2)
        cs1 = rhobar / rhobar1
        sn1 = damp / rhobar1
        psi = sn1 * phibar
        phibar = cs1 * phibar
        """
        %     Use a plane rotation to eliminate the subdiagonal element (beta)
        %     of the lower-bidiagonal matrix, giving an upper-bidiagonal matrix.
        """

        ###cs      =   rhobar1/ rho;
        ###sn      =   beta   / rho;
        cs, sn, rho = SymOrtho(rhobar1, beta)

        theta = sn * alfa
        rhobar = -cs * alfa
        phi = cs * phibar
        phibar = sn * phibar
        tau = sn * phi
        """
        %     Update x and w.
        """
        t1 = phi / rho
        t2 = -theta / rho
        dk = (1 / rho) * w

        ### x       = x      +  t1*w;
        axpy(w, x, t1)
        ### w       = v      +  t2*w;
        scal(t2, w)
        axpy(v, w)
        ddnorm = ddnorm + nrm2(dk)**2
        if wantvar:
            ### var = var  +  dk.*dk;
            axpy(dk**2, var)
        """
        %     Use a plane rotation on the right to eliminate the
        %     super-diagonal element (theta) of the upper-bidiagonal matrix.
        %     Then use the result to estimate  norm(x).
        """

        delta = sn2 * rho
        gambar = -cs2 * rho
        rhs = phi - delta * z
        zbar = rhs / gambar
        xnorm = sqrt(xxnorm + zbar**2)
        gamma = sqrt(gambar**2 + theta**2)
        cs2 = gambar / gamma
        sn2 = theta / gamma
        z = rhs / gamma
        xxnorm = xxnorm + z**2
        """
        %     Test for convergence.
        %     First, estimate the condition of the matrix  Abar,
        %     and the norms of  rbar  and  Abar'rbar.
        """
        acond = anorm * sqrt(ddnorm)
        res1 = phibar**2
        res2 = res2 + psi**2
        rnorm = sqrt(res1 + res2)
        arnorm = alfa * abs(tau)
        """
        %     07 Aug 2002:
        %     Distinguish between
        %        r1norm = ||b - Ax|| and
        %        r2norm = rnorm in current code
        %               = sqrt(r1norm^2 + damp^2*||x||^2).
        %        Estimate r1norm from
        %        r1norm = sqrt(r2norm^2 - damp^2*||x||^2).
        %     Although there is cancellation, it might be accurate enough.
        """
        r1sq = rnorm**2 - dampsq * xxnorm
        r1norm = sqrt(abs(r1sq))
        if r1sq < 0: r1norm = -r1norm
        r2norm = rnorm
        """
        %     Now use these norms to estimate certain other quantities,
        %     some of which will be small near a solution.
        """
        test1 = rnorm / bnorm
        test2 = arnorm / (anorm * rnorm)
        test3 = 1 / acond
        t1 = test1 / (1 + anorm * xnorm / bnorm)
        rtol = btol + atol * anorm * xnorm / bnorm
        """
        %     The following tests guard against extremely small values of
        %     atol, btol  or  ctol.  (The user may have set any or all of
        %     the parameters  atol, btol, conlim  to 0.)
        %     The effect is equivalent to the normal tests using
        %     atol = eps,  btol = eps,  conlim = 1/eps.
        """
        if itn >= itnlim: istop = 7
        if 1 + test3 <= 1: istop = 6
        if 1 + test2 <= 1: istop = 5
        if 1 + t1 <= 1: istop = 4
        """
        %     Allow for tolerances set by the user.
        """
        if test3 <= ctol: istop = 3
        if test2 <= atol: istop = 2
        if test1 <= rtol: istop = 1
        """
        %     See if it is time to print something.
        """
        prnt = False
        if n <= 40: prnt = True
        if itn <= 10: prnt = True
        if itn >= itnlim - 10: prnt = True
        # if itn%10 == 0       : prnt = True;
        if test3 <= 2 * ctol: prnt = True
        if test2 <= 10 * atol: prnt = True
        if test1 <= 10 * rtol: prnt = True
        if istop != 0: prnt = True

        if prnt:
            if show:
                str1 = '%6g %12.5e' % (itn, x[0])
                str2 = ' %10.3e %10.3e' % (r1norm, r2norm)
                str3 = '  %8.1e %8.1e' % (test1, test2)
                str4 = ' %8.1e %8.1e' % (anorm, acond)
                print(str1, str2, str3, str4)

        if istop != 0: break
    """
    %     End of iteration loop.
    %     Print the stopping condition.
    """
    if show:
        print(' ')
        print('LSQR finished')
        print(msg[istop])
        print(' ')
        str1 = 'istop =%8g   r1norm =%8.1e' % (istop, r1norm)
        str2 = 'anorm =%8.1e   arnorm =%8.1e' % (anorm, arnorm)
        str3 = 'itn   =%8g   r2norm =%8.1e' % (itn, r2norm)
        str4 = 'acond =%8.1e   xnorm  =%8.1e' % (acond, xnorm)
        print(str1 + '   ' + str2)
        print(str3 + '   ' + str4)
        print(' ')

    return x, istop, itn, r1norm, r2norm, anorm, acond, arnorm, xnorm, var
    """
Example #35
File: SVM.py  Project: rose75519/project2
    def fit(self, X, y):
        n_samples, n_features = X.shape
        #print("\n\nNumber of examples in a sample = ",n_samples , ", Number of features = ", n_features)
        self._w = np.zeros(n_features)

        # Initialize the Gram matrix for taking the output from QP solution
        K = np.zeros((n_samples, n_samples))
        for i in range(n_samples):
            for j in range(n_samples):
                K[i, j] = self._kernel(X[i], X[j])
                #print("K[", i,",", j, "] = ", K[i,j])

        # Here we have to solve the convex optimization problem
        # min 1/2 x^T P x + q^T x
        # s.t.
        #  Gx <= h
        #  Ax = b

        P = cvxopt.matrix(np.outer(y, y) * K)
        q = cvxopt.matrix(-np.ones(n_samples))
        # q is a vector of minus ones
        A = cvxopt.matrix(y, (1, n_samples), 'd')
        #print(A.typecode)
        b = cvxopt.matrix(0.0)

        #G & h are required for soft-margin classifier

        if (self._kernel == self.linear_kernel):
            G = cvxopt.matrix(np.diag(np.ones(n_samples) * -1))
            #G is an identity matrix with −1s as its diagonal
            # so that our greater than is transformed into less than
            h = cvxopt.matrix(np.zeros(n_samples))
            #h is vector of zeros
        else:
            G_std = np.diag(np.ones(n_samples) * -1)
            h_std = np.zeros(n_samples)

            G_slack = np.identity(n_samples)
            h_slack = np.ones(n_samples) * self._C

            G = cvxopt.matrix(np.vstack((G_std, G_slack)))
            h = cvxopt.matrix(np.hstack((h_std, h_slack)))

        cvxopt.solvers.options['show_progress'] = False
        solution = cvxopt.solvers.qp(P, q, G, h, A, b)

        # Lagrange multipliers
        alpha = np.ravel(solution['x'])

        # Now figure out the Support Vectors, i.e. those with yi(xi.w + b) = 1
        # Check whether each Lagrange multiplier has a non-zero value
        sv = alpha > 1e-4
        self._alpha = alpha[sv]
        self._Support_Vectors = X[sv]
        self._Support_Vectors_Labels = y[sv]

        print("\n Total number of examples = ", n_samples)
        print("\n Total number of Support Vectors found = ",
              len(self._Support_Vectors))
        print("\n\n Support Vectors are: \n", self._Support_Vectors)
        print("\n\n Support Vectors Labels are: \n",
              self._Support_Vectors_Labels)

        #Now let us define the decision boundary
        #w = Σαi*yi*xi
        if (self._kernel == self.linear_kernel):
            for i in range(len(self._alpha)):
                #print(i, self._alpha[i], self._Support_Vectors_Labels[i], self._Support_Vectors[i])
                self._w += self._alpha[i] * self._Support_Vectors_Labels[
                    i] * self._Support_Vectors[i]
        else:
            self._w = None
        print("\n Weights are : ", self._w)

        # Now we need to find the intercept
        # b = y_i - w^T x_i, averaged over the support vectors
        ind = np.arange(len(alpha))[sv]
        self._b = np.mean(y[ind] - np.dot(X[ind], self._w))
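
A minimal, self-contained sketch of the same dual construction on a toy,
linearly separable set (the data and names below are illustrative, not from
the project above):

import numpy as np
import cvxopt

X = np.array([[2., 2.], [3., 3.], [-1., -1.], [-2., -3.]])
y = np.array([1., 1., -1., -1.])
n = len(y)
K = X.dot(X.T)                                 # linear-kernel Gram matrix
P = cvxopt.matrix(np.outer(y, y) * K)
q = cvxopt.matrix(-np.ones(n))
G = cvxopt.matrix(-np.eye(n))                  # -alpha_i <= 0, i.e. alpha_i >= 0
h = cvxopt.matrix(np.zeros(n))
A = cvxopt.matrix(y.reshape(1, -1))
b = cvxopt.matrix(0.0)
cvxopt.solvers.options['show_progress'] = False
alpha = np.ravel(cvxopt.solvers.qp(P, q, G, h, A, b)['x'])
w = ((alpha * y)[:, None] * X).sum(axis=0)     # w = sum_i alpha_i y_i x_i
sv = alpha > 1e-4
b_ = np.mean(y[sv] - X[sv].dot(w))             # intercept averaged over the SVs
print(w, b_)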
Example #36
def solve_svm(x, y, C):
    N, D = x.shape
    Y = y
    Y = np.matrix(Y)
    outer_prod = co.matrix(Y.dot(Y.T))
    x = co.matrix(x.copy())
    y = co.matrix(y.copy())

    # Q is the kernel matrix where each element i,j is k(i,j) or x_i' * x_j
    Q = co.matrix(0.0, size=(N, N))
    dotX = co.matrix(0.0, size=(N, N))
    # X * X_T = D
    # Y * Y_T = S
    # Q = S * D
    co.blas.syrk(x, dotX)
    Q = co.mul(outer_prod, dotX)
    q = co.matrix(-1.0, (N, 1))

    # G for inequalities
    # First half: alpha_i <= C
    # Second half: alpha_i >= 0
    G = co.spmatrix([], [], [], size=(2 * N, N))
    G[::2 * N + 1], G[N::2 * N + 1] = co.matrix(1.0, (N, 1)), co.matrix(
        -1.0, (N, 1))  # set diagonal to -1 to flip equality sign
    h = co.matrix(0.0, (2 * N, 1))
    h[:N] = C

    # Sum of alpha times y = 0
    A = co.matrix(y, (1, N))

    sol = co.solvers.qp(Q, q, G, h, A, co.matrix(0.0))
    alpha = sol['x']
    b = sol['y'][0]
    max_alpha = max(abs(alpha))
    print(max_alpha)
    svec = [k for k in range(N) if abs(alpha[k]) > 1e-5 * max_alpha]
    print("Support vectors: %d" % len(svec))
    # w = X_t * (alpha .* y)
    w = co.matrix(0.0, (D, 1))
    co.blas.gemv(x, co.mul(alpha, y), w, trans='T')

    def predict(new_x):
        projected = co.blas.dotu(w, new_x)
        return np.sign(projected + b)

    return w, predict, alpha
Example #37


#sol_comp = softmargin(co.matrix(data), co.matrix(labels), 0.1)
w, predict, alpha = solve_svm(data, labels, 0.1)
diff = np.array(
    [predict(co.matrix(data[i])) == labels[i] for i in range(data.shape[0])])
print(diff.sum())
Example #38
solvers.options['show_progress'] = False
solvers.options['glpk'] = {'msg_lev': 'GLP_MSG_OFF'}

A = spmatrix(1.0, range(batchSize), [0]*batchSize, (batchSize,batchSize))
for i in range(1,batchSize):
    Ai = spmatrix(1.0, range(batchSize), [i]*batchSize, (batchSize,batchSize))
    A = sparse([A,Ai])

D = spmatrix(-1.0, range(batchSize), range(batchSize), (batchSize,batchSize))
DM = D
for i in range(1,batchSize):
    DM = sparse([DM, D])

A = sparse([[A],[DM]])

cr = matrix([-1.0/batchSize]*batchSize)
cf = matrix([1.0/batchSize]*batchSize)
c = matrix([cr,cf])

pStart = {}
pStart['x'] = matrix([matrix([1.0]*batchSize),matrix([-1.0]*batchSize)])
pStart['s'] = matrix([1.0]*(2*batchSize))
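# A hedged reading of the setup above: row (i*batchSize + j) of A has +1 in
# column i (first block of variables, say f) and -1 in column batchSize + j
# (second block, say g), and c weights the f-block by -1/batchSize and the
# g-block by +1/batchSize. With a cost right-hand side h (built elsewhere),
# the LP  minimize c'x  s.t.  A x <= h  then maximizes
# mean_i f_i - mean_j g_j subject to f_i - g_j <= h_ij, which has the form of
# the Kantorovich dual of an optimal-transport distance between two batches;
# pStart supplies an initial primal point for the solver.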
###############################################################################


def read_data(data_iter, batch_id):
    data = data_iter.next()
    batch_id += 1
    real_cpu, _ = data
    real_data = real_cpu.clone().to(device)
    real.resize_as_(real_data).copy_(real_data)
Example #39
    def init0(self, dae):
        dae.y[self.v] = matrix(self.voltage, (self.n, 1), 'd')
Example #40
plt.xlim(2, 4)

# hide ticks
cur_axes = plt.gca()
cur_axes.axes.get_xaxis().set_ticks([])
cur_axes.axes.get_yaxis().set_ticks([])

plt.xlabel('$x_1$', fontsize=20)
plt.ylabel('$x_2$', fontsize=20)
# pdf.savefig()
plt.show()

from cvxopt import matrix, solvers
# build K
V = np.concatenate((X0.T, -X1.T), axis=1)
K = matrix(V.T.dot(V))

p = matrix(-np.ones((2 * N, 1)))
# build A, b, G, h
G = matrix(-np.eye(2 * N))
h = matrix(np.zeros((2 * N, 1)))
A = matrix(y)
b = matrix(np.zeros((1, 1)))
solvers.options['show_progress'] = False
sol = solvers.qp(K, p, G, h, A, b)

l = np.array(sol['x'])
print('lambda = \n', l.T)
S = np.where(l > 1e-6)[0]

VS = V[:, S]
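
A hedged continuation of the snippet (it assumes, as the equality constraint
A above requires, that y is a 1 x 2N array of +/-1 labels and that X0, X1
hold the N points of each class): the primal hyperplane follows from the
usual dual-to-primal map w = sum_n lambda_n y_n x_n, with b averaged over
the support set S.

X = np.concatenate((X0.T, X1.T), axis=1)
w = V.dot(l)                             # V's n-th column is y_n * x_n
b = np.mean(y[:, S] - w.T.dot(X[:, S]))
print('w = ', w.T, 'b = ', b)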
Example #41
File: SVC3.py  Project: kevin840307/ML
    def fit(self, x, y):
        '''
        SVM formula:
        K = kernel
        K(x, x') = z_n^T * z_m
        optimal(b, w)
        min 1/2 * sum_n(sum_m(a_n * a_m * y_n * y_m * K(x, x'))) - sum_n(a_n)
        subject to sum_n(y_n(a_n)) = 0
                   (a_n) >= 0

        sv = (a_s > 0)

        Quadratic Programming:
        optimal a ← QP(Q, p, A, c, B, b)
        min 1/2 * a^T * Q * a + p^T * a
        subject to a_m^T * u >= c_m


        objective function:
        K(x, x') = z_n^T * z_m
        Q = y_n * y_m * K(x, x')
        p = -1_N

        A and c are N conditions
        B and b are a condition
        constraints:
        A = n-th unit direction
        c = 0
        B = 1_n
        b = 0
        M = N = data size


        Correspondence with the cvxopt call qp(P, q, G, h, A, b):
        P = Q
        q = p
        G = A
        h = c
        A = B
        x = a
        '''
        assert len(x) == len(y), "size error"
        assert len(x) > 0, "input x error"

        x_len = len(x)
        dimension = len(x[0])
        y = np.reshape(y, (-1, 1))
        kernel_x = polynomial_kernel(x, x, self.zeta, self.gamma, self.Q)

        Q = cvxopt.matrix(np.dot(y, y.T) * kernel_x)
        p = cvxopt.matrix(-np.ones(x_len))
        A = cvxopt.matrix(-np.eye(x_len))
        c = cvxopt.matrix(np.zeros(x_len))
        B = cvxopt.matrix(np.reshape(y, (1, -1)))
        b = cvxopt.matrix(np.zeros(1))
        cvxopt.solvers.options['show_progress'] = False
        result = cvxopt.solvers.qp(Q, p, A, c, B, b)

        self.__alphas = np.array(result['x']).flatten()
        self.__sv = self.__alphas > 1e-6
        self.__w = np.sum(np.array(result['x'] * y * x), axis=0).reshape(-1)
        self.__support_vectors = x[self.__sv, :]
        self.__a_y = np.reshape(self.__alphas[self.__sv],
                                (-1, 1)) * np.reshape(y[self.__sv], (-1, 1))
        self.__b = np.sum(y[self.__sv])
        self.__b -= np.sum(
            self.__a_y *
            polynomial_kernel(self.__support_vectors, x[self.__sv], self.zeta,
                              self.gamma, self.Q))
        self.__b /= len(self.__support_vectors)
        '''
        Vertical dist:
            |(ax + by + c)| / sqrt(a^2 + b^2)
        '''
        self.__margin = 1. / np.linalg.norm(self.__w)
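
The polynomial_kernel helper used above is not shown in the snippet. A hedged
sketch consistent with its (zeta, gamma, Q) argument order, using the common
form K(x, x') = (zeta + gamma * x^T x')^Q:

import numpy as np

def polynomial_kernel(X1, X2, zeta, gamma, Q):
    # pairwise polynomial kernel matrix between the rows of X1 and X2
    return (zeta + gamma * np.dot(np.asarray(X1), np.asarray(X2).T)) ** Q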
Example #42
File: GRAM.py  Project: sspeng/MKLpy
    def _arrange_kernel(self):
        Y = [1 if y == self.classes_[1] else -1 for y in self.Y]
        nn = len(Y)
        nk = self.n_kernels
        YY = spdiag(Y)
        eta = [1.0 / nk] * nk

        actual_weights = eta[:]
        actual_ratio = None
        Kc = summation(self.KL, eta)
        _r, alpha = radius(Kc)
        _m, gamma = margin(Kc, Y)

        self._ratios = []

        cstep = self.step
        for i in range(self.max_iter):
            #print actual_ratio

            a = np.array(
                [1.0 - (alpha.T * matrix(K) * alpha)[0] for K in self.KL])
            b = np.array([(gamma.T * YY * matrix(K) * YY * gamma)[0]
                          for K in self.KL])
            den = [np.dot(eta, b)**2] * nk
            num = [
                sum([eta[s] * (a[s] * b[r] - a[r] * b[s]) for s in range(nk)])
                for r in range(nk)
            ]

            eta = [cstep * (num[k] / den[k]) + eta[k] for k in range(nk)]
            eta = [max(0, v) for v in eta]
            eta = np.array(eta) / sum(eta)

            Kc = summation(self.KL, eta)
            _r, alpha = radius(Kc, init_sol=alpha)
            _m, gamma = margin(Kc, Y, init_sol=gamma)

            new_ratio = _r**2 / _m**2
            if actual_ratio and abs(new_ratio - actual_ratio) / nn < self.tol:
                # converged: the ratio changed by less than the tolerance
                # print(i, 'tol')
                self._ratios.append(new_ratio)
                actual_weights = eta
                # break;             # !!!
            elif new_ratio <= actual_ratio or not actual_ratio:
                # the ratio improved: accept the step
                actual_ratio = new_ratio
                self._ratios.append(actual_ratio)
                # print(i, 'update', actual_ratio)
                actual_weights = eta
            else:
                # overshot the minimum: revert the weights and shrink the step
                eta = actual_weights
                cstep /= 1.50
                continue
        self._steps = i + 1

        self.weights = np.array(eta)
        self.ker_matrix = summation(self.KL, self.weights)
        return self.ker_matrix
Example #43
    def solve_via_data(self,
                       data,
                       warm_start,
                       verbose,
                       solver_opts,
                       solver_cache=None):
        import cvxopt
        import cvxopt.solvers
        # Save original cvxopt solver options.
        old_options = cvxopt.solvers.options.copy()
        # Save old data in case need to use robust solver.
        data[s.DIMS] = dims_to_solver_dict(data[s.DIMS])
        # User chosen KKT solver option.
        kktsolver = self.get_kktsolver_opt(solver_opts)
        # Cannot have redundant rows unless using robust LDL kktsolver.
        if kktsolver != s.ROBUST_KKTSOLVER:
            # Will detect infeasibility.
            if self.remove_redundant_rows(data) == s.INFEASIBLE:
                return {s.STATUS: s.INFEASIBLE}
        # Convert A, b, G, h, c to CVXOPT matrices.
        data[s.C] = intf.dense2cvxopt(data[s.C])
        var_length = data[s.C].size[0]
        if data[s.A] is None:
            data[s.A] = np.zeros((0, var_length))
            data[s.B] = np.zeros((0, 1))
        data[s.A] = intf.sparse2cvxopt(data[s.A])
        data[s.B] = intf.dense2cvxopt(data[s.B])
        if data[s.G] is None:
            data[s.G] = np.zeros((0, var_length))
            data[s.H] = np.zeros((0, 1))
        data[s.G] = intf.sparse2cvxopt(data[s.G])
        data[s.H] = intf.dense2cvxopt(data[s.H])

        # Apply any user-specific options.
        # Silence the solver unless verbose output was requested.
        solver_opts["show_progress"] = verbose
        # Rename max_iters to maxiters.
        if "max_iters" in solver_opts:
            solver_opts["maxiters"] = solver_opts["max_iters"]
        for key, value in list(solver_opts.items()):
            cvxopt.solvers.options[key] = value

        # Always do 1 step of iterative refinement after solving KKT system.
        if "refinement" not in cvxopt.solvers.options:
            cvxopt.solvers.options["refinement"] = 1

        try:
            if kktsolver == s.ROBUST_KKTSOLVER:
                # Get custom kktsolver.
                kktsolver = get_kktsolver(data[s.G], data[s.DIMS], data[s.A])
            results_dict = cvxopt.solvers.conelp(data[s.C],
                                                 data[s.G],
                                                 data[s.H],
                                                 data[s.DIMS],
                                                 data[s.A],
                                                 data[s.B],
                                                 kktsolver=kktsolver)
        # Catch exceptions in CVXOPT and convert them to solver errors.
        except ValueError:
            results_dict = {"status": "unknown"}

        # Restore original cvxopt solver options.
        self._restore_solver_options(old_options)

        # Construct solution.
        solution = {}
        status = self.STATUS_MAP[results_dict['status']]
        solution[s.STATUS] = status
        if solution[s.STATUS] in s.SOLUTION_PRESENT:
            primal_val = results_dict['primal objective']
            solution[s.VALUE] = primal_val + data[s.OFFSET]
            solution[s.PRIMAL] = results_dict['x']
            solution[s.EQ_DUAL] = results_dict['y']
            solution[s.INEQ_DUAL] = results_dict['z']
            # Need to multiply duals by Q and P_leq.
            if "Q" in data:
                y = results_dict['y']
                # Test if all constraints eliminated.
                if y.size[0] == 0:
                    dual_len = data["Q"].size[0]
                    solution[s.EQ_DUAL] = cvxopt.matrix(0., (dual_len, 1))
                else:
                    solution[s.EQ_DUAL] = data["Q"] * y
            if "P_leq" in data:
                leq_len = data[s.DIMS][s.LEQ_DIM]
                P_rows = data["P_leq"].size[0]
                new_len = P_rows + solution[s.INEQ_DUAL].size[0] - leq_len
                new_dual = cvxopt.matrix(0., (new_len, 1))
                z = solution[s.INEQ_DUAL][:leq_len]
                # Test if all constraints eliminated.
                if z.size[0] == 0:
                    new_dual[:P_rows] = 0
                else:
                    new_dual[:P_rows] = data["P_leq"] * z
                new_dual[P_rows:] = solution[s.INEQ_DUAL][leq_len:]
                solution[s.INEQ_DUAL] = new_dual

            for key in [s.PRIMAL, s.EQ_DUAL, s.INEQ_DUAL]:
                solution[key] = intf.cvxopt2dense(solution[key])
        return solution
Example #44
def markwoitz_portfolio(rate_rets,
                        cov_mat,
                        exp_rets,
                        target_ret=0.0006,
                        allow_short=True,
                        lmin=0,
                        lmax=1):
    # matrices conversion : P = covariance
    n = len(cov_mat)
    P = opt.matrix(cov_mat)
    q = opt.matrix(0.0, (n, 1))

    # constraints Gx <= h
    if not allow_short:
        # exp_rets * x >= target_ret and lmin <= x <= lmax
        G = opt.matrix(
            np.vstack((-exp_rets.values, -np.identity(n), np.identity(n))))
        h = opt.matrix(
            np.vstack((-target_ret, -lmin + np.zeros((n, 1)), lmax + np.zeros(
                (n, 1)))))
    else:
        # exp_rets * x >= target_ret
        G = opt.matrix(-exp_rets.values).T
        h = opt.matrix(-target_ret)

    A = opt.matrix(1.0, (1, n))
    x = opt.matrix(1.0, (n, 1))
    b = opt.matrix(1.0)

    optsolvers.options['show_progress'] = False

    sol = optsolvers.qp(P, q, G, h, A, b)

    if sol['status'] != 'optimal':
        warnings.warn("Convergence problem")

    weights = pd.Series(sol['x'])
    ret = (opt.matrix(weights).T * opt.matrix(exp_rets))[0, 0]
    risk = np.sqrt(np.asmatrix(weights) * cov_mat * np.asmatrix(weights).T)

    return weights, ret, risk
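
A self-contained toy illustration of the same minimum-variance QP (long-only,
fully invested, expected return at least a target; the numbers are made up):

import numpy as np
from cvxopt import matrix, solvers

cov = np.array([[0.10, 0.02, 0.04],
                [0.02, 0.08, 0.01],
                [0.04, 0.01, 0.09]])
mu = np.array([0.05, 0.04, 0.06])
target, n = 0.045, 3
P = matrix(cov)
q = matrix(0.0, (n, 1))
G = matrix(np.vstack((-mu.reshape(1, -1), -np.eye(n))))  # -mu'x <= -target, -x <= 0
h = matrix(np.hstack((-target, np.zeros(n))))
A = matrix(1.0, (1, n))                                  # weights sum to one
b = matrix(1.0)
solvers.options['show_progress'] = False
w = np.ravel(solvers.qp(P, q, G, h, A, b)['x'])
print(w, w.dot(mu))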
Example #45

def __search(sync_net, ini, fin, cost_function, skip, ret_tuple_as_trans_desc=False,
             max_align_time_trace=sys.maxsize):
    start_time = time.time()

    decorate_transitions_prepostset(sync_net)
    decorate_places_preset_trans(sync_net)

    incidence_matrix = inc_mat_construct(sync_net)
    ini_vec, fin_vec, cost_vec = utils.__vectorize_initial_final_cost(incidence_matrix, ini, fin, cost_function)

    closed = set()

    a_matrix = np.asmatrix(incidence_matrix.a_matrix).astype(np.float64)
    g_matrix = -np.eye(len(sync_net.transitions))
    h_cvx = np.matrix(np.zeros(len(sync_net.transitions))).transpose()
    cost_vec = [x * 1.0 for x in cost_vec]

    use_cvxopt = False
    if lp_solver.DEFAULT_LP_SOLVER_VARIANT == lp_solver.CVXOPT_SOLVER_CUSTOM_ALIGN or lp_solver.DEFAULT_LP_SOLVER_VARIANT == lp_solver.CVXOPT_SOLVER_CUSTOM_ALIGN_ILP:
        use_cvxopt = True

    if use_cvxopt:
        # not available in the latest version of PM4Py
        from cvxopt import matrix

        a_matrix = matrix(a_matrix)
        g_matrix = matrix(g_matrix)
        h_cvx = matrix(h_cvx)
        cost_vec = matrix(cost_vec)

    h, x = utils.__compute_exact_heuristic_new_version(sync_net, a_matrix, h_cvx, g_matrix, cost_vec, incidence_matrix,
                                                       ini,
                                                       fin_vec, lp_solver.DEFAULT_LP_SOLVER_VARIANT,
                                                       use_cvxopt=use_cvxopt)
    ini_state = utils.SearchTuple(0 + h, 0, h, ini, None, None, x, True)
    open_set = [ini_state]
    heapq.heapify(open_set)
    visited = 0
    queued = 0
    traversed = 0
    lp_solved = 1

    trans_empty_preset = set(t for t in sync_net.transitions if len(t.in_arcs) == 0)

    while not len(open_set) == 0:
        if (time.time() - start_time) > max_align_time_trace:
            return None

        curr = heapq.heappop(open_set)

        current_marking = curr.m
        # 11/10/2019 (optimization Y, formerly optimization X, and for good
        # reason this way): avoid checking markings in the cycle using the
        # __get_alt function; check them 'on the road' instead
        already_closed = current_marking in closed
        if already_closed:
            continue

        while not curr.trust:
            if (time.time() - start_time) > max_align_time_trace:
                return None
            h, x = utils.__compute_exact_heuristic_new_version(sync_net, a_matrix, h_cvx, g_matrix, cost_vec,
                                                               incidence_matrix, curr.m,
                                                               fin_vec, lp_solver.DEFAULT_LP_SOLVER_VARIANT,
                                                               use_cvxopt=use_cvxopt)
            lp_solved += 1

            # 11/10/19: shall not a state for which we compute the exact heuristics be
            # by nature a trusted solution?
            tp = utils.SearchTuple(curr.g + h, curr.g, h, curr.m, curr.p, curr.t, x, True)
            # 11/10/2019 (optimization ZA) heappushpop is slightly more efficient than pushing
            # and popping separately
            curr = heapq.heappushpop(open_set, tp)
            current_marking = curr.m

        # max allowed heuristics value (27/10/2019, due to the numerical instability of some of our solvers)
        if curr.h > lp_solver.MAX_ALLOWED_HEURISTICS:
            continue

        # 12/10/2019: do it again, since the marking could be changed
        already_closed = current_marking in closed
        if already_closed:
            continue

        # 12/10/2019: the current marking can be equal to the final marking only if the heuristics
        # (underestimation of the remaining cost) is 0. Low-hanging fruits
        if curr.h < 0.01:
            if current_marking == fin:
                return utils.__reconstruct_alignment(curr, visited, queued, traversed,
                                                     ret_tuple_as_trans_desc=ret_tuple_as_trans_desc,
                                                     lp_solved=lp_solved)

        closed.add(current_marking)
        visited += 1

        possible_enabling_transitions = copy(trans_empty_preset)
        for p in current_marking:
            for t in p.ass_trans:
                possible_enabling_transitions.add(t)

        enabled_trans = [t for t in possible_enabling_transitions if t.sub_marking <= current_marking]

        trans_to_visit_with_cost = [(t, cost_function[t]) for t in enabled_trans if not (
                t is not None and utils.__is_log_move(t, skip) and utils.__is_model_move(t, skip))]

        for t, cost in trans_to_visit_with_cost:
            traversed += 1
            new_marking = utils.add_markings(current_marking, t.add_marking)

            if new_marking in closed:
                continue
            g = curr.g + cost

            queued += 1
            h, x = utils.__derive_heuristic(incidence_matrix, cost_vec, curr.x, t, curr.h)
            trustable = utils.__trust_solution(x)
            new_f = g + h

            tp = utils.SearchTuple(new_f, g, h, new_marking, curr, t, x, trustable)
            heapq.heappush(open_set, tp)
Example #46
# objective = cvx.Minimize( sum(cvx.square(P - Z)) )
# constr = [cvx.constraints.semi_definite.SDP(P)]
# prob = cvx.Problem(objective, constr)
# prob.solve()

import cvxpy as cp
import numpy as np
import cvxopt

# create data P
P = cp.Parameter(3,3)
Z = cp.SDPVar(3,3)

objective = cp.Minimize( cp.lambda_max(P) - cp.lambda_min(P - Z) )
prob = cp.Problem(objective, 10*[Z >= 0])
P.value = cvxopt.matrix(np.matrix('4 1 3; 1 3.5 0.8; 3 0.8 1'))
prob.solve()



# P = [ 4,   1+2*j,     3-j       ; ...
#       1-2*j, 3.5,       0.8+2.3*j ; ...
#       3+j,   0.8-2.3*j, 4         ];
# 
# % Construct and solve the model
# n = size( P, 1 );
# cvx_begin sdp
#     variable Z(n,n) hermitian toeplitz
#     dual variable Q
#     minimize( norm( Z - P, 'fro' ) )
#     Z >= 0 : Q;
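
The snippet above targets a pre-1.0 cvxpy API (cp.Parameter(3, 3),
cp.SDPVar). A hedged sketch of the same problem under cvxpy >= 1.0, assuming
an SDP-capable solver such as SCS is installed:

import cvxpy as cp
import numpy as np

P = np.array([[4., 1., 3.], [1., 3.5, 0.8], [3., 0.8, 1.]])
Z = cp.Variable((3, 3), PSD=True)      # Z is symmetric positive semidefinite
objective = cp.Minimize(cp.lambda_max(P) - cp.lambda_min(P - Z))
prob = cp.Problem(objective)
prob.solve()
print(Z.value)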
Example #47
File: tds.py  Project: willeforce/andes
    def run(self, **kwargs):
        """
        Run time domain simulation

        Returns
        -------
        bool
            Success flag
        """

        # check if initialized
        if not self.initialized:
            if self.init() is False:
                logger.info('Call to TDS initialization failed in `tds.run()`.')

        ret = False
        system = self.system
        config = self.config
        dae = self.system.dae

        # maxit = config.maxit
        # tol = config.tol

        if system.pflow.solved is False:
            logger.warning('Power flow not solved. Simulation cannot continue.')
            return ret
        t0, _ = elapsed()
        t1 = t0

        self.streaming_init()

        logger.info('')
        logger.info('-> Time Domain Simulation: {} method, t={} s'
                    .format(self.config.method, self.config.tf))

        self.load_pert()

        self.run_step0()

        config.qrtstart = time()

        while self.t < config.tf:
            self.check_fixed_times()
            self.calc_time_step()

            if self.callpert is not None:
                self.callpert(self.t, self.system)

            if self.h == 0:
                break
            # progress time and set time in dae
            self.t += self.h
            dae.t = self.t

            # backup actual variables
            self.x0 = matrix(dae.x)
            self.y0 = matrix(dae.y)
            self.f0 = matrix(dae.f)

            # apply fixed_time interventions and perturbations
            self.event_actions()

            # reset flags used in each step
            self.err = 1
            self.niter = 0
            self.convergence = False

            self.implicit_step()

            if self.convergence is False:
                try:
                    self.restore_values()
                    continue
                except ValueError:
                    self.t = config.tf
                    ret = False
                    break

            self.step += 1
            self.compute_flows()
            system.varout.store(self.t, self.step)
            self.streaming_step()

            # plot variables and display iteration status
            perc = max(min((self.t - config.t0) / (config.tf - config.t0) * 100, 100), 0)

            # show iteration info every 30 seconds or every 20%

            t2, _ = elapsed(t1)
            if t2 - t1 >= 30:
                t1 = t2
                logger.info(' ({:.0f}%) time = {:.4f}s, step = {}, niter = {}'
                            .format(100 * self.t / config.tf, self.t, self.step,
                                    self.niter))

            if perc > self.next_pc or self.t == config.tf:
                self.next_pc += 20
                logger.info(' ({:.0f}%) time = {:.4f}s, step = {}, niter = {}'
                            .format(100 * self.t / config.tf, self.t, self.step, self.niter))

            # compute max rotor angle difference
            # diff_max = anglediff()

            # quasi-real-time check and wait
            rt_end = config.qrtstart + (self.t - config.t0) * config.kqrt

            if config.qrt:
                # the ending time has passed
                if time() - rt_end > 0:
                    # simulation is too slow
                    if time() - rt_end > config.kqrt:
                        logger.debug('Simulation over-run at t={:4.4g} s.'.format(self.t))
                # wait to finish
                else:
                    self.headroom += (rt_end - time())
                    while time() - rt_end < 0:
                        sleep(1e-5)

        if config.qrt:
            logger.debug('RT headroom time: {} s.'.format(str(self.headroom)))

        if self.t != config.tf:
            logger.error('Reached minimum time step. Convergence is not likely.')
            ret = False
        else:
            ret = True

        if system.config.dime_enable:
            system.streaming.finalize()

        _, s = elapsed(t0)

        if ret is True:
            logger.info(' Time domain simulation finished in {:s}.'.format(s))
        else:
            logger.info(' Time domain simulation failed in {:s}.'.format(s))

        self.success = ret
        self.dump_results(success=self.success)

        return ret
Example #48
    svm.train_dual()

    delta = 0.07
    x = np.arange(-4.0, 4.0, delta)
    y = np.arange(-4.0, 4.0, delta)
    X, Y = np.meshgrid(x, y)
    (sx, sy) = X.shape
    Xf = np.reshape(X, (1, sx * sy))
    Yf = np.reshape(Y, (1, sx * sy))
    Dtest = np.append(Xf, Yf, axis=0)
    print(Dtest.shape)

    # build test kernel
    kernel = Kernel.get_kernel(co.matrix(Dtest),
                               Dtrain[:,
                                      svm.get_support_dual()], ktype, kparam)

    (res, state) = svm.apply_dual(kernel)
    print(res.size)

    Z = np.reshape(res, (sx, sy))
    plt.contourf(X, Y, Z)
    plt.contour(X, Y, Z, [np.array(svm.get_threshold())[0, 0]])
    plt.scatter(Dtrain[0, svm.get_support_dual()],
                Dtrain[1, svm.get_support_dual()],
                40,
                c='k')
    plt.scatter(Dtrain[0, :], Dtrain[1, :], 10)
    plt.show()
Example #49
    tempwhole = np.matmul(np.matmul(yarrsqarg, tempwhole), yarrsqarg)
    #     if(debuggausssion): print(numtrainarg)
    #     if(debuggausssion): print(tempwhole.shape)
    return tempwhole


starttime = time.time()
for x in range(10):
    for y in range(10):
        if (x >= y):
            continue
        print("x is " + str(x) + " y is " + str(y))
        classindex = classtoindex[10 * x + y]
        numtrainthis = numtrainclass[classindex]
        yarrsq = np.diag(yarrclass[classindex])
        P = matrix(findPGaussian(xarrclass[classindex], numtrainthis, yarrsq))
        q = np.zeros((numtrainthis, 1))
        for z in range(numtrainthis):
            q[z][0] = -1.0
        q = matrix(q)
        G = np.zeros((2 * numtrainthis, numtrainthis))
        for z in range(numtrainthis):
            G[z][z] = -1.0
            G[numtrainthis + z][z] = 1.0
        G = matrix(G)
        h = np.zeros((2 * numtrainthis, 1))
        for z in range(numtrainthis):
            h[z][0] = 0.0
            h[numtrainthis + z][0] = C
        h = matrix(h)
        A = np.zeros((1, numtrainthis))
Example #50
File: tds.py  Project: willeforce/andes
    def implicit_step(self):
        """
        Integrate one step using trapezoidal method. Sets convergence and niter flags.

        Returns
        -------
        None
        """
        config = self.config
        system = self.system
        dae = self.system.dae

        # constant short names
        In = spdiag([1] * dae.n)
        h = self.h

        while self.err > config.tol and self.niter < config.maxit:
            if self.t - self.t_jac >= 5:
                dae.rebuild = True
                self.t_jac = self.t
            elif self.niter > 4:
                dae.rebuild = True
            elif dae.factorize:
                dae.rebuild = True
            elif self.config.honest is True:
                dae.rebuild = True

            # rebuild Jacobian
            if dae.rebuild:
                dae.factorize = True
                system.call.int()
                dae.rebuild = False
            else:
                system.call.int_fg()

            # complete Jacobian matrix dae.Ac
            if config.method == 'euler':
                dae.Ac = sparse(
                    [[In - h * dae.Fx, dae.Gx], [-h * dae.Fy, dae.Gy]],
                    'd')
                dae.q = dae.x - self.x0 - h * dae.f

            elif config.method == 'trapezoidal':
                dae.Ac = sparse([[In - h * 0.5 * dae.Fx, dae.Gx],
                                 [-h * 0.5 * dae.Fy, dae.Gy]], 'd')
                dae.q = dae.x - self.x0 - h * 0.5 * (dae.f + self.f0)

            # windup limiters
            dae.reset_Ac()

            if dae.factorize:
                try:
                    self.F = self.solver.symbolic(dae.Ac)
                    dae.factorize = False
                except NotImplementedError:
                    pass

            self.inc = -matrix([dae.q, dae.g])

            try:
                N = self.solver.numeric(dae.Ac, self.F)
                self.solver.solve(dae.Ac, self.F, N, self.inc)
            except ArithmeticError:
                logger.error('Singular matrix')
                dae.check_diag(dae.Gy, 'unamey')
                dae.check_diag(dae.Fx, 'unamex')
                # force quit
                self.niter = config.maxit + 1
                break
            except ValueError:
                logger.warning('Unexpected symbolic factorization')
                dae.factorize = True
                continue
            except NotImplementedError:
                self.inc = self.solver.linsolve(dae.Ac, self.inc)

            inc_x = self.inc[:dae.n]
            inc_y = self.inc[dae.n:dae.m + dae.n]
            dae.x += inc_x
            dae.y += inc_y

            self.err = max(abs(self.inc))
            if np.isnan(self.inc).any():
                logger.error('Iteration error: NaN detected.')
                self.niter = config.maxit + 1
                break

            self.niter += 1

        if self.niter <= config.maxit:
            self.convergence = True
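
A self-contained sketch of the same trapezoidal Newton iteration on the
scalar ODE x' = -x (no algebraic equations), mirroring the roles of dae.q
and dae.Ac above:

def f(x):
    return -x

x0, h = 1.0, 0.1
x = x0
for _ in range(10):
    q = x - x0 - h * 0.5 * (f(x) + f(x0))   # residual, as in dae.q
    Ac = 1.0 - h * 0.5 * (-1.0)             # Jacobian, as in In - h/2 * Fx
    inc = -q / Ac
    x += inc
    if abs(inc) < 1e-12:
        break
print(x)   # ~0.904762 = (1 - h/2) / (1 + h/2), the exact trapezoidal step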
Example #51

def hard_threshold_lstsq_solve(A, b, l2=0, C=None, d=None, x0=None, opts=None, l1=0.01):

    '''
    Basic CVX-based solver for l2-regularized, sequentially hard-thresholded
    least-squares problems. The corresponding minimization problem reads

        minimize   0.5 * || Ax - b ||^2_2 + l2 * || x ||^2_2
            x

        subject to  Cx = d (equality constraints)

    where sparsity of the solution vector x is imposed by sequentially
    hard-thresholding the solution. See Brunton et al. (PNAS, 2016) for more
    details.

    This function serves as a wrapper around the CVXOPT QP solver.

    Inputs:
    ------
        A  : A m x n dense or sparse matrix or numpy array.
        b  : a m x 1 vector or numpy array.
        C  : A p x n dense or sparse matrix or numpy array.
        d  : A p x 1 vector or numpy array.
        l2 : Weight of the Tikhonov (ridge) regularization.
        l1 : Relative threshold below which entries of x are hard-thresholded
             to zero at each pass.

        (optional) x0 : A n x 1 vector corresponding to the initial guess.
        (optional) opts : A dictionary of options to be passed to CVX.

    Outputs:
    -------
        x : The (sparse) solution vector, as a numpy array.
    '''
    # --> Convert the matrices to CVX formats.
    A = numpy_to_cvxopt_matrix(A)
    b = numpy_to_cvxopt_matrix(b)
    C = numpy_to_cvxopt_matrix(C)
    d = numpy_to_cvxopt_matrix(d)

    # --> Sanity checks for the inputs dimensions.
    if len(A.size) != 2:
        warnings.warn('The input A is not a matrix, nor a two-dimensional \
        numpy array. It is transformed into a one-dimensional column vector.')
    else:
        m, n = A.size

    if b.size[0] != m:
        raise LinAlgError('A and b do not have the same number of rows.')

    if C is not None and d is None:
        raise ValueError('Matrix C has been given but not vector d. Please provide d.')
    if C is None and d is not None:
        raise ValueError('Vector d has been given but not matrix C. Please provide C.')

    if C is not None:
        k, l = C.size
        if l != n:
            raise LinAlgError('A and C do not have the same number of columns.')

        if d.size[0] != k:
            raise LinAlgError('C and d do not have the same number of rows.')

    # --> Check whether A is sparse or not.
    if sparse.issparse(A):
        sparse_case = True
    else:
        sparse_case = False

    # --> Sets up the problem.
    if m != n:
        P = A.T * A
        q = -A.T * b
    else:
        # Test if matrix is symmetric positive definite.
        spd = is_pos_def(A)
        if spd is True:
            P = A
            q = -b
        else:
            P = A.T * A
            q = -A.T * b

    # --> Ridge regularization if needed.
    if l2 < 0:
        raise ValueError('The l2-regularization weight cannot be negative.')
    if l2 > 0:
        nvars = A.size[1]
        if sparse_case is True:
            I = scipy_sparse_to_spmatrix(sparse.eye(nvars, nvars, format='coo'))
        else:
            I = matrix(np.eye(nvars), (nvars, nvars), 'd')
        P = P + l2 * I

    # --> Run the CVXOPT Quadratic Programming solver.
    #     First pass: Only the l2-penalization is accounted for.
    #     (initvals must be passed by keyword: the seventh positional argument
    #     of solvers.qp is `solver`, not an initial guess.)
    init = None if x0 is None else {'x': matrix(x0)}
    output = solvers.qp(P, q, None, None, C, d, initvals=init)['x']
    x = np.asarray(output).squeeze()

    # -->  Get the indices of the non-zero regressors.
    #TODO: Implement a simple convergence test to avoid unnecessary computations.
    for i in range(5):
        xmax = abs(x[np.nonzero(x)]).mean()
        I = [k for k in range(n) if abs(x[k]) > l1*xmax]
        if C is None:
            output = solvers.qp(P[I, I], q[I],
                                initvals={'x': matrix(x[I])})['x']
        else:
            output = solvers.qp(P[I, I], q[I], None, None, C[:, I], d,
                                initvals={'x': matrix(x[I])})['x']
        coef = np.asarray(output).squeeze()
        x[:] = 0.
        x[I] = coef

    return x
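
A hedged usage sketch (it assumes the helpers referenced above --
numpy_to_cvxopt_matrix, is_pos_def, scipy_sparse_to_spmatrix -- are defined
in the same module): recovering a sparse coefficient vector from noisy data.

import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((100, 20))
x_true = np.zeros(20)
x_true[[2, 7, 15]] = [1.5, -2.0, 0.8]
b = A @ x_true + 0.01 * rng.standard_normal(100)
x_est = hard_threshold_lstsq_solve(A, b, l2=1e-6, l1=0.1)
print(np.nonzero(x_est)[0])   # expected: indices close to [2, 7, 15]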
Example #52
    def tip_of_stretched_cone(self,
                              c,
                              backend="all",
                              check=True,
                              constraint_error_tol=1e-4,
                              verbose=0):
        """
        **Description:**
        Finds the tip of the stretched cone via quadratic programming. The
        stretched cone is defined as the convex polyhedral region inside the
        cone that is at least a distance `c` from any of its defining
        hyperplanes.

        **Arguments:**
        - `c` *(float)*: A real positive number specifying the stretching
          of the cone (i.e. the minimum distance to the defining hyperplanes).
        - `backend` *(str, optional, default="all")*: String that
          specifies the optimizer to use. Options are "all", "mosek", and
          "cvxopt".
        - `check` *(bool, optional, default=True)*: Flag that specifies
          whether to check if the output of the optimizer is consistent and
          satisfies constraint_error_tol.
        - `constraint_error_tol` *(float, optional, default=1e-4)*: Error
          tolerance for the linear constraints.
        - `verbose` *(int, optional, default=0)*: The verbosity level.
          - verbose = 0: Do not print anything.
          - verbose = 1: Print warnings when optimizers fail.

        **Returns:**
        *(numpy.ndarray)* The vector specifying the location of the tip.

        **Example:**
        We construct two cones and find the locations of the tips of the
        stretched cones.
        ```python {3,5}
        c1 = Cone([[1,0],[0,1]])
        c2 = Cone([[3,2],[5,3]])
        c1.tip_of_stretched_cone(1)
        # array([1., 1.])
        c2.tip_of_stretched_cone(1)
        # array([7.99999991, 4.99999994])
        ```
        """
        backends = ["all", "mosek", "cvxopt"]
        if backend not in backends:
            raise Exception("Invalid backend. "
                            f"The options are: {backends}.")
        if backend == "all":
            for b in backends[1:]:
                if b == "mosek" and not config.mosek_is_activated():
                    continue
                solution = self.tip_of_stretched_cone(
                    c,
                    backend=b,
                    check=check,
                    constraint_error_tol=constraint_error_tol,
                    verbose=verbose)
                if solution is not None:
                    return solution
            raise Exception("All available quadratic programming backends "
                            "have failed.")
        if backend == "mosek" and not config.mosek_is_activated():
            raise Exception("Mosek is not activated. See the website for how "
                            "to activate it.")
        hp = self.hyperplanes()
        optimization_done = False
        ## The problem is defined as:
        ## Minimize (1/2) x.Q.x + p.x
        ## Subject to G.x <= h
        Q = 2 * np.identity(hp.shape[1], dtype=float)
        p = np.zeros(hp.shape[1], dtype=float)
        h = np.full(hp.shape[0], (-c, ), dtype=float)
        G = -1 * hp.astype(dtype=float)
        Q_cvxopt = cvxopt.matrix(Q)
        p_cvxopt = cvxopt.matrix(p)
        h_cvxopt = cvxopt.matrix(h)
        G_cvxopt = cvxopt.matrix(G)
        if backend == "mosek":
            cvxopt.solvers.options["mosek"] = {
                mosek.iparam.num_threads: 1,
                mosek.iparam.log: 0
            }
        else:
            cvxopt.solvers.options["abstol"] = 1e-4
            cvxopt.solvers.options["reltol"] = 1e-4
            cvxopt.solvers.options["feastol"] = 1e-2
            cvxopt.solvers.options["maxiters"] = 1000
            cvxopt.solvers.options["show_progress"] = False
        try:
            solution = cvxopt.solvers.qp(
                Q_cvxopt,
                p_cvxopt,
                G_cvxopt,
                h_cvxopt,
                solver=("mosek" if backend == "mosek" else None))
            assert solution["status"] == "optimal"
        except Exception:
            # `solution` may be unbound here if qp() itself raised, so do not
            # reference solution['status']
            if verbose >= 1:
                print(f"Quadratic programming error: {backend} failed.")
        else:
            optimization_done = True
            solution_x = [x[0] for x in np.array(solution["x"]).tolist()]
            solution_val = solution["primal objective"]
        if optimization_done and check:
            res = max(np.dot(G, solution_x)) + c
            if res > constraint_error_tol or solution_val < 0:
                optimization_done = False
                if verbose >= 1:
                    print("Quadratic programming error: Large numerical "
                          "error. Try raising constraint_error_tol, or "
                          "using a different backend")
        if optimization_done:
            return np.array(solution_x)
Example #53
    def solve(self, solver=None):
        """Solves the LP.

    Args:
      solver: the solver to use ('blas', 'lapack', 'glpk'). Defaults to None,
        which then uses the cvxopt internal default.

    Returns:
      The solution as a dict of var label -> value, one for each variable.
    """
        # From http://cvxopt.org/userguide/coneprog.html#linear-programming,
        # CVXOPT uses the formulation:
        #    minimize: c^t x
        #       s.t.   Gx <= h
        #              Ax = b
        #
        # Here:
        #  - x is the vector of variables
        #  - c is the vector of objective coefficients
        #  - G is the matrix of LEQ (and GEQ) constraint coefficients
        #  - h is the vector of right-hand side values of the LEQ/GEQ constraints
        #  - A is the matrix of equality constraint coefficients
        #  - b is the vector of right-hand side values of the equality constraints
        #
        # This function builds these sparse matrices from the information it has
        # gathered, flipping signs where necessary, and adding equality constraints
        # for the upper and lower bounds of variables. It then calls the cvxopt
        # solver and maps back the values.
        num_vars = len(self._var_list)
        num_eq_cons = len(self._eq_cons_list)
        num_leq_cons = len(self._leq_cons_list)
        for var in self._var_list:
            if var.lb is not None:
                num_leq_cons += 1
            if var.ub is not None:
                num_leq_cons += 1
        # Make the matrices (some need to be dense).
        c = cvxopt.matrix([0.0] * num_vars)
        h = cvxopt.matrix([0.0] * num_leq_cons)
        g_mat = cvxopt.spmatrix([], [], [], (num_leq_cons, num_vars))
        a_mat = None
        b = None
        if num_eq_cons > 0:
            a_mat = cvxopt.spmatrix([], [], [], (num_eq_cons, num_vars))
            b = cvxopt.matrix([0.0] * num_eq_cons)
        # Objective coefficients: c
        for var_label in self._obj_coeffs:
            value = self._obj_coeffs[var_label]
            vid = self._vars[var_label].vid
            if self._objective == OBJ_MAX:
                c[vid] = -value  # negate the value because it's a max
            else:
                c[vid] = value  # min objective matches cvxopt
        # Inequality constraints: G, h
        row = 0
        for cons in self._leq_cons_list:
            # If it's >= then need to negate all coeffs and the rhs
            if cons.rhs is not None:
                h[row] = cons.rhs if cons.ctype == CONS_TYPE_LEQ else -cons.rhs
            for var_label in cons.coeffs:
                value = cons.coeffs[var_label]
                vid = self._vars[var_label].vid
                g_mat[(row,
                       vid)] = value if cons.ctype == CONS_TYPE_LEQ else -value
            row += 1
        # Inequality constraints: variables upper and lower bounds
        for var in self._var_list:
            if var.lb is not None:  # x_i >= lb has to be -x_i <= -lb
                g_mat[(row, var.vid)] = -1.0
                h[row] = -var.lb
                row += 1
            if var.ub is not None:  # x_i <= ub
                g_mat[(row, var.vid)] = 1.0
                h[row] = var.ub
                row += 1
        # Equality constraints: A, b
        if num_eq_cons > 0:
            row = 0
            for cons in self._eq_cons_list:
                b[row] = cons.rhs if cons.rhs is not None else 0.0
                for var_label in cons.coeffs:
                    value = cons.coeffs[var_label]
                    vid = self._vars[var_label].vid
                    a_mat[(row, vid)] = value
                row += 1
        # Solve!
        if num_eq_cons > 0:
            sol = cvxopt.solvers.lp(c, g_mat, h, a_mat, b, solver=solver)
        else:
            sol = cvxopt.solvers.lp(c, g_mat, h, solver=solver)
        return sol["x"]
Example #54

def lstsq_solve(A, b, l2=0, C=None, d=None, x0=None, opts=None):

    '''
    Basic CVX-based solver for l2-regularized linear least-squares problems.
    The corresponding minimization problem reads

        minimize   0.5 * || Ax - b ||^2_2 + l2 * || x ||^2_2
            x

        subject to  Cx = d (equality constraints)
                    Gx <= h (linear inequalities)

    This function serves as a wrapper around the CVXOPT QP solver. Note that
    the Gx <= h linear inequalities have not been implemented yet.

    Inputs:
    ------
        A  : A m x n dense or sparse matrix or numpy array.
        b  : a m x 1 vector or numpy array.
        C  : A p x n dense or sparse matrix or numpy array.
        d  : A p x 1 vector or numpy array.
        l2 : Weight of the Tikhonov (ridge) regularization.

        (optional) x0 : A n x 1 vector corresponding to the initial guess.
        (optional) opts : A dictionary of options to be passed to CVX.

    Outputs:
    -------
        x : The solution vector, as a numpy array.
    '''
    # --> Convert the matrices to CVX formats.
    A = numpy_to_cvxopt_matrix(A)
    b = numpy_to_cvxopt_matrix(b)
    C = numpy_to_cvxopt_matrix(C)
    d = numpy_to_cvxopt_matrix(d)

    # --> Sanity checks for the inputs dimensions.
    if len(A.size) != 2:
        warnings.warn('The input A is not a matrix, nor a two-dimensional \
        numpy array. It is transformed into a one-dimensional column vector.')
    else:
        m, n = A.size

    if b.size[0] != m:
        raise LinAlgError('A and b do not have the same number of rows.')

    if C is not None and d is None:
        raise ValueError('Matrix C has been given but not vector d. Please provide d.')
    if C is None and d is not None:
        raise ValueError('Vector d has been given but not matrix C. Please provide C.')

    if C is not None:
        k, l = C.size
        if l != n:
            raise LinAlgError('A and C do not have the same number of columns.')

        if d.size[0] != k:
            raise LinAlgError('C and d do not have the same number of rows.')

        if np.linalg.matrix_rank(C.T) < k:
            raise LinAlgError('C.T is rank-deficient. The svd subset selection procedure has not yet been implemented.')

    # --> Check whether A is sparse or not.
    if sparse.issparse(A):
        sparse_case = True
    else:
        sparse_case = False

    # --> Sets up the problem.
    if m != n:
        P = A.T * A
        q = -A.T * b
    else:
        # Test if matrix is symmetric positive definite.
        spd = is_pos_def(A)
        if spd is True:
            P = A
            q = -b
        else:
            P = A.T * A
            q = -A.T * b

    # --> Ridge regularization if needed.
    if l2 < 0:
        raise ValueError('The l2-regularization weight cannot be negative.')
    if l2 > 0:
        nvars = A.size[1]
        if sparse_case is True:
            I = scipy_sparse_to_spmatrix(sparse.eye(nvars, nvars, format='coo'))
        else:
            I = matrix(np.eye(nvars), (nvars, nvars), 'd')
        P = P + l2 * I

    # --> Run the CVXOPT Quadratic Programming solver.
    #     (initvals must be passed by keyword: the seventh positional argument
    #     of solvers.qp is `solver`, not an initial guess.)
    init = None if x0 is None else {'x': matrix(x0)}
    output = solvers.qp(P, q, None, None, C, d, initvals=init)['x']
    x = np.asarray(output).squeeze()

    return x
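
A hedged usage sketch under the same assumptions about the module's helpers:
an equality-constrained ridge least-squares fit whose coefficients must sum
to one.

import numpy as np

rng = np.random.default_rng(1)
A = rng.standard_normal((50, 5))
b = rng.standard_normal(50)
C = np.ones((1, 5))
d = np.array([1.0])
x = lstsq_solve(A, b, l2=1e-3, C=C, d=d)
print(x, x.sum())   # x.sum() should be ~1.0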
Example #55
    def wrench_in_positive_span(wrench_basis,
                                target_wrench,
                                force_limit,
                                num_fingers=1,
                                wrench_norm_thresh=1e-4,
                                wrench_regularizer=1e-10):
        """ Check whether a target can be exerted by positive combinations of wrenches in a given basis with L1 norm fonger force limit limit.

        Parameters
        ----------
        wrench_basis : 6xN :obj:`numpy.ndarray`
            basis for the wrench space
        target_wrench : 6x1 :obj:`numpy.ndarray`
            target wrench to resist
        force_limit : float
            L1 upper bound on the forces per finger (aka contact point)
        num_fingers : int
            number of contacts, used to enforce L1 finger constraint
        wrench_norm_thresh : float
            threshold to use to determine equivalence of target wrenches
        wrench_regularizer : float
            small float to make quadratic program positive semidefinite

        Returns
        -------
        int
            whether or not wrench can be resisted
        float
            minimum norm of the finger forces required to resist the wrench
        """
        num_wrenches = wrench_basis.shape[1]

        # quadratic and linear costs
        P = wrench_basis.T.dot(
            wrench_basis) + wrench_regularizer * np.eye(num_wrenches)
        q = -wrench_basis.T.dot(target_wrench)

        # inequalities
        lam_geq_zero = -1 * np.eye(num_wrenches)

        num_wrenches_per_finger = num_wrenches // num_fingers  # integer division: used below for slicing
        force_constraint = np.zeros([num_fingers, num_wrenches])
        for i in range(num_fingers):
            start_i = num_wrenches_per_finger * i
            end_i = num_wrenches_per_finger * (i + 1)
            force_constraint[i,
                             start_i:end_i] = np.ones(num_wrenches_per_finger)

        G = np.r_[lam_geq_zero, force_constraint]
        h = np.zeros(num_wrenches + num_fingers)
        for i in range(num_fingers):
            h[num_wrenches + i] = force_limit

        # convert to cvx and solve
        P = cvx.matrix(P)
        q = cvx.matrix(q)
        G = cvx.matrix(G)
        h = cvx.matrix(h)
        sol = cvx.solvers.qp(P, q, G, h)
        v = np.array(sol['x'])

        min_dist = np.linalg.norm(wrench_basis.dot(v).ravel() -
                                  target_wrench)**2

        # add back in the target wrench
        return min_dist < wrench_norm_thresh, np.linalg.norm(v)
Example #56
    def weights(self):
        """
            Given asset returns (as a column vector), sigmas (standard deviations, as a column
            vector), the correlation matrix, and either a target expected return for the
            portfolio or a risk-aversion coefficient, return the optimal weights of (the
            riskless asset [if it exists] and) the risky assets.

            If constraints exist, input lower as a column vector of the lower bounds of the
            weights of the risky assets, and upper as a column vector of the upper bounds.

            Here we impose no constraints on the riskless asset.
        """

        # solvers.qp(P,q,G,h,A,b)
        # minimize 1/2*x.T@P@x + q.T@x
        #      s.t G@x <= h
        #          A@x =  b

        if self.rateRiskFree is None:
            nAssets = self.nRiskAssets
        else:
            nAssets = self.nRiskAssets + 1

        # optimize given target return
        if self._mode == 'target return':

            if self.rateRiskFree is None:

                P = matrix(self.covMat)

                A = matrix(
                    np.vstack(
                        (np.ones(nAssets).reshape(1,
                                                  -1), self.assetReturns.T)))

            else:

                P = np.diag(np.zeros(nAssets))
                P[1:, 1:] = self.covMat
                P = matrix(P)

                A = matrix(
                    np.vstack(
                        (np.ones(nAssets).reshape(1, -1),
                         np.hstack(
                             ([[self.rateRiskFree]], self.assetReturns.T)))))

            q = matrix(0.0, (nAssets, 1))
            b = matrix([1.0, self.targetReturn])

        # optimize given risk aversion
        elif self._mode == 'risk aversion':

            if self.rateRiskFree is None:

                P = matrix(self.riskAversion * self.covMat)

                q = matrix(-self.assetReturns)

            else:

                P = np.diag(np.zeros(nAssets))
                P[1:, 1:] = self.covMat
                P = matrix(self.riskAversion * P)

                q = matrix(
                    np.vstack(([[-self.rateRiskFree]], -self.assetReturns)))

            A = matrix(np.ones(nAssets).reshape(1, -1))
            b = matrix([1.0])

        else:

            raise NameError(
                "mode can only be `risk aversion` or `target return`")

        # if no constraints
        if self.lowerBound is None and self.upperBound is None:
            sol = solvers.qp(P, q, None, None, A, b)
            return np.array(sol['x'])

        # if constraints
        if self.lowerBound is None:
            G = matrix(np.eye(nAssets))
            h = matrix(self.upperBound)

        elif self.upperBound is None:
            G = matrix(-np.eye(nAssets))
            h = matrix(-self.lowerBound)  # x >= lb  <=>  -x <= -lb

        else:
            G = matrix(np.vstack((np.eye(nAssets), -np.eye(nAssets))))
            h = matrix(np.vstack((self.upperBound, -self.lowerBound)))

        sol = solvers.qp(P, q, G, h, A, b)
        return np.array(sol['x'])
Example #57

    def fit(self, training_set_inputs, training_set_outputs, routine = 'quadprog', verbose = False):
        if verbose:
            print()
            print("Building the SVM model...")

        classes = set(list(np.asfarray(training_set_outputs).flatten()))
        if len(classes) < 3:
            # Reinitializing some attributes to 0
            self.training_time = 0
            self.NbrIterations = 0
            # Input data
            X = training_set_inputs
            # Output data
            Y = training_set_outputs
            N = X.shape[0]
            '''
            the matrix form of the soft margins dual SVM problem we wish to solve is the following:
                                    1
                        min Γ(λ) = --- * λ.T * Y * K(X, X.T) * Y * λ - 1.T * λ
                         λ          2
                 subject to                 Y.T * λ = 0
                                            λ >= 0
                                            λ <= C
            '''
            start_time = time.time()
            # let's compute the kernel matrix
            K = np.zeros((N, N))
            for i in range(N):
                for j in range(N):
                    K[i, j] = self.__kernel(X[i], X[j])

            if routine == 'cvxopt':
                '''
                The routine offers us the following interface for solving quadratic optimization problems:
                                  1
                            min  --- * x.T * P * x + q.T * x
                             x    2
                     subject to          Gx <= h
                                         Ax = b

                Let's put the soft margin dual problem in this format in order to use the cvxopt routine to solve it.
                '''
                # mapping our quadratic dual problem to the stardard form of the "cvxopt" tool
                # for that we need to construct P, q, A, b, G, h matrices as follows
                P = cvxopt.matrix(Y * K * Y)
                q = cvxopt.matrix(-np.ones(N))
                G = cvxopt.matrix(np.vstack((np.diag(np.ones(N) * -1), np.identity(N))))
                h = cvxopt.matrix(np.hstack((np.zeros(N), np.ones(N) * self.C)))
                A = cvxopt.matrix(Y.astype('d'), (1, N))
                b = cvxopt.matrix(0.0)

                cvxopt.solvers.options['show_progress'] = bool(verbose)
                solution = cvxopt.solvers.qp(P, q, G, h, A, b)
                lambdas = np.array(solution['x'])
                self.NbrIterations = solution['iterations']

            elif routine == 'quadprog':
                '''
                The routine offers us the following interface for solving quadratic optimization problems:
                                  1
                            min  --- * x.T * G * x - a.T * x
                             x    2
                     subject to          C.T * x >= b, where the equality constraints are the first rows of
                                                       the matrix C (the number of rows is specified using
                                                       the parameter 'meq' of the routine).

                This routine implements the Goldfarb-Idnani dual active-set method, so no gradient callback is required.
                Let's put the soft-margin dual problem in this format in order to use the quadprog routine to solve it.
                '''
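                # For reference, a minimal quadprog call (hypothetical toy numbers,
                # not part of the original code) looks like this:
                #     import numpy as np, quadprog
                #     Gt = np.eye(2); at = np.array([1., 1.])
                #     Ct = np.eye(2); bt = np.zeros(2)  # encodes x >= 0
                #     x, *_ = quadprog.solve_qp(Gt, at, Ct, bt, meq=0)  # -> x = [1., 1.]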
                G = np.outer(Y, Y) * K + np.eye(N) * 1e-3  # add a small ridge so G is positive definite,
                                                           # avoiding "ValueError: matrix G is not positive definite"
                                                           # as suggested here: https://github.com/facebookresearch/GradientEpisodicMemory/issues/2
                a = np.ones(N).reshape(-1,)
                # equality constraints
                C_eq = np.asarray(Y, dtype=float).reshape(1, N)
                b_eq = np.array([0.0],dtype=float)
                # inequality constraints
                C_ineq = np.array(np.vstack((np.identity(N), np.diag(np.ones(N) * -1))), dtype=float)
                b_ineq = np.array(np.hstack((np.zeros(N), np.ones(N) * -self.C)),dtype=float)
                # put all together
                C = np.concatenate((C_eq, C_ineq)).T
                b = np.concatenate((b_eq, b_ineq)).reshape(-1,)

                solution = quadprog.solve_qp(G, a, C, b, meq = 1)
                lambdas = np.array(solution[0])
                self.NbrIterations = solution[3][0]

            else:
                raise ValueError("Invalid value for parameter routine. The value should be in {'cvxopt', 'quadprog'}")

            self.training_time = time.time() - start_time
            # let's compute the bias
            # A small threshold (1e-5) is used to find the support vectors
            # (those with non-zero Lagrange multipliers, by complementary slackness).
            support_vector_indices = (lambdas > 1e-5).reshape(-1)
            self.support_multipliers = lambdas[support_vector_indices]
            self.support_vectors = X[support_vector_indices]
            self.support_vector_labels = Y[support_vector_indices]

            # http://www.cs.cmu.edu/~guestrin/Class/10701-S07/Slides/kernels.pdf
            # bias = y_k - \sum z_i y_i  K(x_k, x_i)
            # Thus we can predict with a bias of zero and average the
            # residuals y_k - f(x_k) over the support vectors.
            self.bias = 0.0
            bias = np.mean([y_k - self.think(x_k).item() for (y_k, x_k) in zip(self.support_vector_labels, self.support_vectors)])
            self.bias = bias

        else:
            '''
            We are dealing with a multi-class classification problem.
            '''
            raise ValueError("'training_set_outputs' has more than 2 distinct classes. Use MultiClassSVMClassifier for multi-class classification.")
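
As a sanity check on the cvxopt branch above, here is a minimal, self-contained sketch that solves the same soft-margin dual on a 4-point toy problem; the dataset, the value C = 10.0, and the linear kernel are illustrative assumptions:

import numpy as np
import cvxopt

X = np.array([[0., 0.], [1., 0.], [3., 2.], [4., 3.]])
Y = np.array([-1., -1., 1., 1.])
N, C = len(Y), 10.0

K = X @ X.T  # linear kernel Gram matrix
P = cvxopt.matrix(np.outer(Y, Y) * K)
q = cvxopt.matrix(-np.ones(N))
G = cvxopt.matrix(np.vstack((-np.eye(N), np.eye(N))))
h = cvxopt.matrix(np.hstack((np.zeros(N), C * np.ones(N))))
A = cvxopt.matrix(Y.reshape(1, -1))
b = cvxopt.matrix(0.0)

cvxopt.solvers.options['show_progress'] = False
lambdas = np.ravel(cvxopt.solvers.qp(P, q, G, h, A, b)['x'])
w = (lambdas * Y) @ X  # primal weights, valid for the linear kernel
print(lambdas.round(4), w.round(4))
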
Example #58
    def fit(self, features, labels, iterations=16):
        """
        Compute the parameters of the SVM classifier regarding the features and the associated labels.
        :param features: array of features vectors
        :param labels: array of labels vectors corresponding to the features
        :param iterations: maximum number of iterations allowed to the QP solver
        :return:
        """
        n_samples, n_features = features.shape

        # 1) Gram matrix
        K = zeros((n_samples, n_samples))
        for i in range(n_samples):
            for j in range(n_samples):
                K[i, j] = self.kernel(features[i], features[j])

        P = matrix(outer(labels, labels) * K)
        q = matrix(ones(n_samples) * -1)
        A = matrix(labels, (1, n_samples))
        b = matrix(0.0)

        if self.C is None:
            G = matrix(diag(ones(n_samples) * -1))
            h = matrix(zeros(n_samples))
        else:
            tmp1 = diag(ones(n_samples) * -1)
            tmp2 = identity(n_samples)
            G = matrix(vstack((tmp1, tmp2)))
            tmp1 = zeros(n_samples)
            tmp2 = ones(n_samples) * self.C
            h = matrix(hstack((tmp1, tmp2)))

        # 2) Resolve QP problem
        options = dict()
        options['maxiters'] = iterations
        options['show_progress'] = False
        solution = qp(P, q, G, h, A, b, options=options)['x']

        # 3) Lagrange multipliers
        lagrange_multipliers = ravel(solution)

        # 4) Support vectors
        support_vectors = lagrange_multipliers > 1e-5
        ind = arange(len(lagrange_multipliers))[support_vectors]
        self.lagrange_multipliers = lagrange_multipliers[support_vectors]
        self.support_vectors = features[support_vectors]
        self.support_vectors_labels = labels[support_vectors]

        # 5) Bias
        for n in range(len(self.lagrange_multipliers)):
            self.bias += self.support_vectors_labels[n]
            self.bias -= sum(self.lagrange_multipliers *
                             self.support_vectors_labels *
                             K[ind[n], support_vectors])
        self.bias /= len(self.lagrange_multipliers)

        # 6) Weight vector
        if self.kernel == Kernel.linear():
            self.weights = zeros(n_features)
            for n in range(len(self.lagrange_multipliers)):
                self.weights += self.lagrange_multipliers[
                    n] * self.support_vectors_labels[n] * self.support_vectors[
                        n]
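
For completeness, a hedged sketch of the matching decision function; the attribute names follow the fit above, but predict() itself is an assumption, not part of the original:

from numpy import sign

def predict(model, x):
    """f(x) = sum_i lambda_i * y_i * K(x_i, x) + b, thresholded at zero."""
    result = model.bias
    for z_i, x_i, y_i in zip(model.lagrange_multipliers,
                             model.support_vectors,
                             model.support_vectors_labels):
        result += z_i * y_i * model.kernel(x_i, x)
    return sign(result)
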
Example #59
try:
    import pylab
except ImportError:
    pylab_installed = False
else:
    pylab_installed = True
from pickle import load
from cvxopt import lapack, matrix, solvers
from cvxopt.modeling import variable, op, max
solvers.options['show_progress'] = 0

data = load(open('polapprox.bin', 'rb'))
t, y = data['t'], data['y']
m = len(t)

# LS fit of 5th order polynomial
#
#     minimize ||A*x - y ||_2

n = 6
A = matrix([[t**k] for k in range(n)])
xls = +y
lapack.gels(+A, xls)
xls = xls[:n]

# Chebyshev fit of 5th order polynomial
#
#     minimize ||A*x - y ||_inf

xinf = variable(n)
op(max(abs(A * xinf - y))).solve()
xinf = xinf.value

if pylab_installed:
    pylab.figure(1, facecolor='w')
    pylab.plot(t, y, 'bo', mfc='w', mec='b')
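
A quick numpy-only cross-check of the LS fit above; a hedged sketch on synthetic data, since polapprox.bin is not shipped here:

import numpy as np

np.random.seed(0)
t = np.linspace(-1.0, 1.0, 100)
y = np.sin(3 * t) + 0.05 * np.random.randn(100)

V = np.vander(t, 6, increasing=True)  # columns t**0 .. t**5, as in A above
xls, *_ = np.linalg.lstsq(V, y, rcond=None)
# np.polyfit returns highest-degree-first, so reverse before comparing
assert np.allclose(xls, np.polyfit(t, y, 5)[::-1])
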
Example #60
def uCEQ(s, Q1, Q2):
    """Utilitarian correlated-equilibrium policy at state s, computed by an LP.

    Assumes a module-level constant `numA` (the number of actions per player),
    plus `import numpy as np` and `from cvxopt import matrix, solvers`.
    """
    Q1_flat = Q1[s].flatten()
    Q2_flat = Q2[s].flatten()

    # maximize the sum of both players' payoffs -> minimize the negative
    c = matrix(-np.array(Q1_flat + Q2_flat, dtype="float"))

    # one row per rationality constraint for each player (numA*(numA-1) each),
    # plus numA**2 non-negativity constraints
    h = matrix(np.zeros(numA * (numA - 1) * 2 + numA**2))

    # joint-action probabilities must sum to one
    b = matrix(1.)
    A = matrix(np.ones((1, numA**2)))

    # Construct the G matrix of rationality constraints.
    # Row player: for each action a and each deviation a' != a,
    #   sum_j sigma(a, j) * (Q1[s, a, j] - Q1[s, a', j]) >= 0
    zero_block = np.zeros((1, numA))
    for a in range(numA):
        diff = [Q1[s, a, i] - Q1[s, :, i] for i in range(numA)]
        diff_T = np.vstack(diff).T
        diff_T = np.delete(diff_T, a, axis=0)

        block = np.vstack((np.repeat(zero_block, a * (numA - 1), axis=0),
                           diff_T,
                           np.repeat(zero_block, (numA - a - 1) * (numA - 1), axis=0)))
        if a == 0:
            pi_p1 = block
        else:
            pi_p1 = np.column_stack((pi_p1, block))
    pi_p1 = -pi_p1  # flip sign for the G x <= h convention

    # Column player: symmetric constraints on Q2, with deviations over columns
    for a in range(numA):
        diff = [Q2[s, :, a] - Q2[s, :, i] for i in range(numA)]
        diff = np.vstack(diff)
        diff = np.delete(diff, a, axis=0)

        block = np.vstack((np.repeat(zero_block, a * (numA - 1), axis=0),
                           diff,
                           np.repeat(zero_block, (numA - a - 1) * (numA - 1), axis=0)))
        if a == 0:
            pi_p2 = block
        else:
            pi_p2 = np.column_stack((pi_p2, block))
    pi_p2 = -pi_p2

    # non-negativity constraints: -sigma <= 0
    g_diag = -np.eye(numA**2)

    # stack all constraint blocks into G
    g = np.vstack((pi_p1, pi_p2, g_diag))
    G = matrix(g)
    solvers.options['show_progress'] = False
    solvers.options['feastol'] = 1e-4
    sol = solvers.lp(c, G, h, A, b, solver=None)
    sigmas = np.array(sol['x']).ravel()
    # clip tiny negative entries left by the interior-point solver and
    # renormalize so the joint distribution sums to one
    sigmas = np.clip(sigmas, 0., None)
    return sigmas.reshape((numA, numA)) / sigmas.sum()
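
And the cvxopt solvers.lp calling convention used above, in isolation; a hedged smoke test with toy numbers unrelated to the Q tables:

# minimize c'x  s.t.  G x <= h,  A x = b
# toy LP: minimize -2*x1 - x2  s.t.  x1 + 2*x2 <= 3,  x >= 0,  x1 + x2 = 1
import numpy as np
from cvxopt import matrix, solvers

c = matrix([-2.0, -1.0])
G = matrix(np.array([[1.0, 2.0], [-1.0, 0.0], [0.0, -1.0]]))
h = matrix([3.0, 0.0, 0.0])
A = matrix([[1.0], [1.0]])  # cvxopt treats nested lists as columns: this is the row [1, 1]
b = matrix(1.0)

solvers.options['show_progress'] = False
sol = solvers.lp(c, G, h, A, b)
print(np.ravel(sol['x']))  # -> approximately [1., 0.]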