Example 1
    def _cvxmod_minimize(self, lb, ub, **kwargs):
        """ solve quadratic problem using CVXMOD interface

        Keyword arguments:

        lb, ub  -- vectors of lower and upper bounds (potentially modified by
                   added tolerance)

        Modified member variables:

        status, solution, obj_value

        Returns: nothing
        """
        # shorter names
        v = self.cvxmodV
        A = self.cvxmodMatrix
        minus_w = cvxmod.matrix(self.obj.f)  # negative wildtype solution
        lb = cvxmod.matrix(lb)
        ub = cvxmod.matrix(ub)
        if self.weights is not None:
            weights = cvxmod.matrix(diag(sqrt(self.weights)))
        if self.Aineq is not None:
            Aineq = cvxmod.matrix(array(self.Aineq))
            bineq = cvxmod.matrix(self.bineq)
            if self.weights is None:
                p = cvxmod.problem(cvxmod.minimize(
                    cvxmod.norm2(v + minus_w)), [
                        cvxmod.abs(A * v) < self.matrix_tol,
                        Aineq * v <= bineq + self.matrix_tol, v >= lb, v <= ub
                    ])
            else:
                p = cvxmod.problem(
                    cvxmod.minimize(cvxmod.norm2(weights * (v + minus_w))), [
                        cvxmod.abs(A * v) < self.matrix_tol,
                        Aineq * v <= bineq + self.matrix_tol, v >= lb, v <= ub
                    ])
        else:
            if self.weights is None:
                p = cvxmod.problem(
                    cvxmod.minimize(cvxmod.norm2(v + minus_w)),
                    [cvxmod.abs(A * v) < self.matrix_tol, v >= lb, v <= ub])
            else:
                p = cvxmod.problem(
                    cvxmod.minimize(cvxmod.norm2(weights * (v + minus_w))),
                    [cvxmod.abs(A * v) < self.matrix_tol, v >= lb, v <= ub])

        self.status = cvxToSolverStatus(p.solve())

        if not v.value:
            self.solution = []
        else:
            self.solution = array(list(v.value))
        try:
            self.obj_value = p.value
        except cvxmod.OptvarValueError:
            self.obj_value = inf
Example 2
        def SDPToALocate(self, RN, ToA, ToAStd):
                """
                Apply SDP approximation and localization
                """
                RN = cvxm.matrix(RN)
                ToA = cvxm.matrix(ToA)
                
                c       = 3e08                                          # Speed of light
                RoA     = c*ToA
                RoAStd  = c*ToAStd
                RoAStd = cvxm.matrix(RoAStd)
                mtoa,ntoa=cvxm.size(RN)
                Im = cvxm.eye(mtoa)
                Y=cvxm.optvar('Y',mtoa+1,mtoa+1)
                t=cvxm.optvar('t',ntoa,1)
                prob=cvxm.problem(cvxm.minimize(cvxm.norm2(t)))
                prob.constr.append(Y>=0)
                prob.constr.append(Y[mtoa,mtoa]==1)
                for i in range(ntoa):
                    X0=cvxm.matrix([[Im, -cvxm.transpose(RN[:,i])],[-RN[:,i], cvxm.transpose(RN[:,i])*RN[:,i]]])
                    prob.constr.append(-t[i]<(cvxm.trace(X0*Y)-RoA[i]**2)*(1/RoAStd[i]))
                    prob.constr.append(t[i]>(cvxm.trace(X0*Y)-RoA[i]**2)*(1/RoAStd[i]))
                prob.solve()
                Pval=Y.value
                X_cvx=Pval[:2,-1]

                return X_cvx
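A minimal way to prepare inputs for SDPToALocate above (sketch only: the anchor layout is inferred from the code, which reads one anchor per column via RN[:,i], and `node` stands for a hypothetical instance of whatever class this method belongs to):

import numpy as np
# four planar anchors, one per column (2 x 4), and noise-free times of arrival
RN = np.array([[0.0, 10.0, 10.0,  0.0],
               [0.0,  0.0, 10.0, 10.0]])
true_pos = np.array([[3.0], [4.0]])
c = 3e8
ToA = np.sqrt(((RN - true_pos) ** 2).sum(axis=0)) / c
ToAStd = 1e-9 * np.ones(len(ToA))              # assumed 1 ns ranging standard deviation
# est_xy = node.SDPToALocate(RN, ToA, ToAStd)  # node: hypothetical instance owning the method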
Example 3
def solve(cols,W,penalty,par,parvalue):
    P=len(cols)
    N=len(cols[0])
    norm=eval("norm"+penalty)
    normFUN=eval("n"+penalty)
    normQX=0.0
    SUMDIFF=0.0
    X=matrix([list(x) for x in cols],(N*P,1))
    alpha=optvar("alpha",N*P)
    PARAM=float(parvalue)
    for i in range(N):
        for j in range(i+1,N):
            w=W[i][j]
            q=matrix(0.0,(1,N))
            q[i]=1
            q[j]=-1
            Qij=getQij(q,P)
            SUMDIFF+= norm(Qij*alpha)*w
            xidiff=[cols[k][i]-cols[k][j] for k in range(P)]
            normQX += normFUN(xidiff)*w
    from cvxmod.atoms import sum,square
    ## TDH alternate parameterization
    ##penalty_norm = 1.0/float(N*(N-1))
    ##SUMDIFF *= penalty_norm
    ##normQX  *= penalty_norm
    ##error=(0.5/N)*sum(square(X-alpha))
    error=0.5*sum(square(X-alpha))
    problems={
        "lambda":(error+PARAM*SUMDIFF,[]),
        "s":(error,[SUMDIFF*(1/normQX) <= PARAM]),
        }
    tomin,constraint=problems[par]
    p=problem(minimize(tomin),constraint)
    p.solve()
    return alpha
Example 4
def solve(cols, W, penalty, par, parvalue):
    P = len(cols)
    N = len(cols[0])
    norm = eval("norm" + penalty)
    normFUN = eval("n" + penalty)
    normQX = 0.0
    SUMDIFF = 0.0
    X = matrix([list(x) for x in cols], (N * P, 1))
    alpha = optvar("alpha", N * P)
    PARAM = float(parvalue)
    for i in range(N):
        for j in range(i + 1, N):
            w = W[i][j]
            q = matrix(0.0, (1, N))
            q[i] = 1
            q[j] = -1
            Qij = getQij(q, P)
            SUMDIFF += norm(Qij * alpha) * w
            xidiff = [cols[k][i] - cols[k][j] for k in range(P)]
            normQX += normFUN(xidiff) * w
    from cvxmod.atoms import sum, square
    ## TDH alternate parameterization
    ##penalty_norm = 1.0/float(N*(N-1))
    ##SUMDIFF *= penalty_norm
    ##normQX  *= penalty_norm
    ##error=(0.5/N)*sum(square(X-alpha))
    error = 0.5 * sum(square(X - alpha))
    problems = {
        "lambda": (error + PARAM * SUMDIFF, []),
        "s": (error, [SUMDIFF * (1 / normQX) <= PARAM]),
    }
    tomin, constraint = problems[par]
    p = problem(minimize(tomin), constraint)
    p.solve()
    return alpha
Example 5
        def SDPTDoALocate(self, RN1, RN2, TDoA, TDoAStd):
                """
                Apply SDP approximation and localization
                """
                RN1 = cvxm.matrix(RN1)
                RN2 = cvxm.matrix(RN2)
                TDoA = cvxm.matrix(TDoA)
                c       = 3e08                                          
                RDoA     = c*TDoA
                RDoAStd=cvxm.matrix(c*TDoAStd)
                mtdoa,ntdoa=cvxm.size(RN1)
                Im = cvxm.eye(mtdoa)
                Y=cvxm.optvar('Y',mtdoa+1,mtdoa+1)
                t=cvxm.optvar('t',ntdoa,1)
                prob=cvxm.problem(cvxm.minimize(cvxm.norm2(t)))
                prob.constr.append(Y>=0)
                prob.constr.append(Y[mtdoa,mtdoa]==1)
                for i in range(ntdoa):
                    X0=cvxm.matrix([[Im, -cvxm.transpose(RN1[:,i])],[-RN1[:,i], cvxm.transpose(RN1[:,i])*RN1[:,i]]])
                    X1=cvxm.matrix([[Im, -cvxm.transpose(RN2[:,i])],[-RN2[:,i], cvxm.transpose(RN2[:,i])*RN2[:,i]]])
                    prob.constr.append(-RDoAStd[i,0]*t[i]<cvxm.trace(X0*Y)+cvxm.trace(X1*Y)-RDoA[i,0]**2)
                    prob.constr.append(RDoAStd[i,0]*t[i]>cvxm.trace(X0*Y)+cvxm.trace(X1*Y)-RDoA[i,0]**2)
                prob.solve()
                Pval=Y.value
                X_cvx=Pval[:2,-1]

                return X_cvx
Example 6
        def SDPRSSLocate(self, RN, PL0, d0, RSS, RSSnp, RSSStd, Rest):

                RoA=self.getRange(RN, PL0, d0, RSS, RSSnp, RSSStd, Rest)
                

                RN=cvxm.matrix(RN)
                RSS=cvxm.matrix(RSS)
                RSSnp=cvxm.matrix(RSSnp)
                RSSStd=cvxm.matrix(RSSStd)
                PL0=cvxm.matrix(PL0)
                RoA=cvxm.matrix(RoA)
                mrss,nrss=cvxm.size(RN)
                
                Si = array([(1/d0**2)*10**((RSS[0,0]-PL0[0,0])/(5.0*RSSnp[0,0])),
                            (1/d0**2)*10**((RSS[1,0]-PL0[1,0])/(5.0*RSSnp[1,0])),
                            (1/d0**2)*10**((RSS[2,0]-PL0[2,0])/(5.0*RSSnp[2,0])),
                            (1/d0**2)*10**((RSS[3,0]-PL0[3,0])/(5.0*RSSnp[3,0]))])
                #Si = array([(1/d0**2)*10**(-(RSS[0,0]-PL0[0,0])/(5.0*RSSnp[0,0])),
                #            (1/d0**2)*10**(-(RSS[0,1]-PL0[1,0])/(5.0*RSSnp[0,1])),
                #            (1/d0**2)*10**(-(RSS[0,2]-PL0[2,0])/(5.0*RSSnp[0,2])),
                #            (1/d0**2)*10**(-(RSS[0,3]-PL0[3,0])/(5.0*RSSnp[0,3]))])
                
                Im = cvxm.eye(mrss)
                Y=cvxm.optvar('Y',mrss+1,mrss+1)
                t=cvxm.optvar('t',nrss,1)

                prob=cvxm.problem(cvxm.minimize(cvxm.norm2(t)))
                prob.constr.append(Y>=0)
                prob.constr.append(Y[mrss,mrss]==1)
                for i in range(nrss):
                    X0=cvxm.matrix([[Im, -cvxm.transpose(RN[:,i])],[-RN[:,i], cvxm.transpose(RN[:,i])*RN[:,i]]])
                    prob.constr.append(-RSSStd[i,0]*t[i]<Si[i]*cvxm.trace(X0*Y)-1)
                    prob.constr.append(RSSStd[i,0]*t[i]>Si[i]*cvxm.trace(X0*Y)-1)
               
                prob.solve()
                
                Pval=Y.value
                X_cvx=Pval[:2,-1]
                
                return X_cvx
Example 7
	def _compute(self):
		start = datetime.datetime.now()
		
		C = self.C
		gamma = self.gamma
		Kcount = len( gamma )
		(N,d) = self.data.shape
		X = self.data
		
		# CMF of observations X
		Xcmf = ( (X.reshape(N,1,d) > transpose(X.reshape(N,1,d),[1,0,2])).prod(2).sum(1,dtype=float) / N ).reshape([N,1])
		
		# epsilon of observations X
		e = sqrt( (1./N) * ( Xcmf ) * (1.-Xcmf) ).reshape([N,1])
		
		K = self._K( Xcmf.reshape(N,1,d), transpose(Xcmf.reshape(N,1,d), [1,0,2]), gamma )

		xipos = cvxmod.optvar( 'xi+', N,1)
		xipos.pos = True
		xineg = cvxmod.optvar( 'xi-', N,1)
		xineg.pos = True
			
		alphas = list()
		expr = ( C*cvxmod.sum(xipos) ) + ( C*cvxmod.sum(xineg) )
		ineq = 0
		eq = 0
		
		for i in range( Kcount ):
			alpha = cvxmod.optvar( 'alpha(%s)' % i, N,1)
			alpha.pos = True
			
			alphas.append( alpha )
			expr += ( float(1./gamma[i]) * cvxmod.sum( alpha ) )
			ineq += ( cvxopt.matrix( K[i], (N,N) ) * alpha )
			eq += cvxmod.sum( alpha )
			
		objective = cvxmod.minimize( expr )
		
		ineq1 = ineq <= cvxopt.matrix( Xcmf + e ) + xineg
		ineq2 = ineq >= cvxopt.matrix( Xcmf - e ) - xipos
		eq1 = eq == cvxopt.matrix( 1.0 )
		

		# Solve!
		p = cvxmod.problem( objective = objective, constr = [ineq1,ineq2,eq1] )
		
		start = datetime.datetime.now()
		p.solve()
		duration = datetime.datetime.now() - start
		print "optimized in %ss" % (float(duration.microseconds)/1000000)
		
		self.Fl = Xcmf
		self.betas = [ ma.masked_less( alpha.value, 1e-4) for alpha in alphas ]
		
		print "SV's found: %s" % [ len( beta.compressed()) for beta in self.betas ]
Example 8
def fit_ellipse_squared(x, y):
    """
    fit ellipse using squared loss
    """

    assert len(x) == len(y)

    N = len(x)
    D = 5

    dat = numpy.zeros((N, D))
    dat[:,0] = x*x
    dat[:,1] = y*y
    #dat[:,2] = x*y
    dat[:,2] = x
    dat[:,3] = y
    dat[:,4] = numpy.ones(N)


    print dat.shape
    dat = cvxmod.matrix(dat)
    #### parameters

    # data
    X = cvxmod.param("X", N, D)


    #### variables

    # parameter vector
    theta = cvxmod.optvar("theta", D)

    # simple objective 
    objective = cvxmod.atoms.norm2(X*theta)

    # create problem                                    
    p = cvxmod.problem(cvxmod.minimize(objective))
    p.constr.append(theta[0] + theta[1] == 1)
    
    
    ###### set values
    X.value = dat
    #solver = "mosek" 
    #p.solve(lpsolver=solver)

    p.solve()
    
    cvxmod.printval(theta)

    theta_ = numpy.array(cvxmod.value(theta))
    ellipse = conic_to_ellipse(theta_)

    return ellipse
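A small synthetic-data sketch for fit_ellipse_squared above (the ellipse parameters and noise level are illustrative; the call itself is left commented because it needs cvxmod and the conic_to_ellipse helper from the surrounding module):

import numpy
t = numpy.linspace(0, 2 * numpy.pi, 100)
x = 3.0 + 2.0 * numpy.cos(t) + 0.05 * numpy.random.randn(100)   # noisy axis-aligned ellipse
y = 1.0 + 1.0 * numpy.sin(t) + 0.05 * numpy.random.randn(100)
# ellipse = fit_ellipse_squared(x, y)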
Example 9
    def compute_combinaison_safe(self,target,rcond = 0.0,regul = None):
        """
            Computes the combination of base targets that reproduces 'target'
            (or gives the best approximation), while keeping coefficients
            between 0 and 1.

            arguments :
            - target : target to fit
            - rcond : cut-off on the singular values as a fraction of
                      the biggest one. Only base vectors corresponding
                      to singular values bigger than rcond*largest_singular_value
                      are kept.
            - regul : regularisation factor for least-square fitting. This forces
                      the algorithm to use fewer targets.
            
        """
        from cvxmod import optvar,param,norm2,norm1,problem,matrix,minimize
        if type(target) is str or type(target) is unicode :
            target = read_target(target)
        cond = self.s>= rcond*self.s[0]
        u = self.u[ : , cond ]
        vt = self.vt[ cond ]
        s = self.s[cond]
        
        t = target.flatten()
        dim,ntargets = self.vt.shape
        nvert = target.shape[0]
        
        pt = np.dot(u.T,t.reshape(nvert*3,1))
        A = param('A',value = matrix(s.reshape(dim,1)*vt))
        b = param('b',value = matrix(pt))
        x = optvar('x',ntargets)

        if regul is None : prob = problem(minimize(norm2(A*x-b)),[x>=0.,x<=1.])
        else : prob = problem(minimize(norm2(A*x-b) + regul * norm1(x)),[x>=0.,x<=1.])
        
        prob.solve()
        bs = np.array(x.value).flatten()
        # Body setting files have a precision of at most 1.e-3
        return bs*(bs>=1e-3)
Example 10
def compute_bbox_set_agreement(example_boxes, gold_boxes):
    nExB = len(example_boxes)
    nGtB = len(gold_boxes)
    if nExB == 0:
        if nGtB == 0:
            return 1
        else:
            return 0

    if nGtB == 0:
        print "WARNING: new object"
        return 0

    A = cvxmod.zeros(rows=nExB, cols=nGtB)

    for iBox, ex in enumerate(example_boxes):
        for jBox, gt in enumerate(gold_boxes):
            A[iBox, jBox] = ex.overlap_score(gt)

    S = []
    S2 = []

    for iBox, ex in enumerate(example_boxes):
        S_tmp = [0] * (iBox) * nGtB + [1] * nGtB + [0] * (nExB - iBox -
                                                          1) * nGtB

        S.append(S_tmp)

    for jBox in range(0, nGtB):
        S2_tmp = [0] * nExB * nGtB
        for j2 in range(0, nExB):
            S2_tmp[j2 * nGtB + jBox] = 1

        S2.append(S2_tmp)

    S = cvxmod.transpose(cvxmod.matrix(S, size=(nExB * nGtB, nExB)))
    S2 = cvxmod.transpose(cvxmod.matrix(S2, size=(nExB * nGtB, nGtB)))

    A2 = cvxmod.matrix(A, (1, nExB * nGtB))
    x = cvxmod.optvar('x', rows=nExB * nGtB, cols=1)

    p = cvxmod.problem(cvxmod.maximize(A2 * x))
    p.constr.append(x <= 1)
    p.constr.append(x >= 0)

    p.constr.append(S * x <= 1)
    p.constr.append(S2 * x <= 1)

    p.solve(True)
    overlap = cvxmod.value(p) / max(nExB, nGtB)
    assert (overlap < 1.0001)
    return overlap
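compute_bbox_set_agreement expects box objects exposing an overlap_score method; a sketch of a stand-in box type (the class and its IoU-style score are illustrative assumptions, not part of the original project):

class Box(object):
    """Axis-aligned box with an intersection-over-union overlap score."""
    def __init__(self, x0, y0, x1, y1):
        self.coords = (x0, y0, x1, y1)

    def overlap_score(self, other):
        ax0, ay0, ax1, ay1 = self.coords
        bx0, by0, bx1, by1 = other.coords
        iw = max(0.0, min(ax1, bx1) - max(ax0, bx0))
        ih = max(0.0, min(ay1, by1) - max(ay0, by0))
        inter = iw * ih
        union = (ax1 - ax0) * (ay1 - ay0) + (bx1 - bx0) * (by1 - by0) - inter
        return inter / union if union > 0 else 0.0

# example_boxes = [Box(0, 0, 2, 2), Box(3, 3, 5, 5)]
# gold_boxes = [Box(0, 0, 2, 2), Box(3, 3, 4, 5)]
# score = compute_bbox_set_agreement(example_boxes, gold_boxes)   # needs cvxmod installed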
Example 11
def reconstruct_target(target_file,base_prefix,regul = None):
	"""
		Reconstruct the target in 'target_file' using constrained, 
		and optionally regularized, least square optimisation.
		
		arguments :
			target_file : file containing the target to fit
			base_prefix : prefix for the files of the base.
	"""
	
	vlist = read_vertex_list(base_prefix+'_vertices.dat')
	t = read_target(target_file,vlist)
	U = load(base_prefix+"_U.npy").astype('float')
	S = load(base_prefix+"_S.npy").astype('float')
	V = load(base_prefix+"_V.npy").astype('float')

	ntargets,dim = V.shape
	nvert = len(t)
	pt = dot(U.T,t.reshape(nvert*3,1))
	pbase = S[:dim].reshape(dim,1)*V.T
	A = param('A',value = matrix(pbase))
	b = param('b',value = matrix(pt))
	x = optvar('x',ntargets)

	if regul is None : prob = problem(minimize(norm2(A*x-b)),[x>=0.,x<=1.])
	else : prob = problem(minimize(norm2(A*x-b) + regul * norm1(x)),[x>=0.,x<=1.])
	
	prob.solve()
	
	targ_names_file = base_prefix+"_names.txt"
	with open(targ_names_file) as f :
		tnames = [line.strip() for line in f.readlines() ]
	tnames.sort()
	
	base,ext = os.path.splitext(target_file)
	bs_name = base+".bs"
	with open(bs_name,"w") as f :
		for tn,v in zip(tnames,x.value):
			if v >= 1e-3 : f.write("%s %0.3f\n"%(tn,v))
Example 12
def reconstruct_target(target_file,base_prefix,regul = None):
	"""
		Reconstruct the target in 'target_file' using constrained, 
		and optionally regularized, least square optimisation.
		
		arguments :
			target_file : file containing the target to fit
			base_prefix : prefix for the files of the base.
	"""
	
	vlist = read_vertex_list(base_prefix+'_vertices.dat')
	t = read_target(target_file,vlist)
	U = load(base_prefix+"_U.npy").astype('float')
	S = load(base_prefix+"_S.npy").astype('float')
	V = load(base_prefix+"_V.npy").astype('float')

	ntargets,dim = V.shape
	nvert = len(t)
	pt = dot(U.T,t.reshape(nvert*3,1))
	pbase = S[:dim].reshape(dim,1)*V.T
	A = param('A',value = matrix(pbase))
	b = param('b',value = matrix(pt))
	x = optvar('x',ntargets)

	if regul is None : prob = problem(minimize(norm2(A*x-b)),[x>=0.,x<=1.])
	else : prob = problem(minimize(norm2(A*x-b) + regul * norm1(x)),[x>=0.,x<=1.])
	
	prob.solve()
	
	targ_names_file = base_prefix+"_names.txt"
	with open(targ_names_file) as f :
		tnames = [line.strip() for line in f.readlines() ]
	tnames.sort()
	
	base,ext = os.path.splitext(target_file)
	bs_name = base+".bs"
	with open(bs_name,"w") as f :
		for tn,v in zip(tnames,x.value):
			if v >= 1e-3 : f.write("%s %0.3f\n"%(tn,v))
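Example 13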
def solve_rw_l1_cvxmod(A, y, iters=6):
    W = speye(A.size[1])
    x = optvar('x', A.size[1])
    epsilon = 0.5
    for i in range(iters):
        last_x = matrix(x.value) if x.value else None
        p = problem(minimize(norm1(W * x)), [A * x == y])
        p.solve(quiet=True, cvxoptsolver='glpk')
        ww = abs(x.value) + epsilon
        W = diag(matrix([1 / w for w in ww]))
        if last_x:
            err = ((last_x - x.value).T * (last_x - x.value))[0]
            if err < 1e-4:
                break
    return x.value
Example 14
def solve_rw_l1_cvxmod(A, y, iters=6):
    W = speye(A.size[1])
    x = optvar('x', A.size[1])
    epsilon = 0.5
    for i in range(iters):
        last_x = matrix(x.value) if x.value else None
        p = problem(minimize(norm1(W*x)), [A*x == y])
        p.solve(quiet=True, cvxoptsolver='glpk')
        ww = abs(x.value) + epsilon
        W = diag(matrix([1/w for w in ww]))
        if last_x:
            err = ( (last_x - x.value).T * (last_x - x.value) )[0]
            if err < 1e-4:
                break
    return x.value
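The loop above is iteratively reweighted l1 minimization: after each solve the weights become roughly 1/(|x_i| + epsilon), which pushes small coefficients toward zero. A sketch of the kind of input it expects (sizes and sparsity pattern are illustrative; the cvxmod conversion and the call are commented because the function relies on cvxmod's star imports):

import numpy as np
np.random.seed(1)
A_np = np.random.randn(20, 50)                 # under-determined system
x_true = np.zeros(50)
x_true[[3, 17, 41]] = [1.0, -2.0, 0.7]         # sparse ground truth
y_np = np.dot(A_np, x_true)
# A = matrix(A_np); y = matrix(y_np)           # cvxmod/cvxopt matrices
# x_hat = solve_rw_l1_cvxmod(A, y)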
Example 15
def interior_point(X, y, lam):
    """
    solve lasso using an interior point method
    requires cvxmod (Jacob Mattingley and Stephen Boyd)
    http://cvxmod.net/
    """
    import cvxmod as cvx
    n, m = X.shape
    X_cvx = cvx.matrix(np.array(X))
    y_cvx = cvx.matrix(np.array(y))
    theta = cvx.optvar('theta', m)
    p = cvx.problem(cvx.minimize(cvx.sum(cvx.atoms.power(X_cvx*theta - y_cvx, 2)) + 
                    (2*lam)*cvx.norm1(theta)))
    p.solve() 
    return np.array(cvx.value(theta))
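A small way to exercise interior_point above on synthetic data (cvxmod must be installed; the sizes, noise level and lam value are illustrative):

import numpy as np
np.random.seed(0)
X = np.random.randn(50, 10)
w_true = np.zeros(10)
w_true[:3] = [1.5, -2.0, 0.5]                      # sparse coefficients
y = np.dot(X, w_true) + 0.01 * np.random.randn(50)
theta_hat = interior_point(X, y, 0.1)              # lasso estimate of w_true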
Example 16
	def _compute(self):
		start = datetime.datetime.now()

		gamma = self.gamma
		(N,d) = self.data.shape
		X = self.data

		Xcmf = ( (X.reshape(N,1,d) > transpose(X.reshape(N,1,d),[1,0,2])).prod(2).sum(1,dtype=float) / N ).reshape([N,1])
		sigma = .75 / sqrt(N)
		
		K = self._K( X.reshape(N,1,d), transpose(X.reshape(N,1,d), [1,0,2]), gamma ).reshape([N,N])
		#NOTE: this integral depends on K being the gaussian kernel
		Kint =  ( (1.0/gamma)*scipy.special.ndtr( (X-X.T)/gamma ) )
		
		alpha = cvxmod.optvar( 'alpha',N,1)
		alpha.pos = True
		pK = cvxmod.param( 'K',N,N )
		pK.psd = True
		pK.value = cvxopt.matrix(K,(N,N) )
		pKint = cvxmod.param( 'Kint',N,N )
		pKint.value = cvxopt.matrix(Kint,(N,N))
		#pKint.pos = True
		pXcmf = cvxmod.param( 'Xcmf',N,1)
		pXcmf.value = cvxopt.matrix(Xcmf, (N,1))
		#pXcmf.pos = True
		
		objective = cvxmod.minimize( cvxmod.atoms.quadform(alpha, pK) )
		eq1 = cvxmod.abs( pXcmf - ( pKint * alpha ) ) <= sigma
		eq2 = cvxmod.sum( alpha ) == 1.0
		
		# Solve!
		p = cvxmod.problem( objective = objective, constr = [eq1, eq2] )
		
		start = datetime.datetime.now()
		p.solve()
		duration = datetime.datetime.now() - start
		print "optimized in %ss" % (float(duration.microseconds)/1000000)
		
		beta = ma.masked_less( alpha.value, 1e-7 )
		mask = ma.getmask( beta )
		data = ma.array(X,mask=mask)
		
		self.Fl = Xcmf
		self.beta = beta.compressed().reshape([ 1, len(beta.compressed()) ])
		self.SV = data.compressed().reshape([len(beta.compressed()),1])
		print "%s SV's found" % len(self.SV)
Example 17
def run_opt(feature_lists, reference_indices, alpha):
    """
    run_opt( feature_lists ) -> weights
    feature_lists is a list of I image_feature_sets
        image_feature_sets are a list of P stacked_features
        stacked_features are a list of N different feature types
    reference_indices is a set of indices which will be held out as "reference"
    performs the opt:
        min sum_{i_r in ref_idx} sum_{i < I not in ref_idx} sum_{p < P}
            w' * ||f_{i_r,p,n} - f_{i,p,n}||^2 - alpha/(P-1) * sum_{p'<p} || f_{i_r,p,n} - f_{i,p',n} ||^2
    """

    I = len(feature_lists)
    P = len(feature_lists[0])
    N = len(feature_lists[0][0])

    non_reference_indices = [i for i in range(I) if not i in reference_indices]
    closeness_reward = np.zeros(N)
    uniqueness_penalty = np.zeros(N)
    f = feature_lists
    for i_r in reference_indices:
        for i in non_reference_indices:
            for p in range(P):
                for n in range(N):
                    closeness_reward[n] += feature_distance(
                        f[i_r][p][n], f[i][p][n])
                    for p_false in range(p):
                        uniqueness_penalty[n] += feature_distance(
                            f[i_r][p][n], f[i][p_false][n])
    c = cvx.param('c',
                  value=cvx.matrix(closeness_reward -
                                   alpha / float(P - 1) * uniqueness_penalty))
    print c.value
    w = cvx.optvar('w', N)
    w.pos = True
    w | cvx.In | cvx.norm1ball(N)
    p = cvx.problem()
    p.objective = cvx.minimize(cvx.tp(c) * w)
    p.constr = [cvx.sum(w) == 1]
    print "Running solver"
    p.solve()
    print "Ran!"
    return np.array(w.value)
Example 18
    def _compute(self):
        C = self.C
        gamma = self.gamma
        (N, d) = self.data.shape
        X = self.data

        Xcmf = (
            (X.reshape(N, 1, d) > transpose(X.reshape(N, 1, d), [1, 0, 2])).prod(2).sum(1, dtype=float) / N
        ).reshape([N, 1])
        sigma = 0.75 / sqrt(N)

        K = self._K(X.reshape(N, 1, d), transpose(X.reshape(N, 1, d), [1, 0, 2]), gamma).reshape([N, N])
        # NOTE: this integral depends on K being the gaussian kernel
        Kint = (1.0 / gamma) * scipy.special.ndtr((X - X.T) / gamma)

        alpha = cvxmod.optvar("alpha", N, 1)
        alpha.pos = True
        xi = cvxmod.optvar("xi", N, 1)
        xi.pos = True
        pXcmf = cvxmod.param("Xcmf", N, 1)
        pXcmf.pos = True
        pXcmf.value = cvxopt.matrix(Xcmf, (N, 1))
        pKint = cvxmod.param("Kint", N, N)
        pKint.value = cvxopt.matrix(Kint, (N, N))

        objective = cvxmod.minimize(cvxmod.sum(cvxmod.atoms.power(alpha, 2)) + (C * cvxmod.sum(xi)))
        eq1 = cvxmod.abs((pKint * alpha) - pXcmf) <= sigma + xi
        eq2 = cvxmod.sum(alpha) == 1.0

        # Solve!
        p = cvxmod.problem(objective=objective, constr=[eq1, eq2])
        p.solve()

        beta = ma.masked_less(alpha.value, 1e-7)
        mask = ma.getmask(beta)
        data = ma.array(X, mask=mask)

        self.beta = beta.compressed().reshape([1, len(beta.compressed())])
        self.SV = data.compressed().reshape([len(beta.compressed()), 1])
        print "%s SV's found" % len(self.SV)
Example 19
    def update(self, dt=None):
        """
        """
        self.initialize_LQP()

        self.get_situation()

        self.compute_objectives()

        self.write_tasks()

        self.write_constraints()

        self.solve_LQP()

        M = param('M', value=matrix(self.world.mass))
        N = param('N', value=matrix(self.world.nleffects))
        # variables:
        dgvel = optvar('dgvel', self._wndof)
        tau = optvar('tau', self._wndof)
        #fc    = optvar('tau', self._wndof)
        gvel = param('gvel', value=matrix(self.world.gvel))
        taumax = param('taumax', value=matrix(array([10., 10., 10.])))

        ### resolution ###
        cost = norm2(tau)
        for task in self._tasks:
            cost += 100. * task.cost(dgvel)

        p = problem(minimize(cost))
        p.constr.append(M * dgvel + N * gvel == tau)
        p.constr.append(-taumax <= tau)
        p.constr.append(tau <= taumax)
        p.solve(True)
        tau = array(tau.value).reshape(self._wndof)
        self._rec_tau.append(tau)
        gforce = tau
        impedance = zeros((self._wndof, self._wndof))
        return (gforce, impedance)
Example 20
def run_opt( feature_lists, reference_indices, alpha ):
    """
    run_opt( feature_lists ) -> weights
    feature_lists is a list of I image_feature_sets
        image_feature_sets are a list of P stacked_features
        stacked_features are a list of N different feature types
    reference_indices is a set of indices which will be held out as "reference"
    performs the opt:
        min sum_{i_r in ref_idx} sum_{i < I not in ref_idx} sum_{p < P}
            w' * ||f_{i_r,p,n} - f_{i,p,n}||^2 - alpha/(P-1) * sum_{p'<p} || f_{i_r,p,n} - f_{i,p',n} ||^2
    """

    I = len( feature_lists )
    P = len( feature_lists[0] )
    N = len( feature_lists[0][0] )

    non_reference_indices = [i for i in range(I) if not i in reference_indices]
    closeness_reward = np.zeros( N )
    uniqueness_penalty = np.zeros( N )
    f = feature_lists
    for i_r in reference_indices:
        for i in non_reference_indices:
            for p in range(P):
                for n in range(N):
                    closeness_reward[ n ] += feature_distance( f[i_r][p][n], f[i][p][n] )
                    for p_false in range(p):
                        uniqueness_penalty[ n ] += feature_distance( f[i_r][p][n], f[i][p_false][n] )
    c = cvx.param('c', value = cvx.matrix( closeness_reward - alpha / float(P-1) * uniqueness_penalty ) )
    print c.value
    w = cvx.optvar('w', N )
    w.pos = True
    w | cvx.In | cvx.norm1ball(N)
    p = cvx.problem()
    p.objective = cvx.minimize( cvx.tp(c) * w  )
    p.constr = [ cvx.sum(w) == 1]
    print "Running solver"
    p.solve()
    print "Ran!"
    return np.array( w.value )
Example 21
    def update(self, dt=None):
        """
        """
        self.initialize_LQP()

        self.get_situation()

        self.compute_objectives()

        self.write_tasks()

        self.write_constraints()

        self.solve_LQP()

        M = param("M", value=matrix(self.world.mass))
        N = param("N", value=matrix(self.world.nleffects))
        # variables:
        dgvel = optvar("dgvel", self._wndof)
        tau = optvar("tau", self._wndof)
        # fc    = optvar('tau', self._wndof)
        gvel = param("gvel", value=matrix(self.world.gvel))
        taumax = param("taumax", value=matrix(array([10.0, 10.0, 10.0])))

        ### resolution ###
        cost = norm2(tau)
        for task in self._tasks:
            cost += 100.0 * task.cost(dgvel)

        p = problem(minimize(cost))
        p.constr.append(M * dgvel + N * gvel == tau)
        p.constr.append(-taumax <= tau)
        p.constr.append(tau <= taumax)
        p.solve(True)
        tau = array(tau.value).reshape(self._wndof)
        self._rec_tau.append(tau)
        gforce = tau
        impedance = zeros((self._wndof, self._wndof))
        return (gforce, impedance)
Example 22
    def fit(self, data):

        dat = phi_of_x(data)
        N = dat.shape[0]
        D = dat.shape[1]

        dat = cvxmod.matrix(dat)
        #### parameters

        # data
        X = cvxmod.param("X", N, D)

        #### variables

        # parameter vector
        theta = cvxmod.optvar("theta", D)

        # simple objective 
        objective = cvxmod.atoms.norm2(X*theta)

        # create problem                                    
        p = cvxmod.problem(cvxmod.minimize(objective))
        p.constr.append(theta[0] + theta[1] == 1)
        
        ###### set values
        X.value = dat

        p.solve()
        
        cvxmod.printval(theta)

        theta_ = numpy.array(cvxmod.value(theta))
        #ellipse = conic_to_ellipse(theta_)

        #return ellipse
        return theta_
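Example 23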
def solve_plain_l1_cvxmod(A, y):
    x = optvar('x', A.size[1])
    p = problem(minimize(norm1(x)), [A * x == y])
    p.solve(quiet=True, solver='glpk')
    return x.value
Example 24
def fit_ellipse_stack_squared(dx, dy, dz, di):
    """
    fit ellipse stack using squared loss

    idea: learn all slices of the stack together, coupled by a smoothness term
    """

    # sanity check
    assert len(dx) == len(dy)
    assert len(dx) == len(dz)
    assert len(dx) == len(di)

    # unique zs
    dat = defaultdict(list)

    # resort data
    for idx in range(len(dx)):
        dat[dz[idx]].append( [dx[idx], dy[idx], di[idx]] )

    # init ret
    ellipse_stack = []
    for idx in range(max(dz)):
        ellipse_stack.append(Ellipse(0, 0, idx, 1, 1, 0))
    

    total_N = len(dx)
    M = len(dat.keys())
    D = 5

    X_matrix = []
    thetas = []


    for z in dat.keys():

        x = numpy.array(dat[z])[:,0]
        y = numpy.array(dat[z])[:,1]

        # intensities
        i = numpy.array(dat[z])[:,2]

        # log intensities
        i = numpy.log(i)

        # create matrix
        ity = numpy.diag(i)

        # dimensionality
        N = len(x)
        d = numpy.zeros((N, D))

        d[:,0] = x*x
        d[:,1] = y*y
        #d[:,2] = x*y
        d[:,2] = x
        d[:,3] = y
        d[:,4] = numpy.ones(N)

        #d[:,0] = x*x
        #d[:,1] = y*y
        #d[:,2] = x*y
        #d[:,3] = x
        #d[:,4] = y
        #d[:,5] = numpy.ones(N)
    
        # consider intensities
        old_shape = d.shape

        d = numpy.dot(ity, d)
        assert d.shape == old_shape
    
        print d.shape   
        d = cvxmod.matrix(d)
        #### parameters

        # data
        X = cvxmod.param("X" + str(z), N, D)
        X.value = d
        X_matrix.append(X)


        #### variables
    
        # parameter vector
        theta = cvxmod.optvar("theta" + str(z), D)
        thetas.append(theta)


    # construct objective
    objective = 0
    for (i,X) in enumerate(X_matrix):
        #TODO try abs loss here!
        objective += cvxmod.sum(cvxmod.atoms.square(X*thetas[i]))
        #objective += cvxmod.sum(cvxmod.atoms.abs(X*thetas[i]))

    # add smoothness regularization
    reg_const = float(total_N) / float(M-1)
    for i in xrange(M-1):
        objective += reg_const * cvxmod.sum(cvxmod.atoms.square(thetas[i] - thetas[i+1]))

    print objective

    # create problem                                    
    p = cvxmod.problem(cvxmod.minimize(objective))

    # add constraints
    for i in xrange(M):
        p.constr.append(thetas[i][0] + thetas[i][1] == 1)
    
    
    ###### set values
    p.solve()
    

    # wrap up result
    ellipse_stack = {}

    active_layers = dat.keys()
    assert len(active_layers) == M

    for i in xrange(M):

        theta_ = numpy.array(cvxmod.value(thetas[i]))
        z_layer = active_layers[i]
        ellipse_stack[z_layer] = conic_to_ellipse(theta_)
        ellipse_stack[z_layer].cz = z_layer

    return ellipse_stack
Example 25
def fit_ellipse_stack_abs(dx, dy, dz, di):
    """
    fit ellipse stack using absolute loss

    idea: learn all slices of the stack together, coupled by a smoothness term
    """

    # sanity check
    assert len(dx) == len(dy)
    assert len(dx) == len(dz)
    assert len(dx) == len(di)

    # unique zs
    dat = defaultdict(list)

    # resort data
    for idx in range(len(dx)):
        dat[dz[idx]].append( [dx[idx], dy[idx], di[idx]] )

    # init ret
    ellipse_stack = []
    for idx in range(max(dz)):
        ellipse_stack.append(Ellipse(0, 0, idx, 1, 1, 0))
    

    total_N = len(dx)
    M = len(dat.keys())
    D = 5

    X_matrix = []
    thetas = []
    slacks = []
    eps_slacks = []

    mean_di = float(numpy.mean(di))

    for z in dat.keys():

        x = numpy.array(dat[z])[:,0]
        y = numpy.array(dat[z])[:,1]

        # intensities
        i = numpy.array(dat[z])[:,2]

        # log intensities
        i = numpy.log(i)

        # create matrix
        ity = numpy.diag(i)# / mean_di

        # dimensionality
        N = len(x)
        d = numpy.zeros((N, D))

        d[:,0] = x*x
        d[:,1] = y*y
        #d[:,2] = x*y
        d[:,2] = x
        d[:,3] = y
        d[:,4] = numpy.ones(N)

        #d[:,0] = x*x
        #d[:,1] = y*y
        #d[:,2] = x*y
        #d[:,3] = x
        #d[:,4] = y
        #d[:,5] = numpy.ones(N)

        print "old", d
        # consider intensities
        old_shape = d.shape
        d = numpy.dot(ity, d)
        print "new", d
        assert d.shape == old_shape
    
        print d.shape   
        d = cvxmod.matrix(d)
        #### parameters

        # data
        X = cvxmod.param("X" + str(z), N, D)
        X.value = d
        X_matrix.append(X)


        #### variables
    
        # parameter vector
        theta = cvxmod.optvar("theta" + str(z), D)
        thetas.append(theta)


    # construct obj
    objective = 0

    # loss term
    for i in xrange(M):
        objective += cvxmod.atoms.norm1(X_matrix[i] * thetas[i])

    # add smoothness regularization
    reg_const = 5 * float(total_N) / float(M-1)

    for i in xrange(M-1):
        objective += reg_const * cvxmod.norm1(thetas[i] - thetas[i+1])


    # create problem                                    
    prob = cvxmod.problem(cvxmod.minimize(objective))

    # add constraints
    """
    for (i,X) in enumerate(X_matrix):
        p.constr.append(X*thetas[i] <= slacks[i])
        p.constr.append(-X*thetas[i] <= slacks[i])

        #eps = 0.5
        #p.constr.append(slacks[i] - eps <= eps_slacks[i])
        #p.constr.append(0 <= eps_slacks[i])
    """

    # add non-degeneracy constraints
    for i in xrange(1, M-1):
        prob.constr.append(thetas[i][0] + thetas[i][1] == 1.0) # A + C = 1

    # pinch ends
    prob.constr.append(cvxmod.sum(thetas[0]) >= -0.01)
    prob.constr.append(cvxmod.sum(thetas[-1]) >= -0.01)

    print prob

    ###### set values
    from cvxopt import solvers
    solvers.options['reltol'] = 1e-1
    solvers.options['abstol'] = 1e-1
    print solvers.options

    prob.solve()
    

    # wrap up result
    ellipse_stack = {}

    active_layers = dat.keys()
    assert len(active_layers) == M


    # reconstruct original parameterization
    for i in xrange(M):

        theta_ = numpy.array(cvxmod.value(thetas[i]))
        z_layer = active_layers[i]
        ellipse_stack[z_layer] = conic_to_ellipse(theta_)
        ellipse_stack[z_layer].cz = z_layer

    return ellipse_stack
Example 26
def Main():
    options, _ = MakeOpts().parse_args(sys.argv)
    assert options.genes_filename
    assert options.protein_levels_a and options.protein_levels_b

    print 'Reading genes list from', options.genes_filename
    gene_ids = util.ReadProteinIDs(options.genes_filename)

    print 'Reading protein data A from', options.protein_levels_a
    gene_counts_a = util.ReadProteinCounts(options.protein_levels_a)
    print 'Reading protein data B from', options.protein_levels_b
    gene_counts_b = util.ReadProteinCounts(options.protein_levels_b)

    my_counts_a = dict(
        (id, (count, name))
        for id, name, count in util.ExtractCounts(gene_counts_a, gene_ids))
    my_counts_b = dict(
        (id, (count, name))
        for id, name, count in util.ExtractCounts(gene_counts_b, gene_ids))

    overlap_ids = set(my_counts_a.keys()).intersection(my_counts_b.keys())
    x = pylab.matrix([my_counts_a[id][0] for id in overlap_ids])
    y = pylab.matrix([my_counts_b[id][0] for id in overlap_ids])
    labels = [my_counts_b[id][1] for id in overlap_ids]

    xlog = pylab.log10(x)
    ylog = pylab.log10(y)
    a = cvxmod.optvar('a', 1)
    mx = cvxmod.matrix(xlog.T)
    my = cvxmod.matrix(ylog.T)

    p = cvxmod.problem(cvxmod.minimize(cvxmod.norm2(my - a - mx)))
    p.solve(quiet=True)
    offset = cvxmod.value(a)
    lin_factor = 10**offset
    lin_label = 'Y = %.2g*X' % lin_factor
    log_label = 'log10(Y) = %.2g + log10(X)' % offset

    f1 = pylab.figure(0)
    pylab.title('Linear scale')
    xylim = max([x.max(), y.max()]) + 5000
    linxs = pylab.arange(0.0, xylim, 0.1)
    linys = linxs * lin_factor
    pylab.plot(x.tolist()[0], y.tolist()[0], 'g.', label='Protein Data')
    pylab.plot(linxs, linys, 'b-', label=lin_label)
    for x_val, y_val, label in zip(x.tolist()[0], y.tolist()[0], labels):
        pylab.text(x_val, y_val, label, fontsize=8)

    pylab.xlabel(options.a_label)
    pylab.ylabel(options.b_label)
    pylab.legend()
    pylab.xlim((0.0, xylim))
    pylab.ylim((0.0, xylim))

    f2 = pylab.figure(1)
    pylab.title('Log10 scale')
    xylim = max([xlog.max(), ylog.max()]) + 1.0
    pylab.plot(xlog.tolist()[0],
               ylog.tolist()[0],
               'g.',
               label='Log10 Protein Data')
    linxs = pylab.arange(0.0, xylim, 0.1)
    linys = linxs + offset
    pylab.plot(linxs, linys, 'b-', label=log_label)

    for x_val, y_val, label in zip(xlog.tolist()[0], ylog.tolist()[0], labels):
        pylab.text(x_val, y_val, label, fontsize=8)

    pylab.xlabel(options.a_label + ' (log10)')
    pylab.ylabel(options.b_label + ' (log10)')
    pylab.legend()
    pylab.xlim((0.0, xylim))
    pylab.ylim((0.0, xylim))

    pylab.show()
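Note that the cvxmod fit in Main is a one-parameter least squares in log space, min_a ||my - a - mx||_2, whose minimizer is simply the mean log-ratio. Assuming the xlog and ylog arrays from the function are in scope, the same offset follows in closed form:

offset_closed_form = float(pylab.mean(ylog - xlog))  # same value as cvxmod.value(a), up to solver tolerance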
Example 27
def Main():
    options, _ = MakeOpts().parse_args(sys.argv)
    assert options.genes_filename
    assert options.protein_levels_a and options.protein_levels_b
    
    
    print 'Reading genes list from', options.genes_filename
    gene_ids = util.ReadProteinIDs(options.genes_filename)
    
    print 'Reading protein data A from', options.protein_levels_a
    gene_counts_a = util.ReadProteinCounts(options.protein_levels_a)
    print 'Reading protein data B from', options.protein_levels_b
    gene_counts_b = util.ReadProteinCounts(options.protein_levels_b)

    my_counts_a = dict((id, (count, name)) for id, name, count in
                       util.ExtractCounts(gene_counts_a, gene_ids))
    my_counts_b = dict((id, (count, name)) for id, name, count in
                       util.ExtractCounts(gene_counts_b, gene_ids))
        
    overlap_ids = set(my_counts_a.keys()).intersection(my_counts_b.keys())
    x = pylab.matrix([my_counts_a[id][0] for id in overlap_ids])
    y = pylab.matrix([my_counts_b[id][0] for id in overlap_ids])
    labels = [my_counts_b[id][1] for id in overlap_ids]
    
    xlog = pylab.log10(x)
    ylog = pylab.log10(y)
    a = cvxmod.optvar('a', 1)
    mx = cvxmod.matrix(xlog.T)
    my = cvxmod.matrix(ylog.T)
    
    p = cvxmod.problem(cvxmod.minimize(cvxmod.norm2(my - a - mx)))
    p.solve(quiet=True)
    offset = cvxmod.value(a)
    lin_factor = 10**offset
    lin_label = 'Y = %.2g*X' % lin_factor
    log_label = 'log10(Y) = %.2g + log10(X)' % offset
    
    f1 = pylab.figure(0)
    pylab.title('Linear scale')
    xylim = max([x.max(), y.max()]) + 5000
    linxs = pylab.arange(0.0, xylim, 0.1)
    linys = linxs * lin_factor
    pylab.plot(x.tolist()[0], y.tolist()[0], 'g.', label='Protein Data')
    pylab.plot(linxs, linys, 'b-', label=lin_label)
    for x_val, y_val, label in zip(x.tolist()[0], y.tolist()[0], labels):
        pylab.text(x_val, y_val, label, fontsize=8)

    pylab.xlabel(options.a_label)
    pylab.ylabel(options.b_label)
    pylab.legend()
    pylab.xlim((0.0, xylim))
    pylab.ylim((0.0, xylim))
    
    f2 = pylab.figure(1)
    pylab.title('Log10 scale')
    xylim = max([xlog.max(), ylog.max()]) + 1.0    
    pylab.plot(xlog.tolist()[0], ylog.tolist()[0], 'g.', label='Log10 Protein Data')
    linxs = pylab.arange(0.0, xylim, 0.1)
    linys = linxs + offset
    pylab.plot(linxs, linys, 'b-', label=log_label)
    
    for x_val, y_val, label in zip(xlog.tolist()[0], ylog.tolist()[0], labels):
        pylab.text(x_val, y_val, label, fontsize=8)
    
    pylab.xlabel(options.a_label + ' (log10)')
    pylab.ylabel(options.b_label + ' (log10)')
    pylab.legend()
    pylab.xlim((0.0, xylim))
    pylab.ylim((0.0, xylim))
    
    
    
    pylab.show()
Example 28
def fit_ellipse_eps_insensitive(x, y):
    """
    fit ellipse using epsilon-insensitive loss
    """

    x = numpy.array(x)
    y = numpy.array(y)

    print "shapes", x.shape, y.shape

    assert len(x) == len(y)

    N = len(x)
    D = 5

    dat = numpy.zeros((N, D))
    dat[:,0] = x*x
    dat[:,1] = y*y
    #dat[:,2] = y*x
    dat[:,2] = x
    dat[:,3] = y
    dat[:,4] = numpy.ones(N)


    print dat.shape
    dat = cvxmod.matrix(dat)
    #### parameters

    # data
    X = cvxmod.param("X", N, D)

    # parameter for eps-insensitive loss
    eps = cvxmod.param("eps", 1)


    #### variables

    # parameter vector
    theta = cvxmod.optvar("theta", D)

    # dim = (N x 1)
    s = cvxmod.optvar("s", N)

    t = cvxmod.optvar("t", N)

    # simple objective 
    objective = cvxmod.sum(t)
    
    # create problem                                    
    p = cvxmod.problem(cvxmod.minimize(objective))
    
    # add constraints 
    # (N x D) * (D X 1) = (N X 1)
    p.constr.append(X*theta <= s)
    p.constr.append(-X*theta <= s)
    
    p.constr.append(s - eps <= t)
    p.constr.append(0 <= t)
    
    #p.constr.append(theta[4] == 1)
    # trace constraint
    p.constr.append(theta[0] + theta[1] == 1)
    
    ###### set values
    X.value = dat
    eps.value = 0.0
    #solver = "mosek" 
    #p.solve(lpsolver=solver)
    p.solve()
    
    cvxmod.printval(theta)
    theta_ = numpy.array(cvxmod.value(theta))
    ellipse = conic_to_ellipse(theta_)

    return ellipse
Example 29
def solve_boosting(out, labels, nu, solver):
    '''
    solve boosting formulation used by Gehler and Nowozin

    @param out: matrix (N,F) of predictions (for each f_i) for all examples
    @param labels: vector (N,1) label for each example
    @param nu: regularization constant
    @param solver: which LP solver to use, e.g. 'mosek' or 'glpk'
    '''
    
    
    
    N = out.size[0]
    F = out.size[1]
    
    assert(N==len(labels))
    
    
    norm_fact = 1.0 / (nu * float(N))
    
    print norm_fact
    
    label_matrix = cvxmod.zeros((N,N))
    
    # avoid point-wise product
    for i in xrange(N):
        label_matrix[i,i] = labels[i] 
    
    
    #### parameters
    
    f = cvxmod.param("f", N, F)
    
    y = cvxmod.param("y", N, N, symm=True)
    
    norm = cvxmod.param("norm", 1) 
    
    #### variables
    
    # rho
    rho = cvxmod.optvar("rho", 1)
    
    # dim = (N x 1)
    chi = cvxmod.optvar("chi", N)
    
    # dim = (F x 1)
    beta = cvxmod.optvar("beta", F)
    
    
    #objective = -rho + cvxmod.sum(chi) * norm_fact + square(norm2(beta)) 
    objective = -rho + cvxmod.sum(chi) * norm_fact
    
    print objective
    
    # create problem                                    
    p = cvxmod.problem(cvxmod.minimize(objective))
    
    
    # create constraint for probability simplex
    #p.constr.append(beta |cvxmod.In| probsimp(F))
    p.constr.append(cvxmod.sum(beta)==1.0)
    #p.constr.append(square(norm2(beta)) <= 1.0)
    p.constr.append(beta >= 0.0)
    
    
    #    y       f     beta          y    f*beta      y*f*beta
    # (N x N) (N x F) (F x 1) --> (N x N) (N x 1) --> (N x 1)
    p.constr.append(y * (f * beta) + chi >= rho)
    
    
    ###### set values
    f.value = out
    y.value = label_matrix
    norm.value = norm_fact 
    
    p.solve(lpsolver=solver)
    

    weights = numpy.array(cvxmod.value(beta))
    
    #print weights
    
    cvxmod.printval(chi)
    cvxmod.printval(beta)
    cvxmod.printval(rho)
    

    return p
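A toy input sketch for solve_boosting above (the prediction matrix, labels and nu value are made up for illustration; the cvxmod conversion and the call are commented because they need cvxmod and an LP solver such as glpk):

import numpy
preds = numpy.array([[ 1.0,  0.5],
                     [ 0.8, -0.2],
                     [-1.0, -0.7],
                     [-0.6,  0.3],
                     [ 0.9,  0.9],
                     [-0.8, -1.0]])        # N=6 examples, F=2 weak predictors
labels = [1, 1, -1, -1, 1, -1]
# out = cvxmod.matrix(preds)
# p = solve_boosting(out, labels, 0.2, 'glpk')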
Example 30
def fit_ellipse_linear(x, y):
    """
    fit ellipse using absolute loss
    """

    x = numpy.array(x)
    y = numpy.array(y)

    print "shapes", x.shape, y.shape

    assert len(x) == len(y)

    N = len(x)
    D = 6

    dat = numpy.zeros((N, D))
    dat[:,0] = x*x
    dat[:,1] = y*y
    dat[:,2] = y*x
    dat[:,3] = x
    dat[:,4] = y
    dat[:,5] = numpy.ones(N)


    print dat.shape

    # per-point normalization: diagonal matrix of row norms, computed on the
    # numpy array before it is converted to a cvxmod matrix
    norm = numpy.zeros((N,N))
    for i in range(N):
        norm[i,i] = numpy.sqrt(numpy.dot(dat[i], numpy.transpose(dat[i])))
    norm = cvxmod.matrix(norm)

    dat = cvxmod.matrix(dat)

    #### parameters

    # data
    X = cvxmod.param("X", N, D)
    Q_grad = cvxmod.param("Q_grad", N, N)


    #### variables
    
    # parameter vector
    theta = cvxmod.optvar("theta", D)
    
    # dim = (N x 1)
    s = cvxmod.optvar("s", N)
    
    # simple objective 
    objective = cvxmod.sum(s)
    
    # create problem                                    
    p = cvxmod.problem(cvxmod.minimize(objective))
    
    # add constraints 
    # (N x D) * (D X 1) = (N x N) * (N X 1)
    p.constr.append(X*theta <= Q_grad*s)
    p.constr.append(-X*theta <= Q_grad*s)
    
    #p.constr.append(theta[4] == 1)
    # trace constraint
    p.constr.append(theta[0] + theta[1] == 1)
    
    ###### set values
    X.value = dat
    Q_grad.value = norm
    #solver = "mosek" 
    #p.solve(lpsolver=solver)
    p.solve()
    
    cvxmod.printval(theta)
    theta_ = numpy.array(cvxmod.value(theta))
    ellipse = conic_to_ellipse(theta_)

    return ellipse
Example 31
 def initialize_LQP(self):
     self._Fopti = 0.
     self._p = problem(minimize(self._Fopti))
Example 32
 def initialize_LQP(self):
     self._Fopti = 0.0
     self._p = problem(minimize(self._Fopti))
Example 33
File: quanto.py Project: lab11/mni
    def get_energy_per_quanto_state_all(self, baseFileName, convexOpt=False):
        """This function evaluates the .pwr file and calculates the individual
        power consumption per state for every node. It then sets the variable
        statePower on every node to a dictionary, where the keys are the
        string representation of the state, and the value is the average power
        consumption for that state.

        This function expects a very specific .pwr file, where one line starts
        with "#states:". This line encodes all the states that are considered
        by quanto.
        """

        for n in self.nodes:
            f = open("%s.%s.log.pwr"%(baseFileName, n.ip), "r")
            X = []
            Y = []
            W = []
            totalTime = 0
            totalEnergy = 0
            states = []
            maxEntries = 0
            for line in f:
                l = line.strip().split()
                if len(l) > 0 and l[0] == "#states:":
                        # this line encodes the names of all the states.
                        states = l[1:]
                        continue
                if len(states) == 0 or len(l) != len(states)+3:
                    # +3 comes from the time, icount and occurrence fields
                    continue
                #time is in uS, convert it to seconds
                time = float(l[-3])/1e6
                icount = int(l[-2])
                occurences = int(l[-1])
                # cut away the time and icount values
                l = l[0:-3]
                activeStates = []
                for s in l:
                    if s == '-':
                        s = '0'
                        #continue
                    activeStates.append(int(s))

                # add the constant power state
                activeStates.append(1)

                if len(activeStates) > maxEntries:
                    maxEntries = len(activeStates)

                if time <= 0 or icount <= 0:
                    # FIXME: this is a wrong line at the end of the quanto files. I
                    # don't know why this happens!!!
                    continue

                E = n.get_power(icount, time)
                if E == -1:
                    raise CalibrationError, "Node with IP %s is not calibrated! \
Did you forget to load the calibration file?"%(n.ip,)
                if E < 0:
                    raise CalibrationError, "Node with IP %s returned a \
negative Energy value %f for icount %d, time %f!"%(n.ip, E, icount, time)
                    continue
                X.append(activeStates)
                Y.append(E)
                W.append(numpy.sqrt(E*time))
                totalTime += time
                totalEnergy += E*time



            # filter out the incomplete datasets
            Xnew = []
            Ynew = []
            Wnew = []
            for i in range(len(Y)):
                if len(X[i]) == maxEntries:
                    Xnew.append(X[i])
                    Ynew.append(Y[i])
                    Wnew.append(W[i])
            X = numpy.matrix(Xnew)
            Y = numpy.matrix(Ynew)
            W = numpy.matrix(numpy.diag(Wnew))

            # filter states with all 0's
            states.append('const')
            deletedLines = 0
            deletedStates = []
            alwaysOnStates = []
            # iterate through all the states, except the 'const' state
            for i in range(len(states)-1):
                correctedI = i - deletedLines
                if numpy.sum(X.T[correctedI]) == 0:
                    deletedStates.append(states[correctedI])
                    X = numpy.delete(X, numpy.s_[correctedI:correctedI+1], axis=1)
                    states = numpy.delete(states, correctedI)
                    deletedLines += 1
                elif numpy.sum(X.T[correctedI]) == len(X):
                    # this state is always active. We have to remove it and
                    # put it into the "const" category!
                    alwaysOnStates.append(states[correctedI])
                    X = numpy.delete(X, numpy.s_[correctedI:correctedI+1], axis=1)
                    states = numpy.delete(states, correctedI)
                    deletedLines += 1

            # search for linear dependent lines
            #for i in range(len(X)):
            #    #print X[i]
            #    for j in range(i+1, len(X)):
            #        #if numpy.sum(X[i]) == numpy.sum(X[j]):
            #        #    print X[i], X[j]
            #        equal = True
            #        for m in range(X[i].shape[1]):
            #            if X[i,m] != X[j,m]:
            #                equal = False
            #                break

            #        if equal:
            #            print X[i], X[j]

            # maxEntries includes the const state, which is not in the states
            # variable yet
            #states = states[:maxEntries - 1]

            #xtwx = X.T*X
            #for i in range(len(xtwx)):
            #    print xtwx[i]
            #print states

            #(x, resids, rank, s) = numpy.linalg.lstsq(W*X, W*Y.T)
            #(x, resids, rank, s) = numpy.linalg.lstsq(X, Y.T)

            if cvxAvailable and convexOpt:

                A = cvxmod.matrix(W*X)
                b = cvxmod.matrix(W*Y.T)
                x = cvxmod.optvar('x', cvxmod.size(A)[1])

                print A
                print b

                p = cvxmod.problem(cvxmod.minimize(norm2(A*x - b)), [x >= 0])
                #p.constr.append(x |In| probsimp(5))
                p.solve()

                print "Optimal problem value is %.4f." % p.value
                cvxmod.printval(x)
                x = x.value

            else:

                try:
                    x = numpy.linalg.inv(X.T*W*X)*X.T*W*Y.T
                except numpy.linalg.LinAlgError, e:
                    sys.stderr.write("State Matrix X for node with IP %s is singular. We did not \
    collect enough energy and state information. Please run the application for \
    longer!\n"%(n.ip,))
                    sys.stderr.write(repr(e))
                    sys.stderr.write("\n")
                    sys.stderr.flush()
                    n.statePower = {}
                    n.alwaysOffStates = []
                    n.alwaysOnStates = []
                    continue
            n.statePower = {}
            for i in range(len(states)):
                # the entries in x are matrices. convert them back into a
                # number
                n.statePower[states[i]] = float(x[i])
            n.alwaysOffStates = deletedStates
            n.alwaysOnStates = alwaysOnStates
            n.averagePower = totalEnergy / totalTime
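As an aside, the nonnegative weighted least-squares step in the cvxmod branch above could also be solved with SciPy's nnls; a self-contained sketch on toy data (an alternative, not what quanto.py actually uses):

import numpy
from scipy.optimize import nnls
A = numpy.array([[1.0, 1.0],     # rows: observations, columns: states (toy values)
                 [1.0, 0.0],
                 [0.0, 1.0]])
b = numpy.array([5.0, 2.0, 3.1])
per_state_power, resid = nnls(A, b)   # least squares with per-state powers constrained >= 0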
Example 34
def solve_plain_l1_cvxmod(A, y):
    x = optvar('x', A.size[1])
    p = problem(minimize(norm1(x)), [A*x == y])
    p.solve(quiet=True, solver='glpk')
    return x.value
Example 35
def solve_boosting(out, labels, nu, solver):
    '''
    solve boosting formulation used by Gehler and Nowozin

    @param out: matrix (N,F) of predictions (for each f_i) for all examples
    @param labels: vector (N,1) label for each example
    @param nu: regularization constant
    @param solver: which LP solver to use, e.g. 'mosek' or 'glpk'
    '''
    
    
    
    N = out.size[0]
    F = out.size[1]
    
    assert(N==len(labels))
    
    
    norm_fact = 1.0 / (nu * float(N))
    
    print norm_fact
    
    label_matrix = cvxmod.zeros((N,N))
    
    # avoid point-wise product
    for i in xrange(N):
        label_matrix[i,i] = labels[i] 
    
    
    #### parameters
    
    f = cvxmod.param("f", N, F)
    
    y = cvxmod.param("y", N, N, symm=True)
    
    norm = cvxmod.param("norm", 1) 
    
    #### variables
    
    # rho
    rho = cvxmod.optvar("rho", 1)
    
    # dim = (N x 1)
    chi = cvxmod.optvar("chi", N)
    
    # dim = (F x 1)
    beta = cvxmod.optvar("beta", F)
    
    
    #objective = -rho + cvxmod.sum(chi) * norm_fact + square(norm2(beta)) 
    objective = -rho + cvxmod.sum(chi) * norm_fact
    
    print objective
    
    # create problem                                    
    p = cvxmod.problem(cvxmod.minimize(objective))
    
    
    # create contraint for probability simplex
    #p.constr.append(beta |cvxmod.In| probsimp(F))
    p.constr.append(cvxmod.sum(beta)==1.0)
    #p.constr.append(square(norm2(beta)) <= 1.0)
    p.constr.append(beta >= 0.0)
    
    
    #    y       f     beta          y    f*beta      y*f*beta
    # (N x N) (N x F) (F x 1) --> (N x N) (N x 1) --> (N x 1)
    p.constr.append(y * (f * beta) + chi >= rho)
    
    
    ###### set values
    f.value = out
    y.value = label_matrix
    norm.value = norm_fact 
    
    p.solve(lpsolver=solver)
    

    weights = numpy.array(cvxmod.value(beta))
    
    #print weights
    
    cvxmod.printval(chi)
    cvxmod.printval(beta)
    cvxmod.printval(rho)
    

    return p
Example 36
def solve_svm(out, labels, nu, solver):
    '''
    solve boosting formulation used by Gehler and Nowozin
    
    @param out: matrix (N,F) of predictions (for each f_i) for all examples
    @param labels: vector (N,1) label for each example 
    @param nu: regularization constant
    @param solver: which solver to use. options: 'mosek', 'glpk'
    '''

    # get dimension
    N = out.size[0]
    F = out.size[1]

    assert N == len(labels), str(N) + " " + str(len(labels))

    norm_fact = 1.0 / (nu * float(N))
    print "normalization factor %f" % (norm_fact)

    # avoid point-wise product
    label_matrix = cvxmod.zeros((N, N))

    for i in xrange(N):
        label_matrix[i, i] = labels[i]

    #### parameters

    f = cvxmod.param("f", N, F)
    y = cvxmod.param("y", N, N, symm=True)
    norm = cvxmod.param("norm", 1)

    #### variables

    # rho
    rho = cvxmod.optvar("rho", 1)

    # dim = (N x 1)
    chi = cvxmod.optvar("chi", N)

    # dim = (F x 1)
    beta = cvxmod.optvar("beta", F)

    #objective = -rho + cvxmod.sum(chi) * norm_fact + square(norm2(beta))
    objective = -rho + cvxmod.sum(chi) * norm_fact

    print objective

    # create problem
    p = cvxmod.problem(cvxmod.minimize(objective))

    # create constraints for probability simplex
    #p.constr.append(beta |cvxmod.In| probsimp(F))
    p.constr.append(cvxmod.sum(beta) == 1.0)
    p.constr.append(beta >= 0.0)
    p.constr.append(chi >= 0.0)

    # attempt to perform non-sparse boosting
    #p.constr.append(square(norm2(beta)) <= 1.0)

    #    y       f     beta          y    f*beta      y*f*beta
    # (N x N) (N x F) (F x 1) --> (N x N) (N x 1) --> (N x 1)
    p.constr.append(y * (f * beta) + chi >= rho)

    # set values for parameters
    f.value = out
    y.value = label_matrix
    norm.value = norm_fact

    print "solving problem"
    print "============================================="
    print p
    print "============================================="

    # start solver
    p.solve(lpsolver=solver)

    # print variables
    cvxmod.printval(chi)
    cvxmod.printval(beta)
    cvxmod.printval(rho)

    return numpy.array(cvxmod.value(beta))