def cost(self, dgvel):
    """Return the task cost expression norm2(J*dgvel + (dJ*gvel - dVdes)).

    TODO: add a (possibly singular) weighting matrix (thus allow to
    control the orientation)
    """
    frame = self._controlled_frame
    gvel = self._world.gvel
    # translational part of the frame jacobian and its derivative
    jac = frame.jacobian[3:6, :]
    djac = frame.djacobian[3:6, :]
    # desired position taken from the target frame pose
    pos_err = self._target_frame.pose[0:3, 3] - frame.pose[0:3, 3]
    # PD-like desired acceleration (kp = 10, kd = 2*sqrt(10))
    dVdes = 10. * dot(frame.pose[0:3, 0:3].T, pos_err) \
            - 2. * sqrt(10.) * dot(jac, gvel)
    J = param(value=matrix(jac))
    bias = param(value=matrix(dot(djac, gvel) - dVdes))
    return norm2(J * dgvel + bias)
def cost(self, dgvel):
    """Return the cost function of the task for the candidate dgvel.

    TODO: add a (possibly singular) weighting matrix (thus allow to
    control the orientation)
    """
    cf = self._controlled_frame
    gvel = self._world.gvel
    J_ = cf.jacobian[3:6, :]
    dJ = cf.djacobian[3:6, :]
    # position error expressed in the controlled frame
    err = dot(cf.pose[0:3, 0:3].T,
              self._target_frame.pose[0:3, 3] - cf.pose[0:3, 3])
    # critically damped tracking: kp = 10, kd = 2*sqrt(10)
    dVdes = 10. * err - 2. * sqrt(10.) * dot(J_, gvel)
    return norm2(param(value=matrix(J_)) * dgvel +
                 param(value=matrix(dot(dJ, gvel) - dVdes)))
def _compute(self): start = datetime.datetime.now() gamma = self.gamma (N,d) = self.data.shape X = self.data Xcmf = ( (X.reshape(N,1,d) > transpose(X.reshape(N,1,d),[1,0,2])).prod(2).sum(1,dtype=float) / N ).reshape([N,1]) sigma = .75 / sqrt(N) K = self._K( X.reshape(N,1,d), transpose(X.reshape(N,1,d), [1,0,2]), gamma ).reshape([N,N]) #NOTE: this integral depends on K being the gaussian kernel Kint = ( (1.0/gamma)*scipy.special.ndtr( (X-X.T)/gamma ) ) alpha = cvxmod.optvar( 'alpha',N,1) alpha.pos = True pK = cvxmod.param( 'K',N,N ) pK.psd = True pK.value = cvxopt.matrix(K,(N,N) ) pKint = cvxmod.param( 'Kint',N,N ) pKint.value = cvxopt.matrix(Kint,(N,N)) #pKint.pos = True pXcmf = cvxmod.param( 'Xcmf',N,1) pXcmf.value = cvxopt.matrix(Xcmf, (N,1)) #pXcmf.pos = True objective = cvxmod.minimize( cvxmod.atoms.quadform(alpha, pK) ) eq1 = cvxmod.abs( pXcmf - ( pKint * alpha ) ) <= sigma eq2 = cvxmod.sum( alpha ) == 1.0 # Solve! p = cvxmod.problem( objective = objective, constr = [eq1, eq2] ) start = datetime.datetime.now() p.solve() duration = datetime.datetime.now() - start print "optimized in %ss" % (float(duration.microseconds)/1000000) beta = ma.masked_less( alpha.value, 1e-7 ) mask = ma.getmask( beta ) data = ma.array(X,mask=mask) self.Fl = Xcmf self.beta = beta.compressed().reshape([ 1, len(beta.compressed()) ]) self.SV = data.compressed().reshape([len(beta.compressed()),1]) print "%s SV's found" % len(self.SV)
def fit_ellipse_squared(x, y): """ fit ellipoid using squared loss """ assert len(x) == len(y) N = len(x) D = 5 dat = numpy.zeros((N, D)) dat[:,0] = x*x dat[:,1] = y*y #dat[:,2] = x*y dat[:,2] = x dat[:,3] = y dat[:,4] = numpy.ones(N) print dat.shape dat = cvxmod.matrix(dat) #### parameters # data X = cvxmod.param("X", N, D) #### varibales # parameter vector theta = cvxmod.optvar("theta", D) # simple objective objective = cvxmod.atoms.norm2(X*theta) # create problem p = cvxmod.problem(cvxmod.minimize(objective)) p.constr.append(theta[0] + theta[1] == 1) ###### set values X.value = dat #solver = "mosek" #p.solve(lpsolver=solver) p.solve() cvxmod.printval(theta) theta_ = numpy.array(cvxmod.value(theta)) ellipse = conic_to_ellipse(theta_) return ellipse
def compute_combinaison_safe(self, target, rcond=0.0, regul=None):
    """Computes the combination of base targets allowing to reproduce
    'target' (or giving the best approximation), while keeping
    coefficients between 0 and 1.

    arguments :
    - target : target to fit
    - rcond : cut off on the singular values as a fraction of the
      biggest one. Only base vectors corresponding to singular values
      bigger than rcond*largest_singular_value
    - regul : regularisation factor for least square fitting. This
      force the algorithm to use fewer targets.
    """
    from cvxmod import optvar, param, norm2, norm1, problem, matrix, minimize
    # accept a file name as well as an already-loaded target
    if type(target) is str or type(target) is unicode:
        target = read_target(target)
    # keep only base vectors with large enough singular values
    keep = self.s >= rcond * self.s[0]
    u = self.u[:, keep]
    vt = self.vt[keep]
    s = self.s[keep]
    dim, ntargets = self.vt.shape
    nvert = target.shape[0]
    # project the target onto the reduced basis
    # NOTE(review): s.reshape(dim, 1) assumes no vector was filtered out
    # (always true for the default rcond=0.0) -- confirm for rcond > 0
    pt = np.dot(u.T, target.flatten().reshape(nvert * 3, 1))
    A = param('A', value=matrix(s.reshape(dim, 1) * vt))
    b = param('b', value=matrix(pt))
    x = optvar('x', ntargets)
    box = [x >= 0., x <= 1.]
    if regul is None:
        prob = problem(minimize(norm2(A * x - b)), box)
    else:
        prob = problem(minimize(norm2(A * x - b) + regul * norm1(x)), box)
    prob.solve()
    coeffs = np.array(x.value).flatten()
    # Body setting files have a precision of at most 1.e-3
    return coeffs * (coeffs >= 1e-3)
def _compute(self): C = self.C gamma = self.gamma (N, d) = self.data.shape X = self.data Xcmf = ( (X.reshape(N, 1, d) > transpose(X.reshape(N, 1, d), [1, 0, 2])).prod(2).sum(1, dtype=float) / N ).reshape([N, 1]) sigma = 0.75 / sqrt(N) K = self._K(X.reshape(N, 1, d), transpose(X.reshape(N, 1, d), [1, 0, 2]), gamma).reshape([N, N]) # NOTE: this integral depends on K being the gaussian kernel Kint = (1.0 / gamma) * scipy.special.ndtr((X - X.T) / gamma) alpha = cvxmod.optvar("alpha", N, 1) alpha.pos = True xi = cvxmod.optvar("xi", N, 1) xi.pos = True pXcmf = cvxmod.param("Xcmf", N, 1) pXcmf.pos = True pXcmf.value = cvxopt.matrix(Xcmf, (N, 1)) pKint = cvxmod.param("Kint", N, N) pKint.value = cvxopt.matrix(Kint, (N, N)) objective = cvxmod.minimize(cvxmod.sum(cvxmod.atoms.power(alpha, 2)) + (C * cvxmod.sum(xi))) eq1 = cvxmod.abs((pKint * alpha) - pXcmf) <= sigma + xi eq2 = cvxmod.sum(alpha) == 1.0 # Solve! p = cvxmod.problem(objective=objective, constr=[eq1, eq2]) p.solve() beta = ma.masked_less(alpha.value, 1e-7) mask = ma.getmask(beta) data = ma.array(X, mask=mask) self.beta = beta.compressed().reshape([1, len(beta.compressed())]) self.SV = data.compressed().reshape([len(beta.compressed()), 1]) print "%s SV's found" % len(self.SV)
def update(self, dt=None):
    """Run one LQP control step and return (gforce, impedance).

    Generalized forces are obtained by minimizing ||tau||^2 plus the
    weighted task costs, subject to the dynamics equation and
    symmetric torque bounds.
    """
    # refresh problem data before building the program
    self.initialize_LQP()
    self.get_situation()
    self.compute_objectives()
    self.write_tasks()
    self.write_constraints()
    self.solve_LQP()

    ndof = self._wndof
    # problem data
    M = param("M", value=matrix(self.world.mass))
    N = param("N", value=matrix(self.world.nleffects))
    gvel = param("gvel", value=matrix(self.world.gvel))
    taumax = param("taumax", value=matrix(array([10., 10., 10.])))
    # decision variables
    dgvel = optvar("dgvel", ndof)
    tau = optvar("tau", ndof)

    ### resolution ###
    total_cost = norm2(tau)
    for task in self._tasks:
        total_cost += 100. * task.cost(dgvel)

    prob = problem(minimize(total_cost))
    # equation of motion: M*dgvel + N*gvel == tau
    prob.constr.append(M * dgvel + N * gvel == tau)
    # torque bounds
    prob.constr.append(-taumax <= tau)
    prob.constr.append(tau <= taumax)
    prob.solve(True)

    tau_val = array(tau.value).reshape(ndof)
    self._rec_tau.append(tau_val)
    impedance = zeros((ndof, ndof))
    return (tau_val, impedance)
def update(self, dt=None):
    """One control step: solve the torque LQP, return (gforce, impedance)."""
    # refresh internal LQP state
    self.initialize_LQP()
    self.get_situation()
    self.compute_objectives()
    self.write_tasks()
    self.write_constraints()
    self.solve_LQP()

    # dynamics parameters
    M = param('M', value=matrix(self.world.mass))
    N = param('N', value=matrix(self.world.nleffects))
    gvel = param('gvel', value=matrix(self.world.gvel))
    taumax = param('taumax', value=matrix(array([10., 10., 10.])))
    # unknowns: joint accelerations and torques
    dgvel = optvar('dgvel', self._wndof)
    tau = optvar('tau', self._wndof)

    ### resolution ###
    # torque regularization plus weighted task costs
    cost = norm2(tau)
    for task in self._tasks:
        cost += 100. * task.cost(dgvel)

    p = problem(minimize(cost))
    # M*dgvel + N*gvel == tau (equation of motion)
    p.constr.append(M * dgvel + N * gvel == tau)
    p.constr.append(-taumax <= tau)
    p.constr.append(tau <= taumax)
    p.solve(True)

    tau = array(tau.value).reshape(self._wndof)
    self._rec_tau.append(tau)
    gforce = tau
    impedance = zeros((self._wndof, self._wndof))
    return (gforce, impedance)
def reconstruct_target(target_file, base_prefix, regul=None):
    """Reconstruct the target in 'target_file' using constrained, and
    optionally regularized, least square optimisation.

    arguments :
        target_file : file containing the target to fit
        base_prefix : prefix for the files of the base.
        regul : optional L1 regularisation factor (favors fewer targets)
    """
    vlist = read_vertex_list(base_prefix + '_vertices.dat')
    t = read_target(target_file, vlist)
    # load the SVD base
    U = load(base_prefix + "_U.npy").astype('float')
    S = load(base_prefix + "_S.npy").astype('float')
    V = load(base_prefix + "_V.npy").astype('float')
    ntargets, dim = V.shape
    nvert = len(t)
    # project the target into the reduced base
    pt = dot(U.T, t.reshape(nvert * 3, 1))
    pbase = S[:dim].reshape(dim, 1) * V.T
    A = param('A', value=matrix(pbase))
    b = param('b', value=matrix(pt))
    x = optvar('x', ntargets)
    # box-constrained least squares, optionally L1-regularized
    if regul is None:
        prob = problem(minimize(norm2(A * x - b)), [x >= 0., x <= 1.])
    else:
        prob = problem(minimize(norm2(A * x - b) + regul * norm1(x)),
                       [x >= 0., x <= 1.])
    prob.solve()
    # write the significant coefficients next to the target file
    with open(base_prefix + "_names.txt") as f:
        tnames = sorted(line.strip() for line in f)
    base, ext = os.path.splitext(target_file)
    with open(base + ".bs", "w") as f:
        for tn, v in zip(tnames, x.value):
            if v >= 1e-3:
                f.write("%s %0.3f\n" % (tn, v))
def run_opt(feature_lists, reference_indices, alpha): """ run_opt( feature_lists ) -> weights feature_lists is a list of I image_feature_sets image_feature_sets are a list of P stacked_features stacked_features are a list of N different feature types reference_indices is a set of indices which will be held out as "reference" performs the opt: min sum_{i_r in ref_idx} sum_{i < I not in ref_idx} sum_{p < P} w' * ||f_{i_r,p,n} - f_{i,p,n}||^2 - alpha/(P-1) * sum_{p'<p} || f_{i_r,p,n} - f_{i,p',n} ||^2 """ I = len(feature_lists) P = len(feature_lists[0]) N = len(feature_lists[0][0]) non_reference_indices = [i for i in range(I) if not i in reference_indices] closeness_reward = np.zeros(N) uniqueness_penalty = np.zeros(N) f = feature_lists for i_r in reference_indices: for i in non_reference_indices: for p in range(P): for n in range(N): closeness_reward[n] += feature_distance( f[i_r][p][n], f[i][p][n]) for p_false in range(p): uniqueness_penalty[n] += feature_distance( f[i_r][p][n], f[i][p_false][n]) c = cvx.param('c', value=cvx.matrix(closeness_reward - alpha / float(P - 1) * uniqueness_penalty)) print c.value w = cvx.optvar('w', N) w.pos = True w | cvx.In | cvx.norm1ball(N) p = cvx.problem() p.objective = cvx.minimize(cvx.tp(c) * w) p.constr = [cvx.sum(w) == 1] print "Running solver" p.solve() print "Ran!" return np.array(w.value)
def run_opt( feature_lists, reference_indices, alpha ): """ run_opt( feature_lists ) -> weights feature_lists is a list of I image_feature_sets image_feature_sets are a list of P stacked_features stacked_features are a list of N different feature types reference_indices is a set of indices which will be held out as "reference" performs the opt: min sum_{i_r in ref_idx} sum_{i < I not in ref_idx} sum_{p < P} w' * ||f_{i_r,p,n} - f_{i,p,n}||^2 - alpha/(P-1) * sum_{p'<p} || f_{i_r,p,n} - f_{i,p',n} ||^2 """ I = len( feature_lists ) P = len( feature_lists[0] ) N = len( feature_lists[0][0] ) non_reference_indices = [i for i in range(I) if not i in reference_indices] closeness_reward = np.zeros( N ) uniqueness_penalty = np.zeros( N ) f = feature_lists for i_r in reference_indices: for i in non_reference_indices: for p in range(P): for n in range(N): closeness_reward[ n ] += feature_distance( f[i_r][p][n], f[i][p][n] ) for p_false in range(p): uniqueness_penalty[ n ] += feature_distance( f[i_r][p][n], f[i][p_false][n] ) c = cvx.param('c', value = cvx.matrix( closeness_reward - alpha / float(P-1) * uniqueness_penalty ) ) print c.value w = cvx.optvar('w', N ) w.pos = True w | cvx.In | cvx.norm1ball(N) p = cvx.problem() p.objective = cvx.minimize( cvx.tp(c) * w ) p.constr = [ cvx.sum(w) == 1] print "Running solver" p.solve() print "Ran!" return np.array( w.value )
def fit(self, data):
    """Fit theta by minimizing ||X*theta||_2 with the trace constraint
    theta[0] + theta[1] == 1, where X = phi_of_x(data)."""
    design = phi_of_x(data)
    N, D = design.shape[0], design.shape[1]

    # data parameter and conic parameter vector
    X = cvxmod.param("X", N, D)
    theta = cvxmod.optvar("theta", D)

    prob = cvxmod.problem(cvxmod.minimize(cvxmod.atoms.norm2(X * theta)))
    prob.constr.append(theta[0] + theta[1] == 1)

    X.value = cvxmod.matrix(design)
    prob.solve()
    cvxmod.printval(theta)
    return numpy.array(cvxmod.value(theta))
def fit_ellipse_stack_squared(dx, dy, dz, di):
    """fit ellipsoid using squared loss

    idea to learn all stacks together including smoothness

    BUGFIX: layers are now processed in sorted z order. Previously the
    smoothness regularizer coupled thetas in arbitrary ``dict.keys()``
    order, so "adjacent" thetas were not geometrically adjacent layers.
    Also removed a dead pre-filled ellipse_stack list that was always
    overwritten by the result dict.
    """
    # sanity check
    assert len(dx) == len(dy)
    assert len(dx) == len(dz)
    assert len(dx) == len(di)

    # group points by z-layer
    dat = defaultdict(list)
    for idx in range(len(dx)):
        dat[dz[idx]].append([dx[idx], dy[idx], di[idx]])

    total_N = len(dx)
    M = len(dat.keys())
    D = 5

    # increasing z so the smoothness term couples neighboring layers
    active_layers = sorted(dat.keys())

    X_matrix = []
    thetas = []
    for z in active_layers:
        x = numpy.array(dat[z])[:, 0]
        y = numpy.array(dat[z])[:, 1]
        # log intensities used as per-point weights
        i = numpy.log(numpy.array(dat[z])[:, 2])
        ity = numpy.diag(i)

        N = len(x)
        d = numpy.zeros((N, D))
        d[:, 0] = x * x
        d[:, 1] = y * y
        d[:, 2] = x
        d[:, 3] = y
        d[:, 4] = numpy.ones(N)

        # weight design-matrix rows by log-intensity
        old_shape = d.shape
        d = numpy.dot(ity, d)
        assert d.shape == old_shape

        X = cvxmod.param("X" + str(z), N, D)
        X.value = cvxmod.matrix(d)
        X_matrix.append(X)
        thetas.append(cvxmod.optvar("theta" + str(z), D))

    # squared loss per layer
    objective = 0
    for (i, X) in enumerate(X_matrix):
        objective += cvxmod.sum(cvxmod.atoms.square(X * thetas[i]))

    # smoothness regularization between neighboring layers
    reg_const = float(total_N) / float(M - 1)
    for i in xrange(M - 1):
        objective += reg_const * cvxmod.sum(
            cvxmod.atoms.square(thetas[i] - thetas[i + 1]))

    p = cvxmod.problem(cvxmod.minimize(objective))
    # non-degeneracy: A + C == 1 for every layer
    for i in xrange(M):
        p.constr.append(thetas[i][0] + thetas[i][1] == 1)
    p.solve()

    # wrap up result, keyed by z-layer
    ellipse_stack = {}
    for i in xrange(M):
        theta_ = numpy.array(cvxmod.value(thetas[i]))
        z_layer = active_layers[i]
        ellipse_stack[z_layer] = conic_to_ellipse(theta_)
        ellipse_stack[z_layer].cz = z_layer
    return ellipse_stack
def fit_ellipse_stack_abs(dx, dy, dz, di):
    """fit ellipsoid stack using absolute (L1) loss

    idea to learn all stacks together including smoothness

    BUGFIX: layers are now processed in sorted z order. Previously the
    L1 smoothness term coupled thetas in arbitrary ``dict.keys()``
    order, so "adjacent" thetas were not geometrically adjacent layers.
    """
    # sanity check
    assert len(dx) == len(dy)
    assert len(dx) == len(dz)
    assert len(dx) == len(di)

    # group points by z-layer
    dat = defaultdict(list)
    for idx in range(len(dx)):
        dat[dz[idx]].append([dx[idx], dy[idx], di[idx]])

    total_N = len(dx)
    M = len(dat.keys())
    D = 5

    # increasing z so the smoothness term couples neighboring layers
    active_layers = sorted(dat.keys())

    X_matrix = []
    thetas = []
    for z in active_layers:
        x = numpy.array(dat[z])[:, 0]
        y = numpy.array(dat[z])[:, 1]
        # log intensities used as per-point weights
        i = numpy.log(numpy.array(dat[z])[:, 2])
        ity = numpy.diag(i)

        N = len(x)
        d = numpy.zeros((N, D))
        d[:, 0] = x * x
        d[:, 1] = y * y
        d[:, 2] = x
        d[:, 3] = y
        d[:, 4] = numpy.ones(N)

        # weight design-matrix rows by log-intensity
        old_shape = d.shape
        d = numpy.dot(ity, d)
        assert d.shape == old_shape

        X = cvxmod.param("X" + str(z), N, D)
        X.value = cvxmod.matrix(d)
        X_matrix.append(X)
        thetas.append(cvxmod.optvar("theta" + str(z), D))

    # L1 loss per layer
    objective = 0
    for i in xrange(M):
        objective += cvxmod.atoms.norm1(X_matrix[i] * thetas[i])

    # L1 smoothness between neighboring layers
    reg_const = 5 * float(total_N) / float(M - 1)
    for i in xrange(M - 1):
        objective += reg_const * cvxmod.norm1(thetas[i] - thetas[i + 1])

    prob = cvxmod.problem(cvxmod.minimize(objective))
    # non-degeneracy constraints (A + C == 1) on interior layers
    for i in xrange(1, M - 1):
        prob.constr.append(thetas[i][0] + thetas[i][1] == 1.0)
    # pinch the two end layers
    prob.constr.append(cvxmod.sum(thetas[0]) >= -0.01)
    prob.constr.append(cvxmod.sum(thetas[-1]) >= -0.01)

    # loosen solver tolerances: a rough solution is good enough here
    from cvxopt import solvers
    solvers.options['reltol'] = 1e-1
    solvers.options['abstol'] = 1e-1
    prob.solve()

    # wrap up result, keyed by z-layer
    ellipse_stack = {}
    for i in xrange(M):
        theta_ = numpy.array(cvxmod.value(thetas[i]))
        z_layer = active_layers[i]
        ellipse_stack[z_layer] = conic_to_ellipse(theta_)
        ellipse_stack[z_layer].cz = z_layer
    return ellipse_stack
def solve_boosting(out, labels, nu, solver): ''' solve boosting formulation used by gelher and novozin @param out: matrix (N,F) of predictions (for each f_i) for all examples @param y: vector (N,1) label for each example @param p: regularization constant ''' N = out.size[0] F = out.size[1] assert(N==len(labels)) norm_fact = 1.0 / (nu * float(N)) print norm_fact label_matrix = cvxmod.zeros((N,N)) # avoid point-wise product for i in xrange(N): label_matrix[i,i] = labels[i] #### parameters f = cvxmod.param("f", N, F) y = cvxmod.param("y", N, N, symm=True) norm = cvxmod.param("norm", 1) #### varibales # rho rho = cvxmod.optvar("rho", 1) # dim = (N x 1) chi = cvxmod.optvar("chi", N) # dim = (F x 1) beta = cvxmod.optvar("beta", F) #objective = -rho + cvxmod.sum(chi) * norm_fact + square(norm2(beta)) objective = -rho + cvxmod.sum(chi) * norm_fact print objective # create problem p = cvxmod.problem(cvxmod.minimize(objective)) # create contraint for probability simplex #p.constr.append(beta |cvxmod.In| probsimp(F)) p.constr.append(cvxmod.sum(beta)==1.0) #p.constr.append(square(norm2(beta)) <= 1.0) p.constr.append(beta >= 0.0) # y f beta y f*beta y*f*beta # (N x N) (N x F) (F x 1) --> (N x N) (N x 1) --> (N x 1) p.constr.append(y * (f * beta) + chi >= rho) ###### set values f.value = out y.value = label_matrix norm.value = norm_fact p.solve(lpsolver=solver) weights = numpy.array(cvxmod.value(beta)) #print weights cvxmod.printval(chi) cvxmod.printval(beta) cvxmod.printval(rho) return p
def fit_ellipse_eps_insensitive(x, y): """ fit ellipse using epsilon-insensitive loss """ x = numpy.array(x) y = numpy.array(y) print "shapes", x.shape, y.shape assert len(x) == len(y) N = len(x) D = 5 dat = numpy.zeros((N, D)) dat[:,0] = x*x dat[:,1] = y*y #dat[:,2] = y*x dat[:,2] = x dat[:,3] = y dat[:,4] = numpy.ones(N) print dat.shape dat = cvxmod.matrix(dat) #### parameters # data X = cvxmod.param("X", N, D) # parameter for eps-insensitive loss eps = cvxmod.param("eps", 1) #### varibales # parameter vector theta = cvxmod.optvar("theta", D) # dim = (N x 1) s = cvxmod.optvar("s", N) t = cvxmod.optvar("t", N) # simple objective objective = cvxmod.sum(t) # create problem p = cvxmod.problem(cvxmod.minimize(objective)) # add constraints # (N x D) * (D X 1) = (N X 1) p.constr.append(X*theta <= s) p.constr.append(-X*theta <= s) p.constr.append(s - eps <= t) p.constr.append(0 <= t) #p.constr.append(theta[4] == 1) # trace constraint p.constr.append(theta[0] + theta[1] == 1) ###### set values X.value = dat eps.value = 0.0 #solver = "mosek" #p.solve(lpsolver=solver) p.solve() cvxmod.printval(theta) theta_ = numpy.array(cvxmod.value(theta)) ellipse = conic_to_ellipse(theta_) return ellipse
def fit_ellipse_linear(x, y): """ fit ellipse stack using absolute loss """ x = numpy.array(x) y = numpy.array(y) print "shapes", x.shape, y.shape assert len(x) == len(y) N = len(x) D = 6 dat = numpy.zeros((N, D)) dat[:,0] = x*x dat[:,1] = y*y dat[:,2] = y*x dat[:,3] = x dat[:,4] = y dat[:,5] = numpy.ones(N) print dat.shape dat = cvxmod.matrix(dat) # norm norm = numpy.zeros((N,N)) for i in range(N): norm[i,i] = numpy.sqrt(numpy.dot(dat[i], numpy.transpose(dat[i]))) norm = cvxmod.matrix(norm) #### parameters # data X = cvxmod.param("X", N, D) Q_grad = cvxmod.param("Q_grad", N, N) #### varibales # parameter vector theta = cvxmod.optvar("theta", D) # dim = (N x 1) s = cvxmod.optvar("s", N) # simple objective objective = cvxmod.sum(s) # create problem p = cvxmod.problem(cvxmod.minimize(objective)) # add constraints # (N x D) * (D X 1) = (N x N) * (N X 1) p.constr.append(X*theta <= Q_grad*s) p.constr.append(-X*theta <= Q_grad*s) #p.constr.append(theta[4] == 1) # trace constraint p.constr.append(theta[0] + theta[1] == 1) ###### set values X.value = dat Q_grad.value = norm #solver = "mosek" #p.solve(lpsolver=solver) p.solve() cvxmod.printval(theta) theta_ = numpy.array(cvxmod.value(theta)) ellipse = conic_to_ellipse(theta_) return ellipse
def solve_svm(out, labels, nu, solver): ''' solve boosting formulation used by gelher and nowozin @param out: matrix (N,F) of predictions (for each f_i) for all examples @param labels: vector (N,1) label for each example @param nu: regularization constant @param solver: which solver to use. options: 'mosek', 'glpk' ''' # get dimension N = out.size[0] F = out.size[1] assert N == len(labels), str(N) + " " + str(len(labels)) norm_fact = 1.0 / (nu * float(N)) print "normalization factor %f" % (norm_fact) # avoid point-wise product label_matrix = cvxmod.zeros((N, N)) for i in xrange(N): label_matrix[i, i] = labels[i] #### parameters f = cvxmod.param("f", N, F) y = cvxmod.param("y", N, N, symm=True) norm = cvxmod.param("norm", 1) #### varibales # rho rho = cvxmod.optvar("rho", 1) # dim = (N x 1) chi = cvxmod.optvar("chi", N) # dim = (F x 1) beta = cvxmod.optvar("beta", F) #objective = -rho + cvxmod.sum(chi) * norm_fact + square(norm2(beta)) objective = -rho + cvxmod.sum(chi) * norm_fact print objective # create problem p = cvxmod.problem(cvxmod.minimize(objective)) # create contraints for probability simplex #p.constr.append(beta |cvxmod.In| probsimp(F)) p.constr.append(cvxmod.sum(beta) == 1.0) p.constr.append(beta >= 0.0) p.constr.append(chi >= 0.0) # attempt to perform non-sparse boosting #p.constr.append(square(norm2(beta)) <= 1.0) # y f beta y f*beta y*f*beta # (N x N) (N x F) (F x 1) --> (N x N) (N x 1) --> (N x 1) p.constr.append(y * (f * beta) + chi >= rho) # set values for parameters f.value = out y.value = label_matrix norm.value = norm_fact print "solving problem" print "=============================================" print p print "=============================================" # start solver p.solve(lpsolver=solver) # print variables cvxmod.printval(chi) cvxmod.printval(beta) cvxmod.printval(rho) return numpy.array(cvxmod.value(beta))