def maximize_fapx_cvxopt(node, problem, scoop=False):
    '''
    Finds the value of y solving

        maximize    sum(fapx(x))
        subject to: constr
                    l <= x <= u

    fapx is calculated by approximating the functions given in fs as the
    concave envelope of the function that is tight, for each coordinate i,
    at the points in tight[i].

    fs should be a list of tuples containing the function and its derivative
    corresponding to each coordinate of the vector x.

    Returns a dict containing the optimal variable y as a list, the optimal
    value of the approximate objective, and the value of sum(f(x)) at x.

    scoop = True optionally dumps all problem parameters into a file which
    can be parsed and solved using the scoop second order cone modeling
    language.
    '''
    n = len(node.l)
    l = matrix(node.l)
    u = matrix(node.u)
    x = problem.variable
    constr = problem.constr

    # add box constraints
    box = [x[i] <= u[i] for i in range(n)] + [x[i] >= l[i] for i in range(n)]

    # find approximation to concave envelope of each function
    (fapx, slopes, offsets, fapxs) = utilities.get_fapx(node.tight, problem.fs, l, u, y=x)
    if problem.check_z:
        utilities.check_z(problem, node, fapx, x)
    if scoop:
        utilities.scoop(problem, node, slopes, offsets)

    obj = sum(fapx)
    o = op(obj, constr + box)
    try:
        o.solve(solver='glpk')
    except Exception:
        o.solve()

    if not o.status == 'optimal':
        if o.status == 'unknown':
            raise ImportError('Unable to solve subproblem. Please try again '
                              'after installing cvxopt with glpk binding.')
        else:
            # This node is dead, since the problem is infeasible
            return False
    else:
        # find the difference between the fapx and f for each coordinate i
        fi = numpy.array([problem.fs[i][0](x.value[i]) for i in range(n)])
        fapxi = numpy.array([list(-fun.value())[0] for fun in fapx])
        # if verbose: print('fi', fi, 'fapxi', fapxi)
        maxdiff_index = numpy.argmax(fapxi - fi)
        results = {'x': list(x.value),
                   'fapx': -list(obj.value())[0],
                   'f': float(sum(fi)),
                   'maxdiff_index': maxdiff_index}
        return results
def setUp(self):
    """ Use cvxopt to get ground truth values """
    from cvxopt import lapack, solvers, matrix, spdiag, log, div, normal, setseed
    from cvxopt.modeling import variable, op, max, sum
    solvers.options['show_progress'] = 0

    setseed()
    m, n = 100, 30
    A = normal(m, n)
    b = normal(m, 1)
    b /= (1.1 * max(abs(b)))
    self.m, self.n, self.A, self.b = m, n, A, b

    # l1 approximation
    # minimize || A*x + b ||_1
    x = variable(n)
    op(sum(abs(A * x + b))).solve()
    self.x1 = x.value

    # l2 approximation
    # minimize || A*x + b ||_2
    bprime = -matrix(b)
    Aprime = matrix(A)
    lapack.gels(Aprime, bprime)
    self.x2 = bprime[:n]

    # Deadzone approximation
    # minimize sum(max(abs(A*x+b)-0.5, 0.0))
    x = variable(n)
    dzop = op(sum(max(abs(A * x + b) - 0.5, 0.0)))
    dzop.solve()
    self.obj_dz = sum(
        np.max([np.abs(A * x.value + b) - 0.5, np.zeros((m, 1))], axis=0))

    # Log barrier
    # minimize -sum (log ( 1.0 - (A*x+b)**2))
    def F(x=None, z=None):
        if x is None:
            return 0, matrix(0.0, (n, 1))
        y = A * x + b
        if max(abs(y)) >= 1.0:
            return None
        f = -sum(log(1.0 - y**2))
        gradf = 2.0 * A.T * div(y, 1 - y**2)
        if z is None:
            return f, gradf.T
        H = A.T * spdiag(2.0 * z[0] * div(1.0 + y**2, (1.0 - y**2)**2)) * A
        return f, gradf.T, H

    self.cxlb = solvers.cp(F)['x']
def balance(request, engines, demand):
    engines = json.loads(engines)
    enabled_engines = list(filter(lambda engine: engine["_isEnabled"], engines))
    print(enabled_engines)
    if not enabled_engines:
        return HttpResponse(json.dumps(enabled_engines))

    vector_size = len(enabled_engines)
    rpms = variable(vector_size, "rpms")

    fixed_engine_costs = matrix([float(EngineType.objects.get(pk=engine["_engineTypeId"]).fixed_engine_cost)
                                 for engine in enabled_engines])
    linear_engine_costs = matrix([float(EngineType.objects.get(pk=engine["_engineTypeId"]).linear_engine_cost)
                                  for engine in enabled_engines])
    fixed_energy_outputs = matrix([float(EngineType.objects.get(pk=int(engine["_engineTypeId"])).fixed_energy_output)
                                   for engine in enabled_engines])
    linear_energy_outputs = matrix([float(EngineType.objects.get(pk=int(engine["_engineTypeId"])).linear_energy_output)
                                    for engine in enabled_engines])
    minimum_rpms = matrix([float(EngineType.objects.get(pk=engine["_engineTypeId"]).minimum_rpm)
                           for engine in enabled_engines])
    maximum_rpms = matrix([float(EngineType.objects.get(pk=engine["_engineTypeId"]).maximum_rpm)
                           for engine in enabled_engines])

    demand_constraint = ((float(demand) - dot(rpms, linear_energy_outputs) - sum(fixed_energy_outputs)) <= 0)
    maximum_rpm_constraint = ((rpms - maximum_rpms) <= 0)
    minimum_rpm_constraint = ((rpms - minimum_rpms) >= 0)
    constraints = [demand_constraint, maximum_rpm_constraint, minimum_rpm_constraint]

    objective_function = op((dot(linear_engine_costs, rpms) + sum(fixed_engine_costs)), constraints)
    objective_function.solve()
    print(rpms.value)

    rpmsIndex = 0
    for engine in engines:
        engine["_rpm"] = 0
        engine["_energyOutput"] = 0
        if engine["_isEnabled"]:
            print(rpms)
            if rpms.value:
                engine["_rpm"] = rpms.value[rpmsIndex]
                rpmsIndex += 1
            else:
                engine["_rpm"] = float(EngineType.objects.get(pk=engine["_engineTypeId"]).maximum_rpm)
            energy_output_per_rpm = float(EngineType.objects.get(pk=engine["_engineTypeId"]).linear_energy_output)
            base_energy_output = float(EngineType.objects.get(pk=engine["_engineTypeId"]).fixed_energy_output)
            engine["_energyOutput"] = energy_output_per_rpm * float(engine["_rpm"]) + base_energy_output
    return HttpResponse(json.dumps(engines))
def solve_lad(X, Y):
    Y_cvx = matrix(Y)
    X_cvx = matrix(X)
    w_hat = variable(X.shape[1])
    solvers.options['show_progress'] = False
    op(sum(abs(Y_cvx - X_cvx * w_hat))).solve()
    return w_hat.value
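# A minimal usage sketch for solve_lad (the data and names below are assumptions,
# not from the original snippet): fit a least-absolute-deviations model to synthetic
# numpy data, with the same cvxopt objects the function relies on imported here.
import numpy as np
from cvxopt import matrix, solvers
from cvxopt.modeling import variable, op, sum

np.random.seed(0)
X_demo = np.random.randn(200, 5)                        # 200 samples, 5 features
w_true = np.arange(1.0, 6.0)
Y_demo = (X_demo @ w_true + 0.1 * np.random.randn(200)).reshape(-1, 1)

w_fit = solve_lad(X_demo, Y_demo)                       # cvxopt matrix, shape (5, 1)
print([round(v, 3) for v in w_fit])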
def balance(self, energy_demand):
    active_engines = list(filter(lambda engine: engine.is_enabled, self.engines))
    if len(active_engines) <= 0:
        return []

    rpms = variable(len(active_engines), 'rpms')

    fixed_engine_costs = matrix([float(engine.engine_type.fixed_engine_cost) for engine in active_engines])
    linear_engine_costs = matrix([float(engine.engine_type.linear_engine_cost) for engine in active_engines])
    fixed_energy_outputs = matrix([float(engine.engine_type.fixed_energy_output) for engine in active_engines])
    linear_energy_outputs = matrix([float(engine.engine_type.linear_energy_output) for engine in active_engines])
    minimum_rpms = matrix([float(engine.engine_type.minimum_rpm) for engine in active_engines])
    maximum_rpms = matrix([float(engine.engine_type.maximum_rpm) for engine in active_engines])

    energy_demand_constraint = ((float(energy_demand) - dot(rpms, linear_energy_outputs) - sum(fixed_energy_outputs)) <= 0)
    maximum_rpm_constraint = ((rpms - maximum_rpms) <= 0)
    minimum_rpm_constraint = ((rpms - minimum_rpms) >= 0)
    constraints = [energy_demand_constraint, maximum_rpm_constraint, minimum_rpm_constraint]

    objective_function = op((dot(linear_engine_costs, rpms) - sum(fixed_engine_costs)), constraints)
    objective_function.solve()

    for i in range(len(active_engines)):
        engine = active_engines[i]
        engine.rpm = rpms.value[i]
        # energy output grows linearly with rpm on top of the fixed output
        engine.energy_output = float(engine.engine_type.linear_energy_output) * engine.rpm + float(engine.engine_type.fixed_energy_output)
    return active_engines
def test_problem_penalty(self):
    """ Compare cvxpy solutions to cvxopt ground truth """
    from cvxpy import (matrix, variable, program, minimize, sum, abs, norm2,
                       log, square, zeros, max, hstack, vstack)
    m, n = self.m, self.n
    A = matrix(self.A)
    b = matrix(self.b)

    # set tolerance to 5 significant digits
    tol_exp = 5

    # l1 approximation
    x = variable(n)
    p = program(minimize(sum(abs(A * x + b))))
    p.solve(True)
    np.testing.assert_array_almost_equal(x.value, self.x1, tol_exp)

    # l2 approximation
    x = variable(n)
    p = program(minimize(norm2(A * x + b)))
    p.solve(True)
    np.testing.assert_array_almost_equal(x.value, self.x2, tol_exp)

    # Deadzone approximation - implementation is currently ugly (need max along axis)
    x = variable(n)
    Axbm = abs(A * x + b) - 0.5
    Axbm_deadzone = vstack([max(hstack((Axbm[i, 0], 0.0))) for i in range(m)])
    p = program(minimize(sum(Axbm_deadzone)))
    p.solve(True)
    obj_dz_cvxpy = np.sum(
        np.max([np.abs(A * x.value + b) - 0.5, np.zeros((m, 1))], axis=0))
    np.testing.assert_array_almost_equal(obj_dz_cvxpy, self.obj_dz, tol_exp)

    # Log barrier
    x = variable(n)
    p = program(minimize(-sum(log(1.0 - square(A * x + b)))))
    p.solve(True)
    np.testing.assert_array_almost_equal(x.value, self.cxlb, tol_exp)
def F(x=None, z=None):
    if x is None:
        return 0, matrix(0.0, (n, 1))
    y = A * x + b
    if max(abs(y)) >= 1.0:
        return None
    f = -sum(log(1.0 - y**2))
    gradf = 2.0 * A.T * div(y, 1 - y**2)
    if z is None:
        return f, gradf.T
    H = A.T * spdiag(2.0 * z[0] * div(1.0 + y**2, (1.0 - y**2)**2)) * A
    return f, gradf.T, H
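# Context sketch (an assumption, not part of the original snippet): F above is the
# nonlinear convex oracle format that cvxopt.solvers.cp expects for the log-barrier
# problem minimize -sum(log(1 - (A*x + b)**2)); it closes over module-level A, b, n.
from cvxopt import solvers

sol = solvers.cp(F)
x_opt = sol['x']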
def solver(A, b):
    A = matrix(A)
    b = matrix(b)
    x = variable(A.size[1])   # number of unknowns taken from the width of A
    start = time.time()
    op(sum(abs(A * x - b))).solve()
    end = time.time()
    sol_x = np.array(x.value).flatten()
    return ((end - start), sol_x)
def test_case3(self):
    m, n = 500, 100
    setseed(100)
    A = normal(m, n)
    b = normal(m)

    x1 = variable(n)
    lp1 = op(max(abs(A * x1 - b)))
    lp1.solve()
    self.assertTrue(lp1.status == 'optimal')

    x2 = variable(n)
    lp2 = op(sum(abs(A * x2 - b)))
    lp2.solve()
    self.assertTrue(lp2.status == 'optimal')

    x3 = variable(n)
    lp3 = op(sum(max(0, abs(A * x3 - b) - 0.75, 2 * abs(A * x3 - b) - 2.25)))
    lp3.solve()
    self.assertTrue(lp3.status == 'optimal')
def barrier():
    # variables kept same from cvxopt example
    MAXITERS = 100
    ALPHA = 0.01
    BETA = 0.5

    x = matrix(0.0, (n, 1))
    H = matrix(0.0, (n, n))   # symmetric matrix holding the Hessian

    for iter in range(MAXITERS):
        # gradient of the log barrier: g = A^T * (1 / (b - A*x))
        d = (b - A * x)**-1
        g = A.T * d

        # Hessian is H = A^T * diag(d)^2 * A: replicate d across the columns,
        # scale the rows of A elementwise, then let BLAS form the symmetric product
        Asc = mul(d[:, n * [0]], A)
        blas.syrk(Asc, H, trans='T')

        # Newton step: LAPACK solves H * v = -g and overwrites v with the step
        v = -g
        lapack.posv(H, v)

        # stop condition: Newton decrement below the tolerance mu
        lam = blas.dot(g, v)
        if sqrt(-lam) < mu:
            return x

        # backtracking line search to stay feasible, using ALPHA and BETA
        y = mul(A * v, d)
        step = 1.0
        while 1 - step * max(y) < 0:
            step *= BETA
        while True:
            if -sum(log(1 - step * y)) < (ALPHA * step * lam):
                break
            step *= BETA

        # take the damped Newton step
        x += step * v
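# Hypothetical driver for barrier() (the names and data here are assumptions, not
# from the original): it supplies the module-level objects the function reads
# (A, b, n, mu and the cvxopt helpers) and computes the analytic center of
# {x : A*x <= b}.
from math import sqrt
from cvxopt import matrix, normal, uniform, mul, log, blas, lapack

m, n = 200, 50
A = normal(m, n)
b = uniform(m, 1) + 1.0      # b > 0 keeps x = 0 strictly feasible
mu = 1e-8                    # tolerance on the Newton decrement

x_center = barrier()
if x_center is not None:     # barrier() returns None if MAXITERS is exhausted
    print(min(b - A * x_center))   # every slack should be strictly positive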
def draw_interim_configs(x_i_C2val):
    res = []
    for weighted_configs in x_i_C2val:
        weight_list = [v for k, v in weighted_configs]
        weight_list = [0.0 if v < 0.0 else v for v in weight_list]
        weights = np.array(weight_list, dtype=np.float32)
        weights /= np.sum(weights)   # re-normalize in order to fix rounding issues
        configs = [k for k, v in weighted_configs]
        try:
            config = np.random.choice(configs, p=weights)
        except:
            logger.error('weights={}'.format(sum(weights)))
            raise ValueError('weights')
        res.append(config)
    return res
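# Hypothetical input for draw_interim_configs (the names below are made up): each
# entry is a list of (config, weight) pairs; one config is drawn per entry with
# probability proportional to its clipped, re-normalized weight.
x_i_C2val_demo = [
    [('config_a', 0.75), ('config_b', 0.25)],
    [('config_c', -0.1), ('config_d', 0.5), ('config_e', 0.5)],   # negative weight is clipped to 0
]
print(draw_interim_configs(x_i_C2val_demo))   # e.g. ['config_a', 'config_e']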
def stratified_sampling(X, a, y, emp_marginals, n_train_samples):
    emp_P_11 = emp_marginals[1, 1]
    emp_P_01 = emp_marginals[0, 1]
    emp_P_10 = emp_marginals[1, 0]
    emp_P_00 = emp_marginals[0, 0]

    X_11, X_01, X_10, X_00 = [], [], [], []
    for i in range(X.shape[0]):
        if a[i] == 1 and y[i] == 1:
            X_11.append(X[i, :])
        if a[i] == 0 and y[i] == 1:
            X_01.append(X[i, :])
        if a[i] == 1 and y[i] == 0:
            X_10.append(X[i, :])
        if a[i] == 0 and y[i] == 0:
            X_00.append(X[i, :])

    ind_11 = np.random.randint(low=0, high=np.array(X_11).shape[0], size=int(emp_P_11 * n_train_samples))
    ind_01 = np.random.randint(low=0, high=np.array(X_01).shape[0], size=int(emp_P_01 * n_train_samples))
    ind_10 = np.random.randint(low=0, high=np.array(X_10).shape[0], size=int(emp_P_10 * n_train_samples))
    ind_00 = np.random.randint(low=0, high=np.array(X_00).shape[0], size=int(emp_P_00 * n_train_samples))

    X_train_11 = np.array(X_11)[ind_11, :]
    X_train_01 = np.array(X_01)[ind_01, :]
    X_train_10 = np.array(X_10)[ind_10, :]
    X_train_00 = np.array(X_00)[ind_00, :]

    X_test11 = np.delete(np.array(X_11), ind_11, axis=0)
    X_test01 = np.delete(np.array(X_01), ind_01, axis=0)
    X_test10 = np.delete(np.array(X_10), ind_10, axis=0)
    X_test00 = np.delete(np.array(X_00), ind_00, axis=0)

    test_sensitives = np.hstack([[1] * X_test11.shape[0], [0] * X_test01.shape[0],
                                 [1] * X_test10.shape[0], [0] * X_test00.shape[0]])
    y_test = np.hstack([[1] * X_test11.shape[0], [1] * X_test01.shape[0],
                        [0] * X_test10.shape[0], [0] * X_test00.shape[0]])
    X_test = np.vstack([X_test11, X_test01, X_test10, X_test00])

    y_train = np.hstack([[1] * int(emp_P_11 * n_train_samples), [1] * int(emp_P_01 * n_train_samples),
                         [0] * int(emp_P_10 * n_train_samples), [0] * int(emp_P_00 * n_train_samples)])
    train_sensitives = np.hstack([[1] * int(emp_P_11 * n_train_samples), [0] * int(emp_P_01 * n_train_samples),
                                  [1] * int(emp_P_10 * n_train_samples), [0] * int(emp_P_00 * n_train_samples)])
    X_train = np.vstack([X_train_11, X_train_01, X_train_10, X_train_00])

    threshold = 1 - sum(y == 1) / y.shape[0]
    return X_train, train_sensitives, y_train, X_test, test_sensitives, y_test, threshold
def maximize_fapx_glpk(node, problem, verbose=False):
    '''
    Finds the value of y solving

        maximize    sum(fapx(x))
        subject to: constr
                    l <= x <= u

    fapx is calculated by approximating the functions given in fs as the
    concave envelope of the function that is tight, for each coordinate i,
    at the points in tight[i].

    fs should be a list of tuples containing the function and its derivative
    corresponding to each coordinate of the vector x.

    Returns a dict containing the optimal variable y as a list, the optimal
    value of the approximate objective, and the value of sum(f(x)) at x.
    '''
    n = len(node.l)
    l = node.l
    u = node.u

    # find approximation to concave envelope of each function
    (slopes, offsets, fapxs) = utilities.get_fapx(node.tight, problem.fs, l, u)

    # verify correctness of concave envelope
    if problem.check_z:
        utilities.check_z(problem, node, fapxs, x)

    # formulate concave problem as lp and solve
    lp = sigopt2pyglpk(slopes=slopes, offsets=offsets, l=l, u=u, **problem.constr)
    lp.simplex()

    # find the difference between the fapx and f for each coordinate i
    xstar = [c.primal for c in lp.cols[:n]]
    fi = numpy.array([problem.fs[i][0](xstar[i]) for i in range(n)])
    fapxi = numpy.array([c.primal for c in lp.cols[n:]])
    maxdiff_index = numpy.argmax(fapxi - fi)

    results = {'x': xstar,
               'fapx': lp.obj.value,
               'f': float(sum(fi)),
               'maxdiff_index': maxdiff_index}
    if verbose:
        print('fi', fi, 'fapxi', fapxi, results)
    return results
def lin_regression():
    raw_data = pd.read_csv("../Datasets/winequality-red.csv", sep=";", header=0)

    raw_training = raw_data[:1500]
    x = cm.matrix(raw_training.iloc[:, :-1].to_numpy(dtype=float))
    y = cm.matrix(raw_training.iloc[:, -1:].to_numpy(dtype=float))

    raw_test = raw_data[1500:].reset_index(drop=True)
    x_test = cm.matrix(raw_test.iloc[:, :-1].to_numpy(dtype=float))
    y_test = cm.matrix(raw_test.iloc[:, -1:].to_numpy(dtype=float))

    a = cm.variable(x.size[1])
    b = cm.variable()
    z = cm.variable(x.size[0])

    constraint_1 = (z >= (y - x * a - b))
    constraint_2 = (z >= (x * a + b - y))

    z_min = cm.op(cm.min(cm.sum(z) / x.size[0]), [constraint_1, constraint_2])
    z_min.solve()

    calc_a = a.value
    calc_b = b.value

    z_train = y - x * calc_a - calc_b
    z_test = y_test - x_test * calc_a - calc_b

    train_results = x * calc_a + calc_b
    average_training_error = mean_square_error(y, train_results)
    test_results = x_test * calc_a + calc_b
    average_test_error = mean_square_error(y_test, test_results)

    print(f"average training error = {average_training_error}")
    print(f"average testing error = {average_test_error}")
def _update_learners_weights(self, t, samples_dist):
    """
    Solve equation (3) in the paper, returns the set of lagrange multipliers that matches w
    """
    n = len(samples_dist)
    batch_size = min(self.max_batch_size, int(n * self.batch_size_ratio))
    batch_indices = np.random.choice(a=list(range(n)), replace=False, p=samples_dist, size=batch_size)

    # Weak learners weights - what we need to find
    w = modeling.variable(t, 'w')

    # Slack variables
    # zetas = {int(i): modeling.variable(1, 'zeta_%d' % int(i)) for i in batch_indices}
    zetas = modeling.variable(batch_size, 'zetas')

    # Margin
    rho = modeling.variable(1, 'rho')

    # Constraints
    c1 = (w >= 0)
    c2 = (sum(w) == 1)
    c_slacks = (zetas >= 0)
    c_soft_margins = [
        (modeling.dot(matrix(self.u[sample_idx].astype(float).T), w) >= (rho - zetas[int(idx)]))
        for idx, sample_idx in enumerate(batch_indices)
    ]

    # Solve optimisation problem
    lp = modeling.op(-(rho - self.kappa * modeling.sum(zetas)), [c1, c2, c_slacks] + c_soft_margins)
    solvers.options['show_progress'] = False
    lp.solve()

    return w.value
def nn1_np(A, C, b, x, y, z, lbda, machine_eps):
    i = 0
    prev_x = np.array(np.zeros(n), dtype=np.double, order='C', copy=False)
    while sum(pow(x - prev_x, 2)) > machine_eps:
        i += 1
        prev_x = x

        x_stage = x - np.dot(np.transpose(A), y) + np.dot(np.transpose(C), z)
        x_bar = [g(ele) for ele in x_stage]

        y_stage = y + np.dot(A, x_bar) - b
        y_bar = [g(ele) for ele in y_stage]

        z_stage = np.dot(C, x_bar) - z
        z_bar = [g(ele) for ele in z_stage]

        x_diff = lbda * (x_bar - x)
        y_diff = 2 * lbda * (y_bar - y)
        z_diff = 2 * lbda * (np.dot(C, x_bar) - z_bar)

        x = x + x_diff
        y = y + y_diff
        z = z + z_diff
    return (i, x)
# The robust LP example of section 10.5 (Examples).

from cvxopt import normal, uniform
from cvxopt.modeling import variable, dot, op, sum
from cvxopt.blas import nrm2

m, n = 500, 100
A = normal(m, n)
b = uniform(m)
c = normal(n)

x = variable(n)
op(dot(c, x), A * x + sum(abs(x)) <= b).solve()

x2 = variable(n)
y = variable(n)
op(dot(c, x2), [A * x2 + sum(y) <= b, -y <= x2, x2 <= y]).solve()

print("\nDifference between two solutions %e" % nrm2(x.value - x2.value))
# The 1-norm support vector classifier of section 10.5 (Examples).

from cvxopt import normal, setseed
from cvxopt.modeling import variable, op, max, sum
from cvxopt.blas import nrm2

m, n = 500, 100
A = normal(m, n)

x = variable(A.size[1], 'x')
u = variable(A.size[0], 'u')
op(sum(abs(x)) + sum(u), [A * x >= 1 - u, u >= 0]).solve()

x2 = variable(A.size[1], 'x')
op(sum(abs(x2)) + sum(max(0, 1 - A * x2))).solve()

print("\nDifference between two solutions: %e" % nrm2(x.value - x2.value))
import pylab, numpy
from cvxopt import lapack, solvers, matrix, spdiag, log, div, normal
from cvxopt.modeling import variable, op, max, sum
solvers.options['show_progress'] = 0

m, n = 100, 30
A = normal(m, n)
b = normal(m, 1)
b /= (1.1 * max(abs(b)))   # Make x = 0 feasible for log barrier.

# l1 approximation
#
# minimize || A*x + b ||_1
x = variable(n)
op(sum(abs(A * x + b))).solve()
x1 = x.value

pylab.figure(1, facecolor='w', figsize=(10, 10))
pylab.subplot(411)
nbins = 100
bins = [-1.5 + 3.0 / (nbins - 1) * k for k in range(nbins)]
pylab.hist(A * x1 + b, numpy.array(bins))
nopts = 200
xs = -1.5 + 3.0 / (nopts - 1) * matrix(list(range(nopts)))
pylab.plot(xs, (35.0 / 1.5) * abs(xs), 'g-')
pylab.axis([-1.5, 1.5, 0, 40])
pylab.ylabel('l1')
pylab.title('Penalty function approximation (fig. 6.2)')

# l2 approximation
try:
    import numpy, pylab
except ImportError:
    pylab_installed = False
else:
    pylab_installed = True

from cvxopt import normal, matrix
from cvxopt.modeling import variable, op, max, sum

m, n = 100, 30
A = normal(m, n)
b = normal(m, 1)
b /= (1.1 * max(abs(b)))   # Make x = 0 feasible for log barrier.

# l1 approximation
#
# minimize || A*x + b ||_1
x = variable(n)
op(sum(abs(A * x + b))).solve()
x1 = x.value

if pylab_installed:
    pylab.figure(1, facecolor='w', figsize=(10, 10))
    pylab.subplot(411)
    nbins = 100
    bins = [-1.5 + 3.0 / (nbins - 1) * k for k in range(nbins)]
    pylab.hist(A * x1 + b, numpy.array(bins))
    nopts = 200
    xs = -1.5 + 3.0 / (nopts - 1) * matrix(list(range(nopts)))
    pylab.plot(xs, (35.0 / 1.5) * abs(xs), 'g-')
    pylab.axis([-1.5, 1.5, 0, 40])
    pylab.ylabel('l1')
    pylab.title('Penalty function approximation (fig. 6.2)')
# The norm and penalty approximation problems of section 10.5 (Examples).

from cvxopt import normal, setseed
from cvxopt.modeling import variable, op, max, sum

setseed(0)
m, n = 500, 100
A = normal(m, n)
b = normal(m)

x1 = variable(n)
prob1 = op(max(abs(A * x1 + b)))
prob1.solve()

x2 = variable(n)
prob2 = op(sum(abs(A * x2 + b)))
prob2.solve()

x3 = variable(n)
prob3 = op(sum(max(0, abs(A * x3 + b) - 0.75, 2 * abs(A * x3 + b) - 2.25)))
prob3.solve()

try:
    import pylab
except ImportError:
    pass
else:
    pylab.subplot(311)
    pylab.hist(A * x1.value + b, m // 5)
    pylab.subplot(312)
    pylab.hist(A * x2.value + b, m // 5)