def branch_and_bound(n, A, B, c):
    """Minimize ||A x + B z - c||_2^2 over continuous x and boolean z
    by branch and bound on the entries of z.

    Returns (best objective value found, best boolean assignment for z).
    """
    from queue import PriorityQueue

    x = Variable(n)
    z = Variable(n)
    L = Parameter(n)
    U = Parameter(n)
    relaxation = Problem(Minimize(sum_squares(A * x + B * z - c)),
                         [L <= z, z <= U])

    explored = 0
    incumbent_z = None
    incumbent_val = numpy.inf

    # Queue entries: (parent lower bound, tie-break bit, lo, hi, next index to fix).
    frontier = PriorityQueue()
    frontier.put((numpy.inf, 0, numpy.zeros(n), numpy.ones(n), 0))
    while not frontier.empty():
        explored += 1
        # Pop the node with the smallest known lower bound.
        _, _, lo, hi, depth = frontier.get()
        L.value = lo
        U.value = hi
        node_lb = relaxation.solve()
        # Round the relaxed z to a boolean point to obtain an upper bound.
        z_round = numpy.round(z.value)
        node_ub = Problem(relaxation.objective, [z == z_round]).solve()
        incumbent_val = min(incumbent_val, node_ub)
        if node_ub == incumbent_val:
            incumbent_z = z_round
        # Branch on z[depth] unless this subtree is a leaf or can be pruned.
        if depth < n and node_lb < incumbent_val:
            for bit in [0, 1]:
                lo[depth] = hi[depth] = bit
                frontier.put((node_lb, bit, lo.copy(), hi.copy(), depth + 1))
    # print("Nodes visited: %s out of %s" % (explored, 2**(n+1)-1))
    return incumbent_val, incumbent_z
def __init__(self, name, c, A, b, dims):
    """Set up one consensus-ADMM agent: its local LP data, consensus
    bookkeeping parameters, and the proximal subproblem it will re-solve.
    """
    logging.info(name + " started")
    logging.info(name + "'s dims = " + str(dims))
    num_vars = np.shape(A)[1]

    self.name = name
    self.n = num_vars
    self.dims = dims
    self.c = c
    self.A = A
    self.b = b

    # Consensus state: current average, previous average, scaled dual.
    self.xbar = Parameter(num_vars, value=np.zeros(num_vars))
    self.xbar_old = Parameter(num_vars, value=np.zeros(num_vars))
    self.u = Parameter(num_vars, value=np.zeros(num_vars))
    self.x = Variable(num_vars)

    # Local objective: c^T x plus the augmented-Lagrangian penalty
    # restricted to this agent's coordinates.
    self.rho = constants.RHO
    self.f = self.c.T @ self.x
    self.f += (self.rho / 2) * sum_squares(
        self.x[self.dims] - self.xbar[self.dims] + self.u[self.dims])
    logging.info(name + "'s f = " + str(self.f))
    self.prox = Problem(Minimize(self.f),
                        [self.A @ self.x == self.b, self.x >= 0])

    # Per-iteration diagnostics, appended to as the algorithm runs.
    self.history = {
        'objval': [],
        'r_norm': [],
        'eps_pri': [],
        's_norm': [],
        'eps_dual': [],
        'iter': []
    }
    self.has_converged = False
    self.k = 0
def run_process(f, pipe):
    """Worker loop for consensus ADMM.

    Repeatedly solves the local prox problem, sends x to the master over
    `pipe`, receives the new average, and takes a scaled dual step.
    Runs forever; the caller terminates the process.
    """
    xbar = Parameter(n, value=np.zeros(n))
    u = Parameter(n, value=np.zeros(n))
    # Augment the local objective with the consensus penalty term.
    prox = Problem(Minimize(f + (rho / 2) * sum_squares(x - xbar + u)))
    while True:
        prox.solve()
        pipe.send(x.value)
        xbar.value = pipe.recv()
        u.value += x.value - xbar.value
def __init__(self, circle_list, cij=None, obj='ar0', overlap=False,
             min_edge=2, verbose=False, **kwargs):
    """Weighted Circle Packing Problem.

    Pairwise circle-center distances must be at least the sum of radii
    (no overlap); the objective minimizes the weighted total distance.

    k -
    eps -
    min_edge -
    """
    FormulationR2.__init__(self, circle_list, **kwargs)
    # solver and record args
    self._solve_args = {'method': 'dccp'}
    self._in_dict = {'k': 1, 'min_edge': min_edge, 'eps': 1e-2}

    # gather inputs
    X = circle_list.point_vars
    n = len(circle_list)
    if cij is None:
        cij = np.ones((n, n))

    # radii derived from the input areas
    areas = np.asarray([x.area for x in circle_list.inputs])
    r = np.sqrt(areas / np.pi)  # * np.log(1 + areas / (min_area - min_edge ** 2))
    self.r = r

    # upper-triangular index pairs (i < j)
    inds = np.triu_indices(n, 1)
    xi = inds[0].tolist()
    xj = inds[1].tolist()

    weights = Parameter(shape=len(xi), value=cij[inds], name='cij', nonneg=True)
    radii = Parameter(shape=len(xi), value=r[xi] + r[xj], name='radii', nonneg=True)
    dists = cvx.norm(X[xi, :] - X[xj, :], 2, axis=1)

    # non-overlap constraint and weighted-distance objective
    self._constr.append(dists >= radii)
    self._obj = Minimize(cvx.sum(cvx.multiply(weights, dists)))
def mpt_opt(data, gamma_vec):
    """Sweep the risk-aversion parameter over `gamma_vec` and trace the
    efficient frontier for a long-only, fully-invested portfolio.

    Returns (weights per gamma, expected returns, risks).
    """
    num_samples = len(gamma_vec)
    n_assets = len(data)

    w_vec = Variable(n_assets)
    mu_vec = np.array([np.mean(data[k]) for k in range(n_assets)])
    sigma_mat = np.cov(data)
    gamma = Parameter(nonneg=True)

    ret_val = mu_vec.T * w_vec
    risk_val = quad_form(w_vec, sigma_mat)  # w^T Sigma w
    problem = Problem(Maximize(ret_val - gamma * risk_val),
                      [sum(w_vec) == 1, w_vec >= 0])

    w_vec_results = [None] * num_samples
    ret_results = np.zeros(num_samples)
    risk_results = np.zeros(num_samples)
    # Re-solve the same parameterized problem for each gamma.
    for i, new_gamma in enumerate(gamma_vec):
        gamma.value = new_gamma
        problem.solve()
        w_vec_results[i] = w_vec.value
        ret_results[i] = ret_val.value
        risk_results[i] = sqrt(risk_val).value
    return (w_vec_results, ret_results, risk_results)
def as_constraint(self, *args):
    """
    Imagine there are bubbles of a fixed size floating about constraining
    the discrete space; each face is given a coordinate

    X = 1, Xg is coordinate    dist_real <= r
    X = 1, Xg is (0, 0)        dist_fake <= r + 1000

    note - 2-norm is SIGNIFICANTLY Faster than 1norm.

    Returns one big-M distance constraint per action set: every face
    selected by action i must lie within radius self._r[i] of the point
    (px[i], py[i]); unselected faces are released by the big-M term.
    """
    N = len(self.space.faces)
    # centroids.shape[N, 2]
    centroids = np.asarray(self.space.faces.centroids)
    M = 2 * centroids.max()  # big-M: de-activates the bound when X_i = 0
    centroids = Parameter(shape=centroids.shape, value=centroids)
    px, py = self._p.X, self._p.Y
    C = []
    for i, face_set in enumerate(self._actions):
        X = face_set.vars  # selected faces
        cx = cvx.multiply(centroids[:, 0], X)
        cy = cvx.multiply(centroids[:, 1], X)
        Xg = cvx.vstack([cx, cy]).T
        v = cvx.vstack(
            [cvx.promote(px[i], (N, )), cvx.promote(py[i], (N, ))]).T
        # BUG FIX: accumulate constraints across all action sets instead
        # of overwriting C each iteration — previously only the last
        # action's constraint was returned.
        C += [cvx.norm(v - Xg, 2, axis=1) <= self._r[i] + M * (1 - X)]
    return C
def MarkowitzOpt(mean, variance, covariance, interest_rate, min_return):
    """Minimum-variance long-only portfolio subject to a return floor.

    The asset universe is the stocks described by `mean`/`variance`/
    `covariance` plus a risk-free asset paying `interest_rate` (whose
    variance and covariances are zero).
    Returns the optimal allocation vector.
    """
    n = mean.size + 1  # number of stocks + the risk-free asset

    # Expected returns: stock means followed by the interest rate.
    mu = np.full(n, interest_rate)
    mu[:-1] = mean.values

    # Assemble the symmetric covariance matrix; the risk-free
    # row/column stays zero.
    Sigma = np.zeros((n, n))
    counter = 0
    for i in np.arange(n - 1):
        for j in np.arange(i, n - 1):
            if i == j:
                Sigma[i, j] = variance[i]
            else:
                Sigma[i, j] = covariance[counter]
                Sigma[j, i] = Sigma[i, j]
                counter += 1
    # Project onto the nearest positive-definite matrix.
    Sigma = nearestPD(Sigma)

    # Keep the return-floor constraint feasible.
    if mu.max() < min_return:
        min_return = interest_rate

    w = Variable(n)  # portfolio allocation vector
    ret = mu.T * w
    risk = quad_form(w, Sigma)
    min_ret = Parameter(nonneg=True)
    min_ret.value = min_return
    # Restricting to long-only portfolio.
    prob = Problem(Minimize(risk),
                   [ret >= min_ret, sum(w) == 1, w >= 0])
    prob.solve()
    return w.value
def as_constraint(self, *args):
    """
    Imagine there are bubbles of a fixed size floating about constraining
    the discrete space; each face is given a coordinate

    X = 1, Xg is coordinate    dist_real <= r
    X = 1, Xg is (0, 0)        dist_fake <= r + 1000

    note - 2-norm is SIGNIFICANTLY Faster than 1norm.
    """
    N = len(self.space.faces)
    X = self.stacked  # selected faces
    M = 100           # big-M upper bound
    # centroids.shape[N, 2]
    cent_arr = np.asarray(self.space.faces.centroids)
    cents = Parameter(shape=cent_arr.shape, value=cent_arr)
    # Mask the centroid coordinates by the selection vector.
    gx = cvx.multiply(cents[:, 0], X)
    gy = cvx.multiply(cents[:, 1], X)
    Xg = cvx.vstack([gx, gy]).T
    anchor = cvx.vstack(
        [cvx.promote(self._bx, (N, )), cvx.promote(self._by, (N, ))]).T
    # Selected faces must be within radius r of the anchor; the big-M
    # term releases faces that are not selected.
    return [cvx.norm(anchor - Xg, 2, axis=1) <= self._r + M * (1 - X)]
def test_square_param(self):
    """Test issue arising with square plus parameter.
    """
    p = Parameter(value=1)
    v = Variable()
    objective = Minimize(v ** 2 + abs(p))
    problem = Problem(objective)
    problem.solve()
    # Optimum is at v = 0, so the objective equals |p| = 1.
    self.assertAlmostEqual(objective.value, 1.0)
def __init__(self, inputs, tgt, src=None, **kwargs):
    """Convert a PointList to a segment list.

    `tgt` is the fixed (x, y) target. `src` is an optional fixed source;
    when omitted, the path start becomes a free positive variable.
    """
    from src.cvopt.formulate.input_structs import PointList
    # An integer input is promoted to a PointList of that size.
    if isinstance(inputs, int):
        inputs = PointList(inputs)
    FormulationR2.__init__(self, inputs, **kwargs)
    self._tgt_x = Parameter(value=tgt[0])
    self._tgt_y = Parameter(value=tgt[1])
    if src is not None:
        # source may not be an expression
        self._src_x = Parameter(value=src[0])
        self._src_y = Parameter(value=src[1])
    else:
        self._src_x = Variable(pos=True, name='path_start_X')
        self._src_y = Variable(pos=True, name='path_start_Y')
def setup_class(self):
    """Build fixture expressions covering each curvature and sign case."""
    self.cvx = Variable() ** 2                 # convex
    self.ccv = Variable() ** 0.5               # concave
    self.aff = Variable()                      # affine
    self.const = Constant(5)                   # constant
    self.unknown_curv = log(Variable() ** 3)   # unknown curvature
    self.pos = Constant(1)                     # positive sign
    self.neg = Constant(-1)                    # negative sign
    self.zero = Constant(0)                    # zero
    self.unknown_sign = Parameter()            # unknown sign
def as_constraint(self, *args): """ todo notes : performance goes with N=6 is around 1 second. when constraints are tightened, goes to 5-6 seconds overall worse than circle-BoundedSet, but more consistent results maybe """ # centroids.shape[N, 2] cent = np.asarray(self.space.faces.centroids) centrx = Parameter(shape=cent[:, 0].shape, value=cent[:, 0], name='centrx') centry = Parameter(shape=cent[:, 1].shape, value=cent[:, 1], name='centry') bX, bY, bW, bH = self._boxlist.vars # base constraints - whatever for now C = [bW >= 1, bH >= 1] for i, face_set in enumerate(self._actions): X = face_set.vars # selected faces M = 100 # todo upper bound cx = cvx.multiply(centrx, X) cy = cvx.multiply(centry, X) # todo maybe lienaerize furda, or use true facemin and face max instead of centroid\ # # cx is within box if X_i = 1, C += [ bX[i] - bW[i] / 2 <= cx - 0.4 + M * (1 - X), bX[i] + bW[i] / 2 >= cx + 0.4, bY[i] - bH[i] / 2 <= cy - 0.4 + M * (1 - X), bY[i] + bH[i] / 2 >= cy + 0.4, ] return C
def compute_node_be_curvature(g, n=None, solver=None, solver_options=None, verbose=False):
    """Bakry-Emery curvature of node `n` in graph `g` via the SDP

        maximize kappa  s.t.  dGamma2 - kappa * Gamma >> 0.

    With n=None, returns {node: curvature} for every node of g.
    Isolated nodes (degree 0) get curvature 0.

    `solver`/`solver_options` are forwarded to `Problem.solve`.
    """
    # BUG FIX: avoid the mutable default argument `solver_options={}`
    # (shared across calls); use a None sentinel instead.
    if solver_options is None:
        solver_options = {}
    if n is None:
        # Evaluate every node recursively.
        return {v: compute_node_be_curvature(g, v, solver=solver,
                                             solver_options=solver_options,
                                             verbose=verbose)
                for v in g.nodes()}
    if g.degree[n] == 0:
        return 0
    dgamma2 = construct_dgamma2(g, n, verbose)
    gammax = construct_gammax(g, n)
    dim_b1 = gammax.shape[0]
    dim_b2 = dgamma2.shape[0]
    dim_s2 = dgamma2.shape[0] - gammax.shape[0]
    if verbose:
        print('dim dgamma2 {0}; dim gammax {1}'.format(dim_b2, dim_b1))
    # Zero-pad gammax up to the size of dgamma2.
    gammax_ext = np.block([
        [gammax, np.zeros((dim_b1, dim_s2))],
        [np.zeros((dim_b1, dim_s2)).T, np.zeros((dim_s2, dim_s2))],
    ])
    a = Parameter((dim_b2, dim_b2), value=dgamma2)
    b = Parameter((dim_b2, dim_b2), value=gammax_ext)
    kappa = Variable()
    constraints = [(a - kappa * b >> 0)]
    objective = Maximize(kappa)
    prob = Problem(objective, constraints)
    if verbose:
        print(prob.status)
    prob.solve(solver=solver, **solver_options)
    if verbose:
        print(prob.status)
    return prob.value
def create_update(f):
    """Build the per-agent ADMM update closure for objective term `f`.

    The returned callable maps the current consensus average `xbar` to
    this agent's updated x, maintaining the scaled dual `u` between calls.
    """
    x = Variable(n)
    u = Parameter(n)

    def local_update(xbar):
        # Dual update; on the very first call x has no value yet, so the
        # dual starts at zero.
        if x.value is None:
            u.value = np.zeros(n)
        else:
            u.value += x.value - xbar
        # Proximal x-update.
        Problem(Minimize(f(x) + (rho / 2) * sum_squares(x - xbar + u))).solve()
        return x.value

    return local_update
def as_constraint(self, *args):
    """
    For the faces of self.space, each partitioning X must have at least
    self._c adjacent faces with the same selection (or X is 0), e.g.:

        +---+---+---+
        |   | 0 |   |
        +---+---+---+
        | A | A | 0 |
        +---+---+---+
        | A | A |   |
        +---+---+---+

    self._actions is a Variable of size 'space.num_faces'
    """
    N = len(self.space.faces)
    adj = np.zeros((N, N), dtype=int)
    for face, verts in self.space.faces.to_faces.items():
        adj[list(verts), face] = 1
    adj = Parameter(shape=adj.shape, value=adj, symmetric=True,
                    name='face_adj_mat')
    # print(adj)
    constraints = []
    for action in self._actions:
        # [N, N] @ [N, 1] -> N : selected-neighbor count per face.
        # Of two tested formulations, this direct one (release unselected
        # faces via the (c + 1) * (1 - X) term) enforces connectivity and
        # was empirically faster than the alternative with an explicit
        # boolean slack variable.
        constraints.append(
            self._c <= adj @ action.vars
            + (self._c + 1) * (1 - action.vars)
        )
    return constraints
def test_warm_start(self):
    """Test warm start.

    Warm-started and cold solves must agree on the optimal value.
    """
    m, n = 200, 100
    np.random.seed(1)
    A = np.random.randn(m, n)
    b = Parameter(m)

    # Construct the problem.
    x = Variable(n)
    prob = Problem(Minimize(sum_squares(A * x - b)))

    # Same data: cold solve, then warm solve.
    b.value = np.random.randn(m)
    self.assertAlmostEqual(prob.solve(warm_start=False),
                           prob.solve(warm_start=True))

    # New data: warm solve, then cold solve.
    b.value = np.random.randn(m)
    self.assertAlmostEqual(prob.solve(warm_start=True),
                           prob.solve(warm_start=False))
def test_parametric(self):
    """Test solve parametric problem vs full problem"""
    x = Variable()
    a = 10
    # b_vec = [-10, -2., 2., 3., 10.]
    b_vec = [-10, -2.]
    for solver in self.solvers:
        print(solver)
        # Reference: rebuild and solve a fresh problem for each b.
        x_full = []
        obj_full = []
        for b in b_vec:
            prob = Problem(Minimize(a * (x ** 2) + b * x),
                           [0 <= x, x <= 1])
            prob.solve(solver=solver)
            x_full.append(x.value)
            obj_full.append(prob.value)
        # Parametric: one problem, sweep the parameter value.
        b = Parameter()
        prob = Problem(Minimize(a * (x ** 2) + b * x), [0 <= x, x <= 1])
        x_param = []
        obj_param = []
        for b_value in b_vec:
            b.value = b_value
            prob.solve(solver=solver)
            x_param.append(x.value)
            obj_param.append(prob.value)
        print(x_full)
        print(x_param)
        for i in range(len(b_vec)):
            self.assertItemsAlmostEqual(x_full[i], x_param[i], places=3)
            self.assertAlmostEqual(obj_full[i], obj_param[i])
def test_lasso(self):
    """Consensus-ADMM lasso: minimize sum_squares(A*x - b) + gamma*norm(x,1)."""
    # Problem data.
    m, n = 100, 75
    np.random.seed(1)
    A = np.random.randn(m, n)
    b = np.random.randn(m)

    # Split the fit term and the regularizer into separate subproblems.
    x = Variable(n)
    gamma = Parameter(nonneg=True)
    p_list = [Problem(Minimize(sum_squares(A * x - b))),
              Problem(Minimize(gamma * norm(x, 1)))]
    probs = Problems(p_list)

    # Solve via consensus.
    gamma.value = 1.0
    probs.solve(method="consensus", rho_init=1.0, max_iter=50)
    print("Objective:", probs.value)
    print("Solution:", x.value)
R = 0.1 * sparse.eye(1) # Initial and reference states x0 = np.array([0.1, 0.2]) # initial state # Reference input and states pref = 7.0 vref = 0 xref = np.array([pref, vref]) # reference state # Prediction horizon Np = 20 # Define problem u = Variable((nu, Np)) x = Variable((nx, Np + 1)) x_init = Parameter(nx) objective = 0 constraints = [x[:, 0] == x_init] for k in range(Np): objective += quad_form(x[:, k] - xref, Q) + quad_form( u[:, k], R) # objective function constraints += [x[:, k + 1] == Ad @ x[:, k] + Bd @ u[:, k] ] # system dynamics constraint constraints += [xmin <= x[:, k], x[:, k] <= xmax] # state interval constraint constraints += [umin <= u[:, k], u[:, k] <= umax] # input interval constraint objective += quad_form(x[:, Np] - xref, QN) prob = Problem(Minimize(objective), constraints) # Simulate in closed loop
# Uses the Alternating Direction Method of Multipliers # with a (non-convex) cardinality constraint. # Generate data. np.random.seed(1) N = 50 M = 40 n = 10 data = [] for i in range(N): data += [(1, np.random.normal(1.0, 2.0, (n, 1)))] for i in range(M): data += [(-1, np.random.normal(-1.0, 2.0, (n, 1)))] # Construct problem. gamma = Parameter(nonneg=True) gamma.value = 0.1 # 'a' is a variable constrained to have at most 6 non-zero entries. a = Card(n, k=6) b = Variable() slack = [pos(1 - label*(sample.T*a - b)) for (label, sample) in data] objective = Minimize(norm(a, 2) + gamma*sum(slack)) p = Problem(objective) # Extensions can attach new solve methods to the CVXPY Problem class. p.solve(method="admm") # Count misclassifications. error = 0 for label, sample in data: if not label*(a.value.T*sample - b.value)[0] >= 0:
from queue import PriorityQueue import numpy from cvxpy import Minimize, Parameter, Problem, sum_squares, Variable # Problem data. m = 25 n = 20 numpy.random.seed(1) A = numpy.matrix(numpy.random.randn(m, n)) b = numpy.matrix(numpy.random.randn(m, 1)) #b = A*numpy.random.uniform(-1, 1, size=(n, 1)) # Construct the problem. x = Variable(n) L = Parameter(n) U = Parameter(n) f = lambda x: sum_squares(A*x - b) prob = Problem(Minimize(f(x)), [L <= x, x <= U]) visited = 0 best_solution = numpy.inf best_x = 0 nodes = PriorityQueue() nodes.put((numpy.inf, 0, -numpy.ones(n), numpy.ones(n), 0)) while not nodes.empty(): visited += 1 # Evaluate the node with the lowest lower bound. _, _, L_val, U_val, idx = nodes.get() L.value = L_val
# Taken from CVX website http://cvxr.com/cvx/examples/ # Exercise 5.1d: Sensitivity analysis for a simple QCQP # Ported from cvx matlab to cvxpy by Misrab Faizullah-Khan # Original comments below # Boyd & Vandenberghe, "Convex Optimization" # Joelle Skaf - 08/29/05 # (a figure is generated) # # Let p_star(u) denote the optimal value of: # minimize x^2 + 1 # s.t. (x-2)(x-2)<=u # Finds p_star(u) and plots it versus u u = Parameter() x = Variable() objective = Minimize(quad_form(x, 1) + 1) constraint = [quad_form(x, 1) - 6 * x + 8 <= u] p = Problem(objective, constraint) # Assign a value to gamma and find the optimal x. def get_x(u_value): u.value = u_value result = p.solve() return x.value u_values = np.linspace(-0.9, 10, num=50)
def main():
    """Fit boundary-force distributions (sigma_xz, sigma_yz) to observed
    deflection data via regularized least squares, sweeping the
    regularization weight gamma and writing diagnostic plots to a PDF.
    """
    class MyParser(argparse.ArgumentParser):
        # Print the help text (not just the error) on bad arguments.
        def error(self, message):
            sys.stderr.write('error: %s\n' % message)
            self.print_help()
            sys.exit(2)

    parser = MyParser()
    parser.add_argument("-f", "--file", dest="filename",
                        help="data file in CSV format", metavar="FILENAME")
    parser.add_argument("-o", "--output", dest="output",
                        help="output in CSV format", metavar="OUTPUT")
    parser.add_argument("-ofig", "--outfigures", dest="outfigures",
                        help="output plots in PDF format", metavar="OUTFIGURES")
    parser.add_argument("-t", "--threshold", dest="threshold",
                        help="threshold distance", metavar="THRESHOLD")
    parser.add_argument("-r", "--regularization", dest="regularization",
                        help="tv, tviso, divergence, tvtrace",
                        metavar="REGULARIZATION")
    parser.add_argument("-n", "--nsolutions", dest="nsolutions",
                        help="number of solutions", metavar="NSOLUTIONS")
    parser.add_argument("-s", "--solver", dest="solver",
                        help="solver (cvxopt, ecos)", metavar="SOLVER")
    results = parser.parse_args()

    figure_outfile = "figureoutput.pdf" if results.outfigures is None else results.outfigures
    csv_outfile = "fittedout.csv" if results.output is None else results.output
    CUTOFF = 16 if results.threshold is None else float(results.threshold)
    # NOTE(review): the default "tvnorm" matches none of the branches
    # below, so running without -r always hits the "Invalid
    # regularization choice" exit — confirm whether the default should
    # be "tv".
    REGULARIZATION = "tvnorm" if results.regularization is None else results.regularization
    N_SOLUTIONS = 10 if results.nsolutions is None else int(
        float(results.nsolutions))

    coords, deflection, boundary = read_data(results.filename)

    # let's see if we have gridded data. If we do, then use the implicit
    # data grid for all computations
    x_obs_positions = sorted(set(coords[:, 0]))
    y_obs_positions = sorted(set(coords[:, 1]))
    dx = abs(x_obs_positions[1] - x_obs_positions[0])
    dy = abs(y_obs_positions[1] - y_obs_positions[0])
    N = len(x_obs_positions)
    M = len(y_obs_positions)
    boundary2d = boundary.reshape((N, M))
    # Build a filled mask column-by-column between the first and last
    # boundary points.
    mask = np.zeros(boundary2d.shape)
    for r in range(boundary2d.shape[1]):
        pts = np.where(boundary2d[:, r] == 1)
        if (len(pts[0]) > 0):
            mini = (min(min(pts)))
            maxi = max(max(pts))
            mask[mini:maxi, r] = 1
    # Signed distance to the mask: negative inside, positive outside.
    distances2d = -ndimage.distance_transform_edt(
        mask) + ndimage.distance_transform_edt(1 - mask)
    distances2d = distances2d.flatten()
    condition_inside = distances2d <= 0
    condition_outside = (distances2d > 0) * (distances2d <= CUTOFF)
    del distances2d, mask, boundary2d
    gc.collect()
    # Grid indices of the observation points inside/outside the mask.
    x_out = np.array(coords[condition_outside, 0] / dx, dtype=int)
    y_out = np.array(coords[condition_outside, 1] / dy, dtype=int)
    x_in = np.array(coords[condition_inside, 0] / dx, dtype=int)
    y_in = np.array(coords[condition_inside, 1] / dy, dtype=int)
    x_center = np.mean(x_in)
    y_center = np.mean(y_in)
    u_x_in = deflection[condition_inside]
    u_x_out = deflection[condition_outside]
    n_in = len(x_in)
    n_out = len(x_out)
    spacing = 1
    G_in_in_xx, G_in_in_xy, G_out_in_xx, G_out_in_xy, G_in_in_yy, G_in_in_yx, G_out_in_yy, G_out_in_yx, Dx, Dy = gen_matrices(
        x_in, y_in, x_out, y_out, dx * spacing, dy * spacing, loworder=True)
    print("Size of the problem is " + str(n_in + n_out))
    """ Setting up the problem
    ======================
    We compute the coefficient matrices for the linear problem
    """
    """ Setting up the optimization problem
    ===================================
    Define norms
    """
    # NOTE(review): `sign="positive"` is the pre-1.0 cvxpy Parameter API
    # (newer versions use nonneg=True) — confirm the pinned cvxpy version.
    gamma = Parameter(sign="positive", value=1)
    sigma_xz = Variable(n_in)
    sigma_yz = Variable(n_in)
    # predicted_in = A_in_in*sigma_xz + D_in_in*sigma_yz
    # add higher order terms
    predicted_in_x = G_in_in_xx * sigma_xz + G_in_in_xy * sigma_yz
    predicted_out_x = G_out_in_xx * sigma_xz + G_out_in_xy * sigma_yz
    predicted_in_y = G_in_in_yx * sigma_xz + G_in_in_yy * sigma_yz
    predicted_out_y = G_out_in_yx * sigma_xz + G_out_in_yy * sigma_yz
    gamma_vals = np.logspace(-3, 2, N_SOLUTIONS)
    # Data-mismatch term (x-deflections only).
    error = sum_squares(u_x_in - predicted_in_x) + sum_squares(u_x_out -
                                                              predicted_out_x)
    # Choose the regularity penalty.
    if REGULARIZATION == "tvtrace":
        regularity_penalty = tvnorm_trace_2d(sigma_xz, sigma_yz, Dx, Dy)
    elif REGULARIZATION == "tviso":
        regularity_penalty = norm(Dx * sigma_xz / dx, 1) + norm(
            Dy * sigma_xz / dy, 1) + norm(Dx * sigma_yz / dx, 1) + norm(
                Dy * sigma_yz / dy, 1)
    elif REGULARIZATION == "tv":
        regularity_penalty = tvnorm2d(sigma_xz, Dx, Dy) + tvnorm2d(
            sigma_yz, Dx, Dy)
    elif REGULARIZATION == 'l2_grad':
        regularity_penalty = sum_squares(
            Dx * sigma_xz + Dx * sigma_yz) + sum_squares(Dy * sigma_xz +
                                                         Dy * sigma_yz)
    elif REGULARIZATION == 'l1':
        regularity_penalty = norm(sigma_xz + sigma_yz, p=1)
    elif REGULARIZATION == 'l2':
        regularity_penalty = sum_squares(sigma_xz +
                                         sigma_yz) + sum_squares(sigma_xz +
                                                                 sigma_yz)
    elif REGULARIZATION == 'det':
        gamma_vals = np.logspace(-8, -5, N_SOLUTIONS)
        regularity_penalty = sum_entries(-log2(sigma_xz) - log2(sigma_yz))
    else:
        print("Invalid regularization choice")
        sys.exit(0)
    # Net force must vanish.
    forceconstraints = [
        sum_entries(sigma_xz) == 0,
        sum_entries(sigma_yz) == 0
    ]
    # add torque-free constraint here
    net_torque = sum_entries(
        mul_elemwise(x_in - x_center, sigma_yz) -
        mul_elemwise(y_in - y_center, sigma_xz))
    torqueconstraints = [net_torque == 0]
    constraints = forceconstraints + torqueconstraints
    objective = Minimize(error + gamma * regularity_penalty)
    prob = Problem(objective, constraints)
    sq_penalty = []
    l1_penalty = []
    sigma_xz_values = []
    sigma_yz_values = []
    with PdfPages(figure_outfile) as pdf:
        # Sweep gamma along the trade-off curve; one PDF page per solve.
        for val in gamma_vals:
            gamma.value = val
            try:
                if results.solver is not None and results.solver == "ecos":
                    prob.solve(verbose=True,
                               max_iters=50,
                               warm_start=True,
                               solver=cvxpy.ECOS,
                               feastol=1e-6,
                               reltol=1e-5,
                               abstol=1e-6)
                elif results.solver is not None and results.solver == "cvxopt":
                    prob.solve(verbose=True,
                               max_iters=50,
                               warm_start=True,
                               solver=cvxpy.CVXOPT,
                               feastol=1e-6,
                               reltol=1e-5,
                               abstol=1e-6)
                else:
                    prob.solve(verbose=True,
                               max_iters=50,
                               warm_start=True,
                               feastol=1e-6,
                               reltol=1e-5,
                               abstol=1e-6)
            except cvxpy.SolverError:
                # Skip gamma values the solver cannot handle.
                continue
            sq_penalty.append(error.value)
            l1_penalty.append(regularity_penalty.value)
            sigma_xz_values.append(sigma_xz.value)
            sigma_yz_values.append(sigma_yz.value)
            # Scatter the fitted forces and predicted deflections back
            # onto the full observation grid.
            force = np.zeros_like(coords)
            force[condition_inside, 0] = sigma_xz.value.reshape((n_in, ))
            force[condition_inside, 1] = sigma_yz.value.reshape((n_in, ))
            u_x = np.zeros(coords.shape[0])
            u_x[condition_inside] = predicted_in_x.value
            u_x[condition_outside] = predicted_out_x.value
            maxmagnitude = np.max(np.abs(force))
            plt.rc('text', usetex=True)
            plt.rc('font', family='serif')
            plt.figure(figsize=(10, 10))
            x_min = min(coords[boundary == 1, 0])
            x_max = max(coords[boundary == 1, 0])
            y_min = min(coords[boundary == 1, 1])
            y_max = max(coords[boundary == 1, 1])
            pdf.attach_note("$\gamma$: " + str(val))
            plt.suptitle("$\gamma$: " + str(val) + "\n" + "mismatch: " +
                         str(error.value) + " penalty: " +
                         str(regularity_penalty.value))
            plt.subplot(221)
            plt.xlim((x_min - 40, x_max + 40))
            plt.ylim((y_min - 40, y_max + 40))
            plt.pcolormesh(x_obs_positions,
                           y_obs_positions,
                           force[:, 0].reshape(
                               (len(x_obs_positions),
                                len(y_obs_positions))).transpose(),
                           cmap='seismic_r',
                           vmax=maxmagnitude * .75,
                           vmin=-maxmagnitude * .8)
            plt.title("$\sigma_{xz}$")
            plt.colorbar()
            plt.subplot(222)
            plt.xlim((x_min - 40, x_max + 40))
            plt.ylim((y_min - 40, y_max + 40))
            plt.pcolormesh(x_obs_positions,
                           y_obs_positions,
                           force[:, 1].reshape(
                               (len(x_obs_positions),
                                len(y_obs_positions))).transpose(),
                           cmap='seismic_r',
                           vmax=maxmagnitude * .75,
                           vmin=-maxmagnitude * .8)
            plt.title("$\sigma_{yz}$")
            plt.colorbar()
            plt.subplot(223)
            plt.xlim((x_min - 40, x_max + 40))
            plt.ylim((y_min - 40, y_max + 40))
            plt.pcolormesh(x_obs_positions,
                           y_obs_positions,
                           u_x.reshape((len(x_obs_positions),
                                        len(y_obs_positions))).transpose(),
                           cmap='seismic_r')
            plt.title("$\hat{u}_x$")
            plt.colorbar()
            plt.subplot(224)
            plt.xlim((x_min - 40, x_max + 40))
            plt.ylim((y_min - 40, y_max + 40))
            plt.pcolormesh(x_obs_positions,
                           y_obs_positions,
                           (deflection - u_x).reshape(
                               (len(x_obs_positions),
                                len(y_obs_positions))).transpose(),
                           cmap='seismic_r')
            plt.title("$u_x-\hat{u}_x$")
            plt.colorbar()
            pdf.savefig()
            #plt.show()
            plt.close()
        # Trade-off (L-curve) summary page.
        plt.plot(sq_penalty, l1_penalty)
        plt.xlabel("Mismatch", fontsize=16)
        plt.ylabel("Regularity", fontsize=16)
        plt.title('Trade-Off Curve', fontsize=16)
        # NOTE(review): this relies on numpy-scalar * list broadcasting
        # (sq_penalty/l1_penalty are plain lists) — works only if the
        # appended values are numpy scalars; verify.
        l_curve_distances = np.abs((l1_penalty[-1]-l1_penalty[0])*sq_penalty - \
            (sq_penalty[-1]-sq_penalty[0])*l1_penalty+sq_penalty[-1]*l1_penalty[0]-l1_penalty[-1]*sq_penalty[0])
        # Choose the optimal lambda value
        pdf.savefig()
        plt.close()
    input("Press Enter to continue...")
Qy = np.diag(2 * [20]) # or sparse.diags([]) #QyN = np.diag(2*[20]) # final cost QDy = np.eye(ny) Qrg = 100 * np.eye(ny) QDg = 0.5 * sparse.eye(ny) # Quadratic cost for Du0, Du1, ...., Du_N-1 # Initial and reference x0 = np.array(2 * [0.0, 0.0]) # initial state # Prediction horizon Np = 40 # Define problem g = Variable((ng, Np)) x = Variable((nx, Np)) x_init = Parameter(nx) gminus1 = Parameter( ny ) # input at time instant negative one (from previous MPC window or uinit in the first MPC window) yminus1 = Parameter( ny ) # input at time instant negative one (from previous MPC window or uinit in the first MPC window) r = Parameter(ny) objective = 0.0 constraints = [x[:, 0] == x_init] y = Cd @ x + Dd @ g for k in range(Np): objective += quad_form(y[:, k] - r, Qy) # tracking cost objective += quad_form(g[:, k] - r, Qrg) # reference governor cost
# Synthetic signal plus Gaussian noise.
# NOTE(review): t, n and Pool are defined earlier in the file (outside
# this excerpt).
exact = 0.5 * np.sin(2 * np.pi * t / n) * np.sin(0.01 * t)
corrupt = exact + 0.05 * np.random.randn(len(exact))
corrupt = cvxopt.matrix(corrupt)

# First-difference operator D (bidiagonal -1/+1).
e = np.ones(n).T
ee = np.column_stack((-e, e)).T
D = sparse.spdiags(ee, range(-1, 1), n, n)
D = D.todense()
D = cvxopt.matrix(D)

# Solve in parallel
nopts = 10
lambdas = np.linspace(0, 50, nopts)

# Frame the problem with a parameter
lamb = Parameter(nonneg=True)
x = Variable(n)
p = Problem(Minimize(norm(x - corrupt) + norm(D * x) * lamb))

# For a value of lambda g, we solve the problem.
# Returns [ ||x - x_cor||_2, ||Dx||_2 ]
def get_value(g):
    lamb.value = g
    result = p.solve()
    return [np.linalg.norm(x.value - corrupt), np.linalg.norm(D * x.value)]

# NOTE(review): the worker closures rely on the fork start method
# sharing lamb/x/p with the parent — confirm on non-fork platforms.
pool = Pool(processes=4)
# compute allocation in parallel
norms1, norms2 = zip(*pool.map(get_value, lambdas))
QDy = 10 * np.eye(ny) # penalty on Delta y Qrg = 10 * np.eye(ny) QDg = 100 * sparse.eye(ny) # Quadratic cost for Du0, Du1, ...., Du_N-1 # Initial state, reference, command x0 = np.array(2 * [0.0, 0.0]) # initial state y0 = Cd @ x0 # initial state gm1 = np.array(2 * [0.0]) # g at time -1, used for the constraint on Delta g # In[MPC Problem setup] g = Variable((ng, Np)) x = Variable((nx, Np)) eps_slack = Variable(ny) x_init = Parameter(nx) gminus1 = Parameter( ny ) # MPC command at time -1 (from previous MPC window or g_step_old for the first instant) yminus1 = Parameter( ny ) # system output at time -1 (from previous MPC window or y_step_old for the first instant) r = Parameter(ny) y = Cd @ x + Dd @ g # system output definition objective = 0.0 objective += quad_form(eps_slack, 1e4 * np.eye(ny)) # constraint violation penalty on slack constraints = [x[:, 0] == x_init] # initial state constraint constraints += [eps_slack >= 0.0] # slack positive constraint
def __init__(self, inputs, cvx_set, **kwargs):
    """Store the target convex set as a Parameter, tiled so there is one
    row of `cvx_set` per input point variable.
    """
    ObjectiveR2.__init__(self, inputs, **kwargs)
    shape = self.inputs.point_vars.shape
    tiled = np.tile(cvx_set, (shape[0], 1))
    self._cvx_set = Parameter(shape=shape, value=tiled)
def __init__(self, space, limit=2, **kwargs):
    """Discrete formulation over `space` with a constant limit (default 2)
    stored as the Parameter self._c, used by the constraint builders.
    """
    FormulationDisc.__init__(self, space, **kwargs)
    self._c = Parameter(value=limit)
# (a figure is generated) # # Let p_star(epsilon) be the optimal value of the following problem: # minimize ||Ax + b + epsilon*d||_1 # Plots p_star(epsilon) versus epsilon and demonstrates the fact that it's # affine on an interval that includes epsilon = 0. # Input data m = 6 n = 3 A = cvxopt.matrix( [-2, 7, 1, -5, -1, 3, -7, 3, -5, -1, 4, -4, 1, 5, 5, 2, -5, -1], (m, n)) b = cvxopt.matrix([-4, 3, 9, 0, -11, 5], (m, 1)) d = cvxopt.matrix([-10, -13, -27, -10, -7, 14], (m, 1)) epsilon = Parameter() # The problem x = Variable(n) objective = Minimize(norm(A * x + b + epsilon * d, 1)) p = Problem(objective, []) # Assign a value to gamma and find the optimal x def get_p(e_value): epsilon.value = e_value result = p.solve() return result # Range of epsilon values
def calculate_portfolio(cvxtype, returns_function, long_only, exp_return,
                        selected_solver, max_pos_size, ticker_list):
    """Solve a Markowitz portfolio problem and return a result dict.

    Variables:
    mu is the vector of expected returns.
    sigma is the covariance matrix.
    gamma is a Parameter that trades off risk and return.
    x is a vector of stock holdings as fractions of total assets.

    cvxtype selects the formulation: 'minimize_risk' (minimum variance)
    or 'maximize_return' (risk-adjusted return with trade-off gamma).
    returns_function unpacks to (returns, stocks, betas); only `returns`
    (a DataFrame, since .cov() is called on it) is used here.
    """
    assert cvxtype in ['minimize_risk','maximize_return']
    gamma = Parameter(nonneg=True)
    gamma.value = 1
    returns, stocks, betas = returns_function
    cov_mat = returns.cov()
    Sigma = cov_mat.values  # np.asarray(cov_mat.values)
    w = Variable(len(cov_mat))  # number of stocks for portfolio weights
    risk = quad_form(w, Sigma)  # expected_variance => w.T*C*w = quad_form(w, C)
    # num_stocks = len(cov_mat)
    if cvxtype == 'minimize_risk':
        # Minimize portfolio risk / portfolio variance.
        # NOTE(review): `w > 0` is a strict inequality — accepted by old
        # cvxpy (treated as >=) but rejected by cvxpy >= 1.0; confirm the
        # pinned version.
        if long_only == True:
            prob = Problem(Minimize(risk), [sum(w) == 1, w > 0])  # Long only
        else:
            prob = Problem(Minimize(risk), [sum(w) == 1])  # Long / short
    elif cvxtype == 'maximize_return':
        # Maximize portfolio return given required level of risk.
        #mu #Expected return for each instrument
        #expected_return = mu*x
        #risk = quad_form(x, sigma)
        #objective = Maximize(expected_return - gamma*risk)
        #p = Problem(objective, [sum_entries(x) == 1])
        #result = p.solve()
        # NOTE(review): mu is a constant vector repeating exp_return, not
        # per-asset expected returns — confirm this is intended.
        mu = np.array([exp_return]*len(cov_mat))  # mu is the vector of expected returns.
        expected_return = np.reshape(mu,(-1,1)).T * w  # w is a vector of stock holdings as fractions of total assets.
        objective = Maximize(expected_return - gamma*risk)  # Maximize(expected_return - expected_variance)
        if long_only == True:
            constraints = [sum(w) == 1, w > 0]
        else:
            #constraints=[sum_entries(w) == 1,w <= max_pos_size, w >= -max_pos_size]
            constraints=[sum(w) == 1]
        prob = Problem(objective, constraints)
    prob.solve(solver=selected_solver)
    weights = []
    for weight in w.value:
        weights.append(float(weight))
    if cvxtype == 'maximize_return':
        optimal_weights = {"Optimal expected return":expected_return.value,
                           "Optimal portfolio weights":np.round(weights,2),
                           "tickers": ticker_list,
                           "Optimal risk": risk.value*100
                           }
    elif cvxtype == 'minimize_risk':
        optimal_weights = {"Optimal portfolio weights":np.round(weights,2),
                           "tickers": ticker_list,
                           "Optimal risk": risk.value*100
                           }
    return optimal_weights