def solve_ip(min_decade=MIN_DECADE, max_decade=MAX_DECADE, min_players_per_decade=0,
             max_players_per_decade=1, players_per_position=1, team=[],
             positions=field_pos + pitcher_pos):
    # initialize a dictionary of vectors to hold the position constraints
    pos_list = {pos: [] for pos in positions}
    # initialize a dictionary of vectors to hold the decade constraints
    decade_vec = {decade: [] for decade in range(min_decade, max_decade + 1, 10)}
    print(positions)
    WAR_vec = []
    all_df = pd.DataFrame()
    # loop through each position
    for pos in positions:
        df = pd.read_csv(DATA_DIR + pos + ".csv")
        # remove players from decades earlier than our first
        df = df.loc[df['YEAR'] >= min_decade]
        # remove players from decades later than our last
        df = df.loc[df['YEAR'] <= max_decade]
        if len(team) > 0:
            df = df.loc[df['TEAM'].isin(team)]
        if "BATTER" in df.columns.tolist():
            df = df[df['BATTER'] != 58809]
        print(len(df))
        # create pos column on dataframe for easy printing
        df['POS'] = pos
        # create an overall dataframe
        # (note: DataFrame.append was removed in pandas 2.0; on newer pandas
        # this would be pd.concat([all_df, df], sort=False))
        all_df = all_df.append(df, sort=False)
        zeroes = np.zeros(len(df))
        ones = np.ones(len(df))
        # set the position vector
        for pos_list_item in positions:
            if pos == pos_list_item:
                pos_list[pos_list_item].extend(ones)
            else:
                pos_list[pos_list_item].extend(zeroes)
        # set the decade vectors
        for decade in range(min_decade, max_decade + 1, 10):
            this_decade = np.where(df['YEAR'] == decade, 1, 0)
            decade_vec[decade].extend(this_decade)
        # create a vector of WARP values
        WAR_vec.extend(df['WARP'].to_list())

    selection = cp.Variable(len(WAR_vec), boolean=True)
    constraints = [(decade_vec[i] * selection <= max_players_per_decade)
                   for i in range(min_decade, max_decade + 1, 10)] + \
                  [(decade_vec[i] * selection >= min_players_per_decade)
                   for i in range(min_decade, max_decade + 1, 10)]
    for pos in positions:
        max_players = players_per_position
        if pos == 'OF':
            max_players = 3 * players_per_position
        constraints.append(pos_list[pos] * selection <= max_players)

    WAR = WAR_vec * selection
    problem = cp.Problem(cp.Maximize(WAR), constraints)
    print(cp.installed_solvers())
    problem.solve(solver=cp.ECOS_BB, mi_max_iters=5)
    print("***********value is {}".format(problem.value))
    all_df = all_df.reset_index(drop=True)
    if problem.status not in ["infeasible", "infeasible_inaccurate", "unbounded"]:
        ret_val = [round(problem.value, 1)] + get_solution(selection.value, all_df)
    else:
        raise Exception('Infeasible')
    return ret_val
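# Hypothetical invocation sketch. solve_ip depends on module-level names
# (MIN_DECADE, MAX_DECADE, DATA_DIR, field_pos, pitcher_pos, get_solution)
# assumed to be defined elsewhere in this project; the team code below is
# illustrative only.
# result = solve_ip(min_decade=1920, max_decade=2010, players_per_position=1, team=['NYA'])
# print(result)  # [total WARP, selected player rows...]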
import matplotlib.pyplot as plt
import cvxpy as cp
import numpy as np

r = 20
np.random.seed(1)
A = np.hstack((np.random.randn(r, 1), np.ones([r, 1])))
c = np.array([A[:, 0]]).T
b = (10.0 * np.random.randn() * c) + (0.5 * np.random.randn(r, 1))
b = b.flatten()  # keep b as a flat array so shapes line up with A @ x

x = cp.Variable(2)
obj = cp.Minimize(cp.sum_squares(A @ x - b))
P = cp.Problem(obj)
P.solve(verbose=True)
print(x.value)

s = np.arange(c.min() - 0.3, c.max() + 0.3, 0.1)
# np.asscalar was removed from NumPy; plain float conversion does the same job
t = float(x.value[0]) * s + float(x.value[1])
plt.plot(s, t)
plt.plot(c, b, 'ro')
plt.show()
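# Sanity check (assumes A, b, x from the script above are in scope): the
# unconstrained least-squares problem has a closed form, so np.linalg.lstsq
# should agree with the CVXPY solution up to solver tolerance.
beta, *_ = np.linalg.lstsq(A, b, rcond=None)
print(beta)  # should match x.value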
def direct_optimization(N, dt, path_state, Ux, path_idx, ds):
    print('check')
    print(path_state)
    s0 = path_state[0]
    s_dot0 = path_state[1]
    e0 = path_state[2]
    e_dot0 = path_state[3]
    dpsi0 = path_state[4]
    dpsi_dot0 = path_state[5]

    est_idx = Ux * N * dt / ds
    split_idx = est_idx / N
    K = np.rint(split_idx * np.arange(0, N))
    K_dot = np.zeros(N)
    for idx in range(1, N):
        K_dot[idx] = K[idx] - K[idx - 1]

    delta = cp.Variable((N, 1))
    s = cp.Variable((N, 1))
    s_dot = cp.Variable((N, 1))
    e = cp.Variable((N, 1))
    e_dot = cp.Variable((N, 1))
    dpsi = cp.Variable((N, 1))
    dpsi_dot = cp.Variable((N, 1))

    f0 = -s[-1]
    objective = cp.Minimize(f0)

    # subject to x_{i+1} = x_i + h*A*x_i + h*B*u_i + h*C
    constraints = [
        s[0] == s0,
        e[0] == e0,
        dpsi[0] == dpsi0,
        dpsi_dot[0] == dpsi_dot0,
        e_dot[0] == e_dot0,
        s_dot[0] == Ux
    ]
    constraints += [delta[0] <= np.radians(25), delta[0] >= np.radians(-25)]
    for idx in range(1, N):
        constraints += [s[idx] == s[idx - 1] + s_dot[idx - 1] * dt]
        constraints += [s_dot[idx] == Ux]
        constraints += [e[idx] == e[idx - 1] + e_dot[idx - 1] * dt]
        constraints += [
            e_dot[idx] == e_dot[idx - 1] + dt *
            (-(Ca_f + Ca_r) / (m * Ux) * e_dot[idx - 1] +
             (Ca_f + Ca_r) / m * dpsi[idx - 1] +
             (b * Ca_r - a * Ca_f) / (m * Ux) * dpsi_dot[idx - 1]) +
            dt * (Ca_f / m) * delta[idx - 1] +
            dt * (-K[idx - 1] * (Ux**2 - (b * Ca_r - a * Ca_f) / m))
        ]
        constraints += [dpsi[idx] == dpsi[idx - 1] + dpsi_dot[idx - 1] * dt]
        constraints += [
            dpsi_dot[idx] == dpsi_dot[idx - 1] + dt *
            ((b * Ca_r - a * Ca_f) / (Iz * Ux) * e_dot[idx - 1] +
             (a * Ca_f - b * Ca_r) / Iz * dpsi[idx - 1] -
             (a**2 * Ca_f + b**2 * Ca_r) / (Iz * Ux) * dpsi_dot[idx - 1]) +
            dt * a * Ca_f / Iz * delta[idx - 1] +
            dt * (-K[idx - 1] * (a**2 * Ca_f + b**2 * Ca_r) / Iz - K_dot[idx - 1] * Ux)
        ]
        constraints += [
            delta[idx] <= np.radians(25),
            delta[idx] >= np.radians(-25)
        ]
        constraints += [
            e[idx] <= TRACK_WIDTH / 2.0,
            e[idx] >= -TRACK_WIDTH / 2.0
        ]

    prob = cp.Problem(objective, constraints)
    result = prob.solve()
    return -delta.value[0][0]
def dist(lh_set, rh_set):
    objective = cp.Minimize(cp.norm2(lh_set - rh_set))
    return cp.Problem(objective).solve()
def tight_infer_with_partial_graph(y_val, s1_val, s0_val):
    partial_cbn = load_xml_to_cbn(partial_model)
    partial_cbn.build_joint_table()

    S = partial_cbn.v['S']
    W = partial_cbn.v['W']
    A = partial_cbn.v['A']
    Y_hat = partial_cbn.v['Y']

    if s1_val == s0_val:
        # there is no difference when active value = reference value
        return 0.00, 0.00
    else:
        # define variable for P(r)
        PR = cvx.Variable(W.domain_size ** (S.domain_size))

        # define ell functions
        g = {}
        for v in {S, W, A, Y_hat}:
            v_index = v.index
            v_domain_size = v.domain_size
            parents_index = partial_cbn.index_graph.pred[v_index].keys()
            parents_domain_size = np.prod(
                [partial_cbn.v[i].domain_size for i in parents_index])
            g[v_index] = list(
                product(range(v_domain_size), repeat=int(parents_domain_size)))
            # format: [(), (), ()]
            # r corresponds to the tuple
            # parents corresponds to the location of the tuple

        # assert the response function (t function of Pearl, I function in our paper)
        def Indicator(obs, parents, response):
            # sort the parents by id
            par_key = parents.keys()
            # map the value to index
            par_index = 0
            for k in par_key:
                par_index = par_index * partial_cbn.v[k].domain_size + parents.dict[k]
            return 1 if obs.first_value() == g[obs.first_key()][response][par_index] else 0

        # build the objective-function weights
        weights = np.zeros(shape=[W.domain_size ** (S.domain_size)])
        for rw in range(W.domain_size ** (S.domain_size)):
            # the first term for pse
            sum_identity = 0.0
            for w1, w0, a1 in product(W.domains.get_all(), W.domains.get_all(), A.domains.get_all()):
                product_i = Indicator(Event({W: w1}), Event({S: s1_val}), rw) * \
                            partial_cbn.get_prob(Event({A: a1}), Event({W: w1})) * \
                            Indicator(Event({W: w0}), Event({S: s0_val}), rw) * \
                            partial_cbn.get_prob(Event({Y_hat: y_val}), Event({S: s1_val, W: w0, A: a1}))
                sum_identity += product_i
            weights[rw] += sum_identity

            # the second term for pse
            sum_identity = 0.0
            for w0, a0 in product(W.domains.get_all(), A.domains.get_all()):
                product_i = Indicator(Event({W: w0}), Event({S: s0_val}), rw) * \
                            partial_cbn.get_prob(Event({A: a0}), Event({W: w0})) * \
                            partial_cbn.get_prob(Event({Y_hat: y_val}), Event({S: s0_val, W: w0, A: a0}))
                sum_identity += product_i
            weights[rw] -= sum_identity

        # build the objective function
        objective = weights.reshape(1, -1) @ PR

        ############################
        ### to build the constraints
        ############################

        ### the inferred model is consistent with the observational distribution
        A_mat = np.zeros((S.domain_size, W.domain_size, A.domain_size,
                          Y_hat.domain_size, W.domain_size ** (S.domain_size)))
        b_vex = np.zeros((S.domain_size, W.domain_size, A.domain_size, Y_hat.domain_size))

        # assert r -> v
        for s, w, a, y in product(S.domains.get_all(), W.domains.get_all(),
                                  A.domains.get_all(), Y_hat.domains.get_all()):
            # calculate the probability of observation
            b_vex[s.index, w.index, a.index, y.index] = partial_cbn.get_prob(
                Event({S: s, Y_hat: y, W: w, A: a}))
            # sum of P(r)
            for rw in range(W.domain_size ** (S.domain_size)):
                product_i = partial_cbn.get_prob(Event({S: s}), Event({})) * \
                            partial_cbn.get_prob(Event({Y_hat: y}), Event({S: s, W: w, A: a})) * \
                            Indicator(Event({W: w}), Event({S: s}), rw) * \
                            partial_cbn.get_prob(Event({A: a}), Event({W: w}))
                A_mat[s.index, w.index, a.index, y.index, rw] = product_i

        # flatten the matrix and vector
        A_mat = A_mat.reshape(
            S.domain_size * W.domain_size * A.domain_size * Y_hat.domain_size,
            W.domain_size ** (S.domain_size))
        b_vex = b_vex.reshape(-1, 1)

        ### the probability <= 1
        C_mat = np.identity(W.domain_size ** (S.domain_size))
        d_vec = np.ones(W.domain_size ** (S.domain_size))

        ### the probability is positive
        E_mat = np.identity(W.domain_size ** (S.domain_size))
        f_vec = np.zeros(W.domain_size ** (S.domain_size))

        constraints = [
            A_mat @ PR == b_vex,
            # C_mat @ PR == d_vec,
            C_mat @ PR <= d_vec,
            E_mat @ PR >= f_vec
        ]

        # minimize the causal effect
        problem = cvx.Problem(cvx.Minimize(objective), constraints)
        problem.solve()
        # print('tight lower effect: %f' % (problem.value))
        lower = problem.value

        # maximize the causal effect
        problem = cvx.Problem(cvx.Maximize(objective), constraints)
        problem.solve()
        # print('tight upper effect: %f' % (problem.value))
        upper = problem.value

        return upper, lower
def cvx_fit(data, basis_matrix, weights=None, PSD=True, trace=None,
            trace_preserving=False, **kwargs):
    """
    Reconstruct a quantum state using CVXPY convex optimization.

    Args:
        data (vector like): vector of expectation values
        basis_matrix (matrix like): matrix of measurement operators
        weights (vector like, optional): vector of weights to apply to the
            objective function (default: None)
        PSD (bool, optional): Enforce the fitted matrix to be positive
            semidefinite (default: True)
        trace (int, optional): trace constraint for the fitted matrix
            (default: None).
        trace_preserving (bool, optional): Enforce the fitted matrix to be
            trace preserving when fitting a Choi-matrix in quantum process
            tomography (default: False).
        **kwargs (optional): kwargs for cvxpy solver.

    Returns:
        The fitted matrix rho that minimizes
        ||basis_matrix * vec(rho) - data||_2.

    Additional Information:

        Objective function
        ------------------
        This fitter solves the constrained least-squares minimization:

            minimize: ||a * x - b ||_2
            subject to: x >> 0 (PSD, optional)
                        trace(x) = t (trace, optional)
                        partial_trace(x) = identity (trace_preserving, optional)

        where:
            a is the matrix of measurement operators a[i] = vec(M_i).H
            b is the vector of expectation value data for each projector
              b[i] ~ Tr[M_i.H * x] = (a * x)[i]
            x is the vectorized density matrix (or Choi-matrix) to be fitted

        PSD constraint
        --------------
        The PSD keyword constrains the fitted matrix to be
        positive-semidefinite, which makes the optimization problem a SDP. If
        PSD=False the fitted matrix will still be constrained to be Hermitian,
        but not PSD. In this case the optimization problem becomes a SOCP.

        Trace constraint
        ----------------
        The trace keyword constrains the trace of the fitted matrix. If
        trace=None there will be no trace constraint on the fitted matrix.
        This constraint should not be used for process tomography and the
        trace preserving constraint should be used instead.

        Trace preserving (TP) constraint
        --------------------------------
        The trace_preserving keyword constrains the fitted matrix to be TP.
        This should only be used for process tomography, not state tomography.
        Note that the TP constraint implicitly enforces the trace of the
        fitted matrix to be equal to the square-root of the matrix dimension.
        If a trace constraint is also specified that differs from this value
        the fit will likely fail.

        CVXPY Solvers
        -------------
        Various solvers can be called in CVXPY using the `solver` keyword
        argument. Solvers included in CVXPY are:
            'CVXOPT': SDP and SOCP (default solver)
            'SCS'   : SDP and SOCP
            'ECOS'  : SOCP only
        See the documentation on CVXPY for more information on solvers.
    """
    # Check if CVXPY package is installed
    if cvxpy is None:
        raise Exception('CVXPY is not installed. Use `mle_fit` instead.')
    # Check CVXPY version
    version = cvxpy.__version__
    if not (version[0] == '1' or version[:3] == '0.4'):
        raise Exception('Incompatible CVXPY version. Install 1.0 or 0.4')

    # SDP VARIABLES

    # Since CVXPY only works with real variables we must specify the real
    # and imaginary parts of rho separately: rho = rho_r + 1j * rho_i

    dim = int(np.sqrt(basis_matrix.shape[1]))
    if version[:3] == '0.4':
        # Compatibility with legacy 0.4
        rho_r = cvxpy.Variable(dim, dim)
        rho_i = cvxpy.Variable(dim, dim)
    else:
        rho_r = cvxpy.Variable((dim, dim))
        rho_i = cvxpy.Variable((dim, dim))

    # CONSTRAINTS

    # The constraint that rho is Hermitian (rho.H = rho)
    # transforms to the two constraints
    #   1. rho_r.T = rho_r   (real part is symmetric)
    #   2. rho_i.T = -rho_i  (imaginary part is anti-symmetric)

    cons = [rho_r == rho_r.T, rho_i == -rho_i.T]

    # Trace constraint: note this should not be used at the same
    # time as the trace preserving constraint.

    if trace is not None:
        cons.append(cvxpy.trace(rho_r) == trace)

    # Since we can only work with real matrices in CVXPY we can specify
    # a complex PSD constraint as
    #   rho >> 0 iff [[rho_r, -rho_i], [rho_i, rho_r]] >> 0

    if PSD is True:
        rho = cvxpy.bmat([[rho_r, -rho_i], [rho_i, rho_r]])
        cons.append(rho >> 0)

    # Trace preserving constraint when fitting Choi-matrices for
    # quantum process tomography. Note that this adds an implicit
    # trace constraint of trace(rho) = sqrt(len(rho)) = dim;
    # if a different trace constraint is specified above this will
    # cause the fitter to fail.

    if trace_preserving is True:
        sdim = int(np.sqrt(dim))
        ptr = partial_trace_super(sdim, sdim)
        cons.append(ptr * cvxpy.vec(rho_r) == np.identity(sdim).ravel())

    # Rescale input data and matrix by weights if they are provided
    if weights is not None:
        w = np.array(weights)
        w = w / np.sqrt(sum(w**2))
        basis_matrix = w[:, None] * basis_matrix
        data = w * data

    # OBJECTIVE FUNCTION

    # The function we wish to minimize is || arg ||_2 where
    #   arg = bm * vec(rho) - data
    # Since we are working with real matrices in CVXPY we expand this as
    #   bm * vec(rho) = (bm_r + 1j * bm_i) * vec(rho_r + 1j * rho_i)
    #                 = bm_r * vec(rho_r) - bm_i * vec(rho_i)
    #                   + 1j * (bm_r * vec(rho_i) + bm_i * vec(rho_r))
    #                 = bm_r * vec(rho_r) - bm_i * vec(rho_i)
    # where we drop the imaginary part since the expectation value is real

    bm_r = np.real(basis_matrix)
    bm_i = np.imag(basis_matrix)

    # CVXPY doesn't seem to handle sparse matrices very well so we convert
    # sparse matrices to Numpy arrays.
    if isinstance(basis_matrix, sps.spmatrix):
        bm_r = bm_r.todense()
        bm_i = bm_i.todense()

    arg = bm_r * cvxpy.vec(rho_r) - bm_i * cvxpy.vec(rho_i) - np.array(data)

    # SDP objective function
    obj = cvxpy.Minimize(cvxpy.norm(arg, p=2))

    # Solve SDP
    prob = cvxpy.Problem(obj, cons)
    iters = 5000
    max_iters = kwargs.get('max_iters', 20000)
    # Set the default solver to 'CVXOPT'
    if 'solver' not in kwargs:
        kwargs['solver'] = 'CVXOPT'

    problem_solved = False
    while not problem_solved:
        kwargs['max_iters'] = iters
        prob.solve(**kwargs)
        if prob.status in ["optimal_inaccurate", "optimal"]:
            problem_solved = True
        elif prob.status == "unbounded_inaccurate":
            if iters < max_iters:
                iters *= 2
            else:
                raise RuntimeError(
                    "CVX fit failed, probably not enough iterations for the "
                    "solver")
        elif prob.status in ["infeasible", "unbounded"]:
            raise RuntimeError(
                "CVX fit failed, problem status {} which should not "
                "happen".format(prob.status))
        else:
            raise RuntimeError("CVX fit failed, reason unknown")

    rho_fit = rho_r.value + 1j * rho_i.value
    return rho_fit
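# A minimal, hypothetical usage sketch of cvx_fit for single-qubit state
# tomography. The Pauli basis, rho_true, and the noiseless data below are
# assumptions for illustration; they are not part of the fitter itself.
import numpy as np

I2 = np.eye(2)
X = np.array([[0, 1], [1, 0]])
Y = np.array([[0, -1j], [1j, 0]])
Z = np.diag([1.0, -1.0])
rho_true = 0.5 * (I2 + 0.6 * Z + 0.3 * X)

ops = [I2, X, Y, Z]
# Row i is vec(M_i).H; order='F' (column-major) matches cvxpy.vec's
# column-stacking convention, so (basis_matrix @ vec(rho))[i] = Tr[M_i.H rho].
basis_matrix = np.array([op.conj().ravel(order='F') for op in ops])
data = np.real([np.trace(op @ rho_true) for op in ops])

# solver='SCS' avoids requiring cvxopt; this assumes SCS is installed.
rho_fit = cvx_fit(data, basis_matrix, trace=1, solver='SCS')
print(np.round(rho_fit, 3))  # should be close to rho_true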
def build_qp(self, point_collisions, sweep_collisions, N, theta0, mu, d_safe):
    """Create the qp problem to be solved.

    point_collisions is an Iterable of JointObjectInfo storing all point collisions
    sweep_collisions is an Iterable of SweepJointObjectInfo storing all sweeping collisions
    theta0 is the trajectory at the last step
    mu is the penalty parameter to constraints
    warm means only trust region size is updated and we can reuse
    """
    N, dim_x = theta0.shape
    thetas = cp.Variable((N, dim_x), name="Configurations")
    trsize = cp.Parameter(name='Trust Region Size')
    # the sos loss is easy
    loss = 0
    if self.losses is None:
        loss += cp.sum_squares(thetas[1:] - thetas[:-1])
    else:
        for lossi in self.losses:
            obj, grad, hess = lossi.compute(theta0.flatten(), 2)
            delta = thetas.flatten() - theta0.flatten()
            # accumulate each second-order loss model (the original re-assigned
            # `loss =` here, which would drop earlier terms)
            loss += obj + grad * delta
            loss += 0.5 * cp.quad_form(delta, hess)
    # constraints on initial and final configuration
    # cons = [thetas[0] == self.q0, thetas[-1] == self.qf]
    cons = []
    if self.fixedq0:
        cons.append(thetas[0] == self.q0)
    if self.fixedqf:
        cons.append(thetas[-1] == self.qf)
    for idx_, con in self.constrs.eqs:
        for idx in self._indexes(idx_):
            if (idx == 0 and self.fixedq0) or (idx == N - 1 and self.fixedqf):
                continue
            val, jac = con.compute(theta0[idx], grad_level=1)
            pt = cp.Variable(val.size)
            loss += mu * cp.sum(pt)
            cons.append(pt >= val + jac * (thetas[idx] - theta0[idx]))
            cons.append(pt >= -(val + jac * (thetas[idx] - theta0[idx])))
    for idx_, con in self.constrs.ineqs:
        for idx in self._indexes(idx_):
            if (idx == 0 and self.fixedq0) or (idx == N - 1 and self.fixedqf):
                continue
            val, jac = con.compute(theta0[idx], grad_level=1)
            ineqpt = cp.Variable(val.size)
            loss += mu * cp.sum(ineqpt)
            cons.append(ineqpt >= 0)
            cons.append(ineqpt >= val + jac * (thetas[idx] - theta0[idx]))
    if self.config.limit_joint and self.config.joint_limits_by_bound:
        cons.append(thetas >= np.tile(self.qmin[None, :], (N, 1)))  # hope broadcasting works here
        cons.append(thetas <= np.tile(self.qmax[None, :], (N, 1)))
    # Now let's handle point_collisions
    n_point_collide = len(point_collisions)
    n_collide = n_point_collide + len(sweep_collisions)
    if n_collide > 0:
        pt = cp.Variable(n_collide, name='Auxiliary Variable')
        # all pt are added to the cost function and are positive
        loss += cp.sum(pt) * mu
        cons.append(pt >= 0)
    for i, pc in enumerate(point_collisions):
        # constraint that t > g_i(x) = d_safe - d0 - n^T J (theta - theta0)
        lin_coef = pc.normal.dot(pc.jacobian[:3])
        qid = pc.qid
        cons.append(pt[i] >= d_safe - pc.distance -
                    cp.sum(cp.multiply(lin_coef, thetas[qid] - theta0[qid])))
    for i, pc in enumerate(sweep_collisions):
        lin_coef1 = pc.normal.dot(pc.jacobian1[:3])
        lin_coef2 = pc.normal.dot(pc.jacobian2[:3])
        alpha, beta = pc.alpha, 1 - pc.alpha
        qid1, qid2 = pc.qid1, pc.qid1 + 1
        cons.append(pt[i + n_point_collide] >= d_safe - pc.distance -
                    cp.sum(alpha * (cp.multiply(lin_coef1, thetas[qid1] - theta0[qid1])) +
                           beta * (cp.multiply(lin_coef2, thetas[qid2] - theta0[qid2]))))
    # add trust region constraints, here I use the infinity norm
    cons.append(cp.pnorm(thetas - theta0, 'inf') <= trsize)
    # ready to build the problem and cache it
    prob = cp.Problem(cp.Minimize(loss), cons)
    self.cp_cache = (prob, trsize, thetas)
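# Sketch of how the cached problem might be re-used on later SQP iterations
# (hypothetical call site; mirrors the (prob, trsize, thetas) cache layout above):
# prob, trsize, thetas = self.cp_cache
# trsize.value = 0.1          # shrink/grow the trust region between iterations
# prob.solve()
# theta_new = thetas.value    # candidate trajectory for the acceptance test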
#!/usr/bin/env python
import math
from numpy import array
import cvxpy as cp

x = cp.Variable(3, name="x")
objective_fn = -cp.log(x[0]) - cp.log(x[1]) - cp.log(x[2])
constraints = [
    x[0] + x[2] <= 1,
    x[0] + x[1] <= 2,
    x[2] <= 1,
    x[0] >= 0,
    x[1] >= 0,
    x[2] >= 0
]
assert objective_fn.is_dcp()
assert all(constraint.is_dcp() for constraint in constraints)

problem = cp.Problem(cp.Minimize(objective_fn), constraints)
problem.solve()
print("status: ", problem.status)
print(f'x*: {x.value}')
print("p*: ", problem.value)
print("Dual values: ", [*map(lambda x: x.dual_value, constraints)])
x = cy.Variable(n, boolean=True)  # boolean (0/1) variable
# x = cy.Bool(n)   # binary variable (legacy cvxpy 0.4 API)
# x = cy.Int(n)    # integer variable (legacy cvxpy 0.4 API)

# setting the constraints
constraints = [x >= 0]
for e in edges:
    constraints += [x[e[0]] + x[e[1]] <= 1]

# Form objective.
obj = cy.Maximize(sum(x[v] for v in vertices))

# Form and solve problem.
prob = cy.Problem(obj, constraints)
# prob.solve()                   # standard solver
# prob.solve(solver=cy.GLPK)     # uses GLPK instead of the standard solver
prob.solve(solver=cy.GLPK_MI)    # uses GLPK_MI instead of the standard solver

print("status:", prob.status)
print("optimal value = ", prob.value)
print("optimal var = \n", x.value)
print('\n')
for v in vertices:
    if x[v].value == 1:
        print("Vertex %s is in the independent set." % (v + 1))
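# A self-contained instance of the maximum-independent-set model above on a
# 4-cycle (assumed data; solving with GLPK_MI requires cvxopt/glpk to be
# installed).
import cvxpy as cy

vertices = range(4)
edges = [(0, 1), (1, 2), (2, 3), (3, 0)]
n = len(vertices)

x = cy.Variable(n, boolean=True)
constraints = [x[i] + x[j] <= 1 for i, j in edges]
prob = cy.Problem(cy.Maximize(cy.sum(x)), constraints)
prob.solve(solver=cy.GLPK_MI)
print(prob.value)  # 2.0: two opposite corners of the cycle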
def neuron_input_range(weights, bias, layer_index, neuron_index, network_input_box,
                       input_range_all, activation_all_layer):
    weight_all_layer = weights
    bias_all_layer = bias
    layers = len(bias_all_layer)
    width = max([len(b) for b in bias_all_layer])

    # define large positive number M to enable the big-M method
    M = 10e4

    # variables in the input layer
    network_in = cp.Variable((len(network_input_box), 1))
    # variables in previous layers
    if layer_index >= 1:
        x_in = cp.Variable((width, layer_index))
        x_out = cp.Variable((width, layer_index))
        z = {}
        z[0] = cp.Variable((width, layer_index), integer=True)
        z[1] = cp.Variable((width, layer_index), integer=True)
    # variables for the specific neuron
    x_in_neuron = cp.Variable()

    constraints = []

    # add constraints for the input layer
    if layer_index >= 1:
        constraints += [0 <= z[0]]
        constraints += [z[0] <= 1]
        constraints += [0 <= z[1]]
        constraints += [z[1] <= 1]
    for i in range(len(network_input_box)):
        constraints += [network_in[i, 0] >= network_input_box[i][0]]
        constraints += [network_in[i, 0] <= network_input_box[i][1]]
        # constraints += [network_in[i, 0] == 0.7]

    if layer_index >= 1:
        # print(x_in[0, 0].shape)
        # print(np.array(weight_all_layer[0]).shape)
        # print(network_in.shape)
        # print((np.array(weight_all_layer[0]) @ network_in).shape)
        # print(bias_all_layer[0].shape)
        constraints += [x_in[:, 0:1] == np.array(weight_all_layer[0]) @ network_in + bias_all_layer[0]]

    # add constraints for the layers before the neuron
    for j in range(layer_index):
        weight_j = weight_all_layer[j]
        bias_j = bias_all_layer[j]

        # add constraint for linear transformation between layers
        if j + 1 <= layer_index - 1:
            weight_j_next = weight_all_layer[j + 1]
            bias_j_next = bias_all_layer[j + 1]
            # print(x_in[:, j+1:j+2].shape)
            # print(weight_j_next.shape)
            # print(x_out[:, j:j+1].shape)
            # print(bias_j_next.shape)
            constraints += [x_in[0:len(bias_j_next), j+1:j+2] ==
                            weight_j_next @ x_out[0:len(bias_j), j:j+1] + bias_j_next]

        # add constraint for sigmoid function relaxation
        for i in range(weight_j.shape[0]):
            low = input_range_all[j][i][0]
            upp = input_range_all[j][i][1]

            # define slack integers
            constraints += [z[0][i, j] + z[1][i, j] == 1]
            # The triangle constraint for 0 <= x <= u
            constraints += [-x_in[i, j] <= M * (1 - z[0][i, j])]
            constraints += [x_in[i, j] - upp <= M * (1 - z[0][i, j])]
            constraints += [x_out[i, j] - sigmoid(0)*(1-sigmoid(0))*x_in[i, j] - sigmoid(0) <= M * (1 - z[0][i, j])]
            constraints += [x_out[i, j] - sigmoid(upp)*(1-sigmoid(upp))*(x_in[i, j]-upp) - sigmoid(upp) <= M * (1 - z[0][i, j])]
            constraints += [-x_out[i, j] + (sigmoid(upp)-sigmoid(0))/upp*x_in[i, j] + sigmoid(0) <= M * (1 - z[0][i, j])]
            # The triangle constraint for l <= x <= 0
            constraints += [x_in[i, j] <= M * (1 - z[1][i, j])]
            constraints += [-x_in[i, j] + low <= M * (1 - z[1][i, j])]
            constraints += [-x_out[i, j] + sigmoid(0)*(1-sigmoid(0))*x_in[i, j] + sigmoid(0) <= M * (1 - z[1][i, j])]
            constraints += [-x_out[i, j] + sigmoid(low)*(1-sigmoid(low))*(x_in[i, j]-low) + sigmoid(low) <= M * (1 - z[1][i, j])]
            constraints += [x_out[i, j] - (sigmoid(low)-sigmoid(0))/low*x_in[i, j] - sigmoid(0) <= M * (1 - z[1][i, j])]

    # add constraint for the last layer and the neuron
    weight_neuron = np.reshape(weight_all_layer[layer_index][neuron_index], (1, -1))
    bias_neuron = np.reshape(bias_all_layer[layer_index][neuron_index], (1, -1))
    # print(x_in_neuron.shape)
    # print(weight_neuron.shape)
    # print(x_out[0:len(bias_all_layer[layer_index-1]), layer_index-1:layer_index].shape)
    # print(bias_neuron.shape)
    if layer_index >= 1:
        constraints += [x_in_neuron == weight_neuron @
                        x_out[0:len(bias_all_layer[layer_index-1]), layer_index-1:layer_index] + bias_neuron]
    else:
        constraints += [x_in_neuron == weight_neuron @ network_in[0:len(network_input_box), 0:1] + bias_neuron]

    # objective: smallest output of [layer_index, neuron_index]
    objective_min = cp.Minimize(x_in_neuron)
    prob_min = cp.Problem(objective_min, constraints)
    prob_min.solve(solver=cp.GUROBI)
    if prob_min.status == 'optimal':
        l_neuron = prob_min.value
        # print('lower bound: ' + str(l_neuron))
        # for variable in prob_min.variables():
        #     print('Variable ' + str(variable.name()) + ' value: ' + str(variable.value))
    else:
        print('prob_min.status: ' + prob_min.status)
        print('Error: No result for lower bound!')

    # objective: largest output of [layer_index, neuron_index]
    objective_max = cp.Maximize(x_in_neuron)
    prob_max = cp.Problem(objective_max, constraints)
    prob_max.solve(solver=cp.GUROBI)
    if prob_max.status == 'optimal':
        u_neuron = prob_max.value
        # print('upper bound: ' + str(u_neuron))
        # for variable in prob_max.variables():
        #     print('Variable ' + str(variable.name()) + ' value: ' + str(variable.value))
    else:
        print('prob_max.status: ' + prob_max.status)
        print('Error: No result for upper bound!')

    input_range_all[layer_index][neuron_index] = [l_neuron, u_neuron]
    return [l_neuron, u_neuron], input_range_all
import numpy as np
import cvxpy as cp

from data.ideal_pref_point_data import K, n, c, d, box, plot

np.set_printoptions(precision=6, suppress=True)

c_tilde = cp.Variable(n)
constraints = []
for i, j in d:
    a = c[i] - c[j]
    b = a @ (c[i] + c[j]) / 2
    constraints.append(a @ c_tilde >= b)

for min_max in range(2):
    for i in range(n):
        func = cp.Minimize if min_max == 0 else cp.Maximize
        obj = func(c_tilde[i])
        problem = cp.Problem(obj, constraints)
        problem.solve()
        print(problem.status)
        box[i][min_max] = problem.value

box = np.array(box)
for i in range(2):
    print(box[i, 1] - box[i, 0])
def mvo_cost(mu, Q, card, old_weight, old_ticker):
    """
    :param mu: n*1 vector, expected returns of n assets
    :param Q: n*n matrix, covariance matrix of n assets
    :param card: a scalar, cardinality constraint
    :param old_weight: k*1 vector, weights of k stocks in the portfolio during the previous period
    :param old_ticker: k*1 vector, indices of k stocks in the portfolio during the previous period
    :return: norm_weight (the portfolio weights), ticker_index (the indices of the selected stocks)
    """
    # Define the variables: w - weight, y - auxiliary variable for eliminating
    # absolute values in the transaction-cost constraint
    w = cp.Variable(len(mu))
    y = cp.Variable(len(mu))

    # Create the weight array of the previous time period
    w_old = np.zeros(len(mu))
    w_old[old_ticker] = old_weight

    # Create objective function
    p = Q
    p = p.T @ p
    lamb = 0.001
    q = lamb * mu

    # set the upper limit of the weight (w_i) to be 50% to avoid over-concentration
    up = 0.5

    # Create inequality constraint matrix: 0 <= w_i <= 0.5
    # Disallow short selling
    G = -np.identity(len(mu))
    j = np.identity(len(mu))
    G = np.append(G, j, axis=0)
    h = np.zeros(len(mu)).reshape((len(mu)))
    i = (np.ones(len(mu)) * up).reshape((len(mu)))
    h = np.append(h, i, axis=0)

    # Create equality constraint matrix: sum(w_i) = 1
    A = np.array([1.0] * len(mu)).reshape((1, len(mu)))
    b = np.array([1.0])

    # Create the transaction cost constraint: w_i - w_i_old <= y_i,
    # w_i_old - w_i <= y_i, sum(c_i * y_i) <= T
    # Define cost per transaction
    cost = 0.01
    # Define total cost
    total_cost = 100
    c = np.identity(len(mu)) * cost
    T = np.array([total_cost])

    # Use cvxpy default optimizer
    obj = cp.Minimize(cp.quad_form(w, p) - q.T @ w)
    constraints = [G @ w <= h, A @ w == b,
                   w - w_old <= y,
                   w_old - w <= y,  # both directions, as described above (the original omitted this one)
                   c @ y <= T]
    prob = cp.Problem(obj, constraints)
    prob.solve()
    weight = w.value

    # Return the indices of the selected stocks
    ticker_index = np.asarray(weight).argsort()[-card:]
    # Return the weights based on the cardinality and normalize them to avoid
    # extremely small weights (e.g. 10^-10)
    weight.sort()
    weight_opt = weight[-card:]
    norm_weight = [float(i) / np.sum(weight_opt) for i in weight_opt]
    return norm_weight, ticker_index
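# Hypothetical usage sketch of mvo_cost with random data (mu, Q, and the empty
# starting portfolio below are assumptions for illustration only):
# n = 20
# rng = np.random.default_rng(0)
# rets = rng.normal(scale=0.02, size=(252, n))
# mu = rets.mean(axis=0)
# Q = np.cov(rets, rowvar=False)
# w, idx = mvo_cost(mu, Q, card=5, old_weight=np.array([]), old_ticker=np.array([], dtype=int))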
def solve_maxent(self):
    prob = cvx.Problem(cvx.Maximize(cvx.sum_entries(cvx.entr(self.Xs))),
                       self.constraints)
    sol = prob.solve(solver=cvx.ECOS)
    values = self.Xs.value
    return values
# Imports assumed by this fragment (A, B, Q, R, n, m are defined elsewhere):
import cvxpy as cp
import torch
from scipy.linalg import solve_discrete_are, sqrtm
from cvxpylayers.torch import CvxpyLayer

P_lqr = solve_discrete_are(A, B, Q, R)
P = R + B.T @ P_lqr @ B
P_sqrt_lqr = sqrtm(P)

# Construct CVXPY problem and layer
x_cvxpy = cp.Parameter((n, 1))
P_sqrt_cvxpy = cp.Parameter((m, m))
P_21_cvxpy = cp.Parameter((n, m))
q_cvxpy = cp.Parameter((m, 1))

u_cvxpy = cp.Variable((m, 1))
y_cvxpy = cp.Variable((n, 1))

objective = .5 * cp.sum_squares(P_sqrt_cvxpy @ u_cvxpy) + x_cvxpy.T @ y_cvxpy + q_cvxpy.T @ u_cvxpy
problem = cp.Problem(cp.Minimize(objective),
                     [cp.norm(u_cvxpy) <= 1, y_cvxpy == P_21_cvxpy @ u_cvxpy])
assert problem.is_dpp()

policy = CvxpyLayer(problem, [x_cvxpy, P_sqrt_cvxpy, P_21_cvxpy, q_cvxpy], [u_cvxpy])

''' ----------------------------------------------------------------------------------- '''

def train(iters):
    # Initialize with LQR control Lyapunov function
    P_sqrt = torch.from_numpy(P_sqrt_lqr).requires_grad_(True)
    P_21 = torch.from_numpy(A.T @ P_lqr @ B).requires_grad_(True)
    q = torch.zeros((m, 1), requires_grad=True, dtype=torch.float64)
    variables = [P_sqrt, P_21, q]
    A_tch, B_tch, Q_tch, R_tch = map(torch.from_numpy, [A, B, Q, R])
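# Sketch of evaluating the layer once (assumed shapes; x0 stands in for a
# state sample and the torch parameters come from train() above):
# x0 = torch.randn(n, 1, dtype=torch.float64)
# u, = policy(x0, P_sqrt, P_21, q)   # differentiable through the QP solution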
def proj(cvx_set, value):
    objective = cvx.Minimize(cvx.norm(cvx_set - value, 2))
    cvx.Problem(objective).solve(solver=cvx.CVXOPT)
    return cvx_set.value
# Declare the objective function
objective = (C.sum_squares(beta)) * 0.5
for i in range(0, m):
    if y[i] == 1:
        objective += slackvar[i] * opt.C1
    else:
        objective += slackvar[i] * opt.C2

# Declare the constraints to enforce
constraints = [slackvar >= 0]
for i in range(0, m):
    constraints += [y[i] * (beta.T * X[i] + beta_0) >= 1 - slackvar[i]]

# Define the problem
problem = C.Problem(C.Minimize(objective), constraints)

# Solve the problem
problem.solve(solver=C.ECOS, abstol=1e-10, reltol=1e-09, feastol=1e-10, max_iters=1000)
print("Problem exited with status: {0} and value attained: {1}".format(
    problem.status, round(problem.value, 5)))

# Saving the y(<beta, x> + beta_0) values into a text file
beta_file = open('beta.txt', 'w')

# Plotting section
plt.ylabel('$x_{1}$', fontsize=20)
plt.xlabel('$x_{2}$', fontsize=20)
truths = [False, False, False]
for i in range(0, m):
    beta_file.write('{0}\n'.format(
        round(y[i] * (np.dot(np.array(beta.value).reshape(-1), X[i]) + beta_0.value), 8)))
csv_writer.writerow([  # reconstructed header-row call; the fragment began mid-argument list
    'Date', 'Factor 1', 'Factor 2', 'Factor 3', 'Factor 4', 'Factor 5',
    'Factor 6', 'Factor 7', 'Factor 8', 'Factor 9', 'Factor 10',
    'Sum of Weights'
])

# Calculate optimal weights for each rolling window
for j in range(num_rolling_windows):
    # X represents the matrix of risk factor returns
    X = data[j:j + num_rollingmonths, 0:10]
    # y represents the security/index to be tested against
    y = data[j:j + num_rollingmonths, (10 + i, )].reshape(num_rollingmonths, )
    # b is the optimal beta / optimal weights to solve for
    b = cp.Variable(num_factors)
    print(b.value)  # None until the problem has been solved
    cost = cp.sum_squares(X @ b - y)
    constraints = [cp.sum(b) == 1.0]
    prob = cp.Problem(cp.Minimize(cost), constraints)
    prob.solve()

    # Output to csv
    csv_writer.writerow([date, ] + list(b.value) + [sum(b.value), ])
    date = date + rd(months=1) + rd(day=31)

csv_file.close()
def test_sum_largest(self):
    self.skipTest("Enable test once sum_largest is implemented.")
    x = cvxpy.Variable((4, ), pos=True)
    obj = cvxpy.Minimize(cvxpy.sum_largest(x, 3))
    constr = [x[0] * x[1] * x[2] * x[3] >= 16]
    dgp = cvxpy.Problem(obj, constr)
    dgp2dcp = cvxpy.reductions.Dgp2Dcp(dgp)
    dcp = dgp2dcp.reduce()
    dcp.solve(SOLVER)
    dgp.unpack(dgp2dcp.retrieve(dcp.solution))
    opt = 6.0
    self.assertAlmostEqual(dgp.value, opt)
    self.assertAlmostEqual((x[0] * x[1] * x[2] * x[3]).value, 16, places=2)
    dgp._clear_solution()
    dgp.solve(SOLVER, gp=True)
    self.assertAlmostEqual(dgp.value, opt)
    self.assertAlmostEqual((x[0] * x[1] * x[2] * x[3]).value, 16, places=2)

    # An unbounded problem.
    x = cvxpy.Variable((4, ), pos=True)
    y = cvxpy.Variable(pos=True)
    obj = cvxpy.Minimize(cvxpy.sum_largest(x, 3) * y)
    constr = [x[0] * x[1] * x[2] * x[3] >= 16]
    dgp = cvxpy.Problem(obj, constr)
    dgp2dcp = cvxpy.reductions.Dgp2Dcp(dgp)
    dcp = dgp2dcp.reduce()
    opt = dcp.solve(SOLVER)
    self.assertEqual(dcp.value, -float("inf"))
    dgp.unpack(dgp2dcp.retrieve(dcp.solution))
    self.assertAlmostEqual(dgp.value, 0.0)
    self.assertAlmostEqual(dgp.status, "unbounded")
    dgp._clear_solution()
    dgp.solve(SOLVER, gp=True)
    self.assertAlmostEqual(dgp.value, 0.0)
    self.assertAlmostEqual(dgp.status, "unbounded")

    # Another unbounded problem.
    x = cvxpy.Variable(2, pos=True)
    obj = cvxpy.Minimize(cvxpy.sum_largest(x, 1))
    dgp = cvxpy.Problem(obj, [])
    dgp2dcp = cvxpy.reductions.Dgp2Dcp(dgp)
    dcp = dgp2dcp.reduce()
    opt = dcp.solve(SOLVER)
    self.assertEqual(dcp.value, -float("inf"))
    dgp.unpack(dgp2dcp.retrieve(dcp.solution))
    self.assertAlmostEqual(dgp.value, 0.0)
    self.assertAlmostEqual(dgp.status, "unbounded")
    dgp._clear_solution()
    dgp.solve(SOLVER, gp=True)
    self.assertAlmostEqual(dgp.value, 0.0)
    self.assertAlmostEqual(dgp.status, "unbounded")

    # Composition with posynomials.
    x = cvxpy.Variable((4, ), pos=True)
    obj = cvxpy.Minimize(
        cvxpy.sum_largest(
            cvxpy.hstack([
                3 * x[0]**0.5 * x[1]**0.5,
                x[0] * x[1] + 0.5 * x[1] * x[3]**3,
                x[2]
            ]), 2))
    constr = [x[0] * x[1] >= 16]
    dgp = cvxpy.Problem(obj, constr)
    dgp2dcp = cvxpy.reductions.Dgp2Dcp(dgp)
    dcp = dgp2dcp.reduce()
    dcp.solve(SOLVER)
    dgp.unpack(dgp2dcp.retrieve(dcp.solution))
    # opt = 3 * sqrt(4) * sqrt(4) + (4 * 4 + 0.5 * 4 * epsilon) = 28
    opt = 28.0
    self.assertAlmostEqual(dgp.value, opt, places=2)
    self.assertAlmostEqual((x[0] * x[1]).value, 16.0, places=2)
    self.assertAlmostEqual(x[3].value, 0.0, places=2)
    dgp._clear_solution()
    dgp.solve(SOLVER, gp=True)
    self.assertAlmostEqual(dgp.value, opt, places=2)
    self.assertAlmostEqual((x[0] * x[1]).value, 16.0, places=2)
    self.assertAlmostEqual(x[3].value, 0.0, places=2)
                    delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
writer.writerow([epoch, epoch_w, loss_l_cum, loss_u_cum, loss_ent_cum])

# update unlabeled estimates with cumulative loss:
target_var_list = [{'params': estimated_y, 'lr': ETA_Y}]
optimizer_y = optim.SGD(target_var_list, momentum=0, weight_decay=0)
optimizer_y.step()
optimizer_y.zero_grad()
estimated_y.grad.data.zero_()

# Project onto the probability polytope
est_y_num = estimated_y.data.cpu().numpy()
U = cvxpy.Variable((est_y_num.shape[0], est_y_num.shape[1]))
objective = cvxpy.Minimize(cvxpy.sum(cvxpy.square(U - est_y_num)))
constraints = [U >= EPSILON, cvxpy.sum(U, axis=1) == 1]
prob = cvxpy.Problem(objective, constraints)
prob.solve()
estimated_y.data = torch.from_numpy(np.float32(U.value)).cuda()

# save estimated labels and labeled indexes on unlabeled data:
state = {
    "lab_inds": lab_inds,
    "estimated_y": estimated_y.data.cpu().numpy(),
    "epoch": epoch,
    "y_acc": y_acc,
}
np.save(Y_U_FOLDER + file_name + '.npy', state)
def test_basic_gp(self):
    x, y, z = cvxpy.Variable((3, ), pos=True)
    constraints = [2 * x * y + 2 * x * z + 2 * y * z <= 1.0, x >= 2 * y]
    problem = cvxpy.Problem(cvxpy.Minimize(1 / (x * y * z)), constraints)
    problem.solve(SOLVER, gp=True)
    self.assertAlmostEqual(15.59, problem.value, places=2)
def contains(cvx_set, value):
    p = cp.Problem(cp.Minimize(0), [cvx_set == value])
    p.solve()
    # the original `cp.get_status(p.solve()) == cp.SOLVED` is a legacy API;
    # current cvxpy reports feasibility through `p.status`
    return p.status == cp.OPTIMAL
def test_intro(self):
    """Test examples from cvxpy.org introduction.
    """
    import numpy

    # Problem data.
    m = 30
    n = 20
    numpy.random.seed(1)
    A = numpy.random.randn(m, n)
    b = numpy.random.randn(m, 1)

    # Construct the problem.
    x = Variable(n)
    objective = Minimize(sum_squares(A * x - b))
    constraints = [0 <= x, x <= 1]
    prob = Problem(objective, constraints)

    # The optimal objective is returned by p.solve().
    result = prob.solve()
    # The optimal value for x is stored in x.value.
    print(x.value)
    # The optimal Lagrange multiplier for a constraint
    # is stored in constraint.dual_value.
    print(constraints[0].dual_value)

    ########################################

    # Create two scalar variables.
    x = Variable()
    y = Variable()
    # Create two constraints.
    constraints = [x + y == 1, x - y >= 1]
    # Form objective.
    obj = Minimize(square(x - y))
    # Form and solve problem.
    prob = Problem(obj, constraints)
    prob.solve()  # Returns the optimal value.
    print("status:", prob.status)
    print("optimal value", prob.value)
    print("optimal var", x.value, y.value)

    ########################################
    import cvxpy as cvx

    # Create two scalar variables.
    x = cvx.Variable()
    y = cvx.Variable()
    # Create two constraints.
    constraints = [x + y == 1, x - y >= 1]
    # Form objective.
    obj = cvx.Minimize(cvx.square(x - y))
    # Form and solve problem.
    prob = cvx.Problem(obj, constraints)
    prob.solve()  # Returns the optimal value.
    print("status:", prob.status)
    print("optimal value", prob.value)
    print("optimal var", x.value, y.value)

    self.assertEqual(prob.status, OPTIMAL)
    self.assertAlmostEqual(prob.value, 1.0)
    self.assertAlmostEqual(x.value, 1.0)
    self.assertAlmostEqual(y.value, 0)

    ########################################

    # Replace the objective.
    prob.objective = Maximize(x + y)
    print("optimal value", prob.solve())
    self.assertAlmostEqual(prob.value, 1.0)

    # Replace the constraint (x + y == 1).
    prob.constraints[0] = (x + y <= 3)
    print("optimal value", prob.solve())
    self.assertAlmostEqual(prob.value, 3.0)

    ########################################

    x = Variable()

    # An infeasible problem.
    prob = Problem(Minimize(x), [x >= 1, x <= 0])
    prob.solve()
    print("status:", prob.status)
    print("optimal value", prob.value)
    self.assertEquals(prob.status, INFEASIBLE)
    self.assertAlmostEqual(prob.value, np.inf)

    # An unbounded problem.
    prob = Problem(Minimize(x))
    prob.solve()
    print("status:", prob.status)
    print("optimal value", prob.value)
    self.assertEquals(prob.status, UNBOUNDED)
    self.assertAlmostEqual(prob.value, -np.inf)

    ########################################

    # A scalar variable.
    a = Variable()
    # Column vector variable of length 5.
    x = Variable(5)
    # Matrix variable with 4 rows and 7 columns.
    A = Variable(4, 7)

    ########################################
    import numpy

    # Problem data.
    m = 10
    n = 5
    numpy.random.seed(1)
    A = numpy.random.randn(m, n)
    b = numpy.random.randn(m, 1)

    # Construct the problem.
    x = Variable(n)
    objective = Minimize(sum_entries(square(A * x - b)))
    constraints = [0 <= x, x <= 1]
    prob = Problem(objective, constraints)

    print("Optimal value", prob.solve())
    print("Optimal var")
    print(x.value)  # A numpy matrix.

    self.assertAlmostEqual(prob.value, 4.14133859146)

    ########################################

    # Positive scalar parameter.
    m = Parameter(sign="positive")
    # Column vector parameter with unknown sign (by default).
    c = Parameter(5)
    # Matrix parameter with negative entries.
    G = Parameter(4, 7, sign="negative")
    # Assigns a constant value to G.
    G.value = -numpy.ones((4, 7))

    ########################################

    # Create parameter, then assign value.
    rho = Parameter(sign="positive")
    rho.value = 2
    # Initialize parameter with a value.
    rho = Parameter(sign="positive", value=2)

    ########################################
    import numpy

    # Problem data.
    n = 15
    m = 10
    numpy.random.seed(1)
    A = numpy.random.randn(n, m)
    b = numpy.random.randn(n, 1)
    # gamma must be positive due to DCP rules.
    gamma = Parameter(sign="positive")

    # Construct the problem.
    x = Variable(m)
    error = sum_squares(A * x - b)
    obj = Minimize(error + gamma * norm(x, 1))
    prob = Problem(obj)

    # Construct a trade-off curve of ||Ax-b||^2 vs. ||x||_1
    sq_penalty = []
    l1_penalty = []
    x_values = []
    gamma_vals = numpy.logspace(-4, 6)
    for val in gamma_vals:
        gamma.value = val
        prob.solve()
        # Use expr.value to get the numerical value of
        # an expression in the problem.
        sq_penalty.append(error.value)
        l1_penalty.append(norm(x, 1).value)
        x_values.append(x.value)

    ########################################
    import numpy

    X = Variable(5, 4)
    A = numpy.ones((3, 5))

    # Use expr.size to get the dimensions.
    print("dimensions of X:", X.size)
    print("dimensions of sum_entries(X):", sum_entries(X).size)
    print("dimensions of A*X:", (A * X).size)

    # ValueError raised for invalid dimensions.
    try:
        A + X
    except ValueError as e:
        print(e)
def proj(cvx_set, value):
    objective = cp.Minimize(cp.norm2(cvx_set - value))
    cp.Problem(objective).solve()
    return cvx_set.value
np.random.seed(8888)
BenchmarkIndex = R.dot(np.tile(1.0/N, N)) + st.norm(0.0, 3.0).rvs(T)

#%% Backtest of the tracking-error minimization problem
MovingWindow = 96
BackTesting = T - MovingWindow
V_Tracking = np.zeros(BackTesting)
Weight = cvx.Variable(N)
Error = cvx.Variable(MovingWindow)
TrackingError = cvx.sum_squares(Error)
Asset_srT = R / np.sqrt(T)
Index_srT = BenchmarkIndex / np.sqrt(T)
for Month in range(0, BackTesting):
    Asset = Asset_srT.values[Month:(Month + MovingWindow), :]
    Index = Index_srT.values[Month:(Month + MovingWindow)]
    Min_TrackingError = cvx.Problem(cvx.Minimize(TrackingError),
                                    [Index - Asset*Weight == Error,
                                     cvx.sum(Weight) == 1.0,
                                     Weight >= 0.0])
    Min_TrackingError.solve()
    V_Tracking[Month] = R.values[Month + MovingWindow, :].dot(Weight.value)

#%% Plot of the backtest results
fig1 = plt.figure(1, facecolor='w')
plt.plot(list(range(1, BackTesting + 1)), BenchmarkIndex[MovingWindow:], 'k-')
plt.plot(list(range(1, BackTesting + 1)), V_Tracking, 'k--')
plt.legend([u'benchmark index', u'index fund'],
           loc='best', frameon=False, prop=jpfont)
plt.xlabel(u'year', fontproperties=jpfont)
plt.ylabel(u'return (%)', fontproperties=jpfont)
plt.xticks(list(range(12, BackTesting + 1, 12)),
           pd.date_range(R.index[MovingWindow], periods=BackTesting//12, freq='AS').year)
plt.show()
def test_basic_init(basic_init_fixture, solver_name, i, H, g, x_ineq):
    """A basic test case for wrappers.

    Notice that the input fixture `basic_init_fixture` is known to have two
    constraints, one velocity and one acceleration. Hence, in this test, I
    directly formulate an optimization with cvxpy to test the result.

    Parameters
    ----------
    basic_init_fixture: a fixture with only two constraints, one velocity and
        one acceleration constraint.
    """
    constraints, path, path_discretization, vlim, alim = basic_init_fixture
    if solver_name == "cvxpy":
        from toppra.solverwrapper.cvxpy_solverwrapper import cvxpyWrapper
        solver = cvxpyWrapper(constraints, path, path_discretization)
    elif solver_name == 'qpOASES':
        from toppra.solverwrapper.qpoases_solverwrapper import qpOASESSolverWrapper
        solver = qpOASESSolverWrapper(constraints, path, path_discretization)
    elif solver_name == 'hotqpOASES':
        from toppra.solverwrapper.hot_qpoases_solverwrapper import hotqpOASESSolverWrapper
        solver = hotqpOASESSolverWrapper(constraints, path, path_discretization)
    elif solver_name == 'ecos' and H is None:
        from toppra.solverwrapper.ecos_solverwrapper import ecosWrapper
        solver = ecosWrapper(constraints, path, path_discretization)
    elif solver_name == 'seidel' and H is None:
        from toppra.solverwrapper.cy_seidel_solverwrapper import seidelWrapper
        solver = seidelWrapper(constraints, path, path_discretization)
    else:
        return True  # Skip all other tests

    xmin, xmax = x_ineq
    xnext_min = 0
    xnext_max = 1

    # Results from solverwrapper to test
    solver.setup_solver()
    result_ = solver.solve_stagewise_optim(i - 2, H, g, xmin, xmax, xnext_min, xnext_max)
    result_ = solver.solve_stagewise_optim(i - 1, H, g, xmin, xmax, xnext_min, xnext_max)
    result = solver.solve_stagewise_optim(i, H, g, xmin, xmax, xnext_min, xnext_max)
    solver.close_solver()

    # Results from cvxpy, used as the actual, desired values
    ux = cvxpy.Variable(2)
    u = ux[0]
    x = ux[1]
    _, _, _, _, _, _, xbound = solver.params[0]  # vel constraint
    a, b, c, F, h, ubound, _ = solver.params[1]  # accel constraint
    a2, b2, c2, F2, h2, _, _ = solver.params[2]  # random constraint
    Di = path_discretization[i + 1] - path_discretization[i]
    v = a[i] * u + b[i] * x + c[i]
    v2 = a2[i] * u + b2[i] * x + c2[i]
    cvxpy_constraints = [
        x <= xbound[i, 1],
        x >= xbound[i, 0],
        F * v <= h,
        F2[i] * v2 <= h2[i],
        x + u * 2 * Di <= xnext_max,
        x + u * 2 * Di >= xnext_min,
    ]
    if not np.isnan(xmin):
        cvxpy_constraints.append(x <= xmax)
        cvxpy_constraints.append(x >= xmin)
    if H is not None:
        objective = cvxpy.Minimize(0.5 * cvxpy.quad_form(ux, H) + g * ux)
    else:
        objective = cvxpy.Minimize(g * ux)
    problem = cvxpy.Problem(objective, cvxpy_constraints)
    problem.solve(verbose=True)  # test with the same solver as cvxpywrapper
    if problem.status == "optimal":
        actual = np.array(ux.value).flatten()
        result = np.array(result).flatten()
        npt.assert_allclose(result, actual, atol=5e-3, rtol=1e-5)  # Very bad accuracy? why?
    else:
        assert np.all(np.isnan(result))
def contains(cvx_set, value):
    p = cvx.Problem(cvx.Minimize(0), [cvx_set == value])
    p.solve(solver=cvx.CVXOPT)
    return p.status == cvx.OPTIMAL
def __solve__(self):
    """ Solve the optimization problem associated with each point """
    # Cones on the plus and minus side
    sup_plus_vars = cvx.Variable(shape=(len(self.points), self.dim))
    inf_plus_vars = cvx.Variable(shape=(len(self.points), self.dim))
    sup_minus_vars = cvx.Variable(shape=(len(self.points), self.dim))
    inf_minus_vars = cvx.Variable(shape=(len(self.points), self.dim))

    constraints = [
        sup_plus_vars >= 0,
        inf_plus_vars >= 0,
        sup_minus_vars >= 0,
        inf_minus_vars >= 0  # the original repeated `sup_plus_vars >= 0` here, likely a typo
    ]

    # inf/sup relational constraints
    constraints += [sup_plus_vars >= inf_plus_vars]
    constraints += [sup_minus_vars <= inf_minus_vars]

    # Cone constraints
    for (i, (pone, vone)) in enumerate(self.points.items()):
        # point i has to be able to project into point j
        for (j, (ptwo, vtwo)) in enumerate(self.points.items()):
            if i == j:
                continue
            lhs_sup = 0.0
            lhs_inf = 0.0
            for (di, (poned, ptwod)) in enumerate(zip(pone, ptwo)):
                run = ptwod - poned
                supvar = 0.0
                infvar = 0.0
                if ptwod > poned:
                    supvar = sup_plus_vars[i, di]
                    infvar = inf_plus_vars[i, di]
                elif ptwod < poned:
                    supvar = sup_minus_vars[i, di]
                    infvar = inf_minus_vars[i, di]
                lhs_sup += run * supvar
                lhs_inf += run * infvar
            constraints += [lhs_sup >= vtwo - vone, lhs_inf <= vtwo - vone]

    # Optimization: minimize cone width
    objective = cvx.Minimize(
        cvx.norm(sup_plus_vars - inf_plus_vars) +
        cvx.norm(inf_minus_vars - sup_minus_vars))
    p = cvx.Problem(objective, constraints)

    # Run it!
    p.solve(verbose=False, solver=cvx.CVXOPT, kktsolver='robust', refinement=5)

    # Post-mortem...
    if p.status != 'optimal' and p.status != 'optimal_inaccurate':
        raise ScoreCreationError(
            "Could not create scoring function: Optimization Failed")
        return None  # unreachable; kept from the original

    # Pull out the coefficients from the variables
    plus_vars = [[(sup_plus_vars[i, j].value, inf_plus_vars[i, j].value)
                  for j in range(self.dim)] for i in range(len(self.points))]
    minus_vars = [[(sup_minus_vars[i, j].value, inf_minus_vars[i, j].value)
                   for j in range(self.dim)] for i in range(len(self.points))]
    return plus_vars, minus_vars
def dist(lh_set, rh_set):
    objective = cvx.Minimize(cvx.norm(lh_set - rh_set, 2))
    return cvx.Problem(objective).solve(solver=cvx.CVXOPT)
np.random.seed(8888)
BenchmarkIndex = R.dot(np.tile(1.0/N, N)) \
                 + st.norm.rvs(loc=0.0, scale=3.0, size=T)
MovingWindow = 96
BackTesting = T - MovingWindow
V_Tracking = np.zeros(BackTesting)
Weight = cp.Variable(N)
Error = cp.Variable(MovingWindow)
TrackingError = cp.sum_squares(Error)
Asset_srT = R / np.sqrt(T)
Index_srT = BenchmarkIndex / np.sqrt(T)
for Month in range(0, BackTesting):
    Asset = Asset_srT.values[Month:(Month + MovingWindow), :]
    Index = Index_srT.values[Month:(Month + MovingWindow)]
    Min_TrackingError = cp.Problem(cp.Minimize(TrackingError),
                                   [Index - Asset*Weight == Error,
                                    cp.sum(Weight) == 1.0,  # sum_entries is the legacy (cvxpy 0.4) name for cp.sum
                                    Weight >= 0.0])
    Min_TrackingError.solve()
    V_Tracking[Month] = R.values[Month + MovingWindow, :].dot(Weight.value)

fig1 = plt.figure(num=1, facecolor='w')
plt.plot(list(range(1, BackTesting + 1)), BenchmarkIndex[MovingWindow:], 'b-')
plt.plot(list(range(1, BackTesting + 1)), V_Tracking, 'r--')
plt.legend(['benchmark index', 'index fund'], loc='best', frameon=False)
plt.xlabel('year')
plt.ylabel('return (%)')
plt.xticks(list(range(12, BackTesting + 1, 12)),
           pd.date_range(R.index[MovingWindow], periods=BackTesting//12, freq='AS').year)
plt.show()
import numpy as np
import cvxpy as cp

x1, x2 = cp.Variable(), cp.Variable()
obj = cp.Maximize(2*x1 + x2)
cons = [
    x1 - 0.5*x2 >= 1,
    x1 - x2 <= 2,
    x1 + x2 <= 4,
    x1 >= 0,
    x2 >= 0
]
P = cp.Problem(obj, cons)
P.solve(verbose=True)
print("Optimal value")
print(P.value)
print("Optimal solution")
print(x1.value, x2.value)
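# Cross-check of the LP above with scipy.optimize.linprog (a sketch; linprog
# minimizes, so the objective is negated, and >= rows are flipped to <=).
from scipy.optimize import linprog

res = linprog(c=[-2, -1],
              A_ub=[[-1, 0.5],   # x1 - 0.5*x2 >= 1  ->  -x1 + 0.5*x2 <= -1
                    [1, -1],     # x1 - x2 <= 2
                    [1, 1]],     # x1 + x2 <= 4
              b_ub=[-1, 2, 4],
              bounds=[(0, None), (0, None)])
print(-res.fun, res.x)  # expect about 7.0 at (3, 1)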