def graph_implementation(arg_objs, size, data=None):
    """Sum the linear expression's entries.

    Parameters
    ----------
    arg_objs : list
        LinExpr for each argument.
    size : tuple
        The size of the resulting expression.
    data :
        Additional data required by the atom.

    Returns
    -------
    tuple
        (LinOp for objective, list of constraints)
    """
    # data[0] holds the axis to sum over (None means all entries).
    axis = data[0]
    if axis is None:
        # Collapse every entry into a scalar.
        obj = lu.sum_entries(arg_objs[0])
    elif axis == 1:
        # Row sums: right-multiply by a column vector of ones.
        const_size = (arg_objs[0].size[1], 1)
        ones = lu.create_const(np.ones(const_size), const_size)
        obj = lu.rmul_expr(arg_objs[0], ones, size)
    else:  # axis == 0
        # Column sums: left-multiply by a row vector of ones.
        const_size = (1, arg_objs[0].size[0])
        ones = lu.create_const(np.ones(const_size), const_size)
        obj = lu.mul_expr(ones, arg_objs[0], size)
    return (obj, [])
def test_constant(self):
    """Test creating a constant.
    """
    # Scalar constant.
    scalar_shape = (1, 1)
    scalar = create_const(1.0, scalar_shape)
    self.assertEqual(scalar.shape, scalar_shape)
    self.assertEqual(len(scalar.args), 0)
    self.assertEqual(scalar.type, SCALAR_CONST)
    assert scalar.data == 1.0
    # Dense matrix constant.
    dense_shape = (5, 4)
    dense = create_const(np.ones(dense_shape), dense_shape)
    self.assertEqual(dense.shape, dense_shape)
    self.assertEqual(len(dense.args), 0)
    self.assertEqual(dense.type, DENSE_CONST)
    assert (dense.data == np.ones(dense_shape)).all()
    # Sparse matrix constant.
    sparse_shape = (5, 5)
    sparse_mat = create_const(sp.eye(5), sparse_shape, sparse=True)
    self.assertEqual(sparse_mat.shape, sparse_shape)
    self.assertEqual(len(sparse_mat.args), 0)
    self.assertEqual(sparse_mat.type, SPARSE_CONST)
    assert (sparse_mat.data.todense() == sp.eye(5).todense()).all()
def graph_implementation(arg_objs, size, data=None):
    """Reduces the atom to an affine expression and list of constraints.

    Parameters
    ----------
    arg_objs : list
        LinExpr for each argument.
    size : tuple
        The size of the resulting expression.
    data :
        Additional data required by the atom.

    Returns
    -------
    tuple
        (LinOp for objective, list of constraints)
    """
    # Epigraph formulation: introduce t and constrain arg <= t,
    # broadcasting t across the reduced axis.
    axis = data[0]
    if axis is None:
        # Single scalar bound promoted to the full argument size.
        t = lu.create_var((1, 1))
        promoted_t = lu.promote(t, arg_objs[0].size)
    elif axis == 0:
        # One bound per column; replicate down the rows via ones * t.
        t = lu.create_var((1, arg_objs[0].size[1]))
        const_size = (arg_objs[0].size[0], 1)
        ones = lu.create_const(np.ones(const_size), const_size)
        promoted_t = lu.mul_expr(ones, t, arg_objs[0].size)
    else:  # axis == 1
        # One bound per row; replicate across the columns via t * ones.
        t = lu.create_var((arg_objs[0].size[0], 1))
        const_size = (1, arg_objs[0].size[1])
        ones = lu.create_const(np.ones(const_size), const_size)
        promoted_t = lu.rmul_expr(t, ones, arg_objs[0].size)
    constraints = [lu.create_leq(arg_objs[0], promoted_t)]
    return (t, constraints)
def graph_implementation(arg_objs, size, data=None):
    """Reduces the atom to an affine expression and list of constraints.

    Parameters
    ----------
    arg_objs : list
        LinExpr for each argument.
    size : tuple
        The size of the resulting expression.
    data :
        Additional data required by the atom.

    Returns
    -------
    tuple
        (LinOp for objective, list of constraints)
    """
    x = arg_objs[0]
    axis = data[0]
    t = lu.create_var(size)
    # log-sum-exp epigraph: sum(exp(x - t)) <= 1 along the chosen axis.
    if axis is None:
        prom_t = lu.promote(t, x.size)
        expr = lu.sub_expr(x, prom_t)
        obj, constraints = exp.graph_implementation([expr], x.size)
        obj = lu.sum_entries(obj)
    elif axis == 0:
        # Broadcast t down each column, then sum each column of exp(...).
        prom_size = (x.size[0], 1)
        ones = lu.create_const(np.ones(prom_size), prom_size)
        prom_t = lu.mul_expr(ones, t, x.size)
        expr = lu.sub_expr(x, prom_t)
        obj, constraints = exp.graph_implementation([expr], x.size)
        const_size = (1, x.size[0])
        ones = lu.create_const(np.ones(const_size), const_size)
        obj = lu.mul_expr(ones, obj, size)
    else:  # axis == 1
        # Broadcast t across each row, then sum each row of exp(...).
        prom_size = (1, x.size[1])
        ones = lu.create_const(np.ones(prom_size), prom_size)
        prom_t = lu.rmul_expr(t, ones, x.size)
        expr = lu.sub_expr(x, prom_t)
        obj, constraints = exp.graph_implementation([expr], x.size)
        const_size = (x.size[1], 1)
        ones = lu.create_const(np.ones(const_size), const_size)
        obj = lu.rmul_expr(obj, ones, size)
    ones = lu.create_const(np.ones(size), size)
    constraints += [lu.create_leq(obj, ones)]
    return (t, constraints)
def canonicalize(self):
    """Canonicalize with row sums <= 1 and column sums == 1."""
    obj, constraints = super(Assign, self).canonicalize()
    rows, cols = self.size
    col_ones_shape = (cols, 1)
    col_ones = lu.create_const(np.ones(col_ones_shape), col_ones_shape)
    row_ones_shape = (1, rows)
    row_ones = lu.create_const(np.ones(row_ones_shape), row_ones_shape)
    # Each row sums to at most one.
    row_sum = lu.rmul_expr(obj, col_ones, (rows, 1))
    constraints.append(lu.create_leq(row_sum, lu.transpose(row_ones)))
    # Each column sums to exactly one.
    col_sum = lu.mul_expr(row_ones, obj, (1, cols))
    constraints.append(lu.create_eq(col_sum, lu.transpose(col_ones)))
    return (obj, constraints)
def graph_implementation(arg_objs, size, data=None):
    """Reduces the atom to an affine expression and list of constraints.

    Formulation:
        min  1 - sqrt(2z - z^2)
        s.t. x >= 0, z <= 1, z = x + s, s <= 0
    """
    x = arg_objs[0]
    z = lu.create_var(size)
    s = lu.create_var(size)
    zeros = lu.create_const(np.mat(np.zeros(size)), size)
    ones = lu.create_const(np.mat(np.ones(size)), size)
    # z^2 via the power atom with exponent 2.
    z2, constr_square = power.graph_implementation([z], size,
                                                   (2, (Fraction(1, 2), Fraction(1, 2))))
    two_z = lu.sum_expr([z, z])
    sub = lu.sub_expr(two_z, z2)
    # sqrt(2z - z^2) via the power atom with exponent 1/2.
    sq, constr_sqrt = power.graph_implementation([sub], size,
                                                 (Fraction(1, 2), (Fraction(1, 2), Fraction(1, 2))))
    obj = lu.sub_expr(ones, sq)
    constr = [lu.create_eq(z, lu.sum_expr([x, s]))] + [lu.create_leq(zeros, x)] + [lu.create_leq(z, ones)] + [lu.create_leq(s, zeros)] + constr_square + constr_sqrt
    return (obj, constr)
def graph_implementation(arg_objs, size, data=None):
    """Reduces the atom to an affine expression and list of constraints.

    Parameters
    ----------
    arg_objs : list
        LinExpr for each argument.
    size : tuple
        The size of the resulting expression.
    data :
        Additional data required by the atom.

    Returns
    -------
    tuple
        (LinOp for objective, list of constraints)
    """
    x = arg_objs[0]
    w = lu.create_var(size)
    v = lu.create_var(size)
    two = lu.create_const(2, (1, 1))
    # Objective: w**2 + 2*v
    obj, constraints = square.graph_implementation([w], size)
    obj = lu.sum_expr([obj, lu.mul_expr(two, v, size)])
    # x <= w + v
    constraints.append(lu.create_leq(x, lu.sum_expr([w, v])))
    # v >= 0
    constraints.append(lu.create_geq(v))
    return (obj, constraints)
def graph_implementation(arg_objs, size, data=None):
    """Reduces the atom to an affine expression and list of constraints.

    Parameters
    ----------
    arg_objs : list
        LinExpr for each argument.
    size : tuple
        The size of the resulting expression.
    data :
        Additional data required by the atom.

    Returns
    -------
    tuple
        (LinOp for objective, list of constraints)
    """
    A = arg_objs[0]
    n, _ = A.size
    # Requires that A is symmetric: enforce A == A.T.
    obj, constraints = transpose.graph_implementation([A], (n, n))
    constraints.append(lu.create_eq(A, obj))
    # Epigraph: t*I - A must be positive semidefinite.
    t = lu.create_var((1, 1))
    I = lu.create_const(sp.eye(n, n), (n, n))
    # I*t - A
    expr = lu.sub_expr(lu.mul_expr(I, t, (n, n)), A)
    return (t, [SDP(expr)] + constraints)
def graph_implementation(arg_objs, size, data=None): """Reduces the atom to an affine expression and list of constraints. Parameters ---------- arg_objs : list LinExpr for each argument. size : tuple The size of the resulting expression. data : Additional data required by the atom. Returns ------- tuple (LinOp for objective, list of constraints) """ # TODO use log for n != 2. v = lu.create_var((1, 1)) x = arg_objs[0] y = arg_objs[1] two = lu.create_const(2, (1, 1)) # SOC(x + y, [y - x, 2*v]) constraints = [ SOC(lu.sum_expr([x, y]), [lu.sub_expr(y, x), lu.mul_expr(two, v, (1, 1))]) ] # 0 <= x, 0 <= y constraints += [lu.create_geq(x), lu.create_geq(y)] return (v, constraints)
def _scaled_lower_tri(self):
    """Returns a LinOp representing the lower triangular entries.

    Scales the strictly lower triangular entries by
    sqrt(2) as required by SCS.
    """
    rows = cols = self.size[0]
    # Number of entries on or below the diagonal.
    entries = rows*(cols + 1)//2
    val_arr = []
    row_arr = []
    col_arr = []
    count = 0
    # Column-major walk over the lower triangle.
    for j in range(cols):
        for i in range(rows):
            if j <= i:
                # Index in the original matrix.
                col_arr.append(j*rows + i)
                # Index in the extracted vector.
                row_arr.append(count)
                if j == i:
                    val_arr.append(1.0)
                else:
                    # Off-diagonal entries appear twice in the full
                    # matrix, hence the sqrt(2) scaling.
                    val_arr.append(np.sqrt(2))
                count += 1
    size = (entries, rows*cols)
    coeff = sp.coo_matrix((val_arr, (row_arr, col_arr)), size).tocsc()
    coeff = lu.create_const(coeff, size, sparse=True)
    # Apply the selector to the vectorized matrix.
    vect = lu.reshape(self.A, (rows*cols, 1))
    return lu.mul_expr(coeff, vect, (entries, 1))
def graph_implementation(arg_objs, size, data=None): """Cumulative sum via difference matrix. Parameters ---------- arg_objs : list LinExpr for each argument. size : tuple The size of the resulting expression. data : Additional data required by the atom. Returns ------- tuple (LinOp for objective, list of constraints) """ # Implicit O(n) definition: # X = Y[:1,:] - Y[1:,:] Y = lu.create_var(size) axis = data[0] dim = size[axis] diff_mat = get_diff_mat(dim, axis) diff_mat = lu.create_const(diff_mat, (dim, dim), sparse=True) if axis == 0: diff = lu.mul_expr(diff_mat, Y, size) else: diff = lu.rmul_expr(Y, diff_mat, size) return (Y, [lu.create_eq(arg_objs[0], diff)])
def graph_implementation(arg_objs, size, data=None): """Reduces the atom to an affine expression and list of constraints. Parameters ---------- arg_objs : list LinExpr for each argument. size : tuple The size of the resulting expression. data : Additional data required by the atom. Returns ------- tuple (LinOp for objective, list of constraints) """ # Promote scalars. for idx, arg in enumerate(arg_objs): if arg.size != size: arg_objs[idx] = lu.promote(arg, size) x = arg_objs[0] y = arg_objs[1] v = lu.create_var(x.size) two = lu.create_const(2, (1, 1)) # SOC(x + y, [y - x, 2*v]) constraints = [ SOC_Elemwise(lu.sum_expr([x, y]), [lu.sub_expr(y, x), lu.mul_expr(two, v, v.size)]) ] # 0 <= x, 0 <= y constraints += [lu.create_geq(x), lu.create_geq(y)] return (v, constraints)
def get_spacing_matrix(size, spacing, offset):
    """Returns a sparse matrix LinOp that spaces out an expression.

    Parameters
    ----------
    size : tuple
        (rows in matrix, columns in matrix)
    spacing : int
        The number of rows between each non-zero.
    offset : int
        The number of zero rows at the beginning of the matrix.

    Returns
    -------
    LinOp
        A sparse matrix constant LinOp.
    """
    values = []
    rows = []
    cols = []
    # Column j contributes a single 1.0 at row spacing*j + offset.
    for col in range(size[1]):
        values.append(1.0)
        rows.append(spacing*col + offset)
        cols.append(col)
    spacer = sp.coo_matrix((values, (rows, cols)), size).tocsc()
    return lu.create_const(spacer, size, sparse=True)
def graph_implementation(arg_objs, size, data=None): """Reduces the atom to an affine expression and list of constraints. Parameters ---------- arg_objs : list LinExpr for each argument. size : tuple The size of the resulting expression. data : Additional data required by the atom. Returns ------- tuple (LinOp for objective, list of constraints) """ # min sum_entries(t) + kq # s.t. x <= t + q # 0 <= t x = arg_objs[0] k = lu.create_const(data[0], (1, 1)) q = lu.create_var((1, 1)) t = lu.create_var(x.size) sum_t, constr = sum_entries.graph_implementation([t], (1, 1)) obj = lu.sum_expr([sum_t, lu.mul_expr(k, q, (1, 1))]) prom_q = lu.promote(q, x.size) constr.append(lu.create_leq(x, lu.sum_expr([t, prom_q]))) constr.append(lu.create_geq(t)) return (obj, constr)
def graph_implementation(arg_objs, size, data=None):
    """Reduces the atom to an affine expression and list of constraints.

    Parameters
    ----------
    arg_objs : list
        LinExpr for each argument.
    size : tuple
        The size of the resulting expression.
    data :
        Additional data required by the atom.

    Returns
    -------
    tuple
        (LinOp for objective, list of constraints)
    """
    x = arg_objs[0]
    t = lu.create_var((1, 1))
    # log-sum-exp epigraph: sum(exp(x - t)) <= 1.
    prom_t = lu.promote(t, x.size)
    expr = lu.sub_expr(x, prom_t)
    obj, constraints = exp.graph_implementation([expr], x.size)
    obj, constr = sum_entries.graph_implementation([obj], (1, 1))
    # obj <= 1
    one = lu.create_const(1, (1, 1))
    constraints += constr + [lu.create_leq(obj, one)]
    return (t, constraints)
def graph_implementation(self, arg_objs, shape, data=None): """Cumulative sum via difference matrix. Parameters ---------- arg_objs : list LinExpr for each argument. shape : tuple The shape of the resulting expression. data : Additional data required by the atom. Returns ------- tuple (LinOp for objective, list of constraints) """ # Implicit O(n) definition: # X = Y[1:,:] - Y[:-1, :] Y = lu.create_var(shape) axis = data[0] dim = shape[axis] diff_mat = get_diff_mat(dim, axis) diff_mat = lu.create_const(diff_mat, (dim, dim), sparse=True) if axis == 0: diff = lu.mul_expr(diff_mat, Y) else: diff = lu.rmul_expr(Y, diff_mat) return (Y, [lu.create_eq(arg_objs[0], diff)])
def graph_implementation(arg_objs, size, data=None):
    """Reduces the atom to an affine expression and list of constraints.

    Parameters
    ----------
    arg_objs : list
        LinExpr for each argument.
    size : tuple
        The size of the resulting expression.
    data :
        Additional data required by the atom.

    Returns
    -------
    tuple
        (LinOp for objective, list of constraints)
    """
    x = arg_objs[0]
    y = arg_objs[1]  # Known to be a scalar.
    v = lu.create_var((1, 1))
    two = lu.create_const(2, (1, 1))
    # quad_over_lin epigraph: ||[y - v, 2x]||_2 <= y + v, with y >= 0.
    constraints = [SOC(lu.sum_expr([y, v]),
                       [lu.sub_expr(y, v),
                        lu.mul_expr(two, x, x.size)]),
                   lu.create_geq(y)]
    return (v, constraints)
def qol_elemwise(arg_objs, size, data=None):
    """Reduces the atom to an affine expression and list of constraints.

    Parameters
    ----------
    arg_objs : list
        LinExpr for each argument.
    size : tuple
        The size of the resulting expression.
    data :
        Additional data required by the atom.

    Returns
    -------
    tuple
        (LinOp for objective, list of constraints)
    """
    x = arg_objs[0]
    y = arg_objs[1]
    t = lu.create_var(x.size)
    two = lu.create_const(2, (1, 1))
    # Elementwise quad-over-lin epigraph: per entry
    # ||[y - t, 2x]||_2 <= y + t, with y >= 0.
    constraints = [
        SOC_Elemwise(
            lu.sum_expr([y, t]),
            [lu.sub_expr(y, t),
             lu.mul_expr(two, x, x.size)]),
        lu.create_geq(y)
    ]
    return (t, constraints)
def graph_implementation(arg_objs, size, data=None): """Reduces the atom to an affine expression and list of constraints. Parameters ---------- arg_objs : list LinExpr for each argument. size : tuple The size of the resulting expression. data : Additional data required by the atom. Returns ------- tuple (LinOp for objective, list of constraints) """ # min sum_entries(t) + kq # s.t. x <= t + q # 0 <= t x = arg_objs[0] k = lu.create_const(data[0], (1, 1)) q = lu.create_var((1, 1)) t = lu.create_var(x.size) sum_t, constr = sum_entries.graph_implementation([t], (1, 1)) obj = lu.sum_expr([sum_t, lu.mul_expr(k, q, (1, 1))]) prom_q = lu.promote(q, x.size) constr.append( lu.create_leq(x, lu.sum_expr([t, prom_q])) ) constr.append( lu.create_geq(t) ) return (obj, constr)
def graph_implementation(arg_objs, size, data=None):
    """Reduces the atom to an affine expression and list of constraints.

    Parameters
    ----------
    arg_objs : list
        LinExpr for each argument.
    size : tuple
        The size of the resulting expression.
    data :
        Additional data required by the atom.

    Returns
    -------
    tuple
        (LinOp for objective, list of constraints)
    """
    A = arg_objs[0]
    rows, cols = A.size
    # Create the equivalent problem:
    #   minimize (trace(U) + trace(V))/2
    #   subject to:
    #            [U A; A.T V] is positive semidefinite
    X = lu.create_var((rows + cols, rows + cols))
    constraints = []
    # Fix X using the fact that A must be affine by the DCP rules.
    # X[0:rows,rows:rows+cols] == A
    index.block_eq(X, A, constraints,
                   0, rows, rows, rows + cols)
    half = lu.create_const(0.5, (1, 1))
    trace = lu.mul_expr(half, lu.trace(X), (1, 1))
    # Add SDP constraint.
    return (trace, [SDP(X)] + constraints)
def qol_elemwise(arg_objs, size, data=None):
    """Reduces the atom to an affine expression and list of constraints.

    Parameters
    ----------
    arg_objs : list
        LinExpr for each argument.
    size : tuple
        The size of the resulting expression.
    data :
        Additional data required by the atom.

    Returns
    -------
    tuple
        (LinOp for objective, list of constraints)
    """
    x = arg_objs[0]
    y = arg_objs[1]
    t = lu.create_var(x.size)
    two = lu.create_const(2, (1, 1))
    # Elementwise quad-over-lin epigraph: per entry
    # ||[y - t, 2x]||_2 <= y + t, with y >= 0.
    constraints = [SOC_Elemwise(lu.sum_expr([y, t]),
                                [lu.sub_expr(y, t),
                                 lu.mul_expr(two, x, x.size)]),
                   lu.create_geq(y)]
    return (t, constraints)
def graph_implementation(arg_objs, size, data=None):
    """Reduces the atom to an affine expression and list of constraints.

    Parameters
    ----------
    arg_objs : list
        LinExpr for each argument.
    size : tuple
        The size of the resulting expression.
    data :
        Additional data required by the atom.

    Returns
    -------
    tuple
        (LinOp for objective, list of constraints)
    """
    x = arg_objs[0]
    t = lu.create_var(size)
    # log(1 + exp(x)) <= t <=> exp(-t) + exp(x - t) <= 1
    obj0, constr0 = exp.graph_implementation([lu.neg_expr(t)], size)
    obj1, constr1 = exp.graph_implementation([lu.sub_expr(x, t)], size)
    lhs = lu.sum_expr([obj0, obj1])
    ones = lu.create_const(np.mat(np.ones(size)), size)
    constr = constr0 + constr1 + [lu.create_leq(lhs, ones)]
    return (t, constr)
def graph_implementation(self, arg_objs, shape: Tuple[int, ...],
                         data=None) -> Tuple[lo.LinOp, List[Constraint]]:
    """Index/slice into the expression.

    Parameters
    ----------
    arg_objs : list
        LinExpr for each argument.
    shape : tuple
        The shape of the resulting expression.
    data : tuple
        A tuple of slices.

    Returns
    -------
    tuple
        (LinOp, [constraints])
    """
    select_mat = self._select_mat
    final_shape = self._select_mat.shape
    # Flatten the selector in Fortran (column-major) order to match
    # how the argument is vectorized below.
    select_vec = np.reshape(select_mat, select_mat.size, order='F')
    # Select the chosen entries from expr.
    arg = arg_objs[0]
    identity = sp.eye(self.args[0].size).tocsc()
    vec_arg = lu.reshape(arg, (self.args[0].size, ))
    # Rows of the identity picked by select_vec form the selection matrix.
    mul_mat = identity[select_vec]
    mul_const = lu.create_const(mul_mat, mul_mat.shape, sparse=True)
    mul_expr = lu.mul_expr(mul_const, vec_arg, (mul_mat.shape[0], ))
    obj = lu.reshape(mul_expr, final_shape)
    return (obj, [])
def graph_implementation(arg_objs, size, data=None):
    """Reduces the atom to an affine expression and list of constraints.

    Parameters
    ----------
    arg_objs : list
        LinExpr for each argument.
    size : tuple
        The size of the resulting expression.
    data :
        Additional data required by the atom.

    Returns
    -------
    tuple
        (LinOp for objective, list of constraints)
    """
    A = arg_objs[0]
    rows, cols = A.size
    # Create the equivalent problem:
    #   minimize (trace(U) + trace(V))/2
    #   subject to:
    #            [U A; A.T V] is positive semidefinite
    X = lu.create_var((rows+cols, rows+cols))
    constraints = []
    # Fix X using the fact that A must be affine by the DCP rules.
    # X[0:rows,rows:rows+cols] == A
    index.block_eq(X, A, constraints,
                   0, rows, rows, rows+cols)
    half = lu.create_const(0.5, (1, 1))
    trace = lu.mul_expr(half, lu.trace(X), (1, 1))
    # Add SDP constraint.
    return (trace, [SDP(X)] + constraints)
def graph_implementation(arg_objs, size, data=None):
    """Reduces the atom to an affine expression and list of constraints.

    Parameters
    ----------
    arg_objs : list
        LinExpr for each argument.
    size : tuple
        The size of the resulting expression.
    data :
        Additional data required by the atom.

    Returns
    -------
    tuple
        (LinOp for objective, list of constraints)
    """
    x = arg_objs[0]
    y = arg_objs[1]  # Known to be a scalar.
    v = lu.create_var((1, 1))
    two = lu.create_const(2, (1, 1))
    # quad_over_lin epigraph: ||[y - v, 2x]||_2 <= y + v, with y >= 0.
    constraints = [
        SOC(lu.sum_expr([y, v]),
            [lu.sub_expr(y, v),
             lu.mul_expr(two, x, x.size)]),
        lu.create_geq(y)
    ]
    return (v, constraints)
def graph_implementation(arg_objs, size, data=None): """Convolve two vectors. Parameters ---------- arg_objs : list LinExpr for each argument. size : tuple The size of the resulting expression. data : Additional data required by the atom. Returns ------- tuple (LinOp for objective, list of constraints) """ # Implicit O(n) definition: # X = Y[:1,:] - Y[1:,:] Y = lu.create_var(size) axis = data[0] dim = size[axis] diff_mat = get_diff_mat(dim, axis) diff_mat = lu.create_const(diff_mat, (dim, dim), sparse=True) if axis == 0: diff = lu.mul_expr(diff_mat, Y, size) else: diff = lu.rmul_expr(Y, diff_mat, size) return (Y, [lu.create_eq(arg_objs[0], diff)])
def get_spacing_matrix(shape, spacing, offset):
    """Returns a sparse matrix LinOp that spaces out an expression.

    Parameters
    ----------
    shape : tuple
        (rows in matrix, columns in matrix)
    spacing : int
        The number of rows between each non-zero.
    offset : int
        The number of zero rows at the beginning of the matrix.

    Returns
    -------
    LinOp
        A sparse matrix constant LinOp.
    """
    val_arr = []
    row_arr = []
    col_arr = []
    # Selects from each column: column j maps to row spacing*j + offset.
    for var_row in range(shape[1]):
        val_arr.append(1.0)
        row_arr.append(spacing * var_row + offset)
        col_arr.append(var_row)
    mat = sp.coo_matrix((val_arr, (row_arr, col_arr)), shape).tocsc()
    return lu.create_const(mat, shape, sparse=True)
def canonicalize(self):
    """Returns the graph implementation of the object.

    Returns:
        A tuple of (affine expression, [constraints]).
    """
    # A constant contributes no constraints; just wrap its value.
    return (lu.create_const(self.value, self.size, self._sparse), [])
def format_axis(t, X, axis): """Formats all the row/column cones for the solver. Parameters ---------- t: The scalar part of the second-order constraint. X: A matrix whose rows/columns are each a cone. axis: Slice by column 0 or row 1. Returns ------- list A list of LinLeqConstr that represent all the elementwise cones. """ # Reduce to norms of columns. if axis == 1: X = lu.transpose(X) # Create matrices Tmat, Xmat such that Tmat*t + Xmat*X # gives the format for the elementwise cone constraints. cone_size = 1 + X.shape[0] terms = [] # Make t_mat mat_shape = (cone_size, 1) t_mat = sp.coo_matrix(([1.0], ([0], [0])), mat_shape).tocsc() t_mat = lu.create_const(t_mat, mat_shape, sparse=True) t_vec = t if not t.shape: # t is scalar t_vec = lu.reshape(t, (1, 1)) else: # t is 1D t_vec = lu.reshape(t, (1, t.shape[0])) mul_shape = (cone_size, t_vec.shape[1]) terms += [lu.mul_expr(t_mat, t_vec, mul_shape)] # Make X_mat if len(X.shape) == 1: X = lu.reshape(X, (X.shape[0], 1)) mat_shape = (cone_size, X.shape[0]) val_arr = (cone_size - 1) * [1.0] row_arr = list(range(1, cone_size)) col_arr = list(range(cone_size - 1)) X_mat = sp.coo_matrix((val_arr, (row_arr, col_arr)), mat_shape).tocsc() X_mat = lu.create_const(X_mat, mat_shape, sparse=True) mul_shape = (cone_size, X.shape[1]) terms += [lu.mul_expr(X_mat, X, mul_shape)] return [lu.create_geq(lu.sum_expr(terms))]
def graph_implementation(arg_objs, size, data=None):
    """Reduces the atom to an affine expression and list of constraints.

    Parameters
    ----------
    arg_objs : list
        LinExpr for each argument.
    size : tuple
        The size of the resulting expression.
    data :
        Additional data required by the atom (data[0] is the shift s,
        a numeric constant or a Parameter).

    Returns
    -------
    tuple
        (LinOp for objective, list of constraints)
    """
    x = arg_objs[0]
    t = lu.create_var(size)
    # Wrap the shift s as a LinOp constant/parameter.
    s = data[0]
    if isinstance(s, Parameter):
        s = lu.create_param(s, (1, 1))
    else:  # s is constant.
        s = lu.create_const(s, (1, 1))
    # Constants must be LinOps before entering mul_expr, matching
    # every other call site (the original passed a raw Python 2).
    two = lu.create_const(2, (1, 1))
    # Epigraph constraint:
    # exp(-t) + exp(s - t - x) + exp(-s - t - x) + exp(-t - 2x) <= 1
    # NOTE(review): the original carried a "#Wrong sign?" comment on
    # these terms -- confirm the intended signs against the atom's
    # mathematical definition.
    obj0, constr0 = exp.graph_implementation([lu.neg_expr(t)], size)
    obj1, constr1 = exp.graph_implementation(
        [lu.sub_expr(s, lu.sum_expr([t, x]))], size)
    obj2, constr2 = exp.graph_implementation(
        [lu.sub_expr(lu.neg_expr(s), lu.sum_expr([t, x]))], size)
    obj3, constr3 = exp.graph_implementation(
        [lu.sub_expr(lu.neg_expr(t), lu.mul_expr(two, x, size))], size)
    lhs = lu.sum_expr([obj0, obj1, obj2, obj3])
    ones = lu.create_const(np.mat(np.ones(size)), size)
    constr = constr0 + constr1 + constr2 + constr3 + [lu.create_leq(lhs, ones)]
    return (t, constr)
def canonicalize(self):
    """Returns the graph implementation of the object.

    Returns:
        A tuple of (affine expression, [constraints]).
    """
    # Gather the canonical constraints of every argument; the
    # objective itself is just the constant zero.
    constraints = [c for cons in self.args
                   for c in cons.canonical_form[1]]
    return (lu.create_const(0, (1, 1)), constraints)
def canonicalize(self):
    """Variable must be semidefinite and symmetric.

    Returns:
        A tuple of (the upper-triangular variable, [SDP constraint]).
    """
    # The variable stores only the upper-triangular entries as a vector.
    upper_tri = lu.create_var((self.size[0], 1), self.id)
    # Coefficient matrix that scatters the upper-triangular vector
    # into a full (symmetric) n*n matrix.
    fill_coeff = upper_tri_to_full(self.n)
    fill_coeff = lu.create_const(fill_coeff, (self.n*self.n, self.size[0]),
                                 sparse=True)
    full_mat = lu.mul_expr(fill_coeff, upper_tri, (self.n*self.n, 1))
    full_mat = lu.reshape(full_mat, (self.n, self.n))
    # Symmetry is guaranteed by construction, so don't re-enforce it.
    return (upper_tri, [SDP(full_mat, enforce_sym=False)])
def graph_implementation(arg_objs, shape, data=None):
    """Reduces the atom to an affine expression and list of constraints.

    minimize n^2 + 2M|s|
    subject to s + n = x

    Parameters
    ----------
    arg_objs : list
        LinExpr for each argument.
    shape : tuple
        The shape of the resulting expression.
    data :
        Additional data required by the atom (data[0] is M).

    Returns
    -------
    tuple
        (LinOp for objective, list of constraints)
    """
    M = data[0]
    x = arg_objs[0]
    n = lu.create_var(shape)
    s = lu.create_var(shape)
    two = lu.create_const(2, (1, 1))
    if isinstance(M, Parameter):
        M = lu.create_param(M, (1, 1))
    else:  # M is constant.
        M = lu.create_const(M.value, (1, 1))
    # n**2 + 2*M*|s|
    n2, constr_sq = power.graph_implementation(
        [n], shape, (2, (Fraction(1, 2), Fraction(1, 2)))
    )
    abs_s, constr_abs = abs.graph_implementation([s], shape)
    M_abs_s = lu.mul_expr(M, abs_s)
    obj = lu.sum_expr([n2, lu.mul_expr(two, M_abs_s)])
    # x == s + n
    constraints = constr_sq + constr_abs
    constraints.append(lu.create_eq(x, lu.sum_expr([n, s])))
    return (obj, constraints)
def canonicalize(self):
    """Returns the graph implementation of the object.

    Returns:
        A tuple of (affine expression, [constraints]).
    """
    # Expressions involving parameters stay symbolic; otherwise the
    # numeric value is baked in as a constant.
    if self.parameters():
        obj = lu.create_param(self, self.size)
    else:
        obj = lu.create_const(self.value, self.size)
    return (obj, [])
def graph_implementation(arg_objs, size, data=None):
    """Reduces the atom to an affine expression and list of constraints.

    Formulation:
        min  1 - sqrt(2z - z^2)
        s.t. x >= 0, z <= 1, z = x + s, s <= 0
    """
    x = arg_objs[0]
    z = lu.create_var(size)
    s = lu.create_var(size)
    zeros = lu.create_const(np.mat(np.zeros(size)), size)
    ones = lu.create_const(np.mat(np.ones(size)), size)
    # z^2 via the power atom with exponent 2.
    z2, constr_square = power.graph_implementation(
        [z], size, (2, (Fraction(1, 2), Fraction(1, 2))))
    two_z = lu.sum_expr([z, z])
    sub = lu.sub_expr(two_z, z2)
    # sqrt(2z - z^2) via the power atom with exponent 1/2.
    sq, constr_sqrt = power.graph_implementation(
        [sub], size, (Fraction(1, 2), (Fraction(1, 2), Fraction(1, 2))))
    obj = lu.sub_expr(ones, sq)
    constr = [lu.create_eq(z, lu.sum_expr([x, s]))] + [
        lu.create_leq(zeros, x)
    ] + [lu.create_leq(z, ones)] + [lu.create_leq(s, zeros)
                                    ] + constr_square + constr_sqrt
    return (obj, constr)
def constraints(self):
    """Returns the constraints forcing a 0/1 doubly-stochastic matrix.

    Entries are bounded in [0, 1] and every row and column must sum
    to exactly one (a permutation-matrix relaxation).
    NOTE(review): both loops index with self.size[0] -- this assumes
    the variable is square; confirm against callers.
    """
    obj, constraints = super(BoolVar, self).canonicalize()
    one = lu.create_const(1, (1, 1))
    # 0 <= entries <= 1.
    constraints += [lu.create_geq(obj),
                    lu.create_leq(obj, one)]
    # Each row and column sums to one.
    for i in range(self.size[0]):
        row_sum = lu.sum_expr([self[i, j] for j in range(self.size[0])])
        col_sum = lu.sum_expr([self[j, i] for j in range(self.size[0])])
        constraints += [lu.create_eq(row_sum, one),
                        lu.create_eq(col_sum, one)]
    return constraints
def graph_implementation(arg_objs, size, data=None):
    """Reduces the atom to an affine expression and list of constraints.

    Parameters
    ----------
    arg_objs : list
        LinExpr for each argument.
    size : tuple
        The size of the resulting expression.
    data :
        Additional data required by the atom.

    Returns
    -------
    tuple
        (LinOp for objective, list of constraints)
    """
    A = arg_objs[0]  # m by n matrix.
    n, m = A.size
    # Create a matrix with Schur complement I*t - (1/t)*A.T*A.
    X = lu.create_var((n+m, n+m))
    t = lu.create_var((1, 1))
    I_n = lu.create_const(sp.eye(n), (n, n))
    I_m = lu.create_const(sp.eye(m), (m, m))
    # Expand A.T.
    obj, constraints = transpose.graph_implementation([A], (m, n))
    # Fix X using the fact that A must be affine by the DCP rules.
    # X[0:n, 0:n] == I_n*t
    index.block_eq(X, lu.mul_expr(I_n, t, (n, n)), constraints,
                   0, n, 0, n)
    # X[0:n, n:n+m] == A
    index.block_eq(X, A, constraints,
                   0, n, n, n+m)
    # X[n:n+m, 0:n] == obj
    index.block_eq(X, obj, constraints,
                   n, n+m, 0, n)
    # X[n:n+m, n:n+m] == I_m*t
    index.block_eq(X, lu.mul_expr(I_m, t, (m, m)), constraints,
                   n, n+m, n, n+m)
    # Add SDP constraint.
    return (t, constraints + [SDP(X)])
def graph_implementation(arg_objs, size, data=None):
    """Reduces the atom to an affine expression and list of constraints.

    minimize n^2 + 2M|s|
    subject to s + n = x

    Parameters
    ----------
    arg_objs : list
        LinExpr for each argument.
    size : tuple
        The size of the resulting expression.
    data :
        Additional data required by the atom (the threshold M).

    Returns
    -------
    tuple
        (LinOp for objective, list of constraints)
    """
    M = data
    x = arg_objs[0]
    n = lu.create_var(size)
    s = lu.create_var(size)
    two = lu.create_const(2, (1, 1))
    if isinstance(M, Parameter):
        M = lu.create_param(M, (1, 1))
    else:  # M is constant.
        M = lu.create_const(M.value, (1, 1))
    # n**2 + 2*M*|s|
    n2, constr_sq = square.graph_implementation([n], size)
    abs_s, constr_abs = abs.graph_implementation([s], size)
    M_abs_s = lu.mul_expr(M, abs_s, size)
    obj = lu.sum_expr([n2, lu.mul_expr(two, M_abs_s, size)])
    # x == s + n
    constraints = constr_sq + constr_abs
    constraints.append(lu.create_eq(x, lu.sum_expr([n, s])))
    return (obj, constraints)
def format_axis(t, X, axis):
    """Formats all the row/column cones for the solver.

    Parameters
    ----------
    t: The scalar part of the second-order constraint.
    X: A matrix whose rows/columns are each a cone.
    axis: Slice by column 0 or row 1.

    Returns
    -------
    list
        A list of LinLeqConstr that represent all the elementwise cones.
    """
    # Reduce to norms of columns.
    if axis == 1:
        X = lu.transpose(X)
    # Create matrices Tmat, Xmat such that Tmat*t + Xmat*X
    # gives the format for the elementwise cone constraints.
    num_cones = t.size[0]
    cone_size = 1 + X.size[0]
    terms = []
    # Make t_mat: places each t entry at the top of its cone.
    mat_size = (cone_size, 1)
    # Use num_cones instead of repeating t.size[0] (it was previously
    # computed but unused).
    prod_size = (cone_size, num_cones)
    t_mat = sp.coo_matrix(([1.0], ([0], [0])), mat_size).tocsc()
    t_mat = lu.create_const(t_mat, mat_size, sparse=True)
    terms += [lu.mul_expr(t_mat, lu.transpose(t), prod_size)]
    # Make X_mat: shifts X down one row so it sits below the t entry.
    mat_size = (cone_size, X.size[0])
    prod_size = (cone_size, X.size[1])
    val_arr = (cone_size - 1)*[1.0]
    row_arr = range(1, cone_size)
    col_arr = range(cone_size-1)
    X_mat = sp.coo_matrix((val_arr, (row_arr, col_arr)), mat_size).tocsc()
    X_mat = lu.create_const(X_mat, mat_size, sparse=True)
    terms += [lu.mul_expr(X_mat, X, prod_size)]
    return [lu.create_geq(lu.sum_expr(terms))]
def canonicalize(self):
    """Returns the graph implementation of the object.

    Marks the top level constraint as the dual_holder,
    so the dual value will be saved to the EqConstraint.

    Returns:
        A tuple of (affine expression, [constraints]).
    """
    obj, constraints = self._expr.canonical_form
    # Symmetrize the expression: (obj + obj.T)/2.
    half = lu.create_const(0.5, (1, 1))
    symm = lu.mul_expr(half, lu.sum_expr([obj, lu.transpose(obj)]),
                       obj.size)
    # Tag with this constraint's id so the dual value is recoverable.
    dual_holder = SDP(symm, enforce_sym=False, constr_id=self.id)
    return (None, constraints + [dual_holder])
def _grad(self, values): """Gives the (sub/super)gradient of the atom w.r.t. each argument. Matrix expressions are vectorized, so the gradient is a matrix. Args: values: A list of numeric values for the arguments. Returns: A list of SciPy CSC sparse matrices or None. """ # TODO should be a simple function in CVXcanon for this. # Make a fake lin op tree for the function. fake_args = [] var_offsets = {} offset = 0 for idx, arg in enumerate(self.args): if arg.is_constant(): fake_args += [lu.create_const(arg.value, arg.size)] else: fake_args += [lu.create_var(arg.size, idx)] var_offsets[idx] = offset offset += arg.size[0]*arg.size[1] fake_expr, _ = self.graph_implementation(fake_args, self.size, self.get_data()) # Get the matrix representation of the function. V, I, J, _ = canonInterface.get_problem_matrix( [lu.create_eq(fake_expr)], var_offsets, None ) shape = (offset, self.size[0]*self.size[1]) stacked_grad = sp.coo_matrix((V, (J, I)), shape=shape).tocsc() # Break up into per argument matrices. grad_list = [] start = 0 for arg in self.args: if arg.is_constant(): grad_shape = (arg.size[0]*arg.size[1], shape[1]) if grad_shape == (1, 1): grad_list += [0] else: grad_list += [sp.coo_matrix(grad_shape, dtype='float64')] else: stop = start + arg.size[0]*arg.size[1] grad_list += [stacked_grad[start:stop, :]] start = stop return grad_list
def constr_func(aff_obj):
    """Constrain aff_obj to be a convex combination of the values.

    Introduces one weight theta_i per value and enforces
    aff_obj == sum_i theta_i * value_i, sum_i theta_i == 1, theta_i >= 0.
    """
    # `range` instead of the Python-2-only `xrange`: equivalent here
    # and portable to Python 3.
    theta = [lu.create_var((1, 1)) for i in range(len(values))]
    convex_objs = []
    for val, theta_var in zip(values, theta):
        val_aff = val.canonical_form[0]
        convex_objs.append(
            lu.mul_expr(val_aff, theta_var, val_aff.size)
        )
    convex_combo = lu.sum_expr(convex_objs)
    one = lu.create_const(1, (1, 1))
    constraints = [lu.create_eq(aff_obj, convex_combo),
                   lu.create_eq(lu.sum_expr(theta), one)]
    # Weights are nonnegative.
    for theta_var in theta:
        constraints.append(lu.create_geq(theta_var))
    return constraints