def _grad(self, values):
    """Gives the (sub/super)gradient of the atom w.r.t. each argument.

    Matrix expressions are vectorized, so the gradient is a matrix.

    Args:
        values: A list of numeric values for the arguments.

    Returns:
        A list of SciPy CSC sparse matrices or None.
    """
    # TODO should be a simple function in CVXcanon for this.
    # Make a fake lin op tree for the function.
    fake_args = []
    var_offsets = {}
    offset = 0
    for idx, arg in enumerate(self.args):
        if arg.is_constant():
            # Fix: constants must become constant nodes, not variables.
            # Creating a variable for a constant argument adds spurious
            # columns for a gradient that is identically zero.
            fake_args += [lu.create_const(arg.value, arg.size)]
        else:
            fake_args += [lu.create_var(arg.size, idx)]
            var_offsets[idx] = offset
            offset += arg.size[0] * arg.size[1]
    fake_expr, _ = self.graph_implementation(fake_args, self.size,
                                             self.get_data())
    # Get the matrix representation of the function.
    V, I, J, _ = canonInterface.get_problem_matrix(
        [lu.create_eq(fake_expr)], var_offsets, None)
    shape = (offset, self.size[0] * self.size[1])
    stacked_grad = sp.coo_matrix((V, (J, I)), shape=shape).tocsc()
    # Break up into per argument matrices.
    grad_list = []
    start = 0
    for arg in self.args:
        if arg.is_constant():
            # Gradient w.r.t. a constant argument is identically zero:
            # a plain 0 for scalars, an all-zero sparse matrix otherwise.
            grad_shape = (arg.size[0] * arg.size[1], shape[1])
            if grad_shape == (1, 1):
                grad_list += [0]
            else:
                grad_list += [sp.coo_matrix(grad_shape, dtype='float64')]
        else:
            stop = start + arg.size[0] * arg.size[1]
            grad_list += [stacked_grad[start:stop, :]]
            start = stop
    return grad_list
def _coeffs_affine(self, expr):
    """Return (Ps, Q, R) coefficients for an affine expression.

    Ps is a list of empty N x N quadratic-term matrices (affine => no
    quadratic part), Q the linear coefficient matrix, R the constant
    offsets flattened to 1-D.
    """
    num_entries = expr.size[0] * expr.size[1]
    canon, _ = expr.canonical_form
    V, I, J, R = canonInterface.get_problem_matrix(
        [lu.create_eq(canon)], self.id_map)
    Q = sp.csr_matrix((V, (I, J)), shape=(num_entries, self.N))
    Ps = [sp.csr_matrix((self.N, self.N)) for _ in range(num_entries)]
    return (Ps, Q, R.flatten())
def _coeffs_affine(self, expr):
    """Compute the (Ps, Q, R) coefficient triple of an affine expression.

    Since the expression is affine, every quadratic-term matrix in Ps is
    an empty N x N sparse matrix; Q holds the linear coefficients and R
    the flattened constant part.
    """
    rows = expr.size[0] * expr.size[1]
    canon_expr, _ = expr.canonical_form
    V, I, J, R = canonInterface.get_problem_matrix(
        [lu.create_eq(canon_expr)], self.id_map)
    linear = sp.csr_matrix((V, (I, J)), shape=(rows, self.N))
    quadratic = [sp.csr_matrix((self.N, self.N)) for _ in range(rows)]
    return (quadratic, linear, R.flatten())
def _grad(self, values): """Gives the (sub/super)gradient of the atom w.r.t. each argument. Matrix expressions are vectorized, so the gradient is a matrix. Args: values: A list of numeric values for the arguments. Returns: A list of SciPy CSC sparse matrices or None. """ # TODO should be a simple function in CVXcanon for this. # Make a fake lin op tree for the function. fake_args = [] var_offsets = {} offset = 0 for idx, arg in enumerate(self.args): if arg.is_constant(): fake_args += [lu.create_const(arg.value, arg.size)] else: fake_args += [lu.create_var(arg.size, idx)] var_offsets[idx] = offset offset += arg.size[0]*arg.size[1] fake_expr, _ = self.graph_implementation(fake_args, self.size, self.get_data()) # Get the matrix representation of the function. V, I, J, _ = canonInterface.get_problem_matrix( [lu.create_eq(fake_expr)], var_offsets, None ) shape = (offset, self.size[0]*self.size[1]) stacked_grad = sp.coo_matrix((V, (J, I)), shape=shape).tocsc() # Break up into per argument matrices. grad_list = [] start = 0 for arg in self.args: if arg.is_constant(): grad_shape = (arg.size[0]*arg.size[1], shape[1]) if grad_shape == (1, 1): grad_list += [0] else: grad_list += [sp.coo_matrix(grad_shape, dtype='float64')] else: stop = start + arg.size[0]*arg.size[1] grad_list += [stacked_grad[start:stop, :]] start = stop return grad_list
def presolve(objective, constr_map): """Eliminates unnecessary constraints and short circuits the solver if possible. Parameters ---------- objective : LinOp The canonicalized objective. constr_map : dict A map of constraint type to a list of constraints. Returns ------- bool Is the problem infeasible? """ # Remove redundant constraints. for key, constraints in constr_map.items(): uniq_constr = unique(constraints, key=lambda c: c.constr_id) constr_map[key] = list(uniq_constr) # If there are no constraints, the problem is unbounded # if any of the coefficients are non-zero. # If all the coefficients are zero then return the constant term # and set all variables to 0. if not any(constr_map.values()): str(objective) # TODO # Remove constraints with no variables or parameters. for key in [s.EQ, s.LEQ]: new_constraints = [] for constr in constr_map[key]: vars_ = lu.get_expr_vars(constr.expr) if len(vars_) == 0 and not lu.get_expr_params(constr.expr): V, I, J, coeff = canonInterface.get_problem_matrix([constr]) is_pos, is_neg = intf.sign(coeff) # For equality constraint, coeff must be zero. # For inequality (i.e. <= 0) constraint, # coeff must be negative. if key == s.EQ and not (is_pos and is_neg) or \ key == s.LEQ and not is_neg: return s.INFEASIBLE else: new_constraints.append(constr) constr_map[key] = new_constraints return None
def presolve(objective, constr_map):
    """Eliminates unnecessary constraints and short circuits the solver
    if possible.

    Parameters
    ----------
    objective : LinOp
        The canonicalized objective.
    constr_map : dict
        A map of constraint type to a list of constraints.

    Returns
    -------
    bool
        Is the problem infeasible?
    """
    # Drop duplicate constraints (same constr_id).
    for key, constraints in constr_map.items():
        constr_map[key] = list(
            unique(constraints, key=lambda c: c.constr_id))

    # If there are no constraints, the problem is unbounded
    # if any of the coefficients are non-zero.
    # If all the coefficients are zero then return the constant term
    # and set all variables to 0.
    if not any(constr_map.values()):
        str(objective)  # TODO

    # Remove constraints with no variables or parameters.
    for key in [s.EQ, s.LEQ]:
        kept = []
        for constr in constr_map[key]:
            vars_ = lu.get_expr_vars(constr.expr)
            params = lu.get_expr_params(constr.expr)
            if len(vars_) != 0 or params:
                # Still depends on variables/parameters: keep as-is.
                kept.append(constr)
                continue
            # Purely constant constraint: check it for feasibility.
            V, I, J, coeff = canonInterface.get_problem_matrix([constr])
            is_pos, is_neg = intf.sign(coeff)
            # Equality requires coeff == 0 (both signs hold); an
            # inequality (<= 0) requires coeff to be negative.
            eq_violated = key == s.EQ and not (is_pos and is_neg)
            leq_violated = key == s.LEQ and not is_neg
            if eq_violated or leq_violated:
                return s.INFEASIBLE
        constr_map[key] = kept

    return None
def _lin_matrix(self, mat_cache, caching=False):
    """Computes a matrix and vector representing a list of constraints.

    In the matrix, each constraint is given a block of rows.
    Each variable coefficient is inserted as a block with upper
    left corner at matrix[variable offset, constraint offset].
    The constant term in the constraint is added to the vector.

    Parameters
    ----------
    mat_cache : MatrixCache
        The cached version of the matrix-vector pair.
    caching : bool
        Is the data being cached?
    """
    active_constr = []
    constr_offsets = []
    vert_offset = 0
    for constr in mat_cache.constraints:
        # Process the constraint if it has a parameter and not caching
        # or it doesn't have a parameter and caching.
        has_param = len(lu.get_expr_params(constr.expr)) > 0
        if (has_param and not caching) or (not has_param and caching):
            # If parameterized, convert the parameters into constant nodes.
            if has_param:
                constr = lu.copy_constr(constr,
                                        lu.replace_params_with_consts)
            active_constr.append(constr)
            constr_offsets.append(vert_offset)
        # The row offset advances for every constraint, including the
        # skipped ones, so offsets match the full constraint list.
        vert_offset += constr.size[0]*constr.size[1]
    # Convert the constraints into a matrix and vector offset
    # and add them to the matrix cache.
    if len(active_constr) > 0:
        V, I, J, const_vec = canonInterface.get_problem_matrix(
            active_constr,
            self.sym_data.var_offsets,
            constr_offsets
        )
        # Convert the constant offset to the correct data type.
        conv_vec = self.vec_intf.const_to_matrix(const_vec,
                                                 convert_scalars=True)
        # Accumulate (+=) into the cache; earlier passes may already
        # have written entries.
        mat_cache.const_vec[:const_vec.size] += conv_vec
        for i, vals in enumerate([V, I, J]):
            mat_cache.coo_tup[i].extend(vals)
def _coeffs_affine_atom(self, expr):
    """Return (Ps, Q, R) coefficients for an affine atom.

    Combines the coefficients of the atom's non-constant arguments with
    the atom's own matrix representation, yielding the coefficients of
    "A*X + b" over the problem variables.
    """
    n_out = expr.size[0] * expr.size[1]
    Ps = [sp.lil_matrix((self.N, self.N)) for _ in range(n_out)]
    Q = sp.lil_matrix((n_out, self.N))
    Parg = None
    Qarg = None
    Rarg = None
    fake_args = []
    offsets = {}
    offset = 0
    for idx, arg in enumerate(expr.args):
        if arg.is_constant():
            # Constants need no coefficients; embed them directly.
            fake_args.append(lu.create_const(arg.value, arg.size))
            continue
        # Stack this argument's coefficients under the previous ones.
        if Parg is None:
            (Parg, Qarg, Rarg) = self.get_coeffs(arg)
        else:
            (p, q, r) = self.get_coeffs(arg)
            Parg += p
            Qarg = sp.vstack([Qarg, q])
            Rarg = np.concatenate([Rarg, r])
        fake_args.append(lu.create_var(arg.size, idx))
        offsets[idx] = offset
        offset += arg.size[0] * arg.size[1]
    fake_expr, _ = expr.graph_implementation(fake_args, expr.size,
                                             expr.get_data())
    # Matrix representation of the atom over the fake variables.
    V, I, J, R = canonInterface.get_problem_matrix(
        [lu.create_eq(fake_expr)], offsets)
    R = R.flatten()
    # Accumulate "A*X + b".
    for (v, i, j) in zip(V, I.astype(int), J.astype(int)):
        Ps[i] += v * Parg[j]
        Q[i, :] += v * Qarg[j, :]
        R[i] += v * Rarg[j]
    Ps = [P.tocsr() for P in Ps]
    return (Ps, Q.tocsr(), R)
def affine(self, expr):
    """Extract A, b from an expression that is reducable to A*x + b.

    Parameters
    ----------
    expr : Expression
        The expression to process.

    Returns
    -------
    SciPy CSR matrix
        The coefficient matrix A of shape (np.prod(expr.shape), self.N).
    NumPy.ndarray
        The offset vector b of shape (np.prod(expr.shape,)).

    Raises
    ------
    ValueError
        If the expression is not affine.
    """
    if not expr.is_affine():
        raise ValueError("Expression is not affine")
    canon, _ = expr.canonical_form
    V, I, J, b = canonInterface.get_problem_matrix(
        [lu.create_eq(canon)], self.id_map)
    A = sp.csr_matrix((V, (I, J)), shape=(expr.size, self.N))
    return A, b.flatten()
def _coeffs_affine_atom(self, expr):
    """Return (Ps, Q, R) coefficients for an affine atom.

    Combines the coefficients of the atom's non-constant arguments with
    the atom's own matrix representation, yielding the coefficients of
    "A*X + b" over the problem variables.
    """
    sz = expr.size[0]*expr.size[1]
    Ps = [sp.lil_matrix((self.N, self.N)) for i in range(sz)]
    Q = sp.lil_matrix((sz, self.N))
    Parg = None
    Qarg = None
    Rarg = None
    fake_args = []
    offsets = {}
    offset = 0
    for idx, arg in enumerate(expr.args):
        if arg.is_constant():
            fake_args += [lu.create_const(arg.value, arg.size)]
        else:
            if Parg is None:
                (Parg, Qarg, Rarg) = self.get_coeffs(arg)
            else:
                (p, q, r) = self.get_coeffs(arg)
                Parg += p
                Qarg = sp.vstack([Qarg, q])
                # Fix: r is a 1-D constant vector (R.flatten() upstream),
                # so stacking must use concatenate. np.vstack would build
                # a 2-D array and Rarg[j] below would then index rows
                # instead of scalar entries.
                Rarg = np.concatenate([Rarg, r])
            fake_args += [lu.create_var(arg.size, idx)]
            offsets[idx] = offset
            offset += arg.size[0]*arg.size[1]
    fake_expr, _ = expr.graph_implementation(fake_args, expr.size,
                                             expr.get_data())
    # Get the matrix representation of the function.
    V, I, J, R = canonInterface.get_problem_matrix(
        [lu.create_eq(fake_expr)], offsets)
    R = R.flatten()
    # return "AX+b"
    for (v, i, j) in zip(V, I.astype(int), J.astype(int)):
        Ps[i] += v*Parg[j]
        Q[i, :] += v*Qarg[j, :]
        R[i] += v*Rarg[j]
    Ps = [P.tocsr() for P in Ps]
    return (Ps, Q.tocsr(), R)