Example 1
def form_prox(prob, x_vars):
    """ Given a CVXPY problem, form its prox.
    
    Given problem: min f(x),
    
    form the prox problem:

    min f(x) + tau*||x-x0||^2
    
    Parameters
    ----------
    prob: CVXPY problem
    x_vars: dict
        Dict of k:v pairs, where k is a string, and v is a CVXPY Variable that appears in `prob`
        
    Returns
    -------
    pxprob: CVXPY prox problem
    x0_vars: dict
        Dict of k:v pairs, where v is a CVXPY Parameter corresponding to the prox input x0.
        x0_vars also contains the special key '__tau', which corresponds to the regularization parameter tau.
    """

    tau = cvx.Parameter(nonneg=True)
    x0_vars = {'__tau': tau}

    obj = 0
    for k in x_vars:
        x = x_vars[k]
        x0 = cvx.Parameter(x.shape)
        x0_vars[k] = x0

        obj = obj + tau * cvx.sum_squares(x - x0)

    pxprob = cvx.Problem(prob.objective + cvx.Minimize(obj), prob.constraints)

    return pxprob, x0_vars
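
A minimal usage sketch (not part of the original snippet, and assuming the CVXPY 1.x API used above): build a small nonnegative least-squares problem, form its prox, set the parameter values, and solve.

import numpy as np
import cvxpy as cvx

np.random.seed(0)
A, b = np.random.randn(10, 3), np.random.randn(10)
x = cvx.Variable(3)
prob = cvx.Problem(cvx.Minimize(cvx.sum_squares(A @ x - b)), [x >= 0])

pxprob, x0_vars = form_prox(prob, {'x': x})
x0_vars['__tau'].value = 0.5        # regularization weight tau
x0_vars['x'].value = np.zeros(3)    # prox input x0
pxprob.solve()
print(x.value)                      # minimizer of f(x) + tau*||x - x0||^2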
Example 2
    def power_ene_reservations(self, opt_vars, mask):
        """ Determines power and energy reservations required at the end of each timestep for the service to be provided.
        Additionally keeps track of the reservations per optimization window so the values may be accessed later.

        Args:
            opt_vars (Dict): dictionary of variables being optimized
            mask (DataFrame): A boolean array that is true for indices corresponding to time_series data included
                in the subs data set

        Returns:
            A power reservation and an energy reservation array for the optimization window--
            C_max, C_min, D_max, D_min, E_upper, E, and E_lower (in that order)
        """
        eta = self.storage.rte
        size = opt_vars['ene'].shape

        # calculate reservations
        c_max = 0
        c_min = opt_vars['nsr_c']
        d_min = 0
        d_max = opt_vars['nsr_d']
        e_upper = cvx.Parameter(shape=size,
                                value=np.zeros(size),
                                name='e_upper')
        e = cvx.Parameter(shape=size, value=np.zeros(size), name='e')
        e_lower = opt_vars['nsr_c'] * eta * self.dt + opt_vars[
            'nsr_d'] * self.duration

        # save reservation for optimization window
        self.e.append(e)
        self.e_lower.append(e_lower)
        self.e_upper.append(e_upper)
        self.c_max.append(c_max)
        self.c_min.append(c_min)
        self.d_max.append(d_max)
        self.d_min.append(d_min)
        return [c_max, c_min, d_max, d_min], [e_upper, e, e_lower]
Example 3
def simple_qp():
    # print(f'--- {sys._getframe().f_code.co_name} ---')
    print('simple qp')
    npr.seed(0)
    nx, ncon = 2, 3

    G = cp.Parameter((ncon, nx))
    h = cp.Parameter(ncon)
    x = cp.Variable(nx)
    obj = cp.Minimize(0.5 * cp.sum_squares(x - 1))
    cons = [G * x <= h]
    prob = cp.Problem(obj, cons)

    data, chain, inv_data = prob.get_problem_data(solver=cp.SCS)
    param_prob = data[cp.settings.PARAM_PROB]
    print(param_prob.A.A)

    x0 = npr.randn(nx)
    s0 = npr.randn(ncon)
    G.value = npr.randn(ncon, nx)
    h.value = G.value.dot(x0) + s0

    prob.solve(solver=cp.SCS)

    delC = npr.randn(param_prob.c.shape[0])[:-1]
    delA = npr.randn(param_prob.A.shape[0])
    num_con = delA.size // (param_prob.x.size + 1)
    delb = delA[-num_con:]
    delA = delA[:-num_con]
    delA = sp.csc_matrix(np.reshape(delA, (num_con, param_prob.x.size)))
    del_param_dict = param_prob.apply_param_jac(delC, delA, delb)
    print(del_param_dict)
    var_map = param_prob.split_solution(npr.randn(param_prob.x.size))
    print(var_map)
    print(param_prob.split_adjoint(var_map))

    print(x.value)
Example 4
    def test_least_squares(self):
        set_seed(243)
        m, n = 100, 20

        A = cp.Parameter((m, n))
        b = cp.Parameter(m)
        x = cp.Variable(n)
        obj = cp.sum_squares(A@x - b) + cp.sum_squares(x)
        prob = cp.Problem(cp.Minimize(obj))
        prob_th = CvxpyLayer(prob, [A, b], [x])

        A_th = torch.randn(m, n).double().requires_grad_()
        b_th = torch.randn(m).double().requires_grad_()

        x = prob_th(A_th, b_th, solver_args={"eps": 1e-10})[0]

        def lstsq(A, b):
            return torch.solve(
                (A.t() @ b).unsqueeze(1),
                A.t() @ A + torch.eye(n).double())[0]

        x_lstsq = lstsq(A_th, b_th)

        grad_A_cvxpy, grad_b_cvxpy = grad(x.sum(), [A_th, b_th])
        grad_A_lstsq, grad_b_lstsq = grad(x_lstsq.sum(), [A_th, b_th])

        self.assertAlmostEqual(
            torch.norm(grad_A_cvxpy - grad_A_lstsq).item(), 0.0)
        self.assertAlmostEqual(
            torch.norm(grad_b_cvxpy - grad_b_lstsq).item(), 0.0)
Example 5
  def __init__(self, nz, nu, K, Q, R, zmin, zmax, umin, umax):
    self._nz = nz
    self._nu = nu
    self._K = K
    self._Q = Q
    self._R = R
    self._zmin = zmin
    self._zmax = zmax
    self._umin = umin
    self._umax = umax

    # Define parameters
    self.z_init = cp.Parameter(self._nz)
    self.z_goal = cp.Parameter(self._nz)

    self.A = [None] * self._K
    self.B = [None] * self._K
    self.o = [None] * self._K

    for k in range(self._K):
      self.A[k] = cp.Parameter((self._nz, self._nz))
      self.B[k] = cp.Parameter((self._nz, self._nu))

    # Define action and observation vectors (variables)
    self.u = cp.Variable((self._nu, self._K))
    self.z = cp.Variable((self._nz, self._K + 1))

    objective = 0
    constraints = [self.z[:, 0] == self.z_init]

    for k in range(self._K):
      objective += cp.quad_form(self.z[:, k+1] - self.z_goal, self._Q) + cp.quad_form(self.u[:, k], self._R)
      constraints += [self.z[:, k + 1] == self.A[k] * self.z[:, k] + self.B[k] * self.u[:, k]]
      constraints += [self._zmin <= self.z[:, k], self.z[:, k] <= self._zmax]
      constraints += [self._umin <= self.u[:, k], self.u[:, k] <= self._umax]

    self.prob = cp.Problem(cp.Minimize(objective), constraints)
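
A hypothetical usage sketch for the controller above; the class name `MPCController`, the double-integrator dynamics, and the weights are illustrative assumptions, since the snippet only shows `__init__`.

import numpy as np

# Assumed class name for illustration; the snippet above only shows __init__.
mpc = MPCController(nz=2, nu=2, K=10,
                    Q=np.eye(2), R=0.1 * np.eye(2),
                    zmin=-5.0, zmax=5.0, umin=-1.0, umax=1.0)

mpc.z_init.value = np.array([1.0, 0.0])   # current state
mpc.z_goal.value = np.zeros(2)            # target state
for k in range(10):
    mpc.A[k].value = np.array([[1.0, 0.1], [0.0, 1.0]])  # linearized dynamics
    mpc.B[k].value = 0.1 * np.eye(2)

mpc.prob.solve()
print(mpc.u.value[:, 0])   # first control input of the planned sequence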
Example 6
    def test_least_squares(self):
        key = random.PRNGKey(0)
        m, n = 100, 20

        A = cp.Parameter((m, n))
        b = cp.Parameter(m)
        x = cp.Variable(n)
        obj = cp.sum_squares(A @ x - b) + cp.sum_squares(x)
        prob = cp.Problem(cp.Minimize(obj))
        prob_jax = CvxpyLayer(prob, [A, b], [x])

        key, k1, k2 = random.split(key, num=3)
        A_jax = random.normal(k1, shape=(m, n))
        b_jax = random.normal(k2, shape=(m, ))

        def lstsq_sum_cp(A_jax, b_jax):
            x = prob_jax(A_jax, b_jax, solver_args={'eps': 1e-10})[0]
            return sum(x)

        def lstsq_sum_linalg(A_jax, b_jax):
            x = jnp.linalg.solve(A_jax.T @ A_jax + jnp.eye(n), A_jax.T @ b_jax)
            return sum(x)

        d_lstsq_sum_cp = jax.grad(lstsq_sum_cp, [0, 1])
        d_lstsq_sum_linalg = jax.grad(lstsq_sum_linalg, [0, 1])

        grad_A_cvxpy, grad_b_cvxpy = d_lstsq_sum_cp(A_jax, b_jax)
        grad_A_lstsq, grad_b_lstsq = d_lstsq_sum_linalg(A_jax, b_jax)

        self.assertAlmostEqual(jnp.linalg.norm(grad_A_cvxpy -
                                               grad_A_lstsq).item(),
                               0.0,
                               places=6)
        self.assertAlmostEqual(jnp.linalg.norm(grad_b_cvxpy -
                                               grad_b_lstsq).item(),
                               0.0,
                               places=6)
Example 7
    def objective_function(self,
                           variables,
                           subs,
                           generation,
                           annuity_scalar=1):
        """ Generates the full objective function, including the optimization variables.

        Args:
            variables (Dict): dictionary of variables being optimized
            subs (DataFrame): table of load data for the optimization windows
            generation (list, Expression): the sum of generation within the system
            annuity_scalar (float): a scalar value to be multiplied by any yearly cost or benefit that helps capture the cost/benefit over
                        the entire project lifetime (only to be set when sizing)

        Returns:
            The expression of the objective function that it affects. This can be passed into the cvxpy solver.

        """
        size = subs.index.size
        load = cvx.Parameter(size,
                             value=np.array(subs.loc[:, "load"]),
                             name='load')
        p_energy = cvx.Parameter(size,
                                 value=self.p_energy.loc[subs.index].values,
                                 name='energy_price')

        load_p = p_energy * load  # TODO: make sure element-wise multiplication
        discharge_p = p_energy * variables['dis']
        charge_p = p_energy * variables['ch']
        generation_p = p_energy * generation

        # self.costs.append(cvx.sum(load_penergy - discharge_penergy + charge_penergy - generation_penergy)*annuity_scalar)
        return {
            self.name:
            cvx.sum(load_p - discharge_p + charge_p - generation_p) * self.dt *
            annuity_scalar
        }
Example 8
    def objective_function(self, variables, subs, generation, annuity_scalar=1):
        """ Generates the full objective function, including the optimization variables.

        Args:
            variables (Dict): dictionary of variables being optimized
            subs (DataFrame): table of load data for the optimization windows
            generation (list, Expression): the sum of generation within the system
            annuity_scalar (float): a scalar value to be multiplied by any yearly cost or benefit that helps capture the cost/benefit over
                        the entire project lifetime (only to be set when sizing)

        Returns:
            The portion of the objective function that it affects. This can be passed into the cvxpy solver. Returns costs - benefits

        """

        # pay for reg down energy, get paid for reg up energy
        # paid revenue for capacity to do both

        p_regu = cvx.Parameter(subs.index.size, value=self.p_regu.loc[subs.index].values, name='p_regu')
        p_regd = cvx.Parameter(subs.index.size, value=self.p_regd.loc[subs.index].values, name='p_regd')
        p_ene = cvx.Parameter(subs.index.size, value=self.price.loc[subs.index].values, name='price')

        regup_charge_payment = cvx.sum(variables['regu_c'] * -p_regu) * annuity_scalar
        regup_charge_settlement = cvx.sum(variables['regu_c'] * -p_ene) * self.dt * self.kru_avg * annuity_scalar

        regup_disch_payment = cvx.sum(variables['regu_d'] * -p_regu) * annuity_scalar
        regup_disch_settlement = cvx.sum(variables['regu_d'] * -p_ene) * self.dt * self.kru_avg * annuity_scalar

        regdown_charge_payment = cvx.sum(variables['regd_c'] * -p_regd) * annuity_scalar
        regdown_charge_settlement = cvx.sum(variables['regd_c'] * p_ene) * self.dt * self.krd_avg * annuity_scalar

        regdown_disch_payment = cvx.sum(variables['regd_d'] * -p_regd) * annuity_scalar
        regdown_disch_settlement = cvx.sum(variables['regd_d'] * p_ene) * self.dt * self.krd_avg * annuity_scalar

        return {'regup_payment': regup_charge_payment + regup_disch_payment,
                'regdown_payment': regdown_charge_payment + regdown_disch_payment,
                'fr_energy_settlement': regup_disch_settlement + regdown_disch_settlement + regup_charge_settlement + regdown_charge_settlement}
Example 9
    def _formulate(self, b: np.ndarray=None) -> None:
        """ Formulate Problem

        Internal method that formulates and prepares the cvxpy model to be fit

        Args
        ----
        b: np.array, dtype float

        Return
        ------
        Nothing
        """
        if self.mask is None:
            raise ValueError("mask parameter not provided. A mask is required to fit the model")

        # Define and construct variables and constants
        self.x = cvxpy.Variable(self.cfg.A.shape[1], 1)

        if b is None:
            self.b = cvxpy.Parameter(rows=self.cfg.A.shape[0], cols=1, sign="positive", value=np.zeros(self.cfg.A.shape[0]))
        else:
            self.b = cvxpy.Parameter(rows=self.cfg.A.shape[0], cols=1, sign="positive", value=b)
        self.A = cvxpy.Parameter(rows=self.cfg.A.shape[0], cols=self.cfg.A.shape[1], sign="positive", value=self.cfg.A)
        self.x_img = cvxpy.reshape(self.x, self.mask.shape[0], self.mask.shape[1])  # x must be reshaped to allow calling cvx.tv on it. A possible reimplementation of the tv filter might speed this up
        self.background = cvxpy.mul_elemwise(1. - self.mask.flat[:], self.x_img)

        # The definition of the problem
        self.objective = cvxpy.Minimize(cvxpy.sum_squares(self.A * self.x - self.b) +

                                        self.beta * cvxpy.tv(self.x_img) )#+ self.alpha * cvxpy.norm(self.x, 1) )
        self.constraints = [self.x >= 0.,
                            self.background == 0,
                            cvxpy.sum_entries(self.x) - 0.85*cvxpy.sum_entries(self.b)/2*self.b_n >= 0]

        self.problem = cvxpy.Problem(self.objective, self.constraints)
        self.formulated = True
Example 10
def diamond_norm(choi0: np.ndarray, choi1: np.ndarray) -> float:
    """Return the diamond norm between two completely positive
    trace-preserving (CPTP) superoperators, represented as Choi matrices.

    The calculation uses the simplified semidefinite program of Watrous
    [arXiv:0901.4709](http://arxiv.org/abs/0901.4709). This calculation
    becomes very slow for 4 or more qubits.
    [J. Watrous, [Theory of Computing 5, 11, pp. 217-238
    (2009)](http://theoryofcomputing.org/articles/v005a011/)]

    :param choi0: A 4^N x 4^N matrix (where N is the number of qubits)
    :param choi1: A 4^N x 4^N matrix (where N is the number of qubits)
 
    """
    # Kudos: Based on MatLab code written by Marcus P. da Silva
    # (https://github.com/BBN-Q/matlab-diamond-norm/)
    import cvxpy as cvx
    assert choi0.shape == choi1.shape
    assert choi0.shape[0] == choi1.shape[1]
    dim2 = choi0.shape[0]
    dim = int(np.sqrt(dim2))

    delta_choi = choi0 - choi1
    delta_choi = (delta_choi.conj().T + delta_choi) / 2  # Enforce Hermiticity

    # Density matrix must be Hermitian, positive semidefinite, trace 1
    rho = cvx.Variable([dim, dim], complex=True)
    constraints = [rho == rho.H]
    constraints += [rho >> 0]
    constraints += [cvx.trace(rho) == 1]

    # W must be Hermitian, positive semidefinite
    W = cvx.Variable([dim2, dim2], complex=True)
    constraints += [W == W.H]
    constraints += [W >> 0]

    constraints += [(W - cvx.kron(np.eye(dim), rho)) << 0]

    J = cvx.Parameter([dim2, dim2], complex=True)
    objective = cvx.Maximize(cvx.real(cvx.trace(J.H * W)))

    prob = cvx.Problem(objective, constraints)

    J.value = delta_choi
    prob.solve()

    dnorm = prob.value * 2

    return dnorm
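
A quick sanity check (not from the original source): the identity channel and the bit-flip (Pauli-X) channel on one qubit are perfectly distinguishable, so their diamond distance should come out close to 2.

import numpy as np

X = np.array([[0.0, 1.0], [1.0, 0.0]])
choi_id = np.zeros((4, 4))
choi_x = np.zeros((4, 4))
for i in range(2):
    for j in range(2):
        E_ij = np.outer(np.eye(2)[i], np.eye(2)[j])   # |i><j|
        choi_id += np.kron(E_ij, E_ij)                # Choi matrix of the identity channel
        choi_x += np.kron(E_ij, X @ E_ij @ X)         # Choi matrix of the bit-flip channel

print(diamond_norm(choi_id, choi_x))   # expected to be close to 2.0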
Example 11
def add_align_constraints(ctrl):
    """ Add the alignment constraints for the longitudinal MPC """
    ctrl.u_delay = cvxpy.Parameter(4, 4, "u_delay")
    ctrl.past_input = cvxpy.Variable(4, ctrl.T + 4, "past_input")

    deltax = cvxpy.sum_squares(ctrl.x[0, 2:] - ctrl.x[5, 2:])
    deltay = cvxpy.sum_squares(ctrl.x[1, 10:] - ctrl.x[6, 10:])

    # Distance -> 0
    ctrl.objective.append(1.0*deltax)
    ctrl.objective.append(1.0*deltay)

    ctrl.objective.append(cvxpy.sum_squares(ctrl.x[3, 2:]))   # a
    ctrl.objective.append(cvxpy.sum_squares(ctrl.x[8, 2:]))   # a

    for t in range(ctrl.T):
        ctrl.objective.append(2*cvxpy.sum_squares(ctrl.u[:, t]))

    for t in range(2,ctrl.T):
        # UAV velocity -> 20 
        ctrl.objective.append(cvxpy.sum_squares(ctrl.x[2, t] - 20))
        ctrl.objective.append(cvxpy.sum_squares(ctrl.x[7, t] - 20))
        # Rate of change, acceleration
        ctrl.objective.append(1*cvxpy.sum_squares(ctrl.u[2, t] - ctrl.u[2, t-1]))
        ctrl.objective.append(3*cvxpy.sum_squares(ctrl.u[0, t] - ctrl.u[0, t-1]))

    ### END CONSTRAINTS ###
    # Distance
    ctrl.constraints += [ctrl.x[0, -1] - ctrl.x[5, -1] <= 1.0]    # deltax
    ctrl.constraints += [ctrl.x[0, -1] - ctrl.x[5, -1] >= -1.0] 
    ctrl.constraints += [ctrl.x[1, -1] - ctrl.x[6, -1] <= 1.0]    # deltay
    ctrl.constraints += [ctrl.x[1, -1] - ctrl.x[6, -1] >= -1.0]
    # Velocity
    ctrl.constraints += [ctrl.x[2, -1] - ctrl.x[7, -1] == 0] 
    # Acceleration
    ctrl.constraints += [ctrl.x[3, -1] - ctrl.x[8, -1] == 0] 

    ### Dynamic constraints ###
    # Include past inputs: 0 = t-4, 1 = t-3, 2 = t-2, 3 = t-1, 4=t
    ctrl.constraints += [ctrl.past_input[:,0:4] == ctrl.u_delay]
    ctrl.constraints += [ctrl.past_input[:,4:] == ctrl.u[:,:]]

    ctrl.constraints += [ctrl.x[:, 1:ctrl.T+1] == ctrl.A*ctrl.x[:, 0:ctrl.T]
                         + ctrl.B[:,0]*ctrl.u[0,0:ctrl.T]              #a_uav(t)
                         + ctrl.B[:,2]*ctrl.u[2,0:ctrl.T]              #a_ugv(t)
                         + ctrl.B[:,3]*ctrl.u[3,0:ctrl.T]              #psi_ugv(t)
                         + ctrl.B[:,4]*ctrl.past_input[1, 2:ctrl.T+2]  #psi_uav(t - 2)
                         + ctrl.B[:,5]*ctrl.past_input[1, 1:ctrl.T+1]] #psi_uav(t - 3)
    return ctrl
Example 12
    def efficient_risk(self, target_volatility, market_neutral=False):
        """
        Maximise return for a target risk. The resulting portfolio will have a volatility
        less than the target (but not guaranteed to be equal).

        :param target_volatility: the desired maximum volatility of the resulting portfolio.
        :type target_volatility: float
        :param market_neutral: whether the portfolio should be market neutral (weights sum to zero),
                               defaults to False. Requires negative lower weight bound.
        :type market_neutral: bool, optional
        :raises ValueError: if ``target_volatility`` is not a positive float
        :raises ValueError: if no portfolio can be found with volatility equal to ``target_volatility``
        :return: asset weights for the efficient risk portfolio
        :rtype: OrderedDict
        """
        if not isinstance(target_volatility,
                          (float, int)) or target_volatility < 0:
            raise ValueError("target_volatility should be a positive float")

        global_min_volatility = np.sqrt(
            1 / np.sum(np.linalg.pinv(self.cov_matrix)))

        if target_volatility < global_min_volatility:
            raise ValueError(
                "The minimum volatility is {:.3f}. Please use a higher target_volatility"
                .format(global_min_volatility))

        update_existing_parameter = self.is_parameter_defined(
            "target_variance")
        if update_existing_parameter:
            self._validate_market_neutral(market_neutral)
            self.update_parameter_value("target_variance",
                                        target_volatility**2)
        else:
            self._objective = objective_functions.portfolio_return(
                self._w, self.expected_returns)
            variance = objective_functions.portfolio_variance(
                self._w, self.cov_matrix)

            for obj in self._additional_objectives:
                self._objective += obj

            target_variance = cp.Parameter(name="target_variance",
                                           value=target_volatility**2,
                                           nonneg=True)
            self.add_constraint(lambda _: variance <= target_variance)
            self._make_weight_sum_constraint(market_neutral)
        return self._solve_cvxpy_opt_problem()
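
A short usage sketch, assuming this is the `efficient_risk` method of a PyPortfolioOpt-style `EfficientFrontier` object; the returns and covariance below are synthetic.

import numpy as np
from pypfopt.efficient_frontier import EfficientFrontier

np.random.seed(0)
n = 5
mu = np.random.uniform(0.05, 0.20, n)      # expected returns
F = np.random.randn(n, n)
S = (F @ F.T + np.eye(n)) / 100            # PSD covariance matrix

ef = EfficientFrontier(mu, S)
# Use the volatility of the riskiest single asset as a safely feasible target.
target = float(np.sqrt(np.max(np.diag(S))))
ef.efficient_risk(target_volatility=target)
print(ef.clean_weights())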
Example 13
    def __init__(self, n, eps):
        cvxopt.glpk.options["msg_lev"] = "GLP_MSG_OFF"
        # self.objs = objs # the two objs [l, g].
        self.n = n # the dimension of \theta
        self.eps = eps # the error bar of the optimization process [eps1 < g, eps2 < delta1, eps3 < delta2]
        self.deltas = cp.Parameter(2) # the two deltas of the objectives [l1, l2]
        self.Ca1 = cp.Parameter((2,1))       # [d_l, d_g] * d_l or [d_l, d_g] * d_g.
        self.Ca2 = cp.Parameter((2,1))

        self.alpha = cp.Variable((1,2))     # Variable to optimize
        # disparities have been satisfied; in this case we only maximize the performance
        obj_dom = cp.Maximize(self.alpha @ self.Ca1)
        obj_fair = cp.Maximize(self.alpha @ self.Ca2)

        constraints_dom = [self.alpha >= 0, cp.sum(self.alpha) == 1]
        constraints_fair = [self.alpha >= 0, cp.sum(self.alpha) == 1,
                            self.alpha @ self.Ca1 >= 0]

        self.prob_dom = cp.Problem(obj_dom, constraints_dom)  # LP balance
        self.prob_fair = cp.Problem(obj_fair, constraints_fair)

        self.gamma = 0     # Stores the latest Optimum value of the LP problem
        self.disparity = 0     # Stores the latest selected K max disparities
Example 14
    def test_basic_gp(self):
        x = cp.Variable(pos=True)
        y = cp.Variable(pos=True)
        z = cp.Variable(pos=True)

        a = cp.Parameter(pos=True, value=2.0)
        b = cp.Parameter(pos=True, value=1.0)
        c = cp.Parameter(value=0.5)

        objective_fn = 1 / (x * y * z)
        constraints = [a * (x * y + x * z + y * z) <= b, x >= y**c]
        problem = cp.Problem(cp.Minimize(objective_fn), constraints)
        problem.solve(cp.SCS, gp=True)

        layer = CvxpyLayer(problem,
                           parameters=[a, b, c],
                           variables=[x, y, z],
                           gp=True)
        a_jax = jnp.array(2.0)
        b_jax = jnp.array(1.0)
        c_jax = jnp.array(0.5)
        x_jax, y_jax, z_jax = layer(a_jax, b_jax, c_jax)

        self.assertAlmostEqual(x.value, x_jax, places=5)
        self.assertAlmostEqual(y.value, y_jax, places=5)
        self.assertAlmostEqual(z.value, z_jax, places=5)

        check_grads(lambda a, b, c: jnp.sum(
            layer(
                a,
                b,
                c,
                solver_args={"acceleration_lookback": 0},
            )[0]), [a_jax, b_jax, c_jax],
                    order=1,
                    modes=['rev'])
Example 15
    def test_rank_one_nmf(self) -> None:
        X = cp.Variable((3, 3), pos=True)
        x = cp.Variable((3,), pos=True)
        y = cp.Variable((3,), pos=True)
        xy = cp.vstack([x[0] * y, x[1] * y, x[2] * y])
        a = cp.Parameter(value=-1.0)
        b = cp.Parameter(pos=True,
                         shape=(6,),
                         value=np.array([1.0, 1.9, 0.8, 3.2, 5.9, 1.0]))
        R = cp.maximum(cp.multiply(X, (xy)**(a)), cp.multiply(X**(a), xy))
        objective = cp.sum(R)
        constraints = [
            X[0, 0] == b[0],
            X[0, 2] == b[1],
            X[1, 1] == b[2],
            X[2, 0] == b[3],
            X[2, 1] == b[4],
            x[0] * x[1] * x[2] == b[5],
        ]
        problem = cp.Problem(cp.Minimize(objective), constraints)
        # SCS struggles to solve this problem (solved/inaccurate unless
        # max_iters is very high, e.g. 10000)
        gradcheck(problem, gp=True, atol=1e-2)
        perturbcheck(problem, gp=True, atol=1e-2)
Example 16
    def test_entropy_maximization(self) -> None:
        np.random.seed(0)
        n, m, p = 5, 3, 2

        tmp = np.random.rand(n)
        A_np = np.random.randn(m, n)
        b_np = A_np.dot(tmp)
        F_np = np.random.randn(p, n)
        g_np = F_np.dot(tmp) + np.random.rand(p)

        x = cp.Variable(n)
        A = cp.Parameter((m, n))
        b = cp.Parameter(m)
        F = cp.Parameter((p, n))
        g = cp.Parameter(p)
        obj = cp.Maximize(cp.sum(cp.entr(x)) - cp.sum_squares(x))
        constraints = [A @ x == b, F @ x <= g]
        problem = cp.Problem(obj, constraints)
        A.value = A_np
        b.value = b_np
        F.value = F_np
        g.value = g_np
        gradcheck(problem, atol=1e-2, eps=1e-8)
        perturbcheck(problem, atol=1e-4)
Example 17
    def _cvxpy_probs_setup(self):
        """
        Formulate the upper and lower bound computations as CVXPY optimization
        problems.
        """
        arg_test = cp.Parameter((1, self.arg_dim), name='arg_test')

        (const_lb, const_ub, fun_test) = self._cvxpy_objective_and_constraints(
            self.arg, arg_test, self.fun, self.grad, self.grad_lips_constant,
            is_monotone_inc=self.is_monotone_inc,
            is_monotone_dec=self.is_monotone_dec, is_convex=self.is_convex,
            is_concave=self.is_concave)

        self.ub_prob = cp.Problem(cp.Maximize(fun_test), const_ub)
        self.lb_prob = cp.Problem(cp.Minimize(fun_test), const_lb)
Example 18
def sdp():
    print('sdp')
    npr.seed(0)

    d = 2
    X = cp.Variable((d, d), PSD=True)
    Y = cp.Parameter((d, d))
    obj = cp.Minimize(cp.trace(Y * X))
    prob = cp.Problem(obj, [X >= 1])

    Y.value = np.abs(npr.randn(d, d))
    print(Y.value.sum())

    prob.solve(solver=cp.SCS, verbose=True)
    print(X.value)
Example 19
def sigmoid():
    # print(f'--- {sys._getframe().f_code.co_name} ---')
    print('sigmoid')
    npr.seed(0)

    n = 4
    _x = cp.Parameter((n, 1))
    _y = cp.Variable(n)
    obj = cp.Minimize(-_x.T * _y - cp.sum(cp.entr(_y) + cp.entr(1. - _y)))
    prob = cp.Problem(obj)

    _x.value = npr.randn(n, 1)

    prob.solve(solver=cp.SCS)
    print(_y.value)
Example 20
    def test_equality(self):
        set_seed(243)
        n = 10
        A = np.eye(n)
        x = cp.Variable(n)
        b = cp.Parameter(n)
        prob = cp.Problem(cp.Minimize(cp.sum_squares(x)), [A @ x == b])
        layer = CvxpyLayer(prob, parameters=[b], variables=[x])
        b_tch = torch.randn(n, requires_grad=True)
        torch.autograd.gradcheck(
            lambda b: layer(b,
                            solver_args={
                                "eps": 1e-10,
                                "acceleration_lookback": 0
                            })[0].sum(), (b_tch,))
Example 21
def get_CAPM_weights(er, cov, gamma):
    n = cov.shape[0]
    w = cp.Variable((n, 1))
    gamma = cp.Parameter(nonneg=True, value=gamma)
    ret = w.T @ er
    risk = cp.quad_form(w, cov)
    constraints = [
        cp.sum(w) == 1,
        w <= 0.1,
        w >= 0,
    ]
    obj = cp.Maximize(ret - gamma * risk)
    prob = cp.Problem(obj, constraints)
    prob.solve()
    return w.value
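
An illustrative call (synthetic inputs, not from the original source): with 20 assets, the 10% per-asset cap still allows the weights to sum to one.

import numpy as np

np.random.seed(0)
n = 20
er = np.abs(np.random.randn(n, 1))    # expected returns, shape (n, 1)
F = np.random.randn(n, n)
cov = F @ F.T + np.eye(n)             # PSD covariance matrix

w = get_CAPM_weights(er, cov, gamma=2.0)
print(np.round(w.ravel(), 3))         # long-only weights, each <= 0.1, summing to 1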
Example 22
def cvxpy_test():
    numpy.random.seed(1)
    n = 10
    mu = numpy.abs(numpy.random.randn(n, 1))
    Sigma = numpy.random.randn(n, n)
    Sigma = Sigma.T.dot(Sigma)

    w = cvxpy.Variable(n)
    gamma = cvxpy.Parameter(nonneg=True)
    ret = mu.T @ w
    risk = cvxpy.quad_form(w, Sigma)
    print(
        "cvxpy test >>> ",
        cvxpy.Problem(cvxpy.Maximize(ret - gamma * risk),
                      [cvxpy.sum(w) == 1, w >= 0]))
Example 23
    def __init__(self, alpha: float = 1, beta: float = 0.01,
                 config: ReconstructionConfig = ReconstructionConfig(),
                 solver: str=cvxpy.SCS, solver_kwargs: dict={}) -> None:
        # The alpha and beta parameters might be moved into the formulation to make it more general,
        # but this would require removing alpha and beta from the init in favor of something like a pars_dict.
        self.alpha = cvxpy.Parameter(sign="positive", value=alpha)
        self.beta = cvxpy.Parameter(sign="positive", value=beta)


        self.solver = solver
        self.solver_kwargs = solver_kwargs

        self.cfg = config
        self.mask = self.cfg.mask_bw
        self.proj_N = self.cfg.proj_N
        self.b_n = cvxpy.Parameter(sign="positive", value=self.proj_N)
        self.w = self.proj_N / (self.proj_N - 1)  # a weight to maintain the proportion regularized / RSS in cross-validation

        self._formulate(b=None)
        self.formulated = True
        self.reformulated = False
        self.fit_at_least_once = False

        self.norm_factor = None
Example 24
def ball_con():
    # print(f'--- {sys._getframe().f_code.co_name} ---')
    print('ball con')
    npr.seed(0)

    n = 2

    A = cp.Parameter((n, n))
    z = cp.Parameter(n)
    p = cp.Parameter(n)
    x = cp.Variable(n)
    t = cp.Variable(n)
    obj = cp.Minimize(0.5 * cp.sum_squares(x - p))
    # TODO automate introduction of variables.
    cons = [0.5 * cp.sum_squares(A * t) <= 1, t == (x - z)]
    prob = cp.Problem(obj, cons)

    L = npr.randn(n, n)
    A.value = L.T
    z.value = npr.randn(n)
    p.value = npr.randn(n)

    prob.solve(solver=cp.SCS)
    print(x.value)
Example 25
def LASSO_unmix(A, b, lam=0.01):
    m = A.shape[0]
    n = A.shape[1]

    # setup problem
    x = cp.Variable(n)
    lam_cp = cp.Parameter(nonneg=True)
    lam_cp.value = lam
    objective = cp.Minimize(cp.sum_squares(A @ x - b) + lam_cp * cp.norm(x, 1))
    constraints = [0 <= x]
    prob = cp.Problem(objective, constraints)

    # find optimal solution
    loss = prob.solve()
    return x.value, loss
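
A small synthetic unmixing example (assumed data, not from the original source): recover a sparse, nonnegative abundance vector from noisy mixed measurements.

import numpy as np

np.random.seed(0)
m, n = 50, 10
A = np.abs(np.random.randn(m, n))     # nonnegative endmember matrix
x_true = np.zeros(n)
x_true[[2, 7]] = [0.6, 0.4]           # sparse ground-truth abundances
b = A @ x_true + 0.01 * np.random.randn(m)

x_hat, loss = LASSO_unmix(A, b, lam=0.05)
print(np.round(x_hat, 3), loss)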
Example 26
    def construct_controller(self):
        u = cvx.Variable((self.m, self.n_pred))
        x = cvx.Variable((self.n, self.n_pred+1))
        self.x_init = cvx.Parameter(self.n)
        objective = 0
        constraints = [x[:,0] == self.x_init]

        for k in range(self.n_pred):
            objective += cvx.quad_form(x[:,k] - self.set_pt, self.Q) + cvx.quad_form(u[:,k]+self.const_offset, self.R)
            constraints += [x[:,k+1] == self.linear_dynamics.A @ x[:,k] + self.linear_dynamics.B @ u[:,k]]
            constraints += [self.xmin <= x[:,k], x[:,k] <= self.xmax]
            constraints += [self.umin <= u[:,k], u[:,k] <= self.umax]

        objective += cvx.quad_form(x[:,self.n_pred] - self.set_pt, self.Q_n)
        self.mpc_prob = cvx.Problem(cvx.Minimize(objective), constraints)
Example 27
    def __init__(self, config, iterations):
        N = iterations
        self.N = N
        self.config = config

        # Parameters
        self.dt = cvp.Parameter()
        self.x_0 = cvp.Parameter(6)

        # Solved variables
        self.x = cvp.Variable((6, N))
        self.u = cvp.Variable((3, N))
        self.gam = cvp.Variable(N)
        self.z = cvp.Variable(N)

        # Problem 3
        self.constr = []
        self.constr = set_initial_constraints(self.constr, config, self.x,
                                              self.u, self.gam, self.z, N,
                                              self.x_0)
        self.constr = running_constraints(self.constr, config, self.x, self.u,
                                          self.gam, self.z, self.dt, N)
        self.obj = cvp.norm(self.x[0:3, N - 1] - config.q[:])
        self.problem = cvp.Problem(cvp.Minimize(self.obj), self.constr)
Example 28
    def __init__(self):
        """Currently this class is for demonstration purposes only.
        
        Consequently, the blank initialization is it for now.
        """
        self.nads = 5
        self.periods = 24

        self.cmin = cp.Parameter(self.nads,
                                 value=10 * np.ones(self.nads),
                                 nonneg=True)
        self.cmax = cp.Parameter(self.nads,
                                 value=1000 * np.ones(self.nads),
                                 nonneg=True)
        self.cpc = cp.Parameter(self.nads,
                                value=5 * np.ones(self.nads),
                                nonneg=True)

        self.seed = 123456
        np.random.seed(self.seed)

        self.ads = list(
            map(lambda i: Ad(i, self.update, self.update_constraints,
                             self.cmin, self.cmax, self.cpc),
                range(self.nads)))

        self._clickProbabilities = self._generateRandomClickProbabilities()
        self._trafficData = self._generateTrafficData()

        self.schedule = cp.Variable((self.nads, self.periods), nonneg=True)
        self.expectation = cp.multiply(self._clickProbabilities, self.schedule)

        self.prob = cp.Problem(self.objective(), self.constraints())

        self.width = 800
        self.panel_width = 350
Example 29
    def test_parameterized_cone_matrix_stuffing_with_many_constraints(self):
        self.skipTest("This benchmark takes too long.")
        m = 2000
        n = 2000
        A = cp.Parameter((m, n))
        C = cp.Parameter(m // 2)
        b = cp.Parameter(m)
        A.value = np.random.randn(m, n)
        C.value = np.random.rand(m // 2)
        b.value = np.random.randn(m)

        x = cp.Variable(n)
        cost = cp.sum(A @ x)

        constraints = [C[i] * x[i] <= b[i] for i in range(m // 2)]
        constraints.extend(
            [C[i] * x[m // 2 + i] == b[m // 2 + i] for i in range(m // 2)])

        problem = cp.Problem(cp.Minimize(cost), constraints)

        def parameterized_cone_matrix_stuffing():
            ConeMatrixStuffing().apply(problem)

        benchmark(parameterized_cone_matrix_stuffing, iters=1)
Example 30
    def test_parallel_resolve(self):
        """Test parallel resolve (to avoid hanging)"""

        np.random.seed(1)
        # This needs to work for different
        p = 10
        n = p * 10
        F = np.random.randn(n, p)
        D = np.diag(np.random.rand(n) * np.sqrt(p))
        Sigma = F.dot(F.T) + D
        gamma = 1.0
        mu = cp.Parameter(n, name='mu')
        x = cp.Variable(n)
        cost = -mu @ x + gamma * cp.quad_form(x, Sigma)
        constraints = [cp.sum(x) == 1, x >= 0]

        # Define optimizer
        problem = cp.Problem(cp.Minimize(cost), constraints)
        m = Optimizer(problem, name="portfolio")
        '''
        Sample points
        '''
        theta_bar = np.random.randn(n)
        radius = 1.0
        '''
        Train and solve
        '''

        # Training and testing data
        n_train = 1000
        n_test = 1000
        # Sample points from multivariate ball
        X_d = uniform_sphere_sample(theta_bar, radius, n=n_train)
        X_d_test = uniform_sphere_sample(theta_bar, radius, n=n_test)
        df = pd.DataFrame({'mu': list(X_d)})
        df_test = pd.DataFrame({'mu': list(X_d_test)})

        # Train and test using pytorch
        m.train(df,
                parallel=True,
                filter_strategies=True,
                n_train_trials=10,
                learner=PYTORCH)
        m.performance(df_test, parallel=True)

        # Run parallel loop again to enforce instability
        # in multiprocessing
        m.performance(df_test, parallel=True)