Example 1
def test_infeasible():
    np.random.seed(0)
    c = np.ones(1)
    b = np.array([1.0, -1.0])
    A = sparse.csc_matrix(np.ones((2, 1)))
    cone_dims = {cone_lib.EQ_DIM: 2}
    with pytest.raises(cone_prog.SolverError, match=r"Solver ecos returned status Infeasible"):
        cone_prog.solve_and_derivative(A, b, c, cone_dims, solve_method="ECOS")
Example 2
    def test_infeasible(self):
        np.random.seed(0)
        c = np.ones(1)
        b = np.array([1.0, -1.0])
        A = sparse.csc_matrix(np.ones((2, 1)))
        cone_dims = {"f": 2}
        with self.assertRaisesRegex(cone_prog.SolverError,
                                    'Solver scs returned status.*'):
            cone_prog.solve_and_derivative(A, b, c, cone_dims)
Example 3
    def test_ecos_solve(self):
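        # Solve two small problems with ECOS and check the optimality
        # conditions: primal/dual feasibility, complementary slackness,
        # and membership of s and y in the primal and dual cones.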
        np.random.seed(0)
        m = 20
        n = 10

        A, b, c, cone_dims = utils.least_squares_eq_scs_data(m, n)
        cone_dims.pop("q")
        cone_dims.pop("s")
        cone_dims.pop("ep")
        x, y, s, derivative, adjoint_derivative = cone_prog.solve_and_derivative(
            A, b, c, cone_dims, solve_method="ECOS")

        # check optimality conditions
        np.testing.assert_allclose(A @ x + s, b, atol=1e-8)
        np.testing.assert_allclose(A.T @ y + c, 0, atol=1e-8)
        np.testing.assert_allclose(s @ y, 0, atol=1e-8)
        np.testing.assert_allclose(s,
                                   cone_lib.pi(
                                       s,
                                       cone_lib.parse_cone_dict(cone_dims),
                                       dual=False),
                                   atol=1e-8)
        np.testing.assert_allclose(y,
                                   cone_lib.pi(
                                       y,
                                       cone_lib.parse_cone_dict(cone_dims),
                                       dual=True),
                                   atol=1e-8)

        x = cp.Variable(10)
        prob = cp.Problem(
            cp.Minimize(
                cp.sum_squares(np.random.randn(5, 10) @ x) +
                np.random.randn(10) @ x), [
                    cp.norm2(x) <= 1,
                    np.random.randn(2, 10) @ x == np.random.randn(2)
                ])
        A, b, c, cone_dims = utils.scs_data_from_cvxpy_problem(prob)
        x, y, s, derivative, adjoint_derivative = cone_prog.solve_and_derivative(
            A, b, c, cone_dims, solve_method="ECOS")

        # check optimality conditions
        np.testing.assert_allclose(A @ x + s, b, atol=1e-8)
        np.testing.assert_allclose(A.T @ y + c, 0, atol=1e-8)
        np.testing.assert_allclose(s @ y, 0, atol=1e-8)
        np.testing.assert_allclose(s,
                                   cone_lib.pi(
                                       s,
                                       cone_lib.parse_cone_dict(cone_dims),
                                       dual=False),
                                   atol=1e-8)
        np.testing.assert_allclose(y,
                                   cone_lib.pi(
                                       y,
                                       cone_lib.parse_cone_dict(cone_dims),
                                       dual=True),
                                   atol=1e-8)
Example 4
def test_infeasible():
    np.random.seed(0)
    c = np.ones(1)
    b = np.array([1.0, -1.0])
    A = sparse.csc_matrix(np.ones((2, 1)))
    cone_dims = {"f": 2}
    with pytest.raises(cone_prog.SolverError,
                       match=r"Solver scs returned status.*"):
        cone_prog.solve_and_derivative(A, b, c, cone_dims)
Example 5
    def test_warm_start(self):
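        # Warm-starting at a high-accuracy solution should reproduce
        # (nearly) the same point in a single iteration.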
        np.random.seed(0)
        m = 20
        n = 10
        A, b, c, cone_dims = utils.least_squares_eq_scs_data(m, n)
        x, y, s, _, _ = cone_prog.solve_and_derivative(
            A, b, c, cone_dims, eps=1e-11)
        x_p, y_p, s_p, _, _ = cone_prog.solve_and_derivative(
            A, b, c, cone_dims, warm_start=(x, y, s), max_iters=1)

        np.testing.assert_allclose(x, x_p, atol=1e-7)
        np.testing.assert_allclose(y, y_p, atol=1e-7)
        np.testing.assert_allclose(s, s_p, atol=1e-7)
Example 6
    def test_solve_and_derivative(self):
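        # Check the forward derivative against a finite-difference
        # perturbation of (A, b, c), and the adjoint derivative by
        # perturbing the data along the returned gradient of c^T x
        # and comparing the resulting change in the objective.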
        np.random.seed(0)
        m = 20
        n = 10

        A, b, c, cone_dims = utils.least_squares_eq_scs_data(m, n)
        for mode in ["lsqr", "dense"]:
            x, y, s, derivative, adjoint_derivative = cone_prog.solve_and_derivative(
                A, b, c, cone_dims, eps=1e-10, mode=mode, solve_method="SCS")

            dA = utils.get_random_like(
                A, lambda n: np.random.normal(0, 1e-6, size=n))
            db = np.random.normal(0, 1e-6, size=b.size)
            dc = np.random.normal(0, 1e-6, size=c.size)

            dx, dy, ds = derivative(dA, db, dc)

            x_pert, y_pert, s_pert, _, _ = cone_prog.solve_and_derivative(
                A + dA,
                b + db,
                c + dc,
                cone_dims,
                eps=1e-10,
                solve_method="SCS")

            np.testing.assert_allclose(x_pert - x, dx, atol=1e-8)
            np.testing.assert_allclose(y_pert - y, dy, atol=1e-8)
            np.testing.assert_allclose(s_pert - s, ds, atol=1e-8)

            x, y, s, derivative, adjoint_derivative = cone_prog.solve_and_derivative(
                A, b, c, cone_dims, eps=1e-10, mode=mode, solve_method="SCS")

            objective = c.T @ x
            dA, db, dc = adjoint_derivative(c, np.zeros(y.size),
                                            np.zeros(s.size))

            x_pert, _, _, _, _ = cone_prog.solve_and_derivative(
                A + 1e-6 * dA,
                b + 1e-6 * db,
                c + 1e-6 * dc,
                cone_dims,
                eps=1e-10,
                solve_method="SCS")
            objective_pert = c.T @ x_pert

            np.testing.assert_allclose(objective_pert - objective,
                                       1e-6 * dA.multiply(dA).sum() +
                                       1e-6 * db @ db + 1e-6 * dc @ dc,
                                       atol=1e-8)
Example 7
def _single_d_conic_(p, d, Q, G, h, A, b, T=24, sol_opt=cp.SCS, verbose=0):
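    # Solve the problem in SCS conic form with diffcp and use the adjoint
    # derivative (seeded with c) to get the sensitivity of the objective with
    # respect to the entries of b that encode the demand d; only that slice
    # of db is returned.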
    if d.shape == (T,):
        d = np.expand_dims(d, 1)

    Diff_coef_ = np.concatenate([np.eye(T), -np.eye(T)], axis=1)

    x_ = cp.Variable(3 * T)
    obj = cp.Minimize(0.5 * cp.quad_form(x_, Q) + p.T * cp.pos(Diff_coef_ * x_[0:(2 * T), 0] + d))
    ineqCon = G * x_ <= h
    eqCon = A * x_ == b
    cons = [ineqCon, eqCon]
    prob = cp.Problem(obj, cons)
    A_, b_, c_, cone_dims = scs_data_from_cvxpy_problem(prob, cp_SCS=sol_opt)

    x, y, s, derivative, adjoint_derivative = diffcp_cprog.solve_and_derivative(
        A_, b_, c_, cone_dims, eps=1e-5)

    x_hat = x[:(3 * T)]
    # dx, dy, ds = derivative(A_, b_, c_, atol=1e-4, btol=1e-4)
    # dA, db, dc = adjoint_derivative(dx, np.zeros(y.size), np.zeros(s.size))
    dA, db, dc = adjoint_derivative(c_, np.zeros(y.size), np.zeros(s.size))
    # print("c_ toward db ", db[T:2*(T)])
    # dA, db, dc = adjoint_derivative(np.ones(c_.size), np.zeros(y.size), np.zeros(s.size))
    # print("ones toward dc ", db[T:2*(T)])
    # the demand d was encoded in the Ax + s = b data as b = [0, .., d, 0, ...]
    return x_hat, db[T:2*(T)]
Example 8
    def test_threading(self):
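        # Solve 50 random problems one at a time, then again through the
        # batch interface (serial and multi-threaded); the solutions and
        # adjoint derivatives from the batch must match the individual solves.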
        np.random.seed(0)
        m = 20
        n = 10
        As, bs, cs, cone_dicts = [], [], [], []
        results = []

        for _ in range(50):
            A, b, c, cone_dims = utils.least_squares_eq_scs_data(m, n)
            As += [A]
            bs += [b]
            cs += [c]
            cone_dicts += [cone_dims]
            results.append(cone_prog.solve_and_derivative(A, b, c, cone_dims))

        for n_jobs in [1, -1]:
            xs, ys, ss, _, DT_batch = cone_prog.solve_and_derivative_batch(
                As, bs, cs, cone_dicts, n_jobs_forward=n_jobs, n_jobs_backward=n_jobs)

            for i in range(50):
                np.testing.assert_allclose(results[i][0], xs[i])
                np.testing.assert_allclose(results[i][1], ys[i])
                np.testing.assert_allclose(results[i][2], ss[i])
            
            dAs, dbs, dcs = DT_batch(xs, ys, ss)
            for i in range(50):
                dA, db, dc = results[i][-1](results[i][0], results[i][1], results[i][2])
                np.testing.assert_allclose(dA.todense(), dAs[i].todense())
                np.testing.assert_allclose(dbs[i], db)
                np.testing.assert_allclose(dcs[i], dc)
Example 9
    def test_threading(self):
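        # The batch interface should be faster than solving the same 50
        # problems serially, while returning identical solutions.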
        m = 20
        n = 10
        As, bs, cs, cone_dicts = [], [], [], []
        results = []

        serial_time = 0.0
        for _ in range(50):
            A, b, c, cone_dims = utils.least_squares_eq_scs_data(m, n)
            As += [A]
            bs += [b]
            cs += [c]
            cone_dicts += [cone_dims]
            tic = time.time()
            results.append(cone_prog.solve_and_derivative(A, b, c, cone_dims))
            toc = time.time()
            serial_time += toc - tic

        tic = time.time()
        results_thread = cone_prog.solve_and_derivative_batch(
            As, bs, cs, cone_dicts)
        toc = time.time()
        parallel_time = toc - tic

        self.assertTrue(parallel_time < serial_time)

        for i in range(50):
            np.testing.assert_allclose(results[i][0], results_thread[i][0])
            np.testing.assert_allclose(results[i][1], results_thread[i][1])
            np.testing.assert_allclose(results[i][2], results_thread[i][2])
Example 10
    def test_solve_and_derivative(self):
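        # Compare the forward derivative against a small random
        # perturbation of the problem data.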
        m = 20
        n = 10
        A, b, c, cone_dims = utils.least_squares_eq_scs_data(m, n)

        x, y, s, derivative, _ = cone_prog.solve_and_derivative(A,
                                                                b,
                                                                c,
                                                                cone_dims,
                                                                eps=1e-8)

        dA = utils.get_random_like(A,
                                   lambda n: np.random.normal(0, 1e-6, size=n))
        db = np.random.normal(0, 1e-6, size=b.size)
        dc = np.random.normal(0, 1e-6, size=c.size)

        dx, dy, ds = derivative(dA, db, dc)

        x_pert, y_pert, s_pert, _, _ = cone_prog.solve_and_derivative(
            A + dA, b + db, c + dc, cone_dims, eps=1e-8)

        np.testing.assert_allclose(x_pert - x, dx, atol=1e-6, rtol=1e-6)
Example 11
def forward_conic_format_solve_problem(Q, q, G, h, A, b, sol_opt=cp.SCS, verbose=False):
    """

    :param Q:
    :param q:
    :param G:
    :param h:
    :param A:
    :param b:
    :param sol_opt:
    :param verbose:
    :return:
    """
    nz, neq, nineq = q.shape[0], A.shape[0] if A is not None else 0, G.shape[0]

    x_ = cp.Variable(nz)
    # print("x size {}, num of ineq {}".format(x_.size, nineq))
    obj = cp.Minimize(0.5 * cp.quad_form(x_, Q) + q.T * x_)
    eqCon = A * x_ == b if neq > 0 else None
    if nineq > 0:
        slacks = cp.Variable(nineq)  # define slack variables
        ineqCon = G * x_ + slacks == h
        slacksCon = slacks >= 0
    else:
        ineqCon = slacks = slacksCon = None

    cons = [constraint for constraint in [eqCon, ineqCon, slacksCon] if constraint is not None]
    prob = cp.Problem(obj, cons)
    # The current form only accepts a CVXPY problem and the SCS solver option
    A, b, c, cone_dims = scs_data_from_cvxpy_problem(prob, sol_opt)
    # @note: after converting into SCS conic form,
    # the A, b, c here represent the general problem
    #      min  c^T x
    #    s.t.   Ax + s = b
    #           s \in \mathcal{K},
    #  where K is a cone
    # ----------------------------
    # calculate time
    # ----------------------------
    # start = time.perf_counter()
    x, y, s, derivative, adjoint_derivative = diffcp_cprog.solve_and_derivative(
        A, b, c, cone_dims, eps=1e-5)
    # end = time.perf_counter()
    # print("[DIFFCP] Compute solution and set up derivative: %.4f s." % (end - start))

    return x, y, s, derivative, adjoint_derivative, A, b, c
Example 12
def forward_single_d_conic_solve_Filter(Q, q, G, h, A, b, d, epsilon, xi, delta=0.01,
                                        T=48, p=None, sol_opt=cp.CVXOPT, verbose=False):
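    # Build a CVXPY problem with a T x T matrix variable GAMMA (a hinge penalty
    # on its nuclear norm in the objective, per-column 2-norm bounds as
    # constraints), convert it to SCS conic data, and solve it with diffcp.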
    nz, neq, nineq = q.shape[0], A.shape[0] if A is not None else 0, G.shape[0]

    if p.shape == (T,):
        p = np.expand_dims(p, 1)  # convert the price into a column vector

    if d.shape == (T,):
        d = np.expand_dims(d, 1)

    if verbose:
        print("\n inside the cvx np filter :", T, nz)
        print([part.shape for part in [Q, q, G, h, A, b]])

    x_ = cp.Variable(nz)
    # GAMMA = cp.Semidef(T)
    GAMMA = cp.Variable(rows=T, cols=T)
    # assert T == nz / 3
    # print("x size {}, num of ineq {}".format(x_.size, nineq))
    term1 = GAMMA * epsilon + d

    obj = cp.Minimize(0.5 * cp.quad_form(x_, Q) + q.T * x_ + p.T * cp.pos(term1) + cp.pos(cp.norm(GAMMA, "nuc") - xi))
    eqCon = A * x_ == b if neq > 0 else None
    prob_ineqCon = [cp.norm(GAMMA[:, i], 2) <= (d[i, 0] / abs(ut.function_normal_cdf_inv(delta))) for i in
                    range(T)]  # ut.function_normal_cdf_inv(delta)

    eqCon_sdp = None  # the nuclear-norm (SDP) constraint is moved into the objective; was: eqCon_sdp = cp.norm(GAMMA, "nuc") == xi
    if nineq > 0:
        slacks = cp.Variable(nineq)  # define slack variables
        ineqCon = G * x_ + slacks == h
        slacksCon = slacks >= 0
    else:
        ineqCon = slacks = slacksCon = None
    cons_collected = [eqCon, eqCon_sdp, ineqCon] + prob_ineqCon + [slacksCon]

    cons = [constraint for constraint in cons_collected if constraint is not None]
    prob = cp.Problem(obj, cons)

    A_, b_, c_, cone_dims = scs_data_from_cvxpy_problem(prob, cp_SCS=sol_opt)

    x, y, s, derivative, adjoint_derivative = diffcp_cprog.solve_and_derivative(
        A_, b_, c_, cone_dims, eps=1e-5)
    # end = time.perf_counter()
    # print("[DIFFCP] Compute solution and set up derivative: %.4f s." % (end - start))

    return x, y, s, derivative, adjoint_derivative, A_, b_, c_
Example 13
def _convex_formulation_w_GAMMA_d_conic(p, GAMMA, d, epsilon, y_onehot, Q, G, h, A, b, T, sol_opt=cp.SCS, verbose=True):
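    # Same conic pipeline as above: form the problem, convert to SCS data,
    # solve with diffcp, and return the primal solution together with the
    # full sensitivity db of the objective with respect to b.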

    cat_vec = None
    if epsilon.shape == (T,) and y_onehot.shape == (2,):
        cat_vec = np.concatenate([epsilon, y_onehot], axis=0)
        cat_vec = np.expand_dims(cat_vec, 1)
        # epsilon = np.expand_dims(epsilon, 1)

    if d.shape == (T,):
        d = np.expand_dims(d, 1)

    x_ = cp.Variable(3 * T)

    Diff_coef_ = np.concatenate([np.eye(T), -np.eye(T)], axis=1)

    obj = cp.Minimize(0.5 * cp.quad_form(x_, Q) + p.T * cp.pos(Diff_coef_ * x_[0:(2 * T), 0] + d + GAMMA.dot(cat_vec)))
    ineqCon = G * x_ <= h
    eqCon = A * x_ == b
    cons = [ineqCon, eqCon]
    prob = cp.Problem(obj, cons)

    A_, b_, c_, cone_dims = scs_data_from_cvxpy_problem(prob, cp_SCS=sol_opt)

    # print("A:", A_.shape, A_)
    # print("b:", b_.shape, np.expand_dims(b_, 1))
    # tau = 96
    # print("b[:{:d}] == {}".format(tau, b_[:tau]))
    # print("c:", c_.shape, np.expand_dims(c_, 1))
    # print("==="*20)
    # print("price:\n", p)
    # print("=="*20)
    # print("d_tilde:\n", d + GAMMA.dot(cat_vec))

    x, y, s, derivative, adjoint_derivative = diffcp_cprog.solve_and_derivative(
        A_, b_, c_, cone_dims, eps=1e-5)

    x_hat = x[:3*T]

    dA, db, dc = adjoint_derivative(c_, np.zeros(y.size), np.zeros(s.size))
    print(dA.shape, db.shape, dc.shape)
    return x_hat, db
Example 14
import numpy as np
from scipy import sparse
from scipy.sparse import linalg as splinalg
import time

import diffcp.cone_program as cone_prog
import diffcp.cones as cone_lib
import diffcp.utils as utils


m = 100
n = 50

A, b, c, cone_dims = utils.least_squares_eq_scs_data(m, n)
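# Time the forward and adjoint derivative operators (averaged over 10 runs)
# in both "lsqr" and "dense" modes.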
for mode in ["lsqr", "dense"]:
    x, y, s, derivative, adjoint_derivative = cone_prog.solve_and_derivative(
        A, b, c, cone_dims, eps=1e-10, mode=mode)

    dA = utils.get_random_like(
        A, lambda n: np.random.normal(0, 1e-2, size=n))
    db = np.random.normal(0, 1e-2, size=b.size)
    dc = np.random.normal(0, 1e-2, size=c.size)

    derivative_time = 0.0
    for _ in range(10):
        tic = time.time()
        dx, dy, ds = derivative(dA, db, dc)
        toc = time.time()
        derivative_time += (toc - tic) / 10

    adjoint_derivative_time = 0.0
    for _ in range(10):