# Example 1
    def test_affine(self) -> None:
        """Gradients of affine atoms: negation, scaling, division,
        indexing, hstack, and cumsum.
        """
        # Scalar negation: d(-a)/da == -1.
        expr = -self.a
        self.a.value = 2
        self.assertAlmostEqual(expr.grad[self.a], -1)

        # Scalar scaling: d(2a)/da == 2.
        expr = 2 * self.a
        self.a.value = 2
        self.assertAlmostEqual(expr.grad[self.a], 2)

        # Scalar division: d(a/2)/da == 0.5.
        expr = self.a / 2
        self.a.value = 2
        self.assertAlmostEqual(expr.grad[self.a], 0.5)

        # Vector negation: Jacobian is -I.
        expr = -self.x
        self.x.value = [3, 4]
        expected = -np.eye(2)
        self.assertItemsAlmostEqual(expr.grad[self.x].toarray(), expected)

        # Matrix negation: Jacobian is -I over the flattened entries.
        expr = -self.A
        self.A.value = [[1, 2], [3, 4]]
        expected = -np.eye(4)
        self.assertItemsAlmostEqual(expr.grad[self.A].toarray(), expected)

        # Indexing: gradient selects the single flattened entry.
        expr = self.A[0, 1]
        self.A.value = [[1, 2], [3, 4]]
        expected = np.zeros((4, 1))
        expected[2] = 1
        self.assertItemsAlmostEqual(expr.grad[self.A].toarray(), expected)

        # hstack: each input's gradient picks out its own slice.
        z = Variable(3)
        expr = cp.hstack([self.x, z])
        self.x.value = [1, 2]
        z.value = [1, 2, 3]
        expected = np.hstack([np.eye(2), np.zeros((2, 3))])
        self.assertItemsAlmostEqual(expr.grad[self.x].toarray(), expected)

        expected = np.hstack([np.zeros((3, 2)), np.eye(3)])
        self.assertItemsAlmostEqual(expr.grad[z].toarray(), expected)

        # cumsum along the default axis: triangular ones Jacobian.
        expr = cp.cumsum(self.x)
        self.x.value = [1, 2]
        expected = np.triu(np.ones((2, 2)))
        self.assertItemsAlmostEqual(expr.grad[self.x].toarray(), expected)

        # cumsum along axis=1 of a column vector is the identity.
        expr = cp.cumsum(self.x[:, None], axis=1)
        self.x.value = [1, 2]
        expected = np.eye(2)
        self.assertItemsAlmostEqual(expr.grad[self.x].toarray(), expected)
# Example 2
 def degree_sequence(self, n, matrix):
     """Collect cumulative sums of ``matrix`` for i = 1..n-1 and sort them.

     NOTE(review): ``cvx.cumsum(matrix, i)`` treats the second argument
     as the axis; for n > 2 this passes axis >= 2, which cvxpy rejects
     for a 2-D expression — confirm the intended argument.
     NOTE(review): ``np.sort`` over a list of cvxpy expressions relies
     on comparing symbolic expressions, which likely fails or gives a
     meaningless order — verify this is meant to run on numeric values.
     """
     # for the variable matrix i'm trying to take the cumulative sum of a matrix that doesn't exist
     degree_sequence = []
     print(matrix)
     for i in range(1, n):
         print(i)
         degree_sequence += [cvx.cumsum(matrix, i)]
     return np.sort(degree_sequence)  #sorted is important
# Example 3
    def test_scalar_sum(self) -> None:
        """Scalar reductions (sum, cumsum) of 1/x solve as QCPs to 0."""
        x = cp.Variable(pos=True)
        # Both atoms reduce the scalar quasiconvex expression 1/x; the
        # infimum in each case is 0.
        for atom in (cp.sum, cp.cumsum):
            problem = cp.Problem(cp.Minimize(atom(1 / x)))
            problem.solve(SOLVER, qcp=True)
            self.assertAlmostEqual(problem.value, 0, places=3)
# Example 4
def solve_min_cost(drivers, requests):
    """Assign drivers to requests minimizing total haversine travel cost.

    Solves a 0/1 assignment problem: x[i, j] == 1 means driver i serves
    request j (drive to the store, then to the drop-off).  Each driver
    serves at most one request, each request gets at most one driver,
    and exactly min(n_r, n_d) assignments are made.

    :param drivers: sequence of objects with a ``.pos`` attribute
    :param requests: sequence of objects with ``.pos`` and ``.store_pos``
    :return: (n_d, n_r) 0/1 int array of assignments, or None when the
             instance is trivial (<= 1 driver or request) or the solver
             fails to produce a solution.
    """
    n_d = len(drivers)
    n_r = len(requests)
    if n_d <= 1 or n_r <= 1:
        return None

    # Cost of driver d taking request r: leg to the store plus leg from
    # the store to the drop-off point.
    dist = [[
        haversine(d.pos, r.store_pos) + haversine(r.store_pos, r.pos)
        for r in requests
    ] for d in drivers]
    c = np.asarray(dist)
    x = cvx.Variable((n_d, n_r), integer=True)
    objective = cvx.Minimize(dot(c, x))
    constraints = [
        x <= 1, x >= 0,
        # With x >= 0, bounding every partial (cumulative) sum by 1 is
        # equivalent to bounding each column / row total by 1.
        cvx.cumsum(x) <= 1,
        cvx.cumsum(x, axis=1) <= 1,
        cvx.sum(x) == min(n_r, n_d)
    ]
    p = cvx.Problem(objective, constraints)
    p.solve(solver=cvx.ECOS_BB)
    # Fix: on solver failure / infeasibility x.value is None, and
    # np.around(None) raised TypeError.  Fail soft like the trivial case.
    if x.value is None:
        return None
    x_val = np.around(x.value).astype(int)

    return x_val
# Example 5
def solve(Q, C, D):
    """Solve a storage/charge scheduling problem.

    :param Q: upper bound on the state q (capacity)
    :param C: upper bound on the charge rate c
    :param D: discharge bound (c >= -D)
    :return: (q.value, c.value, problem.value)

    NOTE(review): relies on module-level globals not visible in this
    chunk: ``T`` (horizon length), ``u`` (baseline load vector) and
    ``p`` (price vector) — confirm where they are defined.
    """
    q1 = cp.Variable(1)
    c = cp.Variable(T)
    # State trajectory: q[0] is the free initial level q1; q[t] for
    # t >= 1 is the running sum of charges c[0..t-1].
    # NOTE(review): the cumsum entries do not include q1, so q is NOT
    # q1 + cumsum(c); if q is meant to be the stored energy this looks
    # like a missing "+ q1" term — confirm intended semantics.
    q = cp.hstack([q1, cp.cumsum(c)[:-1]])

    constraints = [
        q <= Q,  # capacity
        q >= 0,  # non-negative state
        c <= C,  # max charge rate
        c >= -D,  # max discharge rate
        q[-1] + c[-1] == q[0],  # periodicity: end state returns to start
        u + c >= 0,  # net consumption stays non-negative
    ]

    # Minimize total cost of net consumption at prices p.
    obj = cp.Minimize(p @ (u + c))
    problem = cp.Problem(obj, constraints)
    problem.solve()
    return q.value, c.value, problem.value
# Example 6
def simultaneous_planning_cvx(S,
                              A,
                              D,
                              t_max=2000,
                              delta=5,
                              file_suffix='',
                              save_dir=''):
    """Plan cumulative dictionary activations over time via CVXPY.

    Per-timestep increments ``y`` (n_dict_elem x t_max) accumulate into
    ``x``; the objective trades reconstruction error of target ``S``
    against a per-element variance penalty ``var_dict``, averaged over
    the stopping-time distribution ``p_tau``, with at most ``delta``
    total increment per timestep.

    NOTE(review): ``file_suffix`` and ``save_dir`` are unused in this
    chunk and nothing is returned — results are only printed.
    NOTE(review): ``cvxpy.Variable(rows, cols)``, ``cvxpy.sum_entries``
    and ``*`` as a matrix product are the pre-1.0 CVXPY API; this will
    not run on cvxpy >= 1.0 without porting (``Variable((r, c))``,
    ``cvxpy.sum``, ``@``).
    """

    # Setup problem parameters
    # p_tau: stopping-time distribution — uniform with the first 5
    # timesteps zeroed.  (The original note said "between 500 and 2000"
    # but the code zeroes only 5 entries — confirm which was intended.)
    p_tau = np.ones(t_max)
    p_tau[:5] = 0
    p_tau = p_tau / np.sum(p_tau)

    n_dict_elem = D.shape[1]

    # compute variance of dictionary elements:
    # element-wise D * (1 - D), weighted by squared column norms of A.
    stas_norm = np.expand_dims(np.sum(A**2, 0), 0)  # 1 x # cells
    var_dict = np.squeeze(np.dot(stas_norm, D * (1 - D)))  # # dict

    # Construct the problem.

    y = cvxpy.Variable(n_dict_elem, t_max)

    # x[:, t] = sum of increments y[:, 0..t].
    x = cvxpy.cumsum(y, 1)
    S_expanded = np.repeat(np.expand_dims(S, 1), t_max, 1)
    objective = cvxpy.Minimize((cvxpy.sum_entries(
        (S_expanded - A * (D * x))**2, 0) + var_dict * x) * p_tau)
    constraints = [
        0 <= y,
        # Per-timestep budget: total increment at each t is at most delta.
        cvxpy.sum_entries(y, 0).T <= delta * np.ones((1, t_max)).T
    ]
    prob = cvxpy.Problem(objective, constraints)

    # The optimal objective is returned by prob.solve().
    result = prob.solve(verbose=True)
    # The optimal value for x is stored in x.value.
    print(x.value)
    # The optimal Lagrange multiplier for a constraint
    # is stored in constraint.dual_value.
    print(constraints[0].dual_value)
# Example 7
def log_sum_exp_axis_1(x):
    """Row-wise log-sum-exp of ``x`` (reduction along axis 1)."""
    reduced = cp.log_sum_exp(x, axis=1)
    return reduced


# Atom, solver pairs known to fail.
# Each entry is (atom callable, solver); presumably consumed by the test
# harness to skip or expect failure for these combinations — confirm.
KNOWN_SOLVER_ERRORS = [
    # See https://github.com/cvxgrp/cvxpy/issues/249
    (log_sum_exp_axis_0, CVXOPT),
    (log_sum_exp_axis_1, CVXOPT),
    (cp.kl_div, CVXOPT),
]

atoms_minimize = [
    (cp.abs, (2, 2), [[[-5, 2], [-3, 1]]], Constant([[5, 2], [3, 1]])),
    (lambda x: cp.cumsum(x, axis=1), (2, 2), [[[-5, 2], [-3, 1]]],
     Constant([[-5, 2], [-8, 3]])),
    (lambda x: cp.cumsum(x, axis=0), (2, 2), [[[-5, 2], [-3, 1]]],
     Constant([[-5, -3], [-3, -2]])),
    (lambda x: cp.cummax(x, axis=1), (2, 2), [[[-5, 2], [-3, 1]]],
     Constant([[-5, 2], [-3, 2]])),
    (lambda x: cp.cummax(x, axis=0), (2, 2), [[[-5, 2], [-3, 1]]],
     Constant([[-5, 2], [-3, 1]])),
    (cp.diag, (2, ), [[[-5, 2], [-3, 1]]], Constant([-5, 1])),
    (cp.diag, (2, 2), [[-5, 1]], Constant([[-5, 0], [0, 1]])),
    (cp.exp, (2, 2), [[[1, 0], [2, -1]]],
     Constant([[math.e, 1], [math.e**2, 1.0 / math.e]])),
    (cp.huber, (2, 2), [[[0.5, -1.5], [4, 0]]], Constant([[0.25, 2], [7, 0]])),
    (lambda x: cp.huber(x, 2.5), (2, 2), [[[0.5, -1.5], [4, 0]]],
     Constant([[0.25, 2.25], [13.75, 0]])),
    (cp.inv_pos, (2, 2), [[[1, 2], [3, 4]]],
# Example 8
from veh_speed_sched_data import n, a, b, c, d, smin, smax, tau_min, tau_max
# Fix data shapes: convert to arrays, then flatten (m, 1) columns to (m,).
d = np.asarray(d)
smin = np.asarray(smin)
smax = np.asarray(smax)
tau_min = np.asarray(tau_min)
tau_max = np.asarray(tau_max)

d = d[:, 0]
smin = smin[:, 0]
smax = smax[:, 0]
tau_min = tau_min[:, 0]
tau_max = tau_max[:, 0]

# k[i]: travel time on segment i; h[i]: cumulative (arrival) time after
# segment i.
k = cp.Variable(n)
h = cp.cumsum(k)
# Per-segment fuel: with speed s = d/k this equals (a*s^2 + b*s + c)*k,
# i.e. a quadratic-in-speed fuel rate integrated over the segment time.
phi = a * cp.multiply(cp.inv_pos(k), d**2) + c * k + cp.multiply(b, d)
obj = cp.Minimize(cp.sum(phi))
constraints = [
    # Speed limits smin <= d/k <= smax, written linearly in k.
    cp.multiply(smin, k) <= d,
    cp.multiply(smax, k) >= d,
    # Arrival-time windows on the cumulative times.
    tau_min <= h,
    tau_max >= h,
]
problem = cp.Problem(obj, constraints)
problem.solve()
print(f"status: {problem.status}")
if problem.status == 'optimal':
    print(f"Total fuel: {problem.value}")
    s = d / k.value
    plt.step(np.arange(n), s)
def __total_variation_regularized_derivative__(x,
                                               dt,
                                               N,
                                               gamma,
                                               solver='MOSEK'):
    """
    Use convex optimization (cvxpy) to solve for the Nth total variation regularized derivative.
    Default solver is MOSEK: https://www.mosek.com/

    :param x: (np.array of floats, 1xN) time series to differentiate
    :param dt: (float) time step
    :param N: (int) 1, 2, or 3, the Nth derivative to regularize
    :param gamma: (float) regularization parameter
    :param solver: (string) Solver to use. Solver options include: 'MOSEK' and 'CVXOPT',
                            in testing, 'MOSEK' was the most robust.
    :return: x_hat    : estimated (smoothed) x
             dxdt_hat : estimated derivative of x
    """

    # Normalize to zero mean / unit scale so gamma is scale-free; the
    # normalization is undone before returning.
    mean = np.mean(x)
    std = np.std(x)
    x = (x - mean) / std

    # Define the variables for the highest order derivative and the integration constants.
    # Layout: var[:N] are the N integration constants, var[N:] are the
    # samples of the (unit-step) Nth derivative.
    var = cvxpy.Variable(len(x) + N)

    # Recursively integrate the highest order derivative to get back to the position.
    # Each pass is a discrete integral (cumulative sum with unit step)
    # plus one integration constant var[i].
    derivatives = [var[N:]]
    for i in range(N):
        d = cvxpy.cumsum(derivatives[-1]) + var[i]
        derivatives.append(d)

    # Compare the recursively integration position to the noisy position
    sum_squared_error = cvxpy.sum_squares(derivatives[-1] - x)

    # Total variation regularization on the highest order derivative
    r = cvxpy.sum(gamma * cvxpy.tv(derivatives[0]))

    # Set up and solve the optimization problem
    obj = cvxpy.Minimize(sum_squared_error + r)
    prob = cvxpy.Problem(obj)
    prob.solve(solver=solver)

    # Recursively calculate the value of each derivative
    # (numeric mirror of the symbolic integration above).
    final_derivative = var.value[N:]
    derivative_values = [final_derivative]
    for i in range(N):
        d = np.cumsum(derivative_values[-1]) + var.value[i]
        derivative_values.append(d)
    # Convert unit-step sums to real time units: entry i is divided by
    # dt**(N - i), so the last entry (the position) is unscaled.
    for i, _ in enumerate(derivative_values):
        derivative_values[i] = derivative_values[i] / (dt**(N - i))

    # Extract the velocity and smoothed position
    dxdt_hat = derivative_values[-2]
    x_hat = derivative_values[-1]

    # Midpoint-average the derivative samples, then extrapolate one
    # trailing sample so the output keeps the original length.
    dxdt_hat = (dxdt_hat[0:-1] + dxdt_hat[1:]) / 2
    ddxdt_hat_f = dxdt_hat[-1] - dxdt_hat[-2]
    dxdt_hat_f = dxdt_hat[-1] + ddxdt_hat_f
    dxdt_hat = np.hstack((dxdt_hat, dxdt_hat_f))

    # fix first point by linear extrapolation from samples 1 and 2
    d = dxdt_hat[2] - dxdt_hat[1]
    dxdt_hat[0] = dxdt_hat[1] - d

    # Undo normalization: position gets mean and scale back, the
    # derivative only the scale.
    return x_hat * std + mean, dxdt_hat * std