Example #1
def log_normcdf(x):
    """Elementwise log of the cumulative distribution function of a standard normal random variable.

    The implementation is a quadratic approximation with modest accuracy over [-4, 4].
    """
    A = scipy.sparse.diags(
        np.sqrt(
            [
                0.02301291,
                0.08070214,
                0.16411522,
                0.09003495,
                0.08200854,
                0.01371543,
                0.04641081,
            ]
        )
    )
    b = np.array([[3.0, 2.0, 1.0, 0.0, -1.0, -2.5, -3.5]]).reshape(-1, 1)

    x = Expression.cast_to_const(x)
    flat_x = reshape(x, (1, x.size))

    y = A @ (b @ np.ones(flat_x.shape) - np.ones(b.shape) @ flat_x)
    out = -sum_(maximum(y, 0) ** 2, axis=0)

    return reshape(out, x.shape)
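A minimal accuracy check, assuming a CVXPY release that exposes this atom as cp.log_normcdf and using SciPy's log_ndtr as the reference:

import cvxpy as cp
import numpy as np
from scipy.special import log_ndtr

x = np.linspace(-4, 4, 9)
approx = cp.log_normcdf(x).value              # the quadratic approximation above
print(np.max(np.abs(approx - log_ndtr(x))))   # modest error on [-4, 4]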
Example #2
def gm(t, x, y):
    length = t.size
    return SOC(t=reshape(x + y, (length, )),
               X=vstack(
                   [reshape(x - y, (1, length)),
                    reshape(2 * t, (1, length))]),
               axis=0)
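The SOC above encodes t_i**2 <= x_i * y_i (together with x_i + y_i >= 0), the constraint behind the elementwise geometric mean: ||(x_i - y_i, 2 t_i)||_2 <= x_i + y_i is algebraically equivalent to it. A quick numeric check of that identity:

import numpy as np

x, y = 3.0, 2.0
t = np.sqrt(x * y)                    # largest feasible t
lhs = np.linalg.norm([x - y, 2 * t])
print(np.isclose(lhs, x + y))         # True; smaller t gives strict inequality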
Example #3
def log_normcdf(x):  # noqa: E501
    """Elementwise log of the cumulative distribution function of a standard normal random variable.

    The implementation is a quadratic approximation with modest accuracy over [-4, 4].
    For details on the nature of the approximation, refer to
    `CVXPY GitHub PR #1224 <https://github.com/cvxpy/cvxpy/pull/1224#issue-793221374>`_.

    .. note::

        SciPy's analog of ``log_normcdf`` is called `log_ndtr <https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.log_ndtr.html>`_.
        We opted not to use that name because its meaning would not be obvious to the casual user.
    """
    A = scipy.sparse.diags(
        np.sqrt(
            [
                0.02301291,
                0.08070214,
                0.16411522,
                0.09003495,
                0.08200854,
                0.01371543,
                0.04641081,
            ]
        )
    )
    b = np.array([[3.0, 2.0, 1.0, 0.0, -1.0, -2.5, -3.5]]).reshape(-1, 1)

    x = Expression.cast_to_const(x)
    flat_x = reshape(x, (1, x.size))

    y = A @ (b @ np.ones(flat_x.shape) - np.ones(b.shape) @ flat_x)
    out = -sum_(maximum(y, 0) ** 2, axis=0)

    return reshape(out, x.shape)
Example #4
    def test_reshape_with_lists(self) -> None:
        n = 2
        a = Variable([n, n])
        b = Variable(n**2)
        c = reshape(b, [n, n])
        self.assertEqual((a + c).shape, (n, n))

        d = reshape(b, (n, n))
        self.assertEqual((a + d).shape, (n, n))
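A hedged standalone version of what this test checks, namely that list and tuple shapes are interchangeable:

import cvxpy as cp

b = cp.Variable(4)
print(cp.reshape(b, [2, 2]).shape)  # (2, 2)
print(cp.reshape(b, (2, 2)).shape)  # (2, 2)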
Example #5
    def get_special_slice(expr, key):
        """Indexing using logical indexing or a list of indices.

        Parameters
        ----------
        expr : Expression
            The expression being indexed/sliced into.
    key : tuple
        A tuple of ndarrays or lists of indices.

    Returns
        -------
        Expression
            An expression representing the index/slice.
        """
        expr = index.cast_to_const(expr)
        # Order the entries of expr and select them using key.
        idx_mat = np.arange(expr.size[0]*expr.size[1])
        idx_mat = np.reshape(idx_mat, expr.size, order='F')
        select_mat = idx_mat[key]
        if select_mat.ndim == 2:
            final_size = select_mat.shape
        else:  # Always cast 1d arrays as column vectors.
            final_size = (select_mat.size, 1)
        select_vec = np.reshape(select_mat, select_mat.size, order='F')
        # Select the chosen entries from expr.
        identity = sp.eye(expr.size[0]*expr.size[1]).tocsc()
        return reshape(identity[select_vec]*vec(expr), *final_size)
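A sketch of the behaviour this helper implements, written against the public indexing interface of a modern CVXPY (which routes list keys through special indexing):

import cvxpy as cp
import numpy as np

X = cp.Variable((3, 3))
X.value = np.arange(9.0).reshape(3, 3)
rows = X[[2, 0]]        # fancy indexing with a list of row indices
print(rows.shape)       # (2, 3)
print(rows.value)       # rows 2 and 0 of X, in that order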
Example #6
def scaled_lower_tri(matrix):
    """Returns an expression representing the lower triangular entries

    Scales the strictly lower triangular entries by sqrt(2), as required
    by SCS.

    Parameters
    ----------
    matrix : Expression
        A 2-dimensional CVXPY expression.

    Returns
    -------
    Expression
        An expression representing the (scaled) lower triangular part of
        the supplied matrix expression.
    """
    rows = cols = matrix.shape[0]
    entries = rows * (cols + 1)//2

    row_arr = np.arange(0, entries)

    lower_diag_indices = np.tril_indices(rows)
    col_arr = np.sort(np.ravel_multi_index(lower_diag_indices, (rows, cols), order='F'))

    val_arr = np.zeros((rows, cols))
    val_arr[lower_diag_indices] = np.sqrt(2)
    np.fill_diagonal(val_arr, 1)
    val_arr = np.ravel(val_arr, order='F')
    val_arr = val_arr[np.nonzero(val_arr)]

    shape = (entries, rows*cols)
    coeff = Constant(sp.csc_matrix((val_arr, (row_arr, col_arr)), shape))
    vectorized_matrix = reshape(matrix, (rows*cols, 1))
    return coeff * vectorized_matrix
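A plain NumPy illustration of the scaling convention SCS expects (off-diagonal lower-triangular entries multiplied by sqrt(2)); this only illustrates the scaling, not the column-major ordering used above:

import numpy as np

M = np.array([[1.0, 2.0],
              [2.0, 3.0]])
i, j = np.tril_indices(2)
scale = np.where(i == j, 1.0, np.sqrt(2))
print(scale * M[i, j])   # approximately [1.0, 2.83, 3.0]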
def vec_to_upper_tri(expr, strict=False):
    expr = Expression.cast_to_const(expr)
    ell = expr.shape[0]
    if strict:
        # n * (n-1)/2 == ell
        n = ((8 * ell + 1)**0.5 + 1) // 2
    else:
        # n * (n+1)/2 == ell
        n = ((8 * ell + 1)**0.5 - 1) // 2
    n = int(n)
    # form a matrix P, of shape (n**2, ell).
    #       the i-th block of n rows of P gives the entries of the i-th row
    #       of the upper-triangular matrix associated with expr.
    # compute expr2 = P @ expr
    # compute expr3 = reshape(expr2, shape=(n, n)).T
    #   expr3 is the matrix formed by reading length-n blocks of expr2,
    #   and letting each block form a row of expr3.
    P_rows = []
    P_row = 0
    for mat_row in range(n):
        entries_in_row = n - mat_row
        if strict:
            entries_in_row -= 1
        P_row += n - entries_in_row  # these are zeros
        P_rows.extend(range(P_row, P_row + entries_in_row))
        P_row += entries_in_row
    P_cols = np.arange(ell)
    P_vals = np.ones(P_cols.size)
    P = csc_matrix((P_vals, (P_rows, P_cols)), shape=(n**2, ell))
    expr2 = P @ expr
    expr3 = reshape(expr2, (n, n)).T
    return expr3
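A quick numeric check of the vector-to-upper-triangle mapping, assuming the imports this snippet relies on (numpy as np, scipy.sparse's csc_matrix, and CVXPY's Expression and reshape) are in scope:

U = vec_to_upper_tri(np.arange(1.0, 7.0))  # [1, 2, 3, 4, 5, 6]
print(U.value)
# [[1. 2. 3.]
#  [0. 4. 5.]
#  [0. 0. 6.]]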
Example #8
def diag(expr):
    """Extracts the diagonal from a matrix or makes a vector a diagonal matrix.

    Parameters
    ----------
    expr : Expression or numeric constant
        A vector or square matrix.

    Returns
    -------
    Expression
        An Expression representing the diagonal vector/matrix.
    """
    expr = AffAtom.cast_to_const(expr)
    if expr.is_vector():
        if expr.size[1] == 1:
            return diag_vec(expr)
        # Convert a row vector to a column vector.
        else:
            expr = reshape(expr, expr.size[1], 1)
            return diag_vec(expr)
    elif expr.size[0] == expr.size[1]:
        return diag_mat(expr)
    else:
        raise ValueError("Argument to diag must be a vector or square matrix.")
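A hedged usage sketch of the public atom this helper backs, using a modern CVXPY where it is exposed as cp.diag:

import cvxpy as cp
import numpy as np

v = cp.Constant(np.array([1.0, 2.0, 3.0]))
D = cp.diag(v)               # vector -> diagonal matrix
print(D.value)
print(cp.diag(D).value)      # matrix -> its diagonal: [1. 2. 3.]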
def var_cone_canon(expr, args):
    """Expand implicit constraints on variable.
    """
    # Convert attributes into constraints.
    new_attr = expr.attributes.copy()
    for key in ['nonneg', 'nonpos', 'symmetric', 'PSD', 'NSD']:
        if new_attr[key]:
            new_attr[key] = False

    if expr.is_symmetric():
        n = expr.shape[0]
        shape = (n * (n + 1) // 2, 1)
        upper_tri = Variable(shape, var_id=expr.id, **new_attr)
        fill_coeff = Constant(upper_tri_to_full(n))
        full_mat = fill_coeff * upper_tri
        obj = reshape(full_mat, (n, n))
    else:
        obj = Variable(expr.shape, var_id=expr.id, **new_attr)

    constr = []
    if expr.is_nonneg():
        constr.append(obj >= 0)
    elif expr.is_nonpos():
        constr.append(obj <= 0)
    elif expr.attributes['PSD']:
        constr.append(obj >> 0)
    elif expr.attributes['NSD']:
        constr.append(obj << 0)
    return (obj, constr)
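The attribute expansion above is what backs variable declarations like the following; a minimal sketch, with the solve left out because it needs an SDP-capable solver such as SCS:

import cvxpy as cp

X = cp.Variable((3, 3), PSD=True)      # symmetric + PSD attributes
prob = cp.Problem(cp.Minimize(cp.trace(X)), [X[0, 1] == 1])
print(prob.is_dcp())                   # True
# prob.solve()                         # requires a semidefinite-capable solver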
Example #10
def sum_canon(expr, args):
    X = args[0]
    if expr.axis is None:
        summation = explicit_sum(X)
        canon, _ = add_canon(summation, summation.args)
        return reshape(canon, expr.shape), []

    if expr.axis == 0:
        X = X.T

    rows = []
    for i in range(X.shape[0]):
        summation = explicit_sum(X[i])
        canon, _ = add_canon(summation, summation.args)
        rows.append(canon)
    canon = hstack(rows)
    return reshape(canon, expr.shape), []
def mulexpression_canon(expr, args):
    lhs = args[0]
    rhs = args[1]
    lhs_shape, rhs_shape, _ = mul_shapes_promote(lhs.shape, rhs.shape)
    lhs = reshape(lhs, lhs_shape)
    rhs = reshape(rhs, rhs_shape)
    rows = []
    # TODO(akshayka): Parallelize this for large matrices.
    for i in range(lhs.shape[0]):
        row = []
        for j in range(rhs.shape[1]):
            arr = hstack([lhs[i, k] + rhs[k, j] for k in range(lhs.shape[1])])
            row.append(log_sum_exp(arr))
        rows.append(row)
    mat = bmat(rows)
    if mat.shape != expr.shape:
        mat = reshape(mat, expr.shape)
    return mat, []
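This canonicalization relies on the log-domain identity log(sum_k exp(L[i, k] + R[k, j])) == log((exp(L) @ exp(R))[i, j]); a quick numeric check:

import numpy as np
from scipy.special import logsumexp

L = np.log(np.array([[1.0, 2.0], [3.0, 4.0]]))
R = np.log(np.array([[5.0, 6.0], [7.0, 8.0]]))
lhs = logsumexp(L[0, :] + R[:, 1])
rhs = np.log((np.exp(L) @ np.exp(R))[0, 1])
print(np.isclose(lhs, rhs))  # True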
Example #12
def special_index_canon(expr, args):
    select_mat = expr._select_mat
    final_shape = expr._select_mat.shape
    select_vec = np.reshape(select_mat, select_mat.size, order='F')
    # Select the chosen entries from expr.
    arg = args[0]
    identity = sp.eye(arg.size).tocsc()
    lowered = reshape(identity[select_vec] * vec(arg), final_shape)
    return lowered, []
Example #13
def add_canon(expr, args):
    if expr.is_scalar():
        return log_sum_exp(hstack(args)), []

    rows = []
    summands = [promote(s, expr.shape) if s.is_scalar() else s for s in args]
    if len(expr.shape) == 1:
        for i in range(expr.shape[0]):
            row = []
            row.append(
                log_sum_exp(hstack([summand[i] for summand in summands])))
            rows.append(row)
        return reshape(bmat(rows), expr.shape), []
    else:
        for i in range(expr.shape[0]):
            row = []
            for j in range(expr.shape[1]):
                row.append(
                    log_sum_exp(hstack([summand[i, j]
                                        for summand in summands])))
            rows.append(row)
        return reshape(bmat(rows), expr.shape), []
Example #14
def vec(X):
    """Flattens the matrix X into a vector in column-major order.

    Parameters
    ----------
    X : Expression or numeric constant
        The matrix to flatten.

    Returns
    -------
    Expression
        An Expression representing the flattened matrix.
    """
    X = Expression.cast_to_const(X)
    return reshape(X, (X.size, ))
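A quick check of the column-major flattening, assuming a CVXPY version in which cp.vec defaults to Fortran (column-major) order, as this snippet does:

import cvxpy as cp
import numpy as np

X = cp.Constant(np.array([[1.0, 2.0], [3.0, 4.0]]))
print(cp.vec(X).value)  # [1. 3. 2. 4.]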
Example #15
    def grad(self):
        """Gives the (sub/super)gradient of the expression w.r.t. each variable.

        Matrix expressions are vectorized, so the gradient is a matrix.
        None indicates variable values unknown or outside domain.

        Returns:
            A map of variable to SciPy CSC sparse matrix or None.
        """
        select_vec = np.reshape(self._select_mat,
                                self._select_mat.size,
                                order='F')
        identity = sp.eye(self.args[0].size).tocsc()
        lowered = reshape(identity[select_vec] @ vec(self.args[0]),
                          self._shape)
        return lowered.grad
Example #16
def vec(X):
    """Flattens the matrix X into a vector in column-major order.

    Parameters
    ----------
    X : Expression or numeric constant
        The matrix to flatten.

    Returns
    -------
    Expression
        An Expression representing the flattened matrix.
    """
    X = Expression.cast_to_const(X)

    return reshape(X, X.size[0]*X.size[1], 1)
Example #17
def pow_nd_canon(con, args):
    """
    con : PowConeND
        We can extract metadata from this.
        For example, con.alpha and con.axis.
    args : tuple of length two
        W,z = args[0], args[1]
    """
    alpha, axis = con.get_data()
    alpha = alpha.value
    W, z = args
    if axis == 1:
        W = W.T
        alpha = alpha.T
    if W.ndim == 1:
        W = reshape(W, (W.size, 1))
        alpha = np.reshape(alpha, (W.size, 1))
    n, k = W.shape
    if n == 2:
        can_con = PowCone3D(W[0, :], W[1, :], z, alpha[0, :])
    else:
        T = Variable(shape=(n - 2, k))
        x_3d, y_3d, z_3d, alpha_3d = [], [], [], []
        for j in range(k):
            x_3d.append(W[:-1, j])
            y_3d.append(T[:, j])
            y_3d.append(W[n - 1, j])
            z_3d.append(z[j])
            z_3d.append(T[:, j])
            r_nums = alpha[:, j]
            r_dens = np.cumsum(r_nums[::-1])[::-1]
            # ^ equivalent to [np.sum(alpha[i:, j]) for i in range(n)]
            r = r_nums / r_dens
            alpha_3d.append(r[:n - 1])
        x_3d = hstack(x_3d)
        y_3d = hstack(y_3d)
        z_3d = hstack(z_3d)
        alpha_p3d = hstack(alpha_3d)
        # TODO: Ideally we should construct x,y,z,alpha_p3d by
        #   applying suitable sparse matrices to W,z,T, rather
        #   than using the hstack atom. (hstack will probably
        #   result in longer compile times).
        can_con = PowCone3D(x_3d, y_3d, z_3d, alpha_p3d)
    # Return a single PowCone3D constraint defined over all auxiliary
    # variables needed for the reduction to go through.
    # There are no "auxiliary constraints" beyond this one.
    return can_con, []
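A minimal sketch of the constraint family this reduction targets, built directly from PowCone3D; the solve is omitted because it needs a power-cone-capable solver such as SCS:

import cvxpy as cp
from cvxpy.constraints import PowCone3D

x, y, z = cp.Variable(), cp.Variable(), cp.Variable()
con = PowCone3D(x, y, z, 0.3)          # x**0.3 * y**0.7 >= |z|, with x, y >= 0
prob = cp.Problem(cp.Minimize(x + y), [con, z == 1])
print(prob.is_dcp())                   # True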
def ILP_form(m, P, sinkresource, n, C, Ce, Cr, Tt, Tc, B, E, e, stagemat, Adj):
    # -- Variables --
    T = cp.Variable(P + 1)  # task time
    M = cp.Variable(P)  # models resources
    x = cp.Variable((n, m), boolean=True)  # binary decisions

    # -- Objective --
    obj = cp.Minimize(T[P])

    # -- Constraints --
    constraints = []

    # (1) completion time (Note that M(i) = M_{i-1})
    constraints.append(T[0] == 0)
    for p in range(1, P + 1):
        constraints.append(T[p] == T[p - 1] + M[p - 1])

    # (2) find the slowest process
    for p in range(P):
        for i in range(n):
            for j in range(m):
                constraints.append(Tt[i,j] * stagemat[p,i] * x[i,j] + \
                    stagemat[p,i] * (Adj[:,i].reshape(1,n) @ x @ Tc[:,j]) <= M[p])

    # (3) resource constraints
    for p in range(P):
        for j in range(m):
            constraints.append(stagemat[p] @ multiply(reshape(x[:, j], (
                n, 1)), C[:, j].reshape(n, 1)) <= B[j])

    # (5) assign tasks
    for i in range(n):
        constraints.append(sum(x[i]) == 1)

    # -- Solve --
    prob = cp.Problem(obj, constraints)
    val = prob.solve()

    print('T.value:', T.value)
    print('M.value:', M.value)
    print('x.value:\n', x.value)
    print('T:', val)

    return x.value
Example #19
def tv(value, *args):
    """Total variation of a vector, matrix, or list of matrices.

    Uses L1 norm of discrete gradients for vectors and
    L2 norm of discrete gradients for matrices.

    Parameters
    ----------
    value : Expression or numeric constant
        The value to take the total variation of.
    args : Matrix constants/expressions
        Additional matrices extending the third dimension of value.

    Returns
    -------
    Expression
        An Expression representing the total variation.
    """
    # Accept single list as argument.
    if isinstance(value, list) and len(args) == 0:
        args = value[1:]
        value = value[0]
    value = Expression.cast_to_const(value)
    rows, cols = value.size
    if value.is_scalar():
        raise ValueError("tv cannot take a scalar argument.")
    # L1 norm for vectors.
    elif value.is_vector():
        return norm(value[1:] - value[0:max(rows, cols)-1], 1)
    # L2 norm for matrices.
    else:
        args = list(map(Expression.cast_to_const, args))
        values = [value] + list(args)
        diffs = []
        for mat in values:
            diffs += [
                mat[0:rows-1, 1:cols] - mat[0:rows-1, 0:cols-1],
                mat[1:rows, 0:cols-1] - mat[0:rows-1, 0:cols-1],
            ]
        length = diffs[0].size[0]*diffs[1].size[1]
        stacked = vstack(*[reshape(diff, 1, length) for diff in diffs])
        return sum_entries(norm(stacked, p='fro', axis=0))
Example #20
def tv(value, *args):
    """Total variation of a vector, matrix, or list of matrices.

    Uses L1 norm of discrete gradients for vectors and
    L2 norm of discrete gradients for matrices.

    Parameters
    ----------
    value : Expression or numeric constant
        The value to take the total variation of.
    args : Matrix constants/expressions
        Additional matrices extending the third dimension of value.

    Returns
    -------
    Expression
        An Expression representing the total variation.
    """
    # Accept single list as argument.
    if isinstance(value, list) and len(args) == 0:
        args = value[1:]
        value = value[0]
    value = Expression.cast_to_const(value)
    rows, cols = value.size
    if value.is_scalar():
        raise ValueError("tv cannot take a scalar argument.")
    # L1 norm for vectors.
    elif value.is_vector():
        return norm(value[1:] - value[0:max(rows, cols)-1], 1)
    # L2 norm for matrices.
    else:
        args = map(Expression.cast_to_const, args)
        values = [value] + list(args)
        diffs = []
        for mat in values:
            diffs += [
                mat[0:rows-1, 1:cols] - mat[0:rows-1, 0:cols-1],
                mat[1:rows, 0:cols-1] - mat[0:rows-1, 0:cols-1],
            ]
        length = diffs[0].size[0]*diffs[1].size[1]
        stacked = vstack(*[reshape(diff, 1, length) for diff in diffs])
        return sum_entries(norm(stacked, p='fro', axis=0))
Example #21
def tvnorm2d(value, Dx, Dy):
    """Total variation of a vector, matrix, or list of matrices.
    Uses L1 norm of discrete gradients for vectors and
    L2 norm of discrete gradients for matrices.
    Parameters
    ----------
    value : Expression or numeric constant
        The value to take the total variation of.
    Returns
    -------
    Expression
        An Expression representing the total variation.
    """
    value = Expression.cast_to_const(value)
    len = value.size[0]

    diffs = [Dx * value, Dy * value]

    stack = vstack(*[reshape(diff, 1, len) for diff in diffs])
    return sum_entries(cvxnorm(stack, p='fro', axis=0))
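A hedged sketch of operators that could be passed as Dx and Dy, assuming value is a column-major vectorization of an m x n image; a zero boundary row keeps each operator square, as the reshape above requires:

import numpy as np
import scipy.sparse as sp

def fwd_diff(k):
    # k x k forward difference with a zero last row
    main = np.r_[-np.ones(k - 1), 0.0]
    return sp.diags([main, np.ones(k - 1)], [0, 1], format="csr")

m, n = 4, 5
Dx = sp.kron(fwd_diff(n), sp.eye(m)).tocsr()  # differences across image columns
Dy = sp.kron(sp.eye(n), fwd_diff(m)).tocsr()  # differences across image rows
print(Dx.shape, Dy.shape)                     # (20, 20) (20, 20)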
Example #22
def scaled_lower_tri(matrix):
    """Returns an expression representing the lower triangular entries

    Scales the strictly lower triangular entries by sqrt(2), as required
    by SCS.

    Parameters
    ----------
    matrix : Expression
        A 2-dimensional CVXPY expression.

    Returns
    -------
    Expression
        An expression representing the (scaled) lower triangular part of
        the supplied matrix expression.
    """
    rows = cols = matrix.shape[0]
    entries = rows * (cols + 1) // 2
    val_arr = []
    row_arr = []
    col_arr = []
    count = 0
    for j in range(cols):
        for i in range(rows):
            if j <= i:
                # Index in the original matrix.
                col_arr.append(j * rows + i)
                # Index in the extracted vector.
                row_arr.append(count)
                if j == i:
                    val_arr.append(1.0)
                else:
                    val_arr.append(np.sqrt(2))
                count += 1
    shape = (entries, rows * cols)
    coeff = Constant(
        sp.coo_matrix((val_arr, (row_arr, col_arr)), shape).tocsc())
    vectorized_matrix = reshape(matrix, (rows * cols, 1))
    return coeff * vectorized_matrix
Example #23
def tv(value, *args):
    """Total variation of a vector, matrix, or list of matrices.

    Uses L1 norm of discrete gradients for vectors and
    L2 norm of discrete gradients for matrices.

    Parameters
    ----------
    value : Expression or numeric constant
        The value to take the total variation of.
    args : Matrix constants/expressions
        Additional matrices extending the third dimension of value.

    Returns
    -------
    Expression
        An Expression representing the total variation.
    """
    value = Expression.cast_to_const(value)
    if value.ndim == 0:
        raise ValueError("tv cannot take a scalar argument.")
    # L1 norm for vectors.
    elif value.ndim == 1:
        return norm(value[1:] - value[0:value.shape[0]-1], 1)
    # L2 norm for matrices.
    else:
        rows, cols = value.shape
        args = map(Expression.cast_to_const, args)
        values = [value] + list(args)
        diffs = []
        for mat in values:
            diffs += [
                mat[0:rows-1, 1:cols] - mat[0:rows-1, 0:cols-1],
                mat[1:rows, 0:cols-1] - mat[0:rows-1, 0:cols-1],
            ]
        length = diffs[0].shape[0]*diffs[1].shape[1]
        stacked = vstack([reshape(diff, (1, length)) for diff in diffs])
        return sum(norm(stacked, p=2, axis=0))
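A small denoising sketch using the public atom, assuming a modern CVXPY where this function is exposed as cp.tv:

import cvxpy as cp
import numpy as np

rng = np.random.default_rng(0)
noisy = rng.standard_normal((8, 8))
X = cp.Variable((8, 8))
prob = cp.Problem(cp.Minimize(cp.sum_squares(X - noisy) + 0.5 * cp.tv(X)))
prob.solve()
print(round(prob.value, 3))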
Example #24
def tvnorm_anisotropic_2d(signal, Dx, Dy):
    magnitudes = pnorm(signal, 2, axis=1)
    diffs = [Dx * magnitudes, Dy * magnitudes]
    stack = vstack(*[reshape(diff, 1, magnitudes.size[0]) for diff in diffs])
    return sum_entries(pnorm(stack, 2, axis=0))
def LP_relax(m,
             P,
             sinkresource,
             n,
             C,
             Ce,
             Cr,
             Tt,
             Tc,
             B,
             E,
             e,
             stagemat,
             Adj,
             verbose=1):
    # -- Variables --
    T = cp.Variable(P + 1)  # task time
    M = cp.Variable(P)  # models resources
    xl = cp.Variable((n, m), boolean=False)  # binary decisions

    # -- Objective --
    obj = cp.Minimize(T[P])

    # -- Constraints --
    constraints = []

    # (1) completion time (Note that M(i) = M_{i-1})
    constraints.append(T[0] == 0)
    for p in range(1, P + 1):
        constraints.append(T[p] == T[p - 1] + M[p - 1])

    # (2) find the slowest process
    for p in range(P):
        for i in range(n):
            for j in range(m):
                constraints.append(Tt[i,j] * stagemat[p,i] * xl[i,j] + \
                    stagemat[p,i] * (Adj[:,i].reshape(1,n) @ xl @ Tc[:,j]) <= M[p])

    # (3) resource constraints
    for p in range(P):
        for j in range(m):
            constraints.append(
                stagemat[p] @ multiply(reshape(xl[:, j], (
                    n, 1)), C[:, j].reshape(n, 1)) <= B[j])

    # (5) assign tasks
    for i in range(n):
        constraints.append(sum(xl[i]) == 1)

    # (6) bounded between 0 and 1
    for i in range(n):
        for j in range(m):
            constraints.append(xl[i, j] >= 0)

    for i in range(n):
        for j in range(m):
            constraints.append(xl[i, j] <= 1)

    # -- Solve --
    prob = cp.Problem(obj, constraints)
    val = prob.solve()

    if verbose == 1:
        print('prob.solve:', val)
        print('T.value:', np.around(T.value, 3))
        print('M.value:', np.around(M.value, 3))
        print('xl.value:\n', np.around(xl.value, 3))
        print('T:', val)

    return val, xl.value
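A hedged driver sketch with made-up toy data: the arguments the snippet never uses (sinkresource, Ce, Cr, E, e) are passed as None, the shapes below are inferred from how the constraints index them, and the module-level imports the snippet relies on (e.g. multiply and reshape from cvxpy) are assumed to be in scope. ILP_form takes the same arguments but additionally needs a mixed-integer solver installed.

import numpy as np

n, m, P = 4, 2, 2                      # tasks, machines, pipeline stages
Tt = np.full((n, m), 1.0)              # per-task compute time on each machine
Tc = np.full((m, m), 0.1)              # inter-machine communication time
np.fill_diagonal(Tc, 0.0)
C = np.ones((n, m))                    # per-task resource use
B = np.array([2.0, 2.0])               # per-machine resource budgets
stagemat = np.array([[1, 1, 0, 0],     # stage -> task incidence
                     [0, 0, 1, 1]], dtype=float)
Adj = np.zeros((n, n))                 # column i marks the predecessors of task i
Adj[0, 2] = Adj[1, 3] = 1.0

val, x_relaxed = LP_relax(m, P, None, n, C, None, None, Tt, Tc, B,
                          None, None, stagemat, Adj, verbose=0)
print(round(val, 3), x_relaxed.round(3), sep="\n")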