Example #1
def add_canon(expr, args):
    """Canonicalizes addition under the log-log (DGP) transform: a sum of
    positive terms becomes a log_sum_exp of its log-space arguments.
    """
    if expr.is_scalar():
        return log_sum_exp(hstack(args)), []

    rows = []
    # Promote scalar summands so every argument has the shape of expr.
    summands = [promote(s, expr.shape) if s.is_scalar() else s for s in args]
    if len(expr.shape) == 1:
        # Vector case: one log_sum_exp per entry, taken across the summands.
        for i in range(expr.shape[0]):
            row = []
            row.append(
                log_sum_exp(hstack([summand[i] for summand in summands])))
            rows.append(row)
        return reshape(bmat(rows), expr.shape), []
    else:
        # Matrix case: one log_sum_exp per entry (i, j), across the summands.
        for i in range(expr.shape[0]):
            row = []
            for j in range(expr.shape[1]):
                row.append(
                    log_sum_exp(hstack([summand[i, j]
                                        for summand in summands])))
            rows.append(row)
        return reshape(bmat(rows), expr.shape), []
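
For context, a minimal NumPy sketch (illustrative, not part of the CVXPY source) of the identity this canonicalization relies on: for positive summands, the log of the sum equals a log-sum-exp of the logs.

import numpy as np

# Illustrative check: with u_i = log(x_i) for positive x_i,
# log(x1 + x2 + ...) == log(exp(u1) + exp(u2) + ...), i.e. log_sum_exp(u).
x = np.array([3.0, 5.0, 0.25])
u = np.log(x)
assert np.isclose(np.log(x.sum()), np.log(np.exp(u).sum()))
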
Example #2
def sigma_max_canon(expr, args):
    """Canonicalizes sigma_max(A) through its SDP epigraph:
    sigma_max(A) <= t if and only if [[t*I_n, A], [A.T, t*I_m]] is PSD.
    """
    A = args[0]
    n, m = A.shape
    shape = expr.shape
    if np.prod(shape) != 1:
        raise RuntimeError('Invalid shape of expr in sigma_max canonicalization.')
    t = Variable(shape)
    tI_n = sp.eye(n) * t
    tI_m = sp.eye(m) * t
    X = bmat([[tI_n, A],
              [A.T, tI_m]])
    constraints = [PSD(X)]
    return t, constraints
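
An illustrative NumPy check (not from CVXPY) of the semidefinite characterization used above: at t = sigma_max(A), the block matrix [[t*I, A], [A.T, t*I]] is positive semidefinite with a zero eigenvalue, so the epigraph constraint is tight.

import numpy as np

# Illustrative check of the epigraph: the eigenvalues of
# [[t*I_n, A], [A.T, t*I_m]] are t +/- sigma_i(A) (plus t itself),
# so the matrix is PSD exactly when t >= sigma_max(A).
rng = np.random.default_rng(0)
A = rng.standard_normal((4, 3))
t = np.linalg.norm(A, 2)  # spectral norm, i.e. sigma_max(A)
n, m = A.shape
X = np.block([[t * np.eye(n), A], [A.T, t * np.eye(m)]])
assert abs(np.linalg.eigvalsh(X).min()) <= 1e-9  # PSD, with a zero eigenvalue
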
Example #3
def normNuc_canon(expr, args):
    A = args[0]
    m, n = A.shape
    # Create the equivalent problem:
    #   minimize (trace(U) + trace(V))/2
    #   subject to:
    #            [U A; A.T V] is positive semidefinite
    constraints = []
    U = Variable(shape=(m, m), symmetric=True)
    V = Variable(shape=(n, n), symmetric=True)
    X = bmat([[U, A], [A.T, V]])
    constraints.append(X >> 0)
    trace_value = 0.5 * (trace(U) + trace(V))
    return trace_value, constraints
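
An illustrative NumPy check (not from CVXPY) that the SDP above can attain the nuclear norm: building U and V from the SVD of A gives a feasible point whose objective (trace(U) + trace(V)) / 2 equals the sum of singular values.

import numpy as np

# Illustrative feasible point: with A = W @ diag(s) @ Vt,
# U = W diag(s) W.T and V = Vt.T diag(s) Vt make [[U, A], [A.T, V]] PSD
# and (trace(U) + trace(V)) / 2 == sum(s) == ||A||_*.
rng = np.random.default_rng(0)
A = rng.standard_normal((4, 3))
W, s, Vt = np.linalg.svd(A, full_matrices=False)
U = W @ np.diag(s) @ W.T
V = Vt.T @ np.diag(s) @ Vt
X = np.block([[U, A], [A.T, V]])
assert np.linalg.eigvalsh(X).min() >= -1e-9
assert np.isclose(0.5 * (np.trace(U) + np.trace(V)), s.sum())
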
Example #4
def log_det_canon(expr, args):
    """Reduces the atom to an affine expression and list of constraints.

    Creates the equivalent problem::

       maximize    sum(log(D[i, i]))
       subject to: D diagonal
                   diag(D) = diag(Z)
                   Z is upper triangular.
                   [D Z; Z.T A] is positive semidefinite

    The problem computes the LDL factorization:

    .. math::

       A = (Z^TD^{-1})D(D^{-1}Z)

    This follows from the inequality:

    .. math::

       \\det(A) >= \\det(D) + \\det([D, Z; Z^T, A])/\\det(D)
               >= \\det(D)

    because the LDL factorization of A yields a feasible pair (D, Z) that
    achieves det(A) = det(D), and the objective maximizes det(D).

    Parameters
    ----------
    expr : log_det
    args : list
        The arguments for the expression

    Returns
    -------
    tuple
        (affine expression for the objective, list of constraints)
    """
    A = args[0]  # n by n matrix.
    n, _ = A.shape
    z = Variable(shape=(n * (n + 1) // 2, ))
    Z = vec_to_upper_tri(z, strict=False)
    d = diag_mat(Z)  # a vector
    D = diag_vec(d)  # a matrix
    X = bmat([[D, Z], [Z.T, A]])
    constraints = [PSD(X)]
    log_expr = log(d)
    obj, constr = log_canon(log_expr, log_expr.args)
    constraints += constr
    return sum(obj), constraints
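
An illustrative NumPy check (not part of CVXPY) of the docstring's factorization argument: building D and Z from a Cholesky factor of a positive definite A gives an upper-triangular Z with diag(Z) = diag(D), A = Z.T @ inv(D) @ Z, and sum(log(d)) = log(det(A)), so the maximization can attain log_det(A).

import numpy as np

# Illustrative feasible (D, Z): from A = R.T @ R with R upper triangular,
# take d = diag(R)**2 and Z = diag(diag(R)) @ R, so that diag(Z) == d,
# Z.T @ inv(D) @ Z == A, and sum(log(d)) == log(det(A)).
rng = np.random.default_rng(0)
M = rng.standard_normal((4, 4))
A = M @ M.T + 4 * np.eye(4)    # positive definite
R = np.linalg.cholesky(A).T    # upper triangular, A == R.T @ R
d = np.diag(R) ** 2
Z = np.diag(np.diag(R)) @ R
assert np.allclose(np.diag(Z), d)
assert np.allclose(Z.T @ np.diag(1.0 / d) @ Z, A)
assert np.isclose(np.log(d).sum(), np.linalg.slogdet(A)[1])
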
Example #5
def mulexpression_canon(expr, args):
    """Canonicalizes a matrix product under the log-log (DGP) transform:
    entry (i, j) of the product becomes log_sum_exp over k of
    lhs[i, k] + rhs[k, j].
    """
    lhs = args[0]
    rhs = args[1]
    lhs_shape, rhs_shape, _ = mul_shapes_promote(lhs.shape, rhs.shape)
    lhs = reshape(lhs, lhs_shape)
    rhs = reshape(rhs, rhs_shape)
    rows = []
    # TODO(akshayka): Parallelize this for large matrices.
    for i in range(lhs.shape[0]):
        row = []
        for j in range(rhs.shape[1]):
            arr = hstack([lhs[i, k] + rhs[k, j] for k in range(lhs.shape[1])])
            row.append(log_sum_exp(arr))
        rows.append(row)
    mat = bmat(rows)
    if mat.shape != expr.shape:
        mat = reshape(mat, expr.shape)
    return mat, []
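
A minimal NumPy sketch (not from CVXPY) of the entrywise identity the nested loops implement: for entrywise-positive matrices, log((A @ B)[i, j]) equals a log-sum-exp over k of log(A[i, k]) + log(B[k, j]).

import numpy as np
from scipy.special import logsumexp

# Illustrative check of the log-space matrix product:
# log(A @ B)[i, j] == logsumexp_k(log A[i, k] + log B[k, j]).
rng = np.random.default_rng(0)
A = rng.uniform(0.5, 2.0, size=(3, 4))
B = rng.uniform(0.5, 2.0, size=(4, 2))
expected = np.log(A @ B)
log_prod = logsumexp(np.log(A)[:, :, None] + np.log(B)[None, :, :], axis=1)
assert np.allclose(expected, log_prod)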