Example #1
def _Nesterov_grad(beta, A, rng=RandomUniform(-1, 1), grad_norm=grad_l2):
    """Sub-gradient of a Nesterov-smoothable penalty: the sum of
    A_i^T grad_norm(A_i beta) over the linear operators in A."""
    grad_Ab = 0
    for i in range(len(A)):
        Ai = A[i]
        Ab = Ai.dot(beta)
        grad_Ab += Ai.T.dot(grad_norm(Ab, rng))

    return grad_Ab
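The snippets on this page reference a few helpers (RandomUniform, TOLERANCE, norm2) that are never shown. Below is a minimal sketch of what they are assumed to look like, so the examples can be run standalone; the names match the snippets, but the library's real implementations may differ.

import numpy as np

# Assumed small positive threshold; the library's actual value may differ.
TOLERANCE = 5e-8


def norm2(x):
    """Euclidean norm of a vector."""
    return np.linalg.norm(x)


class RandomUniform(object):
    """Callable factory for uniform samples on [a, b)."""

    def __init__(self, a=0.0, b=1.0):
        self.a = a
        self.b = b

    def __call__(self, *shape):
        if not shape:
            return np.random.uniform(self.a, self.b)  # scalar sample
        return np.random.uniform(self.a, self.b, size=shape)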
Example #2
    def __init__(self,
                 l,
                 A,
                 mu=TOLERANCE,
                 rng=RandomUniform(-1, 1),
                 norm=L2.grad,
                 **kwargs):

        super(NesterovFunction, self).__init__(l, rng=rng, norm=norm, **kwargs)

        self.A = A    # list of linear operators defining the penalty
        self.mu = mu  # smoothing parameter
Example #3
def grad_l1(beta, rng=RandomUniform(-1, 1)):
    """Sub-gradient of the function

        f(x) = |x|_1,

    where |x|_1 is the L1-norm.
    """
    grad = np.zeros((beta.shape[0], 1))
    grad[beta >= TOLERANCE] = 1.0
    grad[beta <= -TOLERANCE] = -1.0
    between = (beta > -TOLERANCE) & (beta < TOLERANCE)
    grad[between] = rng(between.sum())  # random sub-gradient at the kink

    return grad
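A quick check on a toy input (hypothetical, using the sketched helpers above): entries clearly away from zero get their sign, and near-zero entries get a random value in [-1, 1], which is a valid sub-gradient of |x|_1 at 0.

beta = np.array([[2.0], [-3.0], [0.0]])
g = grad_l1(beta)
print(g[0, 0], g[1, 0])        # 1.0 -1.0
print(-1.0 <= g[2, 0] <= 1.0)  # True: random sub-gradient at the kink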
Example #4
def grad_l2(beta, rng=RandomUniform(0, 1)):
    """Sub-gradient of the function

        f(x) = |x|_2,

    where |x|_2 is the L2-norm.
    """
    norm_beta = norm2(beta)
    if norm_beta > TOLERANCE:
        return beta * (1.0 / norm_beta)
    else:
        D = beta.shape[0]
        u = (rng(D, 1) * 2.0) - 1.0  # [-1, 1]^D
        norm_u = norm2(u)
        a = rng()  # [0, 1]

        return u * (a / norm_u)  # random point in the L2 unit ball
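At beta = 0 the L2 norm is not differentiable, and grad_l2 falls back to a random point in the unit ball, which is exactly the sub-differential there. A toy check (hypothetical, using the sketched helpers above):

g = grad_l2(np.zeros((5, 1)))
print(norm2(g) <= 1.0)  # True: a valid sub-gradient at 0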
Example #5
def grad_tv(beta, A, rng=RandomUniform(0, 1)):
    """Sub-gradient of the total variation penalty defined by the
    linear operators in A."""
    beta_flat = beta.ravel()
    Ab = np.vstack([Ai.dot(beta_flat) for Ai in A]).T
    Ab_norm2 = np.sqrt(np.sum(Ab**2.0, axis=1))

    upper = Ab_norm2 > TOLERANCE
    grad_Ab_norm2 = Ab  # reuses Ab's storage; every row is overwritten below
    grad_Ab_norm2[upper] = (Ab[upper].T / Ab_norm2[upper]).T

    lower = Ab_norm2 <= TOLERANCE
    n_lower = lower.sum()

    if n_lower:
        D = len(A)
        vec_rnd = (rng(n_lower, D) * 2.0) - 1.0
        norm_vec = np.sqrt(np.sum(vec_rnd**2.0, axis=1))
        a = rng(n_lower)
        grad_Ab_norm2[lower] = (vec_rnd.T * (a / norm_vec)).T

    grad = np.vstack([A[i].T.dot(grad_Ab_norm2[:, i]) for i in range(len(A))])
    grad = grad.sum(axis=0)

    return grad.reshape(beta.shape)
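A toy 1-D example (hypothetical, using the sketched helpers above): with a single first-difference operator in A, grad_tv returns a sub-gradient of sum_i |beta_{i+1} - beta_i|.

p = 5
diff_op = np.diff(np.eye(p), axis=0)  # (p-1) x p first-difference operator
beta = np.array([[0.0], [1.0], [1.0], [3.0], [3.0]])
g = grad_tv(beta, [diff_op])
print(g.shape)  # (5, 1), one sub-gradient entry per coefficient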
Example #6
    def __init__(self, l, rng=RandomUniform(-1, 1)):

        super(L1, self).__init__(l, rng=rng)
Example #7
def grad_gl(beta, A, rng=RandomUniform(-1, 1)):
    """Sub-gradient of the group lasso penalty: delegates to
    _Nesterov_grad with the groupwise L2 sub-gradient."""
    return _Nesterov_grad(beta, A, rng, grad_l2)
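A toy group lasso example (hypothetical): each operator extracts one group, so the sub-gradient of a nonzero group is beta_group / ||beta_group||_2 mapped back into the full coefficient vector.

A1 = np.array([[1.0, 0.0, 0.0, 0.0],
               [0.0, 1.0, 0.0, 0.0]])  # extracts group 1
A2 = np.array([[0.0, 0.0, 1.0, 0.0],
               [0.0, 0.0, 0.0, 1.0]])  # extracts group 2
beta = np.array([[3.0], [4.0], [0.0], [0.0]])
g = grad_gl(beta, [A1, A2])
print(g[:2].ravel())  # [0.6 0.8] = beta_group / ||beta_group||_2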
Example #8
    def __init__(self, l, A, rng=RandomUniform(-1, 1)):

        super(GroupLasso, self).__init__(l, A, rng=rng)
Example #9
    def __init__(self, l, A, rng=RandomUniform(0, 1)):

        super(TotalVariation, self).__init__(l, A=A, rng=rng)
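A hypothetical skeleton, inferred only from the constructor fragments above, showing how their super() chains fit together; AtomicFunction is an assumed name, grad_l2 stands in for the L2.grad the fragments reference, and the real library's base classes carry far more (function evaluation, proximal operators, etc.).

class AtomicFunction(object):
    """Assumed base: stores the weight, the sampler and the norm gradient."""

    def __init__(self, l, rng=RandomUniform(-1, 1), norm=grad_l2, **kwargs):
        self.l = l      # regularization weight
        self.rng = rng  # sampler for sub-gradients at non-smooth points
        self.norm = norm


class NesterovFunction(AtomicFunction):
    def __init__(self, l, A, mu=TOLERANCE, rng=RandomUniform(-1, 1),
                 norm=grad_l2, **kwargs):
        super(NesterovFunction, self).__init__(l, rng=rng, norm=norm, **kwargs)
        self.A = A    # list of linear operators
        self.mu = mu  # smoothing parameter


class TotalVariation(NesterovFunction):
    def __init__(self, l, A, rng=RandomUniform(0, 1)):
        super(TotalVariation, self).__init__(l, A=A, rng=rng)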