Example #1
import numpy as np
import apgpy
from sklearn.utils.extmath import randomized_svd  # assumed source of randomized_svd


def one_bit_MC_fully_observed(M,
                              link,
                              link_gradient,
                              tau,
                              gamma,
                              max_rank=None,
                              apg_max_iter=100,
                              apg_eps=1e-12,
                              apg_use_restart=True):
    # parameters are the same as in the paper; if `max_rank` is set to None,
    # then exact SVD is used
    m = M.shape[0]
    n = M.shape[1]
    tau_sqrt_mn = tau * np.sqrt(m * n)

    def prox(_A, t):
        _A = _A.reshape(m, n)

        # project so nuclear norm is at most tau*sqrt(m*n)
        if max_rank is None:
            U, S, VT = np.linalg.svd(_A, full_matrices=False)
        else:
            U, S, VT = randomized_svd(_A, max_rank)
        nuclear_norm = np.sum(S)
        if nuclear_norm > tau_sqrt_mn:
            S *= tau_sqrt_mn / nuclear_norm
            _A = np.dot(U * S, VT)

        # clip matrix entries with absolute value greater than gamma
        mask = np.abs(_A) > gamma
        if mask.sum() > 0:
            _A[mask] = np.sign(_A[mask]) * gamma

        return _A.flatten()

    M_one_mask = (M == 1)
    M_zero_mask = (M == 0)

    def grad(_A):
        _A = _A.reshape(m, n)

        grad = np.zeros((m, n))
        grad[M_one_mask] = -link_gradient(_A[M_one_mask]) / link(
            _A[M_one_mask])
        grad[M_zero_mask] = \
            link_gradient(_A[M_zero_mask])/(1 - link(_A[M_zero_mask]))

        return grad.flatten()

    A_hat = apgpy.solve(grad,
                        prox,
                        np.zeros(m * n),
                        max_iters=apg_max_iter,
                        eps=apg_eps,
                        use_gra=True,
                        use_restart=apg_use_restart,
                        quiet=True)
    P_hat = link(A_hat.reshape(m, n))
    return P_hat
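
A minimal usage sketch for the function above, assuming the standard logistic link; the link functions, toy matrix, and parameter values here are illustrative, not from the original project:

import numpy as np

def logistic(x):
    return 1.0 / (1.0 + np.exp(-x))

def logistic_gradient(x):
    s = logistic(x)
    return s * (1.0 - s)

# toy fully observed 0/1 matrix; tau and gamma are illustrative values
M = (np.random.rand(50, 40) < 0.5).astype(int)
P_hat = one_bit_MC_fully_observed(M, logistic, logistic_gradient,
                                  tau=1.0, gamma=3.0, max_rank=10)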
Example #2
import numpy as np
import apgpy
from sklearn.utils.extmath import randomized_svd  # assumed source of randomized_svd


def weighted_softimpute(X,
                        M,
                        W,
                        lmbda,
                        max_rank=None,
                        min_value=None,
                        max_value=None,
                        apg_max_iter=100,
                        apg_eps=1e-6,
                        apg_use_restart=True):
    # if `max_rank` is set to None, then exact SVD is used
    m = X.shape[0]
    n = X.shape[1]

    def prox(Z, t):
        Z = Z.reshape(m, n)

        # singular value shrinkage
        if max_rank is None:
            U, S, VT = np.linalg.svd(Z, full_matrices=False)
        else:
            U, S, VT = randomized_svd(Z, max_rank)
        S = np.maximum(S - lmbda * t, 0)
        Z = np.dot(U * S, VT)

        # clip values
        if min_value is not None:
            mask = Z < min_value
            if mask.sum() > 0:
                Z[mask] = min_value
        if max_value is not None:
            mask = Z > max_value
            if mask.sum() > 0:
                Z[mask] = max_value

        return Z.flatten()

    M_one_mask = (M == 1)
    masked_weights = W[M_one_mask]
    masked_X = X[M_one_mask]

    def grad(Z):
        grad = np.zeros((m, n))
        grad[M_one_mask] = (Z.reshape(m, n)[M_one_mask] -
                            masked_X) * masked_weights
        return grad.flatten()

    X_hat = apgpy.solve(grad,
                        prox,
                        np.zeros(m * n),
                        max_iters=apg_max_iter,
                        eps=apg_eps,
                        use_gra=True,
                        use_restart=apg_use_restart,
                        quiet=True).reshape((m, n))
    return X_hat
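
A short usage sketch with toy data; `M` marks observed entries with 1 and `W` carries per-entry weights, both illustrative:

import numpy as np

X = np.random.randn(30, 20)                      # data matrix
M = (np.random.rand(30, 20) < 0.3).astype(int)   # 1 = observed entry
W = np.ones((30, 20))                            # uniform weights
X_hat = weighted_softimpute(X, M, W, lmbda=0.1, max_rank=5)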
Example #3
    def _update(self, W=None):
        X = self.X
        m, n = X.shape
        if W is None:
            W = np.ones((m, n))
        tot, rn, cn, rp, cp = self.compute_rc_weight()

        def grad(_UV):
            assert (len(_UV) == m * self.k + n * self.k)
            U = _UV[:m * self.k].reshape((m, self.k))
            V = _UV[m * self.k:].reshape((n, self.k))
            X_hat_tmp = U.dot(V.T)
            grad_U = -np.dot((X - X_hat_tmp) * W * self.X_train_mask, V)
            grad_V = -np.dot(U.T, (X - X_hat_tmp) * W * self.X_train_mask).T
            grad_UV = np.concatenate([grad_U.flatten(), grad_V.flatten()])
            return grad_UV

        def l2_prox(x, t):
            return np.maximum(1 - t / np.maximum(np.linalg.norm(x, 2), 1e-6),
                              0.0) * x

        def prox(_UV, t):
            assert (len(_UV) == m * self.k + n * self.k)
            U_tmp = _UV[:m * self.k].reshape((m, self.k))
            V_tmp = _UV[m * self.k:].reshape((n, self.k))
            U = U_tmp.copy()
            for i in range(m):
                U[i] = l2_prox(
                    U_tmp[i], self.lambda1 / 2 *
                    (np.power(rp[i], self.alpha) / np.maximum(rn[i], 1)) * t)
            V = V_tmp.copy()
            for i in range(n):
                V[i] = l2_prox(
                    V_tmp[i], self.lambda1 / 2 *
                    (np.power(cp[i], self.alpha) / np.maximum(cn[i], 1)) * t)
            prox_UV = np.concatenate([U.flatten(), V.flatten()])
            return prox_UV

        _UV_hat = apgpy.solve(grad,
                              prox,
                              np.concatenate(
                                  [self.U.flatten(),
                                   self.V.flatten()]),
                              max_iters=self.max_iter,
                              eps=self.tol,
                              use_restart=self.apg_use_restart,
                              quiet=(not self.verbose),
                              use_gra=True)
        U = _UV_hat[:m * self.k].reshape((m, self.k))
        V = _UV_hat[m * self.k:].reshape((n, self.k))
        self.U = U
        self.V = V
        self.X_hat = self.U.dot(self.V.T)
        return self
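
`l2_prox` above is the proximal operator of t·‖·‖₂ (block soft-thresholding): it shrinks a row's Euclidean norm by t and zeroes the row entirely once t exceeds the norm, which is what drives whole rows of U and V to zero. A standalone check of that behavior:

import numpy as np

def l2_prox(x, t):
    return np.maximum(1 - t / np.maximum(np.linalg.norm(x, 2), 1e-6),
                      0.0) * x

v = np.array([3.0, 4.0])   # ||v||_2 = 5
print(l2_prox(v, 1.0))     # norm shrunk by 1: [2.4, 3.2]
print(l2_prox(v, 6.0))     # threshold exceeds the norm: [0., 0.]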
Example #4
    def non_private1(X, y, lambda2):
        n, d = X.shape[0], X.shape[1]        
        #y = np.matrix(y).reshape((n, 1))
        #X = np.array(np.multiply(X, y))
        XtX = np.dot(X.T, X)
        Xty = np.dot(X.T, y)
 
        def log_reg_grad(v):
            l = np.exp(np.dot(X, v))
            return -np.dot(X.T, 1. / (1 + l)) / n + lambda2 * v
            
        def lin_reg_grad(v):
            return (np.dot(XtX, v) - Xty) / n + lambda2 * v

        beta = apg.solve(lin_reg_grad, {}, np.zeros(d),
                         eps=1e-9, quiet=True,
                         step_size=1., fixed_step_size=True,
                         max_iters=100, use_gra=True)
        return beta
Example #5
    def baseline11(X, y, lambda2, epsilon):
        n, d = X.shape[0], X.shape[1]        
        #y = np.matrix(y).reshape((n, 1))
        #X = np.array(np.multiply(X, y))
        XtX = np.dot(X.T, X)
        Xty = np.dot(X.T, y)
 
        def log_reg_grad(v):
            l = np.exp(np.dot(X, v))
            return -np.dot(X.T, 1. / (1 + l)) / n + lambda2 * v

        def lin_reg_grad(v):
            return (np.dot(XtX, v) - Xty) / n + lambda2 * v

        beta = apg.solve(lin_reg_grad, {}, np.zeros(d),
                         eps=1e-9, quiet=True,
                         step_size=1., fixed_step_size=True,
                         max_iters=100, use_gra=True)
        beta += np.random.laplace(0, 2. / (n * lambda2 * epsilon), d)
        return beta
Example #6
    def _update(self, X, **kwargs):
        m, n = X.shape

        def grad(_UV):
            assert (len(_UV) == m * self.n_components + n * self.n_components)
            U = _UV[:m * self.n_components].reshape((m, self.n_components))
            V = _UV[m * self.n_components:].reshape((n, self.n_components))
            X_hat_tmp = U.dot(V.T)
            grad_U = -np.dot((X - X_hat_tmp) * self.X_train_mask, V)
            grad_V = -np.dot(U.T, (X - X_hat_tmp) * self.X_train_mask).T
            grad_UV = np.concatenate([grad_U.flatten(), grad_V.flatten()])
            return grad_UV

        def prox(_UV, t):
            assert (len(_UV) == m * self.n_components + n * self.n_components)
            U_tmp = _UV[:m * self.n_components].reshape((m, self.n_components))
            V_tmp = _UV[m * self.n_components:].reshape((n, self.n_components))
            tmp_norm_inf = np.linalg.norm(U_tmp.dot(V_tmp.T), np.inf)
            if tmp_norm_inf > self.alpha:
                U_tmp = np.sqrt(self.alpha) / np.sqrt(tmp_norm_inf) * U_tmp
                V_tmp = np.sqrt(self.alpha) / np.sqrt(tmp_norm_inf) * V_tmp
            U = self._proj(U_tmp, self.R)
            V = self._proj(V_tmp, self.R)
            prox_UV = np.concatenate([U.flatten(), V.flatten()])
            return prox_UV

        _UV_hat = apgpy.solve(grad,
                              prox,
                              np.concatenate(
                                  [self.U.flatten(),
                                   self.V.flatten()]),
                              max_iters=self.max_iter,
                              eps=self.tol,
                              use_restart=self.apg_use_restart,
                              quiet=(not self.verbose),
                              use_gra=True)
        U = _UV_hat[:m * self.n_components].reshape((m, self.n_components))
        V = _UV_hat[m * self.n_components:].reshape((n, self.n_components))
        self.U = U
        self.V = V
        self.X_hat = self.U.dot(self.V.T)
        return self
Example #7
    def baseline2(X, y, lambda2, epsilon):  # Chaudhuri et al. 2011 - Objective Perturbation
        n, d = X.shape[0], X.shape[1]
        epsilon2 = epsilon - 2 * math.log(1 / (4 * n * lambda2)) # was log(1 + 1/(4...))
        if epsilon2 > 0:
            Delta = 0.
        else:
            Delta = 1 / (4 * n * (math.exp(epsilon / 4.) - 1)) - lambda2
            epsilon2 = epsilon / 2.
        
        #y = np.matrix(y).reshape((n, 1))
        #X = np.array(np.multiply(X, y))
        b = np.random.laplace(0, 2. / epsilon2, d)
        XtX = np.dot(X.T, X)
        Xty = np.dot(X.T, y)
 
        def log_reg_grad(v):
            l = np.exp(np.dot(X, v))
            return -np.dot(X.T, 1. / (1 + l)) / n + (Delta + lambda2) * v + b / n # divided l(.) by n
        
        def lin_reg_grad(v):
            return (np.dot(XtX, v) - Xty) / n + (Delta + lambda2) * v + b / n

        beta = apg.solve(lin_reg_grad, {}, np.zeros(d),
                         eps=1e-9, quiet=True,
                         step_size=1., fixed_step_size=True,
                         max_iters=100, use_gra=True)
        return beta
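
Unlike `baseline11`, which adds Laplace noise to the solution after solving, objective perturbation draws the Laplace vector `b` once up front and folds it into the problem itself. In the author's notation, `lin_reg_grad` above is the gradient of:

#   (1 / (2 * n)) * ||X v - y||^2             # least-squares loss
#     + ((lambda2 + Delta) / 2) * ||v||^2     # ridge term, inflated by Delta
#     + np.dot(b, v) / n                      # random linear perturbation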
Example #8
import numpy as np
import apgpy as apg

n = 2000
m = 5000
A = np.random.randn(m, n)
b = np.random.randn(m)
mu = 1.

#U, s, V = np.linalg.svd(A, full_matrices=True)
#S = np.zeros((m, n))
#S[:n, :n] = np.diag(s)
#S **= 2
#A = np.dot(U, np.dot(S, V))
#np.linalg.cond(A)

AtA = np.dot(A.T, A)
Atb = np.dot(A.T, b)

def quad_grad(y):
    return np.dot(AtA, y) - Atb

def soft_thresh(y, t):
    return np.sign(y) * np.maximum(abs(y) - t * mu, 0.)

x = apg.solve(quad_grad, soft_thresh, np.zeros(n), eps=1e-8, gen_plots=False, quiet=True)
#plt.show()
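
Here `quad_grad` is the gradient of ½‖Ax − b‖² and `soft_thresh` is the proximal operator of μ‖x‖₁, so the call solves the lasso problem min_x ½‖Ax − b‖² + μ‖x‖₁. A quick check of the stationarity conditions at the returned `x` (tolerances are loose because the solver stops at `eps`):

r = np.dot(AtA, x) - Atb   # gradient of the smooth part at x
on = x != 0
# on zero coordinates the subgradient condition |r_i| <= mu must hold;
# on active coordinates r_i + mu * sign(x_i) should be ~0
print(np.abs(r[~on]).max() if (~on).any() else 0.0)
print(np.abs(r[on] + mu * np.sign(x[on])).max() if on.any() else 0.0)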
Example #9
import numpy as np
import apgpy as apg

n = 2000  # as in Example #8, which this snippet otherwise matches
m = 5000
A = np.random.randn(m, n)
b = np.random.randn(m)
mu = 1.

#U, s, V = np.linalg.svd(A, full_matrices=True)
#S = np.zeros((m, n))
#S[:n, :n] = np.diag(s)
#S **= 2
#A = np.dot(U, np.dot(S, V))
#np.linalg.cond(A)

AtA = np.dot(A.T, A)
Atb = np.dot(A.T, b)


def quad_grad(y):
    return np.dot(AtA, y) - Atb


def soft_thresh(y, t):
    return np.sign(y) * np.maximum(abs(y) - t * mu, 0.)


x = apg.solve(quad_grad,
              soft_thresh,
              np.zeros(n),
              eps=1e-8,
              gen_plots=False,
              quiet=True)
#plt.show()
Example #10
    def run(self, g, lmda=1.0):
        self.sampleGraph(g)
        self.lmda = lmda
        x = apg.solve(self.grad, self.proximal, np.zeros(self.n ** 2), quiet=True)
        return self.getFourTrans(x)