Example #1
    def __init__(self, numComponents, data, num_iter=100, step_fn=lambda x: 9e-4 / (x + 1)):
        Y = np.random.randn(data.shape[1], numComponents)
        def pca_grad(w, x):
            x = x[np.newaxis].T
            return -x.dot(x.T).dot(w)

        def pca_objective(w, x):
            x = x[np.newaxis].T
            return np.linalg.norm(w.dot(x.dot(x.T)).dot(w.T))

        loss = []
        def iteration_logger(sgd):
            current_objective = objective(sgd.w.T)
            loss.append(current_objective)

        self.pca_grad = pca_grad
        self.pca_objective = pca_objective
        pca_loss = EmpiricalLossFn(pca_objective, pca_grad)
        objective = pca_loss.partial_eval_objective_full(data)
        sgd = AlectonStochasticGradientMethod(Y, step_fn, iteration_logger)
        gradients = [(pca_loss.partial_eval_gradient_streaming(x)) for x in data]
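        # Three passes over the same streaming gradients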
        sgd.train(gradients)
        sgd.train(gradients)
        sgd.train(gradients)
        self.loss = loss
        self.pca_mat = sgd.w.T
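This constructor and the tests below call into an EmpiricalLossFn helper whose definition is not shown on this page. The sketch below is only a guess at its interface, reconstructed from the calls partial_eval_objective_full and partial_eval_gradient_streaming above; the project's real class may differ.

import numpy as np

class EmpiricalLossFn(object):
    """Wraps a per-sample objective f(w, x) and gradient g(w, x) (assumed interface)."""

    def __init__(self, objective_fn, gradient_fn):
        self.objective_fn = objective_fn
        self.gradient_fn = gradient_fn

    def partial_eval_objective_full(self, data):
        # Returns a function of w that averages the objective over the full data set.
        return lambda w: np.mean([self.objective_fn(w, x) for x in data], axis=0)

    def partial_eval_gradient_streaming(self, x):
        # Returns a function of w that evaluates the gradient at a single sample,
        # suitable for streaming/SGD use.
        return lambda w: self.gradient_fn(w, x)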
Example #2
def pca_loss_test():
    # This objective is nonconvex!

    digits = sklearn.datasets.load_digits(n_class=2)
    X = digits.data
    w_star = np.linalg.svd(X)[-1][0]

    def pca_grad(w, x):
        x = x[np.newaxis].T
        return -x.dot(x.T).dot(w)

    def pca_objective(w, x):
        x = x[np.newaxis].T
        return -w.dot(x.dot(x.T)).dot(w)

    pca_loss = EmpiricalLossFn(pca_objective, pca_grad)
    objective = pca_loss.partial_eval_objective_full(X)
    gradients = [pca_loss.partial_eval_gradient_streaming(x) for x in X]
    shuffled_gradients = np.random.permutation(gradients)

    w_0 = np.random.rand(*X[0].shape)
    w_0 = w_0 / np.linalg.norm(w_0)

    sgd = ShamirStochasticGradientMethod(w_0, lambda x: 1e-3 / (x + 1))
    sgd.train(np.random.permutation(gradients), False)
    sgd.train(np.random.permutation(gradients), False)
    # The principal direction is only determined up to sign, so accept either.
    assert min(np.linalg.norm(sgd.w - w_star),
               np.linalg.norm(sgd.w + w_star)) < 0.01
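A note on the reference vector: np.linalg.svd(X)[-1][0] is the first row of Vt, i.e. the leading right singular vector of X. That is the unit vector maximizing w.T.dot(X.T.dot(X)).dot(w), which is exactly what the (uncentered) PCA objective above tries to recover. A tiny standalone check on synthetic data, not part of the test:

import numpy as np

A = np.random.randn(200, 8)
v_top = np.linalg.svd(A)[-1][0]              # leading right singular vector of A
vals, vecs = np.linalg.eigh(A.T.dot(A))      # eigenvalues in ascending order
lead = vecs[:, -1]                           # eigenvector of the largest eigenvalue
# The two directions agree up to sign, so the absolute inner product is ~1.
assert abs(v_top.dot(lead)) > 1 - 1e-6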
Example #3
def hinge_loss_test():
    def objective(w, x):
        y = x[-1]
        x_ = x[:-1]
        return max(0, 1 - x_.dot(w) * y)

    def gradient_fn(w, x):
        y = x[-1]
        x_ = x[:-1]
        if y * w.dot(x_) < 1:
            return -x_ * y
        else:
            return np.zeros(x_.shape)

    hinge_loss = EmpiricalLossFn(objective, gradient_fn)
    digits = sklearn.datasets.load_digits(n_class=2)
    data = digits.data
    labels = (digits.target * 2) - 1
    X = np.hstack((data, labels[np.newaxis].T))
    objective = hinge_loss.partial_eval_objective_full(X)
    gradients = [hinge_loss.partial_eval_gradient_streaming(x) for x in X]
    sgd = BasicStochasticGradientMethod(np.zeros(data[0].shape), lambda x: 1)

    # Do 3 passes JIC
    sgd.train(gradients)
    sgd.train(gradients)
    sgd.train(gradients)

    # Zero hinge loss: every sample has margin >= 1, and all 360 two-class
    # digit samples are classified correctly.
    assert objective(sgd.w) == 0
    assert sum(np.sign(data.dot(sgd.w)) == labels) == 360
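The BasicStochasticGradientMethod used here is also external to this page. The following is a minimal sketch of plain SGD that is consistent with how it is called above (an initial iterate and a step-size schedule in the constructor, train() taking a list of partially evaluated gradient functions, and the current iterate exposed as .w); the actual class may look different.

import numpy as np

class BasicStochasticGradientMethod(object):
    """Plain SGD over a stream of partially evaluated gradient functions (sketch)."""

    def __init__(self, w_0, step_fn, iteration_logger=None):
        self.w = np.array(w_0, dtype=float)
        self.step_fn = step_fn
        self.iteration_logger = iteration_logger
        self._train_iter = 0

    def train(self, gradients):
        # Each element of `gradients` is a function of w only.
        for grad in gradients:
            self.w = self.w - self.step_fn(self._train_iter) * grad(self.w)
            self._train_iter += 1
            if self.iteration_logger is not None:
                self.iteration_logger(self)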
Example #4
def shamirPCA():
    digits = sklearn.datasets.load_digits(n_class=2)
    X = digits.data
    w_star = np.linalg.svd(X)[-1][0]

    def pca_grad(w, x):
        x = x[np.newaxis].T
        return -x.dot(x.T).dot(w)

    def pca_objective(w, x):
        x = x[np.newaxis].T
        return -w.dot(x.dot(x.T)).dot(w)

    pca_loss = EmpiricalLossFn(pca_objective, pca_grad)
    objective = pca_loss.partial_eval_objective_full(X)
    gradients = [pca_loss.partial_eval_gradient_streaming(x) for x in X]
    shuffled_gradients = np.random.permutation(gradients)
    w_0 = np.random.rand(*X[0].shape)
    w_0 = w_0 / np.linalg.norm(w_0)

    loss_per_iter = []
    power_loss_per_iter = []

    def iteration_logger(sgd):
        print "Sgd Iteration: {0}, Loss: {1}".format(sgd._train_iter,
                                                     objective(sgd.w))
        loss_per_iter.append(objective(sgd.w))

    sgd = ShamirStochasticGradientMethod(w_0, lambda x: 1e-3 / (x + 1),
                                         iteration_logger)
    sgd.train(np.random.permutation(gradients))

    # Compare against true power iteration on X^T X over the same number of steps.
    w_p = np.random.rand(*X[0].shape)
    w_p = w_p / np.linalg.norm(w_p)
    for i in range(len(loss_per_iter)):
        w_p = X.T.dot(X.dot(w_p))
        w_p = w_p / np.linalg.norm(w_p)
        print("Power Iteration: {0}, Loss: {1}".format(i, objective(w_p)))
        power_loss_per_iter.append(objective(w_p))

    print(power_loss_per_iter)
    plt.figure()
    plt.plot(loss_per_iter, label="SGD")
    plt.plot(power_loss_per_iter, label="power iteration")
    plt.legend()
    plt.show()
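ShamirStochasticGradientMethod is likewise not defined on this page. Since the PCA objective used here is unbounded below unless w is constrained, the method presumably renormalizes the iterate to unit length after every step. A rough sketch under that assumption, with the second argument to train() read as a shuffle flag; none of this is the project's actual code.

import numpy as np

class ShamirStochasticGradientMethod(object):
    """SGD with a projection back onto the unit sphere after every step (sketch)."""

    def __init__(self, w_0, step_fn, iteration_logger=None):
        self.w = np.array(w_0, dtype=float)
        self.step_fn = step_fn
        self.iteration_logger = iteration_logger
        self._train_iter = 0

    def train(self, gradients, shuffle=True):
        order = np.random.permutation(len(gradients)) if shuffle else range(len(gradients))
        for i in order:
            self.w = self.w - self.step_fn(self._train_iter) * gradients[i](self.w)
            self.w = self.w / np.linalg.norm(self.w)   # stay on the unit sphere
            self._train_iter += 1
            if self.iteration_logger is not None:
                self.iteration_logger(self)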
Example #5
    def __init__(self, numComponents, data, step_fn=lambda x: 1e-3 / (x + 1)):
        w_0 = np.random.rand(*data[0].shape)
        w_0 = w_0/np.linalg.norm(w_0)
        self.step_fn = step_fn
        # Vector subtracted from every sample in the loss below; it starts at zero
        # and, as written, is never updated between components.
        sub = np.zeros_like(w_0)
        pc = []
        losses = []
        for i in range(numComponents):
            def pca_grad(w, x):
                # sub has shape (d,), so lift it to a column to match x.
                x = x[np.newaxis].T - sub[np.newaxis].T
                return -x.dot(x.T).dot(w)

            def pca_objective(w, x):
                x = x[np.newaxis].T - sub[np.newaxis].T
                return -w.dot(x.dot(x.T)).dot(w)

            w_star = np.linalg.svd(data)[-1][0]
            self.pca_grad = pca_grad
            self.pca_objective = pca_objective
            pca_loss = EmpiricalLossFn(pca_objective, pca_grad)
            objective = pca_loss.partial_eval_objective_full(data)
            target_obj = objective(w_star)
            loss_per_iter = []
            def iteration_logger(sgd):
                current_objective = objective(sgd.w)
                loss_per_iter.append(current_objective)

            sgd = ShamirStochasticGradientMethod(w_0, self.step_fn, iteration_logger)
            gradients = [pca_loss.partial_eval_gradient_streaming(x) for x in data]
            sgd.train(gradients)
            pc.append(sgd.w)
            losses.append(loss_per_iter)
        self.pca_mat = np.array(pc)
        self.losses = losses

        print(self.pca_mat.shape)
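One caveat about this constructor: since `sub` never changes inside the loop, every pass optimizes the same leading-component objective, so pca_mat ends up holding numComponents copies of roughly the same direction. If distinct components are wanted, the usual fix is deflation: after each component is found, remove its projection from every sample before fitting the next one. A small standalone sketch of that idea, not this class's actual code:

import numpy as np

def deflate(data, w):
    """Remove each sample's projection onto the unit vector w."""
    X = np.asarray(data, dtype=float)
    w = w / np.linalg.norm(w)
    return X - np.outer(X.dot(w), w)

# Usage sketch: after the k-th component w_k has been trained,
#     data = deflate(data, w_k)
# and the next component is fitted on the deflated data.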