def test_logregAdmm():
    """Compare the ADMM logistic-regression solver against the reference solver.

    Builds a synthetic sparse logistic-regression problem, runs both
    ``logregAdmm`` and ``logreg`` on it, and checks that the two solutions
    classify (almost) the same number of training points correctly.
    Optionally plots convergence curves when the module-level ``PLOT``
    flag is set.
    """
    np.random.seed(0)  # deterministic fixture (was numpy.random.seed; use the np alias consistently)
    n = 10   # number of features
    m = 10   # examples per block
    N = 5    # number of blocks (ADMM splitting)

    # Ground-truth sparse weights w and intercept v.
    w = sprandn(n, 1, .5).todense()
    v = np.random.randn(1, 1)
    X0 = sprandn(m * N, n, .1)
    btrue = np.sign(X0 * w + v)
    x_true = np.concatenate((v, w))

    # Noisy labels, and the label-scaled data matrix A0 = diag(b0) * X0.
    b0 = np.sign(X0 * w + v + np.sqrt(.1) * np.random.randn(m * N, 1))
    A0 = (spdiags(b0.transpose(), 0, m * N, m * N) * X0).todense()
    A0_ones = np.concatenate((np.ones([A0.shape[0], 1]), A0), axis=1)

    # Regularization weight mu, scaled by the class balance of the labels.
    # np.Inf was removed in NumPy 2.0 -- use np.inf.
    ratio = 1.0 * np.sum(b0 == 1) / (m * N)
    mu = .1 * 1 / (m * N) * norm(
        (1 - ratio) * np.sum(A0[b0 == 1, :], 1)
        + ratio * np.sum(A0[b0 == -1, :], 1), np.inf)

    (x_admm, history_admm, sln) = logregAdmm(A0, b0, mu, N, 10.0, .5)
    (x, history) = logreg(A0, b0, mu, N, 10.0, .5)

    # Compare classification scores rather than raw objective values.
    score_admm = np.sum(A0_ones.dot(x_admm) > 0)
    score_compare = np.sum(A0_ones.dot(x[:, 0]) > 0)
    assert_almost_equal(score_admm, score_compare, delta=5)
    # assert_almost_equal(history.obj[-1], history_admm.obj[-1], places=1)

    if PLOT:
        K = len(history.obj)
        # pyplot.hold() was removed in Matplotlib 3.0; axes hold by default,
        # so the hold(True) calls are simply dropped.
        from pylab import figure, plot, ylabel, xlabel, subplot, semilogy, show
        h = figure()
        plot(np.arange(K), history.obj, 'k', markersize=10, linewidth=2)
        plot(np.arange(K), history_admm.obj, 'r', markersize=10, linewidth=2)
        ylabel('f(x^k) + g(x^k)')
        xlabel('iter (k)')

        g = figure()
        subplot(2, 1, 1)
        semilogy(np.arange(K), np.maximum(1e-8, history.r), 'k')
        semilogy(np.arange(K), np.maximum(1e-8, history_admm.r), 'r')
        semilogy(np.arange(K), history.eps_p, 'k--', linewidth=2)
        ylabel('||r||_2')
        xlabel('iter (k)')
        subplot(2, 1, 2)
        semilogy(np.arange(K), np.maximum(1e-8, history.s), 'k')
        semilogy(np.arange(K), history.eps_d, 'k--', linewidth=2)
        ylabel('||s||_2')
        xlabel('iter (k)')
        show()
#import matplotlib.pyplot as plt import matplotlib.pyplot as plt import time from theano import function, shared, Out import theano.tensor as T N = 1000 p = float32(0.1) g = float32(1.5) alpha = float32(1.0) nsecs = 100 #1440 dt = float32(0.1) learn_every = 2 scale = 1.0/math.sqrt(p*N) M = sprandn(N,N,p)*g*scale M = float32(M.todense()) nRec2Out = N; simtime = float32(arange(0,nsecs-dt,dt)) simtime_len = len(simtime) simtime2 = float32(arange(1*nsecs, 2*nsecs-dt, dt)) wo_len = float32(zeros(simtime_len)) zt = float32(zeros(simtime_len)) zpt = float32(zeros(simtime_len)) x0 = float32(0.5*random.randn(N, 1)) z0 = float32(0.5*random.randn(1, 1)) P0 = float32((1.0/alpha)*eye(nRec2Out))
""" from sprandn import sprandn import numpy as np from l1_cvx_gurobi import l1_cvx_gurobi from l1_cvx_mosek import l1_cvx_mosek from l1_gurobi import l1_gurobi from l1_mosek import l1_mosek from project_gradient import project_gradient from subgradient import subgradient #%% m = 512 n = 1024 mu = 1e-3 A = np.random.randn(m,n) u = sprandn(n,1,0.1).todense() b = np.dot(A,u) #%% l1_cvx_gurobi(n,A,b,mu) #%% l1_cvx_mosek(n,A,b,mu) #%% Q =np.dot(np.matrix(A).T,np.matrix(A)) c = np.dot(np.matrix(A).T,b) u = (np.matrix(b).T * b)[0,0] #%% l1_gurobi(n,c,Q,u,mu) #%% l1_mosek(n,c,Q,u,mu) #%%
@author: hadoop
"""
# Experiment script for l1-regularized least squares: builds a random
# instance and defines the objective and its (smooth-part) gradient.
import matplotlib.pylab as plt
import numpy as np
from sprandn import sprandn
from l1_cvx_mosek import l1_cvx_mosek
#%%
# NOTE(review): this cell uses n, A, b, mu BEFORE they are defined in the
# cell below. It only works when the #%% cells are executed interactively
# out of order; running the file top-to-bottom raises NameError.
l1_cvx_mosek(n, A, b, mu)
#%%
# Random instance: A is m x n, ground-truth signal u is 10% dense.
m = 1024
n = 512
mu = 1e-3
A = np.matrix(np.random.randn(m, n))
u = np.matrix(sprandn(n, 1, 0.1).todense())
b = np.matrix(np.dot(A, u))
#%%
def f_x(A_h, b, mu, x):
    # Objective value.
    # NOTE(review): as written this is 0.5*||A_h@x - b||_2 + mu*sum(x) --
    # the sqrt makes the first term the UNsquared 2-norm, and sum(x) is
    # not sum(|x|). Confirm whether the intended objective is
    # 0.5*||Ax-b||^2 + mu*||x||_1 (which is what f_gra below differentiates,
    # modulo the missing abs).
    return 0.5 * np.sqrt(np.sum(
        np.square(np.dot(A_h, x) - b))) + mu * np.sum(x)
def f_gra(A_h, b, mu, x):
    # Gradient of 0.5*||A_h@x - b||^2 + mu*sum(x).
    # NOTE(review): the ones vector is hard-coded to length 1024, which does
    # not match n = 512 defined above (x should be n x 1) -- likely should be
    # np.ones(x.shape). Also inconsistent with f_x, which includes a sqrt.
    return np.dot(np.dot(A_h.T, A_h), x) - np.dot(A_h.T, b) + mu * np.ones(
        (1024, 1))
def pro_box(x):