import numpy as np
import pylab as pl
import regreg.api as rr

def example3(lambda1=10):
    # Example using a smooth approximation to the non-negativity constraint.
    # On large problems this might be faster than using the actual constraint.
    n = 100
    p = 1000
    X = np.random.standard_normal(n * p).reshape((n, p))
    Y = 10 * np.random.standard_normal(n)

    # smoothed version: replace the non-negativity constraint by its smoothed atom
    loss = rr.l2normsq.affine(X, -Y, coef=1.)
    sparsity = rr.l1norm(p, lagrange=lambda1)
    nonnegative = rr.nonnegative(p)
    smooth_nonnegative = rr.smoothed_atom(nonnegative, epsilon=1e-4)
    problem = rr.container(loss, sparsity, smooth_nonnegative)
    solver = rr.FISTA(problem)
    solver.fit(debug=True)
    solution1 = solver.composite.coefs

    # exact version: use the non-negativity constraint itself
    loss = rr.l2normsq.affine(X, -Y, coef=1.)
    sparsity = rr.l1norm(p, lagrange=lambda1)
    nonnegative = rr.nonnegative(p)
    problem = rr.container(loss, sparsity, nonnegative)
    solver = rr.FISTA(problem)
    solver.fit(debug=True)
    solution2 = solver.composite.coefs

    # compare the two solutions
    pl.subplot(1, 2, 1)
    pl.hist(solution1, bins=40)
    pl.subplot(1, 2, 2)
    pl.scatter(solution2, solution1)
    pl.xlabel("Constraint")
    pl.ylabel("Smooth constraint")
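For reference, the smoothed non-negativity constraint used in example3 is, assuming the standard Nesterov construction in which the conjugate variable is penalized by $(\epsilon/2)\|u\|_2^2$ (regreg's exact scaling is not verified here), just a quadratic penalty on the negative parts of the coefficients:

$$ I_{\{x \ge 0\}}(x) \;\approx\; \frac{1}{2\epsilon} \sum_i \min(x_i, 0)^2 . $$

This function is differentiable everywhere, so it can be handed to the solver as part of the smooth objective, and it recovers the exact constraint as $\epsilon \to 0$, which is why the scatter plot of the two solutions should fall close to the diagonal.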
import numpy as np
import scipy.linalg
import pylab

import regreg.api as R
from regreg.smooth import linear

# simulate a sparse signal
X = np.random.standard_normal((500, 1000))
beta = np.zeros(1000)
beta[:100] = 3 * np.sqrt(2 * np.log(1000))
Y = np.random.standard_normal((500,)) + np.dot(X, beta)

# largest eigenvalue of X^T X, used below to set the Lipschitz constant
Xnorm = scipy.linalg.eigvalsh(np.dot(X.T, X), eigvals=(998, 999)).max()

# smoothed l-infinity constraint, composed with -X^T
smooth_linf_constraint = R.smoothed_atom(R.maxnorm(1000, bound=1),
                                         epsilon=0.01,
                                         store_argmin=True)
loss = R.affine_smooth(smooth_linf_constraint, -X.T, None)
smooth_f = R.smooth_function(loss, linear(Y))

norm_Y = np.linalg.norm(Y)
l2_constraint_value = np.sqrt(0.1) * norm_Y
l2_lagrange = R.l2norm(500, lagrange=l2_constraint_value)

basis_pursuit = R.container(smooth_f, l2_lagrange)
solver = R.FISTA(basis_pursuit.composite(initial=np.random.standard_normal(500)))
tol = 1.0e-08

# anneal the smoothing parameter, updating the Lipschitz bound each time
for epsilon in [0.6**i for i in range(20)]:
    smooth_linf_constraint.epsilon = epsilon
    solver.composite.lipschitz = 1.1 / epsilon * Xnorm
import numpy as np
import pylab
from scipy import sparse

import regreg.api as R

# generate the data
Y = np.random.standard_normal(500)
Y[100:150] += 7
Y[250:300] += 14

loss = R.quadratic.shift(-Y, coef=1)
sparsity = R.l1norm(len(Y), lagrange=1.8)

# fused penalty: l1 norm of first differences
D = (np.identity(500) + np.diag([-1] * 499, k=1))[:-1]
D = sparse.csr_matrix(D)
fused = R.l1norm.linear(D, lagrange=25.5)

# smooth both seminorms
smoothed_sparsity = R.smoothed_atom(sparsity, epsilon=0.01)
smoothed_fused = R.smoothed_atom(fused, epsilon=0.01)

problem = R.container(loss, smoothed_sparsity, smoothed_fused)
solver = R.FISTA(problem)

# solve for a decreasing sequence of smoothing parameters
solns = []
for eps in [0.5**i for i in range(15)]:
    smoothed_fused.epsilon = smoothed_sparsity.epsilon = eps
    solver.fit()
    solns.append(solver.composite.coefs.copy())
pylab.plot(solns[-1])
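To see concretely what smoothing an $\ell_1$ atom does, here is a small pure-NumPy sketch. It is independent of regreg and assumes the standard Nesterov construction in which the conjugate variable is penalized by $(\epsilon/2)\|u\|_2^2$; regreg's exact scaling is not verified here. Under that convention the smoothed $\ell_1$ seminorm is a Huber-type function, quadratic near zero and linear beyond $\pm\,\mathrm{lagrange}\cdot\epsilon$, which is why shrinking epsilon in the loop above tightens the approximation to the original penalty.

import numpy as np

def smoothed_l1(x, lagrange=1.0, epsilon=0.01):
    # Nesterov-style smoothing of lagrange * ||x||_1:
    #   sup_{|u_i| <= lagrange} <x, u> - (epsilon / 2) * ||u||_2^2
    # which works out coordinate-wise to a Huber function.
    z = np.asarray(x, dtype=float)
    quad = z ** 2 / (2 * epsilon)                              # where |x_i| <= lagrange * epsilon
    lin = lagrange * np.abs(z) - epsilon * lagrange ** 2 / 2.  # elsewhere
    return np.where(np.abs(z) <= lagrange * epsilon, quad, lin).sum()

x = np.linspace(-3, 3, 7)
for eps in [1.0, 0.1, 0.01]:
    # the smoothed value approaches the exact l1 value from below as epsilon shrinks
    print(eps, smoothed_l1(x, lagrange=1.8, epsilon=eps), 1.8 * np.abs(x).sum())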
import numpy as np
import regreg.api as rr

N = 1000
P = 200

# simulate a two-class problem
Y = 2 * np.random.binomial(1, 0.5, size=(N,)) - 1.
X = np.random.standard_normal((N, P))
X[Y == 1] += np.array([30, -20] + (P - 2) * [0])[np.newaxis, :]
X -= X.mean(0)[np.newaxis, :]

# hinge loss: positive part of 1 - Y * (X beta + intercept)
X_1 = np.hstack([X, np.ones((N, 1))])
transform = rr.affine_transform(-Y[:, np.newaxis] * X_1, np.ones(N))
C = 0.2
hinge = rr.positive_part(N, lagrange=C)
hinge_loss = rr.linear_atom(hinge, transform)

# smooth the hinge loss so it can be treated as a differentiable objective
epsilon = 0.04
smoothed_hinge_loss = rr.smoothed_atom(hinge_loss, epsilon=epsilon)

# penalize the coefficients but not the intercept
s = rr.selector(slice(0, P), (P + 1,))
sparsity = rr.l1norm.linear(s, lagrange=3.)
quadratic = rr.quadratic.linear(s, coef=0.5)

from regreg.affine import power_L
ltransform = rr.linear_transform(X_1)
singular_value_sq = power_L(X_1)
# the other smooth piece is a quadratic with identity
# for quadratic form, so its Lipschitz constant is 1
lipschitz = 1.05 * singular_value_sq / epsilon + 1.1

problem = rr.container(quadratic, smoothed_hinge_loss, sparsity)
solver = rr.FISTA(problem)
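The Lipschitz constant above follows the usual smoothing bound (the factors 1.05 and 1.1 are safety margins, and regreg's exact smoothing convention is assumed rather than verified): an atom smoothed with parameter $\epsilon$ has an $O(1/\epsilon)$-Lipschitz gradient, and composing it with the linear map $X_1$ multiplies that constant by $\|X_1\|_{\mathrm{op}}^2$, which is what power_L estimates by power iteration (hence the name singular_value_sq). The ridge term contributes an additional 1, giving

$$ L \;\lesssim\; \frac{\|X_1\|_{\mathrm{op}}^2}{\epsilon} + 1 . $$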
import numpy as np
import scipy.linalg
import pylab

import regreg.api as R
from regreg.smooth import linear

X = np.random.standard_normal((500, 1000))
beta = np.zeros(1000)
beta[:100] = 3 * np.sqrt(2 * np.log(1000))
Y = np.random.standard_normal((500,)) + np.dot(X, beta)
Xnorm = scipy.linalg.eigvalsh(np.dot(X.T, X), eigvals=(998, 999)).max()

smooth_linf_constraint = R.smoothed_atom(R.supnorm(1000, bound=1),
                                         epsilon=0.01,
                                         store_argmin=True)
transform = R.linear_transform(-X.T)
loss = R.affine_smooth(smooth_linf_constraint, transform)

norm_Y = np.linalg.norm(Y)
l2_constraint_value = np.sqrt(0.1) * norm_Y
l2_lagrange = R.l2norm(500, lagrange=l2_constraint_value)

basis_pursuit = R.container(loss, linear(Y), l2_lagrange)
solver = R.FISTA(basis_pursuit)
tol = 1.0e-08

for epsilon in [0.6**i for i in range(20)]:
    smooth_linf_constraint.epsilon = epsilon
    solver.composite.lipschitz = 1.1 / epsilon * Xnorm
def graphnet_bound_smooth(self):
    return rr.smoothed_atom(self.graphnet_bound, epsilon=0.001)
def ridge_bound_smooth(self):
    return rr.smoothed_atom(self.ridge_bound, epsilon=0.001)
# take the Lagrange-form solution computed above and re-solve the problem
# in bound (constraint) form, using the attained norms as the bounds
solution = solver.composite.coefs
delta1 = np.fabs(D * solution).sum()
delta2 = np.fabs(solution).sum()

fused_constraint = rr.l1norm.linear(D, bound=delta1)
sparsity_constraint = rr.l1norm(500, bound=delta2)
constrained_problem = rr.container(loss, fused_constraint, sparsity_constraint)
constrained_solver = rr.FISTA(constrained_problem)
constrained_solver.composite.lipschitz = 1.01
vals = constrained_solver.fit(max_its=10, tol=1e-06, backtrack=False,
                              monotonicity_restart=False)
constrained_solution = constrained_solver.composite.coefs

# smooth the fused constraint and solve the constrained problem again
fused_constraint = rr.l1norm.linear(D, bound=delta1)
smoothed_fused_constraint = rr.smoothed_atom(fused_constraint, epsilon=1e-2)
smoothed_constrained_problem = rr.container(loss, smoothed_fused_constraint, sparsity_constraint)
smoothed_constrained_solver = rr.FISTA(smoothed_constrained_problem)
vals = smoothed_constrained_solver.fit(tol=1e-06)
smoothed_constrained_solution = smoothed_constrained_solver.composite.coefs

# compare the three fits
pylab.scatter(np.arange(Y.shape[0]), Y, c='red', label=r'$Y$')
pylab.plot(solution, c='yellow', linewidth=5, label='Lagrange')
pylab.plot(constrained_solution, c='green', linewidth=3, label='Constrained')
pylab.plot(smoothed_constrained_solution, c='black', linewidth=1, label='Smoothed')
pylab.legend()
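The bound values are not arbitrary: for a convex loss $f$, the Lagrange-form minimizer $\hat\beta$ is also a solution of the constrained problem whose bounds are set to the attained values, since any feasible point with a strictly smaller loss would contradict the optimality of $\hat\beta$ in the penalized problem. That is,

$$ \hat\beta \in \operatorname*{argmin}_{\;\|D\beta\|_1 \le \delta_1,\ \|\beta\|_1 \le \delta_2} f(\beta), \qquad \delta_1 = \|D\hat\beta\|_1, \quad \delta_2 = \|\hat\beta\|_1, $$

so, up to solver tolerance (the constrained solver above is only run for a few iterations), the constrained fit should track the Lagrange fit, and the smoothed fit differs only through the smoothing of the fused constraint.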