def test_prox_multi(self):
    """...Test of ProxMulti
    """
    coeffs = self.coeffs.copy().astype(self.dtype)
    double_coeffs = np.concatenate([coeffs, coeffs]).astype(self.dtype)
    half_size = coeffs.shape[0]
    full_size = double_coeffs.shape[0]

    l_tv = 0.5
    t = 1.7
    prox_tv = ProxTV(strength=l_tv).astype(self.dtype)
    prox_tv_multi = ProxTV(strength=l_tv,
                           range=(0, half_size)).astype(self.dtype)

    l_enet = 3e-2
    ratio = .3
    prox_enet = ProxElasticNet(l_enet, ratio=ratio).astype(self.dtype)
    prox_enet_multi = ProxElasticNet(
        l_enet, ratio=ratio, range=(half_size, full_size)).astype(self.dtype)

    prox_multi = ProxMulti((prox_tv_multi, prox_enet_multi))

    # Test that the value of the prox is correct
    val_multi = prox_multi.value(double_coeffs)
    val_correct = prox_enet.value(coeffs) + prox_tv.value(coeffs)
    self.assertAlmostEqual(val_multi, val_correct,
                           places=self.decimal_places)

    # Test that the prox is correct
    out1 = prox_tv.call(coeffs, step=t)
    out2 = prox_enet.call(coeffs, step=t)
    out_correct = np.concatenate([out1, out2])
    out_multi = prox_multi.call(double_coeffs, step=t)
    np.testing.assert_almost_equal(out_multi, out_correct)

    # An example with overlapping coefficients
    start1 = 5
    end1 = 13
    start2 = 10
    end2 = 17
    prox_tv = ProxTV(strength=l_tv, range=(start1, end1)).astype(self.dtype)
    prox_enet = ProxElasticNet(strength=l_enet, ratio=ratio,
                               range=(start2, end2)).astype(self.dtype)
    prox_multi = ProxMulti((prox_tv, prox_enet))

    val_correct = prox_tv.value(double_coeffs)
    val_correct += prox_enet.value(double_coeffs)
    val_multi = prox_multi.value(double_coeffs)
    self.assertAlmostEqual(val_multi, val_correct)

    out_correct = prox_tv.call(double_coeffs)
    out_correct = prox_enet.call(out_correct)
    out_multi = prox_multi.call(double_coeffs)
    np.testing.assert_almost_equal(out_multi, out_correct)
def test_ProxElasticNet(self):
    """...Test of ProxElasticNet
    """
    coeffs = self.coeffs.copy()

    l_enet = 3e-2
    ratio = .3
    t = 1.7
    prox_enet = ProxElasticNet(l_enet, ratio=ratio)
    prox_l1 = ProxL1(ratio * l_enet)
    prox_l2 = ProxL2Sq((1 - ratio) * l_enet)

    self.assertAlmostEqual(
        prox_enet.value(coeffs),
        prox_l1.value(coeffs) + prox_l2.value(coeffs), delta=1e-15)

    out = coeffs.copy()
    prox_l1.call(out, t, out)
    prox_l2.call(out, t, out)
    assert_almost_equal(prox_enet.call(coeffs, step=t), out, decimal=10)

    prox_enet = ProxElasticNet(l_enet, ratio=ratio, positive=True)
    prox_l1 = ProxL1(ratio * l_enet, positive=True)
    prox_l2 = ProxL2Sq((1 - ratio) * l_enet, positive=True)

    self.assertAlmostEqual(
        prox_enet.value(coeffs),
        prox_l1.value(coeffs) + prox_l2.value(coeffs), delta=1e-15)

    out = coeffs.copy()
    prox_l1.call(out, t, out)
    prox_l2.call(out, t, out)
    assert_almost_equal(prox_enet.call(coeffs, step=t), out, decimal=10)
def compare_solver_sdca(self):
    """...Compare SDCA solution with SVRG solution
    """
    np.random.seed(12)
    n_samples = Test.n_samples
    n_features = Test.n_features

    for fit_intercept in [True, False]:
        y, X, coeffs0, interc0 = TestSolver.generate_logistic_data(
            n_features, n_samples)

        model = ModelLogReg(fit_intercept=fit_intercept).fit(X, y)
        ratio = 0.5
        l_enet = 1e-2

        # SDCA "elastic-net" formulation is different from elastic-net
        # implementation
        l_l2_sdca = ratio * l_enet
        l_l1_sdca = (1 - ratio) * l_enet
        sdca = SDCA(l_l2sq=l_l2_sdca, max_iter=100, verbose=False, tol=0,
                    seed=Test.sto_seed).set_model(model)
        prox_l1 = ProxL1(l_l1_sdca)
        sdca.set_prox(prox_l1)
        coeffs_sdca = sdca.solve()

        # Compare with SVRG
        svrg = SVRG(max_iter=100, verbose=False, tol=0,
                    seed=Test.sto_seed).set_model(model)
        prox_enet = ProxElasticNet(l_enet, ratio)
        svrg.set_prox(prox_enet)
        coeffs_svrg = svrg.solve(step=0.1)

        np.testing.assert_allclose(coeffs_sdca, coeffs_svrg)
def test_dense_and_sparse_match(self):
    """...Test in SVRG that dense and sparse code matches in all
    possible settings
    """
    variance_reductions = ['last', 'rand']
    rand_types = ['perm', 'unif']
    seed = 123
    tol = 0.
    max_iter = 50
    n_samples = 500
    n_features = 20

    # Crazy prox examples
    proxs = [
        ProxTV(strength=1e-2, range=(5, 13),
               positive=True).astype(self.dtype),
        ProxElasticNet(strength=1e-2, ratio=0.9).astype(self.dtype),
        ProxEquality(range=(0, n_features)).astype(self.dtype),
        ProxL1(strength=1e-3, range=(5, 17)).astype(self.dtype),
        ProxL1w(strength=1e-3, weights=np.arange(5, 17, dtype=np.double),
                range=(5, 17)).astype(self.dtype),
    ]

    for intercept in [-1, None]:
        X, y = self.simu_linreg_data(dtype=self.dtype, interc=intercept,
                                     n_features=n_features,
                                     n_samples=n_samples)
        fit_intercept = intercept is not None
        model_dense, model_spars = self.get_dense_and_sparse_linreg_model(
            X, y, dtype=self.dtype, fit_intercept=fit_intercept)
        step = 1 / model_spars.get_lip_max()

        for variance_reduction, rand_type, prox in product(
                variance_reductions, rand_types, proxs):
            solver_sparse = SVRG(step=step, tol=tol, max_iter=max_iter,
                                 verbose=False,
                                 variance_reduction=variance_reduction,
                                 rand_type=rand_type, seed=seed)
            solver_sparse.set_model(model_spars).set_prox(prox)

            solver_dense = SVRG(step=step, tol=tol, max_iter=max_iter,
                                verbose=False,
                                variance_reduction=variance_reduction,
                                rand_type=rand_type, seed=seed)
            solver_dense.set_model(model_dense).set_prox(prox)

            solver_sparse.solve()
            solver_dense.solve()

            places = 7
            if self.dtype == "float32":
                places = 3
            np.testing.assert_array_almost_equal(solver_sparse.solution,
                                                 solver_dense.solution,
                                                 decimal=places)
def test_asaga_solver(self):
    """...Check ASAGA solver for a Logistic Regression with Elastic net
    penalization
    """
    seed = 1398
    np.random.seed(seed)
    n_samples = 4000
    n_features = 30
    weights = weights_sparse_gauss(n_features, nnz=3).astype(self.dtype)
    intercept = 0.2
    penalty_strength = 1e-3
    sparsity = 1e-4
    features = sparse.rand(n_samples, n_features, density=sparsity,
                           format='csr', random_state=8).astype(self.dtype)

    simulator = SimuLogReg(weights, n_samples=n_samples, features=features,
                           verbose=False, intercept=intercept,
                           dtype=self.dtype)
    features, labels = simulator.simulate()

    model = ModelLogReg(fit_intercept=True)
    model.fit(features, labels)
    prox = ProxElasticNet(penalty_strength, ratio=0.1,
                          range=(0, n_features))
    solver_step = 1. / model.get_lip_max()

    saga = SAGA(step=solver_step, max_iter=100, tol=1e-10, verbose=False,
                n_threads=1, record_every=10, seed=seed)
    saga.set_model(model).set_prox(prox)
    saga.solve()

    asaga = SAGA(step=solver_step, max_iter=100, tol=1e-10, verbose=False,
                 n_threads=2, record_every=10, seed=seed)
    asaga.set_model(model).set_prox(prox)
    asaga.solve()

    np.testing.assert_array_almost_equal(saga.solution, asaga.solution,
                                         decimal=4)
    self.assertGreater(np.linalg.norm(saga.solution[:-1]), 0)
def test_solver_gfb(self):
    """...Check GFB's solver for a Logistic Regression with ElasticNet
    penalization

    Notes
    -----
    Using the GFB solver with separate l1 and l2 penalizations is
    obviously a bad idea, since the ElasticNet prox is meant to do
    exactly this, but it allows us to compare GFB with another algorithm.
    """
    n_samples = 200
    n_features = 10
    y, X, w, c = TestSolver.generate_logistic_data(n_features=n_features,
                                                   n_samples=n_samples,
                                                   dtype=self.dtype)
    strength = 1e-3
    ratio = 0.3
    prox_elasticnet = ProxElasticNet(strength, ratio).astype(self.dtype)
    prox_l1 = ProxL1(strength * ratio).astype(self.dtype)
    prox_l2 = ProxL2Sq(strength * (1 - ratio)).astype(self.dtype)

    # First we get the GFB solution with prox l1 and prox l2
    gfb = GFB(tol=1e-13, max_iter=1000, verbose=False, step=1)
    TestSolver.prepare_solver(gfb, X, y, prox=None)
    gfb.set_prox([prox_l1, prox_l2])
    gfb_solution = gfb.solve()

    # Then we get the AGD solution with prox ElasticNet
    agd = AGD(tol=1e-13, max_iter=1000, verbose=False, step=0.5,
              linesearch=False)
    TestSolver.prepare_solver(agd, X, y, prox=prox_elasticnet)
    agd_solution = agd.solve()

    # Finally we assert that both algorithms lead to the same solution
    np.testing.assert_almost_equal(gfb_solution, agd_solution, decimal=1)
n_samples = 40000
n_features = 20000
sparsity = 1e-4
penalty_strength = 1e-5

weights = weights_sparse_gauss(n_features, nnz=1000)
intercept = 0.2
features = sparse.rand(n_samples, n_features, density=sparsity,
                       format='csr')

simulator = SimuLogReg(weights, n_samples=n_samples, features=features,
                       verbose=False, intercept=intercept)
features, labels = simulator.simulate()

model = ModelLogReg(fit_intercept=True)
model.fit(features, labels)
prox = ProxElasticNet(penalty_strength, ratio=0.5, range=(0, n_features))
svrg_step = 1. / model.get_lip_max()

test_n_threads = [1, 2, 4]

fig, axes = plt.subplots(1, 2, figsize=(8, 4))

for ax, SolverClass in zip(axes, [SVRG, SAGA]):
    solver_list = []
    solver_labels = []

    for n_threads in test_n_threads:
        solver = SolverClass(step=svrg_step, seed=seed, max_iter=50,
                             verbose=False, n_threads=n_threads, tol=0,
                             record_every=3)
        solver.set_model(model).set_prox(prox)
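        # A minimal sketch of how the example could continue (assumptions:
        # `plot_history` is imported from tick.plot and accepts the labels,
        # ax, dist_min, log_scale and show arguments; the label format is
        # illustrative): each solver is solved and collected, then the
        # convergence histories are compared per thread count.
        solver.solve()
        solver_list.append(solver)
        solver_labels.append('{} {} threads'.format(SolverClass.__name__,
                                                    n_threads))

    plot_history(solver_list, labels=solver_labels, ax=ax, dist_min=True,
                 log_scale=True, show=False)
    ax.set_title(SolverClass.__name__)

plt.show()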
import numpy as np

from cycler import cycler

from tick.simulation import weights_sparse_gauss
from tick.solver import SVRG
from tick.linear_model import SimuLogReg, ModelLogReg
from tick.prox import ProxElasticNet
from tick.plot import plot_history

n_samples, n_features = 5000, 50
weights0 = weights_sparse_gauss(n_features, nnz=10)
intercept0 = 0.2

X, y = SimuLogReg(weights=weights0, intercept=intercept0,
                  n_samples=n_samples, seed=123, verbose=False).simulate()

model = ModelLogReg(fit_intercept=True).fit(X, y)
prox = ProxElasticNet(strength=1e-3, ratio=0.5, range=(0, n_features))

x0 = np.zeros(model.n_coeffs)
optimal_step = 1 / model.get_lip_max()
tested_steps = [optimal_step, 1e-2 * optimal_step, 10 * optimal_step]

solvers = []
solver_labels = []

for step in tested_steps:
    svrg = SVRG(max_iter=30, tol=1e-10, verbose=False)
    svrg.set_model(model).set_prox(prox)
    svrg.solve(step=step)

    svrg_bb = SVRG(max_iter=30, tol=1e-10, verbose=False, step_type='bb')
    svrg_bb.set_model(model).set_prox(prox)
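    # A minimal sketch of how the comparison could continue (assumptions:
    # the Barzilai-Borwein variant is started from x0, and the label text
    # is illustrative): solve svrg_bb, collect both solvers with labels,
    # then plot the convergence histories with plot_history.
    svrg_bb.solve(x0, step=step)

    solvers += [svrg, svrg_bb]
    solver_labels += ['SVRG, step={:.2g}'.format(step),
                      'SVRG-BB, init. step={:.2g}'.format(step)]

plot_history(solvers, labels=solver_labels, dist_min=True, log_scale=True)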
import numpy as np
import matplotlib.pyplot as plt

from tick.prox import ProxL1, ProxElasticNet, ProxL2Sq, \
    ProxPositive, ProxSlope, ProxTV, ProxZero, ProxBinarsity, ProxGroupL1, \
    ProxEquality, ProxL1w

np.random.seed(12)
x = np.random.randn(50)
a, b = x.min() - 1e-1, x.max() + 1e-1
s = 0.4

proxs = [
    ProxZero(),
    ProxPositive(),
    ProxL2Sq(strength=s),
    ProxL1(strength=s),
    ProxElasticNet(strength=s, ratio=0.5),
    ProxSlope(strength=s),
    ProxTV(strength=s),
    ProxEquality(range=(25, 40)),
    ProxL1w(strength=s, weights=0.1 * np.arange(50, dtype=np.double)),
    ProxGroupL1(strength=2 * s, blocks_start=np.arange(0, 50, 10),
                blocks_length=10 * np.ones((5, ))),
    ProxBinarsity(strength=s, blocks_start=np.arange(0, 50, 10),
                  blocks_length=10 * np.ones((5, )))
]

fig, _ = plt.subplots(3, 4, figsize=(16, 12), sharey=True, sharex=True)
fig.axes[0].stem(x)
fig.axes[0].set_title("original vector", fontsize=16)
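# A minimal sketch of how the figure could be completed (assumptions: the
# axis limits and the use of the class name as subplot title are
# illustrative): apply each prox to x with prox.call and stem-plot the
# result in the remaining subplots, reusing the (a, b) range computed above.
fig.axes[0].set_xlim((-1, 50))
fig.axes[0].set_ylim((a, b))

for i, prox in enumerate(proxs):
    ax = fig.axes[i + 1]
    ax.stem(prox.call(x))
    ax.set_title(prox.__class__.__name__, fontsize=16)
    ax.set_xlim((-1, 50))
    ax.set_ylim((a, b))

plt.tight_layout()
plt.show()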