def test_sdca_identity_poisreg(self):
    """...Test SDCA on specific case of Poisson regression with
    identity link
    """
    l_l2sq = 1e-3
    n_samples = 10000
    n_features = 3

    np.random.seed(123)
    weight0 = np.random.rand(n_features)
    features = np.random.rand(n_samples, n_features)

    # Run the same checks with and without an intercept
    for intercept in [None, 0.45]:
        if intercept is None:
            fit_intercept = False
        else:
            fit_intercept = True

        simu = SimuPoisReg(weight0, intercept=intercept,
                           features=features, n_samples=n_samples,
                           link='identity', verbose=False)
        features, labels = simu.simulate()

        model = ModelPoisReg(fit_intercept=fit_intercept, link='identity')
        model.fit(features, labels)

        sdca = SDCA(l_l2sq=l_l2sq, max_iter=100, verbose=False,
                    tol=1e-14, seed=Test.sto_seed)
        # SDCA handles the L2 penalty internally (l_l2sq), hence ProxZero
        sdca.set_model(model).set_prox(ProxZero())
        # Strictly positive starting dual vector; the sqrt(rand_max * l_l2sq)
        # scale presumably keeps the dual iterate feasible — TODO confirm
        start_dual = np.sqrt(sdca._rand_max * l_l2sq)
        start_dual = start_dual * np.ones(sdca._rand_max)

        sdca.solve(start_dual)

        # Check that duality gap is 0
        self.assertAlmostEqual(
            sdca.objective(sdca.solution),
            sdca.dual_objective(sdca.dual_solution))

        # Check that original vector is approximately retrieved
        if fit_intercept:
            original_coeffs = np.hstack((weight0, intercept))
        else:
            original_coeffs = weight0

        np.testing.assert_array_almost_equal(original_coeffs,
                                             sdca.solution, decimal=1)

        # Ensure that we solve the same problem as other solvers
        svrg = SVRG(max_iter=100, verbose=False, tol=1e-14,
                    seed=Test.sto_seed)
        # SVRG takes the L2 penalty as an explicit prox instead
        svrg.set_model(model).set_prox(ProxL2Sq(l_l2sq))
        svrg.solve(0.5 * np.ones(model.n_coeffs), step=1e-2)
        np.testing.assert_array_almost_equal(svrg.solution, sdca.solution,
                                             decimal=4)
def test_poisreg_sdca_rand_max(self):
    """...Test that SDCA's rand_max is correct depending on link type
    """
    labels = np.array([0, 1, 2, 0, 4], dtype=self.dtype)
    n_samples = len(labels)
    features = np.random.rand(n_samples, 3).astype(self.dtype)

    # Exponential link: no SDCA-specific rand_max is implemented
    exp_model = ModelPoisReg(link='exponential').fit(features, labels)
    with self.assertRaises(NotImplementedError):
        exp_model._sdca_rand_max
    self.assertEqual(exp_model._rand_max, 5)

    # Identity link: _sdca_rand_max is 3, matching the number of
    # non-zero labels, while _rand_max stays equal to n_samples
    id_model = ModelPoisReg(link='identity').fit(features, labels)
    self.assertEqual(id_model._sdca_rand_max, 3)
    self.assertEqual(id_model._rand_max, 5)
def prepare_solver(solver, X, y, fit_intercept=True, model="logistic",
                   prox="l2"):
    """Attach a fitted model and (optionally) a prox to ``solver``.

    ``model`` may be the string "logistic" or "poisson" (a matching model
    is then fitted on ``X``, ``y``) or any pre-built model object, which
    is passed through unchanged. ``prox`` may be the string "l2" (an
    ``ProxL2Sq`` with ``TestSolver.l_l2sq`` is built), a prox object, or
    ``None`` to skip setting a prox.
    """
    model_classes = {"logistic": ModelLogReg, "poisson": ModelPoisReg}
    chosen = model_classes.get(model)
    if chosen is not None:
        model = chosen(fit_intercept=fit_intercept).fit(X, y)
    solver.set_model(model)

    if prox == "l2":
        prox = ProxL2Sq(TestSolver.l_l2sq, (0, model.n_coeffs))
    if prox is not None:
        solver.set_prox(prox)
def create_model(model_type, n_samples, n_features, with_intercept=True):
    """Simulate a dataset for ``model_type`` and return a fitted model.

    Parameters
    ----------
    model_type : 'Linear' | 'Logistic' | 'Poisson'
        Kind of generalized linear model to simulate and build

    n_samples : `int`
        Number of samples to simulate

    n_features : `int`
        Number of features (model weights) to simulate

    with_intercept : `bool`, default=True
        If `True`, a random intercept is simulated and the model is
        fitted with an intercept

    Returns
    -------
    model : the corresponding ``Model*`` instance, fitted on the
        simulated data

    Raises
    ------
    ValueError
        If ``model_type`` is not one of the three supported names
        (previously this crashed with an unhelpful ``NameError``).
    """
    weights = np.random.randn(n_features)
    intercept = None
    if with_intercept:
        intercept = np.random.normal()

    if model_type == 'Poisson':
        # we need to rescale features to avoid overflows
        weights /= n_features
        if intercept is not None:
            intercept /= n_features

    if model_type == 'Linear':
        simulator = SimuLinReg(weights, intercept=intercept,
                               n_samples=n_samples, verbose=False)
    elif model_type == 'Logistic':
        simulator = SimuLogReg(weights, intercept=intercept,
                               n_samples=n_samples, verbose=False)
    elif model_type == 'Poisson':
        simulator = SimuPoisReg(weights, intercept=intercept,
                                n_samples=n_samples, verbose=False)
    else:
        raise ValueError("``model_type`` must be either 'Linear', "
                         "'Logistic' or 'Poisson'")

    # Simu*.simulate() returns (features, labels). The original code
    # unpacked them under swapped names and then swapped them back when
    # calling fit(), which was behavior-neutral but misleading; the
    # names are now correct and the call order unchanged.
    features, labels = simulator.simulate()

    if model_type == 'Linear':
        model = ModelLinReg(fit_intercept=with_intercept)
    elif model_type == 'Logistic':
        model = ModelLogReg(fit_intercept=with_intercept)
    else:
        model = ModelPoisReg(fit_intercept=with_intercept)

    model.fit(features, labels)
    return model
# NOTE(review): this fragment continues a plotting script; `model` and `x`
# are defined before the visible region — presumably the second subplot of
# a 1x3 figure of classification losses, given the subplot(1, 3, 3) below.
y = [model.loss(np.array([t])) for t in x]
plt.plot(x, y, lw=4, label=model.name)
plt.xlabel(r"$y y'$", fontsize=18)
plt.ylabel(r"$y y' \mapsto \ell(y, y')$", fontsize=18)
plt.title('Losses for binary classification', fontsize=20)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.legend(fontsize=13)

# Third subplot: Poisson regression losses for both link functions,
# plotted as a function of a single scalar prediction
plt.subplot(1, 3, 3)
n = 1000
x = np.linspace(-1.5, 2, n)
models = [
    ModelPoisReg(fit_intercept=False, link='exponential'),
    ModelPoisReg(fit_intercept=False, link='identity')
]
labels = ["ModelPoisReg(link='exponential')",
          "ModelPoisReg(link='identity')"]
for model, label in zip(models, labels):
    # Fit on a single sample (feature 1., label 1.) so the loss at a
    # scalar coefficient t is simply ell(1, t)
    model.fit(np.array([[1.]]), np.array([1.]))
    y = [model.loss(np.array([t])) for t in x]
    plt.plot(x, y, lw=4, label=label)
plt.xlabel(r"$y'$", fontsize=16)
plt.ylabel(r"$y' \mapsto \ell(1, y')$", fontsize=16)
plt.title('Losses for count data', fontsize=20)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.legend(fontsize=13)
def test_ModelPoisReg(self):
    """...Numerical consistency check of loss and gradient for Poisson
    Regression
    """
    np.random.seed(12)
    n_samples, n_features = 100, 10
    w0 = np.random.randn(n_features) / n_features
    c0 = np.random.randn() / n_features

    # First check with intercept
    X, y = SimuPoisReg(w0, c0, n_samples=n_samples, verbose=False,
                       seed=1234, dtype=self.dtype).simulate()
    # Rescale features since ModelPoisReg with exponential link
    # (default) is not overflow proof
    X /= n_features
    X_spars = csr_matrix(X, dtype=self.dtype)
    # Dense and sparse features must give consistent loss/gradient
    model = ModelPoisReg(fit_intercept=True).fit(X, y)
    model_sparse = ModelPoisReg(fit_intercept=True).fit(X_spars, y)
    self.run_test_for_glm(model, model_sparse)
    self._test_glm_intercept_vs_hardcoded_intercept(model)

    # Then check without intercept
    X, y = SimuPoisReg(w0, None, n_samples=n_samples, verbose=False,
                       seed=1234, dtype=self.dtype).simulate()
    X /= n_features
    X_spars = csr_matrix(X, dtype=self.dtype)
    model = ModelPoisReg(fit_intercept=False).fit(X, y)
    model_sparse = ModelPoisReg(fit_intercept=False).fit(X_spars, y)
    self.run_test_for_glm(model, model_sparse)
    self._test_glm_intercept_vs_hardcoded_intercept(model)

    # Test the self-concordance constant
    n_samples, n_features = 5, 2
    X = np.zeros((n_samples, n_features))
    X_spars = csr_matrix(X)
    y = np.array([0, 0, 3, 2, 5], dtype=np.double)
    model = ModelPoisReg(fit_intercept=True, link="identity").fit(X, y)
    model_sparse = ModelPoisReg(fit_intercept=True, link="identity").fit(
        X_spars, y)
    # 1.41421356237 == sqrt(2); comparing with the second case below,
    # the constant appears to be 2 / sqrt(min positive label) — the
    # smallest non-zero label here is 2, and 1 below — TODO confirm
    self.assertAlmostEqual(model._sc_constant, 1.41421356237,
                           places=self.decimal_places)
    self.assertAlmostEqual(model_sparse._sc_constant, 1.41421356237,
                           places=self.decimal_places)

    y = np.array([0, 0, 3, 2, 1], dtype=np.double)
    model.fit(X, y)
    model_sparse.fit(X_spars, y)
    self.assertAlmostEqual(model._sc_constant, 2.,
                           places=self.decimal_places)
    self.assertAlmostEqual(model_sparse._sc_constant, 2.,
                           places=self.decimal_places)
def check_solver(self, solver, fit_intercept=True, model='logreg',
                 decimal=1):
    """Check solver instance finds same parameters as scipy BFGS

    Parameters
    ----------
    solver : `Solver`
        Instance of a solver to be tested

    fit_intercept : `bool`, default=True
        Model uses intercept if `True`

    model : 'linreg' | 'logreg' | 'poisreg', default='logreg'
        Name of the model used to test the solver

    decimal : `int`, default=1
        Number of decimals required for the test
    """
    # Set seed for data simulation
    np.random.seed(12)
    n_samples = TestSolver.n_samples
    n_features = TestSolver.n_features

    coeffs0 = weights_sparse_gauss(n_features, nnz=5)
    if fit_intercept:
        interc0 = 2.
    else:
        interc0 = None

    # `model` goes in as a name and comes out as a fitted model object
    if model == 'linreg':
        X, y = SimuLinReg(coeffs0, interc0, n_samples=n_samples,
                          verbose=False, seed=123).simulate()
        model = ModelLinReg(fit_intercept=fit_intercept).fit(X, y)
    elif model == 'logreg':
        X, y = SimuLogReg(coeffs0, interc0, n_samples=n_samples,
                          verbose=False, seed=123).simulate()
        model = ModelLogReg(fit_intercept=fit_intercept).fit(X, y)
    elif model == 'poisreg':
        X, y = SimuPoisReg(coeffs0, interc0, n_samples=n_samples,
                           verbose=False, seed=123).simulate()
        # Rescale features to avoid overflows in Poisson simulations
        X /= np.linalg.norm(X, axis=1).reshape(n_samples, 1)
        model = ModelPoisReg(fit_intercept=fit_intercept).fit(X, y)
    else:
        raise ValueError("``model`` must be either 'linreg', 'logreg' or"
                         " 'poisreg'")

    solver.set_model(model)

    strength = 1e-2
    prox = ProxL2Sq(strength, (0, model.n_features))

    if type(solver) is not SDCA:
        solver.set_prox(prox)
    else:
        # SDCA takes the L2 penalty internally rather than via a prox
        solver.set_prox(ProxZero())
        solver.l_l2sq = strength

    coeffs_solver = solver.solve()

    # Compare with BFGS
    bfgs = BFGS(max_iter=100,
                verbose=False).set_model(model).set_prox(prox)
    coeffs_bfgs = bfgs.solve()
    np.testing.assert_almost_equal(coeffs_solver, coeffs_bfgs,
                                   decimal=decimal)

    # We ensure that reached coeffs are not equal to zero
    self.assertGreater(norm(coeffs_solver), 0)

    self.assertAlmostEqual(solver.objective(coeffs_bfgs),
                           solver.objective(coeffs_solver), delta=1e-2)