def test_cdfast(distr):
    """Test all functionality related to fast coordinate descent."""
    scaler = StandardScaler()
    n_samples = 1000
    n_features = 100
    n_classes = 5
    density = 0.1

    # Batch gradient not available for gamma
    if distr == 'gamma':
        return

    glm = GLM(distr, solver='cdfast')
    np.random.seed(glm.random_state)

    # Ground-truth coefficients: random intercept plus a sparse weight vector.
    true_intercept = np.random.rand()
    true_weights = sps.rand(n_features, 1, density=density).toarray()[:, 0]

    # Standardized Gaussian design matrix and simulated response.
    X = scaler.fit_transform(np.random.normal(0.0, 1.0, [n_samples, n_features]))
    y = simulate_glm(glm.distr, true_intercept, true_weights, X)

    # Augmented coefficient vector [intercept, weights] and linear predictor.
    coef_aug = np.zeros((n_features + 1,))
    coef_aug[0] = true_intercept
    coef_aug[1:] = true_weights
    z = coef_aug[0] + np.dot(X, coef_aug[1:])

    # Per-coordinate gradient and Hessian for feature k.
    k = 1
    feature_col = X[:, k - 1]
    gk, hk = _gradhess_logloss_1d(glm.distr, feature_col, y, z,
                                  glm.eta, glm.theta)

    # Scalar for univariate distributions, one value per class otherwise.
    if distr == 'multinomial':
        assert isinstance(gk, np.ndarray)
        assert isinstance(hk, np.ndarray)
        assert gk.shape[0] == n_classes
        assert hk.shape[0] == n_classes
        assert gk.ndim == 1
        assert hk.ndim == 1
    else:
        assert isinstance(gk, float)
        assert isinstance(hk, float)
        assert np.size(gk) == 1
        assert np.size(hk) == 1

    # One sweep of coordinate descent: shape preserved, no NaNs produced.
    ActiveSet = np.ones(n_features + 1)
    beta_ret = glm._cdfast(X, y, ActiveSet, coef_aug, glm.reg_lambda)
    assert beta_ret.shape == coef_aug.shape
    assert True not in np.isnan(beta_ret)
def test_cdfast():
    """Test all functionality related to fast coordinate descent.

    For each supported distribution this builds a sparse ground-truth
    model, simulates data, then checks the shapes/types of the
    per-coordinate gradient and Hessian and of one ``_cdfast`` sweep.

    NOTE(review): modernized from nose-style ``assert_equal``/``assert_true``
    helpers (nose is unmaintained) to plain ``assert`` statements; the
    exercised behavior is unchanged.
    """
    scaler = StandardScaler()
    n_samples = 1000
    n_features = 100
    n_classes = 5
    density = 0.1
    distrs = ['softplus', 'gaussian', 'binomial', 'poisson', 'probit']
    for distr in distrs:
        glm = GLM(distr, solver='cdfast')
        np.random.seed(glm.random_state)

        # coefficients: random intercept + sparse weight vector
        beta0 = np.random.rand()
        beta = sps.rand(n_features, 1, density=density).toarray()[:, 0]
        # data: standardized Gaussian design, simulated response
        X = np.random.normal(0.0, 1.0, [n_samples, n_features])
        X = scaler.fit_transform(X)
        y = simulate_glm(glm.distr, beta0, beta, X)

        # compute grad and hess for coordinate k of the augmented
        # coefficient vector [intercept, weights]
        beta_ = np.zeros((n_features + 1, ))
        beta_[0] = beta0
        beta_[1:] = beta
        z = beta_[0] + np.dot(X, beta_[1:])
        k = 1
        xk = X[:, k - 1]
        gk, hk = _gradhess_logloss_1d(glm.distr, xk, y, z, glm.eta)

        # test grad and hess: scalar for univariate distributions,
        # one value per class for multinomial
        if distr != 'multinomial':
            assert np.size(gk) == 1
            assert np.size(hk) == 1
            assert isinstance(gk, float)
            assert isinstance(hk, float)
        else:
            assert gk.shape[0] == n_classes
            assert hk.shape[0] == n_classes
            assert isinstance(gk, np.ndarray)
            assert isinstance(hk, np.ndarray)
            assert gk.ndim == 1
            assert hk.ndim == 1

        # test cdfast: one full sweep preserves the shapes of the
        # coefficient vector and the linear predictor
        ActiveSet = np.ones(n_features + 1)
        beta_ret, z_ret = glm._cdfast(X, y, z,
                                      ActiveSet, beta_, glm.reg_lambda)
        assert beta_ret.shape == beta_.shape
        assert z_ret.shape == z.shape
def test_cdfast():
    # NOTE(review): this definition appears to be a near-duplicate of the
    # preceding test_cdfast — in Python only the last definition of the
    # name is kept, so earlier ones are silently shadowed. Confirm which
    # version is intended and remove the others.
    """Test all functionality related to fast coordinate descent"""
    scaler = StandardScaler()
    n_samples = 1000
    n_features = 100
    n_classes = 5
    density = 0.1
    distrs = ['softplus', 'gaussian', 'binomial', 'poisson', 'probit']
    for distr in distrs:
        glm = GLM(distr, solver='cdfast')
        np.random.seed(glm.random_state)

        # coefficients: random intercept plus a sparse weight vector
        beta0 = np.random.rand()
        beta = sps.rand(n_features, 1, density=density).toarray()[:, 0]
        # data: standardized Gaussian design matrix, simulated response
        X = np.random.normal(0.0, 1.0, [n_samples, n_features])
        X = scaler.fit_transform(X)
        y = simulate_glm(glm.distr, beta0, beta, X)

        # compute grad and hess for coordinate k of the augmented
        # coefficient vector [intercept, weights]
        beta_ = np.zeros((n_features + 1,))
        beta_[0] = beta0
        beta_[1:] = beta
        z = beta_[0] + np.dot(X, beta_[1:])
        k = 1
        xk = X[:, k - 1]
        gk, hk = _gradhess_logloss_1d(glm.distr, xk, y, z, glm.eta)

        # test grad and hess: scalar for univariate distributions,
        # one value per class for multinomial
        if distr != 'multinomial':
            assert_equal(np.size(gk), 1)
            assert_equal(np.size(hk), 1)
            assert_true(isinstance(gk, float))
            assert_true(isinstance(hk, float))
        else:
            assert_equal(gk.shape[0], n_classes)
            assert_equal(hk.shape[0], n_classes)
            assert_true(isinstance(gk, np.ndarray))
            assert_true(isinstance(hk, np.ndarray))
            assert_equal(gk.ndim, 1)
            assert_equal(hk.ndim, 1)

        # test cdfast: one sweep preserves the shapes of the coefficient
        # vector and the linear predictor
        ActiveSet = np.ones(n_features + 1)
        beta_ret, z_ret = glm._cdfast(X, y, z,
                                      ActiveSet, beta_, glm.reg_lambda)
        assert_equal(beta_ret.shape, beta_.shape)
        assert_equal(z_ret.shape, z.shape)