def test_lipschitz_constant_loss_logreg():
    """With a vanishing smooth-penalty weight, the Lipschitz constant of the
    logistic derivative must coincide with the logistic loss's own constant."""
    rng = check_random_state(42)
    X, _, w, mask = _make_data(rng=rng, masked=True)
    # l1_ratio == 1 zeroes out grad_weight, so both constants come from X alone.
    l1_ratio = 1.
    alpha = .1
    grad_weight = alpha * X.shape[0] * (1. - l1_ratio)
    lipschitz_derivative = _logistic_derivative_lipschitz_constant(
        X, mask, grad_weight)
    lipschitz_loss = _logistic_loss_lipschitz_constant(X)
    assert lipschitz_derivative == lipschitz_loss
def test_lipschitz_constant_loss_logreg():
    """Logistic-derivative and logistic-loss Lipschitz constants agree when
    the smooth penalty weight is zero (l1_ratio == 1)."""
    # NOTE(review): this re-defines the identically named test defined just
    # before it, so only one of the two is ever collected by the test runner —
    # one of them should be renamed or removed.
    rng = check_random_state(42)
    X, _, w, mask = _make_data(rng=rng, masked=True)
    l1_ratio = 1.
    alpha = .1
    # grad_weight == 0 because (1 - l1_ratio) == 0.
    grad_weight = alpha * X.shape[0] * (1. - l1_ratio)
    a = _logistic_derivative_lipschitz_constant(X, mask, grad_weight)
    b = _logistic_loss_lipschitz_constant(X)
    # Plain assert instead of nose's deprecated/removed assert_equal; pytest's
    # assertion rewriting gives an informative failure message, and this
    # matches the sibling test's style.
    assert a == b
def test_logistic_lipschitz(n_samples=4, n_features=2, random_state=42):
    """Check that _logistic_loss_lipschitz_constant yields a valid Lipschitz
    bound for the logistic loss across several orders of magnitude of data
    scaling.

    Parameters
    ----------
    n_samples, n_features : int
        Shape of the random design matrix X.
    random_state : int
        Seed for the RNG so the test is deterministic.
    """
    rng = np.random.RandomState(random_state)
    for scaling in np.logspace(-3, 3, num=7):
        X = rng.randn(n_samples, n_features) * scaling
        y = rng.randn(n_samples)
        # Dropped the original's dead reassignment `n_features = X.shape[1]`:
        # it shadowed the parameter with an identical value and only obscured
        # where the count comes from.
        L = _logistic_loss_lipschitz_constant(X)
        # n_features + 1: the weight vector presumably carries an extra
        # intercept coefficient — TODO confirm against _logistic's signature.
        _check_lipschitz_continuous(lambda w: _logistic(X, y, w),
                                    n_features + 1, L)
def test_logistic_lipschitz(n_samples=4, n_features=2, random_state=42):
    """Verify the logistic-loss Lipschitz bound over a sweep of data scales.

    NOTE(review): this duplicates the identically named test defined just
    before it (they differ only in formatting), so only one is collected.
    """
    rng = np.random.RandomState(random_state)
    for scale in np.logspace(-3, 3, num=7):
        X = scale * rng.randn(n_samples, n_features)
        y = rng.randn(n_samples)
        n_features = X.shape[1]
        lipschitz_bound = _logistic_loss_lipschitz_constant(X)
        loss = lambda w: _logistic(X, y, w)
        _check_lipschitz_continuous(loss, n_features + 1, lipschitz_bound)