Example #1
import numpy as np
from sklearn.utils import check_random_state
# Helper imports below assume the current nilearn layout; paths may vary by version.
from nilearn.decoding.objective_functions import spectral_norm_squared
from nilearn.decoding.space_net_solvers import (
    _squared_loss_derivative_lipschitz_constant)


def test_lipschitz_constant_loss_mse():
    rng = check_random_state(42)
    X, _, w, mask = _make_data(rng=rng, masked=True)  # test-data fixture
    l1_ratio = 1.
    alpha = .1
    mask = np.ones(X.shape[1]).astype(bool)  # trivial mask: keep every feature
    # l1_ratio == 1 makes grad_weight 0, so the Lipschitz constant of the
    # loss derivative must reduce to the squared spectral norm of X.
    grad_weight = alpha * X.shape[0] * (1. - l1_ratio)
    a = _squared_loss_derivative_lipschitz_constant(X, mask, grad_weight)
    b = spectral_norm_squared(X)
    np.testing.assert_almost_equal(a, b)
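
For intuition, spectral_norm_squared(X) is the largest squared singular value of X, which is exactly the Lipschitz constant of the gradient of w -> 0.5 * ||Xw - y||^2. A minimal sketch (NumPy only, no nilearn needed) checking that identity three equivalent ways:

import numpy as np

rng = np.random.RandomState(0)
X = rng.randn(20, 50)

# Largest squared singular value of X, computed three equivalent ways.
via_ord2_norm = np.linalg.norm(X, 2) ** 2             # operator 2-norm, squared
via_svd = np.linalg.svd(X, compute_uv=False)[0] ** 2  # top singular value
via_gram = np.linalg.eigvalsh(X.T.dot(X)).max()       # top eigenvalue of X^T X

np.testing.assert_allclose(via_ord2_norm, via_svd)
np.testing.assert_allclose(via_ord2_norm, via_gram)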
Example #2
import numpy as np
from numpy.testing import assert_almost_equal
# Helper imports below assume the current nilearn layout; paths may vary by version.
from nilearn.decoding.fista import mfista
from nilearn.decoding.space_net_solvers import (
    _squared_loss_derivative_lipschitz_constant)


def test_mfista_solver_graph_net_no_l1_term():
    w = np.zeros(2)
    X = np.array([[1, 0], [0, 4]])
    y = np.array([-10, 20])
    # Smooth part: plain squared loss and its gradient.
    f1 = lambda w: 0.5 * np.dot(np.dot(X, w) - y, np.dot(X, w) - y)
    f1_grad = lambda w: np.dot(X.T, np.dot(X, w) - y)
    # No l1 term, so the proximal operator is the identity.
    f2_prox = lambda w, l, *args, **kwargs: (w, dict(converged=True))
    lipschitz_constant = _squared_loss_derivative_lipschitz_constant(
        X, (np.eye(2) == 1).astype(bool), 1)
    estimate_solution, _, _ = mfista(
        f1_grad, f2_prox, f1, lipschitz_constant, w.size, tol=1e-8)

    # X is invertible, so the unpenalized minimizer is X^{-1} y = [-10, 5].
    solution = np.array([-10, 5])
    assert_almost_equal(estimate_solution, solution, decimal=4)
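
As a sanity check on the hard-coded solution: with no l1 term the problem is ordinary least squares, and since X is invertible the minimizer of 0.5 * ||Xw - y||^2 is X^{-1} y. A quick sketch (NumPy only) confirming [-10, 5]:

import numpy as np

X = np.array([[1, 0], [0, 4]])
y = np.array([-10, 20])

# The gradient X^T (X w - y) vanishes at w = X^{-1} y when X is invertible.
w_star = np.linalg.solve(X, y)
np.testing.assert_allclose(w_star, [-10, 5])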
Example #3
from numpy import linalg
from sklearn.utils import check_random_state
# Helper imports below assume the current nilearn layout; paths may vary by version.
from nilearn.decoding.space_net_solvers import (
    _squared_loss_and_spatial_grad_derivative,
    _squared_loss_derivative_lipschitz_constant)


def test__squared_loss_derivative_lipschitz_constant():
    # Tests Lipschitz continuity of the derivative of the squared loss
    # (plus spatial-gradient penalty): for random pairs of points, the
    # gradient difference must be bounded by lipschitz_constant times the
    # point difference.  X, y, w and mask are module-level test fixtures.
    rng = check_random_state(42)
    grad_weight = 2.08e-1
    lipschitz_constant = _squared_loss_derivative_lipschitz_constant(
        X, mask, grad_weight)
    for _ in range(20):
        x_1 = rng.rand(*w.shape) * rng.randint(1000)
        x_2 = rng.rand(*w.shape) * rng.randint(1000)
        gradient_difference = linalg.norm(
            _squared_loss_and_spatial_grad_derivative(X, y, x_1, mask,
                                                      grad_weight) -
            _squared_loss_and_spatial_grad_derivative(X, y, x_2, mask,
                                                      grad_weight))
        point_difference = linalg.norm(x_1 - x_2)
        assert gradient_difference <= lipschitz_constant * point_difference
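
The inequality asserted above is the defining property of a Lipschitz-continuous gradient: ||grad f(x_1) - grad f(x_2)|| <= L * ||x_1 - x_2||. For the plain squared loss (grad_weight = 0) the same check works without any nilearn helpers, using L = ||X||_2^2; a self-contained sketch:

import numpy as np

rng = np.random.RandomState(42)
X = rng.randn(30, 10)
y = rng.randn(30)

grad = lambda w: X.T.dot(X.dot(w) - y)  # derivative of 0.5 * ||Xw - y||^2
L = np.linalg.norm(X, 2) ** 2           # squared spectral norm of X

for _ in range(20):
    x_1, x_2 = rng.randn(10), rng.randn(10)
    gradient_difference = np.linalg.norm(grad(x_1) - grad(x_2))
    assert gradient_difference <= L * np.linalg.norm(x_1 - x_2)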