Example 1
import numpy as np
from numpy.testing import assert_almost_equal

# Import paths below assume nilearn's decoding module layout.
from nilearn.decoding.fista import mfista
from nilearn.decoding.objective_functions import (
    _squared_loss_derivative_lipschitz_constant,
)


def test_mfista_solver_graph_net_no_l1_term():
    # Smooth term: f1(w) = 0.5 * ||X w - y||^2 with a diagonal design matrix.
    w = np.zeros(2)
    X = np.array([[1, 0], [0, 4]])
    y = np.array([-10, 20])
    f1 = lambda w: 0.5 * np.dot(np.dot(X, w) - y, np.dot(X, w) - y)
    f1_grad = lambda w: np.dot(X.T, np.dot(X, w) - y)
    # Identity prox: no l1 / graph-net penalty, so mfista minimizes f1 alone.
    f2_prox = lambda w, l, *args, **kwargs: (w, dict(converged=True))
    # Note: np.bool was removed in recent NumPy; use the builtin bool instead.
    lipschitz_constant = _squared_loss_derivative_lipschitz_constant(
        X, (np.eye(2) == 1).astype(bool), 1)
    estimate_solution, _, _ = mfista(
        f1_grad, f2_prox, f1, lipschitz_constant, w.size, tol=1e-8)

    solution = np.array([-10, 5])
    assert_almost_equal(estimate_solution, solution, decimal=4)
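Why the expected solution is [-10, 5]: because f2_prox passes its input through unchanged, the proximal step adds no penalty and mfista effectively minimizes only the quadratic term. With the diagonal design X = diag(1, 4) and y = (-10, 20), the exact least-squares minimizer is w = (-10 / 1, 20 / 4) = (-10, 5), which drives the residual to zero; the assertion checks that the solver recovers it to four decimal places.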