def test_same_energy_calculus_pure_lasso():
    """Check that with zero spatial-grad weight the graph-net energy and
    its derivative reduce to the plain squared loss and its gradient.

    NOTE(review): this definition is shadowed by a near-identical duplicate
    of the same name that follows it (differing only in using
    ``assert_equal``); one of the two should be deleted.
    """
    rng = check_random_state(42)
    X, y, w, mask = _make_data(rng=rng, masked=True)

    # check function values: squared loss vs. combined energy with
    # grad_weight=0 must agree exactly
    f1 = _squared_loss(X, y, w)
    f2 = _squared_loss_and_spatial_grad(X, y, w.ravel(), mask, 0.)
    assert f1 == f2

    # check derivatives: gradients must agree element-wise
    g1 = _squared_loss_grad(X, y, w)
    g2 = _squared_loss_and_spatial_grad_derivative(X, y, w.ravel(), mask, 0.)
    np.testing.assert_array_equal(g1, g2)
def test_same_energy_calculus_pure_lasso():
    """Check that with zero spatial-grad weight the graph-net energy and
    its derivative reduce to the plain squared loss and its gradient.

    NOTE(review): this is a near-identical duplicate of the preceding
    definition of the same name (it shadows it); one of the two should be
    deleted.
    """
    rng = check_random_state(42)
    X, y, w, mask = _make_data(rng=rng, masked=True)

    # check function values: squared loss vs. combined energy with
    # grad_weight=0 must agree exactly.  Use a plain assert instead of the
    # deprecated nose-style assert_equal, consistent with the sibling test.
    f1 = _squared_loss(X, y, w)
    f2 = _squared_loss_and_spatial_grad(X, y, w.ravel(), mask, 0.)
    assert f1 == f2

    # check derivatives: gradients must agree element-wise
    g1 = _squared_loss_grad(X, y, w)
    g2 = _squared_loss_and_spatial_grad_derivative(X, y, w.ravel(), mask, 0.)
    np.testing.assert_array_equal(g1, g2)
def test__squared_loss_gradient_at_simple_points():
    """Tests gradient of data loss function in points near to zero.

    This is a not so hard test, just for detecting big errors: the
    analytical gradient is compared against a finite-difference
    approximation at a few unit-vector points.
    """
    X, y, w, mask = create_graph_net_simulation_data(n_samples=10, size=4)
    grad_weight = 1
    func = lambda w: _squared_loss_and_spatial_grad(X, y, w, mask,
                                                    grad_weight)
    func_grad = lambda w: _squared_loss_and_spatial_grad_derivative(
        X, y, w, mask, grad_weight)
    # Probe every other coordinate to keep the test fast.
    for i in range(0, w.size, 2):
        # BUG FIX: the original used np.zeros(*w.shape), which unpacks the
        # shape tuple — correct only for 1-D w, and would pass a second
        # dimension as the dtype argument otherwise.  Passing the shape
        # tuple itself is equivalent for 1-D and robust in general.
        point = np.zeros(w.shape)
        point[i] = 1
        assert_almost_equal(sp.optimize.check_grad(func, func_grad, point),
                            0, decimal=3)