import numpy as np
import pytest

from graphdot.kernel.marginalized import MarginalizedGraphKernel


def test_mlgk_gradient(caseitem, nodal):
    '''derivative w.r.t. hyperparameters'''
    _, case = caseitem
    G = case['graphs']
    knode = case['knode']
    kedge = case['kedge']
    for q in case['q']:
        mlgk = MarginalizedGraphKernel(knode, kedge, q=q)
        R, dR = mlgk(G, nodal=nodal, eval_gradient=True)
        assert dR.ndim == 3
        assert R.shape[0] == dR.shape[0]
        assert R.shape[1] == dR.shape[1]
        assert dR.shape[2] >= 1
        for i in range(len(mlgk.theta)):
            theta = mlgk.theta
            eps = 1e-3
            # central finite difference in log-hyperparameter space
            t = np.copy(theta)
            t[i] += eps
            mlgk.theta = t
            Rr = mlgk(G, nodal=nodal)
            t = np.copy(theta)
            t[i] -= eps
            mlgk.theta = t
            Rl = mlgk(G, nodal=nodal)
            mlgk.theta = theta
            dR_dLogt = (Rr - Rl) / (2 * eps)
            # chain rule: theta stores log(t), so dR/dt = dR/d(log t) * 1/t
            dLogt_dt = 1 / np.exp(theta)[i]
            dR_dt = dR_dLogt * dLogt_dt
            assert np.allclose(dR[:, :, i], dR_dt, rtol=0.05, atol=0.05)
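

# A minimal sketch of the chain-rule step used above, with a scalar function
# standing in for the kernel. Assumption (scikit-learn convention, which the
# test above matches): `theta` exposes log-transformed hyperparameters, so a
# central difference taken in log space must be rescaled by
# d(log t)/dt = 1 / t to recover the derivative w.r.t. the raw
# hyperparameter t. The function `f` is an arbitrary stand-in, not part of
# graphdot.
def test_log_space_chain_rule_sketch():
    def f(t):
        return t ** 3  # placeholder for the kernel as a function of t

    t0, eps = 2.0, 1e-3
    # central difference in log space, exactly as in the tests
    df_dlogt = (f(np.exp(np.log(t0) + eps))
                - f(np.exp(np.log(t0) - eps))) / (2 * eps)
    df_dt = df_dlogt / t0  # chain rule: df/dt = df/d(log t) * 1/t
    assert df_dt == pytest.approx(3 * t0 ** 2, rel=1e-4)  # analytic df/dt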


def test_mlgk_diag_gradient(caseitem, nodal):
    '''derivative of the diagonal w.r.t. hyperparameters'''
    _, case = caseitem
    G = case['graphs']
    knode = case['knode']
    kedge = case['kedge']
    for q in case['q']:
        mlgk = MarginalizedGraphKernel(knode, kedge, q=q)
        R, dR = mlgk.diag(G, nodal=nodal, eval_gradient=True)
        assert dR.ndim == 2
        assert R.shape[0] == dR.shape[0]
        assert dR.shape[1] >= 1
        for i in range(len(mlgk.theta)):
            theta = mlgk.theta
            eps = 1e-3
            # central finite difference in log-hyperparameter space
            t = np.copy(theta)
            t[i] += eps
            mlgk.theta = t
            Rr = mlgk.diag(G, nodal=nodal, eval_gradient=False)
            t = np.copy(theta)
            t[i] -= eps
            mlgk.theta = t
            Rl = mlgk.diag(G, nodal=nodal, eval_gradient=False)
            mlgk.theta = theta
            dR_dLogt = (Rr - Rl) / (2 * eps)
            # chain rule: theta stores log(t), so dR/dt = dR/d(log t) * 1/t
            dLogt_dt = 1 / np.exp(theta)[i]
            dR_dt = dR_dLogt * dLogt_dt
            for a, b in zip(dR[:, i].ravel(), dR_dt.ravel()):
                assert a == pytest.approx(b, rel=0.05, abs=0.05)
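

# The two tests above share one verification pattern: compute the analytic
# gradient once, then probe each hyperparameter with a central finite
# difference in log space and map the result back through the chain rule.
# Below is a self-contained sketch of that pattern as a reusable helper. The
# name `check_log_space_gradient` and its signature are hypothetical, not
# part of graphdot or this test suite; `kernel` is assumed to expose a
# scikit-learn style log-space `theta` ndarray property, and `evaluate` is
# assumed to return (K, dK) when called with eval_gradient=True, where the
# last axis of dK indexes hyperparameters in `kernel.theta` order and holds
# derivatives w.r.t. the raw (non-log) hyperparameters.
def check_log_space_gradient(kernel, evaluate, eps=1e-3, rtol=0.05,
                             atol=0.05):
    _, dK = evaluate(eval_gradient=True)
    theta = np.copy(kernel.theta)
    try:
        for i in range(len(theta)):
            t = np.copy(theta)
            t[i] += eps
            kernel.theta = t
            Kr = evaluate(eval_gradient=False)
            t[i] -= 2 * eps
            kernel.theta = t
            Kl = evaluate(eval_gradient=False)
            # central difference in log space, chain rule back to raw t
            dK_dt = (Kr - Kl) / (2 * eps) / np.exp(theta[i])
            assert np.allclose(dK[..., i], dK_dt, rtol=rtol, atol=atol)
    finally:
        kernel.theta = theta  # restore hyperparameters even on failure


# Hypothetical usage, mirroring test_mlgk_gradient:
#   mlgk = MarginalizedGraphKernel(knode, kedge, q=q)
#   check_log_space_gradient(
#       mlgk,
#       lambda eval_gradient: mlgk(G, nodal=nodal,
#                                  eval_gradient=eval_gradient))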