def test_exp_log(seed, rand_spd, rand_sym, d):
    spd = SPD(d)
    x = rand_spd(10, d)
    u = rand_sym(10, d)
    y = spd.exp(x, u)
    assert_allclose(u, spd.log(x, y), atol=1e-4)
    assert_allclose(spd.norm(x, u), spd.dist(x, y), atol=1e-4)

def test_stein_pdiv(seed, d):
    # Use SPD(d) so that the manifold matches the d x d samples below.
    spd = SPD(d)
    xs = spd.rand(10, ir=1.0, out=torch.empty(10, d, d, dtype=torch.float64))
    pdivs = spd.stein_pdiv(xs)
    m = torch.triu_indices(10, 10, 1)
    ref_pdivs = spd.stein_div(xs[m[0]], xs[m[1]])
    assert_allclose(ref_pdivs, pdivs, atol=1e-4)

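
# For reference, the Stein (Jensen-Bregman LogDet) divergence of two SPD
# matrices has the closed form
#   S(X, Y) = logdet((X + Y) / 2) - (logdet(X) + logdet(Y)) / 2.
# A minimal sketch of a reference implementation, assuming `spd.stein_div`
# returns the divergence itself rather than its square root:
def _stein_div_reference(x, y):
    mid = torch.logdet((x + y) / 2)  # log-determinant of the midpoint
    return mid - 0.5 * (torch.logdet(x) + torch.logdet(y))
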
def test_gradient(seed, d):
    spd = SPD(d)
    x, y = spd.rand(2, ir=1.0, out=torch.empty(2, d, d, dtype=torch.float64))
    x.requires_grad_()
    dist = 0.5 * spd.dist(x, y, squared=True)
    grad_e = torch.autograd.grad(dist, x)[0]
    grad = spd.egrad2rgrad(x, grad_e)
    # The Riemannian gradient of x -> d(x, y)^2 / 2 is -log_x(y).
    assert_allclose(grad.detach(), -spd.log(x.detach(), y), atol=1e-4)

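
# For the affine-invariant metric, the conversion from a Euclidean gradient G
# at X to the Riemannian gradient is commonly X @ sym(G) @ X; a hedged sketch
# of what `egrad2rgrad` plausibly computes (an assumption about this library,
# not its actual implementation):
def _egrad2rgrad_sketch(x, egrad):
    sym = 0.5 * (egrad + egrad.transpose(-2, -1))  # symmetrize the gradient
    return x @ sym @ x
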
def test_unit_distance(d, seed):
    spd = SPD(d)
    u_vec = torch.randn(spd.dim)
    u = spd.from_vec(u_vec / u_vec.norm())
    x = torch.eye(d)
    assert_allclose(1.0, spd.norm(x, u), atol=1e-4)
    # exp follows a unit-speed geodesic, so a unit-norm tangent vector must
    # land at distance one.
    y = spd.exp(x, u)
    assert_allclose(1.0, spd.dist(x, y), atol=1e-4)

def test_h2_to_sspd2(seed, n, d):
    spd = SPD(2)
    lorentz = Lorentz(3)
    x = lorentz.rand(n, ir=1.0).mul_(d)
    y = h2_to_sspd2(x)
    assert_allclose(sspd2_to_h2(y), x / d, atol=1e-4)
    hyp_dists = sspd2_hyp_radius_ * lorentz.pdist(x / d)
    assert_allclose(spd.pdist(y), hyp_dists, atol=1e-4)

def test_distance_formulas(seed, rand_spd, d):
    spd = SPD(d)
    x, y = rand_spd(2, d)
    ref_dist = spd.dist(x, y)
    # compute :math:`X^{-1} Y` and take its eigenvalues (we have to use
    # `torch.eig` for this as the resulting matrix might not be symmetric)
    d1 = torch.solve(y, x)[0].eig()[0][:, 0].log_().pow_(2).sum().sqrt_()
    assert_allclose(ref_dist, d1, atol=1e-4)
    # the distance is symmetric, so :math:`Y^{-1} X` must give the same value
    d2 = torch.solve(x, y)[0].eig()[0][:, 0].log_().pow_(2).sum().sqrt_()
    assert_allclose(ref_dist, d2, atol=1e-4)

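
# `torch.solve` and `torch.eig` are deprecated in recent PyTorch releases; a
# sketch of the same eigenvalue-based distance via `torch.linalg` (assuming
# PyTorch >= 1.9):
def _dist_via_linalg(x, y):
    # eigenvalues of x^{-1} y; they may come back complex with zero imaginary
    # parts, hence the `.real`
    evals = torch.linalg.eigvals(torch.linalg.solve(x, y))
    return evals.real.log().pow(2).sum().sqrt()
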
def test_sspd2_to_h2(seed, n):
    spd = SPD(2)
    lorentz = Lorentz(3)
    x = spd.rand(n, ir=1.0)
    x.div_(x.det().sqrt_().reshape(-1, 1, 1))  # unit determinant
    assert_allclose(x.det(), torch.ones(n), atol=1e-4)
    assert_allclose(x, spd.projx(x), atol=1e-4)
    y = sspd2_to_h2(x)
    hyp_dists = sspd2_hyp_radius_ * lorentz.pdist(y)
    assert_allclose(spd.pdist(x), hyp_dists, atol=1e-4)

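
# One common realization of the isometry between H^2 and the unit-determinant
# SPD(2) matrices maps a hyperboloid point (x0, x1, x2), with
# x0^2 - x1^2 - x2^2 = 1, to [[x0 + x1, x2], [x2, x0 - x1]], whose determinant
# equals the Minkowski form and is hence 1. A hedged sketch only; the library's
# `h2_to_sspd2` may use a different coordinate convention or scaling:
def _h2_to_sspd2_sketch(x):
    x0, x1, x2 = x.unbind(-1)
    row0 = torch.stack([x0 + x1, x2], dim=-1)
    row1 = torch.stack([x2, x0 - x1], dim=-1)
    return torch.stack([row0, row1], dim=-2)
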
def main():
    args = parse_args()

    # Fix the random seeds.
    set_seeds(args.random_seed)

    # Default torch settings.
    torch.set_default_dtype(torch.float64)
    if torch.cuda.is_available():
        torch.set_default_tensor_type(torch.cuda.DoubleTensor)

    # load data
    gpdists, g = load_graph_pdists(args.input_graph, cache_dir='.cached_pdists')
    n_nodes = g.number_of_nodes()
    ds = GraphDataset(gpdists)
    fp = FastPrecision(g)

    # run hyp2
    hyp = Lorentz(3)
    emb = ManifoldEmbedding(n_nodes, [hyp] * args.n_factors)
    for i in range(args.n_factors):
        emb.scales[i] = torch.nn.Parameter(torch.tensor(2.0))
    man_name = '_'.join('hyp2' for _ in range(args.n_factors))
    save_dir = os.path.join(args.save_dir, man_name)
    if args.hyp_snapshot or args.hyp_pretrained:
        logging.info('Loading embedding for %s', man_name)
        load_embedding(emb, save_dir)
    if not args.hyp_pretrained:
        train(ds, fp, emb, args.n_epochs, save_dir)

    # map it to SPD
    spd = SPD(2 * args.n_factors)
    spd_emb = ManifoldEmbedding(n_nodes, [spd])
    save_dir = os.path.join(args.save_dir, 'spd{}'.format(spd.dim))
    if args.spd_snapshot:
        logging.info('Loading embedding for SPD%d', spd.dim)
        load_embedding(spd_emb, save_dir)
    else:
        with torch.no_grad():
            spd_emb.xs[0] = ManifoldParameter(block_diag([
                    h2_to_sspd2(emb.xs[i].mul(math.sqrt(2)))
                    for i in range(args.n_factors)
            ]), manifold=spd)
        hyp_dists = emb.to('cpu').compute_dists(None)
        spd_dists = spd_emb.compute_dists(None).to('cpu')
        assert torch.allclose(hyp_dists, spd_dists, atol=1e-4)

    # run spd2
    train(ds, fp, spd_emb, args.n_epochs, save_dir, args.n_epochs)

def test_sspd2_to_h2_nonconst_factor(seed, n, d):
    spd = SPD(2)
    lorentz = Lorentz(3)
    x = spd.rand(n, ir=1.0)
    x.div_(x.det().sqrt_().reshape(-1, 1, 1))  # unit determinant
    x.mul_(d)  # d**2 determinant
    dets = torch.empty(n).fill_(d**2)
    assert_allclose(x.det(), dets, atol=1e-4)
    assert_allclose(x, spd.projx(x), atol=1e-4)
    y = sspd2_to_h2(x)
    hyp_dists = sspd2_hyp_radius_ * lorentz.pdist(y)
    # Scaling the determinant does not change the geometry: the affine-
    # invariant metric is invariant under X -> c * X, so every constant-
    # determinant slice is isometric to the 2-dimensional hyperbolic space of
    # constant sectional curvature -1/2.
    assert_allclose(spd.pdist(x), hyp_dists, atol=1e-4)

def main():
    args = parse_args()

    # Fix the random seeds.
    set_seeds(args.random_seed)

    # Default torch settings.
    torch.set_default_dtype(torch.float64)
    if torch.cuda.is_available():
        torch.set_default_tensor_type(torch.cuda.DoubleTensor)

    # load data
    gpdists, g = load_graph_pdists(args.input_graph, cache_dir='.cached_pdists')
    n_nodes = g.number_of_nodes()
    ds = GraphDataset(gpdists)
    fp = FastPrecision(g)

    # run hyp2
    emb = ManifoldEmbedding(n_nodes, [Lorentz(3)])
    path = os.path.join(args.save_dir, 'hyp2')
    train(ds, fp, emb, args.n_epochs, path)
    curvature_sq = 1 / emb.scales[0]

    # map it to SSPD
    sspd_emb = ManifoldEmbedding(n_nodes, [SPD(2)])
    sspd_emb.xs[0] = ManifoldParameter(
            h2_to_sspd2(emb.xs[0] / curvature_sq.sqrt()),
            manifold=sspd_emb.manifolds[0])
    sspd_emb.scales[0] = torch.nn.Parameter(1 / curvature_sq / 2)
    assert torch.allclose(
            emb.compute_dists(None),
            sspd_emb.compute_dists(None),
            atol=1e-4)

    # run spd2
    path = os.path.join(args.save_dir, 'spd2')
    train(ds, fp, sspd_emb, args.n_epochs, path, args.n_epochs)

def test_dim():
    # The intrinsic dimension of SPD(n) is n * (n + 1) / 2.
    assert 3 == SPD(2).dim
    assert 6 == SPD(3).dim
    assert 10 == SPD(4).dim

def test_inner_norm(seed, d):
    spd = SPD(d)
    xs = spd.rand(100, ir=1.0, out=torch.empty(100, d, d, dtype=torch.float64))
    us = spd.randvec(xs)
    assert_allclose(spd.inner(xs, us, us)**0.5, spd.norm(xs, us), atol=1e-4)

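
# The affine-invariant inner product is <u, v>_x = tr(x^{-1} u x^{-1} v); a
# hedged reference of what `spd.inner` plausibly computes (an assumption, not
# the library's actual code):
def _inner_sketch(x, u, v):
    x_inv = x.inverse()
    # batched trace of x^{-1} u x^{-1} v
    return (x_inv @ u @ x_inv @ v).diagonal(dim1=-2, dim2=-1).sum(-1)
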
def test_no_nan_dists(seed, rand_spd, d, n):
    spd = SPD(d)
    x = rand_spd(n, d)
    assert not torch.isnan(spd.pdist(x)).any()

import numpy as np
from matplotlib.patches import Ellipse
import matplotlib.pyplot as plt
import torch

from graphembed.manifolds import SymmetricPositiveDefinite as SPD

spd = SPD(2)
x, y = spd.rand(2, ir=3.0)
u = spd.log(x, y)
n = 100
ys = spd.exp(x.repeat(n, 1, 1), torch.linspace(0, 1.0, n).reshape(n, 1, 1) * u)


def add_ellipsis(x, offset):
    # Represent an SPD(2) matrix as the ellipse spanned by its eigenvectors,
    # scaled by the corresponding eigenvalues.
    ws, us = x.symeig(eigenvectors=True)
    rad = torch.atan2(us[0][1], us[0][0])
    degs = np.rad2deg(rad)
    ellipse = Ellipse(xy=(offset, 0), width=ws[0], height=ws[1], angle=degs)
    max_x = max(rad.cos().abs() * ws[0], rad.sin().abs() * ws[1]) / 2
    max_y = max(rad.sin().abs() * ws[0], rad.cos().abs() * ws[1]) / 2
    return ellipse, max_x, max_y


fig, ax = plt.subplots()
max_width = 0
max_height = 0
min_width = 0
for i, y in enumerate(ys):
    # NOTE: the loop body was truncated in the original; the following is a
    # plausible reconstruction that lays the ellipses out side by side along
    # the geodesic.
    ellipse, max_x, max_y = add_ellipsis(y, max_width)
    ax.add_artist(ellipse)
    max_width += 2 * max_x
    max_height = max(max_height, max_y)
        help='The maximum number of neighbor pairs to compute seccurvs for.')
args = parser.parse_args()

emb_state = torch.load(
        os.path.join(args.input, 'best_embedding.pth'), map_location='cpu')
for comp_id in range(len(emb_state) // 2):
    logging.warning('Processing %s, comp %d', args.input, comp_id)
    xs = emb_state[f'xs.{comp_id}']
    # Only SPD components are batches of square matrices; skip everything else.
    if xs.ndim != 3 or xs.shape[-2] != xs.shape[-1]:
        continue
    if torch.cuda.is_available():
        xs = xs.to('cuda')
    n_nodes, n, _ = xs.shape
    spd = SPD(n)

    def sectional_curvatures(i):
        x = xs[i]
        n_neighs = min(n_nodes, int(np.sqrt(args.max_neigh_pairs)))
        neighs = torch.as_tensor(random.sample(range(n_nodes), n_neighs))
        x_rep = x.repeat(n_neighs, 1, 1)
        x_logs = spd.log(x_rep, xs[neighs])
        # All (u, v) pairs of the sampled log-maps.
        x_rep = x_rep.repeat(n_neighs, 1, 1)
        us = x_logs.repeat(n_neighs, 1, 1)
        vs = x_logs.repeat_interleave(n_neighs, dim=0)
        seccurvs = spd.seccurv(x_rep, us, vs)
        return seccurvs