import numpy as np
import torch
from numpy.testing import assert_allclose
from scipy.spatial.distance import cdist, pdist
from graphembed.manifolds import Euclidean

def test_distances_batch(seed):
    man = Euclidean(10)
    x = torch.rand(20, 10)
    y = torch.rand(20, 10)
    # dist(x, y) returns row-wise distances, i.e. the diagonal of cdist(x, y).
    dists_ref = np.diag(cdist(x, y))
    dists = man.dist(x, y)
    assert_allclose(dists_ref, dists, atol=1e-4)
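For orientation: `dist` compares two batches row by row, while `pdist` (exercised in the last tests below) returns the condensed pairwise distances within a single batch, mirroring scipy.spatial.distance.pdist. A small sketch, with shapes inferred from these tests:

man = Euclidean(3)
a, b = torch.rand(5, 3), torch.rand(5, 3)
row_dists = man.dist(a, b)    # shape (5,)
cond_dists = man.pdist(a)     # shape (5 * 4 // 2,)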
Example #2
def evaluate_loss(xs, ys, loss_fn):
    """Sweep a grid of 2D offsets applied to the label-0 points and record
    the loss at each offset."""
    max_off = get_max_offset()
    n_steps = 150
    dxs = np.linspace(-max_off, +max_off, n_steps)
    dys = np.linspace(-max_off, +max_off, n_steps)

    xs = torch.as_tensor(xs)
    ys = torch.as_tensor(ys)
    if torch.cuda.is_available():
        xs = xs.to('cuda')
        ys = ys.to('cuda')

    euc = Euclidean(2)
    xpdists = euc.pdist(xs, squared=True).mul_(args.alpha)
    y_center_indices = ys == 0

    loss_matrix = np.zeros(shape=(len(dys), len(dxs)))
    for i, dx in tqdm.tqdm(
            enumerate(dxs), total=len(dxs), desc='Generating loss matrix'):
        for j, dy in enumerate(dys):
            # Match the dtype of xs so the in-place add below cannot fail.
            dxdy = torch.tensor([dx, dy], dtype=xs.dtype, device=xs.device)
            zs = xs.clone()
            zs[y_center_indices] += dxdy
            zpdists = euc.pdist(zs, squared=True).mul_(args.alpha)
            loss = loss_fn(xpdists, zpdists, alpha=1.0)
            # Rows index dy and columns index dx, matching the
            # (len(dys), len(dxs)) shape declared above.
            loss_matrix[j, i] = loss.item()

    return loss_matrix
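A minimal sketch for plotting the returned matrix, assuming matplotlib is available; the grid extent mirrors the offsets used above:

import matplotlib.pyplot as plt

m = get_max_offset()
loss_matrix = evaluate_loss(xs, ys, loss_fn)
plt.imshow(loss_matrix, origin='lower', extent=(-m, m, -m, m))
plt.xlabel('dx')
plt.ylabel('dy')
plt.colorbar(label='loss')
plt.show()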
def load_embedding(f):
    """Load an embedding checkpoint, supporting both the plain `Embedding`
    format and the `UniversalEmbedding` format."""
    emb_state = torch.load(f, map_location='cpu')
    n_nodes, dim = emb_state['xs.0'].shape
    if 'scales.0' in emb_state:
        emb = Embedding(n_nodes, [Euclidean(dim)])
        emb.load_state_dict(emb_state)
    else:
        n_factors = len(emb_state) // 2
        emb = UniversalEmbedding(n_nodes, [dim] * n_factors)
        try:
            emb.load_state_dict(emb_state)
        # load_state_dict raises RuntimeError on mismatched keys or shapes;
        # in that case, copy the saved tensors into place manually.
        except RuntimeError:
            i, j = 0, 0
            with torch.no_grad():
                for key, tensor in emb_state.items():
                    if key.startswith('xs'):
                        emb.xs[i].set_(tensor)
                        i += 1
                    elif key.startswith('c'):
                        emb.manifolds[j].c.set_(tensor)
                        j += 1
    emb.burnin(True)
    for x in emb.xs:
        x.requires_grad_(False)
    return emb
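A short usage sketch; the checkpoint filename is hypothetical:

emb = load_embedding('embedding.pth')  # hypothetical checkpoint path
print(len(emb.xs), emb.xs[0].shape)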
Example #4
def build_manifold(*names):
    """Build a list of manifold factors from identifiers such as 'euc_10',
    'hyp_5' or 'grass_6_3': a name and its dimensions, separated by
    underscores."""
    from graphembed.manifolds import (Euclidean, Grassmann, Lorentz,
                                      SymmetricPositiveDefinite,
                                      SpecialOrthogonalGroup, Sphere)

    factors = []
    for name in names:
        parts = name.split('_')
        identifier = parts[0]
        if identifier in ['euc', 'sph', 'hyp', 'so', 'spd', 'spdstein']:
            n = int(parts[1])
        elif identifier in ['grass']:
            n1 = int(parts[1])
            n2 = int(parts[2])
        else:
            raise ValueError(f'Unknown manifold identifier {identifier}')

        if identifier == 'euc':
            man = Euclidean(n)
        elif identifier == 'sph':
            man = Sphere(n)
        elif identifier == 'hyp':
            man = Lorentz(n)
        elif identifier == 'so':
            man = SpecialOrthogonalGroup(n)
        elif identifier == 'spd':
            man = SymmetricPositiveDefinite(n)
        elif identifier == 'spdstein':
            man = SymmetricPositiveDefinite(n, use_stein_div=True)
        elif identifier == 'grass':
            man = Grassmann(n1, n2)

        factors.append(man)

    return factors
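For instance, building a product of a 10-dimensional Euclidean factor, a 5-dimensional hyperbolic factor, and a Grassmann factor:

factors = build_manifold('euc_10', 'hyp_5', 'grass_6_3')
# -> [Euclidean(10), Lorentz(5), Grassmann(6, 3)]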
Example #5
def exp_run_eucl(ds_name, ds, loss_fn, alpha, tpool, fp, output_dir):
    """Train a Euclidean embedding of `ds` with RiemannianAdam."""
    emb = Embedding(len(ds), [Euclidean(args.dim)])
    optim = RiemannianAdam([
        dict(params=emb.xs, lr=0.05, exact=True),
    ])
    training_engine = TrainingEngine(
            embedding=emb,
            optimizer=optim,
            objective_fn=loss_fn,
            alpha=alpha,
            n_epochs=5000,
            batch_size=4096,
            burnin_epochs=10,
            burnin_lower_lr=args.burnin_lower_lr,
            burnin_higher_lr=args.burnin_higher_lr,
            val_every_epochs=args.eval_every,
            save_every_epochs=1000,
            lazy_metrics={
                'Layer_Mean_F1':
                        lambda p: tpool.submit(fp.layer_mean_f1_scores, p),
            },
            save_dir=output_dir)
    training_engine(ds)
Example #6
import torch
from torch.optim.optimizer import required

from graphembed.manifolds import Euclidean
from graphembed.modules import ManifoldParameter

_default_manifold = Euclidean(1)


class RiemannianSGD(torch.optim.Optimizer):

    def __init__(self,
                 params,
                 lr=required,
                 momentum=0,
                 dampening=0,
                 max_grad_norm=None,
                 exact=False):
        if momentum < 0.0:
            raise ValueError("Invalid momentum value: {}".format(momentum))
        defaults = dict(
                lr=lr,
                momentum=momentum,
                dampening=dampening,
                max_grad_norm=max_grad_norm,
                exact=exact)
        super().__init__(params, defaults)

    def step(self, closure=None):
        loss = None
        if closure is not None:
            loss = closure()

        # Minimal sketch of the update loop; `egrad2rgrad` and `retr` are
        # assumed manifold method names, and the momentum, dampening and
        # max_grad_norm options are not exercised here.
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                # Plain tensors fall back to the default Euclidean manifold.
                man = getattr(p, 'manifold', _default_manifold)
                rgrad = man.egrad2rgrad(p.data, p.grad)
                p.data = man.retr(p.data, -group['lr'] * rgrad)
        return loss
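A usage sketch, mirroring the optimizer setup from the training example above; the sizes are arbitrary:

emb = Embedding(1000, [Euclidean(10)])
optim = RiemannianSGD([dict(params=emb.xs, lr=0.1, exact=True)])
optim.zero_grad()
# ... compute a loss over emb.xs and call loss.backward() ...
optim.step()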
def test_pdists(seed, n, d):
    man = Euclidean(d)
    x = torch.rand(n, d)
    # Compare against SciPy's condensed pairwise distances.
    dists_ref = pdist(x.cpu())
    dists = man.pdist(x)
    assert_allclose(dists_ref, dists, atol=1e-4)
def test_dim():
    man = Euclidean(100)
    assert man.dim == 100
    # The total dimension is the product of the shape: 10 * 5 * 2 == 100.
    man = Euclidean(10, 5, 2)
    assert man.dim == 100