Example 1
def test_fit_spectral(device):
    # TODO deflake this test
    pytest.skip("This test is flaky on macOS.")
    np.random.seed(0)
    torch.random.manual_seed(0)
    n = 200
    m = 3
    max_iter = 1000

    edges = util.all_edges(n)
    weights = torch.ones(edges.shape[0])
    f = penalties.Quadratic(weights)

    mde = problem.MDE(
        n,
        m,
        edges=edges,
        distortion_function=f,
        constraint=Standardized(),
        device=device,
    )
    X = mde.embed(max_iter=max_iter, eps=1e-10, memory_size=10)

    assert id(X) == id(mde.X)
    X_spectral = quadratic.spectral(n,
                                    m,
                                    edges=edges,
                                    weights=weights,
                                    device=device)

    testing.assert_allclose(
        mde.average_distortion(X).detach().cpu().numpy(),
        mde.average_distortion(X_spectral).detach().cpu().numpy(),
        atol=1e-4,
    )
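
These snippets read like a pytest suite for pymde, with the imports and the device argument supplied elsewhere. A plausible preamble is sketched below; the module paths are inferred from the identifiers used in the snippets, the device fixture is an assumption, and numpy.testing merely stands in for whatever assertion helper the real suite uses.

import numpy as np
import pytest
import torch

from pymde import penalties, problem, quadratic, util
from pymde.constraints import Standardized
# Stand-in only: the real suite presumably ships its own CPU/CUDA-aware
# assert_allclose helper.
from numpy import testing


@pytest.fixture(params=["cpu"] + (["cuda"] if torch.cuda.is_available() else []))
def device(request):
    # Run each test on the CPU and, when a GPU is present, on CUDA as well.
    return request.param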
Example 2
def test_self_edges_raises_error(device):
    torch.random.manual_seed(0)
    edges = np.array([(0, 1), (0, 0), (0, 2), (1, 2), (1, 1)])
    with pytest.raises(ValueError,
                       match=r"The edge list must not contain self edges.*"):
        problem.MDE(
            3,
            3,
            edges,
            penalties.Quadratic(torch.ones(edges.shape[0])),
            constraint=Standardized(),
            device=device,
        )
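
The ValueError here is triggered by the self edges (0, 0) and (1, 1). A minimal sketch, using plain NumPy and no pymde-specific API, of dropping self edges before constructing the problem (any per-edge weights would need the same mask):

import numpy as np

edges = np.array([(0, 1), (0, 0), (0, 2), (1, 2), (1, 1)])
# Keep only edges whose endpoints differ.
mask = edges[:, 0] != edges[:, 1]
edges = edges[mask]
# array([[0, 1], [0, 2], [1, 2]])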
Example 3
def test_differences(device):
    torch.random.manual_seed(0)
    edges = np.array([(0, 1), (0, 2), (1, 2)])
    X = torch.randn((3, 3), dtype=torch.float32, device=device)
    mde = problem.MDE(
        3,
        3,
        edges,
        penalties.Quadratic(torch.ones(3)),
        constraint=Standardized(),
        device=device,
    )
    diff = mde.differences(X)
    testing.assert_allclose(X[edges[:, 0]] - X[edges[:, 1]], diff)
Example 4
def test_norm_grad_zero(device):
    torch.random.manual_seed(0)
    edges = np.array([(0, 1)])
    mde = problem.MDE(
        3,
        3,
        edges,
        penalties.Quadratic(torch.ones(3)),
        constraint=Standardized(),
        device=device,
    )
    X = torch.ones((3, 3), requires_grad=True, device=device)
    norms = mde.distances(X)
    norms.backward()
    testing.assert_allclose(X.grad, 0.0)
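
The test expects a zero gradient even though the Euclidean distance is not differentiable where two embedded points coincide. One common way to obtain that behavior, shown purely as a sketch and not as pymde's actual implementation, is to put a small epsilon under the square root, which makes the gradient exactly zero at coincident points:

import torch

def edge_distances(X, edges, eps=1e-12):
    # sqrt(squared distance + eps) is finite everywhere, and its gradient is
    # zero whenever X[i] == X[j] because the difference vector itself is zero.
    sq = ((X[edges[:, 0]] - X[edges[:, 1]]) ** 2).sum(dim=1)
    return torch.sqrt(sq + eps)

edges = torch.tensor([(0, 1)])
X = torch.ones((3, 3), requires_grad=True)
edge_distances(X, edges).sum().backward()
print(X.grad)  # all zeros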
Example 5
def _spectral(
    L,
    m,
    cg=False,
    max_iter=40,
    edges=None,
    weights=None,
    warm_start=False,
    device=None,
):
    n = L.shape[0]
    if not cg:
        # Lanczos (eigsh) path: request m + 1 eigenpairs so that the eigenvector
        # for the zero eigenvalue (constant, for a connected graph) can be
        # dropped below.
        k = m + 1
        num_lanczos_vectors = max(2 * k + 1, int(np.sqrt(L.shape[0])))
        eigenvalues, eigenvectors = scipy.sparse.linalg.eigsh(
            L,
            k,
            which="SM",
            ncv=num_lanczos_vectors,
            tol=1e-4,
            v0=np.ones(L.shape[0]),
            maxiter=L.shape[0] * 5,
        )
        order = np.argsort(eigenvalues)[1:k]
    else:
        k = m
        if warm_start:
            mde = problem.MDE(
                n, m, edges, f=penalties.Quadratic(weights), device=device
            )
            X_init = mde.fit(max_iter=40, use_line_search=False)
        else:
            X_init = util.proj_standardized(
                torch.tensor(np.random.randn(n, m), device=device), demean=True
            )
        eigenvalues, eigenvectors = scipy.sparse.linalg.lobpcg(
            A=L,
            X=X_init.cpu().numpy(),
            # Y: search in the orthogonal complement of the ones vector
            Y=np.ones((L.shape[0], 1)),
            tol=None,
            # largest: find the smallest eigenvalues
            largest=False,
            maxiter=max_iter,
        )
        order = np.argsort(eigenvalues)[0:k]
    return eigenvectors[:, order]
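
_spectral expects a (sparse) graph Laplacian L built from the same edges and weights. A minimal usage sketch follows, with a made-up helper weighted_laplacian and a small cycle graph, calling the function from the example above on its default Lanczos path:

import numpy as np
import scipy.sparse

def weighted_laplacian(n, edges, weights):
    # Symmetric weighted adjacency W from the undirected edge list, then L = D - W.
    rows, cols = edges[:, 0], edges[:, 1]
    W = scipy.sparse.coo_matrix(
        (np.concatenate([weights, weights]),
         (np.concatenate([rows, cols]), np.concatenate([cols, rows]))),
        shape=(n, n),
    )
    degrees = np.asarray(W.sum(axis=1)).ravel()
    return scipy.sparse.diags(degrees) - W.tocsr()

n = 20
edges = np.array([(i, (i + 1) % n) for i in range(n)])  # a 20-node cycle
weights = np.ones(len(edges))
L = weighted_laplacian(n, edges, weights)
X = _spectral(L, 2)  # n x 2 embedding from the smallest nontrivial eigenvectors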
Example 6
def test_average_distortion(device):
    torch.random.manual_seed(0)
    edges = np.array([(0, 1), (0, 2), (1, 2)])
    mde = problem.MDE(
        3,
        2,
        edges,
        penalties.Quadratic(torch.tensor([1.0, 2.0, 3.0])),
        constraint=Standardized(),
        device=device,
    )
    X = torch.tensor(
        [[0.0, 0.0], [1.0, 1.0], [3.0, 3.0]],
        dtype=torch.float32,
        device=device,
    )
    average_distortion = mde.average_distortion(X)
    # (1*2 + 2*18 + 3*8)/3 = (2 + 36 + 24)/3 = 62/3
    testing.assert_allclose(average_distortion.detach().cpu().numpy(),
                            62.0 / 3)
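
The 62/3 in the comment is the weighted mean of the squared edge distances, and it can be reproduced directly as a quick sanity check:

import torch

X = torch.tensor([[0.0, 0.0], [1.0, 1.0], [3.0, 3.0]])
edges = torch.tensor([(0, 1), (0, 2), (1, 2)])
weights = torch.tensor([1.0, 2.0, 3.0])

sq_dists = ((X[edges[:, 0]] - X[edges[:, 1]]) ** 2).sum(dim=1)  # tensor([ 2., 18.,  8.])
print((weights * sq_dists).mean())  # tensor(20.6667), i.e. 62 / 3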
Example 7
def test_average_distortion_grad(device):
    torch.random.manual_seed(0)
    edges = np.array([(0, 1), (0, 2), (1, 2)])
    f = penalties.Quadratic(torch.tensor([1.0, 2.0, 3.0], device=device))
    mde = problem.MDE(3, 2, edges, f, Standardized(), device=device)
    X = torch.randn(
        (3, 2),
        requires_grad=True,
        dtype=torch.float32,
        device=device,
    )
    average_distortion = mde.average_distortion(X)
    average_distortion.backward()
    # Edge-incidence matrix: column j is +1 at edges[j, 0] and -1 at edges[j, 1].
    A = torch.tensor(
        [[1, 1, 0], [-1, 0, 1], [0, -1, -1]],
        device=device,
    ).float()
    auto_grad = X.grad
    X.grad = None
    util._distortion(X, f, A, mde._lhs, mde._rhs).backward()
    manual_grad = X.grad
    testing.assert_allclose(auto_grad, manual_grad)
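
As noted above, A is the edge-incidence matrix of the three edges, so A.T @ X stacks the embedding differences. A short sketch, with a made-up helper incidence_matrix, that rebuilds it from the edge list:

import torch

def incidence_matrix(n_items, edges):
    # Column j holds +1 at edges[j, 0] and -1 at edges[j, 1].
    A = torch.zeros(n_items, len(edges))
    cols = torch.arange(len(edges))
    A[edges[:, 0], cols] = 1.0
    A[edges[:, 1], cols] = -1.0
    return A

edges = torch.tensor([(0, 1), (0, 2), (1, 2)])
A = incidence_matrix(3, edges)  # equals the hard-coded A in the test
X = torch.randn(3, 2)
assert torch.allclose(A.T @ X, X[edges[:, 0]] - X[edges[:, 1]])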