def test_hk_big_sigma(pts, dims):
    """With a very large sigma the heat kernel smooths the diagrams almost
    completely away, so every pixel should be near zero. This verifies that
    the smoothing is actually applied."""
    bins = 10
    diagrams = get_input(pts, dims)
    # Bandwidth two orders of magnitude beyond the data range.
    huge_sigma = 100 * np.max(np.abs(diagrams))

    transformed = HeatKernel(sigma=huge_sigma, n_bins=bins).fit_transform(diagrams)

    assert np.max(np.abs(transformed)) <= 1e-4
def test_hk_shape(n_jobs, pts, dims):
    """The output must contain one (n_bins, n_bins) image per diagram and per
    homology dimension present in the input."""
    bins = 10
    diagrams = get_input(pts, dims)
    # Half the birth/death value range as a reasonable bandwidth.
    coords = diagrams[:, :, :2]
    bandwidth = (np.max(coords) - np.min(coords)) / 2

    heat = HeatKernel(sigma=bandwidth, n_bins=bins, n_jobs=n_jobs)
    result = heat.fit_transform(diagrams)

    n_homology_dims = np.unique(dims).size
    assert result.shape == (diagrams.shape[0], n_homology_dims, bins, bins)
def test_hk_positive(pts, dims):
    """Points above the persistence-diagram diagonal should map to
    non-negative heat values (up to numerical error)."""
    bins = 10
    diagrams = get_input(pts, dims)
    coords = diagrams[:, :, :2]
    bandwidth = (np.max(coords) - np.min(coords)) / 2

    images = HeatKernel(sigma=bandwidth, n_bins=bins).fit_transform(diagrams)

    # Flipping the vertical axis maps the above-diagonal region onto the
    # lower triangle; that triangle must be >= 0 within tolerance.
    lower_triangle = np.tril(images[:, :, ::-1, :])
    assert np.all(lower_triangle + 1e-13 >= 0.)
def test_large_hk_shape_parallel():
    """Test that HeatKernel returns something of the right shape when the
    input array is at least 1MB and more than one process is used, triggering
    joblib's use of memmaps."""
    bins = 10
    values = np.linspace(0, 100, 300000)
    # One diagram of 300000 points, all with homology dimension 0.
    triples = np.stack([values, values, np.zeros(len(values))]).transpose()
    diagrams = triples[np.newaxis, :, :]

    result = HeatKernel(sigma=1, n_bins=bins, n_jobs=2).fit_transform(diagrams)

    # Only dimension 0 is present, hence a single dimension in the output.
    assert result.shape == (diagrams.shape[0], 1, bins, bins)
def get_heat_kernel(persistence_diagram):
    """Compute the heat-kernel representation of ``persistence_diagram``.

    Uses a fixed small bandwidth (sigma=0.001) and the module-level
    ``N_BINS`` / ``N_JOBS`` settings; returns whatever
    ``HeatKernel.fit_transform`` produces for the input.
    """
    hk = HeatKernel(sigma=0.001, n_bins=N_BINS, n_jobs=N_JOBS)
    result = hk.fit_transform(persistence_diagram)
    # Bug fix: the original printed "Computed heat kernel" BEFORE running
    # fit_transform, so the message was emitted even though nothing had been
    # computed yet (and would appear before any failure inside the call).
    print("Computed heat kernel")
    return result