def test_sinkhorn_consistency_exp_log_sym(entropy, rtol, p, m, reach):
    """Test if the exp sinkhorn is consistent with its log form"""
    entropy.reach = reach
    cost = euclidean_cost(p)
    a, x = generate_measure(2, 5, 3)
    solver1 = BatchVanillaSinkhorn(nits=10000, nits_grad=10, tol=1e-12, assume_convergence=True)
    solver2 = BatchExpSinkhorn(nits=10000, nits_grad=10, tol=1e-12, assume_convergence=True)
    _, g_a = solver1.sinkhorn_sym(m * a, x, cost=cost, entropy=entropy)
    _, v_a = solver2.sinkhorn_sym(m * a, x, cost=cost, entropy=entropy)
    assert torch.allclose(g_a, v_a, rtol=rtol)
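
# A minimal, direct invocation of the consistency check above (outside pytest),
# assuming the imports used by the other examples on this page; the entropy,
# tolerance, and scaling values below are illustrative, not the original
# pytest parametrization.
if __name__ == "__main__":
    from common.entropy import KullbackLeibler
    test_sinkhorn_consistency_exp_log_sym(KullbackLeibler(1e0, 1e0),
                                          rtol=1e-6, p=2, m=1., reach=1.)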
Example #2
import pytest

import torch
from common.functional import regularized_ot, hausdorff_divergence, sinkhorn_divergence, energyDistance
from common.sinkhorn import BatchVanillaSinkhorn
from common.entropy import KullbackLeibler, Balanced, TotalVariation, Range, PowerEntropy
from common.utils import generate_measure, convolution, scal, euclidean_cost

torch.set_printoptions(precision=10)
torch.set_default_tensor_type(torch.DoubleTensor)
solver = BatchVanillaSinkhorn(nits=5000,
                              nits_grad=5,
                              tol=1e-15,
                              assume_convergence=True)


@pytest.mark.parametrize('p', [1, 1.5, 2])
@pytest.mark.parametrize('reach', [0.5, 1., 2.])
@pytest.mark.parametrize('m', [1., 0.7, 2.])
@pytest.mark.parametrize('entropy', [
    KullbackLeibler(1e0, 1e0),
    Balanced(1e0),
    TotalVariation(1e0, 1e0),
    Range(1e0, 0.3, 2),
    PowerEntropy(1e0, 1e0, 0),
    PowerEntropy(1e0, 1e0, -1)
])
@pytest.mark.parametrize('div', [sinkhorn_divergence, hausdorff_divergence])
def test_divergence_zero(div, entropy, reach, p, m):
    entropy.reach = reach
    cost = euclidean_cost(p)
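    # Assumed continuation (the original excerpt is truncated here): the
    # divergence of a measure compared with itself should be numerically zero.
    # The call signature mirrors the sinkhorn_divergence use in Example #4.
    a, x = generate_measure(1, 5, 2)
    res = div(m * a, x, m * a, x, cost=cost, entropy=entropy, solver=solver)
    assert torch.allclose(res, torch.zeros_like(res), atol=1e-6)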
Example #3
import pytest

import torch

from common.sinkhorn import BatchVanillaSinkhorn, BatchScalingSinkhorn, BatchExpSinkhorn
from common.entropy import KullbackLeibler, Balanced, TotalVariation, Range, PowerEntropy
from common.utils import generate_measure, euclidean_cost

torch.set_default_tensor_type(torch.DoubleTensor)



@pytest.mark.parametrize('entropy', [Balanced(1e1), KullbackLeibler(1e1, 1e0), TotalVariation(1e1, 1e0),
                                     Range(1e1, 0.3, 2), PowerEntropy(1e1, 1e0, 0), PowerEntropy(1e1, 1e0, -1)])
@pytest.mark.parametrize('solv', [BatchVanillaSinkhorn(nits=10, nits_grad=10, tol=1e-5, assume_convergence=True),
                                  BatchVanillaSinkhorn(nits=10, nits_grad=10, tol=1e-5, assume_convergence=False),
                                  BatchScalingSinkhorn(budget=10, nits_grad=10, assume_convergence=True),
                                  BatchScalingSinkhorn(budget=10, nits_grad=10, assume_convergence=False),
                                  BatchExpSinkhorn(nits=10, nits_grad=10, tol=1e-5, assume_convergence=True),
                                  BatchExpSinkhorn(nits=10, nits_grad=10, tol=1e-5, assume_convergence=False)])
def test_sinkhorn_no_bug(entropy, solv):
    a, x = generate_measure(2, 5, 3)
    b, y = generate_measure(2, 6, 3)
    solv.sinkhorn_asym(a, x, b, y, cost=euclidean_cost(1), entropy=entropy)
    solv.sinkhorn_sym(a, x, cost=euclidean_cost(1), entropy=entropy, y_j=y)


# TODO: Adapt the error function for TV due to translation invariance when masses are both 1
@pytest.mark.parametrize('p', [1, 1.5, 2])
@pytest.mark.parametrize('reach', [0.5, 1., 2.])
@pytest.mark.parametrize('m,n', [(1., 1.), (0.7, 2.), (0.5, 0.7), (1.5, 2.)])
Example #4
import pytest

import torch
from common.functional import regularized_ot, hausdorff_divergence, sinkhorn_divergence, energyDistance
from common.sinkhorn import BatchVanillaSinkhorn
from common.entropy import KullbackLeibler, Balanced, TotalVariation, Range, PowerEntropy
from common.utils import generate_measure, euclidean_cost

torch.set_default_tensor_type(torch.cuda.FloatTensor)
solver = BatchVanillaSinkhorn(nits=10, tol=0, assume_convergence=True)

@pytest.mark.parametrize('entropy', [KullbackLeibler(1e0, 1e0), Balanced(1e0), TotalVariation(1e0, 1e0),
                                     Range(1e0, 0.3, 2), PowerEntropy(1e0, 1e0, 0), PowerEntropy(1e0, 1e0, -1)])
def test_divergence_zero(entropy):
    a, x = generate_measure(1, 5, 2)
    a, x = a.float().cuda(), x.float().cuda()
    b, y = generate_measure(1, 6, 2)
    b, y = b.float().cuda(), y.float().cuda()
    sinkhorn_divergence(a, x, b, y, cost=euclidean_cost(2), entropy=entropy, solver=solver)
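
# A companion check (not in the original excerpt) exercising hausdorff_divergence
# on the GPU with the same measures; its signature is assumed to match the
# sinkhorn_divergence call above.
@pytest.mark.parametrize('entropy', [KullbackLeibler(1e0, 1e0), TotalVariation(1e0, 1e0)])
def test_hausdorff_no_bug_gpu(entropy):
    a, x = generate_measure(1, 5, 2)
    a, x = a.float().cuda(), x.float().cuda()
    b, y = generate_measure(1, 6, 2)
    b, y = b.float().cuda(), y.float().cuda()
    hausdorff_divergence(a, x, b, y, cost=euclidean_cost(2), entropy=entropy, solver=solver)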
Example #5
torch.set_printoptions(precision=10)


@pytest.mark.parametrize('p', [2])
@pytest.mark.parametrize('reach', [0.5, 1., 2.])
@pytest.mark.parametrize('m,n', [(1., 1.), (0.7, 2.), (0.5, 0.7), (1.5, 2.)])
@pytest.mark.parametrize('entropy', [
    KullbackLeibler(1e0, 1e0),
    TotalVariation(1e0, 1e0),
    Range(1e0, 0.3, 2),
    PowerEntropy(1e0, 1e0, 0),
    PowerEntropy(1e0, 1e0, -1)
])
@pytest.mark.parametrize('div', [regularized_ot])
@pytest.mark.parametrize('solv', [
    BatchVanillaSinkhorn(
        nits=5000, nits_grad=20, tol=1e-14, assume_convergence=True),
    BatchExpSinkhorn(
        nits=5000, nits_grad=20, tol=1e-14, assume_convergence=True)
])
def test_gradient_unbalanced_weight_and_position_asym(solv, div, entropy,
                                                      reach, p, m, n):
    entropy.reach = reach
    cost = euclidean_cost(p)
    a, x = generate_measure(1, 5, 2)
    a = m * a
    a.requires_grad = True
    x.requires_grad = True
    b, y = generate_measure(1, 6, 2)
    f, g = solv.sinkhorn_asym(a, x, n * b, y, cost, entropy)
    func = entropy.output_regularized(a, x, n * b, y, cost, f, g)
    [grad_num_x, grad_num_a] = torch.autograd.grad(func, [x, a])
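    # Assumed continuation (the original excerpt is truncated here): one way to
    # finish the check is to compare the autograd gradient w.r.t. the weights
    # against a central finite difference of the regularized OT functional.
    # The step size and the perturbed re-solves below are illustrative.
    eps = 1e-4
    with torch.no_grad():
        da = torch.zeros_like(a)
        da[0, 0] = eps
        f_p, g_p = solv.sinkhorn_asym(a + da, x, n * b, y, cost, entropy)
        f_m, g_m = solv.sinkhorn_asym(a - da, x, n * b, y, cost, entropy)
        func_p = entropy.output_regularized(a + da, x, n * b, y, cost, f_p, g_p)
        func_m = entropy.output_regularized(a - da, x, n * b, y, cost, f_m, g_m)
        grad_fd = (func_p - func_m) / (2 * eps)
    assert torch.allclose(grad_num_a[0, 0], grad_fd, rtol=1e-3, atol=1e-5)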
Example #6
    y = np.concatenate((y1, y2))
    b = np.concatenate((0.45 * b1, 0.55 * b2))
    b = b / np.sum(b)

    return a, x, b, y


# Init of measures and solvers
a, x, b, y = template_measure(300)
A = torch.from_numpy(a)[None, :]
X = torch.from_numpy(x)[None, :, None]
B = torch.from_numpy(b)[None, :]
Y = torch.from_numpy(y)[None, :, None]
blur = 1e-3
reach = np.array([10**x for x in np.linspace(-2, np.log10(0.5), 4)])
cost = euclidean_cost(2)
solver = BatchVanillaSinkhorn(nits=10000,
                              nits_grad=2,
                              tol=1e-8,
                              assume_convergence=True)
list_entropy = [
    KullbackLeibler(blur, reach[0]),
    TotalVariation(blur, reach[0])
]

# Init of plot
blue = (.55, .55, .95)
red = (.95, .55, .55)
fig, ax = plt.subplots(nrows=2, ncols=4, figsize=(48, 12))

# Plotting transport marginals for each entropy
for i in range(len(list_entropy)):
    for j in range(len(reach)):
        entropy = list_entropy[i]
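        # Assumed continuation of the truncated loop (illustrative only): solve
        # the unbalanced OT problem for the current (entropy, reach) pair and
        # plot the first marginal of the entropic plan against the input measure.
        # The plan formula pi = (a x b) * exp((f + g - C) / blur) and the callable
        # cost(X, Y) returning the pairwise cost matrix are assumptions.
        entropy.reach = reach[j]
        f, g = solver.sinkhorn_asym(A, X, B, Y, cost=cost, entropy=entropy)
        C = cost(X, Y)
        plan = A[:, :, None] * B[:, None, :] * torch.exp((f[:, :, None] + g[:, None, :] - C) / blur)
        marginal = plan.sum(dim=2)[0]
        ax[i, j].scatter(x, a, color=blue, s=4)
        ax[i, j].scatter(x, marginal.numpy(), color=red, s=4)
        ax[i, j].set_title(f"{type(entropy).__name__}, reach={reach[j]:.2g}")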