Code example #1
import torch
import pytest

# NOTE: the import block was truncated in this extract; module paths for
# the entropies, solvers and divergence are assumed from the package
# layout visible in the other examples.
from unbalancedot.entropy import (
    KullbackLeibler,
    TotalVariation,
    Range,
    PowerEntropy,
)
from unbalancedot.sinkhorn import BatchVanillaSinkhorn, BatchExpSinkhorn
from unbalancedot.functional import regularized_ot
from unbalancedot.utils import generate_measure, dist_matrix, euclidean_cost

torch.set_default_tensor_type(torch.DoubleTensor)
torch.manual_seed(0)
torch.set_printoptions(precision=10)


@pytest.mark.parametrize("p", [2])
@pytest.mark.parametrize("reach", [0.5, 1.0, 2.0])
@pytest.mark.parametrize("m,n", [(1.0, 1.0), (0.7, 2.0), (0.5, 0.7),
                                 (1.5, 2.0)])
@pytest.mark.parametrize(
    "entropy",
    [
        KullbackLeibler(1e0, 1e0),
        TotalVariation(1e0, 1e0),
        Range(1e0, 0.3, 2),
        PowerEntropy(1e0, 1e0, 0),
        PowerEntropy(1e0, 1e0, -1),
    ],
)
@pytest.mark.parametrize("div", [regularized_ot])
@pytest.mark.parametrize(
    "solv",
    [
        BatchVanillaSinkhorn(
            nits=5000, nits_grad=20, tol=1e-14, assume_convergence=True),
        BatchExpSinkhorn(
            nits=5000, nits_grad=20, tol=1e-14, assume_convergence=True),
    ],
)
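# The decorated test function is cut off in this extract. Below is a
# minimal placeholder sketch consuming every parametrized argument; the
# function name and the assertion are assumptions, patterned on
# test_divergence_zero in code example #5.
def test_regularized_ot_runs(solv, div, entropy, reach, p, m, n):
    entropy.reach = reach
    cost = euclidean_cost(p)
    a, x = generate_measure(1, 5, 2)
    b, y = generate_measure(1, 6, 2)
    func = div(m * a, x, n * b, y, cost, entropy, solver=solv)
    assert torch.isfinite(func).all()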
Code example #2
import numpy as np
import torch
import matplotlib.pyplot as plt

# NOTE: module paths are assumed from the package layout; template_measure
# is a helper from the original script (not shown) that builds two 1-D
# measures together with their supports.
from unbalancedot.entropy import KullbackLeibler, TotalVariation
from unbalancedot.sinkhorn import BatchVanillaSinkhorn
from unbalancedot.utils import euclidean_cost

# Init of measures and solvers
a, x, b, y = template_measure(500)
A, X, B, Y = (
    torch.from_numpy(a)[None, :],
    torch.from_numpy(x)[None, :, None],
    torch.from_numpy(b)[None, :],
    torch.from_numpy(y)[None, :, None],
)
blur = 1e-3
reach = np.array([10 ** x for x in np.linspace(-2, np.log10(0.5), 4)])
cost = euclidean_cost(2)
solver = BatchVanillaSinkhorn(
    nits=10000, nits_grad=2, tol=1e-8, assume_convergence=True
)
list_entropy = [KullbackLeibler(blur, reach[0]),
                TotalVariation(blur, reach[0])]

# Init of plot
blue = (0.55, 0.55, 0.95)
red = (0.95, 0.55, 0.55)

# Plotting transport marginals for each entropy
for i in range(len(list_entropy)):
    for j in range(len(reach)):
        fig = plt.figure(figsize=(8, 4))
        entropy = list_entropy[i]
        entropy.reach = reach[j]
        f, g = solver.sinkhorn_asym(A, X, B, Y, cost, entropy)
        C = cost(X, Y)
        # Primal transport plan of entropic OT:
        # pi[i, j] = exp((f[i] + g[j] - C[i, j]) / blur) * a[i] * b[j]
        pi = (
            ((f[:, :, None] + g[:, None, :] - C) / blur).exp()
            * A[:, :, None]
            * B[:, None, :]
        )
        # Continuation reconstructed by analogy with the reference plot in
        # code example #4; the exact styling of the original figure is an
        # assumption.
        plt.fill_between(x, 0, a, color=blue)
        plt.fill_between(y, 0, b, color=red)
        plt.plot(x, pi.sum(dim=2)[0].numpy(), "b")
        plt.plot(y, pi.sum(dim=1)[0].numpy(), "r")
        plt.tight_layout()
Code example #4
import torch
import matplotlib.pyplot as plt

# NOTE: module paths are assumed; template_measure and the output
# directory `path` come from the original script and are not shown here.
from unbalancedot.entropy import (
    Balanced,
    KullbackLeibler,
    TotalVariation,
    Range,
    PowerEntropy,
)
from unbalancedot.sinkhorn import BatchVanillaSinkhorn
from unbalancedot.utils import euclidean_cost

a, x, b, y = template_measure(1000)
A, X, B, Y = (
    torch.from_numpy(a)[None, :],
    torch.from_numpy(x)[None, :, None],
    torch.from_numpy(b)[None, :],
    torch.from_numpy(y)[None, :, None],
)
p, blur, reach = 2, 1e-3, 0.1
cost = euclidean_cost(p)
solver = BatchVanillaSinkhorn(
    nits=10000, nits_grad=1, tol=1e-5, assume_convergence=True
)
list_entropy = [
    Balanced(blur),
    KullbackLeibler(blur, reach),
    TotalVariation(blur, reach),
    Range(blur, 0.7, 1.3),
    PowerEntropy(blur, reach, 0.0),
]

# Init of plot
blue = (0.55, 0.55, 0.95)
red = (0.95, 0.55, 0.55)
fig = plt.figure(figsize=(8, 4))
plt.fill_between(x, 0, a, color="b")
plt.fill_between(y, 0, b, color="r")
plt.tight_layout()
plt.savefig(path + "/comparison_entropy_reference.eps", format="eps")


# Plotting each entropy separately
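# The per-entropy plotting loop is cut off here. By analogy with the
# marginal plots in code example #2 and the reference plot above, it
# presumably resembles the sketch below; the plan formula is the standard
# entropic-OT primal plan, while the file naming is an assumption.
for i, entropy in enumerate(list_entropy):
    fig = plt.figure(figsize=(8, 4))
    f, g = solver.sinkhorn_asym(A, X, B, Y, cost, entropy)
    C = cost(X, Y)
    # pi[i, j] = exp((f[i] + g[j] - C[i, j]) / blur) * a[i] * b[j]
    pi = (
        ((f[:, :, None] + g[:, None, :] - C) / blur).exp()
        * A[:, :, None]
        * B[:, None, :]
    )
    plt.fill_between(x, 0, a, color=blue)
    plt.fill_between(y, 0, b, color=red)
    plt.plot(x, pi.sum(dim=2)[0].numpy(), "b")
    plt.plot(y, pi.sum(dim=1)[0].numpy(), "r")
    plt.tight_layout()
    plt.savefig(path + "/comparison_entropy_%d.eps" % i, format="eps")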
Code example #5
import torch
import pytest

# NOTE: module paths are assumed from the package layout used in the
# other examples.
from unbalancedot.entropy import (
    Balanced,
    KullbackLeibler,
    TotalVariation,
    Range,
    PowerEntropy,
)
from unbalancedot.sinkhorn import BatchVanillaSinkhorn
from unbalancedot.functional import sinkhorn_divergence, hausdorff_divergence
from unbalancedot.utils import generate_measure, euclidean_cost

torch.manual_seed(0)
solver = BatchVanillaSinkhorn(nits=5000,
                              nits_grad=5,
                              tol=1e-15,
                              assume_convergence=True)


@pytest.mark.parametrize("p", [1, 1.5, 2])
@pytest.mark.parametrize("reach", [0.5, 1.0, 2.0])
@pytest.mark.parametrize("m", [1.0, 0.7, 2.0])
@pytest.mark.parametrize(
    "entropy",
    [
        KullbackLeibler(1e0, 1e0),
        Balanced(1e0),
        TotalVariation(1e0, 1e0),
        Range(1e0, 0.3, 2),
        PowerEntropy(1e0, 1e0, 0),
        PowerEntropy(1e0, 1e0, -1),
    ],
)
@pytest.mark.parametrize("div", [sinkhorn_divergence, hausdorff_divergence])
def test_divergence_zero(div, entropy, reach, p, m):
    entropy.reach = reach
    cost = euclidean_cost(p)
    a, x = generate_measure(1, 5, 2)
    func = div(m * a, x, m * a, x, cost, entropy, solver=solver)
    assert torch.allclose(func, torch.Tensor([0.0]), rtol=1e-6)


@pytest.mark.parametrize("p", [1, 1.5, 2])
    if setting == 2:  # Compute Kl dynamic for an almost L1 metric
        gradient_flow(sinkhorn_divergence, KullbackLeibler(1e-2, 0.3),
                      solver=solver, cost=euclidean_cost(1.1), p=1.1,
                      lr_x=10., lr_a=0.3, Nsteps=300)

    # Compare Balanced OT with and without mass creation allowed
    if setting == 3:
        gradient_flow(sinkhorn_divergence, entropy=Balanced(1e-3),
                      solver=solver, cost=cost, p=p,
                      lr_x=60., lr_a=0., Nsteps=300)
        gradient_flow(sinkhorn_divergence, entropy=Balanced(1e-3),
                      solver=solver, cost=cost, p=p,
                      lr_x=60., lr_a=0.3, Nsteps=300)

    if setting == 4:  # Compute flow for TV with various blurring levels
        gradient_flow(sinkhorn_divergence, entropy=TotalVariation(1e-2, 0.1),
                      solver=solver, cost=cost, p=p,
                      lr_x=60., lr_a=0.3, Nsteps=300)
        gradient_flow(sinkhorn_divergence, entropy=TotalVariation(1e-3, 0.1),
                      solver=solver, cost=cost, p=p,
                      lr_x=60., lr_a=0.3, Nsteps=300)

    if setting == 5:  # Compute flow for the Range divergence
        gradient_flow(sinkhorn_divergence, entropy=Range(1e-2, 0.7, 1.3),
                      solver=solver, cost=cost, p=p,
                      lr_x=60., lr_a=0.3, Nsteps=300)
        gradient_flow(sinkhorn_divergence, entropy=Range(1e-3, 0.7, 1.3),
                      solver=solver, cost=cost, p=p,
                      lr_x=60., lr_a=0.3, Nsteps=300)

    if setting == 6:  # Flow for the Regularized OT
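        # (Body truncated in this extract.) By analogy with settings 2-5,
        # this branch presumably calls gradient_flow with the regularized
        # OT functional; the entropy chosen below is an assumption.
        gradient_flow(regularized_ot, entropy=KullbackLeibler(1e-3, 0.1),
                      solver=solver, cost=cost, p=p,
                      lr_x=60., lr_a=0.3, Nsteps=300)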
Code example #7
import torch
import pytest

# NOTE: the head of this import block was truncated; it is reconstructed
# from the names used below (module paths assumed).
from unbalancedot.entropy import (
    Balanced,
    KullbackLeibler,
    TotalVariation,
    Range,
    PowerEntropy,
)
from unbalancedot.sinkhorn import BatchVanillaSinkhorn, BatchScalingSinkhorn
from unbalancedot.utils import generate_measure, euclidean_cost

torch.set_default_tensor_type(torch.DoubleTensor)
torch.manual_seed(0)


@pytest.mark.parametrize(
    "entropy",
    [
        Balanced(1e1),
        KullbackLeibler(1e1, 1e0),
        TotalVariation(1e1, 1e0),
        Range(1e1, 0.3, 2),
        PowerEntropy(1e1, 1e0, 0),
        PowerEntropy(1e1, 1e0, -1),
    ],
)
@pytest.mark.parametrize(
    "solv",
    [
        BatchVanillaSinkhorn(
            nits=10, nits_grad=10, tol=1e-5, assume_convergence=True),
        BatchVanillaSinkhorn(
            nits=10, nits_grad=10, tol=1e-5, assume_convergence=False),
        BatchScalingSinkhorn(budget=10, nits_grad=10, assume_convergence=True),
        BatchScalingSinkhorn(budget=10, nits_grad=10,
                             assume_convergence=False),
        # (solver list possibly truncated in this extract)
    ],
)
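# The test function wrapped by these decorators is missing from the
# extract. A minimal smoke-test sketch with a hypothetical name and
# assertion, assuming the sinkhorn_asym interface used in code example #2:
def test_solver_returns_finite_potentials(solv, entropy):
    cost = euclidean_cost(2)
    a, x = generate_measure(1, 5, 2)
    b, y = generate_measure(1, 6, 2)
    f, g = solv.sinkhorn_asym(a, x, b, y, cost, entropy)
    assert torch.isfinite(f).all() and torch.isfinite(g).all()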