Code example #1
File: test_bregman.py  Project: eddardd/POT
def test_sinkhorn2_variants_multi_b(nx):
    # test sinkhorn
    n = 50
    rng = np.random.RandomState(0)

    x = rng.randn(n, 2)
    u = ot.utils.unif(n)

    b = rng.rand(n, 3)
    b = b / np.sum(b, 0, keepdims=True)

    M = ot.dist(x, x)

    ub, bb, M_nx = nx.from_numpy(u, b, M)

    G = ot.sinkhorn2(u, b, M, 1, method='sinkhorn', stopThr=1e-10)
    Gl = nx.to_numpy(
        ot.sinkhorn2(ub, bb, M_nx, 1, method='sinkhorn_log', stopThr=1e-10))
    G0 = nx.to_numpy(
        ot.sinkhorn2(ub, bb, M_nx, 1, method='sinkhorn', stopThr=1e-10))
    Gs = nx.to_numpy(
        ot.sinkhorn2(ub,
                     bb,
                     M_nx,
                     1,
                     method='sinkhorn_stabilized',
                     stopThr=1e-10))

    # check values
    np.testing.assert_allclose(G, G0, atol=1e-05)
    np.testing.assert_allclose(G, Gl, atol=1e-05)
    np.testing.assert_allclose(G0, Gs, atol=1e-05)
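Note on the API used above: ot.sinkhorn2 accepts a 2-D array of target histograms, one per column, and returns one regularized OT loss per column. A minimal standalone sketch of that call, assuming only numpy and POT:

import numpy as np
import ot

n = 50
rng = np.random.RandomState(0)
x = rng.randn(n, 2)
u = ot.utils.unif(n)               # uniform source histogram
b = rng.rand(n, 3)
b /= b.sum(axis=0, keepdims=True)  # three target histograms, one per column
M = ot.dist(x, x)                  # squared Euclidean cost by default

losses = ot.sinkhorn2(u, b, M, 1)  # one loss per column of b
print(losses.shape)                # (3,)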
Code example #2
File: test_bregman.py  Project: skn123/POT
def test_empirical_sinkhorn_divergence():
    # Test sinkhorn divergence
    n = 10
    a = np.linspace(1, n, n)
    a /= a.sum()
    b = ot.unif(n)
    X_s = np.reshape(np.arange(n), (n, 1))
    X_t = np.reshape(np.arange(0, n * 2, 2), (n, 1))
    M = ot.dist(X_s, X_t)
    M_s = ot.dist(X_s, X_s)
    M_t = ot.dist(X_t, X_t)

    emp_sinkhorn_div = ot.bregman.empirical_sinkhorn_divergence(X_s, X_t, 1, a=a, b=b)
    sinkhorn_div = (ot.sinkhorn2(a, b, M, 1)
                    - 1 / 2 * ot.sinkhorn2(a, a, M_s, 1)
                    - 1 / 2 * ot.sinkhorn2(b, b, M_t, 1))

    emp_sinkhorn_div_log, log_es = ot.bregman.empirical_sinkhorn_divergence(X_s, X_t, 1, a=a, b=b, log=True)
    sink_div_log_ab, log_s_ab = ot.sinkhorn2(a, b, M, 1, log=True)
    sink_div_log_a, log_s_a = ot.sinkhorn2(a, a, M_s, 1, log=True)
    sink_div_log_b, log_s_b = ot.sinkhorn2(b, b, M_t, 1, log=True)
    sink_div_log = sink_div_log_ab - 1 / 2 * (sink_div_log_a + sink_div_log_b)
    # check constraints
    np.testing.assert_allclose(
        emp_sinkhorn_div, sinkhorn_div, atol=1e-05)  # cf conv emp sinkhorn
    np.testing.assert_allclose(
        emp_sinkhorn_div_log, sink_div_log, atol=1e-05)  # cf conv emp sinkhorn
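For reference, the quantity under test is the Sinkhorn divergence

    S_\varepsilon(a, b) = W_\varepsilon(a, b) - \tfrac{1}{2} W_\varepsilon(a, a) - \tfrac{1}{2} W_\varepsilon(b, b),

where W_\varepsilon is the entropy-regularized OT loss computed by ot.sinkhorn2; the sinkhorn_div expression above is exactly this formula with \varepsilon = 1.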
Code example #3
File: test_bregman.py  Project: eddardd/POT
def test_not_implemented_method():
    # test sinkhorn
    w = 10
    n = w**2
    rng = np.random.RandomState(42)
    A_img = rng.rand(2, w, w)
    A_flat = A_img.reshape(n, 2)
    a1, a2 = A_flat.T
    M_flat = ot.utils.dist0(n)
    not_implemented = "new_method"
    reg = 0.01
    with pytest.raises(ValueError):
        ot.sinkhorn(a1, a2, M_flat, reg, method=not_implemented)
    with pytest.raises(ValueError):
        ot.sinkhorn2(a1, a2, M_flat, reg, method=not_implemented)
    with pytest.raises(ValueError):
        ot.barycenter(A_flat, M_flat, reg, method=not_implemented)
    with pytest.raises(ValueError):
        ot.bregman.barycenter_debiased(A_flat,
                                       M_flat,
                                       reg,
                                       method=not_implemented)
    with pytest.raises(ValueError):
        ot.bregman.convolutional_barycenter2d(A_img,
                                              reg,
                                              method=not_implemented)
    with pytest.raises(ValueError):
        ot.bregman.convolutional_barycenter2d_debiased(A_img,
                                                       reg,
                                                       method=not_implemented)
Code example #4
File: test_bregman.py  Project: eddardd/POT
def test_sinkhorn_stabilization():
    # test sinkhorn
    n = 100
    a1 = ot.datasets.make_1D_gauss(n, m=30, s=10)
    a2 = ot.datasets.make_1D_gauss(n, m=40, s=10)
    M = ot.utils.dist0(n)
    reg = 1e-5
    loss1 = ot.sinkhorn2(a1, a2, M, reg, method="sinkhorn_log")
    loss2 = ot.sinkhorn2(a1, a2, M, reg, tau=1, method="sinkhorn_stabilized")
    np.testing.assert_allclose(loss1, loss2,
                               atol=1e-06)  # cf convergence sinkhorn
Code example #5
def coupling_W2(coupling_1, coupling_2, source, target, epsilon):
    """
    Returns the entropically-regularized W2 distance between two couplings
    """
    cost_matrix = coupling_to_coupling_cost_matrix(source, target)
    return ot.sinkhorn2(coupling_1.flatten(), coupling_2.flatten(),
                        cost_matrix, epsilon)
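The helper coupling_to_coupling_cost_matrix is not shown and is project-specific. A minimal sketch of one plausible choice (a hypothetical helper, not the original): the cost between coupling entries (i, j) and (k, l) is taken as d(x_i, x_k) + d(y_j, y_l), so two flattened n*m couplings can be fed to ot.sinkhorn2:

import ot

def coupling_to_coupling_cost_matrix(source, target):
    """Hypothetical helper: pairwise cost between entries of two couplings."""
    d_s = ot.dist(source, source)  # (n, n) costs between source points
    d_t = ot.dist(target, target)  # (m, m) costs between target points
    n, m = d_s.shape[0], d_t.shape[0]
    # cost[i*m + j, k*m + l] = d_s[i, k] + d_t[j, l]
    return (d_s[:, None, :, None] + d_t[None, :, None, :]).reshape(n * m, n * m)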
Code example #6
File: test_bregman.py  Project: AdrienCorenflos/POT
def test_lazy_empirical_sinkhorn():
    # test sinkhorn
    n = 10
    a = ot.unif(n)
    b = ot.unif(n)
    numIterMax = 1000

    X_s = np.reshape(np.arange(n), (n, 1))
    X_t = np.reshape(np.arange(0, n), (n, 1))
    M = ot.dist(X_s, X_t)
    M_m = ot.dist(X_s, X_t, metric='minkowski')

    f, g = ot.bregman.empirical_sinkhorn(X_s,
                                         X_t,
                                         1,
                                         numIterMax=numIterMax,
                                         isLazy=True,
                                         batchSize=(1, 3),
                                         verbose=True)
    G_sqe = np.exp(f[:, None] + g[None, :] - M / 1)
    sinkhorn_sqe = ot.sinkhorn(a, b, M, 1)

    f, g, log_es = ot.bregman.empirical_sinkhorn(X_s,
                                                 X_t,
                                                 0.1,
                                                 numIterMax=numIterMax,
                                                 isLazy=True,
                                                 batchSize=1,
                                                 log=True)
    G_log = np.exp(f[:, None] + g[None, :] - M / 0.1)
    sinkhorn_log, log_s = ot.sinkhorn(a, b, M, 0.1, log=True)

    f, g = ot.bregman.empirical_sinkhorn(X_s,
                                         X_t,
                                         1,
                                         metric='minkowski',
                                         numIterMax=numIterMax,
                                         isLazy=True,
                                         batchSize=1)
    G_m = np.exp(f[:, None] + g[None, :] - M_m / 1)
    sinkhorn_m = ot.sinkhorn(a, b, M_m, 1)

    loss_emp_sinkhorn, log = ot.bregman.empirical_sinkhorn2(
        X_s, X_t, 1, numIterMax=numIterMax, isLazy=True, batchSize=1, log=True)
    loss_sinkhorn = ot.sinkhorn2(a, b, M, 1)

    # check constraints
    np.testing.assert_allclose(sinkhorn_sqe.sum(1), G_sqe.sum(1),
                               atol=1e-05)  # metric sqeuclidean
    np.testing.assert_allclose(sinkhorn_sqe.sum(0), G_sqe.sum(0),
                               atol=1e-05)  # metric sqeuclidean
    np.testing.assert_allclose(sinkhorn_log.sum(1), G_log.sum(1),
                               atol=1e-05)  # log
    np.testing.assert_allclose(sinkhorn_log.sum(0), G_log.sum(0),
                               atol=1e-05)  # log
    np.testing.assert_allclose(sinkhorn_m.sum(1), G_m.sum(1),
                               atol=1e-05)  # metric minkowski
    np.testing.assert_allclose(sinkhorn_m.sum(0), G_m.sum(0),
                               atol=1e-05)  # metric minkowski
    np.testing.assert_allclose(loss_emp_sinkhorn, loss_sinkhorn, atol=1e-05)
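In lazy mode, empirical_sinkhorn returns the dual potentials f and g rather than the full plan; the test reconstructs the plan as

    G_{ij} = \exp(f_i + g_j - M_{ij} / \varepsilon),

which is what the np.exp(f[:, None] + g[None, :] - M / reg) lines above compute.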
Code example #7
File: test_bregman.py  Project: eddardd/POT
def test_sinkhorn_multi_b(method, verbose, warn):
    # test sinkhorn
    n = 10
    rng = np.random.RandomState(0)

    x = rng.randn(n, 2)
    u = ot.utils.unif(n)

    b = rng.rand(n, 3)
    b = b / np.sum(b, 0, keepdims=True)

    M = ot.dist(x, x)

    loss0, log = ot.sinkhorn(u,
                             b,
                             M,
                             .1,
                             method=method,
                             stopThr=1e-10,
                             log=True)

    loss = [
        ot.sinkhorn2(u,
                     b[:, k],
                     M,
                     .1,
                     method=method,
                     stopThr=1e-10,
                     verbose=verbose,
                     warn=warn) for k in range(3)
    ]
    # check constraints
    np.testing.assert_allclose(loss0, loss,
                               atol=1e-4)  # cf convergence sinkhorn
Code example #8
def _edge_curvature(
    edge,
    measures,
    geodesic_distances,
    measure_cutoff=1e-6,
    sinkhorn_regularisation=0,
    weighted_curvature=False,
):
    """Compute curvature for an edge."""
    node_x, node_y = edge
    m_x, m_y = measures[node_x], measures[node_y]

    Nx = np.where(m_x >= measure_cutoff * np.max(m_x))[0]
    Ny = np.where(m_y >= measure_cutoff * np.max(m_y))[0]

    m_x, m_y = m_x[Nx], m_y[Ny]
    m_x /= m_x.sum()
    m_y /= m_y.sum()

    distances_xy = geodesic_distances[np.ix_(Nx, Ny)]

    if sinkhorn_regularisation > 0:
        wasserstein_distance = ot.sinkhorn2(m_x, m_y, distances_xy, sinkhorn_regularisation)[0]
    else:
        wasserstein_distance = ot.emd2(m_x, m_y, distances_xy)

    if weighted_curvature:
        return geodesic_distances[node_x, node_y] - wasserstein_distance
    return 1.0 - wasserstein_distance / geodesic_distances[node_x, node_y]
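A minimal usage sketch for _edge_curvature on a 3-node path graph with unit edge lengths and lazy random-walk measures (all data here is illustrative; the default sinkhorn_regularisation=0 falls back to exact ot.emd2):

import numpy as np
import ot

geodesic_distances = np.array([[0., 1., 2.],
                               [1., 0., 1.],
                               [2., 1., 0.]])
measures = {0: np.array([0.5, 0.5, 0.0]),    # half mass on the node itself,
            1: np.array([0.25, 0.5, 0.25]),  # half spread over its neighbors
            2: np.array([0.0, 0.5, 0.5])}

kappa = _edge_curvature((0, 1), measures, geodesic_distances)
print(kappa)  # Ollivier-Ricci-style curvature of edge (0, 1)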
Code example #9
File: test_bregman.py  Project: eddardd/POT
def test_convergence_warning(method):
    # test sinkhorn
    n = 100
    a1 = ot.datasets.make_1D_gauss(n, m=30, s=10)
    a2 = ot.datasets.make_1D_gauss(n, m=40, s=10)
    A = np.asarray([a1, a2]).T
    M = ot.utils.dist0(n)

    with pytest.warns(UserWarning):
        ot.sinkhorn(a1, a2, M, 1., method=method, stopThr=0, numItermax=1)

    if method in ["sinkhorn", "sinkhorn_stabilized", "sinkhorn_log"]:
        with pytest.warns(UserWarning):
            ot.barycenter(A, M, 1, method=method, stopThr=0, numItermax=1)
        with pytest.warns(UserWarning):
            ot.sinkhorn2(a1, a2, M, 1, method=method, stopThr=0, numItermax=1)
Code example #10
def _sinkhorn_distance(x, y, d):
    """Compute the approximate optimal transportation distance (Sinkhorn distance) of the given density distributions.

    Parameters
    ----------
    x : (m,) numpy.ndarray
        Source's density distributions, includes source and source's neighbors.
    y : (n,) numpy.ndarray
        Target's density distributions, includes target and target's neighbors.
    d : (m, n) numpy.ndarray
        Shortest path matrix.

    Returns
    -------
    m : float
        Sinkhorn distance, an approximate optimal transportation distance.

    """
    t0 = time.time()
    m = ot.sinkhorn2(x, y, d, 1e-1, method='sinkhorn')[0]
    logger.debug(
        "%8f secs for Sinkhorn. dist. \t#source_nbr: %d, #target_nbr: %d" %
        (time.time() - t0, len(x), len(y)))

    return m
Code example #11
def ot_loss(mapped, target, device='cpu'):
    reg = 5
    nx = mapped.shape[0]
    ny = target.shape[0]
    a = torch.ones(nx, device=device) / nx  # uniform weights on the mapped points
    b = torch.ones(ny, device=device) / ny  # uniform weights on the target points
    M = ot.dist(mapped, target)  # squared Euclidean by default
    loss = ot.sinkhorn2(a, b, M, reg)
    return loss
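A usage sketch with hypothetical data; POT's torch backend is differentiable, so the loss can be backpropagated to the mapped points:

import torch

mapped = torch.randn(64, 2, requires_grad=True)
target = torch.randn(128, 2)
loss = ot_loss(mapped, target)
loss.backward()
print(mapped.grad.shape)  # torch.Size([64, 2])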
Code example #12
def sinkhorn(X, Y, options):
    """sinkhorn distance(regularized OT)"""
    D = pdist2(X, Y, options.metric)
    N, M = np.shape(X)[0], np.shape(Y)[0]
    a, b = np.ones(N) / N, np.ones(M) / M

    dist = ot.sinkhorn2(a, b, D, options.regularize)[0]
    T = ot.sinkhorn(a, b, D, options.regularize)  # same regularization for the plan
    return dist, T
Code example #13
File: align.py  Project: ShuheiKuriki/RVSML
def sinkhorn(X, Y, options):
    """Sinkhorn Distance Regularized OT"""
    device = 'cuda' if options.cuda else 'cpu'
    D = pdist2(X, Y, options)
    N, M = X.size()[0], Y.size()[0]
    a, b = torch.ones(N, dtype=torch.float64).to(device) / N, torch.ones(
        M, dtype=torch.float64).to(device) / M

    dist = ot.sinkhorn2(a, b, D, options.regularize)[0]
    T = ot.sinkhorn(a, b, D, options.regularize)  # same regularization for the plan
    return dist, T
Code example #14
File: test_bregman.py  Project: eddardd/POT
def test_empirical_sinkhorn_divergence(nx):
    # Test sinkhorn divergence
    n = 10
    a = np.linspace(1, n, n)
    a /= a.sum()
    b = ot.unif(n)
    X_s = np.reshape(np.arange(n, dtype=np.float64), (n, 1))
    X_t = np.reshape(np.arange(0, n * 2, 2, dtype=np.float64), (n, 1))
    M = ot.dist(X_s, X_t)
    M_s = ot.dist(X_s, X_s)
    M_t = ot.dist(X_t, X_t)

    ab, bb, X_sb, X_tb, M_nx, M_sb, M_tb = nx.from_numpy(
        a, b, X_s, X_t, M, M_s, M_t)

    emp_sinkhorn_div = nx.to_numpy(
        ot.bregman.empirical_sinkhorn_divergence(X_sb, X_tb, 1, a=ab, b=bb))
    sinkhorn_div = nx.to_numpy(
        ot.sinkhorn2(ab, bb, M_nx, 1) - 1 / 2 * ot.sinkhorn2(ab, ab, M_sb, 1) -
        1 / 2 * ot.sinkhorn2(bb, bb, M_tb, 1))
    emp_sinkhorn_div_np = ot.bregman.empirical_sinkhorn_divergence(X_s,
                                                                   X_t,
                                                                   1,
                                                                   a=a,
                                                                   b=b)

    # check constraints
    np.testing.assert_allclose(emp_sinkhorn_div,
                               emp_sinkhorn_div_np,
                               atol=1e-05)
    np.testing.assert_allclose(emp_sinkhorn_div, sinkhorn_div,
                               atol=1e-05)  # cf conv emp sinkhorn

    ot.bregman.empirical_sinkhorn_divergence(X_sb,
                                             X_tb,
                                             1,
                                             a=ab,
                                             b=bb,
                                             log=True)
Code example #15
def check_gradient():
    # assumes module-level a, b (histograms), M (cost matrix) and lambd
    # (regularization strength) defined elsewhere
    (loss, log) = ot.sinkhorn2(a, b, M, lambd, log=True)
    subgrad = lambd * np.log(log["u"])
    subgrad = subgrad.reshape((subgrad.shape[0], ))
    subgrad -= np.mean(subgrad)
    eps = 1e-4
    grad = np.zeros(a.shape[0])
    for i in range(a.shape[0]):
        direction = np.zeros(a.shape[0])
        direction[i] = 1
        direction -= np.mean(direction)
        grad[i] = (ot.sinkhorn2(a + eps * direction, b, M, lambd) - loss) / eps

    print(a.shape)
    print(subgrad.shape)
    loss2 = ot.sinkhorn2(a - 0.1 * subgrad, b, M, lambd)
    # disturbed_subgrad = subgrad + 0.01 * np.random.normal(size = (subgrad.shape[0],))
    # disturbed_subgrad -= np.mean(disturbed_subgrad)
    # loss3 = ot.sinkhorn2(a - 0.01 * disturbed_subgrad, b, M, lambd)
    print(loss)
    print(loss2)
    # print(loss3)
    return (grad, subgrad)
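The check relies on Sinkhorn duality: with the scaling vector u returned in the log, a (sub)gradient of the regularized loss with respect to a is the centered dual potential

    \alpha_i = \lambda \log u_i - \frac{1}{n} \sum_j \lambda \log u_j,

which is what the subgrad lines compute; the finite-difference loop then verifies it coordinate by coordinate.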
Code example #16
    def _sinkhorn_distance(self, x, y, d):
        """
        Compute the approximate optimal transportation distance (Sinkhorn distance) of the given density distributions.
        :param x: Source's neighbors distributions
        :param y: Target's neighbors distributions
        :param d: Cost matrix
        :return: Sinkhorn distance
        """
        t0 = time.time()
        m = ot.sinkhorn2(x, y, d, 1e-1, method='sinkhorn')[0]
        logger.debug(
            "%8f secs for Sinkhorn. dist. \t#source_nbr: %d, #target_nbr: %d" % (time.time() - t0, len(x), len(y)))

        return m
Code example #17
File: test_bregman.py  Project: eddardd/POT
def test_sinkhorn2_variants_device_tf(method):
    nx = ot.backend.TensorflowBackend()
    n = 100
    x = np.random.randn(n, 2)
    u = ot.utils.unif(n)
    M = ot.dist(x, x)

    # Check that everything stays on the CPU
    with tf.device("/CPU:0"):
        ub, Mb = nx.from_numpy(u, M)
        Gb = ot.sinkhorn(ub, ub, Mb, 1, method=method, stopThr=1e-10)
        lossb = ot.sinkhorn2(ub, ub, Mb, 1, method=method, stopThr=1e-10)
        nx.assert_same_dtype_device(Mb, Gb)
        nx.assert_same_dtype_device(Mb, lossb)

    if len(tf.config.list_physical_devices('GPU')) > 0:
        # Check that everything happens on the GPU
        ub, Mb = nx.from_numpy(u, M)
        Gb = ot.sinkhorn(ub, ub, Mb, 1, method=method, stopThr=1e-10)
        lossb = ot.sinkhorn2(ub, ub, Mb, 1, method=method, stopThr=1e-10)
        nx.assert_same_dtype_device(Mb, Gb)
        nx.assert_same_dtype_device(Mb, lossb)
        assert nx.dtype_device(Gb)[1].startswith("GPU")
Code example #18
def main():
    na = 100
    nb = 150
    reg = 0.5

    mu_s = np.array([0, 0])
    cov_s = np.array([[1, 0], [0, 1]])

    mu_t = np.array([4, 4])
    cov_t = np.array([[1, -.8], [-.8, 1]])

    x_tf = tf.placeholder(dtype=tf.float32, shape=[na, 2])
    y_tf = tf.placeholder(dtype=tf.float32, shape=[nb, 2])
    M_tf = dmat_tf(x_tf, y_tf)
    tf_sinkhorn_loss = sink_tf(M_tf, (na, nb), reg)

    print("I can compute the gradient for a",
          tf.gradients(tf_sinkhorn_loss, x_tf))
    print("I can compute the gradient for b",
          tf.gradients(tf_sinkhorn_loss, y_tf))

    sess = tf.InteractiveSession()
    sess.run(tf.global_variables_initializer())

    xs = ot.datasets.make_2D_samples_gauss(na, mu_s, cov_s)
    xt = ot.datasets.make_2D_samples_gauss(nb, mu_t, cov_t)

    # Visualization
    plt.figure(1)
    plt.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples')
    plt.plot(xt[:, 0], xt[:, 1], 'xr', label='Target samples')
    plt.legend(loc=0)
    plt.title('Source and target distributions')
    plt.show()

    # TF - sinkhorn
    tf_sinkhorn_loss_val = sess.run(tf_sinkhorn_loss,
                                    feed_dict={
                                        x_tf: xs,
                                        y_tf: xt
                                    })
    print(' tf_sinkhorn_loss', tf_sinkhorn_loss_val)

    # POT - sinkhorn
    M = ot.dist(xs.copy(), xt.copy(), metric='euclidean')
    a = np.ones((na, )) / na
    b = np.ones((nb, )) / nb  # uniform distribution on samples
    pot_sinkhorn_loss = ot.sinkhorn2(a, b, M, reg)[0]
    print('pot_sinkhorn_loss', pot_sinkhorn_loss)
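The example above uses the TensorFlow 1.x graph API (tf.placeholder, tf.InteractiveSession, tf.gradients) and assumes helpers dmat_tf and sink_tf defined elsewhere in the project. A minimal sketch to run such TF1-style code under TensorFlow 2.x via the compatibility layer:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()  # restores placeholders, sessions, and tf.gradients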
Code example #19
def min_a(b, M, lambd):
    n = M.shape[0]
    a_tilde = np.ones(n) / n
    a_hat = a_tilde.copy()
    a_hat_old = a_hat.copy()
    converged = False
    t = 0.5
    tol = 1e-5
    its = 0
    gamma = 1e-2

    while not converged:
        # its += 1
        # print("Iteration: {}".format(its))
        # beta = (t+1)/2
        # a = (1 - 1/beta)*a_hat + 1/beta * a_tilde
        # (_, log) = ot.sinkhorn2(a, b, M, lambd, log = True)
        # u = log["u"].reshape(n)
        # alpha = lambd * np.log(u)
        # alpha -= np.mean(alpha)
        # # a_tilde *= u**(-t * beta * lambd)
        # a_tilde *= np.exp(-t * beta * alpha)
        # a_tilde /= np.sum(a_tilde)
        # a_hat_old = a_hat.copy()
        # a_hat = (1 - 1/beta) * a_hat + 1/beta * a_tilde
        # t += 1

        its += 1
        print("Iteration: {}".format(its))
        beta = (t + 1) / 2
        a = (1 - 1 / beta) * a_hat + 1 / beta * a_tilde
        (_, log) = ot.sinkhorn2(a, b, M, lambd, log=True)
        u = log["u"].reshape(n)
        alpha = lambd * np.log(u)
        alpha -= np.mean(alpha)
        # a_tilde *= u**(-t * beta * lambd)
        a_hat_old = a_hat.copy()
        a_hat -= gamma * alpha
        a_hat[a_hat < 0] = 0
        a_hat /= np.sum(a_hat)

        print(a_hat_old)
        print(a_hat)

        if np.linalg.norm(a_hat - a_hat_old) < tol:
            converged = True

    return a_hat
Code example #20
File: test_bregman.py  Project: eddardd/POT
def test_sinkhorn2_variants_dtype_device(nx, method):
    n = 100

    x = np.random.randn(n, 2)
    u = ot.utils.unif(n)

    M = ot.dist(x, x)

    for tp in nx.__type_list__:
        print(nx.dtype_device(tp))

        ub, Mb = nx.from_numpy(u, M, type_as=tp)

        lossb = ot.sinkhorn2(ub, ub, Mb, 1, method=method, stopThr=1e-10)

        nx.assert_same_dtype_device(Mb, lossb)
Code example #21
File: Wasserstein.py  Project: nmonath/coref_tools
    def quick_e_score(self, n1, n2):
        """Pass in an AvgLink and return negative average distance."""
        if n1.needs_update:
            n1._update()
        if n2.needs_update:
            n2._update()
        # rows i want 1 is num_samples by dim
        rows_i_want_1 = n1.mat
        # rows i want 2 is num_samples by dim
        rows_i_want_2 = n2.mat
        # compute the point cloud wasserstein distance between the normalized
        # distributions.
        M = cdist(rows_i_want_1, rows_i_want_2)
        a = np.ones(rows_i_want_1.shape[0]) / rows_i_want_1.shape[0]
        b = np.ones(rows_i_want_2.shape[0]) / rows_i_want_2.shape[0]
        dist = ot.sinkhorn2(a, b, M, self.sinkhorn_reg,
                            method='sinkhorn_stabilized',
                            numItermax=self.max_sinkhorn_iter)
        return -dist[0]
Code example #22
    def _wmd(self, i, row, X_train):
        union_idx = np.union1d(X_train[i].indices, row.indices)
        W_minimal = self.W_embed[union_idx]
        W_dist = euclidean_distances(W_minimal)
        bow_i = X_train[i, union_idx].A.ravel()
        bow_j = row[:, union_idx].A.ravel()
        if self.sinkhorn:
            return ot.sinkhorn2(
                bow_i,
                bow_j,
                W_dist,
                self.sinkhorn_reg,
                numItermax=50,
                method="sinkhorn_stabilized",
            )[0]
        else:
            return ot.emd2(bow_i, bow_j, W_dist)
Code example #23
File: train.py  Project: yangYlin/KDD18_M3DN
    def forward(self, x, y):
        self.save_for_backward(x, y)
        loss = torch.zeros(1).cuda()
        for i in range(x.shape[0]):
            a = x[i].cpu().numpy()
            a[a <= 0] = 1e-9
            a = a / np.sum(a)

            b = y[i].cpu()
            b = nn.Softmax(dim=1)(b.view(1, -1)).data.numpy().reshape((-1, ))
            b[b <= 0] = 1e-9
            b = b / np.sum(b)

            dis, log_i = ot.sinkhorn2(a, b, self.m, self.reg, log=True)
            self.log[i] = torch.FloatTensor(log_i['u'])
            loss += dis[0]

        return loss
Code example #24
File: test_bregman.py  Project: eddardd/POT
def test_sinkhorn2_backends(nx):
    n_samples = 100
    n_features = 2
    rng = np.random.RandomState(0)

    x = rng.randn(n_samples, n_features)
    y = rng.randn(n_samples, n_features)
    a = ot.utils.unif(n_samples)

    M = ot.dist(x, y)

    G = ot.sinkhorn2(a, a, M, 1)

    ab, M_nx = nx.from_numpy(a, M)

    Gb = ot.sinkhorn2(ab, ab, M_nx, 1)

    np.testing.assert_allclose(G, nx.to_numpy(Gb))
Code example #25
File: test_bregman.py  Project: eddardd/POT
def test_empirical_sinkhorn(nx):
    # test sinkhorn
    n = 10
    a = ot.unif(n)
    b = ot.unif(n)

    X_s = np.reshape(1.0 * np.arange(n), (n, 1))
    X_t = np.reshape(1.0 * np.arange(0, n), (n, 1))
    M = ot.dist(X_s, X_t)
    M_m = ot.dist(X_s, X_t, metric='euclidean')

    ab, bb, X_sb, X_tb, M_nx, M_mb = nx.from_numpy(a, b, X_s, X_t, M, M_m)

    G_sqe = nx.to_numpy(ot.bregman.empirical_sinkhorn(X_sb, X_tb, 1))
    sinkhorn_sqe = nx.to_numpy(ot.sinkhorn(ab, bb, M_nx, 1))

    G_log, log_es = ot.bregman.empirical_sinkhorn(X_sb, X_tb, 0.1, log=True)
    G_log = nx.to_numpy(G_log)
    sinkhorn_log, log_s = ot.sinkhorn(ab, bb, M_nx, 0.1, log=True)
    sinkhorn_log = nx.to_numpy(sinkhorn_log)

    G_m = nx.to_numpy(
        ot.bregman.empirical_sinkhorn(X_sb, X_tb, 1, metric='euclidean'))
    sinkhorn_m = nx.to_numpy(ot.sinkhorn(ab, bb, M_mb, 1))

    loss_emp_sinkhorn = nx.to_numpy(
        ot.bregman.empirical_sinkhorn2(X_sb, X_tb, 1))
    loss_sinkhorn = nx.to_numpy(ot.sinkhorn2(ab, bb, M_nx, 1))

    # check constraints
    np.testing.assert_allclose(sinkhorn_sqe.sum(1), G_sqe.sum(1),
                               atol=1e-05)  # metric sqeuclidean
    np.testing.assert_allclose(sinkhorn_sqe.sum(0), G_sqe.sum(0),
                               atol=1e-05)  # metric sqeuclidean
    np.testing.assert_allclose(sinkhorn_log.sum(1), G_log.sum(1),
                               atol=1e-05)  # log
    np.testing.assert_allclose(sinkhorn_log.sum(0), G_log.sum(0),
                               atol=1e-05)  # log
    np.testing.assert_allclose(sinkhorn_m.sum(1), G_m.sum(1),
                               atol=1e-05)  # metric euclidean
    np.testing.assert_allclose(sinkhorn_m.sum(0), G_m.sum(0),
                               atol=1e-05)  # metric euclidean
    np.testing.assert_allclose(loss_emp_sinkhorn, loss_sinkhorn, atol=1e-05)
Code example #26
File: metrics.py  Project: lminvielle/mom-kde
def ws(p, q, x, dist='euclidean'):
    """
    Wasserstein distance between p and q.

    Parameters
    ----------
    p : distribution
    q : distribution
    x : support of p and q
        p, q, and x must be of shape (n_samples, dimension)

    Returns
    -------
    Wasserstein distance (scalar)
    """
    reg = 1e-2
    M = ot.dist(x, x, metric=dist)
    M /= M.max()
    ws = ot.sinkhorn2(p, q, M, reg)
    return ws[0]
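A usage sketch with toy 1-D histograms (illustrative values; the ws[0] indexing assumes an older POT where ot.sinkhorn2 returned a one-element array rather than a scalar):

import numpy as np
import ot

x = np.linspace(0, 1, 50).reshape(-1, 1)      # shared support
p = ot.datasets.make_1D_gauss(50, m=15, s=5)  # histograms on that support
q = ot.datasets.make_1D_gauss(50, m=35, s=5)
print(ws(p, q, x))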
Code example #27
    def forward(self, x, y):
        self.save_for_backward(x, y)
        loss = torch.zeros(1).cuda()
        for i in range(x.shape[0]):
            a = x[i].cpu().numpy().reshape((-1, ))
            a[a <= 0] = 1e-9
            a = a / np.sum(a)
            #print(y[i])
            b = y[i].cpu().numpy().reshape((-1, ))
            b[b <= 0] = 1e-9
            b = b / np.sum(b)

            dis, log_i = ot.sinkhorn2(a,
                                      b,
                                      self.m,
                                      self.reg,
                                      log=True,
                                      method='sinkhorn_stabilized')
            self.log[i] = torch.FloatTensor(log_i['logu'][:, 0])

            loss += dis[0]

        return loss
Code example #28
File: test_bregman.py  Project: wangyongguang/POT
def test_empirical_sinkhorn():
    # test sinkhorn
    n = 100
    a = ot.unif(n)
    b = ot.unif(n)

    X_s = np.reshape(np.arange(n), (n, 1))
    X_t = np.reshape(np.arange(0, n), (n, 1))
    M = ot.dist(X_s, X_t)
    M_m = ot.dist(X_s, X_t, metric='minkowski')

    G_sqe = ot.bregman.empirical_sinkhorn(X_s, X_t, 1)
    sinkhorn_sqe = ot.sinkhorn(a, b, M, 1)

    G_log, log_es = ot.bregman.empirical_sinkhorn(X_s, X_t, 0.1, log=True)
    sinkhorn_log, log_s = ot.sinkhorn(a, b, M, 0.1, log=True)

    G_m = ot.bregman.empirical_sinkhorn(X_s, X_t, 1, metric='minkowski')
    sinkhorn_m = ot.sinkhorn(a, b, M_m, 1)

    loss_emp_sinkhorn = ot.bregman.empirical_sinkhorn2(X_s, X_t, 1)
    loss_sinkhorn = ot.sinkhorn2(a, b, M, 1)

    # check constraints
    np.testing.assert_allclose(sinkhorn_sqe.sum(1), G_sqe.sum(1),
                               atol=1e-05)  # metric sqeuclidean
    np.testing.assert_allclose(sinkhorn_sqe.sum(0), G_sqe.sum(0),
                               atol=1e-05)  # metric sqeuclidean
    np.testing.assert_allclose(sinkhorn_log.sum(1), G_log.sum(1),
                               atol=1e-05)  # log
    np.testing.assert_allclose(sinkhorn_log.sum(0), G_log.sum(0),
                               atol=1e-05)  # log
    np.testing.assert_allclose(sinkhorn_m.sum(1), G_m.sum(1),
                               atol=1e-05)  # metric minkowski
    np.testing.assert_allclose(sinkhorn_m.sum(0), G_m.sum(0),
                               atol=1e-05)  # metric minkowski
    np.testing.assert_allclose(loss_emp_sinkhorn, loss_sinkhorn, atol=1e-05)
Code example #29
def reg_wasserstein(root_1: Root,
                    root_2: Root,
                    reg=0.5,
                    method="sinkhorn_stabilized",
                    numItermax=1000):
    """Regularized Wasserstein distance, computed with Sinkhorn algorithm."""
    sq_dist_matrix = distance_matrix(root_1.nodes, root_2.nodes)**2
    rescaled_nodes_1 = root_1.nodes_weights / root_1.nodes_weights.sum()
    rescaled_nodes_2 = root_2.nodes_weights / root_2.nodes_weights.sum()

    coupling = ot.sinkhorn(a=rescaled_nodes_1,
                           b=rescaled_nodes_2,
                           M=sq_dist_matrix,
                           reg=reg,
                           method=method,
                           numItermax=numItermax)
    cost = ot.sinkhorn2(a=rescaled_nodes_1,
                        b=rescaled_nodes_2,
                        M=sq_dist_matrix,
                        reg=reg,
                        method=method,
                        numItermax=numItermax)

    return np.sqrt(cost), coupling
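Design note: the function above runs the Sinkhorn solver twice with identical arguments. Assuming the loss returned by ot.sinkhorn2 is the linear cost <coupling, M> (as in POT's sinkhorn variants), a hypothetical single-solve variant recovers the cost from the plan:

import numpy as np
import ot

def reg_wasserstein_single_solve(weights_1, weights_2, sq_dist_matrix,
                                 reg=0.5, method="sinkhorn_stabilized",
                                 numItermax=1000):
    """Sketch: one Sinkhorn solve, cost recovered from the plan."""
    coupling = ot.sinkhorn(weights_1, weights_2, sq_dist_matrix, reg,
                           method=method, numItermax=numItermax)
    cost = np.sum(coupling * sq_dist_matrix)  # linear cost <coupling, M>
    return np.sqrt(cost), coupling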
Code example #30
File: test_bregman.py  Project: eddardd/POT
def test_sinkhorn2_gradients():
    n_samples = 100
    n_features = 2
    rng = np.random.RandomState(0)

    x = rng.randn(n_samples, n_features)
    y = rng.randn(n_samples, n_features)
    a = ot.utils.unif(n_samples)

    M = ot.dist(x, y)

    if torch:

        a1 = torch.tensor(a, requires_grad=True)
        b1 = torch.tensor(a, requires_grad=True)
        M1 = torch.tensor(M, requires_grad=True)

        val = ot.sinkhorn2(a1, b1, M1, 1)

        val.backward()

        assert a1.shape == a1.grad.shape
        assert b1.shape == b1.grad.shape
        assert M1.shape == M1.grad.shape