    def test_disp(self):
        """Check if something is printed when `disp` is True."""
        with Capturing() as output:
            cg(self.matvec, self.b, self.x0, disp=True)

        self.assertTrue(
            len(output) > 0,
            'You should print the progress when `disp` is True.')
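
These tests exercise a hand-written conjugate gradient solver. For reference, here is a minimal sketch of the interface the tests assume; the `status` convention, the `disp` flag, and the `hist['norm_r']` key are read off the tests themselves, while the default `max_iter` policy is an assumption, not the graded implementation.

import numpy as np


def cg(matvec, b, x0, tol=1e-4, max_iter=None, disp=False, trace=False):
    """Minimal CG sketch for A x = b, with A given implicitly via `matvec`.
    Returns (x, status), plus a history dict when trace=True; status 0
    means the residual tolerance was reached."""
    x = np.copy(x0)
    r = matvec(x) - b          # residual
    d = -r                     # search direction
    norm_r = [np.linalg.norm(r)]
    n_max = 2 * b.size if max_iter is None else max_iter  # assumed default
    for k in range(n_max):
        if np.linalg.norm(r, np.inf) <= tol:
            break
        Ad = matvec(d)
        alpha = np.vdot(r, r) / np.vdot(d, Ad)   # exact line-search step
        x = x + alpha * d
        r_new = r + alpha * Ad
        beta = np.vdot(r_new, r_new) / np.vdot(r, r)
        d = -r_new + beta * d
        r = r_new
        norm_r.append(np.linalg.norm(r))
        if disp:
            print('iter {}: ||r|| = {:.2e}'.format(k + 1, norm_r[-1]))
    status = 0 if np.linalg.norm(r, np.inf) <= tol else 1
    if trace:
        return x, status, {'norm_r': np.array(norm_r)}
    return x, status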
Code example #2
    def test_default(self):
        """Check if everything works correctly with default parameters."""
        with Capturing() as output:
            x_sol, status = cg(matvec, b, x0)

        assert_equal(status, 0)
        self.assertTrue(norm(A.dot(x_sol) - b, np.inf) <= 1e-5)
        self.assertTrue(len(output) == 0, 'You should not print anything by default.')
Code example #3
    def test_default(self):
        """Check if everything works correctly with default parameters."""
        with Capturing() as output:
            x_sol, status = cg(self.matvec, self.b, self.x0)

        assert_equal(status, 0)
        self.assertTrue(norm(self.A.dot(x_sol) - self.b, np.inf) <= 1e-4)
        self.assertTrue(
            len(output) == 0, 'You should not print anything by default.')
Code example #4
def fgw_lp(M,
           C1,
           C2,
           p,
           q,
           loss_fun='square_loss',
           alpha=1,
           amijo=True,
           G0=None,
           **kwargs):
    """
    Computes the FGW distance between two graphs, see [3]_.

    .. math::
        \gamma = \arg\min_\gamma \; (1 - \alpha) \langle \gamma, M \rangle_F
            + \alpha \sum_{i,j,k,l} L(C1_{i,k}, C2_{j,l}) \gamma_{i,j} \gamma_{k,l}

        s.t. \ \gamma \mathbf{1} = p

             \gamma^T \mathbf{1} = q

             \gamma \geq 0

    where:

    - M is the (ns, nt) metric cost matrix between features
    - p and q are the source and target weights (each summing to 1)
    - L is a loss function accounting for the misfit between the structure
      matrices C1 and C2

    The problem is solved with conditional gradient, as discussed in [1]_.
    Parameters
    ----------
    M  : ndarray, shape (ns, nt)
         Metric cost matrix between features across domains
    C1 : ndarray, shape (ns, ns)
         Metric cost matrix representative of the structure in the source space
    C2 : ndarray, shape (nt, nt)
         Metric cost matrix representative of the structure in the target space
    p :  ndarray, shape (ns,)
         distribution in the source space
    q :  ndarray, shape (nt,)
         distribution in the target space
    loss_fun : string, optional
        loss function used for the solver
    max_iter : int, optional
        Max number of iterations
    tol : float, optional
        Stop threshold on error (>0)
    verbose : bool, optional
        Print information along iterations
    log : bool, optional
        record log if True
    amijo : bool, optional
        If True, the step of the line search is found via an Armijo search.
        Otherwise a closed form is used. If there are convergence issues,
        use False.
    **kwargs : dict
        parameters can be passed directly to the ot.optim.cg solver
    Returns
    -------
    gamma : ndarray, shape (ns, nt)
        Optimal transportation matrix for the given parameters
    log : dict
        log dictionary, returned only if log==True in parameters
    References
    ----------
    .. [3] Vayer Titouan, Chapel Laetitia, Flamary Rémi, Tavenard Romain
        and Courty Nicolas,
        "Optimal Transport for structured data with application on graphs",
        International Conference on Machine Learning (ICML), 2019.
    """

    constC, hC1, hC2 = init_matrix(C1, C2, p, q, loss_fun)

    if G0 is None:
        G0 = p[:, None] * q[None, :]

    def f(G):
        # Gromov-Wasserstein loss term evaluated at the coupling G
        return gwloss(constC, hC1, hC2, G)

    def df(G):
        # gradient of the Gromov-Wasserstein loss term at G
        return gwggrad(constC, hC1, hC2, G)

    return optim.cg(p,
                    q,
                    M,
                    alpha,
                    f,
                    df,
                    G0,
                    amijo=amijo,
                    C1=C1,
                    C2=C2,
                    constC=constC,
                    **kwargs)
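
A hypothetical usage sketch for fgw_lp: the point sets, sizes, and alpha=0.5 below are made up for illustration, and it assumes the POT helpers used above (init_matrix, gwloss, gwggrad, optim.cg) are importable.

import numpy as np

rng = np.random.RandomState(42)
xs, xt = rng.rand(5, 2), rng.rand(4, 2)                 # source/target features
M = np.linalg.norm(xs[:, None] - xt[None, :], axis=2)   # (ns, nt) feature cost
C1 = np.linalg.norm(xs[:, None] - xs[None, :], axis=2)  # (ns, ns) source structure
C2 = np.linalg.norm(xt[:, None] - xt[None, :], axis=2)  # (nt, nt) target structure
p, q = np.ones(5) / 5, np.ones(4) / 4                   # uniform weights

gamma = fgw_lp(M, C1, C2, p, q, alpha=0.5, amijo=False)
print(gamma.shape)        # (5, 4)
print(gamma.sum(axis=1))  # row marginals should match p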
Code example #5
def gw_lp(C1, C2, p, q, loss_fun='square_loss', alpha=1, amijo=True, **kwargs):
    """
    Returns the Gromov-Wasserstein transport between (C1, p) and (C2, q).

    The function solves the following optimization problem:

    .. math::
        GW\_Dist = \min_T \sum_{i,j,k,l} L(C1_{i,k}, C2_{j,l}) T_{i,j} T_{k,l}

    where:

    - C1 is the metric cost matrix in the source space
    - C2 is the metric cost matrix in the target space
    - p and q are the distributions in the source and target spaces
    - L is a loss function to account for the misfit between the similarity
      matrices
    Parameters
    ----------
    C1 : ndarray, shape (ns, ns)
         Metric cost matrix in the source space
    C2 : ndarray, shape (nt, nt)
         Metric cost matrix in the target space
    p :  ndarray, shape (ns,)
         distribution in the source space
    q :  ndarray, shape (nt,)
         distribution in the target space
    loss_fun : string, optional
        loss function used for the solver
    max_iter : int, optional
        Max number of iterations
    tol : float, optional
        Stop threshold on error (>0)
    verbose : bool, optional
        Print information along iterations
    log : bool, optional
        record log if True
    amijo : bool, optional
        If True, the step of the line search is found via an Armijo search.
        Otherwise a closed form is used. If there are convergence issues,
        use False.
    **kwargs : dict
        parameters can be passed directly to the ot.optim.cg solver
    Returns
    -------
    T : ndarray, shape (ns, nt)
        coupling between the two spaces that minimizes
        :math:`\sum_{i,j,k,l} L(C1_{i,k}, C2_{j,l}) T_{i,j} T_{k,l}`
    log : dict
        convergence information and loss
    References
    ----------
    .. [1] Peyré, Gabriel, Marco Cuturi, and Justin Solomon,
        "Gromov-Wasserstein averaging of kernel and distance matrices."
        International Conference on Machine Learning (ICML). 2016.
    .. [2] Mémoli, Facundo. Gromov–Wasserstein distances and the
        metric approach to object matching. Foundations of computational
        mathematics 11.4 (2011): 417-487.
    """

    constC, hC1, hC2 = init_matrix(C1, C2, p, q, loss_fun)
    M = np.zeros((C1.shape[0], C2.shape[0]))  # zero feature cost: pure GW problem

    G0 = p[:, None] * q[None, :]

    def f(G):
        # Gromov-Wasserstein loss term evaluated at the coupling G
        return gwloss(constC, hC1, hC2, G)

    def df(G):
        # gradient of the Gromov-Wasserstein loss term at G
        return gwggrad(constC, hC1, hC2, G)

    return optim.cg(p,
                    q,
                    M,
                    alpha,
                    f,
                    df,
                    G0,
                    amijo=amijo,
                    constC=constC,
                    C1=C1,
                    C2=C2,
                    **kwargs)
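
A hypothetical sanity check for gw_lp: compare a structure matrix against a permuted copy of itself, in which case the coupling should roughly recover the permutation (sizes and seed are illustrative):

import numpy as np

rng = np.random.RandomState(0)
xs = rng.rand(6, 2)
C1 = np.linalg.norm(xs[:, None] - xs[None, :], axis=2)
perm = rng.permutation(6)
C2 = C1[perm][:, perm]            # same structure with nodes permuted
p = q = np.ones(6) / 6

T = gw_lp(C1, C2, p, q, amijo=False)
print(T.argmax(axis=0))           # ideally equals perm (up to local minima)
print(perm)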
Code example #6
# plot_performance(hist_hfn['f'], optimal_value, 'r', "Inexact Newton", time_vec=None)

dim = 100
num_clusters = 50
np.random.seed(0)
# Build A = Q diag(s) Q^T with a prescribed spectrum: eigenvalues
# 1, 3, ..., 99, each repeated twice and slightly perturbed, giving
# 50 tight eigenvalue clusters.
Q = np.random.rand(dim, dim)
Q = orth(Q)
s = np.array(list(range(1, 101, 2)) * 2, dtype=float)
s += np.random.normal(0, 0.001, s.shape)
A = (Q.dot(np.diag(s))).dot(Q.T)
np.random.seed(1)
b = np.random.rand(dim, 1)


def matvec(x):
    return A.dot(x)


# print(np.linalg.solve(A, b))
ans, hist = cg(matvec, b, np.zeros(b.shape), disp=False, tol=1e-10, maxiter=100)
plot_performance(hist['norm_r'], 0, 'b', "50 clusters")

# Same experiment with eigenvalues 1..10, each repeated ten times:
# 10 tight eigenvalue clusters.
s = np.array(list(range(1, 11)) * 10, dtype=float)
s += np.random.normal(0, 0.001, s.shape)
A = (Q.dot(np.diag(s))).dot(Q.T)
np.random.seed(1)
b = np.random.rand(dim, 1)


def matvec(x):
    return A.dot(x)


# print(np.linalg.solve(A, b))
ans, hist = cg(matvec, b, np.zeros(b.shape), disp=False, tol=1e-10, maxiter=100)
plot_performance(hist['norm_r'], 0, 'r', "10 clusters")

plt.ylabel(r'$\log||Ax_k - b||$')
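
What the plot is meant to show: CG's iteration count is governed by the number of tight eigenvalue clusters in A's spectrum, not by the dimension, so the 10-cluster run should converge roughly five times sooner. A quick hypothetical check on the last run's history (`hist` is overwritten by the second run above):

# iterations needed before the residual fell below tol; for a k-cluster
# spectrum this should be close to k (~10 here)
print(int(np.sum(np.asarray(hist['norm_r']) > 1e-10)))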
Code example #7
    def test_trace(self):
        """Check if the history is returned correctly when `trace` is True."""
        x_sol, status, hist = cg(matvec, b, x0, trace=True)

        self.assertTrue(isinstance(hist['norm_r'], np.ndarray))
Code example #8
    def test_disp(self):
        """Check if something is printed when `disp` is True."""
        with Capturing() as output:
            cg(matvec, b, x0, disp=True)

        self.assertTrue(len(output) > 0, 'You should print the progress when `disp` is True.')
Code example #9
    def test_max_iter(self):
        """Check argument `max_iter` is supported and can be set to None."""
        x_sol, status = cg(matvec, b, x0, max_iter=None)

        assert_equal(status, 0)
        self.assertTrue(norm(A.dot(x_sol) - b, np.inf) <= 1e-5)
Code example #10
    def test_tol(self):
        """Try high accuracy."""
        x_sol, status = cg(matvec, b, x0, tol=1e-10)

        assert_equal(status, 0)
        self.assertTrue(norm(A.dot(x_sol) - b, np.inf) <= 1e-10)
Code example #11
dim = 100
np.random.seed(0)
Q = np.random.rand(dim, dim)
Q = orth(Q)
s = np.array(list(range(1, 101, 2)) * 2, dtype=float)
s += np.random.normal(0, 0.001, s.shape)
A = (Q.dot(np.diag(s))).dot(Q.T)
np.random.seed(1)
b = np.random.rand(dim, 1)


def matvec(x):
    return A.dot(x)


# print(np.linalg.solve(A, b))
ans, hist = cg(matvec,
               b,
               np.zeros(b.shape),
               disp=False,
               tol=1e-10,
               maxiter=100)
plot_performance(hist['norm_r'], 0, 'b', "50 clusters")

s = np.array(list(range(1, 11)) * 10, dtype=float)
s += np.random.normal(0, 0.001, s.shape)
A = (Q.dot(np.diag(s))).dot(Q.T)
np.random.seed(1)
b = np.random.rand(dim, 1)


def matvec(x):
    return A.dot(x)


# print(np.linalg.solve(A, b))
ans, hist = cg(matvec,
               b,
               np.zeros(b.shape),
               disp=False,
               tol=1e-10,
               maxiter=100)
plot_performance(hist['norm_r'], 0, 'r', "10 clusters")

Code example #12

    def test_trace(self):
        """Check if the history is returned correctly when `trace` is True."""
        x_sol, status, hist = cg(self.matvec, self.b, self.x0, trace=True)

        self.assertTrue(isinstance(hist['norm_r'], np.ndarray))
    def test_max_iter(self):
        """Check argument `max_iter` is supported and can be set to None."""
        cg(self.matvec, self.b, self.x0, max_iter=None)

    def test_tol(self):
        """Check if argument `tol` is supported."""
        cg(self.matvec, self.b, self.x0, tol=1e-6)
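
The self.matvec, self.b, self.x0 (and self.A) fixtures referenced by these methods must be created in a setUp; a minimal hypothetical version:

import unittest

import numpy as np


class TestCG(unittest.TestCase):
    def setUp(self):
        np.random.seed(0)
        B = np.random.rand(10, 10)
        self.A = B.dot(B.T) + 10 * np.eye(10)  # symmetric positive definite
        self.b = np.random.rand(10, 1)
        self.x0 = np.zeros_like(self.b)
        self.matvec = lambda x: self.A.dot(x)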