def test_mlfbf(self):
        """
        Test the MLFBF solver with arbitrarily selected functions.

        """
        x = [1., 1., 1.]
        L = np.array([[5, 9, 3], [7, 8, 5], [4, 4, 9], [0, 1, 7]])
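        # The admissible MLFBF step is bounded above by 1 / (beta + ||L||_2),
        # where beta = 1 is the Lipschitz constant of the gradient of h below.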
        max_step = 1 / (1 + np.linalg.norm(L, 2))
        solver = solvers.mlfbf(L=L, step=max_step / 2.)
        params = {'solver': solver, 'verbosity': 'NONE'}

        def x0():
            return np.zeros(len(x))

        # L2-norm prox and dummy prox.
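        # (the dummy's prox is overridden with the projection onto the
        # nonnegative orthant)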
        f = functions.dummy()
        f._prox = lambda x, T: np.maximum(np.zeros(len(x)), x)
        g = functions.norm_l2(lambda_=0.5)
        h = functions.norm_l2(y=np.array([294, 390, 361]), lambda_=0.5)
        ret = solvers.solve([f, g, h], x0(), maxit=1000, rtol=0, **params)
        nptest.assert_allclose(ret['sol'], x, rtol=1e-5)

        # Same test, but with callable L
        solver = solvers.mlfbf(L=lambda x: np.dot(L, x),
                               Lt=lambda y: np.dot(L.T, y),
                               d0=np.dot(L, x0()),
                               step=max_step / 2.)
        ret = solvers.solve([f, g, h], x0(), maxit=1000, rtol=0, **params)
        nptest.assert_allclose(ret['sol'], x, rtol=1e-5)

        # Sanity check
        self.assertRaises(ValueError, solver.pre, [f, g], x0())
Example #3
    def test_primal_dual_solver_comparison(self):
        """
        Test that all primal-dual solvers return the same and correct solution.

        I had to create this separate function because the primal-dual solvers
        were too slow for the problem above.

        """

        # Convex functions.
        y = np.random.randn(3)
        L = np.random.randn(4, 3)

        sol = y
        y2 = L.dot(y)
        f1 = functions.norm_l1(y=y)
        f2 = functions.norm_l2(y=y2)
        f3 = functions.dummy()

        # Solvers.
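        # A step of 0.5 / (1 + ||L||_2) lies within the admissible range of
        # both solvers under test.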
        step = 0.5 / (1 + np.linalg.norm(L, 2))
        slvs = []
        slvs.append(solvers.mlfbf(step=step, L=L))
        slvs.append(solvers.projection_based(step=step, L=L))

        # Compare solutions.
        niter = 1000
        params = {'rtol': 0, 'verbosity': 'NONE', 'maxit': niter}
        for solver in slvs:
            x0 = np.zeros(len(y))

            if type(solver) is solvers.mlfbf:
                ret = solvers.solve([f1, f2, f3], x0, solver, **params)
            else:
                ret = solvers.solve([f1, f2], x0, solver, **params)
            nptest.assert_allclose(ret['sol'], sol)
            self.assertEqual(ret['niter'], niter)
            # The initial value was not modified.
            nptest.assert_array_equal(x0, np.zeros(len(y)))

            if type(solver) is solvers.mlfbf:
                ret = solvers.solve([f1, f2, f3],
                                    x0,
                                    solver,
                                    inplace=True,
                                    **params)
            else:
                ret = solvers.solve([f1, f2],
                                    x0,
                                    solver,
                                    inplace=True,
                                    **params)
            # The initial value was modified.
            self.assertIs(ret['sol'], x0)
            nptest.assert_allclose(ret['sol'], sol)
Example #5
    def test_primal_dual_solver_comparison(self):
        """
        Test that all primal-dual solvers return the same and correct solution.

        I had to create this separate function because the primal-dual solvers
        were too slow for the problem above.

        """

        # Convex functions.
        y = np.array([294, 390, 361])
        sol = [1., 1., 1.]
        L = np.array([[5, 9, 3], [7, 8, 5], [4, 4, 9], [0, 1, 7]])
        f1 = functions.norm_l1(y=y)
        f2 = functions.norm_l1()
        f3 = functions.dummy()

        # Solvers.
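        # No L is given, so both solvers default to the identity operator.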
        step = 0.5 / (1 + np.linalg.norm(L, 2))
        slvs = []
        slvs.append(solvers.mlfbf(step=step))
        slvs.append(solvers.projection_based(step=step))

        # Compare solutions.
        params = {'rtol': 0, 'verbosity': 'NONE', 'maxit': 50}
        niters = [50, 50]
        for solver, niter in zip(slvs, niters):
            x0 = np.zeros(len(y))

            if type(solver) is solvers.mlfbf:
                ret = solvers.solve([f1, f2, f3], x0, solver, **params)
            else:
                ret = solvers.solve([f1, f2], x0, solver, **params)

            nptest.assert_allclose(ret['sol'], sol)
            self.assertEqual(ret['niter'], niter)
            self.assertIs(ret['sol'], x0)  # The initial value was modified.
Example #6
def l2_degree_reg(X,
                  dist_type='sqeuclidean',
                  alpha=1,
                  s=None,
                  step=0.5,
                  w0=None,
                  maxit=1000,
                  rtol=1e-5,
                  retall=False,
                  verbosity='NONE'):
    r"""
    Learn a graph by regularizing the l2-norm of the degrees.

    This is done by solving
    :math:`\tilde{W} = \underset{W \in \mathcal{W}_m}{\text{arg}\min} \,
    \|W \odot Z\|_{1,1} + \alpha \|W1\|^2 + \alpha \| W \|_{F}^{2}`, subject
    to :math:`\|W\|_{1,1} = s`, where :math:`Z` is a pairwise distance matrix,
    and :math:`\mathcal{W}_m` is the set of valid symmetric weighted adjacency
    matrices.

    Parameters
    ----------
    X : array_like
        An N-by-M data matrix of N variable observations in an M-dimensional
        space. The learned graph will have N nodes.
    dist_type : string
        Type of pairwise distance between variables. See
        :func:`spatial.distance.pdist` for the possible options.
    alpha : float, optional
        Regularization parameter acting on the l2-norm.
    s : float, optional
        The "sparsity level" of the weight matrix, as measured by its l1-norm.
    step : float, optional
        A number between 0 and 1 defining a stepsize value in the admissible
        stepsize interval (see [Komodakis & Pesquet, 2015], Algorithm 6).
    w0 : array_like, optional
        Initialization of the edge weights. Must be an N(N-1)/2-dimensional
        vector.
    maxit : int, optional
        Maximum number of iterations.
    rtol : float, optional
        Stopping criterion. Relative tolerance between successive updates.
    retall : boolean
        Return solution and problem details. See output of
        :func:`pyunlocbox.solvers.solve`.
    verbosity : {'NONE', 'LOW', 'HIGH', 'ALL'}, optional
        Level of verbosity of the solver. See :func:`pyunlocbox.solvers.solve`.

    Returns
    -------
    W : array_like
        Learned weighted adjacency matrix
    problem : dict, optional
        Information about the solution of the optimization. Only returned if
        retall == True.

    Notes
    -----
    This is the problem proposed in [Dong et al., 2015].


    Examples
    --------
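    A minimal usage sketch (assuming this function lives in the same
    ``learn_graph`` module as the example below; only the output shape is
    checked, since the learned weights depend on the data):

    >>> import learn_graph as lg
    >>> import numpy as np
    >>> X = np.random.RandomState(42).uniform(size=(20, 3))
    >>> W = lg.l2_degree_reg(X)
    >>> W.shape
    (20, 20)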

    """

    # Parse X
    N = X.shape[0]
    E = int(N * (N - 1.) / 2.)
    z = spatial.distance.pdist(X, dist_type)  # Pairwise distances

    # Parse s
    s = N if s is None else s

    # Parse step
    if (step <= 0) or (step > 1):
        raise ValueError("step must be a number between 0 and 1.")

    # Parse initial weights
    w0 = np.zeros(z.shape) if w0 is None else w0
    if (w0.shape != z.shape):
        raise ValueError("w0 must be of dimension N(N-1)/2.")

    # Get primal-dual linear map
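    # K(w) = 2 * sum(w) = ||W||_{1,1}, since every weight appears twice in the
    # symmetric matrix W; hence ||K|| = 2 * ||1||_2 = 2 * sqrt(E).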
    one_vec = np.ones(E)

    def K(w):
        return np.array([2 * np.dot(one_vec, w)])

    def Kt(n):
        return 2 * n * one_vec

    norm_K = 2 * np.sqrt(E)

    # Get weight-to-degree map
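    # (S maps the weight vector w to the degree vector W1; St is its adjoint)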
    S, St = utils.weight2degmap(N)

    # Assemble functions in the objective
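    # f1 encodes ||W . Z||_{1,1} = 2 * w'z restricted to w >= 0; f2 is the
    # indicator of the constraint {K(w) = s}, whose prox is the constant s;
    # f3 collects the smooth l2 terms.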
    f1 = functions.func()
    f1._eval = lambda w: 2 * np.dot(w, z)
    f1._prox = lambda w, gamma: np.maximum(0, w - (2 * gamma * z))

    f2 = functions.func()
    f2._eval = lambda w: 0.
    f2._prox = lambda d, gamma: s

    f3 = functions.func()
    f3._eval = lambda w: alpha * (2 * np.sum(w**2) + np.sum(S(w)**2))
    f3._grad = lambda w: alpha * (4 * w + St(S(w)))
    # Lipschitz constant of grad(f3): alpha * (4 + ||S||^2), with
    # ||S||^2 = 2 * (N - 1), which gives 2 * alpha * (N + 1).
    lipg = 2 * alpha * (N + 1)

    # Rescale stepsize
    stepsize = step / (1 + lipg + norm_K)

    # Solve problem
    solver = solvers.mlfbf(L=K, Lt=Kt, step=stepsize)
    problem = solvers.solve([f1, f2, f3],
                            x0=w0,
                            solver=solver,
                            maxit=maxit,
                            rtol=rtol,
                            verbosity=verbosity)

    # Transform weight matrix from vector form to matrix form
    W = spatial.distance.squareform(problem['sol'])

    if retall:
        return W, problem
    else:
        return W
Example #7
def log_degree_barrier(X,
                       dist_type='sqeuclidean',
                       alpha=1,
                       beta=1,
                       step=0.5,
                       w0=None,
                       maxit=1000,
                       rtol=1e-5,
                       retall=False,
                       verbosity='NONE'):
    r"""
    Learn a graph by imposing a log barrier on the degrees.

    This is done by solving
    :math:`\tilde{W} = \underset{W \in \mathcal{W}_m}{\text{arg}\min} \,
    \|W \odot Z\|_{1,1} - \alpha 1^{T} \log{W1} + \beta \| W \|_{F}^{2}`,
    where :math:`Z` is a pairwise distance matrix, and :math:`\mathcal{W}_m`
    is the set of valid symmetric weighted adjacency matrices.

    Parameters
    ----------
    X : array_like
        An N-by-M data matrix of N variable observations in an M-dimensional
        space. The learned graph will have N nodes.
    dist_type : string
        Type of pairwise distance between variables. See
        :func:`spatial.distance.pdist` for the possible options.
    alpha : float, optional
        Regularization parameter acting on the log barrier.
    beta : float, optional
        Regularization parameter controlling the density of the graph.
    step : float, optional
        A number between 0 and 1 defining a stepsize value in the admissible
        stepsize interval (see [Komodakis & Pesquet, 2015], Algorithm 6).
    w0 : array_like, optional
        Initialization of the edge weights. Must be an N(N-1)/2-dimensional
        vector.
    maxit : int, optional
        Maximum number of iterations.
    rtol : float, optional
        Stopping criterion. Relative tolerance between successive updates.
    retall : boolean
        Return solution and problem details. See output of
        :func:`pyunlocbox.solvers.solve`.
    verbosity : {'NONE', 'LOW', 'HIGH', 'ALL'}, optional
        Level of verbosity of the solver. See :func:`pyunlocbox.solvers.solve`.

    Returns
    -------
    W : array_like
        Learned weighted adjacency matrix
    problem : dict, optional
        Information about the solution of the optimization. Only returned if
        retall == True.

    Notes
    -----
    This is the solver proposed in [Kalofolias, 2016] :cite:`kalofolias2016`.


    Examples
    --------
    >>> import learn_graph as lg
    >>> import networkx as nx
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from scipy import spatial
    >>> G_gt = nx.waxman_graph(100)
    >>> pos = nx.random_layout(G_gt)
    >>> coords = np.array(list(pos.values()))
    >>> def s1(x, y):
    ...     return np.sin((2 - x - y)**2)
    >>> def s2(x, y):
    ...     return np.cos((x + y)**2)
    >>> def s3(x, y):
    ...     return (x - 0.5)**2 + (y - 0.5)**3 + x - y
    >>> def s4(x, y):
    ...     return np.sin(3 * ((x - 0.5)**2 + (y - 0.5)**2))
    >>> X = np.array((s1(coords[:, 0], coords[:, 1]),
    ...               s2(coords[:, 0], coords[:, 1]),
    ...               s3(coords[:, 0], coords[:, 1]),
    ...               s4(coords[:, 0], coords[:, 1]))).T
    >>> W = lg.log_degree_barrier(X)
    >>> W[W < np.percentile(W, 96)] = 0
    >>> G_learned = nx.from_numpy_matrix(W)
    >>> plt.figure(figsize=(12, 6))
    >>> plt.subplot(1,2,1)
    >>> nx.draw(G_gt, pos=pos)
    >>> plt.title('Ground Truth')
    >>> plt.subplot(1,2,2)
    >>> nx.draw(G_learned, pos=pos)
    >>> plt.title('Learned')
    """

    # Parse X
    N = X.shape[0]
    z = spatial.distance.pdist(X, dist_type)  # Pairwise distances

    # Parse stepsize
    if (step <= 0) or (step > 1):
        raise ValueError("step must be a number between 0 and 1.")

    # Parse initial weights
    w0 = np.zeros(z.shape) if w0 is None else w0
    if (w0.shape != z.shape):
        raise ValueError("w0 must be of dimension N(N-1)/2.")

    # Get primal-dual linear map
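    # (each edge weight contributes to exactly two node degrees, hence
    # ||K|| = sqrt(2 * (N - 1)))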
    K, Kt = utils.weight2degmap(N)
    norm_K = np.sqrt(2 * (N - 1))

    # Assemble functions in the objective
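    # f1 encodes ||W . Z||_{1,1} on w >= 0; f2 is the log barrier acting on
    # the degree vector d = K(w); f3 is the Frobenius-norm term.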
    f1 = functions.func()
    f1._eval = lambda w: 2 * np.dot(w, z)
    f1._prox = lambda w, gamma: np.maximum(0, w - (2 * gamma * z))

    f2 = functions.func()
    f2._eval = lambda w: -alpha * np.sum(
        np.log(np.maximum(np.finfo(np.float64).eps, K(w))))
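    # prox of the barrier: x = (d + sqrt(d^2 + 4 * alpha * gamma)) / 2 is the
    # positive root of x^2 - d * x - alpha * gamma = 0.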
    f2._prox = lambda d, gamma: np.maximum(
        0, 0.5 * (d + np.sqrt(d**2 + (4 * alpha * gamma))))

    f3 = functions.func()
    f3._eval = lambda w: beta * np.sum(w**2)
    f3._grad = lambda w: 2 * beta * w
    lipg = 2 * beta

    # Rescale stepsize
    stepsize = step / (1 + lipg + norm_K)

    # Solve problem
    solver = solvers.mlfbf(L=K, Lt=Kt, step=stepsize)
    problem = solvers.solve([f1, f2, f3],
                            x0=w0,
                            solver=solver,
                            maxit=maxit,
                            rtol=rtol,
                            verbosity=verbosity)

    # Transform weight matrix from vector form to matrix form
    W = spatial.distance.squareform(problem['sol'])

    if retall:
        return W, problem
    else:
        return W
Example #8
    def graph_pnorm_interpolation(self, gradient, P, w, labels_bin, x0=None, p=1., **kwargs):
        r"""
        Solve an interpolation problem via gradient p-norm minimization.

        A signal :math:`x` is estimated from its measurements :math:`y = A(x)` by solving
        :math:`\underset{z \in \mathbb{R}^n}{\text{arg}\min}
        \| \nabla_G z \|_p^p \text{ subject to } Az = y`
        via a primal-dual, forward-backward-forward algorithm.

        Parameters
        ----------
        gradient : array_like
            A matrix representing the graph gradient operator.
        P : callable
            Orthogonal projection operator mapping points :math:`z \in
            \mathbb{R}^n` onto the set satisfying :math:`A P(z) = A z`.
        w : array_like
            Auxiliary data forwarded to the projection operator `P`.
        labels_bin : array_like
            A vector that holds the binary labels.
        x0 : array_like, optional
            Initial point of the iteration. Must be of dimension n.
            (Default is `numpy.random.randn(n)`)
        p : {1., 2.}, optional
            Exponent of the gradient p-norm: 1 for the sparse (l1)
            formulation, 2 for the smooth (l2) one.
        kwargs :
            Additional solver parameters, such as maximum number of iterations
            (maxit), relative tolerance on the objective (rtol), and verbosity
            level (verbosity). See :func:`pyunlocbox.solvers.solve` for the
            full list of options.

        Returns
        -------
        x : array_like
            The solution to the optimization problem.

        """

        grad = lambda z: gradient.dot(z)
        div = lambda z: gradient.transpose().dot(z)

        # Random default initialization, as documented above (gradient has
        # one column per node).
        if x0 is None:
            x0 = np.random.randn(gradient.shape[1])

        # Indicator function of the set satisfying :math:`y = A(z)`
        f = functions.func()
        f._eval = lambda z: 0
        f._prox = lambda z, gamma: P(z, w, labels_bin)

        # :math:`\ell_1` norm of the dual variable :math:`d = \nabla_G z`
        g = functions.func()
        g._eval = lambda z: np.sum(np.abs(grad(z)))
        g._prox = lambda d, gamma: functions._soft_threshold(d, gamma)

        # :math:`\ell_2` norm of the gradient (for the smooth case)
        h = functions.norm_l2(A=grad, At=div)

        # The Frobenius norm upper-bounds the spectral norm, so this step is a
        # conservative choice; raising it to the p-th power keeps it in (0, 1).
        stepsize = (0.9 / (1. + scipy.sparse.linalg.norm(gradient, ord='fro'))) ** p

        solver = solvers.mlfbf(L=grad, Lt=div, step=stepsize)

        if p == 1.:
            problem = solvers.solve([f, g, functions.dummy()], x0=x0,
                                    solver=solver, **kwargs)
            return problem['sol']
        elif p == 2.:
            problem = solvers.solve([f, functions.dummy(), h], x0=x0,
                                    solver=solver, **kwargs)
            return problem['sol']
        else:
            return x0
Example #9
    def test_mlfbf(self):
        """
        Test the MLFBF solver with arbitrarily selected functions.

        """
        x = [1., 1., 1.]
        L = np.array([[5, 9, 3], [7, 8, 5], [4, 4, 9], [0, 1, 7]])
        max_step = 1 / (1 + np.linalg.norm(L, 2))
        solver = solvers.mlfbf(L=L, step=max_step / 2.)
        params = {'solver': solver, 'verbosity': 'NONE'}

        def x0():
            return np.zeros(len(x))

        # L2-norm prox and dummy prox.
        f = functions.dummy()
        f._prox = lambda x, T: np.maximum(np.zeros(len(x)), x)
        g = functions.norm_l2(lambda_=0.5)
        h = functions.norm_l2(y=np.array([294, 390, 361]), lambda_=0.5)
        ret = solvers.solve([f, g, h], x0(), maxit=1000, rtol=0, **params)
        nptest.assert_allclose(ret['sol'], x, rtol=1e-5)

        # Same test, but with callable L
        solver = solvers.mlfbf(L=lambda x: np.dot(L, x),
                               Lt=lambda y: np.dot(L.T, y),
                               d0=np.dot(L, x0()),
                               step=max_step / 2.)
        ret = solvers.solve([f, g, h], x0(), maxit=1000, rtol=0, **params)
        nptest.assert_allclose(ret['sol'], x, rtol=1e-5)

        # Sanity check
        self.assertRaises(ValueError, solver.pre, [f, g], x0())

        # Make a second test where the solution is calculated by hand
        n = 10
        y = np.random.rand(n) * 2
        z = np.random.rand(n)
        c = 1

        # First-order optimality of c*x - sum(log(x + z)) + 0.5*||x - y||^2
        # over x > 0: c + (x - y) - 1 / (x + z) = 0, a per-entry quadratic
        # whose positive root is the solution.
        delta = (y - z - c)**2 + 4 * (1 + y * z - z * c)
        sol = 0.5 * ((y - z - c) + np.sqrt(delta))

        class mlog(functions.func):
            def __init__(self, z):
                super().__init__()
                self.z = z

            def _eval(self, x):
                return -np.sum(np.log(x + self.z))

            def _prox(self, x, T):
                delta = (x - self.z)**2 + 4 * (T + x * self.z)
                sol = 0.5 * (x - self.z + np.sqrt(delta))
                return sol

        f = functions.norm_l1(lambda_=c)
        g = mlog(z=z)
        h = functions.norm_l2(lambda_=0.5, y=y)

        # step must lie in (0, 1 / mu) with mu = beta + ||L||: beta = 1 is the
        # Lipschitz constant of grad(h) and L defaults to the identity.
        mu = 1 + 1
        step = 1 / mu / 2

        solver = solvers.mlfbf(step=step)
        ret = solvers.solve([f, g, h],
                            y.copy(),
                            solver,
                            maxit=200,
                            rtol=0,
                            verbosity="NONE")

        nptest.assert_allclose(ret["sol"], sol, atol=1e-10)

        # Make a final test where the function g cannot be evaluated
        # on the primal variables.
        y = np.random.rand(3)
        L = np.array([[5, 9, 3], [7, 8, 5], [4, 4, 9], [0, 1, 7]])
        y_2 = L.dot(y)
        x0 = np.zeros(len(y))
        f = functions.norm_l1(y=y)
        g = functions.norm_l2(lambda_=0.5, y=y_2)
        h = functions.norm_l2(y=y, lambda_=0.5)
        max_step = 1 / (1 + np.linalg.norm(L, 2))
        solver = solvers.mlfbf(L=L, step=max_step / 2.)
        ret = solvers.solve([f, g, h], x0, solver, maxit=1000, rtol=0)
        nptest.assert_allclose(ret["sol"], y)