    def test_backtracking(self):
        """
        Test forward-backward splitting solver with backtracking, solving
        problems with L1-norm, L2-norm, and dummy functions.

        """
        # Test constructor sanity
        a = acceleration.backtracking()
        self.assertRaises(ValueError, a.__init__, 2.)
        self.assertRaises(ValueError, a.__init__, -2.)

        y = [4., 5., 6., 7.]
        accel = acceleration.backtracking()
        step = 10  # Make sure backtracking is called
        solver = solvers.forward_backward(accel=accel, step=step)
        param = {'solver': solver, 'atol': 1e-32, 'verbosity': 'NONE'}

        # L2-norm prox and dummy gradient.
        f1 = functions.norm_l2(y=y)
        f2 = functions.dummy()
        ret = solvers.solve([f1, f2], np.zeros(len(y)), **param)
        nptest.assert_allclose(ret['sol'], y)
        self.assertEqual(ret['crit'], 'ATOL')
        self.assertEqual(ret['niter'], 13)

        # L1-norm prox and L2-norm gradient.
        f1 = functions.norm_l1(y=y, lambda_=1.0)
        f2 = functions.norm_l2(y=y, lambda_=0.8)
        ret = solvers.solve([f1, f2], np.zeros(len(y)), **param)
        nptest.assert_allclose(ret['sol'], y)
        self.assertEqual(ret['crit'], 'ATOL')
        self.assertLessEqual(ret['niter'], 4)  # win64 takes one fewer iteration
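For reference, the pattern this test exercises runs standalone; a minimal sketch with toy data (values chosen only for illustration, using the same pyunlocbox calls as the test):

import numpy as np
from pyunlocbox import acceleration, functions, solvers

y = np.array([4., 5., 6., 7.])
f1 = functions.norm_l2(y=y)          # proximal term
f2 = functions.dummy()               # placeholder gradient term
# A deliberately too-large step forces the backtracking line search to act.
solver = solvers.forward_backward(accel=acceleration.backtracking(), step=10)
ret = solvers.solve([f1, f2], np.zeros(4), solver, atol=1e-32,
                    verbosity='NONE')
print(ret['sol'], ret['crit'], ret['niter'])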
Example #3
    def test_forward_backward_fista_backtracking(self):
        """
        Test forward-backward splitting solver with fista acceleration and
        backtracking, solving problems with L1-norm, L2-norm, and dummy
        functions.

        """
        y = [4., 5., 6., 7.]
        accel = acceleration.fista_backtracking()
        solver = solvers.forward_backward(accel=accel)
        param = {'solver': solver, 'rtol': 1e-6, 'verbosity': 'NONE'}

        # L2-norm prox and dummy gradient.
        f1 = functions.norm_l2(y=y)
        f2 = functions.dummy()
        ret = solvers.solve([f1, f2], np.zeros(len(y)), **param)
        nptest.assert_allclose(ret['sol'], y)
        self.assertEqual(ret['crit'], 'RTOL')
        self.assertEqual(ret['niter'], 60)

        # L1-norm prox and L2-norm gradient.
        f1 = functions.norm_l1(y=y, lambda_=1.0)
        f2 = functions.norm_l2(y=y, lambda_=0.8)
        ret = solvers.solve([f1, f2], np.zeros(len(y)), **param)
        nptest.assert_allclose(ret['sol'], y)
        self.assertEqual(ret['crit'], 'RTOL')
        self.assertEqual(ret['niter'], 3)
Example #5
    def test_mlfbf(self):
        """
        Test the MLFBF solver with arbitrarily selected functions.

        """
        x = [1., 1., 1.]
        L = np.array([[5, 9, 3], [7, 8, 5], [4, 4, 9], [0, 1, 7]])
        max_step = 1 / (1 + np.linalg.norm(L, 2))
        solver = solvers.mlfbf(L=L, step=max_step / 2.)
        params = {'solver': solver, 'verbosity': 'NONE'}

        def x0(): return np.zeros(len(x))

        # L2-norm prox and dummy prox.
        f = functions.dummy()
        f._prox = lambda x, T: np.maximum(np.zeros(len(x)), x)
        g = functions.norm_l2(lambda_=0.5)
        h = functions.norm_l2(y=np.array([294, 390, 361]), lambda_=0.5)
        ret = solvers.solve([f, g, h], x0(), maxit=1000, rtol=0, **params)
        nptest.assert_allclose(ret['sol'], x, rtol=1e-5)

        # Same test, but with callable L
        solver = solvers.mlfbf(L=lambda x: np.dot(L, x),
                               Lt=lambda y: np.dot(L.T, y),
                               d0=np.dot(L, x0()),
                               step=max_step / 2.)
        ret = solvers.solve([f, g, h], x0(), maxit=1000, rtol=0, **params)
        nptest.assert_allclose(ret['sol'], x, rtol=1e-5)

        # Sanity check
        self.assertRaises(ValueError, solver.pre, [f, g], x0())
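Outside the test harness, mlfbf addresses min_x f(x) + g(Lx) + h(x), where f and g provide proximal operators and h a gradient. A minimal sketch with a hypothetical 2x2 operator and toy data, mirroring the construction above:

import numpy as np
from pyunlocbox import functions, solvers

L = np.array([[1., 2.], [3., 4.]])
f = functions.dummy()
f._prox = lambda x, T: np.maximum(0., x)      # projection onto x >= 0
g = functions.norm_l2(lambda_=0.5)            # composed with L by the solver
h = functions.norm_l2(y=np.array([1., 1.]))   # smooth term, gradient used
max_step = 1 / (1 + np.linalg.norm(L, 2))
solver = solvers.mlfbf(L=L, step=max_step / 2.)
ret = solvers.solve([f, g, h], np.zeros(2), solver, maxit=1000, rtol=0,
                    verbosity='NONE')
print(ret['sol'])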
Example #6
    def test_douglas_rachford(self):
        """
        Test the Douglas-Rachford solver with L1-norm, L2-norm, and dummy functions.

        """
        y = [4, 5, 6, 7]
        solver = solvers.douglas_rachford()
        param = {'solver': solver, 'verbosity': 'NONE'}

        # L2-norm prox and dummy prox.
        f1 = functions.norm_l2(y=y)
        f2 = functions.dummy()
        ret = solvers.solve([f1, f2], np.zeros(len(y)), **param)
        nptest.assert_allclose(ret['sol'], y)
        self.assertEqual(ret['crit'], 'RTOL')
        self.assertEqual(ret['niter'], 35)

        # L2-norm prox and L1-norm prox.
        f1 = functions.norm_l2(y=y)
        f2 = functions.norm_l1(y=y)
        ret = solvers.solve([f1, f2], np.zeros(len(y)), **param)
        nptest.assert_allclose(ret['sol'], y)
        self.assertEqual(ret['crit'], 'RTOL')
        self.assertEqual(ret['niter'], 4)

        # Sanity checks
        x0 = np.zeros((4,))
        solver.lambda_ = 2.
        self.assertRaises(ValueError, solver.pre, [f1, f2], x0)
        solver.lambda_ = -2.
        self.assertRaises(ValueError, solver.pre, [f1, f2], x0)
        self.assertRaises(ValueError, solver.pre, [f1, f2, f1], x0)
Example #7
    def test_forward_backward(self):
        """
        Test forward-backward splitting algorithm without acceleration, and
        with L1-norm, L2-norm, and dummy functions.

        """
        y = [4., 5., 6., 7.]
        solver = solvers.forward_backward(accel=acceleration.dummy())
        param = {'solver': solver, 'rtol': 1e-6, 'verbosity': 'NONE'}

        # L2-norm prox and dummy gradient.
        f1 = functions.norm_l2(y=y)
        f2 = functions.dummy()
        ret = solvers.solve([f1, f2], np.zeros(len(y)), **param)
        nptest.assert_allclose(ret['sol'], y)
        self.assertEqual(ret['crit'], 'RTOL')
        self.assertEqual(ret['niter'], 35)

        # L1-norm prox and L2-norm gradient.
        f1 = functions.norm_l1(y=y, lambda_=1.0)
        f2 = functions.norm_l2(y=y, lambda_=0.8)
        ret = solvers.solve([f1, f2], np.zeros(len(y)), **param)
        nptest.assert_allclose(ret['sol'], y)
        self.assertEqual(ret['crit'], 'RTOL')
        self.assertEqual(ret['niter'], 4)

        # Sanity check
        f3 = functions.dummy()
        x0 = np.zeros((4,))
        self.assertRaises(ValueError, solver.pre, [f1, f2, f3], x0)
Example #11
    def test_primal_dual_solver_comparison(self):
        """
        Test that all primal-dual solvers return the same and correct solution.

        This is a separate test because the primal-dual solvers are too slow
        for the problem used in the general solver comparison test.

        """

        # Convex functions.
        y = np.random.randn(3)
        L = np.random.randn(4, 3)

        sol = y
        y2 = L.dot(y)
        f1 = functions.norm_l1(y=y)
        f2 = functions.norm_l2(y=y2)
        f3 = functions.dummy()

        # Solvers.
        step = 0.5 / (1 + np.linalg.norm(L, 2))
        slvs = []
        slvs.append(solvers.mlfbf(step=step, L=L))
        slvs.append(solvers.projection_based(step=step, L=L))

        # Compare solutions.
        niter = 1000
        params = {'rtol': 0, 'verbosity': 'NONE', 'maxit': niter}
        for solver in slvs:
            x0 = np.zeros(len(y))

            if type(solver) is solvers.mlfbf:
                ret = solvers.solve([f1, f2, f3], x0, solver, **params)
            else:
                ret = solvers.solve([f1, f2], x0, solver, **params)
            nptest.assert_allclose(ret['sol'], sol)
            self.assertEqual(ret['niter'], niter)
            # The initial value was not modified.
            nptest.assert_array_equal(x0, np.zeros(len(y)))

            if type(solver) is solvers.mlfbf:
                ret = solvers.solve([f1, f2, f3],
                                    x0,
                                    solver,
                                    inplace=True,
                                    **params)
            else:
                ret = solvers.solve([f1, f2],
                                    x0,
                                    solver,
                                    inplace=True,
                                    **params)
            # The initial value was modified.
            self.assertIs(ret['sol'], x0)
            nptest.assert_allclose(ret['sol'], sol)
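The inplace flag exercised above changes only where the iterates live, not the answer; a small sketch of the contract (assuming a pyunlocbox version that exposes inplace, as this test does):

import numpy as np
from pyunlocbox import functions, solvers

f1 = functions.norm_l2(y=np.ones(3))
f2 = functions.dummy()
x0 = np.zeros(3)

ret = solvers.solve([f1, f2], x0, verbosity='NONE')
assert ret['sol'] is not x0          # x0 was copied and left untouched

ret = solvers.solve([f1, f2], x0, inplace=True, verbosity='NONE')
assert ret['sol'] is x0              # iterates written directly into x0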
Example #12
    def test_generalized_forward_backward(self):
        """
        Test the generalized forward-backward algorithm.

        """
        y = [4, 5, 6, 7]
        L = 4  # Gradient of the smooth function is Lipschitz continuous.
        solver = solvers.generalized_forward_backward(step=.9 / L, lambda_=.8)
        params = {'solver': solver, 'verbosity': 'NONE'}

        # Functions.
        f1 = functions.norm_l1(y=y, lambda_=.7)    # Non-smooth.
        f2 = functions.norm_l2(y=y, lambda_=L / 2.)  # Smooth.

        # Solve with 1 smooth and 1 non-smooth.
        ret = solvers.solve([f1, f2], np.zeros(len(y)), **params)
        nptest.assert_allclose(ret['sol'], y)
        self.assertEqual(ret['niter'], 25)

        # Solve with 1 smooth.
        ret = solvers.solve([f1], np.zeros(len(y)), **params)
        nptest.assert_allclose(ret['sol'], y)
        self.assertEqual(ret['niter'], 77)

        # Solve with 1 non-smooth.
        ret = solvers.solve([f2], np.zeros(len(y)), **params)
        nptest.assert_allclose(ret['sol'], y)
        self.assertEqual(ret['niter'], 18)

        # Solve with 1 smooth and 2 non-smooth.
        ret = solvers.solve([f1, f2, f2], np.zeros(len(y)), **params)
        nptest.assert_allclose(ret['sol'], y)
        self.assertEqual(ret['niter'], 26)

        # Solve with 2 smooth and 2 non-smooth.
        ret = solvers.solve([f2, f1, f2, f1], np.zeros(len(y)), **params)
        nptest.assert_allclose(ret['sol'], y)
        self.assertEqual(ret['niter'], 25)

        # Sanity checks
        x0 = np.zeros((4,))
        solver.lambda_ = 2.
        self.assertRaises(ValueError, solver.pre, [f1, f2], x0)
        solver.lambda_ = -2.
        self.assertRaises(ValueError, solver.pre, [f1, f2], x0)
        f1 = functions.func()
        f2 = functions.func()
        f3 = functions.func()
        solver.lambda_ = 1.
        self.assertRaises(ValueError, solver.pre, [f1, f2, f3], x0)
Example #14
    def test_solver_comparison(self):
        """
        Test that all solvers return the same and correct solution.

        """

        # Convex functions.
        y = [1, 0, 0.1, 8, -6.5, 0.2, 0.004, 0.01]
        sol = [0.75, 0, 0, 7.75, -6.25, 0, 0, 0]
        w1, w2 = .8, .4
        f1 = functions.norm_l2(y=y, lambda_=w1 / 2.)  # Smooth.
        f2 = functions.norm_l1(lambda_=w2 / 2.)       # Non-smooth.

        # Solvers.
        L = w1  # Lipschitz continuous gradient.
        step = 1. / L
        lambda_ = 0.5
        params = {'step': step, 'lambda_': lambda_}
        slvs = []
        slvs.append(solvers.forward_backward(accel=acceleration.dummy(),
                                             step=step))
        slvs.append(solvers.douglas_rachford(**params))
        slvs.append(solvers.generalized_forward_backward(**params))

        # Compare solutions.
        params = {'rtol': 1e-14, 'verbosity': 'NONE', 'maxit': 1e4}
        niters = [2, 61, 26]
        for solver, niter in zip(slvs, niters):
            x0 = np.zeros(len(y))
            ret = solvers.solve([f1, f2], x0, solver, **params)
            nptest.assert_allclose(ret['sol'], sol)
            self.assertEqual(ret['niter'], niter)
            self.assertIs(ret['sol'], x0)  # The initial value was modified.
    def test_regularized_nonlinear(self):
        """
        Test gradient descent solver with regularized non-linear acceleration,
        solving problems with L2-norm functions.

        """
        dim = 25
        np.random.seed(0)
        x0 = np.random.rand(dim)
        xstar = np.random.rand(dim)
        x0 = xstar + 5. * (x0 - xstar) / np.linalg.norm(x0 - xstar)

        A = np.random.rand(dim, dim)
        step = 1 / np.linalg.norm(np.dot(A.T, A))

        accel = acceleration.regularized_nonlinear(k=5)
        solver = solvers.gradient_descent(step=step, accel=accel)
        param = {'solver': solver, 'rtol': 0,
                 'maxit': 200, 'verbosity': 'NONE'}

        # L2-norm prox and dummy gradient.
        f1 = functions.norm_l2(lambda_=0.5, A=A, y=np.dot(A, xstar))
        f2 = functions.dummy()
        ret = solvers.solve([f1, f2], x0, **param)
        pctdiff = 100 * np.sum((xstar - ret['sol'])**2) / np.sum(xstar**2)
        nptest.assert_array_less(pctdiff, 1.91)

        # Sanity checks
        accel = acceleration.regularized_nonlinear()
        self.assertRaises(ValueError, accel.__init__, 10, ['not', 'good'])
        self.assertRaises(ValueError, accel.__init__, 10, 'nope')
Example #16
    def test_acceleration_comparison(self):
        """
        Test that all acceleration schemes return the same and correct solution.

        """

        # Convex functions.
        y = [1, 0, 0.1, 8, -6.5, 0.2, 0.004, 0.01]
        sol = [0.75, 0, 0, 7.75, -6.25, 0, 0, 0]
        w1, w2 = .8, .4
        f1 = functions.norm_l2(y=y, lambda_=w1 / 2.)  # Smooth.
        f2 = functions.norm_l1(lambda_=w2 / 2.)  # Non-smooth.

        # Solvers.
        L = w1  # Lipschitz continuous gradient.
        step = 1. / L
        slvs = []
        slvs.append(
            solvers.forward_backward(accel=acceleration.dummy(), step=step))
        slvs.append(
            solvers.forward_backward(accel=acceleration.fista(), step=step))
        slvs.append(
            solvers.forward_backward(
                accel=acceleration.fista_backtracking(eta=.999), step=step))

        # Compare solutions.
        params = {'rtol': 1e-14, 'verbosity': 'NONE', 'maxit': 1e4}
        niters = [2, 2, 6]
        for solver, niter in zip(slvs, niters):
            x0 = np.zeros(len(y))
            ret = solvers.solve([f1, f2], x0, solver, **params)
            nptest.assert_allclose(ret['sol'], sol)
            self.assertEqual(ret['niter'], niter)
    def test_projection_based(self):
        """
        Test the projection-based solver with arbitrarily selected functions.

        """
        x = [0, 0, 0]
        L = np.array([[5, 9, 3], [7, 8, 5], [4, 4, 9], [0, 1, 7]])
        solver = solvers.projection_based(L=L, step=1.)
        params = {'solver': solver, 'verbosity': 'NONE'}

        # L1-norm prox and dummy prox.
        f = functions.norm_l1(y=np.array([294, 390, 361]))
        g = functions.norm_l1()
        ret = solvers.solve([f, g],
                            np.array([500, 1000, -400]),
                            maxit=1000,
                            rtol=None,
                            xtol=0.1,
                            **params)
        nptest.assert_allclose(ret['sol'], x, rtol=1e-5)

        # Sanity checks
        def x0():
            return np.zeros(len(x))

        self.assertRaises(ValueError, solver.pre, [f], x0())
        solver.lambda_ = 3.
        self.assertRaises(ValueError, solver.pre, [f, g], x0())
        solver.lambda_ = -3.
        self.assertRaises(ValueError, solver.pre, [f, g], x0())
Example #21
def optGFB(D, GK0, triangles, fxy, E, maxit, x1, step, tau1, tau2, tau3, flag):
    # NOTE: tau3 is unused in this body.
    N = len(E)

    # Box constraint 0 <= x <= 0.7: zero-cost function whose prox is a clip.
    f2 = functions.func()
    f2._eval = lambda x: 0
    f2._prox = lambda x, T: np.clip(x, 0, 0.7)

    # TV-like prior: L1 norm of the graph gradient over node neighbors.
    T1 = Nodeneighbor(triangles, N)
    g = lambda x: Ggradient(x, T1)
    f3 = functions.norm_l1(A=g, At=None, dim=1, y=np.zeros(N),
                           lambda_=tau2)

    yy2 = np.ravel(fxy)

    # Noise covariance diag(3, 1, 3, 1, ...) propagated through GK0, with a
    # small ridge so the inverse exists.
    N_cov = np.zeros((2 * N, 2 * N))
    for j in np.arange(0, 2 * N, 2):
        N_cov[j, j] = 3
        N_cov[j + 1, j + 1] = 1
    gamma = GK0 @ N_cov @ GK0.T
    gamma += np.eye(gamma.shape[1]) * 1e-5
    gamma_inv = np.linalg.inv(gamma)

    # Weighted data-fidelity term. lambda_=tau1 is not referenced by the
    # custom _eval/_grad below, which hard-code the 1e+7 scale; note that
    # the exact gradient of the quadratic carries an extra factor of 2.
    f8 = functions.func(lambda_=tau1)
    f8._eval = lambda x: (yy2 - D @ x).T @ gamma_inv @ (yy2 - D @ x) * 1e+7
    f8._grad = lambda x: -D.T @ gamma_inv @ (yy2 - D @ x) * 1e+7

    # Generalized forward-backward; without f3 the system is singular.
    solver2 = solvers.generalized_forward_backward(step=step * 0.2)
    ret2 = solvers.solve([f8, f3, f2], x1, solver2, rtol=1e-15, maxit=maxit)
    objective = np.array(ret2['objective'])
    sol = ret2['sol']

    if flag == 1:
        import matplotlib.pyplot as plt

        _ = plt.figure(figsize=(10, 4))
        _ = plt.subplot(121)
        _ = plt.plot(E, 'o', label='Original E')
        _ = plt.plot(ret2['sol'], 'xr', label='Reconstructed E')
        _ = plt.grid(True)
        _ = plt.title('Achieved reconstruction')
        _ = plt.legend(numpoints=1)
        _ = plt.xlabel('Signal dimension number')
        _ = plt.ylabel('Signal value')

        _ = plt.subplot(122)
        _ = plt.semilogy(objective[:, 0], '-.', label='l2-norm')
        _ = plt.semilogy(np.sum(objective, axis=1), label='Global objective')
        _ = plt.grid(True)
        _ = plt.title('Convergence')
        _ = plt.legend(numpoints=1)
        _ = plt.xlabel('Iteration number')
        _ = plt.ylabel('Objective function value')
        _ = plt.show()
    return sol
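Reading the terms off the code above, optGFB asks the generalized forward-backward solver for a minimizer of

    1e+7 * (y - D x)^T Gamma^{-1} (y - D x) + tau2 * ||G x||_1 + i_[0, 0.7](x),

where y = ravel(fxy), Gamma = GK0 @ N_cov @ GK0.T + 1e-5 * I, G is the graph-gradient operator built from Nodeneighbor/Ggradient, and i_[0, 0.7] is the indicator of the box constraint enforced by f2's clipping prox.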
Example #22
    def test_primal_dual_solver_comparison(self):
        """
        Test that all primal-dual solvers return the same and correct solution.

        This is a separate test because the primal-dual solvers are too slow
        for the problem used in the general solver comparison test.

        """

        # Convex functions.
        y = np.array([294, 390, 361])
        sol = [1., 1., 1.]
        L = np.array([[5, 9, 3], [7, 8, 5], [4, 4, 9], [0, 1, 7]])
        f1 = functions.norm_l1(y=y)
        f2 = functions.norm_l1()
        f3 = functions.dummy()

        # Solvers.
        step = 0.5 / (1 + np.linalg.norm(L, 2))
        slvs = []
        slvs.append(solvers.mlfbf(step=step))
        slvs.append(solvers.projection_based(step=step))

        # Compare solutions.
        params = {'rtol': 0, 'verbosity': 'NONE', 'maxit': 50}
        niters = [50, 50]
        for solver, niter in zip(slvs, niters):
            x0 = np.zeros(len(y))

            if type(solver) is solvers.mlfbf:
                ret = solvers.solve([f1, f2, f3], x0, solver, **params)
            else:
                ret = solvers.solve([f1, f2], x0, solver, **params)

            nptest.assert_allclose(ret['sol'], sol)
            self.assertEqual(ret['niter'], niter)
            self.assertIs(ret['sol'], x0)  # The initial value was modified.
Example #24
def lasso(A, b, err, nvis, nt3phi, l, maxit):
    # NOTE: `err` and `nt3phi` are unused in this body.
    from pyunlocbox import functions
    from pyunlocbox import solvers
    x = np.zeros(A.shape[1])
    x = x.reshape(-1, 1)

    # Wrap the phase rows (from row nvis on) into (-90, 90) degrees.
    for ty in range(A.shape[1]):
        A[nvis:, ty] = np.rad2deg(np.arctan(np.tan(np.deg2rad(A[nvis:, ty]))))
    b[nvis:, 0] = np.rad2deg(np.arctan(np.tan(np.deg2rad(b[nvis:, 0]))))

    f1 = functions.norm_l1(lambda_=l)  # sparsity penalty
    f2 = functions.norm_l2(y=b, A=A)   # data fidelity
    step = 0.5 / np.linalg.norm(A, ord=2)**2
    solver = solvers.forward_backward(step=step)
    ret = solvers.solve([f1, f2], x, solver, rtol=1e-6, maxit=maxit)
    return ret
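A hypothetical call showing the expected shapes (random data; nvis separates the first rows from the phase rows below them, which the function wraps into (-90, 90) degrees; err and nt3phi can be anything since they are unused):

import numpy as np

nvis, nphase, nparams = 15, 10, 20
A = np.random.randn(nvis + nphase, nparams)
b = np.random.randn(nvis + nphase, 1)
ret = lasso(A, b, err=None, nvis=nvis, nt3phi=nphase, l=0.1, maxit=500)
print(ret['sol'].shape)  # (nparams, 1)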
Example #25
    def inner_solve(self,
                    x,
                    y0,
                    Lyy=1,
                    r2=functions.dummy(),
                    rtol=1e-9,
                    maxit=100000,
                    verbosity='NONE'):
        """
        Solve min_y f(x,y) + r_2(y)

        Input
        -----
        x           - array
        y0          - initial iterate
        Lyy         - Lipschitz constant of the gradient of f(x, .)
        r2          - pyunlocbox function object
        rtol        - stopping criterion for solver
        maxit       - max iteration count for inner solve
        verbosity   - level of verbosity for inner solver

        Output
        ------
        y           - array
        """

        # get pyunlocbox function object for f(x,.)
        f = self.misfit(x=x)

        # setup solver
        accel = acceleration.fista_backtracking()
        solver = solvers.forward_backward(step=1 / Lyy, accel=accel)

        # run solver
        results = solvers.solve([f, r2],
                                y0,
                                solver,
                                rtol=rtol,
                                maxit=maxit,
                                verbosity=verbosity)

        # return result
        y = results['sol']

        return y
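Standalone, the same inner solve reduces to the sketch below; a toy smooth term stands in for self.misfit(x), and Lyy = 2 because the gradient of ||y - c||_2^2 is 2-Lipschitz:

import numpy as np
from pyunlocbox import acceleration, functions, solvers

f = functions.norm_l2(y=np.array([1., 2., 3.]))   # stands in for misfit(x)
r2 = functions.norm_l1(lambda_=0.1)               # non-smooth regularizer
Lyy = 2.
solver = solvers.forward_backward(step=1 / Lyy,
                                  accel=acceleration.fista_backtracking())
results = solvers.solve([f, r2], np.zeros(3), solver,
                        rtol=1e-9, maxit=100000, verbosity='NONE')
y = results['sol']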
    def test_gradient_descent(self):
        """
        Test gradient descent solver with l2-norms in the objective.

        """
        y = [4., 5., 6., 7.]
        A = np.array([[1., 1., 1., 0.], [0., 1., 1., 1.], [0., 1., 0., 0.],
                      [1., 0., 0., 1.]])
        sol = np.array([0.28846154, 0.11538462, 1.23076923, 1.78846154])
        step = 0.5 / (np.linalg.norm(A) + 1.)
        solver = solvers.gradient_descent(step=step)
        param = {'solver': solver, 'rtol': 0, 'verbosity': 'NONE'}

        f1 = functions.norm_l2(y=y)
        f2 = functions.norm_l2(A=A)
        ret = solvers.solve([f1, f2], np.zeros(len(y)), **param)
        nptest.assert_allclose(ret['sol'], sol)
        self.assertEqual(ret['crit'], 'MAXIT')
        self.assertEqual(ret['niter'], 200)
Example #28
def Experiment(iterable):
    # NOTE: `iterable` is unused; it only lets the function be mapped over a
    # range of trials. Relies on module-level globals N (signal length),
    # n (number of measurements), k (sparsity), and tolerance.
    M = 0
    Omega = np.random.permutation(N)
    x_0 = np.zeros(N)
    x_0[Omega[0:k]] = np.random.standard_normal(k)  # k-sparse ground truth
    psi = np.ones(N)
    Psi = np.diag(psi)  # sparsifying basis (identity here)
    Phi = np.random.randn(n, N)  # Gaussian measurement matrix
    A = np.dot(Phi, Psi)
    y = np.dot(A, x_0)  # noiseless measurements
    x = np.zeros(N)
    f1 = functions.norm_l1()
    f2 = functions.proj_b2(epsilon=0, y=y, A=A, tight=False,
                           nu=np.linalg.norm(A, ord=2)**2)
    solver = solvers.douglas_rachford(step=1e-2)
    ret = solvers.solve([f1, f2], x, solver, rtol=1e-4, maxit=300)
    x = ret.get('sol')
    residual = (float(LA.norm(x - x_0, 2)) / LA.norm(x_0, 2))**2
    if residual <= tolerance:
        M += 1
    return M
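Experiment returns 1 when the relative squared recovery error is within tolerance, so mapping it over a range of trials estimates a success probability; a sketch with hypothetical global values:

import numpy as np

# Assumed module-level globals read inside Experiment:
N, n, k = 256, 96, 10   # signal length, measurements, sparsity
tolerance = 1e-2        # success threshold on relative squared error

trials = 50
success_rate = sum(Experiment(i) for i in range(trials)) / trials
print(success_rate)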
Example #29
def solve_lasso(A_val, y_val, hparams):  # (n,m), (1,m)
    # NOTE: relies on a module-level `sig_shape` (signal dimension).
    if hparams.lasso_solver == 'sklearn':
        lasso_est = Lasso(alpha=hparams.lmbd)
        lasso_est.fit(A_val.T, y_val.reshape(hparams.num_measurements))
        x_hat = lasso_est.coef_
        x_hat = np.reshape(x_hat, [-1])
    elif hparams.lasso_solver == 'cvxopt':
        A_mat = matrix(A_val.T)  # [m,n]
        y_mat = matrix(y_val.T)
        x_hat_mat = l1regls(A_mat, y_mat)
        x_hat = np.asarray(x_hat_mat)
        x_hat = np.reshape(x_hat, [-1])  # [n, ]
    elif hparams.lasso_solver == 'pyunlocbox':
        tau = hparams.tau
        f1 = functions.norm_l1(lambda_=tau)
        f2 = functions.norm_l2(y=y_val.T, A=A_val.T)
        if hparams.model_type == 'compressing':
            if hparams.image_mode == '3D':
                A_real = np.random.normal(
                    size=(int(hparams.num_measurements / 3), sig_shape))
            else:
                A_real = np.random.normal(
                    size=(hparams.num_measurements, sig_shape))
            step = 0.5 / np.linalg.norm(A_real, ord=2)**2
        else:
            step = 0.5 / np.linalg.norm(A_val, ord=2)**2
        solver = solvers.forward_backward(step=step)
        x0 = np.zeros((sig_shape, 1))
        ret = solvers.solve([f1, f2], x0, solver, rtol=1e-4, maxit=3000)
        x_hat_mat = ret['sol']
        x_hat = np.asarray(x_hat_mat)
        x_hat = np.reshape(x_hat, [-1])  # [n, ]
    return x_hat
Example #31
def main_tv(hparams):

    ## === Set up=== ##
    # Printer setup
    #sys.stdout = open(hparams.text_file_path, 'w')

    # Get inputs
    if hparams.image_mode == '1D':
        x_real = np.array(load_1D(hparams.path, hparams.img_name)).astype(
            np.float32)  #[4096,1]
    elif hparams.image_mode == '2D':
        x_real = np.array(load_2D(hparams.path, hparams.img_name)).astype(
            np.float32)  #[64,64]
    elif hparams.image_mode == '3D':
        x_real = np.array(
            load_img(hparams.path,
                     hparams.img_name, hparams.decoder_type)).astype(
                         np.float32)  #[178,218,3] /  [224,224,3]

    # Initialization
    #np.random.seed(7)
    sig_shape = x_real.shape[0] * x_real.shape[
        1]  #n = 4096*1 or 64*64 or 178*218 or 224*224
    random_vector = None  #initialization
    A = None  #initialization
    selection_mask = None  #initialization
    random_arr = random_flip(sig_shape)  #initialization #[n,]
    mask = None  #initialization

    # Get measurement matrix
    if hparams.model_type == 'denoising' or hparams.model_type == 'compressing':
        if hparams.type_measurements == 'random':  #compressed sensing
            if hparams.image_mode != '3D':
                A = np.random.randn(hparams.num_measurements,
                                    sig_shape).astype(np.float32)  #[m,n]
                noise_shape = [hparams.num_measurements, 1]  #[m,1]
            else:
                A = np.random.randn(int(hparams.num_measurements / 3),
                                    sig_shape).astype(np.float32)  #[m,n]
                noise_shape = [int(hparams.num_measurements / 3), 1]  #[m,1]
        elif hparams.type_measurements == 'identity':  #denoising
            A = np.identity(sig_shape).astype(np.float32)  #[n,n]
            noise_shape = [sig_shape, 1]  #[n,1]
            observ_noise = hparams.noise_level * np.random.randn(
                noise_shape[0], noise_shape[1])  #[n,1]
        elif hparams.type_measurements == 'circulant':  #compressed sensing
            if hparams.image_mode != '3D':
                random_vector = np.random.normal(size=sig_shape)  #[n,]
                selection_mask = create_A_selection(
                    sig_shape, hparams.num_measurements)  #[1,n]
            else:
                random_vector = np.random.normal(size=sig_shape)  #[n,]
                selection_mask = create_A_selection(
                    sig_shape, int(hparams.num_measurements / 3))  #[1,n]

            def circulant_np(signal_vector,
                             random_arr_p=random_arr.reshape(-1, 1),
                             random_vector_p=random_vector.reshape(-1, 1),
                             selection_mask_p=selection_mask.reshape(-1, 1)):
                #step 0: Flip
                signal_vector = signal_vector * random_arr_p  #[n,1] * [n,1] -> [n,1]
                #step 1: F^{-1} @ x
                r1 = ifft(signal_vector)  #[n,1]
                #step 2: Diag() @ F^{-1} @ x
                Ft = fft(random_vector_p)  #[n,1]
                r2 = np.multiply(r1, Ft)  #[n,1] * [n,1] -> [n,1]
                #step 3: F @ Diag() @ F^{-1} @ x
                compressive = fft(r2)  #[n,1]
                #step 4: R_{omega} @ C_{t} @ D_{epsilon} @ x
                compressive = compressive.real  #[n,1]
                select_compressive = compressive * selection_mask_p  #[n,1] * [n,1] -> [n,1]
                return select_compressive

    elif hparams.model_type == 'inpainting':
        if hparams.image_mode == '1D':
            mask = load_mask('Masks', hparams.mask_name_1D, hparams.image_mode,
                             hparams.decoder_type)  #[n,1]
        elif hparams.image_mode == '2D' or hparams.image_mode == '3D':
            mask = load_mask('Masks', hparams.mask_name_2D, hparams.image_mode,
                             hparams.decoder_type)  #[n,n]

    ## === TV norm === ##
    if hparams.decoder_type == 'tv_norm':
        # Construct observation and perform reconstruction
        if hparams.model_type == 'inpainting':
            # measurements and observation
            g = lambda x: mask * x  #[4096,1] * [4096,1] / [178,218,3] * [178,218,3]
            y_real = g(x_real)  #[4096,1] / [178,218,3]
            # tv norm
            if hparams.image_mode == '1D':
                f1 = functions.norm_tv(dim=1)
            elif hparams.image_mode == '2D':
                f1 = functions.norm_tv(dim=2)
            elif hparams.image_mode == '3D':
                f1 = functions.norm_tv(dim=3)
            # L2 norm
            tau = hparams.tau
            f2 = functions.norm_l2(y=y_real, A=g, lambda_=tau)
            # optimisation
            solver = solvers.forward_backward(step=0.5 / tau)
            x0 = np.array(y_real)  # Make a copy to preserve im_masked.
            ret = solvers.solve([f1, f2], x0, solver,
                                maxit=3000)  #output = ret['sol']
            # output
            out_img = ret['sol']  #[4096,1] / [178,218,3]
        elif hparams.model_type == 'denoising':
            assert hparams.type_measurements == 'identity'
            if hparams.image_mode == '3D':
                out_img_list = []
                for i in range(x_real.shape[-1]):
                    # measurements and observation
                    y_real = np.matmul(A, x_real[:, :, i].reshape(
                        -1, 1)) + observ_noise  # [n,n] * [n,1] -> [n,1]
                    # tv norm
                    f1 = functions.norm_tv(dim=1)
                    # epsilon
                    N = math.sqrt(sig_shape)
                    epsilon = N * hparams.noise_level
                    # L2 ball
                    y = np.reshape(y_real, -1)  #[n,1] -> [n,]
                    f = functions.proj_b2(y=y, epsilon=epsilon)
                    f2 = functions.func()
                    # Indicator functions
                    f2._eval = lambda x: 0

                    def prox(x, step):
                        return np.reshape(f.prox(np.reshape(x, -1), 0),
                                          y_real.shape)

                    f2._prox = prox
                    # solver
                    solver = solvers.douglas_rachford(step=0.1)
                    x0 = np.array(y_real)  #[n,1]
                    ret = solvers.solve([f1, f2], x0, solver)
                    # output
                    out_img_piece = ret['sol'].reshape(
                        x_real.shape[0], x_real.shape[1])  #[178,218]
                    out_img_list.append(out_img_piece)
                out_img = np.transpose(np.array(out_img_list), (1, 2, 0))
            else:
                # measurements and observation
                y_real = np.matmul(A, x_real.reshape(
                    -1, 1)) + observ_noise  # [n,n] * [n,1] -> [n,1]
                # tv norm
                f1 = functions.norm_tv(dim=1)
                # epsilon
                N = math.sqrt(sig_shape)
                epsilon = N * hparams.noise_level
                # L2 ball
                y = np.reshape(y_real, -1)  #[n,1] -> [n,]
                f = functions.proj_b2(y=y, epsilon=epsilon)
                f2 = functions.func()
                # Indicator functions
                f2._eval = lambda x: 0

                def prox(x, step):
                    return np.reshape(f.prox(np.reshape(x, -1), 0),
                                      y_real.shape)

                f2._prox = prox
                # solver
                solver = solvers.douglas_rachford(step=0.1)
                x0 = np.array(y_real)  #[n,1]
                ret = solvers.solve([f1, f2], x0, solver)
                # output
                out_img = ret['sol']  #[n,1]
        elif hparams.model_type == 'compressing':
            assert hparams.type_measurements == 'circulant'
            if hparams.image_mode == '3D':
                out_img_list = []
                for i in range(x_real.shape[-1]):
                    # construct observation
                    g = circulant_np
                    y_real = g(x_real[:, :, i].reshape(-1, 1))  #[n,1] -> [n,1]
                    # tv norm
                    f1 = functions.norm_tv(dim=1)
                    # L2 norm
                    tau = hparams.tau
                    f2 = functions.norm_l2(y=y_real, A=g, lambda_=tau)
                    # optimisation solver
                    A_real = np.random.normal(
                        size=(int(hparams.num_measurements / 3), sig_shape))
                    step = 0.5 / np.linalg.norm(A_real, ord=2)**2
                    solver = solvers.forward_backward(
                        step=step
                    )  #solver = solvers.forward_backward(step=0.5/tau)
                    # initialisation
                    x0 = np.array(y_real)  #[n,1]
                    # output
                    ret = solvers.solve([f1, f2],
                                        x0,
                                        solver,
                                        rtol=1e-4,
                                        maxit=3000)  #output = ret['sol']
                    out_img_piece = ret['sol'].reshape(
                        x_real.shape[0], x_real.shape[1])  #[178,218]
                    out_img_list.append(out_img_piece)
                out_img = np.transpose(np.array(out_img_list), (1, 2, 0))
            else:
                # construct observation
                g = circulant_np
                y_real = g(x_real.reshape(-1, 1))  #[n,1] -> [n,1]
                # tv norm
                f1 = functions.norm_tv(dim=1)
                # L2 norm
                tau = hparams.tau
                f2 = functions.norm_l2(y=y_real, A=g, lambda_=tau)
                # optimisation solver
                A_real = np.random.normal(size=(hparams.num_measurements,
                                                sig_shape))
                step = 0.5 / np.linalg.norm(A_real, ord=2)**2
                solver = solvers.forward_backward(
                    step=step
                )  #solver = solvers.forward_backward(step=0.5/tau)
                # initialisation
                x0 = np.array(y_real)  #[n,1]
                # output
                ret = solvers.solve([f1, f2],
                                    x0,
                                    solver,
                                    rtol=1e-4,
                                    maxit=3000)  #output = ret['sol']
                out_img = ret['sol']  #[n,1]

    # ## === Lasso  wavelet === ##
    elif hparams.decoder_type == 'lasso_wavelet':
        # Construct lasso wavelet functions
        def solve_lasso(A_val, y_val, hparams):  #(n,m), (1,m)
            if hparams.lasso_solver == 'sklearn':
                lasso_est = Lasso(alpha=hparams.lmbd)
                lasso_est.fit(A_val.T, y_val.reshape(hparams.num_measurements))
                x_hat = lasso_est.coef_
                x_hat = np.reshape(x_hat, [-1])
            elif hparams.lasso_solver == 'cvxopt':
                A_mat = matrix(A_val.T)  #[m,n]
                y_mat = matrix(y_val.T)  ###
                x_hat_mat = l1regls(A_mat, y_mat)
                x_hat = np.asarray(x_hat_mat)
                x_hat = np.reshape(x_hat, [-1])  #[n, ]
            elif hparams.lasso_solver == 'pyunlocbox':
                tau = hparams.tau
                f1 = functions.norm_l1(lambda_=tau)
                f2 = functions.norm_l2(y=y_val.T, A=A_val.T)
                if hparams.model_type == 'compressing':
                    if hparams.image_mode == '3D':
                        A_real = np.random.normal(
                            size=(int(hparams.num_measurements / 3),
                                  sig_shape))
                    else:
                        A_real = np.random.normal(
                            size=(hparams.num_measurements, sig_shape))
                    step = 0.5 / np.linalg.norm(A_real, ord=2)**2
                else:
                    step = 0.5 / np.linalg.norm(A_val, ord=2)**2
                solver = solvers.forward_backward(step=step)
                x0 = np.zeros((sig_shape, 1))
                ret = solvers.solve([f1, f2],
                                    x0,
                                    solver,
                                    rtol=1e-4,
                                    maxit=3000)
                x_hat_mat = ret['sol']
                x_hat = np.asarray(x_hat_mat)
                x_hat = np.reshape(x_hat, [-1])  #[n, ]
            return x_hat

        #generate basis
        def generate_basis(size):
            """generate the basis"""
            x = np.zeros((size, size))
            coefs = pywt.wavedec2(x, 'db1')
            n_levels = len(coefs)
            basis = []
            for i in range(n_levels):
                coefs[i] = list(coefs[i])
                n_filters = len(coefs[i])
                for j in range(n_filters):
                    for m in range(coefs[i][j].shape[0]):
                        try:
                            for n in range(coefs[i][j].shape[1]):
                                coefs[i][j][m][n] = 1
                                temp_basis = pywt.waverec2(coefs, 'db1')
                                basis.append(temp_basis)
                                coefs[i][j][m][n] = 0
                        except IndexError:
                            coefs[i][j][m] = 1
                            temp_basis = pywt.waverec2(coefs, 'db1')
                            basis.append(temp_basis)
                            coefs[i][j][m] = 0
            basis = np.array(basis)
            return basis

        def wavelet_basis(path_):
            if path_ == 'Ieeg_signal':
                W_ = generate_basis(32)
                W_ = W_.reshape((1024, 1024))
            elif path_ == 'Celeb_signal':
                W_ = generate_basis(128)
                W_ = W_.reshape((16384, 16384))
            else:
                W_ = generate_basis(64)
                W_ = W_.reshape((4096, 4096))
            return W_

        def lasso_wavelet_estimator(A_val, y_val, hparams):  #(n,m), (1,m)
            W = wavelet_basis(hparams.path)  #[n,n]
            if not callable(A_val):
                WA = np.dot(W, A_val)  #[n,n] * [n,m] = [n,m]
            else:
                WA = np.array([
                    A_val(W[i, :].reshape(-1, 1)).reshape(-1)
                    for i in range(len(W))
                ])  #[n,n] -> [n,n]
            z_hat = solve_lasso(WA, y_val, hparams)  # [n, ]
            x_hat = np.dot(z_hat, W)  #[n, ] * [n,n] = [n, ]
            x_hat_max = np.abs(x_hat).max()
            x_hat = x_hat / (1.0 * x_hat_max)
            return x_hat

        # Construct inpainting masks
        def get_A_inpaint(mask_p):
            mask = mask_p.reshape(1, -1)
            A = np.eye(np.prod(mask.shape)) * np.tile(mask,
                                                      [np.prod(mask.shape), 1])
            A = np.asarray([a for a in A if np.sum(a) != 0])
            # Scale so that the squared norm of each row of A is sig_shape.
            A = np.sqrt(sig_shape) * A
            assert all(np.abs(np.sum(A**2, 1) - sig_shape) < 1e-6)
            return A.T

        # Perform reconstruction
        if hparams.model_type == 'inpainting':
            # measurements and observation
            A_val = get_A_inpaint(mask)  #(n,m)
            if hparams.image_mode == '3D':
                out_img_list = []
                for i in range(x_real.shape[-1]):
                    y_real = np.matmul(x_real[:, :, i].reshape(1, -1),
                                       A_val)  #(1,m)
                    out_img_piece = lasso_wavelet_estimator(
                        A_val, y_real, hparams)
                    out_img_piece = out_img_piece.reshape(
                        x_real.shape[0], x_real.shape[1])
                    out_img_list.append(out_img_piece)
                out_img = np.transpose(np.array(out_img_list), (1, 2, 0))
            elif hparams.image_mode == '1D':
                y_real = np.matmul(x_real.reshape(1, -1), A_val)  #(1,m)
                out_img = lasso_wavelet_estimator(A_val, y_real, hparams)
                out_img = out_img.reshape(-1, 1)
        elif hparams.model_type == 'denoising':
            assert hparams.type_measurements == 'identity'
            A_val = A  #(n,n)
            if hparams.image_mode == '3D':
                out_img_list = []
                for i in range(x_real.shape[-1]):
                    y_real = x_real[:, :, i].reshape(1, -1) + observ_noise.T
                    out_img_piece = lasso_wavelet_estimator(
                        A_val, y_real, hparams)
                    out_img_piece = out_img_piece.reshape(
                        x_real.shape[0], x_real.shape[1])
                    out_img_list.append(out_img_piece)
                out_img = np.transpose(np.array(out_img_list), (1, 2, 0))
            elif hparams.image_mode == '1D':
                y_real = np.matmul(x_real.reshape(1, -1),
                                   A_val) + observ_noise.T
                out_img = lasso_wavelet_estimator(A_val, y_real, hparams)
                out_img = out_img.reshape(-1, 1)
        elif hparams.model_type == 'compressing':
            assert hparams.type_measurements == 'circulant'
            A_val = circulant_np
            if hparams.image_mode == '3D':
                out_img_list = []
                for i in range(x_real.shape[-1]):
                    y_real = A_val(x_real[:, :, i].reshape(-1, 1)).reshape(
                        1, -1)  #[n,1] -> [1,n]
                    out_img_piece = lasso_wavelet_estimator(
                        A_val, y_real, hparams)
                    out_img_piece = out_img_piece.reshape(
                        x_real.shape[0], x_real.shape[1])
                    out_img_list.append(out_img_piece)
                out_img = np.transpose(np.array(out_img_list), (1, 2, 0))
            elif hparams.image_mode == '1D':
                y_real = A_val(x_real).reshape(1, -1)  #[n,1] -> [1,n]
                out_img = lasso_wavelet_estimator(A_val, y_real, hparams)
                out_img = out_img.reshape(-1, 1)

    ## === Printer === ##
    # Compute and print measurement and l2 loss
    # if hparams.image_mode == '3D' and hparams.model_type != 'inpainting':
    #     x_real = x_real.reshape(-1,1)
    l2_losses = get_l2_loss(out_img, x_real, hparams.image_mode,
                            hparams.decoder_type)
    psnr = 10 * np.log10(1. / l2_losses)  # PSNR, assuming signal values in [0, 1]

    # Printer info
    if hparams.model_type == 'inpainting':
        if hparams.image_mode == '1D':
            mask_info = hparams.mask_name_1D[8:-4]
        elif hparams.image_mode == '2D' or hparams.image_mode == '3D':
            mask_info = hparams.mask_name_2D[8:-4]
        type_mea_info = 'NA'
        num_mea_info = 'NA'
        noise_level_info = 'NA'
    elif hparams.model_type == 'compressing':
        mask_info = 'NA'
        type_mea_info = hparams.type_measurements
        num_mea_info = str(hparams.num_measurements)
        noise_level_info = 'NA'
    elif hparams.model_type == 'denoising':
        mask_info = 'NA'
        type_mea_info = 'NA'
        num_mea_info = 'NA'
        noise_level_info = str(hparams.noise_level)

    # Print result
    print(
        'Final representation PSNR for img_name:{}, model_type:{}, type_mea:{}, num_mea:{}, mask:{}, decoder:{} tau:{} noise:{} is {}'
        .format(hparams.img_name, hparams.model_type, type_mea_info,
                num_mea_info, mask_info, hparams.decoder_type, hparams.tau,
                noise_level_info, psnr))
    print('END')
    print('\t')
    #sys.stdout.close()

    ## == to pd frame == ##
    if hparams.pickle == 1:
        pickle_file_path = hparams.pickle_file_path
        if not os.path.exists(pickle_file_path):
            d = {
                'img_name': [hparams.img_name],
                'model_type': [hparams.model_type],
                'type_mea': [type_mea_info],
                'num_mea': [num_mea_info],
                'mask_info': [mask_info],
                'decoder_type': [hparams.decoder_type],
                'tau': [hparams.tau],
                'noise': [noise_level_info],
                'psnr': [psnr]
            }
            df = pd.DataFrame(data=d)
            df.to_pickle(pickle_file_path)
        else:
            d = {
                'img_name': hparams.img_name,
                'model_type': hparams.model_type,
                'type_mea': type_mea_info,
                'num_mea': num_mea_info,
                'mask_info': mask_info,
                'decoder_type': hparams.decoder_type,
                'tau': hparams.tau,
                'noise': noise_level_info,
                'psnr': psnr
            }
            df = pd.read_pickle(pickle_file_path)
            # DataFrame.append was removed in pandas 2.0; concatenate a one-row frame instead.
            df = pd.concat([df, pd.DataFrame([d])], ignore_index=True)
            df.to_pickle(pickle_file_path)

    ## === Save === ##
    if hparams.save == 1:
        save_out_img(out_img, 'result/', hparams.img_name,
                     hparams.decoder_type, hparams.model_type, num_mea_info,
                     mask_info, noise_level_info, hparams.image_mode)
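A note on the PSNR computed above: with signals scaled to [0, 1] the peak value is 1, so PSNR reduces to -10 * log10(MSE). A minimal sketch of that identity (the helper name is illustrative only, not part of this script):

import numpy as np

def psnr_from_mse(mse, peak=1.0):
    # PSNR = 10 * log10(peak**2 / MSE); with peak = 1 this is -10 * log10(MSE).
    return 10 * np.log10(peak ** 2 / mse)

assert abs(psnr_from_mse(0.01) - 20.0) < 1e-9  # an MSE of 0.01 gives 20 dB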
Example #32
    def test_forward_backward_fista(self):
        """
        Test forward-backward splitting solver with fista acceleration,
        solving problems with L1-norm, L2-norm, and dummy functions.

        """
        y = [4., 5., 6., 7.]
        solver = solvers.forward_backward(accel=acceleration.fista())
        param = {'solver': solver, 'rtol': 1e-6, 'verbosity': 'NONE'}

        # L2-norm prox and dummy gradient.
        f1 = functions.norm_l2(y=y)
        f2 = functions.dummy()
        ret = solvers.solve([f1, f2], np.zeros(len(y)), **param)
        nptest.assert_allclose(ret['sol'], y)
        self.assertEqual(ret['crit'], 'RTOL')
        self.assertEqual(ret['niter'], 60)

        # Dummy prox and L2-norm gradient.
        f1 = functions.dummy()
        f2 = functions.norm_l2(y=y, lambda_=0.6)
        ret = solvers.solve([f1, f2], np.zeros(len(y)), **param)
        nptest.assert_allclose(ret['sol'], y)
        self.assertEqual(ret['crit'], 'RTOL')
        self.assertEqual(ret['niter'], 84)

        # L2-norm prox and L2-norm gradient.
        f1 = functions.norm_l2(y=y)
        f2 = functions.norm_l2(y=y)
        ret = solvers.solve([f1, f2], np.zeros(len(y)), **param)
        nptest.assert_allclose(ret['sol'], y, rtol=1e-2)
        self.assertEqual(ret['crit'], 'MAXIT')
        self.assertEqual(ret['niter'], 200)

        # L1-norm prox and dummy gradient.
        f1 = functions.norm_l1(y=y)
        f2 = functions.dummy()
        ret = solvers.solve([f1, f2], np.zeros(len(y)), **param)
        nptest.assert_allclose(ret['sol'], y)
        self.assertEqual(ret['crit'], 'RTOL')
        self.assertEqual(ret['niter'], 6)

        # Dummy prox and L1-norm gradient. As L1-norm possesses no gradient,
        # the algorithm exchanges the functions: exact same solution.
        f1 = functions.dummy()
        f2 = functions.norm_l1(y=y)
        ret = solvers.solve([f1, f2], np.zeros(len(y)), **param)
        nptest.assert_allclose(ret['sol'], y)
        self.assertEqual(ret['crit'], 'RTOL')
        self.assertEqual(ret['niter'], 6)

        # L1-norm prox and L1-norm gradient. L1-norm possesses no gradient.
        f1 = functions.norm_l1(y=y)
        f2 = functions.norm_l1(y=y)
        self.assertRaises(ValueError, solvers.solve, [f1, f2],
                          np.zeros(len(y)), **param)

        # L1-norm prox and L2-norm gradient.
        f1 = functions.norm_l1(y=y, lambda_=1.0)
        f2 = functions.norm_l2(y=y, lambda_=0.8)
        ret = solvers.solve([f1, f2], np.zeros(len(y)), **param)
        nptest.assert_allclose(ret['sol'], y)
        self.assertEqual(ret['crit'], 'RTOL')
        self.assertEqual(ret['niter'], 10)
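Stripped of the test harness, the same FISTA-accelerated forward-backward setup looks like this (a minimal sketch; the values mirror the last case above):

import numpy as np
from pyunlocbox import acceleration, functions, solvers

y = np.array([4., 5., 6., 7.])
f1 = functions.norm_l1(y=y, lambda_=1.0)   # non-smooth term, handled via its prox
f2 = functions.norm_l2(y=y, lambda_=0.8)   # smooth term, handled via its gradient
solver = solvers.forward_backward(accel=acceleration.fista())
ret = solvers.solve([f1, f2], np.zeros(len(y)), solver, rtol=1e-6, verbosity='NONE')
print(ret['sol'], ret['crit'], ret['niter'])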
Example #33
    def test_mlfbf(self):
        """
        Test the MLFBF solver with arbitrarily selected functions.

        """
        x = [1., 1., 1.]
        L = np.array([[5, 9, 3], [7, 8, 5], [4, 4, 9], [0, 1, 7]])
        max_step = 1 / (1 + np.linalg.norm(L, 2))
        solver = solvers.mlfbf(L=L, step=max_step / 2.)
        params = {'solver': solver, 'verbosity': 'NONE'}

        def x0():
            return np.zeros(len(x))

        # L2-norm prox and dummy prox.
        f = functions.dummy()
        f._prox = lambda x, T: np.maximum(np.zeros(len(x)), x)
        g = functions.norm_l2(lambda_=0.5)
        h = functions.norm_l2(y=np.array([294, 390, 361]), lambda_=0.5)
        ret = solvers.solve([f, g, h], x0(), maxit=1000, rtol=0, **params)
        nptest.assert_allclose(ret['sol'], x, rtol=1e-5)

        # Same test, but with callable L
        solver = solvers.mlfbf(L=lambda x: np.dot(L, x),
                               Lt=lambda y: np.dot(L.T, y),
                               d0=np.dot(L, x0()),
                               step=max_step / 2.)
        ret = solvers.solve([f, g, h], x0(), maxit=1000, rtol=0, **params)
        nptest.assert_allclose(ret['sol'], x, rtol=1e-5)

        # Sanity check
        self.assertRaises(ValueError, solver.pre, [f, g], x0())

        # Make a second test where the solution is calculated by hand
        n = 10
        y = np.random.rand(n) * 2
        z = np.random.rand(n)
        c = 1

        delta = (y - z - c)**2 + 4 * (1 + y * z - z * c)
        sol = 0.5 * ((y - z - c) + np.sqrt(delta))

        class mlog(functions.func):
            def __init__(self, z):
                super().__init__()
                self.z = z

            def _eval(self, x):
                return -np.sum(np.log(x + self.z))

            def _prox(self, x, T):
                delta = (x - self.z)**2 + 4 * (T + x * self.z)
                sol = 0.5 * (x - self.z + np.sqrt(delta))
                return sol

        f = functions.norm_l1(lambda_=c)
        g = mlog(z=z)
        h = functions.norm_l2(lambda_=0.5, y=y)

        mu = 1 + 1
        step = 1 / mu / 2

        solver = solvers.mlfbf(step=step)
        ret = solvers.solve([f, g, h],
                            y.copy(),
                            solver,
                            maxit=200,
                            rtol=0,
                            verbosity="NONE")

        nptest.assert_allclose(ret["sol"], sol, atol=1e-10)

        # Make a final test where the function g cannot be evaluated
        # on the primal variables
        y = np.random.rand(3)
        y_2 = L.dot(y)
        L = np.array([[5, 9, 3], [7, 8, 5], [4, 4, 9], [0, 1, 7]])
        x0 = np.zeros(len(y))
        f = functions.norm_l1(y=y)
        g = functions.norm_l2(lambda_=0.5, y=y_2)
        h = functions.norm_l2(y=y, lambda_=0.5)
        max_step = 1 / (1 + np.linalg.norm(L, 2))
        solver = solvers.mlfbf(L=L, step=max_step / 2.)
        ret = solvers.solve([f, g, h], x0, solver, maxit=1000, rtol=0)
        np.testing.assert_allclose(ret["sol"], y)
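The closed-form prox implemented in mlog above can be checked against its optimality condition: for f(u) = -sum(log(u + z)), the prox at x with step T satisfies (u - x)(u + z) = T elementwise. A quick numerical check:

import numpy as np

rng = np.random.default_rng(0)
x, z, T = rng.random(5), rng.random(5), 0.3
delta = (x - z)**2 + 4 * (T + x * z)
u = 0.5 * (x - z + np.sqrt(delta))           # same formula as mlog._prox
np.testing.assert_allclose((u - x) * (u + z), T, atol=1e-12)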
Example #34
    def graph_pnorm_interpolation(self, gradient, P, w, labels_bin, x0=None, p=1., **kwargs):
        r"""
        Solve an interpolation problem via gradient p-norm minimization.

        A signal :math:`x` is estimated from its measurements :math:`y = A(x)` by solving
        :math:`\text{arg}\underset{z \in \mathbb{R}^n}{\min}
        \| \nabla_G z \|_p^p \text{ subject to } Az = y` 
        via a primal-dual, forward-backward-forward algorithm.

        Parameters
        ----------
        gradient : array_like
            A matrix representing the graph gradient operator
        P : callable
            Orthogonal projection operator mapping points :math:`z \in \mathbb{R}^n`
            onto the set satisfying :math:`A P(z) = A z`.
        w : array_like
            Weights passed through to the projection operator ``P``.
        labels_bin : array_like
            A vector that holds the binary labels.
        x0 : array_like, optional
            Initial point of the iteration. Must be of dimension n.
            (Default is `numpy.random.randn(n)`)
        p : {1., 2.}, optional
            Order of the norm applied to the graph gradient (default 1).
        kwargs :
            Additional solver parameters, such as maximum number of iterations
            (maxit), relative tolerance on the objective (rtol), and verbosity
            level (verbosity). See :func:`pyunlocbox.solvers.solve` for the full
            list of options.

        Returns
        -------
        x : array_like
            The solution to the optimization problem.

        """

        grad = lambda z: gradient.dot(z)
        div = lambda z: gradient.transpose().dot(z)

        # Indicator function of the set satisfying :math:`y = A(z)`
        f = functions.func()
        f._eval = lambda z: 0
        f._prox = lambda z, gamma: P(z, w, labels_bin)

        # :math:`\ell_1` norm of the dual variable :math:`d = \nabla_G z`
        g = functions.func()
        g._eval = lambda z: np.sum(np.abs(grad(z)))
        g._prox = lambda d, gamma: functions._soft_threshold(d, gamma)

        # :math:`\ell_2` norm of the gradient (for the smooth case)
        h = functions.norm_l2(A=grad, At=div)

        stepsize = (0.9 / (1. + scipy.sparse.linalg.norm(gradient, ord='fro'))) ** p

        solver = solvers.mlfbf(L=grad, Lt=div, step=stepsize)

        if p == 1.:
            problem = solvers.solve([f, g, functions.dummy()], x0=x0,
                                    solver=solver, **kwargs)
            return problem['sol']
        elif p == 2.:
            problem = solvers.solve([f, functions.dummy(), h], x0=x0,
                                    solver=solver, **kwargs)
            return problem['sol']
        else:
            return x0
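For a sampling-type measurement operator, the projection P expected by this function overwrites the labeled entries with their known values, i.e. it projects onto {u : u[labeled] = labels}. A minimal sketch (the convention that w > 0 marks labeled vertices is an assumption made for illustration, not part of the function's contract):

import numpy as np

def P(z, w, labels_bin):
    # Project z onto the set of signals that agree with the known labels.
    u = np.asarray(z, dtype=float).copy()
    labeled = np.asarray(w) > 0   # assumed convention: w flags labeled vertices
    u[labeled] = labels_bin[labeled]
    return u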
Example #35
import numpy as np

g = lambda x: mask * x  # masking operator; mask is assumed to be a binary array defined beforehand
im_masked = g(im_original)

from pyunlocbox import functions
f1 = functions.norm_tv(maxit=50, dim=2)

tau = 100
f2 = functions.norm_l2(y=im_masked, A=g, lambda_=tau)

from pyunlocbox import solvers
solver = solvers.forward_backward(step=0.5 / tau)

x0 = np.array(im_masked)  # Make a copy to preserve im_masked.
ret = solvers.solve([f1, f2], x0, solver, maxit=100)

import matplotlib.pyplot as plt
fig = plt.figure(figsize=(8, 2.5))
ax1 = fig.add_subplot(1, 3, 1)
_ = ax1.imshow(im_original, cmap='gray')
_ = ax1.axis('off')
_ = ax1.set_title('Original image')
ax2 = fig.add_subplot(1, 3, 2)
_ = ax2.imshow(im_masked, cmap='gray')
_ = ax2.axis('off')
_ = ax2.set_title('Masked image')
ax3 = fig.add_subplot(1, 3, 3)
_ = ax3.imshow(ret['sol'], cmap='gray')
_ = ax3.axis('off')
_ = ax3.set_title('Reconstructed image')
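To run this snippet end-to-end, im_original and mask must be defined before its first line; one way to fabricate them (purely illustrative):

import numpy as np

rng = np.random.default_rng(0)
im_original = rng.random((64, 64))                          # stand-in grayscale image in [0, 1]
mask = (rng.random(im_original.shape) > 0.3).astype(float)  # keep roughly 70% of the pixels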
Example #36
def run(scale=1.99,
        sigma_blur=0.1,
        noise_level_denoiser=0.005,
        num=None,
        method='FBS',
        pretrained_weights=True):
    if not os.path.isdir('results_conv'):
        os.mkdir('results_conv')
    # declare model
    act = tf.keras.activations.relu
    num_filters = 64
    max_dim = 128
    num_layers = 8
    sizes = [None] * (num_layers)
    conv_shapes = [(num_filters, max_dim)] * num_layers
    filter_length = 5
    model = StiefelModel(sizes,
                         None,
                         convolutional=True,
                         filter_length=filter_length,
                         dim=2,
                         conv_shapes=conv_shapes,
                         activation=act,
                         scale_layer=scale)
    pred = model(tf.random.normal((10, 40, 40)))
    model.fast_execution = True

    # load weights
    if pretrained_weights:
        file_name = 'data/pretrained_weights/scale' + str(
            scale) + '_noise_level' + str(noise_level_denoiser) + '.pickle'
    else:
        if num is None:
            file_name = 'results_conv/scale' + str(
                scale) + '_noise_level' + str(
                    noise_level_denoiser) + '/adam.pickle'
        else:
            file_name = 'results_conv/scale' + str(
                scale) + '_noise_level' + str(
                    noise_level_denoiser) + '/adam' + str(num) + '.pickle'
    with open(file_name, 'rb') as f:
        trainable_vars = pickle.load(f)
    for i in range(len(model.trainable_variables)):
        model.trainable_variables[i].assign(trainable_vars[i])
    beta = 1e8
    project = True
    if project:
        # project convolution matrices on the Stiefel manifold
        for i in range(len(model.stiefel)):
            convs = model.stiefel[i].convs
            smaller = convs.shape[0] < convs.shape[1]
            if smaller:
                convs = transpose_convs(convs)
            iden = np.zeros((convs.shape[1], convs.shape[1],
                             4 * filter_length + 1, 4 * filter_length + 1),
                            dtype=np.float32)
            for j in range(convs.shape[1]):
                iden[j, j, 2 * filter_length, 2 * filter_length] = 1
            iden = tf.constant(iden)
            C = tf.identity(convs)

            def projection_objective(C):
                return 0.5 * beta * tf.reduce_sum(
                    (conv_mult(transpose_convs(C), C) - iden)**
                    2) + .5 * tf.reduce_sum((C - convs)**2)

            for iteration in range(100):
                with tf.GradientTape(persistent=True) as tape:
                    tape.watch(C)
                    val = projection_objective(C)
                    grad = tape.gradient(val, C)
                    grad_sum = tf.reduce_sum(grad * grad)
                hess = tape.gradient(grad_sum, C)
                hess *= 0.5 / tf.sqrt(grad_sum)
                C -= grad / tf.sqrt(tf.reduce_sum(hess * hess))
            if smaller:
                C = transpose_convs(C)
            model.stiefel[i].convs.assign(C)

    # load data
    test_directory = 'data/BSD68'
    fileList = os.listdir(test_directory + '/')
    fileList.sort()
    img_names = fileList
    save_path = 'results_conv/PnP_blur_' + method + str(sigma_blur)
    if not os.path.isdir(save_path):
        os.mkdir(save_path)
    if not os.path.isdir(save_path + '/blurred_data'):
        os.mkdir(save_path + '/blurred_data')
    if not os.path.isdir(save_path + '/l2tv'):
        os.mkdir(save_path + '/l2tv')
    psnr_sum = 0.
    psnr_noisy_sum = 0.
    psnr_l2tv_sum = 0.
    error_sum = 0.
    error_bm3d_sum = 0.
    counter = 0
    sig = sigma_blur
    sig_sq = sig**2
    noise_level = 0.01
    kernel_width = 9
    x_range = 1. * np.array(range(kernel_width))
    kernel_x = np.tile(x_range[:, np.newaxis],
                       (1, kernel_width)) - .5 * (kernel_width - 1)
    y_range = 1. * np.array(range(kernel_width))
    kernel_y = np.tile(y_range[np.newaxis, :],
                       (kernel_width, 1)) - .5 * (kernel_width - 1)
    kernel = np.exp(-(kernel_x**2 + kernel_y**2) / (2 * sig_sq))
    kernel /= np.sum(kernel)
    kernel = tf.constant(kernel, dtype=tf.float32)
    myfile = open(save_path + "/psnrs.txt", "w")
    myfile.write("PSNRs:\n")
    myfile.close()
    np.random.seed(25)
    for name in img_names:
        # load image and compute blurred version
        counter += 1
        img = Image.open(test_directory + '/' + name)
        img = img.convert('L')
        img_gray = 1.0 * np.array(img)
        img_gray /= 255.0

        img_gray_pil = Image.fromarray(img_gray * 255.0)
        img_gray_pil = img_gray_pil.convert('RGB')
        img_gray_pil.save(save_path + '/original' + name)
        one_img = tf.ones(img_gray.shape)

        img_blurred = tf.nn.conv2d(
            tf.expand_dims(
                tf.expand_dims(tf.constant(img_gray, dtype=tf.float32), 0),
                -1), tf.expand_dims(tf.expand_dims(kernel, -1), -1), 1, 'SAME')
        img_blurred = tf.squeeze(img_blurred).numpy()
        ones_blurred = tf.nn.conv2d(
            tf.expand_dims(
                tf.expand_dims(tf.constant(one_img, dtype=tf.float32), 0), -1),
            tf.expand_dims(tf.expand_dims(kernel, -1), -1), 1, 'SAME')
        ones_blurred = tf.squeeze(ones_blurred).numpy()
        img_blurred /= ones_blurred
        noise = np.random.normal(0, 1, img_blurred.shape)
        img_blurred += noise_level * noise
        pad = kernel_width // 2
        img_obs = img_blurred[pad:-pad, pad:-pad]
        img_start = np.pad(img_obs, ((pad, pad), (pad, pad)), 'edge')
        img_obs_big = np.concatenate([
            np.zeros((img_obs.shape[0], pad)), img_obs,
            np.zeros((img_obs.shape[0], pad))
        ], 1)
        img_obs_big = np.concatenate([
            np.zeros((pad, img_obs_big.shape[1])), img_obs_big,
            np.zeros((pad, img_obs_big.shape[1]))
        ], 0)
        savemat(save_path + '/blurred_data/' + name[:-4] + '_blurred.mat',
                {'img_blur': (img_blurred) * 255})
        scalar = scale
        alpha_star = 0.5
        conv_coord = 1 - scalar + 2 * alpha_star * scalar

        # declare functions for PnP
        def my_f(signal, inp_signal):
            signal_blurred = tf.nn.conv2d(
                tf.expand_dims(signal, -1),
                tf.expand_dims(tf.expand_dims(kernel, -1), -1), 1, 'VALID')
            signal_blurred = tf.reshape(signal_blurred,
                                        signal_blurred.shape[:3])
            out = .5 * tf.reduce_sum((signal_blurred - img_obs)**2)
            return out

        def prox_my_f(signal, lam, inp_signal):
            out_signal = tf.identity(signal)
            for i in range(50):
                with tf.GradientTape(persistent=True) as tape:
                    tape.watch(out_signal)
                    term1 = my_f(out_signal, inp_signal)
                    term2 = .5 * tf.reduce_sum((out_signal - signal)**2)
                    objective = term1 / lam + term2
                    grad = tape.gradient(objective, out_signal)
                    grad_sum = tf.reduce_sum(grad**2)
                hess = .5 * tape.gradient(grad_sum,
                                          out_signal) / tf.sqrt(grad_sum)
                out_signal -= grad / tf.sqrt(tf.reduce_sum(hess**2))
            return out_signal

        def grad_f(signal):
            signal_blurred = tf.nn.conv2d(
                tf.expand_dims(signal, -1),
                tf.expand_dims(tf.expand_dims(kernel, -1), -1), 1, 'SAME')
            signal_blurred_minus_inp = tf.reshape(
                signal_blurred, signal_blurred.shape[:3]) - img_blurred

            AtA = tf.nn.conv2d(tf.expand_dims(signal_blurred_minus_inp, -1),
                               tf.expand_dims(tf.expand_dims(kernel, -1), -1),
                               1, 'SAME')
            AtA = tf.reshape(AtA, signal_blurred.shape[:3])
            return AtA

        #L2-TV
        def g(signal):
            signal_blurred = tf.nn.conv2d(
                tf.expand_dims(
                    tf.expand_dims(tf.constant(signal, tf.float32), -1), 0),
                tf.expand_dims(tf.expand_dims(kernel, -1), -1), 1, 'VALID')
            signal_blurred = tf.squeeze(signal_blurred)
            signal_blurred = np.concatenate([
                np.zeros((signal_blurred.shape[0], pad)),
                signal_blurred.numpy(),
                np.zeros((signal_blurred.shape[0], pad))
            ], 1)
            signal_blurred = np.concatenate([
                np.zeros((pad, signal_blurred.shape[1])), signal_blurred,
                np.zeros((pad, signal_blurred.shape[1]))
            ], 0)
            return signal_blurred

        f1 = functions.norm_tv(maxit=50, dim=2)
        l2tv_lambda = 0.001
        f2 = functions.norm_l2(y=img_obs_big, A=g, lambda_=1 / l2tv_lambda)
        solver = solvers.forward_backward(step=0.5 * l2tv_lambda)
        img_blurred2 = tf.identity(img_start).numpy()
        l2tv = solvers.solve([f1, f2],
                             img_blurred2,
                             solver,
                             maxit=100,
                             verbosity='NONE')
        l2tv = l2tv['sol']

        def my_T(inp, model):
            my_fac = 1.
            return (1 - 1 /
                    (conv_coord)) * l2tv + 1 / (conv_coord) * (inp - model(
                        (inp - .5) * my_fac))

        # Compute PnP result
        if method == 'FBS':
            pred = PnP_FBS(model,
                           l2tv[np.newaxis, :, :],
                           tau=1.9,
                           T_fun=my_T,
                           eps=1e-3,
                           fun=my_f)
        elif method == 'ADMM':
            pred = PnP_ADMM(l2tv[np.newaxis, :, :],
                            lambda x: my_T(x, model),
                            gamma=.52,
                            prox_fun=prox_my_f)
        else:
            raise ValueError('Unknown method!')

        # save results
        noisy = (img_start) * 255
        reconstructed = (tf.reshape(
            pred, [pred.shape[1], pred.shape[2]]).numpy()) * 255.
        img_gray = (img_gray) * 255.
        l2tv *= 255
        error_sum += tf.reduce_sum(
            ((reconstructed - img_gray) / 255.)**2).numpy()
        psnr = meanPSNR(
            tf.keras.backend.flatten(reconstructed[2 * pad:-2 * pad,
                                                   2 * pad:-2 * pad]).numpy() /
            255.0,
            tf.keras.backend.flatten(
                img_gray[2 * pad:-2 * pad, 2 * pad:-2 * pad]).numpy() / 255.0,
            one_dist=True)
        psnr_l2tv = meanPSNR(
            tf.keras.backend.flatten(l2tv[2 * pad:-2 * pad,
                                          2 * pad:-2 * pad]).numpy() / 255.0,
            tf.keras.backend.flatten(
                img_gray[2 * pad:-2 * pad, 2 * pad:-2 * pad]).numpy() / 255.0,
            one_dist=True)
        psnr_noisy = meanPSNR(
            tf.keras.backend.flatten(noisy[2 * pad:-2 * pad,
                                           2 * pad:-2 * pad]).numpy() / 255.0,
            tf.keras.backend.flatten(
                img_gray[2 * pad:-2 * pad, 2 * pad:-2 * pad]).numpy() / 255.0,
            one_dist=True)
        print('PSNR of ' + name + ':                    ' + str(psnr))
        print('PSNR L2TV of ' + name + ':               ' + str(psnr_l2tv))
        print('PSNR of noisy ' + name + ':              ' + str(psnr_noisy))
        psnr_sum += psnr
        psnr_noisy_sum += psnr_noisy
        psnr_l2tv_sum += psnr_l2tv
        print('Mean PSNR PPNN:      ' + str(psnr_sum / counter))
        print('Mean PSNR L2TV:      ' + str(psnr_l2tv_sum / counter))
        print('Mean PSNR noisy:     ' + str(psnr_noisy_sum / counter))
        myfile = open(save_path + "/psnrs.txt", "a")
        myfile.write('PSNR of ' + name + ':                    ' + str(psnr) +
                     '\n')
        myfile.write('PSNR L2TV of ' + name + ':               ' +
                     str(psnr_l2tv) + '\n')
        myfile.write('PSNR of noisy ' + name + ':              ' +
                     str(psnr_noisy) + '\n')
        myfile.close()
        img = Image.fromarray(noisy)
        img = img.convert('RGB')
        img.save(save_path + '/noisy' + name)
        img = Image.fromarray(l2tv)
        img = img.convert('RGB')
        img.save(save_path + '/l2tv/l2tv' + name)
        img = Image.fromarray(reconstructed)
        img = img.convert('RGB')
        img.save(save_path + '/reconstructed' + name)
    print('Mean PSNR on images: ' + str(psnr_sum / len(img_names)))
    print('Mean PSNR on noisy images: ' + str(psnr_noisy_sum / len(img_names)))
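A typical invocation of the pipeline above (assuming the data/BSD68 images and the pretrained-weight pickle referenced in run are present on disk):

if __name__ == '__main__':
    run(scale=1.99, sigma_blur=0.1, noise_level_denoiser=0.005,
        method='FBS', pretrained_weights=True)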
Example #37
    def test_solve(self):
        """
        Test some features of the solving function.

        """

        # We have to set a seed here for the random draw if we are required
        # below to assert that the numbers of iterations of the solvers are
        # equal to some specific values. Otherwise, we get trivial errors when
        # x0 is a little farther away from y in a given draw.
        rs = np.random.RandomState(42)

        y = 5 - 10 * rs.uniform(size=(15, 4))

        def x0():
            return np.zeros(y.shape)

        nverb = {'verbosity': 'NONE'}

        # Function verbosity.
        f = functions.dummy()
        self.assertEqual(f.verbosity, 'NONE')
        f.verbosity = 'LOW'
        solvers.solve([f], x0(), **nverb)
        self.assertEqual(f.verbosity, 'LOW')

        # Input parameters.
        self.assertRaises(ValueError, solvers.solve, [f], x0(), verbosity='??')

        # Addition of dummy function.
        self.assertRaises(ValueError, solvers.solve, [], x0(), **nverb)
        solver = solvers.forward_backward()
        solvers.solve([f], x0(), solver, **nverb)
        # self.assertIsInstance(solver.f1, functions.dummy)
        # self.assertIsInstance(solver.f2, functions.dummy)

        # Automatic solver selection.
        f0 = functions.func()
        f0._eval = lambda x: 0
        f0._grad = lambda x: x
        f1 = functions.func()
        f1._eval = lambda x: 0
        f1._grad = lambda x: x
        f1._prox = lambda x, T: x
        f2 = functions.func()
        f2._eval = lambda x: 0
        f2._prox = lambda x, T: x
        self.assertRaises(ValueError, solvers.solve, [f0, f0], x0(), **nverb)
        ret = solvers.solve([f0, f1], x0(), **nverb)
        self.assertEqual(ret['solver'], 'forward_backward')
        ret = solvers.solve([f1, f0], x0(), **nverb)
        self.assertEqual(ret['solver'], 'forward_backward')
        ret = solvers.solve([f1, f2], x0(), **nverb)
        self.assertEqual(ret['solver'], 'forward_backward')
        ret = solvers.solve([f2, f2], x0(), **nverb)
        self.assertEqual(ret['solver'], 'douglas_rachford')
        ret = solvers.solve([f1, f2, f0], x0(), **nverb)
        self.assertEqual(ret['solver'], 'generalized_forward_backward')

        # Stopping criteria.
        f = functions.norm_l2(y=y)
        tol = 1e-6
        r = solvers.solve([f], x0(), None, tol, None, None, None, None, 'NONE')
        self.assertEqual(r['crit'], 'ATOL')
        self.assertLess(np.sum(r['objective'][-1]), tol)
        self.assertEqual(r['niter'], 9)
        tol = 1e-8
        r = solvers.solve([f], x0(), None, None, tol, None, None, None, 'NONE')
        self.assertEqual(r['crit'], 'DTOL')
        err = np.abs(np.sum(r['objective'][-1]) - np.sum(r['objective'][-2]))
        self.assertLess(err, tol)
        self.assertEqual(r['niter'], 17)
        tol = .1
        r = solvers.solve([f], x0(), None, None, None, tol, None, None, 'NONE')
        self.assertEqual(r['crit'], 'RTOL')
        err = np.abs(np.sum(r['objective'][-1]) - np.sum(r['objective'][-2]))
        err /= np.sum(r['objective'][-1])
        self.assertLess(err, tol)
        self.assertEqual(r['niter'], 13)
        tol = 1e-4
        r = solvers.solve([f], x0(), None, None, None, None, tol, None, 'NONE')
        self.assertEqual(r['crit'], 'XTOL')
        r2 = solvers.solve([f], x0(), maxit=r['niter'] - 1, **nverb)
        err = np.linalg.norm(r['sol'] - r2['sol']) / np.sqrt(x0().size)
        self.assertLess(err, tol)
        self.assertEqual(r['niter'], 14)
        nit = 15
        r = solvers.solve([f], x0(), None, None, None, None, None, nit, 'NONE')
        self.assertEqual(r['crit'], 'MAXIT')
        self.assertEqual(r['niter'], nit)

        # Return values.
        f = functions.norm_l2(y=y)
        ret = solvers.solve([f], x0(), **nverb)
        self.assertEqual(len(ret), 6)
        self.assertIsInstance(ret['sol'], np.ndarray)
        self.assertIsInstance(ret['solver'], str)
        self.assertIsInstance(ret['crit'], str)
        self.assertIsInstance(ret['niter'], int)
        self.assertIsInstance(ret['time'], float)
        self.assertIsInstance(ret['objective'], list)
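The positional calls above exercise the stopping criteria one by one; the equivalent keyword form is easier to read. The argument names below are inferred from the positional order and the criteria the test asserts ('ATOL', 'DTOL', 'RTOL', 'XTOL', 'MAXIT'); rtol is disabled where another criterion should fire, since it has a nonzero default:

r = solvers.solve([f], x0(), atol=1e-6, rtol=None, verbosity='NONE')  # stops on 'ATOL'
r = solvers.solve([f], x0(), dtol=1e-8, rtol=None, verbosity='NONE')  # stops on 'DTOL'
r = solvers.solve([f], x0(), rtol=0.1, verbosity='NONE')              # stops on 'RTOL'
r = solvers.solve([f], x0(), xtol=1e-4, rtol=None, verbosity='NONE')  # stops on 'XTOL'
r = solvers.solve([f], x0(), maxit=15, rtol=None, verbosity='NONE')   # stops on 'MAXIT'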
Example #38
    plt.figure(14)
    plt.title("Estimated Super resolution gaussian - Wiener filter")
    plt.imshow(H_estimated_wf_gaussian, cmap='gray')
    plt.figure(15)
    plt.title("Estimated Super resolution box - Wiener filter")
    plt.imshow(H_estimated_wf_box, cmap='gray')

    # Task 5.2
    tau = 100

    g = lambda H:  signal.convolve2d(H, K_gaussian, boundary='symm', mode='same')
    l_blurred_cpy = np.array(blurred_image_gaussian_l)
    tv_prior_f = functions.norm_tv(maxit=50, dim=2)
    norm_l2_f = functions.norm_l2(y=l_blurred_cpy, A=g, lambda_=tau)
    solver = solvers.forward_backward(step=0.0001 / tau)
    H_estimated_lms_tv_gaussian = solvers.solve([tv_prior_f, norm_l2_f], l_blurred_cpy, solver, maxit=100)

    g = lambda H:  signal.convolve2d(H, K_box, boundary='symm', mode='same')
    l_blurred_cpy = np.array(blurred_image_box_l)
    tv_prior_f = functions.norm_tv(maxit=50, dim=2)
    norm_l2_f = functions.norm_l2(y=l_blurred_cpy, A=g, lambda_=tau)
    solver = solvers.forward_backward(step=0.0001 / tau)
    H_estimated_lms_tv_box = solvers.solve([tv_prior_f, norm_l2_f], l_blurred_cpy, solver, maxit=100)

    plt.figure(16)
    plt.title("Estimated Super resolution gaussian - Least mean square with TV prior")
    plt.imshow(H_estimated_lms_tv_gaussian['sol'], cmap='gray')

    plt.figure(17)
    plt.title("Estimated Super resolution box - Least mean square with TV prior")
    plt.imshow(H_estimated_lms_tv_box['sol'], cmap='gray')
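The step 0.0001 / tau used here is quite conservative: forward-backward only needs step <= 1 / L, where L = 2 * tau * ||A||**2 is the Lipschitz constant of the gradient of the L2 term, and for convolution with a normalized non-negative kernel ||A|| is essentially 1. When in doubt, ||A|| can be estimated by power iteration (a sketch; the symmetric kernels above make the operator approximately self-adjoint, so At can be taken equal to g):

import numpy as np

def operator_norm(A, At, shape, n_iter=50, seed=0):
    # Power iteration on At(A(.)) to estimate the spectral norm of A.
    rng = np.random.default_rng(seed)
    x = rng.standard_normal(shape)
    for _ in range(n_iter):
        x = At(A(x))
        x /= np.linalg.norm(x)
    return np.sqrt(np.linalg.norm(At(A(x))))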
Example #39
def log_degree_barrier(X,
                       dist_type='sqeuclidean',
                       alpha=1,
                       beta=1,
                       step=0.5,
                       w0=None,
                       maxit=1000,
                       rtol=1e-5,
                       retall=False,
                       verbosity='NONE'):
    r"""
    Learn graph by imposing a log barrier on the degrees

    This is done by solving
    :math:`\tilde{W} = \underset{W \in \mathcal{W}_m}{\text{arg}\min} \,
    \|W \odot Z\|_{1,1} - \alpha \mathbf{1}^{T} \log(W \mathbf{1}) + \beta \|W\|_{F}^{2}`,
    where :math:`Z` is a pairwise distance matrix, and :math:`\mathcal{W}_m`
    is the set of valid symmetric weighted adjacency matrices.

    Parameters
    ----------
    X : array_like
        An N-by-M data matrix of N variable observations in an M-dimensional
        space. The learned graph will have N nodes.
    dist_type : string
        Type of pairwise distance between variables. See
        :func:`spatial.distance.pdist` for the possible options.
    alpha : float, optional
        Regularization parameter acting on the log barrier
    beta : float, optional
        Regularization parameter controlling the density of the graph
    step : float, optional
        A number between 0 and 1 defining a stepsize value in the admissible
        stepsize interval (see [Komodakis & Pesquet, 2015], Algorithm 6)
    w0 : array_like, optional
        Initialization of the edge weights. Must be an N(N-1)/2-dimensional
        vector.
    maxit : int, optional
        Maximum number of iterations.
    rtol : float, optional
        Stopping criterion. Relative tolerance between successive updates.
    retall : boolean
        Return solution and problem details. See output of
        :func:`pyunlocbox.solvers.solve`.
    verbosity : {'NONE', 'LOW', 'HIGH', 'ALL'}, optional
        Level of verbosity of the solver. See :func:`pyunlocbox.solvers.solve`.

    Returns
    -------
    W : array_like
        Learned weighted adjacency matrix
    problem : dict, optional
        Information about the solution of the optimization. Only returned if
        retall == True.

    Notes
    -----
    This is the solver proposed in [Kalofolias, 2016] :cite:`kalofolias2016`.


    Examples
    --------
    >>> import learn_graph as lg
    >>> import networkx as nx
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from scipy import spatial
    >>> G_gt = nx.waxman_graph(100)
    >>> pos = nx.random_layout(G_gt)
    >>> coords = np.array(list(pos.values()))
    >>> def s1(x, y):
    ...     return np.sin((2 - x - y)**2)
    >>> def s2(x, y):
    ...     return np.cos((x + y)**2)
    >>> def s3(x, y):
    ...     return (x - 0.5)**2 + (y - 0.5)**3 + x - y
    >>> def s4(x, y):
    ...     return np.sin(3 * ((x - 0.5)**2 + (y - 0.5)**2))
    >>> X = np.array((s1(coords[:, 0], coords[:, 1]),
    ...               s2(coords[:, 0], coords[:, 1]),
    ...               s3(coords[:, 0], coords[:, 1]),
    ...               s4(coords[:, 0], coords[:, 1]))).T
    >>> z = 25 * spatial.distance.pdist(X, 'sqeuclidean')
    >>> W = lg.log_degree_barrier(z)
    >>> W[W < np.percentile(W, 96)] = 0
    >>> G_learned = nx.from_numpy_matrix(W)
    >>> plt.figure(figsize=(12, 6))
    >>> plt.subplot(1,2,1)
    >>> nx.draw(G_gt, pos=pos)
    >>> plt.title('Ground Truth')
    >>> plt.subplot(1,2,2)
    >>> nx.draw(G_learned, pos=pos)
    >>> plt.title('Learned')
    """

    # Parse X
    N = X.shape[0]
    z = spatial.distance.pdist(X, dist_type)  # Pairwise distances

    # Parse stepsize
    if (step <= 0) or (step > 1):
        raise ValueError("step must be a number between 0 and 1.")

    # Parse initial weights
    w0 = np.zeros(z.shape) if w0 is None else w0
    if (w0.shape != z.shape):
        raise ValueError("w0 must be of dimension N(N-1)/2.")

    # Get primal-dual linear map
    K, Kt = utils.weight2degmap(N)
    norm_K = np.sqrt(2 * (N - 1))

    # Assemble functions in the objective
    f1 = functions.func()
    f1._eval = lambda w: 2 * np.dot(w, z)
    f1._prox = lambda w, gamma: np.maximum(0, w - (2 * gamma * z))

    f2 = functions.func()
    f2._eval = lambda w: -alpha * np.sum(
        np.log(np.maximum(np.finfo(np.float64).eps, K(w))))
    f2._prox = lambda d, gamma: np.maximum(
        0, 0.5 * (d + np.sqrt(d**2 + (4 * alpha * gamma))))

    f3 = functions.func()
    f3._eval = lambda w: beta * np.sum(w**2)
    f3._grad = lambda w: 2 * beta * w
    lipg = 2 * beta

    # Rescale stepsize
    stepsize = step / (1 + lipg + norm_K)

    # Solve problem
    solver = solvers.mlfbf(L=K, Lt=Kt, step=stepsize)
    problem = solvers.solve([f1, f2, f3],
                            x0=w0,
                            solver=solver,
                            maxit=maxit,
                            rtol=rtol,
                            verbosity=verbosity)

    # Transform weight matrix from vector form to matrix form
    W = spatial.distance.squareform(problem['sol'])

    if retall:
        return W, problem
    else:
        return W
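utils.weight2degmap(N) supplies the linear map K from the condensed weight vector w to the node degrees, together with its adjoint Kt. What K computes is easy to restate via squareform (a sketch for explanation only; the actual implementation lives in the utils module):

import numpy as np
from scipy import spatial

def degrees_from_weights(w):
    # K(w): node degrees of the graph whose condensed weight vector is w.
    W = spatial.distance.squareform(w)  # (N, N) symmetric, zero diagonal
    return W.sum(axis=1)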
Example #40
    def test_forward_backward_fista(self):
        """
        Test forward-backward splitting solver with fista acceleration,
        solving problems with L1-norm, L2-norm, and dummy functions.

        """
        y = [4., 5., 6., 7.]
        solver = solvers.forward_backward(accel=acceleration.fista())
        param = {'solver': solver, 'rtol': 1e-6, 'verbosity': 'NONE'}

        # L2-norm prox and dummy gradient.
        f1 = functions.norm_l2(y=y)
        f2 = functions.dummy()
        ret = solvers.solve([f1, f2], np.zeros(len(y)), **param)
        nptest.assert_allclose(ret['sol'], y)
        self.assertEqual(ret['crit'], 'RTOL')
        self.assertEqual(ret['niter'], 60)

        # Dummy prox and L2-norm gradient.
        f1 = functions.dummy()
        f2 = functions.norm_l2(y=y, lambda_=0.6)
        ret = solvers.solve([f1, f2], np.zeros(len(y)), **param)
        nptest.assert_allclose(ret['sol'], y)
        self.assertEqual(ret['crit'], 'RTOL')
        self.assertEqual(ret['niter'], 84)

        # L2-norm prox and L2-norm gradient.
        f1 = functions.norm_l2(y=y)
        f2 = functions.norm_l2(y=y)
        ret = solvers.solve([f1, f2], np.zeros(len(y)), **param)
        nptest.assert_allclose(ret['sol'], y, rtol=1e-2)
        self.assertEqual(ret['crit'], 'MAXIT')
        self.assertEqual(ret['niter'], 200)

        # L1-norm prox and dummy gradient.
        f1 = functions.norm_l1(y=y)
        f2 = functions.dummy()
        ret = solvers.solve([f1, f2], np.zeros(len(y)), **param)
        nptest.assert_allclose(ret['sol'], y)
        self.assertEqual(ret['crit'], 'RTOL')
        self.assertEqual(ret['niter'], 6)

        # Dummy prox and L1-norm gradient. As L1-norm possesses no gradient,
        # the algorithm exchanges the functions: exact same solution.
        f1 = functions.dummy()
        f2 = functions.norm_l1(y=y)
        ret = solvers.solve([f1, f2], np.zeros(len(y)), **param)
        nptest.assert_allclose(ret['sol'], y)
        self.assertEqual(ret['crit'], 'RTOL')
        self.assertEqual(ret['niter'], 6)

        # L1-norm prox and L1-norm gradient. L1-norm possesses no gradient.
        f1 = functions.norm_l1(y=y)
        f2 = functions.norm_l1(y=y)
        self.assertRaises(ValueError, solvers.solve,
                          [f1, f2], np.zeros(len(y)), **param)

        # L1-norm prox and L2-norm gradient.
        f1 = functions.norm_l1(y=y, lambda_=1.0)
        f2 = functions.norm_l2(y=y, lambda_=0.8)
        ret = solvers.solve([f1, f2], np.zeros(len(y)), **param)
        nptest.assert_allclose(ret['sol'], y)
        self.assertEqual(ret['crit'], 'RTOL')
        self.assertEqual(ret['niter'], 10)
Example #41
def l2_degree_reg(X,
                  dist_type='sqeuclidean',
                  alpha=1,
                  s=None,
                  step=0.5,
                  w0=None,
                  maxit=1000,
                  rtol=1e-5,
                  retall=False,
                  verbosity='NONE'):
    r"""
    Learn graph by regularizing the l2-norm of the degrees.

    This is done by solving
    :math:`\tilde{W} = \underset{W \in \mathcal{W}_m}{\text{arg}\min} \,
    \|W \odot Z\|_{1,1} + \alpha \|W \mathbf{1}\|^2 + \alpha \|W\|_{F}^{2}`,
    subject to :math:`\|W\|_{1,1} = s`, where :math:`Z` is a pairwise distance
    matrix, and :math:`\mathcal{W}_m` is the set of valid symmetric weighted
    adjacency matrices.

    Parameters
    ----------
    X : array_like
        An N-by-M data matrix of N variable observations in an M-dimensional
        space. The learned graph will have N nodes.
    dist_type : string
        Type of pairwise distance between variables. See
        :func:`spatial.distance.pdist` for the possible options.
    alpha : float, optional
        Regularization parameter acting on the l2-norm.
    s : float, optional
        The "sparsity level" of the weight matrix, as measured by its l1-norm.
    step : float, optional
        A number between 0 and 1 defining a stepsize value in the admissible
        stepsize interval (see [Komodakis & Pesquet, 2015], Algorithm 6)
    w0 : array_like, optional
        Initialization of the edge weights. Must be an N(N-1)/2-dimensional
        vector.
    maxit : int, optional
        Maximum number of iterations.
    rtol : float, optional
        Stopping criterion. Relative tolerance between successive updates.
    retall : boolean
        Return solution and problem details. See output of
        :func:`pyunlocbox.solvers.solve`.
    verbosity : {'NONE', 'LOW', 'HIGH', 'ALL'}, optional
        Level of verbosity of the solver. See :func:`pyunlocbox.solvers.solve`.

    Returns
    -------
    W : array_like
        Learned weighted adjacency matrix
    problem : dict, optional
        Information about the solution of the optimization. Only returned if
        retall == True.

    Notes
    -----
    This is the problem proposed in [Dong et al., 2015].


    Examples
    --------

    """

    # Parse X
    N = X.shape[0]
    E = int(N * (N - 1.) / 2.)
    z = spatial.distance.pdist(X, dist_type)  # Pairwise distances

    # Parse s
    s = N if s is None else s

    # Parse step
    if (step <= 0) or (step > 1):
        raise ValueError("step must be a number between 0 and 1.")

    # Parse initial weights
    w0 = np.zeros(z.shape) if w0 is None else w0
    if (w0.shape != z.shape):
        raise ValueError("w0 must be of dimension N(N-1)/2.")

    # Get primal-dual linear map
    one_vec = np.ones(E)

    def K(w):
        return np.array([2 * np.dot(one_vec, w)])

    def Kt(n):
        return 2 * n * one_vec

    norm_K = 2 * np.sqrt(E)

    # Get weight-to-degree map
    S, St = utils.weight2degmap(N)

    # Assemble functions in the objective
    f1 = functions.func()
    f1._eval = lambda w: 2 * np.dot(w, z)
    f1._prox = lambda w, gamma: np.maximum(0, w - (2 * gamma * z))

    f2 = functions.func()
    f2._eval = lambda w: 0.
    f2._prox = lambda d, gamma: s

    f3 = functions.func()
    f3._eval = lambda w: alpha * (2 * np.sum(w**2) + np.sum(S(w)**2))
    f3._grad = lambda w: alpha * (4 * w + St(S(w)))
    lipg = 2 * alpha * (N + 1)

    # Rescale stepsize
    stepsize = step / (1 + lipg + norm_K)

    # Solve problem
    solver = solvers.mlfbf(L=K, Lt=Kt, step=stepsize)
    problem = solvers.solve([f1, f2, f3],
                            x0=w0,
                            solver=solver,
                            maxit=maxit,
                            rtol=rtol,
                            verbosity=verbosity)

    # Transform weight matrix from vector form to matrix form
    W = spatial.distance.squareform(problem['sol'])

    if retall:
        return W, problem
    else:
        return W
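The norm_K = 2 * sqrt(E) used above is exact: K is the rank-one map w -> [2 * sum(w)], whose operator norm is 2 * ||ones(E)|| = 2 * sqrt(E). A one-line numerical check:

import numpy as np

E = 45                       # e.g. N = 10 nodes
K_mat = 2 * np.ones((1, E))  # matrix of the map w -> [2 * sum(w)]
assert np.isclose(np.linalg.norm(K_mat, 2), 2 * np.sqrt(E))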
Example #42
    def test_solve(self):
        """
        Test some features of the solving function.

        """

        # We have to set a seed here for the random draw if we are required
        # below to assert that the numbers of iterations of the solvers are
        # equal to some specific values. Otherwise, we get trivial errors when
        # x0 is a little farther away from y in a given draw.
        rs = np.random.RandomState(42)

        y = 5 - 10 * rs.uniform(size=(15, 4))

        def x0(): return np.zeros(y.shape)
        nverb = {'verbosity': 'NONE'}

        # Function verbosity.
        f = functions.dummy()
        self.assertEqual(f.verbosity, 'NONE')
        f.verbosity = 'LOW'
        solvers.solve([f], x0(), **nverb)
        self.assertEqual(f.verbosity, 'LOW')

        # Input parameters.
        self.assertRaises(ValueError, solvers.solve, [f], x0(), verbosity='??')

        # Addition of dummy function.
        self.assertRaises(ValueError, solvers.solve, [], x0(), **nverb)
        solver = solvers.forward_backward()
        solvers.solve([f], x0(), solver, **nverb)
        # self.assertIsInstance(solver.f1, functions.dummy)
        # self.assertIsInstance(solver.f2, functions.dummy)

        # Automatic solver selection.
        f0 = functions.func()
        f0._eval = lambda x: 0
        f0._grad = lambda x: x
        f1 = functions.func()
        f1._eval = lambda x: 0
        f1._grad = lambda x: x
        f1._prox = lambda x, T: x
        f2 = functions.func()
        f2._eval = lambda x: 0
        f2._prox = lambda x, T: x
        self.assertRaises(ValueError, solvers.solve, [f0, f0], x0(), **nverb)
        ret = solvers.solve([f0, f1], x0(), **nverb)
        self.assertEqual(ret['solver'], 'forward_backward')
        ret = solvers.solve([f1, f0], x0(), **nverb)
        self.assertEqual(ret['solver'], 'forward_backward')
        ret = solvers.solve([f1, f2], x0(), **nverb)
        self.assertEqual(ret['solver'], 'forward_backward')
        ret = solvers.solve([f2, f2], x0(), **nverb)
        self.assertEqual(ret['solver'], 'douglas_rachford')
        ret = solvers.solve([f1, f2, f0], x0(), **nverb)
        self.assertEqual(ret['solver'], 'generalized_forward_backward')

        # Stopping criteria.
        f = functions.norm_l2(y=y)
        tol = 1e-6
        r = solvers.solve([f], x0(), None, tol, None, None, None, None, 'NONE')
        self.assertEqual(r['crit'], 'ATOL')
        self.assertLess(np.sum(r['objective'][-1]), tol)
        self.assertEqual(r['niter'], 9)
        tol = 1e-8
        r = solvers.solve([f], x0(), None, None, tol, None, None, None, 'NONE')
        self.assertEqual(r['crit'], 'DTOL')
        err = np.abs(np.sum(r['objective'][-1]) - np.sum(r['objective'][-2]))
        self.assertLess(err, tol)
        self.assertEqual(r['niter'], 17)
        tol = .1
        r = solvers.solve([f], x0(), None, None, None, tol, None, None, 'NONE')
        self.assertEqual(r['crit'], 'RTOL')
        err = np.abs(np.sum(r['objective'][-1]) - np.sum(r['objective'][-2]))
        err /= np.sum(r['objective'][-1])
        self.assertLess(err, tol)
        self.assertEqual(r['niter'], 13)
        tol = 1e-4
        r = solvers.solve([f], x0(), None, None, None, None, tol, None, 'NONE')
        self.assertEqual(r['crit'], 'XTOL')
        r2 = solvers.solve([f], x0(), maxit=r['niter'] - 1, **nverb)
        err = np.linalg.norm(r['sol'] - r2['sol']) / np.sqrt(x0().size)
        self.assertLess(err, tol)
        self.assertEqual(r['niter'], 14)
        nit = 15
        r = solvers.solve([f], x0(), None, None, None, None, None, nit, 'NONE')
        self.assertEqual(r['crit'], 'MAXIT')
        self.assertEqual(r['niter'], nit)

        # Return values.
        f = functions.norm_l2(y=y)
        ret = solvers.solve([f], x0(), **nverb)
        self.assertEqual(len(ret), 6)
        self.assertIsInstance(ret['sol'], np.ndarray)
        self.assertIsInstance(ret['solver'], str)
        self.assertIsInstance(ret['crit'], str)
        self.assertIsInstance(ret['niter'], int)
        self.assertIsInstance(ret['time'], float)
        self.assertIsInstance(ret['objective'], list)
Example #43
def classification_tikhonov_simplex(G, y, M, tau=0.1, **kwargs):
    r"""Solve a classification problem on graph via Tikhonov minimization
    with simple constraints.

    The function first transforms :math:`y` in logits :math:`Y`, then solves

    .. math:: \operatorname*{arg\,min}_X \| M X - Y \|_2^2
              + \tau \operatorname{tr}(X^T L X)
              \text{ s.t. } X \mathbf{1} = \mathbf{1} \text{ and } X \geq 0,

    where :math:`X` and :math:`Y` are logits.

    Parameters
    ----------
    G : :class:`pygsp.graphs.Graph`
    y : array, length G.n_vertices
        Measurements.
    M : array of boolean, length G.n_vertices
        Masking vector.
    tau : float
        Regularization parameter.
    kwargs : dict
        Parameters for :func:`pyunlocbox.solvers.solve`.

    Returns
    -------
    logits : array, length G.n_vertices
        The logits :math:`X`.

    Examples
    --------
    >>> from pygsp import graphs, learning
    >>> import matplotlib.pyplot as plt
    >>>
    >>> G = graphs.Logo()
    >>> G.estimate_lmax()

    Create a ground truth signal:

    >>> signal = np.zeros(G.n_vertices)
    >>> signal[G.info['idx_s']] = 1
    >>> signal[G.info['idx_p']] = 2

    Construct a measurement signal from a binary mask:

    >>> rs = np.random.RandomState(42)
    >>> mask = rs.uniform(0, 1, G.n_vertices) > 0.5
    >>> measures = signal.copy()
    >>> measures[~mask] = np.nan

    Solve the classification problem by reconstructing the signal:

    >>> recovery = learning.classification_tikhonov_simplex(
    ...     G, measures, mask, tau=0.1, verbosity='NONE')

    Plot the results.
    Note that we recover the class with ``np.argmax(recovery, axis=1)``.

    >>> prediction = np.argmax(recovery, axis=1)
    >>> fig, ax = plt.subplots(2, 3, sharey=True, figsize=(10, 6))
    >>> _ = G.plot_signal(signal, ax=ax[0, 0], title='Ground truth')
    >>> _ = G.plot_signal(measures, ax=ax[0, 1], title='Measurements')
    >>> _ = G.plot_signal(prediction, ax=ax[0, 2], title='Recovered class')
    >>> _ = G.plot_signal(recovery[:, 0], ax=ax[1, 0], title='Logit 0')
    >>> _ = G.plot_signal(recovery[:, 1], ax=ax[1, 1], title='Logit 1')
    >>> _ = G.plot_signal(recovery[:, 2], ax=ax[1, 2], title='Logit 2')
    >>> _ = fig.tight_layout()

    """

    functions, solvers = _import_pyunlocbox()

    if tau <= 0:
        raise ValueError('Tau should be greater than 0.')

    y = y.copy()
    y[~M] = 0
    Y = _to_logits(y.astype(int))  # np.int was removed in NumPy 1.24
    Y[~M, :] = 0

    def proj_simplex(y):
        d = y.shape[1]
        a = np.ones(d)
        idx = np.argsort(y)

        def evalpL(y, k, idx):
            return np.sum(y[idx[k:]] - y[idx[k]]) - 1

        def bisectsearch(idx, y):
            idxL, idxH = 0, d - 1
            L = evalpL(y, idxL, idx)
            H = evalpL(y, idxH, idx)

            if L < 0:
                return idxL

            while (idxH - idxL) > 1:
                iMid = int((idxL + idxH) / 2)
                M = evalpL(y, iMid, idx)

                if M > 0:
                    idxL, L = iMid, M
                else:
                    idxH, H = iMid, M

            return idxH

        def proj(idx, y):
            k = bisectsearch(idx, y)
            lam = (np.sum(y[idx[k:]]) - 1) / (d - k)
            return np.maximum(0, y - lam)

        x = np.empty_like(y)
        for i in range(len(y)):
            x[i] = proj(idx[i], y[i])
        # x = np.stack(map(proj, idx, y))

        return x

    def smooth_eval(x):
        xTLx = np.sum(x * (G.L.dot(x)))
        e = M * ((M * x.T) - Y.T)
        l2 = np.sum(e * e)
        return tau * xTLx + l2

    def smooth_grad(x):
        return 2 * ((M * (M * x.T - Y.T)).T + tau * G.L * x)

    f1 = functions.func()
    f1._eval = smooth_eval
    f1._grad = smooth_grad

    f2 = functions.func()
    f2._eval = lambda x: 0  # Indicator functions evaluate to zero.
    f2._prox = lambda x, step: proj_simplex(x)

    step = 0.5 / (1 + tau * G.lmax)
    solver = solvers.forward_backward(step=step)
    ret = solvers.solve([f1, f2], Y.copy(), solver, **kwargs)
    return ret['sol']
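proj_simplex should return rows that are non-negative and sum to one. The standard sort-based simplex projection computes the same result as the bisection above and is easy to sanity-check (a self-contained sketch, since proj_simplex is nested inside the function):

import numpy as np

def project_rows_onto_simplex(y):
    # Euclidean projection of each row of y onto {x : x >= 0, sum(x) = 1}.
    d = y.shape[1]
    u = np.sort(y, axis=1)[:, ::-1]                 # rows sorted descending
    css = np.cumsum(u, axis=1) - 1
    cond = u - css / np.arange(1, d + 1) > 0
    rho = d - 1 - np.argmax(cond[:, ::-1], axis=1)  # last index where cond holds
    lam = css[np.arange(len(y)), rho] / (rho + 1)
    return np.maximum(y - lam[:, None], 0)

Y = np.random.default_rng(0).normal(size=(4, 3))
X = project_rows_onto_simplex(Y)
assert np.all(X >= 0) and np.allclose(X.sum(axis=1), 1)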
Example #44
def classification_tikhonov_simplex(G, y, M, tau=0.1, **kwargs):
    r"""Solve a classification problem on graph via Tikhonov minimization
    with simple constraints.

    The function first transforms :math:`y` in logits :math:`Y`, then solves

    .. math:: \operatorname*{arg\,min}_X \| M X - Y \|_2^2
              + \tau \operatorname{tr}(X^T L X)
              \text{ s.t. } X \mathbf{1} = \mathbf{1} \text{ and } X \geq 0,

    where :math:`X` and :math:`Y` are logits.

    Parameters
    ----------
    G : :class:`pygsp.graphs.Graph`
    y : array, length G.n_vertices
        Measurements.
    M : array of boolean, length G.n_vertices
        Masking vector.
    tau : float
        Regularization parameter.
    kwargs : dict
        Parameters for :func:`pyunlocbox.solvers.solve`.

    Returns
    -------
    logits : array, length G.n_vertices
        The logits :math:`X`.

    Examples
    --------
    >>> from pygsp import graphs, learning
    >>> import matplotlib.pyplot as plt
    >>>
    >>> G = graphs.Logo()
    >>> G.estimate_lmax()

    Create a ground truth signal:

    >>> signal = np.zeros(G.n_vertices)
    >>> signal[G.info['idx_s']] = 1
    >>> signal[G.info['idx_p']] = 2

    Construct a measurement signal from a binary mask:

    >>> rs = np.random.RandomState(42)
    >>> mask = rs.uniform(0, 1, G.n_vertices) > 0.5
    >>> measures = signal.copy()
    >>> measures[~mask] = np.nan

    Solve the classification problem by reconstructing the signal:

    >>> recovery = learning.classification_tikhonov_simplex(
    ...     G, measures, mask, tau=0.1, verbosity='NONE')

    Plot the results.
    Note that we recover the class with ``np.argmax(recovery, axis=1)``.

    >>> prediction = np.argmax(recovery, axis=1)
    >>> fig, ax = plt.subplots(2, 3, sharey=True, figsize=(10, 6))
    >>> _ = G.plot_signal(signal, ax=ax[0, 0], title='Ground truth')
    >>> _ = G.plot_signal(measures, ax=ax[0, 1], title='Measurements')
    >>> _ = G.plot_signal(prediction, ax=ax[0, 2], title='Recovered class')
    >>> _ = G.plot_signal(recovery[:, 0], ax=ax[1, 0], title='Logit 0')
    >>> _ = G.plot_signal(recovery[:, 1], ax=ax[1, 1], title='Logit 1')
    >>> _ = G.plot_signal(recovery[:, 2], ax=ax[1, 2], title='Logit 2')
    >>> _ = fig.tight_layout()

    """

    functions, solvers = _import_pyunlocbox()

    if tau <= 0:
        raise ValueError('Tau should be greater than 0.')

    y = y.copy()  # avoid mutating the caller's measurements in place
    y[~M] = 0
    Y = _to_logits(y.astype(int))  # np.int was removed in NumPy 1.24
    Y[~M, :] = 0

    def proj_simplex(y):
        d = y.shape[1]
        a = np.ones(d)
        idx = np.argsort(y)

        def evalpL(y, k, idx):
            return np.sum(y[idx[k:]] - y[idx[k]]) - 1

        def bisectsearch(idx, y):
            idxL, idxH = 0, d-1
            L = evalpL(y, idxL, idx)
            H = evalpL(y, idxH, idx)

            if L < 0:
                return idxL

            while (idxH-idxL) > 1:
                iMid = int((idxL + idxH) / 2)
                M = evalpL(y, iMid, idx)

                if M > 0:
                    idxL, L = iMid, M
                else:
                    idxH, H = iMid, M

            return idxH

        def proj(idx, y):
            k = bisectsearch(idx, y)
            lam = (np.sum(y[idx[k:]]) - 1) / (d - k)
            return np.maximum(0, y - lam)

        x = np.empty_like(y)
        for i in range(len(y)):
            x[i] = proj(idx[i], y[i])
        # x = np.stack(map(proj, idx, y))

        return x

    def smooth_eval(x):
        xTLx = np.sum(x * (G.L.dot(x)))
        e = M * ((M * x.T) - Y.T)
        l2 = np.sum(e * e)
        return tau * xTLx + l2

    def smooth_grad(x):
        return 2 * ((M * (M * x.T - Y.T)).T + tau * G.L * x)

    f1 = functions.func()
    f1._eval = smooth_eval
    f1._grad = smooth_grad

    f2 = functions.func()
    f2._eval = lambda x: 0  # Indicator functions evaluate to zero.
    f2._prox = lambda x, step: proj_simplex(x)

    step = 0.5 / (1 + tau * G.lmax)
    solver = solvers.forward_backward(step=step)
    ret = solvers.solve([f1, f2], Y.copy(), solver, **kwargs)
    return ret['sol']